#!/usr/bin/env python3
# Switches workers between queues based on queue load and worker activity
import argparse
import logging
import traceback
import requests
import json
import time
import datetime
import sys
import threading
import worker_functions.constants as constants
# zookeeper
from kazoo.client import KazooClient
from kazoo.handlers.threading import KazooTimeoutError
from kazoo.protocol.states import KazooState
import kazoo.exceptions
from worker_functions.zk_client import ZkClient
import worker_functions.connection_aux_functions as cf
# === Global config ===
# setup logging (required by kazoo)
log_formatter = logging.Formatter('%(asctime)s WATCHDOG %(levelname)s %(message)s')
stderr_handler = logging.StreamHandler()
stderr_handler.setFormatter(log_formatter)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# TODO - remove debug level
logger.setLevel(logging.DEBUG)
logger.addHandler(stderr_handler)
# TODO
# parse args
def parse_args():
parser = argparse.ArgumentParser(
description='Adjusts worker configuration based on current statistics.'
)
parser.add_argument(
'-z', '--zookeeper',
help='List of zookeeper servers from where configuration will be downloaded. If port is omitted, default zookeeper port is used.',
nargs='+',
default=['127.0.0.1:2181']
)
parser.add_argument(
'-l', '--zookeeper-list',
help='File with list of zookeeper servers. One server per line.',
type=argparse.FileType('r')
)
parser.add_argument(
'-u', '--user',
help='Username for server authentication.',
default='guest' # this is for testing only!
)
parser.add_argument(
'-p', '--password',
help='Password for user authentication.',
default='guest' # this is for testing only!
)
parser.add_argument(
'--dry-run',
help='Runs calculation of priorities only once, does not switch workers.',
default=False,
action='store_true'
)
return parser.parse_args()
class WorkerWatchdog(ZkClient):
queue_request = 'http://{server}/api/queues'
minimal_message_number = 10 # minimal number of messages to process before worker can be switched
minimal_reconfiguration_time = 10 # time needed for worker to switch queue + time needed to apply configuration changes by watchdog
def __init__(self, zookeeper_servers, user='guest', password='guest', dry_run=False, mq_servers_monitoring = [], logger = logging.getLogger(__name__)):
# init zookeeper client
super().__init__(zookeeper_servers=zookeeper_servers, logger=logger)
# last time when statistics were downloaded - type datetime.datetime()
self.last_sample_time = None
# MQ server list
self.mq_servers_monitoring = mq_servers_monitoring
self.mq_servers_lock = threading.Lock()
# user and password for MQ monitoring api
self.user = user
self.password = password
# dry run for testing of priority calculation
self.dry_run = dry_run
def __del__(self):
super().__del__()
@staticmethod
def get_queue_parametric_priority(length, avg_message_time, waiting_time, n_workers, administrative_priority = 0):
"""
Calculates queue priority based on its parameters
:param length: queue length / number of messages in the queue
:param avg_message_time: average time needed to process one message from this queue
:param waiting_time: how long messages have been waiting (calculated from the time when the oldest message was added to the queue)
:param n_workers: number of workers processing the queue
:param administrative_priority: priority given by administrator to prioritize queue (default = 0, processing disabled = -1)
:return: queue priority
"""
# (length * avg_message_time) ## time needed for processing all messages in queue
return (length * avg_message_time) / (n_workers + 1) + waiting_time * (administrative_priority + 1)
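# A small worked example with illustrative values (not taken from a real deployment):
# a queue with 20 messages, 2.0 s average processing time, 30 s of waiting time,
# no assigned workers and no administrative boost gets
#   (20 * 2.0) / (0 + 1) + 30 * (0 + 1) = 70.0
# while the same queue with one worker already assigned (so waiting_time is 0) gets
#   (20 * 2.0) / (1 + 1) + 0 * (0 + 1) = 20.0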
@staticmethod
def get_queue_relative_priority(queue_priority, total_priority):
"""
Calculates queue priority in range <0, 1>
:param queue_priority: queue parametric priority
:param total_priority: priority of all queues added together
:return: queue priority relative to other queues
"""
# prevent division by zero
if not total_priority:
total_priority = 1
return queue_priority / total_priority
def zk_get_queues(self):
"""
Get queues and parameters defined in zookeeper
:return: dictionary of queues defined in zookeeper and their attributes
:raise: ZookeeperError if fails to get configurations from zookeeper
"""
# get queues with configuration in zk
try:
queues_with_config = self.zk.get_children(constants.QUEUE)
except kazoo.exceptions.NoNodeError:
self.logger.info('No processing queues are defined in the system!')
return {}
# get queue stats from zookeeper
queue_zk_stats = {}
for queue in queues_with_config:
try:
queue_zk_stats[queue] = {}
queue_zk_stats[queue]['modified'] = False
queue_zk_stats[queue]['waiting_since'] = self.zk.get(
constants.QUEUE_STATS_WAITING_SINCE_TEMPLATE.format(queue_name = queue)
)[0].decode('utf-8')
queue_zk_stats[queue]['avg_msg_time'] = self.zk.get(
constants.QUEUE_STATS_AVG_MSG_TIME_TEMPLATE.format(queue_name = queue)
)[0].decode('utf-8')
if queue_zk_stats[queue]['avg_msg_time']:
queue_zk_stats[queue]['avg_msg_time'] = float.fromhex(queue_zk_stats[queue]['avg_msg_time'])
else:
queue_zk_stats[queue]['avg_msg_time'] = 1
queue_zk_stats[queue]['administrative_priority'] = int.from_bytes(self.zk.get(
constants.QUEUE_CONFIG_ADMINISTRATIVE_PRIORITY_TEMPLATE.format(queue_name = queue))[0],
constants.ZK_INT_BYTEORDER
)
except kazoo.exceptions.NoNodeError:
self.logger.error(
'Queue {} does not have all fields defined in zookeeper!'
.format(queue)
)
self.logger.error('Skipping queue from scheduling!')
del queue_zk_stats[queue]
except ValueError:
self.logger.error(
'Wrong format of zookeeper nodes data for queue {}!'
.format(queue)
)
del queue_zk_stats[queue]
return queue_zk_stats
def mq_get_queues(self):
"""
Get queues and parameters defined in MQ broker
:return: list of queues defined in MQ
:raise: ConnectionError if fails to get status of queues from MQ servers
:raise: JSONDecodeError if fails to parse server response
"""
# check queue status using http api
response = None
self.mq_servers_lock.acquire()
try:
for server in self.mq_servers_monitoring:
try:
response = requests.get(
self.queue_request.format(server = cf.ip_port_to_string(server)),
auth=(self.user, self.password)
)
except requests.exceptions.RequestException:
self.logger.error(
'Failed to connect to broker monitoring api on server {}'
.format(cf.ip_port_to_string(server))
)
self.logger.error('Received error:\n{}'.format(traceback.format_exc()))
else:
if not response.ok:
self.logger.error(
'Failed to get queue status from server {}'
.format(cf.ip_port_to_string(server))
)
self.logger.error('Status code: {status}, message: {reason}'.format(
status = response.status_code,
reason = response.reason
))
else:
break
finally:
self.mq_servers_lock.release()
if not response or not response.ok:
raise ConnectionError('Failed to get status of queues from MQ servers!')
queue_mq_stats = json.loads(response.content)
return queue_mq_stats
def get_priorities(self, queue_zk_stats, queue_mq_stats, worker_stats):
"""
Get relative priorities of queues
:param queue_zk_stats: dictionary with queue statistics from zookeeper
:param queue_mq_stats: list of queue statistics from MQ monitoring
:param worker_stats: dictionary with worker statistics from zookeeper
:return: dictionary with relative priorities of queues
"""
# calculate priorities of queues
queue_priorities = {}
total_priority = 0
for queue in queue_mq_stats:
# check if queue is processing queue
# (have worker config associated with it)
if queue['name'] not in queue_zk_stats:
continue
n_workers = 0
for worker in worker_stats:
if worker_stats[worker]['queue'] == queue['name']:
n_workers += 1
if not queue_zk_stats[queue['name']]['waiting_since']:
waiting_time = 0
elif n_workers > 0: # queue is not waiting for processing
waiting_time = 0
else:
try:
waiting_time = (self.last_sample_time - datetime.datetime.fromisoformat(queue_zk_stats[queue['name']]['waiting_since'])).total_seconds()
except Exception:
self.logger.warning('Failed to parse waiting time of queue {}, scheduling might not be accurate!'.format(queue['name']))
waiting_time = 0
# calculate priority for the queue
queue_priorities[queue['name']] = self.get_queue_parametric_priority(
length=queue['messages'],
avg_message_time=queue_zk_stats[queue['name']]['avg_msg_time'],
waiting_time=waiting_time,
n_workers=n_workers,
administrative_priority=queue_zk_stats[queue['name']]['administrative_priority']
)
total_priority += queue_priorities[queue['name']]
# calculate relative priorities
for queue in queue_priorities:
queue_priorities[queue] = self.get_queue_relative_priority(
queue_priorities[queue],
total_priority
)
# sort queues based on priority
queue_priorities = dict(
sorted(queue_priorities.items(), key=lambda item: item[1], reverse=True)
)
return queue_priorities
def get_worker_stats(self):
"""
Get worker statistics and information from zookeeper
:return: dictionary with worker data
:raise: ZookeeperError if zookeeper connection/communication fails
"""
# get list of connected workers
try:
workers = self.zk.get_children(constants.WORKER_STATUS)
except kazoo.exceptions.NoNodeError:
self.logger.warning('Could not get worker statistics, no worker statistics are defined in zookeeper!')
return {}
if not workers:
self.logger.warning('Could not get worker statistics, no workers are connected!')
return {}
worker_stats = {}
for worker in workers:
# get worker status
try:
status = self.zk.get(constants.WORKER_STATUS_TEMPLATE.format(
worker_id = worker))[0].decode('utf-8')
except kazoo.exceptions.NoNodeError:
self.logger.warning(
'Worker {} does not have status field defined in zookeeper!'
.format(worker)
)
continue
# skip dead and failed workers
if status == constants.STATUS_DEAD:
continue
if status == constants.STATUS_FAILED:
self.logger.warning('Failed worker found! Worker id: {}'.format(worker))
# TODO
# notify admin
continue
# add worker statistics
try:
worker_stats[worker] = {}
worker_stats[worker]['modified'] = False
worker_stats[worker]['status'] = status
worker_stats[worker]['queue'] = self.zk.get(constants.WORKER_QUEUE_TEMPLATE.format(
worker_id = worker))[0].decode('utf-8')
worker_stats[worker]['unlock_time'] = self.zk.get(constants.WORKER_UNLOCK_TIME.format(
worker_id = worker))[0].decode('utf-8')
except kazoo.exceptions.NoNodeError:
self.logger.warning(
'Worker {} does not have all fields defined in zookeeper!'
.format(worker)
)
del worker_stats[worker]
continue
if not worker_stats:
self.logger.info('No running workers found!')
return worker_stats
def switch_worker_to_queue(self, worker, queue, worker_stats, queue_stats):
"""
Switches worker to queue by modifying status.
:param worker: id of worker to switch
:param queue: name of queue to switch worker to
:param worker_stats: dictionary with stats of workers obtained from zookeeper
:param queue_stats: dictionary with stats of queues obtained from zookeeper
"""
unlock_time_offset = queue_stats[queue]['avg_msg_time'] * self.minimal_message_number + self.minimal_reconfiguration_time
worker_stats[worker]['unlock_time'] = (self.last_sample_time + datetime.timedelta(seconds=unlock_time_offset)).isoformat()
worker_stats[worker]['queue'] = queue
worker_stats[worker]['modified'] = True
if queue_stats[queue]['waiting_since']:
queue_stats[queue]['waiting_since'] = ''
queue_stats[queue]['modified'] = True
self.logger.info('Switching worker {worker} to queue {queue}'.format(
worker = worker,
queue = queue
))
def set_queue_to_waiting(self, worker_stats, queue_mq_stats, queue_zk_stats):
"""
Sets queue 'waiting_since' to current time if there are messages waiting in the queue
and no workers are processing it.
:param worker_stats: dictionary with stats of workers obtained from zookeeper
:param queue_mq_stats: list of queue statistics obtained from MQ
:param queue_zk_stats: dictionary with queue statistics obtained from zookeeper
"""
# get list of queues that are being processed
processed_queues = []
for worker in worker_stats:
if worker_stats[worker]['queue'] not in processed_queues:
processed_queues.append(worker_stats[worker]['queue'])
# get list of queues with messages
queues_with_messages = []
for queue in queue_mq_stats:
if queue['messages'] and queue['name'] not in queues_with_messages:
queues_with_messages.append(queue['name'])
self.logger.debug('Processed queues: {}'.format(processed_queues))
self.logger.debug('Queues with messages: {}'.format(queues_with_messages))
# set waiting queues 'waiting_since' to time when statistics were downloaded
for queue in queue_zk_stats:
if queue in queues_with_messages and queue not in processed_queues:
if not queue_zk_stats[queue]['waiting_since']:
queue_zk_stats[queue]['waiting_since'] = self.last_sample_time.isoformat()
queue_zk_stats[queue]['modified'] = True
self.logger.info('Setting queue {queue} to waiting since {time}'.format(
queue = queue,
time = self.last_sample_time.isoformat()
))
def apply_changes(self, worker_stats, queue_stats):
"""
Apply configuration and statistics changes in zookeeper
:param worker_stats: dictionary of worker config/stats to apply
:param queue_stats: dictionary of queue stats to apply
:raise: ZookeeperError if a node does not exist or if zookeeper returns a non-zero error code
"""
# apply worker configuration changes
for worker in worker_stats:
if worker_stats[worker]['modified']:
try:
self.zk.set(
path = constants.WORKER_UNLOCK_TIME.format(worker_id = worker),
value = worker_stats[worker]['unlock_time'].encode('utf-8')
)
self.zk.set(
path = constants.WORKER_QUEUE_TEMPLATE.format(worker_id = worker),
value = worker_stats[worker]['queue'].encode('utf-8')
)
except kazoo.exceptions.ZookeeperError:
logger.error(
'Failed to update worker {} configurations in zookeeper due to zookeeper error!'
.format(worker)
)
raise
# apply queue statistics changes
for queue in queue_stats:
if queue_stats[queue]['modified']:
try:
self.zk.set(
path = constants.QUEUE_STATS_WAITING_SINCE_TEMPLATE.format(queue_name = queue),
value = queue_stats[queue]['waiting_since'].encode('utf-8')
)
except kazoo.exceptions.ZookeeperError:
logger.error(
'Failed to update queue {} statistics in zookeeper due to zookeeper error!'
.format(queue)
)
raise
def adjust_processing(self):
"""
Adjusts processing by switching workers between queues.
Checks for queue status and calculates priorities of queues based on current data.
Updates waiting time of queues.
"""
# time when the latest statistics download started
self.last_sample_time = datetime.datetime.now(datetime.timezone.utc)
# get queues and statistics from zookeeper
# can raise ZookeeperError
queue_zk_stats = self.zk_get_queues()
if not queue_zk_stats:
self.logger.info('No queues with configuration found!')
return
self.logger.debug('Queue zookeeper statistics:\n{}'.format(json.dumps(queue_zk_stats, indent=4)))
# list of queues defined in MQ and their stats
# can raise ConnectionError, JSONDecodeError
queue_mq_stats = self.mq_get_queues()
if not queue_mq_stats:
self.logger.info('No queues found in MQ!')
return
self.logger.debug('Queue MQ statistics:\n{}'.format(json.dumps(queue_mq_stats, indent=4)))
# get worker data / statistics
worker_stats = self.get_worker_stats()
if not worker_stats:
return
self.logger.debug('Worker statistics:\n{}'.format(json.dumps(worker_stats, indent=4)))
# get queue priorities
queue_priorities = self.get_priorities(queue_zk_stats, queue_mq_stats, worker_stats)
self.logger.debug('Calculated queue priorities:\n{}'.format(json.dumps(queue_priorities, indent=4)))
if not queue_priorities or not next(iter(queue_priorities.values())):
self.logger.info('All queues are empty, skipping scheduling')
return
# === calculate new system state ===
# get list of free workers
free_workers = []
for worker in worker_stats:
worker_queue = worker_stats[worker]['queue']
# worker is not assigned to any queue
if not worker_queue:
free_workers.append(worker)
# worker is processing queue with no messages
elif queue_priorities[worker_queue] == 0:
free_workers.append(worker)
# switch free workers to queues
if free_workers:
self.logger.debug('Switching free workers')
for worker in free_workers:
# switch worker to the queue with the highest priority
self.switch_worker_to_queue(
worker = worker,
queue = list(queue_priorities.keys())[0],
worker_stats = worker_stats,
queue_stats = queue_zk_stats
)
# recalculate priorities based on current configuration
queue_priorities = self.get_priorities(queue_zk_stats, queue_mq_stats, worker_stats)
self.logger.debug('New queue priorities:\n{}'.format(json.dumps(queue_priorities, indent=4)))
else:
# switch worker from the lowest-priority queue that is being processed
# to the queue with the highest priority (only one worker at a time)
self.logger.debug('Switching worker from less prioritized queue')
worker_switched = False
queue_priorities_reverse = dict(sorted(queue_priorities.items(), key=lambda item: item[1]))
for queue in queue_priorities_reverse:
for worker in worker_stats:
try:
unlock_time = datetime.datetime.fromisoformat(worker_stats[worker]['unlock_time'])
except Exception:
self.logger.warning('Failed to parse worker unlock time of worker {}, scheduling might be inaccurate!'.format(worker))
unlock_time = self.last_sample_time
# check if worker can switch
if (unlock_time - self.last_sample_time).total_seconds() > 0:
continue
# switch worker to prioritized queue
if worker_stats[worker]['queue'] == queue:
self.switch_worker_to_queue(
worker=worker,
queue=list(queue_priorities.keys())[0],
worker_stats=worker_stats,
queue_stats=queue_zk_stats
)
worker_switched = True
break
if worker_switched:
break
if self.dry_run:
return
# update queue stats in zookeeper
self.set_queue_to_waiting(worker_stats, queue_mq_stats, queue_zk_stats)
# apply configuration and statistic changes
self.apply_changes(worker_stats, queue_zk_stats)
def zk_callback_update_mq_server_list(self, servers):
"""
Updates the MQ server list when it changes
:param servers: list of servers received from zookeeper
"""
self.mq_servers_lock.acquire()
self.mq_servers_monitoring = cf.server_list(servers)
# set default ports
for server in self.mq_servers_monitoring:
if not server['port']:
server['port'] = 15672
self.mq_servers_lock.release()
def run(self):
"""
Main function of the watchdog.
:return: execution status
"""
# connect to zookeeper
try:
self.zk_connect()
except KazooTimeoutError:
self.logger.critical('Failed to connect to zookeeper!')
return 1
# register updater for mq server list
self.zk.ensure_path(constants.WORKER_CONFIG_MQ_MONITORING_SERVERS)
self.zk.ChildrenWatch(
path=constants.WORKER_CONFIG_MQ_MONITORING_SERVERS,
func=self.zk_callback_update_mq_server_list
)
# TODO
# register watchdog as main, or put it to sleep if secondary (use kazoo lease)
error_count = 0
try:
while True:
try:
self.adjust_processing()
except kazoo.exceptions.ZookeeperError:
self.logger.error('Zookeeper returned non-zero error code!')
self.logger.error('Received error:\n{}'.format(traceback.format_exc()))
except ConnectionError as e:
self.logger.error(
'{}'.format(e)
)
except json.JSONDecodeError as e:
self.logger.error('Failed to parse message queue statistics! Wrong message format!')
self.logger.error(
'Received error: {}'
.format(e)
)
except Exception as e:
self.logger.error('Unknown error has occurred!')
self.logger.error('Received error:\n{}'.format(traceback.format_exc()))
if error_count > 2:
raise
error_count += 1
else:
# reset error counter
error_count = 0
# run only once if dryrun is defined
if self.dry_run:
break
# wait for 10 seconds until next run
time.sleep(10)
except KeyboardInterrupt:
self.logger.info('Keyboard interrupt received!')
except Exception:
self.logger.error('Failed to recover!')
self.logger.error('Exiting!')
return 1
return 0
def main():
args = parse_args()
zookeeper_servers = cf.zk_server_list(args.zookeeper)
if args.zookeeper_list:
zookeeper_servers = cf.zk_server_list(args.zookeeper_list)
watchdog = WorkerWatchdog(
zookeeper_servers=zookeeper_servers,
user=args.user,
password=args.password,
dry_run=args.dry_run
)
return watchdog.run()
if __name__ == "__main__":
sys.exit(main())
# -*- coding: utf-8 -*-
"""
Utilities to use nilearn.image from nipype
"""
def ni2file(**presuffixes):
from functools import wraps
""" Pick the nibabel image container output from `f` and stores it in a file.
If the shape attribute of the container is True, will save it into a file, otherwise
will directly return the scalar value.
To know the path where it has to save the file it looks into argument
values in this order:
- `out_file` kwarg in the call to `f`,
- `out_file` kwarg in the `ni2file` decorator,
- the first argument in the function call.
In the last case a presuffix must be defined in the decorator to avoid overwriting
an existing file.
"""
def _pick_an_input_file(*args, **kwargs):
"""Assume that either the first arg or the first kwarg is an input file."""
if args:
return args[0]
else:
return list(kwargs.values())[0]
def nifti_out(f):
@wraps(f)
def wrapped(*args, **kwargs):
res_img = f(*args, **kwargs)
if isinstance(res_img, list):
if len(res_img) == 1:
res_img = res_img[0]
else:
return res_img
if not res_img.shape: # the result is a scalar value
return res_img.get_data().flatten()[0]
import os.path as op
from nipype.utils.filemanip import fname_presuffix
out_file = kwargs.get('out_file', presuffixes.pop('out_file', None))
if out_file is not None:
if not presuffixes and op.exists(out_file):
raise IOError('The file {} already exists, please add a presuffix to the '
'decorator.'.format(out_file))
out_file = fname_presuffix(out_file, **presuffixes)
else:
in_file = kwargs.get('in_file', None)
if in_file is None:
in_file = _pick_an_input_file(*args, **kwargs)
if not op.exists(in_file):
raise IOError('Expected an existing file to use as reference for'
' the output file name, got {}.'.format(in_file))
out_file = fname_presuffix(op.basename(in_file), **presuffixes)
if not out_file:
raise ValueError("Could not find a output file name for this function: "
" {}({}, {}).".format(f.__name__, *args, **kwargs))
res_img.to_filename(out_file)
return op.abspath(out_file)
return wrapped
return nifti_out
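# Minimal usage sketch with hypothetical file paths (assuming the inputs exist on disk):
#   out = mean_img('/tmp/func_4d.nii.gz')   # writes ./func_4d_mean.nii.gz and returns its absolute path
# When no `out_file` is passed, the first argument is used as the reference name
# and the decorator's presuffixes are applied to its basename.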
@ni2file(out_file='nilearn_maths.nii.gz')
def math_img(formula, out_file='', **imgs):
""" Use nilearn.image.math_img.
This function in addition allows imgs to contain numerical scalar values.
Returns
-------
out_file: str
The absolute path to the output file.
"""
import numpy as np
import nilearn.image as niimg
from six import string_types
for arg in list(imgs.keys()):
if isinstance(imgs[arg], string_types):
continue
if np.isscalar(imgs[arg]):
if arg not in formula:
raise ValueError("Could not find {} in the formula: {}.".format(arg, formula))
formula = formula.replace(arg, str(imgs[arg]))
imgs.pop(arg)
return niimg.math_img(formula=formula, **imgs)
@ni2file(suffix='_resampled')
def resample(in_file, **kwargs):
""" Use nilearn.image.resample_img.
Returns
-------
out_file: str
The absolute path to the output file.
"""
import nilearn.image as niimg
return niimg.resample_img(img=in_file, **kwargs)
@ni2file(suffix='_resampled')
def resample_to_img(in_file, target, **kwargs):
""" Use nilearn.image.resample_to_img.
Returns
-------
out_file: str
The absolute path to the output file.
"""
import nilearn.image as niimg
return niimg.resample_to_img(source_img=in_file, target_img=target, **kwargs)
@ni2file(out_file='concat_img.nii.gz')
def concat_imgs(in_files, out_file=None):
""" Use nilearn.image.concat_imgs to concat images of up to 4 dimensions.
Returns
-------
out_file: str
The absolute path to the output file.
"""
import nilearn.image as niimg
return niimg.concat_imgs(in_files)
@ni2file(out_file='concat_img.nii.gz')
def concat_3D_imgs(in_files, out_file=None):
""" Use nilearn.image.concat_imgs to concat 3D volumes into one 4D volume.
If `in_files` is a list of 3D volumes the return value is the path to one 4D volume.
Else if `in_files` is a list of 4D volumes the return value is `in_files`.
Returns
-------
out_file: str
The absolute path to the output file.
"""
import nilearn.image as niimg
from nilearn._utils import check_niimg_3d
all_3D = True
for idx, img in enumerate(in_files):
try:
_ = check_niimg_3d(img)
except Exception:
all_3D = False
break
if not all_3D:
#raise AttributeError('Expected all input images to be 3D volumes, but '
# ' at least the {}th is not.'.format(idx))
return in_files
else:
return niimg.concat_imgs(in_files)
@ni2file(suffix='_mean')
def mean_img(in_file, out_file=None):
""" Use nilearn.image.mean_img.
Returns
-------
out_file: str
The absolute path to the output file.
"""
import nilearn.image as niimg
return niimg.mean_img(in_file)
@ni2file(suffix='_smooth')
def smooth_img(in_file, fwhm, out_file=None):
""" Use nilearn.image.smooth_img.
Returns
-------
out_file: str
The absolute path to the output file.
"""
import nilearn.image as niimg
return niimg.smooth_img(in_file, fwhm=fwhm)
@ni2file(suffix='')
def copy_header(in_file, data_file, out_file=None):
""" Use nilearn.image.new_img_like to copy the header
from `in_file` to `data_file` and return the result.
Returns
-------
out_file: str
The absolute path to the output file.
"""
import nilearn.image as niimg
img = niimg.load_img(data_file)
return niimg.new_img_like(in_file, img.get_data(),
affine=img.affine, copy_header=True)
from .Specification import Specification, TypedProperty
__all__ = ['RelationshipSpecification', 'ParentGroupingRelationship',
'WorkflowRelationship']
class RelationshipSpecification(Specification):
"""
RelationshipSpecifications are used mainly with \ref
python.implementation.ManagerInterfaceBase.ManagerInterfaceBase.getRelatedReferences
"ManagerInterface.getRelatedReferences", in order to describe the kind of
relation that is being requested, when a simple EntitySpecification will not
suffice.
"""
_prefix = "core.relationship"
class ParentGroupingRelationship(RelationshipSpecification):
"""
This relationship can be used to query the organisational parent of any given
entity. If called with an asset, or a version, etc.. it should give the Shot,
Sequence or other part of the hierarchy under which the Entity resides. If
called with some other group entity, then it should return the logical parent
of that group. For example, a Shot may return a Sequence, etc...
This is essential to allow cross-discipline publishing to take place. For
example, to determine the Shot that an asset resides under, so that a
different kind of asset can be published to the same place.
If your asset system adds intermediate sub-groups underneath something
logically analogous to a 'Shot', (for example a 'compositing task', etc..)
then this should not be considered when determining the 'parent'.
Alternatively, if you do consider it, you may need additional logic in
'register' to verify the target task is suitable for the published asset.
For example, an ImageSpecification asset published under an 'editorial'
task type, under a Shot, may use this query to find the Shot that a Nuke
Script should be published to in order to perform 'comp work' for that Shot.
"""
_type = "grouping.parent"
## @todo think of a better name
class WorkflowRelationship(RelationshipSpecification):
"""
A workflow relationship is used to build tracks of related media, etc... in
timeline contexts. The relationship is defined by a criteria string, (usually
supplied by a Manager UI element), that describes the relationship. For
example, it might be 'latest approved comps, by Bob'.
"""
_type = "workflow"
criteria = TypedProperty(str, doc="A description of the relationship that "
+"makes sense to the manager. This is generally derived from the "
+"manager itself, so could be any serialised/understood string.")
from .shield import Shield
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from future.builtins import (ascii, bytes, chr, dict, filter, hex, input,
int, map, next, oct, open, pow, range, round,
str, super, zip)
from subprocess import Popen, PIPE
def run_command(cmd, cwd=None, env=None, throw=True, verbose=False, print_errors=True):
def say(*args):
if verbose:
print(*args)
say(cmd)
if not isinstance(cmd, list):
cmd = cmd.split()
process = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=cwd, env=env)
result, err = process.communicate()
if not isinstance(result, str):
result = ''.join(map(chr, result))
result = result.strip()
say(result)
if process.returncode != 0:
if not isinstance(err, str):
err = ''.join(map(chr, err))
err_msg = ' '.join(cmd) + ' finished with error ' + err.strip()
if throw:
raise RuntimeError(err_msg)
elif print_errors:
print(err_msg)
return result, process.returncode
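# Minimal usage sketch (hypothetical command and paths):
#   output, returncode = run_command('git status', cwd='/tmp/repo', throw=False, verbose=True)
# With throw=True (the default) a non-zero exit status raises RuntimeError instead of
# returning the error code.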
#!/usr/bin/env python3
from sys import argv, stderr, stdout
from bakery import render_path
if len(argv) != 2:
stderr.write("Usage: render.py NAME\n")
exit(1)
stdout.write(render_path(argv[1]))
#!/usr/bin/python3
# Convert a binary to a fake "standard" assembler file
import sys
import struct
with open(sys.argv[1], "rb") as f:
code = f.read()
for i in range(0, len(code), 2):
print(" data $%04x" %struct.unpack(">H", code[i:i+2]))
# Minimum number in a rotated array
# Moving the first several elements of an array to its end is called a rotation of the array.
# Given a rotation of a non-decreasingly sorted array, output the minimum element of the rotated array.
# For example, the array {3,4,5,1,2} is a rotation of {1,2,3,4,5}, and its minimum is 1.
# NOTE: all given elements are greater than 0; if the array size is 0, return 0.
class Solution:
def minNumberInRotateArray(self, rotateArray):
minNum = 0
# First approach: iterate over all elements and find the minimum
for i in range(0, len(rotateArray)):
minNum = minNum if minNum < rotateArray[i] and minNum != 0 else rotateArray[i]
return minNum
# Binary search
# For use on a sorted array
def bSearch(self, array, target):
left = 0
right = len(array) - 1
while left < right:
# Shift right by 1 bit, equivalent to dividing by 2
mid = (left + right) >> 1
if target == array[mid]:
return mid
if target > array[mid]:
left = mid + 1
else:
right = mid - 1
return None
# For use on a sorted array
def minNumberInRotateArray2(self, rotateArray):
if not rotateArray:
return None
left = 0
right = len(rotateArray) - 1
while left <= right:
middle = (left + right) >> 1
# If middle is smaller than the element before it, it is the minimum
if rotateArray[middle] < rotateArray[middle - 1]:
return rotateArray[middle]
elif rotateArray[middle] < rotateArray[right]:
right = middle - 1
else:
left = middle + 1
return 0
# Binary search (the code below is incorrect)
# For use on a sorted array
def minNumberInRotateArray3(self, rotateArray):
if not rotateArray:
return None
left = 0
right = len(rotateArray) - 1
while left < right:
middle = (left + right) >> 1
# If middle is smaller than the element before it, it is the minimum
if rotateArray[middle] < rotateArray[middle - 1]:
return rotateArray[middle]
elif abs(rotateArray[left] - rotateArray[middle]) < abs(rotateArray[right] - rotateArray[middle]):
left = middle + 1
elif abs(rotateArray[left] - rotateArray[middle]) > abs(rotateArray[right] - rotateArray[middle]):
right = middle - 1
return 0
if __name__ == '__main__':
print(Solution().minNumberInRotateArray2([3,4,5,2,3,6,7,8]))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from redis import ConnectionPool, StrictRedis
class RedisQ(object):
redis_client = StrictRedis
def __init__(self, connection_pool=None, url=None, **connection_params):
if url:
connection_pool = ConnectionPool.from_url(
url, decode_components=True
)
elif connection_pool is None:
connection_pool = ConnectionPool(**connection_params)
self.pool = connection_pool
self.conn = self.redis_client(connection_pool=connection_pool)
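# Minimal usage sketch (hypothetical connection parameters):
#   q = RedisQ(url='redis://localhost:6379/0')       # build the pool from a URL
#   q = RedisQ(host='localhost', port=6379, db=0)    # or pass ConnectionPool kwargs directly
# q.conn is a StrictRedis client bound to the shared connection pool in q.pool.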
from importlib import import_module
import logging
import os
import subprocess
import sys
from typing import Any, Sequence
from alembic import script
from alembic.migration import MigrationContext
from flogging import flogging
from mako.template import Template
from sqlalchemy import any_, create_engine
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql import operators, Values
from sqlalchemy.sql.compiler import OPERATORS
from sqlalchemy.sql.elements import BinaryExpression, BindParameter, Grouping
from sqlalchemy.sql.operators import ColumnOperators, in_op, notin_op
from athenian.precomputer.db import always_unequal, create_base # noqa: F401
class TupleWrapper(Sequence):
"""Pretend to be a sequence, wrap each element in a tuple."""
__slots__ = ("_items",)
def __init__(self, items: Sequence):
"""Initialize a new instance of TupleWrapper over `items`."""
self._items = items
def __len__(self):
"""Return the length of the underlying sequence."""
return len(self._items)
def __getitem__(self, item: int) -> Any:
"""Return element by index wrapped in a tuple."""
return (self._items[item],)
@compiles(BinaryExpression)
def compile_binary(binary, compiler, override_operator=None, **kw):
"""
If there are more than 10 elements in the `IN` set, inline them to avoid hitting the limit of \
the number of query arguments in Postgres (1<<15).
""" # noqa: D200
operator = override_operator or binary.operator
if operator is not in_op and operator is not notin_op:
return compiler.visit_binary(binary, override_operator=override_operator, **kw)
if isinstance(binary.right, BindParameter):
right_len = len(binary.right.value)
else:
right_len = 0
if right_len >= 10:
left = compiler.process(binary.left, **kw)
kw["literal_binds"] = True
use_any = getattr(binary, "any_values", False) and compiler.dialect.name == "postgresql"
negate = use_any and operator is notin_op
if use_any:
# ANY(VALUES ...) seems to be performing the best among these three:
# 1. IN (...)
# 2. IN(ARRAY[...])
# 3. IN(VALUES ...)
right = any_(Grouping(Values(
binary.left, literal_binds=True,
).data(TupleWrapper(binary.right.value))))
operator = operators.eq
else:
right = binary.right
right = compiler.process(right, **kw)
sql = left + OPERATORS[operator] + right
if negate:
sql = "NOT (%s)" % sql
return sql
elif operator is in_op and right_len == 1:
# IN (<value>) -> = <value>
return compiler.process(binary.left == binary.right.value[0], **kw)
return compiler.visit_binary(binary, override_operator=override_operator, **kw)
def in_any_values(self: ColumnOperators, other):
"""Implement = ANY(VALUES (...), (...), ...) PostgreSQL operator."""
expr = self.in_(other)
expr.any_values = True
return expr
def notin_any_values(self: ColumnOperators, other):
"""Implement NOT = ANY(VALUES (...), (...), ...) PostgreSQL operator."""
expr = self.notin_(other)
expr.any_values = True
return expr
ColumnOperators.in_any_values = in_any_values
ColumnOperators.notin_any_values = notin_any_values
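# Minimal usage sketch (hypothetical table and column, assuming a PostgreSQL dialect):
#   stmt = sqlalchemy.select(my_table).where(my_table.c.node_id.in_any_values([1, 2, 3]))
# With 10 or more values on PostgreSQL this compiles to `node_id = ANY(VALUES (1), (2), (3), ...)`
# with the values inlined; otherwise the standard IN rendering is used.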
flogging.trailing_dot_exceptions.add("alembic.runtime.migration")
class DBSchemaMismatchError(Exception):
"""Error raised if the DB schema versions do not match."""
def check_alembic_schema_version(name: str, conn_str: str, log: logging.Logger) -> None:
"""Raise DBSchemaVersionMismatchError if the real (connected) DB schema version \
does not match the required (declared in the code) version."""
template = import_module("%s.%s" % (__package__, name)).template
directory = script.ScriptDirectory(str(template.parent))
engine = create_engine(conn_str.split("?", 1)[0])
with engine.begin() as conn:
context = MigrationContext.configure(conn)
real_rev = context.get_current_revision()
req_rev = directory.get_current_head()
if real_rev != req_rev:
raise DBSchemaMismatchError(
"%s version: required: %s connected: %s" % (conn_str, req_rev, real_rev))
log.info("%s DB schema version: %s", name, real_rev)
def check_collation(conn_str: str) -> None:
"""Force the PostgreSQL collation to be "C"."""
engine = create_engine(conn_str.split("?", 1)[0])
if engine.dialect.name != "postgresql":
return
collation = engine.scalar(
"select datcollate from pg_database where datname='%s';" % engine.url.database)
if collation.lower() != "c.utf-8":
raise DBSchemaMismatchError(
"%s collation: required: C.UTF-8 connected: %s" % (conn_str, collation))
def migrate(name: str, url=None, exec=True):
"""
Migrate a database with alembic.
This script creates all the tables if they don't exist and migrates the DB to the most
recent version. It is to simplify the deployment.
As a bonus, you obtain a functional Alembic INI config for any `alembic` commands.
"""
root = import_module("%s.%s" % (__package__, name))
template_file_name = root.template
path = template_file_name.parent
with open("alembic.ini", "w") as fout:
fout.write(Template(filename=str(template_file_name)).render(url=url, path=path))
args = [sys.executable, sys.executable, "-m", "athenian.api.sentry_wrapper",
"alembic.config", "upgrade", "head"]
if os.getenv("OFFLINE"):
args.append("--sql")
if exec:
os.execlp(*args)
else:
subprocess.run(args[1:], check=True)
from common import *
DEBUG = True
TEMPLATE_DEBUG = DEBUG
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django_mongodb_engine', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'intranet_test', # Or path to database file if using sqlite3.
}
}
COMPRESS_ENABLED = True
COMPRESS_REBUILD_TIMEOUT = 1
CELERY_ALWAYS_EAGER = True
INSTALLED_APPS.append('devserver')
import logging
import os
import sys
import time
from typing import Union
from io import IOBase
from .base import Client
from tftpy.shared import TIMEOUT_RETRIES
from tftpy.packet import types
from tftpy.exceptions import TftpException,TftpTimeout,TftpFileNotFoundError
from tftpy.states import SentReadRQ,SentWriteRQ
logger = logging.getLogger('tftpy.context.client')
class Upload(Client):
"""The upload context for the client during an upload.
Note: If input is a hyphen, then we will use stdin."""
def __init__(self, host: str, port: int, timeout: int,
input: Union[IOBase,str], **kwargs) -> None:
"""Upload context for uploading data to a server.
Args:
host (str): Server Address
port (int): Server Port
timeout (int): socket timeout
input ([IOBase,str]): Input data, can be one of
- An open file object
- A path to a file
- a '-' indicating read from STDIN
"""
super().__init__(host, port, timeout, **kwargs)
# If the input object has a read() function, assume it is file-like.
if hasattr(input, 'read'):
self.fileobj = input
elif input == '-':
self.fileobj = sys.stdin
else:
self.fileobj = open(input, "rb")
logger.debug("tftpy.context.client.upload.__init__()")
logger.debug(f" file_to_transfer = {self.file_to_transfer}, options = {self.options}")
def start(self) -> None:
"""Main loop to read data in and send file to the server."""
logger.info(f"Sending tftp upload request to {self.host}")
logger.info(f" filename -> {self.file_to_transfer}")
logger.info(f" options -> {self.options}")
self.metrics.start_time = time.time()
logger.debug(f"Set metrics.start_time to {self.metrics.start_time}")
pkt = types.WriteRQ()
pkt.filename = self.file_to_transfer
pkt.mode = self.mode
pkt.options = self.options
self.send(pkt)
self.state = SentWriteRQ(self)
while self.state:
try:
logger.debug(f"State is {self.state}")
self.cycle()
except TftpTimeout as err:
logger.error(str(err))
self.retry_count += 1
if self.retry_count >= TIMEOUT_RETRIES:
logger.debug("hit max retries, giving up")
raise
else:
logger.warning("resending last packet")
self.state.resend_last()
def end(self, *args):
"""Finish up the context."""
super().end()
self.metrics.end_time = time.time()
logger.debug(f"Set metrics.end_time to {self.metrics.end_time}")
self.metrics.compute()
class Download(Client):
"""The download context for the client during a download.
Note: If output is a hyphen, then the output will be sent to stdout."""
def __init__(self, host: str, port: int, timeout: int,
output: Union[IOBase,str], **kwargs) -> None:
"""Initalize the Download context with the server and
where to save the data
Args:
host (str): Server Address
port (int): Server port
timeout (int): Socket Timeout
output (Union[IOBase,str]): Output data, can be one of
- An open file object
- A path to a file
- '-' indicating write to STDOUT
Raises:
TftpException: unable to open the destination file for writing
"""
super().__init__(host, port, timeout, **kwargs)
self.filelike_fileobj = False
# If the output object has a write() function, assume it is file-like.
if hasattr(output, 'write'):
self.fileobj = output
self.filelike_fileobj = True
# If the output filename is -, then use stdout
elif output == '-':
self.fileobj = sys.stdout
self.filelike_fileobj = True
else:
try:
self.fileobj = open(output, "wb")
except OSError as err:
raise TftpException("Could not open output file", err)
logger.debug("tftpy.context.client.Download.__init__()")
logger.debug(f" file_to_transfer = {self.file_to_transfer}, options = {self.options}")
def start(self) -> None:
"""Initiate the download.
Raises:
TftpTimeout: Failed to connect to the server
TftpFileNotFoundError: Received a File not found error
"""
logger.info(f"Sending tftp download request to {self.host}")
logger.info(f" filename -> {self.file_to_transfer}")
logger.info(f" options -> {self.options}")
self.metrics.start_time = time.time()
logger.debug(f"Set metrics.start_time to {self.metrics.start_time}")
pkt = types.ReadRQ()
pkt.filename = self.file_to_transfer
pkt.mode = self.mode
pkt.options = self.options
self.send(pkt)
self.state = SentReadRQ(self)
while self.state:
try:
logger.debug(f"State is {self.state}")
self.cycle()
except TftpTimeout as err:
logger.error(str(err))
self.retry_count += 1
if self.retry_count >= TIMEOUT_RETRIES:
logger.debug("hit max retries, giving up")
raise TftpTimeout("Max retries reached")
else:
logger.warning("resending last packet")
self.state.resend_last()
except TftpFileNotFoundError as err:
# If we received file not found, then we should not save the open
# output file or we'll be left with a size zero file. Delete it,
# if it exists.
logger.error("Received File not found error")
if self.fileobj is not None and not self.filelike_fileobj and os.path.exists(self.fileobj.name):
logger.debug(f"unlinking output file of {self.fileobj.name}")
os.unlink(self.fileobj.name)
raise TftpFileNotFoundError(err)
def end(self) -> None:
"""Finish up the context."""
super().end(not self.filelike_fileobj)
self.metrics.end_time = time.time()
logger.debug(f"Set metrics.end_time to {self.metrics.end_time}")
self.metrics.compute()
# Copyright 2018-2019 the Deno authors. All rights reserved. MIT license.
import os
import sys
import third_party
from util import run_output, build_path
out_filename = sys.argv[1]
args_list = run_output([
third_party.gn_path, "args",
build_path(), "--list", "--short", "--overrides-only"
],
quiet=True,
env=third_party.google_env(),
exit_on_fail=True).out
with open(out_filename, "w") as f:
f.write(args_list)
# Copyright (c) 2019 Nikita Tsarev
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""This module contains code for exporting to ejudge."""
import os
import sys
import io
import tarfile
import base64
import shlex
from shutil import copy2
TEST_PAT = "%02d"
CORR_PAT = "%02d.a"
CHECK_CMD = "check"
PATCHER = """#!/bin/sh
ME=$(readlink -f "$0")
DIR=$(dirname "$ME")
SRC="$DIR/contest.cfg"
TGT="$DIR/conf/serve.cfg"
sed '/PYGON_CONTEST_START/,/PYGON_CONTEST_END/d' "$TGT" > "$TGT.new"
echo >> "$TGT.new"
cat "$SRC" >> "$TGT.new"
mv "$TGT.new" "$TGT"
"""
def generate_config(problem, language=None, prefix=None):
f = io.StringIO()
print("[problem]", file=f)
if problem.input_file.stdio:
print("use_stdin = 1", file=f)
else:
print("use_stdin = 0", file=f)
print("input_file = \"{}\"".format(problem.input_file), file=f)
if problem.output_file.stdio:
print("use_stdout = 1", file=f)
else:
print("use_stdout = 0", file=f)
print("output_file = \"{}\"".format(problem.output_file), file=f)
print("use_corr = 1", file=f)
print("enable_testlib_mode = 1", file=f)
print("time_limit_millis = {}".format(round(1000 * problem.time_limit)), file=f)
mem_limit = "{}M".format(round(problem.memory_limit))
print("max_vm_size = {}".format(mem_limit), file=f)
print("max_stack_size = {}".format(mem_limit), file=f)
if prefix:
print("short_name = \"{}\"".format(prefix), file=f)
for i in problem.get_statements():
if i.language == language or language is None:
print("long_name = \"{}\"".format(i.name), file=f)
break
print("internal_name = \"{}\"".format(problem.internal_name), file=f)
print("test_pat = \"{}\"".format(TEST_PAT), file=f)
print("corr_pat = \"{}\"".format(CORR_PAT), file=f)
print("check_cmd = \"{}\"".format(CHECK_CMD), file=f)
f.seek(0)
return f.read()
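# For illustration, with made-up problem attributes (file I/O, 1.5 s / 256 MB limits,
# prefix "A") the generated block would look roughly like:
#   [problem]
#   use_stdin = 0
#   input_file = "input.txt"
#   use_stdout = 0
#   output_file = "output.txt"
#   use_corr = 1
#   enable_testlib_mode = 1
#   time_limit_millis = 1500
#   max_vm_size = 256M
#   max_stack_size = 256M
#   short_name = "A"
#   long_name = "Example Problem"
#   internal_name = "example-problem"
#   test_pat = "%02d"
#   corr_pat = "%02d.a"
#   check_cmd = "check"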
def export_problem(problem, target, language=None, prefix=None):
"""Exports problem to target directory."""
os.makedirs(target, exist_ok=True)
os.makedirs(os.path.join(target, "tests"), exist_ok=True)
with open(os.path.join(target, "problem.cfg"), "w") as f:
f.write(generate_config(problem, language=language, prefix=prefix))
copy2(problem.active_checker.get_executable_path(),
os.path.join(target, CHECK_CMD))
main = problem.get_main_solution()
for test in problem.get_solution_tests():
copy2(test.get_input_path(),
os.path.join(target, "tests", TEST_PAT % test.index))
copy2(test.get_output_path(main.identifier),
os.path.join(target, "tests", CORR_PAT % test.index))
def export_contest(contest, target, language=None):
"""Exports contest to target directory."""
os.makedirs(target, exist_ok=True)
os.makedirs(os.path.join(target, "problems"), exist_ok=True)
for prefix, problem in contest.problems:
export_problem(problem,
os.path.join(target, "problems", problem.internal_name),
language=language, prefix=prefix)
last_id = 0
with open(os.path.join(target, "contest.cfg"), "w") as f:
print("# PYGON_CONTEST_START", file=f)
for prefix, problem in contest.problems:
cfg = generate_config(problem, language=language, prefix=prefix)
f.write(cfg)
last_id += 1
print("id = {}".format(last_id), file=f)
print(file=f)
print("# PYGON_CONTEST_END", file=f)
with open(os.path.join(target, "patch.sh"), "w") as f:
f.write(PATCHER)
def write_script(target, contest_dir=None, fd=sys.stdout):
archive = io.BytesIO()
with tarfile.open(fileobj=archive, mode="w:gz") as f:
f.add(target, arcname=".")
if contest_dir:
print("""#!/bin/sh
OLDPATH="$(pwd)"
cd %s
""" % shlex.quote(contest_dir), file=fd)
else:
print("""#!/bin/sh
if [ "x$1" = "x" ]; then
echo "Usage: $0 <contest directory>"
exit 1
fi
OLDPATH="$(pwd)"
cd "$1"
""", file=fd)
print("""
rm -rf problems contest.cfg patch.sh
cat << _EOF | base64 -d | tar xz""", file=fd)
archive.seek(0)
print(base64.b64encode(archive.read()).decode(), file=fd)
print("""_EOF
sh ./patch.sh
cd "$OLDPATH"
""", file=fd)
from django.test import TestCase
from _apis.models import Telegram
from _setup.models import Config
from random import randint
class TelegramTestCase(TestCase):
def test_message(self):
if Telegram().setup_done:
messages = Config('UNITTESTS.TELEGRAM_TEST_MESSAGES').value
selected_message = randint(0, len(messages)-1)
response = Telegram().message(messages[selected_message])
self.assertEqual(response, True)
Labels = (
('help_wanted', 'Help Wanted'),
('idea', 'Idea'),
('something_interesting', 'Something Interesting'),
)
from celery import Celery # noqa
"""
app = Celery('tasks', broker='pyamqp://guest@localhost//')
@app.task
def add(x, y):
return x + y
"""
from django.core.exceptions import ValidationError
from django.test import TestCase
from model_bakery import baker
from game_hub.accounts.models import GameHubUser
from game_hub.games.models import Game, Comment, LikeGame
class TestGameCreateModel(TestCase):
def test_game_title_validator_only_letters_numbers_and_underscore_with_valid_data(self):
game = baker.make(Game)
game.title = 'Wow'
self.assertEqual('Wow', game.title)
def test_game_title_validator_only_letters_numbers_and_underscore_with_invalid_data_contain_sign_percent(self):
game = baker.make(Game)
game.title = 'Wow%'
with self.assertRaises(ValidationError) as context:
game.full_clean()
game.save()
self.assertIsNotNone(context.exception)
class TestCommentModel(TestCase):
def setUp(self) -> None:
self.test_user = GameHubUser(
email='[email protected]',
password=123
)
def test_create_comment_with_valid_data(self):
comment = Comment(
comment='Hello'
)
self.assertEqual('Hello', comment.comment)
def test_create_comment_with_invalid_data_contain_sign_percent(self):
comment = Comment(
comment='Hello%',
user=self.test_user)
with self.assertRaises(ValidationError) as context:
comment.full_clean()
comment.save()
self.assertIsNotNone(context.exception)
class TestLikeModel(TestCase):
def setUp(self) -> None:
self.test_user = GameHubUser(
email='[email protected]',
password=123
)
self.test_game = Game(
title='Wow',
category='Action',
max_level=5,
description='!!!',
)
def test_create_like(self):
like = LikeGame(
user=self.test_user,
)
self.assertIsNotNone(like)
from collections import defaultdict
from random import randint
from linked_list import LinkedList
import numpy as np
class Generator(object):
demand_bound = None
dmin = None
dmax = None
omin = None
omax = None
options = None
time_span = None
demand = None
number_of_different_solutions = None
possibles = None
already_generated = None
def __init__(self):
self.options = defaultdict(list)
self.possibles = list()
self.already_generated = list()
def generate_demand(self, timespan, demand_bound):
result = list()
for day in range(0, timespan):
result.append(randint(0, demand_bound))
return result
def generate_parameters(self, timespan=None, offdaysmin=None, offdaysmax=None, ondaysmin=None, ondaysmax=None, demand=None):
dummy = [5, 7, 14]
timespan = dummy[randint(0, len(dummy) - 1)] if timespan is None else timespan
offdaysmin = randint(0, timespan) if offdaysmin is None else offdaysmin
if offdaysmin == 0:
offdaysmax = randint(1, timespan) if offdaysmax is None else offdaysmax
else:
offdaysmax = randint(offdaysmin, timespan) if offdaysmax is None else offdaysmax
ondaysmin = randint(0, timespan) if ondaysmin is None else ondaysmin
if ondaysmin == 0:
ondaysmax = randint(1, timespan) if ondaysmax is None else ondaysmax
else:
ondaysmax = randint(ondaysmin, timespan) if ondaysmax is None else ondaysmax
demand_bound = 2*timespan
demand = self.generate_demand(timespan, demand_bound) if demand is None else demand
return timespan, offdaysmin, offdaysmax, ondaysmin, ondaysmax, demand
def get_number_of_different_solutions(self):
if self.number_of_different_solutions is None:
self.number_of_different_solutions = 1
for index, demand_day in enumerate(self.demand):
number_of_options = len(self.options[index])
power = pow(number_of_options, demand_day)
self.number_of_different_solutions *= power
return self.number_of_different_solutions
def filter_and_set_options(self, options):
for option in options:
for col in range(0, len(option)):
if option[col] == 1:
self.options[col].append(option)
def generate_initial_working_schedules(self, length, last_number=None, consecutive_numbers=0):
if length == 0:
return []
if last_number is None:
return [0, self.generate_initial_working_schedules(length - 1, 0, 1)], \
[1, self.generate_initial_working_schedules(length - 1, 1, 1)]
if last_number == 1:
if consecutive_numbers < self.dmin:
return [1, self.generate_initial_working_schedules(length - 1, 1, consecutive_numbers + 1)]
elif consecutive_numbers >= self.dmax:
return [0, self.generate_initial_working_schedules(length - 1, 0, 1)]
else:
return [1, self.generate_initial_working_schedules(length - 1, 1, consecutive_numbers + 1)], \
[0, self.generate_initial_working_schedules(length - 1, 0, 1)]
elif last_number == 0:
if consecutive_numbers < self.omin:
return [0, self.generate_initial_working_schedules(length - 1, 0, consecutive_numbers + 1)]
elif consecutive_numbers >= self.omax:
return [1, self.generate_initial_working_schedules(length - 1, 1, 1)]
else:
return [0, self.generate_initial_working_schedules(length - 1, 0, consecutive_numbers + 1)], \
[1, self.generate_initial_working_schedules(length - 1, 1, 1)]
def generate_solution_id(self, solution):
thing = solution.values
larger = ""
for t in thing:
larger += str(t)
larger = larger.replace(' ', '').replace('[', '').replace(']', '').replace('\n', '')
fingerprint = int(larger, 2)
return fingerprint
def is_valid(self, permutation):
max_conseq_ones = 0
min_conseq_ones = np.inf
max_conseq_zeros = 0
min_conseq_zeros = np.inf
conseq_ones = 0
conseq_zeros = 0
previous_element = None
for element in permutation:
if element == 0:
conseq_zeros += 1
else:
conseq_ones += 1
if previous_element is not None and element != previous_element:
if previous_element == 1:
if max_conseq_ones < conseq_ones:
max_conseq_ones = conseq_ones
if min_conseq_ones > conseq_ones:
min_conseq_ones = conseq_ones
conseq_ones = 0
else:
if max_conseq_zeros < conseq_zeros:
max_conseq_zeros = conseq_zeros
if min_conseq_zeros > conseq_zeros:
min_conseq_zeros = conseq_zeros
conseq_zeros = 0
previous_element = element
if max_conseq_ones < conseq_ones:
max_conseq_ones = conseq_ones
if min_conseq_ones > conseq_ones > 0:
min_conseq_ones = conseq_ones
if max_conseq_zeros < conseq_zeros:
max_conseq_zeros = conseq_zeros
if min_conseq_zeros > conseq_zeros > 0:
min_conseq_zeros = conseq_zeros
bool1 = min_conseq_zeros >= self.omin
bool2 = max_conseq_zeros <= self.omax
bool3 = min_conseq_ones >= self.dmin
bool4 = max_conseq_ones <= self.dmax
return bool1 and bool2 and bool3 and bool4
def flatten(self, element, ll):
if isinstance(element, list):
if len(element) == 0:
res = ll.couple()
self.possibles.append(res)
else:
head = element[0]
tail = element[1]
ll.add(head)
self.flatten(tail, ll)
elif isinstance(element, tuple):
# split the linked list
fst = element[0]
snd = element[1]
newll1 = LinkedList(ll)
newll2 = LinkedList(ll)
self.flatten(fst, newll1)
self.flatten(snd, newll2)
elif isinstance(element, int):
ll.add(element)
from math import gcd
def lcm(*num):
num = list(num)
if isinstance(num[0], list):
num = num[0]
len_num = len(num)
if len_num < 2:
return None
elif len_num == 2:
a = num[0]
b = num[1]
return int(a * b / gcd(a, b))
else:
num[0] = lcm(num[0], num[-1])
num.pop(-1)
return lcm(num)
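# Quick sanity check (illustrative values):
#   lcm(4, 6)       -> 12
#   lcm([4, 6, 10]) -> 60  (the list form folds pairwise via gcd)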
# Puzzle Input
with open('Day13_Input.txt') as puzzle_input:
bus_info = puzzle_input.read().split('\n')
# Get the bus IDs from the second line of the input (the departure time is not needed here)
bus_id = bus_info[1].split(',')
# Get the indexes for the buses; each has to leave at a timestamp t such that t + index = k * ID, where k is an integer
delta_t = []
for index, ID in enumerate(bus_id):
if ID != 'x':
delta_t += [index]
# Remove the x's
while 'x' in bus_id:
bus_id.remove('x')
# Convert the IDs to integers
bus_id = list(map(int, bus_id))
# Find the timestamp that satisfies all the requirements
time_add = bus_id[0]
timestamp = 0
for index, ID in enumerate(bus_id[1:]):
while (timestamp + delta_t[index + 1]) % ID != 0: # While the timestamp doesn't work for the next bus:
timestamp += time_add # Add to the timestamp a value that will maintain the solution for the previous buses
time_add = lcm(time_add, ID) # That value must be a multiple of the IDs of the buses for which we have solutions,
# because that way we keep the solution: t + index = k * ID, where k is an integer
print(timestamp) # (because time_add is a multiple of all solved IDs)
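# Tiny worked example (illustrative, not from the real puzzle input): for IDs 7 and 13
# at offsets 0 and 1 the loop finds t = 77, since 77 % 7 == 0 and (77 + 1) % 13 == 0;
# time_add then becomes lcm(7, 13) = 91 before any further buses are processed.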
import pandas as pd
# Define a list of date-format strings
dates = ['2019-01-01','2020-03-01','2021-06-01']
# Convert the string array to pandas Timestamps
ts_dates = pd.to_datetime(dates)
print(ts_dates)
# Convert Timestamps to Periods
pr_day = ts_dates.to_period(freq='D')
print(pr_day)
pr_month = ts_dates.to_period(freq='M')
print(pr_month)
pr_year = ts_dates.to_period(freq='A')
print(pr_year)
__id__ = "$Id: orthogonality.py 155 2007-08-31 20:11:54Z jlconlin $"
__author__ = "$Author: jlconlin $"
__version__ = " $Revision: 155 $"
__date__ = "$Date: 2007-08-31 14:11:54 -0600 (Fri, 31 Aug 2007) $"
"""This script will investigate how orthogonal my basis vectors really are. """
import random
import math
import time
import scipy
import Gnuplot
import fissionBank
import fissionSource
import Markov
import Geometry
import CrossSection
import arnoldiMC
import powerMC
class Ortho(object):
def __init__(self, iterations, histories):
self.halfwidth = 0.5
self.iterations = iterations
self.histories = histories
self.xs = CrossSection.CrossSection(xS=0.5, nu=1.0, xF=0.5, xG=0.0)
def Orthogonal(self, hw, bins):
"""
Orthogonal will run Arnoldi's method and determine if the basis vectors
are orthogonal.
hw: halfwidth of geometry
bins: number of spatial bins
"""
geo = Geometry.Geometry(bins, [[-hw,hw]])
mark = Markov.Markov(geo, self.xs, self.histories)
amc = arnoldiMC.arnoldiMC(mark)
uSource = fissionSource.histogramSource(scipy.ones(bins))
Values, Vectors = amc.ERAM(uSource, 5, self.iterations)
# Values, Vectors = amc.arnoldi(uSource, self.iterations)
n = len(amc.Q)
O = scipy.zeros((n,n))
for i in xrange(n):
for j in xrange(n):
O[i,j] = scipy.dot(amc.Q[i], amc.Q[j])
print "Orthogonality:"
amc.printM(O)
if __name__ == "__main__":
bins = 100
iterations = 15
histories = 1000
Chart = Gnuplot.Gnuplot()
Chart.xlabel('Iteration')
Chart.ylabel('Estimated dominant eigenvalue')
Chart.title('Histories = %i' %(histories))
Chart('set logscale x')
Chart('set yrange [0.4:]')
Ort = Ortho(iterations, histories)
Ort.Orthogonal(0.5, bins)
|
x = 13
print(x.bit_length())
print(x.__doc__)
print(x.__int__())
|
from ibai_exceptions import ExistingAuctionException, AuctionException, CategoryException
class Category:
def __init__(self, name):
"""Initialize a new category
:param name: name of the category
"""
if len(name) < 1:
raise Exception("Invalid Name")
self.name = name
self.auctions = []
def add_auction(self, auction):
"""Add an auction to the category
:param auction: Auction object to be added
"""
if auction in self.auctions:
raise AuctionException("Auction " + auction.name + " already exists")
else:
self.auctions.append(auction)
def del_auction(self, auction):
self.auctions.remove(auction)
def search_auction(self, name):
"""
Search for an auction in this category
:param name: name of the auction
:return: Auction object
"""
for auction in self.auctions:
if auction.name == name:
return auction
raise ExistingAuctionException("Auction not found")
@staticmethod
def search_category(cat_list, name):
"""Search for a category by name in the list
:param cat_list: list of categories
:param name: name of the category to match
:return: Category object
"""
name = name
try:
if name not in cat_list:
raise CategoryException("Category not found")
return cat_list[name]
        except KeyError as e:
raise CategoryException(str(e))
|
import connexion
from flask_cors import CORS
from os import environ
from nova_api import create_api_files
debug = environ.get('DEBUG') or '0'
if debug == '0':
debug = False
elif debug == '1':
debug = True
port = int(environ.get('PORT')) if environ.get('PORT') else 80
ENTITIES = environ.get('ENTITIES') or ''
ENTITIES = [entity.strip() for entity in ENTITIES.split(',')]
APIS = environ.get('APIS') or ''
APIS = [api.strip() for api in APIS.split(',')]
VERSION = environ.get('VERSION') or '1'
for entity in ENTITIES:
if entity == '':
continue
dao_class = entity + 'DAO'
mod = __import__(dao_class, fromlist=[dao_class])
entity_dao = getattr(mod, dao_class)
mod = __import__(entity, fromlist=[entity])
entity_class = getattr(mod, entity)
create_api_files(entity_class, entity_dao, VERSION)
dao = entity_dao()
dao.create_table_if_not_exists()
print("Done creating table and api files for {ent}".format(ent=entity))
# Create the application instance
app = connexion.App(__name__, specification_dir=".")
CORS(app.app)
for entity in ENTITIES:
if entity == '':
continue
dao_class = entity + 'DAO'
mod = __import__(dao_class, fromlist=[dao_class])
entity_dao = getattr(mod, dao_class)
mod = __import__(entity, fromlist=[entity])
entity_class = getattr(mod, entity)
create_api_files(entity_class, entity_dao, VERSION)
dao = entity_dao()
dao.create_table_if_not_exists()
print("Done creating table and api files for {ent}".format(ent=entity))
app.add_api(entity.lower() + "_api.yml")
print("Done adding api for {ent}".format(ent=entity))
for api in APIS:
if api == '':
continue
app.add_api(api)
print("Done adding api {api}".format(api=api))
# If we're running in stand alone mode, run the application
if __name__ == '__main__':
app.run(debug=debug, port=port)
|
import subprocess
for i in range(87,119):
for j in range(1,4):
i = i%100
url = f"https://stepdatabase.maths.org/database/db/{i:02}/{i:02}-S{j}.pdf"
subprocess.run(["wget", url])
for i in range(87,119):
for j in range(1,4):
i = i%100
url = f"https://stepdatabase.maths.org/database/db/{i:02}/{i:02}-S{j}.tex"
subprocess.run(["wget", url])
|
from guizero import App, Box, Text, PushButton, Combo, Slider
def switch_screen(switch_to):
hide_all()
switch_to.show()
def hide_all():
for screen in all_screens:
screen.hide()
app = App("Multi box app", layout="grid")
# Create a blank list to hold all the different screens
all_screens = []
# Create a box to contain the menu buttons
menu = Box(app, grid=[0,0], layout="grid")
menu.tk.width = 900
menu.bg = "red"
# Option 1 box
option1 = Box(app, grid=[1,1])
text1 = Text(option1, text="This is the first page of stuff")
combo = Combo(option1, options=["Beef", "Chicken", "Fish", "Vegetarian"])
all_screens.append(option1)
# Option 2 box
option2 = Box(app, grid=[1,1])
text2 = Text(option2, text="This is the second page of stuff")
slider = Slider(option2)
all_screens.append(option2)
# Add the screens to the menu box
option1_button = PushButton(menu, text="Option 1", command=switch_screen, args=[option1], grid=[0,0], align="left")
option2_button = PushButton(menu, text="Option 2", command=switch_screen, args=[option2], grid=[1,0], align="left")
# Hide all screens and then show the first one
hide_all()
all_screens[0].show()
app.display()
|
import os
import os.path
import copy
import botocore.exceptions
from boto import cloudformation, sts
from troposphere import Parameter
from .template import Template
from . import cli
from . import resources as res
from fnmatch import fnmatch
from . import utility
from . import monitor
import json
TIMEOUT = 60
class ValidationError(Exception):
pass
class EnvConfig(object):
def __init__(self, config_handlers=None):
self.config_handlers = config_handlers if config_handlers else []
# self.stack_event_handlers = stack_event_handlers if stack_event_handlers else []
# self.deploy_handlers = deploy_handlers if deploy_handlers else {}
class EnvironmentBase(object):
"""
EnvironmentBase encapsulates functionality required to build and deploy a network and common resources for object storage within a specified region
"""
def __init__(self,
view=None,
env_config=EnvConfig(),
config_filename=(res.DEFAULT_CONFIG_FILENAME + res.EXTENSIONS[0]),
config_file_override=None):
"""
Init method for environment base creates all common objects for a given environment within the CloudFormation
template including a network, s3 bucket and requisite policies to allow ELB Access log aggregation and
CloudTrail log storage.
:param view: View object to use.
        :param env_config: EnvConfig instance used to register additional config handlers.
        :param config_filename: The name of the config file to load by default. Note: User can still override this value from the CLI with '--config-file'.
        :param config_file_override: Override loading config values from file by providing the config settings directly to the constructor
"""
self.config_filename = config_filename
self.env_config = env_config
self.config_file_override = config_file_override
self.config = {}
self.globals = {}
self.template_args = {}
self.template = None
self.deploy_parameter_bindings = []
self.ignore_outputs = ['templateValidationHash', 'dateGenerated']
self.stack_outputs = {}
self._config_handlers = []
self.stack_monitor = None
self._ami_cache = None
self.cfn_connection = None
self.sts_credentials = None
self.boto_session = None
# self.env_config = env_config
for config_handler in env_config.config_handlers:
self._add_config_handler(config_handler)
self.add_config_hook()
# Load the user interface
self.view = view if view else cli.CLI()
# The view may override the config file location (i.e. command line arguments)
if hasattr(self.view, 'config_filename') and self.view.config_filename is not None:
self.config_filename = self.view.config_filename
# Allow the view to execute the user's requested action
self.view.process_request(self)
def create_hook(self):
"""
Override in your subclass for custom resource creation. Called after config is loaded and template is
initialized. After the hook completes the templates are serialized and written to file and uploaded to S3.
"""
pass
def add_config_hook(self):
"""
Override in your subclass for adding custom config handlers.
Called after the other config handlers have been added.
After the hook completes the view is loaded and started.
"""
pass
def deploy_hook(self):
"""
Extension point for modifying behavior of deploy action. Called after config is loaded and before
cloudformation deploy_stack is called. Some things you can do in deploy_hook include modifying
config or deploy_parameter_bindings or run arbitrary commands with boto.
"""
pass
def delete_hook(self):
"""
Extension point for modifying behavior of delete action. Called after config is loaded and before cloudformation
deploy_stack is called. Can be used to manage out-of-band resources with boto.
"""
pass
def stack_event_hook_wrapper(self, event_data):
"""
Write the stack outputs to file before calling the stack_event_hook that the user overrides
"""
if self.config.get('global').get('write_stack_outputs'):
self.write_stack_outputs_to_file(event_data)
self.stack_event_hook(event_data)
def stack_event_hook(self, event_data):
"""
Extension point for reacting to the cloudformation stack event stream. If global.monitor_stack is enabled in
config this function is used to react to stack events. Once a stack is created a notification topic will begin
emitting events to a queue. Each event is passed to this call for further processing. Details about the event
data can be read here:
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-listing-event-history.html
        :param event_data: The event_data hash provides the following mappings from the raw cloudformation event:
"status" = "ResourceStatus"
"type" = "ResourceType"
"name" = "LogicalResourceId"
"id" = "PhysicalResourceId"
"reason" = "ResourceStatusReason"
"props" = "ResourceProperties"
        :return bool: True indicates that processing is complete; False indicates that you are not yet done
"""
return True
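    # Illustrative example of the event_data shape (hypothetical values, following the
    # mapping documented above):
    #   {"status": "CREATE_COMPLETE", "type": "AWS::CloudFormation::Stack",
    #    "name": "MyEnvironment", "id": "arn:aws:cloudformation:...", "reason": None,
    #    "props": {}}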
def init_action(self):
"""
Default init_action invoked by the CLI
Generates config and ami_cache files
Override in your subclass for custom initialization steps
"""
self.generate_config()
self.generate_ami_cache()
def s3_prefix(self):
"""
Allows subclasses to modify the default s3 prefix
"""
return self.config.get('template').get('s3_prefix')
def stack_outputs_directory(self):
"""
Allows subclasses to modify the default stack outputs directory
"""
return self.config.get('global').get('stack_outputs_directory') or 'stack_outputs'
def _ensure_template_dir_exists(self):
template_dir = self.s3_prefix()
if not os.path.exists(template_dir):
os.makedirs(template_dir)
return template_dir
@staticmethod
def serialize_templates_helper(template, s3_client, s3_upload=True):
# Create stack resources for template and all child templates
raw_json = template.to_template_json()
# Recursively iterate through each child template to serialize it and process its children
for child, _, _, _, _ in template._child_templates:
EnvironmentBase.serialize_templates_helper(
template=child,
s3_client=s3_client,
s3_upload=s3_upload)
if s3_upload:
# Upload the template to the s3 bucket under the template_prefix
s3_client.Bucket(Template.template_bucket_default).put_object(
Key=template.resource_path,
Body=raw_json,
ACL=Template.upload_acl
)
# Save the template locally with the same file hierarchy as on s3
with open(template.resource_path, 'w') as output_file:
reloaded_template = json.loads(raw_json)
output_file.write(json.dumps(reloaded_template, indent=4, separators=(',', ':')))
print(("Generated {} template".format(template.name)))
if s3_upload:
print(("S3:\t{}".format(utility.get_template_s3_url(Template.template_bucket_default, template.resource_path))))
print(("Local:\t{}\n".format(template.resource_path)))
def serialize_templates(self):
s3_client = utility.get_boto_resource(self.config, 's3')
self._ensure_template_dir_exists()
s3_upload = self.config.get('template').get('s3_upload', True)
EnvironmentBase.serialize_templates_helper(
template=self.template,
s3_client=s3_client,
s3_upload=s3_upload)
def estimate_cost(self, template_name=None, template_url=None, stack_params=None):
cfn_conn = utility.get_boto_client(self.config, 'cloudformation')
if not template_url:
return None
estimate_cost_url = cfn_conn.estimate_template_cost(
TemplateURL=template_url,
Parameters=stack_params)
# else:
# template_body = self._load_template(template_name)
# estimate_cost_url = cfn_conn.estimate_template_cost(
# TemplateBody=template_body,
# Parameters=stack_params)
return estimate_cost_url.get('Url')
def _root_template_path(self):
"""
Construct the root template resource path
It never includes a timestamp because we need to find it by convention in the deploy step
"""
return utility.get_template_s3_resource_path(
prefix=self.s3_prefix(),
template_name=self.globals.get('environment_name'),
include_timestamp=False)
def _root_template_url(self):
"""
Construct the root template S3 URL
"""
return utility.get_template_s3_url(
bucket_name=self.template_args.get('s3_bucket'),
resource_path=self._root_template_path())
def create_action(self):
"""
Default create_action invoked by the CLI
Loads and validates config, initializes a new template instance, and writes it to file.
Override the create_hook in your environment to inject all of your cloudformation resources
"""
self.load_config()
self.initialize_template()
# Do custom troposphere resource creation in your overridden copy of this method
self.create_hook()
self.serialize_templates()
def _ensure_stack_is_deployed(self, stack_name='UnnamedStack', sns_topic=None, stack_params=[]):
"""
Deploys the root template to cloudformation using boto
First attempts to issue an update stack command
If this fails because the stack does not yet exist, then issues a create stack command
"""
is_successful = False
notification_arns = []
if sns_topic:
notification_arns.append(sns_topic.arn)
template_url = self._root_template_url()
cfn_conn = utility.get_boto_client(self.config, 'cloudformation')
try:
cfn_conn.update_stack(
StackName=stack_name,
TemplateURL=template_url,
Parameters=stack_params,
NotificationARNs=notification_arns,
Capabilities=['CAPABILITY_IAM'])
is_successful = True
print(("\nSuccessfully issued update stack command for %s\n" % stack_name))
# Else stack doesn't currently exist, create a new stack
except botocore.exceptions.ClientError as update_e:
if "does not exist" in update_e.message:
try:
cfn_conn.create_stack(
StackName=stack_name,
TemplateURL=template_url,
Parameters=stack_params,
NotificationARNs=notification_arns,
Capabilities=['CAPABILITY_IAM'],
DisableRollback=True,
TimeoutInMinutes=TIMEOUT)
is_successful = True
print(("\nSuccessfully issued create stack command for %s\n" % stack_name))
except botocore.exceptions.ClientError as create_e:
print(("Deploy failed: \n\n%s\n" % create_e.message))
else:
raise
return is_successful
def add_parameter_binding(self, key, value):
"""
Deployment parameters are used to provide values for parameterized templates
The deploy_parameter_bindings is populated with hashes of the form:
{
'ParameterKey': <key>,
'ParameterValue': <value>
}
:param key: String representing an input Parameter name in the root template
:param value: Troposphere value for the Parameter
"""
self.deploy_parameter_bindings.append({
'ParameterKey': key,
'ParameterValue': value
})
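    # Illustrative usage (the value is hypothetical; 'ec2Key' is a parameter defined in
    # initialize_template below):
    #   self.add_parameter_binding('ec2Key', 'my-deploy-keypair')
    # appends {'ParameterKey': 'ec2Key', 'ParameterValue': 'my-deploy-keypair'} to
    # self.deploy_parameter_bindings before deploy_action() issues the stack command.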
def deploy_action(self):
"""
Default deploy_action invoked by the CLI.
Loads and validates config, then deploys the root template to cloudformation using boto
Override the deploy_hook in your environment to intercept the deployment process
This can be useful for creating resources using boto outside of cloudformation
"""
self.load_config()
self.deploy_hook()
stack_name = self.config['global']['environment_name']
# initialize stack event monitor
topic = None
queue = None
if self.stack_monitor and self.stack_monitor.has_handlers():
(topic, queue) = self.stack_monitor.setup_stack_monitor(self.config)
try:
# First try to do an update-stack... if it doesn't exist, then try create-stack
is_successful = self._ensure_stack_is_deployed(
stack_name,
sns_topic=topic,
stack_params=self.deploy_parameter_bindings)
if self.stack_monitor and is_successful:
self.stack_monitor.start_stack_monitor(queue, stack_name, debug=self.globals['print_debug'])
except KeyboardInterrupt:
if self.stack_monitor:
print('KeyboardInterrupt: calling cleanup')
self.stack_monitor.cleanup_stack_monitor(topic, queue)
raise
if self.stack_monitor:
self.stack_monitor.cleanup_stack_monitor(topic, queue)
def delete_action(self):
"""
Default delete_action invoked by CLI
Loads and validates config, then issues the delete stack command to the root stack
Override the delete_hook in your environment to intercept the delete process with your own code
This can be useful for deleting any resources that were created outside of cloudformation
"""
self.load_config()
self.delete_hook()
cfn_conn = utility.get_boto_client(self.config, 'cloudformation')
stack_name = self.config['global']['environment_name']
cfn_conn.delete_stack(StackName=stack_name)
print(("\nSuccessfully issued delete stack command for %s\n" % stack_name))
def _validate_config_helper(self, schema, config, path):
# Check each requirement
for (req_key, req_value) in schema.items():
# Check for key match, usually only one match but parametrized keys can have multiple matches
# Uses 'filename' match, similar to regex but only supports '?', '*', [XYZ], [!XYZ]
filter_fun = lambda candidate_key: fnmatch(candidate_key, req_key)
# Find all config keys matching the requirement
matches = list(filter(filter_fun, list(config.keys())))
if not matches:
message = "Config file missing section " + str(path) + ('.' if path != '' else '') + req_key
raise ValidationError(message)
# Validate each matching config entry
for matching_key in matches:
new_path = path + ('.' if path != '' else '') + matching_key
# ------------ value check -----------
if isinstance(req_value, str):
req_type = res.get_type(req_value)
if not isinstance(config[matching_key], req_type):
message = "Type mismatch in config, %s should be of type %s, not %s" % \
(new_path, req_value, type(config[matching_key]).__name__)
raise ValidationError(message)
# else:
# print("%s validated: %s == %s" % (new_path, req_value, type(config[matching_key]).__name__))
# if the schema is nested another level .. we must go deeper
elif isinstance(req_value, dict):
matching_value = config[matching_key]
if not isinstance(matching_value, dict):
message = "Type mismatch in config, %s should be a dict, not %s" % \
(new_path, type(matching_value).__name__)
raise ValidationError(message)
self._validate_config_helper(req_value, matching_value, new_path)
elif isinstance(req_value, list):
matching_value = config[matching_key]
if not isinstance(matching_value, list):
message = "Type mismatch in config, %s should be a list, not %s" % \
(new_path, type(matching_value).__name__)
raise ValidationError(message)
def _validate_region(self, config):
"""
        Checks boto.region_name against the list of valid regions, raising an exception if it is not found.
"""
valid_regions = config['global']['valid_regions']
region_name = config['boto']['region_name']
if region_name not in valid_regions:
raise ValidationError('Unrecognized region name: ' + region_name)
def _validate_config(self, config, factory_schema=res.CONFIG_REQUIREMENTS):
"""
        Compares the provided dict against the factory schema (res.CONFIG_REQUIREMENTS by default). Checks that all required sections and values are present
and that the required types match. Throws ValidationError if not valid.
:param config: dict to be validated
"""
config_reqs_copy = copy.deepcopy(factory_schema)
# Merge in any requirements provided by config handlers
for handler in self._config_handlers:
config_reqs_copy.update(handler.get_config_schema())
self._validate_config_helper(config_reqs_copy, config, '')
# # Validate region
# self._validate_region(config)
def _add_config_handler(self, handler):
"""
Register classes that will augment the configuration defaults and/or validation logic here
"""
if not hasattr(handler, 'get_factory_defaults') or not callable(getattr(handler, 'get_factory_defaults')):
raise ValidationError('Class %s cannot be a config handler, missing get_factory_defaults()' % type(handler).__name__)
if not hasattr(handler, 'get_config_schema') or not callable(getattr(handler, 'get_config_schema')):
raise ValidationError('Class %s cannot be a config handler, missing get_config_schema()' % type(handler).__name__)
self._config_handlers.append(handler)
@staticmethod
def _config_env_override(config, path, print_debug=False):
"""
Update config value with values from the environment variables. If the environment variable exists
the config value is replaced with its value.
For config parameters like template.ec2_key_default this function will expect an environment
variable matching the <section label>_<config_key> in all caps (e.g. TEMPLATE_EC2_KEY_DEFAULT).
For environment variables containing multiple subsections the same pattern applies.
        For example, given the config file:
{
...
'db': {
'label1': {
...
'password': 'changeme'
},
'label2': {
...
                    'password': 'changeme'
}
}
}
Would replace those two database passwords if the following is run from the shell:
> export DB_LABEL1_PASSWORD=myvoiceismypassword12345
> export DB_LABEL2_PASSWORD=myvoiceismyotherpassword12345
"""
for key, val in config.items():
new_path = path + ('.' if path != '' else '') + key
env_name = '_'.join(new_path.split('.')).upper()
if not isinstance(val, dict):
env_value = os.environ.get(env_name)
if print_debug:
print(("Checking %s (%s)" % (env_name, new_path)))
if env_value is None:
continue
# TODO: Need better schema validation for non-string values from env vars
# Convert true/false strings to booleans for schema validation
if env_value.lower() == 'true':
env_value = True
elif env_value.lower() == 'false':
env_value = False
default_value = config.get(key)
config[key] = env_value if env_value is not None else default_value
if env_value is not None:
print(("* Updating %s from '%s' to value of '%s'" % (new_path, default_value, env_name)))
else:
EnvironmentBase._config_env_override(config[key], new_path, print_debug=print_debug)
def generate_config(self):
"""
Generate config dictionary from defaults
Add defaults from all registered config handlers (added patterns, etc.)
Write file to self.config_filename
"""
if os.path.isfile(self.config_filename):
overwrite = input("%s already exists. Overwrite? (y/n) " % self.config_filename).lower()
print()
if not overwrite == 'y':
return
config = copy.deepcopy(res.FACTORY_DEFAULT_CONFIG)
# Merge in any defaults provided by registered config handlers
for handler in self._config_handlers:
config.update(handler.get_factory_defaults())
with open(self.config_filename, 'w') as f:
f.write(json.dumps(config, indent=4, sort_keys=True, separators=(',', ': ')))
print(('Generated config file at %s\n' % self.config_filename))
def load_config(self, view=None, config=None):
"""
Load config from self.config_filename, break if it doesn't exist
Load any overrides from environment variables
Validate all loaded values
"""
# Allow overriding the view for testing purposes
if not view:
view = self.view
# Allow overriding of the entire config object
if self.config_file_override:
config = self.config_file_override
# Else read from file
else:
config = res.load_file('', self.config_filename)
# Load in cli config overrides
view.update_config(config)
# record value of the debug variable
debug = config['global']['print_debug']
# Check the environment variables for any overrides
self._config_env_override(config, '', print_debug=debug)
# Validate and save results
self._validate_config(config)
self.config = config
# Save shortcut references to commonly referenced config sections
self.globals = self.config.get('global', {})
self.template_args = self.config.get('template', {})
# Register all stack handlers
if self.globals['monitor_stack']:
self.stack_monitor = monitor.StackMonitor(self.globals['environment_name'])
self.stack_monitor.add_handler(self)
def initialize_template(self):
"""
Create new Template instance, set description and common parameters and load AMI cache.
"""
print(('\nGenerating templates for {} stack\n'.format(self.globals['environment_name'])))
# Configure Template class with S3 settings from config
Template.template_bucket_default = self.template_args.get('s3_bucket')
Template.s3_path_prefix = self.s3_prefix()
Template.stack_timeout = self.template_args.get("timeout_in_minutes")
Template.upload_acl = self.template_args.get('s3_upload_acl')
Template.include_timestamp = self.template_args.get('include_timestamp')
Template.include_templateValidationHash_output = self.template_args.get('include_templateValidationHash_output')
Template.include_dateGenerated_output = self.template_args.get('include_dateGenerated_output')
# Create the root template object
self.template = Template(self.globals.get('environment_name', 'default_template'))
self.template.description = self.template_args.get('description', 'No Description Specified')
self.template.resource_path = self._root_template_path()
ec2_key = self.config.get('template').get('ec2_key_default', 'default-key')
self.template._ec2_key = self.template.add_parameter(
Parameter(
'ec2Key',
Type='String',
Default=ec2_key,
Description='Name of an existing EC2 KeyPair to enable SSH access to the instances',
AllowedPattern=res.get_str('ec2_key'),
MinLength=1,
MaxLength=255,
ConstraintDescription=res.get_str('ec2_key_message')
)
)
bucket_name = self.config.get('logging').get('s3_bucket')
self.template.add_utility_bucket(name=bucket_name)
self.template.add_log_group()
self.template.add_vpcflowlogs_role()
ami_filename = self.config['template'].get('ami_map_file')
if ami_filename:
ami_cache = res.load_yaml_file(ami_filename)
self.template.add_ami_mapping(ami_cache)
def generate_ami_cache(self):
"""
Generate ami_cache.json file from defaults
"""
ami_cache_filename = res.DEFAULT_AMI_CACHE_FILENAME + res.EXTENSIONS[0]
if os.path.isfile(ami_cache_filename):
overwrite = input("%s already exists. Overwrite? (y/n) " % ami_cache_filename).lower()
print()
if not overwrite == 'y':
return
with open(ami_cache_filename, 'w') as f:
f.write(json.dumps(res.FACTORY_DEFAULT_AMI_CACHE, indent=4, separators=(',', ': ')))
print(("Generated AMI cache file at %s\n" % ami_cache_filename))
def to_json(self):
"""
Centralized method for outputting the root template with a timestamp identifying when it
was generated and for creating a SHA256 hash representing the template for validation purposes
        Also recursively processes all child templates
"""
return self.template.to_template_json()
# Called after add_child_template() has attached common parameters and some instance attributes:
# - RegionMap: Region to AMI map, allows template to be deployed in different regions without updating AMI ids
# - ec2Key: keyname to use for ssh authentication
# - vpcCidr: IP block claimed by whole VPC
# - vpcId: resource id of VPC
# - commonSecurityGroup: sg identifier for common allowed ports (22 in from VPC)
# - utilityBucket: S3 bucket name used to send logs to
# - [public|private]Subnet[0-9]: indexed and classified subnet identifiers
#
# and some instance attributes referencing the attached parameters:
# - self.vpc_cidr
# - self.vpc_id
# - self.common_security_group
# - self.utility_bucket
# - self.subnets: keyed by type, layer, and AZ index (e.g. self.subnets['public']['web'][1])
def add_child_template(self, child_template, merge=False, depends_on=[]):
"""
Saves reference to provided template. References are processed in write_template_to_file().
        :param child_template: The EnvironmentBase Template you want to associate with the current instance
        :param depends_on: List of upstream resources that must be processed before the provided template
:param merge: Determines whether the resource is attached as a child template or all of its resources merged
into the current template
"""
return self.template.add_child_template(child_template, merge=merge, depends_on=depends_on)
def write_stack_outputs_to_file(self, event_data):
"""
Given the stack event data, determine if the stack has finished executing (CREATE_COMPLETE or UPDATE_COMPLETE)
If it has, write the stack outputs to file
"""
if event_data['type'] == 'AWS::CloudFormation::Stack' and (event_data['status'] == 'CREATE_COMPLETE' or event_data['status'] == 'UPDATE_COMPLETE'):
self.write_stack_output_to_file(stack_id=event_data['id'], stack_name=event_data['name'])
def write_stack_output_to_file(self, stack_id, stack_name):
"""
Given a CFN stack's physical resource ID, query the stack for its outputs
Save outputs to file as JSON at ./<stack_outputs_dir>/<stack_name>.json
"""
# Grab all the outputs from the cfn stack object as k:v pairs
stack_outputs = {}
for output in self.get_cfn_stack_obj(stack_id).outputs:
stack_outputs[output.key] = output.value
stack_outputs_dir = self.stack_outputs_directory()
# Ensure <stack_outputs_dir> directory exists
if not os.path.isdir(stack_outputs_dir):
os.mkdir(stack_outputs_dir)
# Write the JSON-formatted stack outputs to ./<stack_outputs_dir>/<stack_name>.json
stack_output_filename = os.path.join(stack_outputs_dir, stack_name + '.json')
with open(stack_output_filename, 'w') as output_file:
output_file.write(json.dumps(stack_outputs, indent=4, separators=(',', ':')))
if self.globals['print_debug']:
print(("Outputs for {0} written to {1}\n".format(stack_name, stack_output_filename)))
def get_stack_output(self, stack_id, output_name):
"""
Given the PhysicalResourceId of a Stack and a specific output key, return the output value
Raise an exception if the output key is not found
Example:
def stack_event_hook(self, event_data):
elb_dns_name = self.get_stack_output(event_data['id'], 'ElbDnsName')
"""
stack_obj = self.get_cfn_stack_obj(stack_id)
for output in stack_obj.outputs:
if output.key == output_name:
return output.value
# If the output wasn't found in the stack, raise an exception
raise Exception("%s did not output %s" % (stack_obj.stack_name, output_name))
def get_cfn_stack_obj(self, stack_id):
"""
Given the unique physical stack ID, return exactly one cloudformation stack object
"""
return self.get_cfn_connection().describe_stacks(stack_id)[0]
def get_cfn_connection(self):
"""
We persist the CFN connection so that we don't create a new session with each request
"""
if not self.cfn_connection:
self.cfn_connection = cloudformation.connect_to_region(self.config.get('boto').get('region_name'))
return self.cfn_connection
def get_sts_credentials(self, role_session_name, role_arn):
"""
We persist the STS credentials so that we don't create a new session with each request
"""
if not self.sts_credentials:
sts_connection = sts.STSConnection()
assumed_role = sts_connection.assume_role(
role_arn=role_arn,
role_session_name=role_session_name
)
self.sts_credentials = assumed_role.credentials
return self.sts_credentials
|
'''
If we use the path-compression algorithm,
it seems fine to just look up Parent directly in union,
but let's still use the root-finding function! (to avoid confusion)
7 8
0 1 3
1 1 7
0 7 6
1 7 1
0 3 7
0 4 2
0 1 1
1 1 1
>>
NO
NO
YES
'''
N, M = map(int, input().split())
data = []
for _ in range(M):
data.append(list(map(int, input().split())))
# debug
# print(N, M)
# print(data)
def find_parent(parent, a):
if parent[a] != a:
parent[a] = find_parent(parent, parent[a])
    return parent[a]
def union_parent(parent, a, b):
a = find_parent(parent, a)
b = find_parent(parent, b)
if a > b:
parent[b] = a
else:
parent[a] = b
# initialize the parent table
parent = [0] * (N + 1)
for i in range(1, N+1):
    parent[i] = i  # each node starts as its own parent
# process the operations
for line in data:
if line[0] == 0: # Union
union_parent(parent, line[1], line[2])
elif line[0] == 1: # Find
root_a = find_parent(parent, line[1])
root_b = find_parent(parent, line[2])
if root_a == root_b:
print('YES')
else:
print('NO')
'''
<Answer>
# Find the set that a given element belongs to
def find_parent(parent, x):
    # If this is not the root node, recursively follow the chain until the root is found
if parent[x] != x:
parent[x] = find_parent(parent, parent[x])
return parent[x]
# Merge the sets containing the two elements
def union_parent(parent, a, b):
a = find_parent(parent, a)
b = find_parent(parent, b)
if a < b:
parent[b] = a
else:
parent[a] = b
n, m = map(int, input().split())
parent = [0] * (n + 1)  # initialize the parent table
# in the parent table, initialize every node's parent to itself
for i in range(0, n+1):
parent[i] = i
# check each operation one by one
for i in range(m):
oper, a, b = map(int, input().split())
    # union operation
if oper == 0:
union_parent(parent, a, b)
    # find operation
elif oper == 1:
if find_parent(parent, a) == find_parent(parent, b):
print('YES')
else:
print('NO')
''' |
# coding: utf-8
#Tom G.
from PIL import Image
import time
# Constants
_DEBUTCHAINE = bytearray([0,129])
_FINCHAINE = bytearray([129,0])
def steganographie_ecrire(cheminImage, texteACacher):
image = ouvrir_fichier(cheminImage)
if(image != 0):
tailleImage = taille_image(image)
tableauCaracteres = bytearray(texteACacher.encode('utf8'))
tableauCaracteres = _DEBUTCHAINE + tableauCaracteres + _FINCHAINE
chaineBinaire = ""
for i in range(0, len(tableauCaracteres)):
            chaineBinaire += bin(tableauCaracteres[i])[2:].zfill(8)  # Convert to binary
if (tailleImage >= len(tableauCaracteres)):
colonne, ligne = image.size
x = 0
            ligne = int(len(chaineBinaire)/(colonne*3)) + 1  # reduce the number of rows to scan based on how much data is being hidden
for l in range(0, ligne):
for c in range(0, colonne):
if (x < len(chaineBinaire)):
pixel = image.getpixel((c,l))
rouge = pixel[0]
vert = pixel[1]
bleu = pixel[2]
                        rouge = rouge - (rouge%2)  # Set the pixel's least significant bit to 0
rouge = rouge + int(chaineBinaire[x])
x = x + 1
if (x < len(chaineBinaire)):
vert = vert - (vert%2)
vert = vert + int(chaineBinaire[x])
x = x + 1
if (x < len(chaineBinaire)):
bleu = bleu - (bleu%2)
bleu = bleu + int(chaineBinaire[x])
x = x + 1
image.putpixel((c,l),(rouge,vert,bleu))
image.save(cheminImage)
image.close()
return len(tableauCaracteres)*8, tailleImage
else:
image.close()
raise ValueError("Texte à cacher trop long")
else:
raise ValueError("Chemin invalide")
def steganographie_lire(cheminImage):
image = ouvrir_fichier(cheminImage)
if(image != 0):
colonne, ligne = image.size
chaineBinaire = ""
        for l in range(0, ligne):  # Iterate over the image pixels
for c in range(0, colonne):
pixel = image.getpixel((c,l))
rouge = pixel[0]
vert = pixel[1]
bleu = pixel[2]
                chaineBinaire += (str(rouge%2) + str(vert%2) + str(bleu%2))  # Keep the least significant bit of each pixel
tableauOctets = []
        for i in range(0, int((len(chaineBinaire) - len(chaineBinaire)%8)/8)):  # Optimized binary-to-decimal conversion (saves computation time)
x = int(chaineBinaire[i*8]) * 128
x = x + int(chaineBinaire[i*8 + 1]) * 64
x = x + int(chaineBinaire[i*8 + 2]) * 32
x = x + int(chaineBinaire[i*8 + 3]) * 16
x = x + int(chaineBinaire[i*8 + 4]) * 8
x = x + int(chaineBinaire[i*8 + 5]) * 4
x = x + int(chaineBinaire[i*8 + 6]) * 2
x = x + int(chaineBinaire[i*8 + 7]) * 1
            tableauOctets.append(x)
            if i > 0 and x == 0 and tableauOctets[i-1] == 129:
                break  # end-of-data marker (129, 0) reached; reassigning i would not stop the loop
        tableauOctets = bytearray(tableauOctets)  # convert to a byte array
try:
            debut = tableauOctets.index(_DEBUTCHAINE)  # look for the start of the data
            fin = tableauOctets.index(_FINCHAINE)  # look for the end of the data
            donnees = tableauOctets[(debut+2):fin]  # extract the data of interest
return donnees.decode("utf-8")
        except:  # if no data was found
raise ValueError("Pas de données dans l'image")
else:
raise ValueError("Chemin invalide")
def ouvrir_fichier(chemin):
try:
image=Image.open(chemin)
return image
except:
return 0
def taille_image(image):
colonne, ligne = image.size
tailleEnBits = colonne * ligne * 3
return int(tailleEnBits/8)
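# Example usage (illustrative sketch; "photo.png" is a hypothetical path and must be an
# RGB image stored in a lossless format such as PNG, otherwise the low-order bits would
# be destroyed on save). The write call returns the number of bits written and the
# image's capacity in bytes:
#   bits_ecrits, capacite = steganographie_ecrire("photo.png", "secret message")
#   print(steganographie_lire("photo.png"))  # -> "secret message"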
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
# Copyright (c) 2019 University of Utah Student Computing Labs.
# All Rights Reserved.
#
# Author: Thackery Archuletta
# Creation Date: Oct 2018
# Last Updated: March 2019
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appears in all copies and
# that both that copyright notice and this permission notice appear
# in supporting documentation, and that the name of The University
# of Utah not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission. This software is supplied as is without expressed or
# implied warranties of any kind.
################################################################################
'''pytests for secure_erase_internals.py. Run "python -m unittest test_secure_erase_internals" or
"python test_secure_erase_internals.py"
'''
import os
import sys
import logging
import unittest
import subprocess as sp
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
from blade_runner.secure_erase import secure_erase_internals as sei
logging.getLogger(__name__).addHandler(logging.NullHandler())
class TestSecureEraseInternalDisks(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""This will only run once, regardless of how many tests there are."""
# <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
# Setup command to create a test disk.
cls.vol_name = 'test_disk'
create_dmg = [
'hdiutil',
'create',
'-size',
'1g',
'-fs',
'HFS+J',
'-volname',
cls.vol_name,
os.path.join("/tmp", cls.vol_name)
]
# <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
cls.test_disk_path = os.path.join("/tmp", cls.vol_name + ".dmg")
# Create the test disk.
try:
sp.check_output(create_dmg)
except:
print('The file {}.dmg already exists. Please delete it and try again.'.format(cls.vol_name))
delete_dmg = ['rm', '-f', cls.test_disk_path]
sp.check_output(delete_dmg)
raise SystemExit("The file {}.dmg already existed. It has now been deleted. Try again.".format(cls.vol_name))
# <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
# Mount the test disk.
mount_dmg = ['hdiutil', 'mount', cls.test_disk_path]
sp.check_output(mount_dmg)
# <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
# Path to disk.
cls.test_disk = os.path.join('/dev/', sei.whole_disks(cls.vol_name)[0])
# <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
# Create a file inside the test disk volume.
create_file_in_dmg = ['touch', os.path.join('/Volumes/', cls.vol_name, 'test.txt')]
sp.check_output(create_file_in_dmg)
@classmethod
def tearDownClass(cls):
"""This will only run once, regardless of how many tests there are."""
# <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
# Detach the test disk.
detach_dmg = ['hdiutil', 'detach', cls.test_disk]
sp.check_output(detach_dmg)
# <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
# Delete the test disk dmg.
delete_dmg = ['rm','-f', cls.test_disk_path]
sp.check_output(delete_dmg)
def setUp(self):
'''This will be run before each test case.'''
# <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
# Save vol_name and test_disk to self.
self.vol_name = self.__class__.vol_name
self.test_disk = self.__class__.test_disk
def test_erase_valid_disk(self):
# <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
# Erase the test disk.
is_erased = sei.secure_erase_disks([self.test_disk])
self.assertTrue(is_erased)
if __name__ == '__main__':
# <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
# Ensure run as root.
if os.geteuid() != 0:
raise SystemExit("Must be run as root.")
# <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
# Set up logging vars.
fmt = '%(asctime)s %(process)d: %(levelname)8s: %(name)s.%(funcName)s: %(message)s'
script_name = os.path.splitext(os.path.basename(__file__))[0]
log_dir = os.path.join(os.path.expanduser("~"), "Library/Logs/Blade Runner")
filepath = os.path.join(log_dir, script_name + ".log")
# Create log path
try:
os.mkdir(log_dir)
except OSError as e:
if e.errno != 17:
raise e
# Set up logger.
logging.basicConfig(level=logging.DEBUG, format=fmt, filemode='a', filename=filepath)
logger = logging.getLogger(script_name)
# <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
# Start unit tests.
unittest.main(verbosity=1) |
#https://nordvpn.com/
import os
import requests
from db.db import Database
class nordvpn:
def __init__(self,):
self.db = Database()
url = 'https://nordvpn.com/wp-admin/admin-ajax.php?searchParameters[0][name]=proxy-country&searchParameters[0][value]=&searchParameters[1][name]=proxy-ports&searchParameters[1][value]=&offset=0&limit=100000&action=getProxies'
r = requests.get(url,timeout=10)
ips = list()
for proxy in r.json():
ips.append(proxy['ip'])
self.db.insert(ips)
if __name__ == '__main__':
nordvpn()
|
# -*- coding: utf-8 -*-
"""
movies related genres component module.
"""
from pyrin.application.decorators import component
from pyrin.application.structs import Component
from charma.movies.related_genres import RelatedGenresPackage
from charma.movies.related_genres.manager import RelatedGenresManager
@component(RelatedGenresPackage.COMPONENT_NAME)
class RelatedGenresComponent(Component, RelatedGenresManager):
"""
movies related genres component class.
"""
pass
|
import unittest
from .door2 import solve, solve2
class ExampleTestsPartOne(unittest.TestCase):
def test_example(self):
self.assertEqual(solve('5 1 9 5\n7 5 3\n2 4 6 8'), 18)
def test_1(self):
self.assertEqual(solve('5 2 9 5\n7 5 3 12\n1 4 6 8'), 23)
def test_2(self):
self.assertEqual(solve('5 2 9 5\n7 5 3 12\n1 4 6 8\n1 5 8 5 5 3\n1 2 4 3'), 33)
class ExampleTestsPartTwo(unittest.TestCase):
def test_example(self):
self.assertEqual(solve2('5 9 2 8\n9 4 7 3\n3 8 6 5'), 9)
def test_1(self):
self.assertEqual(solve2('4 2 9\n7 5 3 12\n4 6 8'), 8)
def test_2(self):
self.assertEqual(solve2('5 10 14\n3 6 11\n2 14 3'), 11)
if __name__ == '__main__':
unittest.main()
|
# -*- coding: UTF-8 -*-
"""Client CLI entrypoint"""
import argparse
import sys
import requests
import six
from scriptd.app import util
from scriptd.app.exceptions import AuthenticationError
import scriptd.app.protocol
from scriptd.app.protocol import ScriptdProtocol
def main():
argparser = argparse.ArgumentParser(description="Scriptd client")
argparser.add_argument("-H", "--host", type=six.text_type, default=u"127.0.0.1",
help="Server ip or name, default: 127.0.0.1")
argparser.add_argument("-p", "--port", type=int, default=u"8182",
help="Server port, default: 8182")
key_group = argparser.add_mutually_exclusive_group(required=False)
key_group.add_argument("-k", "--key", type=six.text_type, default=u"",
help="Authentication key, default: empty")
key_group.add_argument("--key-file", type=six.text_type,
help="Authentication key file. Key will be derived from its hash.")
argparser.add_argument("command", type=six.text_type,
help="Name of the script to run on server")
args = argparser.parse_args()
if args.key_file is not None:
key = util.derive_key_from_key_file(args.key_file)
else:
key = args.key.encode("UTF-8")
protocol_ = ScriptdProtocol()
protocol_.set_key(key)
token_resp = requests.post(
"http://{}:{}/token".format(args.host, args.port),
data=protocol_.emit_frame(scriptd.app.protocol.TOKEN_REQUEST_CONTENT),
headers={"Expect": ""}
)
token_resp.raise_for_status()
token = protocol_.parse_frame(token_resp.content)
execution_request_payload = token + args.command.encode("UTF-8")
resp = requests.post("http://{}:{}/execute".format(args.host, args.port),
data=protocol_.emit_frame(execution_request_payload),
headers={"Expect": ""},
stream=True)
response_empty = True
try:
while True:
frame = protocol_.read_frame_from(resp.raw)
if frame is None:
break
frame_data = protocol_.parse_frame(frame)
if six.PY3:
sys.stdout.buffer.write(frame_data)
else:
sys.stdout.write(frame_data)
response_empty = False
except AuthenticationError:
six.print_("Authentication failed, fake server?", file=sys.stderr)
if response_empty:
six.print_("Empty response, incorrect key or invalid server?", file=sys.stderr)
if __name__ == "__main__":
main()
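# Example invocation (illustrative; the host, key and script name are hypothetical, and
# the exact module path depends on how the package is installed):
#   python -m scriptd.client -H 192.168.1.10 -p 8182 -k secret backup.sh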
|
# -*- coding: utf-8 -*-
#
# Copyright 2015 Thomas Amland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from actors.internal.mailbox import InternalMailbox
from .mock_compat import Mock
@pytest.fixture
def actor():
actor = Mock()
actor.handle_message = Mock()
actor.handle_system_message = Mock()
return actor
@pytest.fixture
def mailbox(actor):
return InternalMailbox(Mock(), actor)
def test_should_invoke_handle_message_on_actor(actor, mailbox):
message = object()
mailbox.enqueue(message)
mailbox.process_messages()
actor.handle_message.assert_called_once_with(message)
def test_should_invoke_handle_system_message_on_actor(actor, mailbox):
message = object()
mailbox.enqueue_system(message)
mailbox.process_messages()
actor.handle_system_message.assert_called_once_with(message)
def test_should_process_system_messages_before_user_messages(actor, mailbox):
mailbox.enqueue(1)
mailbox.enqueue_system(2)
def assert_handle_message_not_called(*args, **kwargs):
assert not actor.handle_message.called
actor.handle_system_message.side_effect = assert_handle_message_not_called
mailbox.process_messages()
actor.handle_system_message.assert_called_once_with(2)
actor.handle_message.assert_called_once_with(1)
def test_should_stop_processing_when_reaching_throughput_limit(actor):
mailbox = InternalMailbox(Mock(), actor, throughput=2)
mailbox.enqueue(1)
mailbox.enqueue(1)
mailbox.enqueue(1)
mailbox.process_messages()
assert actor.handle_message.call_count == 2
def test_should_not_process_user_messages_when_suspended(actor, mailbox):
    mailbox.suspend()
    mailbox.enqueue(Mock())
    mailbox.process_messages()
    assert not actor.handle_message.called
def test_should_process_system_messages_when_suspended(actor, mailbox):
mailbox.suspend()
mailbox.enqueue_system(Mock())
mailbox.process_messages()
assert actor.handle_system_message.called
def test_should_not_process_messages_when_closed(actor, mailbox):
mailbox.close()
mailbox.enqueue(Mock())
mailbox.enqueue_system(Mock())
mailbox.process_messages()
assert not actor.handle_message.called
assert not actor.handle_system_message.called
def test_should_process_user_messages_on_resume(actor, mailbox):
mailbox.suspend()
mailbox.enqueue(Mock())
mailbox.resume()
mailbox.process_messages()
assert actor.handle_message.called
def test_suspending_in_handler_interrupts_processing(actor):
mailbox = InternalMailbox(Mock(), actor, throughput=5)
actor.handle_message.side_effect = lambda *args: mailbox.suspend()
mailbox.enqueue(Mock())
mailbox.enqueue(Mock())
mailbox.process_messages()
assert actor.handle_message.call_count == 1
def test_closing_in_handler_interrupts_processing(actor):
mailbox = InternalMailbox(Mock(), actor, throughput=5)
actor.handle_system_message.side_effect = lambda *args: mailbox.close()
mailbox.enqueue_system(Mock())
mailbox.enqueue_system(Mock())
mailbox.enqueue(Mock())
mailbox.enqueue(Mock())
mailbox.process_messages()
assert actor.handle_system_message.call_count == 1
def test_set_scheduled_while_already_scheduled_should_fail(mailbox):
assert mailbox.set_scheduled() is True
assert mailbox.is_scheduled()
assert mailbox.set_scheduled() is False
def test_sets_itself_idle_after_processing_messages(mailbox):
mailbox.enqueue(Mock())
mailbox.set_scheduled()
mailbox.process_messages()
assert mailbox.is_idle()
def test_flushing_when_open_should_raise(mailbox):
mailbox.enqueue(Mock())
with pytest.raises(Exception):
mailbox.flush_messages()
def test_flushing_when_closed_should_return_user_messages_in_queue(mailbox):
message = object()
mailbox.enqueue(message)
mailbox.close()
assert mailbox.flush_messages() == [message]
|
from .base import SkeletonType
from .openpose_base import (
BODY_25,
BODY_25_JOINTS,
BODY_25_LINES,
FACE_LINES,
HAND_LINES,
UPPER_BODY_25_LINES,
)
from .reducer import SkeletonReducer
from .utils import incr, root_0_at
def compose_body(body=None, left_hand=None, right_hand=None, face=None):
lines = {}
if body is not None:
lines["body"] = body
if left_hand is not None:
lines["left hand"] = root_0_at(HAND_LINES, 7, 25)
if right_hand is not None:
lines["right hand"] = root_0_at(HAND_LINES, 4, 45)
if face is not None:
lines["face"] = incr(65, FACE_LINES)
return lines
BODY_25_HANDS_LINES = compose_body(BODY_25_LINES, HAND_LINES, HAND_LINES)
BODY_135_LINES = compose_body(BODY_25_LINES, HAND_LINES, HAND_LINES, FACE_LINES)
LEFT_HAND_IN_BODY_25_LINES = compose_body(left_hand=HAND_LINES)
RIGHT_HAND_IN_BODY_25_LINES = compose_body(right_hand=HAND_LINES)
UPPER_BODY_25_LEFT_HAND_LINES = compose_body(UPPER_BODY_25_LINES, left_hand=HAND_LINES)
UPPER_BODY_25_RIGHT_HAND_LINES = compose_body(
UPPER_BODY_25_LINES, right_hand=HAND_LINES
)
UPPER_BODY_25_HANDS_LINES = compose_body(UPPER_BODY_25_LINES, HAND_LINES, HAND_LINES)
UPPER_BODY_135_LINES = compose_body(
UPPER_BODY_25_LINES, HAND_LINES, HAND_LINES, FACE_LINES
)
BODY_25_HANDS = SkeletonType(BODY_25_HANDS_LINES, BODY_25_JOINTS)
BODY_135 = SkeletonType(BODY_135_LINES, BODY_25_JOINTS)
UPPER_BODY_25 = SkeletonType(UPPER_BODY_25_LINES, BODY_25_JOINTS)
LEFT_HAND_IN_BODY_25 = SkeletonType(
LEFT_HAND_IN_BODY_25_LINES, BODY_25_JOINTS, one_sided="left"
)
RIGHT_HAND_IN_BODY_25 = SkeletonType(
RIGHT_HAND_IN_BODY_25_LINES, BODY_25_JOINTS, one_sided="right"
)
UPPER_BODY_25_LEFT_HAND = SkeletonType(
UPPER_BODY_25_LEFT_HAND_LINES, BODY_25_JOINTS, one_sided="left"
)
UPPER_BODY_25_RIGHT_HAND = SkeletonType(
UPPER_BODY_25_RIGHT_HAND_LINES, BODY_25_JOINTS, one_sided="right"
)
UPPER_BODY_25_HANDS = SkeletonType(UPPER_BODY_25_HANDS_LINES, BODY_25_JOINTS)
UPPER_BODY_135 = SkeletonType(UPPER_BODY_135_LINES, BODY_25_JOINTS)
FACE_IN_BODY_25_ALL_LINES = compose_body(face=FACE_LINES)
FACE_IN_BODY_25_ALL = SkeletonType(FACE_IN_BODY_25_ALL_LINES, BODY_25_JOINTS)
FACE_IN_BODY_25_ALL_REDUCER = SkeletonReducer(FACE_IN_BODY_25_ALL)
MODE_SKELS = {
"BODY_25": BODY_25,
"BODY_25_ALL": BODY_135,
"BODY_135": BODY_135,
}
|
import numpy as np
import torch
from torch.autograd import Variable
'''
Sample a random unit vector in d dimensions by normalizing i.i.d gaussian vector
Input: d - dimensions
'''
def sample_random_unit(d):
gaussian = np.random.normal(size=d)
unit = gaussian / np.linalg.norm(gaussian)
return torch.tensor(unit)
'''
Implementation of the FKM algorithm (OGD without a gradient).
Input: x_t - current point in the decision set K_delta
f_t - loss function incurred
d - dimensionality of the feasible set
delta - perturbation parameter
step - step size
project - a projection oracle onto set K_delta
Returns: tuple of next point as a torch tensor, and the loss incurred.
'''
def fkm_step(x_t, f_t, d, delta, step, project):
u_t = sample_random_unit(d)
y_t = x_t + delta * u_t
loss = float(f_t(y_t).data.numpy())
g_t = (d * loss / delta) * u_t
new_point = x_t - step * g_t
update = project(new_point.numpy())
return torch.tensor(update), loss
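# Minimal usage sketch (an assumption for illustration, not part of the original module):
# run FKM on the convex loss f(y) = ||y - 1||^2 over the unit Euclidean ball, projecting
# onto the delta-shrunken ball K_delta by rescaling.
if __name__ == "__main__":
    d = 5
    delta = 0.1
    step = 0.01

    def project_to_shrunken_ball(point, radius=1.0 - delta):
        # rescale any point that falls outside the ball of the given radius
        norm = np.linalg.norm(point)
        return point if norm <= radius else point * (radius / norm)

    def quadratic_loss(y):
        return torch.sum((y - 1.0) ** 2)

    x = torch.zeros(d, dtype=torch.float64)
    losses = []
    for _ in range(2000):
        x, incurred = fkm_step(x, quadratic_loss, d, delta, step, project_to_shrunken_ball)
        losses.append(incurred)
    print("average loss over the run:", sum(losses) / len(losses))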
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
import sys
import unittest
class IsInstanceTest(unittest.TestCase):
def test_isinstance_metaclass(self):
class AlwaysFalse(type):
def __instancecheck__(cls, instance):
return False
class A(metaclass=AlwaysFalse):
pass
self.assertFalse(isinstance(int, A))
self.assertTrue(isinstance(A(), A)) # does not call __instancecheck__
class AlwaysTrue(type):
def __instancecheck__(cls, instance):
return True
class B(metaclass=AlwaysTrue):
pass
self.assertTrue(isinstance(int, B))
self.assertTrue(isinstance(B(), B)) # does not call __instancecheck__
def test_isinstance_bigint(self):
# check that isinstance(x, int) returns True on BigInteger values
l = sys.maxsize + 1
if sys.implementation.name == "ironpython":
# https://github.com/IronLanguages/ironpython3/issues/52
self.assertNotEqual(type(0), type(l))
self.assertTrue(isinstance(l, int))
def test_isinstance_tuple_subclass(self):
"""https://github.com/IronLanguages/ironpython3/issues/1255"""
class T(tuple):
def __iter__(self):
yield self
# isinstance should not be invoking __iter__ on the subclass
self.assertFalse(isinstance(3, T()))
if __name__ == '__main__':
unittest.main()
|
# base_trainer.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class BaseTrainer(object):
def __init__(self, config, data, logger, model, session):
self.config = config
self.data = data
self.logger = logger
self.model = model
self.session = session
self.session.run(tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()))
def train(self):
for epoch in range(self.model.epoch.eval(self.session), self.config.num_epochs + 1, 1):
self.train_epoch()
self.validate_epoch()
self.session.run(self.model.increment_epoch)
def evaluate_data(self, data, save_fn):
raise NotImplementedError
def train_epoch(self):
raise NotImplementedError
def train_step(self):
raise NotImplementedError
def validate_epoch(self):
raise NotImplementedError
def validate_step(self):
raise NotImplementedError
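# Illustrative subclass sketch (hypothetical names such as `steps_per_epoch` are
# assumptions, not part of the original interface): a concrete trainer overrides the
# epoch/step hooks, e.g.
#   class MyTrainer(BaseTrainer):
#       def train_epoch(self):
#           for _ in range(self.config.steps_per_epoch):
#               self.train_step()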
|
# ------------------------------------------------------------
# Copyright (c) 2017-present, SeetaTech, Co.,Ltd.
#
# Licensed under the BSD 2-Clause License.
# You should have received a copy of the BSD 2-Clause License
# along with the software. If not, See,
#
# <https://opensource.org/licenses/BSD-2-Clause>
#
# Codes are based on:
#
# <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/normalization.py>
#
# ------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import dragon
from dragon.vm.tensorflow.framework import tensor_shape
from dragon.vm.tensorflow.layers import base
from dragon.vm.tensorflow.ops import init_ops
class BatchNormalization(base.Layer):
def __init__(self,
axis=-1,
momentum=0.99,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer=init_ops.zeros_initializer(),
gamma_initializer=init_ops.ones_initializer(),
moving_mean_initializer=init_ops.zeros_initializer(),
moving_variance_initializer=init_ops.ones_initializer(),
beta_regularizer=None,
gamma_regularizer=None,
renorm=False,
renorm_clipping=None,
renorm_momentum=0.99,
fused=None,
trainable=True,
name=None,
**kwargs):
super(BatchNormalization, self).__init__(trainable=trainable, name=name, **kwargs)
self.axis = axis
self.momentum = momentum
self.epsilon = epsilon
self.center = center
self.scale = scale
self.beta_initializer = beta_initializer
self.gamma_initializer = gamma_initializer
self.moving_mean_initializer = moving_mean_initializer
self.moving_variance_initializer = moving_variance_initializer
self.beta_regularizer = beta_regularizer
self.gamma_regularizer = gamma_regularizer
self.renorm = renorm
self.fused = fused
self.trainable = trainable
if renorm:
raise ValueError('renorm is currently not supported.')
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if not input_shape.ndims:
raise ValueError('Input has undefined rank:', input_shape)
ndim = input_shape.ndims
if self.axis < 0:
axis = ndim + self.axis
else:
axis = self.axis
if axis < 0 or axis >= ndim:
raise ValueError('Value of `axis` argument ' + str(self.axis) +
' is out of range for input with rank ' + str(ndim))
if axis + 1 == ndim:
self._data_format = 'NHWC'
elif axis == 1:
self._data_format = 'NCHW'
else:
raise ValueError(
'Only axis 1 or last axis are currently supported dimensions for '
'batch norm. Got `axis` dimension: ', axis)
param_dim = input_shape[axis]
if not param_dim.value:
raise ValueError('Input has undefined `axis` dimension. Input shape: ', input_shape)
self.input_spec = base.InputSpec(ndim=ndim, axes={self.axis: param_dim.value})
self.moving_mean = self.add_variable(
name='moving_mean',
shape=(param_dim.value,),
initializer=self.moving_mean_initializer,
trainable=False)
self.moving_variance = self.add_variable(
name='moving_variance',
shape=(param_dim.value,),
initializer=self.moving_variance_initializer,
trainable=False)
self.gamma = self.add_variable(
name='gamma',
shape=(param_dim.value,),
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
trainable=self.scale)
self.beta = self.add_variable(
name='beta',
shape=(param_dim.value,),
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
trainable=self.center)
self.built = True
def call(self, inputs, training=False, *args, **kwargs):
use_stats = 0 if training else 1
return dragon.ops.BatchNorm([
inputs,
self.moving_mean,
self.moving_variance,
self.gamma,
self.beta],
axis=self.axis,
momentum=self.momentum,
eps=self.epsilon,
use_stats=use_stats)
def batch_normalization(
inputs,
axis=-1,
momentum=0.99,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer=init_ops.zeros_initializer(),
gamma_initializer=init_ops.ones_initializer(),
moving_mean_initializer=init_ops.zeros_initializer(),
moving_variance_initializer=init_ops.ones_initializer(),
beta_regularizer=None,
gamma_regularizer=None,
training=False,
trainable=True,
name=None,
reuse=None,
renorm=False,
renorm_clipping=None,
renorm_momentum=0.99,
fused=False):
return BatchNormalization(
axis=axis,
momentum=momentum,
epsilon=epsilon,
center=center,
scale=scale,
beta_initializer=beta_initializer,
gamma_initializer=gamma_initializer,
moving_mean_initializer=moving_mean_initializer,
moving_variance_initializer=moving_variance_initializer,
beta_regularizer=beta_regularizer,
gamma_regularizer=gamma_regularizer,
renorm=renorm,
renorm_clipping=renorm_clipping,
renorm_momentum=renorm_momentum,
fused=fused,
trainable=trainable,
name=name,
_reuse=reuse,
_scope=name).apply(inputs, training=training)
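# Usage sketch for the functional wrapper above (hedged: the tensor name, layout and
# training flag are illustrative assumptions, not taken from the original file):
#
#   y = batch_normalization(x, axis=1, momentum=0.99, training=is_training)  # x in NCHW layout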
# Aliases
BatchNorm = BatchNormalization
batch_norm = batch_normalization
|
import torch
import torch.nn.functional as F
import horovod.torch as hvd
def accuracy(output, target):
# get the index of the max log-probability
pred = output.max(1, keepdim=True)[1]
return pred.eq(target.view_as(pred)).cpu().float().mean()
def save_checkpoint(model, optimizer, checkpoint_format, epoch):
if hvd.rank() == 0:
filepath = checkpoint_format.format(epoch=epoch + 1)
state = {
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
}
torch.save(state, filepath)
class LabelSmoothLoss(torch.nn.Module):
def __init__(self, smoothing=0.0):
super(LabelSmoothLoss, self).__init__()
self.smoothing = smoothing
def forward(self, input, target):
log_prob = F.log_softmax(input, dim=-1)
weight = input.new_ones(input.size()) * \
self.smoothing / (input.size(-1) - 1.)
weight.scatter_(-1, target.unsqueeze(-1), (1. - self.smoothing))
loss = (-weight * log_prob).sum(dim=-1).mean()
return loss
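# Usage sketch for LabelSmoothLoss (hedged: the shapes and smoothing value are
# illustrative assumptions, not taken from the original file):
#
#   criterion = LabelSmoothLoss(smoothing=0.1)
#   logits = torch.randn(4, 10)              # batch of 4, 10 classes
#   targets = torch.randint(0, 10, (4,))
#   loss = criterion(logits, targets)        # scalar tensor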
def metric_average(val_tensor):
avg_tensor = hvd.allreduce(val_tensor)
return avg_tensor.item()
# Horovod: average metrics from distributed training.
class Metric(object):
def __init__(self, name):
self.name = name
self.sum = torch.tensor(0.)
self.n = torch.tensor(0.)
def update(self, val, n=1):
self.sum += float(hvd.allreduce(val.detach().cpu(), name=self.name))
self.n += n
@property
def avg(self):
return self.sum / self.n
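# Usage sketch for Metric (hedged: the metric name and tensors are illustrative
# assumptions; assumes hvd.init() has already been called):
#
#   val_acc = Metric('val_accuracy')
#   val_acc.update(accuracy(output, target))  # allreduced across Horovod workers
#   print(val_acc.avg.item())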
def create_lr_schedule(workers, warmup_epochs, decay_schedule, alpha=0.1):
def lr_schedule(epoch):
lr_adj = 1.
if epoch < warmup_epochs:
lr_adj = 1. / workers * (epoch * (workers - 1) / warmup_epochs + 1)
else:
decay_schedule.sort(reverse=True)
for e in decay_schedule:
if epoch >= e:
lr_adj *= alpha
return lr_adj
return lr_schedule
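# Usage sketch for create_lr_schedule (hedged: the warmup length and decay milestones
# are illustrative assumptions):
#
#   lr_schedule = create_lr_schedule(hvd.size(), warmup_epochs=5, decay_schedule=[30, 60, 80])
#   scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_schedule)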
|
Python 3.5.2 (v3.5.2:4def2a2901a5, Jun 25 2016, 22:18:55) [MSC v.1900 64 bit (AMD64)] on win32
Type "copyright", "credits" or "license()" for more information.
>>>
RESTART: C:\Users\Akshat\AppData\Local\Programs\Python\Python35\mergesort.py
[1, 2, 3, 4, 6, 71, 123]
>>>
RESTART: C:\Users\Akshat\AppData\Local\Programs\Python\Python35\mergesort.py
[4]
[71]
[2]
[4, 71]
[123]
[6]
[1]
[3]
[6, 123]
[1, 3]
[2, 4, 71]
[1, 3, 6, 123]
[1, 2, 3, 4, 6, 71, 123]
>>>
RESTART: C:\Users\Akshat\AppData\Local\Programs\Python\Python35\mergesort.py
[1, 2, 3, 4, 6, 71, 123]
>>>
RESTART: C:\Users\Akshat\AppData\Local\Programs\Python\Python35\mergesort.py
[2, 4, 71, 123, 6, 1, 3]
[2, 4, 71]
[4, 71]
[123, 6, 1, 3]
[123, 6]
[1, 3]
[1, 2, 3, 4, 6, 71, 123]
>>>
RESTART: C:\Users\Akshat\AppData\Local\Programs\Python\Python35\mergesort.py
[1, 2, 3, 4, 6, 71, 123]
>>>
RESTART: C:\Users\Akshat\AppData\Local\Programs\Python\Python35\mergesort.py
[4]
[2]
[123]
[1]
[6, 123]
[2, 4, 71]
[1, 2, 3, 4, 6, 71, 123]
>>>
RESTART: C:\Users\Akshat\AppData\Local\Programs\Python\Python35\mergesort.py
left [4]
right [71]
left [2]
right [4, 71]
left [123]
right [6]
left [1]
right [3]
left [6, 123]
right [1, 3]
left [2, 4, 71]
right [1, 3, 6, 123]
[1, 2, 3, 4, 6, 71, 123]
>>>
|
#!/usr/bin/env python
print "Content-type: text/plain\n\n"
import psycopg2
conn = psycopg2.connect("host=localhost dbname=siki user=siki password=qwerty")
cur = conn.cursor()
cur.execute("SELECT * FROM pdata")
for row in cur:
print row
cur.close()
conn.close()
|
"""Test cases for the document module."""
import pytest
from pytest_mock import MockFixture
from rdflib import Graph
from skolemizer.testutils import skolemization
from modelldcatnotordf.document import FoafDocument
from tests.testutils import assert_isomorphic
def test_instantiate_document() -> None:
"""It does not raise an exception."""
try:
_ = FoafDocument()
except Exception:
pytest.fail("Unexpected Exception ..")
def test_to_graph_should_return_identifier_set_at_constructor() -> None:
"""It returns a title graph isomorphic to spec."""
document = FoafDocument("http://example.com/documents/1")
src = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix dcat: <http://www.w3.org/ns/dcat#> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
<http://example.com/documents/1> a foaf:Document
.
"""
g1 = Graph().parse(data=document.to_rdf(), format="turtle")
g2 = Graph().parse(data=src, format="turtle")
assert_isomorphic(g1, g2)
def test_to_graph_should_return_title_and_identifier() -> None:
"""It returns a title graph isomorphic to spec."""
"""It returns an identifier graph isomorphic to spec."""
document = FoafDocument()
document.identifier = "http://example.com/documents/1"
document.title = {"nb": "Tittel 1", "en": "Title 1"}
src = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix dcat: <http://www.w3.org/ns/dcat#> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
<http://example.com/documents/1> a foaf:Document;
dct:title "Title 1"@en, "Tittel 1"@nb ;
.
"""
g1 = Graph().parse(data=document.to_rdf(), format="turtle")
g2 = Graph().parse(data=src, format="turtle")
assert_isomorphic(g1, g2)
def test_to_graph_should_return_document_skolemized(mocker: MockFixture) -> None:
"""It returns a title graph isomorphic to spec."""
"""It returns an identifier graph isomorphic to spec."""
document = FoafDocument()
document.title = {"nb": "Tittel 1", "en": "Title 1"}
mocker.patch(
"skolemizer.Skolemizer.add_skolemization", return_value=skolemization,
)
src = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix dcat: <http://www.w3.org/ns/dcat#> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
<http://example.com/.well-known/skolem/284db4d2-80c2-11eb-82c3-83e80baa2f94>
a foaf:Document;
dct:title "Title 1"@en, "Tittel 1"@nb ;
.
"""
g1 = Graph().parse(data=document.to_rdf(), format="turtle")
g2 = Graph().parse(data=src, format="turtle")
assert_isomorphic(g1, g2)
def test_to_graph_should_return_language() -> None:
"""It returns an identifier graph isomorphic to spec."""
document = FoafDocument()
document.identifier = "http://example.com/documents/1"
document.language = "http://example.com/languages/1"
src = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix dcat: <http://www.w3.org/ns/dcat#> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
<http://example.com/documents/1> a foaf:Document;
dct:language "http://example.com/languages/1"^^dct:LinguisticSystem
.
"""
g1 = Graph().parse(data=document.to_rdf(), format="turtle")
g2 = Graph().parse(data=src, format="turtle")
assert_isomorphic(g1, g2)
def test_to_graph_should_return_format_and_see_also() -> None:
"""It returns an identifier graph isomorphic to spec."""
document = FoafDocument()
document.identifier = "http://example.com/documents/1"
document.format = "https://www.iana.org/assignments/media-types/application/pdf"
document.rdfs_see_also = "http://example.com/link"
src = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix dcat: <http://www.w3.org/ns/dcat#> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
<http://example.com/documents/1> a foaf:Document;
rdfs:seeAlso <http://example.com/link> ;
dct:format
"https://www.iana.org/assignments/media-types/application/pdf"^^dct:MediaType
.
"""
g1 = Graph().parse(data=document.to_rdf(), format="turtle")
g2 = Graph().parse(data=src, format="turtle")
assert_isomorphic(g1, g2)
|
"""Module to create, remove, and manage trades currently in play in running strategies"""
import logging
logger = logging.getLogger(__name__)
class Position:
def __init__(self, market, amount, price):
self.market = market
self.amount = amount
self.price = price
def update(self):
pass
class LongPosition(Position):
"""This class will handle a position's orders, stop losses, and exit/entry"""
def __init__(self, market, amount, price, fixed_stoploss_percent, trailing_stoploss_percent, profit_target_percent):
super().__init__(market, amount, price)
self.is_open = False
self.profit_target_percent = profit_target_percent
        self.trailing_stoploss_percent = trailing_stoploss_percent  # store the raw percent; calculate_trailing_stoploss() applies it to the price
self.trailing_stoploss = self.calculate_trailing_stoploss()
self.fixed_stoploss = price * fixed_stoploss_percent # we can pass in an actual value to keep our fixed loss at
self.profit_target = self.calculate_profit_target()
self.initial_order = None
def open(self):
"""Use the market to place the order"""
self.initial_order = self.market.limit_buy(self.amount, self.price)
self.is_open = True
def update(self, sell=False):
"""Use this method to trigger position to check if profit target has been met, and re-set trailiing stop loss"""
if not self.is_open:
pass
elif self.market.get_best_bid() < self.trailing_stoploss or \
self.market.get_best_bid() < self.fixed_stoploss or \
self.market.get_best_bid() >= self.profit_target or \
sell is True: # check price against last calculated trailing stoploss
self.liquidate_position()
# re-calculate trailing stoploss
self.trailing_stoploss = self.calculate_trailing_stoploss()
# calculate trailing stoploss based on percent (passed in as decimal for now)
# for example if trailing_stoploss_percent = .97
# and latest candle low is $100
# the trailing_stoploss will be $97
# using low for now, but we can change this
def calculate_trailing_stoploss(self):
return self.price * self.trailing_stoploss_percent
# calculate profit target based on a percent (passed in as decimal for now)
# if buy price was $100 and profit_target_percent = 1.03
# profit target will be $103
def calculate_profit_target(self):
return self.price * self.profit_target_percent
def update_trailing_stoploss(self):
"""Will use this method to actually create the order that will serve as the stop loss"""
pass
def liquidate_position(self):
"""Will use this method to actually create the order that liquidates the position"""
logger.info("Liquidating long position of " + self.amount + " | " + self.market.analysis_pair)
self.market.limit_sell(self.amount, self.market.get_best_bid())
self.is_open = False
class ShortPosition(Position):
"""Short position is basically just to close out the order successfully ie liquidate_position"""
def __init__(self, market, amount, price):
super().__init__(market, amount, price)
self.initial_order = None
def open(self):
self.market.limit_sell(self.amount, self.price)
def confirm_sell_order(self):
pass
def open_long_position(market, amount, price, fixed_stoploss_percent, trailing_stoploss_percent, profit_target_percent):
position = LongPosition(market, amount, price, fixed_stoploss_percent, trailing_stoploss_percent, profit_target_percent)
position.open()
return position
def open_short_position(market, amount, price):
position = ShortPosition(market, amount, price)
position.open()
return position
def calculate_transaction_fee(exchange, pair):
return exchange.load_market(pair)['fee']
def calculate_drawdown():
pass
|
# coding: utf-8
import time
from nose.tools import ok_
from mockidp.saml.response import saml_timestamp
def test_saml_timestamp():
t = time.time()
x = saml_timestamp(t)
ok_(len(x) > 1)
|
import test_support
import time
import unittest
class TimeTestCase(unittest.TestCase):
def setUp(self):
self.t = time.time()
def test_data_attributes(self):
time.altzone
time.daylight
time.timezone
time.tzname
def test_clock(self):
time.clock()
def test_conversions(self):
self.assert_(time.ctime(self.t)
== time.asctime(time.localtime(self.t)))
self.assert_(long(time.mktime(time.localtime(self.t)))
== long(self.t))
def test_sleep(self):
time.sleep(1.2)
def test_strftime(self):
tt = time.gmtime(self.t)
for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'H', 'I',
'j', 'm', 'M', 'p', 'S',
'U', 'w', 'W', 'x', 'X', 'y', 'Y', 'Z', '%'):
format = ' %' + directive
try:
time.strftime(format, tt)
except ValueError:
self.fail('conversion specifier: %r failed.' % format)
def test_asctime(self):
time.asctime(time.gmtime(self.t))
self.assertRaises(TypeError, time.asctime, 0)
def test_main():
test_support.run_unittest(TimeTestCase)
if __name__ == "__main__":
test_main()
|
import math
from functools import reduce
import torch
import torch.nn as nn
import pytorch_acdc as dct
from torch.utils.checkpoint import checkpoint
class ACDC(nn.Module):
"""
A structured efficient layer, consisting of four steps:
1. Scale by diagonal matrix
2. Discrete Cosine Transform
3. Scale by diagonal matrix
4. Inverse Discrete Cosine Transform
"""
def __init__(self, in_features, out_features, groups=1, bias=True):
super(ACDC, self).__init__()
self.in_features, self.out_features = in_features, out_features
assert in_features == out_features, "output size must equal input"
self.A = nn.Parameter(torch.Tensor(1, in_features))
self.D = nn.Parameter(torch.Tensor(1, out_features))
if bias:
self.bias = nn.Parameter(torch.Tensor(1,out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
self.groups = groups
self.pack, self.unpack = PackGroups(groups), UnPackGroups(groups)
self.riffle = Riffle()
def reset_parameters(self):
# used in original code: https://github.com/mdenil/acdc-torch/blob/master/FastACDC.lua
self.A.data.normal_(1., 1e-2)
self.D.data.normal_(1., 1e-2)
if self.bias is not None:
stdv = 1. / math.sqrt(self.out_features)
self.bias.data.uniform_(-stdv, stdv)
def forward(self, x):
n, d = x.size()
x = self.A*x # first diagonal matrix
x = self.pack(x)
x = dct.dct(x) # forward DCT
x = self.unpack(x)
x = self.D*x # second diagonal matrix
x = self.pack(x)
x = self.riffle(x)
x = dct.idct(x) # inverse DCT
x = self.unpack(x)
if self.bias is not None:
return x + self.bias
else:
return x
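# Usage sketch for the ACDC layer above (hedged: the sizes are illustrative
# assumptions; note that in_features must equal out_features):
#
#   layer = ACDC(64, 64, groups=4)
#   y = layer(torch.randn(8, 64))   # output keeps the input shape: (8, 64)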
class BlockDiagonalACDC(nn.Module):
def __init__(self, in_features, out_features, groups=1, bias=True):
super(BlockDiagonalACDC, self).__init__()
self.in_features, self.out_features = in_features, out_features
self.groups = groups
assert in_features == out_features, "output size must equal input"
c = self.in_features
self.A = nn.Conv1d(c, c, 1, bias=False, groups=groups)
self.D = nn.Conv1d(c, c, 1, bias=False, groups=groups)
if bias:
self.bias = nn.Parameter(torch.Tensor(1,out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
self.riffle = Riffle()
def reset_parameters(self):
if self.bias is not None:
stdv = 1. / math.sqrt(self.out_features)
self.bias.data.uniform_(-stdv, stdv)
def forward(self, x):
n, d = x.size()
x = self.A(x.view(n,d,1)) # first block diagonal matrix
x = dct.dct(x.view(n,d)) # forward DCT
x = self.D(x.view(n,d,1)) # second block diagonal matrix
x = dct.idct(x.view(n,d)) # inverse DCT
x = self.riffle(x)
if self.bias is not None:
return x + self.bias
else:
return x
class LinearACDC(nn.Linear):
"""Implement an ACDC layer in one matrix multiply (but more matrix
operations for the parameterisation of the matrix)."""
def __init__(self, in_features, out_features, bias=False, original=False):
#assert in_features == out_features, "output size must equal input"
        assert out_features >= in_features, "%i must be greater than or equal to %i"%(out_features, in_features)
assert out_features%in_features == 0
self.expansion = out_features//in_features
super(LinearACDC, self).__init__(in_features, out_features, bias=bias)
self.riffle = Riffle()
self.original = original # whether to use original parameterisation
def reset_parameters(self):
super(LinearACDC, self).reset_parameters()
# this is probably not a good way to do this
if 'A' not in self.__dict__.keys():
self.A = nn.Parameter(torch.Tensor(self.out_features, 1))
self.D = nn.Parameter(torch.Tensor(self.out_features, 1))
self.A.data.normal_(1., 1e-2)
self.D.data.normal_(1., 1e-2)
        # need to have DCT matrices stored for speed
        # they are plain tensors rather than Parameters, so forward() moves them to the right device
N = self.out_features
self.dct = dct.dct(torch.eye(N))
self.idct = dct.idct(torch.eye(N))
# remove weight Parameter
del self.weight
def forward(self, x):
n, d = x.size()
if self.expansion > 1:
x = x.repeat(1, self.expansion)
self.dct = self.dct.to(self.A.device)
AC = self.A*self.dct
self.idct = self.idct.to(self.D.device)
DC = self.D*self.idct
if self.original:
ACDC = torch.matmul(AC,DC)
else:
ACDC = torch.matmul(self.riffle(AC),DC)
self.weight = ACDC.t() # monkey patch
return super(LinearACDC, self).forward(x)
def kernel_matrix_to_weights(W, c_out, c_in, k):
"""Maps to 4D weight tensor from the kernel matrix used in im2col."""
assert k == 1 # yeah this function is quite pointless now
return W.view(c_out, c_in, k, k)
class ConvACDC(nn.Conv2d):
"""Implements an ACDC convolutional layer by replacing the weights in a
convolutional layer with the effective weights of an ACDC layer. After
replacing the weights it operates precisely like a convolutional layer."""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=False, original=False):
        assert out_channels >= in_channels, "channels: %i must be greater than or equal to %i"%(out_channels, in_channels)
assert out_channels%in_channels == 0
assert bias == False # likely to accidentally set this and break things
assert groups == 1
self.expansion = out_channels//in_channels
if kernel_size == 1:
super(ConvACDC, self).__init__(in_channels, out_channels,
kernel_size, stride=stride, padding=padding,
dilation=dilation, groups=groups, bias=bias)
elif kernel_size > 1:
super(ConvACDC, self).__init__(out_channels, out_channels, 1,
groups=1, bias=bias)
if kernel_size > 1:
self.grouped = nn.Conv2d(in_channels, in_channels, kernel_size,
stride=stride, padding=padding, dilation=dilation,
groups=in_channels, bias=False)
self.riffle = Riffle()
self.original = original
def reset_parameters(self):
super(ConvACDC, self).reset_parameters()
# this is probably not a good way to do this
assert self.kernel_size[0] == self.kernel_size[1], "%s"%self.kernel_size
N = self.out_channels
if 'A' not in self.__dict__.keys():
self.A = nn.Parameter(torch.Tensor(N, 1))
self.D = nn.Parameter(torch.Tensor(N, 1))
self.A.data.normal_(1., 1e-2)
self.D.data.normal_(1., 1e-2)
# initialise DCT matrices
self.dct = dct.dct(torch.eye(N))
self.idct = dct.idct(torch.eye(N))
# remove weight Parameter
del self.weight
def acdc(self, device):
k = self.kernel_size[0]
c_out = self.out_channels
# check our stored DCT matrices are on the right device
if self.dct.device != device:
self.dct = self.dct.to(device)
self.idct = self.idct.to(device)
AC = self.A*self.dct
DC = self.D*self.idct
if self.original:
return torch.matmul(AC, DC)
else:
return torch.matmul(self.riffle(AC), DC)
def forward(self, x):
if hasattr(self, 'grouped'):
x = self.grouped(x)
n, c_in, h, w = x.size()
k = self.kernel_size[0]
c_in, c_out = self.in_channels, self.out_channels
if self.expansion > 1:
x = x.repeat(1, self.expansion, 1, 1)
ACDC = self.acdc(x.device)
self.weight = kernel_matrix_to_weights(ACDC, c_out, c_in, k)
return super(ConvACDC, self).forward(x)
class Riffle(nn.Module):
def forward(self, x):
# based on shufflenet shuffle
# and https://en.wikipedia.org/wiki/Shuffling#Riffle
dim = x.dim()
if dim == 2:
n, d = x.data.size()
assert d%2 == 0, "dim must be even, was %i"%d
groups = d//2
x = x.view(n, groups, 2).permute(0,2,1).contiguous()
return x.view(n, d)
elif dim == 4:
N,C,H,W = x.size()
g = 2
return x.view(N,g,C//g,H,W).permute(0,2,1,3,4).contiguous().view(N,C,H,W)
else:
raise ValueError("Shape of x not supported: %s"%x.size())
class Permute(nn.Module):
"""Assuming 2d input, permutes along last dimension using a fixed
permutation."""
def __init__(self, d):
self.d = d
super(Permute, self).__init__()
self.reset_parameters()
def reset_parameters(self):
self.permute_idxs = torch.randperm(self.d)
def to(self, device):
        self.permute_idxs = self.permute_idxs.to(device)
        return super(Permute, self).to(device)
def forward(self, x):
return x[:,self.permute_idxs]
class PackGroups(nn.Module):
def __init__(self, groups):
super(PackGroups, self).__init__()
self.groups = groups
def forward(self, x):
n, d = x.size()
return x.view(n*self.groups, d//self.groups)
class UnPackGroups(nn.Module):
def __init__(self, groups):
super(UnPackGroups, self).__init__()
self.groups = groups
def forward(self, x):
n, d = x.size()
return x.view(n//self.groups, d*self.groups)
class PadLinearTo(nn.Linear):
"""Pad by concatenating a linear layer."""
def __init__(self, input_features, to):
super(PadLinearTo, self).__init__(input_features, to-input_features, bias=False)
def forward(self, x):
pad = super(PadLinearTo, self).forward(x)
return torch.cat([x, pad], 1)
class DropLinearTo(nn.Linear):
"""Drop dimensions after providing shortcut by Linear Layer. Not expecting
to use this much."""
def __init__(self, input_features, to):
super(DropLinearTo, self).__init__(input_features-to, to, bias=False)
self.to = to
def forward(self, x):
#residual = super(DropLinearTo, self).forward(x[:,self.to:])
return x[:, :self.to] #+ residual
class StackedACDC(nn.Module):
"""
A series of ACDC layers, with batchnorm, relu and riffle shuffles in between.
Input is divided into groups, groups are rounded to nearest power of 2 and
padding or dropping groups is used to map between different input sizes.
"""
def __init__(self, in_features, out_features, n_layers, groups=1):
super(StackedACDC, self).__init__()
self.in_features, self.out_features = in_features, out_features
self.n_layers = n_layers
# for non-matching input/output sizes
if in_features != out_features:
# nearest power of 2 in input groups
group_size = 2**(math.ceil(math.log(in_features//groups,2)))
# number of groups we'll need at output (minimum)
n_groups_out = math.ceil(float(out_features)/group_size)
# how many more is this than we start with?
n_groups_in = math.ceil(float(in_features)/group_size)
n_groups_diff = n_groups_out - n_groups_in
# evenly spread the steps in groups over the number of layers we have
steps = [n_groups_in+round(n_groups_diff*i/float(n_layers+1))
for i in range(1,n_layers+1)]
# steps in dimensionality
dim_steps = [group_size*s for s in steps]
else:
dim_steps = [in_features]*n_layers
layers = []
d = in_features
for n, d_to in zip(range(n_layers), dim_steps):
if d_to > d:
layers.append(PadLinearTo(d, d_to))
elif d_to < d:
layers.append(DropLinearTo(d, d_to))
d = d_to
acdc = ACDC(d, d, groups=groups, bias=False)
#bn = nn.BatchNorm1d(d, affine=False)
riffle = Riffle()
#relu = nn.ReLU()
layers += [acdc, riffle]
# remove the last relu
#_ = layers.pop(-1)
if self.out_features < d:
layers.append(DropLinearTo(d, self.out_features))
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
class StackedLinearACDC(nn.Module):
def __init__(self, in_features, out_features, n_layers, bias=False,
original=False):
super(StackedLinearACDC, self).__init__()
self.in_features, self.out_features = in_features, out_features
assert out_features%in_features == 0
self.n_layers = n_layers
layers = []
d = in_features
for n in range(n_layers):
acdc = LinearACDC(d, out_features,
bias=False if n < n_layers-1 else bias, original=original)
d = out_features
permute = Riffle()
relu = nn.ReLU()
layers += [acdc, permute]
# remove the last relu
# _ = layers.pop(-1)
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
class StackedConvACDC(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, n_layers, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super(StackedConvACDC, self).__init__()
self.in_channels, self.out_channels = in_channels, out_channels
assert out_channels%in_channels == 0
self.n_layers = n_layers
layers = []
d = in_channels
for n in range(n_layers):
acdc = ConvACDC(d, out_channels, kernel_size,
stride=stride if n==0 else 1, padding=padding,
dilation=dilation, groups=groups, bias=bias)
d = out_channels
permute = Riffle()
relu = nn.ReLU()
layers += [acdc, permute, relu]
# remove the last relu
_ = layers.pop(-1)
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
class ChannelContract(nn.Module):
def __init__(self, in_channels, out_channels):
super(ChannelContract, self).__init__()
assert in_channels%out_channels == 0, \
f"{in_channels} not divisible by {out_channels}"
self.in_channels = in_channels
self.out_channels = out_channels
def forward(self, x):
n, c, h, w = x.size()
f = self.in_channels//self.out_channels
x = x.view(n, c//f, f, h, w)
return x.sum(2)
class ChannelExpand(nn.Module):
"""Concatenate channels to expand by `c_to_add` channels."""
def __init__(self, c_to_add):
super(ChannelExpand, self).__init__()
self.c = c_to_add
def forward(self,x):
x_add = x[:,:self.c,:,:]
return torch.cat([x,x_add],1)
def acdc_kernelmat(A, D, dct, idct, riffle, device):
# check our stored DCT matrices are on the right device
if dct.device != device:
dct = dct.to(device)
idct = idct.to(device)
AC = A*dct
DC = D*idct
return torch.matmul(riffle(AC), DC)
def create_kernelmat_function(device, layers, permute):
# only keep the dct and idct from the first layer
dct, idct = layers[0].dct, layers[0].idct
for layer in layers[1:]:
del layer.dct
del layer.idct
# args for function will be the differentiable parameters
kmat_args = [param for layer in layers for param in [layer.A, layer.D]]
def kernelmat(*args):
# pair args to match with layers
params = [(A,D) for A,D in zip(args[:-1:2], args[1::2])]
# iterate and build component ACDC matrices
acdcs = []
for (A,D), layer in zip(params, layers):
acdcs.append(acdc_kernelmat(A, D, dct, idct,
layer.riffle, device))
# riffle them all
acdcs = [permute(ACDC) for ACDC in acdcs]
# and combine them
return reduce(torch.matmul, acdcs)
return kernelmat, kmat_args
class FastStackedConvACDC(nn.Conv2d):
"""A Stacked ACDC layer that just combines all of the weight marices of all
of the layers in the stack before implementing the layer with a
convolution. This means that there is no ReLUs in it, though, which may
hinder representational capacity."""
def __init__(self, in_channels, out_channels, kernel_size, n_layers,
stride=1, padding=0, dilation=1, groups=1, bias=True,
original=False, checkpoint=False):
self.n_layers = n_layers
if kernel_size == 1:
super(FastStackedConvACDC, self).__init__(in_channels,
out_channels, kernel_size, stride=stride, padding=padding,
dilation=dilation, groups=groups, bias=bias)
elif kernel_size > 1:
assert groups == 1
super(FastStackedConvACDC, self).__init__(in_channels,
out_channels, 1, bias=bias)
self.grouped = nn.Conv2d(in_channels, in_channels, kernel_size,
stride=stride, padding=padding, dilation=dilation,
groups=in_channels, bias=False)
if out_channels > in_channels:
add_channels = 0
while out_channels%(in_channels+add_channels) != 0:
add_channels += 1
self.expand_channels = ChannelExpand(add_channels)
self.in_channels += add_channels
in_channels = self.in_channels
else:
self.expand_channels = lambda x: x
self.expansion = out_channels//in_channels
layers = []
for n in range(n_layers):
channels = max(out_channels, in_channels)
acdc = ConvACDC(channels, channels, 1, bias=bias,
original=original)
layers += [acdc]
        # drop the last layer (no ReLUs are added in this fast variant)
self.permute = Riffle()
_ = layers.pop(-1)
self.layers = nn.Sequential(*layers)
if out_channels < in_channels:
self.collapse = ChannelContract(in_channels, out_channels)
else:
self.collapse = lambda x: x
        # for checkpointing (respect the constructor argument)
        self.checkpoint = checkpoint
def reset_parameters(self):
del self.weight
def forward(self, x):
if hasattr(self, 'grouped'):
x = self.grouped(x)
x = self.expand_channels(x)
if self.expansion > 1:
x = x.repeat(1, self.expansion, 1, 1)
k = self.kernel_size[0]
c = max(self.out_channels, self.in_channels)
if not self.checkpoint:
# gather ACDC matrices from each layer
acdcs = [layer.acdc(x.device) for layer in self.layers]
# riffle them all
acdcs = [self.permute(ACDC) for ACDC in acdcs]
# and combine them
ACDC = reduce(torch.matmul, acdcs)
else:
if not hasattr(self, 'combining_function'):
self.combining_function, self.args = create_kernelmat_function(x.device, self.layers, self.permute)
ACDC = checkpoint(self.combining_function, *self.args)
self.weight = kernel_matrix_to_weights(ACDC, c, c, k)
return self.collapse(super(FastStackedConvACDC, self).forward(x))
if __name__ == '__main__':
# check ConvACDC
x = torch.Tensor(16,128,8,8)
x.normal_(0,1)
conv_acdc = ConvACDC(128,128,3)
assert not hasattr(conv_acdc, 'weight')
param_names = [n for n,p in conv_acdc.named_parameters()]
assert 'weight' not in param_names, param_names
_ = conv_acdc(x)
param_names = [n for n,p in conv_acdc.named_parameters()]
assert 'weight' not in param_names, param_names
x = torch.Tensor(128,200)
x.normal_(0,1)
acdc = ACDC(200,200,bias=False)
y = x
for i in range(10):
y = acdc(y)
print(y.mean()) # tends to zero?
print(torch.sqrt(y.var(1)).mean(0)) # doesn't tend to one? not good
# check sanity of LinearACDC
lin_acdc = LinearACDC(200,200)
lin_acdc.A.data.fill_(1.)
lin_acdc.D.data.fill_(1.)
acdc.A.data.fill_(1.)
acdc.D.data.fill_(1.)
error = torch.abs(acdc(x) - lin_acdc(x)).max()
print("LienarACDC error", error.item())
assert error < 1e-3
acdc = StackedACDC(200,400,12, groups=4)
y = x
y = acdc(y)
print(y.mean()) # tends to zero?
print(torch.sqrt(y.var(1)).mean(0)) # doesn't tend to one? not good
print(y.size())
# speed test
import timeit
setup = "from __main__ import ACDC,LinearACDC; import torch; x = torch.Tensor(1000,4096); model = {0}(4096,4096); model = model.to('cuda').eval(); x = x.to('cuda'); x.normal_(0,1)"
print("Linear: ", timeit.timeit("_ = model(x)", setup=setup.format("torch.nn.Linear"), number=100))
print("ACDC: ", timeit.timeit("_ = model(x)", setup=setup.format("ACDC"), number=100))
print("Linear ACDC: ", timeit.timeit("_ = model(x)", setup=setup.format("LinearACDC"), number=100))
setup = "from __main__ import StackedConvACDC,FastStackedConvACDC; import torch; x = torch.Tensor(100,256,4,4); model = {0}(256,256,1,12,bias=False); model=model.to('cuda').eval(); x = x.to('cuda'); x.normal_(0,1)"
print("StackedConvACDC: ", timeit.timeit("_ = model(x)", setup=setup.format("StackedConvACDC"), number=100))
print("FastStackedConvACDC: ", timeit.timeit("_ = model(x)", setup=setup.format("FastStackedConvACDC"), number=100))
|
import random
from cpu.cpu import Cpu
from game.transforms import Board
from .services import find_winning_position
# Author: Xavier
class Experimental(Cpu):
def name(self):
        return "Xavier's AI 1.0"
def play(self, board: Board, player_side, opposing_side):
winning_position = find_winning_position(board, player_side)
if winning_position is not None:
return winning_position
losing_position = find_winning_position(board, opposing_side)
if losing_position is not None:
return losing_position
# board is empty
if board.played_moves_count() == 0:
return 0
if board.played_moves_count() == 2:
if board.data[8] == ' ':
return 8
else:
return 6
if board.played_moves_count() == 4:
if board.data[0] == player_side and board.data[6] == player_side:
return 2
else:
raise RuntimeError("Not reachable")
if board.played_moves_count() == 1:
if board.data[4] == opposing_side:
return 8
else:
return 4
if board.played_moves_count() == 3:
if board.data[0] == opposing_side and board.data[8] == opposing_side:
return 1
if board.data[2] == opposing_side and board.data[6] == opposing_side:
return 1
if board.data[4] == opposing_side and board.data[0] == opposing_side:
return 2
if board.data[1] == opposing_side and board.data[5] == opposing_side:
return 2
if board.data[1] == opposing_side and board.data[3] == opposing_side:
return 0
if board.data[5] == opposing_side and board.data[7] == opposing_side:
return 8
if board.data[3] == opposing_side and board.data[7] == opposing_side:
return 6
if board.data[1] == opposing_side and board.data[8] == opposing_side:
return 2
if board.data[2] == opposing_side and board.data[7] == opposing_side:
return 8
if board.data[2] == opposing_side and board.data[3] == opposing_side:
return 0
if board.data[0] == opposing_side and board.data[5] == opposing_side:
return 2
if board.data[1] == opposing_side and board.data[6] == opposing_side:
return 0
if board.data[0] == opposing_side and board.data[7] == opposing_side:
return 6
if board.data[3] == opposing_side and board.data[8] == opposing_side:
return 6
if board.data[6] == opposing_side and board.data[5] == opposing_side:
return 8
if board.played_moves_count() == 5:
if board.data[1] == opposing_side and board.data[3] == opposing_side and board.data[0] == ' ':
return 0
if board.data[1] == opposing_side and board.data[5] == opposing_side and board.data[2] == ' ':
return 2
if board.data[3] == opposing_side and board.data[7] == opposing_side and board.data[6] == ' ':
return 6
if board.data[5] == opposing_side and board.data[7] == opposing_side and board.data[8] == ' ':
return 8
possible_plays = []
for i in range(9):
if board.data[i] == ' ':
possible_plays.append(i)
return random.choice(possible_plays)
|
# -*- coding:utf-8 -*-
# https://leetcode.com/problems/minimum-window-substring/description/
class Solution(object):
def minWindow(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
"""
wi, wj = None, None
ds, dt = {}, {}
for c in t:
ds[c], dt[c] = 0, dt.get(c, 0) + 1
count, i = 0, 0
for j, c in enumerate(s):
if c not in dt:
continue
ds[c] += 1
if ds[c] == dt[c]:
count += 1
if count < len(dt):
continue
while i <= j and ds.get(s[i], 0) > dt.get(s[i], -1):
if s[i] in ds:
ds[s[i]] -= 1
i += 1
if wi is None or j - i < wj - wi:
wi, wj = i, j
count -= 1
ds[s[i]] -= 1
i += 1
        return '' if wi is None else s[wi:wj+1]
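# Usage sketch (hedged: the example strings and expected outputs follow the LeetCode
# problem statement, not anything in the original file):
#
#   Solution().minWindow("ADOBECODEBANC", "ABC")  # -> "BANC"
#   Solution().minWindow("a", "aa")               # -> ""
|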
from django.urls import path
from . import views
urlpatterns = [
path('', views.account_login, name="account_login"),
path('register/', views.account_register, name="account_register"),
path('logout/', views.account_logout, name="account_logout"),
]
|
import unittest
import random
from LinkedList import SinglyLinkedList
RANDMAX = 100
'''
ATTRIBUTES:-
self.test_list -> A python list which contains the elements in order
self.test_SinglyLinkedList -> The actual Singly Linked List Object to do operations
test_insert -> A random test element to insert in the List
test_insert_pos -> A random position to insert the test element at
APPROACH:-
Similar operation is done in both the self.test_list and self.test_SinglyLinkedList
Then they are asserted to equal.
At certain test-cases, warnings are tested: the test case passes when the warning is correctly raised
'''
class TestInsert(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestInsert, self).__init__(*args, **kwargs)
self.test_list = []
self.test_SinglyLinkedList = SinglyLinkedList()
# self.test_list = random.sample(range(0, RANDMAX), 7) # TODO: change this randomizing parameters
def test_InsertAtBeginning_once(self):
test_insert = random.randint(0, RANDMAX)
self.test_list.insert(0, test_insert)
self.test_SinglyLinkedList.insertAtBeginning(test_insert)
self.assertEqual(
list(self.test_SinglyLinkedList.toList()), self.test_list)
def test_InsertAtBeginning_multiple(self):
for _ in range(random.randint(0, RANDMAX//2)):
test_insert = random.randint(0, RANDMAX)
self.test_list.insert(0, test_insert)
self.test_SinglyLinkedList.insertAtBeginning(test_insert)
self.assertEqual(
list(self.test_SinglyLinkedList.toList()), self.test_list)
def test_insertAtEnd_once(self):
test_insert = random.randint(0, RANDMAX)
self.test_list.append(test_insert)
self.test_SinglyLinkedList.insertAtEnd(test_insert)
self.assertEqual(
list(self.test_SinglyLinkedList.toList()), self.test_list)
def test_insertAtEnd_multiple(self):
for _ in range(random.randint(0, RANDMAX//2)):
test_insert = random.randint(0, RANDMAX)
self.test_list.append(test_insert)
self.test_SinglyLinkedList.insertAtEnd(test_insert)
self.assertEqual(
list(self.test_SinglyLinkedList.toList()), self.test_list)
def test_insertAt0_once(self):
test_insert = random.randint(0, RANDMAX)
self.test_list.append(test_insert)
self.test_SinglyLinkedList.insertAt(test_insert, 0)
self.assertEqual(
list(self.test_SinglyLinkedList.toList()), self.test_list)
def test_insertAt_warning(self):
test_insert = random.randint(0, RANDMAX)
test_insert_pos = random.randint(1, RANDMAX)
with self.assertWarns(RuntimeWarning):
self.test_SinglyLinkedList.insertAt(test_insert, test_insert_pos)
def test_insertAt_random(self):
for i in range(random.randint(0, RANDMAX//2)):
test_insert = random.randint(0, RANDMAX)
test_insert_pos = random.randint(0, i)
self.test_list.insert(test_insert_pos, test_insert)
self.test_SinglyLinkedList.insertAt(test_insert, test_insert_pos)
self.assertEqual(
list(self.test_SinglyLinkedList.toList()), self.test_list)
class TestDelete(unittest.TestCase):
def __init__(self, *args, **kwargs):
        super(TestDelete, self).__init__(*args, **kwargs)
self.test_list = random.sample(range(0, RANDMAX), RANDMAX//2)
self.test_SinglyLinkedList = SinglyLinkedList()
for i in self.test_list:
self.test_SinglyLinkedList.insertAtEnd(i)
if __name__ == '__main__':
unittest.main()
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from argparse import RawTextHelpFormatter
from jdcloud_cli.cement.ext.ext_argparse import expose
from jdcloud_cli.controllers.base_controller import BaseController
from jdcloud_cli.client_factory import ClientFactory
from jdcloud_cli.parameter_builder import collect_user_args, collect_user_headers
from jdcloud_cli.printer import Printer
from jdcloud_cli.skeleton import Skeleton
class FunctionController(BaseController):
class Meta:
label = 'function'
        help = 'JD Cloud function service APIs'
        description = '''
            function CLI subcommands for the function-related APIs.
            OpenAPI documentation: https://docs.jdcloud.com/cn/function-service/api/overview
'''
stacked_on = 'base'
stacked_type = 'nested'
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
            (['--function-name'], dict(help="""(string) Name of the function the alias belongs to """, dest='functionName', required=True)),
            (['--input-json'], dict(help='(json) Input parameters as a JSON string or the absolute path of a JSON file.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
            (['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
        help=''' List aliases ''',
        description='''
            List aliases.
            Example: jdc function list-alias --function-name xxx
''',
)
def list_alias(self):
client_factory = ClientFactory('function')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.function.apis.ListAliasRequest import ListAliasRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = ListAliasRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
            (['--function-name'], dict(help="""(string) Name of the function the alias belongs to """, dest='functionName', required=True)),
            (['--alias-name'], dict(help="""(string) Alias name """, dest='aliasName', required=True)),
            (['--description'], dict(help="""(string) Alias description """, dest='description', required=False)),
            (['--version'], dict(help="""(string) Version name the alias points to """, dest='version', required=False)),
            (['--input-json'], dict(help='(json) Input parameters as a JSON string or the absolute path of a JSON file.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
            (['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
        help=''' Create an alias ''',
        description='''
            Create an alias.
            Example: jdc function create-alias --function-name xxx --alias-name xxx
''',
)
def create_alias(self):
client_factory = ClientFactory('function')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.function.apis.CreateAliasRequest import CreateAliasRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = CreateAliasRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
            (['--function-name'], dict(help="""(string) Name of the function the alias belongs to """, dest='functionName', required=True)),
            (['--alias-name'], dict(help="""(string) Alias name """, dest='aliasName', required=True)),
            (['--input-json'], dict(help='(json) Input parameters as a JSON string or the absolute path of a JSON file.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
            (['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
        help=''' Get alias details ''',
        description='''
            Get alias details.
            Example: jdc function get-alias --function-name xxx --alias-name xxx
''',
)
def get_alias(self):
client_factory = ClientFactory('function')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.function.apis.GetAliasRequest import GetAliasRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = GetAliasRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
            (['--function-name'], dict(help="""(string) Name of the function the alias belongs to """, dest='functionName', required=True)),
            (['--alias-name'], dict(help="""(string) Alias name """, dest='aliasName', required=True)),
            (['--description'], dict(help="""(string) Alias description """, dest='description', required=True)),
            (['--version'], dict(help="""(string) Version the alias points to """, dest='version', required=True)),
            (['--input-json'], dict(help='(json) Input parameters as a JSON string or the absolute path of a JSON file.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
            (['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
        help=''' Update an alias ''',
        description='''
            Update an alias.
            Example: jdc function update-alias --function-name xxx --alias-name xxx --description xxx --version xxx
''',
)
def update_alias(self):
client_factory = ClientFactory('function')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.function.apis.UpdateAliasRequest import UpdateAliasRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = UpdateAliasRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
            (['--function-name'], dict(help="""(string) Name of the function the alias belongs to """, dest='functionName', required=True)),
            (['--alias-name'], dict(help="""(string) Alias name """, dest='aliasName', required=True)),
            (['--input-json'], dict(help='(json) Input parameters as a JSON string or the absolute path of a JSON file.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
            (['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
        help=''' Delete an alias ''',
        description='''
            Delete an alias.
            Example: jdc function delete-alias --function-name xxx --alias-name xxx
''',
)
def delete_alias(self):
client_factory = ClientFactory('function')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.function.apis.DeleteAliasRequest import DeleteAliasRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DeleteAliasRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
            (['--list-all'], dict(help="""(bool) Whether to return all functions """, dest='listAll', required=True)),
            (['--page-number'], dict(help="""(int) Page number """, dest='pageNumber', type=int, required=False)),
            (['--page-size'], dict(help="""(int) Page size """, dest='pageSize', type=int, required=False)),
            (['--filters'], dict(help="""(array: filter) functionId - function ID, exact match, multiple values supported; functionName - function name, fuzzy match, single value only; """, dest='filters', required=False)),
            (['--input-json'], dict(help='(json) Input parameters as a JSON string or the absolute path of a JSON file.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
            (['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
        help=''' List functions ''',
        description='''
            List functions.
            Example: jdc function list-function --list-all true
''',
)
def list_function(self):
client_factory = ClientFactory('function')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.function.apis.ListFunctionRequest import ListFunctionRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = ListFunctionRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
            (['--name'], dict(help="""(string) Function name """, dest='name', required=False)),
            (['--description'], dict(help="""(string) Function description """, dest='description', required=False)),
            (['--entrance'], dict(help="""(string) Function entry point, in the form entry_file.entry_function """, dest='entrance', required=False)),
            (['--memory'], dict(help="""(int) Maximum memory for the function """, dest='memory', type=int, required=False)),
            (['--run-time'], dict(help="""(string) Function runtime environment """, dest='runTime', required=False)),
            (['--over-time'], dict(help="""(int) Function execution timeout """, dest='overTime', type=int, required=False)),
            (['--version'], dict(help="""(string) Function version, defaults to LATEST """, dest='version', required=False)),
            (['--code'], dict(help="""(code) Function code package """, dest='code', required=False)),
            (['--environment'], dict(help="""(env) Environment variables for the function runtime """, dest='environment', required=False)),
            (['--log-set-id'], dict(help="""(string) Log set ID configured for the function """, dest='logSetId', required=False)),
            (['--log-topic-id'], dict(help="""(string) Log topic ID configured for the function """, dest='logTopicId', required=False)),
            (['--vpc-id'], dict(help="""(string) VPC ID configured for the function """, dest='vpcId', required=False)),
            (['--subnet-id'], dict(help="""(string) Subnet ID configured for the function """, dest='subnetId', required=False)),
            (['--input-json'], dict(help='(json) Input parameters as a JSON string or the absolute path of a JSON file.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
            (['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
        help=''' Create a function ''',
        description='''
            Create a function.
            Example: jdc function create-function
''',
)
def create_function(self):
client_factory = ClientFactory('function')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.function.apis.CreateFunctionRequest import CreateFunctionRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = CreateFunctionRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
            (['--function-name'], dict(help="""(string) Function name """, dest='functionName', required=True)),
            (['--input-json'], dict(help='(json) Input parameters as a JSON string or the absolute path of a JSON file.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
            (['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
        help=''' Get function details ''',
        description='''
            Get function details.
            Example: jdc function get-function --function-name xxx
''',
)
def get_function(self):
client_factory = ClientFactory('function')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.function.apis.GetFunctionRequest import GetFunctionRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = GetFunctionRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
            (['--function-name'], dict(help="""(string) Function name """, dest='functionName', required=True)),
            (['--description'], dict(help="""(string) Function description """, dest='description', required=False)),
            (['--entrance'], dict(help="""(string) Function entry point, in the form entry_file.entry_function """, dest='entrance', required=False)),
            (['--memory'], dict(help="""(int) Maximum memory for the function """, dest='memory', type=int, required=False)),
            (['--run-time'], dict(help="""(string) Function runtime environment """, dest='runTime', required=False)),
            (['--over-time'], dict(help="""(int) Function execution timeout """, dest='overTime', type=int, required=False)),
            (['--version'], dict(help="""(string) Function version """, dest='version', required=False)),
            (['--code'], dict(help="""(code) Function code package """, dest='code', required=False)),
            (['--environment'], dict(help="""(env) Environment variables for the function runtime """, dest='environment', required=False)),
            (['--log-set-id'], dict(help="""(string) Log set ID configured for the function """, dest='logSetId', required=False)),
            (['--log-topic-id'], dict(help="""(string) Log topic ID configured for the function """, dest='logTopicId', required=False)),
            (['--vpc-id'], dict(help="""(string) VPC ID configured for the function """, dest='vpcId', required=False)),
            (['--subnet-id'], dict(help="""(string) Subnet ID configured for the function """, dest='subnetId', required=False)),
            (['--input-json'], dict(help='(json) Input parameters as a JSON string or the absolute path of a JSON file.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
            (['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
        help=''' Update a function ''',
        description='''
            Update a function.
            Example: jdc function update-function --function-name xxx
''',
)
def update_function(self):
client_factory = ClientFactory('function')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.function.apis.UpdateFunctionRequest import UpdateFunctionRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = UpdateFunctionRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
            (['--function-name'], dict(help="""(string) Function name """, dest='functionName', required=True)),
            (['--input-json'], dict(help='(json) Input parameters as a JSON string or the absolute path of a JSON file.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
            (['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
        help=''' Delete a function ''',
        description='''
            Delete a function.
            Example: jdc function delete-function --function-name xxx
''',
)
def delete_function(self):
client_factory = ClientFactory('function')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.function.apis.DeleteFunctionRequest import DeleteFunctionRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DeleteFunctionRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
            (['--function-name'], dict(help="""(string) Function name """, dest='functionName', required=True)),
            (['--version-name'], dict(help="""(string) Version name """, dest='versionName', required=True)),
            (['--event'], dict(help="""(string) Input event for invoking the function """, dest='event', required=True)),
            (['--input-json'], dict(help='(json) Input parameters as a JSON string or the absolute path of a JSON file.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
            (['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
        help=''' Invoke a function for testing from the console ''',
        description='''
            Invoke a function for testing from the console.
            Example: jdc function invoke --function-name xxx --version-name xxx --event xxx
''',
)
def invoke(self):
client_factory = ClientFactory('function')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.function.apis.InvokeRequest import InvokeRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = InvokeRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
            (['--function-name'], dict(help="""(string) Function name """, dest='functionName', required=True)),
            (['--version-name'], dict(help="""(string) Version name """, dest='versionName', required=True)),
            (['--event'], dict(help="""(string) Input event for asynchronously invoking the function """, dest='event', required=True)),
            (['--input-json'], dict(help='(json) Input parameters as a JSON string or the absolute path of a JSON file.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
            (['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
        help=''' Asynchronously invoke a function for testing ''',
        description='''
            Asynchronously invoke a function for testing.
            Example: jdc function async-invoke --function-name xxx --version-name xxx --event xxx
''',
)
def async_invoke(self):
client_factory = ClientFactory('function')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.function.apis.AsyncInvokeRequest import AsyncInvokeRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = AsyncInvokeRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
    @expose(
        arguments=[
            (['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
            (['--function-name'], dict(help="""(string) Function name """, dest='functionName', required=True)),
            (['--version-name'], dict(help="""(string) Version name """, dest='versionName', required=True)),
            (['--trigger-id'], dict(help="""(string) Trigger ID """, dest='triggerId', required=True)),
            (['--input-json'], dict(help='(json) Input parameters given as a JSON string or an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
            (['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
        ],
        formatter_class=RawTextHelpFormatter,
        help=''' Get trigger details ''',
        description='''
            Get trigger details.
            Example: jdc function get-trigger --function-name xxx --version-name xxx --trigger-id xxx
        ''',
    )
def get_trigger(self):
client_factory = ClientFactory('function')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.function.apis.GetTriggerRequest import GetTriggerRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = GetTriggerRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
    @expose(
        arguments=[
            (['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
            (['--function-name'], dict(help="""(string) Function name """, dest='functionName', required=True)),
            (['--input-json'], dict(help='(json) Input parameters given as a JSON string or an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
            (['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
        ],
        formatter_class=RawTextHelpFormatter,
        help=''' List versions ''',
        description='''
            List versions of a function.
            Example: jdc function list-version --function-name xxx
        ''',
    )
def list_version(self):
client_factory = ClientFactory('function')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.function.apis.ListVersionRequest import ListVersionRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = ListVersionRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
    @expose(
        arguments=[
            (['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
            (['--function-name'], dict(help="""(string) Function name """, dest='functionName', required=True)),
            (['--description'], dict(help="""(string) Version description """, dest='description', required=False)),
            (['--input-json'], dict(help='(json) Input parameters given as a JSON string or an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
            (['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
        ],
        formatter_class=RawTextHelpFormatter,
        help=''' Create a version ''',
        description='''
            Create a version.
            Example: jdc function create-version --function-name xxx
        ''',
    )
def create_version(self):
client_factory = ClientFactory('function')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.function.apis.CreateVersionRequest import CreateVersionRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = CreateVersionRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
    @expose(
        arguments=[
            (['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
            (['--function-name'], dict(help="""(string) Function name """, dest='functionName', required=True)),
            (['--version-name'], dict(help="""(string) Version name """, dest='versionName', required=True)),
            (['--input-json'], dict(help='(json) Input parameters given as a JSON string or an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
            (['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
        ],
        formatter_class=RawTextHelpFormatter,
        help=''' Get version details ''',
        description='''
            Get version details.
            Example: jdc function get-version --function-name xxx --version-name xxx
        ''',
    )
def get_version(self):
client_factory = ClientFactory('function')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.function.apis.GetVersionRequest import GetVersionRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = GetVersionRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
    @expose(
        arguments=[
            (['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
            (['--function-name'], dict(help="""(string) Function name """, dest='functionName', required=True)),
            (['--version-name'], dict(help="""(string) Version name """, dest='versionName', required=True)),
            (['--input-json'], dict(help='(json) Input parameters given as a JSON string or an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
            (['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
        ],
        formatter_class=RawTextHelpFormatter,
        help=''' Delete a version ''',
        description='''
            Delete a version.
            Example: jdc function delete-version --function-name xxx --version-name xxx
        ''',
    )
def delete_version(self):
client_factory = ClientFactory('function')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.function.apis.DeleteVersionRequest import DeleteVersionRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DeleteVersionRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
    @expose(
        arguments=[
            (['--api'], dict(help="""(string) api name """, choices=['list-alias','create-alias','get-alias','update-alias','delete-alias','list-function','create-function','get-function','update-function','delete-function','invoke','async-invoke','get-trigger','list-version','create-version','get-version','delete-version',], required=True)),
        ],
        formatter_class=RawTextHelpFormatter,
        help=''' Generate an empty JSON skeleton string for a single API ''',
        description='''
            Generate an empty JSON skeleton string for a single API.
            Example: jdc function generate-skeleton --api list-function ''',
    )
def generate_skeleton(self):
skeleton = Skeleton('function', self.app.pargs.api)
skeleton.show()
|
from RNA_describe import *
from ORF_eliminator import *
from RNA_gen import *
class Simulator:
    def get_random_sequence(self):
        '''
        This is Professor Miller's class that generates an RNA sequence
        without any restriction.
        '''
        generator = Collection_Generator()
        sequence_list = generator.get_sequences()  # generates sequences in list format
        sequences = ''.join(sequence_list)
        # print('The sequence below is generated from Collection_Generator. It may or may not have an ORF')
        return sequences
    def get_sequence_no_orf(self, rna):
        '''
        This function uses ORF_eliminator.
        ORF_eliminator is a class that removes any ORFs created in a random sequence.
        For example, 'ATGAAATAG' has an ORF of length 9. The program destroys the ORF
        by repeatedly replacing the 'T' in the stop codon ('TAG' in the example above)
        until the ORF length is 0.
        Two methods were implemented; the first is the more optimized one.
        '''
        eliminator = ORF_eliminator()
        seq_with_no_orf = eliminator.eliminate_ORF(rna)
        # print('\n The sequence below does not have any ORF for sure')
        return seq_with_no_orf
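    # A minimal, hedged sketch of the idea described above (this is NOT the
    # ORF_eliminator implementation; the regex and the 'T'->'C' substitution
    # are assumptions for illustration only): repeatedly break the stop codon
    # of the first in-frame ATG...stop match until no ORF remains.
    @staticmethod
    def _sketch_eliminate_orf(seq):
        import re
        orf_re = re.compile(r'ATG(?:[ACGT]{3})*?(TAG|TAA|TGA)')
        match = orf_re.search(seq)
        while match:
            stop_start = match.start(1)  # index of the 'T' that opens the stop codon
            seq = seq[:stop_start] + 'C' + seq[stop_start + 1:]
            match = orf_re.search(seq)
        return seq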
    def get_lengths_codons(self, rna):
        '''
        This function is meant for statistical purposes.
        It uses functionality found in RNA_describe to count
        the number of start and stop codons.
        '''
        counter = ORF_RE()
        number_of_codons = counter.get_codon_number(rna)
        return number_of_codons
    def get_number_bases(self, rna):
        '''
        This function is meant for statistical purposes.
        It uses functionality found in RNA_describe to count
        the number of 'A', 'C', 'T', 'G' bases.
        '''
        counter = ORF_RE()
        number_of_bases = counter.get_number_bases(rna)
        return number_of_bases
if __name__ == "__main__":
simulator = Simulator()
random_RNA = simulator.get_random_sequence()
print('Random sequence generated from Collection_Generator')
    print(random_RNA)  # prints the random sequence generated by Collection_Generator
    codon_counts = simulator.get_lengths_codons(random_RNA)
    start, stop = codon_counts[0], codon_counts[1]
    base_counts = simulator.get_number_bases(random_RNA)
    a, c, t, g = base_counts[0], base_counts[1], base_counts[2], base_counts[3]
    print('There are {} start codons and {} stop codons.'.format(start, stop))
    print('There are {} A, {} C, {} T, {} G'.format(a, c, t, g))
    print('\n\nSequence with no ORF from ORF_eliminator')
    rna = simulator.get_sequence_no_orf(random_RNA)
    codon_counts = simulator.get_lengths_codons(rna)
    start, stop = codon_counts[0], codon_counts[1]
    base_counts = simulator.get_number_bases(rna)
    a, c, t, g = base_counts[0], base_counts[1], base_counts[2], base_counts[3]
    print('There are {} start codons and {} stop codons.'.format(start, stop))
    print('There are {} A, {} C, {} T, {} G'.format(a, c, t, g))
print(rna)
|
import re
import time
import praw
from prawcore.exceptions import PrawcoreException
from database import TaskerNetDatabase
from utils import TASKERNET_RE, PRAW_SITE_NAME, MONITORED_SUBREDDITS
reddit = praw.Reddit(PRAW_SITE_NAME)
subreddit = reddit.subreddit(MONITORED_SUBREDDITS)
db = TaskerNetDatabase()
running = True
while running:
try:
for comment in subreddit.stream.comments():
if comment.author and comment.author.name != 'taskernet-collector':
taskernet_links = TASKERNET_RE.findall(comment.body)
source_link = f'https://reddit.com/comments/{comment.link_id[3:]}/_/{comment.id}'
for link in taskernet_links:
db.add_share(link, source_link)
except KeyboardInterrupt:
print('Ending now')
running = False
except PrawcoreException:
time.sleep(15)
|
import json
import scipy.io as scio
def pa_out(name_data, label_data, attributes_dict):
    # Filter lists
stander_list_upper_wear = ["ShortSleeve", "LongSleeve", "LongCoat"]
stander_list_lower_wear = ["Trousers", "Shorts", "Skirt&Dress"]
stander_list_upperbodycolor = []
stander_list_lowerbodycolor = []
stander_list_hat = ["Hat"]
stander_list_bag = ["HandBag", "ShoulderBag"]
stander_list_footwear = ["boots"]
images_dict = {}
    for i in range(len(name_data)):  # TODO: remove the range limit [0:3]
label_dict = {"clothing": "Null", "upper_color": "Null", "lower_color": "Null", "headwear": "Null", "bag": "Null", "footwear": "Null"}
        # Image name
images_name = name_data[i][0][0][:-4]
images_dict[images_name] = label_dict
        # Image attributes
label = label_data[i]
label_dict["clothing"] = []
for i in range(len(label)):
# if label[i] == 1:
# print(i, attributes_dict[i])
# clothing
if label[i] == 1 and attributes_dict[i] in stander_list_upper_wear + stander_list_lower_wear:
label_dict["clothing"].append(attributes_dict[i])
# upper_color
elif label[i] == 1 and attributes_dict[i] in stander_list_upperbodycolor:
label_dict["upper_color"] = []
label_dict["upper_color"].append(attributes_dict[i])
# lower_color
elif label[i] == 1 and attributes_dict[i] in stander_list_lowerbodycolor:
label_dict["lower_color"] = []
label_dict["lower_color"].append(attributes_dict[i])
# headwear
elif label[i] == 1 and attributes_dict[i] in stander_list_hat:
label_dict["headwear"] = []
label_dict["headwear"].append(attributes_dict[i])
# bag
elif label[i] == 1 and attributes_dict[i] in stander_list_bag:
label_dict["bag"] = []
label_dict["bag"].append(attributes_dict[i])
# footwear
elif label[i] == 1 and attributes_dict[i] in stander_list_footwear:
label_dict["footwear"] = []
label_dict["footwear"].append(attributes_dict[i])
# label_dict
# print(label_dict)
for lab in label_dict["clothing"]:
if lab in stander_list_upper_wear:
break
else:
label_dict["clothing"].append("upper_wear_null")
if lab in stander_list_lower_wear:
break
else:
label_dict["clothing"].append("lower_wear_null")
# print(images_list)
return images_dict
def pa_open_mat(datafile):
data = scio.loadmat(datafile)
# print(data.keys())
    # Attribute dictionary
attributes_dict = {}
for i in range(len(data["attributes"])):
attributes_dict[i] = data["attributes"][i][0][0]
# print(attributes_dict)
# test_images
name_data = data["test_images_name"]
label_data = data["test_label"]
test_images_dict = pa_out(name_data, label_data, attributes_dict)
# train_images
name_data = data["train_images_name"]
label_data = data["train_label"]
train_images_dict = pa_out(name_data, label_data, attributes_dict)
# val_images
name_data = data["val_images_name"]
label_data = data["val_label"]
val_images_dict = pa_out(name_data, label_data, attributes_dict)
    # Result (merged in this order to control output order)
images_dict = {}
images_dict.update(train_images_dict)
images_dict.update(val_images_dict)
images_dict.update(test_images_dict)
# print(images_list)
images_dict = json.dumps(images_dict)
return images_dict
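# Hedged usage sketch (the .mat file name and output path are assumptions):
# convert a PA-100K style annotation file into the merged JSON built above.
if __name__ == "__main__":
    labels_json = pa_open_mat("annotation.mat")
    with open("pa_labels.json", "w", encoding="utf-8") as f:
        f.write(labels_json)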
|
"""
Convert RUSA Perms report CSV to
the CSV format we'll use for mapping.
Besides selecting and renaming columns, the
transformations are:
* Inactive routes are omitted
* If a route is reversible AND point-to-point,
  it is duplicated with the from and to locations
  reversed
"""
import csv
import sys
import schemata
schema_in = schemata.rusa_report
schema_out = schemata.rusa_snarf
def process(record, output):
"""
Row is in form of a dict, to minimize dependence on schema,
although we still depend on the fields we need being present.
Output is also through a dictwriter, driven by the schema. If
we have not matched the schema.
"""
if not record["Active?"]:
return
outrow = { }
outrow["Perm_id"] = record["Route #"]
outrow["State"] = record["Start State"].strip()
outrow["City"] = record["Start City"].strip()
outrow["Perm_km"] = record["Distance"]
name_parts = record["Route name"].split(":")
outrow["Perm_name"] = name_parts[1].strip()
outrow["Perm_owner"] = record["Owner Name"]
outrow["Perm_notes"] = extract_notes(record).strip()
outrow["Perm_states"] = record["Within State(s)"]
output.writerow(outrow)
## Also the reversed route?
if record["Type"] == "PP" and record["Reversible?"] == "Y":
outrow["State"] = record["End State"].strip()
outrow["City"] = record["End City"].strip()
outrow["Perm_notes"] = extract_notes(record, reverse=True)
output.writerow(outrow)
def extract_notes(record, reverse=False):
"""
Synthesize notes from record fields.
"""
notes = ""
sep = ""
if record["Type"] == "PP":
if reverse:
notes += "(Reversed) to {}, {}".format(
record["Start City"], record["Start State"])
else:
notes += "to {}, {}".format(
record["End City"], record["End State"])
sep = "; "
if record["Free-route?"] == "yes":
notes += sep + "Free-route"
sep = ";"
if record["Super Randonnée?"] == "yes":
notes += sep + "Super randonnée"
sep = ";"
return notes
def main(reader, writer):
writer.writeheader()
for row in reader:
process(row,writer)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Prepare RUSA CSV report")
parser.add_argument('input', help="PermReport in CSV form directly from RUSA",
type=argparse.FileType('r', encoding="utf-8", errors="replace"),
nargs="?", default=sys.stdin)
parser.add_argument('output', help="Output CSV in the format accepted by add_latlon",
type=argparse.FileType('w'),
nargs="?", default=sys.stdout)
args = parser.parse_args()
reader = csv.DictReader(args.input)
writer = csv.DictWriter(args.output, schema_out)
main(reader,writer)
args.output.close()
|
# kytten/dialog.py
# Copyright (C) 2009 by Conrad "Lynx" Wong
import pyglet
from pyglet import gl
from widgets import Widget, Control, Label
from button import Button
from frame import Wrapper, Frame
from layout import GetRelativePoint, ANCHOR_CENTER
from layout import VerticalLayout, HorizontalLayout
class DialogEventManager(Control):
def __init__(self):
"""
Creates a new event manager for a dialog.
@param content The Widget which we wrap
"""
Control.__init__(self)
self.controls = []
self.control_areas = {}
self.control_map = {}
self.hover = None
self.focus = None
self.wheel_hint = None
self.wheel_target = None
def get_value(self, id):
widget = self.get_widget(id)
if widget is not None:
return widget.get_value()
def get_values(self):
retval = {}
for widget in self.controls:
if widget.is_input() and widget.id is not None:
retval[widget.id] = widget.get_value()
return retval
def get_widget(self, id):
return self.control_map.get(id)
def hit_control(self, x, y, control):
left, right, top, bottom = self.control_areas[control]
if x >= left and x < right and y >= bottom and y < top:
return control.hit_test(x, y)
else:
return False
def on_key_press(self, symbol, modifiers):
"""
TAB and ENTER will move us between fields, holding shift will
reverse the direction of our iteration. We don't handle ESCAPE.
Otherwise, we pass keys to our child elements.
@param symbol Key pressed
@param modifiers Modifiers for key press
"""
if symbol in [pyglet.window.key.TAB, pyglet.window.key.ENTER]:
focusable = [x for x in self.controls
if x.is_focusable() and not x.is_disabled()]
if not focusable:
return
if modifiers & pyglet.window.key.MOD_SHIFT:
dir = -1
else:
dir = 1
if self.focus is not None and self.focus in focusable:
index = focusable.index(self.focus)
else:
index = 0 - dir
new_focus = focusable[(index + dir) % len(focusable)]
self.set_focus(new_focus)
new_focus.ensure_visible()
# If we hit ENTER, and wrapped back to the first focusable,
# pass the ENTER back so the Dialog can call its on_enter callback
if symbol != pyglet.window.key.ENTER or \
new_focus != focusable[0]:
return pyglet.event.EVENT_HANDLED
elif symbol != pyglet.window.key.ESCAPE:
if self.focus is not None and hasattr(self.focus, 'on_key_press'):
return self.focus.on_key_press(symbol, modifiers)
def on_key_release(self, symbol, modifiers):
"""Pass key release events to the focus
@param symbol Key released
@param modifiers Modifiers for key released
"""
if self.focus is not None and hasattr(self.focus, 'on_key_release'):
return self.focus.on_key_release(symbol, modifiers)
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
"""
Handles mouse dragging. If we have a focus, pass it in.
@param x X coordinate of mouse
@param y Y coordinate of mouse
@param dx Delta X
@param dy Delta Y
@param buttons Buttons held while moving
@param modifiers Modifiers to apply to buttons
"""
if self.focus is not None:
self.focus.dispatch_event('on_mouse_drag',
x, y, dx, dy, buttons, modifiers)
return pyglet.event.EVENT_HANDLED
def on_mouse_motion(self, x, y, dx, dy):
"""
Handles mouse motion. We highlight controls that we are hovering
over.
@param x X coordinate of mouse
@param y Y coordinate of mouse
@param dx Delta X
@param dy Delta Y
"""
if self.hover is not None and not self.hit_control(x, y, self.hover):
self.hover.dispatch_event('on_mouse_motion', x, y, dx, dy)
new_hover = None
for control in self.controls:
if self.hit_control(x, y, control):
new_hover = control
break
self.set_hover(new_hover)
if self.hover is not None:
self.hover.dispatch_event('on_mouse_motion', x, y, dx, dy)
def on_mouse_press(self, x, y, button, modifiers):
"""
If the focus is set, and the target lies within the focus, pass the
message down. Otherwise, check if we need to assign a new focus.
If the mouse was pressed within our frame but no control was targeted,
we may be setting up to drag the Dialog around.
@param x X coordinate of mouse
@param y Y coordinate of mouse
@param button Button pressed
@param modifiers Modifiers to apply to button
"""
if self.focus is not None and self.hit_control(x, y, self.focus):
self.focus.dispatch_event('on_mouse_press',
x, y, button, modifiers)
return pyglet.event.EVENT_HANDLED
else:
if self.hit_test(x, y):
self.set_focus(self.hover)
if self.focus is not None:
self.focus.dispatch_event('on_mouse_press',
x, y, button, modifiers)
return pyglet.event.EVENT_HANDLED
else:
self.set_focus(None)
def on_mouse_release(self, x, y, button, modifiers):
"""
Button was released. We pass this along to the focus, then we
generate an on_mouse_motion to handle changing the highlighted
Control if necessary.
@param x X coordinate of mouse
@param y Y coordinate of mouse
@param button Button released
@param modifiers Modifiers to apply to button
"""
self.is_dragging = False
if self.focus is not None:
self.focus.dispatch_event('on_mouse_release',
x, y, button, modifiers)
DialogEventManager.on_mouse_motion(self, x, y, 0, 0)
return pyglet.event.EVENT_HANDLED
def on_mouse_scroll(self, x, y, scroll_x, scroll_y):
"""
Mousewheel was scrolled. See if we have a wheel target, or
failing that, a wheel hint.
@param x X coordinate of mouse
@param y Y coordinate of mouse
@param scroll_x Number of clicks horizontally mouse was moved
@param scroll_y Number of clicks vertically mouse was moved
"""
if self.wheel_target is not None and \
self.wheel_target in self.controls:
self.wheel_target.dispatch_event('on_mouse_scroll',
x, y, scroll_x, scroll_y)
return pyglet.event.EVENT_HANDLED
elif self.wheel_hint is not None and \
self.wheel_hint in self.controls:
self.wheel_hint.dispatch_event('on_mouse_scroll',
x, y, scroll_x, scroll_y)
return pyglet.event.EVENT_HANDLED
def on_text(self, text):
if self.focus and text != u'\r':
try:
return getattr(self.focus, 'on_text')(text)
except KeyError:
return pyglet.event.EVENT_UNHANDLED
def on_text_motion(self, motion):
if self.focus:
try:
return getattr(self.focus, 'on_text_motion')(motion)
except KeyError:
return pyglet.event.EVENT_UNHANDLED
def on_text_motion_select(self, motion):
if self.focus:
try:
return getattr(self.focus, 'on_text_motion_select')(motion)
except KeyError:
return pyglet.event.EVENT_UNHANDLED
def on_update(self, dt):
"""
We update our layout only when it's time to construct another frame.
Since we may receive several resize events within this time, this
ensures we don't resize too often.
@param dialog The Dialog containing the controls
@param dt Time passed since last update event (in seconds)
"""
for control in self.controls:
control.dispatch_event('on_update', dt)
def set_focus(self, focus):
"""
Sets a new focus, dispatching lose and gain focus events appropriately
@param focus The new focus, or None if no focus
"""
if self.focus == focus:
return
if self.focus is not None:
self.focus.dispatch_event('on_lose_focus')
self.focus = focus
if focus is not None:
focus.dispatch_event('on_gain_focus')
def set_hover(self, hover):
"""
Sets a new highlight, dispatching lose and gain highlight events
appropriately
@param hover The new highlight, or None if no highlight
"""
if self.hover == hover:
return
if self.hover is not None:
self.hover.dispatch_event('on_lose_highlight')
self.hover = hover
if hover is not None:
hover.dispatch_event('on_gain_highlight')
def set_wheel_hint(self, control):
self.wheel_hint = control
def set_wheel_target(self, control):
self.wheel_target = control
def teardown(self):
self.controls = []
self.control_map = {}
self.focus = None
self.hover = None
self.wheel_hint = None
self.wheel_target = None
def update_controls(self):
"""Update our list of controls which may respond to user input."""
controls = self._get_controls()
self.controls = []
self.control_areas = {}
self.control_map = {}
for control, left, right, top, bottom in controls:
self.controls.append(control)
self.control_areas[control] = (left, right, top, bottom)
if control.id is not None:
self.control_map[control.id] = control
if self.hover is not None and self.hover not in self.controls:
self.set_hover(None)
if self.focus is not None and self.focus not in self.controls:
self.set_focus(None)
kytten_next_dialog_order_id = 0
def GetNextDialogOrderId():
global kytten_next_dialog_order_id
kytten_next_dialog_order_id += 1
return kytten_next_dialog_order_id
class DialogGroup(pyglet.graphics.OrderedGroup):
"""
Ensure that all Widgets within a Dialog can be drawn with
blending enabled, and that our Dialog will be drawn in a particular
order relative to other Dialogs.
"""
def __init__(self, parent=None):
"""
Creates a new DialogGroup. By default we'll be on top.
@param parent Parent group
"""
pyglet.graphics.OrderedGroup.__init__(
self, GetNextDialogOrderId(), parent)
self.real_order = self.order
def __cmp__(self, other):
"""
When compared with other DialogGroups, we'll return our real order
compared against theirs; otherwise use the OrderedGroup comparison.
"""
        if isinstance(other, DialogGroup):
            return cmp(self.real_order, other.real_order)
        else:
            return pyglet.graphics.OrderedGroup.__cmp__(self, other)
def is_on_top(self):
"""
Are we the top dialog group?
"""
global kytten_next_dialog_order_id
return self.real_order == kytten_next_dialog_order_id
def pop_to_top(self):
"""
Put us on top of other dialog groups.
"""
self.real_order = GetNextDialogOrderId()
def set_state(self):
"""
Ensure that blending is set.
"""
gl.glPushAttrib(gl.GL_ENABLE_BIT | gl.GL_CURRENT_BIT)
gl.glEnable(gl.GL_BLEND)
gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
def unset_state(self):
"""
Restore previous blending state.
"""
gl.glPopAttrib()
class Dialog(Wrapper, DialogEventManager):
"""
Defines a new GUI. By default it can contain only one element, but that
element can be a Layout of some kind which can contain multiple elements.
Pass a Theme in to set the graphic appearance of the Dialog.
The Dialog is always repositioned in relationship to the window, and
handles resize events accordingly.
"""
def __init__(self, content=None, window=None, batch=None, group=None,
anchor=ANCHOR_CENTER, offset=(0, 0), parent=None,
theme=None, movable=True, on_enter=None, on_escape=None):
"""
Creates a new dialog.
@param content The Widget which we wrap
@param window The window to which we belong; used to set the
mouse cursor when appropriate. If set, we will
add ourself to the window as a handler.
@param batch Batch in which we are to place our graphic elements;
may be None if we are to create our own Batch
@param group Group in which we are to place our graphic elements;
may be None
@param anchor Anchor point of the window, relative to which we
are positioned. If ANCHOR_TOP_LEFT is specified,
our top left corner will be aligned to the window's
top left corner; if ANCHOR_CENTER is specified,
our center will be aligned to the window's center,
and so forth.
@param offset Offset from the anchor point. A positive X is always
to the right, a positive Y to the upward direction.
@param theme The Theme which we are to use to generate our graphical
appearance.
@param movable True if the dialog is able to be moved
@param on_enter Callback for when user presses enter on the last
input within this dialog, i.e. form submit
@param on_escape Callback for when user presses escape
"""
assert isinstance(theme, dict)
Wrapper.__init__(self, content=content)
DialogEventManager.__init__(self)
self.window = window
self.anchor = anchor
self.offset = offset
self.theme = theme
self.is_movable = movable
self.on_enter = on_enter
self.on_escape = on_escape
if batch is None:
self.batch = pyglet.graphics.Batch()
self.own_batch = True
else:
self.batch = batch
self.own_batch = False
self.root_group = DialogGroup(parent=group)
self.panel_group = pyglet.graphics.OrderedGroup(0, self.root_group)
self.bg_group = pyglet.graphics.OrderedGroup(1, self.root_group)
self.fg_group = pyglet.graphics.OrderedGroup(2, self.root_group)
self.highlight_group = pyglet.graphics.OrderedGroup(3, self.root_group)
self.needs_layout = True
self.is_dragging = False
if window is None:
self.screen = Widget()
else:
width, height = window.get_size()
self.screen = Widget(width=width, height=height)
window.push_handlers(self)
def do_layout(self):
"""
We lay out the Dialog by first determining the size of all its
        child Widgets, then laying ourselves out relative to the parent window.
"""
# Determine size of all components
self.size(self)
# Calculate our position relative to our containing window,
# making sure that we fit completely on the window. If our offset
# would send us off the screen, constrain it.
x, y = GetRelativePoint(self.screen, self.anchor,
self, None, (0, 0))
max_offset_x = self.screen.width - self.width - x
max_offset_y = self.screen.height - self.height - y
offset_x, offset_y = self.offset
offset_x = max(min(offset_x, max_offset_x), -x)
offset_y = max(min(offset_y, max_offset_y), -y)
self.offset = (offset_x, offset_y)
x += offset_x
y += offset_y
# Perform the actual layout now!
self.layout(x, y)
self.update_controls()
self.needs_layout = False
def draw(self):
assert self.own_batch
self.batch.draw()
def ensure_visible(self, control):
"""
Ensure a control is visible. For Dialog, this doesn't matter
since we don't scroll.
"""
pass
def get_root(self):
return self
def on_key_press(self, symbol, modifiers):
"""
We intercept TAB, ENTER, and ESCAPE events. TAB and ENTER will
move us between fields, holding shift will reverse the direction
of our iteration. ESCAPE may cause us to send an on_escape
callback.
Otherwise, we pass key presses to our child elements.
@param symbol Key pressed
@param modifiers Modifiers for key press
"""
retval = DialogEventManager.on_key_press(self, symbol, modifiers)
if not retval:
if symbol in [pyglet.window.key.TAB, pyglet.window.key.ENTER]:
if self.on_enter is not None:
self.on_enter(self)
return pyglet.event.EVENT_HANDLED
elif symbol == pyglet.window.key.ESCAPE:
if self.on_escape is not None:
self.on_escape(self)
return pyglet.event.EVENT_HANDLED
return retval
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
"""
Handles mouse dragging. If we have a focus, pass it in. Otherwise
if we are movable, and we were being dragged, move the window.
@param x X coordinate of mouse
@param y Y coordinate of mouse
@param dx Delta X
@param dy Delta Y
@param buttons Buttons held while moving
@param modifiers Modifiers to apply to buttons
"""
if not DialogEventManager.on_mouse_drag(self, x, y, dx, dy,
buttons, modifiers):
if self.is_movable and self.is_dragging:
x, y = self.offset
self.offset = (int(x + dx), int(y + dy))
self.set_needs_layout()
return pyglet.event.EVENT_HANDLED
def on_mouse_press(self, x, y, button, modifiers):
"""
If the focus is set, and the target lies within the focus, pass the
message down. Otherwise, check if we need to assign a new focus.
If the mouse was pressed within our frame but no control was targeted,
we may be setting up to drag the Dialog around.
@param x X coordinate of mouse
@param y Y coordinate of mouse
@param button Button pressed
@param modifiers Modifiers to apply to button
"""
retval = DialogEventManager.on_mouse_press(self, x, y,
button, modifiers)
if self.hit_test(x, y):
if not self.root_group.is_on_top():
self.pop_to_top()
if not retval:
self.is_dragging = True
retval = pyglet.event.EVENT_HANDLED
return retval
def on_mouse_release(self, x, y, button, modifiers):
"""
Button was released. We pass this along to the focus, then we
generate an on_mouse_motion to handle changing the highlighted
Control if necessary.
@param x X coordinate of mouse
@param y Y coordinate of mouse
@param button Button released
@param modifiers Modifiers to apply to button
"""
self.is_dragging = False
return DialogEventManager.on_mouse_release(self, x, y,
button, modifiers)
def on_resize(self, width, height):
"""
Update our knowledge of the window's width and height.
@param width Width of the window
@param height Height of the window
"""
if self.screen.width != width or self.screen.height != height:
self.screen.width, self.screen.height = width, height
self.needs_layout = True
def on_update(self, dt):
"""
We update our layout only when it's time to construct another frame.
Since we may receive several resize events within this time, this
ensures we don't resize too often.
@param dt Time passed since last update event (in seconds)
"""
if self.needs_layout:
self.do_layout()
DialogEventManager.on_update(self, dt)
def pop_to_top(self):
"""
Pop our dialog group to the top, and force our batch to re-sort
the groups. Also, puts our event handler on top of the window's
event handler stack.
"""
self.root_group.pop_to_top()
self.batch._draw_list_dirty = True # forces resorting groups
if self.window is not None:
self.window.remove_handlers(self)
self.window.push_handlers(self)
def set_needs_layout(self):
"""
True if we should redo the Dialog layout on our next update.
"""
self.needs_layout = True
def teardown(self):
DialogEventManager.teardown(self)
if self.content is not None:
self.content.teardown()
self.content = None
if self.window is not None:
self.window.remove_handlers(self)
self.window = None
self.batch._draw_list_dirty = True # forces resorting groups
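# Hedged usage sketch (kept as a comment so importing this module has no side
# effects; the theme contents and the window setup are assumptions, not part
# of this file):
#
#   window = pyglet.window.Window(640, 480)
#   dialog = Dialog(content=Frame(Label("Hello")),
#                   window=window, theme=some_theme_dict,
#                   on_escape=lambda d: d.teardown())
#
#   @window.event
#   def on_draw():
#       window.clear()
#       dialog.draw()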
class PopupMessage(Dialog):
"""A simple fire-and-forget dialog."""
def __init__(self, text="", window=None, batch=None, group=None,
theme=None, on_escape=None):
def on_ok(dialog=None):
if on_escape is not None:
on_escape(self)
self.teardown()
return Dialog.__init__(self, content=Frame(
VerticalLayout([
Label(text),
Button("Ok", on_click=on_ok),
])),
window=window, batch=batch, group=group,
theme=theme, movable=True,
on_enter=on_ok, on_escape=on_ok)
class PopupConfirm(Dialog):
"""An ok/cancel-style dialog. Escape defaults to cancel."""
def __init__(self, text="", ok="Ok", cancel="Cancel",
window=None, batch=None, group=None, theme=None,
on_ok=None, on_cancel=None):
def on_ok_click(dialog=None):
if on_ok is not None:
on_ok(self)
self.teardown()
def on_cancel_click(dialog=None):
if on_cancel is not None:
on_cancel(self)
self.teardown()
return Dialog.__init__(self, content=Frame(
VerticalLayout([
Label(text),
HorizontalLayout([
Button(ok, on_click=on_ok_click),
None,
Button(cancel, on_click=on_cancel_click)
]),
])),
window=window, batch=batch, group=group,
theme=theme, movable=True,
on_enter=on_ok_click, on_escape=on_cancel_click) |
import os
import sys
import getopt
import lxml.html
import tqdm
from datetime import datetime
from urllib.request import Request
from urllib.request import urlopen
# Helper function to print script usage
def print_usage():
print('Usage:')
print('\t1. python %s' % __file__)
print('\t2. python %s -s <start-page-number> -e <end-page-number> [-o <output-file>]' % __file__)
print('\t3. python %s -h (for help)\n' % __file__)
# Helper function to parse command line arguments
def parse_args(argv):
start = end = output_file = None
    try:
        [opts, args] = getopt.getopt(argv, 'hs:e:o:')
    except getopt.GetoptError:
        print_usage()
        sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print_usage()
sys.exit()
elif opt == '-s':
start = arg
elif opt == '-e':
end = arg
elif opt == '-o':
output_file = arg
if start and end:
try:
start = int(start)
end = int(end)
except ValueError:
print('ERROR:')
print('\'start-page-number\' and \'end-page-number\' must be integers.')
print_usage()
sys.exit()
else:
# set default start and end configuration when argument was not specified by user
start = 0
end = 5
print('\nRange of featured links pages to scrape not completely specified')
print('Missing argument(s) \'start-page-number\', \'end-page-number\' or both.')
print('Using default configuration...')
print('start page number: %d' % start)
print('end page number: %d' % end)
print()
if not output_file:
output_file = 'nl-scraper-{0}.txt'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
print('Using default output file: %s\n' % output_file)
return [start, end, output_file]
def main(start, end, output_file):
base_url = 'http://www.nairaland.com/links/'
featured_links_list = []
print('Fetching and processing featured links page(s) from page %d to %d...' % (start, end))
for page_num in tqdm.tqdm(range(start, end+1)):
# fetch page
url = '{0}{1}'.format(base_url, page_num)
        # set the request user-agent to Mozilla (any other web browser user-agent should be fine);
        # nairaland does not accept the default python user-agent (responds with 403 Forbidden)
req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
str_response = urlopen(req).read()
str_response = str_response.decode()
# create html tree structure from html response string
root = lxml.html.fromstring(str_response)
# locate the table tag containing the featured links
# (this is based on the structure of nairaland page)
featured_links_table_tag = None
for table in root.iter('table'):
if table.attrib.get('summary') == 'links':
featured_links_table_tag = table
        if featured_links_table_tag is None:
            print('There was an issue scraping %s' % url)
            print('Could not locate the section containing featured links...')
            continue
        for a_tag in featured_links_table_tag.iter('a'):
content = a_tag.text_content()
content = '{0}\n'.format(content)
featured_links_list.append(content)
print('Saving featured links to file %s' % output_file)
with open(output_file, 'w', encoding='utf-8') as f:
f.writelines(featured_links_list)
print('Done...')
if __name__ == '__main__':
start, end, output_file = parse_args(sys.argv[1:])
main(start, end, output_file)
|
from enum import Enum
import numpy as np
import math
import lib.config as C
import lib.utils as U
import time
from strategy.agent import Agent as A
from unit.units import Army
import unit.protoss_unit as P
class ProtossAction(Enum):
Do_nothing = 0
Build_worker = 1
Build_zealot = 2
Build_pylon = 3
Build_gateway = 4
Attack = 5
Move = 6
Defend = 7
Build_sub_base = 8
Build_cannon = 9
All = 10
class Protoss(A):
def __init__(self, agent_id=0, global_buffer=None, net=None, restore_model=False):
A.__init__(self, agent_id=agent_id, global_buffer=global_buffer,
net=net, restore_model=restore_model)
self.gateway_num = 0
self.pylon_num = 0
self.zealot_num = 0
self.collected_mineral = 0
self.MAX_ACTIONS = ProtossAction.All.value
def __str__(self):
return str(self.time_seconds) + ', ' + str(self.mineral) + \
', ' + str(self.mineral_worker_nums) + ', ' + str(self.zealot_num) + ', ' + str(self.food_cap)
def reset(self):
super().reset()
self.gateway_num = 0
self.pylon_num = 0
self.zealot_num = 0
self.collected_mineral = 0
def obs(self):
simple_input = np.zeros([11])
simple_input[0] = 0 # self.time_seconds
simple_input[1] = self.mineral_worker_nums
simple_input[2] = self.gas_worker_nums
simple_input[3] = self.mineral
simple_input[4] = self.gas
simple_input[5] = self.food_cap
simple_input[6] = self.food_used
simple_input[7] = self.army_nums
simple_input[8] = self.gateway_num
simple_input[9] = self.pylon_num
simple_input[10] = self.zealot_num
return simple_input
def set_obs(self, state):
self.mineral_worker_nums = state[1]
self.gas_worker_nums = state[2]
self.mineral = state[3]
self.gas = state[4]
self.food_cap = state[5]
self.food_used = state[6]
self.army_nums = state[7]
self.gateway_num = state[8]
self.pylon_num = state[9]
self.zealot_num = state[10]
def get_next_state(self, action):
self.env.step(self.player_id, action)
return self.obs()
@property
def result(self):
return self._result
def play_with_mpc(self, verbose=False):
max_steps = 100
state_now = self.obs()
if verbose:
print('initial state:', state_now)
print('initial env:', self.env)
state_last, action_last = None, None
for i in range(max_steps):
if self.is_end or i == max_steps - 1:
if verbose:
print(self.local_buffer.rewards)
if self.env.win_index == self.player_id:
pass
self._result = sum(self.local_buffer.rewards)
# self.global_buffer.add(self.local_buffer)
break
if state_last is not None:
reward = self.get_mineral_reward(state_last, state_now)
if True:
print('reward:', reward)
self.local_buffer.append(state_last, action_last, state_now, reward, 0, 0)
action, v_preds = self.mpc.get_action(state_now, agent_clone=self, verbose=verbose)
state_last = state_now
state_now = self.get_next_state(action)
if verbose:
print('state now:', state_now.astype(dtype=np.int32))
time.sleep(1)
action_last = action
def play_with_rl(self, verbose=False):
max_steps = 125
state_now = self.obs()
if verbose:
print('initial state:', state_now)
print('initial env:', self.env)
state_last, action_last = None, None
for i in range(max_steps):
if self.is_end or i == max_steps - 1:
if self.env.win_index == self.player_id:
ratio = (i + 1) / float(max_steps)
the_reward = 1. - ratio / 1.5
self.local_buffer.rewards[-1] += the_reward
self._result = the_reward
if verbose:
print(self.local_buffer.rewards)
#self._result = sum(self.local_buffer.rewards)
self.global_buffer.add(self.local_buffer)
break
if state_last is not None:
reward = 0 # = self.get_pop_reward(state_last, state_now)
if 0:
print('reward:', reward)
v_preds_next = self.net.policy.get_values(state_now)
v_preds_next = self.get_values(v_preds_next)
self.local_buffer.append(state_last, action_last, state_now, reward, v_preds, v_preds_next)
action, v_preds = self.net.policy.get_action(state_now, verbose=False)
state_last = state_now
state_now = self.get_next_state(action)
if verbose:
print('state now:', state_now.astype(dtype=np.int32))
print('action:', action)
time.sleep(1)
action_last = action
def get_pop_reward(self, state_last, state_now):
pop_reward = state_now[6] - state_last[6]
return pop_reward
def get_mineral_reward(self, state_last, state_now):
mineral_reward = state_now[3] - state_last[3]
return mineral_reward
def get_values(self, values):
# check if the game is end
if self.is_end and self.result != 0:
return 0
else:
return values
def get_action_by_policy(self, obs):
act, v_preds = self.net.policy.get_action(obs, verbose=True)
return act, v_preds
'''def get_policy_action(self, obs):
random = np.random.randint(self.MAX_ACTIONS)
action = random
return action'''
def fullfill_technology(self, unit):
if type(unit) == P.Zealot:
if self.gateway_num > 0:
return True
return False
def fullfill_creature_condition(self, unit):
if self.mineral >= unit.mineral_price and self.gas >= unit.gas_price:
if self.food_cap >= self.food_used + unit.food_used and self.fullfill_technology(unit):
return True
else:
return False
def win(self):
if self.zealot_num >= 8:
return True
else:
return False
def get_build_num(self, unit):
max_n = self.gateway_num
n = 1
#print('max_n:', max_n)
for i in range(max_n):
if unit.mineral_price * i < self.mineral and unit.food_used * i + self.food_used < self.food_cap:
continue
else:
n = i - 1
break
#print('n:', n)
return n
def step(self, action):
if action == ProtossAction.Build_worker.value:
if self.mineral >= 50 and self.food_used < self.food_cap:
self.mineral_worker_nums += 1
self.food_used += 1
self.mineral -= 50
elif action == ProtossAction.Build_zealot.value:
Zealot = P.Zealot()
if self.fullfill_creature_condition(Zealot):
n = self.get_build_num(Zealot)
self.army_nums += n
self.zealot_num += n
self.food_used += Zealot.food_used * n
self.mineral -= Zealot.mineral_price * n
self.add_unit(Zealot, n)
elif action == ProtossAction.Build_pylon.value:
if self.mineral >= 100:
self.building_nums += 1
self.food_cap += 8
self.pylon_num += 1
self.mineral -= 100
elif action == ProtossAction.Build_gateway.value:
if self.mineral >= 150 and self.pylon_num >= 1:
self.gateway_num += 1
self.building_nums += 1
self.mineral -= 150
elif action == ProtossAction.Attack.value:
if self.military_num() > 0:
#print('order:', self.env.army[self.player_id].order)
self.env.army[self.player_id].order = Army.Order.ATTACK
#print('order:', self.env.army[self.player_id].order)
elif action == ProtossAction.Defend.value:
if self.military_num() > 0:
self.env.army[self.player_id].order = Army.Order.DEFEND
elif action == ProtossAction.Build_sub_base.value:
pass
elif action == ProtossAction.Build_cannon.value:
pass
# update mineral
self.collected_mineral += min(self.mineral_worker_nums, 16) * 3
if self.collected_mineral <= 10000:
self.mineral += min(self.mineral_worker_nums, 16) * 3
self.time_seconds += 5
# update population
if self.military_num() == 0:
#print('order:', self.env.army[self.player_id].order)
self.env.army[self.player_id].order = Army.Order.NOTHING
#print('order:', self.env.army[self.player_id].order)
else:
self.army_nums = self.military_num()
self.zealot_num = self.military_num()
self.food_used = self.military_num() * 2 + self.mineral_worker_nums + self.gas_worker_nums
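# Hedged usage sketch (kept as a comment because the agent relies on an
# environment created by the surrounding strategy/unit modules):
#
#   agent = Protoss(agent_id=0)
#   state = agent.obs()                            # 11-element feature vector
#   agent.step(ProtossAction.Build_pylon.value)    # apply one macro action
#   agent.step(ProtossAction.Build_gateway.value)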
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
# Function to plot the diurnal variation for given dataframes as input
# Plot mean diurnal variation of PM2.5
def plot(dfmod, dfobs, mod, obs, mod_stdev=None, obs_stdev=None):
"""
Python function to plot the diurnal variation for
hourly data in model and observation data frames
------------------------------------------------
Example input:
    dplot.plot(dfmod, dfobs, mod, obs, mod_stdev=str, obs_stdev=str)
Here mod_stdev and obs_stdev are optional inputs
"""
dfmod['time'] = dfmod.index.time
dfobs['time'] = dfobs.index.time
dfmod = dfmod.groupby('time').describe().unstack()
dfobs = dfobs.groupby('time').describe().unstack()
times = [x for x in range(0, 24)]
plt.plot(times, dfmod[mod]['mean'], color='tab:red', label='model')
if mod_stdev is not None:
(_, caps, _) = plt.errorbar(times, dfmod[mod]['mean'], yerr=dfmod[mod_stdev]['mean'],
alpha=0.3, ecolor='tab:red', fmt='o', mfc='tab:red', markersize=8,
capsize=10, label='stdev')
for cap in caps:
cap.set_markeredgewidth(1)
plt.plot(times, dfobs[obs]['mean'], color='tab:blue', label='observed')
if obs_stdev is not None:
(_, caps, _) = plt.errorbar(times, dfobs[obs]['mean'], yerr=dfobs[obs_stdev]['mean'],
alpha=0.3, ecolor='tab:blue', fmt='o', mfc='tab:blue', markersize=8,
capsize=10, label='stdev')
for cap in caps:
cap.set_markeredgewidth(1)
plt.legend(ncol=2, frameon=False, loc='right', bbox_to_anchor=(1.02, 1.13), prop={'size': 12})
plt.xticks(np.arange(0, 24, step=1))
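# Hedged usage sketch (the column names and random data are assumptions):
# build two hourly-indexed frames and draw their mean diurnal cycles.
if __name__ == "__main__":
    idx = pd.date_range("2021-01-01", periods=24 * 30, freq="H")
    dfmod = pd.DataFrame({"pm25_mod": np.random.rand(len(idx)) * 50}, index=idx)
    dfobs = pd.DataFrame({"pm25_obs": np.random.rand(len(idx)) * 50}, index=idx)
    plot(dfmod, dfobs, "pm25_mod", "pm25_obs")
    plt.show()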
|
# this will generate suppl fig 4 A - D (4 matrices of covariance analysis)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import seaborn as sns  # needed for the heatmaps below
# need ana_rd and ana_all_rd from analysis.py
ana = ana_all_rd
conn_data = ana.conn_data['glom_kc_in_claw_unit']
ob_conn, glom_prob, glom_idx_ids = get_conn_prob_idx(conn_data)
num_exp = 1000
t11 = [np.cov(shuffle_glom_kc_w_prob(ob_conn, glom_prob),rowvar=False) for i in range(num_exp)]
t12 = np.stack(t11,2)
ob_cov = np.cov(ob_conn,rowvar=False)
idx = np.triu_indices(len(ob_cov))
results = np.zeros(ob_cov.shape)
for i,j1 in enumerate(idx[0]):
j2 = idx[1][i]
t1 = t12[j1,j2,:]
j3 = ob_cov[j1,j2]
p = sum(t1>j3)/num_exp
results[j1,j2]=p
results[j2,j1]=p
# binarize the p-values: 1 marks glomerulus pairs over-represented at p < 0.05
pvs = results.copy()
pvs[pvs >= 0.05] = 1
pvs = 1 - pvs
pvs[np.where(pvs)] = 1
# re-cluster the P binary matrix
cm_cv = PairMatrix('', pvs.copy(), glom_idx_ids)
# reorder_idx = km_cluster(pvs)
reorder_idx = reorder(covar_order_ids, glom_idx_ids)
t1_cv = cm_cv.reorder(reorder_idx, return_new=True)
fig, ax1 = plt.subplots()
t1 = t1_cv;
gloms = df_lookup('glom_id',t1.col_ids,'short_glom_name',glom_btn_table)
sns.heatmap(t1.conn, xticklabels=gloms, yticklabels=gloms, ax=ax1, cmap=cm.get_cmap('viridis', 2))
ax1.tick_params(bottom=False,labeltop=True, top=True, labelbottom=False)
ax1.tick_params(axis='x',labelrotation=90)
col_list = t1.col_ids
col_colors = df_lookup('short_glom_name', gloms, 'color', tbl)
for x in [ax1.get_xticklabels(), ax1.get_yticklabels()]:
for idx, tick in enumerate(x):
tick.set_color(col_colors[idx])
if col_list[idx] in comm_ids:
tick.set_weight("extra bold")
ax1.set_aspect("equal")
fig.set_size_inches(18,12)
plt.show()
fig.savefig(save_path + "191202-allKC_cov_BinaryPval_ClusterP_over-rep_wGlomAnno.png", bbox_inches='tight')
# use original cluster order
cm_cv = PairMatrix('', pvs.copy(), glom_idx_ids)
# reorder_idx = km_cluster(cm_zs.conn)
reorder_idx = reorder(ClusterOrder0707, glom_idx_ids)
t1_cv = cm_cv.reorder(reorder_idx, return_new=True)
fig, ax1 = plt.subplots()
t1 = t1_cv;
gloms = df_lookup('glom_id',t1.col_ids,'short_glom_name',glom_btn_table)
sns.heatmap(t1.conn, xticklabels=gloms, yticklabels=gloms, ax=ax1, cmap=cm.get_cmap('viridis', 2))
ax1.tick_params(bottom=False,labeltop=True, top=True, labelbottom=False)
ax1.tick_params(axis='x',labelrotation=90)
col_list = t1.col_ids
col_colors = df_lookup('short_glom_name', gloms, 'color', tbl)
for x in [ax1.get_xticklabels(), ax1.get_yticklabels()]:
for idx, tick in enumerate(x):
tick.set_color(col_colors[idx])
if col_list[idx] in comm_ids:
tick.set_weight("extra bold")
ax1.set_aspect("equal")
fig.set_size_inches(18,12)
plt.show()
fig.savefig(save_path + "191202-allKC_cov_BinaryPval_OriginalOrder_over-rep_wGlomAnno.png", bbox_inches='tight')
# calculate covariance matrices with synapse numbers
def shuffle_glom_kc_w_prob_syn(gk_conn, col_prob, syn_d):
    '''Given a glomerulus-KC connectivity matrix, shuffle the connections
    while maintaining the CLAW counts only, and return the shuffled matrix.
    Note that individual claw connections are not identifiable; they appear as counts
    in the glom-KC cells, e.g. 2 in a cell means the KC and glomerulus connect with 2 claws.
    This version uses a probability of choice for each glomerulus (each column).
    191112: add a synapse distribution to generate a shuffled connectivity matrix'''
sfl_conn = np.zeros(gk_conn.shape)
num_col = sfl_conn.shape[1]
for i in range(sfl_conn.shape[0]):
t1 = np.random.choice(int(num_col), size=int(sum(gk_conn[i,:])), p=col_prob)
for j in t1:
sfl_conn[i, j] += np.random.choice(syn_d)
return sfl_conn
# get the distribution of all synapses from random draw KCs
syn_conn = ana_rd.conn_data['bouton_claw'].conn['5s']
syn_d = syn_conn[np.where(syn_conn)]
conn_data = ana.conn_data['glom_kc_in_claw_unit']
ob_conn, glom_prob, glom_idx_ids = get_conn_prob_idx(conn_data)
num_exp = 1000
t11 = [np.cov(shuffle_glom_kc_w_prob_syn(ob_conn, glom_prob, syn_d),rowvar=False) for i in range(num_exp)]
t12 = np.stack(t11,2)
# get the observed covariance matrix of synapse level connections
conn = ana.conn_data['glom_kc_contracted'].conn['5s']
ob_cov = np.cov(conn, rowvar=False)
idx = np.triu_indices(len(ob_cov))
results = np.zeros(ob_cov.shape)
for i,j1 in enumerate(idx[0]):
j2 = idx[1][i]
t1 = t12[j1,j2,:]
j3 = ob_cov[j1,j2]
p = sum(t1>j3)/num_exp
results[j1,j2]=p
results[j2,j1]=p
pvs = results.copy()
pvs[pvs>=0.05]=1
pvs = 1-pvs
pvs[np.where(pvs)]=1
cm_cv = PairMatrix('', pvs.copy(), glom_idx_ids)
# reorder_idx = km_cluster(1-pvs)
reorder_idx = reorder(covar_order_syn_ids, glom_idx_ids)
t1_cv = cm_cv.reorder(reorder_idx, return_new=True)
fig, ax1 = plt.subplots()
t1 = t1_cv;
gloms = df_lookup('glom_id',t1.col_ids,'short_glom_name',glom_btn_table)
sns.heatmap(t1.conn, xticklabels=gloms, yticklabels=gloms, ax=ax1, cmap=cm.get_cmap('viridis', 2))
ax1.tick_params(bottom=False,labeltop=True, top=True, labelbottom=False)
ax1.tick_params(axis='x',labelrotation=90)
col_list = t1.col_ids
col_colors = df_lookup('short_glom_name', gloms, 'color', tbl)
for x in [ax1.get_xticklabels(), ax1.get_yticklabels()]:
for idx, tick in enumerate(x):
tick.set_color(col_colors[idx])
if col_list[idx] in comm_ids:
tick.set_weight("extra bold")
ax1.set_aspect("equal")
fig.set_size_inches(18,12)
plt.show()
fig.savefig(save_path + "191202-allKC_cov_BinaryPval_ClusterP_SynDistr_over-rep_wGlomAnno.png", bbox_inches='tight')
# same as above but use original clustering order
cm_cv = PairMatrix('', pvs.copy(), glom_idx_ids)
# reorder_idx = km_cluster(1-results)
reorder_idx = reorder(ClusterOrder0707, glom_idx_ids)
t1_cv = cm_cv.reorder(reorder_idx, return_new=True)
fig, ax1 = plt.subplots()
t1 = t1_cv;
gloms = df_lookup('glom_id',t1.col_ids,'short_glom_name',glom_btn_table)
sns.heatmap(t1.conn, xticklabels=gloms, yticklabels=gloms, ax=ax1, cmap=cm.get_cmap('viridis', 2))
ax1.tick_params(bottom=False,labeltop=True, top=True, labelbottom=False)
ax1.tick_params(axis='x',labelrotation=90)
col_list = t1.col_ids
col_colors = df_lookup('short_glom_name', gloms, 'color', tbl)
for x in [ax1.get_xticklabels(), ax1.get_yticklabels()]:
for idx, tick in enumerate(x):
tick.set_color(col_colors[idx])
if col_list[idx] in comm_ids:
tick.set_weight("extra bold")
ax1.set_aspect("equal")
fig.set_size_inches(18,12)
plt.show()
fig.savefig(save_path + "191202-allKC_cov_BinaryPval_OriginalOrder_SynDistr_over-rep_wGlomAnno.png", bbox_inches='tight')
# fig.savefig(save_path + "191112-allKC_cov_pval_OriginCluster_SynDistr_over-representation.png", bbox_inches='tight')
# below are old comments
#------------------------------------------------------------------
#copy from /Users/zhengz11/myscripts/bocklab_git/bocklab/zhihao/mushroom_py/v10/191202-replot_covariance_matrix.py
# change date to 200110
# save_path = "/Users/zhengz11/myscripts/data_results/191112-covariance_matrices/191202_updated/"
|
# ------------------------------------------
# Copyright (c) Rygor. 2021.
# ------------------------------------------
from .api import (
Sap_system,
TasksException,
add,
run,
delete,
update,
pw,
start_sap_db,
stop_sap_db,
list_systems,
query_param,
database
)
__version__ = '0.1.5'
|
# File: chaos.py
# A simple program illustrating chaotic behavior.
"""Modify the chaos program from section 1.6 so it prints out 20 values instead
of 10."""
def main():
    print("This program illustrates a chaotic function")
x = eval(input("Enter a number between 0 and 1: "))
for i in range(20):
x = 3.9 * x * (1 - x)
print(x)
main()
|
default_port = {
'mongo': 27017,
'mysql': 3306,
'postgres': 5432,
'redis': 6379,
'hbase': 9090,
    'elasticsearch': 9200
}
|
import bpy
from bpy.props import *
from ...nodes.BASE.node_base import RenderNodeBase
def update_node(self, context):
self.execute_tree()
class RSNodeLuxcoreRenderSettingsNode(RenderNodeBase):
"""A simple input node"""
bl_idname = 'RSNodeLuxcoreRenderSettingsNode'
bl_label = 'Luxcore Settings'
use_samples: BoolProperty(name='Use Samples', default=True, update=update_node)
use_time: BoolProperty(name='Use Time', default=False, update=update_node)
time: IntProperty(default=300, min=1, name='Time(s)', update=update_node)
samples: IntProperty(default=64, min=1, name="Samples", update=update_node)
    # seems to be a Blender 2.92 bug
warning: BoolProperty(name='Is warning', default=False)
warning_msg: StringProperty(name='warning message', default='')
def init(self, context):
self.warning = False
self.outputs.new('RSNodeSocketTaskSettings', "Settings")
self.width = 225
def draw_buttons(self, context, layout):
super().draw_buttons(context, layout)
col = layout.column(align=1)
row = col.row(align=True)
row.prop(self, "use_samples")
row.prop(self, "samples")
row = col.row(align=True)
row.prop(self, 'use_time')
row.prop(self, 'time')
def process(self, context, id, path):
task_data = self.get_data()
engines = ['BLENDER_EEVEE', 'BLENDER_WORKBENCH'] + [engine.bl_idname for engine in
bpy.types.RenderEngine.__subclasses__()]
# engine settings
if 'engine' in task_data:
if task_data['engine'] in engines:
self.compare(bpy.context.scene.render, 'engine', task_data['engine'])
if 'luxcore_half' in task_data and 'BlendLuxCore' in bpy.context.preferences.addons:
if not bpy.context.scene.luxcore.halt.enable:
bpy.context.scene.luxcore.halt.enable = True
if task_data['luxcore_half']['use_samples'] is False and task_data['luxcore_half'][
'use_time'] is False:
bpy.context.scene.luxcore.halt.use_samples = True
elif task_data['luxcore_half']['use_samples'] is True and task_data['luxcore_half'][
'use_time'] is False:
if not bpy.context.scene.luxcore.halt.use_samples:
bpy.context.scene.luxcore.halt.use_samples = True
if bpy.context.scene.luxcore.halt.use_time:
bpy.context.scene.luxcore.halt.use_time = False
self.compare(bpy.context.scene.luxcore.halt, 'samples', task_data['luxcore_half']['samples'])
elif task_data['luxcore_half']['use_samples'] is False and task_data['luxcore_half'][
'use_time'] is True:
if bpy.context.scene.luxcore.halt.use_samples:
bpy.context.scene.luxcore.halt.use_samples = False
if not bpy.context.scene.luxcore.halt.use_time:
bpy.context.scene.luxcore.halt.use_time = True
self.compare(bpy.context.scene.luxcore.halt, 'time', task_data['luxcore_half']['time'])
    def get_data(self):
        task_data = {}
        if 'BlendLuxCore' in bpy.context.preferences.addons:
            task_data['engine'] = 'LUXCORE'
            task_data['luxcore_half'] = {'use_samples': self.use_samples,
                                         'samples': self.samples,
                                         'use_time': self.use_time,
                                         'time': self.time}
        else:
            self.set_warning(msg="Luxcore is not enabled")
        return task_data
def register():
bpy.utils.register_class(RSNodeLuxcoreRenderSettingsNode)
def unregister():
bpy.utils.unregister_class(RSNodeLuxcoreRenderSettingsNode)
|
'''
Python Exercise 093: Write a program that tracks a football player's
performance. The program reads the player's name and how many matches he
played, then reads the number of goals scored in each match. Finally, all of
this is stored in a dictionary, including the total number of goals scored
during the championship.
'''
ficha = {}
gols = []
ficha['nome'] = str(input("Enter the player's name: "))
resp = int(input(f'How many matches did {ficha["nome"]} play: '))
print('-='*30)
for i in range(0, resp):
    gols.append(int(input(f'  How many goals in match {i+1}: ')))
soma = 0
for n in gols:
soma += n
ficha['gols'] = gols[:]
ficha['aproveitamento'] = soma
print('-='*40)
print(ficha)
print('-='*40)
for k, v in ficha.items():
    print(f'The field {k} has the value {v}')
print('-='*40)
print(f'O jogador {ficha["nome"]} jogou {len(ficha["gols"])} partidas')
for i, v in enumerate(ficha['gols']):
    print(f' ++> scored {v} goals in match {i+1}')
print(f'The total for the championship was {ficha["aproveitamento"]} goals')
|
import metadata
import xml.etree.ElementTree as etree
import os
def read_file(file_name):
obj = []
levels = []
level = -1
word = ''
with open(file_name, 'r', encoding='utf-8-sig') as f:
for line in f:
for c in line[:-1]:
if c in ',{}':
if word != '':
# if len(levels[level]) == 0 and word.isdigit():
# pass
# else:
levels[level].append(word)
if c == ',':
pass
elif c == '{':
if level == -1:
item = obj
else:
item = []
levels[level].append(item)
levels.append(item)
level += 1
elif c == '}':
del levels[level]
level -= 1
word = ''
else:
word += c
return obj
def parse(values):
if isinstance(values, list):
return parse_prop(values)
else:
return values.strip('"')
def isguid(string):
return len(string) == 36 and \
string[8] == '-' and \
string[13] == '-' \
and string[18] == '-' \
and string[23] == '-'
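# Illustrative check (sketch): a canonical 36-character GUID passes, anything else fails.
# isguid('12345678-1234-1234-1234-123456789abc')   # -> True
# isguid('not-a-guid')                              # -> False (length check short-circuits)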
def parse_prop(values):
prop = metadata.Properties()
offset = -1
count = 0
if isinstance(values[0], list):
pass
elif values[0].isdigit():
count = int(values[0])
offset = 1
elif len(values) > 1:
if isguid(values[0]):
prop.uid = values[0]
offset = 1
if isinstance(values[1], str) and values[1].isdigit():
offset = 2
count = int(values[1])
if offset == -1:
offset = 0
if count == 0:
count = len(values) - offset
if offset != -1:
while offset < len(values):
if len(values) < offset + count:
                print('Warning: declared count exceeds the remaining values; truncating')
count = len(values) - offset
for i in range(offset, offset + count):
prop.append(parse(values[i]))
offset += count
count = len(values) - offset
return prop
def get_property(prop, info):
if info['path'] is None or info['path'] == '':
return None
current = prop
for el in info['path'].split('.'):
if el.isdigit():
current = current[int(el)]
if info['conv_fn']:
return info['conv_fn'](current, info)
else:
return current
def parse_array(values, containers):
obj = metadata.Configuration()
obj.uid = values[1][0]
count = int(values[2])
for i in range(3, 3 + count):
value = parse(values[i])
if value.uid in containers:
container = containers[value.uid]
for item in container:
obj.properties[item['name']] = get_property(value, item)
obj.internalInfo.append(value)
return obj
Conversion_fn = {
'string': lambda x, info: str(x),
'ml_string': lambda x, info: metadata.MultiLangString.from_array(x),
'enum': lambda x, info: info['values'][x],
'bool': lambda x, info: bool(x)
}
def load_config():
global cfg
cfg = {}
tree = etree.parse(os.path.join(os.path.dirname(__file__), 'config.xml'))
    for el in tree.iter('MetaDataObject'):
containers = {}
name = el.attrib['name']
if name:
cfg[name] = containers
        for cont_el in el.iter('Container'):
container = []
containers[cont_el.attrib['uid']] = container
            for item_el in cont_el.iter('item'):
container.append({
'name': item_el.attrib['value'],
'path': item_el.attrib['path'],
'type': item_el.attrib['type'],
'conv_fn': Conversion_fn[item_el.attrib['type']],
'values': {
                        val_el.attrib['key']: val_el.text for val_el in item_el.iter('value')
}
})
cfg = None
|
import pymysql
import sqlalchemy
from sqlalchemy import Column, DateTime, Integer, create_engine, func
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy.orm import sessionmaker
from settings import DB_LINK
pymysql.install_as_MySQLdb()
class BaseModelClass:
@declared_attr
def __tablename__(cls):
return cls.__name__.lower() + "s"
__mapper_args__ = {"always_refresh": True}
id = Column(Integer, primary_key=True, autoincrement=True)
created_at = Column(DateTime, default=func.now())
updated_at = Column(DateTime, default=func.now(), server_onupdate=func.now())
engine: sqlalchemy.engine.base.Engine = create_engine(
DB_LINK, pool_recycle=3600,
)
session: sqlalchemy.orm.session.sessionmaker = sessionmaker(
autocommit=False, autoflush=False, bind=engine,
expire_on_commit=False
)
base = declarative_base(cls=BaseModelClass)
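# Example model definition (illustrative sketch; ``User`` and the String import are
# hypothetical, not part of this module):
# from sqlalchemy import String
# class User(base):
#     name = Column(String(64))
# # __tablename__ is derived automatically as "users"; tables can then be created with:
# # base.metadata.create_all(engine)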
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
import mock
import six
try:
from webapp2 import RequestHandler
except SyntaxError:
# webapp2 has not been ported to python3, so it will give a syntax
# error if we try. We'll just skip the webapp2 tests in that case.
RequestHandler = object
class Test_get_trace_id_from_flask(unittest.TestCase):
@staticmethod
def _call_fut():
from google.cloud.logging.handlers import _helpers
return _helpers.get_trace_id_from_flask()
@staticmethod
def create_app():
import flask
app = flask.Flask(__name__)
@app.route('/')
def index():
return 'test flask trace' # pragma: NO COVER
return app
def test_no_context_header(self):
app = self.create_app()
with app.test_request_context(
path='/',
headers={}):
trace_id = self._call_fut()
self.assertIsNone(trace_id)
def test_valid_context_header(self):
flask_trace_header = 'X_CLOUD_TRACE_CONTEXT'
expected_trace_id = 'testtraceidflask'
flask_trace_id = expected_trace_id + '/testspanid'
app = self.create_app()
context = app.test_request_context(
path='/',
headers={flask_trace_header: flask_trace_id})
with context:
trace_id = self._call_fut()
self.assertEqual(trace_id, expected_trace_id)
class _GetTraceId(RequestHandler):
def get(self):
from google.cloud.logging.handlers import _helpers
trace_id = _helpers.get_trace_id_from_webapp2()
self.response.content_type = 'application/json'
self.response.out.write(json.dumps(trace_id))
@unittest.skipIf(six.PY3, 'webapp2 is Python 2 only')
class Test_get_trace_id_from_webapp2(unittest.TestCase):
@staticmethod
def create_app():
import webapp2
app = webapp2.WSGIApplication([
('/', _GetTraceId),
])
return app
def test_no_context_header(self):
import webob
req = webob.BaseRequest.blank('/')
response = req.get_response(self.create_app())
trace_id = json.loads(response.body)
self.assertEqual(None, trace_id)
def test_valid_context_header(self):
import webob
webapp2_trace_header = 'X-Cloud-Trace-Context'
expected_trace_id = 'testtraceidwebapp2'
webapp2_trace_id = expected_trace_id + '/testspanid'
req = webob.BaseRequest.blank(
'/',
headers={webapp2_trace_header: webapp2_trace_id})
response = req.get_response(self.create_app())
trace_id = json.loads(response.body)
self.assertEqual(trace_id, expected_trace_id)
class Test_get_trace_id_from_django(unittest.TestCase):
@staticmethod
def _call_fut():
from google.cloud.logging.handlers import _helpers
return _helpers.get_trace_id_from_django()
def setUp(self):
from django.conf import settings
from django.test.utils import setup_test_environment
if not settings.configured:
settings.configure()
setup_test_environment()
def tearDown(self):
from django.test.utils import teardown_test_environment
from google.cloud.logging.handlers.middleware import request
teardown_test_environment()
request._thread_locals.__dict__.clear()
def test_no_context_header(self):
from django.test import RequestFactory
from google.cloud.logging.handlers.middleware import request
django_request = RequestFactory().get('/')
middleware = request.RequestMiddleware()
middleware.process_request(django_request)
trace_id = self._call_fut()
self.assertIsNone(trace_id)
def test_valid_context_header(self):
from django.test import RequestFactory
from google.cloud.logging.handlers.middleware import request
django_trace_header = 'HTTP_X_CLOUD_TRACE_CONTEXT'
expected_trace_id = 'testtraceiddjango'
django_trace_id = expected_trace_id + '/testspanid'
django_request = RequestFactory().get(
'/',
**{django_trace_header: django_trace_id})
middleware = request.RequestMiddleware()
middleware.process_request(django_request)
trace_id = self._call_fut()
self.assertEqual(trace_id, expected_trace_id)
class Test_get_trace_id(unittest.TestCase):
@staticmethod
def _call_fut():
from google.cloud.logging.handlers import _helpers
return _helpers.get_trace_id()
def _helper(self, django_return, flask_return):
django_patch = mock.patch(
'google.cloud.logging.handlers._helpers.get_trace_id_from_django',
return_value=django_return)
flask_patch = mock.patch(
'google.cloud.logging.handlers._helpers.get_trace_id_from_flask',
return_value=flask_return)
with django_patch as django_mock:
with flask_patch as flask_mock:
trace_id = self._call_fut()
return django_mock, flask_mock, trace_id
def test_from_django(self):
django_mock, flask_mock, trace_id = self._helper(
'test-django-trace-id', None)
self.assertEqual(trace_id, django_mock.return_value)
django_mock.assert_called_once_with()
flask_mock.assert_not_called()
def test_from_flask(self):
django_mock, flask_mock, trace_id = self._helper(
None, 'test-flask-trace-id')
self.assertEqual(trace_id, flask_mock.return_value)
django_mock.assert_called_once_with()
flask_mock.assert_called_once_with()
def test_from_django_and_flask(self):
django_mock, flask_mock, trace_id = self._helper(
'test-django-trace-id', 'test-flask-trace-id')
# Django wins.
self.assertEqual(trace_id, django_mock.return_value)
django_mock.assert_called_once_with()
flask_mock.assert_not_called()
def test_missing(self):
django_mock, flask_mock, trace_id = self._helper(None, None)
self.assertIsNone(trace_id)
django_mock.assert_called_once_with()
flask_mock.assert_called_once_with()
|
from .fetcher import Fetcher
name = "reference_sequence_fetcher"
|
import subprocess
import json
import sqlite3
from .fixtures import *
def test_depth_flag_is_accepted(process, disable_extractors_dict):
arg_process = subprocess.run(["archivebox", "add", "http://127.0.0.1:8080/static/example.com.html", "--depth=0"],
capture_output=True, env=disable_extractors_dict)
assert 'unrecognized arguments: --depth' not in arg_process.stderr.decode("utf-8")
def test_depth_flag_fails_if_it_is_not_0_or_1(process, disable_extractors_dict):
arg_process = subprocess.run(
["archivebox", "add", "--depth=5", "http://127.0.0.1:8080/static/example.com.html"],
capture_output=True,
env=disable_extractors_dict,
)
assert 'invalid choice' in arg_process.stderr.decode("utf-8")
arg_process = subprocess.run(
["archivebox", "add", "--depth=-1", "http://127.0.0.1:8080/static/example.com.html"],
capture_output=True,
env=disable_extractors_dict,
)
assert 'invalid choice' in arg_process.stderr.decode("utf-8")
def test_depth_flag_0_crawls_only_the_arg_page(tmp_path, process, disable_extractors_dict):
arg_process = subprocess.run(
["archivebox", "add", "--depth=0", "http://127.0.0.1:8080/static/example.com.html"],
capture_output=True,
env=disable_extractors_dict,
)
archived_item_path = list(tmp_path.glob('archive/**/*'))[0]
with open(archived_item_path / "index.json", "r", encoding='utf-8') as f:
output_json = json.load(f)
assert output_json["base_url"] == "127.0.0.1:8080/static/example.com.html"
def test_depth_flag_1_crawls_the_page_AND_links(tmp_path, process, disable_extractors_dict):
arg_process = subprocess.run(
["archivebox", "add", "--depth=1", "http://127.0.0.1:8080/static/example.com.html"],
capture_output=True,
env=disable_extractors_dict,
)
conn = sqlite3.connect("index.sqlite3")
c = conn.cursor()
urls = c.execute("SELECT url from core_snapshot").fetchall()
conn.commit()
conn.close()
urls = list(map(lambda x: x[0], urls))
assert "http://127.0.0.1:8080/static/example.com.html" in urls
assert "http://127.0.0.1:8080/static/iana.org.html" in urls
def test_overwrite_flag_is_accepted(process, disable_extractors_dict):
subprocess.run(
["archivebox", "add", "--depth=0", "http://127.0.0.1:8080/static/example.com.html"],
capture_output=True,
env=disable_extractors_dict,
)
arg_process = subprocess.run(
["archivebox", "add", "--overwrite", "http://127.0.0.1:8080/static/example.com.html"],
capture_output=True,
env=disable_extractors_dict,
)
assert 'unrecognized arguments: --overwrite' not in arg_process.stderr.decode("utf-8")
    assert 'favicon' in arg_process.stdout.decode('utf-8'), "archive methods probably didn't run, did overwrite work?"
def test_add_updates_history_json_index(tmp_path, process, disable_extractors_dict):
subprocess.run(
["archivebox", "add", "--depth=0", "http://127.0.0.1:8080/static/example.com.html"],
capture_output=True,
env=disable_extractors_dict,
)
archived_item_path = list(tmp_path.glob('archive/**/*'))[0]
with open(archived_item_path / "index.json", "r", encoding="utf-8") as f:
output_json = json.load(f)
assert output_json["history"] != {}
def test_extract_input_uses_only_passed_extractors(tmp_path, process):
subprocess.run(["archivebox", "add", "http://127.0.0.1:8080/static/example.com.html", "--extract", "wget"],
capture_output=True)
archived_item_path = list(tmp_path.glob('archive/**/*'))[0]
assert (archived_item_path / "warc").exists()
assert not (archived_item_path / "singlefile.html").exists()
|
import hyperchamber as hc
from shared.ops import *
from shared.util import *
import os
import time
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
learning_rates = list(np.linspace(0.0001, 1, num=30))
hc.set("learning_rate", learning_rates)
hidden_layer_options = [[], [26], [128], [16, 32], [32, 16, 8], [16, 8, 8, 4], [64, 64]]
hc.set("hidden_layer", hidden_layer_options)
hc.set("activation", [tf.nn.elu, tf.nn.relu, tf.nn.relu6, tf.tanh, tf.sigmoid, lrelu]);
hc.set("batch_size", 128)
X_DIMS=[28,28]
Y_DIMS=10
def hidden_layers(config, x):
output = tf.reshape(x, [config["batch_size"], X_DIMS[0]*X_DIMS[1]])
for i, layer in enumerate(config['hidden_layer']):
output = linear(output, layer, scope="l"+str(i))
output = config['activation'](output)
return output
def output_layer(config, x):
return linear(x, Y_DIMS)
def create(config):
batch_size = config["batch_size"]
x = tf.placeholder(tf.float32, [batch_size, X_DIMS[0], X_DIMS[1], 1], name="x")
y = tf.placeholder(tf.float32, [batch_size, Y_DIMS], name="y")
hidden = hidden_layers(config, x)
output = output_layer(config, hidden)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(output, y), name="loss")
output = tf.nn.softmax(output)
correct_prediction = tf.equal(tf.argmax(output,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
variables = tf.trainable_variables()
optimizer = tf.train.GradientDescentOptimizer(config['learning_rate']).minimize(loss)
set_tensor("x", x)
set_tensor("y", y)
set_tensor("loss", loss)
set_tensor("optimizer", optimizer)
set_tensor("accuracy", accuracy)
def train(sess, config, x_input, y_labels):
x = get_tensor("x")
y = get_tensor("y")
cost = get_tensor("loss")
optimizer = get_tensor("optimizer")
accuracy = get_tensor("accuracy")
_, accuracy, cost = sess.run([optimizer, accuracy, cost], feed_dict={x:x_input, y:y_labels})
#hc.cost(config, cost)
#print("Accuracy %.2f Cost %.2f" % (accuracy, cost))
def test(sess, config, x_input, y_labels):
x = get_tensor("x")
y = get_tensor("y")
cost = get_tensor("loss")
accuracy = get_tensor("accuracy")
accuracy, cost = sess.run([accuracy, cost], feed_dict={x:x_input, y:y_labels})
print("Accuracy %.2f Cost %.2f" % (accuracy, cost))
return accuracy, cost
def epoch(sess, config):
batch_size = config["batch_size"]
n_samples = mnist.train.num_examples
total_batch = int(n_samples / batch_size)
for i in range(total_batch):
x, y = mnist.train.next_batch(batch_size)
x=np.reshape(x, [batch_size, X_DIMS[0], X_DIMS[1], 1])
train(sess, config, x, y)
def test_config(sess, config):
batch_size = config["batch_size"]
n_samples = mnist.test.num_examples
total_batch = int(n_samples / batch_size)
accuracies = []
costs = []
for i in range(total_batch):
x, y = mnist.test.next_batch(batch_size)
x=np.reshape(x, [batch_size, X_DIMS[0], X_DIMS[1], 1])
accuracy, cost = test(sess, config, x, y)
accuracies.append(accuracy)
costs.append(cost)
return accuracies, costs
print("Searching randomly with %d possible configurations." % hc.count_configs())
for i in range(100):
config = hc.random_config()
print("Testing configuration", config)
sess = tf.Session()
graph = create(config)
init = tf.initialize_all_variables()
sess.run(init)
for i in range(10):
epoch(sess, config)
accuracies, costs = test_config(sess, config)
accuracy, cost = np.mean(accuracies), np.mean(costs)
results = {
'accuracy':accuracy,
'cost':cost
}
hc.record(config, results)
ops.reset_default_graph()
sess.close()
def by_accuracy(x):
config,result = x
return 1-result['accuracy']
for config, result in hc.top(by_accuracy):
print("RESULTS")
print(config, result)
|
'''
Manage Azure Cosmos DB Cassandra tables.
'''
from .... pyaz_utils import _call_az
from . import throughput
def create(account_name, keyspace_name, name, resource_group, schema, analytical_storage_ttl=None, max_throughput=None, throughput=None, ttl=None):
'''
    Create a Cassandra table under an Azure Cosmos DB Cassandra keyspace.
Required Parameters:
- account_name -- Cosmosdb account name.
- keyspace_name -- Keyspace name
- name -- Table name
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
- schema -- Schema, you can enter it as a string or as a file, e.g., --schema @schema-file.json or --schema "{\"columns\": [{\"name\": \"columnA\",\"type\": \"uuid\"}, {\"name\": \"columnB\",\"type\": \"Ascii\"}],\"partitionKeys\": [{\"name\": \"columnA\"}]}"
Optional Parameters:
- analytical_storage_ttl -- Analytical TTL, when analytical storage is enabled.
- max_throughput -- The maximum throughput resource can scale to (RU/s). Provided when the resource is autoscale enabled. The minimum value can be 4000 (RU/s)
- throughput -- The throughput of Cassandra table (RU/s). Default value is 400. Omit this parameter if the keyspace has shared throughput unless the table should have dedicated throughput.
- ttl -- Default TTL. If the value is missing or set to "-1", items don’t expire. If the value is set to "n", items will expire "n" seconds after last modified time.
'''
return _call_az("az cosmosdb cassandra table create", locals())
def update(account_name, keyspace_name, name, resource_group, analytical_storage_ttl=None, schema=None, ttl=None):
'''
    Update a Cassandra table under an Azure Cosmos DB Cassandra keyspace.
Required Parameters:
- account_name -- Cosmosdb account name.
- keyspace_name -- Keyspace name
- name -- Table name
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
Optional Parameters:
- analytical_storage_ttl -- Analytical TTL, when analytical storage is enabled.
- schema -- Schema, you can enter it as a string or as a file, e.g., --schema @schema-file.json or --schema "{\"columns\": [{\"name\": \"columnA\",\"type\": \"uuid\"}, {\"name\": \"columnB\",\"type\": \"Ascii\"}],\"partitionKeys\": [{\"name\": \"columnA\"}]}"
- ttl -- Default TTL. If the value is missing or set to "-1", items don’t expire. If the value is set to "n", items will expire "n" seconds after last modified time.
'''
return _call_az("az cosmosdb cassandra table update", locals())
def exists(account_name, keyspace_name, name, resource_group):
'''
Required Parameters:
- account_name -- Cosmosdb account name.
- keyspace_name -- Keyspace name
- name -- Table name
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az cosmosdb cassandra table exists", locals())
def list(account_name, keyspace_name, resource_group):
'''
List the Cassandra tables under an Azure Cosmos DB Cassandra keyspace.
Required Parameters:
- account_name -- Cosmosdb account name.
- keyspace_name -- Keyspace name
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az cosmosdb cassandra table list", locals())
def show(account_name, keyspace_name, name, resource_group):
'''
Show the details of a Cassandra table under an Azure Cosmos DB Cassandra keyspace.
Required Parameters:
- account_name -- Cosmosdb account name.
- keyspace_name -- Keyspace name
- name -- Table name
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az cosmosdb cassandra table show", locals())
def delete(account_name, keyspace_name, name, resource_group, yes=None):
'''
Delete the Cassandra table under an Azure Cosmos DB Cassandra keyspace.
Required Parameters:
- account_name -- Cosmosdb account name.
- keyspace_name -- Keyspace name
- name -- Table name
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
Optional Parameters:
- yes -- Do not prompt for confirmation.
'''
return _call_az("az cosmosdb cassandra table delete", locals())
|
"""
whodunit.py
Computer Science 50 in Python
Problem Set 5
Decipher the message hidden in the BMP.
Run without arguments for usage.
"""
from sys import argv, exit
import struct
import Image
if __name__ == "__main__":
# Ensure proper usage
if not len(argv) == 3:
print "Usage: copy infile outfile"
exit(1)
# Remember filenames
infile = argv[1]
outfile = argv[2]
# Open input file
try:
img = Image.open(infile)
except IOError:
print "Could not open {0}".format(infile)
exit(2)
# Get all the pixels
pixels = list(img.getdata())
new_data = []
for pixel in pixels:
# Change red to white
if pixel == (255, 0, 0):
new_data.append((0, 0, 0))
# Change white to black
elif pixel == (255, 255, 255):
new_data.append((0, 0, 0))
# The rest should be the message
else:
new_data.append((255, 0, 0))
# Create a new file and attempt to save to disk
new_img = Image.new(img.mode, img.size)
new_img.putdata(new_data)
try:
new_img.save(outfile)
except IOError:
print "Failed to write to disk"
|
import torch
from torch import nn as nn
from basicsr.archs.buildingblocks import DoubleConv, ExtResNetBlock, create_encoders, create_decoders
from basicsr.utils.registry import ARCH_REGISTRY
def number_of_features_per_level(init_channel_number, num_levels):
return [init_channel_number * 2 ** k for k in range(num_levels)]
class Abstract3DUNet(nn.Module):
"""
Base class for standard and residual UNet.
Args:
in_channels (int): number of input channels
out_channels (int): number of output segmentation masks;
            Note that the value of out_channels might correspond either to
            different semantic classes or to different binary segmentation masks.
It's up to the user of the class to interpret the out_channels and
use the proper loss criterion during training (i.e. CrossEntropyLoss (multi-class)
or BCEWithLogitsLoss (two-class) respectively)
        f_maps (int, tuple): number of feature maps at each level of the encoder; if it's an integer, the number
            of feature maps at level k is f_maps * 2**k for k = 0, ..., num_levels - 1 (see ``number_of_features_per_level``)
final_sigmoid (bool): if True apply element-wise nn.Sigmoid after the
final 1x1 convolution, otherwise apply nn.Softmax. MUST be True if nn.BCELoss (two-class) is used
to train the model. MUST be False if nn.CrossEntropyLoss (multi-class) is used to train the model.
basic_module: basic model for the encoder/decoder (DoubleConv, ExtResNetBlock, ....)
layer_order (string): determines the order of layers
in `SingleConv` module. e.g. 'crg' stands for Conv3d+ReLU+GroupNorm3d.
See `SingleConv` for more info
num_groups (int): number of groups for the GroupNorm
num_levels (int): number of levels in the encoder/decoder path (applied only if f_maps is an int)
is_segmentation (bool): if True (semantic segmentation problem) Sigmoid/Softmax normalization is applied
after the final convolution; if False (regression problem) the normalization layer is skipped at the end
testing (bool): if True (testing mode) the `final_activation` (if present, i.e. `is_segmentation=true`)
will be applied as the last operation during the forward pass; if False the model is in training mode
and the `final_activation` (even if present) won't be applied; default: False
conv_kernel_size (int or tuple): size of the convolving kernel in the basic_module
pool_kernel_size (int or tuple): the size of the window
conv_padding (int or tuple): add zero-padding added to all three sides of the input
"""
def __init__(self, in_channels, out_channels, final_sigmoid, basic_module, f_maps=64, layer_order='cl',
num_groups=8, num_levels=4, is_segmentation=False, testing=False,
conv_kernel_size=3, pool_kernel_size=2, conv_padding=1, **kwargs):
super(Abstract3DUNet, self).__init__()
self.testing = testing
if isinstance(f_maps, int):
f_maps = number_of_features_per_level(f_maps, num_levels=num_levels)
assert isinstance(f_maps, list) or isinstance(f_maps, tuple)
assert len(f_maps) > 1, "Required at least 2 levels in the U-Net"
# create encoder path
self.encoders = create_encoders(in_channels, f_maps, basic_module, conv_kernel_size, conv_padding, layer_order,
num_groups, pool_kernel_size)
# create decoder path
self.decoders = create_decoders(f_maps, basic_module, conv_kernel_size, conv_padding, layer_order, num_groups,
upsample=True)
# in the last layer a 1×1 convolution reduces the number of output
# channels to the number of labels
self.final_conv = nn.Conv3d(f_maps[0], out_channels, 1)
if is_segmentation:
# semantic segmentation problem
if final_sigmoid:
self.final_activation = nn.Sigmoid()
else:
self.final_activation = nn.Softmax(dim=1)
else:
# regression problem
self.final_activation = None
def forward(self, x):
# encoder part
encoders_features = []
for encoder in self.encoders:
x = encoder(x)
# reverse the encoder outputs to be aligned with the decoder
encoders_features.insert(0, x)
# remove the last encoder's output from the list
# !!remember: it's the 1st in the list
encoders_features = encoders_features[1:]
# decoder part
for decoder, encoder_features in zip(self.decoders, encoders_features):
# pass the output from the corresponding encoder and the output
# of the previous decoder
x = decoder(encoder_features, x)
x = self.final_conv(x)
# apply final_activation (i.e. Sigmoid or Softmax) only during prediction. During training the network outputs
# logits and it's up to the user to normalize it before visualising with tensorboard or computing validation metric
if self.testing and self.final_activation is not None:
x = self.final_activation(x)
return x
@ARCH_REGISTRY.register()
class UNet2D(Abstract3DUNet):
"""
Just a standard 2D Unet. Arises naturally by specifying conv_kernel_size=(1, 3, 3), pool_kernel_size=(1, 2, 2).
"""
def __init__(self, in_channels, out_channels, final_sigmoid=False, f_maps=64, layer_order='cl',
num_groups=8, num_levels=4, is_segmentation=False, conv_padding=1, **kwargs):
if conv_padding == 1:
conv_padding = (0, 1, 1)
super(UNet2D, self).__init__(in_channels=in_channels,
out_channels=out_channels,
final_sigmoid=final_sigmoid,
basic_module=DoubleConv,
f_maps=f_maps,
layer_order=layer_order,
num_groups=num_groups,
num_levels=num_levels,
is_segmentation=is_segmentation,
conv_kernel_size=(1, 3, 3),
pool_kernel_size=(1, 2, 2),
conv_padding=conv_padding,
**kwargs)
@ARCH_REGISTRY.register()
class UNet3D(Abstract3DUNet):
"""
3DUnet model from
`"3D U-Net: Learning Dense Volumetric Segmentation from Sparse Annotation"
<https://arxiv.org/pdf/1606.06650.pdf>`.
Uses `DoubleConv` as a basic_module and nearest neighbor upsampling in the decoder
"""
def __init__(self, in_channels, out_channels, final_sigmoid=False, f_maps=64, layer_order='cl',
num_groups=8, num_levels=4, is_segmentation=False, conv_padding=1, **kwargs):
super(UNet3D, self).__init__(in_channels=in_channels,
out_channels=out_channels,
final_sigmoid=final_sigmoid,
basic_module=DoubleConv,
f_maps=f_maps,
layer_order=layer_order,
num_groups=num_groups,
num_levels=num_levels,
is_segmentation=is_segmentation,
conv_padding=conv_padding,
**kwargs)
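# Quick shape sanity check (illustrative sketch; sizes are arbitrary and assume the
# standard DoubleConv building blocks preserve spatial dimensions):
# net = UNet3D(in_channels=1, out_channels=2, f_maps=32, num_levels=3)
# x = torch.randn(1, 1, 16, 64, 64)       # [B, C, D, H, W]
# out = net(x)                            # -> torch.Size([1, 2, 16, 64, 64])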
@ARCH_REGISTRY.register()
class ResidualUNet3D(Abstract3DUNet):
"""
Residual 3DUnet model implementation based on https://arxiv.org/pdf/1706.00120.pdf.
Uses ExtResNetBlock as a basic building block, summation joining instead
of concatenation joining and transposed convolutions for upsampling (watch out for block artifacts).
Since the model effectively becomes a residual net, in theory it allows for deeper UNet.
"""
def __init__(self, in_channels, out_channels, final_sigmoid=False, f_maps=64, layer_order='cl',
num_groups=8, num_levels=4, is_segmentation=False, conv_padding=1, **kwargs):
super(ResidualUNet3D, self).__init__(in_channels=in_channels,
out_channels=out_channels,
final_sigmoid=final_sigmoid,
basic_module=ExtResNetBlock,
f_maps=f_maps,
layer_order=layer_order,
num_groups=num_groups,
num_levels=num_levels,
is_segmentation=is_segmentation,
conv_padding=conv_padding,
**kwargs)
class FRB(nn.Module):
"""Fast spatial-temporal residual block"""
def __init__(self, ks=3, nf=64):
super(FRB, self).__init__()
self.prelu = nn.PReLU()
self.conv3d_1 = nn.Conv3d(nf, nf, (1, ks, ks), stride=(1, 1, 1), padding=(0, 1, 1), bias=True)
self.conv3d_2 = nn.Conv3d(nf, nf, (ks, 1, 1), stride=(1, 1, 1), padding=(1, 0, 0), bias=True)
def forward(self, x):
res = x
out = self.conv3d_2(self.conv3d_1(self.prelu(x)))
return res + out
@ARCH_REGISTRY.register()
class FSTRN(nn.Module):
"""Fast spatial-temporal residual network"""
def __init__(self, ks=3, nf=64):
super(FSTRN, self).__init__()
self.ks = ks
self.nf = nf
self.conv3d_1 = nn.Conv3d(3, nf, (ks, ks, ks), stride=(1, 1, 1), padding=(1, 1, 1), bias=True)
self.frb_1 = FRB(ks=ks, nf=nf)
self.frb_2 = FRB(ks=ks, nf=nf)
self.frb_3 = FRB(ks=ks, nf=nf)
self.frb_4 = FRB(ks=ks, nf=nf)
self.frb_5 = FRB(ks=ks, nf=nf)
self.prelu = nn.PReLU()
self.conv3d_2 = nn.Conv3d(nf, 3, (ks, ks, ks), stride=(1, 1, 1), padding=(1, 1, 1), bias=True)
def forward(self, inp):
"""
x: [B, T, C, H, W], reshape to [B, C, T, H, W] for Conv3D
"""
inp = inp.permute(0, 2, 1, 3, 4)
img_res = inp
out = self.conv3d_1(inp)
fea_res = out
out = self.frb_5(self.frb_4(self.frb_3(self.frb_2(self.frb_1(out)))))
out = fea_res + out
out = self.conv3d_2(out)
out = img_res + out
out = out.permute(0, 2, 1, 3, 4)
return out
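# Quick shape sanity check (illustrative sketch): the permutes make the block
# operate on [B, C, T, H, W] internally, so input and output shapes match.
# net = FSTRN(ks=3, nf=64)
# x = torch.randn(1, 5, 3, 32, 32)        # [B, T, C, H, W]
# out = net(x)                            # -> torch.Size([1, 5, 3, 32, 32])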
|
import bilevel_solver
bilevel_solver.all_pivots_dot(
[
[0.000000, 0.000000, 1.00000, 1.00000, 6.000000, 1.000000, 2.000000, 0.000000, 0.000000, 510.000000],
[0.000000, 6.000000, 5.000000, 5.000000, 0.000000, -1.00000, 4.000000, 0.000000, 0.000000, 90.000000],
[0.000000, 0.000000, -5.00000, -5.00000, 0.000000, 1.000000, -4.00000, 6.000000, 0.000000, 90.000000],
[0.000000, 0.000000, 1.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 1.000000, 85.000000],
[1.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000, 0.000000, 100.000000]
],
[1.000000, -0.500000, -0.700000, -0.100000, -0.400000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000],
#[-1.000000, -0.500000, 0.700000, -0.100000, 0.400000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000],
    0b1010, 1
) #43 #20
'''
[0.000000, 0.000000, 0.166667, 0.166667, 1.000000, 0.166667, 0.333333, 0.000000, 0.000000, 85.000000],
[0.000000, 1.000000, 0.833333, 0.833333, 0.000000, -0.166667, 0.666667, 0.000000, 0.000000, 15.000000],
[0.000000, 0.000000, -0.833333, -0.833333, 0.000000, 0.166667, -0.666667, 1.000000, 0.000000, 15.000000],
[0.000000, 0.000000, 1.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 1.000000, 85.000000],
[1.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000, 0.000000, 100.000000]'''
|
import numpy as np
def griewank(P):
return (
1
+ (P**2).sum(axis=1) / 4000
- np.prod(
np.cos(
P/np.sqrt(
np.arange(1, P.shape[-1] + 1)
)
),
axis=1
)
)
def rast(P):
n = P.shape[-1]
return (
P**2 - 10 * np.cos(2*np.pi * P)
).sum(axis=1) + 10*n
def schw(P):
n = P.shape[-1]
return np.sum(
P * np.sin(
np.sqrt(
np.abs(P)
)
),
axis=1
) + 418.9829*n
def dp(P):
return (P[..., 0]-1)**2 + np.sum(
(
2 * P[..., 1:]**2 - P[..., :-1]**2
)**2 *
np.arange(2, P.shape[-1] + 1), axis=1
)
def sphere(P):
return np.sum(
P**2,
axis=1
)
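# Quick sanity check (sketch): griewank, rast and sphere all attain their global
# minimum value of 0 at the origin, e.g.
# P = np.zeros((3, 5))                    # three 5-dimensional points
# griewank(P), rast(P), sphere(P)         # -> three arrays of zeros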
|
import re, subprocess
from smsgateway.sources.sms import command_list
from smsgateway.config import *
from smsgateway.sources.utils import *
service_commands = ['start', 'restart', 'stop', 'enable', 'disable', 'status']
service_regex = re.compile('^(?P<command>[a-zA-Z]+) (?P<service>.+)$')
def run_cmd(args):
return subprocess.check_output(args, stderr=subprocess.STDOUT).decode('UTF-8').strip()
def match(cmd):
m = service_regex.match(cmd)
c = m.groupdict()['command']
s = m.groupdict()['service']
return (c, s)
def check(cmd, multiline):
# print("Checking %s" % cmd)
if service_regex.match(cmd):
print("Service RE matches!")
(c, s) = match(cmd)
if c in service_commands and s in SERVICES:
print("Command: %s, Service: %s" % (c, s))
return True
return False
def run(lines):
cmd = lines[0]
(c, s) = match(cmd)
if c in service_commands and s in SERVICES:
try:
out = run_cmd([SUDO_PATH, SYSTEMCTL_PATH, c, s])
ret = "%s: OK!\n%s" % (cmd, out)
except subprocess.CalledProcessError as e:
ret = "%s failed:\n%s" % (cmd, e.output.decode('UTF-8').strip())
else:
ret = "Unknown command or service:\n%s" % cmd
return ret
command_list.append({
'name' : 'Services',
'check': check,
'run': run
})
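# Example (illustrative sketch, assuming 'nginx' is listed in SERVICES and the
# SUDO_PATH/SYSTEMCTL_PATH settings point at valid binaries):
# check('status nginx', multiline=False)   # -> True
# run(['status nginx'])                    # -> output of `systemctl status nginx`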
|
import argparse
import sys
from ledger import classproperty
from ledger.command import Command
from ledger.command.output import chunks
from ledger.transaction import Transaction
class Delete(Command):
names = ("delete", "del")
def __call__(self, args):
args = self.parser.parse_args(args)
if not args.transaction_ids and not sys.stdin.isatty():
args.transaction_ids = [l.rstrip() for l in sys.stdin.read().split(" ")]
deleted = []
for chunk in chunks(args.transaction_ids, 100):
transactions = Transaction.filter(
" OR ".join(f"uid=\"{uid}\"" for uid in chunk or [""])
)
Transaction.batch_delete(transactions)
deleted += transactions
print("Removed the following transactions:")
for t in deleted:
print(t)
@classproperty
def parser(self):
parser = argparse.ArgumentParser(
description="Deletes transactions from the ledger given a space separated list of transaction IDs.",
prog=f"{sys.argv[0]} {self.names[0]}"
)
parser.add_argument(
"transaction_ids",
type=str,
help="The IDs of the transactions to delete.",
nargs="*"
)
        return parser
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'
from numpy.random import randn, randint
from decimal import Decimal
from tqdm import tqdm
import pickle
from tensorflow.keras.utils import plot_model
from cDCGAN.Discriminator import make_discriminator
from cDCGAN.Generator import make_generator
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow import keras
IM_SIZE = 32*13
OUTPUT_DIR = "./img"
IMAGE_DIR = 'IMAGE DIR'
# evaluate the discriminator, plot generated images, save generator model
def summarize_performance(epoch, g_model, d_model, dataset, latent_dim, n_samples=150):
# prepare real samples
X_real, y_real = generate_real_samples(dataset, n_samples)
# evaluate discriminator on real examples
_, acc_real = d_model.evaluate(X_real, y_real, verbose=0)
# prepare fake examples
x_fake, y_fake = generate_fake_samples(g_model, latent_dim, n_samples)
# evaluate discriminator on fake examples
_, acc_fake = d_model.evaluate(x_fake, y_fake, verbose=0)
# summarize discriminator performance
print('>Accuracy real: %.0f%%, fake: %.0f%%' % (acc_real*100, acc_fake*100))
# save plot
save_plot(x_fake, epoch)
# save the generator model
filename = f'./models/generator_model_{epoch+1}.h5'
g_model.save(filename)
# create and save a plot of generated images
def save_plot(examples, epoch, n=1):
# scale from [-1,1] to [0,1]
examples = (examples + 1) / 2.0
# plot images
for i in range(n * n):
# define subplot
plt.subplot(n, n, 1 + i)
# turn off axis
plt.axis('off')
# plot raw pixel data
plt.imshow(examples[i])
# save plot to file
filename = f'./plots/generated_plot_epoch_{epoch+1}.png'
plt.savefig(filename)
plt.close()
# label smoothing: map class=1 labels into the range [0.7, 1.0)
def smooth_positive_labels(y):
return y - 0.3 + (np.random.random(y.shape) * 0.3)
def smooth_negative_labels(y):
return y + np.random.random(y.shape) * 0.3
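# e.g. (sketch): for y = np.ones((4, 1)) the smoothed "real" labels fall in [0.7, 1.0),
# and for y = np.zeros((4, 1)) the smoothed "fake" labels fall in [0.0, 0.3).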
# generate points in latent space as input for the generator
def generate_latent_points(latent_dim, n_samples):
# generate points in the latent space
x_input = randn(latent_dim * n_samples)
# reshape into a batch of inputs for the network
x_input = x_input.reshape(n_samples, latent_dim)
return x_input
# load and prepare the pickled training images
def load_real_samples():
with open('train_images.pkl', 'rb') as f:
train_images = pickle.load(f)
train_images = train_images.reshape(train_images.shape[0], IM_SIZE, IM_SIZE, 3).astype('float32')
# train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
return train_images
# select real samples
def generate_real_samples(dataset, n_samples):
# choose random instances
ix = randint(0, dataset.shape[0], n_samples)
# retrieve selected images
X = dataset[ix]
# generate 'real' class labels (1)
y = np.ones((n_samples, 1))
y = smooth_positive_labels(y)
return X, y
def generate_fake_samples(generator, latent_dim, n_samples):
# generate points in latent space
x_input = generate_latent_points(latent_dim, n_samples)
# predict outputs
X = generator.predict(x_input)
# create 'fake' class labels (0)
y = np.zeros((n_samples, 1))
y = smooth_negative_labels(y)
return X, y
# define the combined generator and discriminator model, for updating the generator
def define_gan(g_model, d_model):
# make weights in the discriminator not trainable
d_model.trainable = False
# connect them
model = keras.Sequential()
# add generator
model.add(g_model)
# add the discriminator
model.add(d_model)
# compile model
opt = keras.optimizers.Adam(lr=0.0002, beta_1=0.5)
model.compile(loss='binary_crossentropy', optimizer=opt)
return model
# train the generator and discriminator
def train(g_model, d_model, gan_model, dataset, latent_dim, n_epochs=200, n_batch=56):
bat_per_epo = int(dataset.shape[0] / n_batch)
half_batch = int(n_batch / 2)
# manually enumerate epochs
for i in tqdm(range(n_epochs), leave=True):
# enumerate batches over the training set
for j in range(bat_per_epo):
# get randomly selected 'real' samples
X_real, y_real = generate_real_samples(dataset, half_batch)
# update discriminator model weights
d_loss1, _ = d_model.train_on_batch(X_real, y_real)
# generate 'fake' examples
X_fake, y_fake = generate_fake_samples(g_model, latent_dim, half_batch)
# update discriminator model weights
d_loss2, _ = d_model.train_on_batch(X_fake, y_fake)
# prepare points in latent space as input for the generator
X_gan = generate_latent_points(latent_dim, n_batch)
# create inverted labels for the fake samples
y_gan = np.ones((n_batch, 1))
# update the generator via the discriminator's error
g_loss = gan_model.train_on_batch(X_gan, y_gan)
# summarize loss on this batch
print(f'Epoch: {i+1}, '
f'batch pr. epoch: {j+1}/{bat_per_epo} '
f'd1= {round(Decimal(str(d_loss1)), 5)}, '
f'd2= {round(Decimal(str(d_loss2)), 5)} '
f'g= {round(Decimal(str(g_loss)), 5)}')
if (i + 1) % 40 == 0:
summarize_performance(i, g_model, d_model, dataset, latent_dim)
if __name__ == "__main__":
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
# Size of latent space
latent_dim = 100
# Create the generator model
generator = make_generator(latent_dim=latent_dim)
# Create the discriminator model
diskriminator = make_discriminator()
generator.summary()
diskriminator.summary()
plot_model(generator, to_file='generator-model-plot.png', show_shapes=True, show_layer_names=True)
plot_model(diskriminator, to_file='diskriminator-model-plot.png', show_shapes=True, show_layer_names=True)
# Create the GAN
gan_model = define_gan(g_model=generator, d_model=diskriminator)
# plot_model(diskriminator, to_file='diskriminator-model-plot.png', show_shapes=True, show_layer_names=True)
# Load images of pipes with ice
samples = load_real_samples()
# train model
train(generator, diskriminator, gan_model, samples, latent_dim, n_epochs=40000, n_batch=4)
|
import os
import random
import torch
from PIL import Image
from torch.utils import data
from torchvision import transforms as T
class CelebA_withname(data.Dataset):
"""Dataset class for the CelebA dataset."""
def __init__(self,
image_dir,
attr_path,
selected_attrs,
transform,
test_idlist=[],
test_namelist=[],
sub_folder=''):
"""Initialize and preprocess the CelebA dataset."""
self.image_dir = image_dir
self.attr_path = attr_path
self.selected_attrs = selected_attrs
self.transform = transform
self.test_dataset = []
self.attr2idx = {}
self.idx2attr = {}
self.test_namelist = test_namelist
self.id_name_dic = dict(zip(test_namelist, test_idlist))
self.preprocess()
self.num_images = len(self.test_dataset)
self.sub_folder = sub_folder
def preprocess(self):
"""Preprocess the CelebA attribute file."""
lines = [line.rstrip() for line in open(self.attr_path, 'r')]
all_attr_names = lines[1].split()
for i, attr_name in enumerate(all_attr_names):
self.attr2idx[attr_name] = i
self.idx2attr[i] = attr_name
lines = lines[2:]
random.seed(1234)
random.shuffle(lines)
assert self.test_namelist != []
for i, line in enumerate(lines):
split = line.split()
filename = split[0]
if filename in self.test_namelist:
id = self.id_name_dic[filename]
values = split[1:]
label = []
for attr_name in self.selected_attrs:
idx = self.attr2idx[attr_name]
label.append(values[idx] == '1')
self.test_dataset.append([filename, label, id])
print('Finished preprocessing the CelebA dataset...')
def __getitem__(self, index):
"""Return one image and its corresponding attribute label."""
dataset = self.test_dataset
filename, label, id = dataset[index]
image = Image.open(os.path.join(self.image_dir, id, self.sub_folder, filename))
return self.transform(image), torch.FloatTensor(label), filename
def __len__(self):
"""Return the number of images."""
return self.num_images
def get_loader(image_dir,
attr_path,
selected_attrs,
crop_size=112,
image_size=112,
batch_size=16,
num_workers=1):
"""Build and return a data loader."""
transform = []
transform.append(T.CenterCrop(crop_size))
transform.append(T.Resize(image_size))
transform.append(T.ToTensor())
transform.append(T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)))
transform = T.Compose(transform)
dataset = CelebA_withname(image_dir, attr_path, selected_attrs, transform)
data_loader = data.DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers)
return data_loader
def create_dic(image_dir,
attr_path,
selected_attrs,
crop_size=112,
image_size=112,
test_idlist=[],
test_namelist=[],
sub_folder=''):
transform = []
transform.append(T.CenterCrop(crop_size))
transform.append(T.Resize(image_size))
transform.append(T.ToTensor())
transform.append(T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)))
transform = T.Compose(transform)
dataset = CelebA_withname(image_dir, attr_path, selected_attrs, transform,
test_idlist, test_namelist, sub_folder)
dic_label = {}
dic_image = {}
for i in range(len(dataset)):
img, label, filename = dataset[i]
dic_label[filename] = label
dic_image[filename] = img
return dic_label, dic_image
|
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
import argparse
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold
def RF_Classifier(X, y, indep=None, fold=5, n_trees=100, out='RF_output'):
"""
Parameters:
----------
:param X: 2-D ndarray
:param y: 1-D ndarray
:param indep: 2-D ndarray, the first column is labels and the rest are feature values
:param fold: int, default 5
    :param n_trees: int, number of trees, default: 100
:param out:
:return:
info: str, the model parameters
        cross-validation result: list whose elements are ndarrays
independent result: ndarray, the first column is labels and the rest are prediction scores.
"""
classes = sorted(list(set(y)))
if indep.shape[0] != 0:
indep_out = np.zeros((indep.shape[0], len(classes) + 1))
indep_out[:, 0] = indep[:, 0]
prediction_result_cv = []
prediction_result_ind = np.array([])
if indep.shape[0] != 0:
prediction_result_ind = np.zeros((len(indep), len(classes) + 1))
prediction_result_ind[:, 0] = indep[:, 0]
folds = StratifiedKFold(fold).split(X, y)
for i, (trained, valided) in enumerate(folds):
train_y, train_X = y[trained], X[trained]
valid_y, valid_X = y[valided], X[valided]
model = RandomForestClassifier(n_estimators=n_trees, bootstrap=False)
rfc = model.fit(train_X, train_y)
scores = rfc.predict_proba(valid_X)
tmp_result = np.zeros((len(valid_y), len(classes) + 1))
tmp_result[:, 0], tmp_result[:, 1:] = valid_y, scores
prediction_result_cv.append(tmp_result)
# independent
if indep.shape[0] != 0:
prediction_result_ind[:, 1:] += rfc.predict_proba(indep[:, 1:])
if indep.shape[0] != 0:
prediction_result_ind[:, 1:] /= fold
header = 'n_trees: %d' % n_trees
return header, prediction_result_cv, prediction_result_ind
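# Example usage (illustrative sketch with random data; shapes only):
# X = np.random.rand(100, 20)
# y = np.random.randint(0, 2, 100)
# indep = np.hstack([y.reshape(-1, 1), X])    # first column holds the labels
# header, cv_res, ind_res = RF_Classifier(X, y, indep=indep, fold=5, n_trees=100)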
|
import subprocess
import sys
import setup_util
import os
from zipfile import ZipFile
def start(args, logfile, errfile):
setup_util.replace_text("play-scala-mongodb/conf/application.conf", "jdbc:mysql:\/\/.*:3306", "jdbc:mysql://" + args.database_host + ":3306")
subprocess.check_call("play clean dist", shell=True, cwd="play-scala-mongodb", stderr=errfile, stdout=logfile)
if os.name == 'nt':
ZipFile("./play-scala-mongodb/target/universal/play-scala-mongodb-1.0-SNAPSHOT.zip").extractall("./play-scala-mongodb/target/universal")
with open("./play-scala-mongodb/target/universal/play-scala-mongodb-1.0-SNAPSHOT/bin/play-scala-mongodb.bat", "w+") as f:
f.write("java %1 -cp \"./lib/*;\" play.core.server.NettyServer .")
subprocess.Popen("play-scala-mongodb.bat", shell=True, cwd="play-scala-mongodb/target/universal/play-scala-mongodb-1.0-SNAPSHOT/bin", stderr=errfile, stdout=logfile)
else:
subprocess.check_call("unzip play-scala-mongodb-1.0-SNAPSHOT.zip", shell=True, cwd="play-scala-mongodb/target/universal", stderr=errfile, stdout=logfile)
subprocess.check_call("chmod +x play-scala-mongodb", shell=True, cwd="play-scala-mongodb/target/universal/play-scala-mongodb-1.0-SNAPSHOT/bin", stderr=errfile, stdout=logfile)
subprocess.Popen("./play-scala-mongodb", shell=True, cwd="play-scala-mongodb/target/universal/play-scala-mongodb-1.0-SNAPSHOT/bin", stderr=errfile, stdout=logfile)
return 0
def stop(logfile, errfile):
if os.name == 'nt':
with open("./play-scala-mongodb/target/universal/play-scala-mongodb-1.0-SNAPSHOT/RUNNING_PID") as f:
pid = int(f.read())
os.kill(pid, 15)
else:
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
if 'NettyServer' in line:
pid = int(line.split(None, 2)[1])
os.kill(pid, 15)
try:
os.remove("play-scala-mongodb/target/universal/play-scala-mongodb-1.0-SNAPSHOT/RUNNING_PID")
except OSError:
return 1
# Takes up so much disk space
if os.name == 'nt':
subprocess.check_call("del /f /s /q target", shell=True, cwd="play-scala-mongodb", stderr=errfile, stdout=logfile)
else:
subprocess.check_call("rm -rf target", shell=True, cwd="play-scala-mongodb", stderr=errfile, stdout=logfile)
return 0
|
__version_info__ = ('1', '10', '12')
__version__ = '.'.join(__version_info__)
from .wrappers import (ObjectProxy, CallableObjectProxy, FunctionWrapper,
BoundFunctionWrapper, WeakFunctionProxy, resolve_path, apply_patch,
wrap_object, wrap_object_attribute, function_wrapper,
wrap_function_wrapper, patch_function_wrapper,
transient_function_wrapper)
from .decorators import (adapter_factory, AdapterFactory, decorator,
synchronized)
from .importer import (register_post_import_hook, when_imported,
notify_module_loaded, discover_post_import_hooks)
try:
from inspect import getcallargs
except ImportError:
from .arguments import getcallargs
|
#!/usr/bin/env python
# -------------------------------------------------
# This script is for changing names of .png images.
# Written by Xiaoming Zhao
# -------------------------------------------------
import os
import sys
import glob
import argparse
def parse_args():
parser = argparse.ArgumentParser(
description='Change png\'s name to match imdb_IRMA')
parser.add_argument('--dir', dest='dir_path',
help='Set the .png images\'s directory.')
parser.add_argument('--file', dest='img_list_file',
help='Set the imdb_IRMA image list\'s directory.')
parser.add_argument('--repeat', dest='repeated_img_file',
                        help='Set the file directory for repeated images.')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
dir_path = args.dir_path
# read the content of image list file
img_list_file = args.img_list_file
with open(img_list_file, 'r') as f:
pre_img_list = [item.strip() for item in f.readlines()]
# generate the image index list
img_index_list = {}
for line in pre_img_list:
split_line = line.split(' ')
img_index_list[split_line[0]] = []
img_index_list[split_line[0]].extend(split_line[1:])
# get the .png file list under the path
png_list = []
for (dirpath, dirname_s, filename_s) in os.walk(dir_path):
for filename in filename_s:
if filename.split('.')[-1].lower() == 'png':
png_list.append(os.path.join(dirpath, filename))
assert png_list != [],\
'There does not exist .png file: {}.\n'.format(dir_path)
    print 'Found {} png images in total.\n'.format(len(png_list))
    # change png file names to match the names in imdb_IRMA
print 'Changing name of png files ...'
repeated_img_file = args.repeated_img_file
rename_num = 0
repeated_img_num = 0
for png_file in png_list:
png_dirname = os.path.dirname(png_file)
png_filename = os.path.basename(png_file)
file_index = '.'.join(png_filename.split('.')[:2])
# print '{} {} {}\n'.format(png_dirname, png_filename, file_index)
assert file_index in img_index_list.keys(),\
'Wrong image index, imdb_IRMA does not include this image: {}.\n'\
.format(png_file)
new_filename_list = img_index_list[file_index]
if len(new_filename_list) == 1:
rename_num = rename_num + 1
new_filename = new_filename_list[0]
os.rename(png_file, os.path.join(png_dirname, new_filename))
else:
repeated_img_num = repeated_img_num + 1
info_list = [file_index]
info_list.extend(new_filename_list)
info_list.append(png_file)
with open(repeated_img_file, 'a') as f:
                f.write('{} {} {} {}\n'.format(*info_list))
print '... done'
print 'Rename {} images.\nWrite {} repeated images to {}.\n'\
.format(rename_num, repeated_img_num, repeated_img_file)
|
import datetime
import functools
import logging
import os
import time
import gobject
import gtk
from tornado import ioloop
from tornado.log import gen_log
class GtkIOLoop(ioloop.IOLoop):
READ = gobject.IO_IN
WRITE = gobject.IO_OUT
ERROR = gobject.IO_ERR | gobject.IO_HUP
def initialize(self, time_func=None):
super(GtkIOLoop, self).initialize()
self.time_func = time_func or time.time
self._handles = {}
def close(self, all_fds=False):
if all_fds:
for fd in self._handles.keys():
try:
os.close(fd)
except Exception:
gen_log.debug("error closing fd %s", fd, exc_info=True)
def _handle_events(self, fd, events, callback):
callback(fd, events)
return True
def add_handler(self, fd, callback, events):
handle = gobject.io_add_watch(
fd, events | self.ERROR, self._handle_events, callback)
self._handles[fd] = handle, callback
def update_handler(self, fd, events):
handle, callback = self._handles.pop(fd)
gobject.source_remove(handle)
self.add_handler(fd, callback, events)
def remove_handler(self, fd):
handle, _ = self._handles.pop(fd)
gobject.source_remove(handle)
def start(self):
if not logging.getLogger().handlers: # pragma: no cover
logging.basicConfig()
gtk.main()
def stop(self):
gtk.main_quit()
def time(self):
return self.time_func()
def add_timeout(self, deadline, callback):
if isinstance(deadline, datetime.timedelta): # pragma: no cover
seconds = ioloop._Timeout.timedelta_to_seconds(deadline)
else:
seconds = deadline - self.time()
ms = max(0, int(seconds * 1000))
handle = gobject.timeout_add(ms, self._run_callback, callback)
return handle
def remove_timeout(self, handle):
gobject.source_remove(handle)
def add_callback(self, callback, *args, **kwargs):
callback = functools.partial(callback, *args, **kwargs)
gobject.timeout_add(0, self._run_callback, callback)
add_callback_from_signal = add_callback
|
from typing import Dict
import gym
import numpy as np
from ding.envs import ObsNormWrapper, RewardNormWrapper, DelayRewardWrapper, FinalEvalRewardEnv
def wrap_mujoco(
env_id,
norm_obs: Dict = dict(use_norm=False, ),
norm_reward: Dict = dict(use_norm=False, ),
delay_reward_step: int = 1
) -> gym.Env:
r"""
Overview:
Wrap Mujoco Env to preprocess env step's return info, e.g. observation normalization, reward normalization, etc.
Arguments:
- env_id (:obj:`str`): Mujoco environment id, for example "HalfCheetah-v3"
- norm_obs (:obj:`EasyDict`): Whether to normalize observation or not
- norm_reward (:obj:`EasyDict`): Whether to normalize reward or not. For evaluator, environment's reward \
should not be normalized: Either ``norm_reward`` is None or ``norm_reward.use_norm`` is False can do this.
Returns:
- wrapped_env (:obj:`gym.Env`): The wrapped mujoco environment
"""
env = gym.make(env_id)
env = FinalEvalRewardEnv(env)
if norm_obs is not None and norm_obs.use_norm:
env = ObsNormWrapper(env)
if norm_reward is not None and norm_reward.use_norm:
env = RewardNormWrapper(env, norm_reward.reward_discount)
if delay_reward_step > 1:
env = DelayRewardWrapper(env, delay_reward_step)
return env
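# Example (illustrative sketch; assumes the HalfCheetah-v3 asset is installed and
# that the norm dicts support attribute access, e.g. ding's EasyDict):
# env = wrap_mujoco('HalfCheetah-v3',
#                   norm_obs=EasyDict(use_norm=True),
#                   norm_reward=EasyDict(use_norm=True, reward_discount=0.99),
#                   delay_reward_step=1)
# obs = env.reset()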
|
import sys
from io import StringIO
from PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QObject
from PyQt5.QtGui import QCloseEvent, QShowEvent, QTextCursor, QTextOption
from PyQt5.QtWidgets import qApp, QDialog, QDialogButtonBox, QStyleFactory, QTextEdit, QVBoxLayout
# documentation from vidcutter... let's see if this works
class VideoPanel(QTextEdit):
def __init__(self, parent=None):
super(VideoPanel, self).__init__(parent)
self._buffer = StringIO()
self.setReadOnly(True)
self.setWordWrapMode(QTextOption.WordWrap)
self.setStyleSheet('QTextEdit { font-family:monospace; font-size:%s; }'
% ('10pt' if sys.platform == 'darwin' else '8pt'))
def __getattr__(self, item):
return getattr(self._buffer, item)
class ConsoleWidget(QDialog):
def __init__(self, parent=None, flags=Qt.Dialog | Qt.WindowCloseButtonHint):
super(ConsoleWidget, self).__init__(parent, flags)
self.parent = parent
self.edit = VideoPanel(self)
buttons = QDialogButtonBox()
buttons.setCenterButtons(True)
clearButton = buttons.addButton('Clear', QDialogButtonBox.ResetRole)
clearButton.clicked.connect(self.edit.clear)
closeButton = buttons.addButton(QDialogButtonBox.Close)
closeButton.clicked.connect(self.close)
closeButton.setDefault(True)
layout = QVBoxLayout()
layout.addWidget(self.edit)
layout.addWidget(buttons)
self.setLayout(layout)
self.setWindowTitle('{0} Console'.format(qApp.applicationName()))
self.setWindowModality(Qt.NonModal)
def showEvent(self, event: QShowEvent):
self.parent.consoleLogger.flush()
super(ConsoleWidget, self).showEvent(event)
def closeEvent(self, event: QCloseEvent):
self.parent.cutter.consoleButton.setChecked(False)
        super(ConsoleWidget, self).closeEvent(event)
|
''' Distutils / setuptools helpers '''
import os
from os.path import join as pjoin, split as psplit, splitext
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
from distutils.version import LooseVersion
from distutils.command.build_py import build_py
from distutils.command.install_scripts import install_scripts
from distutils import log
def get_comrec_build(pkg_dir, build_cmd=build_py):
""" Return extended build command class for recording commit
The extended command tries to run git to find the current commit, getting
the empty string if it fails. It then writes the commit hash into a file
in the `pkg_dir` path, named ``COMMIT_INFO.txt``.
In due course this information can be used by the package after it is
installed, to tell you what commit it was installed from if known.
To make use of this system, you need a package with a COMMIT_INFO.txt file -
e.g. ``myproject/COMMIT_INFO.txt`` - that might well look like this::
# This is an ini file that may contain information about the code state
[commit hash]
# The line below may contain a valid hash if it has been substituted during 'git archive'
archive_subst_hash=$Format:%h$
# This line may be modified by the install process
install_hash=
The COMMIT_INFO file above is also designed to be used with git substitution
- so you probably also want a ``.gitattributes`` file in the root directory
of your working tree that contains something like this::
myproject/COMMIT_INFO.txt export-subst
That will cause the ``COMMIT_INFO.txt`` file to get filled in by ``git
archive`` - useful in case someone makes such an archive - for example via
the github 'download source' button.
Although all the above will work as is, you might consider having something
like a ``get_info()`` function in your package to display the commit
information at the terminal. See the ``pkg_info.py`` module in the nipy
package for an example.
"""
class MyBuildPy(build_cmd):
''' Subclass to write commit data into installation tree '''
def run(self):
build_cmd.run(self)
import subprocess
proc = subprocess.Popen('git rev-parse --short HEAD',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
repo_commit, _ = proc.communicate()
            # Fix for python 3: communicate() returns bytes
            repo_commit = repo_commit.decode('utf-8').strip()
# We write the installation commit even if it's empty
cfg_parser = ConfigParser()
cfg_parser.read(pjoin(pkg_dir, 'COMMIT_INFO.txt'))
cfg_parser.set('commit hash', 'install_hash', repo_commit)
out_pth = pjoin(self.build_lib, pkg_dir, 'COMMIT_INFO.txt')
cfg_parser.write(open(out_pth, 'wt'))
return MyBuildPy
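# Usage sketch (illustrative package name, not part of this module): in setup.py,
#     from distutils.core import setup
#     setup(name='myproject',
#           packages=['myproject'],
#           cmdclass={'build_py': get_comrec_build('myproject')})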
# Dependency checks
def package_check(pkg_name, version=None,
optional=False,
checker=LooseVersion,
version_getter=None,
messages=None
):
''' Check if package `pkg_name` is present, and correct version
Parameters
----------
pkg_name : str
name of package as imported into python
version : {None, str}, optional
minimum version of the package that we require. If None, we don't
check the version. Default is None
optional : {False, True}, optional
If False, raise error for absent package or wrong version;
otherwise warn
checker : callable, optional
callable with which to return comparable thing from version
string. Default is ``distutils.version.LooseVersion``
version_getter : {None, callable}:
Callable that takes `pkg_name` as argument, and returns the
package version string - as in::
             version = version_getter(pkg_name)
        If None, equivalent to::
            mod = __import__(pkg_name); version = mod.__version__
messages : None or dict, optional
dictionary giving output messages
'''
if version_getter is None:
def version_getter(pkg_name):
mod = __import__(pkg_name)
return mod.__version__
if messages is None:
messages = {}
msgs = {
'missing': 'Cannot import package "%s" - is it installed?',
'missing opt': 'Missing optional package "%s"',
'opt suffix' : '; you may get run-time errors',
'version too old': 'You have version %s of package "%s"'
' but we need version >= %s', }
msgs.update(messages)
try:
__import__(pkg_name)
except ImportError:
if not optional:
raise RuntimeError(msgs['missing'] % pkg_name)
log.warn(msgs['missing opt'] % pkg_name +
msgs['opt suffix'])
return
if not version:
return
try:
have_version = version_getter(pkg_name)
except AttributeError:
raise RuntimeError('Cannot find version for %s' % pkg_name)
if checker(have_version) < checker(version):
if optional:
log.warn(msgs['version too old'] % (have_version,
pkg_name,
version)
+ msgs['opt suffix'])
else:
raise RuntimeError(msgs['version too old'] % (have_version,
pkg_name,
version))
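# Usage sketch (version numbers are illustrative): fail the build if numpy is
# missing or too old, but only warn for an optional package.
#     package_check('numpy', version='1.2')
#     package_check('matplotlib', optional=True)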
BAT_TEMPLATE = \
r"""@echo off
REM wrapper to use shebang first line of {FNAME}
set mypath=%~dp0
set pyscript="%mypath%{FNAME}"
set /p line1=<%pyscript%
if "%line1:~0,2%" == "#!" (goto :goodstart)
echo First line of %pyscript% does not start with "#!"
exit /b 1
:goodstart
set py_exe=%line1:~2%
call %py_exe% %pyscript% %*
"""
class install_scripts_bat(install_scripts):
""" Make scripts executable on Windows
Scripts are bare file names without extension on Unix, fitting (for example)
Debian rules. They identify as python scripts with the usual ``#!`` first
line. Unix recognizes and uses this first "shebang" line, but Windows does
not. So, on Windows only we add a ``.bat`` wrapper of name
``bare_script_name.bat`` to call ``bare_script_name`` using the python
interpreter from the #! first line of the script.
Notes
-----
See discussion at
http://matthew-brett.github.com/pydagogue/installing_scripts.html and
example at git://github.com/matthew-brett/myscripter.git for more
background.
"""
def run(self):
install_scripts.run(self)
if not os.name == "nt":
return
for filepath in self.get_outputs():
# If we can find an executable name in the #! top line of the script
# file, make .bat wrapper for script.
with open(filepath, 'rt') as fobj:
first_line = fobj.readline()
if not (first_line.startswith('#!') and
'python' in first_line.lower()):
log.info("No #!python executable found, skipping .bat "
"wrapper")
continue
pth, fname = psplit(filepath)
froot, ext = splitext(fname)
bat_file = pjoin(pth, froot + '.bat')
bat_contents = BAT_TEMPLATE.replace('{FNAME}', fname)
log.info("Making %s wrapper for %s" % (bat_file, filepath))
if self.dry_run:
continue
with open(bat_file, 'wt') as fobj:
fobj.write(bat_contents)
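# Usage sketch: wire both helpers into setup() (package/script names are illustrative):
#     setup(name='myproject',
#           packages=['myproject'],
#           scripts=['bin/myscript'],
#           cmdclass={'build_py': get_comrec_build('myproject'),
#                     'install_scripts': install_scripts_bat})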
|
import numpy as np
mu = 0.1
nu = 0.2
lam = 0.3
G = np.array([[1,0,0],[0,1,0],[mu,nu,lam]])
G_inv_T = np.linalg.inv(G.T)
print(G, "G")
print(G_inv_T, "G_inv_T")
# print(G.dot(G_inv_T.T), "G.dot(G_inv_T)")
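# Sanity check of the commented-out line above: since G_inv_T = inv(G.T),
# G.dot(G_inv_T.T) should be (numerically) the 3x3 identity matrix.
print(np.allclose(G.dot(G_inv_T.T), np.eye(3)), "G @ G_inv_T.T == I")
|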
"""Host built-in actions."""
from __future__ import annotations
import random
from typing import TYPE_CHECKING, List, Union
from voiceassistant.skills.create import action
if TYPE_CHECKING:
from voiceassistant.core import VoiceAssistant
@action("say")
def say(vass: VoiceAssistant, text: Union[str, List[str]]) -> None:
"""Say a text or randamly chosen text."""
if isinstance(text, str):
vass.interfaces.speech.output(text)
elif isinstance(text, list):
vass.interfaces.speech.output(random.choice(text))
|
import os, sys, importlib
from csalt.synthesize import make_template, make_data
from csalt.utils import cubestats, img_cube
sys.path.append('configs/')
cfg = 'FITStest'
# only need to do this **one time**.
make_template(cfg)
# impose a model onto the synthetic tracks
make_data(cfg, mtype='FITS', new_template=False)
# image the cube
inp = importlib.import_module('gen_'+cfg)
img_cube(inp.reduced_dir+inp.basename+'/'+inp.basename+'.noisy',
inp.reduced_dir+inp.basename+'/images/'+inp.basename+'.noisy',
'gen_'+cfg, masktype='kep')
|
# TODO can we use explicit imports?
from bigchaindb.db.utils import *
|
from xml.etree import cElementTree as ElementTree
from casexml.apps.case.mock import CaseBlock
from dimagi.utils.chunked import chunked
from corehq.apps.hqcase.utils import submit_case_blocks
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.apps.locations.models import SQLLocation
from custom.covid.management.commands.update_cases import CaseUpdateCommand
BATCH_SIZE = 100
DEVICE_ID = __name__ + ".update_owner_ids"
CHILD_LOCATION_TYPE = 'investigators'
class Command(CaseUpdateCommand):
help = f"Changes the owner_id of a case to the location_id of the child location with type " \
f"{CHILD_LOCATION_TYPE} of the current location"
def case_block(self, case, child_location):
return ElementTree.tostring(CaseBlock.deprecated_init(
create=False,
case_id=case.case_id,
owner_id=child_location.location_id,
).as_xml()).decode('utf-8')
def update_cases(self, domain, case_type, user_id):
case_ids = self.find_case_ids_by_type(domain, case_type)
accessor = CaseAccessors(domain)
locations_objects = {}
case_blocks = []
skip_count = 0
for case in accessor.iter_cases(case_ids):
owner_id = case.get_case_property('owner_id')
if owner_id in locations_objects:
location_obj = locations_objects[owner_id]
else:
                try:
                    location_obj = SQLLocation.objects.get(location_id=owner_id)
                except SQLLocation.DoesNotExist:
                    location_obj = None
                locations_objects[owner_id] = location_obj
if location_obj:
children = location_obj.get_children()
has_correct_child_location_type = False
for child_location in children:
if child_location.location_type.code == CHILD_LOCATION_TYPE:
case_blocks.append(self.case_block(case, child_location))
has_correct_child_location_type = True
break
if not has_correct_child_location_type:
skip_count += 1
else:
skip_count += 1
print(f"{len(case_blocks)} to update in {domain}, {skip_count} cases have skipped due to unknown owner_id"
f" or has no child location of type {CHILD_LOCATION_TYPE}.")
total = 0
for chunk in chunked(case_blocks, BATCH_SIZE):
submit_case_blocks(chunk, domain, device_id=DEVICE_ID, user_id=user_id)
total += len(chunk)
print("Updated {} cases on domain {}".format(total, domain))
|
#! /usr/bin/env python3
import os
import numpy as np
from scipy.io import loadmat
from sklearn import linear_model
from sklearn import svm
from sklearn.neural_network import MLPRegressor as nnr
import pickle as cPickle  # cPickle was merged into pickle in Python 3
from scipy import misc
import glob
import sys
import random
from computeOrupdateERD import FindNeighbors
from computeOrupdateERD import ComputeRecons
from computeFeatures import computeFeatures
from computeDifference import computeDifference
def performTraining(MeasurementPercentageVector,TrainingDataPath,ImageType,ImageExtension,SizeImage,TrainingInfo,Resolution,WindowSize,c_vec,PercOfRD):
ImNum = 0
loadPathImage = TrainingDataPath + 'Images' + os.path.sep
NumTrainingImages = np.size(glob.glob(loadPathImage + '*' + ImageExtension))
for image_path in glob.glob(loadPathImage + '*' + ImageExtension):
if ImageExtension=='.mat':
ImgDat=loadmat(image_path)
Img=ImgDat['img']
else:
Img = misc.imread(image_path)
if SizeImage[0]!=Img.shape[0] or SizeImage[1]!=Img.shape[1]:
sys.exit('Error!!! The dimensions you entered in "SizeImage" do not match the dimensions of the training images')
if not os.path.exists(TrainingDataPath + 'FeaturesRegressCoeffs'):
os.makedirs(TrainingDataPath + 'FeaturesRegressCoeffs')
for m in range(0,np.size(MeasurementPercentageVector)):
SaveFolder = 'Image_' + str(ImNum+1) + '_Perc_' + str(MeasurementPercentageVector[m])
SavePath = TrainingDataPath + 'FeaturesRegressCoeffs' + os.path.sep + SaveFolder
if not os.path.exists(SavePath):
os.makedirs(SavePath)
Mask = np.zeros((SizeImage[0],SizeImage[1]))
UnifMatrix = np.random.rand(SizeImage[0],SizeImage[1])
Mask = UnifMatrix<(MeasurementPercentageVector[m]/100)
MeasuredIdxs = np.transpose(np.where(Mask==1))
UnMeasuredIdxs = np.transpose(np.where(Mask==0))
MeasuredValues = Img[Mask==1]
NeighborValues,NeighborWeights,NeighborDistances = FindNeighbors(TrainingInfo,MeasuredIdxs,UnMeasuredIdxs,MeasuredValues,Resolution)
ReconValues,ReconImage = ComputeRecons(TrainingInfo,NeighborValues,NeighborWeights,SizeImage,UnMeasuredIdxs,MeasuredIdxs,MeasuredValues)
AllPolyFeatures=computeFeatures(MeasuredValues,MeasuredIdxs,UnMeasuredIdxs,SizeImage,NeighborValues,NeighborWeights,NeighborDistances,TrainingInfo,ReconValues,ReconImage,Resolution,ImageType)
NumRandChoices = int(PercOfRD*MeasurementPercentageVector[m]*SizeImage[1]*SizeImage[0]/(100*100))
OrderForRD = random.sample(range(0,UnMeasuredIdxs.shape[0]), NumRandChoices)
PolyFeatures = AllPolyFeatures[OrderForRD,:]
RDPP = computeDifference(Img,ReconImage,ImageType)+0
RDPP.astype(int)
RDPPWithZeros = np.lib.pad(RDPP,(int(np.floor(WindowSize[0]/2)),int(np.floor(WindowSize[1]/2))),'constant',constant_values=0)
ImgAsBlocks = im2col(RDPPWithZeros,WindowSize)
MaskVect = np.ravel(Mask)
ImgAsBlocksOnlyUnmeasured = ImgAsBlocks[:,np.logical_not(MaskVect)]
temp = np.zeros((WindowSize[0]*WindowSize[1],NumRandChoices))
for c in c_vec:
sigma = NeighborDistances[:,0]/c
                cnt = 0
for l in OrderForRD:
Filter = generateGaussianKernel(sigma[l],WindowSize)
temp[:,cnt] = ImgAsBlocksOnlyUnmeasured[:,l]*Filter
cnt=cnt+1
RD = np.sum(temp, axis=0)
SavePath_c = SavePath + os.path.sep + 'c_' + str(c)
if not os.path.exists(SavePath_c):
os.makedirs(SavePath_c)
np.save(SavePath_c + os.path.sep + 'RD', RD)
np.save(SavePath_c + os.path.sep + 'OrderForRD', OrderForRD)
np.save(SavePath + os.path.sep + 'Mask', Mask)
np.save(SavePath + os.path.sep + 'ReconImage', ReconImage)
np.save(SavePath + os.path.sep + 'PolyFeatures', PolyFeatures)
if ImNum == 0:
print('Feature Extraction Complete for ' + str(ImNum+1) + ' Image' )
else:
print('Feature Extraction Complete for ' + str(ImNum+1) + ' Images' )
ImNum = ImNum + 1
try:
Img
except NameError:
        sys.exit('Error!!! There are no images in ' + loadPathImage + ' that have the extension ' + ImageExtension)
for c in c_vec:
FirstLoop = 1
for ImNum in range(0,NumTrainingImages):
for m in range(0,np.size(MeasurementPercentageVector)):
LoadFolder = 'Image_' + str(ImNum+1) + '_Perc_' + str(MeasurementPercentageVector[m])
LoadPath = TrainingDataPath + 'FeaturesRegressCoeffs' + os.path.sep + LoadFolder
PolyFeatures = np.load(LoadPath + os.path.sep + 'PolyFeatures.npy')
LoadPath_c = LoadPath + os.path.sep + 'c_' + str(c)
RD = np.load(LoadPath_c + os.path.sep + 'RD.npy')
if ImageType=='D':
if FirstLoop==1:
BigPolyFeatures = np.column_stack((PolyFeatures[:,0:25],PolyFeatures[:,26]))
BigRD = RD
FirstLoop = 0
else:
TempPolyFeatures = np.column_stack((PolyFeatures[:,0:25],PolyFeatures[:,26]))
BigPolyFeatures = np.row_stack((BigPolyFeatures,TempPolyFeatures))
BigRD = np.append(BigRD,RD)
else:
if FirstLoop==1:
BigPolyFeatures = PolyFeatures
BigRD = RD
FirstLoop = 0
else:
TempPolyFeatures = PolyFeatures
BigPolyFeatures = np.row_stack((BigPolyFeatures,TempPolyFeatures))
BigRD = np.append(BigRD,RD)
#regr = linear_model.LinearRegression()
# regr = svm.SVR(kernel='rbf')
regr = nnr(activation='identity', solver='adam', alpha=1e-5, hidden_layer_sizes=(50, 5), random_state=1, max_iter=500)
regr.fit(BigPolyFeatures, BigRD)
# Theta = np.zeros((PolyFeatures.shape[1]))
# if ImageType=='D':
# Theta[0:24]=regr.coef_[0:24]
# Theta[26]=regr.coef_[25]
# else:
# Theta = regr.coef_
SavePath_c = TrainingDataPath + os.path.sep + 'c_' + str(c)
del BigRD,BigPolyFeatures
if not os.path.exists(SavePath_c):
os.makedirs(SavePath_c)
# np.save(SavePath_c + os.path.sep + 'Theta', Theta)
with open(SavePath_c + os.path.sep + 'Theta.pkl', 'wb') as fid:
cPickle.dump(regr, fid)
print("Regressions Complete for c = " + str(c))
def im2col(Matrix, WindowSize):
    # Rearranges sliding WindowSize blocks of Matrix into columns (MATLAB im2col 'sliding').
    M, N = Matrix.shape
    col_extent = N - WindowSize[1] + 1
    row_extent = M - WindowSize[0] + 1
    start_idx = np.arange(WindowSize[0])[:, None]*N + np.arange(WindowSize[1])
    offset_idx = np.arange(row_extent)[:, None]*N + np.arange(col_extent)
    out = np.take(Matrix, start_idx.ravel()[:, None] + offset_idx.ravel())
    return out
# http://stackoverflow.com/questions/30109068/implement-matlabs-im2col-sliding-in-python
def generateGaussianKernel(sigma,WindowSize):
FilterMat = np.ones((WindowSize[0],WindowSize[1]))
for i in range(0,WindowSize[0]):
for j in range(0,WindowSize[1]):
FilterMat[i][j]=np.exp( -(1/(2*sigma**2)) * np.absolute( ( (i-np.floor(WindowSize[0]/2))**2 + (j-np.floor(WindowSize[1]/2))**2 ) ) )
FilterMat = FilterMat/np.amax(FilterMat)
FilterMat = np.transpose(FilterMat)
Filter=np.ravel(FilterMat)
return Filter
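if __name__ == '__main__':
    # Quick self-check of the helpers above (illustrative values only; assumes the
    # project's local modules imported at the top of this file are importable).
    # Each column of im2col's output is one flattened WindowSize patch of the input.
    demo = np.arange(9).reshape(3, 3)
    patches = im2col(demo, (2, 2))
    print(patches.shape)  # (4, 4): 2*2 window values x 4 window positions
    print(generateGaussianKernel(1.0, (3, 3)).reshape(3, 3))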
|
# Most lines' Ref-IDs refer back to the same original ID, which means
# those lines' User and Resource values are the same. For this reason,
# we can exclude User and Resource from the transaction. When a node
# queries by User or Resource, baseline4 first gets the original
# Node+ID, then uses Node+RID to query the additional results and unions them.
# baseline4_1
# adds a Node+RID stream
# this method saves the data on the blockchain, so the user can query
# the Node+RID result directly from the blockchain instead of sorting
# it in memory
from config import ATTRIBUTE, ATTRIBUTE_NAME, MAX_RESULT
from util import getData, createStream, ENCODE_FORMAT, database
from sortedcontainers import SortedList
DO_VALIDATION = True
att_dict = {key: value for key, value in zip(ATTRIBUTE, ATTRIBUTE_NAME)}
att_name_index = {value: counter for counter,
value in enumerate(ATTRIBUTE_NAME)}
# UID = 'UID'
NRID = 'NRID'
def createStreams(api):
for att in ATTRIBUTE_NAME:
createStream(api, att)
# createStream(api, UID)
createStream(api, NRID)
def insert(api, data):
result = api.listunspent(0)
txid = result["result"][0]["txid"]
vout = result["result"][0]["vout"]
address = api.getaddresses()["result"][0]
for line in data:
hexstr = line.encode(ENCODE_FORMAT).hex()
values = line.split(" ")
data = []
short = ATTRIBUTE_NAME.copy()
short.remove('User')
short.remove('Resource')
uid = values[1] + 'N' + values[2]
if values[att_name_index['ID']] == values[att_name_index['Ref-ID']]:
for att, v in zip(ATTRIBUTE_NAME, values):
# if att == 'Timestamp' or att == 'ID':
# data.append(
# {"for": att, "key": v, "data": uid.encode(ENCODE_FORMAT).hex()})
# else:
data.append({"for": att, "key": v, "data": hexstr})
else:
for key in short:
# if key == 'Timestamp' or key == 'ID':
# data.append(
# {"for": key, "key": values[att_name_index[key]], "data": uid.encode(ENCODE_FORMAT).hex()})
# else:
data.append(
{"for": key, "key": values[att_name_index[key]], "data": hexstr})
nrid = values[1] + 'R' + values[3]
# data.append({"for": UID, "key": uid, "data": hexstr})
data.append({"for": NRID, "key": nrid, "data": hexstr})
txid = api.createrawtransaction(
[{'txid': txid, 'vout': vout}], {address: 0}, data, 'send')["result"]
vout = 0
# sort the result using RID in blockchain
def pointQuery(api, attribute, sort=False, reverse=False):
# result = []
result = api.liststreamkeyitems(
att_dict[attribute[0]], attribute[1:], False, MAX_RESULT)
result = getData(result["result"])
temp = []
# if attribute[0] == 'T' or attribute[0] == 'I':
# for uid in getData(api.liststreamkeyitems(
# att_dict[attribute[0]], attribute[1:], False, MAX_RESULT)["result"]):
# # print("Uid", uid)
# result += getData(api.liststreamkeyitems(UID,
# uid, False, MAX_RESULT)["result"])
# else:
# result += getData(api.liststreamkeyitems(
# att_dict[attribute[0]], attribute[1:], False, MAX_RESULT)["result"])
if attribute[0] == 'U' or attribute[0] == 'R':
for line in result:
node, _, RID = line.split(" ")[1:4]
nrid = node + 'R' + RID
RIDResult = getData(api.liststreamkeyitems(
NRID, nrid, False, MAX_RESULT)["result"])
temp += RIDResult
# for r in RIDResult:
# if r.split(" ")[1] == node:
# temp += [r]
result += temp
if DO_VALIDATION: # and attribute[0] != 'U' and attribute[0] != 'R':
if database.validate(result, attribute, True) is False:
print("Wrong!")
return result
def rangeQuery(api, start, end):
result = []
stream = att_dict['T']
timestamps = api.liststreamkeys(stream)["result"]
sl = SortedList(list(map(int, [key['key'] for key in timestamps])))
for timestamp in sl.irange(start, end):
result += getData(api.liststreamkeyitems(stream,
str(timestamp))['result'])
return result
def andQuery(api, attributes):
resultSet = []
for attr in attributes:
resultSet.append(set(pointQuery(api, attr)))
result = resultSet[0]
for i in range(1, len(resultSet)):
result &= resultSet[i]
return list(result)
def sortResult(results, attribute, reverse=False):
    # list.sort() sorts in place and returns None, so sort first and then return the list
    results.sort(reverse=reverse, key=lambda line: int(line.split(" ")[att_name_index[attribute]]))
    return results
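# Usage sketch (hypothetical: `api` is an RPC client compatible with the multichain-style
# calls used above, and ATTRIBUTE maps single-letter codes such as 'U' -> 'User'):
#     createStreams(api)
#     insert(api, open('access_log.txt').read().splitlines())
#     hits = pointQuery(api, 'Ualice')                  # all records for user "alice"
#     window = rangeQuery(api, 1600000000, 1600003600)  # records in a timestamp range
#     both = andQuery(api, ['Ualice', 'Rprinter'])      # records matching both attributes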
|
#!/usr/bin/env python
import sys
import os
import subprocess
import time
import argparse
import tempfile
import shutil
import glob
import multiprocessing
def ingest(ns):
procs = dict()
def spawn(afile):
print "Ingest %s" % afile
proc = subprocess.Popen([ns.script, afile])
procs[proc.pid] = proc
def reap_some():
done = []
while True:
for (pid, proc) in procs.items():
if proc.poll() is not None:
done.append(pid)
if len(done) > 0:
for pid in done:
del procs[pid]
break
else:
time.sleep(1)
for afile in glob.glob('{}/*'.format(ns.dir)):
if (len(procs) < ns.procs):
spawn(afile)
else:
reap_some()
spawn(afile)
while len(procs) > 0:
reap_some()
def main(args):
ncpus = multiprocessing.cpu_count()
parser = argparse.ArgumentParser(
prog = "ingest-parallel.py",
description = "ingest a dir of ndjson files in parallel"
)
parser.add_argument('-n', '--processes',
type = int,
default = ncpus,
dest = 'procs',
help = "Number of parallel ingestion processes; defaults to number of cpus")
parser.add_argument('script',
type = str,
help = "Ingest script; must accept ndjson filname")
parser.add_argument('dir',
type = str,
help = "input dir of ndjson files")
ns = parser.parse_args(args)
ingest(ns)
if __name__ == '__main__':
main(sys.argv[1:])
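# Example invocation (paths are illustrative): run up to 4 ingest processes,
# calling ./ingest.sh <file> once per ndjson file found in ./data:
#     ./ingest-parallel.py -n 4 ./ingest.sh ./data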
|
import os
import json
import pytest
import tempfile
from pathlib import Path
from jina.hubble import helper
@pytest.fixture
def dummy_zip_file():
return Path(__file__).parent / 'dummy_executor.zip'
def test_parse_hub_uri():
result = helper.parse_hub_uri('jinahub://hello')
assert result == ('jinahub', 'hello', None, None)
result = helper.parse_hub_uri('jinahub+docker://hello')
assert result == ('jinahub+docker', 'hello', None, None)
result = helper.parse_hub_uri('jinahub+docker://hello/world')
assert result == ('jinahub+docker', 'hello', 'world', None)
result = helper.parse_hub_uri('jinahub+docker://hello:magic/world')
assert result == ('jinahub+docker', 'hello', 'world', 'magic')
def test_md5file(dummy_zip_file):
md5sum = helper.md5file(dummy_zip_file)
assert md5sum == '4cda7063c8f81d53c65d621ec1b29124'
def test_archive_package(tmpdir):
pkg_path = Path(__file__).parent / 'dummy_executor'
stream_data = helper.archive_package(pkg_path)
with open(tmpdir / 'dummy_test.zip', 'wb') as temp_zip_file:
temp_zip_file.write(stream_data.getvalue())
def test_unpack_package(tmpdir, dummy_zip_file):
helper.unpack_package(dummy_zip_file, tmpdir / 'dummp_executor')
|