code (stringlengths 3-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 3-1.05M)
---|---|---|---|---|---|
import re
import logging
import time
import uuid
import os
import tempfile
import json
import base64
from textwrap import dedent
from threading import Thread
from datetime import datetime
from distutils.version import LooseVersion # pylint: disable=no-name-in-module,import-error
import yaml
from botocore.exceptions import WaiterError, ClientError
import boto3
from sdcm import cluster
from sdcm import ec2_client
from sdcm.utils.common import retrying, list_instances_aws, get_ami_tags
from sdcm.sct_events import SpotTerminationEvent, DbEventsFilter
from sdcm import wait
from sdcm.remote import LocalCmdRunner
LOGGER = logging.getLogger(__name__)
INSTANCE_PROVISION_ON_DEMAND = 'on_demand'
INSTANCE_PROVISION_SPOT_FLEET = 'spot_fleet'
INSTANCE_PROVISION_SPOT_LOW_PRICE = 'spot_low_price'
INSTANCE_PROVISION_SPOT_DURATION = 'spot_duration'
SPOT_CNT_LIMIT = 20
SPOT_FLEET_LIMIT = 50
LOCAL_CMD_RUNNER = LocalCmdRunner()
def create_tags_list():
tags_list = [{'Key': k, 'Value': v} for k, v in cluster.create_common_tags().items()]
if cluster.TEST_DURATION >= 24 * 60 or cluster.Setup.KEEP_ALIVE:
tags_list.append({'Key': 'keep', 'Value': 'alive'})
return tags_list
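# Illustration (added; not part of the original module): create_tags_list() returns
# tags in the boto3 "list of Key/Value dicts" shape, roughly:
#     [{'Key': '<common tag name>', 'Value': '<value>'}, ..., {'Key': 'keep', 'Value': 'alive'}]
# The common tag names come from cluster.create_common_tags(); the 'keep: alive' tag
# is only appended for long tests (>= 24h) or when KEEP_ALIVE is set.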
class PublicIpNotReady(Exception):
pass
class AWSCluster(cluster.BaseCluster): # pylint: disable=too-many-instance-attributes,abstract-method,
"""
Cluster of Node objects, started on Amazon EC2.
"""
def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: disable=too-many-arguments
services, credentials, cluster_uuid=None,
ec2_instance_type='c4.xlarge', ec2_ami_username='root',
ec2_user_data='', ec2_block_device_mappings=None,
cluster_prefix='cluster',
node_prefix='node', n_nodes=10, params=None, node_type=None, aws_extra_network_interface=False):
# pylint: disable=too-many-locals
region_names = params.get('region_name').split()
if len(credentials) > 1 or len(region_names) > 1:
assert len(credentials) == len(region_names)
for idx, _ in enumerate(region_names):
credential = credentials[idx]
cluster.CREDENTIALS.append(credential)
self._ec2_ami_id = ec2_ami_id
self._ec2_subnet_id = ec2_subnet_id
self._ec2_security_group_ids = ec2_security_group_ids
self._ec2_services = services
self._credentials = credentials
self._reuse_credentials = None
self._ec2_instance_type = ec2_instance_type
self._ec2_ami_username = ec2_ami_username
if ec2_block_device_mappings is None:
ec2_block_device_mappings = []
self._ec2_block_device_mappings = ec2_block_device_mappings
self._ec2_user_data = ec2_user_data
self.region_names = region_names
self.instance_provision = params.get('instance_provision', default=INSTANCE_PROVISION_ON_DEMAND)
self.aws_extra_network_interface = aws_extra_network_interface
self.params = params
self.node_type = node_type
super(AWSCluster, self).__init__(cluster_uuid=cluster_uuid,
cluster_prefix=cluster_prefix,
node_prefix=node_prefix,
n_nodes=n_nodes,
params=params,
region_names=self.region_names)
def __str__(self):
return 'Cluster %s (AMI: %s Type: %s)' % (self.name,
self._ec2_ami_id,
self._ec2_instance_type)
def _create_on_demand_instances(self, count, interfaces, ec2_user_data, dc_idx=0, tags_list=None): # pylint: disable=too-many-arguments
ami_id = self._ec2_ami_id[dc_idx]
self.log.debug("Creating {count} on-demand instances using AMI id '{ami_id}'... ".format(**locals()))
instances = self._ec2_services[dc_idx].create_instances(ImageId=ami_id,
UserData=ec2_user_data,
MinCount=count,
MaxCount=count,
KeyName=self._credentials[dc_idx].key_pair_name,
BlockDeviceMappings=self._ec2_block_device_mappings,
NetworkInterfaces=interfaces,
InstanceType=self._ec2_instance_type,
TagSpecifications=[
{
'ResourceType': 'instance',
'Tags': tags_list if tags_list else []
},
],
)
self.log.debug("Created instances: %s." % instances)
return instances
def _create_spot_instances(self, count, interfaces, ec2_user_data='', dc_idx=0, tags_list=None): # pylint: disable=too-many-arguments
# pylint: disable=too-many-locals
ec2 = ec2_client.EC2Client(region_name=self.region_names[dc_idx],
spot_max_price_percentage=self.params.get('spot_max_price', default=0.60))
subnet_info = ec2.get_subnet_info(self._ec2_subnet_id[dc_idx])
spot_params = dict(instance_type=self._ec2_instance_type,
image_id=self._ec2_ami_id[dc_idx],
region_name=subnet_info['AvailabilityZone'],
network_if=interfaces,
key_pair=self._credentials[dc_idx].key_pair_name,
user_data=ec2_user_data,
count=count,
block_device_mappings=self._ec2_block_device_mappings,
tags_list=tags_list if tags_list else [])
if self.instance_provision == INSTANCE_PROVISION_SPOT_DURATION:
# duration value must be a multiple of 60
spot_params.update({'duration': cluster.TEST_DURATION / 60 * 60 + 60})
limit = SPOT_FLEET_LIMIT if self.instance_provision == INSTANCE_PROVISION_SPOT_FLEET else SPOT_CNT_LIMIT
request_cnt = 1
tail_cnt = 0
if count > limit:
# pass common reservationid
spot_params['user_data'] += (' --customreservation %s' % str(uuid.uuid4())[:18])
self.log.debug("User_data to spot instances: '%s'", spot_params['user_data'])
request_cnt = count / limit
spot_params['count'] = limit
tail_cnt = count % limit
if tail_cnt:
request_cnt += 1
instances = []
for i in range(request_cnt):
if tail_cnt and i == request_cnt - 1:
spot_params['count'] = tail_cnt
try:
if self.instance_provision == INSTANCE_PROVISION_SPOT_FLEET and count > 1:
instances_i = ec2.create_spot_fleet(**spot_params)
else:
instances_i = ec2.create_spot_instances(**spot_params)
instances.extend(instances_i)
except ClientError as cl_ex:
if ec2_client.MAX_SPOT_EXCEEDED_ERROR in cl_ex.message:
                    self.log.debug('Cannot create spot instance(s): %s. '
                                   'Creating on-demand instance(s) instead.', cl_ex)
instances_i = self._create_on_demand_instances(
count, interfaces, ec2_user_data, dc_idx, tags_list=tags_list)
instances.extend(instances_i)
else:
raise
return instances
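    # Worked example (added for illustration): with SPOT_CNT_LIMIT = 20, a call asking
    # for count=45 low-price spot instances is split into chunked requests:
    # request_cnt = 45 / 20 = 2 (integer division), tail_cnt = 45 % 20 = 5, so request_cnt
    # becomes 3 and the loop issues requests for 20, 20 and 5 instances, all sharing one
    # generated --customreservation id in the user-data.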
def _create_instances(self, count, ec2_user_data='', dc_idx=0):
tags_list = create_tags_list()
tags_list.append({'Key': 'NodeType', 'Value': self.node_type})
if not ec2_user_data:
ec2_user_data = self._ec2_user_data
self.log.debug("Passing user_data '%s' to create_instances", ec2_user_data)
interfaces = [{'DeviceIndex': 0,
'SubnetId': self._ec2_subnet_id[dc_idx],
'AssociatePublicIpAddress': True,
'Groups': self._ec2_security_group_ids[dc_idx]}]
if self.aws_extra_network_interface:
interfaces = [{'DeviceIndex': 0,
'SubnetId': self._ec2_subnet_id[dc_idx],
'Groups': self._ec2_security_group_ids[dc_idx]},
{'DeviceIndex': 1,
'SubnetId': self._ec2_subnet_id[dc_idx],
'Groups': self._ec2_security_group_ids[dc_idx]}]
if self.instance_provision == 'mixed':
instances = self._create_mixed_instances(count, interfaces, ec2_user_data, dc_idx, tags_list=tags_list)
elif self.instance_provision == INSTANCE_PROVISION_ON_DEMAND:
instances = self._create_on_demand_instances(count, interfaces, ec2_user_data, dc_idx, tags_list=tags_list)
else:
instances = self._create_spot_instances(count, interfaces, ec2_user_data, dc_idx, tags_list=tags_list)
return instances
def _create_mixed_instances(self, count, interfaces, ec2_user_data, dc_idx, tags_list=None): # pylint: disable=too-many-arguments
tags_list = tags_list if tags_list else []
instances = []
max_num_on_demand = 2
if isinstance(self, (ScyllaAWSCluster, CassandraAWSCluster)):
if count > 2:
count_on_demand = max_num_on_demand
elif count == 2:
count_on_demand = 1
else:
count_on_demand = 0
if self.nodes:
num_of_on_demand = len([node for node in self.nodes if not node.is_spot])
if num_of_on_demand < max_num_on_demand:
count_on_demand = max_num_on_demand - num_of_on_demand
else:
count_on_demand = 0
count_spot = count - count_on_demand
if count_spot > 0:
self.instance_provision = INSTANCE_PROVISION_SPOT_LOW_PRICE
instances.extend(self._create_spot_instances(
count_spot, interfaces, ec2_user_data, dc_idx, tags_list=tags_list))
if count_on_demand > 0:
self.instance_provision = INSTANCE_PROVISION_ON_DEMAND
instances.extend(self._create_on_demand_instances(
count_on_demand, interfaces, ec2_user_data, dc_idx, tags_list=tags_list))
self.instance_provision = 'mixed'
elif isinstance(self, LoaderSetAWS):
self.instance_provision = INSTANCE_PROVISION_SPOT_LOW_PRICE
instances = self._create_spot_instances(count, interfaces, ec2_user_data, dc_idx, tags_list=tags_list)
elif isinstance(self, MonitorSetAWS):
self.instance_provision = INSTANCE_PROVISION_ON_DEMAND
instances.extend(self._create_on_demand_instances(
count, interfaces, ec2_user_data, dc_idx, tags_list=tags_list))
else:
            raise Exception('Unsupported cluster type: %s' % self)
return instances
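    # Worked example (added for illustration): for a ScyllaAWSCluster with no existing
    # nodes and count=5, the split above gives count_on_demand=2 and count_spot=3, i.e.
    # at most two on-demand nodes per DB cluster, with the rest provisioned as
    # low-price spot instances.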
def _get_instances(self, dc_idx):
test_id = cluster.Setup.test_id()
if not test_id:
raise ValueError("test_id should be configured for using reuse_cluster")
ec2 = ec2_client.EC2Client(region_name=self.region_names[dc_idx],
spot_max_price_percentage=self.params.get('spot_max_price', default=0.60))
results = list_instances_aws(tags_dict={'TestId': test_id, 'NodeType': self.node_type},
region_name=self.region_names[dc_idx], group_as_region=True)
instances = results[self.region_names[dc_idx]]
def sort_by_index(item):
for tag in item['Tags']:
if tag['Key'] == 'NodeIndex':
return tag['Value']
return '0'
instances = sorted(instances, key=sort_by_index)
return [ec2.get_instance(instance['InstanceId']) for instance in instances]
@staticmethod
def update_bootstrap(ec2_user_data, enable_auto_bootstrap):
"""
Update --bootstrap argument inside ec2_user_data string.
"""
if isinstance(ec2_user_data, dict):
ec2_user_data['scylla_yaml']['auto_bootstrap'] = enable_auto_bootstrap
return ec2_user_data
if enable_auto_bootstrap:
if '--bootstrap ' in ec2_user_data:
                ec2_user_data = ec2_user_data.replace('--bootstrap false', '--bootstrap true')
else:
ec2_user_data += ' --bootstrap true '
else:
if '--bootstrap ' in ec2_user_data:
                ec2_user_data = ec2_user_data.replace('--bootstrap true', '--bootstrap false')
else:
ec2_user_data += ' --bootstrap false '
return ec2_user_data
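    # Usage sketch (added for illustration; the sample user-data strings are hypothetical):
    #     >>> AWSCluster.update_bootstrap('--clustername c1 --bootstrap false', True)
    #     '--clustername c1 --bootstrap true'
    #     >>> AWSCluster.update_bootstrap({'scylla_yaml': {}}, False)
    #     {'scylla_yaml': {'auto_bootstrap': False}}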
@staticmethod
def configure_eth1_script():
return dedent("""
BASE_EC2_NETWORK_URL=http://169.254.169.254/latest/meta-data/network/interfaces/macs/
NUMBER_OF_ENI=`curl -s ${BASE_EC2_NETWORK_URL} | wc -w`
for mac in `curl -s ${BASE_EC2_NETWORK_URL}`
do
DEVICE_NUMBER=`curl -s ${BASE_EC2_NETWORK_URL}${mac}/device-number`
if [[ "$DEVICE_NUMBER" == "1" ]]; then
ETH1_MAC=${mac}
fi
done
if [[ ! "${DEVICE_NUMBER}x" == "x" ]]; then
ETH1_IP_ADDRESS=`curl -s ${BASE_EC2_NETWORK_URL}${ETH1_MAC}/local-ipv4s`
ETH1_CIDR_BLOCK=`curl -s ${BASE_EC2_NETWORK_URL}${ETH1_MAC}/subnet-ipv4-cidr-block`
fi
sudo bash -c "echo 'GATEWAYDEV=eth0' >> /etc/sysconfig/network"
echo "
DEVICE="eth1"
BOOTPROTO="dhcp"
ONBOOT="yes"
TYPE="Ethernet"
USERCTL="yes"
PEERDNS="yes"
IPV6INIT="no"
PERSISTENT_DHCLIENT="1"
" > /etc/sysconfig/network-scripts/ifcfg-eth1
echo "
default via 10.0.0.1 dev eth1 table 2
${ETH1_CIDR_BLOCK} dev eth1 src ${ETH1_IP_ADDRESS} table 2
" > /etc/sysconfig/network-scripts/route-eth1
echo "
from ${ETH1_IP_ADDRESS}/32 table 2
" > /etc/sysconfig/network-scripts/rule-eth1
sudo systemctl restart network
""")
def add_nodes(self, count, ec2_user_data='', dc_idx=0, enable_auto_bootstrap=False):
post_boot_script = cluster.Setup.get_startup_script()
if self.aws_extra_network_interface:
post_boot_script += self.configure_eth1_script()
if isinstance(ec2_user_data, dict):
ec2_user_data['post_configuration_script'] = base64.b64encode(post_boot_script).decode()
ec2_user_data = json.dumps(ec2_user_data)
else:
if 'clustername' in ec2_user_data:
ec2_user_data += " --base64postscript={0}".format(base64.b64encode(post_boot_script))
else:
ec2_user_data = post_boot_script
if cluster.Setup.REUSE_CLUSTER:
instances = self._get_instances(dc_idx)
else:
instances = self._create_instances(count, ec2_user_data, dc_idx)
added_nodes = [self._create_node(instance, self._ec2_ami_username,
self.node_prefix, node_index,
self.logdir, dc_idx=dc_idx)
for node_index, instance in
enumerate(instances, start=self._node_index + 1)]
for node in added_nodes:
node.enable_auto_bootstrap = enable_auto_bootstrap
self._node_index += len(added_nodes)
self.nodes += added_nodes
self.write_node_public_ip_file()
self.write_node_private_ip_file()
return added_nodes
def _create_node(self, instance, ami_username, node_prefix, node_index, # pylint: disable=too-many-arguments
base_logdir, dc_idx):
return AWSNode(ec2_instance=instance, ec2_service=self._ec2_services[dc_idx],
credentials=self._credentials[dc_idx], parent_cluster=self, ami_username=ami_username,
node_prefix=node_prefix, node_index=node_index,
base_logdir=base_logdir, dc_idx=dc_idx, node_type=self.node_type)
class AWSNode(cluster.BaseNode):
"""
Wraps EC2.Instance, so that we can also control the instance through SSH.
"""
def __init__(self, ec2_instance, ec2_service, credentials, parent_cluster, # pylint: disable=too-many-arguments
node_prefix='node', node_index=1, ami_username='root',
base_logdir=None, dc_idx=0, node_type=None):
name = '%s-%s' % (node_prefix, node_index)
self._instance = ec2_instance
self._ec2_service = ec2_service
LOGGER.debug("Waiting until instance {0._instance} starts running...".format(self))
self._instance_wait_safe(self._instance.wait_until_running)
self._eth1_private_ip_address = None
self.eip_allocation_id = None
if len(self._instance.network_interfaces) == 2:
self.allocate_and_attach_elastic_ip()
self._wait_public_ip()
ssh_login_info = {'hostname': None,
'user': ami_username,
'key_file': credentials.key_file}
self._spot_aws_termination_task = None
super(AWSNode, self).__init__(name=name,
parent_cluster=parent_cluster,
ssh_login_info=ssh_login_info,
base_logdir=base_logdir,
node_prefix=node_prefix,
dc_idx=dc_idx)
if not cluster.Setup.REUSE_CLUSTER:
tags_list = create_tags_list()
tags_list.append({'Key': 'Name', 'Value': name})
tags_list.append({'Key': 'NodeIndex', 'Value': str(node_index)})
tags_list.append({'Key': 'NodeType', 'Value': node_type})
if cluster.TEST_DURATION >= 24 * 60 or cluster.Setup.KEEP_ALIVE:
self.log.info('Test duration set to %s. '
'Keep cluster on failure %s. '
'Tagging node with {"keep": "alive"}',
cluster.TEST_DURATION, cluster.Setup.KEEP_ALIVE)
self._ec2_service.create_tags(Resources=[self._instance.id],
Tags=tags_list)
@property
def is_spot(self):
return bool(self._instance.instance_lifecycle and 'spot' in self._instance.instance_lifecycle.lower())
@property
def external_address(self):
"""
        The address used for communication between the test machine and the nodes.
        :return: the public or the private IP address, depending on the test configuration
"""
if cluster.IP_SSH_CONNECTIONS == 'public' or cluster.Setup.INTRA_NODE_COMM_PUBLIC:
return self.public_ip_address
else:
return self._instance.private_ip_address
@property
def public_ip_address(self):
return self._instance.public_ip_address
@property
def private_ip_address(self):
if self._eth1_private_ip_address:
return self._eth1_private_ip_address
return self._instance.private_ip_address
def _refresh_instance_state(self):
raise NotImplementedError()
def allocate_and_attach_elastic_ip(self):
primary_interface = [
interface for interface in self._instance.network_interfaces if interface.attachment['DeviceIndex'] == 0][0]
if primary_interface.association_attribute is None:
# create and attach EIP
client = boto3.client('ec2', region_name=self.parent_cluster.region_names[self.dc_idx])
response = client.allocate_address(Domain='vpc')
self.eip_allocation_id = response['AllocationId']
client.create_tags(
Resources=[
self.eip_allocation_id
],
Tags=create_tags_list()
)
client.associate_address(
AllocationId=self.eip_allocation_id,
NetworkInterfaceId=primary_interface.id,
)
self._eth1_private_ip_address = [interface for interface in self._instance.network_interfaces if
interface.attachment['DeviceIndex'] == 1][0].private_ip_address
def _instance_wait_safe(self, instance_method, *args, **kwargs):
"""
Wrapper around AWS instance waiters that is safer to use.
Since AWS adopts an eventual consistency model, sometimes the method
wait_until_running will raise a botocore.exceptions.WaiterError saying
the instance does not exist. AWS API guide [1] recommends that the
        procedure is retried using an exponential backoff algorithm [2].
:see: [1] http://docs.aws.amazon.com/AWSEC2/latest/APIReference/query-api-troubleshooting.html#eventual-consistency
:see: [2] http://docs.aws.amazon.com/general/latest/gr/api-retries.html
"""
threshold = 300
ok = False
retries = 0
max_retries = 9
while not ok and retries <= max_retries:
try:
instance_method(*args, **kwargs)
ok = True
except WaiterError:
time.sleep(min((2 ** retries) * 2, threshold))
retries += 1
if not ok:
try:
self._instance.reload()
except Exception as ex: # pylint: disable=broad-except
LOGGER.exception("Error while reloading instance metadata: %s", ex)
finally:
method_name = instance_method.__name__
instance_id = self._instance.id
LOGGER.debug(self._instance.meta.data)
msg = "Timeout while running '{method_name}' method on AWS instance '{instance_id}'".format(
method_name=method_name, instance_id=instance_id)
raise cluster.NodeError(msg)
@retrying(n=7, sleep_time=10, allowed_exceptions=(PublicIpNotReady,),
message="Waiting for instance to get public ip")
def _wait_public_ip(self):
self._instance.reload()
if self._instance.public_ip_address is None:
raise PublicIpNotReady(self._instance)
LOGGER.debug("[{0._instance}] Got public ip: {0._instance.public_ip_address}".format(self))
def restart(self):
        # We differentiate between "Restart" and "Reboot".
# Restart in AWS will be a Stop and Start of an instance.
        # When using storage-optimized instances like i2 or i3, the data on disk is deleted upon STOP. Therefore, we
        # need to set up the instance and treat it as a new instance.
if self._instance.spot_instance_request_id:
LOGGER.debug("target node is spot instance, impossible to stop this instance, skipping the restart")
return
event_filters = ()
if any(ss in self._instance.instance_type for ss in ['i3', 'i2']):
            # since there's no disk yet on those instance types, many of the errors here are acceptable, and we'll ignore them
event_filters = DbEventsFilter(type="DATABASE_ERROR"), DbEventsFilter(type="SCHEMA_FAILURE"), \
DbEventsFilter(type="NO_SPACE_ERROR"), DbEventsFilter(type="FILESYSTEM_ERROR")
clean_script = dedent("""
sudo sed -e '/.*scylla/s/^/#/g' -i /etc/fstab
sudo sed -e '/auto_bootstrap:.*/s/False/True/g' -i /etc/scylla/scylla.yaml
""")
self.remoter.run("sudo bash -cxe '%s'" % clean_script)
output = self.remoter.run('sudo grep replace_address: /etc/scylla/scylla.yaml', ignore_status=True)
if 'replace_address_first_boot:' not in output.stdout:
self.remoter.run('echo replace_address_first_boot: %s |sudo tee --append /etc/scylla/scylla.yaml' %
self._instance.private_ip_address)
self._instance.stop()
self._instance_wait_safe(self._instance.wait_until_stopped)
self._instance.start()
self._instance_wait_safe(self._instance.wait_until_running)
self._wait_public_ip()
self.log.debug('Got new public IP %s',
self._instance.public_ip_address)
self.remoter.hostname = self.external_address
self.wait_ssh_up()
if any(ss in self._instance.instance_type for ss in ['i3', 'i2']):
try:
self.stop_scylla_server(verify_down=False)
# the scylla_create_devices has been moved to the '/opt/scylladb' folder in the master branch
for create_devices_file in ['/usr/lib/scylla/scylla-ami/scylla_create_devices',
'/opt/scylladb/scylla-ami/scylla_create_devices']:
result = self.remoter.run('sudo test -e %s' % create_devices_file, ignore_status=True)
if result.exit_status == 0:
self.remoter.run('sudo %s' % create_devices_file)
break
else:
raise IOError('scylla_create_devices file isn\'t found')
self.start_scylla_server(verify_up=False)
self.remoter.run(
'sudo sed -i -e "s/replace_address_first_boot:/# replace_address_first_boot:/g" /etc/scylla/scylla.yaml')
self.remoter.run("sudo sed -e '/auto_bootstrap:.*/s/True/False/g' -i /etc/scylla/scylla.yaml")
finally:
if event_filters:
for event_filter in event_filters:
event_filter.cancel_filter()
def hard_reboot(self):
self._instance_wait_safe(self._instance.reboot)
def destroy(self):
self.stop_task_threads()
self._instance.terminate()
if self.eip_allocation_id:
client = boto3.client('ec2', region_name=self.parent_cluster.region_names[self.dc_idx])
response = client.release_address(AllocationId=self.eip_allocation_id)
self.log.debug("release elastic ip . Result: %s\n", response)
self.log.info('Destroyed')
def start_task_threads(self):
if self._instance.spot_instance_request_id and 'spot' in self._instance.instance_lifecycle.lower():
self.start_aws_termination_monitoring()
super(AWSNode, self).start_task_threads()
def stop_task_threads(self, timeout=10):
if self._spot_aws_termination_task and not self.termination_event.isSet():
self.termination_event.set()
self._spot_aws_termination_task.join(timeout)
super(AWSNode, self).stop_task_threads(timeout)
def get_aws_termination_notification(self): # pylint: disable=invalid-name
try:
result = self.remoter.run(
'curl http://169.254.169.254/latest/meta-data/spot/instance-action', verbose=False)
status = result.stdout.strip()
if '404 - Not Found' not in status:
return status
except Exception as details: # pylint: disable=broad-except
            self.log.warning('Error while getting AWS termination notification: %s' % details)
return None
def monitor_aws_termination_thread(self): # pylint: disable=invalid-name
while True:
duration = 5
if self.termination_event.isSet():
break
try:
self.wait_ssh_up(verbose=False)
except Exception as ex: # pylint: disable=broad-except
LOGGER.warning("Unable to connect to '%s'. Probably the node was terminated or is still booting. "
"Error details: '%s'", self.name, ex)
continue
aws_message = self.get_aws_termination_notification()
if aws_message:
self.log.warning('Got spot termination notification from AWS %s' % aws_message)
terminate_action = json.loads(aws_message)
terminate_action_timestamp = time.mktime(datetime.strptime(
terminate_action['time'], "%Y-%m-%dT%H:%M:%SZ").timetuple())
duration = terminate_action_timestamp - time.time() - 15
if duration <= 0:
duration = 5
terminate_action['time-left'] = terminate_action_timestamp - time.time()
SpotTerminationEvent(node=self, aws_message=terminate_action)
time.sleep(duration)
def start_aws_termination_monitoring(self): # pylint: disable=invalid-name
self._spot_aws_termination_task = Thread(target=self.monitor_aws_termination_thread)
self._spot_aws_termination_task.daemon = True
self._spot_aws_termination_task.start()
def get_console_output(self):
"""Get instance console Output
Get console output of instance which is printed during initiating and loading
Get only last 64KB of output data.
"""
result = self._ec2_service.meta.client.get_console_output(
InstanceId=self._instance.id,
)
console_output = result.get('Output', '')
if not console_output:
            self.log.warning('Failed to get console output')
return console_output
def get_console_screenshot(self):
result = self._ec2_service.meta.client.get_console_screenshot(
InstanceId=self._instance.id
)
imagedata = result.get('ImageData', '')
if not imagedata:
            self.log.warning('Failed to get console screenshot')
return imagedata
def traffic_control(self, tcconfig_params=None):
"""
run tcconfig locally to create tc commands, and run them on the node
:param tcconfig_params: commandline arguments for tcset, if None will call tcdel
:return: None
"""
self.remoter.run("sudo modprobe sch_netem")
if tcconfig_params is None:
tc_command = LOCAL_CMD_RUNNER.run("tcdel eth1 --tc-command", ignore_status=True).stdout
self.remoter.run('sudo bash -cxe "%s"' % tc_command, ignore_status=True)
else:
tc_command = LOCAL_CMD_RUNNER.run("tcset eth1 {} --tc-command".format(tcconfig_params)).stdout
self.remoter.run('sudo bash -cxe "%s"' % tc_command)
class ScyllaAWSCluster(cluster.BaseScyllaCluster, AWSCluster):
def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: disable=too-many-arguments
services, credentials, ec2_instance_type='c4.xlarge',
ec2_ami_username='centos',
ec2_block_device_mappings=None,
user_prefix=None,
n_nodes=3,
params=None):
# pylint: disable=too-many-locals
# We have to pass the cluster name in advance in user_data
cluster_uuid = cluster.Setup.test_id()
cluster_prefix = cluster.prepend_user_prefix(user_prefix, 'db-cluster')
node_prefix = cluster.prepend_user_prefix(user_prefix, 'db-node')
node_type = 'scylla-db'
shortid = str(cluster_uuid)[:8]
name = '%s-%s' % (cluster_prefix, shortid)
scylla_cloud_image_version = get_ami_tags(ec2_ami_id[0], region_name=params.get(
'region_name').split()[0]).get('sci_version', '1')
if LooseVersion(scylla_cloud_image_version) >= LooseVersion('2'):
user_data = dict(scylla_yaml=dict(cluster_name=name), start_scylla_on_first_boot=False)
else:
user_data = ('--clustername %s '
'--totalnodes %s' % (name, sum(n_nodes)))
user_data += ' --stop-services'
super(ScyllaAWSCluster, self).__init__(ec2_ami_id=ec2_ami_id,
ec2_subnet_id=ec2_subnet_id,
ec2_security_group_ids=ec2_security_group_ids,
ec2_instance_type=ec2_instance_type,
ec2_ami_username=ec2_ami_username,
ec2_user_data=user_data,
ec2_block_device_mappings=ec2_block_device_mappings,
cluster_uuid=cluster_uuid,
services=services,
credentials=credentials,
cluster_prefix=cluster_prefix,
node_prefix=node_prefix,
n_nodes=n_nodes,
params=params,
node_type=node_type,
aws_extra_network_interface=params.get('aws_extra_network_interface'))
self.version = '2.1'
def add_nodes(self, count, ec2_user_data='', dc_idx=0, enable_auto_bootstrap=False):
if not ec2_user_data:
if self._ec2_user_data:
ec2_user_data = re.sub(r'(--totalnodes\s)(\d*)(\s)',
r'\g<1>{}\g<3>'.format(len(self.nodes) + count), self._ec2_user_data)
else:
ec2_user_data = ('--clustername %s --totalnodes %s ' % (self.name, count))
if self.nodes and isinstance(ec2_user_data, str):
node_ips = [node.ip_address for node in self.nodes if node.is_seed]
seeds = ",".join(node_ips)
if not seeds:
seeds = self.nodes[0].ip_address
ec2_user_data += ' --seeds %s ' % seeds
ec2_user_data = self.update_bootstrap(ec2_user_data, enable_auto_bootstrap)
added_nodes = super(ScyllaAWSCluster, self).add_nodes(count=count,
ec2_user_data=ec2_user_data,
dc_idx=dc_idx,
enable_auto_bootstrap=enable_auto_bootstrap)
return added_nodes
def node_config_setup(self, node, seed_address=None, endpoint_snitch=None, murmur3_partitioner_ignore_msb_bits=None, client_encrypt=None): # pylint: disable=too-many-arguments
setup_params = dict(
enable_exp=self._param_enabled('experimental'),
endpoint_snitch=endpoint_snitch,
authenticator=self.params.get('authenticator'),
server_encrypt=self._param_enabled('server_encrypt'),
client_encrypt=client_encrypt if client_encrypt is not None else self._param_enabled('client_encrypt'),
append_scylla_args=self.get_scylla_args(),
authorizer=self.params.get('authorizer'),
hinted_handoff=self.params.get('hinted_handoff'),
alternator_port=self.params.get('alternator_port'),
seed_address=seed_address,
append_scylla_yaml=self.params.get('append_scylla_yaml'),
murmur3_partitioner_ignore_msb_bits=murmur3_partitioner_ignore_msb_bits,
)
if cluster.Setup.INTRA_NODE_COMM_PUBLIC:
setup_params.update(dict(
broadcast=node.public_ip_address,
))
if self.aws_extra_network_interface:
setup_params.update(dict(
seed_address=seed_address,
broadcast=node.private_ip_address,
listen_on_all_interfaces=True,
))
node.config_setup(**setup_params)
def node_setup(self, node, verbose=False, timeout=3600):
endpoint_snitch = self.params.get('endpoint_snitch')
seed_address = ','.join(self.seed_nodes_ips)
def scylla_ami_setup_done():
"""
            scylla-ami-setup updates config files and triggers the start of the scylla-server service.
            The `--stop-services` parameter in the EC2 user-data doesn't actually stop a running
            scylla-server service; instead, on the first start of scylla-server, scylla_prepare
            deletes a flag file (/etc/scylla/ami_disabled) and the first start fails.
            We use this function to make sure scylla-ami-setup has finished and the first start is
            done (failed as expected, /etc/scylla/ami_disabled deleted), so it won't affect
            the reconfiguration done later by SCT.
            The following two checks are alternative ways to verify the same thing:
            # check 1: scylla-ami-setup finished:
            result = node.remoter.run('systemctl status scylla-ami-setup', ignore_status=True)
            return 'Started Scylla AMI Setup' in result.stdout
            # check 2: flag file was deleted by scylla_prepare:
            result = node.remoter.run('test -e /etc/scylla/ami_disabled', ignore_status=True)
            return result.exit_status != 0
"""
# make sure scylla-ami-setup finishes, flag file is deleted, and first start fails as expected.
result = node.remoter.run('systemctl status scylla-server', ignore_status=True)
return 'Failed to start Scylla Server.' in result.stdout
if not cluster.Setup.REUSE_CLUSTER:
node.wait_ssh_up(verbose=verbose)
wait.wait_for(scylla_ami_setup_done, step=10, timeout=300)
node.install_scylla_debuginfo()
if cluster.Setup.MULTI_REGION:
if not endpoint_snitch:
endpoint_snitch = "Ec2MultiRegionSnitch"
node.datacenter_setup(self.datacenter)
self.node_config_setup(node, seed_address, endpoint_snitch)
node.stop_scylla_server(verify_down=False)
node.start_scylla_server(verify_up=False)
else:
            # needed to reconfigure rsyslog
node.run_startup_script()
node.wait_db_up(verbose=verbose, timeout=timeout)
node.check_nodes_status()
self.clean_replacement_node_ip(node, seed_address, endpoint_snitch)
def destroy(self):
self.stop_nemesis()
super(ScyllaAWSCluster, self).destroy()
class CassandraAWSCluster(ScyllaAWSCluster):
def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: disable=too-many-arguments
services, credentials, ec2_instance_type='c4.xlarge',
ec2_ami_username='ubuntu',
ec2_block_device_mappings=None,
user_prefix=None,
n_nodes=3,
params=None):
# pylint: disable=too-many-locals
if ec2_block_device_mappings is None:
ec2_block_device_mappings = []
# We have to pass the cluster name in advance in user_data
cluster_uuid = uuid.uuid4()
cluster_prefix = cluster.prepend_user_prefix(user_prefix,
'cs-db-cluster')
node_prefix = cluster.prepend_user_prefix(user_prefix, 'cs-db-node')
node_type = 'cs-db'
shortid = str(cluster_uuid)[:8]
name = '%s-%s' % (cluster_prefix, shortid)
user_data = ('--clustername %s '
'--totalnodes %s --version community '
'--release 2.1.15' % (name, sum(n_nodes)))
super(CassandraAWSCluster, self).__init__(ec2_ami_id=ec2_ami_id,
ec2_subnet_id=ec2_subnet_id,
ec2_security_group_ids=ec2_security_group_ids,
ec2_instance_type=ec2_instance_type,
ec2_ami_username=ec2_ami_username,
ec2_user_data=user_data,
ec2_block_device_mappings=ec2_block_device_mappings,
cluster_uuid=cluster_uuid,
services=services,
credentials=credentials,
cluster_prefix=cluster_prefix,
node_prefix=node_prefix,
n_nodes=n_nodes,
params=params,
node_type=node_type)
def get_seed_nodes(self):
node = self.nodes[0]
yaml_dst_path = os.path.join(tempfile.mkdtemp(prefix='sct-cassandra'), 'cassandra.yaml')
node.remoter.receive_files(src='/etc/cassandra/cassandra.yaml',
dst=yaml_dst_path)
with open(yaml_dst_path, 'r') as yaml_stream:
conf_dict = yaml.load(yaml_stream, Loader=yaml.SafeLoader)
        try:
            return conf_dict['seed_provider'][0]['parameters'][0]['seeds'].split(',')
        except (KeyError, IndexError, TypeError):
            raise ValueError('Unexpected cassandra.yaml '
                             'contents:\n%s' % conf_dict)
def add_nodes(self, count, ec2_user_data='', dc_idx=0, enable_auto_bootstrap=False):
if not ec2_user_data:
if self.nodes:
seeds = ",".join(self.get_seed_nodes())
ec2_user_data = ('--clustername %s '
'--totalnodes %s --seeds %s '
'--version community '
'--release 2.1.15' % (self.name,
count,
seeds))
ec2_user_data = self.update_bootstrap(ec2_user_data, enable_auto_bootstrap)
added_nodes = super(CassandraAWSCluster, self).add_nodes(count=count,
ec2_user_data=ec2_user_data,
dc_idx=dc_idx)
return added_nodes
def node_setup(self, node, verbose=False, timeout=3600):
node.wait_ssh_up(verbose=verbose)
node.wait_db_up(verbose=verbose)
if cluster.Setup.REUSE_CLUSTER:
            # needed to reconfigure rsyslog
node.run_startup_script()
return
node.wait_apt_not_running()
node.remoter.run('sudo apt-get update')
node.remoter.run('sudo apt-get install -y collectd collectd-utils')
node.remoter.run('sudo apt-get install -y openjdk-6-jdk')
@cluster.wait_for_init_wrap
def wait_for_init(self, node_list=None, verbose=False, timeout=None):
self.get_seed_nodes()
class LoaderSetAWS(cluster.BaseLoaderSet, AWSCluster):
def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: disable=too-many-arguments
services, credentials, ec2_instance_type='c4.xlarge',
ec2_block_device_mappings=None,
ec2_ami_username='centos',
user_prefix=None, n_nodes=10, params=None):
# pylint: disable=too-many-locals
node_prefix = cluster.prepend_user_prefix(user_prefix, 'loader-node')
node_type = 'loader'
cluster_prefix = cluster.prepend_user_prefix(user_prefix, 'loader-set')
user_data = ('--clustername %s --totalnodes %s --bootstrap false --stop-services' %
(cluster_prefix, n_nodes))
cluster.BaseLoaderSet.__init__(self,
params=params)
AWSCluster.__init__(self,
ec2_ami_id=ec2_ami_id,
ec2_subnet_id=ec2_subnet_id,
ec2_security_group_ids=ec2_security_group_ids,
ec2_instance_type=ec2_instance_type,
ec2_ami_username=ec2_ami_username,
ec2_user_data=user_data,
services=services,
ec2_block_device_mappings=ec2_block_device_mappings,
credentials=credentials,
cluster_prefix=cluster_prefix,
node_prefix=node_prefix,
n_nodes=n_nodes,
params=params,
node_type=node_type)
class MonitorSetAWS(cluster.BaseMonitorSet, AWSCluster):
def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: disable=too-many-arguments
services, credentials, ec2_instance_type='c4.xlarge',
ec2_block_device_mappings=None,
ec2_ami_username='centos',
user_prefix=None, n_nodes=10, targets=None, params=None):
# pylint: disable=too-many-locals
node_prefix = cluster.prepend_user_prefix(user_prefix, 'monitor-node')
node_type = 'monitor'
cluster_prefix = cluster.prepend_user_prefix(user_prefix, 'monitor-set')
cluster.BaseMonitorSet.__init__(self,
targets=targets,
params=params)
AWSCluster.__init__(self,
ec2_ami_id=ec2_ami_id,
ec2_subnet_id=ec2_subnet_id,
ec2_security_group_ids=ec2_security_group_ids,
ec2_instance_type=ec2_instance_type,
ec2_ami_username=ec2_ami_username,
services=services,
ec2_block_device_mappings=ec2_block_device_mappings,
credentials=credentials,
cluster_prefix=cluster_prefix,
node_prefix=node_prefix,
n_nodes=n_nodes,
params=params,
node_type=node_type)
| amoskong/scylla-cluster-tests | sdcm/cluster_aws.py | Python | agpl-3.0 | 47,032 |
# Utilities for printing ASN.1 values
def bits_to_hex(bit_array, delimiter=":"):
"""Convert a bit array to a prettily formated hex string. If the array
length is not a multiple of 8, it is padded with 0-bits from the left.
For example, [1,0,0,1,1,0,1,0,0,1,0] becomes 04:d2.
Args:
bit_array: the bit array to convert
Returns:
the formatted hex string."""
# Pad the first partial byte.
partial_bits = len(bit_array) % 8
pad_length = 8 - partial_bits if partial_bits else 0
bitstring = "0" * pad_length + "".join(map(str, bit_array))
byte_array = [int(bitstring[i:i + 8], 2) for i in range(0, len(bitstring), 8)]
return delimiter.join(map(lambda x: "%02x" % x, byte_array))
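# Worked example (added for illustration): for [1,0,0,1,1,0,1,0,0,1,0] the 11 bits
# are left-padded to 16 -> "0000010011010010", split into bytes 0x04 and 0xd2,
# and joined with the delimiter to give "04:d2".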
def bytes_to_hex(byte_string, delimiter=":"):
"""Convert a bytestring to a prettily formated hex string: for example,
'\x04\xd2' becomes 04:d2.
Args:
byte_string: the bytes to convert.
Returns:
the formatted hex string."""
return delimiter.join([("%02x" % ord(b)) for b in byte_string])
def int_to_hex(int_value, delimiter=":"):
"""Convert an integer to a prettily formated hex string: for example,
1234 (0x4d2) becomes 04:d2 and -1234 becomes ' -:04:d2'
Args:
int_value: the value to convert.
Returns:
the formatted hex string."""
hex_string = "%x" % int_value
ret = ""
pos = 0
# Accommodate for negative integers.
if hex_string[0] == '-':
ret += ' -' + delimiter
hex_string = hex_string[1:]
# If the first digit is a half-byte, pad with a 0.
remaining_len = len(hex_string) - pos
hex_string = hex_string.zfill(remaining_len + remaining_len % 2)
byte_values = [hex_string[i:i + 2] for i in range(pos, len(hex_string), 2)]
return ret + delimiter.join(byte_values)
def wrap_lines(long_string, wrap):
"""Split the long string into line chunks according to the wrap limit and
existing newlines.
Args:
long_string: a long, possibly multiline string
wrap: maximum number of characters per line. 0 or negative
wrap means no limit.
Returns:
a list of lines of at most |wrap| characters each."""
if not long_string:
return []
long_lines = long_string.decode('utf-8').split('\n')
if wrap <= 0:
return long_lines
ret = []
for line in long_lines:
if not line:
# Empty line
ret += [line]
else:
ret += [line[i:i + wrap] for i in range(0, len(line), wrap)]
return ret
def append_lines(lines, wrap, buf):
"""Append lines to the buffer. If the first line can be appended to the last
line of the buf without exceeding wrap characters, the two lines are merged.
Args:
lines: an iterable of lines to append
wrap: maximum number of characters per line. 0 or negative wrap means
no limit.
buf: an iterable of lines to append to"""
if not lines:
return
if not buf or wrap > 0 and len(buf[-1]) + len(lines[0]) > wrap:
buf += lines
else:
buf[-1] += lines[0]
buf += lines[1:]
| balena/python-smime | smime/print_util.py | Python | apache-2.0 | 3,175 |
"""
Model class for MyTardis API v1's InstrumentResource.
See: https://github.com/mytardis/mytardis/blob/3.7/tardis/tardis_portal/api.py
"""
from __future__ import print_function
import json
import logging
import requests
from mytardisclient.conf import config
from .facility import Facility
from .resultset import ResultSet
from mytardisclient.utils.exceptions import DoesNotExist
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class Instrument(object):
"""
Model class for MyTardis API v1's InstrumentResource.
See: https://github.com/mytardis/mytardis/blob/3.7/tardis/tardis_portal/api.py
"""
def __init__(self, instrument_json):
self.id = instrument_json['id'] # pylint: disable=invalid-name
self.name = instrument_json['name']
self.json = instrument_json
self.facility = Facility(instrument_json['facility'])
def __str__(self):
return self.name
@staticmethod
@config.region.cache_on_arguments(namespace="Instrument")
def list(facility_id=None, limit=None, offset=None, order_by=None):
"""
Retrieve a list of instruments in a facility with ID facility_id.
:param facility_id: The ID of a facility to retrieve instruments from.
:param limit: Maximum number of results to return.
:param offset: Skip this many records from the start of the result set.
:param order_by: Order by this field.
:return: A list of :class:`Instrument` records, encapsulated in a
ResultSet object.
"""
url = "%s/api/v1/instrument/?format=json" % config.url
if facility_id:
url += "&facility__id=%s" % facility_id
if limit:
url += "&limit=%s" % limit
if offset:
url += "&offset=%s" % offset
if order_by:
url += "&order_by=%s" % order_by
response = requests.get(url=url, headers=config.default_headers)
logger.debug("GET %s %s", url, response.status_code)
if response.status_code != 200:
message = response.text
raise Exception(message)
return ResultSet(Instrument, url, response.json())
@staticmethod
@config.region.cache_on_arguments(namespace="Instrument")
def get(instrument_id):
"""
Get instrument with ID instrument_id
:param instrument_id: The ID of an instrument to retrieve.
:return: An :class:`Instrument` record.
"""
url = "%s/api/v1/instrument/?format=json&id=%s" % \
(config.url, instrument_id)
response = requests.get(url=url, headers=config.default_headers)
logger.debug("GET %s %s", url, response.status_code)
if response.status_code != 200:
message = response.text
raise Exception(message)
instruments_json = response.json()
if instruments_json['meta']['total_count'] == 0:
message = "Instrument matching filter doesn't exist."
raise DoesNotExist(message, url, response, Instrument)
return Instrument(instrument_json=instruments_json['objects'][0])
@staticmethod
def create(facility_id, name):
"""
Create an instrument record.
:param facility_id: The ID of the facility to create the instrument in.
:param name: The name of the instrument.
:return: A new :class:`Instrument` record.
"""
new_instrument_json = {
"name": name,
"facility": "/api/v1/facility/%s/" % facility_id
}
url = config.url + "/api/v1/instrument/"
response = requests.post(headers=config.default_headers, url=url,
data=json.dumps(new_instrument_json))
logger.debug("POST %s %s", url, response.status_code)
if response.status_code != 201:
message = response.text
raise Exception(message)
instrument_json = response.json()
return Instrument(instrument_json)
@staticmethod
def update(instrument_id, name):
"""
Update an instrument record.
:param instrument_id: The ID of the instrument record to update.
:param name: The new name of the instrument.
:return: An updated :class:`Instrument` record.
"""
updated_fields_json = {
"name": name,
}
url = "%s/api/v1/instrument/%s/" % (config.url, instrument_id)
response = requests.patch(headers=config.default_headers, url=url,
data=json.dumps(updated_fields_json))
if response.status_code != 202:
print("HTTP %s" % response.status_code)
message = response.text
raise Exception(message)
instrument_json = response.json()
return Instrument(instrument_json)
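    # Usage sketch (added for illustration; the facility id and instrument names are
    # hypothetical, and iteration assumes the returned ResultSet behaves like a list
    # of Instrument objects):
    #     >>> instruments = Instrument.list(facility_id=1, order_by='name')
    #     >>> for instrument in instruments:
    #     ...     print(instrument.id, instrument.name, instrument.facility)
    #     >>> new_instrument = Instrument.create(facility_id=1, name="Beamline Microscope")
    #     >>> Instrument.update(new_instrument.id, name="Beamline Microscope (renamed)")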
| wettenhj/mytardisclient | mytardisclient/models/instrument.py | Python | gpl-3.0 | 4,860 |
def fu<caret>nc(b):
pass
| siosio/intellij-community | python/testData/refactoring/changeSignature/newParameterWithSignatureDefaultBeforeExistingWithoutSignatureDefault.py | Python | apache-2.0 | 29 |
import abc
__author__ = 'paoolo'
class Listener(object):
@abc.abstractmethod
def handle(self, response):
        pass
| project-capo/amber-python-clients | src/amberclient/common/listener.py | Python | mit | 128 |
import pandas as pd
from link import lnk
import IPython.core.display as d
import os
import sys
import math  # needed at module level by format_trend_col()
def df_to_json(df, columns=False):
"""
Returns columns in a Pandas dataframe as a JSON object
with the following structure:
{
"col_name":[val1,val2,val3],
"col_name2":[val1,val2,val3]
}
"""
if columns:
df_columns = columns
else:
df_columns = df.columns
json_dict = {}
for col in df_columns:
json_dict.update({col: list(df[col].values)})
return json_dict
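# Usage sketch (added for illustration; the column names are hypothetical):
#     >>> df = pd.DataFrame({'imps': [10, 20], 'clicks': [1, 2]})
#     >>> df_to_json(df)                       # {'clicks': [1, 2], 'imps': [10, 20]}
#     >>> df_to_json(df, columns=['imps'])     # only the listed columns
# Note: the values keep their numpy dtypes, so they may need casting before json.dumps().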
def format_col(df, col_name, rounding=0, currency=False, percent=False):
"""Function to format numerical Pandas Dataframe columns (one at a time). WARNING: This function will convert the column to strings. Apply this function as the last step in your script.
Output is the formatted column.
Parameters:
df = the dataframe object
col_name = the name of the column as a string
rounding = decimal places to round to. ie 0 means round to the nearest whole number
currency = adds the $ symbol
percent = adds the % symbol and multiplies by 100
"""
import locale
import math
locale.setlocale(locale.LC_ALL, 'en_US.utf8')
round_by = '{:,.%sf}' % str(rounding)
if currency == True:
return df[col_name].apply(lambda x: '$' + round_by.format(x) if math.isnan(x) == False else x)
elif percent == True:
return df[col_name].apply(lambda x: round_by.format(x * 100) + '%' if math.isnan(x) == False else x)
else:
return df[col_name].apply(lambda x: round_by.format(x) if math.isnan(x) == False else x)
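# Usage sketch (added for illustration; the column names are hypothetical). The
# formatted column is returned, so assign it back:
#     >>> df['revenue'] = format_col(df, 'revenue', rounding=2, currency=True)   # 1234.5 -> '$1,234.50'
#     >>> df['fill %'] = format_col(df, 'fill %', rounding=1, percent=True)      # 0.012  -> '1.2%'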
def format_value(value, rounding=0, currency=False, percent=False):
import locale
locale.setlocale(locale.LC_ALL, 'en_US.utf8')
round_by = '{:,.%sf}' % str(rounding)
if currency == True:
return '$' + round_by.format(value)
elif percent == True:
return round_by.format(value * 100) + '%'
else:
return round_by.format(value)
def format_df(df):
"""Attempts to autoformat numbers based on the column names.
if %: format as percent
if imps: format as integer with commas
if revenue, cost, cpm, rpm, profit: format as currency with 2 decimals
"""
for col in df.columns:
if '%' in col:
df[col] = format_col(
df, col, rounding=0, currency=False, percent=True)
if 'imps' in col:
df[col] = format_col(
df, col, rounding=0, currency=False, percent=False)
if any([x in col for x in ['revenue', 'cost', 'cpm', 'rpm', 'profit']]):
df[col] = format_col(
df, col, rounding=2, currency=True, percent=False)
def sql_list(iterable, format="int"):
"""Function to convert a Pandas dataframe column/series into an SQL friends string of comma separate values.
Parameters
==========================
iterable: can be any type of array, list, or pandas column/series
format: "int" or "string". "int" returns string with no quotes, "string" does.
"""
sql_list = ''
if format == "string":
for i in iterable:
sql_list += "'" + str(i) + "'" + ","
elif format == "int":
for i in iterable:
sql_list += str(i) + ','
else:
print 'Incorrect format parameter. Choose string or int.'
sql_list = sql_list[:-1] # delete last comma
return sql_list
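# Usage sketch (added for illustration; the query and ids are hypothetical):
#     >>> sql_list([101, 102, 103])                  # '101,102,103'
#     >>> sql_list(['US', 'DE'], format="string")    # "'US','DE'"
#     >>> query = "SELECT id, name FROM bidder.campaign WHERE id IN (%s)" % sql_list(campaign_ids)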
def get_object_names(target_df, inputs):
prod = lnk.dbs.mprod_api
for k, v in inputs.items():
input_string = sql_list(v.unique())
if k in "advertiser campaign campaign_group":
obj_name = prod.select_dataframe("""SELECT id, name as {}_name FROM bidder.{}
WHERE id IN ({})
""" .format(k, k, input_string))
elif k in "publisher site tinytag":
obj_name = prod.select_dataframe("""SELECT id, name as {}_name FROM api.{}
WHERE id IN ({})
""" .format(k, k, input_string))
elif k in "creative":
obj_name = prod.select_dataframe("""SELECT id, description as {}_name FROM api.{}
WHERE id IN ({})
""" .format(k, k, input_string))
elif k in "seller_member":
obj_name = prod.select_dataframe("""SELECT id, name as seller_name FROM bidder.member
WHERE id IN ({})
""" .format(input_string))
elif k in "buyer_member":
obj_name = prod.select_dataframe("""SELECT id, name as buyer_name FROM bidder.member
WHERE id IN ({})
""" .format(input_string))
else:
print "invalid input"
obj_name = obj_name.set_index('id')
target_df = target_df.merge(obj_name, left_on=v, right_index=True)
target_df = target_df.drop('key_0', axis=1)
return target_df
def reorder_df_cols(df, new_order, print_summary=True):
"""
Function reorders columns in a Pandas DataFrame by using the column order index.
Parameters
=======================================
df: the dataframe for which you'd like to reorder columns
new_order: a list of integer values that correspond to the positions of the columns in the new order. Example: if columns in a df are 'a' 'b' 'c', and you
wanted to reorder to be 'b', 'a', 'c', the new_order list would be [1,0,2]
print_summary: if True, prints the original column order and index values and the new column order and index values
"""
orig_cols = df.columns.tolist()
if print_summary:
print "Original Order:\n"
for i in enumerate(orig_cols):
print i
new_cols = [orig_cols[i] for i in new_order]
df = df[new_cols]
if print_summary:
print "\nNew Order:\n"
for i in enumerate(new_cols):
print i
return df
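# Usage sketch (added for illustration), matching the docstring's 'a'/'b'/'c' example:
#     >>> df = pd.DataFrame({'a': [1], 'b': [2], 'c': [3]})
#     >>> reorder_df_cols(df, [1, 0, 2], print_summary=False).columns.tolist()
#     ['b', 'a', 'c']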
def send_email(to_addr_list,
subject,
body,
from_addr,
smtpserver="mail.adnxs.net",
cc_addr_list=None,
attachments=None
):
"""
Function to send emails using Python.
Parameters
==================================
to_addr_list: Supply a list of recipients (even if only sending to one recipient). Example=['[email protected]','[email protected]']
subject: The email subject as a string
body: Body of the email. This can be HTML or plain text.
    from_addr: The name and/or email of the sender. This can be any string value but it is recommended to follow the format:
Display Name <[email protected]>. For example if you are using this for the Manual Exclusions script,
you might set the from_addr to be: Manual Exclusions Alert <[email protected]>
smtpserver: default is mail.adnxs.net. Do not change unless you know what you're doing.
cc_addr_list: Provide cc email recipients in the same format as to_addr_list
attachments: Provide list of attachment locations. If in same directory as script, simply input the filename.
"""
import smtplib
import os
import sys
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
from email import Encoders
from email.MIMEBase import MIMEBase
from bs4 import BeautifulSoup as bs
import datetime
msgRoot = MIMEMultipart('mixed')
msg = MIMEMultipart('alternative')
soup = bs(body)
plain_text_body = str(soup.getText())
plain = MIMEText(plain_text_body, 'plain')
html = MIMEText(body, 'html')
msgRoot.add_header('From', from_addr)
msgRoot.add_header('To', ','.join(to_addr_list))
msgRoot.add_header('Subject', subject)
    if cc_addr_list:
        msgRoot.add_header('Cc', ','.join(cc_addr_list))
        to_addr_list = to_addr_list + cc_addr_list
    if attachments != None:
for attachment in attachments:
attached_file = MIMEBase('application', "octet-stream")
attached_file.set_payload(open(attachment, "rb").read())
Encoders.encode_base64(attached_file)
attached_file.add_header(
'Content-Disposition', 'attachment', filename=os.path.basename(attachment))
msgRoot.attach(attached_file)
msg.attach(plain)
msg.attach(html)
msgRoot.attach(msg)
server = smtplib.SMTP(smtpserver)
server.sendmail(from_addr, to_addr_list, msgRoot.as_string())
server.quit()
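# Usage sketch (added for illustration; addresses, subject and attachment name are hypothetical):
#     send_email(to_addr_list=['[email protected]'],
#                subject='Manual Exclusions Alert',
#                body='<p>Exclusions were <b>updated</b></p>',
#                from_addr='Manual Exclusions Alert <[email protected]>',
#                attachments=['exclusions_report.csv'])
# A plain-text alternative is generated from the HTML body via BeautifulSoup, so
# recipients without HTML rendering still get a readable message.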
def format_col_names(input_df):
for col in input_df.columns:
new_col = col.replace('_', ' ')
new_col = str.title(new_col)
# print new_col
input_df = input_df.rename(columns={col: new_col})
return input_df
def format_trend_col(x):
    """Format a trend ratio as a percent string; values above +20% or below -20%
    are wrapped in HTML spans (positive_trend / negative_trend) for styling.
    NaN values are returned unchanged."""
# format_dict[col] = lambda x: '{0:.0%}'.format(x)
if math.isnan(x)==False:
if x > .2:
return "<span class='positive_trend'>"+\
'{0:.0%}'.format(x) +\
"</span>"
elif x < -.2:
return "<span class='negative_trend'>"+\
'{0:.0%}'.format(x)+\
"</span>"
else:
return '{0:.0%}'.format(x)
else:
        return x
| babraham123/script-runner | utility_functions.py | Python | mit | 9,325 |
class ProfileAgent(object):
""" A class that communicates to a profiler which assembler code belongs to
which functions. """
def startup(self):
pass
def shutdown(self):
pass
def native_code_written(self, name, address, size):
pass
| oblique-labs/pyVM | rpython/jit/backend/x86/profagent.py | Python | mit | 278 |
from tests import mock
from tests import unittest
from botocore.history import HistoryRecorder
from botocore.history import BaseHistoryHandler
from botocore.history import get_global_history_recorder
class TerribleError(Exception):
pass
class ExceptionThrowingHandler(BaseHistoryHandler):
def emit(self, event_type, payload, source):
raise TerribleError('Bad behaving handler')
class TestHistoryRecorder(unittest.TestCase):
def test_can_attach_and_call_handler_emit(self):
mock_handler = mock.Mock(spec=BaseHistoryHandler)
recorder = HistoryRecorder()
recorder.enable()
recorder.add_handler(mock_handler)
recorder.record('foo', 'bar', source='source')
mock_handler.emit.assert_called_with('foo', 'bar', 'source')
def test_can_call_multiple_handlers(self):
first_handler = mock.Mock(spec=BaseHistoryHandler)
second_handler = mock.Mock(spec=BaseHistoryHandler)
recorder = HistoryRecorder()
recorder.enable()
recorder.add_handler(first_handler)
recorder.add_handler(second_handler)
recorder.record('foo', 'bar', source='source')
first_handler.emit.assert_called_with('foo', 'bar', 'source')
second_handler.emit.assert_called_with('foo', 'bar', 'source')
def test_does_use_botocore_source_by_default(self):
mock_handler = mock.Mock(spec=BaseHistoryHandler)
recorder = HistoryRecorder()
recorder.enable()
recorder.add_handler(mock_handler)
recorder.record('foo', 'bar')
mock_handler.emit.assert_called_with('foo', 'bar', 'BOTOCORE')
def test_does_not_call_handlers_when_never_enabled(self):
mock_handler = mock.Mock(spec=BaseHistoryHandler)
recorder = HistoryRecorder()
recorder.add_handler(mock_handler)
recorder.record('foo', 'bar')
mock_handler.emit.assert_not_called()
def test_does_not_call_handlers_when_disabled(self):
mock_handler = mock.Mock(spec=BaseHistoryHandler)
recorder = HistoryRecorder()
recorder.enable()
recorder.disable()
recorder.add_handler(mock_handler)
recorder.record('foo', 'bar')
mock_handler.emit.assert_not_called()
def test_can_ignore_handler_exceptions(self):
mock_handler = mock.Mock(spec=BaseHistoryHandler)
recorder = HistoryRecorder()
recorder.enable()
bad_handler = ExceptionThrowingHandler()
recorder.add_handler(bad_handler)
recorder.add_handler(mock_handler)
try:
recorder.record('foo', 'bar')
except TerribleError:
self.fail('Should not have raised a TerribleError')
mock_handler.emit.assert_called_with('foo', 'bar', 'BOTOCORE')
class TestGetHistoryRecorder(unittest.TestCase):
def test_can_get_history_recorder(self):
recorder = get_global_history_recorder()
self.assertTrue(isinstance(recorder, HistoryRecorder))
def test_does_reuse_history_recorder(self):
recorder_1 = get_global_history_recorder()
recorder_2 = get_global_history_recorder()
self.assertIs(recorder_1, recorder_2)
| boto/botocore | tests/unit/test_history.py | Python | apache-2.0 | 3,187 |
from django import template
register = template.Library()
@register.assignment_tag
def get_new_notifications_count(user):
"""Usually used to display an unread notifications counter"""
from notifier.models import Notification
return user.notifications.exclude(noti_type=Notification.EMAIL_NOTI).filter(displayed=False).count()
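# Usage sketch (added for illustration) in a Django template, after {% load notifier_tags %}:
#     {% get_new_notifications_count request.user as new_notifications_count %}
#     <span class="badge">{{ new_notifications_count }}</span>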
| Nomadblue/django-nomad-notifier | notifier/templatetags/notifier_tags.py | Python | bsd-3-clause | 342 |
from __future__ import absolute_import, division, print_function
import sys
import platform
import os
import _pytest._code
from _pytest.debugging import SUPPORTS_BREAKPOINT_BUILTIN
import pytest
_ENVIRON_PYTHONBREAKPOINT = os.environ.get("PYTHONBREAKPOINT", "")
def runpdb_and_get_report(testdir, source):
p = testdir.makepyfile(source)
result = testdir.runpytest_inprocess("--pdb", p)
reports = result.reprec.getreports("pytest_runtest_logreport")
assert len(reports) == 3, reports # setup/call/teardown
return reports[1]
@pytest.fixture
def custom_pdb_calls():
called = []
# install dummy debugger class and track which methods were called on it
class _CustomPdb(object):
quitting = False
def __init__(self, *args, **kwargs):
called.append("init")
def reset(self):
called.append("reset")
def interaction(self, *args):
called.append("interaction")
_pytest._CustomPdb = _CustomPdb
return called
@pytest.fixture
def custom_debugger_hook():
called = []
# install dummy debugger class and track which methods were called on it
class _CustomDebugger(object):
def __init__(self, *args, **kwargs):
called.append("init")
def reset(self):
called.append("reset")
def interaction(self, *args):
called.append("interaction")
def set_trace(self, frame):
print("**CustomDebugger**")
called.append("set_trace")
_pytest._CustomDebugger = _CustomDebugger
yield called
del _pytest._CustomDebugger
class TestPDB(object):
@pytest.fixture
def pdblist(self, request):
monkeypatch = request.getfixturevalue("monkeypatch")
pdblist = []
def mypdb(*args):
pdblist.append(args)
plugin = request.config.pluginmanager.getplugin("debugging")
monkeypatch.setattr(plugin, "post_mortem", mypdb)
return pdblist
def test_pdb_on_fail(self, testdir, pdblist):
rep = runpdb_and_get_report(
testdir,
"""
def test_func():
assert 0
""",
)
assert rep.failed
assert len(pdblist) == 1
tb = _pytest._code.Traceback(pdblist[0][0])
assert tb[-1].name == "test_func"
def test_pdb_on_xfail(self, testdir, pdblist):
rep = runpdb_and_get_report(
testdir,
"""
import pytest
@pytest.mark.xfail
def test_func():
assert 0
""",
)
assert "xfail" in rep.keywords
assert not pdblist
def test_pdb_on_skip(self, testdir, pdblist):
rep = runpdb_and_get_report(
testdir,
"""
import pytest
def test_func():
pytest.skip("hello")
""",
)
assert rep.skipped
assert len(pdblist) == 0
def test_pdb_on_BdbQuit(self, testdir, pdblist):
rep = runpdb_and_get_report(
testdir,
"""
import bdb
def test_func():
raise bdb.BdbQuit
""",
)
assert rep.failed
assert len(pdblist) == 0
def test_pdb_on_KeyboardInterrupt(self, testdir, pdblist):
rep = runpdb_and_get_report(
testdir,
"""
def test_func():
raise KeyboardInterrupt
""",
)
assert rep.failed
assert len(pdblist) == 1
def test_pdb_interaction(self, testdir):
p1 = testdir.makepyfile(
"""
def test_1():
i = 0
assert i == 1
def test_not_called_due_to_quit():
pass
"""
)
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect(".*def test_1")
child.expect(".*i = 0")
child.expect("Pdb")
child.sendeof()
rest = child.read().decode("utf8")
assert "= 1 failed in" in rest
assert "def test_1" not in rest
assert "Exit: Quitting debugger" in rest
self.flush(child)
@staticmethod
def flush(child):
if platform.system() == "Darwin":
return
if child.isalive():
child.wait()
def test_pdb_unittest_postmortem(self, testdir):
p1 = testdir.makepyfile(
"""
import unittest
class Blub(unittest.TestCase):
def tearDown(self):
self.filename = None
def test_false(self):
self.filename = 'debug' + '.me'
assert 0
"""
)
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect("Pdb")
child.sendline("p self.filename")
child.sendeof()
rest = child.read().decode("utf8")
assert "debug.me" in rest
self.flush(child)
def test_pdb_unittest_skip(self, testdir):
"""Test for issue #2137"""
p1 = testdir.makepyfile(
"""
import unittest
@unittest.skipIf(True, 'Skipping also with pdb active')
class MyTestCase(unittest.TestCase):
def test_one(self):
assert 0
"""
)
child = testdir.spawn_pytest("-rs --pdb %s" % p1)
child.expect("Skipping also with pdb active")
child.expect("1 skipped in")
child.sendeof()
self.flush(child)
def test_pdb_print_captured_stdout(self, testdir):
p1 = testdir.makepyfile(
"""
def test_1():
print("get\\x20rekt")
assert False
"""
)
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect("captured stdout")
child.expect("get rekt")
child.expect("Pdb")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "get rekt" not in rest
self.flush(child)
def test_pdb_print_captured_stderr(self, testdir):
p1 = testdir.makepyfile(
"""
def test_1():
import sys
sys.stderr.write("get\\x20rekt")
assert False
"""
)
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect("captured stderr")
child.expect("get rekt")
child.expect("Pdb")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "get rekt" not in rest
self.flush(child)
def test_pdb_dont_print_empty_captured_stdout_and_stderr(self, testdir):
p1 = testdir.makepyfile(
"""
def test_1():
assert False
"""
)
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect("Pdb")
output = child.before.decode("utf8")
child.sendeof()
assert "captured stdout" not in output
assert "captured stderr" not in output
self.flush(child)
@pytest.mark.parametrize("showcapture", ["all", "no", "log"])
def test_pdb_print_captured_logs(self, testdir, showcapture):
p1 = testdir.makepyfile(
"""
def test_1():
import logging
logging.warn("get " + "rekt")
assert False
"""
)
child = testdir.spawn_pytest(
"--show-capture={} --pdb {}".format(showcapture, p1)
)
if showcapture in ("all", "log"):
child.expect("captured log")
child.expect("get rekt")
child.expect("Pdb")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
self.flush(child)
def test_pdb_print_captured_logs_nologging(self, testdir):
p1 = testdir.makepyfile(
"""
def test_1():
import logging
logging.warn("get " + "rekt")
assert False
"""
)
child = testdir.spawn_pytest("--show-capture=all --pdb -p no:logging %s" % p1)
child.expect("get rekt")
output = child.before.decode("utf8")
assert "captured log" not in output
child.expect("Pdb")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
self.flush(child)
def test_pdb_interaction_exception(self, testdir):
p1 = testdir.makepyfile(
"""
import pytest
def globalfunc():
pass
def test_1():
pytest.raises(ValueError, globalfunc)
"""
)
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect(".*def test_1")
child.expect(".*pytest.raises.*globalfunc")
child.expect("Pdb")
child.sendline("globalfunc")
child.expect(".*function")
child.sendeof()
child.expect("1 failed")
self.flush(child)
def test_pdb_interaction_on_collection_issue181(self, testdir):
p1 = testdir.makepyfile(
"""
import pytest
xxx
"""
)
child = testdir.spawn_pytest("--pdb %s" % p1)
# child.expect(".*import pytest.*")
child.expect("Pdb")
child.sendline("c")
child.expect("1 error")
self.flush(child)
def test_pdb_interaction_on_internal_error(self, testdir):
testdir.makeconftest(
"""
def pytest_runtest_protocol():
0/0
"""
)
p1 = testdir.makepyfile("def test_func(): pass")
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect("Pdb")
# INTERNALERROR is only displayed once via terminal reporter.
assert (
len(
[
x
for x in child.before.decode().splitlines()
if x.startswith("INTERNALERROR> Traceback")
]
)
== 1
)
child.sendeof()
self.flush(child)
def test_pdb_interaction_capturing_simple(self, testdir):
p1 = testdir.makepyfile(
"""
import pytest
def test_1():
i = 0
print("hello17")
pytest.set_trace()
x = 3
"""
)
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.expect("x = 3")
child.expect("Pdb")
child.sendeof()
rest = child.read().decode("utf-8")
assert "1 failed" in rest
assert "def test_1" in rest
assert "hello17" in rest # out is captured
self.flush(child)
def test_pdb_set_trace_interception(self, testdir):
p1 = testdir.makepyfile(
"""
import pdb
def test_1():
pdb.set_trace()
"""
)
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.expect("Pdb")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "reading from stdin while output" not in rest
assert "BdbQuit" in rest
self.flush(child)
def test_pdb_and_capsys(self, testdir):
p1 = testdir.makepyfile(
"""
import pytest
def test_1(capsys):
print("hello1")
pytest.set_trace()
"""
)
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.send("capsys.readouterr()\n")
child.expect("hello1")
child.sendeof()
child.read()
self.flush(child)
def test_pdb_with_caplog_on_pdb_invocation(self, testdir):
p1 = testdir.makepyfile(
"""
def test_1(capsys, caplog):
import logging
logging.getLogger(__name__).warning("some_warning")
assert 0
"""
)
child = testdir.spawn_pytest("--pdb %s" % str(p1))
child.send("caplog.record_tuples\n")
child.expect_exact(
"[('test_pdb_with_caplog_on_pdb_invocation', 30, 'some_warning')]"
)
child.sendeof()
child.read()
self.flush(child)
def test_set_trace_capturing_afterwards(self, testdir):
p1 = testdir.makepyfile(
"""
import pdb
def test_1():
pdb.set_trace()
def test_2():
print("hello")
assert 0
"""
)
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.send("c\n")
child.expect("test_2")
child.expect("Captured")
child.expect("hello")
child.sendeof()
child.read()
self.flush(child)
def test_pdb_interaction_doctest(self, testdir):
p1 = testdir.makepyfile(
"""
import pytest
def function_1():
'''
>>> i = 0
>>> assert i == 1
'''
"""
)
child = testdir.spawn_pytest("--doctest-modules --pdb %s" % p1)
child.expect("Pdb")
child.sendline("i")
child.expect("0")
child.expect("Pdb")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
self.flush(child)
def test_pdb_interaction_capturing_twice(self, testdir):
p1 = testdir.makepyfile(
"""
import pytest
def test_1():
i = 0
print("hello17")
pytest.set_trace()
x = 3
print("hello18")
pytest.set_trace()
x = 4
"""
)
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.expect("x = 3")
child.expect("Pdb")
child.sendline("c")
child.expect("x = 4")
child.expect("Pdb")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "def test_1" in rest
assert "hello17" in rest # out is captured
assert "hello18" in rest # out is captured
self.flush(child)
def test_pdb_used_outside_test(self, testdir):
p1 = testdir.makepyfile(
"""
import pytest
pytest.set_trace()
x = 5
"""
)
child = testdir.spawn("{} {}".format(sys.executable, p1))
child.expect("x = 5")
child.expect("Pdb")
child.sendeof()
self.flush(child)
def test_pdb_used_in_generate_tests(self, testdir):
p1 = testdir.makepyfile(
"""
import pytest
def pytest_generate_tests(metafunc):
pytest.set_trace()
x = 5
def test_foo(a):
pass
"""
)
child = testdir.spawn_pytest(str(p1))
child.expect("x = 5")
child.expect("Pdb")
child.sendeof()
self.flush(child)
def test_pdb_collection_failure_is_shown(self, testdir):
p1 = testdir.makepyfile("xxx")
result = testdir.runpytest_subprocess("--pdb", p1)
result.stdout.fnmatch_lines(
["E NameError: *xxx*", "*! *Exit: Quitting debugger !*"] # due to EOF
)
def test_enter_pdb_hook_is_called(self, testdir):
testdir.makeconftest(
"""
def pytest_enter_pdb(config):
assert config.testing_verification == 'configured'
print('enter_pdb_hook')
def pytest_configure(config):
config.testing_verification = 'configured'
"""
)
p1 = testdir.makepyfile(
"""
import pytest
def test_foo():
pytest.set_trace()
"""
)
child = testdir.spawn_pytest(str(p1))
child.expect("enter_pdb_hook")
child.send("c\n")
child.sendeof()
self.flush(child)
def test_pdb_custom_cls(self, testdir, custom_pdb_calls):
p1 = testdir.makepyfile("""xxx """)
result = testdir.runpytest_inprocess("--pdb", "--pdbcls=_pytest:_CustomPdb", p1)
result.stdout.fnmatch_lines(["*NameError*xxx*", "*1 error*"])
assert custom_pdb_calls == ["init", "reset", "interaction"]
def test_pdb_custom_cls_without_pdb(self, testdir, custom_pdb_calls):
p1 = testdir.makepyfile("""xxx """)
result = testdir.runpytest_inprocess("--pdbcls=_pytest:_CustomPdb", p1)
result.stdout.fnmatch_lines(["*NameError*xxx*", "*1 error*"])
assert custom_pdb_calls == []
def test_pdb_custom_cls_with_settrace(self, testdir, monkeypatch):
testdir.makepyfile(
custom_pdb="""
class CustomPdb(object):
def set_trace(*args, **kwargs):
print('custom set_trace>')
"""
)
p1 = testdir.makepyfile(
"""
import pytest
def test_foo():
pytest.set_trace()
"""
)
monkeypatch.setenv("PYTHONPATH", str(testdir.tmpdir))
child = testdir.spawn_pytest("--pdbcls=custom_pdb:CustomPdb %s" % str(p1))
child.expect("custom set_trace>")
self.flush(child)
class TestDebuggingBreakpoints(object):
def test_supports_breakpoint_module_global(self):
"""
Test that supports breakpoint global marks on Python 3.7+ and not on
CPython 3.5, 2.7
"""
if sys.version_info.major == 3 and sys.version_info.minor >= 7:
assert SUPPORTS_BREAKPOINT_BUILTIN is True
if sys.version_info.major == 3 and sys.version_info.minor == 5:
assert SUPPORTS_BREAKPOINT_BUILTIN is False
if sys.version_info.major == 2 and sys.version_info.minor == 7:
assert SUPPORTS_BREAKPOINT_BUILTIN is False
@pytest.mark.skipif(
not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin"
)
@pytest.mark.parametrize("arg", ["--pdb", ""])
def test_sys_breakpointhook_configure_and_unconfigure(self, testdir, arg):
"""
Test that sys.breakpointhook is set to the custom Pdb class once configured, test that
hook is reset to system value once pytest has been unconfigured
"""
testdir.makeconftest(
"""
import sys
from pytest import hookimpl
from _pytest.debugging import pytestPDB
def pytest_configure(config):
config._cleanup.append(check_restored)
def check_restored():
assert sys.breakpointhook == sys.__breakpointhook__
def test_check():
assert sys.breakpointhook == pytestPDB.set_trace
"""
)
testdir.makepyfile(
"""
def test_nothing(): pass
"""
)
args = (arg,) if arg else ()
result = testdir.runpytest_subprocess(*args)
result.stdout.fnmatch_lines(["*1 passed in *"])
@pytest.mark.skipif(
not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin"
)
def test_pdb_custom_cls(self, testdir, custom_debugger_hook):
p1 = testdir.makepyfile(
"""
def test_nothing():
breakpoint()
"""
)
result = testdir.runpytest_inprocess(
"--pdb", "--pdbcls=_pytest:_CustomDebugger", p1
)
result.stdout.fnmatch_lines(["*CustomDebugger*", "*1 passed*"])
assert custom_debugger_hook == ["init", "set_trace"]
@pytest.mark.parametrize("arg", ["--pdb", ""])
@pytest.mark.skipif(
not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin"
)
def test_environ_custom_class(self, testdir, custom_debugger_hook, arg):
testdir.makeconftest(
"""
import os
import sys
os.environ['PYTHONBREAKPOINT'] = '_pytest._CustomDebugger.set_trace'
def pytest_configure(config):
config._cleanup.append(check_restored)
def check_restored():
assert sys.breakpointhook == sys.__breakpointhook__
def test_check():
import _pytest
assert sys.breakpointhook is _pytest._CustomDebugger.set_trace
"""
)
testdir.makepyfile(
"""
def test_nothing(): pass
"""
)
args = (arg,) if arg else ()
result = testdir.runpytest_subprocess(*args)
result.stdout.fnmatch_lines(["*1 passed in *"])
@pytest.mark.skipif(
not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin"
)
@pytest.mark.skipif(
not _ENVIRON_PYTHONBREAKPOINT == "",
reason="Requires breakpoint() default value",
)
def test_sys_breakpoint_interception(self, testdir):
p1 = testdir.makepyfile(
"""
def test_1():
breakpoint()
"""
)
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.expect("Pdb")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "reading from stdin while output" not in rest
TestPDB.flush(child)
@pytest.mark.skipif(
not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin"
)
def test_pdb_not_altered(self, testdir):
p1 = testdir.makepyfile(
"""
import pdb
def test_1():
pdb.set_trace()
"""
)
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.expect("Pdb")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "reading from stdin while output" not in rest
TestPDB.flush(child)
class TestTraceOption:
def test_trace_sets_breakpoint(self, testdir):
p1 = testdir.makepyfile(
"""
def test_1():
assert True
"""
)
child = testdir.spawn_pytest("--trace " + str(p1))
child.expect("test_1")
child.expect("Pdb")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 passed" in rest
assert "reading from stdin while output" not in rest
TestPDB.flush(child)
def test_trace_against_yield_test(self, testdir):
p1 = testdir.makepyfile(
"""
def is_equal(a, b):
assert a == b
def test_1():
yield is_equal, 1, 1
"""
)
child = testdir.spawn_pytest("--trace " + str(p1))
child.expect("is_equal")
child.expect("Pdb")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 passed" in rest
assert "reading from stdin while output" not in rest
TestPDB.flush(child)
| ddboline/pytest | testing/test_pdb.py | Python | mit | 23,373 |
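# Usage sketch for the test_pdb.py tests above (illustrative only): the options
# exercised by those tests map onto ordinary command-line invocations, e.g.
#   pytest --pdb                                 # enter the debugger on failures/errors
#   pytest --trace                               # break at the start of every test
#   pytest --pdb --pdbcls=custom_pdb:CustomPdb   # plug in a custom debugger class
# The module:class form for --pdbcls mirrors the custom_pdb example tested above.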
from django.db import models
class Club(models.Model):
"""
동아리 정보
"""
class Meta:
verbose_name = '동아리 정보'
verbose_name_plural = '동아리 정보(들)'
def __str__(self):
return str(self.name)
name = models.CharField(
max_length=63,
verbose_name='동아리 이름',
)
pages = models.TextField(
blank=True,
verbose_name='동아리 페이스북/유튜브 페이지 주소',
)
one_line_intro = models.TextField(
blank=True,
verbose_name='동아리 한 줄 소개(메인 화면)',
)
intro = models.TextField(
verbose_name='동아리 소개',
)
is_band = models.BooleanField(
default=False,
verbose_name='밴드 여부',
)
video_url1 = models.CharField(
blank=True,
max_length=63,
verbose_name='동아리 소개 비디오 주소 #1',
)
video_url2 = models.CharField(
blank=True,
max_length=63,
verbose_name='동아리 소개 비디오 주소 #2',
)
class Image(models.Model):
"""
동아리 홍보 사진
"""
class Meta:
verbose_name = '동아리 홍보 사진'
verbose_name_plural = '동아리 홍보 사진(들)'
def __str__(self):
return self.club.name
    club = models.ForeignKey(
        Club,
        on_delete=models.CASCADE,  # explicit; matches the implicit default of older Django
        related_name='images',
        verbose_name='동아리',
    )
image = models.FileField(
null=False,
upload_to='ot/',
verbose_name='홍보 사진',
)
is_main = models.BooleanField(
default=False,
verbose_name='메인 화면에 올라갈 사진인가 (하나만 가능)',
)
| hangpark/kaistusc | apps/ot/models/club.py | Python | bsd-2-clause | 1,725 |
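# Usage sketch for the club models above (illustrative only; the import path is
# inferred from the repository layout `apps/ot/models/club.py` and may differ).
from apps.ot.models.club import Club
def main_image_urls_for_bands():
    """Map each band-type club name to the URL of its main promo image, if any."""
    urls = {}
    for club in Club.objects.filter(is_band=True):
        main = club.images.filter(is_main=True).first()  # reverse FK via related_name
        urls[club.name] = main.image.url if main else None
    return urls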
from __future__ import absolute_import
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from navigation.api import register_top_menu
from navigation.api import register_links
from project_setup.api import register_setup
from project_tools.api import register_tool
from .conf.settings import SIDE_BAR_SEARCH, DISABLE_HOME_VIEW
__author__ = 'Roberto Rosario'
__copyright__ = 'Copyright 2012 Roberto Rosario'
__credits__ = ['Roberto Rosario',]
__license__ = 'GPL'
__maintainer__ = 'Roberto Rosario'
__email__ = '[email protected]'
__status__ = 'Production'
__version_info__ = {
'major': 1,
'minor': 0,
'micro': 0,
'releaselevel': 'alpha',
'serial': 0
}
def is_superuser(context):
return context['request'].user.is_staff or context['request'].user.is_superuser
maintenance_menu = {'text': _(u'maintenance'), 'view': 'maintenance_menu', 'famfam': 'wrench', 'icon': 'wrench.png'}
statistics = {'text': _(u'statistics'), 'view': 'statistics', 'famfam': 'table', 'icon': 'blackboard_sum.png', 'condition': is_superuser, 'children_view_regex': [r'statistics']}
diagnostics = {'text': _(u'diagnostics'), 'view': 'diagnostics', 'famfam': 'pill', 'icon': 'pill.png'}
sentry = {'text': _(u'sentry'), 'view': 'sentry', 'famfam': 'bug', 'icon': 'bug.png', 'condition': is_superuser}
admin_site = {'text': _(u'admin site'), 'view': 'admin:index', 'famfam': 'keyboard', 'icon': 'keyboard.png', 'condition': is_superuser}
if not DISABLE_HOME_VIEW:
register_top_menu('home', link={'text': _(u'home'), 'view': 'home', 'famfam': 'house'}, position=0)
if not SIDE_BAR_SEARCH:
register_top_menu('search', link={'text': _(u'search'), 'view': 'search', 'famfam': 'zoom'}, children_path_regex=[r'^search/'])
def get_version():
'''
Return the formatted version information
'''
vers = ['%(major)i.%(minor)i' % __version_info__, ]
if __version_info__['micro']:
vers.append('.%(micro)i' % __version_info__)
if __version_info__['releaselevel'] != 'final':
vers.append('%(releaselevel)s%(serial)i' % __version_info__)
return ''.join(vers)
__version__ = get_version()
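# With the __version_info__ above (1.0.0, releaselevel 'alpha', serial 0),
# get_version() skips the zero micro part and returns '1.0alpha0'.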
if 'django.contrib.admin' in settings.INSTALLED_APPS:
register_setup(admin_site)
register_tool(maintenance_menu)
register_tool(statistics)
register_tool(diagnostics)
if 'sentry' in settings.INSTALLED_APPS:
register_tool(sentry)
| rosarior/mayan | apps/main/__init__.py | Python | gpl-3.0 | 2,420 |
# -*- coding: utf-8 -*-
from .schema import Schema, SchemaOpts
__version__ = '0.7.0'
__author__ = 'Steven Loria'
__license__ = 'MIT'
__all__ = (
'Schema',
'SchemaOpts',
)
| Tim-Erwin/marshmallow-jsonapi | marshmallow_jsonapi/__init__.py | Python | mit | 181 |
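# Usage sketch for the marshmallow_jsonapi package above (illustrative only; the
# resource type and field names are assumptions, not taken from the source).
from marshmallow import fields
from marshmallow_jsonapi import Schema
class ArticleSchema(Schema):
    id = fields.Str()
    title = fields.Str()
    class Meta:
        type_ = 'articles'  # JSON API resource type used in the serialized envelope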
import numpy as np
import matplotlib.pyplot as plt
import random
#x = [1,2,3,4]
#y = [1,2,1,1]
random.seed(0)
x = np.array([0.0, 10.0, 20.0, 30.0, 40.0, 50.0])
y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
z = np.polyfit(x, y, 3)
p = np.poly1d(z)
print "coeffs:", z
print "P3: ", p
print "P(0.5)", p(5)
for j in range(10):
#show predictions
for i in range(10):
xp = np.linspace(0, x[-1]+i, 100)
_ = plt.plot(x, y, '.', xp, p(xp), '-')
plt.ylim(-2,2)
filename = "img%03d.png"% (x[-1]+i)
plt.savefig(filename)
#plt.show()
#add some new positions and recalc predictor
x = np.append(x,x[-1]+10)
y = np.append(y,y[-1] + random.uniform(-0.2, 0.2))
#y = np.append(y,y[-1] + 0.05)
z = np.polyfit(x[-4:], y[-4:], 3)
p = np.poly1d(z)
    print(x)
    print(y)
| squeakus/copresence | debug/polysim2.py | Python | gpl-2.0 | 830 |
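# Note on polysim2.py above: np.polyfit(x, y, 3) returns the cubic's coefficients
# [a3, a2, a1, a0] (highest power first) fitted by least squares, and np.poly1d
# wraps them so that p(t) evaluates a3*t**3 + a2*t**2 + a1*t + a0.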
import json
from ._BaseModel import BaseModel
from .DiffOrderBook import DiffOrderBook
from .Order import Order
from .OrderBook import OrderBook
from .Trade import Trade
class Stream(BaseModel):
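    """Typed view over one raw websocket payload (event, channel and JSON 'data')."""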
def __init__(self, timestamp, datetime, payload):
self._timestamp = timestamp
self._datetime = datetime
self.event = payload['event']
self.channel = payload['channel']
data = json.loads(payload['data'])
if self.channel.startswith('live_trades'):
self.book = self._get_book(2)
self.data = self._build_data_object(data, Trade)
elif self.channel.startswith('order_book'):
self.book = self._get_book(2)
self.data = self._build_data_object(data, OrderBook)
elif self.channel.startswith('diff_order_book'):
self.book = self._get_book(3)
self.data = self._build_data_object(data, DiffOrderBook)
elif self.channel.startswith('live_orders'):
self.book = self._get_book(2)
self.data = self._build_data_object(data, Order)
def _get_book(self, length):
if len(self.channel.split('_')) == (length + 1):
return self.channel.split('_')[-1]
elif len(self.channel.split('_')) == length:
return 'btcusd'
    def _build_data_object(self, data, cls):
        return cls(self._timestamp, self._datetime, self.book, data)
def __repr__(self):
return "Stream({Stream})".format(
Stream=self._repr('event', 'channel', 'book')
)
| oxsoftdev/bitstampws | bitstampws/models/Stream.py | Python | mit | 1,556 |
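# Usage sketch for the Stream model above (illustrative only; the import path and
# the keys inside the 'data' payload are assumptions, not taken from the source).
import json
from datetime import datetime
from bitstampws.models.Stream import Stream
payload = {
    'event': 'trade',
    'channel': 'live_trades_btceur',  # suffix after the channel name selects the book
    'data': json.dumps({'id': 1, 'price': 100.0, 'amount': 0.5}),
}
stream = Stream(1514764800, datetime(2018, 1, 1), payload)
print(stream.book)  # 'btceur', parsed from the channel name
print(stream.data)  # a Trade object built from the decoded 'data' field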
# validate.py
# A Validator object
# Copyright (C) 2005-2014:
# (name) : (email)
# Michael Foord: fuzzyman AT voidspace DOT org DOT uk
# Mark Andrews: mark AT la-la DOT com
# Nicola Larosa: nico AT tekNico DOT net
# Rob Dennis: rdennis AT gmail DOT com
# Eli Courtwright: eli AT courtwright DOT org
# This software is licensed under the terms of the BSD license.
# http://opensource.org/licenses/BSD-3-Clause
# ConfigObj 5 - main repository for documentation and issue tracking:
# https://github.com/DiffSK/configobj
"""
The Validator object is used to check that supplied values
conform to a specification.
The value can be supplied as a string - e.g. from a config file.
In this case the check will also *convert* the value to
the required type. This allows you to add validation
as a transparent layer to access data stored as strings.
The validation checks that the data is correct *and*
converts it to the expected type.
Some standard checks are provided for basic data types.
Additional checks are easy to write. They can be
provided when the ``Validator`` is instantiated or
added afterwards.
The standard functions work with the following basic data types :
* integers
* floats
* booleans
* strings
* ip_addr
plus lists of these datatypes
Adding additional checks is done through coding simple functions.
The full set of standard checks are :
* 'integer': matches integer values (including negative)
Takes optional 'min' and 'max' arguments : ::
integer()
integer(3, 9) # any value from 3 to 9
integer(min=0) # any positive value
integer(max=9)
* 'float': matches float values
Has the same parameters as the integer check.
* 'boolean': matches boolean values - ``True`` or ``False``
Acceptable string values for True are :
true, on, yes, 1
Acceptable string values for False are :
false, off, no, 0
Any other value raises an error.
* 'ip_addr': matches an Internet Protocol address, v.4, represented
by a dotted-quad string, i.e. '1.2.3.4'.
* 'string': matches any string.
Takes optional keyword args 'min' and 'max'
to specify min and max lengths of the string.
* 'list': matches any list.
Takes optional keyword args 'min', and 'max' to specify min and
max sizes of the list. (Always returns a list.)
* 'tuple': matches any tuple.
Takes optional keyword args 'min', and 'max' to specify min and
max sizes of the tuple. (Always returns a tuple.)
* 'int_list': Matches a list of integers.
Takes the same arguments as list.
* 'float_list': Matches a list of floats.
Takes the same arguments as list.
* 'bool_list': Matches a list of boolean values.
Takes the same arguments as list.
* 'ip_addr_list': Matches a list of IP addresses.
Takes the same arguments as list.
* 'string_list': Matches a list of strings.
Takes the same arguments as list.
* 'mixed_list': Matches a list with different types in
specific positions. List size must match
the number of arguments.
Each position can be one of :
'integer', 'float', 'ip_addr', 'string', 'boolean'
So to specify a list with two strings followed
by two integers, you write the check as : ::
mixed_list('string', 'string', 'integer', 'integer')
* 'pass': This check matches everything ! It never fails
and the value is unchanged.
It is also the default if no check is specified.
* 'option': This check matches any from a list of options.
You specify this check with : ::
option('option 1', 'option 2', 'option 3')
You can supply a default value (returned if no value is supplied)
using the default keyword argument.
You specify a list argument for default using a list constructor syntax in
the check : ::
checkname(arg1, arg2, default=list('val 1', 'val 2', 'val 3'))
A badly formatted set of arguments will raise a ``VdtParamError``.
"""
__version__ = '1.0.1'
__all__ = (
'__version__',
'dottedQuadToNum',
'numToDottedQuad',
'ValidateError',
'VdtUnknownCheckError',
'VdtParamError',
'VdtTypeError',
'VdtValueError',
'VdtValueTooSmallError',
'VdtValueTooBigError',
'VdtValueTooShortError',
'VdtValueTooLongError',
'VdtMissingValue',
'Validator',
'is_integer',
'is_float',
'is_boolean',
'is_list',
'is_tuple',
'is_ip_addr',
'is_string',
'is_int_list',
'is_bool_list',
'is_float_list',
'is_string_list',
'is_ip_addr_list',
'is_mixed_list',
'is_option',
'__docformat__',
)
import re
import sys
from pprint import pprint
#TODO - #21 - six is part of the repo now, but we didn't switch over to it here
# this could be replaced if six is used for compatibility, or there are no
# more assertions about items being a string
if sys.version_info < (3,):
string_type = basestring
else:
string_type = str
# so tests that care about unicode on 2.x can specify unicode, and the same
# tests when run on 3.x won't complain about a undefined name "unicode"
# since all strings are unicode on 3.x we just want to pass it through
# unchanged
unicode = lambda x: x
# in python 3, all ints are equivalent to python 2 longs, and they'll
# never show "L" in the repr
long = int
_list_arg = re.compile(r'''
(?:
([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*list\(
(
(?:
\s*
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\s\)][^,\)]*?) # unquoted
)
\s*,\s*
)*
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\s\)][^,\)]*?) # unquoted
)? # last one
)
\)
)
''', re.VERBOSE | re.DOTALL) # two groups
_list_members = re.compile(r'''
(
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\s=][^,=]*?) # unquoted
)
(?:
(?:\s*,\s*)|(?:\s*$) # comma
)
''', re.VERBOSE | re.DOTALL) # one group
_paramstring = r'''
(?:
(
(?:
[a-zA-Z_][a-zA-Z0-9_]*\s*=\s*list\(
(?:
\s*
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\s\)][^,\)]*?) # unquoted
)
\s*,\s*
)*
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\s\)][^,\)]*?) # unquoted
)? # last one
\)
)|
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\s=][^,=]*?)| # unquoted
(?: # keyword argument
[a-zA-Z_][a-zA-Z0-9_]*\s*=\s*
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\s=][^,=]*?) # unquoted
)
)
)
)
(?:
(?:\s*,\s*)|(?:\s*$) # comma
)
)
'''
_matchstring = '^%s*' % _paramstring
# Python pre 2.2.1 doesn't have bool
try:
bool
except NameError:
def bool(val):
"""Simple boolean equivalent function. """
if val:
return 1
else:
return 0
def dottedQuadToNum(ip):
"""
Convert decimal dotted quad string to long integer
>>> int(dottedQuadToNum('1 '))
1
>>> int(dottedQuadToNum(' 1.2'))
16777218
>>> int(dottedQuadToNum(' 1.2.3 '))
16908291
>>> int(dottedQuadToNum('1.2.3.4'))
16909060
>>> dottedQuadToNum('255.255.255.255')
4294967295
>>> dottedQuadToNum('255.255.255.256')
Traceback (most recent call last):
ValueError: Not a good dotted-quad IP: 255.255.255.256
"""
# import here to avoid it when ip_addr values are not used
import socket, struct
try:
return struct.unpack('!L',
socket.inet_aton(ip.strip()))[0]
except socket.error:
raise ValueError('Not a good dotted-quad IP: %s' % ip)
return
def numToDottedQuad(num):
"""
Convert int or long int to dotted quad string
>>> numToDottedQuad(long(-1))
Traceback (most recent call last):
ValueError: Not a good numeric IP: -1
>>> numToDottedQuad(long(1))
'0.0.0.1'
>>> numToDottedQuad(long(16777218))
'1.0.0.2'
>>> numToDottedQuad(long(16908291))
'1.2.0.3'
>>> numToDottedQuad(long(16909060))
'1.2.3.4'
>>> numToDottedQuad(long(4294967295))
'255.255.255.255'
>>> numToDottedQuad(long(4294967296))
Traceback (most recent call last):
ValueError: Not a good numeric IP: 4294967296
>>> numToDottedQuad(-1)
Traceback (most recent call last):
ValueError: Not a good numeric IP: -1
>>> numToDottedQuad(1)
'0.0.0.1'
>>> numToDottedQuad(16777218)
'1.0.0.2'
>>> numToDottedQuad(16908291)
'1.2.0.3'
>>> numToDottedQuad(16909060)
'1.2.3.4'
>>> numToDottedQuad(4294967295)
'255.255.255.255'
>>> numToDottedQuad(4294967296)
Traceback (most recent call last):
ValueError: Not a good numeric IP: 4294967296
"""
# import here to avoid it when ip_addr values are not used
import socket, struct
# no need to intercept here, 4294967295L is fine
if num > long(4294967295) or num < 0:
raise ValueError('Not a good numeric IP: %s' % num)
try:
return socket.inet_ntoa(
struct.pack('!L', long(num)))
except (socket.error, struct.error, OverflowError):
raise ValueError('Not a good numeric IP: %s' % num)
class ValidateError(Exception):
"""
This error indicates that the check failed.
It can be the base class for more specific errors.
Any check function that fails ought to raise this error.
(or a subclass)
>>> raise ValidateError
Traceback (most recent call last):
ValidateError
"""
class VdtMissingValue(ValidateError):
"""No value was supplied to a check that needed one."""
class VdtUnknownCheckError(ValidateError):
"""An unknown check function was requested"""
def __init__(self, value):
"""
>>> raise VdtUnknownCheckError('yoda')
Traceback (most recent call last):
VdtUnknownCheckError: the check "yoda" is unknown.
"""
ValidateError.__init__(self, 'the check "%s" is unknown.' % (value,))
class VdtParamError(SyntaxError):
"""An incorrect parameter was passed"""
def __init__(self, name, value):
"""
>>> raise VdtParamError('yoda', 'jedi')
Traceback (most recent call last):
VdtParamError: passed an incorrect value "jedi" for parameter "yoda".
"""
SyntaxError.__init__(self, 'passed an incorrect value "%s" for parameter "%s".' % (value, name))
class VdtTypeError(ValidateError):
"""The value supplied was of the wrong type"""
def __init__(self, value):
"""
>>> raise VdtTypeError('jedi')
Traceback (most recent call last):
VdtTypeError: the value "jedi" is of the wrong type.
"""
ValidateError.__init__(self, 'the value "%s" is of the wrong type.' % (value,))
class VdtValueError(ValidateError):
"""The value supplied was of the correct type, but was not an allowed value."""
def __init__(self, value):
"""
>>> raise VdtValueError('jedi')
Traceback (most recent call last):
VdtValueError: the value "jedi" is unacceptable.
"""
ValidateError.__init__(self, 'the value "%s" is unacceptable.' % (value,))
class VdtValueTooSmallError(VdtValueError):
"""The value supplied was of the correct type, but was too small."""
def __init__(self, value):
"""
>>> raise VdtValueTooSmallError('0')
Traceback (most recent call last):
VdtValueTooSmallError: the value "0" is too small.
"""
ValidateError.__init__(self, 'the value "%s" is too small.' % (value,))
class VdtValueTooBigError(VdtValueError):
"""The value supplied was of the correct type, but was too big."""
def __init__(self, value):
"""
>>> raise VdtValueTooBigError('1')
Traceback (most recent call last):
VdtValueTooBigError: the value "1" is too big.
"""
ValidateError.__init__(self, 'the value "%s" is too big.' % (value,))
class VdtValueTooShortError(VdtValueError):
"""The value supplied was of the correct type, but was too short."""
def __init__(self, value):
"""
>>> raise VdtValueTooShortError('jed')
Traceback (most recent call last):
VdtValueTooShortError: the value "jed" is too short.
"""
ValidateError.__init__(
self,
'the value "%s" is too short.' % (value,))
class VdtValueTooLongError(VdtValueError):
"""The value supplied was of the correct type, but was too long."""
def __init__(self, value):
"""
>>> raise VdtValueTooLongError('jedie')
Traceback (most recent call last):
VdtValueTooLongError: the value "jedie" is too long.
"""
ValidateError.__init__(self, 'the value "%s" is too long.' % (value,))
class Validator(object):
"""
Validator is an object that allows you to register a set of 'checks'.
These checks take input and test that it conforms to the check.
This can also involve converting the value from a string into
the correct datatype.
The ``check`` method takes an input string which configures which
check is to be used and applies that check to a supplied value.
An example input string would be:
'int_range(param1, param2)'
You would then provide something like:
>>> def int_range_check(value, min, max):
... # turn min and max from strings to integers
... min = int(min)
... max = int(max)
... # check that value is of the correct type.
... # possible valid inputs are integers or strings
... # that represent integers
... if not isinstance(value, (int, long, string_type)):
... raise VdtTypeError(value)
... elif isinstance(value, string_type):
... # if we are given a string
... # attempt to convert to an integer
... try:
... value = int(value)
... except ValueError:
... raise VdtValueError(value)
... # check the value is between our constraints
... if not min <= value:
... raise VdtValueTooSmallError(value)
... if not value <= max:
... raise VdtValueTooBigError(value)
... return value
>>> fdict = {'int_range': int_range_check}
>>> vtr1 = Validator(fdict)
>>> vtr1.check('int_range(20, 40)', '30')
30
>>> vtr1.check('int_range(20, 40)', '60')
Traceback (most recent call last):
VdtValueTooBigError: the value "60" is too big.
New functions can be added with : ::
>>> vtr2 = Validator()
>>> vtr2.functions['int_range'] = int_range_check
Or by passing in a dictionary of functions when Validator
is instantiated.
Your functions *can* use keyword arguments,
but the first argument should always be 'value'.
If the function doesn't take additional arguments,
the parentheses are optional in the check.
It can be written with either of : ::
keyword = function_name
keyword = function_name()
The first program to utilise Validator() was Michael Foord's
ConfigObj, an alternative to ConfigParser which supports lists and
can validate a config file using a config schema.
For more details on using Validator with ConfigObj see:
https://configobj.readthedocs.org/en/latest/configobj.html
"""
# this regex does the initial parsing of the checks
_func_re = re.compile(r'(.+?)\((.*)\)', re.DOTALL)
# this regex takes apart keyword arguments
_key_arg = re.compile(r'^([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*(.*)$', re.DOTALL)
# this regex finds keyword=list(....) type values
_list_arg = _list_arg
# this regex takes individual values out of lists - in one pass
_list_members = _list_members
# These regexes check a set of arguments for validity
# and then pull the members out
_paramfinder = re.compile(_paramstring, re.VERBOSE | re.DOTALL)
_matchfinder = re.compile(_matchstring, re.VERBOSE | re.DOTALL)
def __init__(self, functions=None):
"""
>>> vtri = Validator()
"""
self.functions = {
'': self._pass,
'integer': is_integer,
'float': is_float,
'boolean': is_boolean,
'ip_addr': is_ip_addr,
'string': is_string,
'list': is_list,
'tuple': is_tuple,
'int_list': is_int_list,
'float_list': is_float_list,
'bool_list': is_bool_list,
'ip_addr_list': is_ip_addr_list,
'string_list': is_string_list,
'mixed_list': is_mixed_list,
'pass': self._pass,
'option': is_option,
'force_list': force_list,
}
if functions is not None:
self.functions.update(functions)
# tekNico: for use by ConfigObj
self.baseErrorClass = ValidateError
self._cache = {}
def check(self, check, value, missing=False):
"""
Usage: check(check, value)
Arguments:
check: string representing check to apply (including arguments)
value: object to be checked
Returns value, converted to correct type if necessary
If the check fails, raises a ``ValidateError`` subclass.
>>> vtor.check('yoda', '')
Traceback (most recent call last):
VdtUnknownCheckError: the check "yoda" is unknown.
>>> vtor.check('yoda()', '')
Traceback (most recent call last):
VdtUnknownCheckError: the check "yoda" is unknown.
>>> vtor.check('string(default="")', '', missing=True)
''
"""
fun_name, fun_args, fun_kwargs, default = self._parse_with_caching(check)
if missing:
if default is None:
# no information needed here - to be handled by caller
raise VdtMissingValue()
value = self._handle_none(default)
if value is None:
return None
return self._check_value(value, fun_name, fun_args, fun_kwargs)
def _handle_none(self, value):
if value == 'None':
return None
elif value in ("'None'", '"None"'):
# Special case a quoted None
value = self._unquote(value)
return value
def _parse_with_caching(self, check):
if check in self._cache:
fun_name, fun_args, fun_kwargs, default = self._cache[check]
# We call list and dict below to work with *copies* of the data
# rather than the original (which are mutable of course)
fun_args = list(fun_args)
fun_kwargs = dict(fun_kwargs)
else:
fun_name, fun_args, fun_kwargs, default = self._parse_check(check)
fun_kwargs = dict([(str(key), value) for (key, value) in list(fun_kwargs.items())])
self._cache[check] = fun_name, list(fun_args), dict(fun_kwargs), default
return fun_name, fun_args, fun_kwargs, default
def _check_value(self, value, fun_name, fun_args, fun_kwargs):
try:
fun = self.functions[fun_name]
except KeyError:
raise VdtUnknownCheckError(fun_name)
else:
return fun(value, *fun_args, **fun_kwargs)
def _parse_check(self, check):
fun_match = self._func_re.match(check)
if fun_match:
fun_name = fun_match.group(1)
arg_string = fun_match.group(2)
arg_match = self._matchfinder.match(arg_string)
if arg_match is None:
# Bad syntax
raise VdtParamError('Bad syntax in check "%s".' % check)
fun_args = []
fun_kwargs = {}
# pull out args of group 2
for arg in self._paramfinder.findall(arg_string):
# args may need whitespace removing (before removing quotes)
arg = arg.strip()
listmatch = self._list_arg.match(arg)
if listmatch:
key, val = self._list_handle(listmatch)
fun_kwargs[key] = val
continue
keymatch = self._key_arg.match(arg)
if keymatch:
val = keymatch.group(2)
if not val in ("'None'", '"None"'):
# Special case a quoted None
val = self._unquote(val)
fun_kwargs[keymatch.group(1)] = val
continue
fun_args.append(self._unquote(arg))
else:
# allows for function names without (args)
return check, (), {}, None
# Default must be deleted if the value is specified too,
# otherwise the check function will get a spurious "default" keyword arg
default = fun_kwargs.pop('default', None)
return fun_name, fun_args, fun_kwargs, default
def _unquote(self, val):
"""Unquote a value if necessary."""
if (len(val) >= 2) and (val[0] in ("'", '"')) and (val[0] == val[-1]):
val = val[1:-1]
return val
def _list_handle(self, listmatch):
"""Take apart a ``keyword=list('val, 'val')`` type string."""
out = []
name = listmatch.group(1)
args = listmatch.group(2)
for arg in self._list_members.findall(args):
out.append(self._unquote(arg))
return name, out
def _pass(self, value):
"""
Dummy check that always passes
>>> vtor.check('', 0)
0
>>> vtor.check('', '0')
'0'
"""
return value
def get_default_value(self, check):
"""
Given a check, return the default value for the check
(converted to the right type).
If the check doesn't specify a default value then a
``KeyError`` will be raised.
"""
fun_name, fun_args, fun_kwargs, default = self._parse_with_caching(check)
if default is None:
raise KeyError('Check "%s" has no default value.' % check)
value = self._handle_none(default)
if value is None:
return value
return self._check_value(value, fun_name, fun_args, fun_kwargs)
def _is_num_param(names, values, to_float=False):
"""
Return numbers from inputs or raise VdtParamError.
Lets ``None`` pass through.
Pass in keyword argument ``to_float=True`` to
use float for the conversion rather than int.
>>> _is_num_param(('', ''), (0, 1.0))
[0, 1]
>>> _is_num_param(('', ''), (0, 1.0), to_float=True)
[0.0, 1.0]
>>> _is_num_param(('a'), ('a'))
Traceback (most recent call last):
VdtParamError: passed an incorrect value "a" for parameter "a".
"""
fun = to_float and float or int
out_params = []
for (name, val) in zip(names, values):
if val is None:
out_params.append(val)
elif isinstance(val, (int, long, float, string_type)):
try:
out_params.append(fun(val))
except ValueError as e:
raise VdtParamError(name, val)
else:
raise VdtParamError(name, val)
return out_params
# built in checks
# you can override these by setting the appropriate name
# in Validator.functions
# note: if the params are specified wrongly in your input string,
# you will also raise errors.
def is_integer(value, min=None, max=None):
"""
A check that tests that a given value is an integer (int, or long)
and optionally, between bounds. A negative value is accepted, while
a float will fail.
If the value is a string, then the conversion is done - if possible.
Otherwise a VdtError is raised.
>>> vtor.check('integer', '-1')
-1
>>> vtor.check('integer', '0')
0
>>> vtor.check('integer', 9)
9
>>> vtor.check('integer', 'a')
Traceback (most recent call last):
VdtTypeError: the value "a" is of the wrong type.
>>> vtor.check('integer', '2.2')
Traceback (most recent call last):
VdtTypeError: the value "2.2" is of the wrong type.
>>> vtor.check('integer(10)', '20')
20
>>> vtor.check('integer(max=20)', '15')
15
>>> vtor.check('integer(10)', '9')
Traceback (most recent call last):
VdtValueTooSmallError: the value "9" is too small.
>>> vtor.check('integer(10)', 9)
Traceback (most recent call last):
VdtValueTooSmallError: the value "9" is too small.
>>> vtor.check('integer(max=20)', '35')
Traceback (most recent call last):
VdtValueTooBigError: the value "35" is too big.
>>> vtor.check('integer(max=20)', 35)
Traceback (most recent call last):
VdtValueTooBigError: the value "35" is too big.
>>> vtor.check('integer(0, 9)', False)
0
"""
(min_val, max_val) = _is_num_param(('min', 'max'), (min, max))
if not isinstance(value, (int, long, string_type)):
raise VdtTypeError(value)
if isinstance(value, string_type):
# if it's a string - does it represent an integer ?
try:
value = int(value)
except ValueError:
raise VdtTypeError(value)
if (min_val is not None) and (value < min_val):
raise VdtValueTooSmallError(value)
if (max_val is not None) and (value > max_val):
raise VdtValueTooBigError(value)
return value
def is_float(value, min=None, max=None):
"""
A check that tests that a given value is a float
(an integer will be accepted), and optionally - that it is between bounds.
If the value is a string, then the conversion is done - if possible.
Otherwise a VdtError is raised.
This can accept negative values.
>>> vtor.check('float', '2')
2.0
From now on we multiply the value to avoid comparing decimals
>>> vtor.check('float', '-6.8') * 10
-68.0
>>> vtor.check('float', '12.2') * 10
122.0
>>> vtor.check('float', 8.4) * 10
84.0
>>> vtor.check('float', 'a')
Traceback (most recent call last):
VdtTypeError: the value "a" is of the wrong type.
>>> vtor.check('float(10.1)', '10.2') * 10
102.0
>>> vtor.check('float(max=20.2)', '15.1') * 10
151.0
>>> vtor.check('float(10.0)', '9.0')
Traceback (most recent call last):
VdtValueTooSmallError: the value "9.0" is too small.
>>> vtor.check('float(max=20.0)', '35.0')
Traceback (most recent call last):
VdtValueTooBigError: the value "35.0" is too big.
"""
(min_val, max_val) = _is_num_param(
('min', 'max'), (min, max), to_float=True)
if not isinstance(value, (int, long, float, string_type)):
raise VdtTypeError(value)
if not isinstance(value, float):
# if it's a string - does it represent a float ?
try:
value = float(value)
except ValueError:
raise VdtTypeError(value)
if (min_val is not None) and (value < min_val):
raise VdtValueTooSmallError(value)
if (max_val is not None) and (value > max_val):
raise VdtValueTooBigError(value)
return value
bool_dict = {
True: True, 'on': True, '1': True, 'true': True, 'yes': True,
False: False, 'off': False, '0': False, 'false': False, 'no': False,
}
def is_boolean(value):
"""
Check if the value represents a boolean.
>>> vtor.check('boolean', 0)
0
>>> vtor.check('boolean', False)
0
>>> vtor.check('boolean', '0')
0
>>> vtor.check('boolean', 'off')
0
>>> vtor.check('boolean', 'false')
0
>>> vtor.check('boolean', 'no')
0
>>> vtor.check('boolean', 'nO')
0
>>> vtor.check('boolean', 'NO')
0
>>> vtor.check('boolean', 1)
1
>>> vtor.check('boolean', True)
1
>>> vtor.check('boolean', '1')
1
>>> vtor.check('boolean', 'on')
1
>>> vtor.check('boolean', 'true')
1
>>> vtor.check('boolean', 'yes')
1
>>> vtor.check('boolean', 'Yes')
1
>>> vtor.check('boolean', 'YES')
1
>>> vtor.check('boolean', '')
Traceback (most recent call last):
VdtTypeError: the value "" is of the wrong type.
>>> vtor.check('boolean', 'up')
Traceback (most recent call last):
VdtTypeError: the value "up" is of the wrong type.
"""
if isinstance(value, string_type):
try:
return bool_dict[value.lower()]
except KeyError:
raise VdtTypeError(value)
# we do an equality test rather than an identity test
    # this ensures Python 2.2 compatibility
# and allows 0 and 1 to represent True and False
if value == False:
return False
elif value == True:
return True
else:
raise VdtTypeError(value)
def is_ip_addr(value):
"""
Check that the supplied value is an Internet Protocol address, v.4,
represented by a dotted-quad string, i.e. '1.2.3.4'.
>>> vtor.check('ip_addr', '1 ')
'1'
>>> vtor.check('ip_addr', ' 1.2')
'1.2'
>>> vtor.check('ip_addr', ' 1.2.3 ')
'1.2.3'
>>> vtor.check('ip_addr', '1.2.3.4')
'1.2.3.4'
>>> vtor.check('ip_addr', '0.0.0.0')
'0.0.0.0'
>>> vtor.check('ip_addr', '255.255.255.255')
'255.255.255.255'
>>> vtor.check('ip_addr', '255.255.255.256')
Traceback (most recent call last):
VdtValueError: the value "255.255.255.256" is unacceptable.
>>> vtor.check('ip_addr', '1.2.3.4.5')
Traceback (most recent call last):
VdtValueError: the value "1.2.3.4.5" is unacceptable.
>>> vtor.check('ip_addr', 0)
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
"""
if not isinstance(value, string_type):
raise VdtTypeError(value)
value = value.strip()
try:
dottedQuadToNum(value)
except ValueError:
raise VdtValueError(value)
return value
def is_list(value, min=None, max=None):
"""
Check that the value is a list of values.
You can optionally specify the minimum and maximum number of members.
It does no check on list members.
>>> vtor.check('list', ())
[]
>>> vtor.check('list', [])
[]
>>> vtor.check('list', (1, 2))
[1, 2]
>>> vtor.check('list', [1, 2])
[1, 2]
>>> vtor.check('list(3)', (1, 2))
Traceback (most recent call last):
VdtValueTooShortError: the value "(1, 2)" is too short.
>>> vtor.check('list(max=5)', (1, 2, 3, 4, 5, 6))
Traceback (most recent call last):
VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long.
>>> vtor.check('list(min=3, max=5)', (1, 2, 3, 4))
[1, 2, 3, 4]
>>> vtor.check('list', 0)
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
>>> vtor.check('list', '12')
Traceback (most recent call last):
VdtTypeError: the value "12" is of the wrong type.
"""
(min_len, max_len) = _is_num_param(('min', 'max'), (min, max))
if isinstance(value, string_type):
raise VdtTypeError(value)
try:
num_members = len(value)
except TypeError:
raise VdtTypeError(value)
if min_len is not None and num_members < min_len:
raise VdtValueTooShortError(value)
if max_len is not None and num_members > max_len:
raise VdtValueTooLongError(value)
return list(value)
def is_tuple(value, min=None, max=None):
"""
Check that the value is a tuple of values.
You can optionally specify the minimum and maximum number of members.
It does no check on members.
>>> vtor.check('tuple', ())
()
>>> vtor.check('tuple', [])
()
>>> vtor.check('tuple', (1, 2))
(1, 2)
>>> vtor.check('tuple', [1, 2])
(1, 2)
>>> vtor.check('tuple(3)', (1, 2))
Traceback (most recent call last):
VdtValueTooShortError: the value "(1, 2)" is too short.
>>> vtor.check('tuple(max=5)', (1, 2, 3, 4, 5, 6))
Traceback (most recent call last):
VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long.
>>> vtor.check('tuple(min=3, max=5)', (1, 2, 3, 4))
(1, 2, 3, 4)
>>> vtor.check('tuple', 0)
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
>>> vtor.check('tuple', '12')
Traceback (most recent call last):
VdtTypeError: the value "12" is of the wrong type.
"""
return tuple(is_list(value, min, max))
def is_string(value, min=None, max=None):
"""
Check that the supplied value is a string.
You can optionally specify the minimum and maximum number of members.
>>> vtor.check('string', '0')
'0'
>>> vtor.check('string', 0)
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
>>> vtor.check('string(2)', '12')
'12'
>>> vtor.check('string(2)', '1')
Traceback (most recent call last):
VdtValueTooShortError: the value "1" is too short.
>>> vtor.check('string(min=2, max=3)', '123')
'123'
>>> vtor.check('string(min=2, max=3)', '1234')
Traceback (most recent call last):
VdtValueTooLongError: the value "1234" is too long.
"""
if not isinstance(value, string_type):
raise VdtTypeError(value)
(min_len, max_len) = _is_num_param(('min', 'max'), (min, max))
try:
num_members = len(value)
except TypeError:
raise VdtTypeError(value)
if min_len is not None and num_members < min_len:
raise VdtValueTooShortError(value)
if max_len is not None and num_members > max_len:
raise VdtValueTooLongError(value)
return value
def is_int_list(value, min=None, max=None):
"""
Check that the value is a list of integers.
You can optionally specify the minimum and maximum number of members.
Each list member is checked that it is an integer.
>>> vtor.check('int_list', ())
[]
>>> vtor.check('int_list', [])
[]
>>> vtor.check('int_list', (1, 2))
[1, 2]
>>> vtor.check('int_list', [1, 2])
[1, 2]
>>> vtor.check('int_list', [1, 'a'])
Traceback (most recent call last):
VdtTypeError: the value "a" is of the wrong type.
"""
return [is_integer(mem) for mem in is_list(value, min, max)]
def is_bool_list(value, min=None, max=None):
"""
Check that the value is a list of booleans.
You can optionally specify the minimum and maximum number of members.
Each list member is checked that it is a boolean.
>>> vtor.check('bool_list', ())
[]
>>> vtor.check('bool_list', [])
[]
>>> check_res = vtor.check('bool_list', (True, False))
>>> check_res == [True, False]
1
>>> check_res = vtor.check('bool_list', [True, False])
>>> check_res == [True, False]
1
>>> vtor.check('bool_list', [True, 'a'])
Traceback (most recent call last):
VdtTypeError: the value "a" is of the wrong type.
"""
return [is_boolean(mem) for mem in is_list(value, min, max)]
def is_float_list(value, min=None, max=None):
"""
Check that the value is a list of floats.
You can optionally specify the minimum and maximum number of members.
Each list member is checked that it is a float.
>>> vtor.check('float_list', ())
[]
>>> vtor.check('float_list', [])
[]
>>> vtor.check('float_list', (1, 2.0))
[1.0, 2.0]
>>> vtor.check('float_list', [1, 2.0])
[1.0, 2.0]
>>> vtor.check('float_list', [1, 'a'])
Traceback (most recent call last):
VdtTypeError: the value "a" is of the wrong type.
"""
return [is_float(mem) for mem in is_list(value, min, max)]
def is_string_list(value, min=None, max=None):
"""
Check that the value is a list of strings.
You can optionally specify the minimum and maximum number of members.
Each list member is checked that it is a string.
>>> vtor.check('string_list', ())
[]
>>> vtor.check('string_list', [])
[]
>>> vtor.check('string_list', ('a', 'b'))
['a', 'b']
>>> vtor.check('string_list', ['a', 1])
Traceback (most recent call last):
VdtTypeError: the value "1" is of the wrong type.
>>> vtor.check('string_list', 'hello')
Traceback (most recent call last):
VdtTypeError: the value "hello" is of the wrong type.
"""
if isinstance(value, string_type):
raise VdtTypeError(value)
return [is_string(mem) for mem in is_list(value, min, max)]
def is_ip_addr_list(value, min=None, max=None):
"""
Check that the value is a list of IP addresses.
You can optionally specify the minimum and maximum number of members.
Each list member is checked that it is an IP address.
>>> vtor.check('ip_addr_list', ())
[]
>>> vtor.check('ip_addr_list', [])
[]
>>> vtor.check('ip_addr_list', ('1.2.3.4', '5.6.7.8'))
['1.2.3.4', '5.6.7.8']
>>> vtor.check('ip_addr_list', ['a'])
Traceback (most recent call last):
VdtValueError: the value "a" is unacceptable.
"""
return [is_ip_addr(mem) for mem in is_list(value, min, max)]
def force_list(value, min=None, max=None):
"""
Check that a value is a list, coercing strings into
a list with one member. Useful where users forget the
trailing comma that turns a single value into a list.
You can optionally specify the minimum and maximum number of members.
    A minimum greater than one will fail if the user only supplies a
string.
>>> vtor.check('force_list', ())
[]
>>> vtor.check('force_list', [])
[]
>>> vtor.check('force_list', 'hello')
['hello']
"""
if not isinstance(value, (list, tuple)):
value = [value]
return is_list(value, min, max)
fun_dict = {
'integer': is_integer,
'float': is_float,
'ip_addr': is_ip_addr,
'string': is_string,
'boolean': is_boolean,
}
def is_mixed_list(value, *args):
"""
Check that the value is a list.
Allow specifying the type of each member.
Work on lists of specific lengths.
You specify each member as a positional argument specifying type
Each type should be one of the following strings :
'integer', 'float', 'ip_addr', 'string', 'boolean'
So you can specify a list of two strings, followed by
two integers as :
mixed_list('string', 'string', 'integer', 'integer')
The length of the list must match the number of positional
arguments you supply.
>>> mix_str = "mixed_list('integer', 'float', 'ip_addr', 'string', 'boolean')"
>>> check_res = vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', True))
>>> check_res == [1, 2.0, '1.2.3.4', 'a', True]
1
>>> check_res = vtor.check(mix_str, ('1', '2.0', '1.2.3.4', 'a', 'True'))
>>> check_res == [1, 2.0, '1.2.3.4', 'a', True]
1
>>> vtor.check(mix_str, ('b', 2.0, '1.2.3.4', 'a', True))
Traceback (most recent call last):
VdtTypeError: the value "b" is of the wrong type.
>>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a'))
Traceback (most recent call last):
VdtValueTooShortError: the value "(1, 2.0, '1.2.3.4', 'a')" is too short.
>>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', 1, 'b'))
Traceback (most recent call last):
VdtValueTooLongError: the value "(1, 2.0, '1.2.3.4', 'a', 1, 'b')" is too long.
>>> vtor.check(mix_str, 0)
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
>>> vtor.check('mixed_list("yoda")', ('a'))
Traceback (most recent call last):
VdtParamError: passed an incorrect value "KeyError('yoda',)" for parameter "'mixed_list'"
"""
try:
length = len(value)
except TypeError:
raise VdtTypeError(value)
if length < len(args):
raise VdtValueTooShortError(value)
elif length > len(args):
raise VdtValueTooLongError(value)
try:
return [fun_dict[arg](val) for arg, val in zip(args, value)]
except KeyError as e:
raise VdtParamError('mixed_list', e)
def is_option(value, *options):
"""
This check matches the value to any of a set of options.
>>> vtor.check('option("yoda", "jedi")', 'yoda')
'yoda'
>>> vtor.check('option("yoda", "jedi")', 'jed')
Traceback (most recent call last):
VdtValueError: the value "jed" is unacceptable.
>>> vtor.check('option("yoda", "jedi")', 0)
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
"""
if not isinstance(value, string_type):
raise VdtTypeError(value)
if not value in options:
raise VdtValueError(value)
return value
def _test(value, *args, **keywargs):
"""
A function that exists for test purposes.
>>> checks = [
... '3, 6, min=1, max=3, test=list(a, b, c)',
... '3',
... '3, 6',
... '3,',
... 'min=1, test="a b c"',
... 'min=5, test="a, b, c"',
... 'min=1, max=3, test="a, b, c"',
... 'min=-100, test=-99',
... 'min=1, max=3',
... '3, 6, test="36"',
... '3, 6, test="a, b, c"',
... '3, max=3, test=list("a", "b", "c")',
... '''3, max=3, test=list("'a'", 'b', "x=(c)")''',
... "test='x=fish(3)'",
... ]
>>> v = Validator({'test': _test})
>>> for entry in checks:
... pprint(v.check(('test(%s)' % entry), 3))
(3, ('3', '6'), {'max': '3', 'min': '1', 'test': ['a', 'b', 'c']})
(3, ('3',), {})
(3, ('3', '6'), {})
(3, ('3',), {})
(3, (), {'min': '1', 'test': 'a b c'})
(3, (), {'min': '5', 'test': 'a, b, c'})
(3, (), {'max': '3', 'min': '1', 'test': 'a, b, c'})
(3, (), {'min': '-100', 'test': '-99'})
(3, (), {'max': '3', 'min': '1'})
(3, ('3', '6'), {'test': '36'})
(3, ('3', '6'), {'test': 'a, b, c'})
(3, ('3',), {'max': '3', 'test': ['a', 'b', 'c']})
(3, ('3',), {'max': '3', 'test': ["'a'", 'b', 'x=(c)']})
(3, (), {'test': 'x=fish(3)'})
>>> v = Validator()
>>> v.check('integer(default=6)', '3')
3
>>> v.check('integer(default=6)', None, True)
6
>>> v.get_default_value('integer(default=6)')
6
>>> v.get_default_value('float(default=6)')
6.0
>>> v.get_default_value('pass(default=None)')
>>> v.get_default_value("string(default='None')")
'None'
>>> v.get_default_value('pass')
Traceback (most recent call last):
KeyError: 'Check "pass" has no default value.'
>>> v.get_default_value('pass(default=list(1, 2, 3, 4))')
['1', '2', '3', '4']
>>> v = Validator()
>>> v.check("pass(default=None)", None, True)
>>> v.check("pass(default='None')", None, True)
'None'
>>> v.check('pass(default="None")', None, True)
'None'
>>> v.check('pass(default=list(1, 2, 3, 4))', None, True)
['1', '2', '3', '4']
Bug test for unicode arguments
>>> v = Validator()
>>> v.check(unicode('string(min=4)'), unicode('test')) == unicode('test')
True
>>> v = Validator()
>>> v.get_default_value(unicode('string(min=4, default="1234")')) == unicode('1234')
True
>>> v.check(unicode('string(min=4, default="1234")'), unicode('test')) == unicode('test')
True
>>> v = Validator()
>>> default = v.get_default_value('string(default=None)')
>>> default == None
1
"""
return (value, args, keywargs)
def _test2():
"""
>>>
>>> v = Validator()
>>> v.get_default_value('string(default="#ff00dd")')
'#ff00dd'
>>> v.get_default_value('integer(default=3) # comment')
3
"""
def _test3():
r"""
>>> vtor.check('string(default="")', '', missing=True)
''
>>> vtor.check('string(default="\n")', '', missing=True)
'\n'
>>> print(vtor.check('string(default="\n")', '', missing=True))
<BLANKLINE>
<BLANKLINE>
>>> vtor.check('string()', '\n')
'\n'
>>> vtor.check('string(default="\n\n\n")', '', missing=True)
'\n\n\n'
>>> vtor.check('string()', 'random \n text goes here\n\n')
'random \n text goes here\n\n'
>>> vtor.check('string(default=" \nrandom text\ngoes \n here\n\n ")',
... '', missing=True)
' \nrandom text\ngoes \n here\n\n '
>>> vtor.check("string(default='\n\n\n')", '', missing=True)
'\n\n\n'
>>> vtor.check("option('\n','a','b',default='\n')", '', missing=True)
'\n'
>>> vtor.check("string_list()", ['foo', '\n', 'bar'])
['foo', '\n', 'bar']
>>> vtor.check("string_list(default=list('\n'))", '', missing=True)
['\n']
"""
if __name__ == '__main__':
# run the code tests in doctest format
import sys
import doctest
m = sys.modules.get('__main__')
globs = m.__dict__.copy()
globs.update({
'vtor': Validator(),
})
failures, tests = doctest.testmod(
m, globs=globs,
optionflags=doctest.IGNORE_EXCEPTION_DETAIL | doctest.ELLIPSIS)
assert not failures, '{} failures out of {} tests'.format(failures, tests)
| ruibarreira/linuxtrail | usr/lib/python2.7/dist-packages/validate.py | Python | gpl-3.0 | 47,237 |
# (C) British Crown Copyright 2013 - 2018, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.cube.Cube` class."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
from itertools import permutations
import numpy as np
import numpy.ma as ma
from cf_units import Unit
import iris.analysis
import iris.aux_factory
import iris.coords
import iris.exceptions
from iris import FUTURE
from iris.analysis import WeightedAggregator, Aggregator
from iris.analysis import MEAN
from iris.cube import Cube
from iris.coords import AuxCoord, DimCoord, CellMeasure
from iris.exceptions import (CoordinateNotFoundError, CellMeasureNotFoundError,
UnitConversionError)
from iris._lazy_data import as_lazy_data
from iris.tests import mock
import iris.tests.stock as stock
class Test___init___data(tests.IrisTest):
def test_ndarray(self):
# np.ndarray should be allowed through
data = np.arange(12).reshape(3, 4)
cube = Cube(data)
self.assertEqual(type(cube.data), np.ndarray)
self.assertArrayEqual(cube.data, data)
def test_masked(self):
# ma.MaskedArray should be allowed through
data = ma.masked_greater(np.arange(12).reshape(3, 4), 1)
cube = Cube(data)
self.assertEqual(type(cube.data), ma.MaskedArray)
self.assertMaskedArrayEqual(cube.data, data)
def test_masked_no_mask(self):
# ma.MaskedArray should be allowed through even if it has no mask
data = ma.masked_array(np.arange(12).reshape(3, 4), False)
cube = Cube(data)
self.assertEqual(type(cube.data), ma.MaskedArray)
self.assertMaskedArrayEqual(cube.data, data)
def test_matrix(self):
# Subclasses of np.ndarray should be coerced back to np.ndarray.
# (Except for np.ma.MaskedArray.)
data = np.matrix([[1, 2, 3], [4, 5, 6]])
cube = Cube(data)
self.assertEqual(type(cube.data), np.ndarray)
self.assertArrayEqual(cube.data, data)
class Test_data_dtype_fillvalue(tests.IrisTest):
def _sample_data(self, dtype=('f4'), masked=False, fill_value=None,
lazy=False):
data = np.arange(6).reshape((2, 3))
dtype = np.dtype(dtype)
data = data.astype(dtype)
if masked:
data = ma.masked_array(data, mask=[[0, 1, 0], [0, 0, 0]],
fill_value=fill_value)
if lazy:
data = as_lazy_data(data)
return data
def _sample_cube(self, dtype=('f4'), masked=False, fill_value=None,
lazy=False):
data = self._sample_data(dtype=dtype, masked=masked,
fill_value=fill_value, lazy=lazy)
cube = Cube(data)
return cube
def test_realdata_change(self):
# Check re-assigning real data.
cube = self._sample_cube()
self.assertEqual(cube.dtype, np.float32)
new_dtype = np.dtype('i4')
new_data = self._sample_data(dtype=new_dtype)
cube.data = new_data
self.assertIs(cube.core_data(), new_data)
self.assertEqual(cube.dtype, new_dtype)
def test_realmaskdata_change(self):
# Check re-assigning real masked data.
cube = self._sample_cube(masked=True, fill_value=1234)
self.assertEqual(cube.dtype, np.float32)
new_dtype = np.dtype('i4')
new_fill_value = 4321
new_data = self._sample_data(masked=True,
fill_value=new_fill_value,
dtype=new_dtype)
cube.data = new_data
self.assertIs(cube.core_data(), new_data)
self.assertEqual(cube.dtype, new_dtype)
self.assertEqual(cube.data.fill_value, new_fill_value)
def test_lazydata_change(self):
# Check re-assigning lazy data.
cube = self._sample_cube(lazy=True)
self.assertEqual(cube.core_data().dtype, np.float32)
new_dtype = np.dtype('f8')
new_data = self._sample_data(new_dtype, lazy=True)
cube.data = new_data
self.assertIs(cube.core_data(), new_data)
self.assertEqual(cube.dtype, new_dtype)
def test_lazymaskdata_change(self):
# Check re-assigning lazy masked data.
cube = self._sample_cube(masked=True, fill_value=1234,
lazy=True)
self.assertEqual(cube.core_data().dtype, np.float32)
new_dtype = np.dtype('f8')
new_fill_value = 4321
new_data = self._sample_data(dtype=new_dtype, masked=True,
fill_value=new_fill_value, lazy=True)
cube.data = new_data
self.assertIs(cube.core_data(), new_data)
self.assertEqual(cube.dtype, new_dtype)
self.assertEqual(cube.data.fill_value, new_fill_value)
def test_lazydata_realise(self):
# Check touching lazy data.
cube = self._sample_cube(lazy=True)
data = cube.data
self.assertIs(cube.core_data(), data)
self.assertEqual(cube.dtype, np.float32)
def test_lazymaskdata_realise(self):
# Check touching masked lazy data.
fill_value = 27.3
cube = self._sample_cube(masked=True, fill_value=fill_value, lazy=True)
data = cube.data
self.assertIs(cube.core_data(), data)
self.assertEqual(cube.dtype, np.float32)
self.assertEqual(data.fill_value, np.float32(fill_value))
def test_realmaskedconstantint_realise(self):
masked_data = ma.masked_array([666], mask=True)
masked_constant = masked_data[0]
cube = Cube(masked_constant)
data = cube.data
self.assertTrue(ma.isMaskedArray(data))
self.assertNotIsInstance(data, ma.core.MaskedConstant)
def test_lazymaskedconstantint_realise(self):
dtype = np.dtype('i2')
masked_data = ma.masked_array([666], mask=True, dtype=dtype)
masked_constant = masked_data[0]
masked_constant_lazy = as_lazy_data(masked_constant)
cube = Cube(masked_constant_lazy)
data = cube.data
self.assertTrue(ma.isMaskedArray(data))
self.assertNotIsInstance(data, ma.core.MaskedConstant)
def test_lazydata___getitem__dtype(self):
fill_value = 1234
dtype = np.dtype('int16')
masked_array = ma.masked_array(np.arange(5),
mask=[0, 0, 1, 0, 0],
fill_value=fill_value,
dtype=dtype)
lazy_masked_array = as_lazy_data(masked_array)
cube = Cube(lazy_masked_array)
subcube = cube[3:]
self.assertEqual(subcube.dtype, dtype)
self.assertEqual(subcube.data.fill_value, fill_value)
class Test_extract(tests.IrisTest):
def test_scalar_cube_exists(self):
# Ensure that extract is able to extract a scalar cube.
constraint = iris.Constraint(name='a1')
cube = Cube(1, long_name='a1')
res = cube.extract(constraint)
self.assertIs(res, cube)
def test_scalar_cube_noexists(self):
# Ensure that extract does not return a non-matching scalar cube.
constraint = iris.Constraint(name='a2')
cube = Cube(1, long_name='a1')
res = cube.extract(constraint)
self.assertIs(res, None)
def test_scalar_cube_coord_match(self):
# Ensure that extract is able to extract a scalar cube according to
# constrained scalar coordinate.
constraint = iris.Constraint(scalar_coord=0)
cube = Cube(1, long_name='a1')
coord = iris.coords.AuxCoord(0, long_name='scalar_coord')
cube.add_aux_coord(coord, None)
res = cube.extract(constraint)
self.assertIs(res, cube)
def test_scalar_cube_coord_nomatch(self):
# Ensure that extract is not extracting a scalar cube with scalar
# coordinate that does not match the constraint.
constraint = iris.Constraint(scalar_coord=1)
cube = Cube(1, long_name='a1')
coord = iris.coords.AuxCoord(0, long_name='scalar_coord')
cube.add_aux_coord(coord, None)
res = cube.extract(constraint)
self.assertIs(res, None)
def test_1d_cube_exists(self):
# Ensure that extract is able to extract from a 1d cube.
constraint = iris.Constraint(name='a1')
cube = Cube([1], long_name='a1')
res = cube.extract(constraint)
self.assertIs(res, cube)
def test_1d_cube_noexists(self):
# Ensure that extract does not return a non-matching 1d cube.
constraint = iris.Constraint(name='a2')
cube = Cube([1], long_name='a1')
res = cube.extract(constraint)
self.assertIs(res, None)
class Test_xml(tests.IrisTest):
def test_checksum_ignores_masked_values(self):
        # Mask out a single element.
data = ma.arange(12).reshape(3, 4)
data[1, 2] = ma.masked
cube = Cube(data)
self.assertCML(cube)
# If we change the underlying value before masking it, the
# checksum should be unaffected.
data = ma.arange(12).reshape(3, 4)
data[1, 2] = 42
data[1, 2] = ma.masked
cube = Cube(data)
self.assertCML(cube)
def test_byteorder_default(self):
cube = Cube(np.arange(3))
self.assertIn('byteorder', cube.xml())
def test_byteorder_false(self):
cube = Cube(np.arange(3))
self.assertNotIn('byteorder', cube.xml(byteorder=False))
def test_byteorder_true(self):
cube = Cube(np.arange(3))
self.assertIn('byteorder', cube.xml(byteorder=True))
class Test_collapsed__lazy(tests.IrisTest):
def setUp(self):
self.data = np.arange(6.0).reshape((2, 3))
self.lazydata = as_lazy_data(self.data)
cube = Cube(self.lazydata)
for i_dim, name in enumerate(('y', 'x')):
npts = cube.shape[i_dim]
coord = DimCoord(np.arange(npts), long_name=name)
cube.add_dim_coord(coord, i_dim)
self.cube = cube
def test_dim0_lazy(self):
cube_collapsed = self.cube.collapsed('y', MEAN)
self.assertTrue(cube_collapsed.has_lazy_data())
self.assertArrayAlmostEqual(cube_collapsed.data, [1.5, 2.5, 3.5])
self.assertFalse(cube_collapsed.has_lazy_data())
def test_dim1_lazy(self):
cube_collapsed = self.cube.collapsed('x', MEAN)
self.assertTrue(cube_collapsed.has_lazy_data())
self.assertArrayAlmostEqual(cube_collapsed.data, [1.0, 4.0])
self.assertFalse(cube_collapsed.has_lazy_data())
def test_multidims(self):
# Check that MEAN works with multiple dims.
cube_collapsed = self.cube.collapsed(('x', 'y'), MEAN)
self.assertTrue(cube_collapsed.has_lazy_data())
self.assertArrayAllClose(cube_collapsed.data, 2.5)
def test_non_lazy_aggregator(self):
# An aggregator which doesn't have a lazy function should still work.
dummy_agg = Aggregator('custom_op',
lambda x, axis=None: np.mean(x, axis=axis))
result = self.cube.collapsed('x', dummy_agg)
self.assertFalse(result.has_lazy_data())
self.assertArrayEqual(result.data, np.mean(self.data, axis=1))
class Test_collapsed__warning(tests.IrisTest):
def setUp(self):
self.cube = Cube([[1, 2], [1, 2]])
lat = DimCoord([1, 2], standard_name='latitude')
lon = DimCoord([1, 2], standard_name='longitude')
grid_lat = AuxCoord([1, 2], standard_name='grid_latitude')
grid_lon = AuxCoord([1, 2], standard_name='grid_longitude')
wibble = AuxCoord([1, 2], long_name='wibble')
self.cube.add_dim_coord(lat, 0)
self.cube.add_dim_coord(lon, 1)
self.cube.add_aux_coord(grid_lat, 0)
self.cube.add_aux_coord(grid_lon, 1)
self.cube.add_aux_coord(wibble, 1)
def _aggregator(self, uses_weighting):
# Returns a mock aggregator with a mocked method (uses_weighting)
# which returns the given True/False condition.
aggregator = mock.Mock(spec=WeightedAggregator, lazy_func=None)
aggregator.cell_method = None
aggregator.uses_weighting = mock.Mock(return_value=uses_weighting)
return aggregator
def _assert_warn_collapse_without_weight(self, coords, warn):
# Ensure that warning is raised.
msg = "Collapsing spatial coordinate {!r} without weighting"
for coord in coords:
self.assertIn(mock.call(msg.format(coord)), warn.call_args_list)
def _assert_nowarn_collapse_without_weight(self, coords, warn):
        # Ensure that warning is not raised.
msg = "Collapsing spatial coordinate {!r} without weighting"
for coord in coords:
self.assertNotIn(mock.call(msg.format(coord)), warn.call_args_list)
def test_lat_lon_noweighted_aggregator(self):
# Collapse latitude coordinate with unweighted aggregator.
aggregator = mock.Mock(spec=Aggregator, lazy_func=None)
aggregator.cell_method = None
coords = ['latitude', 'longitude']
with mock.patch('warnings.warn') as warn:
self.cube.collapsed(coords, aggregator, somekeyword='bla')
self._assert_nowarn_collapse_without_weight(coords, warn)
def test_lat_lon_weighted_aggregator(self):
# Collapse latitude coordinate with weighted aggregator without
# providing weights.
aggregator = self._aggregator(False)
coords = ['latitude', 'longitude']
with mock.patch('warnings.warn') as warn:
self.cube.collapsed(coords, aggregator)
coords = [coord for coord in coords if 'latitude' in coord]
self._assert_warn_collapse_without_weight(coords, warn)
def test_lat_lon_weighted_aggregator_with_weights(self):
        # Collapse latitude coordinate with a weighted aggregator and
# providing suitable weights.
weights = np.array([[0.1, 0.5], [0.3, 0.2]])
aggregator = self._aggregator(True)
coords = ['latitude', 'longitude']
with mock.patch('warnings.warn') as warn:
self.cube.collapsed(coords, aggregator, weights=weights)
self._assert_nowarn_collapse_without_weight(coords, warn)
def test_lat_lon_weighted_aggregator_alt(self):
# Collapse grid_latitude coordinate with weighted aggregator without
# providing weights. Tests coordinate matching logic.
aggregator = self._aggregator(False)
coords = ['grid_latitude', 'grid_longitude']
with mock.patch('warnings.warn') as warn:
self.cube.collapsed(coords, aggregator)
coords = [coord for coord in coords if 'latitude' in coord]
self._assert_warn_collapse_without_weight(coords, warn)
def test_no_lat_weighted_aggregator_mixed(self):
# Collapse grid_latitude and an unmatched coordinate (not lat/lon)
# with weighted aggregator without providing weights.
# Tests coordinate matching logic.
aggregator = self._aggregator(False)
coords = ['wibble']
with mock.patch('warnings.warn') as warn:
self.cube.collapsed(coords, aggregator)
self._assert_nowarn_collapse_without_weight(coords, warn)
class Test_summary(tests.IrisTest):
def setUp(self):
self.cube = Cube(0)
def test_cell_datetime_objects(self):
# Check the scalar coordinate summary still works even when
# iris.FUTURE.cell_datetime_objects is True.
self.cube.add_aux_coord(AuxCoord(42, units='hours since epoch'))
summary = self.cube.summary()
self.assertIn('1970-01-02 18:00:00', summary)
def test_scalar_str_coord(self):
str_value = 'foo'
self.cube.add_aux_coord(AuxCoord(str_value))
summary = self.cube.summary()
self.assertIn(str_value, summary)
class Test_is_compatible(tests.IrisTest):
def setUp(self):
self.test_cube = Cube([1.])
self.other_cube = self.test_cube.copy()
def test_noncommon_array_attrs_compatible(self):
# Non-common array attributes should be ok.
self.test_cube.attributes['array_test'] = np.array([1.0, 2, 3])
self.assertTrue(self.test_cube.is_compatible(self.other_cube))
def test_matching_array_attrs_compatible(self):
# Matching array attributes should be ok.
self.test_cube.attributes['array_test'] = np.array([1.0, 2, 3])
self.other_cube.attributes['array_test'] = np.array([1.0, 2, 3])
self.assertTrue(self.test_cube.is_compatible(self.other_cube))
def test_different_array_attrs_incompatible(self):
# Differing array attributes should make the cubes incompatible.
self.test_cube.attributes['array_test'] = np.array([1.0, 2, 3])
self.other_cube.attributes['array_test'] = np.array([1.0, 2, 777.7])
self.assertFalse(self.test_cube.is_compatible(self.other_cube))
class Test_aggregated_by(tests.IrisTest):
def setUp(self):
self.cube = Cube(np.arange(11))
val_coord = AuxCoord([0, 0, 0, 1, 1, 2, 0, 0, 2, 0, 1],
long_name="val")
label_coord = AuxCoord(['alpha', 'alpha', 'beta',
'beta', 'alpha', 'gamma',
'alpha', 'alpha', 'alpha',
'gamma', 'beta'],
long_name='label', units='no_unit')
self.cube.add_aux_coord(val_coord, 0)
self.cube.add_aux_coord(label_coord, 0)
self.mock_agg = mock.Mock(spec=Aggregator)
self.mock_agg.cell_method = []
self.mock_agg.aggregate = mock.Mock(
return_value=mock.Mock(dtype='object'))
self.mock_agg.aggregate_shape = mock.Mock(return_value=())
self.mock_agg.post_process = mock.Mock(side_effect=lambda x, y, z: x)
def test_string_coord_agg_by_label(self):
# Aggregate a cube on a string coordinate label where label
# and val entries are not in step; the resulting cube has a val
# coord of bounded cells and a label coord of single string entries.
res_cube = self.cube.aggregated_by('label', self.mock_agg)
val_coord = AuxCoord(np.array([1., 0.5, 1.]),
bounds=np.array([[0, 2], [0, 1], [2, 0]]),
long_name='val')
label_coord = AuxCoord(np.array(['alpha', 'beta', 'gamma']),
long_name='label', units='no_unit')
self.assertEqual(res_cube.coord('val'), val_coord)
self.assertEqual(res_cube.coord('label'), label_coord)
def test_string_coord_agg_by_val(self):
# Aggregate a cube on a numeric coordinate val where label
# and val entries are not in step; the resulting cube has a label
# coord with serialised labels from the aggregated cells.
res_cube = self.cube.aggregated_by('val', self.mock_agg)
val_coord = AuxCoord(np.array([0, 1, 2]), long_name='val')
exp0 = 'alpha|alpha|beta|alpha|alpha|gamma'
exp1 = 'beta|alpha|beta'
exp2 = 'gamma|alpha'
label_coord = AuxCoord(np.array((exp0, exp1, exp2)),
long_name='label', units='no_unit')
self.assertEqual(res_cube.coord('val'), val_coord)
self.assertEqual(res_cube.coord('label'), label_coord)
def test_single_string_aggregation(self):
aux_coords = [(AuxCoord(['a', 'b', 'a'], long_name='foo'), 0),
(AuxCoord(['a', 'a', 'a'], long_name='bar'), 0)]
cube = iris.cube.Cube(np.arange(12).reshape(3, 4),
aux_coords_and_dims=aux_coords)
result = cube.aggregated_by('foo', MEAN)
self.assertEqual(result.shape, (2, 4))
self.assertEqual(result.coord('bar'),
AuxCoord(['a|a', 'a'], long_name='bar'))
class Test_rolling_window(tests.IrisTest):
def setUp(self):
self.cube = Cube(np.arange(6))
val_coord = DimCoord([0, 1, 2, 3, 4, 5], long_name="val")
month_coord = AuxCoord(['jan', 'feb', 'mar', 'apr', 'may', 'jun'],
long_name='month')
self.cube.add_dim_coord(val_coord, 0)
self.cube.add_aux_coord(month_coord, 0)
self.mock_agg = mock.Mock(spec=Aggregator)
self.mock_agg.aggregate = mock.Mock(
return_value=np.empty([4]))
self.mock_agg.post_process = mock.Mock(side_effect=lambda x, y, z: x)
def test_string_coord(self):
# Rolling window on a cube that contains a string coordinate.
res_cube = self.cube.rolling_window('val', self.mock_agg, 3)
val_coord = DimCoord(np.array([1, 2, 3, 4]),
bounds=np.array([[0, 2], [1, 3], [2, 4], [3, 5]]),
long_name='val')
month_coord = AuxCoord(
np.array(['jan|feb|mar', 'feb|mar|apr', 'mar|apr|may',
'apr|may|jun']),
bounds=np.array([['jan', 'mar'], ['feb', 'apr'],
['mar', 'may'], ['apr', 'jun']]),
long_name='month')
self.assertEqual(res_cube.coord('val'), val_coord)
self.assertEqual(res_cube.coord('month'), month_coord)
def test_kwargs(self):
# Rolling window with missing data not tolerated
window = 2
self.cube.data = ma.array(self.cube.data,
mask=([True, False, False,
False, True, False]))
res_cube = self.cube.rolling_window('val', iris.analysis.MEAN,
window, mdtol=0)
expected_result = ma.array([-99., 1.5, 2.5, -99., -99.],
mask=[True, False, False, True, True],
dtype=np.float64)
self.assertMaskedArrayEqual(expected_result, res_cube.data)
class Test_slices_dim_order(tests.IrisTest):
'''
This class tests the capability of iris.cube.Cube.slices(), including its
ability to correctly re-order the dimensions.
'''
def setUp(self):
'''
        Set up a 4D iris cube; each dimension is length 1.
        The dimensions are:
dim1: time
dim2: height
dim3: latitude
dim4: longitude
'''
self.cube = iris.cube.Cube(np.array([[[[8.]]]]))
self.cube.add_dim_coord(iris.coords.DimCoord([0], "time"), [0])
self.cube.add_dim_coord(iris.coords.DimCoord([0], "height"), [1])
self.cube.add_dim_coord(iris.coords.DimCoord([0], "latitude"), [2])
self.cube.add_dim_coord(iris.coords.DimCoord([0], "longitude"), [3])
@staticmethod
def expected_cube_setup(dim1name, dim2name, dim3name):
'''
input:
------
dim1name: str
name of the first dimension coordinate
dim2name: str
name of the second dimension coordinate
dim3name: str
name of the third dimension coordinate
output:
------
cube: iris cube
iris cube with the specified axis holding the data 8
'''
cube = iris.cube.Cube(np.array([[[8.]]]))
cube.add_dim_coord(iris.coords.DimCoord([0], dim1name), [0])
cube.add_dim_coord(iris.coords.DimCoord([0], dim2name), [1])
cube.add_dim_coord(iris.coords.DimCoord([0], dim3name), [2])
return cube
def check_order(self, dim1, dim2, dim3, dim_to_remove):
'''
does two things:
(1) slices the 4D cube in dim1, dim2, dim3 (and removes the scalar
coordinate) and
(2) sets up a 3D cube with dim1, dim2, dim3.
input:
-----
dim1: str
name of first dimension
dim2: str
name of second dimension
dim3: str
name of third dimension
dim_to_remove: str
name of the dimension that transforms into a scalar coordinate
when slicing the cube.
output:
------
sliced_cube: 3D cube
the cube that results if slicing the original cube
expected_cube: 3D cube
a cube set up with the axis corresponding to the dims
'''
sliced_cube = next(self.cube.slices([dim1, dim2, dim3]))
sliced_cube.remove_coord(dim_to_remove)
expected_cube = self.expected_cube_setup(dim1, dim2, dim3)
self.assertEqual(sliced_cube, expected_cube)
def test_all_permutations(self):
for perm in permutations(["time", "height", "latitude", "longitude"]):
self.check_order(*perm)
@tests.skip_data
class Test_slices_over(tests.IrisTest):
def setUp(self):
self.cube = stock.realistic_4d()
# Define expected iterators for 1D and 2D test cases.
self.exp_iter_1d = range(
len(self.cube.coord('model_level_number').points))
self.exp_iter_2d = np.ndindex(6, 70, 1, 1)
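        # The realistic_4d() cube is 4-D (time, model level, lat, lon); the
        # trailing (1, 1) entries are placeholders for the two trailing
        # dimensions that are not iterated over here.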
        # Define maximum number of iterations for particularly long
# (and so time-consuming) iterators.
self.long_iterator_max = 5
def test_1d_slice_coord_given(self):
res = self.cube.slices_over(self.cube.coord('model_level_number'))
for i, res_cube in zip(self.exp_iter_1d, res):
expected = self.cube[:, i]
self.assertEqual(res_cube, expected)
def test_1d_slice_nonexistent_coord_given(self):
with self.assertRaises(CoordinateNotFoundError):
res = self.cube.slices_over(self.cube.coord('wibble'))
def test_1d_slice_coord_name_given(self):
res = self.cube.slices_over('model_level_number')
for i, res_cube in zip(self.exp_iter_1d, res):
expected = self.cube[:, i]
self.assertEqual(res_cube, expected)
def test_1d_slice_nonexistent_coord_name_given(self):
with self.assertRaises(CoordinateNotFoundError):
res = self.cube.slices_over('wibble')
def test_1d_slice_dimension_given(self):
res = self.cube.slices_over(1)
for i, res_cube in zip(self.exp_iter_1d, res):
expected = self.cube[:, i]
self.assertEqual(res_cube, expected)
def test_1d_slice_nonexistent_dimension_given(self):
with self.assertRaisesRegexp(ValueError, 'iterator over a dimension'):
res = self.cube.slices_over(self.cube.ndim + 1)
def test_2d_slice_coord_given(self):
# Slicing over these two dimensions returns 420 2D cubes, so only check
# cubes up to `self.long_iterator_max` to keep test runtime sensible.
res = self.cube.slices_over([self.cube.coord('time'),
self.cube.coord('model_level_number')])
for ct in range(self.long_iterator_max):
indices = list(next(self.exp_iter_2d))
# Replace the dimensions not iterated over with spanning slices.
indices[2] = indices[3] = slice(None)
expected = self.cube[tuple(indices)]
self.assertEqual(next(res), expected)
def test_2d_slice_nonexistent_coord_given(self):
with self.assertRaises(CoordinateNotFoundError):
res = self.cube.slices_over([self.cube.coord('time'),
self.cube.coord('wibble')])
def test_2d_slice_coord_name_given(self):
# Slicing over these two dimensions returns 420 2D cubes, so only check
# cubes up to `self.long_iterator_max` to keep test runtime sensible.
res = self.cube.slices_over(['time', 'model_level_number'])
for ct in range(self.long_iterator_max):
indices = list(next(self.exp_iter_2d))
# Replace the dimensions not iterated over with spanning slices.
indices[2] = indices[3] = slice(None)
expected = self.cube[tuple(indices)]
self.assertEqual(next(res), expected)
def test_2d_slice_nonexistent_coord_name_given(self):
with self.assertRaises(CoordinateNotFoundError):
res = self.cube.slices_over(['time', 'wibble'])
def test_2d_slice_dimension_given(self):
# Slicing over these two dimensions returns 420 2D cubes, so only check
# cubes up to `self.long_iterator_max` to keep test runtime sensible.
res = self.cube.slices_over([0, 1])
for ct in range(self.long_iterator_max):
indices = list(next(self.exp_iter_2d))
# Replace the dimensions not iterated over with spanning slices.
indices[2] = indices[3] = slice(None)
expected = self.cube[tuple(indices)]
self.assertEqual(next(res), expected)
def test_2d_slice_reversed_dimension_given(self):
# Confirm that reversing the order of the dimensions returns the same
# results as the above test.
res = self.cube.slices_over([1, 0])
for ct in range(self.long_iterator_max):
indices = list(next(self.exp_iter_2d))
# Replace the dimensions not iterated over with spanning slices.
indices[2] = indices[3] = slice(None)
expected = self.cube[tuple(indices)]
self.assertEqual(next(res), expected)
def test_2d_slice_nonexistent_dimension_given(self):
with self.assertRaisesRegexp(ValueError, 'iterator over a dimension'):
res = self.cube.slices_over([0, self.cube.ndim + 1])
def test_multidim_slice_coord_given(self):
# Slicing over surface altitude returns 100x100 2D cubes, so only check
# cubes up to `self.long_iterator_max` to keep test runtime sensible.
res = self.cube.slices_over('surface_altitude')
# Define special ndindex iterator for the different dims sliced over.
nditer = np.ndindex(1, 1, 100, 100)
for ct in range(self.long_iterator_max):
indices = list(next(nditer))
# Replace the dimensions not iterated over with spanning slices.
indices[0] = indices[1] = slice(None)
expected = self.cube[tuple(indices)]
self.assertEqual(next(res), expected)
def test_duplicate_coordinate_given(self):
res = self.cube.slices_over([1, 1])
for i, res_cube in zip(self.exp_iter_1d, res):
expected = self.cube[:, i]
self.assertEqual(res_cube, expected)
def test_non_orthogonal_coordinates_given(self):
res = self.cube.slices_over(['model_level_number', 'sigma'])
for i, res_cube in zip(self.exp_iter_1d, res):
expected = self.cube[:, i]
self.assertEqual(res_cube, expected)
def test_nodimension(self):
# Slicing over no dimension should return the whole cube.
res = self.cube.slices_over([])
self.assertEqual(next(res), self.cube)
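# Helper for the intersection tests below: builds a lazy-data cube with
# dimensions (level_height, latitude, longitude), a sigma/surface_altitude
# hybrid-height factory, and a longitude coordinate running from lon_min to
# lon_max (marked circular when it spans exactly 360 degrees, with optional
# guessed bounds).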
def create_cube(lon_min, lon_max, bounds=False):
n_lons = max(lon_min, lon_max) - min(lon_max, lon_min)
data = np.arange(4 * 3 * n_lons, dtype='f4').reshape(4, 3, -1)
data = as_lazy_data(data)
cube = Cube(data, standard_name='x_wind', units='ms-1')
cube.add_dim_coord(iris.coords.DimCoord([0, 20, 40, 80],
long_name='level_height',
units='m'), 0)
cube.add_aux_coord(iris.coords.AuxCoord([1.0, 0.9, 0.8, 0.6],
long_name='sigma'), 0)
cube.add_dim_coord(iris.coords.DimCoord([-45, 0, 45], 'latitude',
units='degrees'), 1)
step = 1 if lon_max > lon_min else -1
circular = (abs(lon_max - lon_min) == 360)
cube.add_dim_coord(iris.coords.DimCoord(np.arange(lon_min, lon_max, step),
'longitude', units='degrees',
circular=circular), 2)
if bounds:
cube.coord('longitude').guess_bounds()
cube.add_aux_coord(iris.coords.AuxCoord(
np.arange(3 * n_lons).reshape(3, -1) * 10, 'surface_altitude',
units='m'), [1, 2])
cube.add_aux_factory(iris.aux_factory.HybridHeightFactory(
cube.coord('level_height'), cube.coord('sigma'),
cube.coord('surface_altitude')))
return cube
# Ensure all the other coordinates and factories are correctly preserved.
class Test_intersection__Metadata(tests.IrisTest):
def test_metadata(self):
cube = create_cube(0, 360)
result = cube.intersection(longitude=(170, 190))
self.assertCMLApproxData(result)
def test_metadata_wrapped(self):
cube = create_cube(-180, 180)
result = cube.intersection(longitude=(170, 190))
self.assertCMLApproxData(result)
# Explicitly check the handling of `circular` on the result.
class Test_intersection__Circular(tests.IrisTest):
def test_regional(self):
cube = create_cube(0, 360)
result = cube.intersection(longitude=(170, 190))
self.assertFalse(result.coord('longitude').circular)
def test_regional_wrapped(self):
cube = create_cube(-180, 180)
result = cube.intersection(longitude=(170, 190))
self.assertFalse(result.coord('longitude').circular)
def test_global(self):
cube = create_cube(-180, 180)
result = cube.intersection(longitude=(-180, 180))
self.assertTrue(result.coord('longitude').circular)
def test_global_wrapped(self):
cube = create_cube(-180, 180)
result = cube.intersection(longitude=(10, 370))
self.assertTrue(result.coord('longitude').circular)
# Check the various error conditions.
class Test_intersection__Invalid(tests.IrisTest):
def test_reversed_min_max(self):
cube = create_cube(0, 360)
with self.assertRaises(ValueError):
cube.intersection(longitude=(30, 10))
def test_dest_too_large(self):
cube = create_cube(0, 360)
with self.assertRaises(ValueError):
cube.intersection(longitude=(30, 500))
def test_src_too_large(self):
cube = create_cube(0, 400)
with self.assertRaises(ValueError):
cube.intersection(longitude=(10, 30))
def test_missing_coord(self):
cube = create_cube(0, 360)
with self.assertRaises(iris.exceptions.CoordinateNotFoundError):
cube.intersection(parrots=(10, 30))
def test_multi_dim_coord(self):
cube = create_cube(0, 360)
with self.assertRaises(iris.exceptions.CoordinateMultiDimError):
cube.intersection(surface_altitude=(10, 30))
def test_null_region(self):
        # 10 < v < 10
cube = create_cube(0, 360)
with self.assertRaises(IndexError):
cube.intersection(longitude=(10, 10, False, False))
class Test_intersection__Lazy(tests.IrisTest):
def test_real_data(self):
cube = create_cube(0, 360)
        cube.data  # Touch the data to realise it.
result = cube.intersection(longitude=(170, 190))
self.assertFalse(result.has_lazy_data())
self.assertArrayEqual(result.coord('longitude').points,
np.arange(170, 191))
self.assertEqual(result.data[0, 0, 0], 170)
self.assertEqual(result.data[0, 0, -1], 190)
def test_real_data_wrapped(self):
cube = create_cube(-180, 180)
        cube.data  # Touch the data to realise it.
result = cube.intersection(longitude=(170, 190))
self.assertFalse(result.has_lazy_data())
self.assertArrayEqual(result.coord('longitude').points,
np.arange(170, 191))
self.assertEqual(result.data[0, 0, 0], 350)
self.assertEqual(result.data[0, 0, -1], 10)
def test_lazy_data(self):
cube = create_cube(0, 360)
result = cube.intersection(longitude=(170, 190))
self.assertTrue(result.has_lazy_data())
self.assertArrayEqual(result.coord('longitude').points,
np.arange(170, 191))
self.assertEqual(result.data[0, 0, 0], 170)
self.assertEqual(result.data[0, 0, -1], 190)
def test_lazy_data_wrapped(self):
cube = create_cube(-180, 180)
result = cube.intersection(longitude=(170, 190))
self.assertTrue(result.has_lazy_data())
self.assertArrayEqual(result.coord('longitude').points,
np.arange(170, 191))
self.assertEqual(result.data[0, 0, 0], 350)
self.assertEqual(result.data[0, 0, -1], 10)
class Test_intersection_Points(tests.IrisTest):
def test_ignore_bounds(self):
cube = create_cube(0, 30, bounds=True)
result = cube.intersection(longitude=(9.5, 12.5), ignore_bounds=True)
self.assertArrayEqual(result.coord('longitude').points,
np.arange(10, 13))
self.assertArrayEqual(result.coord('longitude').bounds[0],
[9.5, 10.5])
self.assertArrayEqual(result.coord('longitude').bounds[-1],
[11.5, 12.5])
# Check what happens with a regional, points-only circular intersection
# coordinate.
class Test_intersection__RegionalSrcModulus(tests.IrisTest):
def test_request_subset(self):
cube = create_cube(40, 60)
result = cube.intersection(longitude=(45, 50))
self.assertArrayEqual(result.coord('longitude').points,
np.arange(45, 51))
self.assertArrayEqual(result.data[0, 0], np.arange(5, 11))
def test_request_left(self):
cube = create_cube(40, 60)
result = cube.intersection(longitude=(35, 45))
self.assertArrayEqual(result.coord('longitude').points,
np.arange(40, 46))
self.assertArrayEqual(result.data[0, 0], np.arange(0, 6))
def test_request_right(self):
cube = create_cube(40, 60)
result = cube.intersection(longitude=(55, 65))
self.assertArrayEqual(result.coord('longitude').points,
np.arange(55, 60))
self.assertArrayEqual(result.data[0, 0], np.arange(15, 20))
def test_request_superset(self):
cube = create_cube(40, 60)
result = cube.intersection(longitude=(35, 65))
self.assertArrayEqual(result.coord('longitude').points,
np.arange(40, 60))
self.assertArrayEqual(result.data[0, 0], np.arange(0, 20))
def test_request_subset_modulus(self):
cube = create_cube(40, 60)
result = cube.intersection(longitude=(45 + 360, 50 + 360))
self.assertArrayEqual(result.coord('longitude').points,
np.arange(45 + 360, 51 + 360))
self.assertArrayEqual(result.data[0, 0], np.arange(5, 11))
def test_request_left_modulus(self):
cube = create_cube(40, 60)
result = cube.intersection(longitude=(35 + 360, 45 + 360))
self.assertArrayEqual(result.coord('longitude').points,
np.arange(40 + 360, 46 + 360))
self.assertArrayEqual(result.data[0, 0], np.arange(0, 6))
def test_request_right_modulus(self):
cube = create_cube(40, 60)
result = cube.intersection(longitude=(55 + 360, 65 + 360))
self.assertArrayEqual(result.coord('longitude').points,
np.arange(55 + 360, 60 + 360))
self.assertArrayEqual(result.data[0, 0], np.arange(15, 20))
def test_request_superset_modulus(self):
cube = create_cube(40, 60)
result = cube.intersection(longitude=(35 + 360, 65 + 360))
self.assertArrayEqual(result.coord('longitude').points,
np.arange(40 + 360, 60 + 360))
self.assertArrayEqual(result.data[0, 0], np.arange(0, 20))
def test_tolerance_f4(self):
cube = create_cube(0, 5)
cube.coord('longitude').points = np.array([0., 3.74999905, 7.49999809,
11.24999714, 14.99999619],
dtype='f4')
        result = cube.intersection(longitude=(0, 5))  # Should not raise.
def test_tolerance_f8(self):
cube = create_cube(0, 5)
cube.coord('longitude').points = np.array([0., 3.74999905, 7.49999809,
11.24999714, 14.99999619],
dtype='f8')
        result = cube.intersection(longitude=(0, 5))  # Should not raise.
# Check what happens with a global, points-only circular intersection
# coordinate.
class Test_intersection__GlobalSrcModulus(tests.IrisTest):
def test_global_wrapped_extreme_increasing_base_period(self):
# Ensure that we can correctly handle points defined at (base + period)
cube = create_cube(-180., 180.)
lons = cube.coord('longitude')
        # Redefine longitude so that it has points at (base + period).
lons.points = np.linspace(-180., 180, lons.points.size)
result = cube.intersection(longitude=(lons.points.min(),
lons.points.max()))
self.assertArrayEqual(result.data, cube.data)
def test_global_wrapped_extreme_decreasing_base_period(self):
# Ensure that we can correctly handle points defined at (base + period)
cube = create_cube(180., -180.)
lons = cube.coord('longitude')
        # Redefine longitude so that it has points at (base + period).
lons.points = np.linspace(180., -180., lons.points.size)
result = cube.intersection(longitude=(lons.points.min(),
lons.points.max()))
self.assertArrayEqual(result.data, cube.data)
def test_global(self):
cube = create_cube(0, 360)
result = cube.intersection(longitude=(0, 360))
self.assertEqual(result.coord('longitude').points[0], 0)
self.assertEqual(result.coord('longitude').points[-1], 359)
self.assertEqual(result.data[0, 0, 0], 0)
self.assertEqual(result.data[0, 0, -1], 359)
def test_global_wrapped(self):
cube = create_cube(0, 360)
result = cube.intersection(longitude=(-180, 180))
self.assertEqual(result.coord('longitude').points[0], -180)
self.assertEqual(result.coord('longitude').points[-1], 179)
self.assertEqual(result.data[0, 0, 0], 180)
self.assertEqual(result.data[0, 0, -1], 179)
def test_aux_coord(self):
cube = create_cube(0, 360)
cube.replace_coord(iris.coords.AuxCoord.from_coord(
cube.coord('longitude')))
result = cube.intersection(longitude=(0, 360))
self.assertEqual(result.coord('longitude').points[0], 0)
self.assertEqual(result.coord('longitude').points[-1], 359)
self.assertEqual(result.data[0, 0, 0], 0)
self.assertEqual(result.data[0, 0, -1], 359)
def test_aux_coord_wrapped(self):
cube = create_cube(0, 360)
cube.replace_coord(iris.coords.AuxCoord.from_coord(
cube.coord('longitude')))
result = cube.intersection(longitude=(-180, 180))
self.assertEqual(result.coord('longitude').points[0], 0)
self.assertEqual(result.coord('longitude').points[-1], -1)
self.assertEqual(result.data[0, 0, 0], 0)
self.assertEqual(result.data[0, 0, -1], 359)
def test_aux_coord_non_contiguous_wrapped(self):
cube = create_cube(0, 360)
coord = iris.coords.AuxCoord.from_coord(cube.coord('longitude'))
coord.points = (coord.points * 1.5) % 360
cube.replace_coord(coord)
result = cube.intersection(longitude=(-90, 90))
self.assertEqual(result.coord('longitude').points[0], 0)
self.assertEqual(result.coord('longitude').points[-1], 90)
self.assertEqual(result.data[0, 0, 0], 0)
self.assertEqual(result.data[0, 0, -1], 300)
def test_decrementing(self):
cube = create_cube(360, 0)
result = cube.intersection(longitude=(40, 60))
self.assertEqual(result.coord('longitude').points[0], 60)
self.assertEqual(result.coord('longitude').points[-1], 40)
self.assertEqual(result.data[0, 0, 0], 300)
self.assertEqual(result.data[0, 0, -1], 320)
def test_decrementing_wrapped(self):
cube = create_cube(360, 0)
result = cube.intersection(longitude=(-10, 10))
self.assertEqual(result.coord('longitude').points[0], 10)
self.assertEqual(result.coord('longitude').points[-1], -10)
self.assertEqual(result.data[0, 0, 0], 350)
self.assertEqual(result.data[0, 0, -1], 10)
def test_no_wrap_after_modulus(self):
cube = create_cube(0, 360)
result = cube.intersection(longitude=(170 + 360, 190 + 360))
self.assertEqual(result.coord('longitude').points[0], 170 + 360)
self.assertEqual(result.coord('longitude').points[-1], 190 + 360)
self.assertEqual(result.data[0, 0, 0], 170)
self.assertEqual(result.data[0, 0, -1], 190)
def test_wrap_after_modulus(self):
cube = create_cube(-180, 180)
result = cube.intersection(longitude=(170 + 360, 190 + 360))
self.assertEqual(result.coord('longitude').points[0], 170 + 360)
self.assertEqual(result.coord('longitude').points[-1], 190 + 360)
self.assertEqual(result.data[0, 0, 0], 350)
self.assertEqual(result.data[0, 0, -1], 10)
def test_select_by_coord(self):
cube = create_cube(0, 360)
coord = iris.coords.DimCoord(0, 'longitude', units='degrees')
result = cube.intersection(iris.coords.CoordExtent(coord, 10, 30))
self.assertEqual(result.coord('longitude').points[0], 10)
self.assertEqual(result.coord('longitude').points[-1], 30)
self.assertEqual(result.data[0, 0, 0], 10)
self.assertEqual(result.data[0, 0, -1], 30)
def test_inclusive_exclusive(self):
cube = create_cube(0, 360)
result = cube.intersection(longitude=(170, 190, True, False))
self.assertEqual(result.coord('longitude').points[0], 170)
self.assertEqual(result.coord('longitude').points[-1], 189)
self.assertEqual(result.data[0, 0, 0], 170)
self.assertEqual(result.data[0, 0, -1], 189)
def test_exclusive_inclusive(self):
cube = create_cube(0, 360)
result = cube.intersection(longitude=(170, 190, False))
self.assertEqual(result.coord('longitude').points[0], 171)
self.assertEqual(result.coord('longitude').points[-1], 190)
self.assertEqual(result.data[0, 0, 0], 171)
self.assertEqual(result.data[0, 0, -1], 190)
def test_exclusive_exclusive(self):
cube = create_cube(0, 360)
result = cube.intersection(longitude=(170, 190, False, False))
self.assertEqual(result.coord('longitude').points[0], 171)
self.assertEqual(result.coord('longitude').points[-1], 189)
self.assertEqual(result.data[0, 0, 0], 171)
self.assertEqual(result.data[0, 0, -1], 189)
def test_single_point(self):
# 10 <= v <= 10
cube = create_cube(0, 360)
result = cube.intersection(longitude=(10, 10))
self.assertEqual(result.coord('longitude').points[0], 10)
self.assertEqual(result.coord('longitude').points[-1], 10)
self.assertEqual(result.data[0, 0, 0], 10)
self.assertEqual(result.data[0, 0, -1], 10)
def test_two_points(self):
# -1.5 <= v <= 0.5
cube = create_cube(0, 360)
result = cube.intersection(longitude=(-1.5, 0.5))
self.assertEqual(result.coord('longitude').points[0], -1)
self.assertEqual(result.coord('longitude').points[-1], 0)
self.assertEqual(result.data[0, 0, 0], 359)
self.assertEqual(result.data[0, 0, -1], 0)
def test_wrap_radians(self):
cube = create_cube(0, 360)
cube.coord('longitude').convert_units('radians')
result = cube.intersection(longitude=(-1, 0.5))
self.assertEqual(result.coord('longitude').points[0],
-0.99483767363676634)
self.assertEqual(result.coord('longitude').points[-1],
0.48869219055841207)
self.assertEqual(result.data[0, 0, 0], 303)
self.assertEqual(result.data[0, 0, -1], 28)
def test_tolerance_bug(self):
        # Floating point changes introduced by wrapping mean the wrapped
        # coordinate values are not exactly equal to their unwrapped
        # equivalents. This led to a bug that this test checks.
cube = create_cube(0, 400)
cube.coord('longitude').points = np.linspace(-179.55, 179.55, 400)
result = cube.intersection(longitude=(125, 145))
self.assertArrayAlmostEqual(result.coord('longitude').points,
cube.coord('longitude').points[339:361])
def test_tolerance_bug_wrapped(self):
cube = create_cube(0, 400)
cube.coord('longitude').points = np.linspace(-179.55, 179.55, 400)
result = cube.intersection(longitude=(-190, -170))
# Expected result is the last 11 and first 11 points.
expected = np.append(cube.coord('longitude').points[389:] - 360.,
cube.coord('longitude').points[:11])
self.assertArrayAlmostEqual(result.coord('longitude').points,
expected)
# Check what happens with a global, points-and-bounds circular
# intersection coordinate.
class Test_intersection__ModulusBounds(tests.IrisTest):
def test_global_wrapped_extreme_increasing_base_period(self):
# Ensure that we can correctly handle bounds defined at (base + period)
cube = create_cube(-180., 180., bounds=True)
lons = cube.coord('longitude')
result = cube.intersection(longitude=(lons.bounds.min(),
lons.bounds.max()))
self.assertArrayEqual(result.data, cube.data)
def test_global_wrapped_extreme_decreasing_base_period(self):
# Ensure that we can correctly handle bounds defined at (base + period)
cube = create_cube(180., -180., bounds=True)
lons = cube.coord('longitude')
result = cube.intersection(longitude=(lons.bounds.min(),
lons.bounds.max()))
self.assertArrayEqual(result.data, cube.data)
def test_misaligned_points_inside(self):
cube = create_cube(0, 360, bounds=True)
result = cube.intersection(longitude=(169.75, 190.25))
self.assertArrayEqual(result.coord('longitude').bounds[0],
[169.5, 170.5])
self.assertArrayEqual(result.coord('longitude').bounds[-1],
[189.5, 190.5])
self.assertEqual(result.data[0, 0, 0], 170)
self.assertEqual(result.data[0, 0, -1], 190)
def test_misaligned_points_outside(self):
cube = create_cube(0, 360, bounds=True)
result = cube.intersection(longitude=(170.25, 189.75))
self.assertArrayEqual(result.coord('longitude').bounds[0],
[169.5, 170.5])
self.assertArrayEqual(result.coord('longitude').bounds[-1],
[189.5, 190.5])
self.assertEqual(result.data[0, 0, 0], 170)
self.assertEqual(result.data[0, 0, -1], 190)
def test_misaligned_bounds(self):
cube = create_cube(-180, 180, bounds=True)
result = cube.intersection(longitude=(0, 360))
self.assertArrayEqual(result.coord('longitude').bounds[0],
[-0.5, 0.5])
self.assertArrayEqual(result.coord('longitude').bounds[-1],
[358.5, 359.5])
self.assertEqual(result.data[0, 0, 0], 180)
self.assertEqual(result.data[0, 0, -1], 179)
def test_misaligned_bounds_decreasing(self):
cube = create_cube(180, -180, bounds=True)
result = cube.intersection(longitude=(0, 360))
self.assertArrayEqual(result.coord('longitude').bounds[0],
[359.5, 358.5])
self.assertArrayEqual(result.coord('longitude').points[-1], 0)
self.assertArrayEqual(result.coord('longitude').bounds[-1],
[0.5, -0.5])
self.assertEqual(result.data[0, 0, 0], 181)
self.assertEqual(result.data[0, 0, -1], 180)
def test_aligned_inclusive(self):
cube = create_cube(0, 360, bounds=True)
result = cube.intersection(longitude=(170.5, 189.5))
self.assertArrayEqual(result.coord('longitude').bounds[0],
[169.5, 170.5])
self.assertArrayEqual(result.coord('longitude').bounds[-1],
[189.5, 190.5])
self.assertEqual(result.data[0, 0, 0], 170)
self.assertEqual(result.data[0, 0, -1], 190)
def test_aligned_exclusive(self):
cube = create_cube(0, 360, bounds=True)
result = cube.intersection(longitude=(170.5, 189.5, False, False))
self.assertArrayEqual(result.coord('longitude').bounds[0],
[170.5, 171.5])
self.assertArrayEqual(result.coord('longitude').bounds[-1],
[188.5, 189.5])
self.assertEqual(result.data[0, 0, 0], 171)
self.assertEqual(result.data[0, 0, -1], 189)
def test_negative_misaligned_points_inside(self):
cube = create_cube(0, 360, bounds=True)
result = cube.intersection(longitude=(-10.25, 10.25))
self.assertArrayEqual(result.coord('longitude').bounds[0],
[-10.5, -9.5])
self.assertArrayEqual(result.coord('longitude').bounds[-1],
[9.5, 10.5])
self.assertEqual(result.data[0, 0, 0], 350)
self.assertEqual(result.data[0, 0, -1], 10)
def test_negative_misaligned_points_outside(self):
cube = create_cube(0, 360, bounds=True)
result = cube.intersection(longitude=(-9.75, 9.75))
self.assertArrayEqual(result.coord('longitude').bounds[0],
[-10.5, -9.5])
self.assertArrayEqual(result.coord('longitude').bounds[-1],
[9.5, 10.5])
self.assertEqual(result.data[0, 0, 0], 350)
self.assertEqual(result.data[0, 0, -1], 10)
def test_negative_aligned_inclusive(self):
cube = create_cube(0, 360, bounds=True)
result = cube.intersection(longitude=(-10.5, 10.5))
self.assertArrayEqual(result.coord('longitude').bounds[0],
[-11.5, -10.5])
self.assertArrayEqual(result.coord('longitude').bounds[-1],
[10.5, 11.5])
self.assertEqual(result.data[0, 0, 0], 349)
self.assertEqual(result.data[0, 0, -1], 11)
def test_negative_aligned_exclusive(self):
cube = create_cube(0, 360, bounds=True)
result = cube.intersection(longitude=(-10.5, 10.5, False, False))
self.assertArrayEqual(result.coord('longitude').bounds[0],
[-10.5, -9.5])
self.assertArrayEqual(result.coord('longitude').bounds[-1],
[9.5, 10.5])
self.assertEqual(result.data[0, 0, 0], 350)
self.assertEqual(result.data[0, 0, -1], 10)
def test_decrementing(self):
cube = create_cube(360, 0, bounds=True)
result = cube.intersection(longitude=(40, 60))
self.assertArrayEqual(result.coord('longitude').bounds[0],
[60.5, 59.5])
self.assertArrayEqual(result.coord('longitude').bounds[-1],
[40.5, 39.5])
self.assertEqual(result.data[0, 0, 0], 300)
self.assertEqual(result.data[0, 0, -1], 320)
def test_decrementing_wrapped(self):
cube = create_cube(360, 0, bounds=True)
result = cube.intersection(longitude=(-10, 10))
self.assertArrayEqual(result.coord('longitude').bounds[0],
[10.5, 9.5])
self.assertArrayEqual(result.coord('longitude').bounds[-1],
[-9.5, -10.5])
self.assertEqual(result.data[0, 0, 0], 350)
self.assertEqual(result.data[0, 0, -1], 10)
def test_numerical_tolerance(self):
        # Test that the tolerance on the coordinate value does not cause a
        # modulus wrapping.
cube = create_cube(28.5, 68.5, bounds=True)
result = cube.intersection(longitude=(27.74, 68.61))
self.assertAlmostEqual(result.coord('longitude').points[0], 28.5)
self.assertAlmostEqual(result.coord('longitude').points[-1], 67.5)
def unrolled_cube():
data = np.arange(5, dtype='f4')
cube = Cube(data)
cube.add_aux_coord(iris.coords.AuxCoord([5.0, 10.0, 8.0, 5.0, 3.0],
'longitude', units='degrees'), 0)
cube.add_aux_coord(iris.coords.AuxCoord([1.0, 3.0, -2.0, -1.0, -4.0],
'latitude'), 0)
return cube
# Check what happens with a "unrolled" scatter-point data with a circular
# intersection coordinate.
class Test_intersection__ScatterModulus(tests.IrisTest):
def test_subset(self):
cube = unrolled_cube()
result = cube.intersection(longitude=(5, 8))
self.assertArrayEqual(result.coord('longitude').points, [5, 8, 5])
self.assertArrayEqual(result.data, [0, 2, 3])
def test_subset_wrapped(self):
cube = unrolled_cube()
result = cube.intersection(longitude=(5 + 360, 8 + 360))
self.assertArrayEqual(result.coord('longitude').points,
[365, 368, 365])
self.assertArrayEqual(result.data, [0, 2, 3])
def test_superset(self):
cube = unrolled_cube()
result = cube.intersection(longitude=(0, 15))
self.assertArrayEqual(result.coord('longitude').points,
[5, 10, 8, 5, 3])
self.assertArrayEqual(result.data, np.arange(5))
# Test the API of the cube interpolation method.
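# (For reference, a real call follows the same pattern, e.g.
#  cube.interpolate([('latitude', [0.5])], iris.analysis.Linear()); the test
#  below substitutes mocks so that only the delegation to the scheme and its
#  interpolator is exercised.)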
class Test_interpolate(tests.IrisTest):
def setUp(self):
self.cube = stock.simple_2d()
self.scheme = mock.Mock(name='interpolation scheme')
self.interpolator = mock.Mock(name='interpolator')
self.interpolator.return_value = mock.sentinel.RESULT
self.scheme.interpolator.return_value = self.interpolator
self.collapse_coord = True
def test_api(self):
sample_points = (('foo', 0.5), ('bar', 0.6))
result = self.cube.interpolate(sample_points, self.scheme,
self.collapse_coord)
self.scheme.interpolator.assert_called_once_with(
self.cube, ('foo', 'bar'))
self.interpolator.assert_called_once_with(
(0.5, 0.6), collapse_scalar=self.collapse_coord)
self.assertIs(result, mock.sentinel.RESULT)
class Test_regrid(tests.IrisTest):
def test(self):
# Test that Cube.regrid() just defers to the regridder of the
# given scheme.
# Define a fake scheme and its associated regridder which just
# capture their arguments and return them in place of the
# regridded cube.
class FakeRegridder(object):
def __init__(self, *args):
self.args = args
def __call__(self, cube):
return self.args + (cube,)
class FakeScheme(object):
def regridder(self, src, target):
return FakeRegridder(self, src, target)
cube = Cube(0)
scheme = FakeScheme()
result = cube.regrid(mock.sentinel.TARGET, scheme)
self.assertEqual(result, (scheme, cube, mock.sentinel.TARGET, cube))
class Test_copy(tests.IrisTest):
def _check_copy(self, cube, cube_copy):
self.assertIsNot(cube_copy, cube)
self.assertEqual(cube_copy, cube)
self.assertIsNot(cube_copy.data, cube.data)
if ma.isMaskedArray(cube.data):
self.assertMaskedArrayEqual(cube_copy.data, cube.data)
if cube.data.mask is not ma.nomask:
# "No mask" is a constant : all other cases must be distinct.
self.assertIsNot(cube_copy.data.mask, cube.data.mask)
else:
self.assertArrayEqual(cube_copy.data, cube.data)
def test(self):
cube = stock.simple_3d()
self._check_copy(cube, cube.copy())
def test__masked_emptymask(self):
cube = Cube(ma.array([0, 1]))
self._check_copy(cube, cube.copy())
def test__masked_arraymask(self):
cube = Cube(ma.array([0, 1], mask=[True, False]))
self._check_copy(cube, cube.copy())
def test__scalar(self):
cube = Cube(0)
self._check_copy(cube, cube.copy())
def test__masked_scalar_emptymask(self):
cube = Cube(ma.array(0))
self._check_copy(cube, cube.copy())
def test__masked_scalar_arraymask(self):
cube = Cube(ma.array(0, mask=False))
self._check_copy(cube, cube.copy())
def test__lazy(self):
cube = Cube(as_lazy_data(np.array([1, 0])))
self._check_copy(cube, cube.copy())
class Test_dtype(tests.IrisTest):
def setUp(self):
self.dtypes = (np.dtype('int'), np.dtype('uint'),
np.dtype('bool'), np.dtype('float'))
def test_real_data(self):
for dtype in self.dtypes:
data = np.array([0, 1], dtype=dtype)
cube = Cube(data)
self.assertEqual(cube.dtype, dtype)
def test_real_data_masked__mask_unset(self):
for dtype in self.dtypes:
data = ma.array([0, 1], dtype=dtype)
cube = Cube(data)
self.assertEqual(cube.dtype, dtype)
def test_real_data_masked__mask_set(self):
for dtype in self.dtypes:
data = ma.array([0, 1], dtype=dtype)
data[0] = ma.masked
cube = Cube(data)
self.assertEqual(cube.dtype, dtype)
def test_lazy_data(self):
for dtype in self.dtypes:
data = np.array([0, 1], dtype=dtype)
cube = Cube(as_lazy_data(data))
self.assertEqual(cube.dtype, dtype)
# Check that accessing the dtype does not trigger loading
# of the data.
self.assertTrue(cube.has_lazy_data())
def test_lazy_data_masked__mask_unset(self):
for dtype in self.dtypes:
data = ma.array([0, 1], dtype=dtype)
cube = Cube(as_lazy_data(data))
self.assertEqual(cube.dtype, dtype)
# Check that accessing the dtype does not trigger loading
# of the data.
self.assertTrue(cube.has_lazy_data())
def test_lazy_data_masked__mask_set(self):
for dtype in self.dtypes:
data = ma.array([0, 1], dtype=dtype)
data[0] = ma.masked
cube = Cube(as_lazy_data(data))
self.assertEqual(cube.dtype, dtype)
# Check that accessing the dtype does not trigger loading
# of the data.
self.assertTrue(cube.has_lazy_data())
class TestSubset(tests.IrisTest):
def test_scalar_coordinate(self):
cube = Cube(0, long_name='apricot', units='1')
cube.add_aux_coord(DimCoord([0], long_name='banana', units='1'))
result = cube.subset(cube.coord('banana'))
self.assertEqual(cube, result)
def test_dimensional_coordinate(self):
cube = Cube(np.zeros((4)), long_name='tinned_peach', units='1')
cube.add_dim_coord(DimCoord([0, 1, 2, 3],
long_name='sixteen_ton_weight',
units='1'),
0)
result = cube.subset(cube.coord('sixteen_ton_weight'))
self.assertEqual(cube, result)
def test_missing_coordinate(self):
cube = Cube(0, long_name='raspberry', units='1')
cube.add_aux_coord(DimCoord([0], long_name='loganberry', units='1'))
bad_coord = DimCoord([0], long_name='tiger', units='1')
self.assertRaises(CoordinateNotFoundError, cube.subset, bad_coord)
def test_different_coordinate(self):
cube = Cube(0, long_name='raspberry', units='1')
cube.add_aux_coord(DimCoord([0], long_name='loganberry', units='1'))
different_coord = DimCoord([2], long_name='loganberry', units='1')
result = cube.subset(different_coord)
self.assertEqual(result, None)
def test_not_coordinate(self):
cube = Cube(0, long_name='peach', units='1')
cube.add_aux_coord(DimCoord([0], long_name='crocodile', units='1'))
self.assertRaises(ValueError, cube.subset, 'Pointed Stick')
class Test_add_metadata(tests.IrisTest):
def test_add_dim_coord(self):
cube = Cube(np.arange(3))
x_coord = DimCoord(points=np.array([2, 3, 4]),
long_name='x')
cube.add_dim_coord(x_coord, 0)
self.assertEqual(cube.coord('x'), x_coord)
def test_add_aux_coord(self):
cube = Cube(np.arange(6).reshape(2, 3))
x_coord = AuxCoord(points=np.arange(6).reshape(2, 3),
long_name='x')
cube.add_aux_coord(x_coord, [0, 1])
self.assertEqual(cube.coord('x'), x_coord)
def test_add_cell_measure(self):
cube = Cube(np.arange(6).reshape(2, 3))
a_cell_measure = CellMeasure(data=np.arange(6).reshape(2, 3),
long_name='area', measure='area')
cube.add_cell_measure(a_cell_measure, [0, 1])
self.assertEqual(cube.cell_measure('area'), a_cell_measure)
class Test_remove_metadata(tests.IrisTest):
def setUp(self):
cube = Cube(np.arange(6).reshape(2, 3))
x_coord = DimCoord(points=np.array([2, 3, 4]),
long_name='x')
cube.add_dim_coord(x_coord, 1)
z_coord = AuxCoord(points=np.arange(6).reshape(2, 3),
long_name='z')
cube.add_aux_coord(z_coord, [0, 1])
a_cell_measure = CellMeasure(data=np.arange(6).reshape(2, 3),
long_name='area', measure='area')
self.b_cell_measure = CellMeasure(data=np.arange(6).reshape(2, 3),
long_name='other_area',
measure='area')
cube.add_cell_measure(a_cell_measure, [0, 1])
cube.add_cell_measure(self.b_cell_measure, [0, 1])
self.cube = cube
def test_remove_dim_coord(self):
self.cube.remove_coord(self.cube.coord('x'))
self.assertEqual(self.cube.coords('x'), [])
def test_remove_aux_coord(self):
self.cube.remove_coord(self.cube.coord('z'))
self.assertEqual(self.cube.coords('z'), [])
def test_remove_cell_measure(self):
self.cube.remove_cell_measure(self.cube.cell_measure('area'))
self.assertEqual(self.cube._cell_measures_and_dims,
[[self.b_cell_measure, (0, 1)]])
class Test__getitem_CellMeasure(tests.IrisTest):
def setUp(self):
cube = Cube(np.arange(6).reshape(2, 3))
x_coord = DimCoord(points=np.array([2, 3, 4]),
long_name='x')
cube.add_dim_coord(x_coord, 1)
y_coord = DimCoord(points=np.array([5, 6]),
long_name='y')
cube.add_dim_coord(y_coord, 0)
z_coord = AuxCoord(points=np.arange(6).reshape(2, 3),
long_name='z')
cube.add_aux_coord(z_coord, [0, 1])
a_cell_measure = CellMeasure(data=np.arange(6).reshape(2, 3),
long_name='area', measure='area')
cube.add_cell_measure(a_cell_measure, [0, 1])
self.cube = cube
def test_cell_measure_2d(self):
result = self.cube[0:2, 0:2]
self.assertEqual(len(result.cell_measures()), 1)
self.assertEqual(result.shape,
result.cell_measures()[0].data.shape)
def test_cell_measure_1d(self):
result = self.cube[0, 0:2]
self.assertEqual(len(result.cell_measures()), 1)
self.assertEqual(result.shape,
result.cell_measures()[0].data.shape)
class TestCellMeasures(tests.IrisTest):
def setUp(self):
cube = Cube(np.arange(6).reshape(2, 3))
x_coord = DimCoord(points=np.array([2, 3, 4]),
long_name='x')
cube.add_dim_coord(x_coord, 1)
z_coord = AuxCoord(points=np.arange(6).reshape(2, 3),
long_name='z')
cube.add_aux_coord(z_coord, [0, 1])
self.a_cell_measure = CellMeasure(data=np.arange(6).reshape(2, 3),
long_name='area', measure='area',
units='m2')
cube.add_cell_measure(self.a_cell_measure, [0, 1])
self.cube = cube
def test_get_cell_measure(self):
cm = self.cube.cell_measure('area')
self.assertEqual(cm, self.a_cell_measure)
def test_get_cell_measures(self):
cms = self.cube.cell_measures()
self.assertEqual(len(cms), 1)
self.assertEqual(cms[0], self.a_cell_measure)
def test_get_cell_measures_obj(self):
cms = self.cube.cell_measures(self.a_cell_measure)
self.assertEqual(len(cms), 1)
self.assertEqual(cms[0], self.a_cell_measure)
def test_fail_get_cell_measure(self):
with self.assertRaises(CellMeasureNotFoundError):
cm = self.cube.cell_measure('notarea')
def test_fail_get_cell_measures_obj(self):
a_cell_measure = self.a_cell_measure.copy()
a_cell_measure.units = 'km2'
with self.assertRaises(CellMeasureNotFoundError):
cms = self.cube.cell_measure(a_cell_measure)
def test_cell_measure_dims(self):
cm_dims = self.cube.cell_measure_dims(self.a_cell_measure)
self.assertEqual(cm_dims, (0, 1))
def test_fail_cell_measure_dims(self):
a_cell_measure = self.a_cell_measure.copy()
a_cell_measure.units = 'km2'
with self.assertRaises(CellMeasureNotFoundError):
cm_dims = self.cube.cell_measure_dims(a_cell_measure)
class Test_transpose(tests.IrisTest):
def setUp(self):
self.data = np.arange(24).reshape(3, 2, 4)
self.cube = Cube(self.data)
self.lazy_cube = Cube(as_lazy_data(self.data))
def test_lazy_data(self):
cube = self.lazy_cube
cube.transpose()
self.assertTrue(cube.has_lazy_data())
self.assertArrayEqual(self.data.T, cube.data)
def test_real_data(self):
self.cube.transpose()
self.assertFalse(self.cube.has_lazy_data())
self.assertIs(self.data.base, self.cube.data.base)
self.assertArrayEqual(self.data.T, self.cube.data)
def test_real_data__new_order(self):
new_order = [2, 0, 1]
self.cube.transpose(new_order)
self.assertFalse(self.cube.has_lazy_data())
self.assertIs(self.data.base, self.cube.data.base)
self.assertArrayEqual(self.data.transpose(new_order), self.cube.data)
def test_lazy_data__new_order(self):
new_order = [2, 0, 1]
cube = self.lazy_cube
cube.transpose(new_order)
self.assertTrue(cube.has_lazy_data())
self.assertArrayEqual(self.data.transpose(new_order), cube.data)
def test_lazy_data__transpose_order_ndarray(self):
# Check that a transpose order supplied as an array does not trip up
# a dask transpose operation.
new_order = np.array([2, 0, 1])
cube = self.lazy_cube
cube.transpose(new_order)
self.assertTrue(cube.has_lazy_data())
self.assertArrayEqual(self.data.transpose(new_order), cube.data)
def test_bad_transpose_order(self):
exp_emsg = 'Incorrect number of dimensions'
with self.assertRaisesRegexp(ValueError, exp_emsg):
self.cube.transpose([1])
class Test_convert_units(tests.IrisTest):
def test_convert_unknown_units(self):
cube = iris.cube.Cube(1)
emsg = ('Cannot convert from unknown units. '
'The "cube.units" attribute may be set directly.')
with self.assertRaisesRegexp(UnitConversionError, emsg):
cube.convert_units('mm day-1')
def test_preserves_lazy(self):
real_data = np.arange(12.).reshape((3, 4))
lazy_data = as_lazy_data(real_data)
cube = iris.cube.Cube(lazy_data, units='m')
real_data_ft = Unit('m').convert(real_data, 'ft')
cube.convert_units('ft')
self.assertTrue(cube.has_lazy_data())
self.assertArrayAllClose(cube.data, real_data_ft)
if __name__ == '__main__':
tests.main()
| duncanwp/iris | lib/iris/tests/unit/cube/test_Cube.py | Python | lgpl-3.0 | 73,611 |
from rest_framework import serializers
# Serializer will look at models and convert them to JSON for us
from .models import List, Card #our models
# CardSerializer must be defined before ListSerializer, which nests it below
class CardSerializer(serializers.ModelSerializer):
# Model we are representing is Card
class Meta:
model = Card
fields = '__all__'
class ListSerializer(serializers.ModelSerializer):
# pulls the cards into the serializer
cards = CardSerializer(read_only=True, many=True)
# Model we are representing is List
class Meta:
model = List
fields = '__all__' # Now required in django/rest_framework
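# A minimal usage sketch (illustrative only; the field names shown in the output,
# e.g. `name` and `title`, are assumptions rather than taken from models.py):
#
#     board = List.objects.first()
#     ListSerializer(board).data
#     # -> {'id': 1, 'name': 'To Do', 'cards': [{'id': 3, 'title': 'Write docs', ...}]}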
| jantaylor/djangular-scrum | scrumboard/serializers.py | Python | mit | 663 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# BEGIN PYTHON 2/3 COMPATIBILITY BOILERPLATE
from __future__ import absolute_import
from __future__ import with_statement
from __future__ import division
from __future__ import nested_scopes
from __future__ import generators
from __future__ import unicode_literals
from __future__ import print_function
try:
xrange
except:
xrange = range
'''
Contains higher order functions to make creation of GPU functions more
succinct and compact. Also contains generic routines for manipulating CUDA
source objects.
'''
try:
import pycuda.gpuarray as gpuarray
import pycuda.cumath as cumath
from pycuda.elementwise import ElementwiseKernel
from pycuda.compiler import SourceModule
except:
import sys
def missing(*args,**kwargs):
if 'sphinx' in sys.modules:
print('Please locate and install the pycuda GPU library')
else:
raise ValueError('Please locate and install pycuda GPU library')
# TODO: shadow missing function with the above, which raises an error?
try:
from pytools import memoize
except:
print('Please install the pytools module')
print('Attempting fallback to neurotools')
from neurotools.tools import memoize
from math import log, ceil
import re
import numpy as np
from neurotools.gpu.cpu.util import *
from neurotools.gpu.cu.device import *
##############################################################################
# Source Code Utility Functions
##############################################################################
def format(code):
'''
This is a kernel source auto-formatter. It mostly just does auto-indent
'''
code = re.compile(r'//').sub(r'@',code)
code = re.compile(r'^([^@\n]*)@([\n]*)\n').sub(r'@\2\n\1\n',code)
code = re.compile(r'@').sub(r'//',code)
code = re.compile(r'//([^\n]*)\n').sub(r'/*\1*/\n',code)
code = re.compile(r'[\n\t ]+').sub(' ',code)
code = re.compile(r';[\n\t ]*').sub('; ',code)
code = re.compile(r';+').sub(';',code)
code = re.compile(r';').sub(';\n',code)
code = re.compile(r'[ ]*else[ ]*\{[ ]*\}[ ]*').sub(' ',code)
code = re.compile(r'\{').sub('\n {\n',code)
code = re.compile(r'\}').sub('}\n',code)
code = re.compile(r'for[ ]*\(([^;]*)\n*;\n*([^;]*)\n*;\n*').sub(r'for(\1;\2;',code)
code = re.compile(r'\*/').sub('\n',code)
code = re.compile(r'/\*').sub('//',code)
code = re.compile(r'^[ \t]*').sub('',code)
code = re.compile(r'//([^\n]*)\n').sub(r'',code)
newcode = ''
indents = 0
for line in code.split('\n'):
indents -= len(re.compile(r'\}').findall(line))
for i in xrange(0,indents):
newcode += ' '
indents += len(re.compile(r'\{').findall(line))
newcode += line+'\n'
return newcode
def printKernel(code):
'''
This prints out a kernel source with line numbers
'''
code = format(code)
code = code.split('\n')
labeldigits = ceil(log(len(code))/log(10))
formatstring = "%0"+str(labeldigits)+"d %s"
for i,line in enumerate(code):
print(formatstring%(i+2,line))
##############################################################################
# GPU function generting metafunctions
##############################################################################
@memoize
def gpubin(fun):
'''This is a small wrapper to simplify calling binary r = a op b kernels. It automates creation of the result array'''
def ll(a,b):
r=gpuarray.empty_like(a)
fun(a,b,r)
return r
return ll
gpuscalar=gpubin
@memoize
def gpumap(exp):
'''
This is a small wrapper to simplify creation of b[i] = f(a[i]) map
kernels. The map function is passed in as a string representing a CUDA
expression. The dollar sign $ should denote the argument variable. A
return array is automatically constructed. For example, `gpumap('$')`
    creates a clone or identity kernel, so `A=gpumap('$')(B)` will assign a
    copy of B to A. As a nontrivial example, a nonlinear map function
    could be created as `gpumap('1/(1+exp(-$))')`
'''
exp = "z[i]="+expsub(exp)
map_kern = lambda:ElementwiseKernel("float *x, float *z",exp,"map_kern")
def f(v):
r=gpuarray.empty_like(v)
map_kern()(v,r)
return r
return f
@memoize
def gpuintmap(exp):
'''This is the same thing as gpumap except for integer datatypes'''
exp = "z[i]="+expsub(exp)
map_kern = lambda:ElementwiseKernel("int *x, int *z",exp,"map_kern")
def f(v):
r=gpuarray.empty_like(v)
map_kern()(v,r)
return r
return f
expsub = lambda exp:re.compile(r'\$').sub(r'x[i]',exp)
@memoize
def gpumapeq(exp):
'''This is a small wrapper to simplify creation of a[i] = f(a[i]) map
kernels. The map function is passed in as a string representing a CUDA
expression. The dollar sign $ should denote the argument variable. The
result is assigned into the original array, so no new memory is
    allocated. For example, gpumapeq('$')
    creates an in-place identity kernel, so gpumapeq('$')(A) returns A
    unchanged. As a nontrivial example, a nonlinear map function
    could be created as gpumapeq('1/(1+exp(-$))')'''
exp = expsub("$="+exp)
map_kern = lambda:ElementwiseKernel("float *x",exp,"map_kern")
def f(v):
map_kern()(v)
return v
return f
"""
@memoize
def gpuparametermap(exp):
    '''Similar to gpumap, except that the resulting object accepts an
additional parameter list. I had to do this because I found I was
implementing maps with parameters, like log(x+c) with the parameter hard
compiled in, which was somewhat inefficient. At this point you may be
wondering why I'm not just using ElementWiseKernel. Anyway,
gpuparametermap(expression) returns a curried function that first
    accepts a parameter list, then the data. The map expression should
indic'''
exp = expsub("$="+exp)
print(exp)
map_kern = lambda:ElementwiseKernel("float *x",exp,"map_kern")
def f(v):
map_kern()(v)
return v
return f
"""
@memoize
def gpubinaryeq(exp):
'''
    This wrapper simplifies the creation of kernels executing operators
    like `{'+=','-=','*=','/='}`. That is, binary operators that assign the
    result to the left operand. This is to supplement the functionality of
PyCUDA GPUArrays, which support binary operations but always allocate a
new array to hold the result. This wrapper allows you to efficiently
execute binary operations that assign the result to one of the argument
arrays. For example, implement the GPU equivalent of `+=` as
`gpubinaryeq('$x+$y')(x,y)`. The result will automatically be assigned to
the first argument, x.
'''
exp = "$x="+exp
exp = (lambda exp:re.compile(r'\$x').sub(r'x[i]',exp))(exp)
exp = (lambda exp:re.compile(r'\$y').sub(r'y[i]',exp))(exp)
map_kern = lambda:ElementwiseKernel("float *x, float *y",exp,"map_kern")
def f(v,w):
map_kern()(v,w)
return v
return f
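# A small usage sketch for gpubinaryeq (assumes an initialized pycuda context,
# e.g. via `import pycuda.autoinit`):
#
#     x = gpuarray.to_gpu(np.zeros(4, dtype=np.float32))
#     y = gpuarray.to_gpu(np.ones(4, dtype=np.float32))
#     add_into = gpubinaryeq('$x+$y')   # GPU analogue of x += y
#     add_into(x, y)                    # result written into x, no new allocation
#     x.get()                           # -> array([1., 1., 1., 1.], dtype=float32)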
def guessGPUType(arg):
'''At the moment, this returns numpy.float32 for Python floats and
numpy.int32 for python integers, and is otherwise undefined'''
if arg.__class__==float:
return np.float32
elif arg.__class__==int:
return np.int32
else:
return lambda x:x
toGPUType = lambda arg:guessGPUType(arg)(arg)
'''A little wrapper to auto-cast floats/ints to respective numpy datatypes
for use on the GPU. This functionality probably exists elsewhere'''
@memoize
def ezkern(header, code, other=None):
'''
This is my easy kernel wrapper. This function accepts a header ( the
list of arguments ), a body ( the core of the loop ), and optionally
a block of helper function code. The core loop should reference "tid" as
the thread index variable. The distribution of threads on the GPU is
automatically managed.
'''
source = """
__global__ void fun(%(header)s, int ezkernelements, int ezkernstride) {
const int istart = (blockIdx.x*blockDim.x+blockIdx.y)*blockDim.y+threadIdx.x;
for (int tid=istart; tid<ezkernelements; tid+=ezkernstride) {
%(code)s;
}
}"""%{'header':header, 'code':code}
if other!=None:
source = other+source
printKernel(source)
myModule = SourceModule(source)
mykernel = myModule.get_function('fun')
estimateThreadsPerBlock(myModule)
@memoize
def init(n_units):
blocks = estimateBlocks(myModule,n_units)
myblock = (myModule.threads_per_block,1,1)
mygrid = (myModule.blocks,1)
otherargs = [np.int32(n_units),np.int32(myModule.threads_per_block*myModule.blocks)]
otherkwargs = {'block':myblock, 'grid':mygrid}
def execkern(*args):
a=cmap(toGPUType)(list(args))
a.extend(otherargs)
mykernel(*tuple(a),**otherkwargs)
return
return execkern
return init
kernel = ezkern
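# A hedged usage sketch for ezkern/kernel; the argument strings and sizes are
# hypothetical, and a CUDA-capable device with an active context is assumed:
#
#     scale = ezkern("float *data, float factor",
#                    "data[tid] = data[tid] * factor;")
#     scale_1024 = scale(1024)       # bind the number of elements once
#     scale_1024(gpu_vector, 2.0)    # launch; grid/block sizes are chosen internally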
gpupointer = lambda gpuarr:gpuarr.gpudata
'''Returns the starting memory location of a GPUArray'''
cpu = lambda v:v.get()
'''Casts a gpu array to respective numpy array type'''
gpufloat = lambda v:gpuarray.to_gpu((np.array(v)).astype(np.float32))
'''Casts a python list to a float array on the gpu'''
gpufloatmat= lambda M:gpufloat(flat(M))
'''Moves a python list of lists of floats to a GPU row-major packed float matrix simply by flattening the python data structure and copying'''
gpufloatred= lambda fun:lambda v:float((fun(v,np.float32)).get())
'''Wraps a GPUArray reduction function into a succinct form operating on float arrays'''
gpuint = lambda M:gpuarray.to_gpu(np.array(M).astype(np.int32))
'''Casts a python list to an integer array on the GPU'''
gpuintmat = lambda M:gpuint(flat(M))
'''Moves a python list of lists of integers to a GPU row-major packed integer matrix simply by flattening the python data structure and copying'''
gpuintred = lambda fun:lambda v:float((fun(v,np.int32)).get())
'''Wraps a GPUArray reduction function into a succinct form operating on int arrays'''
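# Putting the helpers together (a brief sketch, assuming an initialized pycuda context):
#
#     v = gpufloat([0.5, 1.0, 2.0])          # python list -> float32 GPUArray
#     logistic = gpumap('1/(1+exp(-$))')     # elementwise kernel built from an expression
#     cpu(logistic(v))                       # result back on the host as a numpy array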
| michaelerule/neurotools | gpu/cu/function.py | Python | gpl-3.0 | 10,277 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v10.enums.types import (
response_content_type as gage_response_content_type,
)
from google.ads.googleads.v10.resources.types import (
campaign_bid_modifier as gagr_campaign_bid_modifier,
)
from google.protobuf import field_mask_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v10.services",
marshal="google.ads.googleads.v10",
manifest={
"MutateCampaignBidModifiersRequest",
"CampaignBidModifierOperation",
"MutateCampaignBidModifiersResponse",
"MutateCampaignBidModifierResult",
},
)
class MutateCampaignBidModifiersRequest(proto.Message):
r"""Request message for
[CampaignBidModifierService.MutateCampaignBidModifiers][google.ads.googleads.v10.services.CampaignBidModifierService.MutateCampaignBidModifiers].
Attributes:
customer_id (str):
Required. ID of the customer whose campaign
bid modifiers are being modified.
operations (Sequence[google.ads.googleads.v10.services.types.CampaignBidModifierOperation]):
Required. The list of operations to perform
on individual campaign bid modifiers.
partial_failure (bool):
If true, successful operations will be
carried out and invalid operations will return
errors. If false, all operations will be carried
out in one transaction if and only if they are
all valid. Default is false.
validate_only (bool):
If true, the request is validated but not
executed. Only errors are returned, not results.
response_content_type (google.ads.googleads.v10.enums.types.ResponseContentTypeEnum.ResponseContentType):
The response content type setting. Determines
whether the mutable resource or just the
resource name should be returned post mutation.
"""
customer_id = proto.Field(proto.STRING, number=1,)
operations = proto.RepeatedField(
proto.MESSAGE, number=2, message="CampaignBidModifierOperation",
)
partial_failure = proto.Field(proto.BOOL, number=3,)
validate_only = proto.Field(proto.BOOL, number=4,)
response_content_type = proto.Field(
proto.ENUM,
number=5,
enum=gage_response_content_type.ResponseContentTypeEnum.ResponseContentType,
)
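# Illustrative construction of the request above (proto-plus messages accept their
# fields as keyword arguments; the customer id and resource name are hypothetical):
#
#     request = MutateCampaignBidModifiersRequest(
#         customer_id="1234567890",
#         operations=[CampaignBidModifierOperation(
#             remove="customers/1234567890/CampaignBidModifiers/111~222")],
#         partial_failure=True,
#     )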
class CampaignBidModifierOperation(proto.Message):
r"""A single operation (create, remove, update) on a campaign bid
modifier.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
update_mask (google.protobuf.field_mask_pb2.FieldMask):
FieldMask that determines which resource
fields are modified in an update.
create (google.ads.googleads.v10.resources.types.CampaignBidModifier):
Create operation: No resource name is
expected for the new campaign bid modifier.
This field is a member of `oneof`_ ``operation``.
update (google.ads.googleads.v10.resources.types.CampaignBidModifier):
Update operation: The campaign bid modifier
is expected to have a valid resource name.
This field is a member of `oneof`_ ``operation``.
remove (str):
Remove operation: A resource name for the removed campaign
bid modifier is expected, in this format:
``customers/{customer_id}/CampaignBidModifiers/{campaign_id}~{criterion_id}``
This field is a member of `oneof`_ ``operation``.
"""
update_mask = proto.Field(
proto.MESSAGE, number=4, message=field_mask_pb2.FieldMask,
)
create = proto.Field(
proto.MESSAGE,
number=1,
oneof="operation",
message=gagr_campaign_bid_modifier.CampaignBidModifier,
)
update = proto.Field(
proto.MESSAGE,
number=2,
oneof="operation",
message=gagr_campaign_bid_modifier.CampaignBidModifier,
)
remove = proto.Field(proto.STRING, number=3, oneof="operation",)
class MutateCampaignBidModifiersResponse(proto.Message):
r"""Response message for campaign bid modifiers mutate.
Attributes:
partial_failure_error (google.rpc.status_pb2.Status):
Errors that pertain to operation failures in the partial
failure mode. Returned only when partial_failure = true and
all errors occur inside the operations. If any errors occur
outside the operations (e.g. auth errors), we return an RPC
level error.
results (Sequence[google.ads.googleads.v10.services.types.MutateCampaignBidModifierResult]):
All results for the mutate.
"""
partial_failure_error = proto.Field(
proto.MESSAGE, number=3, message=status_pb2.Status,
)
results = proto.RepeatedField(
proto.MESSAGE, number=2, message="MutateCampaignBidModifierResult",
)
class MutateCampaignBidModifierResult(proto.Message):
r"""The result for the criterion mutate.
Attributes:
resource_name (str):
Returned for successful operations.
campaign_bid_modifier (google.ads.googleads.v10.resources.types.CampaignBidModifier):
The mutated campaign bid modifier with only mutable fields
after mutate. The field will only be returned when
response_content_type is set to "MUTABLE_RESOURCE".
"""
resource_name = proto.Field(proto.STRING, number=1,)
campaign_bid_modifier = proto.Field(
proto.MESSAGE,
number=2,
message=gagr_campaign_bid_modifier.CampaignBidModifier,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| googleads/google-ads-python | google/ads/googleads/v10/services/types/campaign_bid_modifier_service.py | Python | apache-2.0 | 6,664 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v8.services.types import conversion_upload_service
from google.rpc import status_pb2 # type: ignore
from .transports.base import (
ConversionUploadServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import ConversionUploadServiceGrpcTransport
class ConversionUploadServiceClientMeta(type):
"""Metaclass for the ConversionUploadService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[ConversionUploadServiceTransport]]
_transport_registry["grpc"] = ConversionUploadServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[ConversionUploadServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class ConversionUploadServiceClient(
metaclass=ConversionUploadServiceClientMeta
):
"""Service to upload conversions."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ConversionUploadServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ConversionUploadServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> ConversionUploadServiceTransport:
"""Return the transport used by the client instance.
Returns:
ConversionUploadServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def conversion_custom_variable_path(
customer_id: str, conversion_custom_variable_id: str,
) -> str:
"""Return a fully-qualified conversion_custom_variable string."""
return "customers/{customer_id}/conversionCustomVariables/{conversion_custom_variable_id}".format(
customer_id=customer_id,
conversion_custom_variable_id=conversion_custom_variable_id,
)
@staticmethod
def parse_conversion_custom_variable_path(path: str) -> Dict[str, str]:
"""Parse a conversion_custom_variable path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/conversionCustomVariables/(?P<conversion_custom_variable_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, ConversionUploadServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the conversion upload service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.ConversionUploadServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
)
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, ConversionUploadServiceTransport):
# transport is a ConversionUploadServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = ConversionUploadServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def upload_click_conversions(
self,
request: conversion_upload_service.UploadClickConversionsRequest = None,
*,
customer_id: str = None,
conversions: Sequence[conversion_upload_service.ClickConversion] = None,
partial_failure: bool = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> conversion_upload_service.UploadClickConversionsResponse:
r"""Processes the given click conversions.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `ConversionUploadError <>`__
`HeaderError <>`__ `InternalError <>`__
`PartialFailureError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (:class:`google.ads.googleads.v8.services.types.UploadClickConversionsRequest`):
The request object. Request message for
[ConversionUploadService.UploadClickConversions][google.ads.googleads.v8.services.ConversionUploadService.UploadClickConversions].
customer_id (:class:`str`):
Required. The ID of the customer
performing the upload.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
conversions (:class:`Sequence[google.ads.googleads.v8.services.types.ClickConversion]`):
Required. The conversions that are
being uploaded.
This corresponds to the ``conversions`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
partial_failure (:class:`bool`):
Required. If true, successful
operations will be carried out and
invalid operations will return errors.
If false, all operations will be carried
out in one transaction if and only if
they are all valid. This should always
be set to true.
See
https://developers.google.com/google-
ads/api/docs/best-practices/partial-
failures for more information about
partial failure.
This corresponds to the ``partial_failure`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.services.types.UploadClickConversionsResponse:
Response message for
[ConversionUploadService.UploadClickConversions][google.ads.googleads.v8.services.ConversionUploadService.UploadClickConversions].
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any(
[customer_id, conversions, partial_failure]
):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a conversion_upload_service.UploadClickConversionsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, conversion_upload_service.UploadClickConversionsRequest
):
request = conversion_upload_service.UploadClickConversionsRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if conversions is not None:
request.conversions = conversions
if partial_failure is not None:
request.partial_failure = partial_failure
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.upload_click_conversions
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
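    # A hedged usage sketch (not part of the generated surface): in practice this
    # client is usually obtained through GoogleAdsClient rather than constructed
    # directly, e.g.
    #
    #     googleads_client = GoogleAdsClient.load_from_storage()   # reads google-ads.yaml
    #     service = googleads_client.get_service("ConversionUploadService")
    #     response = service.upload_click_conversions(
    #         customer_id="1234567890",
    #         conversions=[click_conversion],   # prepared ClickConversion messages
    #         partial_failure=True,
    #     )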
def upload_call_conversions(
self,
request: conversion_upload_service.UploadCallConversionsRequest = None,
*,
customer_id: str = None,
conversions: Sequence[conversion_upload_service.CallConversion] = None,
partial_failure: bool = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> conversion_upload_service.UploadCallConversionsResponse:
r"""Processes the given call conversions.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `PartialFailureError <>`__
`QuotaError <>`__ `RequestError <>`__
Args:
request (:class:`google.ads.googleads.v8.services.types.UploadCallConversionsRequest`):
The request object. Request message for
[ConversionUploadService.UploadCallConversions][google.ads.googleads.v8.services.ConversionUploadService.UploadCallConversions].
customer_id (:class:`str`):
Required. The ID of the customer
performing the upload.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
conversions (:class:`Sequence[google.ads.googleads.v8.services.types.CallConversion]`):
Required. The conversions that are
being uploaded.
This corresponds to the ``conversions`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
partial_failure (:class:`bool`):
Required. If true, successful
operations will be carried out and
invalid operations will return errors.
If false, all operations will be carried
out in one transaction if and only if
they are all valid. This should always
be set to true.
See
https://developers.google.com/google-
ads/api/docs/best-practices/partial-
failures for more information about
partial failure.
This corresponds to the ``partial_failure`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.services.types.UploadCallConversionsResponse:
Response message for
[ConversionUploadService.UploadCallConversions][google.ads.googleads.v8.services.ConversionUploadService.UploadCallConversions].
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any(
[customer_id, conversions, partial_failure]
):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a conversion_upload_service.UploadCallConversionsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, conversion_upload_service.UploadCallConversionsRequest
):
request = conversion_upload_service.UploadCallConversionsRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if conversions is not None:
request.conversions = conversions
if partial_failure is not None:
request.partial_failure = partial_failure
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.upload_call_conversions
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("ConversionUploadServiceClient",)
| googleads/google-ads-python | google/ads/googleads/v8/services/services/conversion_upload_service/client.py | Python | apache-2.0 | 25,206 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TaskSchedulingError(Model):
"""Information about an error when scheduling a task.
:param category: The category of the task scheduling error. Possible
values include: 'userError', 'serverError', 'unmapped'
:type category: str or :class:`SchedulingErrorCategory
<azure.batch.models.SchedulingErrorCategory>`
:param code: An identifier for the task scheduling error. Codes are
invariant and are intended to be consumed programmatically.
:type code: str
:param message: A message describing the task scheduling error, intended
to be suitable for display in a user interface.
:type message: str
:param details: The list of additional error details related to the
scheduling error.
:type details: list of :class:`NameValuePair
<azure.batch.models.NameValuePair>`
"""
_validation = {
'category': {'required': True},
}
_attribute_map = {
'category': {'key': 'category', 'type': 'SchedulingErrorCategory'},
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'details': {'key': 'details', 'type': '[NameValuePair]'},
}
def __init__(self, category, code=None, message=None, details=None):
self.category = category
self.code = code
self.message = message
self.details = details
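# Example construction (values are illustrative; NameValuePair would need to be
# imported from azure.batch.models, and instances are normally deserialized from
# Batch service responses rather than built by hand):
#
#     error = TaskSchedulingError(
#         category='userError',
#         code='BlobDownloadMiscError',        # hypothetical error code
#         message='Failed to download a resource file',
#         details=[NameValuePair(name='FilePath', value='task.sh')],
#     )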
| rjschwei/azure-sdk-for-python | azure-batch/azure/batch/models/task_scheduling_error.py | Python | mit | 1,889 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient.auth.identity import base
from keystoneclient.auth.identity import generic
from keystoneclient.auth.identity import v2
from keystoneclient.auth.identity import v3
BaseIdentityPlugin = base.BaseIdentityPlugin
V2Password = v2.Password
V2Token = v2.Token
V3Password = v3.Password
V3Token = v3.Token
Password = generic.Password
Token = generic.Token
__all__ = ['BaseIdentityPlugin',
'Password',
'Token',
'V2Password',
'V2Token',
'V3Password',
'V3Token']
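# A minimal usage sketch (endpoint and credentials are placeholders):
#
#     from keystoneclient import session
#     from keystoneclient.auth import identity
#
#     auth = identity.Password(auth_url='http://keystone:5000/v3',
#                              username='demo', password='secret',
#                              project_name='demo',
#                              user_domain_id='default',
#                              project_domain_id='default')
#     sess = session.Session(auth=auth)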
| sjsucohort6/openstack | python/venv/lib/python2.7/site-packages/keystoneclient/auth/identity/__init__.py | Python | mit | 1,089 |
import os
import sys
import urllib
import time
import logging
import pytest
import mock
# Config
if sys.platform == "win32":
PHANTOMJS_PATH = "tools/phantomjs/bin/phantomjs.exe"
else:
PHANTOMJS_PATH = "phantomjs"
SITE_URL = "http://127.0.0.1:43110"
# Imports relative to src dir
sys.path.append(
os.path.abspath(os.path.dirname(__file__) + "/..")
)
from Config import config
config.argv = ["none"] # Dont pass any argv to config parser
config.parse()
config.data_dir = "src/Test/testdata" # Use test data for unittests
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
from Site import Site
from User import UserManager
from File import FileServer
from Connection import ConnectionServer
from Crypt import CryptConnection
import gevent
from gevent import monkey
monkey.patch_all(thread=False)
@pytest.fixture(scope="session")
def resetSettings(request):
os.chdir(os.path.abspath(os.path.dirname(__file__) + "/../..")) # Set working dir
open("%s/sites.json" % config.data_dir, "w").write("{}")
open("%s/users.json" % config.data_dir, "w").write("""
{
"15E5rhcAUD69WbiYsYARh4YHJ4sLm2JEyc": {
"certs": {},
"master_seed": "024bceac1105483d66585d8a60eaf20aa8c3254b0f266e0d626ddb6114e2949a",
"sites": {}
}
}
""")
def cleanup():
os.unlink("%s/sites.json" % config.data_dir)
os.unlink("%s/users.json" % config.data_dir)
request.addfinalizer(cleanup)
@pytest.fixture(scope="session")
def resetTempSettings(request):
data_dir_temp = config.data_dir + "-temp"
if not os.path.isdir(data_dir_temp):
os.mkdir(data_dir_temp)
open("%s/sites.json" % data_dir_temp, "w").write("{}")
open("%s/users.json" % data_dir_temp, "w").write("""
{
"15E5rhcAUD69WbiYsYARh4YHJ4sLm2JEyc": {
"certs": {},
"master_seed": "024bceac1105483d66585d8a60eaf20aa8c3254b0f266e0d626ddb6114e2949a",
"sites": {}
}
}
""")
def cleanup():
os.unlink("%s/sites.json" % data_dir_temp)
os.unlink("%s/users.json" % data_dir_temp)
request.addfinalizer(cleanup)
@pytest.fixture(scope="session")
def site():
site = Site("1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT")
return site
@pytest.fixture()
def site_temp(request):
with mock.patch("Config.config.data_dir", config.data_dir+"-temp"):
site_temp = Site("1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT")
def cleanup():
site_temp.storage.deleteFiles()
request.addfinalizer(cleanup)
return site_temp
@pytest.fixture(scope="session")
def user():
user = UserManager.user_manager.get()
user.sites = {} # Reset user data
return user
@pytest.fixture(scope="session")
def browser():
try:
from selenium import webdriver
browser = webdriver.PhantomJS(executable_path=PHANTOMJS_PATH, service_log_path=os.path.devnull)
browser.set_window_size(1400, 1000)
except Exception, err:
raise pytest.skip("Test requires selenium + phantomjs: %s" % err)
return browser
@pytest.fixture(scope="session")
def site_url():
try:
urllib.urlopen(SITE_URL).read()
except Exception, err:
raise pytest.skip("Test requires zeronet client running: %s" % err)
return SITE_URL
@pytest.fixture(scope="session")
def file_server(request):
CryptConnection.manager.loadCerts() # Load and create certs
request.addfinalizer(CryptConnection.manager.removeCerts) # Remove cert files after end
file_server = FileServer("127.0.0.1", 1544)
gevent.spawn(lambda: ConnectionServer.start(file_server))
time.sleep(0) # Port opening
assert file_server.running
def stop():
file_server.stop()
request.addfinalizer(stop)
return file_server
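# A sketch of how a test might consume these fixtures (the test body is
# hypothetical; pytest injects fixtures by argument name):
#
#     def testFileServerRunning(file_server, site, resetSettings):
#         assert file_server.running
#         assert site.address == "1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT"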
| ak-67/ZeroNet | src/Test/conftest.py | Python | gpl-2.0 | 3,844 |
# -*- coding: utf-8 -*-
ADMIN_MAPPING = {
'admin_user_suspend': {
'resource': 'admin/users/{id}/suspend',
'docs': ('http://docs.discourse.org/#tag/'
'Admin%2Fpaths%2F~1admin~1users~1%7Bid%7D~1suspend%2Fput'),
'methods': ['PUT'],
},
'admin_user_unsuspend': {
'resource': 'admin/users/{id}/unsuspend',
'docs': ('http://docs.discourse.org/#tag/'
'Admin%2Fpaths%2F~1admin~1users~1%7Bid%7D~1unsuspend%2Fput'),
'methods': ['PUT'],
},
'admin_user_block': {
'resource': 'admin/users/{id}/block',
'docs': ('http://docs.discourse.org/#tag/'
'Admin%2Fpaths%2F~1admin~1users~1%7Bid%7D~1block%2Fput'),
'methods': ['PUT'],
},
'admin_user_unblock': {
'resource': 'admin/users/{id}/unblock',
'docs': ('http://docs.discourse.org/#tag/'
'Admin%2Fpaths%2F~1admin~1users~1%7Bid%7D~1unblock%2Fput'),
'methods': ['PUT'],
},
'admin_user_activate': {
'resource': 'admin/users/{id}/activate',
'docs': ('http://docs.discourse.org/#tag/'
'Admin%2Fpaths%2F~1admin~1users~1%7Bid%7D~1activate%2Fput'),
'methods': ['PUT'],
},
'admin_user_anonymize': {
'resource': 'admin/users/{id}/anonymize',
'docs': ('http://docs.discourse.org/#tag/'
'Admin%2Fpaths%2F~1admin~1users~1%7Bid%7D~1anonymize%2Fput'),
'methods': ['PUT'],
},
'admin_api_key_generate': {
'resource': 'admin/users/{id}/generate_api_key',
'docs': ('http://docs.discourse.org/#tag/'
'Admin%2Fpaths%2F~1admin~1users~1%7Bid%7D~1generate_api_key%2Fpost'),
'methods': ['POST'],
},
'admin_group_assign': {
'resource': 'admin/users/{id}/groups',
'docs': ('http://docs.discourse.org/#tag/'
'Admin%2Fpaths%2F~1admin~1users~1%7Bid%7D~1groups%2Fpost'),
'methods': ['POST'],
},
'admin_group_remove': {
'resource': 'admin/users/{id}/groups/{group_id}',
'docs': ('http://docs.discourse.org/#tag/'
'Admin%2Fpaths%2F~1admin~1users~1%7Bid%7D~1groups~1%7Bgroup_id%7D%2Fdelete'),
'methods': ['DELETE'],
},
'admin_group_create': {
'resource': 'admin/groups',
'docs': ('http://docs.discourse.org/#tag/'
'Admin%2Fpaths%2F~1admin~1groups%2Fpost'),
'methods': ['POST'],
},
'admin_group_delete': {
'resource': 'admin/groups/{group_id}.json',
'docs': ('http://docs.discourse.org/#tag/'
'Admin%2Fpaths%2F~1admin~1groups~1%7Bgroup_id%7D.json%2Fdelete'),
'methods': ['DELETE'],
},
'admin_group_members_list': {
'resource': 'groups/{group_name}/members.json',
'docs': ('http://docs.discourse.org/#tag/'
'Admin%2Fpaths%2F~1groups~1%7Bgroup_name%7D~1members.json%2Fget'),
'methods': ['GET'],
},
'admin_group_members_add': {
'resource': 'groups/{group_id}/members.json',
'docs': ('http://docs.discourse.org/#tag/'
'Admin%2Fpaths%2F~1groups~1%7Bgroup_id%7D~1members.json%2Fput'),
'methods': ['PUT'],
},
'admin_group_members_delete': {
'resource': 'groups/{group_id}/members.json',
'docs': ('http://docs.discourse.org/#tag/'
'Admin%2Fpaths%2F~1groups~1%7Bgroup_id%7D~1members.json%2Fdelete'),
'methods': ['DELETE'],
},
'admin_site_settings_show': {
'resource': 'admin/site_settings.json',
'docs': ('http://docs.discourse.org/#tag/'
'Admin%2Fpaths%2F~1admin~1site_settings.json%2Fget'),
'methods': ['GET'],
},
}
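# A hedged sketch of how this mapping is consumed through a tapioca client
# (the wrapper class name and constructor arguments are assumptions):
#
#     from tapioca_discourse import Discourse
#     api = Discourse(api_key='...', api_username='system',
#                     api_url='https://forum.example.com')
#     api.admin_user_suspend(id=42).put()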
| 0xc0ffeec0de/tapioca-discourse | tapioca_discourse/resource_mapping/admin.py | Python | mit | 3,741 |
from couchdeploy import coudeploy
| torque59/nosqlpot | couchpot/__init__.py | Python | gpl-2.0 | 34 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example / benchmark for building a PTB LSTM model.
Trains the model described in:
(Zaremba, et al.) Recurrent Neural Network Regularization
http://arxiv.org/abs/1409.2329
There are 3 supported model configurations:
===========================================
| config | epochs | train | valid | test
===========================================
| small | 13 | 37.99 | 121.39 | 115.91
| medium | 39 | 48.45 | 86.16 | 82.07
| large | 55 | 37.87 | 82.62 | 78.29
The exact results may vary depending on the random initialization.
The hyperparameters used in the model:
- init_scale - the initial scale of the weights
- learning_rate - the initial value of the learning rate
- max_grad_norm - the maximum permissible norm of the gradient
- num_layers - the number of LSTM layers
- num_steps - the number of unrolled steps of LSTM
- hidden_size - the number of LSTM units
- max_epoch - the number of epochs trained with the initial learning rate
- max_max_epoch - the total number of epochs for training
- keep_prob - the probability of keeping weights in the dropout layer
- lr_decay - the decay of the learning rate for each epoch after "max_epoch"
- batch_size - the batch size
The data required for this example is in the data/ dir of the
PTB dataset from Tomas Mikolov's webpage:
$ wget http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
$ tar xvf simple-examples.tgz
To run:
$ python ptb_word_lm.py --data_path=simple-examples/data/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import os
import numpy as np
import tensorflow as tf
import reader
flags = tf.flags
logging = tf.logging
flags.DEFINE_string(
"model", "small",
"A type of model. Possible options are: small, medium, large.")
#flags.DEFINE_string("data_path", '/home/comp/csshshi/data/tensorflow/lstm_data/data', "data_path")
flags.DEFINE_string("data_path", os.environ['HOME']+'/data/tensorflow/lstm_data/data', "data_path")
flags.DEFINE_integer("batchsize", 32, "mini-batch size")
flags.DEFINE_integer("hiddensize", 256, "hidden units number of each lstm layer")
flags.DEFINE_integer("numlayer", 2, "number of lstm layers")
flags.DEFINE_integer("seqlen", 32, "len of sample")
flags.DEFINE_string("device", '0', "select device id")
flags.DEFINE_integer("iters", 1000, "iterations for profiling")
flags.DEFINE_integer("max_max_epoch", 20, "max epochs for training")
FLAGS = flags.FLAGS
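# Example invocation using the flags defined above (paths and sizes are illustrative):
#
#     python lstm.py --data_path=$HOME/data/tensorflow/lstm_data/data \
#         --model=small --batchsize=64 --hiddensize=512 --numlayer=2 \
#         --seqlen=32 --device=0 --max_max_epoch=5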
class PTBModel(object):
"""The PTB model."""
def __init__(self, is_training, config):
self.batch_size = batch_size = config.batch_size
self.num_steps = num_steps = config.num_steps
size = config.hidden_size
vocab_size = config.vocab_size
self._input_data = tf.placeholder(tf.int32, [batch_size, num_steps])
self._targets = tf.placeholder(tf.int32, [batch_size, num_steps])
# Slightly better results can be obtained with forget gate biases
# initialized to 1 but the hyperparameters of the model would need to be
# different than reported in the paper.
#lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(size, forget_bias=0.0)
lstm_cell = tf.contrib.rnn.BasicLSTMCell(size, forget_bias=0.0)
if is_training and config.keep_prob < 1:
lstm_cell = tf.nn.rnn_cell.DropoutWrapper(
lstm_cell, output_keep_prob=config.keep_prob)
cell = tf.contrib.rnn.MultiRNNCell([lstm_cell] * config.num_layers)
# print config
print("batch_size: ", config.batch_size)
print("num_steps: ", config.num_steps)
print("hidden_size: ", config.hidden_size)
print("num_layers: ", config.num_layers)
self._initial_state = cell.zero_state(batch_size, tf.float32)
with tf.device("/cpu:0"):
embedding = tf.get_variable("embedding", [vocab_size, size])
inputs = tf.nn.embedding_lookup(embedding, self._input_data)
if is_training and config.keep_prob < 1:
inputs = tf.nn.dropout(inputs, config.keep_prob)
# Simplified version of tensorflow.models.rnn.rnn.py's rnn().
# This builds an unrolled LSTM for tutorial purposes only.
# In general, use the rnn() or state_saving_rnn() from rnn.py.
#
# The alternative version of the code below is:
#
# from tensorflow.models.rnn import rnn
# inputs = [tf.squeeze(input_, [1])
# for input_ in tf.split(1, num_steps, inputs)]
# outputs, state = rnn.rnn(cell, inputs, initial_state=self._initial_state)
outputs = []
state = self._initial_state
with tf.variable_scope("RNN"):
for time_step in range(num_steps):
if time_step > 0: tf.get_variable_scope().reuse_variables()
(cell_output, state) = cell(inputs[:, time_step, :], state)
outputs.append(cell_output)
output = tf.reshape(tf.concat(axis=1, values=outputs), [-1, size])
softmax_w = tf.get_variable("softmax_w", [size, vocab_size])
softmax_b = tf.get_variable("softmax_b", [vocab_size])
logits = tf.matmul(output, softmax_w) + softmax_b
#loss = tf.nn.seq2seq.sequence_loss_by_example(
loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[logits],
[tf.reshape(self._targets, [-1])],
[tf.ones([batch_size * num_steps])])
self._cost = cost = tf.reduce_sum(loss) / batch_size
self._final_state = state
if not is_training:
return
self._lr = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),
config.max_grad_norm)
optimizer = tf.train.GradientDescentOptimizer(self.lr)
self._train_op = optimizer.apply_gradients(zip(grads, tvars))
def assign_lr(self, session, lr_value):
session.run(tf.assign(self.lr, lr_value))
@property
def input_data(self):
return self._input_data
@property
def targets(self):
return self._targets
@property
def initial_state(self):
return self._initial_state
@property
def cost(self):
return self._cost
@property
def final_state(self):
return self._final_state
@property
def lr(self):
return self._lr
@property
def train_op(self):
return self._train_op
class SmallConfig(object):
"""Small config."""
init_scale = 0.1
learning_rate = 0.1
max_grad_norm = 5
num_layers = 2
num_steps = 20
hidden_size = 200
max_epoch = 1 # 4
max_max_epoch = 20
keep_prob = 1.0
lr_decay = 0.5
batch_size = 20
vocab_size = 10000
device = 0
iters = 1000
class MediumConfig(object):
"""Medium config."""
init_scale = 0.05
learning_rate = 1.0
max_grad_norm = 5
num_layers = 2
num_steps = 35
hidden_size = 650
max_epoch = 1
max_max_epoch = 1
keep_prob = 0.5
lr_decay = 0.8
batch_size = 20
vocab_size = 10000
device = 0
iters = 1000
class LargeConfig(object):
"""Large config."""
init_scale = 0.04
learning_rate = 1.0
max_grad_norm = 10
num_layers = 2
num_steps = 35
hidden_size = 1500
max_epoch = 14
max_max_epoch = 55
keep_prob = 0.35
lr_decay = 1 / 1.15
batch_size = 20
vocab_size = 10000
device = 0
iters = 1000
class TestConfig(object):
"""Tiny config, for testing."""
init_scale = 0.1
learning_rate = 1.0
max_grad_norm = 1
num_layers = 1
num_steps = 2
hidden_size = 2
max_epoch = 1
max_max_epoch = 1
keep_prob = 1.0
lr_decay = 0.5
batch_size = 20
vocab_size = 10000
device = 0
iters = 1000
def run_epoch(session, m, data, eval_op, verbose=False):
"""Runs the model on the given data."""
epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
start_time = time.time()
costs = 0.0
iters = 0
print('m.initial_state:', m.initial_state)
state = session.run(m.initial_state) #.eval()
step = 0
for step, (x, y) in enumerate(reader.ptb_iterator(data, m.batch_size,
m.num_steps)):
cost, state, _ = session.run([m.cost, m.final_state, eval_op],
{m.input_data: x,
m.targets: y,
m.initial_state: state})
costs += cost
iters += m.num_steps
if verbose and step % (epoch_size // 10) == 10:
print("%.3f perplexity: %.3f speed: %.0f wps" %
(step * 1.0 / epoch_size, np.exp(costs / iters),
iters * m.batch_size / (time.time() - start_time)))
print("Time for one epoch, %d iters: %.4f seconds" %
(step+1, time.time() - start_time))
average_batch_time = (time.time() - start_time)/(step+1)
print("Average time per minibatch in this epoch: %.4f seconds" % average_batch_time)
return np.exp(costs / iters), average_batch_time
def get_config():
config = None
if FLAGS.model == "small":
config = SmallConfig()
elif FLAGS.model == "medium":
config = MediumConfig()
elif FLAGS.model == "large":
config = LargeConfig()
elif FLAGS.model == "test":
config = TestConfig()
else:
raise ValueError("Invalid model: %s", FLAGS.model)
config.batch_size = FLAGS.batchsize
config.hidden_size = FLAGS.hiddensize
config.num_layers = FLAGS.numlayer
config.num_steps = FLAGS.seqlen
config.device = FLAGS.device
config.iters = FLAGS.iters
config.max_max_epoch = FLAGS.max_max_epoch
return config
def main(_):
if not FLAGS.data_path:
raise ValueError("Must set --data_path to PTB data directory")
raw_data = reader.ptb_raw_data(FLAGS.data_path)
train_data, valid_data, test_data, _ = raw_data
config = get_config()
eval_config = get_config()
eval_config.batch_size = 1
eval_config.num_steps = 1
if config.device == '-1':
tf_dev = '/cpu:0'
else:
tf_dev = '/gpu:' + config.device
print(tf_dev)
tconfig = tf.ConfigProto(allow_soft_placement=True)
if tf_dev.find('cpu') >= 0: # cpu version
num_threads = os.getenv('OMP_NUM_THREADS', 1)
tconfig = tf.ConfigProto(allow_soft_placement=True, intra_op_parallelism_threads=int(num_threads))
with tf.Graph().as_default(), tf.device(tf_dev), tf.Session(config=tconfig) as session:
initializer = tf.random_uniform_initializer(-config.init_scale,
config.init_scale)
with tf.variable_scope("model", reuse=None, initializer=initializer):
m = PTBModel(is_training=True, config=config)
with tf.variable_scope("model", reuse=True, initializer=initializer):
#mvalid = PTBModel(is_training=False, config=config)
mtest = PTBModel(is_training=False, config=eval_config)
tf.global_variables_initializer().run()
total_average_batch_time = 0.0
epochs_info = []
for i in range(config.max_max_epoch):
#lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
#m.assign_lr(session, config.learning_rate * lr_decay)
m.assign_lr(session, config.learning_rate)
print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
train_perplexity, average_batch_time = run_epoch(session, m, train_data, m.train_op, verbose=True)
total_average_batch_time += average_batch_time
print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
if i % 2 == 0:
epochs_info.append('%d:_:%.3f'%(i, train_perplexity))
# valid_perplexity = run_epoch(session, mvalid, valid_data, tf.no_op())
# print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))
print("average_batch_time: %.6f" % (total_average_batch_time/int(config.max_max_epoch)))
print('epoch_info:'+','.join(epochs_info))
test_perplexity, test_average_batch_time = run_epoch(session, mtest, test_data, tf.no_op())
print("Test Perplexity: %.3f" % test_perplexity)
if __name__ == "__main__":
tf.app.run()
| hclhkbu/dlbench | tools/tensorflow/rnn/lstm/lstm.py | Python | mit | 12,444 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import sys
import unittest
import glob
import requests
from mock import Mock
from mock import patch
from mock import ANY
from sanji.connection.mockup import Mockup
from sanji.message import Message
try:
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../")
from index import Index
from status import Status as status
except ImportError as e:
print "Please check the python PATH for import test module. (%s)" \
% __file__
print (e)
exit(1)
class MockMessage(object):
pass
class TestIndexClass(unittest.TestCase):
@patch.object(status, "set_hostname")
def setUp(self, mock_set_hostname):
self.root_path = os.path.abspath(os.path.dirname(__file__) + "/../")
self.jsons = glob.glob(os.path.join(self.root_path, "data/*.json"))
self.backups = glob.glob(os.path.join(self.root_path, "data/*.backup"))
for file in self.jsons + self.backups:
os.unlink(file)
self.index = Index(connection=Mockup())
def tearDown(self):
files = glob.glob(os.path.join(self.root_path, "data/*.json")) + \
glob.glob(os.path.join(self.root_path, "data/*.backup"))
for prevConfig in files:
try:
os.unlink(prevConfig)
except:
pass
self.index.stop()
self.index = None
@patch.object(status, "get_disks")
@patch.object(status, "get_memory")
@patch.object(status, "get_memory_usage")
@patch.object(status, "get_cpu_usage")
@patch.object(status, "get_uptime")
@patch.object(status, "get_product_version")
@patch.object(status, "get_hostname")
def test__get_status(
self, mock_hostname, mock_version, mock_uptime,
mock_cpu_usage, mock_memory_usage, mock_memory,
mock_disks):
"""test__get_status: Get system status"""
mock_hostname.return_value = "Moxa"
mock_version.return_value = "1.1"
mock_uptime.return_value = 181499
mock_cpu_usage.return_value = 98.7
mock_memory_usage.return_value = 16.8
mock_memory.return_value = 257286144
mock_disks.return_value = []
resp = Mock()
mock_message = MockMessage()
mock_message.query = {}
self.index.get_status(message=mock_message, response=resp, test=True)
resp.assert_called_once_with(
data={
"hostname": mock_hostname.return_value,
"version": mock_version.return_value,
"uptimeSec": mock_uptime.return_value,
"cpuUsage": mock_cpu_usage.return_value,
"memoryUsage": mock_memory_usage.return_value,
"memory": mock_memory.return_value,
"disks": mock_disks.return_value
}
)
@patch.object(status, "get_disks")
@patch.object(status, "get_memory")
@patch.object(status, "get_memory_usage")
@patch.object(status, "get_cpu_usage")
@patch.object(status, "get_uptime")
@patch.object(status, "get_product_version")
@patch.object(status, "get_hostname")
def test__get_status_querystring(
self, mock_hostname, mock_version, mock_uptime,
mock_cpu_usage, mock_memory_usage, mock_memory,
mock_disks):
"""test__get_status: Get system status"""
mock_hostname.return_value = "Moxa"
mock_version.return_value = "1.1"
mock_uptime.return_value = 181499
mock_cpu_usage.return_value = 98.7
mock_memory_usage.return_value = 16.8
mock_memory.return_value = 257286144
mock_disks.return_value = []
resp = Mock()
mock_message = MockMessage()
mock_message.query = {
"fields": "cpuUsage,disks"
}
self.index.get_status(message=mock_message, response=resp, test=True)
resp.assert_called_once_with(
data={
"cpuUsage": mock_cpu_usage.return_value,
"disks": mock_disks.return_value
}
)
self.assertFalse(mock_hostname.called)
self.assertFalse(mock_version.called)
self.assertFalse(mock_uptime.called)
self.assertFalse(mock_memory_usage.called)
self.assertFalse(mock_memory.called)
@patch.object(status, "set_hostname")
def test__put_status(self, mock_set_hostname):
"""test__put_status: Update hostname"""
resp = Mock()
message = Message({
"data": {
"hostname": "test"
}
})
self.index.put_status(message=message, response=resp, test=True)
resp.assert_called_once_with(data=message.data)
@patch.object(status, "get_net_interfaces")
def test__get_net_interfaces(self, mock_netifaces):
"""test__get_net_interfaces: Get network interface list"""
mock_netifaces.return_value = ["eth0", "eth1", "wwan0"]
resp = Mock()
self.index.get_net_interface(message=None, response=resp, test=True)
resp.assert_called_once_with(data=mock_netifaces.return_value)
@patch("status.tar_syslog_files")
@patch("index.requests.post")
@patch("index.sh")
def test_post_syslog(
self, mock_sh, mock_post_requests, mock_tar_syslog_files):
"""
post
"data": {
"hostname": "test"
}
"""
message = Message({
"data": {
"headers": {
"xxx": "yyy"
},
"url": "https://localhost"
}, "query": {}, "param": {}})
download_url = "https://localhost/api/v1/download/123456789"
filename = "xxx.tar.gz"
mock_tar_syslog_files.return_value = filename
mock_post_result = Mock()
mock_post_requests.return_value = mock_post_result
mock_post_result.status_code = requests.codes.ok
mock_post_result.json.return_value = {
"url": download_url
}
def resp(code=200, data=None):
self.assertEqual(200, code)
self.assertEqual(download_url, data["url"])
with patch("__builtin__.open"):
self.index.post_syslog(message=message, response=resp, test=True)
mock_tar_syslog_files.assert_called_once_with(ANY)
self.assertTrue(mock_sh.rm.called)
mock_post_requests.assert_called_once_with(
message.data["url"],
files={filename: ANY},
headers=message.data["headers"],
verify=False
)
if __name__ == "__main__":
unittest.main()
| Sanji-IO/sanji-status | tests/test_index.py | Python | gpl-2.0 | 6,649 |
from .workout import Workout
from .duration import Time
from .duration import Distance | claha/suunto | suunto/__init__.py | Python | mit | 86 |
from django.core.files.base import File
from django.db.models.fields.files import (
FieldFile,
ImageFieldFile,
ImageFileDescriptor
)
from .mixins import VersatileImageMixIn
class VersatileImageFieldFile(VersatileImageMixIn, ImageFieldFile):
def __getstate__(self):
# VersatileImageFieldFile needs access to its associated model field
# and an instance it's attached to in order to work properly, but the
# only necessary data to be pickled is the file's name itself.
# Everything else will be restored later, by
# VersatileImageFileDescriptor below.
state = super(VersatileImageFieldFile, self).__getstate__()
state['_create_on_demand'] = self._create_on_demand
return state
class VersatileImageFileDescriptor(ImageFileDescriptor):
def __set__(self, instance, value):
previous_file = instance.__dict__.get(self.field.name)
super(VersatileImageFileDescriptor, self).__set__(instance, value)
# Updating ppoi_field on attribute set
if previous_file is not None:
self.field.update_dimension_fields(instance, force=True)
self.field.update_ppoi_field(instance)
def __get__(self, instance=None, owner=None):
if instance is None:
return self
# This is slightly complicated, so worth an explanation.
        # `instance.file` needs to ultimately return some instance of `File`,
# probably a subclass. Additionally, this returned object needs to have
# the VersatileImageFieldFile API so that users can easily do things
# like instance.file.path & have that delegated to the file storage
# engine. Easy enough if we're strict about assignment in __set__, but
# if you peek below you can see that we're not. So depending on the
# current value of the field we have to dynamically construct some
# sort of "thing" to return.
# The instance dict contains whatever was originally assigned
# in __set__.
file = instance.__dict__[self.field.name]
        # Call the placeholder process method on VersatileImageField.
# (This was called inside the VersatileImageField __init__ before) Fixes #28
self.field.process_placeholder_image()
# If this value is a string (instance.file = "path/to/file") or None
# then we simply wrap it with the appropriate attribute class according
# to the file field. [This is FieldFile for FileFields and
# ImageFieldFile for ImageFields and their subclasses, like this class;
# it's also conceivable that user subclasses might also want to
# subclass the attribute class]. This object understands how to convert
# a path to a file, and also how to handle None.
if isinstance(file, str) or file is None:
attr = self.field.attr_class(
instance=instance,
field=self.field,
name=file
)
# Check if this field has a ppoi_field assigned
if attr.field.ppoi_field:
# Pulling the current value of the ppoi_field...
ppoi = instance.__dict__[attr.field.ppoi_field]
# ...and assigning it to VersatileImageField instance
attr.ppoi = ppoi
instance.__dict__[self.field.name] = attr
# Other types of files may be assigned as well, but they need to have
        # the FieldFile interface added to them. Thus, we wrap any other type of
# File inside a FieldFile (well, the field's attr_class, which is
# usually FieldFile).
elif isinstance(file, File) and not isinstance(file, FieldFile):
file_copy = self.field.attr_class(instance, self.field, file.name)
file_copy.file = file
file_copy._committed = False
instance.__dict__[self.field.name] = file_copy
# Finally, because of the (some would say boneheaded) way pickle works,
# the underlying FieldFile might not actually itself have an associated
# file. So we need to reset the details of the FieldFile in those cases
elif isinstance(file, FieldFile) and not hasattr(file, 'field'):
file.instance = instance
file.field = self.field
file.storage = self.field.storage
if file.field.ppoi_field:
ppoi = instance.__dict__[file.field.ppoi_field]
file.ppoi = ppoi
# That was fun, wasn't it?
return instance.__dict__[self.field.name]
| WGBH/django-versatileimagefield | versatileimagefield/files.py | Python | mit | 4,601 |
##############################################################################
#
# Copyright (C) Zenoss, Inc. 2014, all rights reserved.
#
# This content is made available according to terms specified in
# License.zenoss under the directory where your Zenoss product is installed.
#
##############################################################################
"""Periodic progress logging for long-running operations.
Example usage:
import logging
LOG = logging.getLogger(__name__)
import time
import progresslog
mylist = range(100)
progress = ProgressLogger(
LOG,
prefix="progress",
total=len(mylist),
interval=1)
for i in mylist:
progress.increment()
time.sleep(0.2)
"""
import datetime
import logging
class ProgressLogger(object):
"""Periodic progress logging for long-running operations."""
def __init__(
self,
logger,
level=logging.INFO,
prefix='',
total=None,
interval=60):
self.logger = logger
self.level = level
self.prefix = prefix
self.total = total
self.interval = datetime.timedelta(seconds=interval)
self.pos = 0
self.start_time = datetime.datetime.now()
self.last_time = self.start_time
def increment(self, by=1):
"""Increment internal position and emit progress log if needed."""
self.pos += by
now = datetime.datetime.now()
if now - self.last_time >= self.interval:
self.last_time = now
progress = '{} of {}'.format(
self.pos,
self.total if self.total else '?')
elapsed = now - self.start_time
if self.total:
per = elapsed / self.pos
remaining = per * (self.total - self.pos)
msg = '{}, elapsed={}, remaining={}'.format(
progress,
str(elapsed).split('.', 1)[0],
str(remaining).split('.', 1)[0])
else:
msg = '{}, elapsed={}'.format(
progress,
str(elapsed).split('.', 1)[0])
if self.prefix:
msg = '{}: {}'.format(self.prefix, msg)
self.logger.log(self.level, msg)
| krull/docker-zenoss4 | init_fs/usr/local/zenoss/ZenPacks/ZenPacks.zenoss.Microsoft.Windows-2.6.9.egg/ZenPacks/zenoss/Microsoft/Windows/progresslog.py | Python | gpl-3.0 | 2,355 |
"""Generic linux daemon base class for python 3.x."""
import sys, os, time, atexit, signal
class Daemon:
"""A generic daemon class.
Usage: subclass the daemon class and override the run() method."""
def __init__(self, pidfile): self.pidfile = pidfile
def daemonize(self):
"""Deamonize class. UNIX double fork mechanism."""
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError as err:
sys.stderr.write('fork #1 failed: {0}\n'.format(err))
sys.exit(1)
# decouple from parent environment
os.chdir('/')
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError as err:
sys.stderr.write('fork #2 failed: {0}\n'.format(err))
sys.exit(1)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = open(os.devnull, 'r')
so = open(os.devnull, 'a+')
se = open(os.devnull, 'a+')
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile
atexit.register(self.delpid)
pid = str(os.getpid())
with open(self.pidfile,'w+') as f:
f.write(pid + '\n')
def delpid(self):
os.remove(self.pidfile)
def start(self):
"""Start the daemon."""
# Check for a pidfile to see if the daemon already runs
try:
with open(self.pidfile,'r') as pf:
pid = int(pf.read().strip())
except IOError:
pid = None
if pid:
message = "pidfile {0} already exist. " + \
"Daemon already running?\n"
sys.stderr.write(message.format(self.pidfile))
sys.exit(1)
# Start the daemon
self.daemonize()
self.run()
def stop(self):
"""Stop the daemon."""
# Get the pid from the pidfile
try:
with open(self.pidfile,'r') as pf:
pid = int(pf.read().strip())
except IOError:
pid = None
if not pid:
message = "pidfile {0} does not exist. " + \
"Daemon not running?\n"
sys.stderr.write(message.format(self.pidfile))
return # not an error in a restart
# Try killing the daemon process
try:
while 1:
os.kill(pid, signal.SIGTERM)
time.sleep(0.1)
except OSError as err:
e = str(err.args)
if e.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print (str(err.args))
sys.exit(1)
def restart(self):
"""Restart the daemon."""
self.stop()
self.start()
def run(self):
"""You should override this method when you subclass Daemon.
It will be called after the process has been daemonized by
start() or restart()."""
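# A minimal usage sketch for the class above (the subclass name, pidfile path,
# sleep loop and CLI handling are illustrative assumptions, not part of this
# module):
#
#     class MyDaemon(Daemon):
#         def run(self):
#             while True:
#                 time.sleep(1)
#
#     if __name__ == "__main__":
#         daemon = MyDaemon('/tmp/mydaemon.pid')
#         if sys.argv[1] == 'start':
#             daemon.start()
#         elif sys.argv[1] == 'stop':
#             daemon.stop()
#         elif sys.argv[1] == 'restart':
#             daemon.restart()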
| mmalter/eudaimonia | src/eudaimon.py | Python | gpl-3.0 | 2,659 |
# Copyright 2022 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""LogPane Info Toolbar classes."""
from __future__ import annotations
import functools
import logging
from typing import TYPE_CHECKING
from prompt_toolkit.data_structures import Point
from prompt_toolkit.key_binding import KeyBindings, KeyPressEvent
from prompt_toolkit.filters import Condition
from prompt_toolkit.layout import (
ConditionalContainer,
FormattedTextControl,
HSplit,
Window,
WindowAlign,
)
import pw_console.widgets.border
import pw_console.widgets.checkbox
import pw_console.widgets.mouse_handlers
if TYPE_CHECKING:
from pw_console.console_app import ConsoleApp
_LOG = logging.getLogger(__package__)
class QuitDialog(ConditionalContainer):
"""Confirmation quit dialog box."""
DIALOG_HEIGHT = 2
def __init__(self, application: ConsoleApp):
self.application = application
self.show_dialog = False
# Tracks the last focused container, to enable restoring focus after
# closing the dialog.
self.last_focused_pane = None
# Quit keybindings are active when this dialog is in focus
key_bindings = KeyBindings()
@key_bindings.add('y')
@key_bindings.add('c-d')
def _quit(_event: KeyPressEvent) -> None:
"""Close save as bar."""
self.quit_action()
@key_bindings.add('escape')
@key_bindings.add('n')
@key_bindings.add('c-c')
def _cancel(_event: KeyPressEvent) -> None:
"""Close save as bar."""
self.close_dialog()
self.exit_message = 'Quit? y/n '
action_bar_control = FormattedTextControl(
self.get_action_fragments,
show_cursor=True,
focusable=True,
key_bindings=key_bindings,
# Cursor will appear after the exit_message
get_cursor_position=lambda: Point(len(self.exit_message), 0),
)
action_bar_window = Window(content=action_bar_control,
height=QuitDialog.DIALOG_HEIGHT,
align=WindowAlign.LEFT,
dont_extend_width=False)
super().__init__(
pw_console.widgets.border.create_border(
HSplit(
[action_bar_window],
height=QuitDialog.DIALOG_HEIGHT,
style='class:quit-dialog',
),
QuitDialog.DIALOG_HEIGHT,
border_style='class:quit-dialog-border',
left_margin_columns=1,
),
filter=Condition(lambda: self.show_dialog),
)
def focus_self(self):
self.application.layout.focus(self)
def close_dialog(self):
"""Close this dialog box."""
self.show_dialog = False
# Restore original focus if possible.
if self.last_focused_pane:
self.application.layout.focus(self.last_focused_pane)
else:
# Fallback to focusing on the main menu.
self.application.focus_main_menu()
def open_dialog(self):
self.show_dialog = True
self.last_focused_pane = self.application.focused_window()
self.focus_self()
self.application.redraw_ui()
def quit_action(self):
self.application.application.exit()
def get_action_fragments(self):
"""Return FormattedText with action buttons."""
# Mouse handlers
focus = functools.partial(pw_console.widgets.mouse_handlers.on_click,
self.focus_self)
cancel = functools.partial(pw_console.widgets.mouse_handlers.on_click,
self.close_dialog)
quit_action = functools.partial(
pw_console.widgets.mouse_handlers.on_click, self.quit_action)
# Separator should have the focus mouse handler so clicking on any
# whitespace focuses the input field.
separator_text = ('', ' ', focus)
# Default button style
button_style = 'class:toolbar-button-inactive'
fragments = [('', self.exit_message), separator_text]
fragments.append(('', '\n'))
# Cancel button
fragments.extend(
pw_console.widgets.checkbox.to_keybind_indicator(
key='n / Ctrl-c',
description='Cancel',
mouse_handler=cancel,
base_style=button_style,
))
# Two space separator
fragments.append(separator_text)
        # Quit button
fragments.extend(
pw_console.widgets.checkbox.to_keybind_indicator(
key='y / Ctrl-d',
description='Quit',
mouse_handler=quit_action,
base_style=button_style,
))
# One space separator
fragments.append(('', ' ', focus))
return fragments
| google/pigweed | pw_console/py/pw_console/quit_dialog.py | Python | apache-2.0 | 5,489 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "corpus_browser.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| myaser/DAPOS_corpus_browser | corpus_browser/manage.py | Python | mit | 257 |
import time
name = input("Enter your name: ")
age = input("Enter your age: ")
repeat = input("How many times would you like to print this message? ")
current_year = time.localtime().tm_year
born_year = current_year - int(age)
for count in range(int(repeat)):
print("\nPrinting message #%d:" % int(count+1), "\tDear %s, you will be 100 years old in %d." % (name, born_year + 100))
| stackingfunctions/practicepython | src/01-character_input.py | Python | mit | 388 |
import pandas
import numpy as np
from statsmodels.tools import data
def test_missing_data_pandas():
"""
Fixes GH: #144
"""
X = np.random.random((10,5))
X[1,2] = np.nan
df = pandas.DataFrame(X)
vals, cnames, rnames = data.interpret_data(df)
np.testing.assert_equal(rnames.tolist(), [0,2,3,4,5,6,7,8,9])
def test_structarray():
X = np.random.random((9,)).view([('var1', 'f8'),
('var2', 'f8'),
('var3', 'f8')])
vals, cnames, rnames = data.interpret_data(X)
np.testing.assert_equal(cnames, X.dtype.names)
np.testing.assert_equal(vals, X.view((float,3)))
np.testing.assert_equal(rnames, None)
def test_recarray():
X = np.random.random((9,)).view([('var1', 'f8'),
('var2', 'f8'),
('var3', 'f8')])
vals, cnames, rnames = data.interpret_data(X.view(np.recarray))
np.testing.assert_equal(cnames, X.dtype.names)
np.testing.assert_equal(vals, X.view((float,3)))
np.testing.assert_equal(rnames, None)
def test_dataframe():
X = np.random.random((10,5))
df = pandas.DataFrame(X)
vals, cnames, rnames = data.interpret_data(df)
np.testing.assert_equal(vals, df.values)
np.testing.assert_equal(rnames.tolist(), df.index.tolist())
np.testing.assert_equal(cnames, df.columns.tolist())
def test_patsy_577():
X = np.random.random((10, 2))
df = pandas.DataFrame(X, columns=["var1", "var2"])
from patsy import dmatrix
endog = dmatrix("var1 - 1", df)
np.testing.assert_(data._is_using_patsy(endog, None))
exog = dmatrix("var2 - 1", df)
np.testing.assert_(data._is_using_patsy(endog, exog))
| hlin117/statsmodels | statsmodels/tools/tests/test_data.py | Python | bsd-3-clause | 1,758 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#********************************************************************
# ZYNTHIAN PROJECT: update_envars.py
#
# Update $ZYNTHIAN_CONFIG_DIR/zynthian_envars.sh with the
# file given as first argument
#
# Copyright (C) 2015-2020 Fernando Moyano <[email protected]>
#
#********************************************************************
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# For a full copy of the GNU General Public License see the LICENSE.txt file.
#
#********************************************************************
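# Example invocation (the input file path below is hypothetical):
#   python3 update_envars.py /tmp/zynthian_envars_new.sh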
import os
import sys
import shutil
sys.path.append(os.environ.get('ZYNTHIAN_UI_DIR',"/zynthian/zynthian-ui"))
import zynconf
#--------------------------------------------------------------------
input_envars_file = sys.argv[1]
envars_file = "{}/zynthian_envars.sh".format(os.environ.get('ZYNTHIAN_CONFIG_DIR',"/zynthian/config"))
envars_backup_file = "{}/zynthian_envars_backup.sh".format(os.environ.get('ZYNTHIAN_CONFIG_DIR',"/zynthian/config"))
if os.path.isfile(input_envars_file):
try:
print("Loading config input file '{}' ...".format(input_envars_file))
config = zynconf.load_config(False, input_envars_file)
except Exception as e:
print("ERROR: Config input file {} can't be parsed. Check the syntax! => \n{}".format(input_envars_file, e))
try:
print("Saving config backup '{}' ...".format(envars_backup_file))
shutil.copyfile(envars_file, envars_backup_file)
except Exception as e:
print("ERROR: Can't perform a config backup! => \n{}".format(e))
try:
print("Updating config on '{}' ...".format(envars_file))
zynconf.save_config(config, True)
except Exception as e:
print("ERROR: Config can't be updated! => \n{}".format(e))
try:
print("Deleting config input file '{}' ...".format(input_envars_file))
os.remove(input_envars_file)
except Exception as e:
print("ERROR: Input config file can't be removed! => \n{}".format(e))
else:
print("Config input file '{}' doesn't exist.".format(update_envars_file))
#--------------------------------------------------------------------
| zynthian/zynthian-sys | sbin/update_envars.py | Python | gpl-3.0 | 2,536 |
import types
import signal
from PyQt4 import QtGui
from PyQt4 import QtCore
from qt4_gui import _GUI, _PropertiesDialog, _BasicNodeActions
import layouts
from ete_dev import Tree, PhyloTree, ClusterTree
from main import save
from qt4_render import _TreeScene, render, get_tree_img_map, init_tree_style
__all__ = ["show_tree", "render_tree"]
_QApp = None
GUI_TIMEOUT = None
def exit_gui(a,b):
_QApp.exit(0)
def init_scene(t, layout, ts):
global _QApp
ts = init_tree_style(t, ts)
if layout:
ts.layout_fn = layout
if not _QApp:
_QApp = QtGui.QApplication(["ETE"])
scene = _TreeScene()
#ts._scale = None
return scene, ts
def show_tree(t, layout=None, tree_style=None, win_name=None):
""" Interactively shows a tree."""
scene, img = init_scene(t, layout, tree_style)
tree_item, n2i, n2f = render(t, img)
scene.init_values(t, img, n2i, n2f)
tree_item.setParentItem(scene.master_item)
scene.addItem(scene.master_item)
mainapp = _GUI(scene)
if win_name:
mainapp.setObjectName(win_name)
mainapp.show()
mainapp.on_actionFit2tree_triggered()
# Restore Ctrl-C behavior
signal.signal(signal.SIGINT, signal.SIG_DFL)
if GUI_TIMEOUT is not None:
signal.signal(signal.SIGALRM, exit_gui)
signal.alarm(GUI_TIMEOUT)
_QApp.exec_()
def render_tree(t, imgName, w=None, h=None, layout=None,
tree_style = None, header=None, units="px",
dpi=90):
""" Render tree image into a file."""
global _QApp
for nid, n in enumerate(t.traverse("preorder")):
n.add_feature("_nid", nid)
scene, img = init_scene(t, layout, tree_style)
tree_item, n2i, n2f = render(t, img)
scene.init_values(t, img, n2i, n2f)
tree_item.setParentItem(scene.master_item)
scene.master_item.setPos(0,0)
scene.addItem(scene.master_item)
if imgName == "%%inline":
imgmap = save(scene, imgName, w=w, h=h, units=units, dpi=dpi)
else:
x_scale, y_scale = save(scene, imgName, w=w, h=h, units=units, dpi=dpi)
imgmap = get_tree_img_map(n2i, x_scale, y_scale)
_QApp.quit()
_QApp = None
return imgmap
def get_img(t, w=None, h=None, layout=None, tree_style = None,
header=None, units="px", dpi=90):
global _QApp
scene, img = init_scene(t, layout, tree_style)
tree_item, n2i, n2f = render(t, img)
scene.init_values(t, img, n2i, n2f)
tree_item.setParentItem(scene.master_item)
scene.master_item.setPos(0,0)
scene.addItem(scene.master_item)
x_scale, y_scale, imgdata = save(scene, "%%return", w=w, h=h, units=units, dpi=dpi)
_QApp.quit()
_QApp = None
return imgdata, {}
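# Minimal usage sketch (the Newick string and file name below are illustrative
# assumptions, not part of this module):
#   t = Tree("((a,b),c);")
#   show_tree(t)  # opens the interactive viewer
#   img_map = render_tree(t, "tree.png", w=600, units="px")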
| khughitt/ete | ete_dev/treeview/drawer.py | Python | gpl-3.0 | 2,766 |
# -*- coding: utf-8 -*-
# Copyright © 2012-2015 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
from nikola.plugin_categories import Task
from nikola import utils
class Sources(Task):
"""Copy page sources into the output."""
name = "render_sources"
def gen_tasks(self):
"""Publish the page sources into the output.
Required keyword arguments:
translations
default_lang
post_pages
output_folder
"""
kw = {
"translations": self.site.config["TRANSLATIONS"],
"output_folder": self.site.config["OUTPUT_FOLDER"],
"default_lang": self.site.config["DEFAULT_LANG"],
"show_untranslated_posts": self.site.config['SHOW_UNTRANSLATED_POSTS'],
}
self.site.scan_posts()
yield self.group_task()
if self.site.config['COPY_SOURCES']:
for lang in kw["translations"]:
for post in self.site.timeline:
if not kw["show_untranslated_posts"] and lang not in post.translated_to:
continue
if post.meta('password'):
continue
output_name = os.path.join(
kw['output_folder'], post.destination_path(
lang, post.source_ext(True)))
# do not publish PHP sources
if post.source_ext(True) == post.compiler.extension():
continue
source = post.source_path
if lang != kw["default_lang"]:
source_lang = utils.get_translation_candidate(self.site.config, source, lang)
if os.path.exists(source_lang):
source = source_lang
if os.path.isfile(source):
yield {
'basename': 'render_sources',
'name': os.path.normpath(output_name),
'file_dep': [source],
'targets': [output_name],
'actions': [(utils.copy_file, (source, output_name))],
'clean': True,
'uptodate': [utils.config_changed(kw, 'nikola.plugins.task.sources')],
}
| immanetize/nikola | nikola/plugins/task/sources.py | Python | mit | 3,426 |
VERSION = (0, 1, 3)
__version__ = VERSION
__versionstr__ = '.'.join(map(str, VERSION)) | arktos65/elasticsearch-tools | lib/__init__.py | Python | apache-2.0 | 86 |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import os
import os.path
import pathlib
import platform
import unittest
from .lib.testcase import IntegrationTestCase
if platform.system() != "Windows":
from .lib.linux import LinuxCgroup, is_cgroup_v2_mounted
else:
def is_cgroup_v2_mounted():
return False
class LinuxCgroupTest(IntegrationTestCase):
def test_parse_proc_file(self) -> None:
proc_file_content = (
b"0::/user.slice/user-6986.slice/session-33.scope/init.scope\n"
)
self.assertEqual(
LinuxCgroup._parse_proc_file(proc_file_content),
b"/user.slice/user-6986.slice/session-33.scope/init.scope",
)
def test_parsing_empty_proc_file_fails(self) -> None:
with self.assertRaises(ValueError):
LinuxCgroup._parse_proc_file(b"")
with self.assertRaises(ValueError):
LinuxCgroup._parse_proc_file(b"\n")
def test_parsing_proc_file_with_multiple_cgroups_v1_hierarchies_fails(self) -> None:
proc_file_content = (
b"12:cpuacct:/user.slice/user-2233.slice/session-163872.scope\n"
b"11:freezer:/\n"
b"10:hugetlb:/\n"
b"9:blkio:/user.slice/user-2233.slice/session-163872.scope\n"
b"8:cpuset:/\n"
b"7:pids:/user.slice/user-2233.slice/session-163872.scope\n"
b"6:devices:/user.slice\n"
b"5:memory:/user.slice/user-2233.slice/session-163872.scope\n"
b"4:perf_event:/\n"
b"3:net_cls,net_prio:/\n"
b"2:cpu:/user.slice/user-2233.slice/session-163872.scope\n"
b"1:name=systemd:/user.slice/user-2233.slice/session-163872.scope\n"
)
with self.assertRaises(NotImplementedError):
LinuxCgroup._parse_proc_file(proc_file_content)
def test_cgroup_from_sys_fs_cgroup_path(self) -> None:
path = pathlib.PosixPath("/sys/fs/cgroup/system.slice")
cgroup = LinuxCgroup.from_sys_fs_cgroup_path(path)
self.assertEqual(cgroup.name, b"/system.slice")
def test_sys_fs_cgroup_path(self) -> None:
cgroup = LinuxCgroup(b"/user.slice/user-6986.slice/session-13.scope/init.scope")
self.assertEqual(
cgroup.sys_fs_cgroup_path,
pathlib.PosixPath(
"/sys/fs/cgroup//user.slice/user-6986.slice/session-13.scope/init.scope"
),
)
@unittest.skipIf(
not is_cgroup_v2_mounted(),
"T36934106: Fix EdenFS systemd integration tests for cgroups v1",
)
def test_cgroup_from_current_process_includes_current_process_id(self) -> None:
cgroup = LinuxCgroup.from_current_process()
self.assertIn(os.getpid(), cgroup.query_process_ids())
| facebookexperimental/eden | eden/integration/linux_cgroup_test.py | Python | gpl-2.0 | 2,902 |
from base import Base
import nodes | Derfies/doodads | dynamicInheritance/game/__init__.py | Python | mit | 34 |
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.config._base import Config
from synapse.util.check_dependencies import check_requirements
class RedisConfig(Config):
section = "redis"
def read_config(self, config, **kwargs):
redis_config = config.get("redis") or {}
self.redis_enabled = redis_config.get("enabled", False)
if not self.redis_enabled:
return
check_requirements("redis")
self.redis_host = redis_config.get("host", "localhost")
self.redis_port = redis_config.get("port", 6379)
self.redis_password = redis_config.get("password")
def generate_config_section(self, config_dir_path, server_name, **kwargs):
return """\
# Configuration for Redis when using workers. This *must* be enabled when
# using workers (unless using old style direct TCP configuration).
#
redis:
# Uncomment the below to enable Redis support.
#
#enabled: true
# Optional host and port to use to connect to redis. Defaults to
# localhost and 6379
#
#host: localhost
#port: 6379
# Optional password if configured on the Redis instance
#
#password: <secret_password>
"""
| matrix-org/synapse | synapse/config/redis.py | Python | apache-2.0 | 1,857 |
#!/usr/bin/env python
import sys
import rospy
from geometry_msgs.msg import Point, Pose
class left_data:
"""Pose data for left arm"""
def __init__(self):
self.targetPose0 = Pose()
"""Pose 0 left arm"""
self.targetPose1 = Pose()
"""Pose 1 left arm"""
self.targetPose2 = Pose()
"""Pose 2 left arm"""
self.targetPose3 = Pose()
"""Pose 3 left arm"""
self.targetPose4 = Pose()
"""Pose 4 left arm"""
self.targetPose0.position.x= 0.573963693952;
self.targetPose0.position.y= 0.19098049632;
self.targetPose0.position.z= 0.212310502061;
self.targetPose0.orientation.x = 0.132155113873;
self.targetPose0.orientation.y = 0.990777211955;
self.targetPose0.orientation.z =0.0188116079957;
self.targetPose0.orientation.w = 0.0232737094724;
self.targetPose1.position.x= 0.807391932272;
self.targetPose1.position.y= -0.102345610774;
self.targetPose1.position.z= 0.47878228044;
self.targetPose1.orientation.x = 0.225691462877;
self.targetPose1.orientation.y = 0.662760439831;
self.targetPose1.orientation.z = -0.325454321835;
self.targetPose1.orientation.w = 0.635524545064;
self.targetPose2.position.x= 0.69272842411;
self.targetPose2.position.y= 0.907005233803;
self.targetPose2.position.z= 0.589525777499;
self.targetPose2.orientation.x= -0.427967175878;
self.targetPose2.orientation.y= 0.766648144394;
self.targetPose2.orientation.z= 0.128392891026;
self.targetPose2.orientation.w= 0.461096502483;
self.targetPose3.position.x= 0.937253310239;
self.targetPose3.position.y= 0.511237168002;
self.targetPose3.position.z= 0.754267763991;
self.targetPose3.orientation.x= 0.0247381559907;
self.targetPose3.orientation.y= 0.419280690616;
self.targetPose3.orientation.z= 0.00864617134984;
self.targetPose3.orientation.w= 0.907478357778;
self.targetPose4.position.x= 0.907019195816;
self.targetPose4.position.y= 0.244553790075;
self.targetPose4.position.z= 0.457360792649;
self.targetPose4.orientation.x= -0.0179657894914;
self.targetPose4.orientation.y= 0.721568084785;
self.targetPose4.orientation.z= -0.0378135882797;
self.targetPose4.orientation.w= 0.691076596311;
class right_data:
"""Pose data for right arm"""
def __init__(self):
self.targetPose0 = Pose()
"""Pose 0 right arm"""
self.targetPose1 = Pose()
"""Pose 1 right arm"""
self.targetPose0.position.x= 0.570475291901;
self.targetPose0.position.y= -0.180389075365;
self.targetPose0.position.z= 0.217060958454;
self.targetPose0.orientation.x = -0.143469579271;
self.targetPose0.orientation.y = 0.989203323172;
self.targetPose0.orientation.z = -0.0141004555444;
self.targetPose0.orientation.w = 0.0263522751129;
self.targetPose1.position.x= 0.399460891212;
self.targetPose1.position.y= -0.509055254324;
self.targetPose1.position.z= 0.164683136981;
self.targetPose1.orientation.x = 0.115313561634;
self.targetPose1.orientation.y = 0.986697808427;
self.targetPose1.orientation.z = -0.0297995189095;
self.targetPose1.orientation.w = -0.110644502902;
| enriquecoronadozu/baxter_pointpy | src/point_data.py | Python | gpl-2.0 | 3,046 |
# (C) British Crown Copyright 2011 - 2012, Met Office
#
# This file is part of metOcean-mapping.
#
# metOcean-mapping is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# metOcean-mapping is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with metOcean-mapping. If not, see <http://www.gnu.org/licenses/>.
import datetime
import json
from string import Template
import sys
import time
import urllib
from django import forms
from django.core.urlresolvers import reverse
from django.utils import formats
from django.utils.safestring import mark_safe
import metocean
import metocean.prefixes as prefixes
from settings import READ_ONLY
from settings import fuseki_process
def get_states():
"""
Helper method to return valid states.
(consider storing these in the triple store and
providing access via a query).
"""
STATES = (
'"Draft"',
'"Proposed"',
'"Approved"',
'"Broken"',
'"Deprecated"',
)
return STATES
def get_reasons():
"""
Helper method to return valid reasons.
(consider storing these in the triple store and
providing access via a query).
"""
REASONS = (
'"new mapping"',
'"added metadata"',
'"corrected metadata"',
'"linked to new format"',
'"corrected links"',
'"changed status"'
)
return REASONS
def formats():
"""
Temporary, returns formats
These should be stored in the triple store and
provided by a query
"""
choices = [('<http://www.metarelate.net/metOcean/format/grib>', 'GRIB'),
('<http://www.metarelate.net/metOcean/format/um>', 'UM'),
('<http://www.metarelate.net/metOcean/format/cf>', 'CF')]
return choices
class MappingFormats(forms.Form):
"""
form to define the file format of the source and target
for a mapping
"""
source_format = forms.ChoiceField(choices=formats())
target_format = forms.ChoiceField(choices=formats())
def clean(self):
data = self.cleaned_data
if data['source_format'] == data['target_format']:
raise forms.ValidationError(
'The source and target formats must be different')
return self.cleaned_data
class Mediator(forms.Form):
"""
    form to select a mediator from the list of mediators
    (a mediator groups defined mappings into a named collection)
"""
mediator = forms.ChoiceField()
def __init__(self, *args, **kwargs):
fformat = kwargs.pop('fformat')
super(Mediator, self).__init__(*args, **kwargs)
qstr = metocean.Mediator.sparql_retriever(fformat)
meds = fuseki_process.retrieve(qstr)
if isinstance(meds, list):
meds = [(med['mediator'], med['label']) for med in meds]
else:
meds = [(meds['mediator'], meds['label'])]
self.fields['mediator'].choices = meds
class NewMediator(forms.Form):
""" form to create a new mediator """
mediator = forms.CharField()
class MappingConcept(forms.Form):
"""
form to define the concepts for a mapping
the work of the form is handled by the json
in the referrer, not the form class
"""
def clean(self):
return self.cleaned_data
class Value(forms.Form):
"""
form to define a value for use in a concept
format specific
"""
#two name fields are provided, 'name' is a drop down list of known names,
#'_name' is a free text field for unknown names
#only one may be used, validated in clean()
name = forms.ChoiceField(required=False)
_name = forms.CharField(required=False)
value = forms.CharField(required=False)
operator = forms.CharField(required=False)
ops = fuseki_process.subject_and_plabel('http://openmath/tests.ttl')
ops = [(op['subject'], op['notation']) for op in ops]
ops = [('','')] + ops
operator = forms.ChoiceField(required=False, choices=ops)
def __init__(self, *args, **kwargs):
self.fformat = kwargs.pop('fformat')
super(Value, self).__init__(*args, **kwargs)
if self.fformat == 'um':
umRes = fuseki_process.subject_and_plabel('http://um/umdpF3.ttl')
choices = [(um['subject'], um['notation']) for um in umRes]
choices = [('','')] + choices
self.fields['name'].choices = choices
sns = fuseki_process.subject_and_plabel('http://um/stashconcepts.ttl')
sn_choices = [('','')]
sn_choices += [(um['subject'], um['notation']) for um in sns]
self.fields['stash_code'] = forms.ChoiceField(required=False,
choices=sn_choices)
fcs = fuseki_process.subject_and_plabel('http://um/fieldcode.ttl')
fc_choices = [('','')]
fc_choices += [(um['subject'], um['notation']) for um in fcs]
self.fields['field_code'] = forms.ChoiceField(required=False,
choices=fc_choices)
elif self.fformat == 'cf':
cfRes = fuseki_process.subject_and_plabel('http://cf/cf-model.ttl')
choices = [(cf['subject'], cf['notation']) for cf in cfRes]
choices = [('','')] + choices
self.fields['name'].choices = choices
sns = fuseki_process.subject_and_plabel('http://cf/cf-standard-name-table.ttl')
sn_choices = [('','')]
sn_choices += [(sn['subject'], sn['notation']) for sn in sns]
self.fields['standard_name'] = forms.ChoiceField(required=False,
choices=sn_choices)
mod = fuseki_process.subject_and_plabel('http://cf/cf-model.ttl')
md_choices = [('','')]
md_choices += [(mo['subject'], mo['notation']) for mo in mod]
print md_choices
self.fields['cf model'] = forms.ChoiceField(required=False,
choices=md_choices)
elif self.fformat == 'grib':
grRes = fuseki_process.subject_and_plabel('http://grib/apikeys.ttl')
choices = [(grib['subject'], grib['notation']) for grib in grRes]
choices = [('','')] + choices
self.fields['name'].choices = choices
else:
raise ValueError('invalid format supplied: {}'.format(self.fformat))
def clean(self):
name = self.cleaned_data.get('name')
_name = self.cleaned_data.get('_name')
stcode = self.cleaned_data.get('stash_code')
fcode = self.cleaned_data.get('field_code')
lit = self.cleaned_data.get('value')
st_name = self.cleaned_data.get('standard_name')
cfmodel = self.cleaned_data.get('cf model')
op = self.cleaned_data.get('operator')
if name and _name:
# only one of name and _name may be used in a valid form entry
raise forms.ValidationError('Name, name are mutually exclusive')
elif not name and not _name:
# one name must be selected
raise forms.ValidationError('a name must be selected')
elif _name:
n = '<http://'
if self.fformat == 'cf':
n += 'def.cfconventions.org/datamodel/attribute_name#{}>'
elif self.fformat == 'um':
n += 'reference.metoffice.gov.uk/def/um/computed_value#{}>'
elif self.fformat == 'grib':
n += 'reference.metoffice.gov.uk/def/grib/computed_value#{}>'
self.cleaned_data['name'] = n.format(_name)
if op and not (fcode or lit or stcode or st_name or cfmodel):
raise forms.ValidationError('if operator is set '
'then a value or code is '
'required')
if not op and (fcode or lit or stcode or st_name or cfmodel):
raise forms.ValidationError('if operator is not set '
'then no value or code can be '
'interpreted')
if stcode:
if fcode or lit:
raise forms.ValidationError('only one of value, stash code'
' or fieldcode may be entered')
else:
lit = stcode
elif fcode:
if stcode or lit:
raise forms.ValidationError('only one of value, stash code'
' or fieldcode may be entered')
else:
lit = fcode
elif st_name:
if lit or cfmodel:
raise forms.ValidationError('only one of value or standard_name'
' or cf model may be entered')
else:
lit = st_name
elif cfmodel:
if lit or st_name:
raise forms.ValidationError('only one of value or standard_name'
' or cf model may be entered')
else:
lit = cfmodel
try:
float(lit)
except ValueError:
if lit.startswith('http'):
lit = '<{}>'.format(lit)
elif lit.startswith('<http'):
lit = lit
else:
lit = '"{}"'.format(lit)
self.cleaned_data['value'] = lit
return self.cleaned_data
def _unpack_values(vals):
"""
return the entries for the ChoiceField choices for a list of values
available to map
"""
vals = [json.loads(aVal) for aVal in vals]
newVals = []
for aVal in vals:
newS = [json.dumps(aVal), '', '', '']
if not aVal.get('mr:subject'):
newS[1] = aVal.get('mr:hasProperty',{}).get('mr:name', '').split('/')[-1]
else:
newS[1] = aVal.get('mr:subject').get('mr:hasProperty',{}).get('mr:name', '').split('/')[-1]
newS[2] = aVal.get('mr:operator', '').split('#')[-1]
if isinstance(aVal.get('mr:object'), unicode):
newS[3] = aVal.get('mr:object')
else:
newS[3] = aVal.get('mr:object', {}).get('mr:hasProperty',{})
newS[3] = newS[3].get('mr:name', '').split('/')[-1]
newVals.append(newS)
choices = [(aVal[0],'{su} {op} {ob}'.format(su=aVal[1], op=aVal[2],
ob=aVal[3])) for aVal in newVals]
return choices
class ValueMap(forms.Form):
"""
form to define a value map
using the available values
"""
source_value = forms.ChoiceField()
target_value = forms.ChoiceField()
def __init__(self, *args, **kwargs):
sc_vals = kwargs.pop('sc')
sc = _unpack_values(sc_vals)
tc_vals = kwargs.pop('tc')
tc = _unpack_values(tc_vals)
super(ValueMap, self).__init__(*args, **kwargs)
self.fields['source_value'].choices = sc
self.fields['target_value'].choices = tc
class DerivedValue(forms.Form):
"""
form to define a derived value
using the available values
"""
ops = fuseki_process.subject_and_plabel('http://openmath/ops.ttl')
ops = [('','')] + [(op['subject'], op['notation']) for op in ops]
_operator = forms.ChoiceField(choices=ops)
_subject = forms.ChoiceField()
_object = forms.ChoiceField(required=False)
_object_literal = forms.CharField(required=False)
def __init__(self, *args, **kwargs):
comp_vals = kwargs.pop('components')
components = _unpack_values(comp_vals)
super(DerivedValue, self).__init__(*args, **kwargs)
# components = [json.loads(component) for component in components]
# components = [(json.dumps(component),component['mr:subject']['mr:hasProperty']['mr:name']) for
# component in components]
self.fields['_subject'].choices = components
self.fields['_object'].choices = [('','')] + components
def clean(self):
op = self.data.get('_operator')
obj = self.data.get('_object')
obj_lit = self.data.get('_object_literal')
if not (obj or obj_lit):
msg = 'an object (choice or literal) is required'
raise forms.ValidationError(msg)
elif obj and obj_lit:
msg = 'the object and object_literal fields are mutually exclusive'
raise forms.ValidationError(msg)
elif obj_lit:
try:
float(obj_lit)
except ValueError:
raise forms.ValidationError('object_literal must be a number')
return self.cleaned_data
class MappingMeta(forms.Form):
"""
form to define the metadata for a mapping
once the source, target and value maps are defined
"""
isoformat = "%Y-%m-%dT%H:%M:%S.%f"
#invertible = forms.BooleanField(required=False)
invertible = forms.ChoiceField(choices=[('"True"', 'True'),
('"False"', 'False')])
mapping = forms.CharField(max_length=200, required=False,
widget=forms.TextInput(attrs={'readonly':True}))
last_edit = forms.CharField(max_length=50, required=False,
widget=forms.TextInput(attrs={'readonly':True}))
last_editor = forms.CharField(max_length=50, required=False,
widget=forms.TextInput(
attrs={'readonly':True}))
editor = forms.ChoiceField([(r['s'],r['prefLabel'].split('/')[-1]) for
r in fuseki_process.get_contacts('people')]
, required=False)
# editor = forms.ChoiceField([(r['s'],r['s'].split('/')[-1]) for
# r in moq.get_contacts('people')],
# widget=SelectWithPopUp)
note = forms.CharField(max_length=200, required=False,
widget=forms.Textarea(attrs={'readonly':True}))
comment = forms.CharField(max_length=200,required=False,
widget=forms.Textarea)
reason = forms.CharField(max_length=50, required=False,
widget=forms.TextInput(attrs={'readonly':True}))
next_reason = forms.ChoiceField(choices=[(x,x) for x in get_reasons()],
required=False)
# owners = forms.CharField(max_length=200, required=False,
# widget=forms.TextInput(attrs={'readonly':True}))
# add_owners = forms.CharField(max_length=200, required=False)
# remove_owners = forms.CharField(max_length=200, required=False)
# watchers = forms.CharField(max_length=200, required=False,
# widget=forms.TextInput(attrs={'readonly':True}))
# add_watchers = forms.CharField(max_length=200, required=False)
# remove_watchers = forms.CharField(max_length=200, required=False)
replaces = forms.CharField(max_length=128, required=False,
widget=forms.TextInput(attrs={'readonly':True}))
status = forms.CharField(max_length=15, required=False,
widget=forms.TextInput(attrs={'readonly':True}))
next_status = forms.ChoiceField(choices=[(x,x) for x in get_states()],
required=False)
source = forms.CharField(max_length=200,
widget=forms.TextInput(attrs={'hidden':True}))
target = forms.CharField(max_length=200,
widget=forms.TextInput(attrs={'hidden':True}))
valueMaps = forms.CharField(max_length=1000, required=False, widget=forms.TextInput(attrs={'hidden':True}))
def clean(self):
"""process the form"""
source = self.data.get('source')
map_id = self.data.get('mapping')
# if source:
# src_maps = moq.multiple_mappings(fuseki_process, source)
# if len(src_maps) > 1:
# e = 'mappings already exist for this source'
# raise forms.ValidationError(e)
# worried about this, prevents updates to deprecate etc
if map_id:
qstr = metocean.Mapping.sparql_retriever(map_id)
mapping = fuseki_process.retrieve(qstr)
if not mapping:
raise forms.ValidationError('the mapping Id is not valid')
changed = False
changes = []
change_keys = [('source','source'), ('target','target'),
('invertible','invertible'), ('status','status'),
('replaces', 'replaces'), ('comment','note'),
('next_reason', 'reason'), ('editor', 'creator'),
('valueMaps', 'valueMaps')]
for fkey, mkey in change_keys:
if self.data.get(fkey) != mapping.get(mkey, ''):
changed = True
changes.append((mkey,(self.data.get(fkey),
mapping.get(mkey, ''))))
if not changed:
raise forms.ValidationError('No update: mapping not changed')
else:
print 'changes:', changes
return self.cleaned_data
# if False:
# return self.cleaned_Data
# else:
# raise forms.ValidationError('well, I would have returned')
class URLwidget(forms.TextInput):
"""helper widget"""
def render(self, name, value, attrs=None):
if value in ('None', None):
tpl = value
else:
tpl = u'<a href="%s">%s</a>' % (reverse('mapdisplay',
kwargs={'hashval' : value}), "go to replaces")
return mark_safe(tpl)
def clean(self):
return self.cleaned_data
class HomeForm(forms.Form):
"""
Form to support the home control panel
and control buttons
"""
cache_status = forms.CharField(max_length=200,
widget=forms.TextInput(attrs={'size': '100',
'readonly':True
}))
cache_state = forms.CharField(required=False,
widget=forms.Textarea(attrs={'cols': 100,
'rows': 50,
'readonly':True
}))
def clean(self):
if self.data.has_key('load'):
print 'data loaded'
fuseki_process.load()
elif self.data.has_key('revert'):
print 'save cache reverted'
fuseki_process.revert()
elif self.data.has_key('save'):
print 'cached changes saved'
fuseki_process.save()
elif self.data.has_key('validate'):
print 'validate triplestore'
self.cleaned_data['validation'] = fuseki_process.validate()
return self.cleaned_data
class ContactForm(forms.Form):
required_css_class = 'required'
error_css_class = 'error'
isoformat = ("%Y-%m-%dT%H:%M:%S.%f",)
github_name = forms.CharField(max_length=50)
types = (('http://www.metarelate.net/metOcean/people','people'),
('http://www.metarelate.net/metOcean/organisations',
'organisations'))
register = forms.ChoiceField(choices=types)
def clean(self):
if READ_ONLY:
            raise forms.ValidationError('System in Read-Only mode')
else:
return self.cleaned_data
class MappingForm(forms.Form):
"""Form for the display and selection of mappings"""
mapping = forms.CharField(max_length=200)
source = forms.CharField(max_length=200)
target = forms.CharField(max_length=200)
display = forms.BooleanField(required=False)
def __init__(self, *args, **kwargs):
super(MappingForm, self).__init__(*args, **kwargs)
self.fields['mapping'].widget.attrs['readonly'] = True
self.fields['source'].widget.attrs['readonly'] = True
self.fields['target'].widget.attrs['readonly'] = True
# self.fields['mapping'].widget = forms.HiddenInput()
| metarelate/metOcean-mapping | lib/editor/app/forms.py | Python | gpl-3.0 | 20,915 |
import superimport
import numpy as np
import matplotlib.pyplot as plt
import pyprobml_utils as pml
err = np.linspace(-3.0, 3.0, 60)
L1 = abs(err)
L2 = err**2
delta = 1.5
ind = abs(err) <= delta
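# Piecewise forms computed below (delta marks the transition point):
#   Huber(err)           = 0.5 * err**2              if |err| <= delta
#                          delta * (|err| - delta/2) otherwise
#   eps-insensitive(err) = 0                         if |err| <= delta
#                          |err| - delta             otherwise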
huber = np.multiply(0.5*ind, (err**2)) + np.multiply((1-ind) , (delta*(abs(err)-delta/2)))
vapnik = np.multiply(ind, 0) + np.multiply((1-ind), (abs(err) - delta))
plt.plot(err, L1, 'k-.')
plt.plot(err, L2, 'r-')
plt.plot(err, vapnik, 'b:')
plt.plot(err, huber, 'g-.')
plt.legend(['L1', 'L2','$ϵ$-insensitive', 'huber'])
plt.ylim((-0.5, 5))
pml.savefig('Huber.pdf')
plt.show()
| probml/pyprobml | scripts/huberLossPlot.py | Python | mit | 583 |
from redis.connection import ConnectionPool, UnixDomainSocketConnection
try:
from redis.connection import SSLConnection
except ImportError:
SSLConnection = None
from threading import Lock
from rb.router import PartitionRouter
from rb.clients import RoutingClient, LocalClient
class HostInfo(object):
def __init__(self, host_id, host, port, unix_socket_path=None, db=0,
password=None, ssl=False, ssl_options=None):
self.host_id = host_id
self.host = host
self.unix_socket_path = unix_socket_path
self.port = port
self.db = db
self.password = password
self.ssl = ssl
self.ssl_options = ssl_options
def __eq__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return self.host_id == other.host_id
def __ne__(self, other):
rv = self.__eq__(other)
if rv is NotImplemented:
return NotImplemented
return rv
def __hash__(self):
return self.host_id
def __repr__(self):
return '<%s %s>' % (
self.__class__.__name__,
' '.join('%s=%r' % x for x in sorted(self.__dict__.items())),
)
def _iter_hosts(iterable):
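    # Accepts either a mapping of {host_id: config_dict} or an iterable of
    # config dicts / (host_id, config_dict) tuples, and yields plain config
    # dicts suitable for passing to Cluster.add_host(**cfg).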
if isinstance(iterable, dict):
iterable = iterable.iteritems()
for item in iterable:
if isinstance(item, tuple):
host_id, cfg = item
cfg = dict(cfg)
cfg['host_id'] = host_id
else:
cfg = item
yield cfg
class Cluster(object):
"""The cluster is the core object behind rb. It holds the connection
pools to the individual nodes and can be shared for the duration of
the application in a central location.
Basic example of a cluster over four redis instances with the default
router::
cluster = Cluster(hosts={
0: {'port': 6379},
1: {'port': 6380},
2: {'port': 6381},
3: {'port': 6382},
}, host_defaults={
'host': '127.0.0.1',
})
    `hosts` is a dictionary of hosts which maps numeric host IDs to
configuration parameters. The parameters correspond to the signature
of the :meth:`add_host` function. The defaults for these parameters
are pulled from `host_defaults`. To override the pool class the
`pool_cls` and `pool_options` parameters can be used. The same
applies to `router_cls` and `router_options` for the router. The pool
options are useful for setting socket timeouts and similar parameters.
"""
def __init__(self, hosts, host_defaults=None, pool_cls=None,
pool_options=None, router_cls=None, router_options=None):
if pool_cls is None:
pool_cls = ConnectionPool
if router_cls is None:
router_cls = PartitionRouter
self._lock = Lock()
self.pool_cls = pool_cls
self.pool_options = pool_options
self.router_cls = router_cls
self.router_options = router_options
self._pools = {}
self._router = None
self.hosts = {}
self._hosts_age = 0
self.host_defaults = host_defaults or {}
for host_config in _iter_hosts(hosts):
self.add_host(**host_config)
def add_host(self, host_id=None, host='localhost', port=6379,
unix_socket_path=None, db=0, password=None,
ssl=False, ssl_options=None):
"""Adds a new host to the cluster. This is only really useful for
unittests as normally hosts are added through the constructor and
changes after the cluster has been used for the first time are
unlikely to make sense.
"""
if host_id is None:
raise RuntimeError('Host ID is required')
elif not isinstance(host_id, (int, long)):
raise ValueError('The host ID has to be an integer')
host_id = int(host_id)
with self._lock:
if host_id in self.hosts:
raise TypeError('Two hosts share the same host id (%r)' %
(host_id,))
self.hosts[host_id] = HostInfo(host_id=host_id, host=host,
port=port, db=db,
unix_socket_path=unix_socket_path,
password=password, ssl=ssl,
ssl_options=ssl_options)
self._hosts_age += 1
def remove_host(self, host_id):
"""Removes a host from the client. This only really useful for
unittests.
"""
with self._lock:
            rv = self.hosts.pop(host_id, None) is not None
pool = self._pools.pop(host_id, None)
if pool is not None:
pool.disconnect()
self._hosts_age += 1
return rv
def disconnect_pools(self):
"""Disconnects all connections from the internal pools."""
with self._lock:
for pool in self._pools.itervalues():
pool.disconnect()
self._pools.clear()
def get_router(self):
"""Returns the router for the cluster. If the cluster reconfigures
the router will be recreated. Usually you do not need to interface
with the router yourself as the cluster's routing client does that
automatically.
This returns an instance of :class:`BaseRouter`.
"""
cached_router = self._router
ref_age = self._hosts_age
if cached_router is not None:
router, router_age = cached_router
if router_age == ref_age:
return router
with self._lock:
router = self.router_cls(self, **(self.router_options or {}))
self._router = (router, ref_age)
return router
def get_pool_for_host(self, host_id):
"""Returns the connection pool for the given host.
This connection pool is used by the redis clients to make sure
that it does not have to reconnect constantly. If you want to use
a custom redis client you can pass this in as connection pool
manually.
"""
if isinstance(host_id, HostInfo):
host_info = host_id
host_id = host_info.host_id
else:
host_info = self.hosts.get(host_id)
if host_info is None:
raise LookupError('Host %r does not exist' % (host_id,))
rv = self._pools.get(host_id)
if rv is not None:
return rv
with self._lock:
rv = self._pools.get(host_id)
if rv is None:
opts = dict(self.pool_options or ())
opts['db'] = host_info.db
opts['password'] = host_info.password
if host_info.unix_socket_path is not None:
opts['path'] = host_info.unix_socket_path
opts['connection_class'] = UnixDomainSocketConnection
if host_info.ssl:
raise TypeError('SSL is not supported for unix '
'domain sockets.')
else:
opts['host'] = host_info.host
opts['port'] = host_info.port
if host_info.ssl:
if SSLConnection is None:
raise TypeError('This version of py-redis does '
'not support SSL connections.')
opts['connection_class'] = SSLConnection
opts.update(('ssl_' + k, v) for k, v in
(host_info.ssl_options or {}).iteritems())
rv = self.pool_cls(**opts)
self._pools[host_id] = rv
return rv
def get_local_client(self, host_id):
"""Returns a localized client for a specific host ID. This client
works like a regular Python redis client and returns results
immediately.
"""
return LocalClient(
self, connection_pool=self.get_pool_for_host(host_id))
def get_local_client_for_key(self, key):
"""Similar to :meth:`get_local_client_for_key` but returns the
client based on what the router says the key destination is.
"""
return self.get_local_client(self.get_router().get_host_for_key(key))
def get_routing_client(self):
"""Returns a routing client. This client is able to automatically
route the requests to the individual hosts. It's thread safe and
        can be used similarly to the host-local client, but it will refuse
to execute commands that cannot be directly routed to an
individual node.
See :class:`RoutingClient` for more information.
"""
return RoutingClient(self)
def map(self, timeout=None, max_concurrency=64):
"""Shortcut context manager for getting a routing client, beginning
a map operation and joining over the result.
In the context manager the client available is a
:class:`MappingClient`. Example usage::
results = {}
with cluster.map() as client:
for key in keys_to_fetch:
results[key] = client.get(key)
for key, promise in results.iteritems():
print '%s => %s' % (key, promise.value)
"""
return self.get_routing_client().map(
timeout=timeout, max_concurrency=max_concurrency)
def fanout(self, hosts=None, timeout=None, max_concurrency=64):
"""Shortcut context manager for getting a routing client, beginning
a fanout operation and joining over the result.
In the context manager the client available is a
:class:`FanoutClient`. Example usage::
with cluster.fanout(hosts='all') as client:
client.flushdb()
"""
return self.get_routing_client().fanout(
hosts=hosts, timeout=timeout, max_concurrency=max_concurrency)
def all(self, timeout=None, max_concurrency=64):
"""Fanout to all hosts. Works otherwise exactly like :meth:`fanout`.
Example::
with cluster.all() as client:
client.flushdb()
"""
return self.fanout('all', timeout=timeout,
max_concurrency=max_concurrency)
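# A minimal usage sketch, assuming four local redis-server instances on
# ports 6379-6382 (the same layout as the Cluster docstring example above);
# the port numbers and keys here are purely illustrative.
if __name__ == '__main__':
    cluster = Cluster(hosts={
        0: {'port': 6379},
        1: {'port': 6380},
        2: {'port': 6381},
        3: {'port': 6382},
    }, host_defaults={'host': '127.0.0.1'})
    client = cluster.get_routing_client()
    client.set('answer', 42)       # routed to a single host by the partition router
    print client.get('answer')
    with cluster.map() as mapped:  # batched access; results come back as promises
        promise = mapped.get('answer')
    print promise.value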
| Stranger6667/rb | rb/cluster.py | Python | apache-2.0 | 10,521 |
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
views = UnwrapElement(IN[0])
booleans = list()
for view in views:
if str(view.ViewType) == 'Schedule':
booleans.append(True)
else:
booleans.append(False)
OUT = booleans | andydandy74/ClockworkForDynamo | nodes/0.7.x/python/View.TypeIsSchedule.py | Python | mit | 250 |
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 12 11:32:41 2017
@author: Ludi Cao
"""
import time
import datetime
import csv
from Adafruit_BME280 import *
import os
import numpy as np
import dateutil.parser  # explicit submodule import; plotdata() calls dateutil.parser.parse
from matplotlib.dates import DateFormatter
import matplotlib.pyplot as plt
from collections import deque
from appJar import gui  # assumed GUI toolkit: plotdata() uses appJar's gui(), addLabel(), addOptionBox(), go()
class weather_DAQ(object):
def __init__(self, maxdata, n_merge):
self.sensor = None
self.running=False
self.time_queue=deque()
self.temp_queue=deque()
self.humid_queue=deque()
self.press_queue=deque()
self.temp_err=deque()
self.humid_err=deque()
self.press_err=deque()
self.maxdata=int(maxdata)
self.n_merge=int(n_merge)
self.temp_list=[]
self.humid_list=[]
self.press_list=[]
self.time_list=[]
self.merge_test=False
self.first_data = True
self.last_time = None
def close(self,plot_id):
plt.close(plot_id)
def create_file(self):
global results
self.sensor = BME280(t_mode=BME280_OSAMPLE_8, p_mode=BME280_OSAMPLE_8, h_mode=BME280_OSAMPLE_8)
file_time= time.strftime("%Y-%m-%d_%H-%M-%S", time.gmtime())
id_info = []
with open ('/home/pi/config/server_config.csv') as f:
reader = csv.reader(f)
for row in reader:
id_info.append(row)
filename = "/home/pi/data/"+"_".join(row)+"_weathe"+file_time+".csv"
results=csv.writer(open(filename, "ab+"), delimiter = ",")
metadata=["Time", "Temp (C)","Temp SD","Pressure (hPa)", "Pressure SD","Humidity (%)","Humidity SD"]
results.writerow(metadata)
def start(self):
global results
date_time = datetime.datetime.now()
degrees = self.sensor.read_temperature()
pascals = self.sensor.read_pressure()
hectopascals = pascals / 100
humidity = self.sensor.read_humidity()
data=[]
self.merge_test=False
self.add_data(self.temp_queue,self.temp_err,self.temp_list,degrees)
self.add_data(self.humid_queue,self.humid_err,self.humid_list,humidity)
self.add_data(self.press_queue,self.press_err,self.press_list,hectopascals)
self.add_time(self.time_queue,self.time_list, date_time)
# data.append(date_time)
# data.append(degrees)
# data.append(hectopascals)
# data.append(humidity)
# results.writerow(data)
if self.first_data and len(self.temp_queue) != 0:
for i in range(len(self.temp_queue)):
data = []
data.append(self.time_queue[i])
data.append(self.temp_queue[i])
data.append(self.temp_err[i])
data.append(self.press_queue[i])
data.append(self.press_err[i])
data.append(self.humid_queue[i])
data.append(self.humid_err[i])
results.writerow(data)
self.last_time = data[0]
self.first_data = False
elif not self.first_data:
try:
print(self.last_time)
if self.time_queue[-1] != self.last_time:
data = []
data.append(self.time_queue[-1])
data.append(self.temp_queue[-1])
data.append(self.temp_err[-1])
data.append(self.press_queue[-1])
data.append(self.press_err[-1])
data.append(self.humid_queue[-1])
data.append(self.humid_err[-1])
results.writerow(data)
self.last_time = self.time_queue[-1]
else:
print('duplicated data.')
except IndexError:
print('No new data being written.')
else:
print('No data acquired yet.')
print ('Temp = {0:0.3f} deg C'.format(degrees))
print ('Pressure = {0:0.2f} hPa'.format(hectopascals))
print ('Humidity = {0:0.2f} %\n'.format(humidity))
def press(self):
if len(self.time_queue)>0:
self.update_plot(3,self.time_queue,self.press_queue,self.press_err,"Time","Pressure(hPa)","Pressure vs. time")
def temp(self):
if len(self.time_queue)>0:
self.update_plot(1,self.time_queue,self.temp_queue,self.temp_err,"Time","Temperature(C)","Temperature vs. time")
def humid(self):
if len(self.time_queue)>0:
self.update_plot(2,self.time_queue,self.humid_queue,self.humid_err,"Time","Humidity(%)","Humidity vs.time")
def add_time(self, queue, timelist, data):
print('Input time: {}\n'.format(data))
timelist.append(data)
if len(timelist)>=self.n_merge:
self.merge_test=True
queue.append(timelist[int((self.n_merge)/2)])
print('Queue time: {}\n'.format(timelist[int((self.n_merge)/2)]))
for i in range(len(timelist)):
timelist.pop()
if len(queue)>self.maxdata:
queue.popleft()
def add_data(self, queue, queue_err,temp_list, data):
temp_list.append(data)
if len(temp_list)>=self.n_merge:
queue.append(np.mean(np.asarray(temp_list)))
queue_err.append(np.std(np.asarray(temp_list)))
for i in range(len(temp_list)):
temp_list.pop()
if len(queue)>self.maxdata:
queue.popleft()
def update_plot(self,plot_id,xdata,ydata,yerr,xlabel,ylable,title):
plt.ion()
fig = plt.figure(plot_id)
plt.clf()
ax=fig.add_subplot(111)
plt.xlabel(xlabel)
plt.ylabel(ylable)
plt.title(title)
plt.plot(xdata,ydata,"r.")
fig.autofmt_xdate()
ax.xaxis.set_major_formatter(DateFormatter('%H:%M:%S'))
ax.errorbar(xdata, ydata, yerr=yerr)
fig.show()
plt.pause(0.0005)
def plotdata(self):
times=[]
degrees_list=[]
pressure_list=[]
humidity_list=[]
temp_ave=[]
temp_unc = []
pressure_ave=[]
pressure_unc=[]
humidity_ave=[]
humidity_unc=[]
merge_times = []
app=gui("Weather Plot","800x400")
app.addLabel("1","Please choose a following .csv file")
file_name=[]
for filename in os.listdir('.'):
if filename.endswith(".csv"):
file_name.append(os.path.join('.', filename))
app.setFont(20)
app.addOptionBox("Files",file_name)
app.setOptionBoxHeight("Files","4")
app.addLabel("2","Enter the number of data points to merge:")
app.setLabelFont("20","Heletica")
app.addNumericEntry("n")
app.setFocus("n")
def ok(btn):
user_file=app.getOptionBox("Files")
n_merge=int(app.getEntry("n"))
row_counter=0
results = csv.reader(open(user_file), delimiter=',')
for r in results:
if row_counter>0:
times.append(dateutil.parser.parse(r[0]))
degrees_list.append(float(r[1]))
pressure_list.append(float(r[2]))
humidity_list.append(float(r[3]))
row_counter+=1
ndata = int(len(degrees_list))
nsum_data = int(ndata/n_merge)
for i in range(nsum_data):
itemp = degrees_list[i*n_merge:(i+1)*n_merge]
itemp_array = np.asarray(itemp)
temp_mean = np.mean(itemp_array)
temp_sigma = np.sqrt(np.var(itemp_array))
temp_ave.append(temp_mean)
temp_unc.append(temp_sigma)
for i in range(nsum_data):
ipressure = pressure_list[i*n_merge:(i+1)*n_merge]
ipressure_array = np.asarray(ipressure)
pressure_mean = np.mean(ipressure_array)
pressure_sigma = np.sqrt(np.var(ipressure_array))
pressure_ave.append(pressure_mean)
pressure_unc.append(pressure_sigma)
for i in range(nsum_data):
ihumid = humidity_list[i*n_merge:(i+1)*n_merge]
ihumid_array = np.asarray(ihumid)
humid_mean = np.mean(ihumid_array)
humid_sigma = np.sqrt(np.var(ihumid_array))
humidity_ave.append(humid_mean)
humidity_unc.append(humid_sigma)
for i in range(nsum_data):
itimes = times[i*n_merge:(i+1)*n_merge]
itime = itimes[int(len(itimes)/2)]
merge_times.append(itime)
fig=plt.figure()
ax=fig.add_subplot(111)
plt.plot(merge_times, temp_ave, "b.")
plt.errorbar(merge_times, temp_ave, yerr = temp_unc)
plt.title("Temperature")
plt.xlabel("Time(s)")
plt.ylabel("Temperature(C)")
fig.autofmt_xdate()
ax.xaxis.set_major_formatter(DateFormatter('%H:%M:%S'))
fig=plt.figure()
ax=fig.add_subplot(111)
plt.plot(merge_times, pressure_ave,"g." )
plt.errorbar(merge_times, pressure_ave, yerr = pressure_unc)
plt.title("Pressure")
plt.xlabel("Time(s)")
plt.ylabel("Pressure(hPa)")
fig.autofmt_xdate()
ax.xaxis.set_major_formatter(DateFormatter('%H:%M:%S'))
fig=plt.figure()
ax=fig.add_subplot(111)
plt.plot(merge_times, humidity_ave,"r." )
plt.errorbar(merge_times, humidity_ave, yerr = humidity_unc)
plt.title("Humidity")
plt.xlabel("Time(s)")
plt.ylabel("Humidity(%)")
fig.autofmt_xdate()
ax.xaxis.set_major_formatter(DateFormatter('%H:%M:%S'))
plt.show()
app.addButton("OK",ok)
app.setButtonWidth("OK","20")
app.setButtonHeight("OK","4")
app.setButtonFont("20","Helvetica")
app.go()
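# A minimal usage sketch, assuming a BME280 sensor is wired to the Pi and the
# /home/pi/config and /home/pi/data paths used by create_file() exist.
if __name__ == '__main__':
    daq = weather_DAQ(maxdata=100, n_merge=5)  # keep 100 merged points, averaging 5 raw readings each
    daq.create_file()                          # open the CSV log and write the header row
    for _ in range(10):
        daq.start()                            # take one reading and append merged data to the CSV
        daq.temp()                             # refresh the live temperature plot
        time.sleep(1)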
| cllamb0/dosenet-raspberrypi | weather_DAQ.py | Python | mit | 10,219 |
import requests
from will.plugin import WillPlugin
from will.decorators import respond_to, periodic, hear, randomly, route, rendered_template, require_settings
class HipChatIsUpPlugin(WillPlugin):
@periodic(second='36')
def hipchat_is_up(self):
try:
r = requests.get("https://status.hipchat.com/api/v2/status.json")
last_status = self.load("last_hipchat_status")
if last_status and r.json()["status"]["indicator"] != last_status:
if r.json()["status"]["indicator"] != "none":
self.say("FYI everyone, HipChat is having trouble: %s" % r.json()["status"]["description"])
else:
self.say("Looks like HipChat's back up!")
self.save("last_hipchat_status", r.json()["status"]["indicator"])
        except Exception:
            # swallow transient network/API errors; the periodic check runs again shortly
            pass
| skoczen/will | will/plugins/devops/hipchat_is_up.py | Python | mit | 859 |
from django.db import models
from django.utils import timezone
class Note(models.Model):
author = models.ForeignKey('auth.User')
title = models.CharField(max_length=200)
text = models.TextField()
created_date = models.DateTimeField(
default=timezone.now)
published_date = models.DateTimeField(
blank=True, null=True)
def publish(self):
self.published_date = timezone.now()
self.save()
def __str__(self):
return self.title | kcchaitanya/Notes_Django | notes/models.py | Python | mit | 501 |
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from openerp import api, fields, models, _
import openerp.addons.decimal_precision as dp
from openerp.exceptions import UserError
class SaleAdvancePaymentInv(models.TransientModel):
_name = "sale.advance.payment.inv"
_description = "Sales Advance Payment Invoice"
@api.model
def _count(self):
return len(self._context.get('active_ids', []))
@api.model
def _get_advance_payment_method(self):
if self._count() == 1:
sale_obj = self.env['sale.order']
order = sale_obj.browse(self._context.get('active_ids'))[0]
if all([line.product_id.invoice_policy == 'order' for line in order.order_line]) or order.invoice_count:
return 'all'
return 'delivered'
@api.model
def _default_product_id(self):
product_id = self.env['ir.values'].get_default('sale.config.settings', 'deposit_product_id_setting')
return self.env['product.product'].browse(product_id)
@api.model
def _default_deposit_account_id(self):
return self._default_product_id().property_account_income_id
@api.model
def _default_deposit_taxes_id(self):
return self._default_product_id().taxes_id
advance_payment_method = fields.Selection([
('delivered', 'Invoiceable lines'),
('all', 'Invoiceable lines (deduct down payments)'),
('percentage', 'Down payment (percentage)'),
('fixed', 'Down payment (fixed amount)')
], string='What do you want to invoice?', default=_get_advance_payment_method, required=True)
product_id = fields.Many2one('product.product', string='Down Payment Product', domain=[('type', '=', 'service')],\
default=_default_product_id)
count = fields.Integer(default=_count, string='# of Orders')
amount = fields.Float('Down Payment Amount', digits=dp.get_precision('Account'), help="The amount to be invoiced in advance, taxes excluded.")
deposit_account_id = fields.Many2one("account.account", string="Income Account", domain=[('deprecated', '=', False)],\
help="Account used for deposits", default=_default_deposit_account_id)
deposit_taxes_id = fields.Many2many("account.tax", string="Customer Taxes", help="Taxes used for deposits", default=_default_deposit_taxes_id)
@api.onchange('advance_payment_method')
def onchange_advance_payment_method(self):
if self.advance_payment_method == 'percentage':
return {'value': {'amount': 0}}
return {}
@api.multi
def _create_invoice(self, order, so_line, amount):
inv_obj = self.env['account.invoice']
ir_property_obj = self.env['ir.property']
account_id = False
if self.product_id.id:
account_id = self.product_id.property_account_income_id.id
if not account_id:
prop = ir_property_obj.get('property_account_income_categ_id', 'product.category')
prop_id = prop and prop.id or False
account_id = order.fiscal_position_id.map_account(prop_id)
if not account_id:
raise UserError(
_('There is no income account defined for this product: "%s". You may have to install a chart of account from Accounting app, settings menu.') % \
(self.product_id.name,))
if self.amount <= 0.00:
raise UserError(_('The value of the down payment amount must be positive.'))
if self.advance_payment_method == 'percentage':
amount = order.amount_untaxed * self.amount / 100
name = _("Down payment of %s%%") % (self.amount,)
else:
amount = self.amount
name = _('Down Payment')
invoice = inv_obj.create({
'name': order.client_order_ref or order.name,
'origin': order.name,
'type': 'out_invoice',
'reference': False,
'account_id': order.partner_id.property_account_receivable_id.id,
'partner_id': order.partner_invoice_id.id,
'invoice_line_ids': [(0, 0, {
'name': name,
'origin': order.name,
'account_id': account_id,
'price_unit': amount,
'quantity': 1.0,
'discount': 0.0,
'uom_id': self.product_id.uom_id.id,
'product_id': self.product_id.id,
'sale_line_ids': [(6, 0, [so_line.id])],
'invoice_line_tax_ids': [(6, 0, [x.id for x in self.product_id.taxes_id])],
'account_analytic_id': order.project_id.id or False,
})],
'currency_id': order.pricelist_id.currency_id.id,
'payment_term_id': order.payment_term_id.id,
'fiscal_position_id': order.fiscal_position_id.id or order.partner_id.property_account_position_id.id,
'team_id': order.team_id.id,
})
invoice.compute_taxes()
return invoice
@api.multi
def create_invoices(self):
sale_orders = self.env['sale.order'].browse(self._context.get('active_ids', []))
if self.advance_payment_method == 'delivered':
sale_orders.action_invoice_create()
elif self.advance_payment_method == 'all':
sale_orders.action_invoice_create(final=True)
else:
# Create deposit product if necessary
if not self.product_id:
vals = self._prepare_deposit_product()
self.product_id = self.env['product.product'].create(vals)
self.env['ir.values'].sudo().set_default('sale.config.settings', 'deposit_product_id_setting', self.product_id.id)
sale_line_obj = self.env['sale.order.line']
for order in sale_orders:
if self.advance_payment_method == 'percentage':
amount = order.amount_untaxed * self.amount / 100
else:
amount = self.amount
if self.product_id.invoice_policy != 'order':
raise UserError(_('The product used to invoice a down payment should have an invoice policy set to "Ordered quantities". Please update your deposit product to be able to create a deposit invoice.'))
if self.product_id.type != 'service':
raise UserError(_("The product used to invoice a down payment should be of type 'Service'. Please use another product or update this product."))
so_line = sale_line_obj.create({
'name': _('Advance: %s') % (time.strftime('%m %Y'),),
'price_unit': amount,
'product_uom_qty': 0.0,
'order_id': order.id,
'discount': 0.0,
'product_uom': self.product_id.uom_id.id,
'product_id': self.product_id.id,
'tax_id': [(6, 0, self.product_id.taxes_id.ids)],
})
self._create_invoice(order, so_line, amount)
if self._context.get('open_invoices', False):
return sale_orders.action_view_invoice()
return {'type': 'ir.actions.act_window_close'}
def _prepare_deposit_product(self):
return {
'name': 'Down payment',
'type': 'service',
'invoice_policy': 'order',
'property_account_income_id': self.deposit_account_id.id,
'taxes_id': [(6, 0, self.deposit_taxes_id.ids)],
}
| vileopratama/vitech | src/addons/sale/wizard/sale_make_invoice_advance.py | Python | mit | 7,554 |
data = (i for i in range(10**12))  # generator: materializing 10**12 ints as a list would exhaust memory
# # install postgres
# brew install postgres
# # start server
# pg_ctl -D /usr/local/var/postgres -l /usr/local/var/postgres/server.log start
# after running this you can query by psql -d acs
import subprocess
import sys
def create_db(db):
cmd = "CREATE DATABASE " + db
# needs to run from base db postgres
run_cmd("postgres", cmd)
def create_table(db, tbl, cols, types):
typed_cols = ", ".join([col + " " + type for col, type in zip(cols, types)])
cmd = "CREATE TABLE %s(%s)" % (tbl, typed_cols)
run_cmd(db, cmd)
def insert_data(db, tbl, file):
cmd = "COPY %s FROM '%s' WITH HEADER CSV" % (tbl, file)
run_cmd(db, cmd)
def run_cmd(db, cmd):
try:
subprocess.call(["psql", "-d", db, "-c", cmd ], shell = False)
except OSError:
print "Install postgres first"
sys.exit(1)
def make_acs_db(file_name):
csv_data = open(file_name)
columns_str = csv_data.readline()
csv_data.close()
columns = [col[1:-1].lower() for col in columns_str.strip().split(",")]
create_db("acs")
create_table("acs", "hh", columns, ['integer'] * len(columns))
insert_data("acs", "hh", file_name)
if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("Usage: <full-path-file-name>")
sys.exit(1)
else:
make_acs_db(sys.argv[1])
| jfeser/ImputeDB | acs/make_acs_db.py | Python | mit | 1,275 |
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from __future__ import absolute_import
from __future__ import print_function
from chaco.label import Label
from six.moves import map
from pychron.core.ui import set_qt
set_qt()
# ============= enthought library imports =======================
from chaco.abstract_overlay import AbstractOverlay
from kiva.fonttools import str_to_font
from traits.api import HasTraits, Instance, Float, File, Property, Str, List
from traitsui.api import View, Controller, UItem
from chaco.api import OverlayPlotContainer
from enable.component_editor import ComponentEditor
from pyface.api import FileDialog, OK
# ============= standard library imports ========================
from lxml.etree import ElementTree, Element
from chaco.plot import Plot
from chaco.array_plot_data import ArrayPlotData
from numpy import linspace, cos, sin, pi
import os
import csv
from chaco.data_label import DataLabel
from pychron.paths import paths
from chaco.plot_graphics_context import PlotGraphicsContext
from traitsui.menu import Action
import math
from pychron.core.helpers.strtools import to_bool
# ============= local library imports ==========================
class myDataLabel(DataLabel):
show_label_coords = False
marker_visible = False
label_position = "center"
border_visible = False
class LabelsOverlay(AbstractOverlay):
labels = List
def overlay(self, other_component, gc, view_bounds=None, mode="normal"):
with gc:
gc.set_font(str_to_font(None, None, "7"))
for x, y, l in self.labels:
ll = Label(x=x, y=y, text=l, font="modern 7")
w, h = ll.get_bounding_box(gc)
x, y = other_component.map_screen([(x, y)])[0]
gc.set_text_position(x - w / 2.0, y + 5)
gc.show_text(l)
class RotatingContainer(OverlayPlotContainer):
rotation = Float(0)
def _draw(self, gc, *args, **kw):
with gc:
w2 = self.width / 2
h2 = self.height / 2
# gc.translate_ctm(w2, h2)
# gc.rotate_ctm(math.radians(self.rotation))
# gc.translate_ctm(-w2, -h2)
super(RotatingContainer, self)._draw(gc, *args, **kw)
class GraphicGeneratorController(Controller):
def save(self, info):
self.model.save()
def traits_view(self):
w, h = 750, 750
v = View(
UItem("srcpath"),
# Item('rotation'),
UItem("container", editor=ComponentEditor(), style="custom"),
width=w + 2,
height=h + 56,
resizable=True,
buttons=[Action(name="Save", action="save"), "OK", "Cancel"],
)
return v
class GraphicModel(HasTraits):
srcpath = File
xmlpath = File
container = Instance(OverlayPlotContainer)
name = Property
_name = Str
rotation = Float(enter_set=True, auto_set=False)
initialized = False
def _get_name(self):
return os.path.splitext(
self._name if self._name else os.path.basename(self.srcpath)
)[0]
def save(self, path=None):
# print self.container.bounds
if path is None:
dlg = FileDialog(action="save as", default_directory=paths.data_dir or "")
if dlg.open() == OK:
path = dlg.path
if path is not None:
_, tail = os.path.splitext(path)
c = self.container
if tail == ".pdf":
from chaco.pdf_graphics_context import PdfPlotGraphicsContext
gc = PdfPlotGraphicsContext(filename=path, pagesize="letter")
else:
if not tail in (".png", ".jpg", ".tiff"):
path = "{}.png".format(path)
gc = PlotGraphicsContext((int(c.outer_width), int(c.outer_height)))
# c.use_backbuffer = False
# for ci in c.components:
# try:
# ci.x_axis.visible = False
# ci.y_axis.visible = False
# except Exception:
# pass
# c.use_backbuffer = False
from reportlab.lib.pagesizes import LETTER
c.do_layout(size=(LETTER[1], LETTER[1]), force=True)
gc.render_component(c)
# c.use_backbuffer = True
gc.save(path)
self._name = os.path.basename(path)
def load(self, path):
parser = ElementTree(file=open(path, "r"))
circles = parser.find("circles")
outline = parser.find("outline")
bb = outline.find("bounding_box")
bs = bb.find("width"), bb.find("height")
w, h = [float(b.text) for b in bs]
use_label = parser.find("use_label")
if use_label is not None:
use_label = to_bool(use_label.text.strip())
else:
use_label = True
data = ArrayPlotData()
p = Plot(data=data, padding=10)
p.x_grid.visible = False
p.y_grid.visible = False
p.x_axis.visible = False
p.y_axis.visible = False
p.x_axis.title = "X cm"
p.y_axis.title = "Y cm"
p.index_range.low_setting = -w / 2
p.index_range.high_setting = w / 2
p.value_range.low_setting = -h / 2
p.value_range.high_setting = h / 2
thetas = linspace(0, 2 * pi)
radius = circles.find("radius").text
radius = float(radius)
face_color = circles.find("face_color")
if face_color is not None:
face_color = face_color.text
else:
face_color = "white"
labels = []
for i, pp in enumerate(circles.findall("point")):
x, y, l = pp.find("x").text, pp.find("y").text, pp.find("label").text
# print i, pp, x, y
# load hole specific attrs
r = pp.find("radius")
if r is None:
r = radius
else:
r = float(r.text)
fc = pp.find("face_color")
if fc is None:
fc = face_color
else:
fc = fc.text
x, y = list(map(float, (x, y)))
xs = x + r * sin(thetas)
ys = y + r * cos(thetas)
xn, yn = "px{:03d}".format(i), "py{:03d}".format(i)
data.set_data(xn, xs)
data.set_data(yn, ys)
plot = p.plot((xn, yn), face_color=fc, type="polygon")[0]
labels.append((x, y, l))
# if use_label:
# label = myDataLabel(component=plot,
# data_point=(x, y),
# label_text=l,
# bgcolor='transparent')
# plot.overlays.append(label)
if use_label:
p.overlays.append(LabelsOverlay(component=plot, labels=labels))
self.container.add(p)
self.container.invalidate_and_redraw()
def _srcpath_changed(self):
# default_radius=radius,
# default_bounds=bounds,
# convert_mm=convert_mm,
# use_label=use_label,
# make=make,
# rotate=rotate)
self._reload()
def _rotation_changed(self):
self._reload()
def _reload(self):
if self.initialized:
self.container = self._container_factory()
print(os.path.isfile(self.srcpath), self.srcpath)
if os.path.isfile(self.srcpath):
p = make_xml(
self.srcpath,
default_bounds=(2.54, 2.54),
default_radius=0.0175 * 2.54,
rotate=self.rotation,
convert_mm=True,
)
self.load(p)
def _container_default(self):
return self._container_factory()
def _container_factory(self):
return RotatingContainer(bgcolor="white")
def make_xml(
path,
offset=100,
default_bounds=(50, 50),
default_radius=3.0,
convert_mm=False,
make=True,
use_label=True,
rotate=0,
):
"""
convert a csv into an xml
use blank line as a group marker
circle labels are offset by ``offset*group_id``
    i.e. group 0: 1, 2, 3
         group 1: 101, 102, 103
"""
out = "{}_from_csv.xml".format(os.path.splitext(path)[0])
if not make:
return out
root = Element("root")
ul = Element("use_label")
ul.text = "True" if use_label else "False"
root.append(ul)
outline = Element("outline")
bb = Element("bounding_box")
width, height = Element("width"), Element("height")
width.text, height.text = list(map(str, default_bounds))
bb.append(width)
bb.append(height)
outline.append(bb)
root.append(outline)
circles = Element("circles")
radius = Element("radius")
radius.text = str(default_radius)
circles.append(radius)
face_color = Element("face_color")
face_color.text = "white"
circles.append(face_color)
root.append(circles)
i = 0
off = 0
reader = csv.reader(open(path, "r"), delimiter=",")
# writer = open(path + 'angles.txt', 'w')
nwriter = None
if rotate:
nwriter = csv.writer(open(path + "rotated_{}.txt".format(rotate), "w"))
header = next(reader)
if nwriter:
nwriter.writerow(header)
theta = math.radians(rotate)
for k, row in enumerate(reader):
# print k, row
row = list(map(str.strip, row))
if row:
e = Element("point")
x, y, l = Element("x"), Element("y"), Element("label")
xx, yy = float(row[1]), float(row[2])
try:
r = float(row[4])
rr = Element("radius")
if convert_mm:
r *= 2.54
rr.text = str(r)
e.append(rr)
except IndexError:
r = None
px = math.cos(theta) * xx - math.sin(theta) * yy
py = math.sin(theta) * xx + math.cos(theta) * yy
xx, yy = px, py
if nwriter:
data = ["{:0.4f}".format(xx), "{:0.4f}".format(yy)]
if r is not None:
data.append("{:0.4f}".format(r))
nwriter.writerow(data)
if convert_mm:
xx = xx * 2.54
yy = yy * 2.54
xx *= 1.1
yy *= 1.1
x.text = str(xx)
y.text = str(yy)
# a = math.degrees(math.atan2(yy, xx))
# writer.write('{} {}\n'.format(k + 1, a))
l.text = str(i + 1 + off)
e.append(l)
e.append(x)
e.append(y)
circles.append(e)
i += 1
else:
# use blank rows as group markers
off += offset
i = 0
tree = ElementTree(root)
tree.write(out, xml_declaration=True, method="xml", pretty_print=True)
return out
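# Sketch of the CSV layout consumed by make_xml (values are illustrative): the
# first row is treated as a header and skipped; afterwards column 1 is x,
# column 2 is y and an optional column 4 is a per-hole radius. A blank row
# starts a new group whose labels are shifted by ``offset`` (e.g. 1, 2, 3 then
# 101, 102, 103 when offset=100):
#   hole,x,y
#   1,0.000,0.000
#   2,1.750,0.000,,0.0500
#   (blank row)
#   1,0.000,1.750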
def open_txt(
p, bounds, radius, use_label=True, convert_mm=False, make=True, rotate=None
):
gm = GraphicModel(srcpath=p, rotation=rotate or 0)
p = make_xml(
p,
offset=0,
default_radius=radius,
default_bounds=bounds,
convert_mm=convert_mm,
use_label=use_label,
make=make,
rotate=rotate,
)
# p = '/Users/ross/Sandbox/graphic_gen_from_csv.xml'
gm.load(p)
gm.initialized = True
gcc = GraphicGeneratorController(model=gm)
return gcc, gm
if __name__ == "__main__":
gm = GraphicModel()
# p = '/Users/ross/Sandbox/2mmirrad.txt'
# p = '/Users/ross/Sandbox/2mmirrad_ordered.txt'
# p = '/Users/ross/Sandbox/1_75mmirrad_ordered.txt'
# p = '/Users/ross/Sandbox/1_75mmirrad_ordered.txt'
# p = '/Users/ross/Pychrondata_diode/setupfiles/irradiation_tray_maps/0_75mmirrad_ordered1.txt'
# p = '/Users/ross/Sandbox/1_75mmirrad.txt'
p = "/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/construction/newtrays/1_75mmirrad_continuous.txt"
# p = '/Users/ross/Pychrondata_diode/setupfiles/irradiation_tray_maps/0_75mmirrad.txt'
# p = '/Users/ross/Pychrondata_diode/setupfiles/irradiation_tray_maps/0_75mmirrad_continuous.txt'
# p = '/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/newtrays/2mmirrad_continuous.txt'
# p = '/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/newtrays/40_no_spokes.txt'
# p = '/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/newtrays/26_spokes.txt'
# p = '/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/newtrays/26_no_spokes.txt'
# p = '/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/construction/newtrays/40_spokes.txt'
# p = '/Users/ross/Desktop/72_spokes'
# p = '/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/construction/newtrays/16_40_ms.txt'
# p = '/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/construction/newtrays/40_spokes_rev2.txt'
# p = '/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/construction/newtrays/40_spokes-5.txt'
p = "/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/construction/newtrays/24_spokes.txt"
p = "/Users/ross/PychronDev/data/o2inch.txt"
p = "/Users/ross/PychronDev/data/421.txt"
gcc, gm = open_txt(p, (51, 51), 0.95, convert_mm=False, make=True, rotate=0)
# p2 = '/Users/ross/Pychrondata_diode/setupfiles/irradiation_tray_maps/newtrays/TX_6-Hole.txt'
# gcc, gm2 = open_txt(p2, (2.54, 2.54), .1, make=False)
# p2 = '/Users/ross/Pychrondata_diode/setupfiles/irradiation_tray_maps/newtrays/TX_20-Hole.txt'
# gcc, gm2 = open_txt(p2, (2.54, 2.54), .1, make=False)
# gm2.container.bgcolor = 'transparent'
# gm2.container.add(gm.container)
gcc.configure_traits()
# ============= EOF =============================================
| USGSDenverPychron/pychron | pychron/entry/graphic_generator.py | Python | apache-2.0 | 14,662 |
import nbformat
from nbconvert.preprocessors import CellExecutionError
from nbconvert.preprocessors import ExecutePreprocessor
from glob import glob
notebook_filenames_l = glob("notebooks/*.ipynb")
for notebook_filename in notebook_filenames_l:
with open(notebook_filename) as f:
print("Executing notebook : {0}".format(notebook_filename))
nb = nbformat.read(f, as_version=4)
ep = ExecutePreprocessor(timeout=1000, kernel_name='python3')
try:
ep.preprocess(nb, {'metadata': {'path': 'notebooks/'}})
except CellExecutionError as e:
print("{0} [FAILED]\n{1}".format(notebook_filename, e))
# exit with error status code
exit(1)
print("{0} [PASSED]".format(notebook_filename))
| tboch/mocpy | test_notebooks.py | Python | gpl-3.0 | 777 |
import time
t1 = time.time()
# use array to represent numbers backwards
def toArray(num):
a = []
while num >0:
a.append(num%10)
num = num //10
return a
def addReverse(x):
result = []
for i in range(0,len(x)):
result.append(x[i]+x[len(x)-1-i])
for i in range(0,len(result)-1):
if result[i] > 9:
result[i] -= 10
result[i+1] += 1
if result[-1] > 9:
result[-1] -= 10
result.append(1)
return result
def isPalindrome(x):
for i in range(0,len(x)//2):
if x[i] != x[len(x)-1-i]:
return False
return True
def isLychrel(x,it):
if it >= 50:
return True
if it > 0 and isPalindrome(x):
return False
return isLychrel(addReverse(x),it+1)
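# Worked example: toArray(47) gives [7, 4]; addReverse adds 47 + 74 = 121,
# which isPalindrome accepts, so 47 is not Lychrel after a single iteration.
# 196, the classic Lychrel candidate, never reaches a palindrome within the
# 50-iteration limit and is therefore counted.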
count = 0
for i in range(1,10001):
if isLychrel(toArray(i),0):
count += 1
print (count)
print("time:",time.time()-t1)
| Adamssss/projectEuler | Problem 001-150 Python/pb055.py | Python | mit | 932 |
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from .iformatter import IFormatter
__all__ = ['CSVFormatter']
class CSVFormatter(IFormatter):
def __init__(self, field_separator=u';'):
IFormatter.__init__(self)
self.field_separator = field_separator
self.started = False
def flush(self):
self.started = False
def format_dict(self, item):
result = u''
if not self.started:
result += self.field_separator.join(item.iterkeys()) + '\n'
self.started = True
result += self.field_separator.join(unicode(v) for v in item.itervalues())
return result
| sputnick-dev/weboob | weboob/tools/application/formatters/csv.py | Python | agpl-3.0 | 1,335 |
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ConfigurationListResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, data=None, pagination=None):
"""
ConfigurationListResponse - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'data': 'list[Configuration]',
'pagination': 'Pagination'
}
self.attribute_map = {
'data': 'data',
'pagination': 'pagination'
}
self._data = data
self._pagination = pagination
@property
def data(self):
"""
Gets the data of this ConfigurationListResponse.
:return: The data of this ConfigurationListResponse.
:rtype: list[Configuration]
"""
return self._data
@data.setter
def data(self, data):
"""
Sets the data of this ConfigurationListResponse.
:param data: The data of this ConfigurationListResponse.
:type: list[Configuration]
"""
self._data = data
@property
def pagination(self):
"""
Gets the pagination of this ConfigurationListResponse.
:return: The pagination of this ConfigurationListResponse.
:rtype: Pagination
"""
return self._pagination
@pagination.setter
def pagination(self, pagination):
"""
Sets the pagination of this ConfigurationListResponse.
:param pagination: The pagination of this ConfigurationListResponse.
:type: Pagination
"""
self._pagination = pagination
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| kinow-io/kinow-python-sdk | kinow_client/models/configuration_list_response.py | Python | apache-2.0 | 3,544 |
#!/usr/local/bin/python
#demultiplex.py
#Class and function definitions providing functionality in the mubiomics
#package.
# Copyright (C) <2012> <Benjamin C. Smith>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC
from Bio.Data import IUPACData
from patricia import *
from hamming import *
from numpy import *
import re, sys
class MultiIndexDict:
"""Thanks to Brad Chapman for posting this answer on StackOverflow.com
@usage: indata = SeqIO.index("f001", "fasta")
pairdata = SeqIO.index("f002", "fasta")
combo = MultiIndexDict(indata, pairdata)
print combo['gi|3318709|pdb|1A91|'].description
print combo['gi|1348917|gb|G26685|G26685'].description
print combo["key_failure"]
*Deprecated, no longer in use.*
"""
def __init__(self, *indexes):
self._indexes = indexes
def __getitem__(self, key):
for idx in self._indexes:
try:
return idx[key]
except KeyError:
pass
raise KeyError("{0} not found".format(key))
def __len__(self):
length=0
for idx in self._indexes:
length=len(idx)+length
return length
def quality_control(rec, min_score=28, \
n_trim=0, min_len=80, win=10):
"""Generator function to trim FASTQ files according to quality criteria.
Reads in a SeqRecord object from a FASTQ format file, if its average
quality score is greater than av_score it scans through the nucleotides
and returns all nucleotides until a window of size win has an average
quality score of less than min_score. It then trims off the first n_trim
nucleotides (which may be unwanted primer padding) and returns a trimmed
SeqRecord object if the length of the remaining sequence is greater than
min_length.
>>>for record in quality_control(record_iterator, av_score=25, \
min_score=15, n_trim=3, min_len=100, win=50]):
print record.format("fasta").rstrip("\n")
"""
try:
#get the 3' cut position
cut3 = max(j for j in arange(len(rec)-win) if
mean(rec[n_trim+j:n_trim+j+win].letter_annotations["phred_quality"])\
>=min_score)
except ValueError:
        yield None
        return
    # test that the trimmed read is long enough before yielding it
if len(rec[n_trim:cut3]) >= min_len:
yield rec[n_trim:cut3]
else:
yield None
def mmDNAtrie(seqs, mismatches):
"""Creates a dictionaried patricia tree of seqs with mismatches.
Requires the patricia class.
seqs should be a list of strings, mismatches should be an integer of the
number of acceptable mismatches. Returns a patricia trie in the form of
a dictionary. The full tree can be viewed with triename._d, where triename
is the name you assigned to the DNAtrie function call.
This function was designed as a way to
quickly allow searches with mismatches for short (<12 bp) DNA barcodes.
Gets slow i.e. takes a few seconds to build the trie, when doing 2
mismatches on barcodes of >15 bp. Not sure about memory requirements.
"""
trie = patricia()
all_words = []
words = seqs
trie_buffer = []
count = 0
while count <= mismatches:
# print count
# print len(words)
for word in words:
if not trie.isWord(word):
trie.addWord(word)
all_words.append(word)
for i, nt in enumerate(word):
for alph in ['A', 'C', 'G', 'T']:
new_word = ''.join([word[0:i],alph,word[i+1:]])
if new_word not in trie_buffer:
trie_buffer.append(new_word)
words = trie_buffer
trie_buffer=[]
count += 1
# for case in all_words:
# print case
return trie
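# Illustrative sketch: with one allowed mismatch the trie accepts any
# single-substitution variant of the input barcode, e.g.
#   trie = mmDNAtrie(['ACGT'], 1)
#   trie.isWord('ACGT')  -> True
#   trie.isWord('ACGA')  -> True   (one substitution)
#   trie.isWord('AAGA')  -> False  (two substitutions)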
def ptrie_search(ptrie, rec, q_len=8, st_pos=-1, max_st_pos=0):
"Searches a patricia trie for a window, starting at st_pos, in rec."
s_len = len(rec)
if st_pos + 1 + q_len >= s_len:
return None
pos = st_pos
m = False
while m is False and pos+1 <= max_st_pos:
pos+=1
try:#ensure that if end of sequence is reached, loop exits properly
s = rec[pos:pos+q_len]
except (IndexError, UnboundLocalError):
return None
m = ptrie.isWord(s)
if m: #Test to be certain loop exited with m=True
return pos, s#if so, return position and located query
else:
return None
def ambiguous_search(rec, query, max_dist=0):
"""Search rec for query and return position of closest match.
query can contain ambiguous nucleotides. Maximum acceptable edit distance
between the query and the best match can be set with max_dist.
@returns: (start_position, end_position)
"""
win = len(query)
dist_dict = {}
for j in arange(len(rec) - win):#scan through sequence
test = rec[j:j+win]#select window to examine
dist = ambiguous_seq_dist(test, query)#calculate edit distance
if dist <= max_dist:
dist_dict[j] = dist #add to a dictionary with (key, value) = (start_position, distance)
#sort dist_dict according to value with lowest first and select first entry
try:
best_pos = sorted(dist_dict.iteritems(), key=lambda (k,v): (v,k))[0][0]
return (best_pos, best_pos+win)
except IndexError:
return None
def ambiguous_seq_dist(t_seq, q_seq):
"""Calculate Levenshtein distance between DNA sequences,
t_seq and q_seq.
Can use ambiguous values in q_seq
(like N = A or T or C or G, R = A or G etc.)
"""
dist = 0
for t_nt, q_nt in zip(t_seq, q_seq):
q_value = IUPACData.ambiguous_dna_values[q_nt]
if len(q_value) == 1:
if t_nt!=q_value:
dist += 1
else:
pattern = '[%s]' % q_value
if not re.match(pattern, t_nt):
dist += 1
return dist
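# For instance, ambiguous_seq_dist('ACGT', 'ACRT') == 0 because R matches A or G,
# while ambiguous_seq_dist('ACTT', 'ACRT') == 1 because T is not in [AG].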
def identify_read(ptrie, rec, barcodes, ids, primers, bc_len=8, rpad=4, \
max_pos=0, max_p_mismatch=2, bc_type='hamming_8') :
"""Finds the position of the barcode.
Looks for the barcode in the mismatch DNA radix trie. If it finds it,
decodes it using the Hamming decoding function then calculates the
Hamming distance between the input primer and the expected
primer (accepting ambiguous DNA codes in the expected primer). It also
simultaneously pulls out the id corresponding to the decoded barcode. After
finding all possible barcodes in the input read, it takes the one with
the smallest number of mismatches in the primer and returns the start
position of the read after trimming the barcode and primer, the id, the
barcode from the read, it's decoded or true counterpart and the number of
mismatches in the primer.
"""
# stores data associated with possible barcode matches at varying window
# starts, so as to find which start position minimizes edit distance
dist_dict = {}
id_dict = {}
old_bc_dict = {}
new_bc_dict = {}
seq_start_dict = {}
pos = -1
while len(str(rec.seq)) > bc_len :
bc_match = ptrie_search(ptrie, str(rec.seq), q_len=bc_len, st_pos=pos \
, max_st_pos=max_pos)
if bc_match:
pos = bc_match[0]
if bc_type == 'hamming_8':
true_bc = decode_barcode_8(bc_match[1]) # hamming, returns correct barcode.
#if it fails to decode, then the try statement below fails
# print "Decoded to " + true_bc
else:
#if not a hamming barcode, just calculate the distance between the
#potential bc at this position against all barcodes in the list
#of known barcodes and return the one with the smallest distance.
bc_dist_dict = {}
for bc in barcodes:
bc_dist_dict[bc] = ambiguous_seq_dist(bc, bc_match[1])
bc_dist_list = sorted(bc_dist_dict.iteritems(), \
key=lambda (k,v): (v,k))
true_bc = bc_dist_list[0][0]
try:
i = barcodes[true_bc]
new_id = ids[i]
#if there is known primer sequence, minimize the edit distance
#between it and the sequence where the primer should be, relative
#to where the potential barcode is.
if len(primers) != 0:
prim = primers[i]
len_prim = len(prim)
prim_start = pos + bc_len + rpad
prim_end = prim_start + len_prim
dist = \
ambiguous_seq_dist(str(rec.seq)[prim_start: \
prim_end], prim)
seq_start_dict[pos] = prim_end
#if no primers, i.e. fragmented DNA, minimize the edit distance
#of the potential barcodes.
else:
len_prim = 0
dist = bc_dist_list[0][1]
seq_start_dict[pos] = pos + bc_len
dist_dict[pos] = dist
id_dict[pos] = new_id
old_bc_dict[pos] = bc_match[1]
new_bc_dict[pos] = true_bc
except KeyError:
# print "Barcode not in list of barcodes."
pass
else:
# print "No barcode found at position."
break
try:
best_pos = sorted(dist_dict.iteritems(), key=lambda (k,v): (v,k))[0][0]
# print "======"
# print "Min. primer dist = " + str(dist_dict[best_pos])
if dist_dict[best_pos] <= max_p_mismatch :
# return best starting position in sequence, best ID, old barcode,
# new barcode, edit distance, and length of primer
return seq_start_dict[best_pos], id_dict[best_pos], \
old_bc_dict[best_pos], new_bc_dict[best_pos], \
dist_dict[best_pos], len_prim
else:
return 0, "Unassigned", "Unknown", "Unknown", dist_dict[best_pos] \
, 0
except IndexError:
return 0, "Unassigned", "Unknown", "Unknown", None, 0
def getname(seqrecord, splitchar, partn):
'''Extracts the name of the read based on sequencing platform.
A hash is used to reduce the size of the names stored in memory.
'''
try:
name=seqrecord.description.split(splitchar)[partn]
#return name #used with radix trie method below
return hash(name)
except AttributeError:
return None
| benjsmith/mubiomics | MPSDemultiplexer/demultiplex.py | Python | gpl-3.0 | 11,452 |
# ------------------------------------------------------------------------------
# This file is part of PyTango (http://pytango.rtfd.io)
#
# Copyright 2006-2012 CELLS / ALBA Synchrotron, Bellaterra, Spain
# Copyright 2013-2014 European Synchrotron Radiation Facility, Grenoble, France
#
# Distributed under the terms of the GNU Lesser General Public License,
# either version 3 of the License, or (at your option) any later version.
# See LICENSE.txt for more info.
# ------------------------------------------------------------------------------
# pylint: disable=deprecated-method
import os
import sys
import runpy
import struct
import subprocess
from ctypes.util import find_library
from setuptools import setup, Extension
from setuptools import Command
from setuptools.command.build_ext import build_ext as dftbuild_ext
from setuptools.command.install import install as dftinstall
from distutils.command.build import build as dftbuild
from distutils.unixccompiler import UnixCCompiler
from distutils.version import LooseVersion as V
# Sphinx imports
try:
import sphinx
import sphinx.util.console
sphinx.util.console.color_terminal = lambda: False
from sphinx.setup_command import BuildDoc
except ImportError:
sphinx = None
# Detect numpy
try:
import numpy
except ImportError:
numpy = None
# Platform constants
POSIX = 'posix' in os.name
WINDOWS = 'nt' in os.name
IS64 = 8 * struct.calcsize("P") == 64
PYTHON_VERSION = sys.version_info
PYTHON2 = (2,) <= PYTHON_VERSION < (3,)
PYTHON3 = (3,) <= PYTHON_VERSION < (4,)
# Arguments
TESTING = any(x in sys.argv for x in ['test', 'pytest'])
def get_readme(name='README.rst'):
"""Get readme file contents without the badges."""
with open(name) as f:
return '\n'.join(
line for line in f.read().splitlines()
if not line.startswith('|') or not line.endswith('|'))
def pkg_config(*packages, **config):
config_map = {
"-I": "include_dirs",
"-L": "library_dirs",
"-l": "libraries",
}
cmd = ["pkg-config", "--cflags-only-I",
"--libs-only-L", "--libs-only-l", " ".join(packages)]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
result = proc.wait()
result = str(proc.communicate()[0].decode("utf-8"))
for elem in result.split():
flag, value = elem[:2], elem[2:]
config_values = config.setdefault(config_map.get(flag), [])
if value not in config_values:
config_values.append(value)
return config
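# For example (a sketch, assuming pkg-config knows a "tango" package), a call
# like pkg_config('tango') might return something of the form
# {'include_dirs': ['/usr/include/tango'], 'library_dirs': ['/usr/lib'],
#  'libraries': ['tango', 'omniORB4']}.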
def abspath(*path):
"""A method to determine absolute path for a given relative path to the
directory where this setup.py script is located"""
setup_dir = os.path.dirname(os.path.abspath(__file__))
return os.path.join(setup_dir, *path)
def get_release_info():
namespace = runpy.run_path(
abspath('tango/release.py'),
run_name='tango.release')
return namespace['Release']
def uniquify(seq):
no_dups = []
for elem in seq:
if elem not in no_dups:
no_dups.append(elem)
return no_dups
def get_c_numpy():
if numpy is None:
return
else:
get_include = getattr(numpy, "get_include", None)
if get_include is None:
get_include = getattr(numpy, "get_numpy_include", None)
if get_include is None:
return
inc = get_include()
if os.path.isdir(inc):
return inc
def has_c_numpy():
return get_c_numpy() is not None
def has_numpy(with_src=True):
ret = numpy is not None
if with_src:
ret &= has_c_numpy()
return ret
def add_lib(name, dirs, sys_libs,
env_name=None, lib_name=None, inc_suffix=None):
if env_name is None:
env_name = name.upper() + '_ROOT'
ENV = os.environ.get(env_name)
if lib_name is None:
lib_name = name
if ENV is None:
sys_libs.append(lib_name)
return
else:
inc_dir = os.path.join(ENV, 'include')
dirs['include_dirs'].append(inc_dir)
if inc_suffix is not None:
inc_dir = os.path.join(inc_dir, inc_suffix)
dirs['include_dirs'].append(inc_dir)
lib_dirs = [os.path.join(ENV, 'lib')]
if IS64:
lib64_dir = os.path.join(ENV, 'lib64')
if os.path.isdir(lib64_dir):
lib_dirs.insert(0, lib64_dir)
dirs['library_dirs'].extend(lib_dirs)
if lib_name.startswith('lib'):
lib_name = lib_name[3:]
dirs['libraries'].append(lib_name)
def add_lib_boost(dirs):
"""Add boost-python configuration details.
There are optional environment variables that can be used for
non-standard boost installations.
The BOOST_ROOT can be used for a custom boost installation in
a separate directory, like:
/opt/my_boost
|- include
|- lib
In this case, use:
BOOST_ROOT=/opt/my_boost
Alternatively, the header and library folders can be specified
individually (do not set BOOST_ROOT). For example, if the
python.hpp file is in /usr/local/include/boost123/boost/:
BOOST_HEADERS=/usr/local/include/boost123
If the libboost_python.so file is in /usr/local/lib/boost123:
BOOST_LIBRARIES=/usr/local/lib/boost123
Lastly, the boost-python library name can be specified, if the
automatic detection is not working. For example, if the
library is libboost_python_custom.so, then use:
BOOST_PYTHON_LIB=boost_python_custom
"""
BOOST_ROOT = os.environ.get('BOOST_ROOT')
BOOST_HEADERS = os.environ.get('BOOST_HEADERS')
BOOST_LIBRARIES = os.environ.get('BOOST_LIBRARIES')
BOOST_PYTHON_LIB = os.environ.get('BOOST_PYTHON_LIB')
boost_library_name = BOOST_PYTHON_LIB if BOOST_PYTHON_LIB else 'boost_python'
if BOOST_ROOT is None:
if POSIX and not BOOST_PYTHON_LIB:
# library name differs widely across distributions, so if it
# wasn't specified as an environment var, then try the
# various options, being as Python version specific as possible
suffixes = [
"{v[0]}{v[1]}".format(v=PYTHON_VERSION),
"-{v[0]}{v[1]}".format(v=PYTHON_VERSION),
"-py{v[0]}{v[1]}".format(v=PYTHON_VERSION),
"{v[0]}-py{v[0]}{v[1]}".format(v=PYTHON_VERSION),
"{v[0]}".format(v=PYTHON_VERSION),
""
]
for suffix in suffixes:
candidate = boost_library_name + suffix
if find_library(candidate):
boost_library_name = candidate
break
if BOOST_HEADERS:
dirs['include_dirs'].append(BOOST_HEADERS)
if BOOST_LIBRARIES:
dirs['library_dirs'].append(BOOST_LIBRARIES)
else:
inc_dir = os.path.join(BOOST_ROOT, 'include')
lib_dirs = [os.path.join(BOOST_ROOT, 'lib')]
if IS64:
lib64_dir = os.path.join(BOOST_ROOT, 'lib64')
if os.path.isdir(lib64_dir):
lib_dirs.insert(0, lib64_dir)
dirs['include_dirs'].append(inc_dir)
dirs['library_dirs'].extend(lib_dirs)
dirs['libraries'].append(boost_library_name)
class build(dftbuild):
user_options = list(dftbuild.user_options)
# Strip library option
user_options.append((
'strip-lib',
None,
"strips the shared library of debugging symbols"
" (Unix like systems only)"))
# No documentation option
user_options.append((
'no-doc',
None,
"do not build documentation"))
boolean_options = dftbuild.boolean_options + ['strip-lib', 'no-doc']
def initialize_options(self):
dftbuild.initialize_options(self)
self.strip_lib = None
self.no_doc = None
def finalize_options(self):
dftbuild.finalize_options(self)
def run(self):
if numpy is None:
self.warn('NOT using numpy: it is not available')
elif get_c_numpy() is None:
self.warn("NOT using numpy: numpy available but C source is not")
dftbuild.run(self)
if self.strip_lib:
self.strip_debug_symbols()
def strip_debug_symbols(self):
if not POSIX:
return
if os.system("type objcopy") != 0:
return
d = abspath(self.build_lib, "tango")
orig_dir = os.path.abspath(os.curdir)
so = "_tango.so"
dbg = so + ".dbg"
try:
os.chdir(d)
stripped_cmd = 'file %s | grep -q "not stripped" || exit 1' % so
not_stripped = os.system(stripped_cmd) == 0
if not_stripped:
os.system("objcopy --only-keep-debug %s %s" % (so, dbg))
os.system("objcopy --strip-debug --strip-unneeded %s" % (so,))
os.system("objcopy --add-gnu-debuglink=%s %s" % (dbg, so))
os.system("chmod -x %s" % (dbg,))
finally:
os.chdir(orig_dir)
def has_doc(self):
if self.no_doc:
return False
if sphinx is None:
return False
if V(sphinx.__version__) <= V("0.6.5"):
print("Documentation will not be generated:"
" sphinx version (%s) too low."
" Needs 0.6.6" % sphinx.__version__)
return False
setup_dir = os.path.dirname(os.path.abspath(__file__))
return os.path.isdir(os.path.join(setup_dir, 'doc'))
sub_commands = dftbuild.sub_commands + [('build_doc', has_doc), ]
class build_ext(dftbuild_ext):
def build_extensions(self):
self.use_cpp_0x = False
if isinstance(self.compiler, UnixCCompiler):
compiler_pars = self.compiler.compiler_so
while '-Wstrict-prototypes' in compiler_pars:
del compiler_pars[compiler_pars.index('-Wstrict-prototypes')]
# self.compiler.compiler_so = " ".join(compiler_pars)
# mimic tango check to activate C++0x extension
compiler = self.compiler.compiler
proc = subprocess.Popen(
compiler + ["-dumpversion"],
stdout=subprocess.PIPE)
pipe = proc.stdout
proc.wait()
gcc_ver = pipe.readlines()[0].decode().strip()
if V(gcc_ver) >= V("4.3.3"):
self.use_cpp_0x = True
dftbuild_ext.build_extensions(self)
def build_extension(self, ext):
if self.use_cpp_0x:
ext.extra_compile_args += ['-std=c++0x']
ext.define_macros += [('PYTANGO_HAS_UNIQUE_PTR', '1')]
ext.extra_compile_args += ['-Wno-unused-variable',
'-Wno-deprecated-declarations',
'-Wno-maybe-uninitialized']
dftbuild_ext.build_extension(self, ext)
if sphinx:
class build_doc(BuildDoc):
def run(self):
# make sure the python path is pointing to the newly built
# code so that the documentation is built on this and not a
# previously installed version
build_cmd = self.get_finalized_command('build')
sys.path.insert(0, os.path.abspath(build_cmd.build_lib))
sphinx.setup_command.BuildDoc.run(self)
sys.path.pop(0)
class install_html(Command):
user_options = []
# Install directory option
user_options.append((
'install-dir=',
'd',
'base directory for installing HTML documentation files'))
def initialize_options(self):
self.install_dir = None
def finalize_options(self):
self.set_undefined_options(
'install',
('install_html', 'install_dir'))
def run(self):
build_doc_cmd = self.get_finalized_command('build_doc')
src_html_dir = abspath(build_doc_cmd.build_dir, 'html')
self.copy_tree(src_html_dir, self.install_dir)
class install(dftinstall):
user_options = list(dftinstall.user_options)
# HTML directory option
user_options.append((
'install-html=',
None,
"installation directory for HTML documentation"))
def initialize_options(self):
dftinstall.initialize_options(self)
self.install_html = None
def finalize_options(self):
dftinstall.finalize_options(self)
# We do a hack here. We cannot trust the 'install_base' value because
# it is not always the final target. For example, in unix, the
# install_base is '/usr' and all other install_* are directly relative
        # to it. However, in unix-local (like Ubuntu) install_base is still
# '/usr' but, for example, install_data, is '$install_base/local'
# which breaks everything.
# The hack consists in using install_data instead of install_base
# since install_data seems to be, in practice, the proper install_base
# on all different systems.
if self.install_html is None:
self.install_html = os.path.join(self.install_data,
'share', 'doc', 'pytango', 'html')
def has_html(self):
return sphinx is not None
sub_commands = list(dftinstall.sub_commands)
sub_commands.append(('install_html', has_html))
def setup_args():
directories = {
'include_dirs': [],
'library_dirs': [],
'libraries': [],
}
sys_libs = []
# Link specifically to libtango version 9
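    # (the leading ':' tells the GNU linker to match that exact file name,
    #  i.e. -l:libtango.so.9, instead of the usual -ltango lookup)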
tangolib = ':libtango.so.9' if POSIX else 'tango'
directories['libraries'].append(tangolib)
add_lib('omni', directories, sys_libs, lib_name='omniORB4')
add_lib('zmq', directories, sys_libs, lib_name='libzmq')
add_lib('tango', directories, sys_libs, inc_suffix='tango')
add_lib_boost(directories)
# special numpy configuration
numpy_c_include = get_c_numpy()
if numpy_c_include is not None:
directories['include_dirs'].append(numpy_c_include)
macros = []
if not has_numpy():
macros.append(('DISABLE_PYTANGO_NUMPY', None))
else:
macros.append(('PYTANGO_NUMPY_VERSION', '"%s"' % numpy.__version__))
if POSIX:
directories = pkg_config(*sys_libs, **directories)
Release = get_release_info()
author = Release.authors['Coutinho']
please_debug = False
packages = [
'tango',
'tango.databaseds',
'tango.databaseds.db_access',
]
py_modules = [
        'PyTango',  # Backward compatibility
]
provides = [
'tango',
        'PyTango',  # Backward compatibility
]
requires = [
'boost_python (>=1.33)',
'numpy (>=1.1)',
'six (>=1.10)',
]
install_requires = [
'six (>=1.10)',
]
if PYTHON_VERSION < (3, 4):
install_requires.append('enum34')
setup_requires = []
if TESTING:
setup_requires += ['pytest-runner']
tests_require = [
'pytest-xdist',
'gevent != 1.5a1',
'psutil',
]
if PYTHON2:
tests_require += [
'trollius', 'futures', 'pyparsing < 3', 'pytest < 5', 'zipp >= 0.5, < 2']
else:
tests_require += ['pytest']
package_data = {
'tango.databaseds': ['*.xmi', '*.sql', '*.sh', 'DataBaseds'],
}
data_files = []
classifiers = [
'Development Status :: 5 - Production/Stable',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'License :: OSI Approved ::'
' GNU Library or Lesser General Public License (LGPL)',
'Natural Language :: English',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Operating System :: Unix',
'Programming Language :: C',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
'Topic :: Software Development :: Libraries',
]
# Note for PyTango developers:
# Compilation time can be greatly reduced by compiling the file
# src/precompiled_header.hpp as src/precompiled_header.hpp.gch
# and then uncommenting this line. Someday maybe this will be
# automated...
extra_compile_args = [
# '-include ext/precompiled_header.hpp',
]
extra_link_args = [
]
if please_debug:
extra_compile_args += ['-g', '-O0']
extra_link_args += ['-g', '-O0']
src_dir = abspath('ext')
client_dir = src_dir
server_dir = os.path.join(src_dir, 'server')
clientfiles = sorted(
os.path.join(client_dir, fname)
for fname in os.listdir(client_dir)
if fname.endswith('.cpp'))
serverfiles = sorted(
os.path.join(server_dir, fname)
for fname in os.listdir(server_dir)
if fname.endswith('.cpp'))
cppfiles = clientfiles + serverfiles
directories['include_dirs'].extend([client_dir, server_dir])
include_dirs = uniquify(directories['include_dirs'])
library_dirs = uniquify(directories['library_dirs'])
libraries = uniquify(directories['libraries'])
pytango_ext = Extension(
name='_tango',
sources=cppfiles,
include_dirs=include_dirs,
library_dirs=library_dirs,
libraries=libraries,
define_macros=macros,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
language='c++',
depends=[])
cmdclass = {
'build': build,
'build_ext': build_ext,
'install_html': install_html,
'install': install}
if sphinx:
cmdclass['build_doc'] = build_doc
long_description = get_readme()
opts = dict(
name='pytango',
version=Release.version_long,
description=Release.description,
long_description=long_description,
author=author[0],
author_email=author[1],
url=Release.url,
download_url=Release.download_url,
platforms=Release.platform,
license=Release.license,
packages=packages,
py_modules=py_modules,
classifiers=classifiers,
package_data=package_data,
data_files=data_files,
provides=provides,
keywords=Release.keywords,
requires=requires,
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
ext_package='tango',
ext_modules=[pytango_ext],
cmdclass=cmdclass)
return opts
def main():
return setup(**setup_args())
if __name__ == "__main__":
main()
| tango-cs/PyTango | setup.py | Python | lgpl-3.0 | 18,899 |
from sm.so.service_orchestrator import LOG
from novaclient import client
from emm_exceptions.NotFoundException import NotFoundException
from model.Entities import Key, Flavor, Image, Quotas
__author__ = 'lto'
class Client:
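    """Thin wrapper around the python-novaclient (v2) API: lists servers and
    floating IPs, resolves fixed vs. floating addresses for a unit, and exposes
    images, flavors, keypairs and quotas for the configured tenant."""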
def __init__(self, conf=None):
if not conf:
from util.SysUtil import SysUtil
self.conf = SysUtil().get_sys_conf()
else:
self.conf = conf
self.nova = client.Client('2', self.conf['os_username'], self.conf['os_password'], self.conf['os_tenant'],
self.conf['os_auth_url'], region_name=self.conf['os_region_name'])
def list_servers(self):
res = self.nova.servers.list()
for s in res:
LOG.debug(s)
for k, v in s.networks.iteritems():
for ip in v:
try:
LOG.debug(self.get_floating_ip(ip))
except:
continue
def get_floating_ip(self, ip):
res = self.nova.floating_ips.list()
for _fip in res:
if _fip.ip == ip:
return _fip
raise NotFoundException("Floating ip " + ip + " not found")
def get_floating_ips(self):
res = self.nova.floating_ips.list()
return res
def set_ips(self, unit):
for k, v in self.nova.servers.get(unit.ext_id).networks.iteritems():
for ip in v:
try:
unit.floating_ips[k] = self.get_floating_ip(ip).ip
LOG.debug(ip + " is a floating ip")
except NotFoundException as e:
unit.ips[k] = ip
LOG.debug(ip + " is a fixed ip")
LOG.debug("ips: " + str(unit.ips))
LOG.debug("floating_ips: " + str(unit.floating_ips))
def get_images(self, object=True):
images_repr = self.nova.images.list()
images = []
for image_repr in images_repr:
if object:
images.append(Image(name=image_repr.name, ext_id=image_repr.id, status=image_repr.status,
created=image_repr.created, updated=image_repr.updated))
else:
images.append(image_repr._info)
return images
def get_flavors(self, object=True):
flavors_repr = self.nova.flavors.list()
flavors = []
for flavor_repr in flavors_repr:
if object:
flavors.append(Flavor(name=flavor_repr.name, ram=flavor_repr.ram, vcpus=flavor_repr.vcpus))
else:
flavors.append(flavor_repr._info)
return flavors
def get_keys(self, object=True):
keys_repr = self.nova.keypairs.list()
keys = []
for key_repr in keys_repr:
if object:
keys.append(Key(name=(key_repr.name)))
else:
keys.append(key_repr._info)
return keys
def get_quotas(self, object=True):
quotas_repr = self.nova.quotas.get(tenant_id=self.conf.get('os_tenant'))
if object:
quotas = (Quotas(**quotas_repr._info))
else:
quotas = (quotas_repr._info)
return quotas | MobileCloudNetworking/maas | bundle/clients/nova.py | Python | apache-2.0 | 3,217 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
#
# Copyright (c) 2008-2011 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>, 2008.
#
# Version: 1.0
#
from django.conf import settings
PAGE = settings.PAGE
class BaseController(object):
conn = None
def __init__(self, conn, **kw):
self.conn = conn
def getShareId(self):
return self.conn.getShareId()
###########################################################
# Paging
def doPaging(self, page, page_size, total_size, limit=PAGE):
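        # Build the pagination context: 't' is the total page count; when the
        # result set spans more than ten pages only a window around the current
        # page is listed and -1 entries mark the truncated ends. Returns None
        # when everything fits on a single page.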
total = list()
t = (total_size/limit) + (total_size % limit > 0 and 1 or 0)
if total_size > (limit * 10):
if page > 10:
total.append(-1)
for i in range((1, page-9)[page-9 >= 1],
(t+1, page+10)[page+9 < t]):
total.append(i)
if page < t-9:
total.append(-1)
elif total_size > limit and total_size <= (limit*10):
for i in range(1, t+1):
total.append(i)
else:
total.append(1)
next = None
if page_size == limit and (page*limit) < total_size:
next = page + 1
prev = None
if page > 1:
prev = page - 1
if len(total) > 1:
return {'page': page, 'total': total, 'next': next, "prev": prev}
return None
| tp81/openmicroscopy | components/tools/OmeroWeb/omeroweb/webclient/controller/__init__.py | Python | gpl-2.0 | 2,099 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import resource
class Version(resource.Resource):
resource_key = 'version'
resources_key = 'versions'
base_path = '/'
# capabilities
allow_list = True
# Properties
media_types = resource.Body('media-types')
status = resource.Body('status')
updated = resource.Body('updated')
@classmethod
def list(cls, session, paginated=False, base_path=None, **params):
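        # The identity server returns the versions nested under a 'values' key
        # ({'versions': {'values': [...]}}), so the base class list() is
        # overridden to unwrap that structure; 'paginated' is accepted but unused.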
if base_path is None:
base_path = cls.base_path
resp = session.get(base_path,
params=params)
resp = resp.json()
for data in resp[cls.resources_key]['values']:
yield cls.existing(**data)
| ctrlaltdel/neutrinator | vendor/openstack/identity/version.py | Python | gpl-3.0 | 1,236 |
# Time-stamp: <2019-09-25 10:04:48 taoliu>
"""Description: Fine-tuning script to call broad peaks from a single
bedGraph track for scores.
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD License (see the file LICENSE included with
the distribution).
"""
# ------------------------------------
# python modules
# ------------------------------------
import sys
import os
import logging
from MACS2.IO import BedGraphIO
# ------------------------------------
# constants
# ------------------------------------
logging.basicConfig(level=20,
format='%(levelname)-5s @ %(asctime)s: %(message)s ',
datefmt='%a, %d %b %Y %H:%M:%S',
stream=sys.stderr,
filemode="w"
)
# ------------------------------------
# Misc functions
# ------------------------------------
error = logging.critical # function alias
warn = logging.warning
debug = logging.debug
info = logging.info
# ------------------------------------
# Classes
# ------------------------------------
# ------------------------------------
# Main function
# ------------------------------------
def run( options ):
info("Read and build bedGraph...")
bio = BedGraphIO.bedGraphIO(options.ifile)
btrack = bio.build_bdgtrack(baseline_value=0)
info("Call peaks from bedGraph...")
bpeaks = btrack.call_broadpeaks (lvl1_cutoff=options.cutoffpeak, lvl2_cutoff=options.cutofflink, min_length=options.minlen, lvl1_max_gap=options.lvl1maxgap, lvl2_max_gap=options.lvl2maxgap)
info("Write peaks...")
if options.ofile:
bf = open( os.path.join( options.outdir, options.ofile ), "w" )
options.oprefix = options.ofile
else:
bf = open ( os.path.join( options.outdir, "%s_c%.1f_C%.2f_l%d_g%d_G%d_broad.bed12" % (options.oprefix,options.cutoffpeak,options.cutofflink,options.minlen,options.lvl1maxgap,options.lvl2maxgap)), "w" )
bpeaks.write_to_gappedPeak(bf, name_prefix=(options.oprefix+"_broadRegion").encode(), score_column="score", trackline=options.trackline)
info("Done")
| taoliu/MACS | MACS2/bdgbroadcall_cmd.py | Python | bsd-3-clause | 2,141 |
#!/usr/bin/python
import sys, os, operator, smtplib, re
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
def send_email(addr, subject, msg_body):
email_subject = subject
from_addr="[email protected]"
to_addr = [addr, '[email protected]']
msg = MIMEMultipart()
msg['From'] = 'Confer Team <[email protected]>'
msg['To'] = addr
msg['Subject'] = email_subject
msg.attach(MIMEText(msg_body))
smtp_conn = smtplib.SMTP_SSL('login', 25)
smtp_conn.sendmail(from_addr, to_addr, msg.as_string())
smtp_conn.close()
def send_survey_email():
f = open(sys.argv[1]).read()
names = re.split('\n', f)
subject = "Confer@CSCW 2014 -- your feedback would help us improve."
for name in names:
tokens = re.split(',', name.strip())
tokens = map(lambda x: x.strip(), tokens)
print tokens
msg_body = """
Dear %s,
Hope you enjoyed using Confer at CSCW 2014! We would love to hear from you about your experience with Confer during the conference. Please share your comments and suggestions by completing this short survey:
https://docs.google.com/forms/d/1Vuc_tQsNwFtZ4k7b_Rumcaim7MM8hSPjw7uIdL6TSm8/viewform
We value your feedback and we look forward to serve you at future conferences! If you like, you can also contact us directly at [email protected].
Best,
The Confer Team
[email protected]
""" %(tokens[1])
send_email(tokens[0], subject, msg_body)
def main():
send_survey_email()
if __name__ == '__main__':
main()
| anantb/confer | scripts/cscw2014/send_email.py | Python | mit | 1,516 |
"""
kombu.transport.filesystem
==========================
Transport using the file system as the message store.
"""
from __future__ import absolute_import
from Queue import Empty
from anyjson import loads, dumps
import os
import shutil
import time
import uuid
import tempfile
from . import virtual
from kombu.exceptions import StdConnectionError, StdChannelError
from kombu.utils import cached_property
VERSION = (1, 0, 0)
__version__ = ".".join(map(str, VERSION))
# needs win32all to work on Windows
if os.name == 'nt':
import win32con
import win32file
import pywintypes
LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK
# 0 is the default
LOCK_SH = 0 # noqa
LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY # noqa
__overlapped = pywintypes.OVERLAPPED()
def lock(file, flags):
hfile = win32file._get_osfhandle(file.fileno())
win32file.LockFileEx(hfile, flags, 0, 0xffff0000, __overlapped)
def unlock(file):
hfile = win32file._get_osfhandle(file.fileno())
win32file.UnlockFileEx(hfile, 0, 0xffff0000, __overlapped)
elif os.name == 'posix':
import fcntl
from fcntl import LOCK_EX, LOCK_SH, LOCK_NB # noqa
def lock(file, flags): # noqa
fcntl.flock(file.fileno(), flags)
def unlock(file): # noqa
fcntl.flock(file.fileno(), fcntl.LOCK_UN)
else:
raise RuntimeError(
'Filesystem plugin only defined for NT and POSIX platforms')
class Channel(virtual.Channel):
def _put(self, queue, payload, **kwargs):
"""Put `message` onto `queue`."""
filename = '%s_%s.%s.msg' % (int(round(time.time() * 1000)),
uuid.uuid4(), queue)
filename = os.path.join(self.data_folder_out, filename)
try:
f = open(filename, 'wb')
lock(f, LOCK_EX)
f.write(dumps(payload))
except (IOError, OSError):
raise StdChannelError(
'Filename [%s] could not be placed into folder.' % filename)
finally:
unlock(f)
f.close()
def _get(self, queue):
"""Get next message from `queue`."""
queue_find = '.' + queue + '.msg'
folder = os.listdir(self.data_folder_in)
folder = sorted(folder)
while len(folder) > 0:
filename = folder.pop(0)
            # only handle messages for the requested queue
if filename.find(queue_find) < 0:
continue
if self.store_processed:
processed_folder = self.processed_folder
else:
processed_folder = tempfile.gettempdir()
try:
# move the file to the tmp/processed folder
shutil.move(os.path.join(self.data_folder_in, filename),
processed_folder)
except IOError:
pass # file could be locked, or removed in meantime so ignore
filename = os.path.join(processed_folder, filename)
try:
f = open(filename, 'rb')
payload = f.read()
f.close()
if not self.store_processed:
os.remove(filename)
except (IOError, OSError):
raise StdChannelError(
'Filename [%s] could not be read from queue.' % filename)
return loads(payload)
raise Empty()
def _purge(self, queue):
"""Remove all messages from `queue`."""
count = 0
queue_find = '.' + queue + '.msg'
folder = os.listdir(self.data_folder_in)
while len(folder) > 0:
filename = folder.pop()
try:
# only purge messages for the requested queue
if filename.find(queue_find) < 0:
continue
filename = os.path.join(self.data_folder_in, filename)
os.remove(filename)
count += 1
except OSError:
# we simply ignore its existence, as it was probably
# processed by another worker
pass
return count
def _size(self, queue):
"""Return the number of messages in `queue` as an :class:`int`."""
count = 0
queue_find = "." + queue + '.msg'
folder = os.listdir(self.data_folder_in)
while len(folder) > 0:
filename = folder.pop()
            # only handle messages for the requested queue
if filename.find(queue_find) < 0:
continue
count += 1
return count
@property
def transport_options(self):
return self.connection.client.transport_options
@cached_property
def data_folder_in(self):
return self.transport_options.get('data_folder_in', 'data_in')
@cached_property
def data_folder_out(self):
return self.transport_options.get('data_folder_out', 'data_out')
@cached_property
def store_processed(self):
return self.transport_options.get('store_processed', False)
@cached_property
def processed_folder(self):
return self.transport_options.get('processed_folder', 'processed')
class Transport(virtual.Transport):
Channel = Channel
default_port = 0
connection_errors = (StdConnectionError, )
channel_errors = (StdChannelError, )
driver_type = 'filesystem'
driver_name = 'filesystem'
def driver_version(self):
return 'N/A'
| mozilla/firefox-flicks | vendor-local/lib/python/kombu/transport/filesystem.py | Python | bsd-3-clause | 5,565 |
# -*- coding: utf-8 -*-
import codecs
from distutils.core import setup
version = __import__('osmapi').__version__
try:
import pypandoc
from unidecode import unidecode
description = codecs.open('README.md', encoding='utf-8').read()
description = unidecode(description)
description = pypandoc.convert(description, 'rst', format='md')
except (IOError, ImportError):
description = 'Python wrapper for the OSM API'
setup(
name='osmapi',
packages=['osmapi'],
version=version,
description='Python wrapper for the OSM API',
long_description=description,
author='Etienne Chové',
author_email='[email protected]',
maintainer='Stefan Oderbolz',
maintainer_email='[email protected]',
url='https://github.com/metaodi/osmapi',
download_url='https://github.com/metaodi/osmapi/archive/v%s.zip' % version,
keywords=['openstreetmap', 'osm', 'api'],
license='GPLv3',
classifiers=[
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: GIS',
'Topic :: Software Development :: Libraries',
'Development Status :: 4 - Beta',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
],
)
| austinhartzheim/osmapi | setup.py | Python | gpl-3.0 | 1,507 |
# ------------------------------------------------------------------------------
# Name: __init__
# Purpose: Package information for h5cube
#
# Author: Brian Skinn
# [email protected]
#
# Created: 22 Aug 2016
# Copyright: (c) Brian Skinn 2016
# License: The MIT License; see "LICENSE.txt" for full license terms
# and contributor agreement.
#
# http://www.github.com/bskinn/h5cube
#
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from .h5cube import cube_to_h5, h5_to_cube, H5, DEF, EXIT
__version__ = '0.3'
| bskinn/h5cube | h5cube/__init__.py | Python | mit | 652 |
from pymt import *
from pyglet.gl import *
class AlphaWindow(MTWidget):
def __init__(self, **kwargs):
super(AlphaWindow, self).__init__(**kwargs)
self.tsize = (64, 64)
self.fbo1 = Fbo(size=self.tsize)
self.fbo2 = Fbo(size=self.tsize)
self.fbo3 = Fbo(size=self.tsize)
self.need_redraw = True
self.s, self.d = 0, 0
def on_touch_down(self, touch):
self.s = int(touch.x / 64)
self.d = int(touch.y / 64)
def on_touch_move(self, touch):
self.s = int(touch.x / 64)
self.d = int(touch.y / 64)
def draw(self):
if self.need_redraw:
with self.fbo1:
glClearColor(0, 0, 0, 1)
glClear(GL_COLOR_BUFFER_BIT)
set_color(1, 0, 0)
drawRectangle(size=self.tsize)
set_color(0, 0, 1)
drawLine((10, 10 , 54, 54), width=8)
with self.fbo2:
glClearColor(0, 0, 0, 0)
glClear(GL_COLOR_BUFFER_BIT)
set_color(0, 0, 0)
drawRectangle(pos=(32, 0), size=(32, 64))
set_color(0.5, 0.5, 0.5)
drawRectangle(pos=(28, 0), size=(8, 64))
with self.fbo3:
glClearColor(0, 0, 0, 0)
glClear(GL_COLOR_BUFFER_BIT)
set_color(0, 1, 0)
drawLine((10, 54, 54, 10), width=8)
set_color(0, 0, 0)
drawLine((10, 32 , 54, 32), width=8)
self.need_redraw = False
alphasrc = (GL_ZERO, GL_ONE,
GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA,
GL_DST_ALPHA, GL_ONE_MINUS_DST_ALPHA,
GL_SRC_COLOR, GL_ONE_MINUS_SRC_COLOR,
GL_SRC_ALPHA_SATURATE)
alphadst = (GL_ZERO, GL_ONE,
GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA,
GL_DST_ALPHA, GL_ONE_MINUS_DST_ALPHA,
GL_DST_COLOR, GL_ONE_MINUS_DST_COLOR)
self.d = min(self.d, len(alphadst)-1)
self.s = min(self.s, len(alphasrc)-1)
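        # One 64x64 tile per (source, destination) blend-factor pair: column x
        # selects the factor from alphasrc, row y from alphadst, so all
        # combinations can be compared side by side on screen.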
for x in xrange(len(alphasrc)):
for y in xrange(len(alphadst)):
pos = x * 64, y * 64
set_color(1, 1, 1)
drawTexturedRectangle(
texture=self.fbo1.texture, pos=pos, size=self.tsize)
'''
set_color(1, 1, 1, 0.999, sfactor=alphasrc[self.s],
dfactor=alphadst[self.d])
#set_color(1, 1, 1, 0.999, sfactor=GL_DST_COLOR, dfactor=GL_ZERO)
drawTexturedRectangle(
texture=self.fbo2.texture, pos=pos, size=self.tsize)
'''
set_color(1, 1, 1, 0.999, sfactor=alphasrc[x], dfactor=alphadst[y])
drawTexturedRectangle(
texture=self.fbo3.texture, pos=pos, size=self.tsize)
m = MTWindow()
m.add_widget(AlphaWindow())
runTouchApp()
| nuigroup/nuipaint | tests/alphalayer-test.py | Python | gpl-3.0 | 2,967 |
"""
This inline script makes it possible to use mitmproxy in scenarios where IP spoofing has been used to redirect
connections to mitmproxy. The way this works is that we rely on either the TLS Server Name Indication (SNI) or the
Host header of the HTTP request.
Of course, this is not foolproof - if an HTTPS connection comes without SNI, we don't
know the actual target and cannot construct a certificate that looks valid.
Similarly, if there's no Host header or a spoofed Host header, we're out of luck as well.
Using transparent mode is the better option most of the time.
Usage:
mitmproxy
-p 80
-R http://example.com/ // Used as the target location if no Host header is present
mitmproxy
-p 443
        -R https://example.com/ // Used as the target location if neither SNI nor Host header is present.
mitmproxy will always connect to the default location first, so it must be reachable.
As a workaround, you can spawn an arbitrary HTTP server and use that for both endpoints, e.g.
mitmproxy -p 80 -R http://localhost:8000
mitmproxy -p 443 -R https2http://localhost:8000
"""
def request(context, flow):
if flow.client_conn.ssl_established:
# TLS SNI or Host header
flow.request.host = flow.client_conn.connection.get_servername(
) or flow.request.pretty_host(hostheader=True)
# If you use a https2http location as default destination, these
# attributes need to be corrected as well:
flow.request.port = 443
flow.request.scheme = "https"
else:
# Host header
flow.request.host = flow.request.pretty_host(hostheader=True)
| noikiy/mitmproxy | examples/dns_spoofing.py | Python | mit | 1,652 |
from django.apps import AppConfig
class ListsConfig(AppConfig):
name = "lists"
| syrusakbary/snapshottest | examples/django_project/lists/apps.py | Python | mit | 85 |
#
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import numpy as np
import espressomd
from espressomd.galilei import GalileiTransform
BOX_L = np.array([10, 20, 30])
N_PART = 500
class Galilei(ut.TestCase):
system = espressomd.System(box_l=BOX_L)
def setUp(self):
self.system.part.add(pos=BOX_L * np.random.random((N_PART, 3)),
v=-5. + 10. * np.random.random((N_PART, 3)),
f=np.random.random((N_PART, 3)))
if espressomd.has_features("MASS"):
self.system.part[:].mass = 42. * np.random.random((N_PART,))
def tearDown(self):
self.system.part.clear()
def test_kill_particle_motion(self):
g = GalileiTransform()
g.kill_particle_motion()
np.testing.assert_array_equal(
np.copy(self.system.part[:].v), np.zeros((N_PART, 3)))
def test_kill_particle_forces(self):
g = GalileiTransform()
g.kill_particle_forces()
np.testing.assert_array_equal(
np.copy(self.system.part[:].f), np.zeros((N_PART, 3)))
def test_cms(self):
parts = self.system.part[:]
g = GalileiTransform()
total_mass = np.sum(parts.mass)
com = np.sum(
np.multiply(parts.mass.reshape((N_PART, 1)), parts.pos), axis=0) / total_mass
np.testing.assert_allclose(np.copy(g.system_CMS()), com)
def test_cms_velocity(self):
parts = self.system.part[:]
g = GalileiTransform()
total_mass = np.sum(parts.mass)
com_v = np.sum(
np.multiply(parts.mass.reshape((N_PART, 1)), parts.v), axis=0) / total_mass
np.testing.assert_allclose(np.copy(g.system_CMS_velocity()), com_v)
def test_galilei_transform(self):
g = GalileiTransform()
g.galilei_transform()
np.testing.assert_allclose(
np.copy(g.system_CMS_velocity()), np.zeros((3,)), atol=1e-15)
if __name__ == "__main__":
ut.main()
| mkuron/espresso | testsuite/python/galilei.py | Python | gpl-3.0 | 2,661 |
from jmessage import users
from jmessage import common
from conf import *
import time
import json
jmessage=common.JMessage(app_key,master_secret)
groups=jmessage.create_groups()
response=groups.get_groups_list("1","2")
time.sleep(2)
print (response.content) | jpush/jmessage-api-python-client | example/groups/get_groups_list.py | Python | mit | 260 |
import os
import unittest
from distutils.version import StrictVersion
from walrus import Database
HOST = os.environ.get('WALRUS_REDIS_HOST') or '127.0.0.1'
PORT = os.environ.get('WALRUS_REDIS_PORT') or 6379
db = Database(host=HOST, port=PORT, db=15)
REDIS_VERSION = None
def requires_version(min_version):
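    # Skip the decorated test when the Redis server is older than min_version;
    # the server version is read once from INFO and cached in REDIS_VERSION.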
def decorator(fn):
global REDIS_VERSION
if REDIS_VERSION is None:
REDIS_VERSION = db.info()['redis_version']
too_old = StrictVersion(REDIS_VERSION) < StrictVersion(min_version)
return unittest.skipIf(too_old,
'redis too old, requires %s' % min_version)(fn)
return decorator
def stream_test(fn):
test_stream = os.environ.get('TEST_STREAM')
if not test_stream:
return requires_version('4.9.101')(fn)
else:
return unittest.skipIf(not test_stream, 'skipping stream tests')(fn)
def zpop_test(fn):
test_zpop = os.environ.get('TEST_ZPOP')
if not test_zpop:
return requires_version('4.9.101')(fn)
else:
return unittest.skipIf(not test_zpop, 'skipping zpop* tests')(fn)
class WalrusTestCase(unittest.TestCase):
def setUp(self):
db.flushdb()
db._transaction_local.pipes = []
def tearDown(self):
db.flushdb()
db._transaction_local.pipes = []
def assertList(self, values, expected):
values = list(values)
self.assertEqual(len(values), len(expected))
for value, item in zip(values, expected):
self.assertEqual(value, item)
| coleifer/walrus | walrus/tests/base.py | Python | mit | 1,551 |
from model_utils import Choices
STATUS = Choices('active', 'inactive', 'deleted')
DEFAULT_STATUS = Choices('active', 'pending')
| nagyistoce/geokey | geokey/categories/base.py | Python | apache-2.0 | 129 |
class AppConfHelper(object):
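    """Application-configuration singleton: __new__ always hands back the same
    cached instance, so initialize() needs to be called only once before
    find_replacement() is used."""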
def __new__(cls):
if not hasattr(cls, 'instance'):
cls.instance = super(AppConfHelper, cls).__new__(cls)
cls.instance._appconf = None
return cls.instance
def initialize(self, config):
self._appconf = config
def find_replacement(self, key):
parts = key.split(':')
obj = self._appconf
for x in range(0, len(parts)):
part = parts[x]
if part in obj:
if x < len(parts):
obj = obj[part]
else:
obj = 'missing key {0}'.format(key)
break
return obj
| aspyatkin/assetoolz | assetoolz/appconf.py | Python | mit | 669 |
# -*- coding: utf-8 -*-
#
# mini-dinstall-ng documentation build configuration file, created by
# sphinx-quickstart on Fri Oct 3 18:06:14 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'mini-dinstall-ng'
copyright = u'2014, c0ff3m4kr'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'mini-dinstall-ngdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'mini-dinstall-ng.tex', u'mini-dinstall-ng Documentation',
u'c0ff3m4kr', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'mini-dinstall-ng', u'mini-dinstall-ng Documentation',
[u'c0ff3m4kr'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'mini-dinstall-ng', u'mini-dinstall-ng Documentation',
u'c0ff3m4kr', 'mini-dinstall-ng', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| coffeemakr/mini-dinstall-ng | doc/conf.py | Python | gpl-3.0 | 8,442 |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Post-processing of raw result."""
import numpy as np
from qiskit.exceptions import QiskitError
def _hex_to_bin(hexstring):
"""Convert hexadecimal readouts (memory) to binary readouts."""
return str(bin(int(hexstring, 16)))[2:]
def _pad_zeros(bitstring, memory_slots):
"""If the bitstring is truncated, pad extra zeros to make its
length equal to memory_slots"""
return format(int(bitstring, 2), '0{}b'.format(memory_slots))
def _separate_bitstring(bitstring, creg_sizes):
"""Separate a bitstring according to the registers defined in the result header."""
substrings = []
running_index = 0
for _, size in reversed(creg_sizes):
substrings.append(bitstring[running_index: running_index + size])
running_index += size
return ' '.join(substrings)
def format_counts_memory(shot_memory, header=None):
"""
Format a single bitstring (memory) from a single shot experiment.
- The hexadecimals are expanded to bitstrings
- Spaces are inserted at register divisions.
Args:
shot_memory (str): result of a single experiment.
header (dict): the experiment header dictionary containing
useful information for postprocessing. creg_sizes
            is a nested list where the inner element is a list
            of creg name, creg size pairs. memory_slots is an integer
specifying the number of total memory_slots in the experiment.
Returns:
dict: a formatted memory
"""
if shot_memory.startswith('0x'):
shot_memory = _hex_to_bin(shot_memory)
if header:
creg_sizes = header.get('creg_sizes', None)
memory_slots = header.get('memory_slots', None)
if memory_slots:
shot_memory = _pad_zeros(shot_memory, memory_slots)
if creg_sizes and memory_slots:
shot_memory = _separate_bitstring(shot_memory, creg_sizes)
return shot_memory
def _list_to_complex_array(complex_list):
"""Convert nested list of shape (..., 2) to complex numpy array with shape (...)
Args:
complex_list (list): List to convert.
Returns:
np.ndarray: Complex numpy array
Raises:
QiskitError: If inner most array of input nested list is not of length 2.
"""
arr = np.asarray(complex_list, dtype=np.complex_)
if not arr.shape[-1] == 2:
raise QiskitError('Inner most nested list is not of length 2.')
return arr[..., 0] + 1j*arr[..., 1]
def format_level_0_memory(memory):
""" Format an experiment result memory object for measurement level 0.
Args:
memory (list): Memory from experiment with `meas_level==1`. `avg` or
`single` will be inferred from shape of result memory.
Returns:
np.ndarray: Measurement level 0 complex numpy array
Raises:
QiskitError: If the returned numpy array does not have 2 (avg) or 3 (single)
indices.
"""
formatted_memory = _list_to_complex_array(memory)
# infer meas_return from shape of returned data.
if not 2 <= len(formatted_memory.shape) <= 3:
raise QiskitError('Level zero memory is not of correct shape.')
return formatted_memory
def format_level_1_memory(memory):
""" Format an experiment result memory object for measurement level 1.
Args:
memory (list): Memory from experiment with `meas_level==1`. `avg` or
`single` will be inferred from shape of result memory.
Returns:
np.ndarray: Measurement level 1 complex numpy array
Raises:
QiskitError: If the returned numpy array does not have 1 (avg) or 2 (single)
indices.
"""
formatted_memory = _list_to_complex_array(memory)
# infer meas_return from shape of returned data.
if not 1 <= len(formatted_memory.shape) <= 2:
raise QiskitError('Level one memory is not of correct shape.')
return formatted_memory
def format_level_2_memory(memory, header=None):
""" Format an experiment result memory object for measurement level 2.
Args:
memory (list): Memory from experiment with `meas_level==2` and `memory==True`.
header (dict): the experiment header dictionary containing
useful information for postprocessing.
Returns:
list[str]: List of bitstrings
"""
memory_list = []
for shot_memory in memory:
memory_list.append(format_counts_memory(shot_memory, header))
return memory_list
def format_counts(counts, header=None):
"""Format a single experiment result coming from backend to present
to the Qiskit user.
Args:
counts (dict): counts histogram of multiple shots
header (dict): the experiment header dictionary containing
useful information for postprocessing.
Returns:
dict: a formatted counts
"""
counts_dict = {}
for key, val in counts.items():
key = format_counts_memory(key, header)
counts_dict[key] = val
return counts_dict
def format_statevector(vec, decimals=None):
"""Format statevector coming from the backend to present to the Qiskit user.
Args:
vec (list): a list of [re, im] complex numbers.
decimals (int): the number of decimals in the statevector.
If None, no rounding is done.
Returns:
list[complex]: a list of python complex numbers.
"""
num_basis = len(vec)
vec_complex = np.zeros(num_basis, dtype=complex)
for i in range(num_basis):
vec_complex[i] = vec[i][0] + 1j * vec[i][1]
if decimals:
vec_complex = np.around(vec_complex, decimals=decimals)
return vec_complex
def format_unitary(mat, decimals=None):
"""Format unitary coming from the backend to present to the Qiskit user.
Args:
mat (list[list]): a list of list of [re, im] complex numbers
decimals (int): the number of decimals in the statevector.
If None, no rounding is done.
Returns:
list[list[complex]]: a matrix of complex numbers
"""
num_basis = len(mat)
mat_complex = np.zeros((num_basis, num_basis), dtype=complex)
for i, vec in enumerate(mat):
mat_complex[i] = format_statevector(vec, decimals)
return mat_complex
| QISKit/qiskit-sdk-py | qiskit/result/postprocess.py | Python | apache-2.0 | 6,761 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This is a distutils script for creating distribution archives.
#
# Copyright (C) 2010 Kamil Páral <kamil.paral /at/ gmail /dot/ com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from setuptools import setup, find_packages
setup(name='jabber-roster',
version='0.1.1',
py_modules=['jabber_roster'],
entry_points = {
'console_scripts': ['jabber-roster = jabber_roster:main'],
},
install_requires=['xmpppy'],
author='Kamil Paral',
author_email='[email protected]',
description='Tool for listing Jabber roster contacts',
long_description='A simple Python tool for listing your Jabber roster contacts. You can use it to easily backup list of your buddies.',
keywords='Jabber XMPP roster contacts commandline',
license='GNU Affero GPL v3',
url='https://github.com/kparal/jabber-roster',
download_url='https://github.com/kparal/jabber-roster/downloads',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Natural Language :: English',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2',
'Topic :: Communications :: Chat',
'Topic :: Internet',
'Topic :: Utilities'
])
| kparal/jabber-roster | setup.py | Python | agpl-3.0 | 2,183 |
# -*- encoding: utf-8 -*-
# Copyright 2010 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
import sys
try:
from ast import PyCF_ONLY_AST
except ImportError:
PyCF_ONLY_AST = 1024
from setuptools import setup
def get_version():
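    # Compile preflight/__init__.py line by line to an AST (PyCF_ONLY_AST) and
    # return the string literal assigned to __version__, without importing the
    # package itself.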
return [compile(line, '', 'exec', PyCF_ONLY_AST).body[0].value.s
for line in open('preflight/__init__.py')
if line.startswith('__version__')][0]
tests_require = [
'mock > 0.6',
'gargoyle >= 0.6.0',
'pyquery',
]
if sys.version_info[:2] < (2, 7):
tests_require.append('unittest2')
setup(
name='django-preflight',
version=get_version(),
author='Lukasz Czyzykowski',
author_email='[email protected]',
description="Create a page for making sure all settings are correct.",
long_description=open('README').read(),
url='https://launchpad.net/django-preflight',
download_url='https://launchpad.net/django-preflight/+download',
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU Affero General Public License v3",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Topic :: Internet :: WWW/HTTP :: Site Management",
"Topic :: Software Development :: Quality Assurance",
],
license='AGPL-3',
packages=(
'preflight',
'preflight.management',
'preflight.management.commands',
),
package_data={
'preflight': ['templates/preflight/*.html'],
},
install_requires=[
'django >= 1.1',
],
tests_require=tests_require,
extras_require={
'docs': ['Sphinx'],
},
test_suite='preflight_example_project.run.tests',
)
| miing/mci_migo_packages_django-preflight | setup.py | Python | agpl-3.0 | 1,975 |
'''
Created on Oct 30, 2015
@author: kashefy
'''
from nose.tools import assert_false, assert_true, assert_is_instance, \
assert_equal, assert_greater, assert_in, assert_list_equal
import os
import tempfile
import shutil
import sys
import nideep.iow.file_system_utils as fs
from nideep.eval.log_utils import is_caffe_info_log
CURRENT_MODULE_PATH = os.path.abspath(sys.modules[__name__].__file__)
ROOT_PKG_PATH = os.path.dirname(CURRENT_MODULE_PATH)
TEST_DATA_DIRNAME = 'test_data'
def test_filter_is_img():
assert_false(fs.filter_is_img('foo.bar'))
assert_false(fs.filter_is_img('foo.png.bar'))
assert_true(fs.filter_is_img('foo.png'))
assert_true(fs.filter_is_img('foo.jpg'))
def test_filter_is_h5():
assert_false(fs.filter_is_h5('foo.bar'))
assert_false(fs.filter_is_h5('foo.h5.bar'))
assert_false(fs.filter_is_h5('foo.hdf'))
assert_false(fs.filter_is_h5('foo.h4'))
assert_false(fs.filter_is_h5('foo.hdf4'))
assert_true(fs.filter_is_h5('foo.h5'))
assert_true(fs.filter_is_h5('foo.hdf5'))
class TestFSUtils:
@classmethod
def setup_class(self):
self.dir_ = os.path.join(os.path.dirname(ROOT_PKG_PATH),
TEST_DATA_DIRNAME)
def test_gen_paths_no_filter(self):
flist = fs.gen_paths(self.dir_)
assert_is_instance(flist, list)
assert_greater(len(flist), 0)
def test_gen_paths_is_caffe_log(self):
flist = fs.gen_paths(self.dir_, is_caffe_info_log)
assert_is_instance(flist, list)
assert_equal(len(flist), 1)
assert_true('.log.' in flist[0] and '.INFO.' in flist[0])
def test_gen_paths_no_imgs_found(self):
flist = fs.gen_paths(self.dir_, fs.filter_is_img)
assert_equal(len(flist), 0)
def test_hash_file(self):
p = fs.gen_paths(self.dir_, is_caffe_info_log)[0]
h = fs.hashfile(p)
assert_is_instance(h, str)
assert_greater(len(h), 0)
class TestFNamePairs:
@classmethod
def setup_class(self):
self.dir_tmp = tempfile.mkdtemp()
self.dir_ = os.path.join(os.path.dirname(ROOT_PKG_PATH),
TEST_DATA_DIRNAME)
shutil.copytree(self.dir_, os.path.join(self.dir_tmp, 'subdir'))
@classmethod
def teardown_class(self):
shutil.rmtree(self.dir_tmp)
def test_fname_pairs(self):
a = ['foo1_a.txt', os.path.join('foo', 'bar_x.txt'), 'foo5.txt']
b = [os.path.join('oof', 'bar_x.txt'), 'foo5_b.txt', 'foo2_b.txt']
pairs = fs.fname_pairs(a, b)
for x, y in pairs:
assert_in(x, a)
assert_in(y, b)
assert_list_equal(pairs, [[os.path.join('foo', 'bar_x.txt'),
os.path.join('oof', 'bar_x.txt')],
['foo5.txt', 'foo5_b.txt'],
])
def test_fname_pairs_log_files(self):
a = fs.gen_paths(self.dir_, is_caffe_info_log)
b = fs.gen_paths(self.dir_tmp, is_caffe_info_log)
pairs = fs.fname_pairs(a, b)
for x, y in pairs:
assert_in(x, a)
assert_in(y, b)
| kashefy/caffe_sandbox | nideep/iow/test_file_system_utils.py | Python | bsd-2-clause | 3,184 |
import nose.tools as nt
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
from treeano.sandbox.nodes import prelu
fX = theano.config.floatX
def test_prelu_node_serialization():
tn.check_serialization(prelu.PReLUNode("a"))
def test_prelu_node():
network = tn.SequentialNode(
"s",
[tn.InputNode("i", shape=(1, 4)),
prelu.PReLUNode("p")]).network()
fn = network.function(["i"], ["p"])
x = np.array([[-1.0, -0.2, 0.2, 1.0]], dtype=fX)
ans = np.array([[-0.25, -0.05, 0.2, 1.0]], dtype=fX)
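    # negative inputs are scaled by the layer's initial alpha (0.25, judging by
    # the expected output); positive inputs pass through unchanged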
np.testing.assert_allclose(fn(x)[0],
ans)
| diogo149/treeano | treeano/sandbox/nodes/tests/prelu_test.py | Python | apache-2.0 | 666 |
'''Script used to test bucketlist responses and requests.'''
from rest_framework.authtoken.models import Token
from rest_framework.test import APIClient
from django.core.urlresolvers import reverse_lazy
from rest_framework import status
from rest_framework.test import APITestCase
from django.contrib.auth.models import User
class ApiHeaderAuthorization(APITestCase):
    '''Base class used to attach the auth header to all requests on setup.'''
fixtures = ['initial_fix']
def setUp(self):
#Include an appropriate `Authorization:` header on all requests.
token = Token.objects.get(user__username='samuel')
self.client = APIClient()
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
class ApiUserBucketlist(ApiHeaderAuthorization):
def test_user_can_view_bucketlist(self):
url= reverse_lazy('apibucketlist')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_user_can_create(self):
data = {'name': 'my bucketlist'}
url= reverse_lazy('apibucketlist')
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_user_can_create_error(self):
data = {'': ''}
url= reverse_lazy('apibucketlist')
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
class ApiUserNoBucketlist(APITestCase):
def setUp(self):
user = User.objects.create_user('lauren',
'[email protected]', '12345')
self.client = APIClient()
self.client.login(username='lauren', password='12345')
def test_user_has_no_bucketlist(self):
url= reverse_lazy('apibucketlist')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
class ApiUserBucketlistView(ApiHeaderAuthorization):
def test_getbucketlistby_id(self):
url= reverse_lazy('bucketlistdetail', kwargs={'pk':19})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_updatebucketlistby_id(self):
data = {'name': 'my updated bucketlist'}
url= reverse_lazy('bucketlistdetail', kwargs={'pk':19})
response = self.client.put(url, data)
self.assertEqual(response.status_code, 200)
wrongdata = {'': ''}
url= reverse_lazy('bucketlistdetail', kwargs={'pk':19})
response = self.client.put(url, wrongdata)
self.assertEqual(response.status_code, 200)
def test_deletedataby_id(self):
url= reverse_lazy('bucketlistdetail', kwargs={'pk':19})
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) | andela-sjames/django-bucketlist-application | bucketlistapp/bucketlistapi/tests/test_bucketlist.py | Python | gpl-3.0 | 2,836 |
#
# This file is a part of KNOSSOS.
#
# (C) Copyright 2007-2011
# Max-Planck-Gesellschaft zur Foerderung der Wissenschaften e.V.
#
# KNOSSOS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 of
# the License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
#
# For further information, visit http://www.knossostool.org or contact
# [email protected] or
# [email protected]
#
import math, re, glob, os
class KnossosConfig():
def __init__(self):
        self.xReg = re.compile(r'_x(\d*)')
        self.yReg = re.compile(r'_y(\d*)')
        self.zReg = re.compile(r'_z(\d*)')
        self.magReg = re.compile(r'_mag(\d*)([\\/]$|$)')
        self.expNameReg = re.compile(r'([^\\/]*)_x\d{4}_y\d{4}_z\d{4}.raw')
        self.namePattern = re.compile('experiment name \"(.*)\";')
        self.scaleXPattern = re.compile(r'scale x (.*\..*);')
        self.scaleYPattern = re.compile(r'scale y (.*\..*);')
        self.scaleZPattern = re.compile(r'scale z (.*\..*);')
self.boundaryXPattern = re.compile('boundary x (.*);')
self.boundaryYPattern = re.compile('boundary y (.*);')
self.boundaryZPattern = re.compile('boundary z (.*);')
self.magnificationPattern = re.compile('magnification (.*);')
def generate(self, path):
# try to read magnification factor from directory name
try:
magnification = self.magReg.search(path).groups()[0]
except AttributeError:
magnification = None
# read files in current directory
        files = glob.glob(str(path) + "/x????/y????/z????/*.raw")
try:
filesize = float(os.stat(files[0]).st_size)
except (IndexError, OSError):
raise DataError("Error determining file size")
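        # Datacubes are cubic (assumed one byte per voxel), so the edge length in
        # voxels is the cube root of the raw file size.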
edgeLength = int(round(math.pow(filesize, 1. / 3.), 0))
try:
name = self.expNameReg.search(files[0]).groups()[0]
except AttributeError:
raise DataError("Error matching file name")
xlen_datacubes, ylen_datacubes, zlen_datacubes = 0, 0, 0
for file in files:
try:
x = int(self.xReg.search(file).groups()[0])
y = int(self.yReg.search(file).groups()[0])
z = int(self.zReg.search(file).groups()[0])
except AttributeError:
raise DataError("Error matching file name")
if x > xlen_datacubes and x < 9999:
xlen_datacubes = x
if y > ylen_datacubes and y < 9999:
ylen_datacubes = y
if z > zlen_datacubes and z < 9999:
zlen_datacubes = z
xlen_px = (xlen_datacubes + 1) * edgeLength
ylen_px = (ylen_datacubes + 1) * edgeLength
zlen_px = (zlen_datacubes + 1) * edgeLength
return name, (xlen_px, ylen_px, zlen_px), magnification
def read(self, path):
kconfigpath = os.path.abspath(path + "/knossos.conf")
try:
kFile = open(kconfigpath)
except IOError:
try:
name, boundaries, magnification = self.generate(path)
except DataError:
raise
configInfo = [True,
name,
path,
(None, None, None),
boundaries,
magnification,
""]
return configInfo
else:
configText = kFile.read()
kFile.close()
namePatternResult = self.namePattern.search(configText)
scaleXPatternResult = self.scaleXPattern.search(configText)
scaleYPatternResult = self.scaleYPattern.search(configText)
scaleZPatternResult = self.scaleZPattern.search(configText)
boundaryXPatternResult = self.boundaryXPattern.search(configText)
boundaryYPatternResult = self.boundaryYPattern.search(configText)
boundaryZPatternResult = self.boundaryZPattern.search(configText)
magnificationPatternResult = self.magnificationPattern.search(configText)
try:
name = namePatternResult.groups()[0]
except (AttributeError, ValueError):
name = ""
try:
scaleX = float(scaleXPatternResult.groups()[0])
except (AttributeError, ValueError):
scaleX = None
try:
scaleY = float(scaleYPatternResult.groups()[0])
except (AttributeError, ValueError):
scaleY = None
try:
scaleZ = float(scaleZPatternResult.groups()[0])
except (AttributeError, ValueError):
scaleZ = None
try:
boundaryX = int(boundaryXPatternResult.groups()[0])
except (AttributeError, ValueError):
boundaryX = 0
try:
boundaryY = int(boundaryYPatternResult.groups()[0])
except (AttributeError, ValueError):
boundaryY = 0
try:
boundaryZ = int(boundaryZPatternResult.groups()[0])
except (AttributeError, ValueError):
boundaryZ = 0
try:
magnification = int(magnificationPatternResult.groups()[0])
except (AttributeError, ValueError):
magnification = None
# [is incomplete?, name, path, scales, boundary, magnification, original config file contents]
configInfo = [False,
name,
path,
(scaleX, scaleY, scaleZ),
(boundaryX, boundaryY, boundaryZ),
magnification,
configText]
return configInfo
def write(self, configInfo):
try:
source = configInfo["Source"]
name = configInfo["Name"]
scales = configInfo["Scales"]
boundaries = configInfo["Boundaries"]
path = configInfo["Path"]
magnification = configInfo["Magnification"]
except KeyError:
return False
if self.namePattern.search(source):
source = self.namePattern.sub("experiment name \"%s\";" % name, source)
else:
source = source + "\nexperiment name \"%s\";" % name
if self.scaleXPattern.search(source):
source = self.scaleXPattern.sub("scale x %s;" % str(float(scales[0])), source)
else:
source = source + "\nscale x %s;" % str(float(scales[0]))
if self.scaleYPattern.search(source):
source = self.scaleYPattern.sub("scale y %s;" % str(float(scales[1])), source)
else:
source = source + "\nscale y %s;" % str(float(scales[1]))
if self.scaleZPattern.search(source):
source = self.scaleZPattern.sub("scale z %s;" % str(float(scales[2])), source)
else:
source = source + "\nscale z %s;" % str(float(scales[2]))
if self.boundaryXPattern.search(source):
source = self.boundaryXPattern.sub("boundary x %d;" % boundaries[0], source)
else:
source = source + "\nboundary x %d;" % boundaries[0]
if self.boundaryYPattern.search(source):
source = self.boundaryYPattern.sub("boundary y %d;" % boundaries[1], source)
else:
source = source + "\nboundary y %d;" % boundaries[1]
if self.boundaryZPattern.search(source):
source = self.boundaryZPattern.sub("boundary z %d;" % boundaries[2], source)
else:
source = source + "\nboundary z %d;" % boundaries[2]
if self.magnificationPattern.search(source):
source = self.magnificationPattern.sub("magnification %d;\n" % magnification, source)
else:
source = source + "\nmagnification %d;" % magnification
confpath = os.path.abspath(path + "/knossos.conf")
try:
kFile = open(confpath, "w")
kFile.write(source)
kFile.close()
except IOError:
return False
def check(self, config):
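        """Return True if the config is incomplete, i.e. missing or empty required values."""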
try:
            if config["Name"] != "" \
               and config["Path"] != "" \
               and config["Scales"][0] \
               and config["Scales"][1] \
               and config["Scales"][2] \
               and config["Magnification"]:
return False
else:
return True
except KeyError:
return True
class DataError(Exception):
def __init__(self, errorstring):
self.errorstring = errorstring
def __str__(self):
return repr(self.errorstring)
| thorbenk/knossos-svn | tools/kconfig.py | Python | gpl-2.0 | 9,251 |
import time
import os
from selenium import webdriver
from selenium.common import exceptions
from selenium.webdriver.chrome.options import Options
chrome_options = Options()
chrome_options.add_argument("app-path=" + os.path.dirname(os.path.abspath(__file__)))
chrome_options.add_argument("ABC")
driver = webdriver.Chrome(executable_path=os.environ['CHROMEDRIVER'], chrome_options=chrome_options)
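# Give the packaged app a few seconds to start; the page must then contain an
# element with id "success", otherwise the test fails.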
try:
time.sleep(5)
assert(driver.find_element_by_id("success"))
except exceptions.WebDriverException:
assert(0)
finally:
driver.quit() | pdx1989/nw.js | test/remoting/nw-custom/rc4-lowercase-cmd-param(win)/test.py | Python | mit | 550 |
from datetime import datetime
from django.conf import settings
from django.contrib import messages
from django.shortcuts import redirect
from django.template.response import TemplateResponse
from django.utils.module_loading import import_string
import boto
from boto.s3.connection import OrdinaryCallingFormat
from celery.task import task
from import_export.admin import ExportMixin
from import_export.forms import ExportForm
ADMIN_EXPORT_TIMEOUT = 10 * 60
@task(soft_time_limit=ADMIN_EXPORT_TIMEOUT)
def async_data_export(file_format, values_list, qs_model, filename):
"""Task to export data from admin site and store it to S3."""
from django.contrib import admin
admin_obj = admin.site._registry[qs_model]
queryset = qs_model.objects.filter(id__in=values_list)
resource_class = admin_obj.get_export_resource_class()
data = resource_class().export(queryset)
export_data = file_format.export_data(data)
# Store file to AWS S3
kwargs = {
'aws_access_key_id': settings.AWS_ACCESS_KEY_ID,
'aws_secret_access_key': settings.AWS_SECRET_ACCESS_KEY,
# Required to avoid ssl issues when bucket contains dots
'calling_format': OrdinaryCallingFormat()
}
conn = boto.connect_s3(**kwargs)
bucket = conn.get_bucket(settings.MOZILLIANS_ADMIN_BUCKET)
key = bucket.new_key(filename)
key.set_contents_from_string(export_data)
class S3ExportMixin(ExportMixin):
def get_export_filename(self, file_format):
query_str = self.request.GET.urlencode().replace('=', '_')
if query_str == '':
query_str = 'All'
        date_str = datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
filename = '{model}-{filter}-{date}.{extension}'.format(
model=self.model.__name__, filter=query_str, date=date_str,
extension=file_format.get_extension())
return filename
def get_export_data(self, file_format, queryset):
"""Returns the id from the celery task spawned to export data to S3."""
kwargs = {
'file_format': file_format,
'values_list': list(queryset.values_list('id', flat=True)),
'qs_model': queryset.model,
'filename': self.get_export_filename(file_format)
}
return async_data_export.delay(**kwargs)
def export_action(self, request, *args, **kwargs):
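        """Spawn an asynchronous export task and notify the admin instead of streaming the file."""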
self.request = request
formats = self.get_export_formats()
form = ExportForm(formats, request.POST or None)
if form.is_valid():
file_format = formats[int(form.cleaned_data['file_format'])]()
queryset = self.get_export_queryset(request)
task_id = self.get_export_data(file_format, queryset)
filename = self.get_export_filename(file_format)
msg = 'Data export task spawned with id: {} and filename: {}'.format(task_id, filename)
messages.info(request, msg)
return redirect('admin:index')
context = {}
context.update(self.admin_site.each_context(request))
context['form'] = form
context['opts'] = self.model._meta
request.current_app = self.admin_site.name
return TemplateResponse(request, [self.export_template_name], context)
# Allow configuring admin export mixin in project settings in case we need to fallback to default
MozilliansAdminExportMixin = import_string(settings.ADMIN_EXPORT_MIXIN)
| fxa90id/mozillians | mozillians/common/mixins.py | Python | bsd-3-clause | 3,443 |
import os
import yaml
from twisted.internet.defer import inlineCallbacks
from juju.environment import environment
from juju.environment.config import EnvironmentsConfig, SAMPLE_CONFIG
from juju.environment.environment import Environment
from juju.environment.errors import EnvironmentsConfigError
from juju.errors import FileNotFound, FileAlreadyExists
from juju.state.environment import EnvironmentStateManager
from juju.lib.testing import TestCase
DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "data"))
SAMPLE_ENV = """
environments:
myfirstenv:
type: dummy
foo: bar
mysecondenv:
type: dummy
nofoo: 1
"""
SAMPLE_ORCHESTRA = """
environments:
sample:
type: orchestra
orchestra-server: somewhe.re
orchestra-user: user
orchestra-pass: pass
admin-secret: garden
acquired-mgmt-class: acquired
available-mgmt-class: available
storage-url: http://somewhereel.se
default-series: oneiric
"""
SAMPLE_MAAS = """
environments:
sample:
type: maas
maas-server: somewhe.re
maas-oauth: foo:bar:baz
admin-secret: garden
default-series: precise
"""
SAMPLE_LOCAL = """
ensemble: environments
environments:
sample:
type: local
admin-secret: sekret
default-series: oneiric
"""
class EnvironmentsConfigTestBase(TestCase):
@inlineCallbacks
def setUp(self):
yield super(EnvironmentsConfigTestBase, self).setUp()
release_path = os.path.join(DATA_DIR, "lsb-release")
self.patch(environment, "LSB_RELEASE_PATH", release_path)
self.old_home = os.environ.get("HOME")
self.tmp_home = self.makeDir()
self.change_environment(HOME=self.tmp_home, PATH=os.environ["PATH"])
self.default_path = os.path.join(self.tmp_home,
".juju/environments.yaml")
self.other_path = os.path.join(self.tmp_home,
".juju/other-environments.yaml")
self.config = EnvironmentsConfig()
def write_config(self, config_text, other_path=False):
if other_path:
path = self.other_path
else:
path = self.default_path
parent_name = os.path.dirname(path)
if not os.path.exists(parent_name):
os.makedirs(parent_name)
with open(path, "w") as file:
file.write(config_text)
# The following methods expect to be called *after* a subclass has set
# self.client.
def push_config(self, name, config):
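        """Write the given config to disk, load it, and store it via the state manager."""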
self.write_config(yaml.dump(config))
self.config.load()
esm = EnvironmentStateManager(self.client)
return esm.set_config_state(self.config, name)
@inlineCallbacks
def push_env_constraints(self, *constraint_strs):
esm = EnvironmentStateManager(self.client)
constraint_set = yield esm.get_constraint_set()
yield esm.set_constraints(constraint_set.parse(constraint_strs))
@inlineCallbacks
def push_default_config(self, with_constraints=True):
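        """Push a minimal dummy-provider config (and, optionally, empty constraints) into state."""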
config = {
"environments": {"firstenv": {
"type": "dummy", "storage-directory": self.makeDir()}}}
yield self.push_config("firstenv", config)
if with_constraints:
yield self.push_env_constraints()
class EnvironmentsConfigTest(EnvironmentsConfigTestBase):
def test_get_default_path(self):
self.assertEquals(self.config.get_default_path(), self.default_path)
def compare_config(self, config1, sample_config2):
config1 = yaml.load(config1)
config2 = yaml.load(
sample_config2 % config1["environments"]["sample"])
self.assertEqual(config1, config2)
def setup_ec2_credentials(self):
self.change_environment(
AWS_ACCESS_KEY_ID="foobar",
AWS_SECRET_ACCESS_KEY="secrat")
def test_load_with_nonexistent_default_path(self):
"""
Raise an error if load() is called without a path and the
default doesn't exist.
"""
try:
self.config.load()
except FileNotFound, error:
self.assertEquals(error.path, self.default_path)
else:
self.fail("FileNotFound not raised")
def test_load_with_nonexistent_custom_path(self):
"""
Raise an error if load() is called with non-existing path.
"""
path = "/non/existent/custom/path"
try:
self.config.load(path)
except FileNotFound, error:
self.assertEquals(error.path, path)
else:
self.fail("FileNotFound not raised")
def test_write_sample_environment_default_path(self):
"""
write_sample() should write a pre-defined sample configuration file.
"""
self.config.write_sample()
self.assertTrue(os.path.isfile(self.default_path))
with open(self.default_path) as file:
self.compare_config(file.read(), SAMPLE_CONFIG)
dir_path = os.path.dirname(self.default_path)
dir_stat = os.stat(dir_path)
self.assertEqual(dir_stat.st_mode & 0777, 0700)
stat = os.stat(self.default_path)
self.assertEqual(stat.st_mode & 0777, 0600)
def test_write_sample_contains_secret_key_and_control_bucket(self):
"""
write_sample() should write a pre-defined sample with an ec2 machine
provider type, a unique s3 control bucket, and an admin secret key.
"""
uuid_factory = self.mocker.replace("uuid.uuid4")
uuid_factory().hex
self.mocker.result("abc")
uuid_factory().hex
self.mocker.result("xyz")
self.mocker.replay()
self.config.write_sample()
self.assertTrue(os.path.isfile(self.default_path))
with open(self.default_path) as file:
config = yaml.load(file.read())
self.assertEqual(
config["environments"]["sample"]["type"], "ec2")
self.assertEqual(
config["environments"]["sample"]["control-bucket"],
"juju-abc")
self.assertEqual(
config["environments"]["sample"]["admin-secret"],
"xyz")
def test_write_sample_environment_with_default_path_and_existing_dir(self):
"""
write_sample() should not fail if the config directory already exists.
"""
os.mkdir(os.path.dirname(self.default_path))
self.config.write_sample()
self.assertTrue(os.path.isfile(self.default_path))
with open(self.default_path) as file:
self.compare_config(file.read(), SAMPLE_CONFIG)
def test_write_sample_environment_with_custom_path(self):
"""
write_sample() may receive an argument with a custom path.
"""
path = os.path.join(self.tmp_home, "sample-file")
self.config.write_sample(path)
self.assertTrue(os.path.isfile(path))
with open(path) as file:
self.compare_config(file.read(), SAMPLE_CONFIG)
def test_write_sample_wont_overwrite_existing_configuration(self):
"""
write_sample() must never overwrite an existing file.
"""
path = self.other_path
os.makedirs(os.path.dirname(path))
with open(path, "w") as file:
file.write("previous content")
try:
self.config.write_sample(path)
except FileAlreadyExists, error:
self.assertEquals(error.path, path)
else:
self.fail("FileAlreadyExists not raised")
def test_load_empty_environments(self):
"""
load() must raise an error if there are no enviroments defined
in the configuration file.
"""
# Use a different path to ensure the error message is right.
self.write_config("""
environments:
""", other_path=True)
try:
self.config.load(self.other_path)
except EnvironmentsConfigError, error:
self.assertEquals(str(error),
"Environments configuration error: %s: "
"environments: expected dict, got None"
% self.other_path)
else:
self.fail("EnvironmentsConfigError not raised")
def test_load_environments_with_wrong_type(self):
"""
load() must raise an error if the "environments:" option in
the YAML configuration file doesn't have a mapping underneath it.
"""
# Use a different path to ensure the error message is right.
self.write_config("""
environments:
- list
""", other_path=True)
try:
self.config.load(self.other_path)
except EnvironmentsConfigError, error:
self.assertEquals(str(error),
"Environments configuration error: %s: "
"environments: expected dict, got ['list']"
% self.other_path)
else:
self.fail("EnvironmentsConfigError not raised")
def test_wb_parse(self):
"""
We'll have an exception, and use mocker here to test the
implementation itself, because we don't want to repeat the
same tests for both parse() and load(), so we'll just verify
that one calls the other internally.
"""
mock = self.mocker.patch(self.config)
mock.parse(SAMPLE_ENV, self.other_path)
self.write_config(SAMPLE_ENV, other_path=True)
self.mocker.replay()
self.config.load(self.other_path)
def test_parse_errors_without_filename(self):
"""
parse() may receive None as the file path, in which case the
error should not mention it.
"""
# Use a different path to ensure the error message is right.
try:
self.config.parse("""
environments:
""")
except EnvironmentsConfigError, error:
self.assertEquals(str(error),
"Environments configuration error: "
"environments: expected dict, got None")
else:
self.fail("EnvironmentsConfigError not raised")
def test_get_environment_names(self):
"""
get_names() should return of the environments names contained
in the configuration file.
"""
self.write_config(SAMPLE_ENV)
self.config.load()
self.assertEquals(self.config.get_names(),
["myfirstenv", "mysecondenv"])
def test_get_non_existing_environment(self):
"""
Trying to get() a non-existing configuration name should return None.
"""
self.config.parse(SAMPLE_ENV)
self.assertEquals(self.config.get("non-existing"), None)
def test_load_and_get_environment(self):
"""
get() should return an Environment instance.
"""
self.write_config(SAMPLE_ENV)
self.config.load()
self.assertEquals(type(self.config.get("myfirstenv")), Environment)
def test_load_or_write_sample_with_non_existent_config(self):
"""
When an environment configuration does not exist, the method
load_or_write_sample() must write down a sample configuration
file, and raise an error to let the user know his request did
not work, and he should edit this file.
"""
try:
self.config.load_or_write_sample()
except EnvironmentsConfigError, error:
self.assertEquals(str(error),
"No environments configured. Please edit: %s" %
self.default_path)
self.assertEquals(error.sample_written, True)
with open(self.default_path) as file:
self.compare_config(file.read(), SAMPLE_CONFIG)
else:
self.fail("EnvironmentsConfigError not raised")
def test_environment_config_error_sample_written_defaults_to_false(self):
"""
The error raised by load_or_write_sample() has a flag to let the
calling site know if a sample file was actually written down or not.
It must default to false, naturally.
"""
error = EnvironmentsConfigError("message")
self.assertFalse(error.sample_written)
def test_load_or_write_sample_will_load(self):
"""
load_or_write_sample() must load the configuration file if it exists.
"""
self.write_config(SAMPLE_ENV)
self.config.load_or_write_sample()
self.assertTrue(self.config.get("myfirstenv"))
def test_get_default_with_single_environment(self):
"""
get_default() must return the one defined environment, when it's
indeed a single one.
"""
config = yaml.load(SAMPLE_ENV)
del config["environments"]["mysecondenv"]
self.write_config(yaml.dump(config))
self.config.load()
env = self.config.get_default()
self.assertEquals(env.name, "myfirstenv")
def test_get_default_with_named_default(self):
"""
get_default() must otherwise return the environment named
through the "default:" option.
"""
config = yaml.load(SAMPLE_ENV)
config["default"] = "mysecondenv"
self.write_config(yaml.dump(config))
self.config.load()
env = self.config.get_default()
self.assertEquals(env.name, "mysecondenv")
def test_default_is_schema_protected(self):
"""
The schema should mention the "default:" option as a string.
"""
config = yaml.load(SAMPLE_ENV)
config["default"] = 1
self.write_config(yaml.dump(config))
error = self.assertRaises(EnvironmentsConfigError, self.config.load)
self.assertEquals(
str(error),
"Environments configuration error: %s: "
"default: expected string, got 1" % self.default_path)
def test_get_default_with_named_but_missing_default(self):
"""
get_default() must raise an error if the environment named through
the "default:" option isn't found.
"""
config = yaml.load(SAMPLE_ENV)
config["default"] = "non-existent"
# Use a different path to ensure the error message is right.
self.write_config(yaml.dump(config), other_path=True)
self.config.load(self.other_path)
try:
self.config.get_default()
except EnvironmentsConfigError, error:
self.assertEquals(str(error),
"Default environment 'non-existent' was not found: "
+ self.other_path)
else:
self.fail("EnvironmentsConfigError not raised")
def test_get_default_without_computable_default(self):
"""
get_default() must raise an error if there are multiple defined
environments and no explicit default was defined.
"""
# Use a different path to ensure the error message is right.
self.write_config(SAMPLE_ENV, other_path=True)
self.config.load(self.other_path)
try:
self.config.get_default()
except EnvironmentsConfigError, error:
self.assertEquals(str(error),
"There are multiple environments and no explicit default "
"(set one explicitly?): " + self.other_path)
else:
self.fail("EnvironmentsConfigError not raised")
def test_ensure_provider_types_are_set(self):
"""
The schema should refuse to receive a configuration which
contains a machine provider configuration without any type
information.
"""
config = yaml.load(SAMPLE_ENV)
# Delete the type.
del config["environments"]["myfirstenv"]["type"]
self.write_config(yaml.dump(config), other_path=True)
try:
self.config.load(self.other_path)
except EnvironmentsConfigError, error:
self.assertEquals(str(error),
"Environments configuration error: %s: "
"environments.myfirstenv.type: required value not found"
% self.other_path)
else:
self.fail("EnvironmentsConfigError not raised")
def test_serialize(self):
"""The config should be able to serialize itself."""
self.write_config(SAMPLE_ENV)
self.config.load()
config = self.config.serialize()
serialized = yaml.load(SAMPLE_ENV)
for d in serialized["environments"].values():
d["dynamicduck"] = "magic"
self.assertEqual(yaml.load(config), serialized)
def test_serialize_environment(self):
"""
The config serialization can take an environment name, in
which case that environment is serialized in isolation
into a valid config file that can be loaded.
"""
self.write_config(SAMPLE_ENV)
self.config.load()
data = yaml.load(SAMPLE_ENV)
del data["environments"]["mysecondenv"]
data["environments"]["myfirstenv"]["dynamicduck"] = "magic"
self.assertEqual(
yaml.load(self.config.serialize("myfirstenv")),
data)
def test_load_serialized_environment(self):
"""
Serialize an environment, and then load it again
via an EnvironmentsConfig.
"""
self.write_config(SAMPLE_ENV)
self.config.load()
serialized = self.config.serialize("myfirstenv")
config = EnvironmentsConfig()
config.parse(serialized)
self.assertTrue(
isinstance(config.get("myfirstenv"), Environment))
self.assertFalse(
isinstance(config.get("mysecondenv"), Environment))
def test_serialize_unknown_environment(self):
"""Serializing an unknown environment raises an error."""
self.write_config(SAMPLE_ENV)
self.config.load()
self.assertRaises(
EnvironmentsConfigError,
self.config.serialize, "zebra")
def test_serialize_custom_variables_outside_environment(self):
"""Serializing captures custom variables out of the environment."""
data = yaml.load(SAMPLE_ENV)
data["default"] = "myfirstenv"
self.write_config(yaml.dump(data))
self.config.load()
serialized = self.config.serialize()
config = EnvironmentsConfig()
config.parse(serialized)
environment = config.get_default()
self.assertEqual(environment.name, "myfirstenv")
def test_invalid_configuration_data_raise_environment_config_error(self):
self.write_config("ZEBRA")
self.assertRaises(EnvironmentsConfigError, self.config.load)
def test_nonstring_configuration_data_raise_environment_config_error(self):
error = self.assertRaises(
EnvironmentsConfigError, self.config.parse, None)
self.assertIn(
"Configuration must be a string:\nNone", str(error))
def test_yaml_load_error_raise_environment_config_error(self):
self.write_config("\0")
error = self.assertRaises(EnvironmentsConfigError, self.config.load)
self.assertIn(
"special characters are not allowed", str(error))
def test_ec2_verifies_region(self):
# sample doesn't include credentials
self.setup_ec2_credentials()
self.config.write_sample()
with open(self.default_path) as file:
config = yaml.load(file.read())
config["environments"]["sample"]["region"] = "ap-southeast-2"
self.write_config(yaml.dump(config), other_path=True)
e = self.assertRaises(EnvironmentsConfigError,
self.config.load,
self.other_path)
self.assertIn("expected 'us-east-1', got 'ap-southeast-2'",
str(e))
with open(self.default_path) as file:
config = yaml.load(file.read())
# Authorized keys are required for environment serialization.
config["environments"]["sample"]["authorized-keys"] = "mickey"
config["environments"]["sample"]["region"] = "ap-southeast-1"
self.write_config(yaml.dump(config), other_path=True)
self.config.load(self.other_path)
data = self.config.get_default().get_serialization_data()
self.assertEqual(data["sample"]["region"], "ap-southeast-1")
def assert_ec2_sample_config(self, delete_key):
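        """Deleting the given key from the sample config must raise a required-value error."""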
self.config.write_sample()
with open(self.default_path) as file:
config = yaml.load(file.read())
del config["environments"]["sample"][delete_key]
self.write_config(yaml.dump(config), other_path=True)
try:
self.config.load(self.other_path)
except EnvironmentsConfigError, error:
self.assertEquals(
str(error),
"Environments configuration error: %s: "
"environments.sample.%s: required value not found"
% (self.other_path, delete_key))
else:
self.fail("Did not properly require " + delete_key)
def test_ec2_sample_config_without_admin_secret(self):
self.assert_ec2_sample_config("admin-secret")
def test_ec2_sample_config_without_default_series(self):
self.assert_ec2_sample_config("default-series")
def test_ec2_sample_config_without_control_buckets(self):
self.assert_ec2_sample_config("control-bucket")
def test_ec2_verifies_placement(self):
# sample doesn't include credentials
self.setup_ec2_credentials()
self.config.write_sample()
with open(self.default_path) as file:
config = yaml.load(file.read())
config["environments"]["sample"]["placement"] = "random"
self.write_config(yaml.dump(config), other_path=True)
e = self.assertRaises(EnvironmentsConfigError,
self.config.load,
self.other_path)
self.assertIn("expected 'unassigned', got 'random'",
str(e))
with open(self.default_path) as file:
config = yaml.load(file.read())
# Authorized keys are required for environment serialization.
config["environments"]["sample"]["authorized-keys"] = "mickey"
config["environments"]["sample"]["placement"] = "local"
self.write_config(yaml.dump(config), other_path=True)
self.config.load(self.other_path)
data = self.config.get_default().get_serialization_data()
self.assertEqual(data["sample"]["placement"], "local")
def test_ec2_respects_default_series(self):
# sample doesn't include credentials
self.setup_ec2_credentials()
self.config.write_sample()
with open(self.default_path) as f:
config = yaml.load(f.read())
config["environments"]["sample"]["default-series"] = "astounding"
self.write_config(yaml.dump(config), other_path=True)
self.config.load(self.other_path)
provider = self.config.get_default().get_machine_provider()
self.assertEqual(provider.config["default-series"], "astounding")
def test_ec2_respects_ssl_hostname_verification(self):
self.setup_ec2_credentials()
self.config.write_sample()
with open(self.default_path) as f:
config = yaml.load(f.read())
config["environments"]["sample"]["ssl-hostname-verification"] = True
self.write_config(yaml.dump(config), other_path=True)
self.config.load(self.other_path)
provider = self.config.get_default().get_machine_provider()
self.assertEqual(provider.config["ssl-hostname-verification"], True)
def test_orchestra_schema_requires(self):
requires = (
"type orchestra-server orchestra-user orchestra-pass "
"admin-secret acquired-mgmt-class available-mgmt-class "
"default-series").split()
for require in requires:
config = yaml.load(SAMPLE_ORCHESTRA)
del config["environments"]["sample"][require]
self.write_config(yaml.dump(config), other_path=True)
try:
self.config.load(self.other_path)
except EnvironmentsConfigError as error:
self.assertEquals(str(error),
"Environments configuration error: %s: "
"environments.sample.%s: "
"required value not found"
% (self.other_path, require))
else:
self.fail("Did not properly require %s when type == orchestra"
% require)
def test_orchestra_respects_default_series(self):
config = yaml.load(SAMPLE_ORCHESTRA)
config["environments"]["sample"]["default-series"] = "magnificent"
self.write_config(yaml.dump(config), other_path=True)
self.config.load(self.other_path)
provider = self.config.get_default().get_machine_provider()
self.assertEqual(provider.config["default-series"], "magnificent")
def test_orchestra_verifies_placement(self):
config = yaml.load(SAMPLE_ORCHESTRA)
config["environments"]["sample"]["placement"] = "random"
self.write_config(yaml.dump(config), other_path=True)
e = self.assertRaises(
EnvironmentsConfigError, self.config.load, self.other_path)
self.assertIn("expected 'unassigned', got 'random'",
str(e))
config["environments"]["sample"]["placement"] = "local"
self.write_config(yaml.dump(config), other_path=True)
self.config.load(self.other_path)
data = self.config.get_default().placement
self.assertEqual(data, "local")
def test_maas_schema_requires(self):
requires = "maas-server maas-oauth admin-secret default-series".split()
for require in requires:
config = yaml.load(SAMPLE_MAAS)
del config["environments"]["sample"][require]
self.write_config(yaml.dump(config), other_path=True)
try:
self.config.load(self.other_path)
except EnvironmentsConfigError as error:
self.assertEquals(str(error),
"Environments configuration error: %s: "
"environments.sample.%s: "
"required value not found"
% (self.other_path, require))
else:
self.fail("Did not properly require %s when type == maas"
% require)
def test_maas_default_series(self):
config = yaml.load(SAMPLE_MAAS)
config["environments"]["sample"]["default-series"] = "magnificent"
self.write_config(yaml.dump(config), other_path=True)
e = self.assertRaises(
EnvironmentsConfigError, self.config.load, self.other_path)
self.assertIn(
"environments.sample.default-series: expected 'precise', got "
"'magnificent'",
str(e))
def test_maas_verifies_placement(self):
config = yaml.load(SAMPLE_MAAS)
config["environments"]["sample"]["placement"] = "random"
self.write_config(yaml.dump(config), other_path=True)
e = self.assertRaises(
EnvironmentsConfigError, self.config.load, self.other_path)
self.assertIn("expected 'unassigned', got 'random'",
str(e))
config["environments"]["sample"]["placement"] = "local"
self.write_config(yaml.dump(config), other_path=True)
self.config.load(self.other_path)
data = self.config.get_default().placement
self.assertEqual(data, "local")
def test_lxc_requires_data_dir(self):
"""lxc dev only supports local placement."""
config = yaml.load(SAMPLE_LOCAL)
self.write_config(yaml.dump(config), other_path=True)
error = self.assertRaises(
EnvironmentsConfigError, self.config.load, self.other_path)
self.assertIn("data-dir: required value not found", str(error))
def test_lxc_verifies_placement(self):
"""lxc dev only supports local placement."""
config = yaml.load(SAMPLE_LOCAL)
config["environments"]["sample"]["placement"] = "unassigned"
self.write_config(yaml.dump(config), other_path=True)
error = self.assertRaises(
EnvironmentsConfigError, self.config.load, self.other_path)
self.assertIn("expected 'local', got 'unassigned'", str(error))
| anbangr/trusted-juju | juju/environment/tests/test_config.py | Python | agpl-3.0 | 28,757 |
# -*- coding: utf-8 -*-
#
# pytity documentation build configuration file, created by
# sphinx-quickstart on Sat Jan 24 22:22:46 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pytity'
copyright = u'2015, Marien Fressinaud'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pytitydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pytity.tex', u'pytity Documentation',
u'Marien Fressinaud', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pytity', u'pytity Documentation',
[u'Marien Fressinaud'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pytity', u'pytity Documentation',
u'Marien Fressinaud', 'pytity', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| marienfressinaud/pytity | doc/conf.py | Python | mit | 8,227 |
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from core.domain import exp_domain
from core.platform import models
from core.tests import test_utils
(question_models,) = models.Registry.import_models([models.NAMES.question])
class QuestionModelUnitTests(test_utils.GenericTestBase):
"""Tests the QuestionModel class."""
def test_create_question(self):
state = exp_domain.State.create_default_state('ABC')
question_data = state.to_dict()
question_data_schema_version = 1
language_code = 'en'
question_model = question_models.QuestionModel.create(
question_data, question_data_schema_version,
language_code)
self.assertEqual(question_model.question_data, question_data)
self.assertEqual(
question_model.question_data_schema_version,
question_data_schema_version)
self.assertEqual(question_model.language_code, language_code)
class QuestionSkillLinkModelUnitTests(test_utils.GenericTestBase):
"""Tests the QuestionSkillLinkModel class."""
def test_create_question_skill_link(self):
question_id = 'A Test Question Id'
skill_id = 'A Test Skill Id'
questionskilllink_model = question_models.QuestionSkillLinkModel.create(
question_id, skill_id)
self.assertEqual(questionskilllink_model.question_id, question_id)
self.assertEqual(questionskilllink_model.skill_id, skill_id)
| AllanYangZhou/oppia | core/storage/question/gae_models_test.py | Python | apache-2.0 | 2,014 |
# -*- coding: utf-8 -*-
"""
pages.py
~~~~~~
:copyright: (c) 2014 by @zizzamia
:license: BSD (See LICENSE for details)
"""
from flask import Blueprint, request, g
# Imports inside Bombolone
from bombolone import core
import bombolone.core.pages  # ensures the core.pages submodule used below is loaded
from bombolone.core.utils import jsonify, set_message
from bombolone.core.pages import Pages
from bombolone.decorators import authentication, check_rank, get_hash
api_pages = Blueprint('api_pages', __name__)
@api_pages.route('/api/1.0/pages/list.json')
@authentication
@check_rank(80)
@get_hash('pages')
def overview():
""" List all the documents, each has a name
that identifies it, and an hash map. """
data = core.pages.get_list(sorted_by='name')
data = set_message(data)
return jsonify(data)
@api_pages.route('/api/1.0/pages/get.json')
@authentication
@check_rank(80)
@get_hash('pages')
def get():
""" """
page_id = request.args.get("_id", None)
data = core.pages.get(page_id=page_id)
data = set_message(data)
return jsonify(data)
@api_pages.route('/api/1.0/pages/create.json', methods=['POST'])
@authentication
@check_rank(10)
@get_hash('pages')
def new():
""" Create a new document within the hash table. """
params = request.json
data = core.pages.create(params=params, my_rank=g.my['rank'])
data = set_message(data)
return jsonify(data)
@api_pages.route('/api/1.0/pages/update.json', methods=['POST'])
@authentication
@check_rank(80)
@get_hash('pages')
def update():
""" """
params = request.json
data = core.pages.update(params=params)
data = set_message(data)
return jsonify(data)
@api_pages.route('/api/1.0/pages/remove.json')
@authentication
@check_rank(10)
def remove():
""" This method removes an hash map.
:param _id: MongoDB ObjectId """
page_id = request.args.get("_id", None)
data = core.pages.remove(page_id=page_id)
data = set_message(data)
return jsonify(data)
| Opentaste/bombolone | bombolone/api/pages.py | Python | bsd-3-clause | 1,914 |
../../../../../../share/pyshared/ubuntuone-storage-protocol/ubuntuone/storageprotocol/delta.py | Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/python2.7/dist-packages/ubuntuone-storage-protocol/ubuntuone/storageprotocol/delta.py | Python | gpl-3.0 | 94 |
# Copyright (c) 2015 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from mock import Mock
from pytest import fixture
from uber_rides.session import OAuth2Credential
from uber_rides.session import Session
from uber_rides.utils import auth
from uber_rides.utils import http
CLIENT_ID = 'clientID-28dh1'
CLIENT_SECRET = 'clientSecret-hv783s'
SERVER_TOKEN = 'serverToken-Y4lb2'
ACCESS_TOKEN = 'accessToken-34f21'
EXPIRES_IN_SECONDS = 3000
REFRESH_TOKEN = 'refreshToken-vsh91'
SCOPES_STRING = 'profile history'
SCOPES_SET = {'profile', 'history'}
REDIRECT_URL = 'https://developer.uber.com/my-redirect_url'
@fixture
def server_token_session():
"""Create a Session with Server Token."""
return Session(
server_token=SERVER_TOKEN,
)
@fixture
def authorization_code_grant_session():
"""Create a Session from Auth Code Grant Credential."""
oauth2credential = OAuth2Credential(
client_id=None,
redirect_url=None,
access_token=ACCESS_TOKEN,
expires_in_seconds=EXPIRES_IN_SECONDS,
scopes=SCOPES_SET,
grant_type=auth.AUTHORIZATION_CODE_GRANT,
client_secret=None,
refresh_token=REFRESH_TOKEN,
)
return Session(oauth2credential=oauth2credential)
@fixture
def implicit_grant_session():
"""Create a Session from Implicit Grant Credential."""
oauth2credential = OAuth2Credential(
client_id=None,
redirect_url=None,
access_token=ACCESS_TOKEN,
expires_in_seconds=EXPIRES_IN_SECONDS,
scopes=SCOPES_SET,
grant_type=auth.IMPLICIT_GRANT,
client_secret=None,
refresh_token=None,
)
return Session(oauth2credential=oauth2credential)
@fixture
def client_credential_grant_session():
"""Create a Session from Client Credential Grant."""
oauth2credential = OAuth2Credential(
client_id=None,
redirect_url=None,
access_token=ACCESS_TOKEN,
expires_in_seconds=EXPIRES_IN_SECONDS,
scopes=SCOPES_SET,
grant_type=auth.CLIENT_CREDENTIAL_GRANT,
client_secret=None,
refresh_token=None,
)
return Session(oauth2credential=oauth2credential)
@fixture
def authorization_code_response():
"""Response after Authorization Code Access Request."""
mock_response = Mock(
status_code=http.STATUS_OK,
)
response_json = {
'access_token': ACCESS_TOKEN,
'expires_in': EXPIRES_IN_SECONDS,
'scope': SCOPES_STRING,
'refresh_token': REFRESH_TOKEN,
}
mock_response.json = Mock(return_value=response_json)
return mock_response
@fixture
def client_credentials_response():
"""Response after Client Credentials Access Request."""
mock_response = Mock(
status_code=http.STATUS_OK,
)
response_json = {
'access_token': ACCESS_TOKEN,
'expires_in': EXPIRES_IN_SECONDS,
'scope': SCOPES_STRING,
}
mock_response.json = Mock(return_value=response_json)
return mock_response
def test_server_token_session_initialized(server_token_session):
"""Confirm Session with Server Token initialized correctly."""
assert server_token_session.server_token == SERVER_TOKEN
assert server_token_session.token_type == auth.SERVER_TOKEN_TYPE
assert server_token_session.oauth2credential is None
def test_oauth2_session_initialized(authorization_code_grant_session):
"""Confirm Session with OAuth2Credential initialized correctly."""
assert authorization_code_grant_session.server_token is None
assert authorization_code_grant_session.token_type == auth.OAUTH_TOKEN_TYPE
oauth2 = authorization_code_grant_session.oauth2credential
assert oauth2.access_token == ACCESS_TOKEN
assert oauth2.scopes == SCOPES_SET
assert oauth2.grant_type == auth.AUTHORIZATION_CODE_GRANT
assert oauth2.refresh_token is REFRESH_TOKEN
def test_new_authorization_code_grant_session_is_not_stale(
authorization_code_grant_session,
):
"""Confirm that a new Session from Auth Code Grant is not stale."""
assert authorization_code_grant_session.server_token is None
assert authorization_code_grant_session.oauth2credential
assert authorization_code_grant_session.token_type == auth.OAUTH_TOKEN_TYPE
assert not authorization_code_grant_session.oauth2credential.is_stale()
def test_new_implicit_grant_session_is_not_stale(implicit_grant_session):
"""Confirm that a new Session from Implicit Grant is not stale."""
assert implicit_grant_session.server_token is None
assert implicit_grant_session.oauth2credential
assert implicit_grant_session.token_type == auth.OAUTH_TOKEN_TYPE
assert not implicit_grant_session.oauth2credential.is_stale()
def test_new_client_credential_session_is_not_stale(
client_credential_grant_session,
):
"""Confirm that a new Session from Client Credential Grant is not stale."""
assert client_credential_grant_session.server_token is None
assert client_credential_grant_session.oauth2credential
assert client_credential_grant_session.token_type == auth.OAUTH_TOKEN_TYPE
assert not client_credential_grant_session.oauth2credential.is_stale()
def test_old_authorization_code_grant_session_is_stale(
authorization_code_grant_session
):
"""Confirm that an old Session from Auth Code Grant is stale."""
authorization_code_grant_session.oauth2credential.expires_in_seconds = 1
assert authorization_code_grant_session.oauth2credential.is_stale()
def test_old_implicit_grant_session_is_stale(implicit_grant_session):
"""Confirm that an old Session from Implicit Grant is stale."""
implicit_grant_session.oauth2credential.expires_in_seconds = 1
assert implicit_grant_session.oauth2credential.is_stale()
def test_old_client_credential_session_is_stale(
client_credential_grant_session,
):
"""Confirm that an old Session from Client Credential Grant is stale."""
client_credential_grant_session.oauth2credential.expires_in_seconds = 1
assert client_credential_grant_session.oauth2credential.is_stale()
def test_make_session_from_authorization_code_response(
authorization_code_response,
):
"""Test classmethod to build OAuth2Credential from HTTP Response."""
oauth2credential = OAuth2Credential.make_from_response(
response=authorization_code_response,
grant_type=auth.AUTHORIZATION_CODE_GRANT,
client_id=CLIENT_ID,
client_secret=CLIENT_SECRET,
redirect_url=REDIRECT_URL,
)
assert oauth2credential.access_token == ACCESS_TOKEN
assert oauth2credential.scopes == SCOPES_SET
assert oauth2credential.grant_type == auth.AUTHORIZATION_CODE_GRANT
assert oauth2credential.refresh_token == REFRESH_TOKEN
assert oauth2credential.client_id == CLIENT_ID
assert oauth2credential.client_secret == CLIENT_SECRET
assert oauth2credential.redirect_url == REDIRECT_URL
def test_make_session_from_client_credentials_response(
client_credentials_response,
):
"""Test classmethod to build OAuth2Credential from HTTP Response."""
oauth2credential = OAuth2Credential.make_from_response(
response=client_credentials_response,
grant_type=auth.CLIENT_CREDENTIAL_GRANT,
client_id=CLIENT_ID,
client_secret=CLIENT_SECRET,
)
assert oauth2credential.access_token == ACCESS_TOKEN
assert oauth2credential.scopes == SCOPES_SET
assert oauth2credential.grant_type == auth.CLIENT_CREDENTIAL_GRANT
assert oauth2credential.refresh_token is None
assert oauth2credential.client_id == CLIENT_ID
assert oauth2credential.client_secret == CLIENT_SECRET
assert oauth2credential.redirect_url is None
| ianmabie/uberpy | venv/lib/python2.7/site-packages/tests/test_session.py | Python | mit | 8,880 |
from src.util import *
import os
from nose.tools import *
filename = "basic"
def removeFiles():
os.remove(filename)
@with_setup(teardown = removeFiles)
def testBasicInstaParseFile():
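    # write() appends to the current line while writeLine() also terminates it, so
    # the saved file should contain "abcdef" followed by "ghijkl".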
simpleFile = InstaParseFile(filename)
simpleFile.write("abc")
simpleFile.save()
simpleFile.writeLine("def")
simpleFile.writeLine("ghijkl")
simpleFile.save()
resultFile = open( filename, "r" )
assert_equal( resultFile.readline().strip(), "abcdef" )
assert_equal( resultFile.readline().strip(), "ghijkl" )
resultFile.close()
@with_setup(teardown = removeFiles)
def testInstaParseFileComments():
simpleFile = InstaParseFile(filename)
simpleFile.writeLine("abc")
simpleFile.comment("The above is an abc.")
simpleFile.save()
resultFile = open( filename, "r" )
assert_equal( resultFile.readline().strip(), "abc" )
assert_equal( resultFile.readline().strip(), simpleFile.commentString + " The above is an abc." )
resultFile.close()
@with_setup(teardown = removeFiles)
def testInstaParseImport():
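    # Import lines are written at the top of the generated file regardless of the
    # order in which they were added.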
simpleFile = InstaParseFile(filename)
simpleFile.writeLine("abc")
simpleFile.writeImportLine("import a simple library")
simpleFile.save()
resultFile = open( filename, "r" )
assert_equal( resultFile.readline().strip(), "import a simple library" )
assert_equal( resultFile.readline().strip(), "abc" )
resultFile.close()
@with_setup(teardown = removeFiles)
def testInstaParseWrite():
simpleFile = InstaParseFile(filename)
simpleFile.indent()
simpleFile.write("abc")
simpleFile.write("def")
simpleFile.writeLine("ghi")
simpleFile.write("lmn")
simpleFile.writeNewline()
simpleFile.write("opq")
simpleFile.save()
resultFile = open( filename, "r" )
assert_equal( resultFile.readline().strip(), "abcdefghi" )
assert_equal( resultFile.readline().strip(), "lmn" )
assert_equal( resultFile.readline().strip(), "opq" )
resultFile.close()
| ImpGuard/instaparse | tests/util_test.py | Python | mit | 1,973 |
import unittest
from golem.network.p2p.node import Node
def is_ip_address(address):
"""
Check if @address is correct IP address
:param address: Address to be checked
:return: True if is correct, false otherwise
"""
from ipaddress import ip_address, AddressValueError
try:
# will raise error in case of incorrect address
ip_address(unicode(address))
return True
except (ValueError, AddressValueError):
return False
class TestNode(unittest.TestCase):
def test_str(self):
n = Node(node_name="Blabla", key="ABC")
self.assertNotIn("at", str(n))
self.assertNotIn("at", "{}".format(n))
self.assertIn("Blabla", str(n))
self.assertIn("Blabla", "{}".format(n))
self.assertIn("ABC", str(n))
self.assertIn("ABC", "{}".format(n))
def test_collect_network_info(self):
""" Test configuring Node object """
node = Node()
node.collect_network_info()
assert is_ip_address(node.pub_addr)
assert is_ip_address(node.prv_addr)
for address in node.prv_addresses:
assert is_ip_address(address)
| Radagast-red/golem | tests/golem/network/p2p/test_node.py | Python | gpl-3.0 | 1,163 |
# Copyright 2010 VPAC
# Copyright 2014-2021 Marcus Furlong <[email protected]>
#
# This file is part of Patchman.
#
# Patchman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Patchman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Patchman If not, see <http://www.gnu.org/licenses/>.
from django.utils.safestring import mark_safe
from django.db.models.query import QuerySet
from operator import itemgetter
def get_query_string(qs):
newqs = ['{0!s}={1!s}'.format(k, v) for k, v in list(qs.items())]
return '?' + '&'.join(newqs).replace(' ', '%20')
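# Illustrative example of get_query_string (hypothetical input, not from the
# original source): spaces are percent-encoded, other characters pass through.
#     get_query_string({'status': 'active', 'os': 'CentOS 7'})
#     # -> '?status=active&os=CentOS%207'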
class Filter(object):
def __init__(self, request, name, filters, header=''):
if header == '':
self.header = name
else:
self.header = header
if isinstance(filters, tuple):
filters = dict(filters)
if isinstance(filters, QuerySet):
f = {}
for i in filters:
if isinstance(i, str):
f[str(i)] = str(i)
else:
f[i.pk] = str(i)
filters = f
self.name = name
self.filters = filters
self.selected = None
if self.name in request.GET:
self.selected = request.GET[self.name]
def output(self, qs):
if self.name in qs:
del(qs[self.name])
output = '<div class="panel panel-default">\n'
output += '<div class="panel-heading">'
output += '{0!s}</div>\n'.format(self.header.replace('_', ' '))
output += '<div class="panel-body">\n'
output += '<div class="list-group list-group-info">\n'
output += '<a href="{0!s}" '.format(get_query_string(qs))
output += 'class="list-group-item'
if self.selected is None:
output += ' list-group-item-success'
output += '">all</a>\n'
filters = sorted(iter(self.filters.items()), key=itemgetter(1))
for k, v in filters:
style = ''
if str(self.selected) == str(k):
style = 'list-group-item-success'
qs[self.name] = k
output += '<a href="{0!s}" class='.format(get_query_string(qs))
output += '"list-group-item {0!s}">{1!s}</a>\n'.format(style, v)
output += '</div></div></div>'
return output
class FilterBar(object):
def __init__(self, request, filter_list):
self.request = request
self.filter_list = filter_list
raw_qs = request.META.get('QUERY_STRING', '')
qs = {}
if raw_qs:
for i in raw_qs.replace('?', '').split('&'):
if i:
k, v = i.split('=')
if k != 'page':
qs[k] = v
for f in self.filter_list:
if f.name in self.request.GET:
qs[f.name] = self.request.GET[f.name]
self.qs = qs
def output(self):
output = ''
for f in self.filter_list:
output += f.output(self.qs.copy())
return output
def __str__(self):
return mark_safe(self.output())
| furlongm/patchman | util/filterspecs.py | Python | gpl-3.0 | 3,552 |
# Implementation of RAKE - Rapid Automatic Keyword Extraction algorithm
# as described in:
# Rose, S., D. Engel, N. Cramer, and W. Cowley (2010).
# Automatic keyword extraction from individual documents.
# In M. W. Berry and J. Kogan (Eds.), Text Mining: Applications and Theory. John Wiley and Sons, Ltd.
#
# NOTE: The original code (from https://github.com/aneesha/RAKE)
# has been extended by a_medelyan (zelandiya)
# with a set of heuristics to decide whether a phrase is an acceptable candidate
# as well as the ability to set frequency and phrase length parameters
# important when dealing with longer documents
from __future__ import absolute_import
from __future__ import print_function
import re
import operator
import six
from six.moves import range
debug = False
test = False
def is_number(s):
try:
float(s) if '.' in s else int(s)
return True
except ValueError:
return False
def load_stop_words(stop_word_file):
"""
Utility function to load stop words from a file and return as a list of words
@param stop_word_file Path and file name of a file containing stop words.
@return list A list of stop words.
"""
stop_words = []
for line in open(stop_word_file):
if line.strip()[0:1] != "#":
for word in line.split(): # in case more than one per line
stop_words.append(word)
return stop_words
def separate_words(text, min_word_return_size):
"""
    Utility function to return a list of all words that have a length greater than a specified number of characters.
@param text The text that must be split in to words.
@param min_word_return_size The minimum no of characters a word must have to be included.
"""
splitter = re.compile('[^a-zA-Z0-9_\\+\\-/]')
words = []
for single_word in splitter.split(text):
current_word = single_word.strip().lower()
#leave numbers in phrase, but don't count as words, since they tend to invalidate scores of their phrases
if len(current_word) > min_word_return_size and current_word != '' and not is_number(current_word):
words.append(current_word)
return words
def split_sentences(text):
"""
Utility function to return a list of sentences.
@param text The text that must be split in to sentences.
"""
sentence_delimiters = re.compile(u'[\\[\\]\n.!?,;:\t\\-\\"\\(\\)\\\'\u2019\u2013]')
sentences = sentence_delimiters.split(text)
return sentences
def build_stop_word_regex(stop_word_file_path):
stop_word_list = load_stop_words(stop_word_file_path)
stop_word_regex_list = []
for word in stop_word_list:
word_regex = '\\b' + word + '\\b'
stop_word_regex_list.append(word_regex)
stop_word_pattern = re.compile('|'.join(stop_word_regex_list), re.IGNORECASE)
return stop_word_pattern
def generate_candidate_keywords(sentence_list, stopword_pattern, min_char_length=1, max_words_length=5):
phrase_list = []
for s in sentence_list:
tmp = re.sub(stopword_pattern, '|', s.strip())
phrases = tmp.split("|")
for phrase in phrases:
phrase = phrase.strip().lower()
if phrase != "" and is_acceptable(phrase, min_char_length, max_words_length):
phrase_list.append(phrase)
return phrase_list
def is_acceptable(phrase, min_char_length, max_words_length):
# a phrase must have a min length in characters
if len(phrase) < min_char_length:
return 0
# a phrase must have a max number of words
words = phrase.split()
if len(words) > max_words_length:
return 0
digits = 0
alpha = 0
for i in range(0, len(phrase)):
if phrase[i].isdigit():
digits += 1
elif phrase[i].isalpha():
alpha += 1
# a phrase must have at least one alpha character
if alpha == 0:
return 0
# a phrase must have more alpha than digits characters
if digits > alpha:
return 0
return 1
def calculate_word_scores(phraseList):
word_frequency = {}
word_degree = {}
for phrase in phraseList:
word_list = separate_words(phrase, 0)
word_list_length = len(word_list)
word_list_degree = word_list_length - 1
#if word_list_degree > 3: word_list_degree = 3 #exp.
for word in word_list:
word_frequency.setdefault(word, 0)
word_frequency[word] += 1
word_degree.setdefault(word, 0)
word_degree[word] += word_list_degree #orig.
#word_degree[word] += 1/(word_list_length*1.0) #exp.
for item in word_frequency:
word_degree[item] = word_degree[item] + word_frequency[item]
    # Calculate word scores: score(w) = deg(w) / freq(w)
word_score = {}
for item in word_frequency:
word_score.setdefault(item, 0)
word_prescore = word_degree[item] / (word_frequency[item] * 1.0) #orig.
word_score[item] = round(word_prescore, 5) #orig.
# word_prescore = word_frequency[item]/(word_degree[item] * 1.0) #exp.
# word_score[item] = word_prescore #exp
return word_score
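# Worked example (illustrative, not from the original source): for the phrase
# list ["linear constraints", "linear diophantine equations"]:
#     freq(linear) = 2, deg(linear) = 1 + 2 + freq = 5  ->  score(linear) = 5 / 2 = 2.5
#     freq(equations) = 1, deg(equations) = 2 + 1 = 3   ->  score(equations) = 3.0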
def generate_candidate_keyword_scores(phrase_list, word_score, min_keyword_frequency=1):
keyword_candidates = {}
for phrase in phrase_list:
if min_keyword_frequency > 1:
if phrase_list.count(phrase) < min_keyword_frequency:
continue
keyword_candidates.setdefault(phrase, 0)
word_list = separate_words(phrase, 0)
candidate_score = 0
for word in word_list:
candidate_score += word_score[word]
keyword_candidates[phrase] = candidate_score
return keyword_candidates
class Rake(object):
def __init__(self, stop_words_path, min_char_length=1, max_words_length=5, min_keyword_frequency=1):
self.__stop_words_path = stop_words_path
self.__stop_words_pattern = build_stop_word_regex(stop_words_path)
self.__min_char_length = min_char_length
self.__max_words_length = max_words_length
self.__min_keyword_frequency = min_keyword_frequency
def run(self, text):
sentence_list = split_sentences(text)
phrase_list = generate_candidate_keywords(sentence_list, self.__stop_words_pattern, self.__min_char_length, self.__max_words_length)
word_scores = calculate_word_scores(phrase_list)
keyword_candidates = generate_candidate_keyword_scores(phrase_list, word_scores, self.__min_keyword_frequency)
sorted_keywords = sorted(six.iteritems(keyword_candidates), key=operator.itemgetter(1), reverse=True)
return sorted_keywords
if test:
text = "Compatibility of systems of linear constraints over the set of natural numbers. Criteria of compatibility of a system of linear Diophantine equations, strict inequations, and nonstrict inequations are considered. Upper bounds for components of a minimal set of solutions and algorithms of construction of minimal generating sets of solutions for all types of systems are given. These criteria and the corresponding algorithms for constructing a minimal supporting set of solutions can be used in solving all the considered types of systems and systems of mixed types."
# Split text into sentences
sentenceList = split_sentences(text)
#stoppath = "FoxStoplist.txt" #Fox stoplist contains "numbers", so it will not find "natural numbers" like in Table 1.1
stoppath = "RAKE/SmartStoplist.txt" #SMART stoplist misses some of the lower-scoring keywords in Figure 1.5, which means that the top 1/3 cuts off one of the 4.0 score words in Table 1.1
stopwordpattern = build_stop_word_regex(stoppath)
# generate candidate keywords
phraseList = generate_candidate_keywords(sentenceList, stopwordpattern)
# calculate individual word scores
wordscores = calculate_word_scores(phraseList)
# generate candidate keyword scores
keywordcandidates = generate_candidate_keyword_scores(phraseList, wordscores)
if debug: print(keywordcandidates)
sortedKeywords = sorted(six.iteritems(keywordcandidates), key=operator.itemgetter(1), reverse=True)
if debug: print(sortedKeywords)
totalKeywords = len(sortedKeywords)
if debug: print(totalKeywords)
print(sortedKeywords[0:(totalKeywords // 3)])
rake = Rake("SmartStoplist.txt")
keywords = rake.run(text)
print(keywords)
| chrisfromthelc/python-rake | rake.py | Python | mit | 8,423 |
from concurrent.futures import ThreadPoolExecutor
class SingletonThreadPoolExecutor(ThreadPoolExecutor):
"""
    This class must not be instantiated directly; subclass it instead.
"""
def __new__(cls, max_workers=None, thread_name_prefix=None):
if cls is SingletonThreadPoolExecutor:
raise NotImplementedError
if getattr(cls, '_object', None) is None:
cls._object = ThreadPoolExecutor(
max_workers=max_workers,
thread_name_prefix=thread_name_prefix
)
return cls._object
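# A minimal, hypothetical usage sketch (not part of the original module): the
# executor is meant to be subclassed, and every instantiation of the subclass
# then returns the same shared pool. The class name and worker count below are
# illustrative assumptions.
if __name__ == '__main__':
    class DemoExecutor(SingletonThreadPoolExecutor):
        pass

    pool_a = DemoExecutor(max_workers=4, thread_name_prefix='demo')
    pool_b = DemoExecutor(max_workers=4, thread_name_prefix='demo')
    assert pool_a is pool_b  # both names refer to the same ThreadPoolExecutor
    print(pool_a.submit(sum, [1, 2, 3]).result())  # -> 6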
| skyoo/jumpserver | apps/common/thread_pools.py | Python | gpl-2.0 | 538 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
Union.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import QgsFeatureRequest, QgsFeature, QgsGeometry, QgsWkbTypes
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.ProcessingLog import ProcessingLog
from processing.core.parameters import ParameterVector
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
wkbTypeGroups = {
'Point': (QgsWkbTypes.Point, QgsWkbTypes.MultiPoint, QgsWkbTypes.Point25D, QgsWkbTypes.MultiPoint25D,),
'LineString': (QgsWkbTypes.LineString, QgsWkbTypes.MultiLineString, QgsWkbTypes.LineString25D, QgsWkbTypes.MultiLineString25D,),
'Polygon': (QgsWkbTypes.Polygon, QgsWkbTypes.MultiPolygon, QgsWkbTypes.Polygon25D, QgsWkbTypes.MultiPolygon25D,),
}
for key, value in list(wkbTypeGroups.items()):
for const in value:
wkbTypeGroups[const] = key
class Union(GeoAlgorithm):
INPUT = 'INPUT'
INPUT2 = 'INPUT2'
OUTPUT = 'OUTPUT'
def getIcon(self):
return QIcon(os.path.join(pluginPath, 'images', 'ftools', 'union.png'))
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Union')
self.group, self.i18n_group = self.trAlgorithm('Vector overlay tools')
self.addParameter(ParameterVector(Union.INPUT,
self.tr('Input layer')))
self.addParameter(ParameterVector(Union.INPUT2,
self.tr('Input layer 2')))
self.addOutput(OutputVector(Union.OUTPUT, self.tr('Union')))
def processAlgorithm(self, feedback):
vlayerA = dataobjects.getObjectFromUri(self.getParameterValue(Union.INPUT))
vlayerB = dataobjects.getObjectFromUri(self.getParameterValue(Union.INPUT2))
geomType = vlayerA.wkbType()
fields = vector.combineVectorFields(vlayerA, vlayerB)
writer = self.getOutputFromName(Union.OUTPUT).getVectorWriter(fields,
geomType, vlayerA.crs())
inFeatA = QgsFeature()
inFeatB = QgsFeature()
outFeat = QgsFeature()
indexA = vector.spatialindex(vlayerB)
indexB = vector.spatialindex(vlayerA)
count = 0
nElement = 0
featuresA = vector.features(vlayerA)
nFeat = len(featuresA)
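        # First pass: for every feature in layer A, write its intersections
        # with layer B (attributes from both layers) plus the remainder of its
        # geometry not covered by layer B (attributes from A only).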
for inFeatA in featuresA:
feedback.setProgress(nElement / float(nFeat) * 50)
nElement += 1
lstIntersectingB = []
geom = inFeatA.geometry()
atMapA = inFeatA.attributes()
intersects = indexA.intersects(geom.boundingBox())
if len(intersects) < 1:
try:
outFeat.setGeometry(geom)
outFeat.setAttributes(atMapA)
writer.addFeature(outFeat)
except:
# This really shouldn't happen, as we haven't
# edited the input geom at all
ProcessingLog.addToLog(ProcessingLog.LOG_INFO,
self.tr('Feature geometry error: One or more output features ignored due to invalid geometry.'))
else:
request = QgsFeatureRequest().setFilterFids(intersects)
engine = QgsGeometry.createGeometryEngine(geom.geometry())
engine.prepareGeometry()
for inFeatB in vlayerB.getFeatures(request):
count += 1
atMapB = inFeatB.attributes()
tmpGeom = inFeatB.geometry()
if engine.intersects(tmpGeom.geometry()):
int_geom = geom.intersection(tmpGeom)
lstIntersectingB.append(tmpGeom)
if not int_geom:
# There was a problem creating the intersection
ProcessingLog.addToLog(ProcessingLog.LOG_INFO,
self.tr('GEOS geoprocessing error: One or more input features have invalid geometry.'))
int_geom = QgsGeometry()
else:
int_geom = QgsGeometry(int_geom)
if int_geom.wkbType() == QgsWkbTypes.Unknown or QgsWkbTypes.flatType(int_geom.geometry().wkbType()) == QgsWkbTypes.GeometryCollection:
                            # Intersection produced different geometry types
temp_list = int_geom.asGeometryCollection()
for i in temp_list:
if i.type() == geom.type():
int_geom = QgsGeometry(i)
try:
outFeat.setGeometry(int_geom)
outFeat.setAttributes(atMapA + atMapB)
writer.addFeature(outFeat)
except:
ProcessingLog.addToLog(ProcessingLog.LOG_INFO,
self.tr('Feature geometry error: One or more output features ignored due to invalid geometry.'))
else:
# Geometry list: prevents writing error
# in geometries of different types
# produced by the intersection
# fix #3549
if int_geom.wkbType() in wkbTypeGroups[wkbTypeGroups[int_geom.wkbType()]]:
try:
outFeat.setGeometry(int_geom)
outFeat.setAttributes(atMapA + atMapB)
writer.addFeature(outFeat)
except:
ProcessingLog.addToLog(ProcessingLog.LOG_INFO,
self.tr('Feature geometry error: One or more output features ignored due to invalid geometry.'))
# the remaining bit of inFeatA's geometry
# if there is nothing left, this will just silently fail and we're good
diff_geom = QgsGeometry(geom)
if len(lstIntersectingB) != 0:
intB = QgsGeometry.unaryUnion(lstIntersectingB)
diff_geom = diff_geom.difference(intB)
if diff_geom.wkbType() == 0 or QgsWkbTypes.flatType(diff_geom.geometry().wkbType()) == QgsWkbTypes.GeometryCollection:
temp_list = diff_geom.asGeometryCollection()
for i in temp_list:
if i.type() == geom.type():
diff_geom = QgsGeometry(i)
try:
outFeat.setGeometry(diff_geom)
outFeat.setAttributes(atMapA)
writer.addFeature(outFeat)
except:
ProcessingLog.addToLog(ProcessingLog.LOG_INFO,
self.tr('Feature geometry error: One or more output features ignored due to invalid geometry.'))
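        # Second pass: walk layer B and write each feature's geometry minus
        # everything it shares with layer A, padding layer A's attribute
        # columns with NULLs so the combined field layout stays consistent.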
length = len(vlayerA.fields())
atMapA = [None] * length
featuresA = vector.features(vlayerB)
nFeat = len(featuresA)
for inFeatA in featuresA:
feedback.setProgress(nElement / float(nFeat) * 100)
add = False
geom = inFeatA.geometry()
diff_geom = QgsGeometry(geom)
atMap = [None] * length
atMap.extend(inFeatA.attributes())
intersects = indexB.intersects(geom.boundingBox())
if len(intersects) < 1:
try:
outFeat.setGeometry(geom)
outFeat.setAttributes(atMap)
writer.addFeature(outFeat)
except:
ProcessingLog.addToLog(ProcessingLog.LOG_INFO,
self.tr('Feature geometry error: One or more output features ignored due to invalid geometry.'))
else:
request = QgsFeatureRequest().setFilterFids(intersects)
# use prepared geometries for faster intersection tests
engine = QgsGeometry.createGeometryEngine(diff_geom.geometry())
engine.prepareGeometry()
for inFeatB in vlayerA.getFeatures(request):
atMapB = inFeatB.attributes()
tmpGeom = inFeatB.geometry()
if engine.intersects(tmpGeom.geometry()):
add = True
diff_geom = QgsGeometry(diff_geom.difference(tmpGeom))
else:
try:
                            # This only happens if the bounding box
# intersects, but the geometry doesn't
outFeat.setGeometry(diff_geom)
outFeat.setAttributes(atMap)
writer.addFeature(outFeat)
except:
ProcessingLog.addToLog(ProcessingLog.LOG_INFO,
self.tr('Feature geometry error: One or more output features ignored due to invalid geometry.'))
if add:
try:
outFeat.setGeometry(diff_geom)
outFeat.setAttributes(atMap)
writer.addFeature(outFeat)
except:
ProcessingLog.addToLog(ProcessingLog.LOG_INFO,
self.tr('Feature geometry error: One or more output features ignored due to invalid geometry.'))
nElement += 1
del writer
| myarjunar/QGIS | python/plugins/processing/algs/qgis/Union.py | Python | gpl-2.0 | 11,085 |
"""
========================
Random Number Generation
========================
==================== =========================================================
Utility functions
==============================================================================
random Uniformly distributed values of a given shape.
bytes Uniformly distributed random bytes.
random_integers Uniformly distributed integers in a given range.
random_sample Uniformly distributed floats in a given range.
random Alias for random_sample
ranf Alias for random_sample
sample Alias for random_sample
choice Generate a weighted random sample from a given array-like
permutation Randomly permute a sequence / generate a random sequence.
shuffle Randomly permute a sequence in place.
seed Seed the random number generator.
==================== =========================================================
==================== =========================================================
Compatibility functions
==============================================================================
rand Uniformly distributed values.
randn Normally distributed values.
ranf Uniformly distributed floating point numbers.
randint Uniformly distributed integers in a given range.
==================== =========================================================
==================== =========================================================
Univariate distributions
==============================================================================
beta Beta distribution over ``[0, 1]``.
binomial Binomial distribution.
chisquare :math:`\\chi^2` distribution.
exponential Exponential distribution.
f F (Fisher-Snedecor) distribution.
gamma Gamma distribution.
geometric Geometric distribution.
gumbel Gumbel distribution.
hypergeometric Hypergeometric distribution.
laplace Laplace distribution.
logistic Logistic distribution.
lognormal Log-normal distribution.
logseries Logarithmic series distribution.
negative_binomial Negative binomial distribution.
noncentral_chisquare Non-central chi-square distribution.
noncentral_f Non-central F distribution.
normal Normal / Gaussian distribution.
pareto Pareto distribution.
poisson Poisson distribution.
power Power distribution.
rayleigh Rayleigh distribution.
triangular Triangular distribution.
uniform Uniform distribution.
vonmises Von Mises circular distribution.
wald Wald (inverse Gaussian) distribution.
weibull Weibull distribution.
zipf Zipf's distribution over ranked data.
==================== =========================================================
==================== =========================================================
Multivariate distributions
==============================================================================
dirichlet Multivariate generalization of Beta distribution.
multinomial Multivariate generalization of the binomial distribution.
multivariate_normal Multivariate generalization of the normal distribution.
==================== =========================================================
==================== =========================================================
Standard distributions
==============================================================================
standard_cauchy Standard Cauchy-Lorentz distribution.
standard_exponential Standard exponential distribution.
standard_gamma Standard Gamma distribution.
standard_normal Standard normal distribution.
standard_t Standard Student's t-distribution.
==================== =========================================================
==================== =========================================================
Internal functions
==============================================================================
get_state Get tuple representing internal state of generator.
set_state Set state of generator.
==================== =========================================================
"""
# To get sub-modules
from info import __doc__, __all__
import warnings
from numpy.testing.utils import WarningManager
warn_ctx = WarningManager()
warn_ctx.__enter__()
try:
warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
from mtrand import *
finally:
warn_ctx.__exit__()
# Some aliases:
ranf = random = sample = random_sample
__all__.extend(['ranf','random','sample'])
def __RandomState_ctor():
"""Return a RandomState instance.
This function exists solely to assist (un)pickling.
"""
return RandomState()
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
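# Illustrative usage of the interface documented above (comments only, not
# part of the original module):
#     >>> from numpy import random
#     >>> random.seed(0)                                # make results reproducible
#     >>> random.random_sample(3)                       # three uniform floats in [0, 1)
#     >>> random.normal(loc=0.0, scale=1.0, size=(2, 2))
#     >>> random.randint(1, 7, size=10)                 # ten dice rolls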
| beiko-lab/gengis | bin/Lib/site-packages/numpy/random/__init__.py | Python | gpl-3.0 | 5,234 |