# Copyright 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from oslo.config import cfg
import six
from nova import context
from nova import exception
from nova.i18n import _
from nova import objects
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
virt_cpu_opts = [
cfg.StrOpt('vcpu_pin_set',
help='Defines which pcpus that instance vcpus can use. '
'For example, "4-12,^8,15"'),
]
CONF = cfg.CONF
CONF.register_opts(virt_cpu_opts)
LOG = logging.getLogger(__name__)
def get_vcpu_pin_set():
"""Parsing vcpu_pin_set config.
Returns a list of pcpu ids can be used by instances.
"""
if not CONF.vcpu_pin_set:
return None
cpuset_ids = parse_cpu_spec(CONF.vcpu_pin_set)
if not cpuset_ids:
raise exception.Invalid(_("No CPUs available after parsing %r") %
CONF.vcpu_pin_set)
return sorted(cpuset_ids)
def parse_cpu_spec(spec):
"""Parse a CPU set specification.
:param spec: cpu set string, e.g. "1-4,^3,6"
Each element in the list is either a single
CPU number, a range of CPU numbers, or a
caret followed by a CPU number to be excluded
from a previous range.
:returns: a set of CPU indexes
"""
cpuset_ids = set()
cpuset_reject_ids = set()
for rule in spec.split(','):
rule = rule.strip()
# Handle multi ','
if len(rule) < 1:
continue
# Note the count limit in the .split() call
range_parts = rule.split('-', 1)
if len(range_parts) > 1:
# So, this was a range; start by converting the parts to ints
try:
start, end = [int(p.strip()) for p in range_parts]
except ValueError:
raise exception.Invalid(_("Invalid range expression %r")
% rule)
# Make sure it's a valid range
if start > end:
raise exception.Invalid(_("Invalid range expression %r")
% rule)
# Add available CPU ids to set
cpuset_ids |= set(range(start, end + 1))
elif rule[0] == '^':
# Not a range, the rule is an exclusion rule; convert to int
try:
cpuset_reject_ids.add(int(rule[1:].strip()))
except ValueError:
raise exception.Invalid(_("Invalid exclusion "
"expression %r") % rule)
else:
# OK, a single CPU to include; convert to int
try:
cpuset_ids.add(int(rule))
except ValueError:
raise exception.Invalid(_("Invalid inclusion "
"expression %r") % rule)
# Use sets to handle the exclusion rules for us
cpuset_ids -= cpuset_reject_ids
return cpuset_ids
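# Illustrative usage of parse_cpu_spec (a minimal sketch; the spec string is a
# made-up example, not taken from any real configuration):
#
#   >>> sorted(parse_cpu_spec("1-4,^3,6"))
#   [1, 2, 4, 6]
#
# "1-4" adds CPUs 1..4, "^3" excludes CPU 3, and "6" adds CPU 6.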
def format_cpu_spec(cpuset, allow_ranges=True):
"""Format a libvirt CPU range specification.
:param cpuset: set (or list) of CPU indexes
Format a set/list of CPU indexes as a libvirt CPU
range specification. If allow_ranges is true, it
will try to detect contiguous ranges of CPUs,
otherwise it will just list each CPU index explicitly.
:returns: a formatted CPU range string
"""
# We attempt to detect ranges, but don't bother with
# trying to do range negations to minimize the overall
# spec string length
if allow_ranges:
ranges = []
previndex = None
for cpuindex in sorted(cpuset):
if previndex is None or previndex != (cpuindex - 1):
ranges.append([])
ranges[-1].append(cpuindex)
previndex = cpuindex
parts = []
for entry in ranges:
if len(entry) == 1:
parts.append(str(entry[0]))
else:
parts.append("%d-%d" % (entry[0], entry[len(entry) - 1]))
return ",".join(parts)
else:
return ",".join(str(id) for id in sorted(cpuset))
def get_number_of_serial_ports(flavor, image_meta):
"""Get the number of serial consoles from the flavor or image
:param flavor: Flavor object to read extra specs from
:param image_meta: Image object to read image metadata from
If flavor extra specs is not set, then any image meta value is permitted.
If flavor extra specs *is* set, then this provides the default serial
port count. The image meta is permitted to override the extra specs, but
*only* with a lower value, i.e.
- flavor hw:serial_port_count=4
VM gets 4 serial ports
- flavor hw:serial_port_count=4 and image hw_serial_port_count=2
VM gets 2 serial ports
- image hw_serial_port_count=6
VM gets 6 serial ports
- flavor hw:serial_port_count=4 and image hw_serial_port_count=6
Abort guest boot - forbidden to exceed flavor value
:returns: number of serial ports
"""
def get_number(obj, property):
num_ports = obj.get(property)
if num_ports is not None:
try:
num_ports = int(num_ports)
except ValueError:
raise exception.ImageSerialPortNumberInvalid(
num_ports=num_ports, property=property)
return num_ports
image_meta_prop = (image_meta or {}).get('properties', {})
flavor_num_ports = get_number(flavor.extra_specs, "hw:serial_port_count")
image_num_ports = get_number(image_meta_prop, "hw_serial_port_count")
if flavor_num_ports is not None and image_num_ports is not None:
if image_num_ports > flavor_num_ports:
raise exception.ImageSerialPortNumberExceedFlavorValue()
return image_num_ports
return flavor_num_ports or image_num_ports or 1
class VirtCPUTopology(object):
def __init__(self, sockets, cores, threads):
"""Create a new CPU topology object
:param sockets: number of sockets, at least 1
:param cores: number of cores, at least 1
:param threads: number of threads, at least 1
Create a new CPU topology object representing the
number of sockets, cores and threads to use for
the virtual instance.
"""
self.sockets = sockets
self.cores = cores
self.threads = threads
def score(self, wanttopology):
"""Calculate score for the topology against a desired configuration
:param wanttopology: VirtCPUTopology instance for preferred topology
Calculate a score indicating how well this topology
matches against a preferred topology. A score of 3
indicates an exact match for sockets, cores and threads.
A score of 2 indicates a match of sockets & cores or
sockets & threads or cores and threads. A score of 1
indicates a match of sockets or cores or threads. A
score of 0 indicates no match
:returns: score in range 0 (worst) to 3 (best)
"""
score = 0
if (wanttopology.sockets != -1 and
self.sockets == wanttopology.sockets):
score = score + 1
if (wanttopology.cores != -1 and
self.cores == wanttopology.cores):
score = score + 1
if (wanttopology.threads != -1 and
self.threads == wanttopology.threads):
score = score + 1
return score
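# Scoring sketch (hypothetical values): a topology of 2 sockets, 4 cores and
# 1 thread scored against a preferred topology of (2, -1, 1) yields 2, because
# sockets and threads match while cores are unconstrained (-1) and therefore
# do not contribute to the score.
#
#   >>> VirtCPUTopology(2, 4, 1).score(VirtCPUTopology(2, -1, 1))
#   2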
@staticmethod
def get_topology_constraints(flavor, image_meta):
"""Get the topology constraints declared in flavor or image
:param flavor: Flavor object to read extra specs from
:param image_meta: Image object to read image metadata from
Gets the topology constraints from the configuration defined
in the flavor extra specs or the image metadata. In the flavor
this will look for
hw:cpu_sockets - preferred socket count
hw:cpu_cores - preferred core count
hw:cpu_threads - preferred thread count
hw:cpu_maxsockets - maximum socket count
hw:cpu_maxcores - maximum core count
hw:cpu_maxthreads - maximum thread count
In the image metadata this will look at
hw_cpu_sockets - preferred socket count
hw_cpu_cores - preferred core count
hw_cpu_threads - preferred thread count
hw_cpu_maxsockets - maximum socket count
hw_cpu_maxcores - maximum core count
hw_cpu_maxthreads - maximum thread count
The image metadata values are not permitted to exceed the
corresponding values set in the flavor. All values are, however, optional.
This will return a pair of VirtCPUTopology instances,
the first giving the preferred socket/core/thread counts,
and the second giving the upper limits on socket/core/
thread counts.
exception.ImageVCPULimitsRangeExceeded will be raised
if the maximum counts set against the image exceed
the maximum counts set against the flavor
exception.ImageVCPUTopologyRangeExceeded will be raised
if the preferred counts set against the image exceed
the maximum counts set against the image or flavor
:returns: (preferred topology, maximum topology)
"""
# Obtain the absolute limits from the flavor
flvmaxsockets = int(flavor.extra_specs.get(
"hw:cpu_max_sockets", 65536))
flvmaxcores = int(flavor.extra_specs.get(
"hw:cpu_max_cores", 65536))
flvmaxthreads = int(flavor.extra_specs.get(
"hw:cpu_max_threads", 65536))
LOG.debug("Flavor limits %(sockets)d:%(cores)d:%(threads)d",
{"sockets": flvmaxsockets,
"cores": flvmaxcores,
"threads": flvmaxthreads})
# Get any customized limits from the image
maxsockets = int(image_meta.get("properties", {})
.get("hw_cpu_max_sockets", flvmaxsockets))
maxcores = int(image_meta.get("properties", {})
.get("hw_cpu_max_cores", flvmaxcores))
maxthreads = int(image_meta.get("properties", {})
.get("hw_cpu_max_threads", flvmaxthreads))
LOG.debug("Image limits %(sockets)d:%(cores)d:%(threads)d",
{"sockets": maxsockets,
"cores": maxcores,
"threads": maxthreads})
# Image limits are not permitted to exceed the flavor
# limits. ie they can only lower what the flavor defines
if ((maxsockets > flvmaxsockets) or
(maxcores > flvmaxcores) or
(maxthreads > flvmaxthreads)):
raise exception.ImageVCPULimitsRangeExceeded(
sockets=maxsockets,
cores=maxcores,
threads=maxthreads,
maxsockets=flvmaxsockets,
maxcores=flvmaxcores,
maxthreads=flvmaxthreads)
# Get any default preferred topology from the flavor
flvsockets = int(flavor.extra_specs.get("hw:cpu_sockets", -1))
flvcores = int(flavor.extra_specs.get("hw:cpu_cores", -1))
flvthreads = int(flavor.extra_specs.get("hw:cpu_threads", -1))
LOG.debug("Flavor pref %(sockets)d:%(cores)d:%(threads)d",
{"sockets": flvsockets,
"cores": flvcores,
"threads": flvthreads})
# If the image limits have reduced the flavor limits
# we might need to discard the preferred topology
# from the flavor
if ((flvsockets > maxsockets) or
(flvcores > maxcores) or
(flvthreads > maxthreads)):
flvsockets = flvcores = flvthreads = -1
# Finally see if the image has provided a preferred
# topology to use
sockets = int(image_meta.get("properties", {})
.get("hw_cpu_sockets", -1))
cores = int(image_meta.get("properties", {})
.get("hw_cpu_cores", -1))
threads = int(image_meta.get("properties", {})
.get("hw_cpu_threads", -1))
LOG.debug("Image pref %(sockets)d:%(cores)d:%(threads)d",
{"sockets": sockets,
"cores": cores,
"threads": threads})
# Image topology is not permitted to exceed image/flavor
# limits
if ((sockets > maxsockets) or
(cores > maxcores) or
(threads > maxthreads)):
raise exception.ImageVCPUTopologyRangeExceeded(
sockets=sockets,
cores=cores,
threads=threads,
maxsockets=maxsockets,
maxcores=maxcores,
maxthreads=maxthreads)
# If no preferred topology was set against the image
# then use the preferred topology from the flavor
# We use 'and' not 'or', since if any value is set
# against the image this invalidates the entire set
# of values from the flavor
if sockets == -1 and cores == -1 and threads == -1:
sockets = flvsockets
cores = flvcores
threads = flvthreads
LOG.debug("Chosen %(sockets)d:%(cores)d:%(threads)d limits "
"%(maxsockets)d:%(maxcores)d:%(maxthreads)d",
{"sockets": sockets, "cores": cores,
"threads": threads, "maxsockets": maxsockets,
"maxcores": maxcores, "maxthreads": maxthreads})
return (VirtCPUTopology(sockets, cores, threads),
VirtCPUTopology(maxsockets, maxcores, maxthreads))
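# Constraint resolution sketch (hypothetical extra specs, shown only to
# illustrate the precedence rules documented above): a flavor with
# "hw:cpu_max_sockets": "2" and "hw:cpu_sockets": "2" and an image with no
# CPU topology properties resolves to a preferred topology of (2, -1, -1)
# and a maximum topology of (2, 65536, 65536), i.e. the image inherits the
# flavor limits and the flavor preference is kept because the image did not
# override it.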
@staticmethod
def get_possible_topologies(vcpus, maxtopology, allow_threads):
"""Get a list of possible topologies for a vCPU count
:param vcpus: total number of CPUs for guest instance
:param maxtopology: VirtCPUTopology for upper limits
:param allow_threads: if the hypervisor supports CPU threads
Given a total desired vCPU count and constraints on the
maximum number of sockets, cores and threads, return a
list of VirtCPUTopology instances that represent every
possible topology that satisfies the constraints.
exception.ImageVCPULimitsRangeImpossible is raised if
it is impossible to achieve the total vcpu count given
the maximum limits on sockets, cores & threads.
:returns: list of VirtCPUTopology instances
"""
# Clamp limits to number of vcpus to prevent
# iterating over insanely large list
maxsockets = min(vcpus, maxtopology.sockets)
maxcores = min(vcpus, maxtopology.cores)
maxthreads = min(vcpus, maxtopology.threads)
if not allow_threads:
maxthreads = 1
LOG.debug("Build topologies for %(vcpus)d vcpu(s) "
"%(maxsockets)d:%(maxcores)d:%(maxthreads)d",
{"vcpus": vcpus, "maxsockets": maxsockets,
"maxcores": maxcores, "maxthreads": maxthreads})
# Figure out all possible topologies that match
# the required vcpus count and satisfy the declared
# limits. If the total vCPU count were very high
# it might be more efficient to factorize the vcpu
# count and then only iterate over its factors, but
# that's overkill right now
possible = []
for s in range(1, maxsockets + 1):
for c in range(1, maxcores + 1):
for t in range(1, maxthreads + 1):
if t * c * s == vcpus:
possible.append(VirtCPUTopology(s, c, t))
# We want to
# - Minimize threads (ie larger sockets * cores is best)
# - Prefer sockets over cores
possible = sorted(possible, reverse=True,
key=lambda x: (x.sockets * x.cores,
x.sockets,
x.threads))
LOG.debug("Got %d possible topologies", len(possible))
if len(possible) == 0:
raise exception.ImageVCPULimitsRangeImpossible(vcpus=vcpus,
sockets=maxsockets,
cores=maxcores,
threads=maxthreads)
return possible
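# Enumeration sketch (hypothetical request): for 8 vCPUs with a maximum
# topology of 8 sockets / 8 cores / 2 threads and allow_threads=True, every
# (sockets, cores, threads) combination whose product is 8 is generated, and
# the sort above places 8 sockets x 1 core x 1 thread first (largest
# sockets*cores, then most sockets, i.e. threads are minimized).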
@staticmethod
def sort_possible_topologies(possible, wanttopology):
"""Sort the topologies in order of preference
:param possible: list of VirtCPUTopology instances
:param wanttopology: VirtCPUTopology for preferred topology
This takes the list of possible topologies and resorts
it such that those configurations which most closely
match the preferred topology are first.
:returns: sorted list of VirtCPUTopology instances
"""
# Look at possible topologies and score them according
# to how well they match the preferred topologies
# We don't use python's sort(), since we want to
# preserve the sorting done when populating the
# 'possible' list originally
scores = collections.defaultdict(list)
for topology in possible:
score = topology.score(wanttopology)
scores[score].append(topology)
# Build list of all possible topologies sorted
# by the match score, best match first
desired = []
desired.extend(scores[3])
desired.extend(scores[2])
desired.extend(scores[1])
desired.extend(scores[0])
return desired
@staticmethod
def get_desirable_configs(flavor, image_meta, allow_threads=True):
"""Get desired CPU topologies according to settings
:param flavor: Flavor object to query extra specs from
:param image_meta: ImageMeta object to query properties from
:param allow_threads: if the hypervisor supports CPU threads
Look at the properties set in the flavor extra specs and
the image metadata and build up a list of all possible
valid CPU topologies that can be used in the guest. Then
return this list sorted in order of preference.
:returns: sorted list of VirtCPUTopology instances
"""
LOG.debug("Getting desirable topologies for flavor %(flavor)s "
"and image_meta %(image_meta)s",
{"flavor": flavor, "image_meta": image_meta})
preferred, maximum = (
VirtCPUTopology.get_topology_constraints(flavor,
image_meta))
possible = VirtCPUTopology.get_possible_topologies(
flavor.vcpus, maximum, allow_threads)
desired = VirtCPUTopology.sort_possible_topologies(
possible, preferred)
return desired
@staticmethod
def get_best_config(flavor, image_meta, allow_threads=True):
"""Get bst CPU topology according to settings
:param flavor: Flavor object to query extra specs from
:param image_meta: ImageMeta object to query properties from
:param allow_threads: if the hypervisor supports CPU threads
Look at the properties set in the flavor extra specs and
the image metadata and build up a list of all possible
valid CPU topologies that can be used in the guest. Then
return the best topology to use
:returns: a VirtCPUTopology instance for best topology
"""
return VirtCPUTopology.get_desirable_configs(flavor,
image_meta,
allow_threads)[0]
class VirtNUMATopologyCell(object):
"""Class for reporting NUMA resources in a cell
The VirtNUMATopologyCell class represents the
hardware resources present in a NUMA cell.
"""
def __init__(self, id, cpuset, memory):
"""Create a new NUMA Cell
:param id: integer identifier of cell
:param cpuset: set containing list of CPU indexes
:param memory: RAM measured in KiB
Creates a new NUMA cell object to record the hardware
resources.
:returns: a new NUMA cell object
"""
super(VirtNUMATopologyCell, self).__init__()
self.id = id
self.cpuset = cpuset
self.memory = memory
def _to_dict(self):
return {'cpus': format_cpu_spec(self.cpuset, allow_ranges=False),
'mem': {'total': self.memory},
'id': self.id}
@classmethod
def _from_dict(cls, data_dict):
cpuset = parse_cpu_spec(data_dict.get('cpus', ''))
memory = data_dict.get('mem', {}).get('total', 0)
cell_id = data_dict.get('id')
return cls(cell_id, cpuset, memory)
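# Serialization sketch (hypothetical cell, to make the dict layout explicit):
# VirtNUMATopologyCell(0, set([0, 1]), 1024)._to_dict() returns a dict
# equivalent to {'id': 0, 'cpus': '0,1', 'mem': {'total': 1024}}, and
# _from_dict() accepts the same layout, so the two methods round-trip.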
class VirtNUMATopologyCellLimit(VirtNUMATopologyCell):
def __init__(self, id, cpuset, memory, cpu_limit, memory_limit):
"""Create a new NUMA Cell with usage
:param id: integer identifier of cell
:param cpuset: set containing list of CPU indexes
:param memory: RAM measured in KiB
:param cpu_limit: maximum number of CPUs allocated
:param memory_limit: maximum RAM allocated in KiB
Creates a new NUMA cell object to represent the maximum hardware
resources and allocation limits. The number of CPUs specified
by the @cpu_limit parameter may be larger than the number
of bits set in @cpuset if CPU overcommit is used. Likewise
the amount of RAM specified by the @memory_limit parameter
may be larger than the available RAM in @memory if RAM
overcommit is used.
:returns: a new NUMA cell object
"""
super(VirtNUMATopologyCellLimit, self).__init__(
id, cpuset, memory)
self.cpu_limit = cpu_limit
self.memory_limit = memory_limit
def _to_dict(self):
data_dict = super(VirtNUMATopologyCellLimit, self)._to_dict()
data_dict['mem']['limit'] = self.memory_limit
data_dict['cpu_limit'] = self.cpu_limit
return data_dict
@classmethod
def _from_dict(cls, data_dict):
cpuset = parse_cpu_spec(data_dict.get('cpus', ''))
memory = data_dict.get('mem', {}).get('total', 0)
cpu_limit = data_dict.get('cpu_limit', len(cpuset))
memory_limit = data_dict.get('mem', {}).get('limit', memory)
cell_id = data_dict.get('id')
return cls(cell_id, cpuset, memory, cpu_limit, memory_limit)
class VirtNUMATopologyCellUsage(VirtNUMATopologyCell):
"""Class for reporting NUMA resources and usage in a cell
The VirtNUMATopologyCellUsage class specializes
VirtNUMATopologyCell to include information about the
utilization of hardware resources in a NUMA cell.
"""
def __init__(self, id, cpuset, memory, cpu_usage=0, memory_usage=0):
"""Create a new NUMA Cell with usage
:param id: integer identifier of cell
:param cpuset: set containing list of CPU indexes
:param memory: RAM measured in KiB
:param cpu_usage: number of CPUs allocated
:param memory_usage: RAM allocated in KiB
Creates a new NUMA cell object to record the hardware
resources and utilization. The number of CPUs specified
by the @cpu_usage parameter may be larger than the number
of bits set in @cpuset if CPU overcommit is used. Likewise
the amount of RAM specified by the @memory_usage parameter
may be larger than the available RAM in @memory if RAM
overcommit is used.
:returns: a new NUMA cell object
"""
super(VirtNUMATopologyCellUsage, self).__init__(
id, cpuset, memory)
self.cpu_usage = cpu_usage
self.memory_usage = memory_usage
def _to_dict(self):
data_dict = super(VirtNUMATopologyCellUsage, self)._to_dict()
data_dict['mem']['used'] = self.memory_usage
data_dict['cpu_usage'] = self.cpu_usage
return data_dict
@classmethod
def _from_dict(cls, data_dict):
cpuset = parse_cpu_spec(data_dict.get('cpus', ''))
cpu_usage = data_dict.get('cpu_usage', 0)
memory = data_dict.get('mem', {}).get('total', 0)
memory_usage = data_dict.get('mem', {}).get('used', 0)
cell_id = data_dict.get('id')
return cls(cell_id, cpuset, memory, cpu_usage, memory_usage)
class VirtNUMATopology(object):
"""Base class for tracking NUMA topology information
The VirtNUMATopology class represents the NUMA hardware
topology for memory and CPUs in any machine. It is
later specialized for handling either guest instance
or compute host NUMA topology.
"""
def __init__(self, cells=None):
"""Create a new NUMA topology object
:param cells: list of VirtNUMATopologyCell instances
"""
super(VirtNUMATopology, self).__init__()
self.cells = cells or []
def __len__(self):
"""Defined so that boolean testing works the same as for lists."""
return len(self.cells)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, str(self._to_dict()))
def _to_dict(self):
return {'cells': [cell._to_dict() for cell in self.cells]}
@classmethod
def _from_dict(cls, data_dict):
return cls(cells=[cls.cell_class._from_dict(cell_dict)
for cell_dict in data_dict.get('cells', [])])
def to_json(self):
return jsonutils.dumps(self._to_dict())
@classmethod
def from_json(cls, json_string):
return cls._from_dict(jsonutils.loads(json_string))
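# JSON round-trip sketch: to_json() simply dumps the _to_dict() structure, so
# a topology with one (hypothetical) cell serializes to a JSON document like
# '{"cells": [{"id": 0, "cpus": "0,1", "mem": {"total": 1024}}]}', and
# from_json() rebuilds an equivalent object using the subclass's cell_class.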
class VirtNUMAInstanceTopology(VirtNUMATopology):
"""Class to represent the topology configured for a guest
instance. It provides helper APIs to determine configuration
from the metadata specified against the flavour and or
disk image
"""
cell_class = VirtNUMATopologyCell
@staticmethod
def _get_flavor_or_image_prop(flavor, image_meta, propname):
flavor_val = flavor.get('extra_specs', {}).get("hw:" + propname)
image_val = image_meta.get("hw_" + propname)
if flavor_val is not None:
if image_val is not None:
raise exception.ImageNUMATopologyForbidden(
name='hw_' + propname)
return flavor_val
else:
return image_val
@classmethod
def _get_constraints_manual(cls, nodes, flavor, image_meta):
cells = []
totalmem = 0
availcpus = set(range(flavor['vcpus']))
for node in range(nodes):
cpus = cls._get_flavor_or_image_prop(
flavor, image_meta, "numa_cpus.%d" % node)
mem = cls._get_flavor_or_image_prop(
flavor, image_meta, "numa_mem.%d" % node)
# We're expecting both properties set, so
# raise an error if either is missing
if cpus is None or mem is None:
raise exception.ImageNUMATopologyIncomplete()
mem = int(mem)
cpuset = parse_cpu_spec(cpus)
for cpu in cpuset:
if cpu > (flavor['vcpus'] - 1):
raise exception.ImageNUMATopologyCPUOutOfRange(
cpunum=cpu, cpumax=(flavor['vcpus'] - 1))
if cpu not in availcpus:
raise exception.ImageNUMATopologyCPUDuplicates(
cpunum=cpu)
availcpus.remove(cpu)
cells.append(VirtNUMATopologyCell(node, cpuset, mem))
totalmem = totalmem + mem
if availcpus:
raise exception.ImageNUMATopologyCPUsUnassigned(
cpuset=str(availcpus))
if totalmem != flavor['memory_mb']:
raise exception.ImageNUMATopologyMemoryOutOfRange(
memsize=totalmem,
memtotal=flavor['memory_mb'])
return cls(cells)
@classmethod
def _get_constraints_auto(cls, nodes, flavor, image_meta):
if ((flavor['vcpus'] % nodes) > 0 or
(flavor['memory_mb'] % nodes) > 0):
raise exception.ImageNUMATopologyAsymmetric()
cells = []
for node in range(nodes):
cpus = cls._get_flavor_or_image_prop(
flavor, image_meta, "numa_cpus.%d" % node)
mem = cls._get_flavor_or_image_prop(
flavor, image_meta, "numa_mem.%d" % node)
# We're not expecting any properties set, so
# raise an error if there are any
if cpus is not None or mem is not None:
raise exception.ImageNUMATopologyIncomplete()
ncpus = int(flavor['vcpus'] / nodes)
mem = int(flavor['memory_mb'] / nodes)
start = node * ncpus
cpuset = set(range(start, start + ncpus))
cells.append(VirtNUMATopologyCell(node, cpuset, mem))
return cls(cells)
@classmethod
def get_constraints(cls, flavor, image_meta):
nodes = cls._get_flavor_or_image_prop(
flavor, image_meta, "numa_nodes")
if nodes is None:
return None
nodes = int(nodes)
# We'll pick what path to go down based on whether
# anything is set for the first node. Both paths
# have logic to cope with inconsistent property usage
auto = cls._get_flavor_or_image_prop(
flavor, image_meta, "numa_cpus.0") is None
if auto:
return cls._get_constraints_auto(
nodes, flavor, image_meta)
else:
return cls._get_constraints_manual(
nodes, flavor, image_meta)
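# Constraint sketch (hypothetical flavor): with "hw:numa_nodes": "2" set on a
# flavor that has 4 vCPUs and 2048 MB of RAM, and no per-node numa_cpus.N /
# numa_mem.N properties, the automatic path above produces two cells:
# cell 0 with CPUs {0, 1} and 1024 MB, cell 1 with CPUs {2, 3} and 1024 MB.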
class VirtNUMALimitTopology(VirtNUMATopology):
"""Class to represent the max resources of a compute node used
for checking oversubscription limits.
"""
cell_class = VirtNUMATopologyCellLimit
class VirtNUMAHostTopology(VirtNUMATopology):
"""Class represents the NUMA configuration and utilization
of a compute node. As well as exposing the overall topology
it tracks the utilization of the resources by guest instances
"""
cell_class = VirtNUMATopologyCellUsage
@staticmethod
def can_fit_instances(host, instances):
"""Test if the instance topology can fit into the host
Returns True if all the cells of all the instance topologies in
'instances' exist in the given 'host' topology. False otherwise.
"""
if not host:
return True
host_cells = set(cell.id for cell in host.cells)
instances_cells = [set(cell.id for cell in instance.cells)
for instance in instances]
return all(instance_cells <= host_cells
for instance_cells in instances_cells)
@classmethod
def usage_from_instances(cls, host, instances, free=False):
"""Get host topology usage
:param host: VirtNUMAHostTopology with usage information
:param instances: list of VirtNUMAInstanceTopology
:param free: If True usage of the host will be decreased
Sum the usage from all @instances to report the overall
host topology usage
:returns: VirtNUMAHostTopology including usage information
"""
if host is None:
return
instances = instances or []
cells = []
sign = -1 if free else 1
for hostcell in host.cells:
memory_usage = hostcell.memory_usage
cpu_usage = hostcell.cpu_usage
for instance in instances:
for instancecell in instance.cells:
if instancecell.id == hostcell.id:
memory_usage = (
memory_usage + sign * instancecell.memory)
cpu_usage = cpu_usage + sign * len(instancecell.cpuset)
cell = cls.cell_class(
hostcell.id, hostcell.cpuset, hostcell.memory,
max(0, cpu_usage), max(0, memory_usage))
cells.append(cell)
return cls(cells)
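# Usage accounting sketch (hypothetical numbers): if a host cell currently
# reports cpu_usage=2 and memory_usage=512 and a single instance cell with
# two CPUs and 256 units of memory lands on it, the returned host cell
# reports cpu_usage=4 and memory_usage=768; with free=True the same instance
# is subtracted instead, never going below zero.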
@classmethod
def claim_test(cls, host, instances, limits=None):
"""Test if we can claim an instance on the host with given limits.
:param host: VirtNUMAHostTopology with usage information
:param instances: list of VirtNUMAInstanceTopology
:param limits: VirtNUMALimitTopology with max values set. Should
match the host topology otherwise
:returns: None if the claim succeeds or text explaining the error.
"""
if not (host and instances):
return
if not cls.can_fit_instances(host, instances):
return (_("Requested instance NUMA topology cannot fit "
"the given host NUMA topology."))
if not limits:
return
claimed_host = cls.usage_from_instances(host, instances)
for claimed_cell, limit_cell in zip(claimed_host.cells, limits.cells):
if (claimed_cell.memory_usage > limit_cell.memory_limit or
claimed_cell.cpu_usage > limit_cell.cpu_limit):
return (_("Requested instance NUMA topology is too large for "
"the given host NUMA topology limits."))
# TODO(ndipanov): Remove when all code paths are using objects
def instance_topology_from_instance(instance):
"""Convenience method for getting the numa_topology out of instances
Since we may get an Instance as either a dict, a db object, or an actual
Instance object, this makes sure we get back either None, or an instance
of objects.InstanceNUMATopology class.
"""
if isinstance(instance, objects.Instance):
# NOTE (ndipanov): This may cause a lazy-load of the attribute
instance_numa_topology = instance.numa_topology
else:
if 'numa_topology' in instance:
instance_numa_topology = instance['numa_topology']
elif 'uuid' in instance:
try:
instance_numa_topology = (
objects.InstanceNUMATopology.get_by_instance_uuid(
context.get_admin_context(), instance['uuid'])
)
except exception.NumaTopologyNotFound:
instance_numa_topology = None
else:
instance_numa_topology = None
if instance_numa_topology:
if isinstance(instance_numa_topology, six.string_types):
instance_numa_topology = VirtNUMAInstanceTopology.from_json(
instance_numa_topology)
elif isinstance(instance_numa_topology, dict):
# NOTE (ndipanov): A horrible hack so that we can use this in the
# scheduler, since the InstanceNUMATopology object is serialized
# raw using the obj_base.obj_to_primitive, (which is buggy and will
# give us a dict with a list of InstanceNUMACell objects), and then
# passed to jsonutils.to_primitive, which will make a dict out of
# those objects. All of this is done by
# scheduler.utils.build_request_spec called in the conductor.
#
# Remove when request_spec is a proper object itself!
dict_cells = instance_numa_topology.get('cells')
if dict_cells:
cells = [objects.InstanceNUMACell(id=cell['id'],
cpuset=set(cell['cpuset']),
memory=cell['memory'])
for cell in dict_cells]
instance_numa_topology = (
objects.InstanceNUMATopology(cells=cells))
return instance_numa_topology
# TODO(ndipanov): Remove when all code paths are using objects
def host_topology_and_format_from_host(host):
"""Convenience method for getting the numa_topology out of hosts
Since we may get a host as either a dict, a db object, or an actual
ComputeNode object, or an instance of HostState class, this makes sure we
get back either None, or an instance of VirtNUMAHostTopology class.
:returns: A two-tuple, first element is the topology itself or None, second
is a boolean set to True if topology was in json format.
"""
was_json = False
try:
host_numa_topology = host.get('numa_topology')
except AttributeError:
host_numa_topology = host.numa_topology
if host_numa_topology is not None and isinstance(
host_numa_topology, six.string_types):
was_json = True
host_numa_topology = VirtNUMAHostTopology.from_json(host_numa_topology)
return host_numa_topology, was_json
# TODO(ndipanov): Remove when all code paths are using objects
def get_host_numa_usage_from_instance(host, instance, free=False,
never_serialize_result=False):
"""Calculate new 'numa_usage' of 'host' from 'instance' NUMA usage
This is a convenience method to help us handle the fact that we use several
different types throughout the code (ComputeNode and Instance objects,
dicts, scheduler HostState) which may have both json and deserialized
versions of VirtNUMATopology classes.
Handles all the complexity without polluting the class method with it.
:param host: nova.objects.ComputeNode instance, or a db object or dict
:param instance: nova.objects.Instance instance, or a db object or dict
:param free: if True, the returned topology will have its usage
decreased instead.
:param never_serialize_result: if True result will always be an instance of
VirtNUMAHostTopology class.
:returns: numa_usage in the format it was on the host or
VirtNUMAHostTopology instance if never_serialize_result was True
"""
instance_numa_topology = instance_topology_from_instance(instance)
if instance_numa_topology:
instance_numa_topology = [instance_numa_topology]
host_numa_topology, jsonify_result = host_topology_and_format_from_host(
host)
updated_numa_topology = (
VirtNUMAHostTopology.usage_from_instances(
host_numa_topology, instance_numa_topology, free=free))
if updated_numa_topology is not None:
if jsonify_result and not never_serialize_result:
updated_numa_topology = updated_numa_topology.to_json()
return updated_numa_topology
# --- end of file: nova/virt/hardware.py | repo: sajeeshcs/nested_quota | language: Python | license: apache-2.0 | size: 39,169 bytes ---
from flask import Flask, render_template
from os import path, pardir
def get_static_dir():
rootdir = path.join(path.dirname(__file__), pardir)
return path.abspath(path.join(rootdir, 'static'))
def get_templates_dir():
rootdir = path.join(path.dirname(__file__), pardir)
return path.abspath(path.join(rootdir, 'templates'))
def web_gui():
"""Entry method for creating an Opesci GUI instance"""
gui = Flask(__name__, static_folder=get_static_dir(),
template_folder=get_templates_dir())
@gui.route('/')
def index():
"""Entry point of the Opesci GUI web app"""
return render_template('index.html')
gui.run(debug=False)
if __name__ == '__main__':
web_gui()
# --- end of file: opescigui/opescigui.py | repo: opesci/gui | language: Python | license: mit | size: 735 bytes ---
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2014 Michal Čihař <[email protected]>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.db import models
from weblate.trans.models.subproject import SubProject
class Source(models.Model):
checksum = models.CharField(max_length=40)
subproject = models.ForeignKey(SubProject)
timestamp = models.DateTimeField(auto_now_add=True)
class Meta:
app_label = 'trans'
unique_together = ('checksum', 'subproject')
# --- end of file: weblate/trans/models/source.py | repo: paour/weblate | language: Python | license: gpl-3.0 | size: 1,144 bytes ---
################################################################################
# MIT License
#
# Copyright (c) 2017 Jean-Charles Fosse & Johann Bigler
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
################################################################################
import sys
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from management import execute_command
from templates import models
BANNER = r"""
___ ___ __ __ ___ __
|__/ /\ |\/||__ | |__ / \|\ | | \|__ |\/|/ \
| \/~~\| ||___|___|___\__/| \| |__/|___| |\__/
"""
def error(failure):
print("ERROR: Failure")
print(failure)
@inlineCallbacks
def main(argv):
yield models.DATABASE.connect()
yield execute_command(models, argv=argv)
if __name__ == "__main__":
print("")
print("-------------------------------------------------------------------")
print(BANNER)
argv = None
if len(sys.argv) > 1:
argv = sys.argv[1:]
d = main(argv)
reactor.run()
if models.DATABASE and hasattr(models.DATABASE, "connection"):
print("")
print("DEBUG: Database closed")
models.DATABASE.connection.close()
else:
print("")
print("INFO: Kameleon stopped")
print("-------------------------------------------------------------------")
# --- end of file: kameleon/run_template.py | repo: ThinkEE/Kameleon | language: Python | license: mit | size: 2,388 bytes ---
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-08 21:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('features', '0002_featurerequest'),
]
operations = [
migrations.AlterField(
model_name='featurerequest',
name='ticket_url',
field=models.URLField(blank=True, default='', verbose_name='Ticket URL'),
),
migrations.AlterUniqueTogether(
name='featurerequest',
unique_together=set([('client', 'client_priority')]),
),
]
# --- end of file: features/migrations/0003_auto_20160408_2155.py | repo: wkevina/feature-requests-app | language: Python | license: mit | size: 646 bytes ---
#!/usr/bin/env python
import sys
import signal
import time
duration = int(sys.argv[1])
ignore_term = bool(int(sys.argv[2]))
print('duration %s, ignore SIGTERM %s' % (duration, ignore_term))
if ignore_term:
def handler(signum, frame):
print('Signal handler called with signal', signum)
# just ignore any kill attempts
signal.signal(signal.SIGTERM, handler)
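# Example invocation (illustrative): "python kill_test.py 10 1" counts down
# for 10 seconds and installs the no-op SIGTERM handler, so a plain `kill`
# will not stop it; "python kill_test.py 10 0" can be terminated normally.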
for x in range(duration):
print(x, 'remaining seconds:', duration - x)
time.sleep(1)
# --- end of file: ganga/GangaCore/old_test/Internals/TestShell/kill_test.py | repo: ganga-devs/ganga | language: Python | license: gpl-3.0 | size: 476 bytes ---
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
"""Use blog post test to test user permissions logic"""
import frappe
import frappe.defaults
import unittest
import frappe.model.meta
from frappe.permissions import (add_user_permission, remove_user_permission,
clear_user_permissions_for_doctype, get_doc_permissions, add_permission, update_permission_property)
from frappe.core.page.permission_manager.permission_manager import update, reset
from frappe.test_runner import make_test_records_for_doctype
from frappe.core.doctype.user_permission.user_permission import clear_user_permissions
from frappe.desk.form.load import getdoc
test_dependencies = ['Blogger', 'Blog Post', "User", "Contact", "Salutation"]
class TestPermissions(unittest.TestCase):
def setUp(self):
frappe.clear_cache(doctype="Blog Post")
if not frappe.flags.permission_user_setup_done:
user = frappe.get_doc("User", "[email protected]")
user.add_roles("Website Manager")
user.add_roles("System Manager")
user = frappe.get_doc("User", "[email protected]")
user.add_roles("Blogger")
user = frappe.get_doc("User", "[email protected]")
user.add_roles("Sales User")
user = frappe.get_doc("User", "[email protected]")
user.add_roles("Website Manager")
frappe.flags.permission_user_setup_done = True
reset('Blogger')
reset('Blog Post')
frappe.db.sql('delete from `tabUser Permission`')
frappe.set_user("[email protected]")
def tearDown(self):
frappe.set_user("Administrator")
frappe.db.set_value("Blogger", "_Test Blogger 1", "user", None)
clear_user_permissions_for_doctype("Blog Category")
clear_user_permissions_for_doctype("Blog Post")
clear_user_permissions_for_doctype("Blogger")
@staticmethod
def set_strict_user_permissions(ignore):
ss = frappe.get_doc("System Settings")
ss.apply_strict_user_permissions = ignore
ss.flags.ignore_mandatory = 1
ss.save()
def test_basic_permission(self):
post = frappe.get_doc("Blog Post", "-test-blog-post")
self.assertTrue(post.has_permission("read"))
def test_select_permission(self):
# grant only select perm to blog post
add_permission('Blog Post', 'Sales User', 0)
update_permission_property('Blog Post', 'Sales User', 0, 'select', 1)
update_permission_property('Blog Post', 'Sales User', 0, 'read', 0)
update_permission_property('Blog Post', 'Sales User', 0, 'write', 0)
frappe.clear_cache(doctype="Blog Post")
frappe.set_user("[email protected]")
# validate select perm
post = frappe.get_doc("Blog Post", "-test-blog-post")
self.assertTrue(post.has_permission("select"))
# validate does not have read and write perm
self.assertFalse(post.has_permission("read"))
self.assertRaises(frappe.PermissionError, post.save)
def test_user_permissions_in_doc(self):
add_user_permission("Blog Category", "-test-blog-category-1",
"[email protected]")
frappe.set_user("[email protected]")
post = frappe.get_doc("Blog Post", "-test-blog-post")
self.assertFalse(post.has_permission("read"))
self.assertFalse(get_doc_permissions(post).get("read"))
post1 = frappe.get_doc("Blog Post", "-test-blog-post-1")
self.assertTrue(post1.has_permission("read"))
self.assertTrue(get_doc_permissions(post1).get("read"))
def test_user_permissions_in_report(self):
add_user_permission("Blog Category", "-test-blog-category-1", "[email protected]")
frappe.set_user("[email protected]")
names = [d.name for d in frappe.get_list("Blog Post", fields=["name", "blog_category"])]
self.assertTrue("-test-blog-post-1" in names)
self.assertFalse("-test-blog-post" in names)
def test_default_values(self):
doc = frappe.new_doc("Blog Post")
self.assertFalse(doc.get("blog_category"))
# Fetch default based on single user permission
add_user_permission("Blog Category", "-test-blog-category-1", "[email protected]")
frappe.set_user("[email protected]")
doc = frappe.new_doc("Blog Post")
self.assertEqual(doc.get("blog_category"), "-test-blog-category-1")
# Don't fetch default if user permissions is more than 1
add_user_permission("Blog Category", "-test-blog-category", "[email protected]", ignore_permissions=True)
frappe.clear_cache()
doc = frappe.new_doc("Blog Post")
self.assertFalse(doc.get("blog_category"))
# Fetch user permission set as default from multiple user permission
add_user_permission("Blog Category", "-test-blog-category-2", "[email protected]", ignore_permissions=True, is_default=1)
frappe.clear_cache()
doc = frappe.new_doc("Blog Post")
self.assertEqual(doc.get("blog_category"), "-test-blog-category-2")
def test_user_link_match_doc(self):
blogger = frappe.get_doc("Blogger", "_Test Blogger 1")
blogger.user = "[email protected]"
blogger.save()
frappe.set_user("[email protected]")
post = frappe.get_doc("Blog Post", "-test-blog-post-2")
self.assertTrue(post.has_permission("read"))
post1 = frappe.get_doc("Blog Post", "-test-blog-post-1")
self.assertFalse(post1.has_permission("read"))
def test_user_link_match_report(self):
blogger = frappe.get_doc("Blogger", "_Test Blogger 1")
blogger.user = "[email protected]"
blogger.save()
frappe.set_user("[email protected]")
names = [d.name for d in frappe.get_list("Blog Post", fields=["name", "owner"])]
self.assertTrue("-test-blog-post-2" in names)
self.assertFalse("-test-blog-post-1" in names)
def test_set_user_permissions(self):
frappe.set_user("[email protected]")
add_user_permission("Blog Post", "-test-blog-post", "[email protected]")
def test_not_allowed_to_set_user_permissions(self):
frappe.set_user("[email protected]")
# this user can't add user permissions
self.assertRaises(frappe.PermissionError, add_user_permission,
"Blog Post", "-test-blog-post", "[email protected]")
def test_read_if_explicit_user_permissions_are_set(self):
self.test_set_user_permissions()
frappe.set_user("[email protected]")
# user can only access permitted blog post
doc = frappe.get_doc("Blog Post", "-test-blog-post")
self.assertTrue(doc.has_permission("read"))
# and not this one
doc = frappe.get_doc("Blog Post", "-test-blog-post-1")
self.assertFalse(doc.has_permission("read"))
def test_not_allowed_to_remove_user_permissions(self):
self.test_set_user_permissions()
frappe.set_user("[email protected]")
# user cannot remove their own user permissions
self.assertRaises(frappe.PermissionError, remove_user_permission,
"Blog Post", "-test-blog-post", "[email protected]")
def test_user_permissions_if_applied_on_doc_being_evaluated(self):
frappe.set_user("[email protected]")
doc = frappe.get_doc("Blog Post", "-test-blog-post-1")
self.assertTrue(doc.has_permission("read"))
frappe.set_user("[email protected]")
add_user_permission("Blog Post", "-test-blog-post", "[email protected]")
frappe.set_user("[email protected]")
doc = frappe.get_doc("Blog Post", "-test-blog-post-1")
self.assertFalse(doc.has_permission("read"))
doc = frappe.get_doc("Blog Post", "-test-blog-post")
self.assertTrue(doc.has_permission("read"))
def test_set_only_once(self):
blog_post = frappe.get_meta("Blog Post")
doc = frappe.get_doc("Blog Post", "-test-blog-post-1")
doc.db_set('title', 'Old')
blog_post.get_field("title").set_only_once = 1
doc.title = "New"
self.assertRaises(frappe.CannotChangeConstantError, doc.save)
blog_post.get_field("title").set_only_once = 0
def test_set_only_once_child_table_rows(self):
doctype_meta = frappe.get_meta("DocType")
doctype_meta.get_field("fields").set_only_once = 1
doc = frappe.get_doc("DocType", "Blog Post")
# remove last one
doc.fields = doc.fields[:-1]
self.assertRaises(frappe.CannotChangeConstantError, doc.save)
frappe.clear_cache(doctype='DocType')
def test_set_only_once_child_table_row_value(self):
doctype_meta = frappe.get_meta("DocType")
doctype_meta.get_field("fields").set_only_once = 1
doc = frappe.get_doc("DocType", "Blog Post")
# change one property from the child table
doc.fields[-1].fieldtype = 'Check'
self.assertRaises(frappe.CannotChangeConstantError, doc.save)
frappe.clear_cache(doctype='DocType')
def test_set_only_once_child_table_okay(self):
doctype_meta = frappe.get_meta("DocType")
doctype_meta.get_field("fields").set_only_once = 1
doc = frappe.get_doc("DocType", "Blog Post")
doc.load_doc_before_save()
self.assertFalse(doc.validate_set_only_once())
frappe.clear_cache(doctype='DocType')
def test_user_permission_doctypes(self):
add_user_permission("Blog Category", "-test-blog-category-1",
"[email protected]")
add_user_permission("Blogger", "_Test Blogger 1",
"[email protected]")
frappe.set_user("[email protected]")
frappe.clear_cache(doctype="Blog Post")
doc = frappe.get_doc("Blog Post", "-test-blog-post")
self.assertFalse(doc.has_permission("read"))
doc = frappe.get_doc("Blog Post", "-test-blog-post-2")
self.assertTrue(doc.has_permission("read"))
frappe.clear_cache(doctype="Blog Post")
def if_owner_setup(self):
update('Blog Post', 'Blogger', 0, 'if_owner', 1)
add_user_permission("Blog Category", "-test-blog-category-1",
"[email protected]")
add_user_permission("Blogger", "_Test Blogger 1",
"[email protected]")
frappe.clear_cache(doctype="Blog Post")
def test_insert_if_owner_with_user_permissions(self):
"""If `If Owner` is checked for a Role, check if that document
is allowed to be read, updated, submitted, etc., but not created,
even if the document is restricted based on User Permissions."""
frappe.delete_doc('Blog Post', '-test-blog-post-title')
self.if_owner_setup()
frappe.set_user("[email protected]")
doc = frappe.get_doc({
"doctype": "Blog Post",
"blog_category": "-test-blog-category",
"blogger": "_Test Blogger 1",
"title": "_Test Blog Post Title",
"content": "_Test Blog Post Content"
})
self.assertRaises(frappe.PermissionError, doc.insert)
frappe.set_user('[email protected]')
add_user_permission("Blog Category", "-test-blog-category",
"[email protected]")
frappe.set_user("[email protected]")
doc.insert()
frappe.set_user("Administrator")
remove_user_permission("Blog Category", "-test-blog-category",
"[email protected]")
frappe.set_user("[email protected]")
doc = frappe.get_doc(doc.doctype, doc.name)
self.assertTrue(doc.has_permission("read"))
self.assertTrue(doc.has_permission("write"))
self.assertFalse(doc.has_permission("create"))
# delete created record
frappe.set_user("Administrator")
frappe.delete_doc('Blog Post', '-test-blog-post-title')
def test_ignore_user_permissions_if_missing(self):
"""If there are no user permissions, then allow as per role"""
add_user_permission("Blog Category", "-test-blog-category",
"[email protected]")
frappe.set_user("[email protected]")
doc = frappe.get_doc({
"doctype": "Blog Post",
"blog_category": "-test-blog-category-2",
"blogger": "_Test Blogger 1",
"title": "_Test Blog Post Title",
"content": "_Test Blog Post Content"
})
self.assertFalse(doc.has_permission("write"))
frappe.set_user("Administrator")
remove_user_permission("Blog Category", "-test-blog-category",
"[email protected]")
frappe.set_user("[email protected]")
self.assertTrue(doc.has_permission('write'))
def test_strict_user_permissions(self):
"""If `Strict User Permissions` is checked in System Settings,
show records even if User Permissions are missing for a linked
doctype"""
frappe.set_user('Administrator')
frappe.db.sql('DELETE FROM `tabContact`')
frappe.db.sql('DELETE FROM `tabContact Email`')
frappe.db.sql('DELETE FROM `tabContact Phone`')
reset('Salutation')
reset('Contact')
make_test_records_for_doctype('Contact', force=True)
add_user_permission("Salutation", "Mr", "[email protected]")
self.set_strict_user_permissions(0)
allowed_contact = frappe.get_doc('Contact', '_Test Contact For _Test Customer')
other_contact = frappe.get_doc('Contact', '_Test Contact For _Test Supplier')
frappe.set_user("[email protected]")
self.assertTrue(allowed_contact.has_permission('read'))
self.assertTrue(other_contact.has_permission('read'))
self.assertEqual(len(frappe.get_list("Contact")), 2)
frappe.set_user("Administrator")
self.set_strict_user_permissions(1)
frappe.set_user("[email protected]")
self.assertTrue(allowed_contact.has_permission('read'))
self.assertFalse(other_contact.has_permission('read'))
self.assertTrue(len(frappe.get_list("Contact")), 1)
frappe.set_user("Administrator")
self.set_strict_user_permissions(0)
clear_user_permissions_for_doctype("Salutation")
clear_user_permissions_for_doctype("Contact")
def test_user_permissions_not_applied_if_user_can_edit_user_permissions(self):
add_user_permission('Blogger', '_Test Blogger 1', '[email protected]')
# [email protected] has rights to create user permissions
# so it should not matter if explicit user permissions are not set
self.assertTrue(frappe.get_doc('Blogger', '_Test Blogger').has_permission('read'))
def test_user_permission_is_not_applied_if_user_roles_does_not_have_permission(self):
add_user_permission('Blog Post', '-test-blog-post-1', '[email protected]')
frappe.set_user("[email protected]")
doc = frappe.get_doc("Blog Post", "-test-blog-post-1")
self.assertFalse(doc.has_permission("read"))
frappe.set_user("Administrator")
user = frappe.get_doc("User", "[email protected]")
user.add_roles("Blogger")
frappe.set_user("[email protected]")
self.assertTrue(doc.has_permission("read"))
frappe.set_user("Administrator")
user.remove_roles("Blogger")
def test_contextual_user_permission(self):
# should be applicable for across all doctypes
add_user_permission('Blogger', '_Test Blogger', '[email protected]')
# should be applicable only while accessing Blog Post
add_user_permission('Blogger', '_Test Blogger 1', '[email protected]', applicable_for='Blog Post')
# should be applicable only while accessing User
add_user_permission('Blogger', '_Test Blogger 2', '[email protected]', applicable_for='User')
posts = frappe.get_all('Blog Post', fields=['name', 'blogger'])
# Get all posts for admin
self.assertEqual(len(posts), 4)
frappe.set_user('[email protected]')
posts = frappe.get_list('Blog Post', fields=['name', 'blogger'])
# Should get only posts with allowed blogger via user permission
# only '_Test Blogger', '_Test Blogger 1' are allowed in Blog Post
self.assertEqual(len(posts), 3)
for post in posts:
self.assertIn(post.blogger, ['_Test Blogger', '_Test Blogger 1'], 'A post from {} is not expected.'.format(post.blogger))
def test_if_owner_permission_overrides_properly(self):
# check if user is not granted access if the user is not the owner of the doc
# Blogger has only read access on the blog post unless he is the owner of the blog
update('Blog Post', 'Blogger', 0, 'if_owner', 1)
update('Blog Post', 'Blogger', 0, 'read', 1)
update('Blog Post', 'Blogger', 0, 'write', 1)
update('Blog Post', 'Blogger', 0, 'delete', 1)
# currently test2 user has not created any document
# still he should be able to do get_list query which should
# not raise permission error but simply return empty list
frappe.set_user("[email protected]")
self.assertEqual(frappe.get_list('Blog Post'), [])
frappe.set_user("Administrator")
# creates a custom docperm with just read access
# now any user can read any blog post (but other rights are limited to the blog post owner)
add_permission('Blog Post', 'Blogger')
frappe.clear_cache(doctype="Blog Post")
frappe.delete_doc('Blog Post', '-test-blog-post-title')
frappe.set_user("[email protected]")
doc = frappe.get_doc({
"doctype": "Blog Post",
"blog_category": "-test-blog-category",
"blogger": "_Test Blogger 1",
"title": "_Test Blog Post Title",
"content": "_Test Blog Post Content"
})
doc.insert()
frappe.set_user("[email protected]")
doc = frappe.get_doc(doc.doctype, doc.name)
self.assertTrue(doc.has_permission("read"))
self.assertFalse(doc.has_permission("write"))
self.assertFalse(doc.has_permission("delete"))
# check if owner of the doc has the access that is available only for the owner of the doc
frappe.set_user("[email protected]")
doc = frappe.get_doc(doc.doctype, doc.name)
self.assertTrue(doc.has_permission("read"))
self.assertTrue(doc.has_permission("write"))
self.assertTrue(doc.has_permission("delete"))
# delete the created doc
frappe.delete_doc('Blog Post', '-test-blog-post-title')
def test_if_owner_permission_on_getdoc(self):
update('Blog Post', 'Blogger', 0, 'if_owner', 1)
update('Blog Post', 'Blogger', 0, 'read', 1)
update('Blog Post', 'Blogger', 0, 'write', 1)
update('Blog Post', 'Blogger', 0, 'delete', 1)
frappe.clear_cache(doctype="Blog Post")
frappe.set_user("[email protected]")
doc = frappe.get_doc({
"doctype": "Blog Post",
"blog_category": "-test-blog-category",
"blogger": "_Test Blogger 1",
"title": "_Test Blog Post Title New",
"content": "_Test Blog Post Content"
})
doc.insert()
getdoc('Blog Post', doc.name)
doclist = [d.name for d in frappe.response.docs]
self.assertTrue(doc.name in doclist)
frappe.set_user("[email protected]")
self.assertRaises(frappe.PermissionError, getdoc, 'Blog Post', doc.name)
def test_if_owner_permission_on_delete(self):
update('Blog Post', 'Blogger', 0, 'if_owner', 1)
update('Blog Post', 'Blogger', 0, 'read', 1)
update('Blog Post', 'Blogger', 0, 'write', 1)
update('Blog Post', 'Blogger', 0, 'delete', 1)
# Remove delete perm
update('Blog Post', 'Website Manager', 0, 'delete', 0)
frappe.clear_cache(doctype="Blog Post")
frappe.set_user("[email protected]")
doc = frappe.get_doc({
"doctype": "Blog Post",
"blog_category": "-test-blog-category",
"blogger": "_Test Blogger 1",
"title": "_Test Blog Post Title New 1",
"content": "_Test Blog Post Content"
})
doc.insert()
getdoc('Blog Post', doc.name)
doclist = [d.name for d in frappe.response.docs]
self.assertTrue(doc.name in doclist)
frappe.set_user("[email protected]")
# Website Manager able to read
getdoc('Blog Post', doc.name)
doclist = [d.name for d in frappe.response.docs]
self.assertTrue(doc.name in doclist)
# Website Manager should not be able to delete
self.assertRaises(frappe.PermissionError, frappe.delete_doc, 'Blog Post', doc.name)
frappe.set_user("[email protected]")
frappe.delete_doc('Blog Post', '-test-blog-post-title-new-1')
update('Blog Post', 'Website Manager', 0, 'delete', 1)
def test_clear_user_permissions(self):
current_user = frappe.session.user
frappe.set_user('Administrator')
clear_user_permissions_for_doctype('Blog Category', '[email protected]')
clear_user_permissions_for_doctype('Blog Post', '[email protected]')
add_user_permission('Blog Post', '-test-blog-post-1', '[email protected]')
add_user_permission('Blog Post', '-test-blog-post-2', '[email protected]')
add_user_permission("Blog Category", '-test-blog-category-1', '[email protected]')
deleted_user_permission_count = clear_user_permissions('[email protected]', 'Blog Post')
self.assertEqual(deleted_user_permission_count, 2)
blog_post_user_permission_count = frappe.db.count('User Permission', filters={
'user': '[email protected]',
'allow': 'Blog Post'
})
self.assertEqual(blog_post_user_permission_count, 0)
blog_category_user_permission_count = frappe.db.count('User Permission', filters={
'user': '[email protected]',
'allow': 'Blog Category'
})
self.assertEqual(blog_category_user_permission_count, 1)
# reset the user
frappe.set_user(current_user)
# --- end of file: frappe/tests/test_permissions.py | repo: mhbu50/frappe | language: Python | license: mit | size: 19,796 bytes ---
import itertools
import os
import scipy
import struct
from pybrain.datasets import SupervisedDataSet
def labels(filename):
fp = file(filename)
magicnumber, length = struct.unpack('>ii', fp.read(8))
assert magicnumber in (2049, 2051), ("Not an MNIST file: %i" % magicnumber)
for _ in xrange(length):
label, = struct.unpack('B', fp.read(1))
yield label
def images(filename):
fp = file(filename)
chunk = fp.read(16)
magicnumber, length, numrows, numcols = struct.unpack('>iiii', chunk)
assert magicnumber in (2049, 2051), ("Not an MNIST file: %i" % magicnumber)
imagesize = numrows * numcols
for _ in xrange(length):
imagestring = fp.read(imagesize)
image = struct.unpack('B' * imagesize, imagestring)
yield scipy.array(image)
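# Format note (added for clarity): both readers follow the MNIST IDX layout --
# a big-endian header unpacked with struct ('>ii' = magic number + item count
# for label files, '>iiii' = magic, count, rows, cols for image files),
# followed by raw unsigned bytes, one per label or one per pixel.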
def flaggedArrayByIndex(idx, length):
arr = scipy.zeros(length)
arr[idx] = 1.
return arr
def makeMnistDataSets(path):
"""Return a pair consisting of two datasets, the first being the training
and the second being the test dataset."""
test = SupervisedDataSet(28 * 28, 10)
test_image_file = os.path.join(path, 't10k-images-idx3-ubyte')
test_label_file = os.path.join(path, 't10k-labels-idx1-ubyte')
test_images = images(test_image_file)
test_labels = (flaggedArrayByIndex(l, 10) for l in labels(test_label_file))
for image, label in itertools.izip(test_images, test_labels):
test.addSample(image, label)
train = SupervisedDataSet(28 * 28, 10)
train_image_file = os.path.join(path, 'train-images-idx3-ubyte')
train_label_file = os.path.join(path, 'train-labels-idx1-ubyte')
train_images = images(train_image_file)
train_labels = (flaggedArrayByIndex(l, 10) for l in labels(train_label_file))
for image, label in itertools.izip(train_images, train_labels):
train.addSample(image, label)
return train, test | hassaanm/stock-trading | src/pybrain/tools/datasets/mnist.py | Python | apache-2.0 | 1,906 |
# vi: ts=4 expandtab
#
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from builder import util
import json
class TarBallDownloader(object):
def __init__(self, config):
self.cache_dir = config.get('cache_dir') or 'cache'
self.where_from = config['from']
self.root_file = config.get('root_file')
def _check_cache(self):
cache_name = util.hash_blob(self.where_from, 'md5')
cache_name = cache_name[0:8]
full_pth = os.path.join(self.cache_dir, "%s.tar.gz" % (cache_name))
if os.path.isfile(full_pth):
return (full_pth, True)
return (full_pth, False)
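
    # Cache layout assumed by this check (names are illustrative): the 'from'
    # URL is md5-hashed and the first 8 hex chars name the tarball, so a hit
    # looks like cache/1a2b3c4d.tar.gz; download() below also writes a
    # cache/1a2b3c4d.tar.gz.json metadata file next to it.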
def _adjust_real_root(self, arch_path):
if self.root_file:
print("Oh you really meant %s, finding that file..." % (util.quote(self.root_file)))
# Extract and then copy over the right file...
with util.tempdir() as tdir:
arch_dir = os.path.join(tdir, 'archive')
os.makedirs(arch_dir)
util.subp(['tar', '-xzf', arch_path, '-C', arch_dir])
root_gz = util.find_file(self.root_file, arch_dir)
if not root_gz:
raise RuntimeError(("Needed file %r not found in"
" extracted contents of %s")
% (self.root_file, arch_path))
else:
util.copy(root_gz, arch_path)
return arch_path
def download(self):
(cache_pth, exists_there) = self._check_cache()
if exists_there:
return cache_pth
print("Downloading from: %s" % (util.quote(self.where_from)))
util.ensure_dirs([os.path.dirname(cache_pth)])
print("To: %s" % (util.quote(cache_pth)))
util.download_url(self.where_from, cache_pth)
try:
meta_js = {
'cached_on': util.time_rfc2822(),
'from': self.where_from,
'root_file': self.root_file,
}
util.write_file("%s.json" % (cache_pth),
"%s\n" % (json.dumps(meta_js, indent=4)))
return self._adjust_real_root(cache_pth)
except:
util.del_file(cache_pth)
raise
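
# Usage sketch (illustrative only; the surrounding build tooling normally
# assembles the config dict, and 'root_file' is optional):
#
#   downloader = TarBallDownloader({
#       'from': 'http://example.com/images/rootfs.tar.gz',
#       'cache_dir': 'cache',
#   })
#   path = downloader.download()  # cached tarball, e.g. cache/1a2b3c4d.tar.gz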
| yahoo/Image-Builder | builder/downloader/tar_ball.py | Python | apache-2.0 | 2,868 |
from gi.repository import Gtk
class ChannelWindow:
def __init__(self, sources, client_thread):
self.sources = sources
self.client_thread = client_thread
self.changes = {}
self.widgets = {}
self.gtkwin = Gtk.Window()
self.gtkwin.set_position(Gtk.WindowPosition.CENTER)
self.gtkwin.set_default_size(320, 240)
self.gtkwin.set_title("Configure Channel")
vbox = Gtk.VBox()
self.gtkwin.add(vbox)
self.sources_list = Gtk.VBox()
sources_list_scroll = Gtk.ScrolledWindow()
sources_list_scroll.add(self.sources_list)
sources_list_scroll.get_children()[0].set_shadow_type(Gtk.ShadowType.NONE)
sources_list_scroll.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
vbox.pack_start(sources_list_scroll, True, True, 0)
for uuid in self.sources:
self.widgets[uuid] = {}
source = self.sources[uuid]
frame = Gtk.Frame()
frame.set_label(source["username"])
self.sources_list.pack_start(frame, False, False, 0)
table = Gtk.Table(n_rows=3, n_columns=3)
frame.add(table)
hop_button = Gtk.RadioButton.new_with_label_from_widget(None, 'Hop')
if source["hop"] > 0:
hop_button.clicked()
hop_button.connect("clicked", self.on_change_mode, uuid, "hop")
hop_button.set_alignment(0,0)
table.attach(hop_button, 0, 1, 0, 1)
field = Gtk.SpinButton()
field.set_numeric(True)
field.set_max_length(3)
field.set_increments(1,10)
field.set_range(1,100)
field.set_value(source["velocity"])
if source["hop"] == 0:
field.set_sensitive(False)
self.widgets[uuid]["hop"] = field
field.connect("changed", self.on_change_value, uuid, "hop")
table.attach(field, 1, 2, 0, 1, xoptions=Gtk.AttachOptions.SHRINK)
label = Gtk.Label(label="rate")
label.set_justify(Gtk.Justification.LEFT)
label.set_alignment(0.1,0.5)
table.attach(label, 2, 3, 0, 1, xoptions=Gtk.AttachOptions.FILL)
lock_button = Gtk.RadioButton.new_with_label_from_widget(hop_button, "Lock")
if source["hop"] == 0:
lock_button.clicked()
lock_button.connect("clicked", self.on_change_mode, uuid, "lock")
            lock_button.set_alignment(0,0)
table.attach(lock_button, 0, 1, 1, 2)
field = Gtk.SpinButton()
field.set_numeric(True)
field.set_max_length(3)
field.set_increments(1,10)
field.set_range(1,100)
if source["hop"] == 0:
field.set_value(source["channel"])
else:
field.set_value(1)
field.set_sensitive(False)
self.widgets[uuid]["lock"] = field
field.connect("changed", self.on_change_value, uuid, "lock")
table.attach(field, 1, 2, 1, 2, xoptions=Gtk.AttachOptions.SHRINK)
label = Gtk.Label(label="channel")
label.set_justify(Gtk.Justification.FILL)
label.set_alignment(0.1,0.5)
table.attach(label, 2, 3, 1, 2, xoptions=Gtk.AttachOptions.FILL)
button_box = Gtk.HButtonBox()
vbox.pack_end(button_box, False, False, 0)
cancel_button = Gtk.Button.new_with_mnemonic('_Cancel')
cancel_button.connect("clicked", self.on_cancel)
button_box.add(cancel_button)
apply_button = Gtk.Button.new_with_mnemonic('_Apply')
apply_button.connect("clicked", self.on_apply)
button_box.add(apply_button)
self.gtkwin.show_all()
def on_change_mode(self, widget, uuid, mode):
if not widget.get_active():
return
self.changes[uuid] = mode
self.widgets[uuid][mode].set_sensitive(True)
if mode == "lock":
self.widgets[uuid]["hop"].set_sensitive(False)
else:
self.widgets[uuid]["lock"].set_sensitive(False)
def on_change_value(self, widget, uuid, mode):
self.changes[uuid] = mode
def on_apply(self, widget):
for uuid in self.changes:
mode = self.changes[uuid]
value = int(self.widgets[uuid][mode].get_value())
self.client_thread.client.set_channel(uuid, mode, value)
self.gtkwin.destroy()
def on_cancel(self, widget):
self.gtkwin.destroy()
| krzotr/kismon | kismon/windows/channel.py | Python | bsd-3-clause | 3,856 |
import pf
import epf
####################
f = epf.EuclidField(
(50, 50), # width x height
(7, 14), # goal
[ # obstacles
(9, 5),
(10, 4),
(10, 5),
(10, 6),
(11, 5),
(9, 15),
(10, 14),
(10, 15),
(10, 16),
(11, 15),
(39, 10),
(40, 9),
(40, 10),
(40, 11),
(41, 10),
]
)
src = (46, 5)
####################
path = pf.find_path(f, src, f.dst, 100)
im = pf.field_to_image(f)
pf.draw_path(im, path)
im.save('out.png')
| z-rui/pf | example.py | Python | bsd-2-clause | 564 |
""" User factory """
import factory
from smserver import models
from test.factories import base
from test.factories.room_factory import RoomFactory
class UserFactory(base.BaseFactory):
""" Classic user name """
class Meta(base.BaseMeta):
model = models.User
name = factory.Sequence(lambda n: "User %s" % (n+1))
rank = 1
stepmania_version = "123"
@classmethod
def _after_postgeneration(cls, obj, _create, _results):
obj._room_level = {}
class AdminFactory(UserFactory):
""" Create an Admin user """
rank = 10
class PrivilegeFactory(base.BaseFactory):
""" Classic user name """
class Meta(base.BaseMeta):
model = models.Privilege
level = 1
room = factory.SubFactory(RoomFactory)
user = factory.SubFactory(UserFactory)
class UserWithRoomFactory(UserFactory):
""" User with a new room """
room = factory.SubFactory(RoomFactory)
def user_with_room_privilege(level=1, **kwargs):
""" Return a User with privileges for a room """
user = UserWithRoomFactory(**kwargs)
PrivilegeFactory(user=user, room=user.room, level=level)
return user
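
# Usage sketch (assumes the test database/session wiring provided by the
# project's base factories):
#
#   operator = user_with_room_privilege(level=5, name="Operator 1")
#   operator.room          # room created by UserWithRoomFactory
#   operator._room_level   # {} -- reset in UserFactory._after_postgeneration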
| Nickito12/stepmania-server | test/factories/user_factory.py | Python | mit | 1,150 |
"""Bika's browser views are based on this one, for a nice set of utilities.
"""
from dependencies.dependency import DateTime, safelocaltime
from dependencies.dependency import DateTimeError
from dependencies.dependency import getToolByName
from dependencies.dependency import ClassSecurityInfo
from dependencies.dependency import ulocalized_time as _ut
from dependencies.dependency import BrowserView
from lims import logger
from dependencies.dependency import Lazy as lazy_property
from dependencies.dependency import translate
from time import strptime as _strptime
def strptime(context, value):
"""given a string, this function tries to return a DateTime.DateTime object
with the date formats from i18n translations
"""
val = ""
for fmt in ['date_format_long', 'date_format_short']:
fmtstr = context.translate(fmt, domain='bika', mapping={})
fmtstr = fmtstr.replace(r"${", '%').replace('}', '')
try:
val = _strptime(value, fmtstr)
except ValueError:
continue
try:
val = DateTime(*list(val)[:-6])
except DateTimeError:
val = ""
        if val and val.timezoneNaive():  # val may be "" if DateTime() failed above
# Use local timezone for tz naive strings
# see http://dev.plone.org/plone/ticket/10141
zone = val.localZone(safelocaltime(val.timeTime()))
parts = val.parts()[:-1] + (zone,)
val = DateTime(*parts)
break
else:
logger.warning("DateTimeField failed to format date "
"string '%s' with '%s'" % (value, fmtstr))
return val
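
# Worked example (format strings are illustrative -- the real ones come from
# the 'bika' translation catalog): if 'date_format_short' translates to
# '${Y}-${m}-${d}', it is rewritten to '%Y-%m-%d', so strptime(context,
# '2015-01-12') returns a timezone-aware DateTime for that day.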
def ulocalized_time(time, long_format=None, time_only=None, context=None,
request=None):
# if time is a string, we'll try pass it through strptime with the various
# formats defined.
if isinstance(time, basestring):
time = strptime(context, time)
if time:
# no printing times if they were not specified in inputs
if time.second() + time.minute() + time.hour() == 0:
long_format = False
time_str = _ut(time, long_format, time_only, context,
'bika', request)
return time_str
class BrowserView(BrowserView):
security = ClassSecurityInfo()
logger = logger
def __init__(self, context, request):
super(BrowserView, self).__init__(context, request)
security.declarePublic('ulocalized_time')
def ulocalized_time(self, time, long_format=None, time_only=None):
return ulocalized_time(time, long_format, time_only,
context=self.context, request=self.request)
@lazy_property
def portal(self):
return getToolByName(self.context, 'portal_url').getPortalObject()
@lazy_property
def portal_url(self):
return self.portal.absolute_url().split("?")[0]
@lazy_property
def portal_catalog(self):
return getToolByName(self.context, 'portal_catalog')
@lazy_property
def reference_catalog(self):
return getToolByName(self.context, 'reference_catalog')
@lazy_property
def bika_analysis_catalog(self):
return getToolByName(self.context, 'bika_analysis_catalog')
@lazy_property
def bika_setup_catalog(self):
return getToolByName(self.context, 'bika_setup_catalog')
@lazy_property
def bika_catalog(self):
return getToolByName(self.context, 'bika_catalog')
@lazy_property
def portal_membership(self):
return getToolByName(self.context, 'portal_membership')
@lazy_property
def portal_groups(self):
return getToolByName(self.context, 'portal_groups')
@lazy_property
def portal_workflow(self):
return getToolByName(self.context, 'portal_workflow')
    def checkPermission(self, perm, obj):
return self.portal_membership.checkPermission(perm, obj)
def user_fullname(self, userid):
member = self.portal_membership.getMemberById(userid)
if member is None:
return userid
member_fullname = member.getProperty('fullname')
c = self.portal_catalog(portal_type='Contact', getUsername=userid)
contact_fullname = c[0].getObject().getFullname() if c else None
return contact_fullname or member_fullname or userid
def user_email(self, userid):
member = self.portal_membership.getMemberById(userid)
if member is None:
return userid
member_email = member.getProperty('email')
c = self.portal_catalog(portal_type='Contact', getUsername=userid)
contact_email = c[0].getObject().getEmailAddress() if c else None
return contact_email or member_email or ''
def python_date_format(self, long_format=None, time_only=False):
"""This convert bika domain date format msgstrs to Python
strftime format strings, by the same rules as ulocalized_time.
XXX i18nl10n.py may change, and that is where this code is taken from.
"""
# get msgid
msgid = long_format and 'date_format_long' or 'date_format_short'
if time_only:
msgid = 'time_format'
# get the formatstring
formatstring = translate(msgid, domain='bika', mapping={},
context=self.request)
if formatstring is None or formatstring.startswith(
'date_') or formatstring.startswith('time_'):
self.logger.error("bika/%s/%s could not be translated" %
(self.request.get('LANGUAGE'), msgid))
# msg catalog was not able to translate this msgids
# use default setting
properties = getToolByName(self.context,
'portal_properties').site_properties
if long_format:
format = properties.localLongTimeFormat
else:
if time_only:
format = properties.localTimeOnlyFormat
else:
format = properties.localTimeFormat
return format
return formatstring.replace(r"${", '%').replace('}', '')
@lazy_property
def date_format_long(self):
fmt = self.python_date_format(long_format=1)
if fmt == "date_format_long":
fmt = "%Y-%m-%d %I:%M %p"
return fmt
@lazy_property
def date_format_short(self):
fmt = self.python_date_format()
if fmt == "date_format_short":
fmt = "%Y-%m-%d"
return fmt
@lazy_property
def time_format(self):
fmt = self.python_date_format(time_only=True)
if fmt == "time_format":
fmt = "%I:%M %p"
return fmt
| yasir1brahim/OLiMS | lims/browser/__init__.py | Python | agpl-3.0 | 6,746 |
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
import numpy as np
INCLUDE_BLAS = '/usr/include/atlas'
LIB_BLAS = '/usr/lib/atlas-base/atlas'
LIBS = 'blas'
# for windows
#INCLUDE_BLAS = 'C:\OpenBLAS\include'
#LIB_BLAS = 'C:\OpenBLAS\lib'
#LIBS = 'libopenblas'
# needs
# - numpy/arrayobject...
# - cblas.h
#
# setup(
# #ext_modules=cythonize([Extension("enet_solver",["enet_solver.pyx"],
# # include_dirs=[np.get_include(), 'C:\Program Files (x86)\Intel\Composer XE\mkl\include'],
# # library_dirs=['C:\Program Files (x86)\Intel\Composer XE\mkl\lib\intel64'],
# # extra_compiler_args=['-DMKL_ILP64'],
# # libraries=["mkl_intel_ilp64", "mkl_core", "mkl_sequential"])
# # ])
# #ext_modules=cythonize([Extension("enet_solver",["enet_solver.pyx"],
# # include_dirs=[np.get_include(), 'C:\mkl\include'],
# # library_dirs=['C:\mkl\lib\intel64'],
# # extra_compiler_args=['-DMKL_ILP64'],
# # libraries=["mkl_intel_ilp64", "mkl_core", "mkl_sequential"])
# # ])
# #ext_modules=cythonize([Extension("enet_solver",["enet_solver.pyx"],
# # include_dirs=[np.get_include(), 'C:\mkl\include'])
# # ])
# ext_modules=cythonize([Extension("enet_solver",["enet_solver.pyx"],
# include_dirs=[np.get_include(), 'C:\OpenBLAS\include'],
# library_dirs=['C:\OpenBLAS\lib'],
# libraries=["libopenblas"])
# ])
# )
setup(
ext_modules=cythonize([Extension("enet_solver",["enet_solver.pyx"],
include_dirs=[np.get_include(), INCLUDE_BLAS],
library_dirs=[LIB_BLAS],
libraries=[LIBS])
])
)
| nicococo/AdaScreen | adascreen/setup.py | Python | mit | 1,699 |
from collections import defaultdict
from changes.api.serializer import Crumbler, register
from changes.models.jobplan import JobPlan
from changes.models.jobstep import JobStep
@register(JobStep)
class JobStepCrumbler(Crumbler):
def get_extra_attrs_from_db(self, item_list):
result = {}
job_id_to_steps = defaultdict(list)
for step in item_list:
job_id_to_steps[step.job_id].append(step)
if job_id_to_steps:
for jobplan in JobPlan.query.filter(JobPlan.job_id.in_(job_id_to_steps.keys())):
for step in job_id_to_steps[jobplan.job_id]:
result[step] = {'jobplan': jobplan}
# In theory, every JobStep should have a JobPlan, but we don't need to assume that
# or enforce it in this method, and this method does need to be sure that each
# step has an entry in result, so we make sure of that here.
for step in item_list:
if step not in result:
result[step] = {'jobplan': None}
return result
def crumble(self, instance, attrs):
jobplan = attrs['jobplan']
return {
'id': instance.id.hex,
'name': instance.label,
'phase': {
'id': instance.phase_id.hex,
},
'data': dict(instance.data),
'result': instance.result,
'status': instance.status,
'image': jobplan and jobplan.snapshot_image,
'node': instance.node,
'duration': instance.duration,
'replacement_id': instance.replacement_id,
'dateCreated': instance.date_created,
'dateStarted': instance.date_started,
'dateFinished': instance.date_finished,
}
| dropbox/changes | changes/api/serializer/models/jobstep.py | Python | apache-2.0 | 1,772 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pigame.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| MoonCheesez/stack | PiGame/pigame/manage.py | Python | mit | 804 |
# coding=utf-8
import os
import hashlib
import logging
from google.appengine.ext import db
from google.appengine.api import memcache
from v2ex.babel.ext.cookies import Cookies
def CheckAuth(handler):
ip = GetIP(handler)
cookies = handler.request.cookies
if 'auth' in cookies:
auth = cookies['auth']
member_num = memcache.get(auth)
if (member_num > 0):
member = memcache.get('Member_' + str(member_num))
if member is None:
q = db.GqlQuery("SELECT * FROM Member WHERE num = :1", member_num)
if q.count() == 1:
member = q[0]
memcache.set(auth, member.num)
memcache.set('Member_' + str(member_num), member)
else:
member = False
if member:
member.ip = ip
return member
else:
q = db.GqlQuery("SELECT * FROM Member WHERE auth = :1", auth)
if (q.count() == 1):
member_num = q[0].num
member = q[0]
memcache.set(auth, member_num)
memcache.set('Member_' + str(member_num), member)
member.ip = ip
return member
else:
return False
else:
return False
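
# Caching scheme used above (key names follow the code, values are examples):
# the 'auth' cookie value maps to a member number in memcache, and
# 'Member_<num>' maps to the Member entity itself, so a warm cache answers
# CheckAuth without touching the datastore.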
def DoAuth(request, destination, message = None):
if message != None:
request.session['message'] = message
else:
request.session['message'] = u'请首先登入或注册'
return request.redirect('/signin?destination=' + destination)
def GetIP(handler):
if 'X-Real-IP' in handler.request.headers:
return handler.request.headers['X-Real-IP']
else:
return handler.request.remote_addr | selboo/v2ex | v2ex/babel/security/__init__.py | Python | bsd-3-clause | 1,770 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'xml/login.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_LoginDialog(object):
def setupUi(self, LoginDialog):
LoginDialog.setObjectName(_fromUtf8("LoginDialog"))
LoginDialog.setWindowModality(QtCore.Qt.ApplicationModal)
LoginDialog.resize(332, 192)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(LoginDialog.sizePolicy().hasHeightForWidth())
LoginDialog.setSizePolicy(sizePolicy)
self.verticalLayout = QtGui.QVBoxLayout(LoginDialog)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.label = QtGui.QLabel(LoginDialog)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.uid_field = QtGui.QLineEdit(LoginDialog)
self.uid_field.setEchoMode(QtGui.QLineEdit.Normal)
self.uid_field.setObjectName(_fromUtf8("uid_field"))
self.gridLayout.addWidget(self.uid_field, 0, 1, 1, 1)
self.label_2 = QtGui.QLabel(LoginDialog)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout.addWidget(self.label_2, 1, 0, 1, 1)
self.password_field = QtGui.QLineEdit(LoginDialog)
self.password_field.setEchoMode(QtGui.QLineEdit.Password)
self.password_field.setObjectName(_fromUtf8("password_field"))
self.gridLayout.addWidget(self.password_field, 1, 1, 1, 1)
self.private_computer_check = QtGui.QCheckBox(LoginDialog)
self.private_computer_check.setObjectName(_fromUtf8("private_computer_check"))
self.gridLayout.addWidget(self.private_computer_check, 2, 1, 1, 1)
self.verticalLayout.addLayout(self.gridLayout)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.login_btn = QtGui.QPushButton(LoginDialog)
self.login_btn.setObjectName(_fromUtf8("login_btn"))
self.horizontalLayout.addWidget(self.login_btn)
self.exit_btn = QtGui.QPushButton(LoginDialog)
self.exit_btn.setObjectName(_fromUtf8("exit_btn"))
self.horizontalLayout.addWidget(self.exit_btn)
self.verticalLayout.addLayout(self.horizontalLayout)
self.retranslateUi(LoginDialog)
QtCore.QObject.connect(self.login_btn, QtCore.SIGNAL(_fromUtf8("clicked()")), LoginDialog.on_login)
QtCore.QObject.connect(self.exit_btn, QtCore.SIGNAL(_fromUtf8("clicked()")), LoginDialog.on_exit)
QtCore.QMetaObject.connectSlotsByName(LoginDialog)
def retranslateUi(self, LoginDialog):
LoginDialog.setWindowTitle(_translate("LoginDialog", "Login To Mozilla", None))
self.label.setText(_translate("LoginDialog", "Login:", None))
self.label_2.setText(_translate("LoginDialog", "Password:", None))
self.private_computer_check.setText(_translate("LoginDialog", "Private computer", None))
self.login_btn.setText(_translate("LoginDialog", "Login", None))
self.exit_btn.setText(_translate("LoginDialog", "Close", None))
| isbm/pybug | ui/_login.py | Python | mit | 4,202 |
import sys
from django import http
from django.core import signals
from django.utils.encoding import force_unicode
from django.utils.importlib import import_module
from django.utils.log import getLogger
logger = getLogger('django.request')
class BaseHandler(object):
# Changes that are always applied to a response (in this order).
response_fixes = [
http.fix_location_header,
http.conditional_content_removal,
http.fix_IE_for_attach,
http.fix_IE_for_vary,
]
def __init__(self):
self._request_middleware = self._view_middleware = self._template_response_middleware = self._response_middleware = self._exception_middleware = None
def load_middleware(self):
"""
Populate middleware lists from settings.MIDDLEWARE_CLASSES.
Must be called after the environment is fixed (see __call__ in subclasses).
"""
from django.conf import settings
from django.core import exceptions
self._view_middleware = []
self._template_response_middleware = []
self._response_middleware = []
self._exception_middleware = []
request_middleware = []
for middleware_path in settings.MIDDLEWARE_CLASSES:
try:
mw_module, mw_classname = middleware_path.rsplit('.', 1)
except ValueError:
raise exceptions.ImproperlyConfigured('%s isn\'t a middleware module' % middleware_path)
try:
mod = import_module(mw_module)
except ImportError, e:
raise exceptions.ImproperlyConfigured('Error importing middleware %s: "%s"' % (mw_module, e))
try:
mw_class = getattr(mod, mw_classname)
except AttributeError:
raise exceptions.ImproperlyConfigured('Middleware module "%s" does not define a "%s" class' % (mw_module, mw_classname))
try:
mw_instance = mw_class()
except exceptions.MiddlewareNotUsed:
continue
if hasattr(mw_instance, 'process_request'):
request_middleware.append(mw_instance.process_request)
if hasattr(mw_instance, 'process_view'):
self._view_middleware.append(mw_instance.process_view)
if hasattr(mw_instance, 'process_template_response'):
self._template_response_middleware.insert(0, mw_instance.process_template_response)
if hasattr(mw_instance, 'process_response'):
self._response_middleware.insert(0, mw_instance.process_response)
if hasattr(mw_instance, 'process_exception'):
self._exception_middleware.insert(0, mw_instance.process_exception)
# We only assign to this when initialization is complete as it is used
# as a flag for initialization being complete.
self._request_middleware = request_middleware
def get_response(self, request):
"Returns an HttpResponse object for the given HttpRequest"
from django.core import exceptions, urlresolvers
from django.conf import settings
try:
# Setup default url resolver for this thread, this code is outside
# the try/except so we don't get a spurious "unbound local
# variable" exception in the event an exception is raised before
# resolver is set
urlconf = settings.ROOT_URLCONF
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
try:
response = None
# Apply request middleware
for middleware_method in self._request_middleware:
response = middleware_method(request)
if response:
break
if response is None:
if hasattr(request, "urlconf"):
# Reset url resolver with a custom urlconf.
urlconf = request.urlconf
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
callback, callback_args, callback_kwargs = resolver.resolve(
request.path_info)
# Apply view middleware
for middleware_method in self._view_middleware:
response = middleware_method(request, callback, callback_args, callback_kwargs)
if response:
break
if response is None:
try:
response = callback(request, *callback_args, **callback_kwargs)
except Exception, e:
# If the view raised an exception, run it through exception
# middleware, and if the exception middleware returns a
# response, use that. Otherwise, reraise the exception.
for middleware_method in self._exception_middleware:
response = middleware_method(request, e)
if response:
break
if response is None:
raise
# Complain if the view returned None (a common error).
if response is None:
try:
view_name = callback.func_name # If it's a function
except AttributeError:
view_name = callback.__class__.__name__ + '.__call__' # If it's a class
raise ValueError("The view %s.%s didn't return an HttpResponse object." % (callback.__module__, view_name))
# If the response supports deferred rendering, apply template
# response middleware and the render the response
if hasattr(response, 'render') and callable(response.render):
for middleware_method in self._template_response_middleware:
response = middleware_method(request, response)
response = response.render()
except http.Http404, e:
logger.warning('Not Found: %s', request.path,
extra={
'status_code': 404,
'request': request
})
if settings.DEBUG:
from django.views import debug
response = debug.technical_404_response(request, e)
else:
try:
callback, param_dict = resolver.resolve404()
response = callback(request, **param_dict)
except:
try:
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
finally:
signals.got_request_exception.send(sender=self.__class__, request=request)
except exceptions.PermissionDenied:
logger.warning(
'Forbidden (Permission denied): %s', request.path,
extra={
'status_code': 403,
'request': request
})
try:
callback, param_dict = resolver.resolve403()
response = callback(request, **param_dict)
except:
try:
response = self.handle_uncaught_exception(request,
resolver, sys.exc_info())
finally:
signals.got_request_exception.send(
sender=self.__class__, request=request)
except SystemExit:
# Allow sys.exit() to actually exit. See tickets #1023 and #4701
raise
except: # Handle everything else, including SuspiciousOperation, etc.
# Get the exception info now, in case another exception is thrown later.
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
finally:
# Reset URLconf for this thread on the way out for complete
# isolation of request.urlconf
urlresolvers.set_urlconf(None)
try:
# Apply response middleware, regardless of the response
for middleware_method in self._response_middleware:
response = middleware_method(request, response)
response = self.apply_response_fixes(request, response)
except: # Any exception should be gathered and handled
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
return response
def handle_uncaught_exception(self, request, resolver, exc_info):
"""
Processing for any otherwise uncaught exceptions (those that will
generate HTTP 500 responses). Can be overridden by subclasses who want
customised 500 handling.
Be *very* careful when overriding this because the error could be
caused by anything, so assuming something like the database is always
available would be an error.
"""
from django.conf import settings
if settings.DEBUG_PROPAGATE_EXCEPTIONS:
raise
logger.error('Internal Server Error: %s', request.path,
exc_info=exc_info,
extra={
'status_code': 500,
'request': request
}
)
if settings.DEBUG:
from django.views import debug
return debug.technical_500_response(request, *exc_info)
# If Http500 handler is not installed, re-raise last exception
if resolver.urlconf_module is None:
raise exc_info[1], None, exc_info[2]
# Return an HttpResponse that displays a friendly error message.
callback, param_dict = resolver.resolve500()
return callback(request, **param_dict)
def apply_response_fixes(self, request, response):
"""
Applies each of the functions in self.response_fixes to the request and
response, modifying the response in the process. Returns the new
response.
"""
for func in self.response_fixes:
response = func(request, response)
return response
def get_script_name(environ):
"""
Returns the equivalent of the HTTP request's SCRIPT_NAME environment
variable. If Apache mod_rewrite has been used, returns what would have been
the script name prior to any rewriting (so it's the script name as seen
from the client's perspective), unless the FORCE_SCRIPT_NAME setting is
set (to anything).
"""
from django.conf import settings
if settings.FORCE_SCRIPT_NAME is not None:
return force_unicode(settings.FORCE_SCRIPT_NAME)
# If Apache's mod_rewrite had a whack at the URL, Apache set either
# SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
# rewrites. Unfortunately not every Web server (lighttpd!) passes this
# information through all the time, so FORCE_SCRIPT_NAME, above, is still
# needed.
script_url = environ.get('SCRIPT_URL', u'')
if not script_url:
script_url = environ.get('REDIRECT_URL', u'')
if script_url:
return force_unicode(script_url[:-len(environ.get('PATH_INFO', ''))])
return force_unicode(environ.get('SCRIPT_NAME', u''))
| thiriel/maps | venv/lib/python2.7/site-packages/django/core/handlers/base.py | Python | bsd-3-clause | 11,887 |
#import matplotlib
#matplotlib.use("Agg")
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.lines as lines
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
from pprint import pprint
import sys
import json
from random import randint
def main():
if len(sys.argv) < 2:
print 'you need to pass a filename (without the .json extension) as an argument'
return
fileName = './public/exports/sets/' + sys.argv[1] + '.json';
with open(fileName) as file:
data = json.load(file)
phrases = data["phrases"]
scatter_proxies=[]
labels=[]
indices = np.arange(len(phrases))#[0,1,2]
#indices = [0,2,3]
colors = iter(cm.rainbow(np.linspace(0, 1, len(indices))))
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
#for p in phrases:
for i in indices:
c = next(colors)
p = phrases[i]
a = np.array(p["data"])
ax.scatter(a[6::9], a[7::9], a[8::9], color=c, marker="o")
proxy = lines.Line2D([0],[0], linestyle="solid", c=c)
scatter_proxies.append(proxy)
labels.append(p["label"])
ax.legend(scatter_proxies, labels, numpoints=1)
plt.show()
#plt.savefig("lifhjioehb.png")
main() | Ircam-RnD/xmm-soundworks-template | bin/plotSet.py | Python | gpl-3.0 | 1,148 |
#!/usr/bin/env python3
from argparse import ArgumentParser, RawTextHelpFormatter
from datetime import datetime
import sys, platform
parser = ArgumentParser(description="""
Generate .geo file for Gmsh to generate a mesh on the unit square.
The settings are suitable for solving the Stokes flow problem in a
lid-driven cavity, with refinement in the lower corners so that we
can find Moffatt eddies.""",
formatter_class=RawTextHelpFormatter)
parser.add_argument('outname', type=str, default='', metavar='OUTNAME',
help='output file name ending with .geo')
parser.add_argument('-cl', type=float, default=0.1, metavar='CL',
help='characteristic length for most of boundary (default=0.1)')
parser.add_argument('-cornerrefine', type=float, default=100, metavar='X',
help='ratio of refinement in corners (default=100)')
parser.add_argument('-quiet', action='store_true', default=False,
help='suppress all stdout')
parser.add_argument('-usenames', action='store_true', default=False,
help='put names "dirichlet","neumann","interior" in PhysicalNames() ... used only for running through c/ch10/vis/petsc2tikz.py')
args = parser.parse_args()
if not args.quiet:
print('writing lidbox domain geometry to file %s ...' % args.outname)
geo = open(args.outname, 'w')
firstline = '// box domain geometry for lid-driven cavity example\n'
usagemessage = '''// usage to generate lidbox.msh for input in stokes.py:
// $ gmsh -2 %s\n\n''' % args.outname
meat = '''
Point(1) = {0.0,1.0,0,cl};
Point(2) = {0.0,trans,0,cl};
Point(3) = {0.0,0.0,0,cleddy};
Point(4) = {trans,0.0,0,cl};
Point(5) = {1.0-trans,0.0,0,cl};
Point(6) = {1.0,0.0,0,cleddy};
Point(7) = {1.0,trans,0,cl};
Point(8) = {1.0,1.0,0,cl};
Line(10) = {1,2};
Line(11) = {2,3};
Line(12) = {3,4};
Line(13) = {4,5};
Line(14) = {5,6};
Line(15) = {6,7};
Line(16) = {7,8};
Line(17) = {8,1};
Line Loop(20) = {10,11,12,13,14,15,16,17};
Plane Surface(30) = {20};\n'''
physnums = '''
Physical Line(40) = {17}; // lid
Physical Line(41) = {10,11,12,13,14,15,16}; // other
Physical Surface(50) = {30}; // interior\n'''
physnames = '''
Physical Line("dirichlet") = {10,11,12,13,14,15,16,17};
Physical Line("neumann") = {};
Physical Surface("interior") = {30};\n'''
geo.write(firstline)
now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
commandline = " ".join(sys.argv[:])
geo.write('// created %s by %s using command\n// %s\n'
% (now,platform.node(),commandline) ) # header records creation info
geo.write(usagemessage)
geo.write('cl = %f; // characteristic length\n' % args.cl)
geo.write('cleddy = %f; // characteristic length for corners (%g times smaller)\n' \
% (args.cl/args.cornerrefine,args.cornerrefine))
geo.write('trans = 0.4; // location of transition\n')
geo.write(meat)
if args.usenames:
geo.write(physnames)
else:
geo.write(physnums)
geo.close()
| bueler/p4pdes | python/ch14/lidbox.py | Python | mit | 2,960 |
import requests
import json
from requests.exceptions import ConnectionError
from django.shortcuts import render
from django.http import JsonResponse, HttpResponse, Http404
from django.views.decorators.csrf import csrf_exempt, csrf_protect
from tworaven_apps.rook_services.models import TestCallCapture
from tworaven_apps.rook_services.rook_app_info import RookAppInfo
from datetime import datetime as dt
ROOK_ZESSIONID = 'zsessionid'
@csrf_exempt
def view_rook_route(request, app_name_in_url):
"""Route TwoRavens calls to Rook
orig: TwoRavens -> Rook
view: TwoRavens -> Django 2ravens -> Rook
"""
django_session_key = request.session._get_or_create_session_key()
print('django_session_key', django_session_key)
# get the app info
#
rook_app_info = RookAppInfo.get_appinfo_from_url(app_name_in_url)
if rook_app_info is None:
raise Http404('unknown rook app: %s' % app_name_in_url)
# look for the "solaJSON" variable in the POST
#
if rook_app_info.is_health_check():
raven_data_text = 'healthcheck'
elif (not request.POST) or (not 'solaJSON' in request.POST):
return JsonResponse(dict(status="ERROR", message="solaJSON key not found"))
else:
raven_data_text = request.POST['solaJSON']
# Retrieve post data and attempt to insert django session id
# (if none exists)
#
blank_session_str = '%s":""' % ROOK_ZESSIONID
if raven_data_text.find(blank_session_str) > -1:
# was converting to JSON, but now just simple text substitution
#
updated_session_str = '%s":"%s"' % (ROOK_ZESSIONID, django_session_key)
raven_data_text = raven_data_text.replace(blank_session_str, updated_session_str)
app_data = dict(solaJSON=raven_data_text)
rook_app_url = rook_app_info.get_rook_server_url()
# Begin object to capture request
#
call_capture = None
if rook_app_info.record_this_call():
call_capture = TestCallCapture(\
app_name=rook_app_info.name,
outgoing_url=rook_app_url,
session_id=django_session_key,
request=raven_data_text)
# Call R services
#
try:
r = requests.post(rook_app_url,
data=app_data)
except ConnectionError:
err_msg = 'R Server not responding: %s' % rook_app_url
        if call_capture is not None:
            call_capture.add_error_message(err_msg)
            call_capture.save()
resp_dict = dict(message=err_msg)
return JsonResponse(resp_dict)
# Save request result
#
if rook_app_info.record_this_call():
if r.status_code == 200:
call_capture.add_success_message(r.text, r.status_code)
else:
call_capture.add_error_message(r.text, r.status_code)
call_capture.save()
# Return the response to the user
#
print(40 * '=')
print(r.text)
#d = r.json()
#print(json.dumps(d, indent=4))
print(r.status_code)
return HttpResponse(r.text)
NUM_CLICKS_KEY = 'NUM_CLICKS_KEY'
@csrf_exempt
def view_rp_test(request):
d = {'name' : 'ta2'}
d = dict(name='ta2',
status_code=55)
return JsonResponse(d)
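
    # NOTE: everything below is unreachable because of the early return above;
    # it is kept as a demo of counting clicks in the Django session.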
# session test for num clicks
#
num_clicks = request.session.get(NUM_CLICKS_KEY, 0)
num_clicks += 1
request.session[NUM_CLICKS_KEY] = num_clicks
print('num_clicks: ', num_clicks)
print('request.session.session_key: ', request.session.session_key)
node_length = 'not sent'
if request.POST:
node_length = request.POST.get('nodeLength', 'not set by client (err?)')
if request.user.is_authenticated:
print('authenticated')
# Do something for authenticated users.
else:
print('anonymous')
user_msg = ('\nnode length: {1}. hello ({0})').format(\
dt.now(),
node_length)
d = dict(status='ok',
data=dict(\
num_clicks=num_clicks,
node_length=node_length,
server_time='%s' % dt.now()),
message=user_msg)
return JsonResponse(d)
# example of incoming POST from TwoRavens
"""
<QueryDict: {'solaJSON': ['{"zdata":"fearonLaitinData.tab","zedges":[["country","ccode"],["ccode","cname"]],"ztime":[],"znom":["country"],"zcross":[],"zmodel":"","zvars":["ccode","country","cname"],"zdv":["cname"],"zdataurl":"","zsubset":[["",""],[],[]],"zsetx":[["",""],["",""],["",""]],"zmodelcount":0,"zplot":[],"zsessionid":"","zdatacite":"Dataverse, Admin, 2015, \\"Smoke test\\", http://dx.doi.org/10.5072/FK2/WNCZ16, Root Dataverse, V1 [UNF:6:iuFERYJSwTaovVDvwBwsxQ==]","zmetadataurl":"http://127.0.0.1:8080/static/data/fearonLaitin.xml","zusername":"rohit","callHistory":[],"allVars":["durest","aim","casename","ended","ethwar","waryrs","pop","lpop","polity2","gdpen","gdptype","gdpenl","lgdpenl1","lpopl1","region"]}']}>
"""
"""
try:
# try to convert text to JSON
#
raven_data_json = json.loads(request.POST['solaJSON'])
# Doublecheck that the ROOK_ZESSIONID is blank
#
if raven_data_json.get(ROOK_ZESSIONID, None) == '':
#print('blank session id....')
# blank id found, subsitute the django session key
#
raven_data_json[ROOK_ZESSIONID] = django_session_key'
#
#
raven_data_text = json.dumps(raven_data_json)
"""
| vjdorazio/TwoRavens | tworaven_apps/rook_services/views.py | Python | bsd-3-clause | 5,412 |
import logging
import argparse
import realtimewebui.config
import os
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
parser = argparse.ArgumentParser()
parser.add_argument("--webPort", type=int, default=6001)
parser.add_argument("--webSocketPort", type=int, default=6002)
parser.add_argument("--realtimewebuiRoot")
parser.add_argument("--dashboardRoot")
parser.add_argument("--localhostRackattackProvider", action='store_true')
parser.add_argument("--rackattackInstances", type=str)
args = parser.parse_args()
if args.realtimewebuiRoot is not None:
realtimewebui.config.REALTIMEWEBUI_ROOT_DIRECTORY = args.realtimewebuiRoot
if args.localhostRackattackProvider:
dashboardSources = [dict(name="Local", host="localhost")]
elif args.rackattackInstances:
dashboardSources = [dict(zip(("name", "host"), provider.split(":")))
for provider in args.rackattackInstances.split(',')]
else:
raise Exception("Please define one or more rackattack instances")
logging.info("Rackattack instances: %(dashboardSources)s", dict(dashboardSources=dashboardSources))
from realtimewebui import server
from realtimewebui import rootresource
from realtimewebui import render
from rackattack.dashboard import pollthread
from twisted.web import static
pollThreads = list()
for dashboardSource in dashboardSources:
logging.info("Creating poll thread for %(dashboardSource)s",
dict(dashboardSource=dashboardSource["name"]))
pollThreads.append(pollthread.PollThread(dashboardSource["name"], dashboardSource["host"]))
render.addTemplateDir(os.path.join(args.dashboardRoot, 'html'))
render.DEFAULTS['title'] = "Rackattack"
render.DEFAULTS['brand'] = "Rackattack"
render.DEFAULTS['mainMenu'] = []
render.DEFAULTS["useStyleTheme"] = True
render.DEFAULTS['dashboardSources'] = dashboardSources
root = rootresource.rootResource()
root.putChild("js", static.File(os.path.join(args.dashboardRoot, "js")))
root.putChild("static", static.File(os.path.join(args.dashboardRoot, "static")))
root.putChild("favicon.ico", static.File(os.path.join(args.dashboardRoot, "static", "favicon.ico")))
root.putChild("wallboard", rootresource.Renderer("index-wallboard.html", {}))
root.putChild("seriallogs", static.File("/var/lib/rackattackphysical/seriallogs"))
for dashboardSource in dashboardSources:
root.putChild(dashboardSource["name"],
rootresource.Renderer("index.html", dict(defaultDashboard=dashboardSource["name"])))
server.runUnsecured(root, args.webPort, args.webSocketPort)
| eliran-stratoscale/rackattack-physical-dashboard | py/rackattack/dashboard/main.py | Python | apache-2.0 | 2,588 |
from __future__ import absolute_import, division, print_function
import operator
def weight(items, **kwargs):
if not len(kwargs):
raise ValueError('Missing attribute for weighting items!')
scaled = []
for attr, weight in kwargs.items():
values = [float(getattr(item, attr)) for item in items]
try:
s = sum(values)
scaled.append([weight * (v / s) for v in values])
except ZeroDivisionError:
# s equals to zero, attr wont contribute
scaled.append([0] * len(items))
return map(sum, zip(*scaled))
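
# Sketch of how weight() is called (the item type is illustrative; any object
# exposing the named attributes works). Each attribute column is scaled by its
# share of the column total, then the weighted columns are summed per item:
#
#   from collections import namedtuple
#   Task = namedtuple('Task', ['cpus', 'mem'])
#   weight([Task(1, 512), Task(3, 512)], cpus=1.0, mem=0.5)
#   # -> [1.0*0.25 + 0.5*0.5, 1.0*0.75 + 0.5*0.5] == [0.5, 1.0]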
def ff(items, targets):
"""First-Fit
This is perhaps the simplest packing heuristic;
    each item is placed into the first bin that still has room for it.
Complexity O(n^2)
"""
bins = [(target, []) for target in targets]
skip = []
for item in items:
for target, content in bins:
if item <= (target - sum(content)):
content.append(item)
break
else:
skip.append(item)
return bins, skip
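
# Worked example with plain numbers (real callers typically pass resource
# objects, see weight() above):
#
#   ff([4, 8, 1, 4, 2, 1], [10, 10])
#   # -> ([(10, [4, 1, 4, 1]), (10, [8, 2])], [])   -- nothing is skipped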
def ffd(items, targets, **kwargs):
"""First-Fit Decreasing
This is perhaps the simplest packing heuristic;
    each item is placed into the first bin that still has room for it.
    This algorithm differs from First-Fit only
    in having a 'sort'; that is, the items are pre-sorted
(largest to smallest).
Complexity O(n^2)
"""
sizes = zip(items, weight(items, **kwargs))
sizes = sorted(sizes, key=operator.itemgetter(1), reverse=True)
items = map(operator.itemgetter(0), sizes)
return ff(items, targets)
def mr(items, targets, **kwargs):
"""Max-Rest
Complexity O(n^2)
"""
bins = [(target, []) for target in targets]
skip = []
for item in items:
capacities = [target - sum(content) for target, content in bins]
weighted = weight(capacities, **kwargs)
(target, content), capacity, _ = max(zip(bins, capacities, weighted),
key=operator.itemgetter(2))
if item <= capacity:
content.append(item)
else:
skip.append(item)
return bins, skip
def mrpq(items, targets):
"""Max-Rest Priority Queue
Complexity O(n*log(n))
"""
raise NotImplementedError()
def bf(items, targets, **kwargs):
"""Best-Fit
Complexity O(n^2)
"""
bins = [(target, []) for target in targets]
skip = []
for item in items:
containers = []
capacities = []
for target, content in bins:
capacity = target - sum(content)
if item <= capacity:
containers.append(content)
capacities.append(capacity - item)
if len(capacities):
weighted = zip(containers, weight(capacities, **kwargs))
content, _ = min(weighted, key=operator.itemgetter(1))
content.append(item)
else:
skip.append(item)
return bins, skip
def bfd(items, targets, **kwargs):
"""Best-Fit Decreasing
Complexity O(n^2)
"""
sizes = zip(items, weight(items, **kwargs))
sizes = sorted(sizes, key=operator.itemgetter(1), reverse=True)
items = map(operator.itemgetter(0), sizes)
return bf(items, targets, **kwargs)
def bfh(items, targets):
"""Best-Fit-Heap
Slightly Improved Complexity
"""
raise NotImplementedError()
| lensacom/satyr | mentor/binpack.py | Python | apache-2.0 | 3,414 |
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2017 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import os
import numpy as np
import pytest
import mdtraj as md
from mdtraj.formats import HDF5TrajectoryFile, NetCDFTrajectoryFile
from mdtraj.reporters import HDF5Reporter, NetCDFReporter, DCDReporter
from mdtraj.testing import eq
try:
from simtk.unit import nanometers, kelvin, picoseconds, femtoseconds
from simtk.openmm import LangevinIntegrator, Platform
from simtk.openmm.app import PDBFile, ForceField, Simulation, CutoffNonPeriodic, CutoffPeriodic, HBonds
HAVE_OPENMM = True
except ImportError:
HAVE_OPENMM = False
# special pytest global to mark all tests in this module
pytestmark = pytest.mark.skipif(not HAVE_OPENMM, reason='test_reporter.py needs OpenMM.')
def test_reporter(tmpdir, get_fn):
pdb = PDBFile(get_fn('native.pdb'))
forcefield = ForceField('amber99sbildn.xml', 'amber99_obc.xml')
# NO PERIODIC BOUNDARY CONDITIONS
system = forcefield.createSystem(pdb.topology, nonbondedMethod=CutoffNonPeriodic,
nonbondedCutoff=1.0 * nanometers, constraints=HBonds, rigidWater=True)
integrator = LangevinIntegrator(300 * kelvin, 1.0 / picoseconds, 2.0 * femtoseconds)
integrator.setConstraintTolerance(0.00001)
platform = Platform.getPlatformByName('Reference')
simulation = Simulation(pdb.topology, system, integrator, platform)
simulation.context.setPositions(pdb.positions)
simulation.context.setVelocitiesToTemperature(300 * kelvin)
tmpdir = str(tmpdir)
hdf5file = os.path.join(tmpdir, 'traj.h5')
ncfile = os.path.join(tmpdir, 'traj.nc')
dcdfile = os.path.join(tmpdir, 'traj.dcd')
reporter = HDF5Reporter(hdf5file, 2, coordinates=True, time=True,
cell=True, potentialEnergy=True, kineticEnergy=True, temperature=True,
velocities=True)
reporter2 = NetCDFReporter(ncfile, 2, coordinates=True, time=True, cell=True)
reporter3 = DCDReporter(dcdfile, 2)
simulation.reporters.append(reporter)
simulation.reporters.append(reporter2)
simulation.reporters.append(reporter3)
simulation.step(100)
reporter.close()
reporter2.close()
reporter3.close()
with HDF5TrajectoryFile(hdf5file) as f:
got = f.read()
eq(got.temperature.shape, (50,))
eq(got.potentialEnergy.shape, (50,))
eq(got.kineticEnergy.shape, (50,))
eq(got.coordinates.shape, (50, 22, 3))
eq(got.velocities.shape, (50, 22, 3))
eq(got.cell_lengths, None)
eq(got.cell_angles, None)
eq(got.time, 0.002 * 2 * (1 + np.arange(50)))
assert f.topology == md.load(get_fn('native.pdb')).top
with NetCDFTrajectoryFile(ncfile) as f:
xyz, time, cell_lengths, cell_angles = f.read()
eq(cell_lengths, None)
eq(cell_angles, None)
eq(time, 0.002 * 2 * (1 + np.arange(50)))
hdf5_traj = md.load(hdf5file)
dcd_traj = md.load(dcdfile, top=get_fn('native.pdb'))
netcdf_traj = md.load(ncfile, top=get_fn('native.pdb'))
# we don't have to convert units here, because md.load already
# handles that
assert hdf5_traj.unitcell_vectors is None
eq(hdf5_traj.xyz, netcdf_traj.xyz)
eq(hdf5_traj.unitcell_vectors, netcdf_traj.unitcell_vectors)
eq(hdf5_traj.time, netcdf_traj.time)
eq(dcd_traj.xyz, hdf5_traj.xyz)
# yield lambda: eq(dcd_traj.unitcell_vectors, hdf5_traj.unitcell_vectors)
def test_reporter_subset(tmpdir, get_fn):
pdb = PDBFile(get_fn('native2.pdb'))
pdb.topology.setUnitCellDimensions([2, 2, 2])
forcefield = ForceField('amber99sbildn.xml', 'amber99_obc.xml')
system = forcefield.createSystem(pdb.topology, nonbondedMethod=CutoffPeriodic,
nonbondedCutoff=1 * nanometers, constraints=HBonds, rigidWater=True)
integrator = LangevinIntegrator(300 * kelvin, 1.0 / picoseconds, 2.0 * femtoseconds)
integrator.setConstraintTolerance(0.00001)
platform = Platform.getPlatformByName('Reference')
simulation = Simulation(pdb.topology, system, integrator, platform)
simulation.context.setPositions(pdb.positions)
simulation.context.setVelocitiesToTemperature(300 * kelvin)
tmpdir = str(tmpdir)
hdf5file = os.path.join(tmpdir, 'traj.h5')
ncfile = os.path.join(tmpdir, 'traj.nc')
dcdfile = os.path.join(tmpdir, 'traj.dcd')
atomSubset = [0, 1, 2, 4, 5]
reporter = HDF5Reporter(hdf5file, 2, coordinates=True, time=True,
cell=True, potentialEnergy=True, kineticEnergy=True, temperature=True,
velocities=True, atomSubset=atomSubset)
reporter2 = NetCDFReporter(ncfile, 2, coordinates=True, time=True,
cell=True, atomSubset=atomSubset)
reporter3 = DCDReporter(dcdfile, 2, atomSubset=atomSubset)
simulation.reporters.append(reporter)
simulation.reporters.append(reporter2)
simulation.reporters.append(reporter3)
simulation.step(100)
reporter.close()
reporter2.close()
reporter3.close()
t = md.load(get_fn('native.pdb'))
t.restrict_atoms(atomSubset)
with HDF5TrajectoryFile(hdf5file) as f:
got = f.read()
eq(got.temperature.shape, (50,))
eq(got.potentialEnergy.shape, (50,))
eq(got.kineticEnergy.shape, (50,))
eq(got.coordinates.shape, (50, len(atomSubset), 3))
eq(got.velocities.shape, (50, len(atomSubset), 3))
eq(got.cell_lengths, 2 * np.ones((50, 3)))
eq(got.cell_angles, 90 * np.ones((50, 3)))
eq(got.time, 0.002 * 2 * (1 + np.arange(50)))
assert f.topology == md.load(get_fn('native.pdb'), atom_indices=atomSubset).topology
with NetCDFTrajectoryFile(ncfile) as f:
xyz, time, cell_lengths, cell_angles = f.read()
eq(cell_lengths, 20 * np.ones((50, 3)))
eq(cell_angles, 90 * np.ones((50, 3)))
eq(time, 0.002 * 2 * (1 + np.arange(50)))
eq(xyz.shape, (50, len(atomSubset), 3))
hdf5_traj = md.load(hdf5file)
dcd_traj = md.load(dcdfile, top=hdf5_traj)
netcdf_traj = md.load(ncfile, top=hdf5_traj)
# we don't have to convert units here, because md.load already handles that
eq(hdf5_traj.xyz, netcdf_traj.xyz)
eq(hdf5_traj.unitcell_vectors, netcdf_traj.unitcell_vectors)
eq(hdf5_traj.time, netcdf_traj.time)
eq(dcd_traj.xyz, hdf5_traj.xyz)
eq(dcd_traj.unitcell_vectors, hdf5_traj.unitcell_vectors)
| leeping/mdtraj | tests/test_reporter.py | Python | lgpl-2.1 | 7,439 |
from __future__ import print_function, division
import os
import sys
root = os.getcwd().split("MAR")[0] + "MAR/src/util"
sys.path.append(root)
from flask import Flask, url_for, render_template, request, jsonify, Response, json
from pdb import set_trace
from mar import MAR
app = Flask(__name__,static_url_path='/static')
global target
target=MAR()
@app.route('/hello/')
def hello():
return render_template('hello.html')
@app.route('/load',methods=['POST'])
def load():
global target
file=request.form['file']
target=target.create(file)
pos, neg, total = target.get_numbers()
return jsonify({"hasLabel": target.hasLabel, "flag": target.flag, "pos": pos, "done": pos+neg, "total": total})
@app.route('/export',methods=['POST'])
def export():
try:
target.export()
flag=True
except:
flag=False
return jsonify({"flag": flag})
@app.route('/plot',methods=['POST'])
def plot():
dir = "./static/image"
for file in os.listdir(dir):
os.remove(os.path.join(dir,file))
name = target.plot()
return jsonify({"path": name})
@app.route('/labeling',methods=['POST'])
def labeling():
id = int(request.form['id'])
label = request.form['label']
target.code(id,label)
pos, neg, total = target.get_numbers()
return jsonify({"flag": target.flag, "pos": pos, "done": pos + neg, "total": total})
@app.route('/auto',methods=['POST'])
def auto():
for id in request.form.values():
target.code(int(id),target.body["label"][int(id)])
pos, neg, total = target.get_numbers()
return jsonify({"flag": target.flag, "pos": pos, "done": pos + neg, "total": total})
@app.route('/restart',methods=['POST'])
def restart():
global target
os.remove("./memory/"+target.name+".pickle")
target = target.create(target.filename)
pos, neg, total = target.get_numbers()
return jsonify({"hasLabel": target.hasLabel, "flag": target.flag, "pos": pos, "done": pos + neg, "total": total})
@app.route('/train',methods=['POST'])
def train():
pos,neg,total=target.get_numbers()
random_id = target.random()
res={"random": target.format(random_id)}
if pos>0 and neg>0:
uncertain_id, uncertain_prob, certain_id, certain_prob = target.train()
res["certain"] = target.format(certain_id,certain_prob)
res["uncertain"] = target.format(uncertain_id, uncertain_prob)
target.save()
# return jsonify(res)
ress=json.dumps(res,ensure_ascii=False)
response = Response(ress,content_type="application/json; charset=utf-8" )
return response
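
# Response shape sketch for /train (field contents come from target.format()):
# {"random": [...], "certain": [...], "uncertain": [...]}; "certain" and
# "uncertain" are only present once both positive and negative labels exist.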
if __name__ == "__main__":
app.run(debug=False,use_debugger=False) | ai-se/MAR | src/index.py | Python | mit | 2,658 |
#!/usr/bin/python -Es
#
# libocispec - a C library for parsing OCI spec files.
#
# Copyright (C) 2017, 2019 Giuseppe Scrivano <[email protected]>
# libocispec is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# libocispec is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with libocispec. If not, see <http://www.gnu.org/licenses/>.
#
# As a special exception, you may create a larger work that contains
# part or all of the libocispec parser skeleton and distribute that work
# under terms of your choice, so long as that work isn't itself a
# parser generator using the skeleton or a modified version thereof
# as a parser skeleton. Alternatively, if you modify or redistribute
# the parser skeleton itself, you may (at your option) remove this
# special exception, which will cause the skeleton and the resulting
# libocispec output files to be licensed under the GNU General Public
# License without this special exception.
import traceback
import os
import sys
import json
import fcntl
import argparse
from collections import OrderedDict
import helpers
import headers
import sources
import common_h
import common_c
# - json suffix
JSON_SUFFIX = ".json"
'''
Description: ref suffix
Interface: ref_suffix
History: 2019-06-17
'''
# - Description: ref suffix
REF_SUFFIX = "_json"
'''
Description: root paths
Interface: rootpaths
History: 2019-06-17
'''
class MyRoot(object):
'''
Description: Store schema information
Interface: None
History: 2019-06-17
'''
def __init__(self, root_path):
self.root_path = root_path
def get_repr(self):
'''
Description: Store schema information
Interface: None
History: 2019-06-17
'''
return "{root_path:(%s)}" % (self.root_path)
def get_path(self):
'''
        Description: Return the stored root path
Interface: None
History: 2019-06-17
'''
return self.root_path
def trim_json_suffix(name):
"""
    Description: Strip the .json/_json suffix and convert the name to a C-style identifier
Interface: None
History: 2019-06-17
"""
if name.endswith(JSON_SUFFIX) or name.endswith(REF_SUFFIX):
name = name[:-len(JSON_SUFFIX)]
return helpers.conv_to_c_style(name.replace('.', '_').replace('-', '_'))
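# For example (file names are hypothetical): "image-manifest-schema.json" and
# "image_manifest_schema_json" are both trimmed to "image_manifest_schema" before
# helpers.conv_to_c_style produces the final C-style identifier.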
def get_prefix_package(filepath, rootpath):
"""
    Description: Derive the package part of the C prefix from the schema path relative to the root path
Interface: None
History: 2019-06-17
"""
realpath = os.path.realpath(filepath)
if realpath.startswith(rootpath) and len(realpath) > len(rootpath):
return helpers.conv_to_c_style(os.path.dirname(realpath)[(len(rootpath) + 1):])
else:
raise RuntimeError('schema path \"%s\" is not in scope of root path \"%s\"' \
% (realpath, rootpath))
def get_prefix_from_file(filepath):
"""
    Description: Build the full C identifier prefix (package prefix + file prefix) for a schema file
Interface: None
History: 2019-06-17
"""
prefix_file = trim_json_suffix(os.path.basename(filepath))
root_path = MyRoot.root_path
prefix_package = get_prefix_package(filepath, root_path)
prefix = prefix_file if prefix_package == "" else prefix_package + "_" + prefix_file
return prefix
def schema_from_file(filepath, srcpath):
"""
    Description: Collect the schema, header and source paths plus the prefix for one schema file
Interface: None
History: 2019-06-17
"""
schemapath = helpers.FilePath(filepath)
prefix = get_prefix_from_file(schemapath.name)
header = helpers.FilePath(os.path.join(srcpath, prefix + ".h"))
source = helpers.FilePath(os.path.join(srcpath, prefix + ".c"))
schema_info = helpers.SchemaInfo(schemapath, header, source, prefix, srcpath)
return schema_info
def make_ref_name(refname, reffile):
"""
    Description: Build the C identifier for a referenced sub-schema
Interface: None
History: 2019-06-17
"""
prefix = get_prefix_from_file(reffile)
if refname == "" or prefix.endswith(refname):
return prefix
return prefix + "_" + helpers.conv_to_c_style(refname)
def splite_ref_name(ref):
"""
    Description: Split a "$ref" value into its file part and its JSON pointer part
Interface: None
History: 2019-06-17
"""
tmp_f, tmp_r = ref.split("#/") if '#/' in ref else (ref, "")
return tmp_f, tmp_r
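# A minimal sketch of how splite_ref_name decomposes "$ref" values into a file part
# and a JSON-pointer part; the values are hypothetical and the helper below is
# illustrative only, it is not used by the generator itself.
def _splite_ref_name_example():
    """Show the three shapes a "$ref" value can take."""
    assert splite_ref_name("defs.json#/definitions/Device") == ("defs.json", "definitions/Device")
    assert splite_ref_name("#/definitions/Device") == ("", "definitions/Device")
    assert splite_ref_name("defs.json") == ("defs.json", "")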
def merge(children):
"""
    Description: Flatten the children of a list of nodes into a single list
Interface: None
History: 2019-06-17
"""
subchildren = []
for i in children:
for j in i.children:
subchildren.append(j)
return subchildren
# BASIC_TYPES include all basic types
BASIC_TYPES = (
"byte", "int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", "uint64", "UID", "GID",
"bytePointer", "doublePointer", "int8Pointer", "int16Pointer", "int32Pointer", "int64Pointer",
"uint8Pointer", "uint16Pointer", "uint32Pointer", "uint64Pointer", "ArrayOfStrings",
"booleanPointer"
)
def judge_support_type(typ):
"""
    Description: Check whether a schema type is supported by the generator
Interface: None
History: 2019-06-17
"""
return typ in ("integer", "boolean", "string", "double") or typ in BASIC_TYPES
def get_ref_subref(src, subref):
"""
    Description: Walk a JSON pointer inside a schema and return the referenced node
Interface: None
History: 2019-06-17
"""
cur = src
subrefname = ""
for j in subref.split('/'):
subrefname = j
if j in BASIC_TYPES:
return src, {"type": j}, subrefname
cur = cur[j]
return src, cur, subrefname
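# A minimal sketch of how get_ref_subref walks a JSON pointer; the schema snippet is
# hypothetical and the helper below is illustrative only.
def _get_ref_subref_example():
    """Resolve the pointer "definitions/Device" inside an in-memory schema."""
    src = {"definitions": {"Device": {"type": "object"}}}
    root, node, name = get_ref_subref(src, "definitions/Device")
    assert root is src and node == {"type": "object"} and name == "Device"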
def get_ref_root(schema_info, src, ref, curfile):
"""
    Description: Resolve a "$ref", loading the referenced schema file when needed
Interface: None
History: 2019-06-17
"""
refname = ""
tmp_f, tmp_r = splite_ref_name(ref)
if tmp_f == "":
cur = src
else:
realpath = os.path.realpath(os.path.join(os.path.dirname(curfile), tmp_f))
curfile = realpath
subschema = schema_from_file(realpath, schema_info.filesdir)
if schema_info.refs is None:
schema_info.refs = {}
schema_info.refs[subschema.header.basename] = subschema
with open(realpath) as i:
cur = src = json.loads(i.read())
subcur = cur
if tmp_r != "":
src, subcur, refname = get_ref_subref(src, tmp_r)
if 'type' not in subcur and '$ref' in subcur:
subf, subr = splite_ref_name(subcur['$ref'])
if subf == "":
src, subcur, refname = get_ref_subref(src, subr)
if 'type' not in subcur:
raise RuntimeError("Not support reference of nesting more than 2 level: ", ref)
else:
return get_ref_root(schema_info, src, subcur['$ref'], curfile)
return src, subcur, curfile, make_ref_name(refname, curfile)
def get_type_pattern_incur(cur, schema_info, src, curfile):
"""
    Description: Map a patternProperties schema to the matching map type name
Interface: None
History: 2019-06-17
"""
# pattern of key:
# '.{1,}' represents type 'string',
# '.{2,}' represents type 'integer'
if '.{2,}' in cur['patternProperties']:
map_key = 'Int'
else:
map_key = 'String'
for i, value in enumerate(cur['patternProperties'].values()):
# only use the first value
if i == 0:
if 'type' in value:
val = value["type"]
else:
dummy_subsrc, subcur, dummy_subcurfile, dummy_subrefname = get_ref_root(
schema_info, src, value['$ref'], curfile)
val = subcur['type']
break
m_key = {
'object': 'Object',
'string': 'String',
'integer': 'Int',
'boolean': 'Bool',
'int64': 'Int64'
}[val]
map_val = m_key
typ = 'map' + map_key + map_val
return typ
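# A minimal sketch of the patternProperties-to-map-type mapping; the schema snippets
# are hypothetical and the helper below is illustrative only (the values carry a
# plain "type", so the $ref branch is never taken and the other arguments may be None).
def _get_type_pattern_incur_example():
    """Show the string-keyed and integer-keyed map type names."""
    str_map = {'patternProperties': {'.{1,}': {'type': 'string'}}}
    int_map = {'patternProperties': {'.{2,}': {'type': 'integer'}}}
    assert get_type_pattern_incur(str_map, None, None, None) == 'mapStringString'
    assert get_type_pattern_incur(int_map, None, None, None) == 'mapIntInt'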
class GenerateNodeInfo(object):
'''
    Description: Store the context (schema info, name, current node, file) used to generate one node
Interface: None
History: 2019-06-17
'''
def __init__(self, schema_info, name, cur, curfile):
self.schema_info = schema_info
self.name = name
self.cur = cur
self.curfile = curfile
def get_repr(self):
'''
        Description: Return a printable representation of the node context
Interface: None
History: 2019-06-17
'''
return "{schema_info:(%s) name:(%s) cur:(%s) curfile:(%s)}" \
% (self.schema_info, self.name, self.cur, self.curfile)
def get_name(self):
'''
        Description: Return the node name
Interface: None
History: 2019-06-17
'''
return self.name
def gen_all_arr_typnode(node_info, src, typ, refname):
"""
    Description: Generate the node for an array whose items use allOf
Interface: None
History: 2019-06-17
"""
schema_info = node_info.schema_info
name = node_info.name
cur = node_info.cur
curfile = node_info.curfile
subtyp = None
subtypobj = None
required = None
children = merge(resolve_list(schema_info, name, src, cur["items"]['allOf'], curfile))
subtyp = children[0].typ
subtypobj = children
return helpers.Unite(name,
typ,
children,
subtyp=subtyp,
subtypobj=subtypobj,
subtypname=refname,
required=required), src
def gen_any_arr_typnode(node_info, src, typ, refname):
"""
    Description: Generate the node for an array whose items use anyOf
Interface: None
History: 2019-06-17
"""
schema_info = node_info.schema_info
name = node_info.name
cur = node_info.cur
curfile = node_info.curfile
subtyp = None
subtypobj = None
required = None
anychildren = resolve_list(schema_info, name, src, cur["items"]['anyOf'], curfile)
subtyp = anychildren[0].typ
children = anychildren[0].children
subtypobj = children
refname = anychildren[0].subtypname
return helpers.Unite(name,
typ,
children,
subtyp=subtyp,
subtypobj=subtypobj,
subtypname=refname,
required=required), src
def gen_ref_arr_typnode(node_info, src, typ, refname):
"""
    Description: Generate the node for an array whose items use $ref
Interface: None
History: 2019-06-17
"""
schema_info = node_info.schema_info
name = node_info.name
cur = node_info.cur
curfile = node_info.curfile
item_type, src = resolve_type(schema_info, name, src, cur["items"], curfile)
ref_file, subref = splite_ref_name(cur['items']['$ref'])
if ref_file == "":
src, dummy_subcur, subrefname = get_ref_subref(src, subref)
refname = make_ref_name(subrefname, curfile)
else:
refname = item_type.subtypname
return helpers.Unite(name,
typ,
None,
subtyp=item_type.typ,
subtypobj=item_type.children,
subtypname=refname,
required=item_type.required), src
def gen_type_arr_typnode(node_info, src, typ, refname):
"""
    Description: Generate the node for an array whose items declare a plain type
Interface: None
History: 2019-06-17
"""
schema_info = node_info.schema_info
name = node_info.name
cur = node_info.cur
curfile = node_info.curfile
item_type, src = resolve_type(schema_info, name, src, cur["items"], curfile)
if typ == 'array' and typ == item_type.typ and not helpers.valid_basic_map_name(item_type.subtyp):
return helpers.Unite(name,
typ,
None,
subtyp=item_type.subtyp,
subtypobj=item_type.subtypobj,
subtypname=item_type.subtypname,
required=item_type.required, doublearray=True), src
else:
return helpers.Unite(name,
typ,
None,
subtyp=item_type.typ,
subtypobj=item_type.children,
subtypname=refname,
required=item_type.required), src
def gen_arr_typnode(node_info, src, typ, refname):
"""
    Description: Generate the node for an array schema
Interface: None
History: 2019-06-17
"""
cur = node_info.cur
if 'allOf' in cur["items"]:
return gen_all_arr_typnode(node_info, src, typ, refname)
elif 'anyOf' in cur["items"]:
return gen_any_arr_typnode(node_info, src, typ, refname)
elif '$ref' in cur["items"]:
return gen_ref_arr_typnode(node_info, src, typ, refname)
elif 'type' in cur["items"]:
return gen_type_arr_typnode(node_info, src, typ, refname)
return None
def gen_obj_typnode(node_info, src, typ, refname):
"""
    Description: Generate the node for an object or mapStringObject schema
Interface: None
History: 2019-06-17
"""
schema_info = node_info.schema_info
name = node_info.name
cur = node_info.cur
curfile = node_info.curfile
children = None
subtyp = None
subtypobj = None
required = None
if 'allOf' in cur:
children = merge(resolve_list(schema_info, name, src, cur['allOf'], curfile))
elif 'anyOf' in cur:
children = resolve_list(schema_info, name, src, cur['anyOf'], curfile)
elif 'patternProperties' in cur:
children = parse_properties(schema_info, name, src, cur, curfile)
children[0].name = children[0].name.replace('_{1,}', 'element').replace('_{2,}', \
'element')
children[0].fixname = "values"
if helpers.valid_basic_map_name(children[0].typ):
children[0].name = helpers.make_basic_map_name(children[0].typ)
else:
children = parse_properties(schema_info, name, src, cur, curfile) \
if 'properties' in cur else None
if 'required' in cur:
required = cur['required']
return helpers.Unite(name,\
typ,\
children,\
subtyp=subtyp,\
subtypobj=subtypobj,\
subtypname=refname,\
required=required), src
def get_typ_notoneof(schema_info, src, cur, curfile):
"""
    Description: Determine the schema type when the node does not use oneOf
Interface: None
History: 2019-06-17
"""
if 'patternProperties' in cur:
typ = get_type_pattern_incur(cur, schema_info, src, curfile)
elif "type" in cur:
typ = cur["type"]
else:
typ = "object"
return typ
def resolve_type(schema_info, name, src, cur, curfile):
"""
    Description: Resolve a schema node into a generator node, following $ref and oneOf
Interface: None
History: 2019-06-17
"""
children = None
subtyp = None
subtypobj = None
required = None
refname = None
if '$ref' in cur:
src, cur, curfile, refname = get_ref_root(schema_info, src, cur['$ref'], curfile)
if "oneOf" in cur:
cur = cur['oneOf'][0]
if '$ref' in cur:
return resolve_type(schema_info, name, src, cur, curfile)
else:
typ = cur['type']
else:
typ = get_typ_notoneof(schema_info, src, cur, curfile)
node_info = GenerateNodeInfo(schema_info, name, cur, curfile)
if helpers.valid_basic_map_name(typ):
pass
elif typ == 'array':
return gen_arr_typnode(node_info, src, typ, refname)
elif typ == 'object' or typ == 'mapStringObject':
return gen_obj_typnode(node_info, src, typ, refname)
elif typ == 'ArrayOfStrings':
typ = 'array'
subtyp = 'string'
children = subtypobj = None
else:
if not judge_support_type(typ):
raise RuntimeError("Invalid schema type: %s" % typ)
children = None
return helpers.Unite(name,
typ,
children,
subtyp=subtyp,
subtypobj=subtypobj,
subtypname=refname,
required=required), src
def resolve_list(schema_info, name, schema, objs, curfile):
"""
    Description: Resolve a list of schema nodes, e.g. the members of allOf/anyOf
Interface: None
History: 2019-06-17
"""
obj = []
index = 0
for i in objs:
generated_name = helpers.CombinateName( \
i['$ref'].split("/")[-1]) if '$ref' in i \
else helpers.CombinateName(name.name + str(index))
node, _ = resolve_type(schema_info, generated_name, schema, i, curfile)
if node:
obj.append(node)
index += 1
if not obj:
obj = None
return obj
def parse_dict(schema_info, name, schema, objs, curfile):
"""
    Description: Resolve each entry of a schema dictionary (properties, definitions or patternProperties)
Interface: None
History: 2019-06-17
"""
obj = []
for i in objs:
node, _ = resolve_type(schema_info, name.append(i), schema, objs[i], curfile)
if node:
obj.append(node)
if not obj:
obj = None
return obj
def parse_properties(schema_info, name, schema, props, curfile):
"""
    Description: Parse the properties, definitions or patternProperties of a schema
Interface: None
History: 2019-06-17
"""
if 'definitions' in props:
return parse_dict(schema_info, name, schema, props['definitions'], curfile)
if 'patternProperties' in props:
return parse_dict(schema_info, name, schema, props['patternProperties'], curfile)
return parse_dict(schema_info, name, schema, props['properties'], curfile)
def handle_type_not_in_schema(schema_info, schema, prefix):
"""
    Description: Build the node tree for a schema without a top-level type (definitions only)
Interface: None
History: 2019-06-17
"""
required = None
if 'definitions' in schema:
return helpers.Unite( \
helpers.CombinateName("definitions"), 'definitions', \
parse_properties(schema_info, helpers.CombinateName(""), schema, schema, \
schema_info.name.name), None, None, None, None)
else:
if len(schema) > 1:
print('More than one element found in schema')
return None
value_nodes = []
for value in schema:
if 'required' in schema[value]:
required = schema[value]['required']
childrens = parse_properties(schema_info, helpers.CombinateName(""), \
schema[value], schema[value], \
schema_info.name.name)
value_node = helpers.Unite(helpers.CombinateName(prefix), \
'object', childrens, None, None, \
None, required)
value_nodes.append(value_node)
return helpers.Unite(helpers.CombinateName("definitions"), \
'definitions', value_nodes, None, None, \
None, None)
def parse_schema(schema_info, schema, prefix):
"""
    Description: Parse a schema document into the node tree used for code generation
Interface: None
History: 2019-06-17
"""
required = None
if 'type' not in schema:
return handle_type_not_in_schema(schema_info, schema, prefix)
if 'object' in schema['type']:
if 'required' in schema:
required = schema['required']
return helpers.Unite(
helpers.CombinateName(prefix), 'object',
parse_properties(schema_info, \
helpers.CombinateName(""), \
schema, schema, schema_info.name.name), \
None, None, None, required)
elif 'array' in schema['type']:
item_type, _ = resolve_type(schema_info, helpers.CombinateName(""), \
schema['items'], schema['items'], schema_info.name.name)
if item_type.typ == 'array' and not helpers.valid_basic_map_name(item_type.subtyp):
item_type.doublearray = True
return item_type
else:
return helpers.Unite(helpers.CombinateName(prefix), 'array', None, item_type.typ, \
item_type.children, None, item_type.required)
else:
        print("Unsupported type '%s'" % schema['type'])
return prefix, None
def expand(tree, structs, visited):
"""
    Description: Flatten the node tree into the list of structs to generate, skipping already-visited nodes
Interface: None
History: 2019-06-17
"""
if tree.children is not None:
for i in tree.children:
if tree.subtypname:
i.subtypname = "from_ref"
expand(i, structs, visited=visited)
if tree.subtypobj is not None:
for i in tree.subtypobj:
expand(i, structs, visited=visited)
if tree.typ == 'array' and helpers.valid_basic_map_name(tree.subtyp):
name = helpers.CombinateName(tree.name + "_element")
node = helpers.Unite(name, tree.subtyp, None)
expand(node, structs, visited)
id_ = "%s:%s" % (tree.name, tree.typ)
if id_ not in visited.keys():
structs.append(tree)
visited[id_] = tree
return structs
def reflection(schema_info, gen_ref):
"""
    Description: Generate the C header and source files for one schema
Interface: None
History: 2019-06-17
"""
with open(schema_info.header.name, "w") as \
header_file, open(schema_info.source.name, "w") as source_file:
fcntl.flock(header_file, fcntl.LOCK_EX)
fcntl.flock(source_file, fcntl.LOCK_EX)
with open(schema_info.name.name) as schema_file:
schema_json = json.loads(schema_file.read(), object_pairs_hook=OrderedDict)
try:
tree = parse_schema(schema_info, schema_json, schema_info.prefix)
if tree is None:
print("Failed parse schema")
sys.exit(1)
structs = expand(tree, [], {})
headers.header_reflect(structs, schema_info, header_file)
sources.src_reflect(structs, schema_info, source_file, tree.typ)
except RuntimeError:
traceback.print_exc()
print("Failed to parse schema file: %s" % schema_info.name.name)
sys.exit(1)
finally:
pass
fcntl.flock(source_file, fcntl.LOCK_UN)
fcntl.flock(header_file, fcntl.LOCK_UN)
if gen_ref is True:
if schema_info.refs:
for reffile in schema_info.refs.values():
reflection(reffile, True)
def gen_common_files(out):
"""
    Description: Generate json_common.h and json_common.c
Interface: None
History: 2019-06-17
"""
common_h.generate_json_common_h(out)
common_c.generate_json_common_c(out)
def handle_single_file(args, srcpath, gen_ref, schemapath):
"""
    Description: Generate code for a single schema file or for every schema in a directory
Interface: None
History: 2019-06-17
"""
if not os.path.exists(schemapath.name) or not os.path.exists(srcpath.name):
        print('Path %s does not exist' % schemapath.name)
sys.exit(1)
if os.path.isdir(schemapath.name):
if args.recursive is True:
# recursively parse schema
for dirpath, dummy_dirnames, files in os.walk(schemapath.name):
for target_file in files:
if target_file.endswith(JSON_SUFFIX):
schema_info = schema_from_file(os.path.join(dirpath, target_file), \
srcpath.name)
reflection(schema_info, gen_ref)
print("\033[1;34mReflection:\033[0m\t%-60s \033[1;32mSuccess\033[0m" % (target_file))
else:
            # only parse files in the current directory
for target_file in os.listdir(schemapath.name):
fullpath = os.path.join(schemapath.name, target_file)
if fullpath.endswith(JSON_SUFFIX) and os.path.isfile(fullpath):
schema_info = schema_from_file(fullpath, srcpath.name)
reflection(schema_info, gen_ref)
print("\033[1;34mReflection:\033[0m\t%-60s \033[1;32mSuccess\033[0m" % (fullpath))
else:
if schemapath.name.endswith(JSON_SUFFIX):
schema_info = schema_from_file(schemapath.name, srcpath.name)
reflection(schema_info, gen_ref)
print("\033[1;34mReflection:\033[0m\t%-60s \033[1;32mSuccess\033[0m" % (schemapath.name))
else:
            print('File %s does not end with .json' % schemapath.name)
def handle_files(args, srcpath):
"""
    Description: Generate code for every path passed on the command line
Interface: None
History: 2019-06-17
"""
for path in args.path:
gen_ref = args.gen_ref
schemapath = helpers.FilePath(path)
handle_single_file(args, srcpath, gen_ref, schemapath)
def main():
"""
    Description: Command line entry point
Interface: None
History: 2019-06-17
"""
parser = argparse.ArgumentParser(prog='generate.py',
usage='%(prog)s [options] path [path ...]',
description='Generate C header and source from json-schema')
parser.add_argument('path', nargs='+', help='File or directory to parse')
    parser.add_argument(
        '--root',
        required=True,
        help='All schema files must be placed in the root directory or a '
             'sub-directory of root, and naming of C variables starts from this path'
    )
parser.add_argument('--gen-common',
action='store_true',
help='Generate json_common.c and json_common.h')
parser.add_argument('--gen-ref',
action='store_true',
help='Generate reference file defined in schema with key \"$ref\"')
parser.add_argument('-r',
'--recursive',
action='store_true',
help='Recursively generate all schema files in directory')
parser.add_argument(
'--out',
        help='Specify a directory to save C header and source (default is the current directory)')
args = parser.parse_args()
if not args.root:
print('Missing root path, see help')
sys.exit(1)
root_path = os.path.realpath(args.root)
if not os.path.exists(root_path):
        print('Root %s does not exist' % args.root)
sys.exit(1)
MyRoot.root_path = root_path
if args.out:
srcpath = helpers.FilePath(args.out)
else:
srcpath = helpers.FilePath(os.getcwd())
if not os.path.exists(srcpath.name):
os.makedirs(srcpath.name)
if args.gen_common:
gen_common_files(srcpath.name)
handle_files(args, srcpath)
if __name__ == "__main__":
main()
| giuseppe/libocispec | src/generate.py | Python | gpl-3.0 | 27,790 |
PLUGIN_NAME = 'MusicBee Compatibility'
PLUGIN_AUTHOR = 'Volker Zell (and Sophist)'
PLUGIN_DESCRIPTION = '''
Provide MusicBee compatible tags.
<br/><br/>
Note 1: The tags used by this plugin are only populated when you have
checked Options / Metadata / Use track relationships.
<br/>
Note 2: You may wish to use this with the Copy to Comment plugin,
to which previous comment tag functionality has been moved.
<br/>
Note 3: Info copied includes ALL Performers, as well as the
Composer, Producer, Mixer etc.
<br/>
<pre>
MusicBee: MusicBrainz tag: MusicBrainz source:
Display Artist DISPLAY ARTIST artist
Artists:Artist artist artists (or split of artist if not existing)
Artists:Guest GUEST ARTIST taken from artist or title tags after feat./featuring/(feat./(featuring
Artists:Performer PERFORMER performer:*
Musician Credits List TMCL performer:*
Involved People List IPLS 'Arranger', 'Engineer', 'Producer', 'Mixer', 'DJMixer', 'Remixer', 'Conductor'
Comment comment: All the above
Misc MISC 'CatalogNumber', 'Barcode', 'ASIN', 'ReleaseType', 'ReleaseStatus', 'ReleaseCountry'
</pre>
Note 4: I use the following additional entry in CustomTagConfig.xml for MusicBee
<pre>
‹Tag id="Misc" id3v23="TXXX/MISC" id3v24="TXXX/MISC" wma="Misc" vorbisComments="Misc" mpeg="Misc" ape2="Misc" /›
</pre>
and a couple of Virtual Columns with the following structure (because I ran out of the 16 custom columns) to access the MISC entries:
<pre>
Catalognumber = $Replace($First($Split(‹Misc›,Catalognumber:,2)),",",;)
</pre>
'''
PLUGIN_VERSION = "0.7"
PLUGIN_API_VERSIONS = ["0.15.0", "0.15.1", "0.16.0", "1.0.0", "1.1.0", "1.2.0", "1.3.0"]
import re
from picard import log
from picard.metadata import register_track_metadata_processor
class MusicBeeCompatibility:
re_artist_split = re.compile(r",\s*|\s+&\s+|\s+and\s+|\s+feat[.:]\s+|\s+featuring\s+").split
re_featured_split = re.compile(r"\s+\(?feat[.:]\s+|\s+featuring\s+").split
def musicbee_compatibility(self, album, metadata, *args):
self.re_artist_split = MusicBeeCompatibility.re_artist_split
self.re_featured_split = MusicBeeCompatibility.re_featured_split
self.populate_performers(metadata)
self.populate_artist(metadata)
self.populate_tipl(metadata)
self.populate_misc(metadata)
def populate_performers(self, metadata):
performers = []
for name in [name for name in metadata if name.startswith('performer:')]:
self.txxx_add(metadata, 'TMCL', name[10:].title(), name, '; ')
performers += dict.get(metadata, name)
metadata["PERFORMER"] = " \x00".join(set(performers))
def populate_artist(self, metadata):
if 'artists' in metadata:
artists = dict.get(metadata, "artists")
elif 'artist' in metadata:
artists = self.re_artist_split(metadata['artist'])
else:
return
guests = []
if 'artist' in metadata:
guest = self.re_featured_split(metadata['artist'], 1)[1:]
if guest:
guests += self.re_artist_split(guest[0].rstrip(')'))
if 'title' in metadata:
guest = self.re_featured_split(metadata['title'], 1)[1:]
if guest:
guests += self.re_artist_split(guest[0].rstrip(')'))
artists = [x for x in artists if x not in guests]
metadata["DISPLAY ARTIST"] = metadata["artist"]
metadata["artist"] = " \x00".join(artists)
metadata["GUEST ARTIST"] = " \x00".join(guests)
def populate_tipl(self, metadata):
for name in ['Arranger', 'Engineer', 'Producer', 'Mixer', 'DJMixer']:
if name.lower() in metadata:
metadata[name] = " \x00".join(dict.get(metadata, name.lower()))
for name in ['Arranger', 'Engineer', 'Producer', 'Mixer', 'DJMixer', 'Remixer', 'Conductor']:
self.txxx_add(metadata, 'IPLS', name, name, '; ')
def populate_misc(self, metadata):
for name in ['CatalogNumber', 'Barcode', 'ASIN', 'ReleaseType', 'ReleaseStatus', 'ReleaseCountry']:
self.txxx_add(metadata, 'MISC', name, name, '; ')
def txxx_add(self, metadata, tagname, label, name, joiner):
name = name.lower()
        if name not in metadata:
return
tag = dict.get(metadata, name)
value = ', '.join(tag)
if label:
label += ': '
if tagname in metadata:
metadata[tagname] += joiner + label + value
else:
metadata[tagname] = label + value
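# A minimal sketch of how txxx_add assembles the MISC tag: a plain dict stands in for
# Picard's Metadata object and the tag values are made up. The resulting string is
# what the $Split(...) virtual-column example in the description unpacks again.
def _misc_tag_example():
    meta = {'catalognumber': ['ABC-123'], 'barcode': ['0123456789012']}
    helper = MusicBeeCompatibility()
    helper.txxx_add(meta, 'MISC', 'CatalogNumber', 'CatalogNumber', '; ')
    helper.txxx_add(meta, 'MISC', 'Barcode', 'Barcode', '; ')
    assert meta['MISC'] == 'CatalogNumber: ABC-123; Barcode: 0123456789012'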
try:
from picard.plugin import PluginPriority
register_track_metadata_processor(
MusicBeeCompatibility().musicbee_compatibility,
priority=PluginPriority.LOW
)
except ImportError:
log.warning(
"Running %r plugin on this Picard version may not work as you expect. "
"This plugin should be run after other plugins and if it runs before "
"some changes to metadata may be incorrect.", PLUGIN_NAME
)
register_track_metadata_processor(MusicBeeCompatibility().musicbee_compatibility)
| Sophist-UK/sophist-picard-plugins | musicbee_compatibility.py | Python | gpl-2.0 | 5,299 |
# Copyright 2020 The Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyramidNet model with ShakeDrop regularization.
Reference:
ShakeDrop Regularization for Deep Residual Learning
Yoshihiro Yamada, Masakazu Iwamura, Takuya Akiba, Koichi Kise
https://arxiv.org/abs/1802.02375
Initially forked from
github.com/google/flax/blob/master/examples/cifar10/models/pyramidnet.py
This implementation mimics the one from
https://github.com/tensorflow/models/blob/master/research/autoaugment/shake_drop.py
that is widely used as a benchmark.
We use kaiming normal initialization for convolutional kernels (mode = fan_out,
gain = 2.0). The final dense layer use a uniform distribution U[-scale, scale]
where scale = 1 / sqrt(num_classes) as per the autoaugment implementation.
It is worth noting that this model is slightly different from the one presented
in the Deep Pyramidal Residual Networks paper
(https://arxiv.org/pdf/1610.02915.pdf), as we round instead of truncating when
computing the number of channels in each block. This results in a model with
roughly 0.2M additional parameters. Rounding is however the method that was
used in follow up work (https://arxiv.org/abs/1905.00397,
https://arxiv.org/abs/2002.12047) so we keep it for consistency.
"""
from typing import Tuple
from flax import nn
import jax.numpy as jnp
from sam.sam_jax.models import utils
def _shortcut(x: jnp.ndarray, chn_out: int, strides: Tuple[int, int]
) -> jnp.ndarray:
"""Pyramid Net shortcut.
Use Average pooling to downsample.
Use zero-padding to increase channels.
Args:
x: Input. Should have shape [batch_size, dim, dim, features]
where dim is the resolution (width and height if the input is an image).
chn_out: Expected output channels.
strides: Output stride.
Returns:
Shortcut value for Pyramid Net. Shape will be
[batch_size, dim, dim, chn_out] if strides = (1, 1) (no downsampling) or
[batch_size, dim/2, dim/2, chn_out] if strides = (2, 2) (downsampling).
"""
chn_in = x.shape[3]
if strides != (1, 1):
x = nn.avg_pool(x, strides, strides)
if chn_out != chn_in:
diff = chn_out - chn_in
x = jnp.pad(x, [[0, 0], [0, 0], [0, 0], [0, diff]])
return x
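# A minimal shape sketch for the shortcut (batch size and channel counts are
# hypothetical): with strides (2, 2) the resolution is halved by average pooling and
# the channel axis is zero-padded from 16 to 64. Illustrative only, never called here.
def _shortcut_shape_example():
  x = jnp.zeros((8, 32, 32, 16))
  y = _shortcut(x, chn_out=64, strides=(2, 2))
  assert y.shape == (8, 16, 16, 64)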
class BottleneckShakeDrop(nn.Module):
"""PyramidNet with Shake-Drop Bottleneck."""
def apply(self,
x: jnp.ndarray,
channels: int,
strides: Tuple[int, int],
prob: float,
alpha_min: float,
alpha_max: float,
beta_min: float,
beta_max: float,
train: bool = True,
true_gradient: bool = False) -> jnp.ndarray:
"""Implements the forward pass in the module.
Args:
x: Input to the module. Should have shape [batch_size, dim, dim, features]
where dim is the resolution (width and height if the input is an image).
channels: How many channels to use in the convolutional layers.
strides: Strides for the pooling.
prob: Probability of dropping the block (see paper for details).
alpha_min: See paper.
alpha_max: See paper.
beta_min: See paper.
beta_max: See paper.
train: If False, will use the moving average for batch norm statistics.
Else, will use statistics computed on the batch.
true_gradient: If true, the same mixing parameter will be used for the
forward and backward pass (see paper for more details).
Returns:
The output of the bottleneck block.
"""
y = utils.activation(x, apply_relu=False, train=train, name='bn_1_pre')
y = nn.Conv(
y,
channels, (1, 1),
padding='SAME',
bias=False,
kernel_init=utils.conv_kernel_init_fn,
name='1x1_conv_contract')
y = utils.activation(y, train=train, name='bn_1_post')
y = nn.Conv(
y,
channels, (3, 3),
strides,
padding='SAME',
bias=False,
kernel_init=utils.conv_kernel_init_fn,
name='3x3')
y = utils.activation(y, train=train, name='bn_2')
y = nn.Conv(
y,
channels * 4, (1, 1),
padding='SAME',
bias=False,
kernel_init=utils.conv_kernel_init_fn,
name='1x1_conv_expand')
y = utils.activation(y, apply_relu=False, train=train, name='bn_3')
if train and not self.is_initializing():
y = utils.shake_drop_train(y, prob, alpha_min, alpha_max,
beta_min, beta_max,
true_gradient=true_gradient)
else:
y = utils.shake_drop_eval(y, prob, alpha_min, alpha_max)
x = _shortcut(x, channels * 4, strides)
return x + y
def _calc_shakedrop_mask_prob(curr_layer: int,
total_layers: int,
mask_prob: float) -> float:
"""Calculates drop prob depending on the current layer."""
return 1 - (float(curr_layer) / total_layers) * mask_prob
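# A worked example of the linear decay (illustrative only): with mask_prob = 0.5 and
# 90 blocks in total, the middle block keeps probability 1 - (45 / 90) * 0.5 = 0.75
# while the deepest block bottoms out at 1 - (90 / 90) * 0.5 = 0.5.
def _calc_shakedrop_mask_prob_example():
  assert _calc_shakedrop_mask_prob(45, 90, 0.5) == 0.75
  assert _calc_shakedrop_mask_prob(90, 90, 0.5) == 0.5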
class PyramidNetShakeDrop(nn.Module):
"""PyramidNet with Shake-Drop."""
def apply(self,
x: jnp.ndarray,
num_outputs: int,
pyramid_alpha: int = 200,
pyramid_depth: int = 272,
train: bool = True,
true_gradient: bool = False) -> jnp.ndarray:
"""Implements the forward pass in the module.
Args:
x: Input to the module. Should have shape [batch_size, dim, dim, 3]
where dim is the resolution of the image.
num_outputs: Dimension of the output of the model (ie number of classes
for a classification problem).
pyramid_alpha: See paper.
pyramid_depth: See paper.
train: If False, will use the moving average for batch norm statistics.
Else, will use statistics computed on the batch.
true_gradient: If true, the same mixing parameter will be used for the
forward and backward pass (see paper for more details).
Returns:
The output of the PyramidNet model, a tensor of shape
[batch_size, num_classes].
"""
assert (pyramid_depth - 2) % 9 == 0
# Shake-drop hyper-params
mask_prob = 0.5
alpha_min, alpha_max = (-1.0, 1.0)
beta_min, beta_max = (0.0, 1.0)
# Bottleneck network size
blocks_per_group = (pyramid_depth - 2) // 9
# See Eqn 2 in https://arxiv.org/abs/1610.02915
num_channels = 16
# N in https://arxiv.org/abs/1610.02915
total_blocks = blocks_per_group * 3
delta_channels = pyramid_alpha / total_blocks
x = nn.Conv(
x,
16, (3, 3),
padding='SAME',
name='init_conv',
bias=False,
kernel_init=utils.conv_kernel_init_fn)
x = utils.activation(x, apply_relu=False, train=train, name='init_bn')
layer_num = 1
for block_i in range(blocks_per_group):
num_channels += delta_channels
layer_mask_prob = _calc_shakedrop_mask_prob(layer_num, total_blocks,
mask_prob)
x = BottleneckShakeDrop(
x,
int(round(num_channels)), (1, 1),
layer_mask_prob,
alpha_min,
alpha_max,
beta_min,
beta_max,
train=train,
true_gradient=true_gradient)
layer_num += 1
for block_i in range(blocks_per_group):
num_channels += delta_channels
layer_mask_prob = _calc_shakedrop_mask_prob(
layer_num, total_blocks, mask_prob)
x = BottleneckShakeDrop(x, int(round(num_channels)),
((2, 2) if block_i == 0 else (1, 1)),
layer_mask_prob,
alpha_min, alpha_max, beta_min, beta_max,
train=train,
true_gradient=true_gradient)
layer_num += 1
for block_i in range(blocks_per_group):
num_channels += delta_channels
layer_mask_prob = _calc_shakedrop_mask_prob(
layer_num, total_blocks, mask_prob)
x = BottleneckShakeDrop(x, int(round(num_channels)),
((2, 2) if block_i == 0 else (1, 1)),
layer_mask_prob,
alpha_min, alpha_max, beta_min, beta_max,
train=train,
true_gradient=true_gradient)
layer_num += 1
assert layer_num - 1 == total_blocks
x = utils.activation(x, train=train, name='final_bn')
x = nn.avg_pool(x, (8, 8))
x = x.reshape((x.shape[0], -1))
x = nn.Dense(x, num_outputs, kernel_init=utils.dense_layer_init_fn)
return x
| google-research/sam | sam_jax/models/pyramidnet.py | Python | apache-2.0 | 9,092 |
''' Module '''
import re
import logging
class CurrentCost:
    ''' Parser for CurrentCost meter XML readings '''
    def __init__(self, data=None, logger=None):
        ''' Store the raw reading and initialise the parsed fields '''
        self._data = data
        self.logger = logger or logging.getLogger(__name__)
        self.time = None
        self.uid = None
        self.value = None
def parse_data(self):
        ''' Extract the uid, watts value and timestamp from the raw reading '''
try:
            # Regexes based on http://www.marcus-povey.co.uk
            uidregex = re.compile(r'<id>([0-9]+)</id>')
            valueregex = re.compile(r'<watts>([0-9]+)</watts>')
            timeregex = re.compile(r'<time>([0-9.:]+)</time>')
self.value = str(int(valueregex.findall(self._data)[0]))
self.time = timeregex.findall(self._data)[0]
self.uid = uidregex.findall(self._data)[0]
            self.logger.info('Parsed data successfully!')
except Exception:
self.logger.error('Could not get details from device',
exc_info=True)
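# A minimal usage sketch; the XML fragment is a made-up CurrentCost reading shaped
# the way the regexes above expect, and the function is illustrative only.
def _example_usage():
    ''' Parse one hypothetical reading and expose value/uid/time '''
    reading = '<msg><time>12:34:56</time><id>02345</id><watts>00123</watts></msg>'
    meter = CurrentCost(reading)
    meter.parse_data()
    # meter.value == '123', meter.uid == '02345', meter.time == '12:34:56'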
| gljohn/meterd | meterd/parser/currentcost.py | Python | gpl-3.0 | 1,012 |
"""
This file provides api for retrieving data from codeforces.com
"""
import hashlib
import json
import operator
import random
import time
from collections import OrderedDict
from enum import Enum
from urllib.error import HTTPError
from urllib.request import urlopen
from .json_objects import Contest
from .json_objects import Hack
from .json_objects import Problem
from .json_objects import ProblemStatistics
from .json_objects import RanklistRow
from .json_objects import RatingChange
from .json_objects import Submission
from .json_objects import User
__all__ = ['CodeforcesAPI', 'CodeforcesLanguage']
class CodeforcesLanguage(Enum):
en = 'en'
ru = 'ru'
class CodeforcesDataRetriever:
"""
This class hides low-level operations with retrieving data from Codeforces site
"""
def __init__(self, lang=CodeforcesLanguage.en, key=None, secret=None):
"""
:param lang: Language
:type lang: CodeforcesLanguage
:param key: Private API key. Ignored if secret is None
:type key: str
:param secret: Private API secret. Ignored if key is None
:type secret: str
"""
self._key = None
self._secret = None
if key is not None and secret is not None:
self.key = key
self.secret = secret
self._base_from_language = {
CodeforcesLanguage.en: 'http://codeforces.com/api/',
CodeforcesLanguage.ru: 'http://codeforces.ru/api/'
}
self._language = lang
def get_data(self, method, **kwargs):
"""
Retrieves data by given method with given parameters
:param method: Request method
:param kwargs: HTTP parameters
:return:
"""
return self.__get_data(self.__generate_url(method, **kwargs))
def __get_data(self, url):
"""
Returns data retrieved from given url
"""
try:
with urlopen(url) as req:
return self.__check_json(req.read().decode('utf-8'))
except HTTPError as http_e:
try:
return self.__check_json(http_e.read().decode('utf-8'))
except Exception as e:
raise e from http_e
def __generate_url(self, method, **kwargs):
"""
Generates request url with given method and named parameters
:param method: Name of the method
:type method: str
:param kwargs: HTTP parameters
:type kwargs: dict of [str, object]
:return: Url
:rtype: str
"""
url = self.base + method
if self.key is not None and self.secret is not None:
kwargs['apiKey'] = self.key
kwargs['time'] = int(time.time())
if kwargs:
args = self.__get_valid_args(**kwargs)
url += '?' + '&'.join(map(self.__key_value_to_http_parameter, args.items()))
if self.key is not None and self.secret is not None:
url += '&apiSig=' + self.__generate_api_sig(method, args)
return url
def __generate_api_sig(self, method, params):
"""
apiSig — signature to ensure that you know both key and secret.
First six characters of the apiSig parameter can be arbitrary.
We recommend to choose them at random for each request. Let's denote them as rand.
The rest of the parameter is hexadecimal representation of SHA-512 hash-code of the following string:
<rand>/<methodName>?param1=value1¶m2=value2...¶mN=valueN#<secret>
where (param_1, value_1), (param_2, value_2),..., (param_n, value_n) are all the
request parameters (including apiKey, time, but excluding apiSig) with corresponding values,
sorted lexicographically first by param_i, then by value_i.
:return:
"""
rand = str(random.randint(100000, 999999))
s = '{}/{}?'.format(rand, method)
ordered_params = OrderedDict(sorted(params.items(), key=operator.itemgetter(0)))
s += '&'.join(map(self.__key_value_to_http_parameter, ordered_params.items()))
s += '#' + self.secret
return rand + hashlib.sha512(s.encode()).hexdigest()
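    # A worked example of the string that gets hashed (all values are hypothetical):
    # with method 'contest.hacks', params {'apiKey': 'xxx', 'contestId': 566,
    # 'time': 1416437961} and rand '123456', the hashed string is
    # '123456/contest.hacks?apiKey=xxx&contestId=566&time=1416437961#<secret>'
    # and apiSig is '123456' followed by its SHA-512 hex digest.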
@staticmethod
def __get_valid_args(**kwargs):
"""
Filters only not None values
"""
return {k: v for k, v in kwargs.items() if v is not None}
@staticmethod
def __key_value_to_http_parameter(key_value):
"""
Transforms dictionary of values to http parameters
"""
key, value = key_value
if isinstance(value, list):
value = ';'.join(sorted(map(str, value)))
else:
value = str(value)
return '{0}={1}'.format(key, value)
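    # For example (values are hypothetical): ('contestId', 374) becomes 'contestId=374',
    # while ('handles', ['UserB', 'UserA']) becomes 'handles=UserA;UserB' because list
    # values are sorted and joined with ';'.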
@staticmethod
def __check_json(answer):
"""
Check if answer is correct according to http://codeforces.com/api/help
"""
values = json.loads(answer)
try:
if values['status'] == 'OK':
return values['result']
else:
raise ValueError(values['comment'])
except KeyError as e:
raise ValueError('Missed required field', e.args[0])
@property
def base(self):
"""
:return: Base of url according to language
:rtype: str
"""
return self._base_from_language[self.language]
@property
def language(self):
"""
:returns: Language. By default is en
:rtype: CodeforcesLanguage
"""
return self._language
@language.setter
def language(self, value):
"""
:param value: Language
:type value: CodeforcesLanguage or str
"""
assert isinstance(value, (CodeforcesLanguage, str))
self._language = CodeforcesLanguage(value)
@property
def key(self):
"""
The private api key
:returns: Key or None if not presented
:rtype: str
"""
return self._key
@key.setter
def key(self, value):
"""
The private api key
:param value: Key or None
:type value: str
"""
assert isinstance(value, str) or value is None
self._key = value
@property
def secret(self):
"""
The secret part of api key
:returns: Secret or None if not presented
:rtype: str
"""
return self._secret
@secret.setter
def secret(self, value):
"""
The secret part of api key
:param value: Secret or None
:type value: str
"""
assert isinstance(value, str) or value is None
self._secret = value
class CodeforcesAPI:
"""
This class provides api for retrieving data from codeforces.com
"""
def __init__(self, lang='en', key=None, secret=None):
"""
:param lang: Language
:type lang: str or CodeforcesLanguage
:param key: Private API key. Ignored if secret is None
:type key: str
:param secret: Private API secret. Ignored if key is None
:type secret: str
"""
self._data_retriever = CodeforcesDataRetriever(CodeforcesLanguage(lang), key, secret)
def contest_hacks(self, contest_id):
"""
Returns list of hacks in the specified contests.
Full information about hacks is available only after some time after the contest end.
During the contest user can see only own hacks.
:param contest_id: Id of the contest.
It is not the round number. It can be seen in contest URL. For example: /contest/374/status
:type contest_id: int
:return: Returns an iterator of Hack objects.
:rtype: iterator of Hack
"""
assert isinstance(contest_id, int)
data = self._data_retriever.get_data('contest.hacks', contestId=contest_id)
return map(Hack, data)
def contest_list(self, gym=False):
"""
Returns information about all available contests.
        :param gym: If true, then gym contests are returned. Otherwise, regular contests are returned.
:type gym: bool
        :return: Returns an iterator of Contest objects. If this method is called with authorization,
then all available contests for a calling user will be returned too,
including mashups and private gyms.
:rtype: iterator of Contest
"""
data = self._data_retriever.get_data('contest.list', gym=gym)
return map(Contest, data)
def contest_rating_changes(self, contest_id):
"""
Returns rating changes after the contest.
:param contest_id: Id of the contest. It is not the round number. It can be seen in contest URL.
:return: Returns an iterator of RatingChange objects.
:rtype: iterator of RatingChange
"""
data = self._data_retriever.get_data('contest.ratingChanges', contestId=contest_id)
return map(RatingChange, data)
def contest_standings(self, contest_id, from_=1, count=None, handles=None, show_unofficial=False):
"""
Returns the description of the contest and the requested part of the standings.
:param contest_id: Id of the contest. It is not the round number. It can be seen in contest URL.
For example: /contest/374/status
:type contest_id: int
:param from_: 1-based index of the standings row to start the ranklist.
:type from_: int
:param count: Number of standing rows to return.
:type count: int
:param handles: List of handles. No more than 10000 handles is accepted.
:type handles: list of str
:param show_unofficial: If true than all participants (virtual, out of competition) are shown.
Otherwise, only official contestants are shown.
:type show_unofficial: bool
:return: Returns object with three fields: "contest", "problems" and "rows".
Field "contest" contains a Contest object.
Field "problems" contains an iterator of Problem objects.
        Field "rows" contains an iterator of RanklistRow objects.
:rtype: {'contest': Contest,
'problems': iterator of Problem,
'rows': iterator of RanklistRow}
"""
assert isinstance(contest_id, int), 'contest_id should be of type int, not {}'.format(type(contest_id))
assert isinstance(from_, int), 'from_ should be of type int, not {}'.format(type(from_))
assert isinstance(count, int) or count is None, 'count should be of type int, not {}'.format(type(count))
assert isinstance(handles, list) or handles is None, \
'handles should be of type list of str, not {}'.format(type(handles))
assert handles is None or len(handles) <= 10000, 'No more than 10000 handles is accepted'
assert isinstance(show_unofficial, bool), \
'show_unofficial should be of type bool, not {}'.format(type(show_unofficial))
data = self._data_retriever.get_data('contest.standings',
contestId=contest_id,
count=count,
handles=handles,
showUnofficial=show_unofficial,
**{'from': from_})
return {'contest': Contest(data['contest']),
'problems': map(Problem, data['problems']),
'rows': map(RanklistRow, data['rows'])}
def contest_status(self, contest_id, handle=None, from_=1, count=None):
"""
Returns submissions for specified contest.
Optionally can return submissions of specified user.
:param contest_id: Id of the contest.
It is not the round number. It can be seen in contest URL. For example: /contest/374/status
:type contest_id: int
:param handle: Codeforces user handle.
:type handle: str
:param from_: 1-based index of the first submission to return.
:type from_: int
:param count: Number of returned submissions.
:type count: int
:return: Returns an iterator of Submission objects, sorted in decreasing order of submission id.
:rtype: iterator of Submission
"""
assert isinstance(contest_id, int)
assert isinstance(handle, str) or handle is None
assert isinstance(from_, int)
assert isinstance(count, int) or count is None
data = self._data_retriever.get_data('contest.status',
contestId=contest_id,
handle=handle,
count=count,
**{'from': from_})
return map(Submission, data)
def problemset_problems(self, tags=None):
"""
Returns all problems from problemset. Problems can be filtered by tags.
:param tags: List of tags.
:type tags: list of str
:return: Returns two iterators. Iterator of Problem objects and iterator of ProblemStatistics objects.
:rtype: {'problems': list of Problem,
'problemStatistics': list of ProblemStatistics}
"""
data = self._data_retriever.get_data('problemset.problems', tags=tags)
return {'problems': map(Problem, data['problems']),
'problemStatistics': map(ProblemStatistics, data['problemStatistics'])}
def problemset_recent_status(self, count):
"""
Returns recent submissions.
:param count: Number of submissions to return. Can be up to 1000.
:type count: int
:return: Returns an iterator of Submission objects, sorted in decreasing order of submission id.
:rtype: iterator of Submission
"""
assert isinstance(count, int)
assert 0 < count <= 1000
data = self._data_retriever.get_data('problemset.recentStatus', count=count)
return map(Submission, data)
def user_info(self, handles):
"""
Returns information about one or several users.
:param handles: List of handles. No more than 10000 handles is accepted.
:type handles: list of str
:return: Returns an iterator of User objects for requested handles.
:rtype: iterator of User
"""
assert isinstance(handles, list)
data = self._data_retriever.get_data('user.info', handles=handles)
return map(User, data)
def user_rated_list(self, active_only=False):
"""
Returns the list of all rated users.
:param active_only: If true then only users, who participated in rated contest during the last month are
returned. Otherwise, all users with at least one rated contest are returned.
:type active_only: bool
:return: Returns an iterator of User objects, sorted in decreasing order of rating.
:rtype: iterator of User
"""
assert isinstance(active_only, bool)
data = self._data_retriever.get_data('user.ratedList', activeOnly=active_only)
return map(User, data)
def user_rating(self, handle):
"""
Returns rating history of the specified user.
:param handle: Codeforces user handle.
:type handle: str
:return: Returns an iterator of RatingChange objects for requested user.
:rtype: iterator of RatingChange
"""
assert isinstance(handle, str), 'Handle should have str type, not {}'.format(type(handle))
data = self._data_retriever.get_data('user.rating', handle=handle)
return map(RatingChange, data)
def user_status(self, handle, from_=1, count=None):
"""
Returns submissions of specified user.
:param handle: Codeforces user handle.
:type handle: str
:param from_: 1-based index of the first submission to return
:type from_: int
:param count: Number of returned submissions.
:type count: int or None
:return: Returns an iterator of Submission objects, sorted in decreasing order of submission id.
:rtype: iterator of Submission
"""
assert isinstance(handle, str)
assert isinstance(from_, int)
assert isinstance(count, int) or count is None
data = self._data_retriever.get_data('user.status', handle=handle, count=count, **{'from': from_})
return map(Submission, data)
| soon/CodeforcesAPI | codeforces/api/codeforces_api.py | Python | mit | 16,753 |
import gc
import hashlib
import os
import os.path
import tempfile
import zipfile
import numpy as np
import pytest
import requests
from hyperspy import signals
from hyperspy.io import load
MY_PATH = os.path.dirname(__file__)
ZIPF = os.path.join(MY_PATH, "edax_files.zip")
TMP_DIR = tempfile.TemporaryDirectory()
TEST_FILES_OK = os.path.isfile(ZIPF)
REASON = ""
SHA256SUM = "e217c71efbd208da4b52e9cf483443f9da2175f2924a96447ed393086fe32008"
# The test files are not included in HyperSpy v1.4 because their file size is 36.5MB
# taking the HyperSpy source distribution file size above PyPI's 60MB limit.
# As a temporary solution, we attempt to download the test files from GitHub
# and skip the tests if the download fails.
if not TEST_FILES_OK:
try:
r = requests.get(
"https://github.com/hyperspy/hyperspy/blob/e7a323a3bb9b237c24bd9267d2cc4fcb31bb99f3/hyperspy/tests/io/edax_files.zip?raw=true")
SHA256SUM_GOT = hashlib.sha256(r.content).hexdigest()
if SHA256SUM_GOT == SHA256SUM:
with open(ZIPF, 'wb') as f:
f.write(r.content)
TEST_FILES_OK = True
else:
            REASON = "wrong sha256sum of downloaded file. Expected: %s, got: %s" % (SHA256SUM, SHA256SUM_GOT)
except BaseException as e:
REASON = "download of EDAX test files failed: %s" % e
def setup_module():
if TEST_FILES_OK:
with zipfile.ZipFile(ZIPF, 'r') as zipped:
zipped.extractall(TMP_DIR.name)
pytestmark = pytest.mark.skipif(not TEST_FILES_OK,
reason=REASON)
def teardown_module():
TMP_DIR.cleanup()
class TestSpcSpectrum_v061_xrf:
@classmethod
def setup_class(cls):
cls.spc = load(os.path.join(TMP_DIR.name, "spc0_61-ipr333_xrf.spc"))
cls.spc_loadAll = load(os.path.join(TMP_DIR.name,
"spc0_61-ipr333_xrf.spc"),
load_all_spc=True)
@classmethod
def teardown_class(cls):
del cls.spc, cls.spc_loadAll
gc.collect()
def test_data(self):
# test datatype
assert np.uint32 == TestSpcSpectrum_v061_xrf.spc.data.dtype
# test data shape
assert (4000,) == TestSpcSpectrum_v061_xrf.spc.data.shape
# test 40 datapoints
assert (
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 319, 504, 639, 924,
1081, 1326, 1470, 1727, 1983, 2123, 2278, 2509, 2586, 2639,
2681, 2833, 2696, 2704, 2812, 2745, 2709, 2647, 2608, 2620,
2571, 2669] == TestSpcSpectrum_v061_xrf.spc.data[:40].tolist())
def test_parameters(self):
elements = TestSpcSpectrum_v061_xrf.spc.metadata.as_dictionary()[
'Sample']['elements']
sem_dict = TestSpcSpectrum_v061_xrf.spc.metadata.as_dictionary()[
'Acquisition_instrument']['SEM'] # this will eventually need to
# be changed when XRF-specific
# features are added
eds_dict = sem_dict['Detector']['EDS']
signal_dict = TestSpcSpectrum_v061_xrf.spc.metadata.as_dictionary()[
'Signal']
# Testing SEM parameters
np.testing.assert_allclose(30, sem_dict['beam_energy'])
np.testing.assert_allclose(0, sem_dict['Stage']['tilt_alpha'])
# Testing EDS parameters
np.testing.assert_allclose(45, eds_dict['azimuth_angle'])
np.testing.assert_allclose(35, eds_dict['elevation_angle'])
np.testing.assert_allclose(137.92946, eds_dict['energy_resolution_MnKa'],
atol=1E-5)
np.testing.assert_allclose(2561.0, eds_dict['live_time'], atol=1E-6)
# Testing elements
assert ({'Al', 'Ca', 'Cl', 'Cr', 'Fe', 'K', 'Mg', 'Mn', 'Si', 'Y'} ==
set(elements))
# Testing HyperSpy parameters
assert 'EDS_SEM' == signal_dict['signal_type']
assert isinstance(TestSpcSpectrum_v061_xrf.spc, signals.EDSSEMSpectrum)
def test_axes(self):
spc_ax_manager = {'axis-0': {'_type': 'UniformDataAxis',
'name': 'Energy',
'navigate': False,
'is_binned': True,
'offset': 0.0,
'scale': 0.01,
'size': 4000,
'units': 'keV'}}
assert (spc_ax_manager ==
TestSpcSpectrum_v061_xrf.spc.axes_manager.as_dictionary())
def test_load_all_spc(self):
spc_header = TestSpcSpectrum_v061_xrf.spc_loadAll.original_metadata[
'spc_header']
np.testing.assert_allclose(4, spc_header['analysisType'])
np.testing.assert_allclose(4, spc_header['analyzerType'])
np.testing.assert_allclose(2013, spc_header['collectDateYear'])
np.testing.assert_allclose(9, spc_header['collectDateMon'])
np.testing.assert_allclose(26, spc_header['collectDateDay'])
np.testing.assert_equal(b'Garnet1.', spc_header['fileName'].view('|S8')[0])
np.testing.assert_allclose(45, spc_header['xRayTubeZ'])
class TestSpcSpectrum_v070_eds:
@classmethod
def setup_class(cls):
cls.spc = load(os.path.join(TMP_DIR.name, "single_spect.spc"))
cls.spc_loadAll = load(os.path.join(TMP_DIR.name,
"single_spect.spc"),
load_all_spc=True)
@classmethod
def teardown_class(cls):
del cls.spc, cls.spc_loadAll
gc.collect()
def test_data(self):
# test datatype
assert np.uint32 == TestSpcSpectrum_v070_eds.spc.data.dtype
# test data shape
assert (4096,) == TestSpcSpectrum_v070_eds.spc.data.shape
# test 1st 20 datapoints
assert (
[0, 0, 0, 0, 0, 0, 1, 2, 3, 3, 10, 4, 10, 10, 45, 87, 146, 236,
312, 342] == TestSpcSpectrum_v070_eds.spc.data[:20].tolist())
def test_parameters(self):
elements = TestSpcSpectrum_v070_eds.spc.metadata.as_dictionary()[
'Sample']['elements']
sem_dict = TestSpcSpectrum_v070_eds.spc.metadata.as_dictionary()[
'Acquisition_instrument']['SEM']
eds_dict = sem_dict['Detector']['EDS']
signal_dict = TestSpcSpectrum_v070_eds.spc.metadata.as_dictionary()[
'Signal']
# Testing SEM parameters
np.testing.assert_allclose(22, sem_dict['beam_energy'])
np.testing.assert_allclose(0, sem_dict['Stage']['tilt_alpha'])
# Testing EDS parameters
np.testing.assert_allclose(0, eds_dict['azimuth_angle'])
np.testing.assert_allclose(34, eds_dict['elevation_angle'])
np.testing.assert_allclose(129.31299, eds_dict['energy_resolution_MnKa'],
atol=1E-5)
np.testing.assert_allclose(50.000004, eds_dict['live_time'], atol=1E-6)
# Testing elements
assert ({'Al', 'C', 'Ce', 'Cu', 'F', 'Ho', 'Mg', 'O'} ==
set(elements))
# Testing HyperSpy parameters
assert 'EDS_SEM' == signal_dict['signal_type']
assert isinstance(TestSpcSpectrum_v070_eds.spc, signals.EDSSEMSpectrum)
def test_axes(self):
spc_ax_manager = {'axis-0': {'_type': 'UniformDataAxis',
'name': 'Energy',
'navigate': False,
'is_binned': True,
'offset': 0.0,
'scale': 0.01,
'size': 4096,
'units': 'keV'}}
assert (spc_ax_manager ==
TestSpcSpectrum_v070_eds.spc.axes_manager.as_dictionary())
def test_load_all_spc(self):
spc_header = TestSpcSpectrum_v070_eds.spc_loadAll.original_metadata[
'spc_header']
np.testing.assert_allclose(4, spc_header['analysisType'])
np.testing.assert_allclose(5, spc_header['analyzerType'])
np.testing.assert_allclose(2016, spc_header['collectDateYear'])
np.testing.assert_allclose(4, spc_header['collectDateMon'])
np.testing.assert_allclose(19, spc_header['collectDateDay'])
np.testing.assert_equal(b'C:\\ProgramData\\EDAX\\jtaillon\\Cole\\Mapping\\Lsm\\'
b'GFdCr\\950\\Area 1\\spectrum20160419153851427_0.spc',
spc_header['longFileName'].view('|S256')[0])
np.testing.assert_allclose(0, spc_header['xRayTubeZ'])
class TestSpdMap_070_eds:
@classmethod
def setup_class(cls):
cls.spd = load(os.path.join(TMP_DIR.name, "spd_map.spd"),
convert_units=True)
@classmethod
def teardown_class(cls):
del cls.spd
gc.collect()
def test_data(self):
# test d_type
assert np.uint16 == TestSpdMap_070_eds.spd.data.dtype
# test d_shape
assert (200, 256, 2500) == TestSpdMap_070_eds.spd.data.shape
assert ([[[0, 0, 0, 0, 0], # test random data
[0, 0, 1, 0, 1],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 1],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 1, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0]],
[[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 1],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0]]] ==
TestSpdMap_070_eds.spd.data[15:20, 15:20, 15:20].tolist())
def test_parameters(self):
elements = TestSpdMap_070_eds.spd.metadata.as_dictionary()[
'Sample']['elements']
sem_dict = TestSpdMap_070_eds.spd.metadata.as_dictionary()[
'Acquisition_instrument']['SEM']
eds_dict = sem_dict['Detector']['EDS']
signal_dict = TestSpdMap_070_eds.spd.metadata.as_dictionary()['Signal']
# Testing SEM parameters
np.testing.assert_allclose(22, sem_dict['beam_energy'])
np.testing.assert_allclose(0, sem_dict['Stage']['tilt_alpha'])
# Testing EDS parameters
np.testing.assert_allclose(0, eds_dict['azimuth_angle'])
np.testing.assert_allclose(34, eds_dict['elevation_angle'])
np.testing.assert_allclose(126.60252, eds_dict['energy_resolution_MnKa'],
atol=1E-5)
np.testing.assert_allclose(2621.4399, eds_dict['live_time'], atol=1E-4)
# Testing elements
assert {'Ce', 'Co', 'Cr', 'Fe', 'Gd', 'La', 'Mg', 'O',
'Sr'} == set(elements)
# Testing HyperSpy parameters
assert 'EDS_SEM' == signal_dict['signal_type']
assert isinstance(TestSpdMap_070_eds.spd, signals.EDSSEMSpectrum)
def test_axes(self):
spd_ax_manager = {'axis-0': {'_type': 'UniformDataAxis',
'name': 'y',
'navigate': True,
'is_binned': False,
'offset': 0.0,
'scale': 14.227345585823057,
'size': 200,
'units': 'nm'},
'axis-1': {'_type': 'UniformDataAxis',
'name': 'x',
'navigate': True,
'is_binned': False,
'offset': 0.0,
'scale': 14.235896058380602,
'size': 256,
'units': 'nm'},
'axis-2': {'_type': 'UniformDataAxis',
'name': 'Energy',
'navigate': False,
'is_binned': True,
'offset': 0.0,
'scale': 0.0050000000000000001,
'size': 2500,
'units': 'keV'}}
assert (spd_ax_manager ==
TestSpdMap_070_eds.spd.axes_manager.as_dictionary())
def test_ipr_reading(self):
ipr_header = TestSpdMap_070_eds.spd.original_metadata['ipr_header']
np.testing.assert_allclose(0.014235896, ipr_header['mppX'])
np.testing.assert_allclose(0.014227346, ipr_header['mppY'])
def test_spc_reading(self):
# Test to make sure that spc metadata matches spd metadata
spc_header = TestSpdMap_070_eds.spd.original_metadata['spc_header']
elements = TestSpdMap_070_eds.spd.metadata.as_dictionary()[
'Sample']['elements']
sem_dict = TestSpdMap_070_eds.spd.metadata.as_dictionary()[
'Acquisition_instrument']['SEM']
eds_dict = sem_dict['Detector']['EDS']
np.testing.assert_allclose(spc_header.azimuth,
eds_dict['azimuth_angle'])
np.testing.assert_allclose(spc_header.detReso,
eds_dict['energy_resolution_MnKa'])
np.testing.assert_allclose(spc_header.elevation,
eds_dict['elevation_angle'])
np.testing.assert_allclose(spc_header.liveTime,
eds_dict['live_time'])
np.testing.assert_allclose(spc_header.evPerChan,
TestSpdMap_070_eds.spd.axes_manager[2].scale * 1000)
np.testing.assert_allclose(spc_header.kV,
sem_dict['beam_energy'])
np.testing.assert_allclose(spc_header.numElem,
len(elements))
class TestSpdMap_061_xrf:
@classmethod
def setup_class(cls):
cls.spd = load(os.path.join(TMP_DIR.name, "spc0_61-ipr333_xrf.spd"),
convert_units=True)
@classmethod
def teardown_class(cls):
del cls.spd
gc.collect()
def test_data(self):
# test d_type
assert np.uint16 == TestSpdMap_061_xrf.spd.data.dtype
# test d_shape
assert (200, 256, 2000) == TestSpdMap_061_xrf.spd.data.shape
assert ([[[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0]],
[[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 1]],
[[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 1]],
[[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]] ==
TestSpdMap_061_xrf.spd.data[15:20, 15:20, 15:20].tolist())
def test_parameters(self):
elements = TestSpdMap_061_xrf.spd.metadata.as_dictionary()['Sample'][
'elements']
sem_dict = TestSpdMap_061_xrf.spd.metadata.as_dictionary()[
'Acquisition_instrument']['SEM']
eds_dict = sem_dict['Detector']['EDS']
signal_dict = TestSpdMap_061_xrf.spd.metadata.as_dictionary()['Signal']
# Testing SEM parameters
np.testing.assert_allclose(30, sem_dict['beam_energy'])
np.testing.assert_allclose(0, sem_dict['Stage']['tilt_alpha'])
# Testing EDS parameters
np.testing.assert_allclose(45, eds_dict['azimuth_angle'])
np.testing.assert_allclose(35, eds_dict['elevation_angle'])
np.testing.assert_allclose(137.92946, eds_dict['energy_resolution_MnKa'],
atol=1E-5)
np.testing.assert_allclose(2561.0, eds_dict['live_time'], atol=1E-4)
# Testing elements
assert {'Al', 'Ca', 'Cl', 'Cr', 'Fe', 'K', 'Mg', 'Mn', 'Si',
'Y'} == set(elements)
# Testing HyperSpy parameters
assert 'EDS_SEM' == signal_dict['signal_type']
assert isinstance(TestSpdMap_061_xrf.spd, signals.EDSSEMSpectrum)
def test_axes(self):
spd_ax_manager = {'axis-0': {'_type': 'UniformDataAxis',
'name': 'y',
'navigate': True,
'is_binned': False,
'offset': 0.0,
'scale': 0.5651920166015625,
'size': 200,
'units': 'mm'},
'axis-1': {'_type': 'UniformDataAxis',
'name': 'x',
'navigate': True,
'is_binned': False,
'offset': 0.0,
'scale': 0.5651920166015625,
'size': 256,
'units': 'mm'},
'axis-2': {'_type': 'UniformDataAxis',
'name': 'Energy',
'navigate': False,
'is_binned': True,
'offset': 0.0,
'scale': 0.01,
'size': 2000,
'units': 'keV'}}
assert (spd_ax_manager ==
TestSpdMap_061_xrf.spd.axes_manager.as_dictionary())
def test_ipr_reading(self):
ipr_header = TestSpdMap_061_xrf.spd.original_metadata['ipr_header']
np.testing.assert_allclose(565.1920166015625, ipr_header['mppX'])
np.testing.assert_allclose(565.1920166015625, ipr_header['mppY'])
def test_spc_reading(self):
# Test to make sure that spc metadata matches spd_061_xrf metadata
spc_header = TestSpdMap_061_xrf.spd.original_metadata['spc_header']
elements = TestSpdMap_061_xrf.spd.metadata.as_dictionary()['Sample'][
'elements']
sem_dict = TestSpdMap_061_xrf.spd.metadata.as_dictionary()[
'Acquisition_instrument']['SEM']
eds_dict = sem_dict['Detector']['EDS']
np.testing.assert_allclose(spc_header.azimuth,
eds_dict['azimuth_angle'])
np.testing.assert_allclose(spc_header.detReso,
eds_dict['energy_resolution_MnKa'])
np.testing.assert_allclose(spc_header.elevation,
eds_dict['elevation_angle'])
np.testing.assert_allclose(spc_header.liveTime,
eds_dict['live_time'])
np.testing.assert_allclose(spc_header.evPerChan,
TestSpdMap_061_xrf.spd.axes_manager[2].scale * 1000)
np.testing.assert_allclose(spc_header.kV,
sem_dict['beam_energy'])
np.testing.assert_allclose(spc_header.numElem,
len(elements))
| thomasaarholt/hyperspy | hyperspy/tests/io/test_edax.py | Python | gpl-3.0 | 19,779 |
# coding=utf-8
from django.http import HttpResponse, Http404
from django.shortcuts import loader
import json
from pmtour.models import Tournament, Player
def get_tour(tour_id):
try:
tour = Tournament.objects.get(alias=tour_id)
except Tournament.DoesNotExist:
try:
tour = Tournament.objects.get(tour_id=tour_id)
except Tournament.DoesNotExist:
raise Http404
return tour
def get_perm(request, tour):
return request.user.is_staff or not request.user.is_anonymous and request.user.playeruser in tour.admins.all()
def get_a_tour(request, tour_id):
tour = get_tour(tour_id)
has_perm = get_perm(request, tour)
tour.refresh()
return tour, has_perm
def get_player_by_request(request, tour):
if request.user.is_anonymous:
return None
try:
return tour.player_set.get(user=request.user.playeruser)
except Player.DoesNotExist:
return None
def get_player_printable(sts, player):
if player is None:
return ""
for q in sts:
if q["pid"] == player.playerid:
return "%s(%s) (%s) %s" % (player.name, player.user.name, q["match"], q["score"])
return ""
def get_bracket(request, tour, has_perm, player=None, turn=None):
temp = loader.get_template("pmtour/bracket_main.html")
if turn is None:
turn = tour.get_current_turn()
log_set = tmp_log_set = turn.log_set.all()
if not has_perm and player is None:
player = get_player_by_request(request, tour)
sts = turn.get_standing()
if sts is not None:
log_set = []
for logs in tmp_log_set:
log_set.append({
"id": logs.id,
"player_a": get_player_printable(sts, logs.player_a),
"player_b": get_player_printable(sts, logs.player_b),
"status": logs.status
})
cont = {
"tour": tour, "has_perm": has_perm, "turn": turn, "logs": log_set, "player": player
}
return temp.render(cont, request)
def get_standings(request, tour, has_perm, player=None, turn=None):
temp = loader.get_template("pmtour/standings_main.html")
if turn is None:
turn = tour.get_last_turn()
if turn is None:
cont = {
"tour": tour, "has_perm": has_perm, "standings": None
}
return temp.render(cont, request)
standings_set = turn.get_standing()
if standings_set is not None:
for s in standings_set:
p = tour.player_set.get(playerid=s["pid"])
s["name"] = p.name
if not has_perm and player is None:
player = get_player_by_request(request, tour)
elimed = 0
if tour.on_swiss_over(turn.turn_number):
elimed = tour.get_option("elims")
cont = {
"tour": tour, "has_perm": has_perm, "standings": standings_set, "player": player, "elimed": elimed
}
return temp.render(cont, request)
def ret_no_perm(request, tour_id):
temp = loader.get_template("pmtour/no_perm.html")
cont = {"tour": get_tour(tour_id)}
return HttpResponse(temp.render(cont, request))
def ret_tempcont(request, template_path, context_dict):
temp = loader.get_template(template_path)
return HttpResponse(temp.render(context_dict, request))
def ret_json_data(data):
return HttpResponse(json.dumps(data))
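# Illustrative sketch (not part of the original module): a minimal view built
# from the helpers above. The view name is hypothetical and the URL wiring is
# assumed to exist elsewhere; the rendered standings string is returned as-is.
def standings_page(request, tour_id):
    tour, has_perm = get_a_tour(request, tour_id)
    return HttpResponse(get_standings(request, tour, has_perm))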
| sunoru/pokemon_tournament | pmtour/views/utils.py | Python | mit | 3,347 |
__author__ = 'Paolo Bellagente'
# Documentation for this module.
#
# More details.
################################## DATABASE ##############################################
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
import datetime
## Database name
db_name = "testDatabase"
## Database user
db_uid = "root"
## Database user's password
db_passwd = ""
## Database host
db_host = "localhost"
##
# set the database connection engine
engine = create_engine('mysql+pymysql://'+db_uid+':'+db_passwd+'@'+db_host+'/'+db_name)
## Base class for table inheritance
#
# Allows instantiating the base class once and reusing it for all tables
Base = declarative_base()
class Lesson(Base):
__tablename__ = 'lessons'
id = Column(INTEGER, primary_key=True)
semesterStartDate = Column(DATE)
semesterEndDate = Column(DATE)
# lesson's start hour
hour = Column(TIME)
    # lesson's day of the week, coded from 0 to 6 where 0 is Monday and 6 is Sunday.
day = Column(INTEGER)
subject = Column(VARCHAR(200))
rooms = Column(VARCHAR(30))
address = Column(VARCHAR(50))
teacher = Column(VARCHAR(50))
def __init__(self):
self.teacher = ''
# persist the entity into the database
def persist(self):
Session = sessionmaker(bind=engine)
session = Session()
session.add(self)
session.commit()
session.close()
# todo: create new entity here
## Create the necessary tables in the database
Base.metadata.create_all(engine)
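# Illustrative usage sketch (not part of the original module). It assumes the
# MySQL database configured above is reachable; the lesson values are made up.
if __name__ == "__main__":
    example = Lesson()
    example.semesterStartDate = datetime.date(2015, 9, 14)
    example.semesterEndDate = datetime.date(2016, 1, 22)
    example.hour = datetime.time(10, 30)
    example.day = 0  # Monday, following the 0-6 coding used above
    example.subject = "Operating Systems"
    example.rooms = "B2.3"
    example.address = "Via Branze 38"
    example.teacher = "P. Bellagente"
    example.persist()  # opens a session, commits the row and closes the session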
| mpescimoro/stripp3r | lessonEntity.py | Python | gpl-3.0 | 1,603 |
import math
import numpy as np
import os
# checks if the directory where the file will be written does exist
################################################################################
def ensure_dir(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
# Gives an array of 3d vectors for summation
################################################################################
def cartesian(arrays, out=None):
arrays = [np.asarray(x) for x in arrays]
dtype = arrays[0].dtype
n = np.prod([x.size for x in arrays])
if out is None:
out = np.zeros([n, len(arrays)], dtype=dtype)
m = n / arrays[0].size
out[:,0] = np.repeat(arrays[0], m)
if arrays[1:]:
cartesian(arrays[1:], out=out[0:m,1:])
for j in xrange(1, arrays[0].size):
out[j*m:(j+1)*m,1:] = out[0:m,1:]
return out
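# Example (illustrative): cartesian(([0, 1], [4, 5])) returns
#   array([[0, 4],
#          [0, 5],
#          [1, 4],
#          [1, 5]])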
# creates the momentum array used for the sums
################################################################################
def create_momentum_array(p):
  # these momentum squares do not exist
exclude = [7, 15, 23, 28, 31, 39, 47, 55, 60, 63, 71, 79, 87, 92, 95, 103, \
111, 112, 119, 124, 127, 135, 143, 151, 156, 159, 167, 175, 183,\
188, 191, 199, 207, 215, 220, 223, 231, 239, 240, 247, 252, 255,\
263, 271, 279, 284, 287, 295]
if p in exclude:
return [], p
i = int(math.sqrt(p)+1)
n = [j for j in xrange(-i,i+1)]
r = cartesian((n, n, n))
out = []
for rr in r:
if (np.dot(rr, rr) == p):
out.append(np.ndarray.tolist(rr))
out = np.asarray(out, dtype=float)
if p > 302:
print('cannot converge, see zeta.py - create_momentum_array')
exit(0)
return out, p
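# Example (illustrative): create_momentum_array(1) returns the six integer
# vectors with squared length 1, i.e. (+-1,0,0), (0,+-1,0), (0,0,+-1), as a
# float array, together with the momentum square p=1.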
# creating the momentum arrays and writing them to disk
################################################################################
def main():
r = create_momentum_array(0)
for i in xrange(1, 302):
r = np.vstack((r, create_momentum_array(i)))
ensure_dir("./momenta")
np.save("./momenta", r)
# make this script importable, according to the Google Python Style Guide
if __name__ == '__main__':
main()
| chjost/analysis-code | analysis/zeta/create_momentum_array.py | Python | gpl-3.0 | 2,134 |
"""
EvMenu
This implements a full menu system for Evennia. It is considerably
more flexible than the older contrib/menusystem.py and also uses
menu plugin modules.
To start the menu, just import the EvMenu class from this module.
Example usage:
```python
from evennia.utils.evmenu import EvMenu
EvMenu(caller, menu_module_path,
startnode="node1",
cmdset_mergetype="Replace", cmdset_priority=1,
auto_quit=True, cmd_on_exit="look", persistent=True)
```
Where `caller` is the Object to use the menu on - it will get a new
cmdset while using the Menu. The menu_module_path is the python path
to a python module containing function definitions. By adjusting the
keyword options of the Menu() initialization call you can start the
menu at different places in the menu definition file, adjust if the
menu command should overload the normal commands or not, etc.
The `persistent` keyword will make the menu survive a server reboot.
It is `False` by default. Note that if using persistent mode, every
node and callback in the menu must be possible to be *pickled*, this
excludes e.g. callables that are class methods or functions defined
dynamically or as part of another function. In non-persistent mode
no such restrictions exist.
The menu is defined in a module (this can be the same module as the
command definition too) with function definitions:
```python
def node1(caller):
# (this is the start node if called like above)
# code
return text, options
def node_with_other_name(caller, input_string):
# code
return text, options
```
Where caller is the object using the menu and input_string is the
command entered by the user on the *previous* node (the command
entered to get to this node). The node function code will only be
executed once per node-visit and the system will accept nodes with
both one or two arguments interchangeably.
The menu tree itself is available on the caller as
`caller.ndb._menutree`. This makes it a convenient place to store
temporary state variables between nodes, since this NAttribute is
deleted when the menu is exited.
The return values must be given in the above order, but each can be
returned as None as well. If the options are returned as None, the
menu is immediately exited and the default "look" command is called.
text (str, tuple or None): Text shown at this node. If a tuple, the
second element in the tuple is a help text to display at this
node when the user enters the menu help command there.
options (tuple, dict or None): (
{'key': name, # can also be a list of aliases. A special key is
# "_default", which marks this option as the default
# fallback when no other option matches the user input.
'desc': description, # optional description
'goto': nodekey, # node to go to when chosen
'exec': nodekey}, # node or callback to trigger as callback when chosen.
# If a node key is given, the node will be executed once
# but its return values are ignored. If a callable is
# given, it must accept one or two args, like any node.
{...}, ...)
If key is not given, the option will automatically be identified by
its number 1..N.
Example:
```python
# in menu_module.py
def node1(caller):
text = ("This is a node text",
"This is help text for this node")
options = ({"key": "testing",
"desc": "Select this to go to node 2",
"goto": "node2",
"exec": "callback1"},
{"desc": "Go to node 3.",
"goto": "node3"})
return text, options
def callback1(caller):
# this is called when choosing the "testing" option in node1
# (before going to node2). It needs not have return values.
caller.msg("Callback called!")
def node2(caller):
text = '''
This is node 2. It only allows you to go back
to the original node1. This extra indent will
be stripped. We don't include a help text.
'''
options = {"goto": "node1"}
return text, options
def node3(caller):
text = "This ends the menu since there are no options."
return text, None
```
When starting this menu with `EvMenu(caller, "path.to.menu_module")`,
the first node will look something like this:
This is a node text
______________________________________
testing: Select this to go to node 2
2: Go to node 3
Where you can both enter "testing" and "1" to select the first option.
If the client supports MXP, they may also mouse-click on "testing" to
do the same. When making this selection, a function "callback1" in the
same module will be called (before moving on to node2). Using `help` will
show the help text, otherwise a list of
available commands while in menu mode.
The menu tree is exited either by using the in-menu quit command or by
reaching a node without any options.
For a menu demo, import CmdTestMenu from this module and add it to
your default cmdset. Run it with this module, like `testmenu
evennia.utils.evmenu`.
"""
from __future__ import print_function
from builtins import object, range
from textwrap import dedent
from inspect import isfunction, getargspec
from django.conf import settings
from evennia import Command, CmdSet
from evennia.utils import logger
from evennia.utils.evtable import EvTable
from evennia.utils.ansi import ANSIString, strip_ansi
from evennia.utils.utils import mod_import, make_iter, pad, m_len
from evennia.commands import cmdhandler
# read from protocol NAWS later?
_MAX_TEXT_WIDTH = settings.CLIENT_DEFAULT_WIDTH
# we use cmdhandler instead of evennia.syscmdkeys to
# avoid some cases of loading before evennia init'd
_CMD_NOMATCH = cmdhandler.CMD_NOMATCH
_CMD_NOINPUT = cmdhandler.CMD_NOINPUT
# Return messages
# i18n
from django.utils.translation import ugettext as _
_ERR_NOT_IMPLEMENTED = _("Menu node '{nodename}' is not implemented. Make another choice.")
_ERR_GENERAL = _("Error in menu node '{nodename}'.")
_ERR_NO_OPTION_DESC = _("No description.")
_HELP_FULL = _("Commands: <menu option>, help, quit")
_HELP_NO_QUIT = _("Commands: <menu option>, help")
_HELP_NO_OPTIONS = _("Commands: help, quit")
_HELP_NO_OPTIONS_NO_QUIT = _("Commands: help")
_HELP_NO_OPTION_MATCH = _("Choose an option or try 'help'.")
_ERROR_PERSISTENT_SAVING = \
"""
{error}
|rThe menu state could not be saved for persistent mode. Switching
to non-persistent mode (which means the menu session won't survive
an eventual server reload).|n
"""
_TRACE_PERSISTENT_SAVING = \
"EvMenu persistent-mode error. Commonly, this is because one or " \
"more of the EvEditor callbacks could not be pickled, for example " \
"because it's a class method or is defined inside another function."
class EvMenuError(RuntimeError):
"""
Error raised by menu when facing internal errors.
"""
pass
#------------------------------------------------------------
#
# Menu command and command set
#
#------------------------------------------------------------
class CmdEvMenuNode(Command):
"""
Menu options.
"""
key = _CMD_NOINPUT
aliases = [_CMD_NOMATCH]
locks = "cmd:all()"
help_category = "Menu"
def func(self):
"""
Implement all menu commands.
"""
def _restore(caller):
# check if there is a saved menu available.
# this will re-start a completely new evmenu call.
saved_options = caller.attributes.get("_menutree_saved")
if saved_options:
startnode_tuple = caller.attributes.get("_menutree_saved_startnode")
try:
startnode, startnode_input = startnode_tuple
                except ValueError: # old form of startnode storage
startnode, startnode_input = startnode_tuple, ""
if startnode:
saved_options[1]["startnode"] = startnode
saved_options[1]["startnode_input"] = startnode_input
# this will create a completely new menu call
EvMenu(caller, *saved_options[0], **saved_options[1])
return True
caller = self.caller
menu = caller.ndb._menutree
if not menu:
if _restore(caller):
return
orig_caller = caller
caller = caller.player if hasattr(caller, "player") else None
menu = caller.ndb._menutree if caller else None
if not menu:
if caller and _restore(caller):
return
caller = self.session
menu = caller.ndb._menutree
if not menu:
# can't restore from a session
err = "Menu object not found as %s.ndb._menutree!" % (orig_caller)
orig_caller.msg(err)
raise EvMenuError(err)
# we have a menu, use it.
menu._input_parser(menu, self.raw_string, caller)
class EvMenuCmdSet(CmdSet):
"""
The Menu cmdset replaces the current cmdset.
"""
key = "menu_cmdset"
priority = 1
mergetype = "Replace"
no_objs = True
no_exits = True
no_channels = False
def at_cmdset_creation(self):
"""
Called when creating the set.
"""
self.add(CmdEvMenuNode())
# These are default node formatters
def dedent_strip_nodetext_formatter(nodetext, has_options, caller=None):
"""
Simple dedent formatter that also strips text
"""
return dedent(nodetext).strip()
def dedent_nodetext_formatter(nodetext, has_options, caller=None):
"""
Just dedent text.
"""
return dedent(nodetext)
def evtable_options_formatter(optionlist, caller=None):
"""
Formats the option list display.
"""
if not optionlist:
return ""
# column separation distance
colsep = 4
nlist = len(optionlist)
# get the widest option line in the table.
table_width_max = -1
table = []
for key, desc in optionlist:
if not (key or desc):
continue
table_width_max = max(table_width_max,
max(m_len(p) for p in key.split("\n")) +
max(m_len(p) for p in desc.split("\n")) + colsep)
raw_key = strip_ansi(key)
if raw_key != key:
# already decorations in key definition
table.append(" |lc%s|lt%s|le: %s" % (raw_key, key, desc))
else:
# add a default white color to key
table.append(" |lc%s|lt|w%s|n|le: %s" % (raw_key, raw_key, desc))
    ncols = (_MAX_TEXT_WIDTH // table_width_max) + 1 # number of columns
nlastcol = nlist % ncols # number of elements left in last row
# get the amount of rows needed (start with 4 rows)
nrows = 4
while nrows * ncols < nlist:
nrows += 1
ncols = nlist // nrows # number of full columns
nlastcol = nlist % nrows # number of elements in last column
# get the final column count
ncols = ncols + 1 if nlastcol > 0 else ncols
if ncols > 1:
# only extend if longer than one column
table.extend([" " for i in range(nrows - nlastcol)])
# build the actual table grid
table = [table[icol * nrows : (icol * nrows) + nrows] for icol in range(0, ncols)]
# adjust the width of each column
for icol in range(len(table)):
col_width = max(max(m_len(p) for p in part.split("\n")) for part in table[icol]) + colsep
table[icol] = [pad(part, width=col_width + colsep, align="l") for part in table[icol]]
# format the table into columns
return unicode(EvTable(table=table, border="none"))
def underline_node_formatter(nodetext, optionstext, caller=None):
"""
Draws a node with underlines '_____' around it.
"""
nodetext_width_max = max(m_len(line) for line in nodetext.split("\n"))
options_width_max = max(m_len(line) for line in optionstext.split("\n"))
total_width = max(options_width_max, nodetext_width_max)
separator1 = "_" * total_width + "\n\n" if nodetext_width_max else ""
separator2 = "\n" + "_" * total_width + "\n\n" if total_width else ""
return separator1 + "|n" + nodetext + "|n" + separator2 + "|n" + optionstext
def null_node_formatter(nodetext, optionstext, caller=None):
"""
A minimalistic node formatter, no lines or frames.
"""
return nodetext + "\n\n" + optionstext
def evtable_parse_input(menuobject, raw_string, caller):
"""
Processes the user' node inputs.
Args:
menuobject (EvMenu): The EvMenu instance
raw_string (str): The incoming raw_string from the menu
command.
caller (Object, Player or Session): The entity using
the menu.
"""
cmd = raw_string.strip().lower()
if cmd in menuobject.options:
# this will take precedence over the default commands
# below
goto, callback = menuobject.options[cmd]
menuobject.callback_goto(callback, goto, raw_string)
elif menuobject.auto_look and cmd in ("look", "l"):
menuobject.display_nodetext()
elif menuobject.auto_help and cmd in ("help", "h"):
menuobject.display_helptext()
elif menuobject.auto_quit and cmd in ("quit", "q", "exit"):
menuobject.close_menu()
elif menuobject.default:
goto, callback = menuobject.default
menuobject.callback_goto(callback, goto, raw_string)
else:
caller.msg(_HELP_NO_OPTION_MATCH)
if not (menuobject.options or menuobject.default):
# no options - we are at the end of the menu.
menuobject.close_menu()
#------------------------------------------------------------
#
# Menu main class
#
#------------------------------------------------------------
class EvMenu(object):
"""
This object represents an operational menu. It is initialized from
a menufile.py instruction.
"""
def __init__(self, caller, menudata, startnode="start",
cmdset_mergetype="Replace", cmdset_priority=1,
auto_quit=True, auto_look=True, auto_help=True,
cmd_on_exit="look",
nodetext_formatter=dedent_strip_nodetext_formatter,
options_formatter=evtable_options_formatter,
node_formatter=underline_node_formatter,
input_parser=evtable_parse_input,
persistent=False, startnode_input="", **kwargs):
"""
Initialize the menu tree and start the caller onto the first node.
Args:
caller (Object, Player or Session): The user of the menu.
menudata (str, module or dict): The full or relative path to the module
holding the menu tree data. All global functions in this module
                whose name doesn't start with '_' will be parsed as menu nodes.
Also the module itself is accepted as input. Finally, a dictionary
menu tree can be given directly. This must then be a mapping
`{"nodekey":callable,...}` where `callable` must be called as
and return the data expected of a menu node. This allows for
dynamic menu creation.
startnode (str, optional): The starting node name in the menufile.
cmdset_mergetype (str, optional): 'Replace' (default) means the menu
commands will be exclusive - no other normal commands will
be usable while the user is in the menu. 'Union' means the
menu commands will be integrated with the existing commands
(it will merge with `merge_priority`), if so, make sure that
the menu's command names don't collide with existing commands
in an unexpected way. Also the CMD_NOMATCH and CMD_NOINPUT will
                be overloaded by the menu cmdset. Other cmdset mergetypes
                have little purpose for the menu.
cmdset_priority (int, optional): The merge priority for the
menu command set. The default (1) is usually enough for most
types of menus.
auto_quit (bool, optional): Allow user to use "q", "quit" or
"exit" to leave the menu at any point. Recommended during
development!
auto_look (bool, optional): Automatically make "looK" or "l" to
re-show the last node. Turning this off means you have to handle
re-showing nodes yourself, but may be useful if you need to
use "l" for some other purpose.
auto_help (bool, optional): Automatically make "help" or "h" show
the current help entry for the node. If turned off, eventual
help must be handled manually, but it may be useful if you
need 'h' for some other purpose, for example.
cmd_on_exit (callable, str or None, optional): When exiting the menu
(either by reaching a node with no options or by using the
in-built quit command (activated with `allow_quit`), this
callback function or command string will be executed.
The callback function takes two parameters, the caller then the
EvMenu object. This is called after cleanup is complete.
Set to None to not call any command.
nodetext_formatter (callable, optional): This callable should be on
the form `function(nodetext, has_options, caller=None)`, where `nodetext` is the
node text string and `has_options` a boolean specifying if there
are options associated with this node. It must return a formatted
string. `caller` is optionally a reference to the user of the menu.
options_formatter (callable, optional): This callable should be on
                the form `function(optionlist, caller=None)`, where `optionlist` is a list
                of option dictionaries, like
                    [{"key": ..., "desc": ..., "goto": ..., "exec": ...}, ...]
Each dictionary describes each possible option. Note that this
will also be called if there are no options, and so should be
able to handle an empty list. This should
be formatted into an options list and returned as a string,
including the required separator to use between the node text
and the options. If not given the default EvMenu style will be used.
`caller` is optionally a reference to the user of the menu.
node_formatter (callable, optional): This callable should be on the
form `func(nodetext, optionstext, caller=None)` where the arguments are strings
representing the node text and options respectively (possibly prepared
by `nodetext_formatter`/`options_formatter` or by the default styles).
It should return a string representing the final look of the node. This
can e.g. be used to create line separators that take into account the
dynamic width of the parts. `caller` is optionally a reference to the
user of the menu.
input_parser (callable, optional): This callable is responsible for parsing the
options dict from a node and has the form `func(menuobject, raw_string, caller)`,
                where `menuobject` is the active `EvMenu` instance, `raw_string` is the
incoming text from the caller and `caller` is the user of the menu.
It should use the helper method of the menuobject to goto new nodes, show
help texts etc. See the default `evtable_parse_input` function for help
with parsing.
persistent (bool, optional): Make the Menu persistent (i.e. it will
                survive a reload). This will make the Menu cmdset persistent. Use
with caution - if your menu is buggy you may end up in a state
you can't get out of! Also note that persistent mode requires
that all formatters, menu nodes and callables are possible to
*pickle*. When the server is reloaded, the latest node shown will be completely
re-run with the same input arguments - so be careful if you are counting
up some persistent counter or similar - the counter may be run twice if
reload happens on the node that does that.
startnode_input (str, optional): Send an input text to `startnode` as if
a user input text from a fictional previous node. When the server reloads,
the latest visited node will be re-run using this kwarg.
Kwargs:
any (any): All kwargs will become initialization variables on `caller._menutree`,
to be available at run.
Raises:
EvMenuError: If the start/end node is not found in menu tree.
Notes:
In persistent mode, all nodes, formatters and callbacks in
the menu must be possible to be *pickled*, this excludes
e.g. callables that are class methods or functions defined
dynamically or as part of another function. In
non-persistent mode no such restrictions exist.
"""
self._startnode = startnode
self._menutree = self._parse_menudata(menudata)
self._nodetext_formatter = nodetext_formatter
self._options_formatter = options_formatter
self._node_formatter = node_formatter
self._input_parser = input_parser
self._persistent = persistent
if startnode not in self._menutree:
raise EvMenuError("Start node '%s' not in menu tree!" % startnode)
# public variables made available to the command
self.caller = caller
self.auto_quit = auto_quit
self.auto_look = auto_look
self.auto_help = auto_help
if isinstance(cmd_on_exit, str):
self.cmd_on_exit = lambda caller, menu: caller.execute_cmd(cmd_on_exit)
elif callable(cmd_on_exit):
self.cmd_on_exit = cmd_on_exit
else:
self.cmd_on_exit = None
self.default = None
self.nodetext = None
self.helptext = None
self.options = None
# assign kwargs as initialization vars on ourselves.
if set(("_startnode", "_menutree", "_nodetext_formatter", "_options_formatter",
"node_formatter", "_input_parser", "_peristent", "cmd_on_exit", "default",
"nodetext", "helptext", "options")).intersection(set(kwargs.keys())):
raise RuntimeError("One or more of the EvMenu `**kwargs` is reserved by EvMenu for internal use.")
for key, val in kwargs.iteritems():
setattr(self, key, val)
# store ourself on the object
self.caller.ndb._menutree = self
if persistent:
# save the menu to the database
try:
caller.attributes.add("_menutree_saved",
((menudata, ),
{"startnode": startnode,
"cmdset_mergetype": cmdset_mergetype,
"cmdset_priority": cmdset_priority,
"auto_quit": auto_quit, "auto_look": auto_look, "auto_help": auto_help,
"cmd_on_exit": cmd_on_exit,
"nodetext_formatter": nodetext_formatter, "options_formatter": options_formatter,
"node_formatter": node_formatter, "input_parser": input_parser,
"persistent": persistent,}))
caller.attributes.add("_menutree_saved_startnode", (startnode, startnode_input))
except Exception as err:
caller.msg(_ERROR_PERSISTENT_SAVING.format(error=err))
logger.log_trace(_TRACE_PERSISTENT_SAVING)
persistent = False
# set up the menu command on the caller
menu_cmdset = EvMenuCmdSet()
menu_cmdset.mergetype = str(cmdset_mergetype).lower().capitalize() or "Replace"
menu_cmdset.priority = int(cmdset_priority)
self.caller.cmdset.add(menu_cmdset, permanent=persistent)
# start the menu
self.goto(self._startnode, startnode_input)
def _parse_menudata(self, menudata):
"""
Parse a menufile for node functions and store in dictionary
map. Alternatively, accept a pre-made mapping dictionary of
node functions.
Args:
menudata (str, module or dict): The python.path to the menufile,
or the python module itself. If a dict, this should be a
mapping nodename:callable, where the callable must match
the criteria for a menu node.
Returns:
menutree (dict): A {nodekey: func}
"""
if isinstance(menudata, dict):
# This is assumed to be a pre-loaded menu tree.
return menudata
else:
# a python path of a module
module = mod_import(menudata)
return dict((key, func) for key, func in module.__dict__.items()
if isfunction(func) and not key.startswith("_"))
def _format_node(self, nodetext, optionlist):
"""
Format the node text + option section
Args:
nodetext (str): The node text
optionlist (list): List of (key, desc) pairs.
Returns:
string (str): The options section, including
all needed spaces.
Notes:
This will adjust the columns of the options, first to use
            a maximum of 4 rows (expanding in columns), then gradually
growing to make use of the screen space.
"""
# handle the node text
nodetext = self._nodetext_formatter(nodetext, len(optionlist), self.caller)
# handle the options
optionstext = self._options_formatter(optionlist, self.caller)
# format the entire node
return self._node_formatter(nodetext, optionstext, self.caller)
def _execute_node(self, nodename, raw_string):
"""
Execute a node.
Args:
nodename (str): Name of node.
raw_string (str): The raw default string entered on the
previous node (only used if the node accepts it as an
argument)
Returns:
nodetext, options (tuple): The node text (a string or a
                tuple) and the options tuple, if any.
"""
try:
node = self._menutree[nodename]
except KeyError:
self.caller.msg(_ERR_NOT_IMPLEMENTED.format(nodename=nodename))
raise EvMenuError
try:
# the node should return data as (text, options)
if len(getargspec(node).args) > 1:
# a node accepting raw_string
nodetext, options = node(self.caller, raw_string)
else:
# a normal node, only accepting caller
nodetext, options = node(self.caller)
except KeyError:
self.caller.msg(_ERR_NOT_IMPLEMENTED.format(nodename=nodename))
raise EvMenuError
except Exception:
self.caller.msg(_ERR_GENERAL.format(nodename=nodename))
raise
return nodetext, options
def display_nodetext(self):
self.caller.msg(self.nodetext)
def display_helptext(self):
self.caller.msg(self.helptext)
def callback_goto(self, callback, goto, raw_string):
if callback:
self.callback(callback, raw_string)
if goto:
self.goto(goto, raw_string)
def callback(self, nodename, raw_string):
"""
Run a node as a callback. This makes no use of the return
values from the node.
Args:
nodename (str): Name of node.
raw_string (str): The raw default string entered on the
previous node (only used if the node accepts it as an
argument)
"""
if callable(nodename):
# this is a direct callable - execute it directly
try:
if len(getargspec(nodename).args) > 1:
# callable accepting raw_string
nodename(self.caller, raw_string)
else:
# normal callable, only the caller as arg
nodename(self.caller)
except Exception:
self.caller.msg(_ERR_GENERAL.format(nodename=nodename))
raise
else:
# nodename is a string; lookup as node
try:
# execute the node; we make no use of the return values here.
self._execute_node(nodename, raw_string)
except EvMenuError:
return
def goto(self, nodename, raw_string):
"""
Run a node by name
Args:
nodename (str): Name of node.
raw_string (str): The raw default string entered on the
previous node (only used if the node accepts it as an
argument)
"""
try:
# execute the node, make use of the returns.
nodetext, options = self._execute_node(nodename, raw_string)
except EvMenuError:
return
if self._persistent:
self.caller.attributes.add("_menutree_saved_startnode", (nodename, raw_string))
# validation of the node return values
helptext = ""
if hasattr(nodetext, "__iter__"):
if len(nodetext) > 1:
nodetext, helptext = nodetext[:2]
else:
nodetext = nodetext[0]
nodetext = "" if nodetext is None else str(nodetext)
options = [options] if isinstance(options, dict) else options
# this will be displayed in the given order
display_options = []
# this is used for lookup
self.options = {}
self.default = None
if options:
for inum, dic in enumerate(options):
# fix up the option dicts
keys = make_iter(dic.get("key"))
if "_default" in keys:
keys = [key for key in keys if key != "_default"]
desc = dic.get("desc", dic.get("text", _ERR_NO_OPTION_DESC).strip())
goto, execute = dic.get("goto", None), dic.get("exec", None)
self.default = (goto, execute)
else:
keys = list(make_iter(dic.get("key", str(inum+1).strip())))
desc = dic.get("desc", dic.get("text", _ERR_NO_OPTION_DESC).strip())
goto, execute = dic.get("goto", None), dic.get("exec", None)
if keys:
display_options.append((keys[0], desc))
for key in keys:
if goto or execute:
self.options[strip_ansi(key).strip().lower()] = (goto, execute)
self.nodetext = self._format_node(nodetext, display_options)
# handle the helptext
if helptext:
self.helptext = helptext
elif options:
self.helptext = _HELP_FULL if self.auto_quit else _HELP_NO_QUIT
else:
self.helptext = _HELP_NO_OPTIONS if self.auto_quit else _HELP_NO_OPTIONS_NO_QUIT
self.display_nodetext()
def close_menu(self):
"""
Shutdown menu; occurs when reaching the end node or using the quit command.
"""
self.caller.cmdset.remove(EvMenuCmdSet)
del self.caller.ndb._menutree
if self._persistent:
self.caller.attributes.remove("_menutree_saved")
self.caller.attributes.remove("_menutree_saved_startnode")
if self.cmd_on_exit is not None:
self.cmd_on_exit(self.caller, self)
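# Illustrative sketch (not part of the module): besides a module path, EvMenu
# also accepts a plain dict mapping node names to callables, which allows
# building menus dynamically. The node names and texts below are made up.
#
#   def _start(caller):
#       return "Pick an option.", ({"desc": "Leave the menu", "goto": "end"},)
#
#   def _end(caller):
#       return "Goodbye.", None
#
#   EvMenu(caller, {"start": _start, "end": _end}, startnode="start")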
# -------------------------------------------------------------------------------------------------
#
# Simple input shortcuts
#
# -------------------------------------------------------------------------------------------------
class CmdGetInput(Command):
"""
Enter your data and press return.
"""
key = _CMD_NOMATCH
aliases = _CMD_NOINPUT
def func(self):
"This is called when user enters anything."
caller = self.caller
callback = caller.ndb._getinputcallback
if not callback:
            # this can happen if called from a player-command when IC
caller = self.player
callback = caller.ndb._getinputcallback
if not callback:
raise RuntimeError("No input callback found.")
prompt = caller.ndb._getinputprompt
result = self.raw_string.strip() # we strip the ending line break caused by sending
ok = not callback(caller, prompt, result)
if ok:
# only clear the state if the callback does not return
# anything
del caller.ndb._getinputcallback
del caller.ndb._getinputprompt
caller.cmdset.remove(InputCmdSet)
class InputCmdSet(CmdSet):
"""
This stores the input command
"""
key = "input_cmdset"
priority = 1
mergetype = "Replace"
no_objs = True
no_exits = True
no_channels = False
def at_cmdset_creation(self):
"called once at creation"
self.add(CmdGetInput())
def get_input(caller, prompt, callback):
"""
This is a helper function for easily request input from
the caller.
Args:
caller (Player or Object): The entity being asked
the question. This should usually be an object
controlled by a user.
prompt (str): This text will be shown to the user,
in order to let them know their input is needed.
callback (callable): A function that will be called
when the user enters a reply. It must take three
arguments: the `caller`, the `prompt` text and the
`result` of the input given by the user. If the
callback doesn't return anything or return False,
the input prompt will be cleaned up and exited. If
returning True, the prompt will remain and continue to
accept input.
Raises:
RuntimeError: If the given callback is not callable.
Notes:
The result value sent to the callback is raw and not
processed in any way. This means that you will get
the ending line return character from most types of
client inputs. So make sure to strip that before
doing a comparison.
"""
if not callable(callback):
raise RuntimeError("get_input: input callback is not callable.")
caller.ndb._getinputcallback = callback
caller.ndb._getinputprompt = prompt
caller.cmdset.add(InputCmdSet)
caller.msg(prompt)
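# Illustrative sketch (not part of the module): asking for one line of input
# and echoing it back. Since the callback returns nothing, the input state is
# cleaned up after the first answer. The names below are made up.
#
#   def _echo(caller, prompt, result):
#       caller.msg("You wrote: %s" % result.strip())
#
#   get_input(caller, "Write something:", _echo)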
#------------------------------------------------------------
#
# test menu structure and testing command
#
#------------------------------------------------------------
def test_start_node(caller):
menu = caller.ndb._menutree
text = """
This is an example menu.
If you enter anything except the valid options, your input will be
recorded and you will be brought to a menu entry showing your
input.
Select options or use 'quit' to exit the menu.
The menu was initialized with two variables: %s and %s.
""" % (menu.testval, menu.testval2)
options = ({"key": ("{yS{net", "s"),
"desc": "Set an attribute on yourself.",
"exec": lambda caller: caller.attributes.add("menuattrtest", "Test value"),
"goto": "test_set_node"},
{"key": ("{yL{nook", "l"),
"desc": "Look and see a custom message.",
"goto": "test_look_node"},
{"key": ("{yV{niew", "v"),
"desc": "View your own name",
"goto": "test_view_node"},
{"key": ("{yQ{nuit", "quit", "q", "Q"),
"desc": "Quit this menu example.",
"goto": "test_end_node"},
{"key": "_default",
"goto": "test_displayinput_node"})
return text, options
def test_look_node(caller):
text = ""
options = {"key": ("{yL{nook", "l"),
"desc": "Go back to the previous menu.",
"goto": "test_start_node"}
return text, options
def test_set_node(caller):
text = ("""
The attribute 'menuattrtest' was set to
{w%s{n
(check it with examine after quitting the menu).
This node's has only one option, and one of its key aliases is the
string "_default", meaning it will catch any input, in this case
to return to the main menu. So you can e.g. press <return> to go
back now.
""" % caller.db.menuattrtest,
# optional help text for this node
"""
This is the help entry for this node. It is created by returning
the node text as a tuple - the second string in that tuple will be
used as the help text.
""")
options = {"key": ("back (default)", "_default"),
"desc": "back to main",
"goto": "test_start_node"}
return text, options
def test_view_node(caller):
text = """
Your name is {g%s{n!
click |lclook|lthere|le to trigger a look command under MXP.
This node's option has no explicit key (nor the "_default" key
    set), and so gets assigned a number automatically. You can in fact
-always- use numbers (1...N) to refer to listed options also if you
don't see a string option key (try it!).
""" % caller.key
options = {"desc": "back to main",
"goto": "test_start_node"}
return text, options
def test_displayinput_node(caller, raw_string):
text = """
You entered the text:
"{w%s{n"
... which could now be handled or stored here in some way if this
was not just an example.
This node has an option with a single alias "_default", which
makes it hidden from view. It catches all input (except the
in-menu help/quit commands) and will, in this case, bring you back
to the start node.
""" % raw_string
options = {"key": "_default",
"goto": "test_start_node"}
return text, options
def test_end_node(caller):
text = """
This is the end of the menu and since it has no options the menu
will exit here, followed by a call of the "look" command.
"""
return text, None
class CmdTestMenu(Command):
"""
Test menu
Usage:
testmenu <menumodule>
Starts a demo menu from a menu node definition module.
"""
key = "testmenu"
def func(self):
if not self.args:
self.caller.msg("Usage: testmenu menumodule")
return
# start menu
EvMenu(self.caller, self.args.strip(), startnode="test_start_node", persistent=True, cmdset_mergetype="Replace",
testval="val", testval2="val2")
| titeuf87/evennia | evennia/utils/evmenu.py | Python | bsd-3-clause | 39,605 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.TFRecordDataset`."""
import gzip
import os
import pathlib
import zlib
from absl.testing import parameterized
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.kernel_tests import tf_record_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.platform import test
class TFRecordDatasetTest(tf_record_test_base.TFRecordTestBase,
parameterized.TestCase):
def _dataset_factory(self,
filenames,
compression_type="",
num_epochs=1,
batch_size=None):
repeat_dataset = readers.TFRecordDataset(
filenames, compression_type).repeat(num_epochs)
if batch_size:
return repeat_dataset.batch(batch_size)
return repeat_dataset
@combinations.generate(test_base.default_test_combinations())
def testConstructorErrorsTensorInput(self):
with self.assertRaisesRegex(
TypeError,
"The `filenames` argument must contain `tf.string` elements. Got "
"`tf.int32` elements."):
readers.TFRecordDataset([1, 2, 3])
with self.assertRaisesRegex(
TypeError,
"The `filenames` argument must contain `tf.string` elements. Got "
"`tf.int32` elements."):
readers.TFRecordDataset(constant_op.constant([1, 2, 3]))
# convert_to_tensor raises different errors in graph and eager
with self.assertRaises(Exception):
readers.TFRecordDataset(object())
@combinations.generate(test_base.default_test_combinations())
def testReadOneEpoch(self):
# Basic test: read from file 0.
dataset = self._dataset_factory(self._filenames[0])
self.assertDatasetProduces(
dataset,
expected_output=[self._record(0, i) for i in range(self._num_records)])
# Basic test: read from file 1.
dataset = self._dataset_factory(self._filenames[1])
self.assertDatasetProduces(
dataset,
expected_output=[self._record(1, i) for i in range(self._num_records)])
# Basic test: read from both files.
dataset = self._dataset_factory(self._filenames)
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
self.assertDatasetProduces(dataset, expected_output=expected_output)
@combinations.generate(test_base.default_test_combinations())
def testReadTenEpochs(self):
dataset = self._dataset_factory(self._filenames, num_epochs=10)
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
self.assertDatasetProduces(dataset, expected_output=expected_output * 10)
@combinations.generate(test_base.default_test_combinations())
def testReadTenEpochsOfBatches(self):
dataset = self._dataset_factory(
self._filenames, num_epochs=10, batch_size=self._num_records)
expected_output = []
for j in range(self._num_files):
expected_output.append(
[self._record(j, i) for i in range(self._num_records)])
self.assertDatasetProduces(dataset, expected_output=expected_output * 10)
@combinations.generate(test_base.default_test_combinations())
def testReadZlibFiles(self):
zlib_files = []
for i, fn in enumerate(self._filenames):
with open(fn, "rb") as f:
cdata = zlib.compress(f.read())
zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.z" % i)
with open(zfn, "wb") as f:
f.write(cdata)
zlib_files.append(zfn)
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
dataset = self._dataset_factory(zlib_files, compression_type="ZLIB")
self.assertDatasetProduces(dataset, expected_output=expected_output)
@combinations.generate(test_base.default_test_combinations())
def testReadGzipFiles(self):
gzip_files = []
for i, fn in enumerate(self._filenames):
with open(fn, "rb") as f:
gzfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.gz" % i)
with gzip.GzipFile(gzfn, "wb") as gzf:
gzf.write(f.read())
gzip_files.append(gzfn)
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
dataset = self._dataset_factory(gzip_files, compression_type="GZIP")
self.assertDatasetProduces(dataset, expected_output=expected_output)
@combinations.generate(test_base.default_test_combinations())
def testReadWithBuffer(self):
one_mebibyte = 2**20
dataset = readers.TFRecordDataset(
self._filenames, buffer_size=one_mebibyte)
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
self.assertDatasetProduces(dataset, expected_output=expected_output)
@combinations.generate(test_base.default_test_combinations())
def testReadFromDatasetOfFiles(self):
files = dataset_ops.Dataset.from_tensor_slices(self._filenames)
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
dataset = readers.TFRecordDataset(files)
self.assertDatasetProduces(dataset, expected_output=expected_output)
@combinations.generate(test_base.default_test_combinations())
def testReadTenEpochsFromDatasetOfFilesInParallel(self):
files = dataset_ops.Dataset.from_tensor_slices(
self._filenames).repeat(10)
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
dataset = readers.TFRecordDataset(files, num_parallel_reads=4)
self.assertDatasetProduces(
dataset, expected_output=expected_output * 10, assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testPathlib(self):
files = [pathlib.Path(self._filenames[0])]
expected_output = [self._record(0, i) for i in range(self._num_records)]
ds = readers.TFRecordDataset(files)
self.assertDatasetProduces(
ds, expected_output=expected_output, assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testName(self):
files = [self._filenames[0]]
expected_output = [self._record(0, i) for i in range(self._num_records)]
ds = readers.TFRecordDataset(files, name="tf_record_dataset")
self.assertDatasetProduces(
ds, expected_output=expected_output, assert_items_equal=True)
class TFRecordDatasetCheckpointTest(tf_record_test_base.TFRecordTestBase,
checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
def make_dataset(self,
num_epochs,
batch_size=1,
compression_type=None,
buffer_size=None):
filenames = self._createFiles()
if compression_type == "ZLIB":
zlib_files = []
for i, fn in enumerate(filenames):
with open(fn, "rb") as f:
cdata = zlib.compress(f.read())
zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.z" % i)
with open(zfn, "wb") as f:
f.write(cdata)
zlib_files.append(zfn)
filenames = zlib_files
elif compression_type == "GZIP":
gzip_files = []
for i, fn in enumerate(self._filenames):
with open(fn, "rb") as f:
gzfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.gz" % i)
with gzip.GzipFile(gzfn, "wb") as gzf:
gzf.write(f.read())
gzip_files.append(gzfn)
filenames = gzip_files
return readers.TFRecordDataset(
filenames, compression_type,
buffer_size=buffer_size).repeat(num_epochs).batch(batch_size)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(batch_size=[1, 5])))
def testBatchSize(self, verify_fn, batch_size):
num_epochs = 5
num_outputs = num_epochs * self._num_files * self._num_records // batch_size
verify_fn(self,
lambda: self.make_dataset(num_epochs, batch_size=batch_size),
num_outputs)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(buffer_size=[0, 5])))
def testBufferSize(self, verify_fn, buffer_size):
num_epochs = 5
num_outputs = num_epochs * self._num_files * self._num_records
verify_fn(self,
lambda: self.make_dataset(num_epochs, buffer_size=buffer_size),
num_outputs)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(compression_type=[None, "GZIP", "ZLIB"])))
def testCompressionTypes(self, verify_fn, compression_type):
num_epochs = 5
num_outputs = num_epochs * self._num_files * self._num_records
# pylint: disable=g-long-lambda
verify_fn(
self, lambda: self.make_dataset(
num_epochs, compression_type=compression_type), num_outputs)
if __name__ == "__main__":
test.main()
| tensorflow/tensorflow | tensorflow/python/data/kernel_tests/tf_record_dataset_test.py | Python | apache-2.0 | 10,543 |
from django.shortcuts import render
from django.views.generic import ListView, DetailView, UpdateView, CreateView
from braces.views import LoginRequiredMixin, GroupRequiredMixin
from .models import Indicator, Parameter, MainIndicator
from django.core.urlresolvers import reverse_lazy
from .forms import *
# Create your views here.
class EditorsMixin(LoginRequiredMixin, GroupRequiredMixin):
group_required = 'editors'
## MainIndicator views
class MainIndicatorIndexView(EditorsMixin, ListView):
model = MainIndicator
class MainIndicatorDetailView(EditorsMixin, DetailView):
model = MainIndicator
class MainIndicatorEditView(EditorsMixin, UpdateView):
model = MainIndicator
form_class = MainIndicatorForm
success_url=reverse_lazy('main_indicators:list')
class MainIndicatorAddView(EditorsMixin, CreateView):
    model = MainIndicator
form_class = MainIndicatorForm
success_url=reverse_lazy('main_indicators:list')
## Indicator views
class IndicatorIndexView(EditorsMixin, ListView):
model = Indicator
class IndicatorDetailView(EditorsMixin, DetailView):
model = Indicator
class IndicatorEditView(EditorsMixin, UpdateView):
model = Indicator
form_class = IndicatorForm
success_url=reverse_lazy('indicators:list')
class IndicatorAddView(EditorsMixin, CreateView):
model = Indicator
form_class = IndicatorForm
success_url=reverse_lazy('indicators:list')
## Parameter views
class ParameterIndexView(EditorsMixin, ListView):
model = Parameter
class ParameterDetailView(EditorsMixin, DetailView):
model = Parameter
class ParameterEditView(EditorsMixin, UpdateView):
model = Parameter
form_class = ParameterForm
success_url=reverse_lazy('parameters:list')
class ParameterAddView(EditorsMixin, CreateView):
model = Parameter
success_url=reverse_lazy('parameters:list')
    form_class = ParameterForm
 | rlaverde/scorecard_cps | performance_indicators_project/indicators/views.py | Python | gpl-3.0 | 1983 |
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2014-2022 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:`AbrahamsonSilva1997`.
"""
import numpy as np
from openquake.hazardlib.gsim.base import GMPE, CoeffsTable
from openquake.hazardlib import const
from openquake.hazardlib.imt import PGA, SA
def _compute_mean_on_rock(C, mag, rrup, F, HW):
"""
Compute mean value on rock (that is eq.1, page 105 with S = 0)
"""
f1 = _compute_f1(C, mag, rrup)
f3 = _compute_f3(C, mag)
f4 = _compute_f4(C, mag, rrup)
return f1 + F * f3 + HW * f4
def _get_fault_type_hanging_wall(rake):
"""
Return fault type (F) and hanging wall (HW) flags depending on rake
angle.
The method assumes 'reverse' (F = 1) if 45 <= rake <= 135, 'other'
(F = 0) if otherwise. Hanging-wall flag is set to 1 if 'reverse',
and 0 if 'other'.
"""
F, HW = np.zeros_like(rake), np.zeros_like(rake)
within = (45 <= rake) & (rake <= 135)
F[within] = 1.
HW[within] = 1.
return F, HW
def _get_site_class(vs30):
"""
Return site class flag (0 if vs30 > 600, that is rock, or 1 if vs30 <
600, that is deep soil)
"""
S = np.zeros_like(vs30)
S[vs30 < 600] = 1
return S
def _compute_f1(C, mag, rrup):
"""
Compute f1 term (eq.4, page 105)
"""
r = np.sqrt(rrup ** 2 + C['c4'] ** 2)
f1 = (C['a1'] +
C['a12'] * (8.5 - mag) ** C['n'] +
(C['a3'] + C['a13'] * (mag - C['c1'])) * np.log(r))
f1[mag <= C['c1']] += C['a2'] * (mag[mag <= C['c1']] - C['c1'])
f1[mag > C['c1']] += C['a4'] * (mag[mag > C['c1']] - C['c1'])
return f1
def _compute_f3(C, mag):
"""
Compute f3 term (eq.6, page 106)
NOTE: In the original manuscript, for the case 5.8 < mag < c1,
the term in the numerator '(mag - 5.8)' is missing, while is
present in the software used for creating the verification tables
"""
f3 = C['a5'] + (C['a6'] - C['a5']) * (mag - 5.8) / (C['c1'] - 5.8)
f3[mag <= 5.8] = C['a5']
f3[mag >= C['c1']] = C['a6']
return f3
def _compute_f4(C, mag, rrup):
"""
Compute f4 term (eq. 7, 8, and 9, page 106)
"""
    fhw_m = np.clip(mag - 5.5, 0., 1.)
    fhw_r = np.zeros_like(rrup)
idx = (rrup > 4) & (rrup <= 8)
fhw_r[idx] = C['a9'] * (rrup[idx] - 4.) / 4.
idx = (rrup > 8) & (rrup <= 18)
fhw_r[idx] = C['a9']
idx = (rrup > 18) & (rrup <= 24)
fhw_r[idx] = C['a9'] * (1 - (rrup[idx] - 18.) / 7.)
return fhw_m * fhw_r
def _compute_f5(C, pga_rock):
"""
Compute f5 term (non-linear soil response)
"""
return C['a10'] + C['a11'] * np.log(pga_rock + C['c5'])
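# Putting the helper terms together (eq. 1, page 105, in schematic form):
#   ln(Sa) = f1(M, rrup) + F * f3(M) + HW * f4(M, rrup) + S * f5(pga_rock)
# where F, HW and S are the flags returned by the helper functions above.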
class AbrahamsonSilva1997(GMPE):
"""
Implements GMPE developed by N. A. Abrahamson and W. J. Silva and published
as "Empirical Response Spectral Attenuation Relations for Shallow Crustal
Earthquakes", Seismological Research Letters, v.68, no. 1, p. 94-127, 1997.
The GMPE distinguishes between rock (vs30 >= 600) and deep soil
(vs30 < 600). The rake angle is also taken into account to distinguish
    between 'reverse' (45 <= rake <= 135) and 'other'. If an earthquake rupture
is classified as 'reverse', then the hanging-wall term is included in the
mean calculation.
"""
#: Supported tectonic region type is 'active shallow crust' (see
#: Introduction, page 94)
DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.ACTIVE_SHALLOW_CRUST
#: Supported intensity measure types are PGA and SA. PGA is assumed to
#: have same coefficients as SA(0.01)
DEFINED_FOR_INTENSITY_MEASURE_TYPES = {PGA, SA}
#: Supported intensity measure component is the geometric mean of two
#: horizontal components (see paragraph 'Regression Model', page 105)
DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.GEOMETRIC_MEAN
#: Supported standard deviation type is Total (see equations 13 pp. 106
#: and table 4, page 109).
DEFINED_FOR_STANDARD_DEVIATION_TYPES = {const.StdDev.TOTAL}
#: The only site parameter is vs30 used to distinguish between rock
#: (vs30 > 600 m/s) and deep soil (see table 2, page 95)
REQUIRES_SITES_PARAMETERS = {'vs30'}
#: Required rupture parameters are magnitude, and rake (eq. 3, page 105).
#: Rake is used to distinguish between 'reverse' (45 <= rake <= 135) and
#: 'other' (i.e. strike-slip and normal). If an earthquake is classified
    #: as 'reverse', then the hanging-wall term is taken into account.
REQUIRES_RUPTURE_PARAMETERS = {'mag', 'rake'}
#: Required distance measure is RRup (eq. 3, page 105).
REQUIRES_DISTANCES = {'rrup'}
def compute(self, ctx: np.recarray, imts, mean, sig, tau, phi):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.compute>`
for spec of input and result values.
"""
F, HW = _get_fault_type_hanging_wall(ctx.rake)
S = _get_site_class(ctx.vs30)
# compute pga on rock (used then to compute site amplification factor)
C = self.COEFFS[PGA()]
pga_rock = np.exp(_compute_mean_on_rock(C, ctx.mag, ctx.rrup, F, HW))
for m, imt in enumerate(imts):
# compute mean for the given imt (do not repeat the calculation if
# imt is PGA, just add the site amplification term)
if imt == PGA():
mean[m] = np.log(pga_rock) + S * _compute_f5(C, pga_rock)
else:
C = self.COEFFS[imt]
mean[m] = (_compute_mean_on_rock(C, ctx.mag, ctx.rrup, F, HW) +
S * _compute_f5(C, pga_rock))
C_STD = self.COEFFS_STD[imt]
# standard deviation as defined in eq.13 page 106.
sigma = C_STD['b5'] - C_STD['b6'] * (ctx.mag - 5)
sigma[ctx.mag <= 5.] = C_STD['b5']
sigma[ctx.mag >= 7.] = C_STD['b5'] - 2 * C_STD['b6']
sig[m] += sigma
#: Coefficient table (table 3, page 108)
COEFFS = CoeffsTable(sa_damping=5, table="""\
imt c4 a1 a2 a3 a4 a5 a6 a9 a10 a11 a12 a13 c1 c5 n
pga 5.60 1.640 0.512 -1.1450 -0.144 0.610 0.260 0.370 -0.417 -0.230 0.0000 0.17 6.4 0.03 2
0.01 5.60 1.640 0.512 -1.1450 -0.144 0.610 0.260 0.370 -0.417 -0.230 0.0000 0.17 6.4 0.03 2
0.02 5.60 1.640 0.512 -1.1450 -0.144 0.610 0.260 0.370 -0.417 -0.230 0.0000 0.17 6.4 0.03 2
0.03 5.60 1.690 0.512 -1.1450 -0.144 0.610 0.260 0.370 -0.470 -0.230 0.0143 0.17 6.4 0.03 2
0.04 5.60 1.780 0.512 -1.1450 -0.144 0.610 0.260 0.370 -0.555 -0.251 0.0245 0.17 6.4 0.03 2
0.05 5.60 1.870 0.512 -1.1450 -0.144 0.610 0.260 0.370 -0.620 -0.267 0.0280 0.17 6.4 0.03 2
0.06 5.60 1.940 0.512 -1.1450 -0.144 0.610 0.260 0.370 -0.665 -0.280 0.0300 0.17 6.4 0.03 2
0.075 5.58 2.037 0.512 -1.1450 -0.144 0.610 0.260 0.370 -0.628 -0.280 0.0300 0.17 6.4 0.03 2
0.09 5.54 2.100 0.512 -1.1450 -0.144 0.610 0.260 0.370 -0.609 -0.280 0.0300 0.17 6.4 0.03 2
0.10 5.50 2.160 0.512 -1.1450 -0.144 0.610 0.260 0.370 -0.598 -0.280 0.0280 0.17 6.4 0.03 2
0.12 5.39 2.272 0.512 -1.1450 -0.144 0.610 0.260 0.370 -0.591 -0.280 0.0180 0.17 6.4 0.03 2
0.15 5.27 2.407 0.512 -1.1450 -0.144 0.610 0.260 0.370 -0.577 -0.280 0.0050 0.17 6.4 0.03 2
0.17 5.19 2.430 0.512 -1.1350 -0.144 0.610 0.260 0.370 -0.522 -0.265 -0.0040 0.17 6.4 0.03 2
0.20 5.10 2.406 0.512 -1.1150 -0.144 0.610 0.260 0.370 -0.445 -0.245 -0.0138 0.17 6.4 0.03 2
0.24 4.97 2.293 0.512 -1.0790 -0.144 0.610 0.232 0.370 -0.350 -0.223 -0.0238 0.17 6.4 0.03 2
0.30 4.80 2.114 0.512 -1.0350 -0.144 0.610 0.198 0.370 -0.219 -0.195 -0.0360 0.17 6.4 0.03 2
0.36 4.62 1.955 0.512 -1.0052 -0.144 0.610 0.170 0.370 -0.123 -0.173 -0.0460 0.17 6.4 0.03 2
0.40 4.52 1.860 0.512 -0.9880 -0.144 0.610 0.154 0.370 -0.065 -0.160 -0.0518 0.17 6.4 0.03 2
0.46 4.38 1.717 0.512 -0.9652 -0.144 0.592 0.132 0.370 0.020 -0.136 -0.0594 0.17 6.4 0.03 2
0.50 4.30 1.615 0.512 -0.9515 -0.144 0.581 0.119 0.370 0.085 -0.121 -0.0635 0.17 6.4 0.03 2
0.60 4.12 1.428 0.512 -0.9218 -0.144 0.557 0.091 0.370 0.194 -0.089 -0.0740 0.17 6.4 0.03 2
0.75 3.90 1.160 0.512 -0.8852 -0.144 0.528 0.057 0.331 0.320 -0.050 -0.0862 0.17 6.4 0.03 2
0.85 3.81 1.020 0.512 -0.8648 -0.144 0.512 0.038 0.309 0.370 -0.028 -0.0927 0.17 6.4 0.03 2
1.00 3.70 0.828 0.512 -0.8383 -0.144 0.490 0.013 0.281 0.423 0.000 -0.1020 0.17 6.4 0.03 2
1.50 3.55 0.260 0.512 -0.7721 -0.144 0.438 -0.049 0.210 0.600 0.040 -0.1200 0.17 6.4 0.03 2
2.00 3.50 -0.150 0.512 -0.7250 -0.144 0.400 -0.094 0.160 0.610 0.040 -0.1400 0.17 6.4 0.03 2
3.00 3.50 -0.690 0.512 -0.7250 -0.144 0.400 -0.156 0.089 0.630 0.040 -0.1726 0.17 6.4 0.03 2
4.00 3.50 -1.130 0.512 -0.7250 -0.144 0.400 -0.200 0.039 0.640 0.040 -0.1956 0.17 6.4 0.03 2
5.00 3.50 -1.460 0.512 -0.7250 -0.144 0.400 -0.200 0.000 0.664 0.040 -0.2150 0.17 6.4 0.03 2
""")
#: Coefficient table for standard deviation calculation (table 4, page 109)
COEFFS_STD = CoeffsTable(sa_damping=5, table="""\
imt b5 b6
pga 0.70 0.135
0.01 0.70 0.135
0.02 0.70 0.135
0.03 0.70 0.135
0.04 0.71 0.135
0.05 0.71 0.135
0.06 0.72 0.135
0.075 0.73 0.135
0.09 0.74 0.135
0.10 0.74 0.135
0.12 0.75 0.135
0.15 0.75 0.135
0.17 0.76 0.135
0.20 0.77 0.135
0.24 0.77 0.135
0.30 0.78 0.135
0.36 0.79 0.135
0.40 0.79 0.135
0.46 0.80 0.132
0.50 0.80 0.130
0.60 0.81 0.127
0.75 0.81 0.123
0.85 0.82 0.121
1.00 0.83 0.118
1.50 0.84 0.110
2.00 0.85 0.105
3.00 0.87 0.097
4.00 0.88 0.092
5.00 0.89 0.087
""")
| gem/oq-engine | openquake/hazardlib/gsim/abrahamson_silva_1997.py | Python | agpl-3.0 | 10,993 |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
#
# This file is part of the NNGT project to generate and analyze
# neuronal networks and their activity.
# Copyright (C) 2015-2019 Tanguy Fardet
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Graph data strctures in NNGT """
import numpy as np
from numpy.random import randint, uniform
from ..lib import (InvalidArgument, nonstring_container, is_integer,
default_neuron, default_synapse, POS, WEIGHT, DELAY, DIST,
TYPE, BWEIGHT)
from ..lib.rng_tools import _eprop_distribution
# ----------- #
# Connections #
# ----------- #
class Connections:
"""
The basic class that computes the properties of the connections between
neurons for graphs.
"""
#-------------------------------------------------------------------------#
# Class methods
@staticmethod
def distances(graph, elist=None, pos=None, dlist=None, overwrite=False):
'''
Compute the distances between connected nodes in the graph. Try to add
        only the new distances to the graph. If they overlap with previously
        computed distances, everything is recomputed.
Parameters
----------
graph : class:`~nngt.Graph` or subclass
Graph the nodes belong to.
elist : class:`numpy.array`, optional (default: None)
List of the edges.
pos : class:`numpy.array`, optional (default: None)
Positions of the nodes; note that if `graph` has a "position"
attribute, `pos` will not be taken into account.
dlist : class:`numpy.array`, optional (default: None)
List of distances (for user-defined distances)
Returns
-------
new_dist : class:`numpy.array`
Array containing *ONLY* the newly-computed distances.
'''
elist = graph.edges_array if elist is None else elist
if dlist is not None:
dlist = np.array(dlist)
graph.set_edge_attribute(DIST, value_type="double", values=dlist)
return dlist
else:
pos = graph._pos if hasattr(graph, "_pos") else pos
# compute the new distances
if graph.edge_nb():
ra_x = pos[elist[:,0], 0] - pos[elist[:,1], 0]
ra_y = pos[elist[:,0], 1] - pos[elist[:,1], 1]
ra_dist = np.sqrt( np.square(ra_x) + np.square(ra_y) )
# update graph distances
graph.set_edge_attribute(DIST, value_type="double",
values=ra_dist, edges=elist)
return ra_dist
else:
return []
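    # Minimal illustrative sketch (hypothetical arrays), mirroring the
    # Euclidean computation above:
    # >>> pos = np.array([[0., 0.], [3., 4.]])
    # >>> elist = np.array([[0, 1]])
    # >>> np.sqrt(np.sum((pos[elist[:, 0]] - pos[elist[:, 1]])**2, axis=1))
    # array([5.])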
@staticmethod
def delays(graph=None, dlist=None, elist=None, distribution="constant",
parameters=None, noise_scale=None):
'''
Compute the delays of the neuronal connections.
Parameters
----------
graph : class:`~nngt.Graph` or subclass
Graph the nodes belong to.
dlist : class:`numpy.array`, optional (default: None)
            List of user-defined delays.
elist : class:`numpy.array`, optional (default: None)
            List of the edges whose values should be updated.
distribution : class:`string`, optional (default: "constant")
Type of distribution (choose among "constant", "uniform",
"lognormal", "gaussian", "user_def", "lin_corr", "log_corr").
parameters : class:`dict`, optional (default: {})
Dictionary containing the distribution parameters.
noise_scale : class:`int`, optional (default: None)
Scale of the multiplicative Gaussian noise that should be applied
            on the delays.
Returns
-------
new_delays : class:`scipy.sparse.lil_matrix`
            A sparse matrix containing *ONLY* the newly-computed delays.
'''
elist = np.array(elist) if elist is not None else elist
if dlist is not None:
dlist = np.array(dlist)
num_edges = graph.edge_nb() if elist is None else elist.shape[0]
if len(dlist) != num_edges:
raise InvalidArgument("`dlist` must have one entry per edge.")
else:
parameters["btype"] = parameters.get("btype", "edge")
parameters["weights"] = parameters.get("weights", None)
dlist = _eprop_distribution(graph, distribution, elist=elist,
**parameters)
# add to the graph container
if graph is not None:
graph.set_edge_attribute(
DELAY, value_type="double", values=dlist, edges=elist)
return dlist
@staticmethod
def weights(graph=None, elist=None, wlist=None, distribution="constant",
parameters={}, noise_scale=None):
'''
Compute the weights of the graph's edges.
Parameters
----------
graph : class:`~nngt.Graph` or subclass
Graph the nodes belong to.
elist : class:`numpy.array`, optional (default: None)
List of the edges (for user defined weights).
wlist : class:`numpy.array`, optional (default: None)
List of the weights (for user defined weights).
distribution : class:`string`, optional (default: "constant")
Type of distribution (choose among "constant", "uniform",
"lognormal", "gaussian", "user_def", "lin_corr", "log_corr").
parameters : class:`dict`, optional (default: {})
Dictionary containing the distribution parameters.
noise_scale : class:`int`, optional (default: None)
Scale of the multiplicative Gaussian noise that should be applied
on the weights.
Returns
-------
new_weights : class:`scipy.sparse.lil_matrix`
A sparse matrix containing *ONLY* the newly-computed weights.
'''
parameters["btype"] = parameters.get("btype", "edge")
parameters["weights"] = parameters.get("weights", None)
elist = np.array(elist) if elist is not None else elist
if wlist is not None:
wlist = np.array(wlist)
num_edges = graph.edge_nb() if elist is None else elist.shape[0]
if len(wlist) != num_edges:
raise InvalidArgument("`wlist` must have one entry per edge.")
else:
wlist = _eprop_distribution(graph, distribution, elist=elist,
**parameters)
# normalize by the inhibitory weight factor
if graph is not None and graph.is_network():
if not np.isclose(graph._iwf, 1.):
adj = graph.adjacency_matrix(types=True, weights=False)
keep = (adj[elist[:, 0], elist[:, 1]] < 0).A1
wlist[keep] *= graph._iwf
if graph is not None:
graph.set_edge_attribute(
WEIGHT, value_type="double", values=wlist, edges=elist)
return wlist
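    # Hedged usage sketch (assuming `g` is an existing nngt.Graph with edges):
    # >>> w = Connections.weights(g, wlist=np.ones(g.edge_nb()))
    # assigns a weight of 1 to every edge and stores it as the WEIGHT attribute.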
@staticmethod
def types(graph, inhib_nodes=None, inhib_frac=None, values=None):
'''
Define the type of a set of neurons.
If no arguments are given, all edges will be set as excitatory.
Parameters
----------
graph : :class:`~nngt.Graph` or subclass
Graph on which edge types will be created.
inhib_nodes : int or list, optional (default: `None`)
If `inhib_nodes` is an int, number of inhibitory nodes in the graph
(all connections from inhibitory nodes are inhibitory); if it is a
float, ratio of inhibitory nodes in the graph; if it is a list, ids
of the inhibitory nodes.
inhib_frac : float, optional (default: `None`)
            Fraction of the selected edges that will be set as inhibitory (if
`inhib_nodes` is not `None`, it is the fraction of the nodes' edges
that will become inhibitory, otherwise it is the fraction of all
the edges in the graph).
Returns
-------
t_list : :class:`~numpy.ndarray`
List of the edges' types.
'''
num_inhib = 0
idx_inhib = []
if values is not None:
graph.new_edge_attribute("type", "int", values=values)
return values
elif inhib_nodes is None and inhib_frac is None:
graph.new_edge_attribute("type", "int", val=1)
return np.ones(graph.edge_nb())
else:
t_list = np.repeat(1, graph.edge_nb())
n = graph.node_nb()
if inhib_nodes is None:
# set inhib_frac*num_edges random inhibitory connections
num_edges = graph.edge_nb()
num_inhib = int(num_edges*inhib_frac)
num_current = 0
while num_current < num_inhib:
new = randint(0, num_edges, num_inhib-num_current)
idx_inhib = np.unique(np.concatenate((idx_inhib, new)))
num_current = len(idx_inhib)
t_list[idx_inhib.astype(int)] *= -1
else:
edges = graph.edges_array
# get the dict of inhibitory nodes
num_inhib_nodes = 0
idx_nodes = {}
if nonstring_container(inhib_nodes):
idx_nodes = {i: -1 for i in inhib_nodes}
num_inhib_nodes = len(idx_nodes)
if is_integer(inhib_nodes):
num_inhib_nodes = int(inhib_nodes)
while len(idx_nodes) != num_inhib_nodes:
indices = randint(0,n,num_inhib_nodes-len(idx_nodes))
di_tmp = {i: -1 for i in indices}
idx_nodes.update(di_tmp)
for v in edges[:, 0]:
if v in idx_nodes:
idx_inhib.append(v)
idx_inhib = np.unique(idx_inhib)
# set the inhibitory edge indices
for v in idx_inhib:
idx_edges = np.argwhere(edges[:, 0] == v)
n = len(idx_edges)
if inhib_frac is not None:
idx_inh = []
num_inh = n*inhib_frac
i = 0
while i != num_inh:
ids = randint(0, n, num_inh-i)
idx_inh = np.unique(np.concatenate((idx_inh,ids)))
i = len(idx_inh)
t_list[idx_inh] *= -1
else:
t_list[idx_edges] *= -1
graph.set_edge_attribute("type", value_type="int", values=t_list)
return t_list
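# Hedged usage sketch (assuming `g` is an existing nngt network-like graph):
# marking roughly 20% of the edges as inhibitory returns a +1/-1 type array,
# e.g.
# >>> t = Connections.types(g, inhib_frac=0.2)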
| Silmathoron/NNGT | nngt/core/connections.py | Python | gpl-3.0 | 11,420 |
# Copyright (C) 2010 Canonical
#
# Authors:
# Michael Vogt
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from gi.repository import GObject
class _Version:
@property
def description(self):
pass
@property
def downloadable(self):
pass
@property
def summary(self):
pass
@property
def size(self):
return self.pkginfo.get_size(self.name)
@property
def installed_size(self):
return 0
@property
def version(self):
pass
@property
def origins(self):
return []
@property
def record(self):
return {}
@property
def not_automatic(self):
""" should not be installed/upgraded automatically, the user needs
to opt-in once (used for e.g. ubuntu-backports)
"""
return False
class _Package:
def __init__(self, name, pkginfo):
self.name = name
self.pkginfo = pkginfo
def __str__(self):
return repr(self).replace('<', '<pkgname=%s ' % self.name)
@property
def installed(self):
""" returns a _Version object """
if self.pkginfo.is_installed(self.name):
return self.pkginfo.get_installed(self.name)
@property
def candidate(self):
""" returns a _Version object """
return self.pkginfo.get_candidate(self.name)
@property
def versions(self):
""" a list of available versions (as _Version) to install """
return self.pkginfo.get_versions(self.name)
@property
def is_installed(self):
return self.pkginfo.is_installed(self.name)
@property
def is_upgradable(self):
return self.pkginfo.is_upgradable(self.name)
@property
def section(self):
return self.pkginfo.get_section(self.name)
@property
def website(self):
return self.pkginfo.get_website(self.name)
@property
def installed_files(self):
return self.pkginfo.get_installed_files(self.name)
@property
def description(self):
return self.pkginfo.get_description(self.name)
@property
def license(self):
return self.pkginfo.get_license(self.name)
class PackageInfo(GObject.GObject):
""" abstract interface for the packageinfo information """
__gsignals__ = {
'cache-ready': (GObject.SIGNAL_RUN_FIRST,
GObject.TYPE_NONE,
()),
'cache-invalid': (GObject.SIGNAL_RUN_FIRST,
GObject.TYPE_NONE,
()),
'cache-broken': (GObject.SIGNAL_RUN_FIRST,
GObject.TYPE_NONE,
()),
'query-total-size-on-install-done': (
GObject.SIGNAL_RUN_FIRST,
GObject.TYPE_NONE,
(str, int, int)),
}
def __getitem__(self, k):
return _Package(k, self)
def __contains__(self, pkgname):
return False
@staticmethod
def version_compare(v1, v2):
""" compare two versions """
return cmp(v1, v2)
@staticmethod
def upstream_version_compare(v1, v2):
""" compare two versions, but ignore the distro specific revisions """
return cmp(v1, v2)
@staticmethod
def upstream_version(v):
""" Return the "upstream" version number of the given version """
return v
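    # Illustrative example with the reference implementation above (Python 2
    # string comparison via cmp); real backends are expected to override this
    # with proper version semantics:
    # >>> PackageInfo.version_compare("1.0", "2.0") < 0
    # True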
def is_installed(self, pkgname):
pass
def is_available(self, pkgname):
pass
def get_installed(self, pkgname):
pass
def get_candidate(self, pkgname):
pass
def get_versions(self, pkgname):
return []
def get_section(self, pkgname):
pass
def get_summary(self, pkgname):
pass
def get_description(self, pkgname):
pass
def get_website(self, pkgname):
pass
def get_installed_files(self, pkgname):
return []
def get_size(self, pkgname):
return -1
def get_installed_size(self, pkgname):
return -1
def get_origins(self, pkgname):
return []
def get_origin(self, pkgname):
""" :return: unique origin as string """
return ''
def get_addons(self, pkgname, ignore_installed=False):
""" :return: a tuple of pkgnames (recommends, suggests) """
return ([], [])
def get_packages_removed_on_remove(self, pkg):
""" Returns a package names list of reverse dependencies
which will be removed if the package is removed."""
return []
def get_packages_removed_on_install(self, pkg):
""" Returns a package names list of dependencies
which will be removed if the package is installed."""
return []
def query_total_size_on_install(self, pkgname,
addons_install=None, addons_remove=None,
archive_suite=None):
""" Query for download and installed size
with disk size in KB calculated for pkgname installation
plus addons change and a (optional) archive_suite that the
package comes from
This will emit a "query-total-size-on-install-done" signal
with the parameters (pkgname, download_size, required_space_on_disk)
"""
self.emit("query-total-size-on-install-done", pkgname, 0, 0)
def open(self):
"""
(re)open the cache, this sends cache-invalid, cache-ready signals
"""
pass
@property
def ready(self):
pass
# singleton
pkginfo = None
def get_pkg_info():
global pkginfo
if pkginfo is None:
from softwarecenter.enums import USE_PACKAGEKIT_BACKEND
if not USE_PACKAGEKIT_BACKEND:
from softwarecenter.db.pkginfo_impl.aptcache import AptCache
pkginfo = AptCache()
else:
from softwarecenter.db.pkginfo_impl.packagekit import (
PackagekitInfo,
)
pkginfo = PackagekitInfo()
return pkginfo
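# Typical usage sketch ('firefox' is just an illustrative package name; the
# concrete behaviour depends on the Apt or PackageKit backend):
# >>> info = get_pkg_info()
# >>> info.open()                      # (re)open the backend cache
# >>> info['firefox'].is_installed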
| sti-lyneos/shop | softwarecenter/db/pkginfo.py | Python | lgpl-3.0 | 6,602 |
# Always prefer setuptools over distutils
from setuptools import setup
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pygalib',
version='0.1.0',
    description='A simple and easy-to-use genetic algorithm library',
long_description=long_description,
url='https://github.com/lfzark/pygalib',
author='ark1ee',
author_email='[email protected]',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
# What does your project relate to?
keywords='genetic algorithm library',
packages=['pygalib'],
)
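# Typical build/install workflow (standard setuptools/pip commands, shown for
# reference):
#   $ python setup.py sdist
#   $ pip install dist/pygalib-0.1.0.tar.gz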
| lfzark/pygalib | setup.py | Python | mit | 1,800 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
r"""Example of Synced sequence input and output.
This is a reimplementation of the TensorFlow official PTB example in:
tensorflow/models/rnn/ptb
The batch_size can be seen as the number of concurrent computations.\n
As the following example shows, the first batch learns the sequence information by using 0 to 9.\n
The second batch learns the sequence information by using 10 to 19.\n
So it ignores the information from 9 to 10 !\n
Only if we set batch_size = 1 will it consider all the information from 0 to 20.\n
The meaning of batch_size here is not the same as in the MNIST example. In the MNIST example,
batch_size reflects how many examples we consider in each iteration, while in the
PTB example, batch_size is the number of concurrent processes (segments)
used to speed up the computation.
Some information will be ignored if batch_size > 1; however, if your dataset
is "long" enough (a text corpus usually has billions of words), the ignored
information will not affect the final result.
In the PTB tutorial, we set batch_size = 20, so we cut the dataset into 20 segments.
At the beginning of each epoch, we initialize (reset) the 20 RNN states for the 20
segments, then go through the 20 segments separately.
The training data will be generated as follows:\n
>>> train_data = [i for i in range(20)]
>>> for batch in tl.iterate.ptb_iterator(train_data, batch_size=2, num_steps=3):
>>> x, y = batch
>>> print(x, '\n',y)
... [[ 0 1 2] <---x 1st subset/ iteration
... [10 11 12]]
... [[ 1 2 3] <---y
... [11 12 13]]
...
... [[ 3 4 5] <--- 1st batch input 2nd subset/ iteration
... [13 14 15]] <--- 2nd batch input
... [[ 4 5 6] <--- 1st batch target
... [14 15 16]] <--- 2nd batch target
...
... [[ 6 7 8] 3rd subset/ iteration
... [16 17 18]]
... [[ 7 8 9]
... [17 18 19]]
Hao Dong: This example can also be considered as pre-training of the word
embedding matrix.
About RNN
----------
$ Karpathy Blog : http://karpathy.github.io/2015/05/21/rnn-effectiveness/
More TensorFlow official RNN examples can be found here
---------------------------------------------------------
$ RNN for PTB : https://www.tensorflow.org/versions/master/tutorials/recurrent/index.html#recurrent-neural-networks
$ Seq2seq : https://www.tensorflow.org/versions/master/tutorials/seq2seq/index.html#sequence-to-sequence-models
$ translation : tensorflow/models/rnn/translate
Example / benchmark for building a PTB LSTM model.
Trains the model described in:
(Zaremba et al.) Recurrent Neural Network Regularization
http://arxiv.org/abs/1409.2329
There are 3 supported model configurations:
===========================================
| config | epochs | train | valid | test
===========================================
| small | 13 | 37.99 | 121.39 | 115.91
| medium | 39 | 48.45 | 86.16 | 82.07
| large | 55 | 37.87 | 82.62 | 78.29
The exact results may vary depending on the random initialization.
The hyperparameters used in the model:
- init_scale - the initial scale of the weights
- learning_rate - the initial value of the learning rate
- max_grad_norm - the maximum permissible norm of the gradient
- num_layers - the number of LSTM layers
- num_steps - the number of unrolled steps of LSTM
- hidden_size - the number of LSTM units
- max_epoch - the number of epochs trained with the initial learning rate
- max_max_epoch - the total number of epochs for training
- keep_prob - the probability of keeping weights in the dropout layer
- lr_decay - the decay of the learning rate for each epoch after "max_epoch"
- batch_size - the batch size
The data required for this example is in the data/ dir of the
PTB dataset from Tomas Mikolov's webpage:
$ wget http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
$ tar xvf simple-examples.tgz
A) use the zero_state function on the cell object
B) for an rnn, all time steps share weights. We use one matrix to keep all
gate weights. Split by column into 4 parts to get the 4 gate weight matrices.
"""
import argparse
import sys
import time
import numpy as np
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.models import Model
tl.logging.set_verbosity(tl.logging.DEBUG)
def process_args(args):
parser = argparse.ArgumentParser()
parser.add_argument(
'--model', default='small', choices=['small', 'medium', 'large'],
help="A type of model. Possible options are: small, medium, large."
)
parameters = parser.parse_args(args)
return parameters
class PTB_Net(Model):
def __init__(self, vocab_size, hidden_size, init, keep):
super(PTB_Net, self).__init__()
self.embedding = tl.layers.Embedding(vocab_size, hidden_size, init)
self.dropout1 = tl.layers.Dropout(keep=keep)
self.lstm1 = tl.layers.RNN(
cell=tf.keras.layers.LSTMCell(hidden_size), return_last_output=False, return_last_state=True,
return_seq_2d=False, in_channels=hidden_size
)
self.dropout2 = tl.layers.Dropout(keep=keep)
self.lstm2 = tl.layers.RNN(
cell=tf.keras.layers.LSTMCell(hidden_size), return_last_output=False, return_last_state=True,
return_seq_2d=True, in_channels=hidden_size
)
self.dropout3 = tl.layers.Dropout(keep=keep)
self.out_dense = tl.layers.Dense(vocab_size, in_channels=hidden_size, W_init=init, b_init=init, act=None)
def forward(self, inputs, lstm1_initial_state=None, lstm2_initial_state=None):
inputs = self.embedding(inputs)
inputs = self.dropout1(inputs)
lstm1_out, lstm1_state = self.lstm1(inputs, initial_state=lstm1_initial_state)
inputs = self.dropout2(lstm1_out)
lstm2_out, lstm2_state = self.lstm2(inputs, initial_state=lstm2_initial_state)
inputs = self.dropout3(lstm2_out)
logits = self.out_dense(inputs)
return logits, lstm1_state, lstm2_state
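# Rough shape sketch for PTB_Net.forward (B = batch_size, T = num_steps,
# H = hidden_size, V = vocab_size):
#   (B, T) int word ids -> embedding (B, T, H) -> lstm1 (B, T, H)
#   -> lstm2 with return_seq_2d=True (B*T, H) -> out_dense logits (B*T, V),
# which is why the targets are flattened with tf.reshape(y, [-1]) before the
# cross-entropy in the training loop below.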
def main():
"""
The core of the model consists of an LSTM cell that processes one word at
a time and computes probabilities of the possible continuations of the
sentence. The memory state of the network is initialized with a vector
of zeros and gets updated after reading each word. Also, for computational
reasons, we will process data in mini-batches of size batch_size.
"""
param = process_args(sys.argv[1:])
if param.model == "small":
init_scale = 0.1
learning_rate = 1e-3
max_grad_norm = 5
num_steps = 20
hidden_size = 200
max_epoch = 4
max_max_epoch = 13
keep_prob = 1.0
lr_decay = 0.5
batch_size = 20
vocab_size = 10000
elif param.model == "medium":
init_scale = 0.05
learning_rate = 1e-3
max_grad_norm = 5
# num_layers = 2
num_steps = 35
hidden_size = 650
max_epoch = 6
max_max_epoch = 39
keep_prob = 0.5
lr_decay = 0.8
batch_size = 20
vocab_size = 10000
elif param.model == "large":
init_scale = 0.04
learning_rate = 1e-3
max_grad_norm = 10
# num_layers = 2
num_steps = 35
hidden_size = 1500
max_epoch = 14
max_max_epoch = 55
keep_prob = 0.35
lr_decay = 1 / 1.15
batch_size = 20
vocab_size = 10000
else:
raise ValueError("Invalid model: %s", param.model)
# Load PTB dataset
train_data, valid_data, test_data, vocab_size = tl.files.load_ptb_dataset()
# train_data = train_data[0:int(100000/5)] # for fast testing
print('len(train_data) {}'.format(len(train_data))) # 929589 a list of int
print('len(valid_data) {}'.format(len(valid_data))) # 73760 a list of int
print('len(test_data) {}'.format(len(test_data))) # 82430 a list of int
print('vocab_size {}'.format(vocab_size)) # 10000
    # One int represents one word; the meaning of batch_size here is not the
    # same as in the MNIST example, it is the number of concurrent processes
    # (for computational reasons).
init = tf.random_uniform_initializer(-init_scale, init_scale)
net = PTB_Net(hidden_size=hidden_size, vocab_size=vocab_size, init=init, keep=keep_prob)
# Truncated Backpropagation for training
lr = tf.Variable(0.0, trainable=False)
train_weights = net.weights
optimizer = tf.optimizers.Adam(lr=lr)
print(net)
print("\nStart learning a language model by using PTB dataset")
for i in range(max_max_epoch):
        # decrease the initial learning rate after several
        # epochs (defined by ``max_epoch``), by multiplying it by ``lr_decay``.
new_lr_decay = lr_decay**max(i - max_epoch, 0.0)
lr.assign(learning_rate * new_lr_decay)
# Training
net.train()
print("Epoch: %d/%d Learning rate: %.3f" % (i + 1, max_max_epoch, lr.value()))
epoch_size = ((len(train_data) // batch_size) - 1) // num_steps
start_time = time.time()
costs = 0.0
iters = 0
        # reset all states at the beginning of every epoch
lstm1_state = None
lstm2_state = None
for step, (x, y) in enumerate(tl.iterate.ptb_iterator(train_data, batch_size, num_steps)):
with tf.GradientTape() as tape:
## compute outputs
logits, lstm1_state, lstm2_state = net(
x, lstm1_initial_state=lstm1_state, lstm2_initial_state=lstm2_state
)
## compute loss and update model
cost = tl.cost.cross_entropy(logits, tf.reshape(y, [-1]), name='train_loss')
grad, _ = tf.clip_by_global_norm(tape.gradient(cost, train_weights), max_grad_norm)
optimizer.apply_gradients(zip(grad, train_weights))
costs += cost
iters += 1
if step % (epoch_size // 10) == 10:
print(
"%.3f perplexity: %.3f speed: %.0f wps" % (
step * 1.0 / epoch_size, np.exp(costs / iters), iters * batch_size * num_steps /
(time.time() - start_time)
)
)
train_perplexity = np.exp(costs / iters)
print("Epoch: %d/%d Train Perplexity: %.3f" % (i + 1, max_max_epoch, train_perplexity))
# Validing
net.eval()
start_time = time.time()
costs = 0.0
iters = 0
        # reset all states at the beginning of every epoch
lstm1_state = None
lstm2_state = None
for step, (x, y) in enumerate(tl.iterate.ptb_iterator(valid_data, batch_size, num_steps)):
## compute outputs
logits, lstm1_state, lstm2_state = net(x, lstm1_initial_state=lstm1_state, lstm2_initial_state=lstm2_state)
## compute loss and update model
cost = tl.cost.cross_entropy(logits, tf.reshape(y, [-1]), name='train_loss')
costs += cost
iters += 1
valid_perplexity = np.exp(costs / iters)
print("Epoch: %d/%d Valid Perplexity: %.3f" % (i + 1, max_max_epoch, valid_perplexity))
print("Evaluation")
# Testing
net.eval()
# go through the test set step by step, it will take a while.
start_time = time.time()
costs = 0.0
iters = 0
    # reset all states at the beginning
lstm1_state = None
lstm2_state = None
for step, (x, y) in enumerate(tl.iterate.ptb_iterator(test_data, batch_size=1, num_steps=1)):
## compute outputs
logits, lstm1_state, lstm2_state = net(x, lstm1_initial_state=lstm1_state, lstm2_initial_state=lstm2_state)
## compute loss and update model
cost = tl.cost.cross_entropy(logits, tf.reshape(y, [-1]), name='train_loss')
costs += cost
iters += 1
test_perplexity = np.exp(costs / iters)
print("Test Perplexity: %.3f took %.2fs" % (test_perplexity, time.time() - start_time))
print(
"More example: Text generation using Trump's speech data: https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_generate_text.py -- def main_lstm_generate_text():"
)
if __name__ == "__main__":
main()
# log of SmallConfig
# Start learning a language model by using PTB dataset
# Epoch: 1 Learning rate: 1.000
# 0.004 perplexity: 5512.735 speed: 4555 wps
# 0.104 perplexity: 841.289 speed: 8823 wps
# 0.204 perplexity: 626.273 speed: 9292 wps
# 0.304 perplexity: 505.628 speed: 9472 wps
# 0.404 perplexity: 435.580 speed: 9551 wps
# 0.504 perplexity: 390.108 speed: 9555 wps
# 0.604 perplexity: 351.379 speed: 9546 wps
# 0.703 perplexity: 324.846 speed: 9579 wps
# 0.803 perplexity: 303.824 speed: 9574 wps
# 0.903 perplexity: 284.468 speed: 9551 wps
# Epoch: 1 Train Perplexity: 269.981
# Epoch: 1 Valid Perplexity: 178.561
# Epoch: 2 Learning rate: 1.000
# 0.004 perplexity: 211.632 speed: 7697 wps
# 0.104 perplexity: 151.509 speed: 9488 wps
# 0.204 perplexity: 158.947 speed: 9674 wps
# 0.304 perplexity: 153.963 speed: 9806 wps
# 0.404 perplexity: 150.938 speed: 9817 wps
# 0.504 perplexity: 148.413 speed: 9824 wps
# 0.604 perplexity: 143.763 speed: 9765 wps
# 0.703 perplexity: 141.616 speed: 9731 wps
# 0.803 perplexity: 139.618 speed: 9781 wps
# 0.903 perplexity: 135.880 speed: 9735 wps
# Epoch: 2 Train Perplexity: 133.771
# Epoch: 2 Valid Perplexity: 142.595
# Epoch: 3 Learning rate: 1.000
# 0.004 perplexity: 146.902 speed: 8345 wps
# 0.104 perplexity: 105.647 speed: 9572 wps
# 0.204 perplexity: 114.261 speed: 9585 wps
# 0.304 perplexity: 111.237 speed: 9586 wps
# 0.404 perplexity: 110.181 speed: 9605 wps
# 0.504 perplexity: 109.383 speed: 9601 wps
# 0.604 perplexity: 106.722 speed: 9635 wps
# 0.703 perplexity: 106.075 speed: 9597 wps
# 0.803 perplexity: 105.481 speed: 9624 wps
# 0.903 perplexity: 103.262 speed: 9618 wps
# Epoch: 3 Train Perplexity: 102.272
# Epoch: 3 Valid Perplexity: 131.884
# Epoch: 4 Learning rate: 1.000
# 0.004 perplexity: 118.127 speed: 7867 wps
# 0.104 perplexity: 85.530 speed: 9330 wps
# 0.204 perplexity: 93.559 speed: 9399 wps
# 0.304 perplexity: 91.141 speed: 9386 wps
# 0.404 perplexity: 90.668 speed: 9462 wps
# 0.504 perplexity: 90.366 speed: 9516 wps
# 0.604 perplexity: 88.479 speed: 9477 wps
# 0.703 perplexity: 88.275 speed: 9533 wps
# 0.803 perplexity: 88.091 speed: 9560 wps
# 0.903 perplexity: 86.430 speed: 9516 wps
# Epoch: 4 Train Perplexity: 85.839
# Epoch: 4 Valid Perplexity: 128.408
# Epoch: 5 Learning rate: 1.000
# 0.004 perplexity: 100.077 speed: 7682 wps
# 0.104 perplexity: 73.856 speed: 9197 wps
# 0.204 perplexity: 81.242 speed: 9266 wps
# 0.304 perplexity: 79.315 speed: 9375 wps
# 0.404 perplexity: 79.009 speed: 9439 wps
# 0.504 perplexity: 78.874 speed: 9377 wps
# 0.604 perplexity: 77.430 speed: 9436 wps
# 0.703 perplexity: 77.415 speed: 9417 wps
# 0.803 perplexity: 77.424 speed: 9407 wps
# 0.903 perplexity: 76.083 speed: 9407 wps
# Epoch: 5 Train Perplexity: 75.719
# Epoch: 5 Valid Perplexity: 127.057
# Epoch: 6 Learning rate: 0.500
# 0.004 perplexity: 87.561 speed: 7130 wps
# 0.104 perplexity: 64.202 speed: 9753 wps
# 0.204 perplexity: 69.518 speed: 9537 wps
# 0.304 perplexity: 66.868 speed: 9647 wps
# 0.404 perplexity: 65.766 speed: 9538 wps
# 0.504 perplexity: 64.967 speed: 9537 wps
# 0.604 perplexity: 63.090 speed: 9565 wps
# 0.703 perplexity: 62.415 speed: 9544 wps
# 0.803 perplexity: 61.751 speed: 9504 wps
# 0.903 perplexity: 60.027 speed: 9482 wps
# Epoch: 6 Train Perplexity: 59.127
# Epoch: 6 Valid Perplexity: 120.339
# Epoch: 7 Learning rate: 0.250
# 0.004 perplexity: 72.069 speed: 7683 wps
# 0.104 perplexity: 53.331 speed: 9526 wps
# 0.204 perplexity: 57.897 speed: 9572 wps
# 0.304 perplexity: 55.557 speed: 9491 wps
# 0.404 perplexity: 54.597 speed: 9483 wps
# 0.504 perplexity: 53.817 speed: 9471 wps
# 0.604 perplexity: 52.147 speed: 9511 wps
# 0.703 perplexity: 51.473 speed: 9497 wps
# 0.803 perplexity: 50.788 speed: 9521 wps
# 0.903 perplexity: 49.203 speed: 9515 wps
# Epoch: 7 Train Perplexity: 48.303
# Epoch: 7 Valid Perplexity: 120.782
# Epoch: 8 Learning rate: 0.125
# 0.004 perplexity: 63.503 speed: 8425 wps
# 0.104 perplexity: 47.324 speed: 9433 wps
# 0.204 perplexity: 51.525 speed: 9653 wps
# 0.304 perplexity: 49.405 speed: 9520 wps
# 0.404 perplexity: 48.532 speed: 9487 wps
# 0.504 perplexity: 47.800 speed: 9610 wps
# 0.604 perplexity: 46.282 speed: 9554 wps
# 0.703 perplexity: 45.637 speed: 9536 wps
# 0.803 perplexity: 44.972 speed: 9493 wps
# 0.903 perplexity: 43.506 speed: 9496 wps
# Epoch: 8 Train Perplexity: 42.653
# Epoch: 8 Valid Perplexity: 122.119
# Epoch: 9 Learning rate: 0.062
# 0.004 perplexity: 59.375 speed: 7158 wps
# 0.104 perplexity: 44.223 speed: 9275 wps
# 0.204 perplexity: 48.269 speed: 9459 wps
# 0.304 perplexity: 46.273 speed: 9564 wps
# 0.404 perplexity: 45.450 speed: 9604 wps
# 0.504 perplexity: 44.749 speed: 9604 wps
# 0.604 perplexity: 43.308 speed: 9619 wps
# 0.703 perplexity: 42.685 speed: 9647 wps
# 0.803 perplexity: 42.022 speed: 9673 wps
# 0.903 perplexity: 40.616 speed: 9678 wps
# Epoch: 9 Train Perplexity: 39.792
# Epoch: 9 Valid Perplexity: 123.170
# Epoch: 10 Learning rate: 0.031
# 0.004 perplexity: 57.333 speed: 7183 wps
# 0.104 perplexity: 42.631 speed: 9592 wps
# 0.204 perplexity: 46.580 speed: 9518 wps
# 0.304 perplexity: 44.625 speed: 9569 wps
# 0.404 perplexity: 43.832 speed: 9576 wps
# 0.504 perplexity: 43.153 speed: 9571 wps
# 0.604 perplexity: 41.761 speed: 9557 wps
# 0.703 perplexity: 41.159 speed: 9524 wps
# 0.803 perplexity: 40.494 speed: 9527 wps
# 0.903 perplexity: 39.111 speed: 9558 wps
# Epoch: 10 Train Perplexity: 38.298
# Epoch: 10 Valid Perplexity: 123.658
# Epoch: 11 Learning rate: 0.016
# 0.004 perplexity: 56.238 speed: 7190 wps
# 0.104 perplexity: 41.771 speed: 9171 wps
# 0.204 perplexity: 45.656 speed: 9415 wps
# 0.304 perplexity: 43.719 speed: 9472 wps
# 0.404 perplexity: 42.941 speed: 9483 wps
# 0.504 perplexity: 42.269 speed: 9494 wps
# 0.604 perplexity: 40.903 speed: 9530 wps
# 0.703 perplexity: 40.314 speed: 9545 wps
# 0.803 perplexity: 39.654 speed: 9580 wps
# 0.903 perplexity: 38.287 speed: 9597 wps
# Epoch: 11 Train Perplexity: 37.477
# Epoch: 11 Valid Perplexity: 123.523
# Epoch: 12 Learning rate: 0.008
# 0.004 perplexity: 55.552 speed: 7317 wps
# 0.104 perplexity: 41.267 speed: 9234 wps
# 0.204 perplexity: 45.119 speed: 9461 wps
# 0.304 perplexity: 43.204 speed: 9519 wps
# 0.404 perplexity: 42.441 speed: 9453 wps
# 0.504 perplexity: 41.773 speed: 9536 wps
# 0.604 perplexity: 40.423 speed: 9555 wps
# 0.703 perplexity: 39.836 speed: 9576 wps
# 0.803 perplexity: 39.181 speed: 9579 wps
# 0.903 perplexity: 37.827 speed: 9554 wps
# Epoch: 12 Train Perplexity: 37.020
# Epoch: 12 Valid Perplexity: 123.192
# Epoch: 13 Learning rate: 0.004
# 0.004 perplexity: 55.124 speed: 8234 wps
# 0.104 perplexity: 40.970 speed: 9391 wps
# 0.204 perplexity: 44.804 speed: 9525 wps
# 0.304 perplexity: 42.912 speed: 9512 wps
# 0.404 perplexity: 42.162 speed: 9536 wps
# 0.504 perplexity: 41.500 speed: 9630 wps
# 0.604 perplexity: 40.159 speed: 9591 wps
# 0.703 perplexity: 39.574 speed: 9575 wps
# 0.803 perplexity: 38.921 speed: 9613 wps
# 0.903 perplexity: 37.575 speed: 9629 wps
# Epoch: 13 Train Perplexity: 36.771
# Epoch: 13 Valid Perplexity: 122.917
# Evaluation
# Test Perplexity: 116.723 took 124.06s
# MediumConfig
# Epoch: 1 Learning rate: 1.000
# 0.008 perplexity: 5173.547 speed: 6469 wps
# 0.107 perplexity: 1219.527 speed: 6453 wps
# 0.206 perplexity: 866.163 speed: 6441 wps
# 0.306 perplexity: 695.163 speed: 6428 wps
# 0.405 perplexity: 598.464 speed: 6420 wps
# 0.505 perplexity: 531.875 speed: 6422 wps
# 0.604 perplexity: 477.079 speed: 6425 wps
# 0.704 perplexity: 438.297 speed: 6428 wps
# 0.803 perplexity: 407.928 speed: 6425 wps
# 0.903 perplexity: 381.264 speed: 6429 wps
# Epoch: 1 Train Perplexity: 360.795
# Epoch: 1 Valid Perplexity: 208.854
# ...
# Epoch: 39 Learning rate: 0.001
# 0.008 perplexity: 56.618 speed: 6357 wps
# 0.107 perplexity: 43.375 speed: 6341 wps
# 0.206 perplexity: 47.873 speed: 6336 wps
# 0.306 perplexity: 46.408 speed: 6337 wps
# 0.405 perplexity: 46.327 speed: 6337 wps
# 0.505 perplexity: 46.115 speed: 6335 wps
# 0.604 perplexity: 45.323 speed: 6336 wps
# 0.704 perplexity: 45.286 speed: 6337 wps
# 0.803 perplexity: 45.174 speed: 6336 wps
# 0.903 perplexity: 44.334 speed: 6336 wps
# Epoch: 39 Train Perplexity: 44.021
# Epoch: 39 Valid Perplexity: 87.516
# Evaluation
# Test Perplexity: 83.858 took 167.58s
| zsdonghao/tensorlayer | examples/text_ptb/tutorial_ptb_lstm.py | Python | apache-2.0 | 20,514 |
import math
from src.course4.week3.tsp_nearest_neighbor import traveling_salesman_problem
def test_tsp_50_cities():
points = []
with open('src/course4/week3_nn.txt') as handle:
handle.readline()
n = 0
for line in handle:
index, x, y = line.split()
points.append((float(x), float(y), int(index)))
n += 1
if n == 50:
break
distance = traveling_salesman_problem(points)
assert math.floor(distance) == 2470
def test_tsp_1000_cities():
points = []
with open('src/course4/week3_nn.txt') as handle:
handle.readline()
n = 0
for line in handle:
index, x, y = line.split()
points.append((float(x), float(y), int(index)))
n += 1
if n == 1000:
break
distance = traveling_salesman_problem(points)
assert math.floor(distance) == 48581
def test_tsp_1000_cities_unsorted():
points = []
with open('src/course4/week3_nn_unsorted.txt') as handle:
handle.readline()
for line in handle:
index, x, y = line.split()
points.append((float(x), float(y), int(index)))
distance = traveling_salesman_problem(points)
assert math.floor(distance) == 29777
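# Illustrative call with hypothetical points (each point is (x, y, index), as
# parsed above); on a unit square the greedy nearest-neighbour tour has length
# 4.0 if the heuristic returns to the start city (3.0 otherwise):
# >>> traveling_salesman_problem([(0., 0., 1), (1., 0., 2), (1., 1., 3), (0., 1., 4)])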
| manoldonev/algo1-assignments | src/course4/week3/tests/test_tsp_nearest_neighbor.py | Python | mit | 1,299 |
"""Test the search module"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from sklearn.externals.joblib._compat import PY3_OR_LATER
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.fixes import in1d
from sklearn.utils.fixes import sp_version
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.exceptions import NotFittedError
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import ParameterSampler
from sklearn.model_selection._validation import FitFailedWarning
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import Ridge, SGDClassifier
from sklearn.model_selection.tests.common import OneTimeSplitter
# Neither of the following two estimators inherits from BaseEstimator,
# in order to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the parameter search algorithms"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.classes_ = np.unique(Y)
return self
def predict(self, T):
return T.shape[0]
def transform(self, X):
return X + self.foo_param
def inverse_transform(self, X):
return X - self.foo_param
predict_proba = predict
predict_log_proba = predict
decision_function = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
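# Note: GridSearchCV only relies on the estimator API (get_params/set_params,
# fit and predict/score), so the duck-typed MockClassifier above can be
# searched directly, e.g. (illustrative, mirroring test_grid_search below):
# >>> GridSearchCV(MockClassifier(), {'foo_param': [1, 2, 3]}).fit(X, y)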
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
assert_array_equal(grid_search.cv_results_["param_foo_param"].data,
[1, 2, 3])
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
def check_hyperparameter_searcher_with_fit_params(klass, **klass_kwargs):
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(expected_fit_params=['spam', 'eggs'])
searcher = klass(clf, {'foo_param': [1, 2, 3]}, cv=2, **klass_kwargs)
    # The CheckingClassifier generates an assertion error if
# a parameter is missing or has length != len(X).
assert_raise_message(AssertionError,
"Expected fit parameter(s) ['eggs'] not seen.",
searcher.fit, X, y, spam=np.ones(10))
assert_raise_message(AssertionError,
"Fit parameter spam has length 1; expected 4.",
searcher.fit, X, y, spam=np.ones(1),
eggs=np.zeros(10))
searcher.fit(X, y, spam=np.ones(10), eggs=np.zeros(10))
def test_grid_search_with_fit_params():
check_hyperparameter_searcher_with_fit_params(GridSearchCV)
def test_random_search_with_fit_params():
check_hyperparameter_searcher_with_fit_params(RandomizedSearchCV, n_iter=1)
def test_grid_search_fit_params_deprecation():
# NOTE: Remove this test in v0.21
# Use of `fit_params` in the class constructor is deprecated,
# but will still work until v0.21.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(expected_fit_params=['spam'])
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]},
fit_params={'spam': np.ones(10)})
assert_warns(DeprecationWarning, grid_search.fit, X, y)
def test_grid_search_fit_params_two_places():
# NOTE: Remove this test in v0.21
# If users try to input fit parameters in both
# the constructor (deprecated use) and the `fit`
# method, we'll ignore the values passed to the constructor.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(expected_fit_params=['spam'])
# The "spam" array is too short and will raise an
# error in the CheckingClassifier if used.
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]},
fit_params={'spam': np.ones(1)})
expected_warning = ('Ignoring fit_params passed as a constructor '
'argument in favor of keyword arguments to '
'the "fit" method.')
assert_warns_message(RuntimeWarning, expected_warning,
grid_search.fit, X, y, spam=np.ones(10))
# Verify that `fit` prefers its own kwargs by giving valid
# kwargs in the constructor and invalid in the method call
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]},
fit_params={'spam': np.ones(10)})
assert_raise_message(AssertionError, "Fit parameter spam has length 1",
grid_search.fit, X, y, spam=np.ones(1))
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = search_no_scoring.score(X, y)
score_accuracy = search_accuracy.score(X, y)
score_no_score_auc = search_no_score_method_auc.score(X, y)
score_auc = search_auc.score(X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_grid_search_groups():
# Check if ValueError (when groups is None) propagates to GridSearchCV
# And also check if groups is correctly passed to the cv object
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
groups = rng.randint(0, 3, 15)
clf = LinearSVC(random_state=0)
grid = {'C': [1]}
group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2), GroupKFold(),
GroupShuffleSplit()]
for cv in group_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
assert_raise_message(ValueError,
"The groups parameter should not be None",
gs.fit, X, y)
gs.fit(X, y, groups=groups)
non_group_cvs = [StratifiedKFold(), StratifiedShuffleSplit()]
for cv in non_group_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
# Should not raise an error
gs.fit(X, y)
def test_classes__property():
# Test that classes_ property matches best_estimator_.classes_
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
Cs = [.1, 1, 10]
grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})
grid_search.fit(X, y)
assert_array_equal(grid_search.best_estimator_.classes_,
grid_search.classes_)
# Test that regressors do not have a classes_ attribute
grid_search = GridSearchCV(Ridge(), {'alpha': [1.0, 2.0]})
grid_search.fit(X, y)
assert_false(hasattr(grid_search, 'classes_'))
# Test that the grid searcher has no classes_ attribute before it's fit
grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})
assert_false(hasattr(grid_search, 'classes_'))
# Test that the grid searcher has no classes_ attribute without a refit
grid_search = GridSearchCV(LinearSVC(random_state=0),
{'C': Cs}, refit=False)
grid_search.fit(X, y)
assert_false(hasattr(grid_search, 'classes_'))
def test_trivial_cv_results_attr():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
def test_no_refit():
# Test that GSCV can be used for model selection alone without refitting
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(not hasattr(grid_search, "best_estimator_") and
hasattr(grid_search, "best_index_") and
hasattr(grid_search, "best_params_"))
    # Make sure the predict/transform etc. functions raise a meaningful error message
for fn_name in ('predict', 'predict_proba', 'predict_log_proba',
'transform', 'inverse_transform'):
assert_raise_message(NotFittedError,
('refit=False. %s is available only after '
'refitting on the best parameters' % fn_name),
getattr(grid_search, fn_name), X)
def test_grid_search_error():
# Test that grid search will capture errors on data with different length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_when_param_grid_includes_range():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = None
if PY3_OR_LATER:
grid_search = GridSearchCV(clf, {'foo_param': range(1, 4)})
else:
grid_search = GridSearchCV(clf, {'foo_param': xrange(1, 4)})
grid_search.fit(X, y)
assert_equal(grid_search.best_estimator_.foo_param, 2)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raise_message(
ValueError,
"Parameter values for parameter (C) need to be a sequence"
"(but not a string) or np.ndarray.",
GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raise_message(
ValueError,
"Parameter values for parameter (C) need to be a non-empty sequence.",
GridSearchCV, clf, param_dict)
param_dict = {"C": "1,2,3"}
clf = SVC()
assert_raise_message(
ValueError,
"Parameter values for parameter (C) need to be a sequence"
"(but not a string) or np.ndarray.",
GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
    # Pass higher-dimensional X and y arrays to GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n_splits=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n_splits=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
@ignore_warnings
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "cv_results_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='fowlkes_mallows_score')
grid_search.fit(X, y)
# So can FMS ;)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
# test that repeated calls yield identical parameters
param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=3, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
if sp_version >= (0, 16):
param_distributions = {"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
def check_cv_results_array_types(cv_results, param_keys, score_keys):
# Check if the search `cv_results`'s array are of correct types
assert_true(all(isinstance(cv_results[param], np.ma.MaskedArray)
for param in param_keys))
assert_true(all(cv_results[key].dtype == object for key in param_keys))
assert_false(any(isinstance(cv_results[key], np.ma.MaskedArray)
for key in score_keys))
assert_true(all(cv_results[key].dtype == np.float64
for key in score_keys if not key.startswith('rank')))
assert_true(cv_results['rank_test_score'].dtype == np.int32)
def check_cv_results_keys(cv_results, param_keys, score_keys, n_cand):
# Test the search.cv_results_ contains all the required results
assert_array_equal(sorted(cv_results.keys()),
sorted(param_keys + score_keys + ('params',)))
assert_true(all(cv_results[key].shape == (n_cand,)
for key in param_keys + score_keys))
def check_cv_results_grid_scores_consistency(search):
# TODO Remove in 0.20
cv_results = search.cv_results_
res_scores = np.vstack(list([cv_results["split%d_test_score" % i]
for i in range(search.n_splits_)])).T
res_means = cv_results["mean_test_score"]
res_params = cv_results["params"]
n_cand = len(res_params)
grid_scores = assert_warns(DeprecationWarning, getattr,
search, 'grid_scores_')
assert_equal(len(grid_scores), n_cand)
# Check consistency of the structure of grid_scores
for i in range(n_cand):
assert_equal(grid_scores[i].parameters, res_params[i])
assert_array_equal(grid_scores[i].cv_validation_scores,
res_scores[i, :])
assert_array_equal(grid_scores[i].mean_validation_score, res_means[i])
def test_grid_search_cv_results():
X, y = make_classification(n_samples=50, n_features=4,
random_state=42)
n_splits = 3
n_grid_points = 6
params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]),
dict(kernel=['poly', ], degree=[1, 2])]
grid_search = GridSearchCV(SVC(), cv=n_splits, iid=False,
param_grid=params)
grid_search.fit(X, y)
grid_search_iid = GridSearchCV(SVC(), cv=n_splits, iid=True,
param_grid=params)
grid_search_iid.fit(X, y)
param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel')
score_keys = ('mean_test_score', 'mean_train_score',
'rank_test_score',
'split0_test_score', 'split1_test_score',
'split2_test_score',
'split0_train_score', 'split1_train_score',
'split2_train_score',
'std_test_score', 'std_train_score',
'mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time')
n_candidates = n_grid_points
for search, iid in zip((grid_search, grid_search_iid), (False, True)):
assert_equal(iid, search.iid)
cv_results = search.cv_results_
# Check if score and timing are reasonable
assert_true(all(cv_results['rank_test_score'] >= 1))
        assert_true(all(np.all(cv_results[k] >= 0) for k in score_keys
                        if k != 'rank_test_score'))
        assert_true(all(np.all(cv_results[k] <= 1) for k in score_keys
                        if 'time' not in k and
                        k != 'rank_test_score'))
# Check cv_results structure
check_cv_results_array_types(cv_results, param_keys, score_keys)
check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates)
# Check masking
cv_results = grid_search.cv_results_
n_candidates = len(grid_search.cv_results_['params'])
assert_true(all((cv_results['param_C'].mask[i] and
cv_results['param_gamma'].mask[i] and
not cv_results['param_degree'].mask[i])
for i in range(n_candidates)
if cv_results['param_kernel'][i] == 'linear'))
assert_true(all((not cv_results['param_C'].mask[i] and
not cv_results['param_gamma'].mask[i] and
cv_results['param_degree'].mask[i])
for i in range(n_candidates)
if cv_results['param_kernel'][i] == 'rbf'))
check_cv_results_grid_scores_consistency(search)
def test_random_search_cv_results():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
    # scipy.stats dists now support `seed`, but we still support scipy 0.12,
    # which doesn't. Hence the assertions in the test for random_search alone
    # should not depend on randomization.
n_splits = 3
n_search_iter = 30
params = dict(C=expon(scale=10), gamma=expon(scale=0.1))
random_search = RandomizedSearchCV(SVC(), n_iter=n_search_iter,
cv=n_splits, iid=False,
param_distributions=params)
random_search.fit(X, y)
random_search_iid = RandomizedSearchCV(SVC(), n_iter=n_search_iter,
cv=n_splits, iid=True,
param_distributions=params)
random_search_iid.fit(X, y)
param_keys = ('param_C', 'param_gamma')
score_keys = ('mean_test_score', 'mean_train_score',
'rank_test_score',
'split0_test_score', 'split1_test_score',
'split2_test_score',
'split0_train_score', 'split1_train_score',
'split2_train_score',
'std_test_score', 'std_train_score',
'mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time')
n_cand = n_search_iter
for search, iid in zip((random_search, random_search_iid), (False, True)):
assert_equal(iid, search.iid)
cv_results = search.cv_results_
# Check results structure
check_cv_results_array_types(cv_results, param_keys, score_keys)
check_cv_results_keys(cv_results, param_keys, score_keys, n_cand)
# For random_search, all the param array vals should be unmasked
assert_false(any(cv_results['param_C'].mask) or
any(cv_results['param_gamma'].mask))
check_cv_results_grid_scores_consistency(search)
def test_search_iid_param():
# Test the IID parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(SVC(), param_grid={'C': [1, 10]}, cv=cv)
random_search = RandomizedSearchCV(SVC(), n_iter=2,
param_distributions={'C': [1, 10]},
cv=cv)
for search in (grid_search, random_search):
search.fit(X, y)
assert_true(search.iid)
test_cv_scores = np.array(list(search.cv_results_['split%d_test_score'
% s_i][0]
for s_i in range(search.n_splits_)))
train_cv_scores = np.array(list(search.cv_results_['split%d_train_'
'score' % s_i][0]
for s_i in range(search.n_splits_)))
test_mean = search.cv_results_['mean_test_score'][0]
test_std = search.cv_results_['std_test_score'][0]
train_cv_scores = np.array(list(search.cv_results_['split%d_train_'
'score' % s_i][0]
for s_i in range(search.n_splits_)))
train_mean = search.cv_results_['mean_train_score'][0]
train_std = search.cv_results_['std_train_score'][0]
# Test the first candidate
assert_equal(search.cv_results_['param_C'][0], 1)
assert_array_almost_equal(test_cv_scores, [1, 1. / 3.])
assert_array_almost_equal(train_cv_scores, [1, 1])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average and weighted std
expected_test_mean = 1 * 1. / 4. + 1. / 3. * 3. / 4.
expected_test_std = np.sqrt(1. / 4 * (expected_test_mean - 1) ** 2 +
3. / 4 * (expected_test_mean - 1. / 3.) **
2)
assert_almost_equal(test_mean, expected_test_mean)
assert_almost_equal(test_std, expected_test_std)
# For the train scores, we do not take a weighted mean irrespective of
# i.i.d. or not
assert_almost_equal(train_mean, 1)
assert_almost_equal(train_std, 0)
# once with iid=False
grid_search = GridSearchCV(SVC(),
param_grid={'C': [1, 10]},
cv=cv, iid=False)
random_search = RandomizedSearchCV(SVC(), n_iter=2,
param_distributions={'C': [1, 10]},
cv=cv, iid=False)
for search in (grid_search, random_search):
search.fit(X, y)
assert_false(search.iid)
test_cv_scores = np.array(list(search.cv_results_['split%d_test_score'
% s][0]
for s in range(search.n_splits_)))
test_mean = search.cv_results_['mean_test_score'][0]
test_std = search.cv_results_['std_test_score'][0]
train_cv_scores = np.array(list(search.cv_results_['split%d_train_'
'score' % s][0]
for s in range(search.n_splits_)))
train_mean = search.cv_results_['mean_train_score'][0]
train_std = search.cv_results_['std_train_score'][0]
assert_equal(search.cv_results_['param_C'][0], 1)
# scores are the same as above
assert_array_almost_equal(test_cv_scores, [1, 1. / 3.])
# Unweighted mean/std is used
assert_almost_equal(test_mean, np.mean(test_cv_scores))
assert_almost_equal(test_std, np.std(test_cv_scores))
# For the train scores, we do not take a weighted mean irrespective of
# i.i.d. or not
assert_almost_equal(train_mean, 1)
assert_almost_equal(train_std, 0)
def test_search_cv_results_rank_tie_breaking():
X, y = make_blobs(n_samples=50, random_state=42)
# The two C values are close enough to give similar models
# which would result in a tie of their mean cv-scores
param_grid = {'C': [1, 1.001, 0.001]}
grid_search = GridSearchCV(SVC(), param_grid=param_grid)
random_search = RandomizedSearchCV(SVC(), n_iter=3,
param_distributions=param_grid)
for search in (grid_search, random_search):
search.fit(X, y)
cv_results = search.cv_results_
# Check tie breaking strategy -
# Check that there is a tie in the mean scores between
# candidates 1 and 2 alone
assert_almost_equal(cv_results['mean_test_score'][0],
cv_results['mean_test_score'][1])
assert_almost_equal(cv_results['mean_train_score'][0],
cv_results['mean_train_score'][1])
try:
assert_almost_equal(cv_results['mean_test_score'][1],
cv_results['mean_test_score'][2])
except AssertionError:
pass
try:
assert_almost_equal(cv_results['mean_train_score'][1],
cv_results['mean_train_score'][2])
except AssertionError:
pass
# 'min' rank should be assigned to the tied candidates
assert_almost_equal(search.cv_results_['rank_test_score'], [1, 1, 3])
def test_search_cv_results_none_param():
X, y = [[1], [2], [3], [4], [5]], [0, 0, 0, 0, 1]
estimators = (DecisionTreeRegressor(), DecisionTreeClassifier())
est_parameters = {"random_state": [0, None]}
cv = KFold(random_state=0)
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv).fit(X, y)
assert_array_equal(grid_search.cv_results_['param_random_state'],
[0, None])
@ignore_warnings()
def test_search_cv_timing():
svc = LinearSVC(random_state=0)
X = [[1, ], [2, ], [3, ], [4, ]]
y = [0, 1, 1, 0]
gs = GridSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0)
rs = RandomizedSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0, n_iter=2)
for search in (gs, rs):
search.fit(X, y)
for key in ['mean_fit_time', 'std_fit_time']:
            # NOTE The precision of time.time on Windows is not high
            # enough for the fit/score times to be non-zero for trivial X and y
assert_true(np.all(search.cv_results_[key] >= 0))
assert_true(np.all(search.cv_results_[key] < 1))
for key in ['mean_score_time', 'std_score_time']:
assert_true(search.cv_results_[key][1] >= 0)
assert_true(search.cv_results_[key][0] == 0.0)
assert_true(np.all(search.cv_results_[key] < 1))
def test_grid_search_correct_score_results():
# test that correct scores are used
n_splits = 3
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score, cv=n_splits)
cv_results = grid_search.fit(X, y).cv_results_
# Test scorer names
result_keys = list(cv_results.keys())
expected_keys = (("mean_test_score", "rank_test_score") +
tuple("split%d_test_score" % cv_i
for cv_i in range(n_splits)))
assert_true(all(in1d(expected_keys, result_keys)))
cv = StratifiedKFold(n_splits=n_splits)
n_splits = grid_search.n_splits_
for candidate_i, C in enumerate(Cs):
clf.set_params(C=C)
cv_scores = np.array(
list(grid_search.cv_results_['split%d_test_score'
% s][candidate_i]
for s in range(n_splits)))
for i, (train, test) in enumerate(cv.split(X, y)):
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, cv_scores[i])
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
grid_search_pickled = pickle.loads(pickle.dumps(grid_search))
assert_array_almost_equal(grid_search.predict(X),
grid_search_pickled.predict(X))
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
random_search_pickled = pickle.loads(pickle.dumps(random_search))
assert_array_almost_equal(random_search.predict(X),
random_search_pickled.predict(X))
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
res_params = grid_search.cv_results_['params']
for cand_i in range(len(res_params)):
est.set_params(**res_params[cand_i])
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(
correct_score,
grid_search.cv_results_['split%d_test_score' % i][cand_i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
res_params = random_search.cv_results_['params']
for cand_i in range(len(res_params)):
est.set_params(**res_params[cand_i])
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(
correct_score,
random_search.cv_results_['split%d_test_score'
% i][cand_i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
    # GridSearchCV with error_score != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
n_candidates = len(gs.cv_results_['params'])
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
def get_cand_scores(i):
return np.array(list(gs.cv_results_['split%d_test_score' % s][i]
for s in range(gs.n_splits_)))
assert all((np.all(get_cand_scores(cand_i) == 0.0)
for cand_i in range(n_candidates)
if gs.cv_results_['param_parameter'][cand_i] ==
FailingClassifier.FAILING_PARAMETER))
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
n_candidates = len(gs.cv_results_['params'])
assert all(np.all(np.isnan(get_cand_scores(cand_i)))
for cand_i in range(n_candidates)
if gs.cv_results_['param_parameter'][cand_i] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
    # GridSearchCV with error_score == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
def test_stochastic_gradient_loss_param():
# Make sure the predict_proba works when loss is specified
# as one of the parameters in the param_grid.
param_grid = {
'loss': ['log'],
}
X = np.arange(24).reshape(6, -1)
y = [0, 0, 0, 1, 1, 1]
clf = GridSearchCV(estimator=SGDClassifier(loss='hinge'),
param_grid=param_grid)
# When the estimator is not fitted, `predict_proba` is not available as the
# loss is 'hinge'.
assert_false(hasattr(clf, "predict_proba"))
clf.fit(X, y)
clf.predict_proba(X)
clf.predict_log_proba(X)
# Make sure `predict_proba` is not available when setting loss=['hinge']
# in param_grid
param_grid = {
'loss': ['hinge'],
}
clf = GridSearchCV(estimator=SGDClassifier(loss='hinge'),
param_grid=param_grid)
assert_false(hasattr(clf, "predict_proba"))
clf.fit(X, y)
assert_false(hasattr(clf, "predict_proba"))
def test_search_train_scores_set_to_false():
X = np.arange(6).reshape(6, -1)
y = [0, 0, 0, 1, 1, 1]
clf = LinearSVC(random_state=0)
gs = GridSearchCV(clf, param_grid={'C': [0.1, 0.2]},
return_train_score=False)
gs.fit(X, y)
def test_grid_search_cv_splits_consistency():
# Check if a one time iterable is accepted as a cv parameter.
n_samples = 100
n_splits = 5
X, y = make_classification(n_samples=n_samples, random_state=0)
gs = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.2, 0.3]},
cv=OneTimeSplitter(n_splits=n_splits,
n_samples=n_samples))
gs.fit(X, y)
gs2 = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.2, 0.3]},
cv=KFold(n_splits=n_splits))
gs2.fit(X, y)
def _pop_time_keys(cv_results):
for key in ('mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time'):
cv_results.pop(key)
return cv_results
    # OneTimeSplitter is a non-re-entrant cv where split can be called only
    # once. If ``cv.split`` is called once per param setting in
    # GridSearchCV.fit, the 2nd and 3rd parameters will not be evaluated, as
    # no train/test indices will be generated for the 2nd and subsequent
    # cv.split calls.
# This is a check to make sure cv.split is not called once per param
# setting.
np.testing.assert_equal(_pop_time_keys(gs.cv_results_),
_pop_time_keys(gs2.cv_results_))
# Check consistency of folds across the parameters
gs = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.1, 0.2, 0.2]},
cv=KFold(n_splits=n_splits, shuffle=True))
gs.fit(X, y)
    # As the first two param settings (C=0.1) and the next two param
    # settings (C=0.2) are the same, the test and train scores must also be
    # the same as long as the same train/test indices are generated for all
    # the cv splits, for both param settings.
for score_type in ('train', 'test'):
per_param_scores = {}
for param_i in range(4):
per_param_scores[param_i] = list(
gs.cv_results_['split%d_%s_score' % (s, score_type)][param_i]
for s in range(5))
assert_array_almost_equal(per_param_scores[0],
per_param_scores[1])
assert_array_almost_equal(per_param_scores[2],
per_param_scores[3])
def test_transform_inverse_transform_round_trip():
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
grid_search.fit(X, y)
X_round_trip = grid_search.inverse_transform(grid_search.transform(X))
assert_array_equal(X, X_round_trip)
| rishikksh20/scikit-learn | sklearn/model_selection/tests/test_search.py | Python | bsd-3-clause | 51,803 |
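# A short description of what this script does (inferred from the code below):
# it reads n lines of "hour minute flag" and prints the zero-padded time
# followed by "A porta abriu!" (the door opened) or "A porta fechou!"
# (the door closed) depending on the flag.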
n=int(input())
for i in range(n):
a=input()
a=a.split(" ")
    a[0] = "{0:0=2d}".format(int(a[0]))  # zero-pad the hour to two digits
    a[1] = "{0:0=2d}".format(int(a[1]))  # zero-pad the minute to two digits
if a[2]=="1":
print(a[0]+":"+a[1]+" - A porta abriu!")
else:
print(a[0]+":"+a[1]+" - A porta fechou!")
| h31nr1ch/Mirrors | c/OtherProblems/pepeJaTireiAVela-2152.py | Python | gpl-3.0 | 270 |
'''
Created on 29.07.2013
@author: mhoyer
'''
from mysqldb import MysqlDB
from local_system import LocalSystem
from remote_system import RemoteSystem
from entities import Application
import logging
import util
class Actionmanager():
'''
classdocs
'''
def __init__(self, config):
self.logger = logging.getLogger(__name__)
self.config = config.get_config_list()
self.app_config = config.get_applications_list()
self.db = MysqlDB(config)
self.system = LocalSystem(config)
self.remotesystem = RemoteSystem(config)
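    # Illustrative shape of one application entry as consumed by
    # Application(...) in the methods below; the values are hypothetical, and
    # the keys mirror the attributes this class reads (name, slave_node,
    # packages, databases, files, folders, needed_services, url):
    #
    #     {"name": "wiki", "slave_node": "node2", "packages": ["apache2"],
    #      "databases": ["wikidb"], "files": ["/etc/wiki.conf"],
    #      "folders": ["/var/www/wiki"], "needed_services": ["apache2"],
    #      "url": "/wiki"}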
def replicate_all(self):
for element in self.app_config:
app = Application(element)
if app.slave_node:
self.logger.info("replicating %s" % app.name)
self.replicate(app)
self.logger.info("Replication completed successfully")
def replicate_single(self, app_name):
app = None
# iterate over app config and load app object if there is a matching name
for item in self.app_config:
if item["name"] == app_name:
app = Application(item)
if app:
if app.slave_node:
self.replicate(app)
else:
self.logger.warning("Application has no slave node configured")
raise Exception("Configuration Error")
else:
self.logger.error("No application configured with name: " + app_name)
raise Exception("Configuration Error")
self.logger.info("Replication completed successfully")
def backup_all(self):
for element in self.app_config:
app = Application(element)
self.logger.info("saving %s" % app.name)
self.backup(app)
self.logger.info("Backup completed successfully")
def backup_single(self, app_name):
app = None
# iterate over app config and load app object if there is a matching name
for item in self.app_config:
if item["name"] == app_name:
app = Application(item)
if app:
self.backup(app)
else:
self.logger.error("No application configured with name: " + app_name)
raise Exception("Configuration Error")
self.logger.info("Backup completed successfully")
def replicate(self, app):
try:
# prepare replicator temp folder for the target node
self.system.prepare_application_dirs()
self.remotesystem.prepare_application_dirs(app.slave_node)
if app.packages:
self.logger.debug("ensuring packages installed: %s" % ', '.join(app.packages))
self.remotesystem.install(app.slave_node, app.packages)
if app.databases:
for database in app.databases:
self.logger.debug("replicating database: %s" % database)
self.db.replicate_database(database, app.slave_node)
if app.files:
for afile in app.files:
self.logger.debug("replicating file: %s" % afile)
self.remotesystem.transfer_single_file(app.slave_node, afile, afile)
if app.folders:
for afolder in app.folders:
self.logger.debug("replicating folder: %s" % afolder)
self.remotesystem.transfer_folder(app.slave_node, afolder, afolder)
# reload needed services
if app.needed_services:
for service in app.needed_services:
self.logger.debug("reloading service %s on %s" % (service,app.slave_node))
self.remotesystem.reload_service(app.slave_node, service)
self.remotesystem.prepare_application_dirs(app.slave_node)
# test availability
if app.url:
return self.remotesystem.test_availability(app.slave_node, 80, app.url)
except Exception as e:
self.logger.error("Stopping after error: " + str(e))
raise Exception("Error replicating " + app.name)
def backup(self, app):
# define path
app_temp_path = util.path_append([self.system.temp_path,app.name])
db_temp_path = util.path_append([app_temp_path,"databases"])
file_temp_path = util.path_append([app_temp_path,"files"])
# clear and prepare temp directories
self.system.prepare_application_dirs()
self.system.clear_folder(app_temp_path)
self.system.clear_folder(db_temp_path)
self.system.clear_folder(file_temp_path)
try:
# backup all components of the application
if app.databases:
for database in app.databases:
self.logger.debug("saving database: %s" % database)
self.db.dump_database(database, util.path_append([db_temp_path ,database + ".sql"]))
if app.files:
for afile in app.files:
self.logger.debug("saving file: %s" % afile)
self.system.mkdir(util.get_folder_from_path( util.path_append([file_temp_path, afile]) ), True)
self.system.cp(afile, util.path_append([file_temp_path, afile]), False)
if app.folders:
for folder in app.folders:
self.logger.debug("saving folder: %s" % folder)
self.system.mkdir(util.path_append([file_temp_path, folder]), True)
self.system.cp(folder, util.path_append([file_temp_path, folder]), True)
# write package list
self.system.write_package_list(util.path_append([app_temp_path, "package_list.txt"]))
# save compressed backup of application data
backup_file = util.path_append([self.system.backup_path, app.name + "_" + util.get_timestamp(), ".tar.gz"])
self.logger.debug("Saving compressed backup to: %s" % backup_file)
self.system.compress(app_temp_path, backup_file)
self.system.rm(app_temp_path, True)
except Exception as e:
self.logger.error("Stopping after error: " + str(e))
raise Exception("Error saving " + app.name) | marco-hoyer/replicator | src/main/python/replicator/actionmanager.py | Python | gpl-2.0 | 6,373 |
"""
Unit tests for resdk/resources/data.py file.
"""
import unittest
from mock import MagicMock, patch
from resdk.resources.data import Data
from resdk.resources.descriptor import DescriptorSchema
from resdk.resources.process import Process
class TestData(unittest.TestCase):
def test_sample(self):
data = Data(resolwe=MagicMock(), id=1)
data._original_values = {"entity": {"id": 5, "name": "XYZ"}}
self.assertEqual(data.sample.id, 5)
self.assertEqual(data.sample.name, "XYZ")
def test_collection(self):
data = Data(resolwe=MagicMock(), id=1, collection={"id": 5, "name": "XYZ"})
# test getting collections attribute
self.assertEqual(data.collection.id, 5)
self.assertEqual(data.collection.name, "XYZ")
def test_descriptor_schema(self):
resolwe = MagicMock()
data = Data(id=1, resolwe=resolwe)
data._descriptor_schema = DescriptorSchema(resolwe=resolwe, id=2)
# test getting descriptor schema attribute
self.assertEqual(data.descriptor_schema.id, 2)
# descriptor schema is not set
data._descriptor_schema = None
self.assertEqual(data.descriptor_schema, None)
# hydrated descriptor schema
descriptor_schema = {
"slug": "test-schema",
"name": "Test schema",
"version": "1.0.0",
"schema": [
{
"default": "56G",
"type": "basic:string:",
"name": "description",
"label": "Object description",
}
],
"id": 1,
}
data = Data(id=1, descriptor_schema=descriptor_schema, resolwe=MagicMock())
self.assertTrue(isinstance(data.descriptor_schema, DescriptorSchema))
self.assertEqual(data.descriptor_schema.slug, "test-schema")
self.assertEqual(
data.descriptor_schema.schema[0]["label"], "Object description"
)
def test_parents(self):
# Data with no id should fail.
data = Data(id=None, resolwe=MagicMock())
with self.assertRaisesRegex(ValueError, "Instance must be saved *"):
data.parents
# Core functionality should be checked with e2e tests.
# Check that cache is cleared at update.
data = Data(id=42, resolwe=MagicMock())
data._parents = "foo"
data.update()
self.assertEqual(data._parents, None)
def test_children(self):
# Data with no id should fail.
data = Data(id=None, resolwe=MagicMock())
with self.assertRaisesRegex(ValueError, "Instance must be saved *"):
data.children
# Core functionality should be checked with e2e tests.
# Check that cache is cleared at update.
data = Data(id=42, resolwe=MagicMock())
data._children = "foo"
data.update()
self.assertEqual(data._children, None)
def test_files(self):
resolwe = MagicMock()
data = Data(id=123, resolwe=resolwe)
data._get_dir_files = MagicMock(
side_effect=[["first_dir/file1.txt"], ["fastq_dir/file2.txt"]]
)
data.output = {
"list": [{"file": "element.gz"}],
"dir_list": [{"dir": "first_dir"}],
"fastq": {"file": "file.fastq.gz"},
"fastq_archive": {"file": "archive.gz"},
"fastq_dir": {"dir": "fastq_dir"},
}
data.process = Process(
resolwe=resolwe,
output_schema=[
{"name": "list", "label": "List", "type": "list:basic:file:"},
{"name": "dir_list", "label": "Dir_list", "type": "list:basic:dir:"},
{"name": "fastq", "label": "Fastq", "type": "basic:file:fastq:"},
{
"name": "fastq_archive",
"label": "Fastq_archive",
"type": "basic:file:",
},
{"name": "fastq_dir", "label": "Fastq_dir", "type": "basic:dir:"},
],
)
file_list = data.files()
self.assertCountEqual(
file_list,
[
"element.gz",
"archive.gz",
"file.fastq.gz",
"first_dir/file1.txt",
"fastq_dir/file2.txt",
],
)
file_list = data.files(file_name="element.gz")
self.assertEqual(file_list, ["element.gz"])
file_list = data.files(field_name="output.fastq")
self.assertEqual(file_list, ["file.fastq.gz"])
data.output = {
"list": [{"no_file_field_here": "element.gz"}],
}
data.process.output_schema = [
{"name": "list", "label": "List", "type": "list:basic:file:"},
]
with self.assertRaisesRegex(KeyError, "does not contain 'file' key."):
data.files()
data = Data(resolwe=MagicMock(), id=None)
with self.assertRaisesRegex(ValueError, "must be saved before"):
data.files()
@patch("resdk.resolwe.Resolwe")
def test_dir_files(self, resolwe_mock):
resolwe_mock.url = "http://resolwe.url"
resolwe_mock.session.get.side_effect = [
MagicMock(
content=b'[{"type": "file", "name": "file1.txt"}, '
b'{"type": "directory", "name": "subdir"}]'
),
MagicMock(content=b'[{"type": "file", "name": "file2.txt"}]'),
]
data = Data(id=123, resolwe=resolwe_mock)
files = data._get_dir_files("test_dir")
self.assertEqual(files, ["test_dir/file1.txt", "test_dir/subdir/file2.txt"])
@patch("resdk.resources.data.Data", spec=True)
def test_download_fail(self, data_mock):
message = "Only one of file_name or field_name may be given."
with self.assertRaisesRegex(ValueError, message):
Data.download(data_mock, file_name="a", field_name="b")
@patch("resdk.resources.data.Data", spec=True)
def test_download_ok(self, data_mock):
data_mock.configure_mock(id=123, **{"resolwe": MagicMock()})
data_mock.configure_mock(
**{
"files.return_value": ["file1.txt", "file2.fq.gz"],
}
)
Data.download(data_mock)
data_mock.resolwe._download_files.assert_called_once_with(
["123/file1.txt", "123/file2.fq.gz"], None
)
data_mock.reset_mock()
Data.download(data_mock, download_dir="/some/path/")
data_mock.resolwe._download_files.assert_called_once_with(
["123/file1.txt", "123/file2.fq.gz"], "/some/path/"
)
@patch("resdk.resolwe.Resolwe")
@patch("resdk.resources.data.urljoin")
@patch("resdk.resources.data.Data", spec=True)
def test_stdout_ok(self, data_mock, urljoin_mock, resolwe_mock):
# Configure mocks:
process_mock = MagicMock(type="data:index")
data_mock.configure_mock(
id=123, status="OK", resolwe=resolwe_mock, process=process_mock
)
urljoin_mock.return_value = "some_url"
resolwe_mock.configure_mock(url="a", auth="b")
# If response.ok = True:
resolwe_mock.session.get.return_value = MagicMock(
ok=True, **{"iter_content.return_value": [b"abc", b"def"]}
)
out = Data.stdout(data_mock)
self.assertEqual(out, "abcdef")
urljoin_mock.assert_called_once_with("a", "data/123/stdout.txt")
resolwe_mock.session.get.assert_called_once_with(
"some_url", stream=True, auth="b"
)
# If response.ok = False:
response = MagicMock(ok=False)
resolwe_mock.session.get.return_value = response
out = Data.stdout(data_mock)
self.assertEqual(response.raise_for_status.call_count, 1)
if __name__ == "__main__":
unittest.main()
| genialis/resolwe-bio-py | tests/unit/test_data.py | Python | apache-2.0 | 7,918 |
import datetime
import hashlib
import random
import re
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.db import models
from django.db import transaction
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
try:
from django.utils.timezone import now as datetime_now
except ImportError:
datetime_now = datetime.datetime.now
SHA1_RE = re.compile('^[a-f0-9]{40}$')
class RegistrationManager(models.Manager):
"""
Custom manager for the ``RegistrationProfile`` model.
The methods defined here provide shortcuts for account creation
and activation (including generation and emailing of activation
keys), and for cleaning out expired inactive accounts.
"""
def activate_user(self, activation_key):
"""
Validate an activation key and activate the corresponding
``User`` if valid.
If the key is valid and has not expired, return the ``User``
after activating.
If the key is not valid or has expired, return ``False``.
If the key is valid but the ``User`` is already active,
return ``False``.
To prevent reactivation of an account which has been
deactivated by site administrators, the activation key is
reset to the string constant ``RegistrationProfile.ACTIVATED``
after successful activation.
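
        Hypothetical usage sketch (the key normally arrives via the
        activation link in the email)::

            user = RegistrationProfile.objects.activate_user(activation_key)
            if user:
                ...  # the account is now active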
"""
# Make sure the key we're trying conforms to the pattern of a
# SHA1 hash; if it doesn't, no point trying to look it up in
# the database.
if SHA1_RE.search(activation_key):
try:
profile = self.get(activation_key=activation_key)
except self.model.DoesNotExist:
return False
if not profile.activation_key_expired():
user = profile.user
if hasattr(settings, 'REGISTRATION_DEFAULT_GROUP_NAME'):
user.groups.add(Group.objects.get(name=settings.REGISTRATION_DEFAULT_GROUP_NAME))
user.is_active = True
user.save()
profile.activation_key = self.model.ACTIVATED
profile.save()
return user
return False
def create_inactive_user(self, username, email, password,
site, send_email=True):
"""
Create a new, inactive ``User``, generate a
``RegistrationProfile`` and email its activation key to the
``User``, returning the new ``User``.
By default, an activation email will be sent to the new
user. To disable this, pass ``send_email=False``.
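
        Hypothetical usage from a registration view, where ``site`` is
        typically ``Site.objects.get_current()`` or a ``RequestSite``::

            RegistrationProfile.objects.create_inactive_user(
                'alice', '[email protected]', 's3cret', site)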
"""
new_user = User.objects.create_user(username, email, password)
new_user.is_active = False
new_user.save()
registration_profile = self.create_profile(new_user)
if send_email:
registration_profile.send_activation_email(site)
return new_user
create_inactive_user = transaction.commit_on_success(create_inactive_user)
def create_profile(self, user):
"""
Create a ``RegistrationProfile`` for a given
``User``, and return the ``RegistrationProfile``.
The activation key for the ``RegistrationProfile`` will be a
SHA1 hash, generated from a combination of the ``User``'s
username and a random salt.
"""
salt = hashlib.sha1(str(random.random())).hexdigest()[:5]
username = user.username
if isinstance(username, unicode):
username = username.encode('utf-8')
activation_key = hashlib.sha1(salt+username).hexdigest()
return self.create(user=user,
activation_key=activation_key)
def delete_expired_users(self):
"""
Remove expired instances of ``RegistrationProfile`` and their
associated ``User``s.
Accounts to be deleted are identified by searching for
instances of ``RegistrationProfile`` with expired activation
keys, and then checking to see if their associated ``User``
instances have the field ``is_active`` set to ``False``; any
``User`` who is both inactive and has an expired activation
key will be deleted.
It is recommended that this method be executed regularly as
part of your routine site maintenance; this application
provides a custom management command which will call this
method, accessible as ``manage.py cleanupregistration``.
Regularly clearing out accounts which have never been
activated serves two useful purposes:
        1. It alleviates the occasional need to reset a
``RegistrationProfile`` and/or re-send an activation email
when a user does not receive or does not act upon the
initial activation email; since the account will be
deleted, the user will be able to simply re-register and
receive a new activation key.
2. It prevents the possibility of a malicious user registering
one or more accounts and never activating them (thus
denying the use of those usernames to anyone else); since
those accounts will be deleted, the usernames will become
available for use again.
If you have a troublesome ``User`` and wish to disable their
account while keeping it in the database, simply delete the
associated ``RegistrationProfile``; an inactive ``User`` which
does not have an associated ``RegistrationProfile`` will not
be deleted.
"""
for profile in self.all():
try:
if profile.activation_key_expired():
user = profile.user
if not user.is_active:
user.delete()
profile.delete()
except User.DoesNotExist:
profile.delete()
class RegistrationProfile(models.Model):
"""
A simple profile which stores an activation key for use during
user account registration.
Generally, you will not want to interact directly with instances
of this model; the provided manager includes methods
for creating and activating new accounts, as well as for cleaning
out accounts which have never been activated.
While it is possible to use this model as the value of the
``AUTH_PROFILE_MODULE`` setting, it's not recommended that you do
so. This model's sole purpose is to store data temporarily during
account registration and activation.
"""
ACTIVATED = u"ALREADY_ACTIVATED"
user = models.ForeignKey(User, unique=True, verbose_name=_('user'))
activation_key = models.CharField(_('activation key'), max_length=40)
objects = RegistrationManager()
class Meta:
verbose_name = _('registration profile')
verbose_name_plural = _('registration profiles')
def __unicode__(self):
return u"Registration information for %s" % self.user
def activation_key_expired(self):
"""
Determine whether this ``RegistrationProfile``'s activation
key has expired, returning a boolean -- ``True`` if the key
has expired.
Key expiration is determined by a two-step process:
1. If the user has already activated, the key will have been
reset to the string constant ``ACTIVATED``. Re-activating
is not permitted, and so this method returns ``True`` in
this case.
2. Otherwise, the date the user signed up is incremented by
the number of days specified in the setting
``ACCOUNT_ACTIVATION_DAYS`` (which should be the number of
days after signup during which a user is allowed to
activate their account); if the result is less than or
equal to the current date, the key has expired and this
method returns ``True``.
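
        Worked example (hypothetical values): with ``ACCOUNT_ACTIVATION_DAYS``
        set to 7 and a signup on Jan 1st at 00:00, the key is usable until
        Jan 8th at 00:00; from then on this method returns ``True``.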
"""
expiration_date = datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)
return self.activation_key == self.ACTIVATED or \
(self.user.date_joined + expiration_date <= datetime_now())
activation_key_expired.boolean = True
def send_activation_email(self, site):
"""
Send an activation email to the user associated with this
``RegistrationProfile``.
The activation email will make use of two templates:
``registration/activation_email_subject.txt``
This template will be used for the subject line of the
email. Because it is used as the subject line of an email,
this template's output **must** be only a single line of
text; output longer than one line will be forcibly joined
into only a single line.
``registration/activation_email.txt``
This template will be used for the body of the email.
These templates will each receive the following context
variables:
``activation_key``
The activation key for the new account.
``expiration_days``
The number of days remaining during which the account may
be activated.
``site``
An object representing the site on which the user
registered; depending on whether ``django.contrib.sites``
is installed, this may be an instance of either
``django.contrib.sites.models.Site`` (if the sites
application is installed) or
``django.contrib.sites.models.RequestSite`` (if
not). Consult the documentation for the Django sites
framework for details regarding these objects' interfaces.
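
        A minimal ``activation_email.txt`` body might look like this (purely
        illustrative; the URL depends on your project's urlconf)::

            Activate your account within {{ expiration_days }} days:
            http://{{ site.domain }}/accounts/activate/{{ activation_key }}/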
"""
ctx_dict = {'activation_key': self.activation_key,
'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,
'site': site}
subject = render_to_string('registration/activation_email_subject.txt',
ctx_dict)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
message = render_to_string('registration/activation_email.txt',
ctx_dict)
self.user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
| austinhappel/django-registration | registration/models.py | Python | bsd-3-clause | 10,586 |
import urllib
from zerver.lib.test_classes import WebhookTestCase
class TravisHookTests(WebhookTestCase):
STREAM_NAME = 'travis'
URL_TEMPLATE = "/api/v1/external/travis?stream={stream}&api_key={api_key}"
FIXTURE_DIR_NAME = 'travis'
TOPIC = 'builds'
def test_travis_message(self) -> None:
"""
Build notifications are generated by Travis after build completes.
The subject describes the repo and Stash "project". The
content describes the commits pushed.
"""
expected_message = ("Author: josh_mandel\nBuild status: Passed :thumbs_up:\n"
"Details: [changes](https://github.com/hl7-fhir/fhir-sv"
"n/compare/6dccb98bcfd9...6c457d366a31), [build log](ht"
"tps://travis-ci.org/hl7-fhir/fhir-svn/builds/92495257)")
self.check_webhook(
"build", self.TOPIC, expected_message, content_type="application/x-www-form-urlencoded",
)
def test_ignore_travis_pull_request_by_default(self) -> None:
self.subscribe(self.test_user, self.STREAM_NAME)
result = self.client_post(
self.url,
self.get_body('pull_request'),
content_type="application/x-www-form-urlencoded",
)
self.assert_json_success(result)
msg = self.get_last_message()
self.assertNotEqual(msg.topic_name(), self.TOPIC)
def test_travis_pull_requests_are_not_ignored_when_applicable(self) -> None:
self.url = f"{self.build_webhook_url()}&ignore_pull_requests=false"
expected_message = ("Author: josh_mandel\nBuild status: Passed :thumbs_up:\n"
"Details: [changes](https://github.com/hl7-fhir/fhir-sv"
"n/compare/6dccb98bcfd9...6c457d366a31), [build log](ht"
"tps://travis-ci.org/hl7-fhir/fhir-svn/builds/92495257)")
self.check_webhook(
"pull_request",
self.TOPIC,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def get_body(self, fixture_name: str) -> str:
return urllib.parse.urlencode({'payload': self.webhook_fixture_data("travis", fixture_name, file_type="json")})
| showell/zulip | zerver/webhooks/travis/tests.py | Python | apache-2.0 | 2,291 |
# manage.py
import os
import unittest
import coverage
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
COV = coverage.coverage(
branch=True,
include='project/*',
omit=[
'project/tests/*',
'project/server/config.py',
'project/server/*/__init__.py'
]
)
COV.start()
from project.server import app, db
from project.server.models import User
migrate = Migrate(app, db)
manager = Manager(app)
# migrations
manager.add_command('db', MigrateCommand)
@manager.command
def test():
"""Runs the unit tests without test coverage."""
tests = unittest.TestLoader().discover('project/tests', pattern='test*.py')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
return 0
return 1
@manager.command
def cov():
"""Runs the unit tests with coverage."""
tests = unittest.TestLoader().discover('project/tests')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
COV.stop()
COV.save()
print('Coverage Summary:')
COV.report()
basedir = os.path.abspath(os.path.dirname(__file__))
covdir = os.path.join(basedir, 'tmp/coverage')
COV.html_report(directory=covdir)
print('HTML version: file://%s/index.html' % covdir)
COV.erase()
return 0
return 1
@manager.command
def create_db():
"""Creates the db tables."""
db.create_all()
@manager.command
def drop_db():
"""Drops the db tables."""
db.drop_all()
@manager.command
def create_admin():
"""Creates the admin user."""
db.session.add(User(email='[email protected]', password='admin', admin=True))
db.session.commit()
@manager.command
def create_data():
"""Creates sample data."""
pass
if __name__ == '__main__':
manager.run()
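# Typical invocations of the commands defined above (assuming this file is
# saved as manage.py):
#     python manage.py create_db
#     python manage.py create_admin
#     python manage.py test
#     python manage.py cov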
| kangusrm/XMLFeed | manage.py | Python | mit | 1,869 |
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Handlers for server-side data table paging for administration pages.
These handlers are designed to work with the jQuery Datatable plugin (http://datatables.net/)
"""
__author__ = '[email protected] (Matt Tracy)'
import base64
import json
import logging
from collections import namedtuple
from viewfinder.backend.db.db_client import DBKey
from viewfinder.backend.www.admin import admin
# Named tuple to hold a table page request from the jQuery datatable plugin.
_TablePageRequest = namedtuple('_TablePageRequest', ['table', 'op_type', 'start', 'length', 'last_key', 'echo'])
class AdminDataTableHandler(admin.AdminHandler):
"""Base data table handler - provides support methods to interact
with the jQuery data table plug-in, providing read-forward access to
supported data tables.
"""
def _GetCookieName(self):
"""Returns a cookie name, which is currently based on the base class type name."""
return type(self).__name__ + '_last_key'
def ReadTablePageRequest(self, table_name, op_type=None):
"""Formats the request data received from jQuery data table. The 'table_name'
parameter is required if a single handler can query more than one data table.
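
    A typical jQuery DataTables paging request carries, illustratively:
        ?iDisplayStart=20&iDisplayLength=10&sEcho=3
    where iDisplayStart/iDisplayLength select the page window and sEcho is an
    opaque counter that is echoed back in the response.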
"""
requested_start = int(self.get_argument('iDisplayStart'))
requested_length = int(self.get_argument('iDisplayLength'))
s_echo = self.get_argument('sEcho')
# Load last key, which is stored in a cookie
cookie = self.get_secure_cookie(self._GetCookieName())
try:
last_table, op_type, last_index, last_key = json.loads(cookie)
if last_table != table_name or last_index != requested_start:
last_key = None
requested_start = 0
except:
logging.warn('Bad cookie value: %s = %s' % (self._GetCookieName(), cookie))
self.clear_cookie(self._GetCookieName())
last_key = None
requested_start = 0
if last_key:
# Convert last key back into DBKey - this is lost in json serialization.
last_key = DBKey(last_key[0], last_key[1])
self._table_request = _TablePageRequest(table=table_name, op_type=op_type, start=requested_start,
length=requested_length, last_key=last_key, echo=s_echo)
return self._table_request
def WriteTablePageResponse(self, rows, last_key, table_count=None):
"""Writes the appropriate json response and tracking cookie."""
req = self._table_request
last_index = req.start + len(rows)
if table_count:
table_count = max(table_count, last_index)
elif len(rows) == req.length:
# There may be additional rows - this tricks the jquery data table into displaying a 'Next' button anyway.
table_count = last_index + 1
else:
table_count = last_index
json_dict = {
'sEcho': int(req.echo),
'iDisplayStart': req.start,
'iTotalRecords': table_count,
'iTotalDisplayRecords': table_count,
'aaData': rows,
}
cookie = json.dumps((req.table, req.op_type, last_index, last_key))
self.set_secure_cookie(self._GetCookieName(), cookie)
self.set_header('Content-Type', 'application/json; charset=UTF-8')
self.write(json_dict)
self.finish()
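    # The JSON dict written above resembles (values illustrative):
    #     {"sEcho": 3, "iDisplayStart": 20, "iTotalRecords": 31,
    #      "iTotalDisplayRecords": 31, "aaData": [[...], ...]}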
| 0359xiaodong/viewfinder | backend/www/admin/data_table.py | Python | apache-2.0 | 3,239 |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 12 11:50:27 2019
@author: Eoin Elliott
A module for easy access to example data for testing analysis codes.
Numpy files are loaded here; access the data via, for example,
nplab.analysis.example_data.SERS_and_shifts
"""
import numpy as np
import os
# Example SERS spectrum (BPT, 785nm laser, centered at 785nm)
SERS_and_shifts = np.load(os.path.join(os.path.dirname(__file__), 'example_SERS_and_shifts.npy'))
#
| nanophotonics/nplab | nplab/analysis/example_data/__init__.py | Python | gpl-3.0 | 467 |
from behave import *
from nose.tools import assert_in
from webium.driver import get_driver
import pages
PAGES_MAP = {
'Main': pages.MainPage,
'Login': pages.LoginPage,
}
@when("I open {page_name} page")
@step("I am on {page_name} page")
def step_impl(context, page_name):
context.page_name = page_name
page = PAGES_MAP[page_name]
context.page = page(url=''.join([context.app_url, page.url_path]))
context.page.open()
@then("I want to see {page_name} page")
def step_impl(context, page_name):
page = PAGES_MAP[page_name]
context.page = page()
if hasattr(context.page, 'default_wait'):
context.page.default_wait()
assert_in(page.url_path, get_driver().current_url)
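# A feature-file scenario wired to the steps above might read (hypothetical;
# page names must be keys of PAGES_MAP):
#
#     Scenario: Open the login page
#         Given I am on Main page
#         When I open Login page
#         Then I want to see Login page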
| ShinKaiRyuu/Python-testing-template | features/steps/navigation_steps.py | Python | mit | 721 |
from django.shortcuts import render_to_response
from django.http import HttpResponse
# Create your views here.
def index(request):
return render_to_response('index.html') | kostya9/KPI.RelationalDatabases | Lab3/flights/core/views.py | Python | mit | 176 |
# Author: Alexander Herbrich
# When: 03.02.2016
# Topic: program for solving the magic square
def quadrat():
    # THIS PROGRAM STILL CONTAINS ERRORS!!!!!
# A + B + C = S
# D + M + d = S
# a + b + c = S
# A + D + a = S
# B + M + b = S
# C + d + c = S
# A + M + c = S
# C + M + a = S
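    # Added note: why the centre must be S/3 -- summing the middle row, middle
    # column and both diagonals gives 4*S, which counts every cell once (3*S in
    # total) plus the centre M three extra times, so 4*S = 3*S + 3*M, i.e. M = S/3.
    # That is why S is required to be divisible by 3 below.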
MAX = 21
ianz = 0
ianz2 = 0
A = 0
C = 0
B = 0
S = 0
M = 0
D = 0
d = 0
a = 0
b = 0
c = 0
for C in range (2 , MAX):
for B in range (2 , MAX):
if (C == B) :
continue
for A in range (2 , MAX):
if (A == B) or (A == C) :
continue
S = A + B + C
if (S % 3) != 0:
continue
M = S // 3
ianz += 1
if M not in range (2, 21):
continue
a = S - M - A
if (a < 1) or (a == A) or (a == B) or (a == C) or (a == M):
continue
b = S - M - B
if (b < 1) or (b == A) or (b == B) or (b == C) or (b == M) or (b == a):
continue
c = S - M - C
if (c < 1) or (c == A) or (c == B) or (c == C) or (c == M) or (c == a) or (c == b):
continue
D = S - c - A
if (D < 1) or (D == A) or (D == B) or (D == C) or (D == M) or (D == a) or (D == b) or (D == c):
continue
d = S - D - M
if (d < 1) or (d == A) or (d == B) or (d == C) or (d == M) or (d == a) or (d == b) or (d == c) or (d == D):
continue
if ((d+C+a) != S):
print ("nicht zulaessig1...","|",A,B,C,"|",D,M,d,"|",c,b,a,"|",S)
continue
if ((b+c+a) != S):
print ("nicht zulaessig2...","|",A,B,C,"|",D,M,d,"|",c,b,a,"|",S)
continue
ianz2 += 1
print ("\n",A,B,C,"\n",D,M,d,"\n",c,b,a,"\n\tSumme: ",S,"\n")
# print("\tA =%4i \tB =%4i \tC =%4i \tD =%4i \tM =%4i \td =%4i \tc =%4i \tb =%4i \ta =%4i \tS =%4i " % (A,B,C,D,M,d,c,b,a,S))
# print ("\tA=" + str(A) + "\tB=" + str(B) + "\tC=" + str(C) + "\tM=" + str(M) + "\tS=" + str(S))
print ("Anzahl:" + str(ianz) + "\tAnzahl2:" + str(ianz2))
quadrat()
| rherbrich74/FirstPythonSteps | Quadrat.py | Python | apache-2.0 | 2,362 |
# Higgins - A multi-media server
# Copyright (c) 2007-2009 Michael Frank <[email protected]>
#
# This program is free software; for license information see
# the COPYING file.
import random, string, urllib
from twisted.internet.defer import maybeDeferred
from xml.etree.ElementTree import Element, SubElement
from higgins.service import Service
from higgins.conf import conf
from higgins.upnp.device_service import UPNPDeviceService
from higgins.upnp.prettyprint import xmlprint
from higgins.upnp.logger import logger
class DeviceDeclarativeParser(type):
def __new__(cls, name, bases, attrs):
# TODO: verify required service attributes
# create UDN if needed
udn_conf_key = "UPNP_" + name + "_UDN"
udn = conf.get(udn_conf_key)
if udn == None:
udn = ''.join(map(lambda x: random.choice(string.letters), xrange(20)))
conf[udn_conf_key] = udn
attrs['UDN'] = udn
logger.log_debug("UDN for %s is %s" % (name, udn))
# load services
services = {}
for key,svc in attrs.items():
if isinstance(svc, UPNPDeviceService):
# add the service back-reference for each StateVar and Action
for statevar in svc._stateVars.values():
statevar.service = svc
for action in svc._actions.values():
action.service = svc
services[svc.serviceID] = svc
attrs['_services'] = services
return super(DeviceDeclarativeParser,cls).__new__(cls, name, bases, attrs)
class UPNPDevice(Service):
__metaclass__ = DeviceDeclarativeParser
manufacturer = "Higgins Project"
manufacturerURL = "http://syntaxjockey.com/higgins"
modelName = "Higgins UPnP Device"
modelDescription = "Higgins UPnP Device"
modelURL = "http://syntaxjockey.com/higgins"
modelNumber = None
serialNumber = None
deviceName = None
deviceType = None
friendlyName = None
UDN = None
def startService(self):
Service.startService(self)
def stopService(self):
return maybeDeferred(Service.stopService, self)
def getDescription(self, host, relativeUrls=False):
root = Element("root")
root.attrib['xmlns'] = 'urn:schemas-upnp-org:device-1-0'
version = SubElement(root, "specVersion")
SubElement(version, "major").text = "1"
SubElement(version, "minor").text = "0"
device = SubElement(root, "device")
SubElement(device, "deviceType").text = self.deviceType
SubElement(device, "friendlyName").text = self.friendlyName
SubElement(device, "manufacturer").text = self.manufacturer
SubElement(device, "UDN").text = "uuid:%s" % self.UDN
SubElement(device, "modelName").text = self.modelName
if self.manufacturerURL:
SubElement(device, "manufacturerURL").text = self.manufacturerURL
if self.modelDescription:
SubElement(device, "modelDescription").text = self.modelDescription
if self.modelURL:
SubElement(device, "modelURL").text = self.modelURL
if self.modelNumber:
SubElement(device, "modelNumber").text = self.modelNumber
if self.serialNumber:
SubElement(device, "serialNumber").text = self.serialNumber
if relativeUrls:
urlbase = ''
SubElement(device, "URLBase").text = "http://%s" % host
else:
urlbase = "http://%s" % host
svc_list = SubElement(device, "serviceList")
for svc in self._services.values():
service = SubElement(svc_list, "service")
SubElement(service, "serviceType").text = svc.serviceType
SubElement(service, "serviceId").text = svc.serviceID
SubElement(service, "SCPDURL").text = "%s/%s/%s" % (
urlbase,
self.UDN.replace(':', '_'),
svc.serviceID.replace(':', '_')
)
SubElement(service, "controlURL").text = "%s/%s/%s/control" % (
urlbase,
self.UDN.replace(':', '_'),
svc.serviceID.replace(':', '_')
)
SubElement(service, "eventSubURL").text = "%s/%s/%s/event" % (
urlbase,
self.UDN.replace(':', '_'),
svc.serviceID.replace(':', '_')
)
return xmlprint(root)
def __str__(self):
return self.UDN
# Define the public API
__all__ = ['UPNPDevice',]
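# Added declarative-usage sketch (not part of this module; the service class
# named below is hypothetical): the metaclass collects UPNPDeviceService
# attributes into _services and persists a generated UDN per device class name.
#
#     class MyMediaServer(UPNPDevice):
#         deviceType = 'urn:schemas-upnp-org:device:MediaServer:1'
#         friendlyName = 'Higgins Media Server'
#         content_directory = ContentDirectoryService()   # hypothetical UPNPDeviceService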
| msfrank/Higgins | higgins/upnp/device.py | Python | lgpl-2.1 | 4,547 |
"""
Copyright (c) 2007-2008, Dj Gilcrease
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from base import Job, cronScheduler, HOUR, DAY, WEEK, MONTH # noqa
def autodiscover(start_timer=True, registering=True):
"""
Auto-discover INSTALLED_APPS cron.py modules and fail silently when
not present. This forces an import on them to register any cron jobs they
may want.
"""
import imp
from django.conf import settings
for app in settings.INSTALLED_APPS:
# For each app, we need to look for an cron.py inside that app's
# package. We can't use os.path here -- recall that modules may be
# imported different ways (think zip files) -- so we need to get
# the app's __path__ and look for cron.py on that path.
        # Step 1: find out the app's __path__. Import errors here will (and
        # should) bubble up, but a missing __path__ (which is legal, but weird)
        # fails silently -- apps that do weird things with __path__ might
        # need to roll their own cron registration.
try:
app_path = __import__(app, {}, {}, [app.split('.')[-1]]).__path__
except AttributeError:
continue
        # Step 2: use imp.find_module to find the app's cron.py. For some
        # reason imp.find_module raises ImportError if the app can't be found
        # but doesn't actually try to import the module. So skip this app if
        # its cron.py doesn't exist
try:
imp.find_module('cron', app_path)
except ImportError:
continue
# Step 3: import the app's cron file. If this has errors we want them
# to bubble up.
__import__("%s.cron" % app)
# Step 4: once we find all the cron jobs, start the cronScheduler
cronScheduler.execute(start_timer=start_timer, registering=registering)
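# Added example (assumed API, not shown in this file): a cron.py module that
# autodiscover() would import from an installed app. The Job attributes and the
# cronScheduler.register call below are assumptions about base.py -- check that
# module for the exact interface.
#
#     # myapp/cron.py
#     from django_cron import cronScheduler, Job, HOUR
#
#     class NightlyCleanup(Job):
#         run_every = 12 * HOUR   # seconds between runs (assumed semantics)
#
#         def job(self):
#             pass                # the periodic work goes here
#
#     cronScheduler.register(NightlyCleanup)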
| Ixxy-Open-Source/django-cron | django_cron/__init__.py | Python | mit | 2,868 |
#Copyright 2010, Meka Robotics
#All rights reserved.
#http://mekabot.com
#Redistribution and use in source and binary forms, with or without
#modification, are permitted.
#THIS SOFTWARE IS PROVIDED BY THE Copyright HOLDERS AND CONTRIBUTORS
#"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
#FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
#Copyright OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
#INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES INCLUDING,
#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
#LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
#ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#POSSIBILITY OF SUCH DAMAGE.
# ########################## ACTUATOR_EC A2.R1.J0-J6 ########################################
config_arm_a2r1_actuator_ec={
'chid': 0,
'ethercat': {'pdo_version': 'actx1_pdo_v1',
'product_code': 1010,
'serial_number': 0},
'name': 'm3actuator_ec_xxx_j0',
'param': {'config': 2,
'k_d': 0,
'k_d_shift': 0,
'k_ff': 0,
'k_ff_shift': 0,
'k_ff_zero': 0,
'k_i': 0,
'k_i_limit': 0,
'k_i_shift': 0,
'k_p': 0,
'k_p_shift': 0,
'pwm_db': 0,
'pwm_max': 3186,
'qei_max': 0,
'qei_min': 0,
't_max': 0,
't_min': 0},
'pwr_component': 'm3pwr_pwr000'}
config_arm_a2r1_actuator_ec_j0=config_arm_a2r1_actuator_ec
config_arm_a2r1_actuator_ec_j1=config_arm_a2r1_actuator_ec
config_arm_a2r1_actuator_ec_j2=config_arm_a2r1_actuator_ec
config_arm_a2r1_actuator_ec_j3=config_arm_a2r1_actuator_ec
config_arm_a2r1_actuator_ec_j4=config_arm_a2r1_actuator_ec
config_arm_a2r1_actuator_ec_j5=config_arm_a2r1_actuator_ec
config_arm_a2r1_actuator_ec_j6=config_arm_a2r1_actuator_ec
# ########################## ACTUATOR A2.R1.J0 ########################################
config_arm_a2r1_actuator_j0={
'calib': {'amp_temp': {'cb_bias': 0.0,
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'name': 'Microchip TC1047',
'type': 'adc_linear_5V'},
'angle_df': {'theta_df': {'cutoff_freq': 80,
'order': 3,
'type': 'butterworth'},
'thetadot_df': {'cutoff_freq': 20,
'order': 3,
'type': 'diff_butterworth'},
'thetadotdot_df': {'cutoff_freq': 20,
'order': 3,
'type': 'diff_butterworth'},
'type': 'df_chain'},
'current': {'cb_bias': 0.0,
'cb_mV_per_A': 100.0,
'cb_scale': 1.0,
'cb_ticks_at_zero_a': 2048,
'cb_ticks_at_zero_b': 2048,
'name': 'Allegro ACS712-20',
'type': 'adc_linear_5V'},
'motor': {'gear_ratio': 120.0,
'max_winding_temp': 155,
'name': 'Maxon RE40 150W 24V',
'thermal_resistance_housing_ambient': 4.7000000000000002,
'thermal_resistance_rotor_housing': 1.8999999999999999,
'thermal_time_constant_winding': 41.0,
'winding_resistance': 0.316},
'motor_temp': {'cb_bias': 0.0,
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'name': 'Analog TMP36',
'type': 'adc_linear_3V3'},
'theta': {'cb_bias': 0.0,
'cb_scale': 1.0,
'name': 'ContElec VertX13',
'type': 'vertx_14bit'},
'torque': {'cb_bias': 0.0,
'cb_inv_torque': [1.0,0.0],
'cb_scale': 1.0,
'cb_torque': [1.0,0.0],
'name': 'ContElec VertX13',
'type': 'sea_vertx_14bit'},
'torquedot_df': {'cutoff_freq': 100,
'order': 3,
'type': 'diff_butterworth'}},
'description': 'max2_v0.1_seax2_v1.1',
'ec_component': 'm3actuator_ec_xxx_jx',
'ignore_bounds': 0,
'joint_component': 'm3joint_xxx_jx',
'name': 'm3actuator_xxx_jx',
'param': {'max_amp_temp': 100.0,
'max_current': 12000,
'max_motor_temp': 145.0,
'max_tq': 40000.0,
'min_tq': -40000.0,
'thetadot_deadband': 2.0}}
# ########################## ACTUATOR A2.R1.J1 ########################################
config_arm_a2r1_actuator_j1={
'calib': {'amp_temp': {'cb_bias': 0.0,
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'name': 'Microchip TC1047',
'type': 'adc_linear_5V'},
'angle_df': {'theta_df': {'cutoff_freq': 80,
'order': 3,
'type': 'butterworth'},
'thetadot_df': {'cutoff_freq': 20,
'order': 3,
'type': 'diff_butterworth'},
'thetadotdot_df': {'cutoff_freq': 20,
'order': 3,
'type': 'diff_butterworth'},
'type': 'df_chain'},
'current': {'cb_bias': 0.0,
'cb_mV_per_A': 100.0,
'cb_scale': 1.0,
'cb_ticks_at_zero_a': 2048,
'cb_ticks_at_zero_b': 2048,
'name': 'Allegro ACS712-20',
'type': 'adc_linear_5V'},
'motor': {'gear_ratio': 120.0,
'max_winding_temp': 155,
'name': 'Maxon RE40 150W 24V',
'thermal_resistance_housing_ambient': 4.7000000000000002,
'thermal_resistance_rotor_housing': 1.8999999999999999,
'thermal_time_constant_winding': 41.0,
'winding_resistance': 0.316},
'motor_temp': {'cb_bias': 0.0,
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'name': 'Analog TMP36',
'type': 'adc_linear_3V3'},
'theta': {'cb_bias': 0.0,
'cb_scale': 1.0,
'name': 'ContElec VertX13',
'type': 'vertx_14bit'},
'torque': {'cb_bias': 0.0,
'cb_inv_torque': [1.0,0.0],
'cb_scale': 1.0,
'cb_torque': [1.0,0.0],
'name': 'ContElec VertX13',
'type': 'sea_vertx_14bit'},
'torquedot_df': {'cutoff_freq': 100,
'order': 3,
'type': 'diff_butterworth'}},
'description': 'max2_v0.1_seax2_v1.1',
'ec_component': 'm3actuator_ec_xxx_jx',
'ignore_bounds': 0,
'joint_component': 'm3joint_xxx_jx',
'name': 'm3actuator_xxx_jx',
'param': {'max_amp_temp': 100.0,
'max_current': 12000,
'max_motor_temp': 145.0,
'max_tq': 40000.0,
'min_tq': -40000.0,
'thetadot_deadband': 2.0}}
# ########################## ACTUATOR A2.R1.J2 ########################################
config_arm_a2r1_actuator_j2={
'calib': {'amp_temp': {'cb_bias': 0.0,
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'name': 'Microchip TC1047',
'type': 'adc_linear_5V'},
'angle_df': {'theta_df': {'cutoff_freq': 80,
'order': 3,
'type': 'butterworth'},
'thetadot_df': {'cutoff_freq': 20,
'order': 3,
'type': 'diff_butterworth'},
'thetadotdot_df': {'cutoff_freq': 20,
'order': 3,
'type': 'diff_butterworth'},
'type': 'df_chain'},
'current': {'cb_bias': 0.0,
'cb_mV_per_A': 100.0,
'cb_scale': 1.0,
'cb_ticks_at_zero_a': 2048,
'cb_ticks_at_zero_b': 2048,
'name': 'Allegro ACS712-20',
'type': 'adc_linear_5V'},
'motor': {'gear_ratio': 120.0,
'max_winding_temp': 155,
'name': 'Maxon RE40 150W 24V',
'thermal_resistance_housing_ambient': 4.7000000000000002,
'thermal_resistance_rotor_housing': 1.8999999999999999,
'thermal_time_constant_winding': 41.0,
'winding_resistance': 0.316},
'motor_temp': {'cb_bias': 0.0,
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'name': 'Analog TMP36',
'type': 'adc_linear_3V3'},
'theta': {'cb_bias': 0.0,
'cb_scale': 1.0,
'name': 'ContElec VertX13',
'type': 'vertx_14bit'},
'torque': {'cb_bias': 0.0,
'cb_inv_torque': [1.0,0.0],
'cb_scale': 1.0,
'cb_torque': [1.0,0.0],
'name': 'ContElec VertX13',
'type': 'sea_vertx_14bit'},
'torquedot_df': {'cutoff_freq': 100,
'order': 3,
'type': 'diff_butterworth'}},
'description': 'max2_v0.1_seax2_v1.1',
'ec_component': 'm3actuator_ec_xxx_jx',
'ignore_bounds': 0,
'joint_component': 'm3joint_xxx_jx',
'name': 'm3actuator_xxx_jx',
'param': {'max_amp_temp': 100.0,
'max_current': 12000,
'max_motor_temp': 145.0,
'max_tq': 40000.0,
'min_tq': -40000.0,
'thetadot_deadband': 2.0}}
# ########################## ACTUATOR A2.R1.J3 ########################################
config_arm_a2r1_actuator_j3={
'calib': {'amp_temp': {'cb_bias': 0.0,
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'name': 'Microchip TC1047',
'type': 'adc_linear_5V'},
'angle_df': {'theta_df': {'cutoff_freq': 80,
'order': 3,
'type': 'butterworth'},
'thetadot_df': {'cutoff_freq': 20,
'order': 3,
'type': 'diff_butterworth'},
'thetadotdot_df': {'cutoff_freq': 20,
'order': 3,
'type': 'diff_butterworth'},
'type': 'df_chain'},
'current': {'cb_bias': 0.0,
'cb_mV_per_A': 100.0,
'cb_scale': 1.0,
'cb_ticks_at_zero_a': 2048,
'cb_ticks_at_zero_b': 2048,
'name': 'Allegro ACS712-20',
'type': 'adc_linear_5V'},
'motor': {'gear_ratio': 120.0,
'max_winding_temp': 155,
'name': 'Maxon RE40 150W 24V',
'thermal_resistance_housing_ambient': 4.7000000000000002,
'thermal_resistance_rotor_housing': 1.8999999999999999,
'thermal_time_constant_winding': 41.0,
'winding_resistance': 0.316},
'motor_temp': {'cb_bias': 0.0,
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'name': 'Analog TMP36',
'type': 'adc_linear_3V3'},
'theta': {'cb_bias': 0.0,
'cb_scale': 1.0,
'name': 'ContElec VertX13',
'type': 'vertx_14bit'},
'torque': {'cb_bias': 0.0,
'cb_inv_torque': [1.0,0.0],
'cb_scale': 1.0,
'cb_torque': [1.0,0.0],
'name': 'ContElec VertX13',
'type': 'sea_vertx_14bit'},
'torquedot_df': {'cutoff_freq': 100,
'order': 3,
'type': 'diff_butterworth'}},
'description': 'max2_v0.1_seax2_v1.1',
'ec_component': 'm3actuator_ec_xxx_jx',
'ignore_bounds': 0,
'joint_component': 'm3joint_xxx_jx',
'name': 'm3actuator_xxx_jx',
'param': {'max_amp_temp': 100.0,
'max_current': 12000,
'max_motor_temp': 145.0,
'max_tq': 40000.0,
'min_tq': -40000.0,
'thetadot_deadband': 2.0}}
# ########################## ACTUATOR A2.R1.J4 ########################################
config_arm_a2r1_actuator_j4={
'calib': {'amp_temp': {'cb_bias': 0.0,
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'name': 'Microchip TC1047',
'type': 'adc_linear_5V'},
'angle_df': {'theta_df': {'cutoff_freq': 80,
'order': 3,
'type': 'butterworth'},
'thetadot_df': {'cutoff_freq': 20,
'order': 3,
'type': 'diff_butterworth'},
'thetadotdot_df': {'cutoff_freq': 20,
'order': 3,
'type': 'diff_butterworth'},
'type': 'df_chain'},
'current': {'cb_bias': 0.0,
'cb_mV_per_A': 100.0,
'cb_scale': 1.0,
'cb_ticks_at_zero_a': 2048,
'cb_ticks_at_zero_b': 2048,
'name': 'Allegro ACS712-20',
'type': 'adc_linear_5V'},
'motor': {'gear_ratio': 120.0,
'max_winding_temp': 155,
'name': 'Maxon RE40 150W 24V',
'thermal_resistance_housing_ambient': 4.7000000000000002,
'thermal_resistance_rotor_housing': 1.8999999999999999,
'thermal_time_constant_winding': 41.0,
'winding_resistance': 0.316},
'motor_temp': {'cb_bias': 0.0,
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'name': 'Analog TMP36',
'type': 'adc_linear_3V3'},
'theta': {'cb_bias': 0.0,
'cb_scale': 1.0,
'name': 'ContElec VertX13',
'type': 'vertx_14bit'},
'torque': {'cb_bias': 0.0,
'cb_inv_torque': [1.0,0.0],
'cb_scale': 1.0,
'cb_torque': [1.0,0.0],
'name': 'ContElec VertX13',
'type': 'sea_vertx_14bit'},
'torquedot_df': {'cutoff_freq': 100,
'order': 3,
'type': 'diff_butterworth'}},
'description': 'max2_v0.1_seax2_v1.1',
'ec_component': 'm3actuator_ec_xxx_jx',
'ignore_bounds': 0,
'joint_component': 'm3joint_xxx_jx',
'name': 'm3actuator_xxx_jx',
'param': {'max_amp_temp': 100.0,
'max_current': 12000,
'max_motor_temp': 145.0,
'max_tq': 40000.0,
'min_tq': -40000.0,
'thetadot_deadband': 2.0}}
# ########################## ACTUATOR A2.R1.J5 ########################################
config_arm_a2r1_actuator_j5={
'calib': {'amp_temp': {'cb_bias': 0.0,
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'name': 'Microchip TC1047',
'type': 'adc_linear_5V'},
'angle_df': {'theta_df': {'cutoff_freq': 80,
'order': 3,
'type': 'butterworth'},
'thetadot_df': {'cutoff_freq': 20,
'order': 3,
'type': 'diff_butterworth'},
'thetadotdot_df': {'cutoff_freq': 20,
'order': 3,
'type': 'diff_butterworth'},
'type': 'df_chain'},
'current': {'cb_bias': 0.0,
'cb_mV_per_A': 100.0,
'cb_scale': 1.0,
'cb_ticks_at_zero_a': 2048,
'cb_ticks_at_zero_b': 2048,
'name': 'Allegro ACS712-20',
'type': 'adc_linear_5V'},
'motor': {'gear_ratio': 120.0,
'max_winding_temp': 155,
'name': 'Maxon RE40 150W 24V',
'thermal_resistance_housing_ambient': 4.7000000000000002,
'thermal_resistance_rotor_housing': 1.8999999999999999,
'thermal_time_constant_winding': 41.0,
'winding_resistance': 0.316},
'motor_temp': {'cb_bias': 0.0,
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'name': 'Analog TMP36',
'type': 'adc_linear_3V3'},
'theta': {'cb_bias': 0.0,
'cb_scale': 1.0,
'name': 'ContElec VertX13',
'type': 'vertx_14bit'},
'torque': {'cb_bias': 0.0,
'cb_inv_torque': [1.0,0.0],
'cb_scale': 1.0,
'cb_torque': [1.0,0.0],
'name': 'ContElec VertX13',
'type': 'sea_vertx_14bit'},
'torquedot_df': {'cutoff_freq': 100,
'order': 3,
'type': 'diff_butterworth'}},
'description': 'max2_v0.1_seax2_v1.1',
'ec_component': 'm3actuator_ec_xxx_jx',
'ignore_bounds': 0,
'joint_component': 'm3joint_xxx_jx',
'name': 'm3actuator_xxx_jx',
'param': {'max_amp_temp': 100.0,
'max_current': 12000,
'max_motor_temp': 145.0,
'max_tq': 40000.0,
'min_tq': -40000.0,
'thetadot_deadband': 2.0}}
# ########################## ACTUATOR A2.R1.J6 ########################################
config_arm_a2r1_actuator_j6={
'calib': {'amp_temp': {'cb_bias': 0.0,
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'name': 'Microchip TC1047',
'type': 'adc_linear_5V'},
'angle_df': {'theta_df': {'cutoff_freq': 80,
'order': 3,
'type': 'butterworth'},
'thetadot_df': {'cutoff_freq': 20,
'order': 3,
'type': 'diff_butterworth'},
'thetadotdot_df': {'cutoff_freq': 20,
'order': 3,
'type': 'diff_butterworth'},
'type': 'df_chain'},
'current': {'cb_bias': 0.0,
'cb_mV_per_A': 100.0,
'cb_scale': 1.0,
'cb_ticks_at_zero_a': 2048,
'cb_ticks_at_zero_b': 2048,
'name': 'Allegro ACS712-20',
'type': 'adc_linear_5V'},
'motor': {'gear_ratio': 120.0,
'max_winding_temp': 155,
'name': 'Maxon RE40 150W 24V',
'thermal_resistance_housing_ambient': 4.7000000000000002,
'thermal_resistance_rotor_housing': 1.8999999999999999,
'thermal_time_constant_winding': 41.0,
'winding_resistance': 0.316},
'motor_temp': {'cb_bias': 0.0,
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'name': 'Analog TMP36',
'type': 'adc_linear_3V3'},
'theta': {'cb_bias': 0.0,
'cb_scale': 1.0,
'name': 'ContElec VertX13',
'type': 'vertx_14bit'},
'torque': {'cb_bias': 0.0,
'cb_inv_torque': [1.0,0.0],
'cb_scale': 1.0,
'cb_torque': [1.0,0.0],
'name': 'ContElec VertX13',
'type': 'sea_vertx_14bit'},
'torquedot_df': {'cutoff_freq': 100,
'order': 3,
'type': 'diff_butterworth'}},
'description': 'max2_v0.1_seax2_v1.1',
'ec_component': 'm3actuator_ec_xxx_jx',
'ignore_bounds': 0,
'joint_component': 'm3joint_xxx_jx',
'name': 'm3actuator_xxx_jx',
'param': {'max_amp_temp': 100.0,
'max_current': 12000,
'max_motor_temp': 145.0,
'max_tq': 40000.0,
'min_tq': -40000.0,
'thetadot_deadband': 2.0}}
# ########################## JOINT A2.R1.J0-J4 ########################################
config_arm_a2r1_joint_j0={
'actuator_component': 'm3actuator_xxx_j0',
'name': 'm3joint_xxx_j0',
'param': {'kq_d': 0.0,
'kq_g': 1.0,
'kq_i': 0.0,
'kq_i_limit': 0.0,
'kq_i_range': 0.0,
'kq_p': 0.0,
'kt_d': 0.0,
'kt_i': 0.0,
'kt_i_limit': 0.0,
'kt_i_range': 0.0,
'kt_p': 0.0,
'max_q': 360.0,
'max_q_pad': 0.0,
'max_q_slew_rate': 25.0,
'min_q': 0.0,
'min_q_pad': 0.0},
'transmission':{'act_name': 'm3actuator_xxx_j0',
'qj_to_qa':[1.0],
'qs_to_qj':[1.0],
'tqj_to_tqa':[1.0],
'tqs_to_tqj':[1.0],
'type': 'gear'}}
config_arm_a2r1_joint_j1=config_arm_a2r1_joint_j0
config_arm_a2r1_joint_j2=config_arm_a2r1_joint_j0
config_arm_a2r1_joint_j3=config_arm_a2r1_joint_j0
config_arm_a2r1_joint_j4=config_arm_a2r1_joint_j0
# ########################## JOINT A2.R1.J5 ########################################
config_arm_a2r1_joint_j5={
'actuator_component': 'm3actuator_xxx_j5',
'name': 'm3joint_xxx_j0',
'param': {'kq_d': 0.0,
'kq_g': 1.0,
'kq_i': 0.0,
'kq_i_limit': 0.0,
'kq_i_range': 0.0,
'kq_p': 0.0,
'kt_d': 0.0,
'kt_i': 0.0,
'kt_i_limit': 0.0,
'kt_i_range': 0.0,
'kt_p': 0.0,
'max_q': 360.0,
'max_q_pad': 0.0,
'max_q_slew_rate': 25.0,
'min_q': 0.0,
'min_q_pad': 0.0},
'transmission':{'act_name': 'm3actuator_xxx_j5',
'cpj_name': 'm3joint_xxx_j6',
'qj_to_qa':[2.0,2.0],
'qs_to_qj':[1.0,0.0],
'tqj_to_tqa':[0.25,0.25],
'tqs_to_tqj':[2.0,2.0],
'type': 'differential'}}
# ########################## JOINT A2.R1.J6 ########################################
config_arm_a2r1_joint_j6={
'actuator_component': 'm3actuator_xxx_j6',
'name': 'm3joint_xxx_j0',
'param': {'kq_d': 0.0,
'kq_g': 1.0,
'kq_i': 0.0,
'kq_i_limit': 0.0,
'kq_i_range': 0.0,
'kq_p': 0.0,
'kt_d': 0.0,
'kt_i': 0.0,
'kt_i_limit': 0.0,
'kt_i_range': 0.0,
'kt_p': 0.0,
'max_q': 360.0,
'max_q_pad': 0.0,
'max_q_slew_rate': 25.0,
'min_q': 0.0,
'min_q_pad': 0.0},
'transmission':{'act_name': 'm3actuator_xxx_j6',
'cpj_name': 'm3joint_xxx_j5',
'qj_to_qa':[-2.0,2.0],
'qs_to_qj':[1.0,0.0],
'tqj_to_tqa':[-0.25,0.25],
'tqs_to_tqj':[-2.0,2.0],
'type': 'differential'}}
# ########################## ARM A2.R1 ########################################
config_arm_a2r1={
'name': 'm3arm_xxx',
'ndof': 7,
'limb_name': 'xxx_arm',
'dynamatics_component': 'm3dynamatics_xxx',
'joint_components':{'J0': 'm3joint_xxx_j0',
'J1': 'm3joint_xxx_j1',
'J2': 'm3joint_xxx_j2',
'J3': 'm3joint_xxx_j3',
'J4': 'm3joint_xxx_j4',
'J5': 'm3joint_xxx_j5',
                        'J6': 'm3joint_xxx_j6'}}
# ########################## Dynamatics Right Arm A2.R1 ########################################
config_arm_a2r1_dynamatics_right_arm={
'chain_component': 'm3arm_xxx',
'links': [{'Ixx': 0.00465797,
'Ixy': 4.7899999999999999e-06,
'Ixz': 9.9699999999999994e-06,
'Iyy': 0.00374632,
'Iyz': 2.3819999999999999e-05,
'Izz': 0.0028815500000000001,
'a': 0.0,
'alpha': 90.0,
'cx': 0.0057061799999999999,
'cy': -0.0028646800000000001,
'cz': -0.021885080000000001,
'd': 0.18465000000000001,
'joint_offset': -90,
'm': 1.9910000000000001},
{'Ixx': 0.00199343,
'Ixy': -0.00076418000000000005,
'Ixz': -6.9500000000000004e-06,
'Iyy': 0.00122781,
'Iyz': 1.8309999999999999e-05,
'Izz': 0.0021946800000000001,
'a': 0.0,
'alpha': 90,
'cx': 0.0256435,
'cy': -0.04287067,
'cz': -0.00098167000000000003,
'd': 0.0,
'joint_offset': 90.0,
'm': 0.504},
{'Ixx': 0.02883807,
'Ixy': 2.921e-05,
'Ixz': -0.0016239,
'Iyy': 0.029054839999999998,
'Iyz': -6.9670000000000002e-05,
'Izz': 0.0022021599999999999,
'a': 0.03175,
'alpha': 90.0,
'cx': 0.0052489099999999999,
'cy': 0.00101117,
'cz': -0.08821909,
'd': 0.27865000000000001,
'joint_offset': 90.0,
'm': 2.2724299999999999},
{'Ixx': 0.00056886000000000003,
'Ixy': 2.7000000000000001e-07,
'Ixz': 3.1e-07,
'Iyy': 0.00040234,
'Iyz': 1.065e-05,
'Izz': 0.00035205000000000002,
'a': -0.0063499999999999997,
'alpha': 90,
'cx': -4.0139999999999999e-05,
'cy': 0.02541156,
'cz': -0.002758,
'd': 0,
'joint_offset': 0.0,
'm': 0.19700000000000001},
{'Ixx': 0.018770990000000001,
'Ixy': -4.9999999999999998e-07,
'Ixz': -0.0010734200000000001,
'Iyy': 0.018805539999999999,
'Iyz': 7.5149999999999997e-05,
'Izz': 0.0010869600000000001,
'a': 0.0,
'alpha': -90,
'cx': 0.0078302100000000006,
'cy': -0.00024240000000000001,
'cz': -0.1170596,
'd': 0.27045999999999998,
'joint_offset': 0,
'm': 1.3400000000000001},
{'Ixx': 0.00010964,
'Ixy': -2e-08,
'Ixz': 0.0,
'Iyy': 0.00012383000000000001,
'Iyz': 4.0000000000000001e-08,
'Izz': 7.7459999999999994e-05,
'a': 0.0,
'alpha': 90,
'cx': 9.8720000000000003e-05,
'cy': -0.00016891000000000001,
'cz': -0.0012537,
'd': 0.0,
'joint_offset': 90.0,
'm': 0.22500000000000001},
{'Ixx': 0.00010018000000000001,
'Ixy': 8.9999999999999999e-08,
'Ixz': 1.1000000000000001e-07,
'Iyy': 6.0600000000000003e-05,
'Iyz': -5.0200000000000002e-06,
'Izz': 5.838e-05,
'a': 0.0,
'alpha': 90.0,
'cx': -5.8730000000000002e-05,
'cy': -0.011888360000000001,
'cz': 0.0096118499999999999,
'd': 0.0,
'joint_offset': 90.0,
'm': 0.11},
{'a': 0.0,
'alpha': 90.0,
'd': 0.044139999999999999,
'joint_offset': 0.0}],
'name': 'm3dynamatics_xxx',
'ndof': 7,
'param': {'payload_com': [0.0, 0.0, 0.06],
'payload_inertia': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
'payload_mass': 0.856,
'use_accelerations': False,
'use_velocities': False}}
# ########################## Dynamatics Left Arm A2.R1 ########################################
config_arm_a2r1_dynamatics_left_arm={
'chain_component': 'm3arm_xxx',
'links': [{'Ixx': 0.00569726,
'Ixy': -2.7000000000000001e-07,
'Ixz': 1.2999999999999999e-05,
'Iyy': 0.0036925999999999999,
'Iyz': -8.2020000000000004e-05,
'Izz': 0.00292092,
'a': 0.0,
'alpha': 90.0,
'cx': 0.00019141000000000001,
'cy': -0.015244890000000001,
'cz': 0.021107089999999998,
'd': -0.18465000000000001,
'joint_offset': -90,
'm': 1.9910000000000001},
{'Ixx': 0.00199343,
'Ixy': 0.00076418000000000005,
'Ixz': -6.9500000000000004e-06,
'Iyy': 0.00122781,
'Iyz': -1.8309999999999999e-05,
'Izz': 0.0021946800000000001,
'a': 0.0,
'alpha': 90,
'cx': -0.0256435,
'cy': -0.04287067,
'cz': 2.268e-05,
'd': 0.0,
'joint_offset': 90.0,
'm': 0.504},
{'Ixx': 0.029535700000000002,
'Ixy': -2.7549999999999999e-05,
'Ixz': -0.0017019400000000001,
'Iyy': 0.029802559999999999,
'Iyz': 6.8159999999999998e-05,
'Izz': 0.0021860899999999999,
'a': -0.03175,
'alpha': 90.0,
'cx': 0.0055088300000000002,
'cy': -0.0014015,
'cz': -0.089679449999999994,
'd': 0.27865000000000001,
'joint_offset': 90.0,
'm': 2.2724299999999999},
{'Ixx': 0.00056886000000000003,
'Ixy': -2.7000000000000001e-07,
'Ixz': 3.1e-07,
'Iyy': 0.00040234,
'Iyz': -1.065e-05,
'Izz': 0.00035205000000000002,
'a': -0.0063499999999999997,
'alpha': 90,
'cx': 4.0139999999999999e-05,
'cy': 0.02541156,
'cz': 0.002758,
'd': 0,
'joint_offset': 0.0,
'm': 0.19700000000000001},
{'Ixx': 0.018770990000000001,
'Ixy': -4.9999999999999998e-07,
'Ixz': -0.0010734200000000001,
'Iyy': 0.018805539999999999,
'Iyz': 7.5149999999999997e-05,
'Izz': 0.0010869600000000001,
'a': 0.0,
'alpha': -90,
'cx': 0.0078302100000000006,
'cy': -0.00024240000000000001,
'cz': -0.1170596,
'd': 0.27045999999999998,
'joint_offset': 0,
'm': 1.3400000000000001},
{'Ixx': 0.00010964,
'Ixy': -2e-08,
'Ixz': 0.0,
'Iyy': 0.00012383000000000001,
'Iyz': 4.0000000000000001e-08,
'Izz': 7.7459999999999994e-05,
'a': 0.0,
'alpha': 90,
'cx': 9.8720000000000003e-05,
'cy': -0.00016891000000000001,
'cz': -0.0012537,
'd': 0.0,
'joint_offset': 90.0,
'm': 0.22500000000000001},
{'Ixx': 0.00010018000000000001,
'Ixy': 8.9999999999999999e-08,
'Ixz': 1.1000000000000001e-07,
'Iyy': 6.0600000000000003e-05,
'Iyz': -5.0200000000000002e-06,
'Izz': 5.838e-05,
'a': 0.0,
'alpha': 90.0,
'cx': -5.8730000000000002e-05,
'cy': -0.011888360000000001,
'cz': 0.0096118499999999999,
'd': 0.0,
'joint_offset': 90.0,
'm': 0.11},
{'a': 0.0,
'alpha': 90.0,
'd': 0.044139999999999999,
'joint_offset': 0.0}],
'name': 'm3dynamatics_xxx',
'ndof': 7,
'param': {'payload_com': [0.0, 0.0, 0.068],
'payload_inertia': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
'payload_mass': 0.86,
'use_accelerations': False,
'use_velocities': False}}
# ########################## Payloads A2.R1 ########################################
config_arm_a2r1_payload_h2r1_right_hand_load_cell={
'payload_com': [0.0, 0.0, 0.068],
'payload_inertia': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
'payload_mass': 0.856
}
config_arm_a2r1_payload_load_cell={
'payload_com': [0.0, 0.0, 0.008],
'payload_inertia': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
'payload_mass': 0.055
}
config_arm_a2r1_payload_none={
'payload_com': [0.0, 0.0, 0.000],
'payload_inertia': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
'payload_mass': 0.0
}
# ########################## A2.R1 ########################################
config_full_a2r1={'actuator_ec':[config_arm_a2r1_actuator_ec_j0,
config_arm_a2r1_actuator_ec_j1,
config_arm_a2r1_actuator_ec_j2,
config_arm_a2r1_actuator_ec_j3,
config_arm_a2r1_actuator_ec_j4,
config_arm_a2r1_actuator_ec_j5,
config_arm_a2r1_actuator_ec_j6],
'actuator':[config_arm_a2r1_actuator_j0,
config_arm_a2r1_actuator_j1,
config_arm_a2r1_actuator_j2,
config_arm_a2r1_actuator_j3,
config_arm_a2r1_actuator_j4,
config_arm_a2r1_actuator_j5,
config_arm_a2r1_actuator_j6],
'joint':[config_arm_a2r1_joint_j0,
config_arm_a2r1_joint_j1,
config_arm_a2r1_joint_j2,
config_arm_a2r1_joint_j3,
config_arm_a2r1_joint_j4,
config_arm_a2r1_joint_j5,
config_arm_a2r1_joint_j6],
'dynamatics_right_arm':config_arm_a2r1_dynamatics_right_arm,
'dynamatics_left_arm':config_arm_a2r1_dynamatics_left_arm,
'arm':config_arm_a2r1,
'payloads':{
'h2r1_right_hand_load_cell':config_arm_a2r1_payload_h2r1_right_hand_load_cell,
'load_cell':config_arm_a2r1_payload_load_cell,
'none':config_arm_a2r1_payload_none}}
| CentralLabFacilities/m3meka | python/scripts/m3qa/config_arm_a2r1.py | Python | mit | 38,774 |
import time
import copy
from re import compile as regex
from tek.tools import decode
from tek.errors import InternalError, InvalidInput, MooException
from tek.io.terminal import terminal, ColorString
class UserInputTerminated(MooException):
pass
class InputQueue(list):
delay = None
force_output = False
def push(self, *input):
self.extend(input)
@property
def pop(self):
if self.delay is not None:
time.sleep(self.delay)
return list.pop(self, 0)
@property
def suppress_output(self):
return bool(self) and not self.force_output
input_queue = InputQueue()
def is_digit(arg):
return isinstance(arg, int) or (isinstance(arg, str) and
arg.isdigit())
class UserInput(object):
def __init__(self, text, validator=None, validate=True, newline=True,
single=False, remove_text=False, initial_input=None, **kw):
""" @param remove_text: Clear the prompt lines after input has
been accepted.
@param newline: Print a newline after input has been accepted.
@param initial_input: Text that is inserted into the input
initially. Will be returned as input value.
"""
self._text = text
self._validator = validator
self._do_validate = validate
self._newline = newline
self._single = single
self._remove_text = remove_text
self._initial_input = initial_input
self.__init_attributes()
def __init_attributes(self):
self._input = None
self._args = None
self._setup_validator()
def _setup_validator(self):
pass
@property
def value(self):
return decode(self._input)
@property
def args(self):
return self._args
def read(self):
prompt = self.prompt
if input_queue:
self._synth_input()
else:
clear_count = max(len(prompt) - len(self.fail_prompt), 0)
lower = prompt[clear_count:]
upper = prompt[:clear_count]
if not terminal.locked:
terminal.lock()
if self._remove_text:
terminal.push_lock()
terminal.push(upper)
while not self._read(lower):
lower = self.fail_prompt
if self._remove_text:
terminal.pop_lock()
if self._newline:
terminal.push()
return self.value
def _read(self, prompt):
terminal.push(prompt)
terminal.write(' ')
input = terminal.input(single=self._single,
initial=self._initial_input)
valid = self._do_input(input)
if not valid:
terminal.pop()
return valid
def _synth_input(self):
if not input_queue.suppress_output:
terminal.push(self.prompt)
input = input_queue.pop
if not self._do_input(input):
raise InvalidInput(input)
def _do_input(self, input):
self._input = input
return self._validate()
def _validate(self):
return not (self._do_validate and self._validator and not
self._validator.match(str(self._input)))
@property
def prompt(self):
return self._text
@property
def fail_prompt(self):
return ['Invalid input. Try again:']
class SimpleChoice(UserInput):
def __init__(self, elements, text=[''], additional=[], *a, **kw):
self.text = text
self._elements = list(map(str, elements))
self._additional = list(map(str, additional))
UserInput.__init__(self, [], *a, **kw)
def _setup_validator(self):
self._validator = regex(r'^(%s)$' % '|'.join(self._elements +
self._additional))
@property
def prompt(self):
text = list(self.text)
text[-1] += self.input_hint_string
return text
@property
def fail_prompt(self):
sup = UserInput.fail_prompt.fget(self)
sup[-1] += self.input_hint_string
return sup
@property
def input_hint_string(self):
v = [_f for _f in self.input_hint if _f]
return ' [%s]' % '/'.join(v) if v else ''
@property
def input_hint(self):
return [e for e in self._elements if not e.isdigit()]
def add_element(self, e):
self._elements.append(str(e))
self._setup_validator()
class SingleCharSimpleChoice(SimpleChoice):
""" Restrict input to single characters, allowing omission of
newline for input. Fallback to conventional SimpleChoice if choices
contain multi-char elements.
"""
def __init__(self, elements, enter=None, additional=[], validate=True,
*args, **kwargs):
if enter:
additional += ['']
self._enter = enter
single = (all(len(str(e)) <= 1 for e in elements + additional) and
validate)
SimpleChoice.__init__(self, elements, additional=additional,
single=single, validate=validate, *args,
**kwargs)
def _do_input(self, _input):
return SimpleChoice._do_input(self, self._enter if self._enter and
_input == '' else _input)
class YesNo(SingleCharSimpleChoice):
def __init__(self, text=['Confirm'], *args, **kwargs):
SingleCharSimpleChoice.__init__(self, ['y', 'n'], text=text, enter='y')
@property
def value(self):
return self._input == 'y'
def __bool__(self):
return bool(self.value)
class SpecifiedChoice(SingleCharSimpleChoice):
""" Automatically supply enumeration for the strings available for
choice and query for a number.
@param info: list of lines to print after the corresponding
element, without enumeration.
"""
def __init__(self, elements, text_pre=None, text_post=None, simple=None,
info=None, values=None, *args, **kwargs):
""" @param values: optional list of objects to return from if
selected by number
"""
self._choices = elements
self._values = values
self._simple = simple or []
self._text_pre = text_pre or []
self._text_post = text_post or []
self._info = info or [[]] * len(elements)
self._numbers = list(range(1, len(elements) + 1))
SingleCharSimpleChoice.__init__(self, elements=self._numbers,
text=[], additional=self._simple,
*args, **kwargs)
def _format_choice(self, n, choice, info):
pad = ' ' * (len(str(self._numbers[-1])) - len(str(n)))
return [' {}[{}] {}'.format(pad, n, choice)] + [' ' * 5 + i for i in
info]
@property
def prompt(self):
text = copy.copy(self._text_pre)
for c in zip(self._numbers, self._choices, self._info):
text.append(self._format_choice(*c))
return text + self._text_post
@property
def input_hint(self):
return self._simple
def _is_choice_index(self, index):
return is_digit(index) and 0 < int(index) <= len(self._choices)
@property
def value(self):
i = SingleCharSimpleChoice.value.fget(self)
if i in self._simple:
return i
elif self._is_choice_index(i):
return self._effective_values[int(i) - 1]
elif not self._do_validate:
return i
else:
raise InternalError('SpecifiedChoice: strange input: ' +
self._input)
@property
def raw_value(self):
i = SingleCharSimpleChoice.value.fget(self)
if (i in self._simple or self._is_choice_index(i) or not
self._do_validate):
return i
else:
raise InternalError('SpecifiedChoice: strange input: ' +
self._input)
@property
def index(self):
i = self._input
return int(i) - 1 if self._is_choice_index(i) else -1
def add_choice(self, new):
self._choices.append(new)
num = len(self._choices)
self._numbers.append(num)
self.add_element(num)
@property
def _effective_values(self):
return self._values or self._choices
class LoopingInput(object):
def __init__(self, terminate='q', overwrite=True, dispatch=[],
raise_quit=False, **kw):
self._terminate = terminate
self._overwrite = overwrite
self._dispatch = dict(dispatch)
self._raise_quit = raise_quit
self._force_terminate = False
def read(self):
self._remove_text |= self._overwrite
while True:
value = super(LoopingInput, self).read()
if value == self._terminate:
break
elif isinstance(value, str) and value in self._dispatch:
self._dispatch[value]()
self.process()
if self._force_terminate:
break
if self._overwrite and not input_queue.suppress_output:
terminal.push(self.prompt)
terminal.push()
if self._raise_quit:
raise UserInputTerminated()
else:
return self.loop_value
def process(self):
pass
@property
def loop_value(self):
return None
class CheckboxList(LoopingInput, SpecifiedChoice):
def __init__(self, elements, initial=None, colors=None, simple=[], **kw):
self._lines = list(map(str, elements))
LoopingInput.__init__(self, **kw)
simple = list(set(['q', 'a'] + simple))
kw.setdefault('enter', 'q')
kw.setdefault('newline', False)
SpecifiedChoice.__init__(self, self._lines, simple=simple,
**kw)
self._states = initial or [0] * len(elements)
t = terminal
self._colors = colors or [t.white + t.bg_red, t.black + t.bg_green]
@property
def prompt(self):
text_post = copy.copy(self._text_post)
text_post[-1] += self.input_hint_string
col = lambda c: ColorString(' {} '.format(str(c+1)),
self._colors[self._states[c]])
return (self._text_pre + [[' ', col(i), ' ', el] for i, el in
enumerate(self._lines)] + text_post)
def process(self):
if self._input.isdigit():
self._toggle(int(self._input) - 1)
elif self._input == 'a':
self._toggle_all()
@property
def loop_value(self):
return self._states
def _toggle(self, index):
self._states[index] += 1
self._states[index] %= len(self._colors)
def _toggle_all(self):
for index in range(len(self._states)):
self._toggle(index)
def confirm(message, remove_text=False):
message = [str(message)] + ['Press return to continue.']
SingleCharSimpleChoice([''], text=message, remove_text=remove_text).read()
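# Added usage sketch (interactive; it needs an attached terminal from
# tek.io.terminal, so it is left commented out here):
#
#     if YesNo(['Delete the file?']).read():
#         pass                      # user confirmed ('y' is also the enter default)
#
#     colour = SpecifiedChoice(['red', 'green', 'blue'],
#                              text_pre=['Pick a colour:'], simple=['q']).read()
#     # returns the chosen string, or 'q' if the user typed that simple choice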
| tek/pytek | tek/user_input.py | Python | gpl-3.0 | 11,203 |
#@help:history {show|clear} - Shows or clears the terminal's command history.
from game.pythonapi import PyDisplay
try:
parameters
except NameError:
PyDisplay.write(terminal, 'Please specify either show or clear.')
else:
if parameters[0] == 'clear':
terminal.getHistory().clear()
terminal.setLine(0)
else:
history = terminal.getHistory()
PyDisplay.write(terminal, 'Command history:')
for s in history:
PyDisplay.write(terminal, ' ' + s)
| Rsgm/Hakd | core/assets/python/programs/rsgm/history.py | Python | mit | 458 |
# -*- coding: UTF-8 -*-
from django.views.generic.base import TemplateView
from django_simptools.shortcuts import create_paginated_page
from payway.orders.conf.settings import ORDERS_PER_PAGE
from payway.orders.models import Order
__author__ = 'Razzhivin Alexander'
__email__ = '[email protected]'
class OrderListView(TemplateView):
template_name = 'orders/list.html'
def get(self, request, *args, **kwargs):
orders_page = create_paginated_page(
query_set=Order.objects.filter(user=request.user).order_by("-id"),
page_number=request.GET.get('page') or 1,
objects_per_page=ORDERS_PER_PAGE
)
return self.render_to_response({'orders_page': orders_page}) | RANUX/django-payway | payway/orders/views/list.py | Python | bsd-2-clause | 723 |
#! python 3
""" Initiliases server defaults. """
from application import db
from application.models import User, Group
from application.functions import hash_password
from config import admin_username, admin_name, admin_password, admin_email, admin_group_name
# create admin user
# make sure the user hasn't already been added.
query = User.query.filter_by(username = admin_username)
if query.count() == 0:
add_user = User(username=admin_username,
name=admin_name,
password=hash_password(admin_password),
email=admin_email)
db.session.add(add_user)
db.session.commit()
print("Admin user added.")
else:
print("The user '{}' already exists.".format(admin_username))
# create admin group
query = Group.query.filter_by(group_name = admin_group_name)
if query.count() == 0:
add_group = Group(group_name = admin_group_name)
db.session.add(add_group)
db.session.commit()
print("Admin group added")
else:
print("The group '{}' already exists.".format(admin_group_name))
# add user admin to the admin group
# get user object
user = User.query.filter_by(username = admin_username).first()
group = Group.query.filter_by(group_name = admin_group_name).first()
in_group = False
# see if user admin is already in admin group.
for g in group.users:
if g.username == admin_username:
in_group = True
break
if not in_group:
group.users.append(user)
db.session.commit()
print("Added admin user to admin group.")
else:
print("Admin user already in admin group.") | evereux/flask_template | setup.py | Python | mit | 1,584 |
from hazelcast.serialization.bits import *
from hazelcast.protocol.builtin import FixSizedTypesCodec
from hazelcast.protocol.client_message import OutboundMessage, REQUEST_HEADER_SIZE, create_initial_buffer, RESPONSE_HEADER_SIZE
from hazelcast.protocol.builtin import StringCodec
from hazelcast.protocol.builtin import DataCodec
# hex: 0x021100
_REQUEST_MESSAGE_TYPE = 135424
# hex: 0x021101
_RESPONSE_MESSAGE_TYPE = 135425
_REQUEST_THREAD_ID_OFFSET = REQUEST_HEADER_SIZE
_REQUEST_LEASE_OFFSET = _REQUEST_THREAD_ID_OFFSET + LONG_SIZE_IN_BYTES
_REQUEST_TIMEOUT_OFFSET = _REQUEST_LEASE_OFFSET + LONG_SIZE_IN_BYTES
_REQUEST_REFERENCE_ID_OFFSET = _REQUEST_TIMEOUT_OFFSET + LONG_SIZE_IN_BYTES
_REQUEST_INITIAL_FRAME_SIZE = _REQUEST_REFERENCE_ID_OFFSET + LONG_SIZE_IN_BYTES
_RESPONSE_RESPONSE_OFFSET = RESPONSE_HEADER_SIZE
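# Layout note (added): the fixed-size part of the request is the client-message
# header followed by four 8-byte fields at the offsets computed above:
#   [header][thread_id][lease][timeout][reference_id]
# The variable-size name and key are appended as separate frames by
# encode_request() below.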
def encode_request(name, key, thread_id, lease, timeout, reference_id):
buf = create_initial_buffer(_REQUEST_INITIAL_FRAME_SIZE, _REQUEST_MESSAGE_TYPE)
FixSizedTypesCodec.encode_long(buf, _REQUEST_THREAD_ID_OFFSET, thread_id)
FixSizedTypesCodec.encode_long(buf, _REQUEST_LEASE_OFFSET, lease)
FixSizedTypesCodec.encode_long(buf, _REQUEST_TIMEOUT_OFFSET, timeout)
FixSizedTypesCodec.encode_long(buf, _REQUEST_REFERENCE_ID_OFFSET, reference_id)
StringCodec.encode(buf, name)
DataCodec.encode(buf, key, True)
return OutboundMessage(buf, True)
def decode_response(msg):
initial_frame = msg.next_frame()
return FixSizedTypesCodec.decode_boolean(initial_frame.buf, _RESPONSE_RESPONSE_OFFSET)
| hazelcast/hazelcast-python-client | hazelcast/protocol/codec/multi_map_try_lock_codec.py | Python | apache-2.0 | 1,548 |
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2015 FUJITSU LIMITED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# there are two representations of value and mask that this module deals with.
#
# "user"
# (value, mask) or value. the latter means no mask.
# value and mask are strings.
#
# "internal"
# value and mask are on-wire bytes.
# mask is None if no mask.
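#
# for example (added illustration): an IPv4 source match in "user" form might be
# ('192.168.0.0', '255.255.0.0'), or just '192.168.0.1' with no mask, while the
# "internal" form is the 4 on-wire bytes of the value plus, when present, 4 more
# bytes for the mask.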
import itertools
import struct
import ofproto_common
from ofproto_parser import msg_pack_into
from ryu.lib import addrconv
# -------------------------- Fujitsu code start -----------------------------
# For optical enhancing
import math
# -------------------------- Fujitsu code end -------------------------------
class TypeDescr(object):
pass
class IntDescr(TypeDescr):
def __init__(self, size):
self.size = size
def to_user(self, bin):
i = 0
for x in xrange(self.size):
c = bin[:1]
i = i * 256 + ord(c)
bin = bin[1:]
return i
def from_user(self, i):
bin = ''
for x in xrange(self.size):
bin = chr(i & 255) + bin
i /= 256
return bin
# -------------------------- Fujitsu code start -----------------------------
# For optical enhancing
def to_user_exp(self, bin ,size):
i = 0
for x in xrange(size):
c = bin[:1]
i = i * 256 + ord(c)
bin = bin[1:]
return i
# -------------------------- Fujitsu code end -------------------------------
Int1 = IntDescr(1)
Int2 = IntDescr(2)
Int3 = IntDescr(3)
Int4 = IntDescr(4)
Int8 = IntDescr(8)
Int32 = IntDescr(32)
# -------------------------- Fujitsu code start -----------------------------
# For optical enhancing
Int5 = IntDescr(5)
Int14 = IntDescr(14)
# -------------------------- Fujitsu code end -------------------------------
class MacAddr(TypeDescr):
size = 6
to_user = addrconv.mac.bin_to_text
from_user = addrconv.mac.text_to_bin
class IPv4Addr(TypeDescr):
size = 4
to_user = addrconv.ipv4.bin_to_text
from_user = addrconv.ipv4.text_to_bin
class IPv6Addr(TypeDescr):
size = 16
to_user = addrconv.ipv6.bin_to_text
from_user = addrconv.ipv6.text_to_bin
class UnknownType(TypeDescr):
import base64
to_user = staticmethod(base64.b64encode)
from_user = staticmethod(base64.b64decode)
OFPXMC_OPENFLOW_BASIC = 0x8000
OFPXMC_EXPERIMENTER = 0xffff
class _OxmClass(object):
def __init__(self, name, num, type_):
self.name = name
self.oxm_type = num | (self._class << 7)
self.type = type_
class OpenFlowBasic(_OxmClass):
_class = OFPXMC_OPENFLOW_BASIC
def __init__(self, name, num, type_):
super(OpenFlowBasic, self).__init__(name, num, type_)
self.num = self.oxm_type
class OpenFlowExprtimenter(object):
_class = OFPXMC_EXPERIMENTER
def __init__(self, name, num, type_):
self.name = name
self.num = num | (self._class << 7)
self.type = type_
class _Experimenter(_OxmClass):
_class = OFPXMC_EXPERIMENTER
class ONFExperimenter(_Experimenter):
experimenter_id = ofproto_common.ONF_EXPERIMENTER_ID
def __init__(self, name, num, type_):
super(ONFExperimenter, self).__init__(name, 0, type_)
self.num = (ONFExperimenter, num)
self.exp_type = num
def generate(modname):
import sys
import string
import functools
mod = sys.modules[modname]
def add_attr(k, v):
setattr(mod, k, v)
for i in mod.oxm_types:
uk = string.upper(i.name)
if isinstance(i.num, tuple):
continue
oxm_class = i.num >> 7
if oxm_class == OFPXMC_OPENFLOW_BASIC:
ofpxmt = i.num & 0x3f
td = i.type
add_attr('OFPXMT_OFB_' + uk, ofpxmt)
add_attr('OXM_OF_' + uk, mod.oxm_tlv_header(ofpxmt, td.size))
add_attr('OXM_OF_' + uk + '_W', mod.oxm_tlv_header_w(ofpxmt, td.size))
elif oxm_class == OFPXMC_EXPERIMENTER:
ofpxmt = i.num & 0x3f
td = i.type
add_attr('OFPXMT_OFB_' + uk, ofpxmt)
add_attr('OXM_OF_' + uk, mod.oxm_tlv_header_ex(ofpxmt, td.size))
add_attr('OXM_OF_' + uk + '_W', mod.oxm_tlv_header_ex_w(ofpxmt, td.size))
else:
continue
name_to_field = dict((f.name, f) for f in mod.oxm_types)
num_to_field = dict((f.num, f) for f in mod.oxm_types)
add_attr('oxm_from_user', functools.partial(from_user, name_to_field))
add_attr('oxm_to_user', functools.partial(to_user, num_to_field))
add_attr('_oxm_field_desc', functools.partial(_field_desc, num_to_field))
add_attr('oxm_normalize_user', functools.partial(normalize_user, mod))
add_attr('oxm_parse', functools.partial(parse, mod))
add_attr('oxm_serialize', functools.partial(serialize, mod))
add_attr('oxm_to_jsondict', to_jsondict)
add_attr('oxm_from_jsondict', from_jsondict)
def from_user(name_to_field, name, user_value):
try:
f = name_to_field[name]
t = f.type
num = f.num
except KeyError:
t = UnknownType
if name.startswith('field_'):
num = int(name.split('_')[1])
else:
raise KeyError('unknown match field ' + name)
    # the 'list' case below is a bit of a hack; json.dumps silently maps
    # python tuples into json lists.
if isinstance(user_value, (tuple, list)):
(value, mask) = user_value
else:
value = user_value
mask = None
if value is not None:
value = t.from_user(value)
if mask is not None:
mask = t.from_user(mask)
return num, value, mask
def to_user(num_to_field, n, v, m):
try:
f = num_to_field[n]
t = f.type
name = f.name
except KeyError:
t = UnknownType
name = 'field_%d' % n
if v is not None:
if hasattr(t, 'size') and t.size != len(v):
# -------------------------- Fujitsu code start -----------------------------
# For optical enhancing
# raise Exception(
# 'Unexpected OXM payload length %d for %s (expected %d)'
# % (len(v), name, t.size))
# value = t.to_user(v)
if name == "odu_sigid":
value = t.to_user_exp(v, len(v))
else:
raise Exception(
'Unexpected OXM payload length %d for %s (expected %d)'
% (len(v), name, t.size))
else:
value = t.to_user(v)
# -------------------------- Fujitsu code end -------------------------------
else:
value = None
if m is None:
user_value = value
else:
user_value = (value, t.to_user(m))
return name, user_value
def _field_desc(num_to_field, n):
return num_to_field[n]
def normalize_user(mod, k, uv):
(n, v, m) = mod.oxm_from_user(k, uv)
# apply mask
if m is not None:
v = ''.join(chr(ord(x) & ord(y)) for (x, y) in itertools.izip(v, m))
(k2, uv2) = mod.oxm_to_user(n, v, m)
assert k2 == k
return (k2, uv2)
# -------------------------- Fujitsu code start -----------------------------
# For optical enhancing
OFP_EXPERIMENTER_FIELD_CHG = 38
# -------------------------- Fujitsu code end -------------------------------
def parse(mod, buf, offset):
hdr_pack_str = '!I'
(header, ) = struct.unpack_from(hdr_pack_str, buf, offset)
hdr_len = struct.calcsize(hdr_pack_str)
oxm_type = header >> 9 # class|field
oxm_hasmask = mod.oxm_tlv_header_extract_hasmask(header)
len = mod.oxm_tlv_header_extract_length(header)
oxm_class = oxm_type >> 7
if oxm_class == OFPXMC_EXPERIMENTER:
exp_hdr_pack_str = '!I' # experimenter_id
(exp_id, ) = struct.unpack_from(exp_hdr_pack_str, buf,
offset + hdr_len)
exp_hdr_len = struct.calcsize(exp_hdr_pack_str)
if exp_id == ofproto_common.ONF_EXPERIMENTER_ID:
# -------------------------- Fujitsu code start -----------------------------
# For optical enhancing
'''
onf_exp_type_pack_str = '!H'
(exp_type, ) = struct.unpack_from(onf_exp_type_pack_str, buf,
offset + hdr_len + exp_hdr_len)
exp_hdr_len += struct.calcsize(onf_exp_type_pack_str)
num = (ONFExperimenter, exp_type)
'''
num = oxm_type + OFP_EXPERIMENTER_FIELD_CHG
# -------------------------- Fujitsu code end -------------------------------
else:
num = oxm_type
exp_hdr_len = 0
value_offset = offset + hdr_len + exp_hdr_len
# -------------------------- Fujitsu code start -----------------------------
# For optical enhancing
# value_len = len - exp_hdr_len
value_len = len
# -------------------------- Fujitsu code end -------------------------------
value_pack_str = '!%ds' % value_len
assert struct.calcsize(value_pack_str) == value_len
(value, ) = struct.unpack_from(value_pack_str, buf, value_offset)
if oxm_hasmask:
(mask, ) = struct.unpack_from(value_pack_str, buf,
value_offset + value_len)
else:
mask = None
# -------------------------- Fujitsu code start -----------------------------
# For optical enhancing
# field_len = hdr_len + (header & 0xff)
field_len = hdr_len + (header & 0xff) + exp_hdr_len
# -------------------------- Fujitsu code end -------------------------------
return num, value, mask, field_len
def serialize(mod, n, value, mask, buf, offset):
exp_hdr = bytearray()
if isinstance(n, tuple):
(cls, exp_type) = n
desc = mod._oxm_field_desc(n)
assert issubclass(cls, _Experimenter)
assert isinstance(desc, cls)
assert cls is ONFExperimenter
onf_exp_hdr_pack_str = '!IH' # experimenter_id, exp_type
msg_pack_into(onf_exp_hdr_pack_str, exp_hdr, 0,
cls.experimenter_id, exp_type)
assert len(exp_hdr) == struct.calcsize(onf_exp_hdr_pack_str)
n = desc.oxm_type
assert (n >> 7) == OFPXMC_EXPERIMENTER
exp_hdr_len = len(exp_hdr)
value_len = len(value)
if mask:
assert value_len == len(mask)
pack_str = "!I%ds%ds%ds" % (exp_hdr_len, value_len, len(mask))
msg_pack_into(pack_str, buf, offset,
(n << 9) | (1 << 8) | (exp_hdr_len + value_len * 2),
bytes(exp_hdr), value, mask)
else:
pack_str = "!I%ds%ds" % (exp_hdr_len, value_len,)
msg_pack_into(pack_str, buf, offset,
(n << 9) | (0 << 8) | (exp_hdr_len + value_len),
bytes(exp_hdr), value)
return struct.calcsize(pack_str)
def to_jsondict(k, uv):
if isinstance(uv, tuple):
(value, mask) = uv
else:
value = uv
mask = None
return {"OXMTlv": {"field": k, "value": value, "mask": mask}}
def from_jsondict(j):
tlv = j['OXMTlv']
field = tlv['field']
value = tlv['value']
mask = tlv.get('mask')
if mask is None:
uv = value
else:
uv = (value, mask)
return (field, uv)
| o3project/ryu-oe | ryu/ofproto/oxm_fields.py | Python | apache-2.0 | 12,359 |
""" Unused so far"""
class InlineIMG():
def __init__(self, path):
self.path = path
self.is_local = False
self.id = abs(hash(self.path))
self.name = self.id
self.mime_object = self.makeMIME()
self.html_node = self.html_node()
def __repr__(self):
""" The representation of the image. It is also the fallback text for clients that cannot display HTML"""
if self.is_local:
fname = mask_local_path(self.path)
else:
fname = self.path
return '<img {} should be here'.format(fname)
def decide_local_path_or_external():
if can_load_local:
self.is_local = True
elif can_load_as_url:
self.is_local = False
else:
raise Exception('Invalid')
def mask_local_path():
return ".../" + self.path.split('/')[-1]
def html_node(self):
return '<img src="cid:{}" title="{}"/>'.format(self.id, self.name)
def makeMIME(self):
mime_object.add_header('Content-ID', '<{}>'.format(self.id))
email.encoders.encode_base64(content_object['mime_object'])
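# Added usage sketch (hypothetical; this class is unused by yagmail so far):
#
#     img = InlineIMG('logo.png')
#     html_body = '<p>Hello</p>' + img.html_node
#     # attach img.mime_object to a multipart/related message alongside html_body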
| bussiere/yagmail | yagmail/image.py | Python | mit | 1,180 |
"""
Markdown-online
Put your local markdowns online.
"""
from setuptools import setup, find_packages
setup(
name='mdonline',
version='0.1',
long_description=__doc__,
packages=find_packages(), # packages=['application'],
include_package_data=True, # look for a MANIFEST.in file
zip_safe=False,
install_requires=[
'Flask==0.10.1'
]
)
# $> python setup.py install
# $> python setup.py develop
# develop just installs a link to the project in the site-packages folder
| fengsp/markdown-online | setup.py | Python | bsd-3-clause | 495 |
# coding: utf-8
from jumpserver.context_processor import default_interface
from django.conf import settings
class ObjectDict(dict):
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError("No such attribute: " + name)
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
if name in self:
del self[name]
else:
raise AttributeError("No such attribute: " + name)
def get_interface_setting():
if not settings.XPACK_ENABLED:
return default_interface
from xpack.plugins.interface.models import Interface
return Interface.get_interface_setting()
def get_login_title():
return get_interface_setting()['login_title']
| jumpserver/jumpserver | apps/settings/utils/common.py | Python | gpl-3.0 | 806 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from os.path import abspath, basename, dirname, join, normpath
from sys import path
########## PATH CONFIGURATION
# Absolute filesystem path to the Django project directory:
DJANGO_ROOT = dirname(dirname(abspath(__file__)))
# Absolute filesystem path to the top-level project folder:
SITE_ROOT = dirname(DJANGO_ROOT)
# Site name:
SITE_NAME = basename(DJANGO_ROOT)
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
path.append(DJANGO_ROOT)
########## END PATH CONFIGURATION
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('Your Name', '[email protected]'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
########## END MANAGER CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
########## END DATABASE CONFIGURATION
########## GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
########## END GENERAL CONFIGURATION
########## MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = normpath(join(SITE_ROOT, 'media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
########## END MEDIA CONFIGURATION
########## STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = normpath(join(SITE_ROOT, 'assets'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
normpath(join(SITE_ROOT, 'static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
########## END STATIC FILE CONFIGURATION
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key should only be used for development and testing.
SECRET_KEY = r"6iuv2ksp4%q7a9_l=hoco8iz!u7-us$fd(l()+)&8okyoaws!0"
########## END SECRET CONFIGURATION
########## SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
########## END SITE CONFIGURATION
########## FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
normpath(join(SITE_ROOT, 'fixtures')),
)
########## END FIXTURE CONFIGURATION
########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
normpath(join(SITE_ROOT, 'templates')),
)
########## END TEMPLATE CONFIGURATION
########## MIDDLEWARE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes
MIDDLEWARE_CLASSES = (
# Default Django middleware.
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
########## END MIDDLEWARE CONFIGURATION
########## URL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = '%s.urls' % SITE_NAME
########## END URL CONFIGURATION
########## APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin panel and documentation:
'django.contrib.admin',
# 'django.contrib.admindocs',
)
# Apps specific for this project go here.
LOCAL_APPS = (
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + LOCAL_APPS
########## END APP CONFIGURATION
########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
########## END LOGGING CONFIGURATION
########## WSGI CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = '%s.wsgi.application' % SITE_NAME
########## END WSGI CONFIGURATION
########## PIPELINE CONFIGURATION
INSTALLED_APPS += (
'pipeline',
)
PIPELINE_COMPILERS = (
'pipeline.compilers.less.LessCompiler',
'pipeline.compilers.coffee.CoffeeScriptCompiler',
)
PIPELINE_CSS = {
'project_name': {
'source_filenames': (
'css/project_name.less',
),
'output_filename': 'css/project_name.css',
'extra_context': {
'media': 'screen,projection',
},
}
}
PIPELINE_JS = {
'vendor': {
'source_filenames': (
'vendor/bootstrap/dist/js/bootstrap.js',
),
'output_filename': 'js/vendor.js',
},
'project_name': {
'source_filenames': (
'js/*.coffee',
),
'output_filename': 'js/project_name.js'
}
}
PIPELINE_LESS_ARGUMENTS = '-x --yui-compress'
PIPELINE_JS_COMPRESSOR = 'pipeline.compressors.yuglify.YuglifyCompressor'
PIPELINE_YUGLIFY_JS_ARGUMENTS = '--terminal'
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
########## END PIPELINE CONFIGURATION
########## TEST CONFIGURATION
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
########## END TEST CONFIGURATION
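# Illustrative pattern (assumed, not part of this file): environment-specific
# settings modules typically extend this base module, e.g. in settings/dev.py:
#   from .base import *
#   DEBUG = True
#   TEMPLATE_DEBUG = DEBUG
#   DATABASES['default']['ENGINE'] = 'django.db.backends.sqlite3'
#   DATABASES['default']['NAME'] = normpath(join(DJANGO_ROOT, 'dev.sqlite3'))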
| pythonvlc/workshop-deploying-django | coffeecake/coffeecake/settings/base.py | Python | mit | 8,345 |
from . import vec3_socket
from . import matrix_socket
from . import stereo_mode_socket
from . import camera_socket
from . import screen_socket
from . import avango_node_socket
def register():
# register sockets
vec3_socket.register()
matrix_socket.register()
stereo_mode_socket.register()
camera_socket.register()
screen_socket.register()
avango_node_socket.register()
def unregister():
vec3_socket.unregister()
matrix_socket.unregister()
stereo_mode_socket.unregister()
camera_socket.unregister()
screen_socket.unregister()
avango_node_socket.unregister()
| jakobharlan/avango | avango-blender/blender-addon/sockets/__init__.py | Python | lgpl-3.0 | 613 |
# Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
license1 = """// Copyright """
license2 = """ The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
"""
license3 = """# Copyright """
license4 = """ The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
"""
exceptions = [
"rt/rust_android_dummy.cpp", # BSD, chromium
"rt/rust_android_dummy.h", # BSD, chromium
"rt/isaac/randport.cpp", # public domain
"rt/isaac/rand.h", # public domain
"rt/isaac/standard.h", # public domain
"libsync/mpsc_queue.rs", # BSD
"libsync/spsc_queue.rs", # BSD
"libsync/mpmc_bounded_queue.rs", # BSD
"libsync/mpsc_intrusive.rs", # BSD
"test/bench/shootout-binarytrees.rs", # BSD
"test/bench/shootout-fannkuch-redux.rs", # BSD
"test/bench/shootout-mandelbrot.rs", # BSD
"test/bench/shootout-meteor.rs", # BSD
"test/bench/shootout-pidigits.rs", # BSD
"test/bench/shootout-regex-dna.rs", # BSD
]
def check_license(name, contents):
# Whitelist check
for exception in exceptions:
if name.endswith(exception):
return True
# Xfail check
firstlineish = contents[:100]
if firstlineish.find("ignore-license") != -1:
return True
# License check
boilerplate = contents[:500]
if (boilerplate.find(license1) == -1 or boilerplate.find(license2) == -1) and \
(boilerplate.find(license3) == -1 or boilerplate.find(license4) == -1):
return False
return True
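# Illustrative caller (hypothetical, not part of this file):
#   contents = open(path).read()
#   if not check_license(path, contents):
#       print "%s: missing license boilerplate" % path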
| stepancheg/rust-ide-rust | src/etc/licenseck.py | Python | apache-2.0 | 2,653 |
import transmission
import matplotlib.pyplot as plt, numpy as np
from astropy.io import ascii
files = ['wasp94_140801.obs', 'wasp94_140805.obs', 'wasp94_140809.obs']
toshow = ['k']
# an empty dictionary of observations
spectra = {}
# an empty dictionary of bins (which will each contain a list of spectra)
bins = {}
# create a figure
plt.figure('three spectra')
gs = plt.matplotlib.gridspec.GridSpec(1,1)
sharex=None
ax = {}
count =0
fort = ascii.read("lambda_1500K_g10_wTiOVO.dat")
fort_lam = fort['wavelength']*10000
fort_radius= fort['radiusinkm']
fort_radius = fort_radius[fort_lam.argsort()]
fort_lam.sort()
r_jupiter = 7149200000 # cm
r_sun = 69550000000 # cm
fort_rp_over_rs = fort_radius*100000.0*1.53/1.37/(1.44*r_sun)
fort_depth = 100*fort_rp_over_rs**2
fort_depth = fort_depth* 1.2/np.mean(fort_depth[(fort_lam >7000)*(fort_lam<10000)])
for file in files:
# load the transmission spectrum
t = transmission.load(file, 100, 'fixedGeometry')
t.load()
spectra[file] = t
w = np.array([b.wavelength for b in t.bins])/t.unit
for k in toshow:
try:
ax[k]
except:
ax[k] = plt.subplot(gs[count], sharex=sharex)
count += 1
ax[k].set_ylabel(k)
sharex = ax[k]
p = np.array([b.tm.planet.__dict__[k].value for b in t.bins])
u = np.array([b.tm.planet.__dict__[k].uncertainty for b in t.bins])
print(file)
print(k)
print(2*p*u*1e6)
print('LD')
print(([b.tm.star.__dict__['u1'].value for b in t.bins]))
ax[k].errorbar(w, p, u, marker='o', markersize=10, linewidth=3, elinewidth=3, capsize=5, capthick=3, alpha=0.25)
for i in range(len(w)):
this = {k:p[i], k+'_uncertainty':u[i]}
try:
bins[w[i]].append(this)
except:
bins[w[i]] = [this]
plt.draw()
a = raw_input('!!!')
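# Combine the per-night values: for each wavelength bin, take the
# inverse-variance weighted mean of the fitted parameter 'k' and the
# corresponding propagated uncertainty.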
binned, unc = {}, {}
for k in bins.keys():
values = np.array([b['k'] for b in bins[k]])
uncertainties = np.array([b['k_uncertainty'] for b in bins[k]])
binned[k] = np.sum(values/uncertainties**2)/np.sum(1.0/uncertainties**2)
unc[k] = np.sqrt(1/np.sum(1.0/uncertainties**2))
k = binned.keys()
ax = plt.gca()
ax.errorbar(np.array(k), np.array([binned[i] for i in k]), np.array([unc[i] for i in k]), marker='o', markersize=10, linewidth=0, elinewidth=3, capsize=5, capthick=3, alpha=0.5, color='black')
ax = plt.gca()
ax.plot(fort_lam/10.0, np.sqrt(fort_depth/100.0), zorder=-100, alpha=0.25, color='gray')
| zkbt/mosasaurus | junk/combine.py | Python | mit | 2,521 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""Miscellaneous functions
"""
import numpy as np
from ..ext.six import string_types
###############################################################################
# These fast normal calculation routines are adapted from mne-python
def _fast_cross_3d(x, y):
"""Compute cross product between list of 3D vectors
Much faster than np.cross() when the number of cross products
becomes large (>500). This is because np.cross() methods become
less memory efficient at this stage.
Parameters
----------
x : array
Input array 1.
y : array
Input array 2.
Returns
-------
z : array
Cross product of x and y.
Notes
-----
x and y must both be 2D row vectors. One must have length 1, or both
lengths must match.
"""
assert x.ndim == 2
assert y.ndim == 2
assert x.shape[1] == 3
assert y.shape[1] == 3
assert (x.shape[0] == 1 or y.shape[0] == 1) or x.shape[0] == y.shape[0]
if max([x.shape[0], y.shape[0]]) >= 500:
return np.c_[x[:, 1] * y[:, 2] - x[:, 2] * y[:, 1],
x[:, 2] * y[:, 0] - x[:, 0] * y[:, 2],
x[:, 0] * y[:, 1] - x[:, 1] * y[:, 0]]
else:
return np.cross(x, y)
def _calculate_normals(rr, tris):
"""Efficiently compute vertex normals for triangulated surface"""
# ensure highest precision for our summation/vectorization "trick"
rr = rr.astype(np.float64)
# first, compute triangle normals
r1 = rr[tris[:, 0], :]
r2 = rr[tris[:, 1], :]
r3 = rr[tris[:, 2], :]
tri_nn = _fast_cross_3d((r2 - r1), (r3 - r1))
# Triangle normals and areas
size = np.sqrt(np.sum(tri_nn * tri_nn, axis=1))
size[size == 0] = 1.0 # prevent ugly divide-by-zero
tri_nn /= size[:, np.newaxis]
npts = len(rr)
# the following code replaces this, but is faster (vectorized):
#
# for p, verts in enumerate(tris):
# nn[verts, :] += tri_nn[p, :]
#
nn = np.zeros((npts, 3))
for verts in tris.T: # note this only loops 3x (number of verts per tri)
for idx in range(3): # x, y, z
nn[:, idx] += np.bincount(verts.astype(np.int32),
tri_nn[:, idx], minlength=npts)
size = np.sqrt(np.sum(nn * nn, axis=1))
size[size == 0] = 1.0 # prevent ugly divide-by-zero
nn /= size[:, np.newaxis]
return nn
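# Illustrative example (not part of the library): for a unit square in the z=0
# plane triangulated into two triangles, every vertex normal points along +z.
#   rr = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]], float)
#   tris = np.array([[0, 1, 2], [0, 2, 3]])
#   _calculate_normals(rr, tris)    -> four rows, each [0., 0., 1.]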
def resize(image, shape, kind='linear'):
"""Resize an image
Parameters
----------
image : ndarray
Array of shape (N, M, ...).
shape : tuple
2-element shape.
kind : str
Interpolation, either "linear" or "nearest".
Returns
-------
scaled_image : ndarray
New image, will have dtype np.float64.
"""
image = np.array(image, float)
shape = np.array(shape, int)
if shape.ndim != 1 or shape.size != 2:
raise ValueError('shape must have two elements')
if image.ndim < 2:
raise ValueError('image must have two dimensions')
if not isinstance(kind, string_types) or kind not in ('nearest', 'linear'):
        raise ValueError('kind must be "nearest" or "linear"')
r = np.linspace(0, image.shape[0] - 1, shape[0])
c = np.linspace(0, image.shape[1] - 1, shape[1])
if kind == 'linear':
r_0 = np.floor(r).astype(int)
c_0 = np.floor(c).astype(int)
r_1 = r_0 + 1
c_1 = c_0 + 1
top = (r_1 - r)[:, np.newaxis]
bot = (r - r_0)[:, np.newaxis]
lef = (c - c_0)[np.newaxis, :]
rig = (c_1 - c)[np.newaxis, :]
c_1 = np.minimum(c_1, image.shape[1] - 1)
r_1 = np.minimum(r_1, image.shape[0] - 1)
for arr in (top, bot, lef, rig):
arr.shape = arr.shape + (1,) * (image.ndim - 2)
out = top * rig * image[r_0][:, c_0, ...]
out += bot * rig * image[r_1][:, c_0, ...]
out += top * lef * image[r_0][:, c_1, ...]
out += bot * lef * image[r_1][:, c_1, ...]
else: # kind == 'nearest'
r = np.round(r).astype(int)
c = np.round(c).astype(int)
out = image[r][:, c, ...]
return out
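# Illustrative example (not part of the library): downscale a 4x6 RGB image to
# 2x3 with bilinear interpolation.
#   img = np.random.rand(4, 6, 3)
#   small = resize(img, (2, 3), kind='linear')
#   small.shape                     -> (2, 3, 3)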
| bollu/vispy | vispy/geometry/calculations.py | Python | bsd-3-clause | 4,276 |
# Awn Applet Library - Simplified APIs for programming applets for Awn.
#
# Copyright (C) 2007 - 2008 Pavel Panchekha <[email protected]>
# 2008 - 2010 onox <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import pygtk
pygtk.require("2.0")
import gtk
from desktopagnostic import config, Color, vfs
import awn
from awn.extras import _, configbinder, __version__
import cairo
try:
import cPickle as pickle
except ImportError:
import pickle
import glib
import gobject
___file___ = sys.argv[0]
# Basically, __file__ = current file location
# sys.argv[0] = file name or called file
# Since awnlib is in site-packages, __file__ refers to something there
# For relative paths to work, we need a way of determining where the
# User applet is. So this bit of magic works.
bug_report_link = "https://launchpad.net/awn-extras/+filebug"
def create_frame(parent, label):
"""Create a frame with a bold title. To be used in a preferences window.
"""
vbox = gtk.VBox(spacing=6)
parent.add(vbox)
label = gtk.Label("<b>" + label + "</b>")
label.set_use_markup(True)
label.props.xalign = 0.0
vbox.add(label)
alignment = gtk.Alignment()
alignment.set_padding(0, 0, 12, 0)
vbox.add(alignment)
frame_vbox = gtk.VBox(spacing=6)
alignment.add(frame_vbox)
return frame_vbox
def add_cell_renderer_text(combobox):
"""Add a gtk.CellRendererText to the combobox. To be used if the combobox
has a gtk.ListStore model with a string as the first column.
"""
text = gtk.CellRendererText()
combobox.pack_start(text, True)
combobox.add_attribute(text, "text", 0)
def is_required_version(version, required_version):
"""Return True if version is higher than or equal to
required_version, False otherwise.
"""
for i, j in zip(version, required_version):
if i > j:
return True
elif i < j:
return False
return True
class KeyringError(Exception):
pass
class KeyringCancelledError(KeyringError):
pass
class KeyringNoMatchError(KeyringError):
pass
class Dialogs:
__special_dialogs = ("menu", "about", "preferences")
def __init__(self, parent):
"""Create an instance of Dialogs. Creates a context menu,
and an About dialog, which is added to the menu.
@param parent: The parent applet of the dialogs instance.
@type parent: L{Applet}
"""
self.__parent = parent
self.__register = {}
self.__current = None
self.menu = self.new("menu")
meta_keys = self.__parent.meta.keys()
# Create the About dialog if the applet provides the necessary metadata
if all([key in meta_keys for key in ("name", "author", "copyright-year")]):
about_dialog = self.new("about")
about_item = gtk.ImageMenuItem(_("_About %s") % self.__parent.meta["name"])
about_item.props.always_show_image = True
about_item.set_image(gtk.image_new_from_stock(gtk.STOCK_ABOUT, gtk.ICON_SIZE_MENU))
self.menu.append(about_item)
about_item.connect("activate", lambda w: self.toggle("about"))
about_item.show()
def connect_signals(self, parent):
def popup_menu_cb(widget, event):
self.toggle("menu", once=True, event=event)
parent.connect("context-menu-popup", popup_menu_cb)
def clicked_cb(widget, dialog_name):
if dialog_name in self.__register:
self.toggle(dialog_name)
parent.connect("clicked", clicked_cb, "main")
parent.connect("middle-clicked", clicked_cb, "secondary")
def new(self, dialog, title=None, focus=True):
"""Create a new AWN dialog.
@param dialog: The name to register the dialog under.
@type dialog: C{string}
@param title: The title of the new dialog
@type title: C{string}
@param focus: Whether to force the focus
@type focus: C{bool}
@return: The new menu or dialog
@rtype: C{gtk.Menu}, C{function}, or C{awn.AppletDialog}
"""
if dialog == "menu":
dlog = self.__parent.create_default_menu()
elif dialog == "about":
dlog = self.AboutDialog(self.__parent)
elif dialog == "preferences":
dlog = self.PreferencesDialog(self.__parent)
position = len(self.menu)
if "about" in self.__register:
position = position - 1
prefs_item = gtk.ImageMenuItem(stock_id=gtk.STOCK_PREFERENCES)
prefs_item.props.always_show_image = True
self.menu.insert(prefs_item, position)
prefs_item.connect("activate", lambda w: self.toggle("preferences", "show"))
prefs_item.show()
else:
dlog = awn.Dialog(self.__parent)
self.register(dialog, dlog, focus)
if dialog not in self.__special_dialogs and title:
dlog.set_title(" " + title + " ")
return dlog
def register(self, dialog, dlog, focus=True):
"""Register a dialog.
Once a name has been registered, it cannot be registered again
until the dialog is explicitly unregistered.
@param dialog: The name to use for the dialog.
@type dialog: C{string}
@param dlog: The actual dialog or menu or function.
@type dlog: C{function}, C{gtk.Menu}, or C{awn.AppletDialog}
@param focus: True if the dialog should be hidden when focus is lost, False otherwise.
@type focus: C{bool}
"""
if dialog in self.__register:
raise RuntimeError("Dialog '%s' already registered" % dialog)
if focus and dialog not in self.__special_dialogs and isinstance(dlog, awn.Dialog):
dlog.props.hide_on_unfocus = focus
self.__register[dialog] = dlog
def unregister(self, dialog):
"""Unregister a dialog.
@param dialog: The name to use for the dialog. Must not be equal
to the name of any of the special dialogs.
@type dialog: C{string}
"""
if dialog not in self.__register:
raise RuntimeError("Dialog '%s' not registered" % dialog)
if dialog in self.__special_dialogs:
raise RuntimeError("Unregistering special dialog '%s' is forbidden" % dialog)
if dialog == self.__current:
self.__register[dialog].hide()
self.__current = None
del self.__register[dialog]
def toggle(self, dialog, force="", once=False, event=None):
"""Show or hide a dialog.
@param dialog: The dialog that should be shown.
@type dialog: C{string}
@param force: "Hide" or "Show". Whether to force the hiding or showing
of the dialog in question.
@type force: C{string}
@param once: Only show or hide one dialog. If a dialog is already
opened, and you request that another dialog be toggled, only the
open one is hidden. False by default.
@type once: C{bool}
@param event: The event that triggered the toggle.
@type event: C{gdk.Event}
"""
force = force.lower()
assert force in ("hide", "show", ""), "Force must be \"hide\", \"show\", or \"\""
assert dialog in self.__register, "Dialog '%s' must be registered" % dialog
if dialog == "menu":
self.show_menu(self.__parent, event)
elif dialog == "about":
self.__register["about"].show()
self.__register["about"].present()
else:
if force == "hide" or (self.__register[dialog].is_active() and force != "show"):
self.__register[dialog].hide()
self.__current = None
# Because the dialog is now hidden, show the tooltip again
self.__parent.tooltip.show()
else:
self.__parent.tooltip.hide()
if self.__current is not None and self.__current not in self.__special_dialogs \
and dialog != self.__current:
current = self.__register[self.__current]
current_was_active = current.is_active()
current.hide()
if current_was_active and once:
self.__current = None
return
self.__register[dialog].show_all()
self.__current = dialog
if dialog == "preferences":
self.__register[dialog].present()
def show_menu(self, parent, event):
self.__register["menu"].show()
parent.popup_gtk_menu(self.__register["menu"], event.button, event.time)
def hide(self):
"""Hide the currently visible dialog.
"""
if self.__current is not None:
self.__register[self.__current].hide()
self.__current = None
def is_visible(self, dialog):
"""Return True if the specified dialog is visible, False otherwise.
"""
assert dialog in self.__register, "Dialog '%s' must be registered" % dialog
return self.__register[dialog].is_active()
class BaseDialog:
"""Base class for dialogs. Sets and updates the icon and hides
        the dialog instead of letting it be destroyed.
"""
def __init__(self, parent):
self.__parent = parent
if "logo" in parent.meta:
self.update_logo_icon()
parent.connect_size_changed(self.update_logo_icon)
elif "theme" in parent.meta:
self.set_icon_name(parent.meta["theme"])
# Connect some signals to be able to hide the window
self.connect("response", self.response_event)
self.connect("delete_event", gtk.Widget.hide_on_delete)
def response_event(self, widget, response):
if response < 0:
self.hide()
def update_logo_icon(self):
"""Update the logo to be of the same height as the panel.
"""
self.set_icon_from_file(self.__parent.meta["logo"])
class AboutDialog(BaseDialog, gtk.AboutDialog):
"""Applet's About dialog.
"""
def __init__(self, parent):
gtk.AboutDialog.__init__(self)
Dialogs.BaseDialog.__init__(self, parent)
self.__parent = parent
self.set_name(parent.meta["name"])
if "version" in parent.meta:
self.set_version(parent.meta["version"])
if "description" in parent.meta:
self.set_comments(parent.meta["description"])
copyright_info = (parent.meta["copyright-year"], parent.meta["author"])
self.set_copyright("Copyright \xc2\xa9 %s %s" % copyright_info)
if "authors" in parent.meta:
self.set_authors(parent.meta["authors"])
if "artists" in parent.meta:
self.set_artists(parent.meta["artists"])
if "logo" in parent.meta:
self.set_logo(gtk.gdk.pixbuf_new_from_file_at_size( \
parent.meta["logo"], 48, 48))
elif "theme" in parent.meta:
self.set_logo_icon_name(parent.meta["theme"])
class PreferencesDialog(BaseDialog, gtk.Dialog):
"""A Dialog window that has the title "<applet's name> Preferences",
uses the applet's logo as its icon and has a Close button.
"""
def __init__(self, parent):
gtk.Dialog.__init__(self, flags=gtk.DIALOG_NO_SEPARATOR)
Dialogs.BaseDialog.__init__(self, parent)
self.__parent = parent
self.set_resizable(False)
self.set_border_width(5)
# This is a window title, %s is an applet's name.
self.set_title(_("%s Preferences") % parent.meta["name"])
self.add_button(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE)
class Tooltip:
def __init__(self, parent):
"""Create a new Tooltip object.
@param parent: The parent applet of the tooltip instance.
@type parent: L{Applet}
"""
self.__parent = parent
self.__tooltip = parent.get_icon().get_tooltip()
self.set(parent.meta["name"])
self.disable_toggle_on_click()
if parent.meta.has_option("no-tooltip"):
self.__tooltip.props.smart_behavior = False
def disable_toggle_on_click(self):
self.__tooltip.props.toggle_on_click = False
def is_visible(self):
return (self.__tooltip.flags() & gtk.VISIBLE) != 0
def show(self):
"""Show the applet tooltip.
"""
self.__tooltip.show()
def hide(self):
"""Hide the applet tooltip.
"""
self.__tooltip.hide()
def set(self, text):
"""Set the applet tooltip.
@param text: The new tooltip text.
@type text: C{string}
"""
self.__parent.set_tooltip_text(text)
def connect_becomes_visible(self, callback):
assert callable(callback)
self.__tooltip.connect("map-event", lambda w, e: callback())
class Icon:
APPLET_SIZE = "applet-size"
def __init__(self, parent):
"""Create a new Icon object.
@param parent: The parent applet of the icon instance.
@type parent: L{Applet}
"""
self.__parent = parent
self.__previous_context = None
self.__has_remove_custom_icon_item = False
# Set the themed icon to set the C{awn.Icons} object
if "theme" in parent.meta:
self.theme(parent.meta["theme"])
def file(self, file, set=True, size=None):
"""Get an icon from a file location.
@param file: The path to the file. Can be relative or absolute.
@type file: C{string}
@param set: Whether to also set the icon. True by default.
@type set: C{bool}
@param size: Width and height of icon.
@type size: C{int}
@return: The resultant pixbuf or None (if C{set} is C{True})
@rtype: C{gtk.gdk.Pixbuf} or C{None}
"""
if file[0] != "/":
file = os.path.join(os.path.abspath(os.path.dirname(___file___)), file)
if size is None:
icon = gtk.gdk.pixbuf_new_from_file(file)
else:
if size is self.__class__.APPLET_SIZE:
size = self.__parent.get_size()
icon = gtk.gdk.pixbuf_new_from_file_at_size(file, size, size)
if set:
self.set(icon)
else:
return icon
def theme(self, name):
"""Set an icon from the default icon theme. The resultant
pixbuf will be returned.
@param name: The name of the theme icon.
@type name: C{string}
"""
self.__parent.set_icon_name(name)
if not self.__has_remove_custom_icon_item:
self.__has_remove_custom_icon_item = True
icon = self.__parent.get_icon()
assert isinstance(icon, awn.ThemedIcon)
item = icon.create_remove_custom_icon_item()
self.__parent.dialog.menu.insert(item, 1)
def set(self, icon):
"""Set a C{gtk.gdk.pixbuf} or C{cairo.Context} as your applet icon.
@param icon: The icon to set your applet icon to.
@type icon: C{gtk.gdk.Pixbuf} or C{cairo.Context}
"""
if isinstance(icon, cairo.Context):
self.__parent.set_icon_context(icon)
if self.__previous_context != icon:
del self.__previous_context
self.__previous_context = icon
else:
self.__parent.set_icon_pixbuf(icon)
def hide(self):
"""Hide the applet's icon.
"""
self.__parent.hide()
class Theme:
def __init__(self, parent):
"""Create a new Theme object.
@param parent: The parent applet of the theme instance.
@type parent: L{Applet}
"""
self.__parent = parent
self.__states = None
self.__icon_state = None
def set_states(self, states_icons):
self.__states, icons = zip(*states_icons.items())
self.__icon_state = None
self.__parent.set_icon_info(self.__states, icons)
def icon(self, state):
if self.__states is None or state not in self.__states:
raise RuntimeError("invalid state")
if state != self.__icon_state:
self.__icon_state = state
self.__parent.set_icon_state(state)
def theme(self, theme):
self.__parent.get_icon().override_gtk_theme(theme)
class Icons:
def __init__(self, parent):
"""Create a new Icons object.
@param parent: The parent applet of the icons instance.
@type parent: L{Applet}
"""
self.__parent = parent
self.__icon_box = awn.IconBox(parent)
parent.add(self.__icon_box)
def update_size():
size = self.__parent.get_size()
for icon in self.__icon_box.get_children():
icon.set_size(size)
parent.connect_size_changed(update_size)
def add(self, icon_name, tooltip_text, context_menu=None):
"""Set an icon from the default icon theme and set the applet
tooltip. Optionally provide a context menu that should be
displayed instead of the applet's standard context menu. The
resultant themed icon will be returned.
@param icon_name: The name of the theme icon.
@type icon_name: C{string}
@param tooltip_text: The new tooltip text.
@type tooltip_text: C{string}
@param context_menu: Optional context menu.
@type context_menu: C{gtk.Menu} or C{None}
@return: The resultant themed icon
@rtype: C{awn.ThemedIcon}
"""
icon = awn.ThemedIcon()
icon.set_tooltip_text(tooltip_text)
icon.set_size(self.__parent.get_size())
if isinstance(icon_name, vfs.File):
icon_pixbuf = gtk.gdk.pixbuf_new_from_file_at_size(icon_name.props.path, -1, self.__parent.get_size())
icon.set_from_pixbuf(icon_pixbuf)
# TODO make sure icon gets refreshed when doing update_size() (see above)
else:
icon.set_info_simple(self.__parent.meta["short"], self.__parent.get_uid(), icon_name)
# Callback context menu
if context_menu is None:
# TODO make sure item will not be added more than once
item = icon.create_remove_custom_icon_item()
self.__parent.dialog.menu.insert(item, 1)
def popup_menu_cb(widget, event):
self.__parent.dialog.show_menu(widget, event)
icon.connect("context-menu-popup", popup_menu_cb)
else:
assert isinstance(context_menu, gtk.Menu)
# TODO make sure item will not be added more than once
item = icon.create_remove_custom_icon_item()
context_menu.insert(item, 1)
def popup_menu_cb(widget, event, menu):
menu.show()
widget.popup_gtk_menu(menu, event.button, event.time)
icon.connect("context-menu-popup", popup_menu_cb, context_menu)
icon.show_all()
self.__icon_box.add(icon)
return icon
def remove(self, icon):
"""Remove the specified icon from the applet. The icon will not
be destroyed.
@param icon: The icon to be removed.
@type icon: C{awn.ThemedIcon}
"""
assert isinstance(icon, awn.ThemedIcon)
self.__icon_box.remove(icon)
def destroy_all(self):
"""Remove and destroy all icons in the applet.
"""
for icon in self.__icon_box.get_children():
icon.destroy()
class Errors:
def __init__(self, parent):
"""Create a new Modules object.
@param parent: The parent applet of the icon instance.
@type parent: L{Applet}
"""
self.__parent = parent
def module(self, scope, name):
"""Tell the user that they need to install a module to use your applet.
        This function will attempt to import the module and, if this is not
        possible, alert the user. Otherwise, the module is imported into the
        given scope under the given name.
@param scope: The dictionary that contains the globals to
import the module into
@type scope: C{dict}
@param name: the name of the module that must be installed.
@type name: C{string}
"""
try:
""" Do not add the module to globals[name], otherwise
awn.check_dependencies() will not show an error dialog. """
scope[name] = __import__(name, scope)
except ImportError:
self.__parent.icon.theme("dialog-error")
self.__parent.tooltip.set("Python module %s not found" % name)
awn.check_dependencies(scope, name)
def set_error_icon_and_click_to_restart(self):
self.__parent.icon.theme("dialog-error")
def crash_applet(widget=None, event=None):
gtk.main_quit()
self.__parent.connect("clicked", crash_applet)
def general(self, error, callback=None, traceback=None):
"""Tell the user that an error has occured.
@param error: the error itself.
@type error: C{string} or C{Exception}
@param callback: The function called when the user closes the dialog
@type callback: C{function}
@param traceback: Formatted traceback, can be copied to clipboard
via button in dialog.
@type traceback: C{str}
"""
assert isinstance(error, Exception) or type(error) in (str, tuple)
if traceback is not None:
traceback = "".join(traceback)[:-1]
args = {"message": "", "url": None}
if isinstance(error, Exception):
error_type = type(error).__name__
error = str(error)
if traceback is not None:
print "\n".join(["-" * 80, traceback, "-" * 80])
summary = "%s in %s: %s" % (error_type, self.__parent.meta["name"], error)
if self.__parent.meta["version"] == __version__:
args["message"] = "If you speak English and know how a bug tracker works, then visit Launchpad and report the bug by following these steps:\n\n" \
+ "1) Paste the error summary text in the 'summary' field\n" \
+ "2) Press Continue and then check whether the bug has already been reported or not. Do NOT add duplicates. Instead comment on the bug report that already exists.\n" \
+ "3) If you continue and report the bug, put the following in the big textarea:\n" \
+ " - exact version of awn-extras\n" \
+ " - operating system name and version\n" \
+ " - the traceback\n" \
+ " - other info requested by the guidelines found below the big textarea\n\n" \
+ "Remember: you must be able to speak English and check regularly whether the developers ask you questions. Do NOT add duplicates, but comment on the existing bug report. You cannot expect a bug to be fixed if you don't provide information.\n\n" \
+ "If you don't think you can meet these conditions, then don't file a bug report."
args["url"] = bug_report_link
else:
args["message"] = "Report this bug at the bug tracker of the %s applet." % self.__parent.meta["name"]
if "bug-report-url" in self.__parent.meta:
args["url"] = self.__parent.meta["bug-report-url"]
else:
error_type = "Error"
if isinstance(error, tuple):
args["message"] = error[1]
error = error[0]
dialog = self.ErrorDialog(self.__parent, error_type, error, **args)
if traceback is not None:
copy_traceback_button = gtk.Button("Copy traceback to clipboard")
copy_traceback_button.set_image(gtk.image_new_from_stock(gtk.STOCK_COPY, gtk.ICON_SIZE_MENU))
dialog.hbox.pack_start(copy_traceback_button, expand=False)
copy_summary_button = gtk.Button("Copy summary to clipboard")
copy_summary_button.set_image(gtk.image_new_from_stock(gtk.STOCK_COPY, gtk.ICON_SIZE_MENU))
dialog.hbox.pack_start(copy_summary_button, expand=False)
dialog.hbox.reorder_child(copy_traceback_button, 0)
dialog.hbox.reorder_child(copy_summary_button, 0)
def clicked_cb(widget, text):
clipboard = gtk.clipboard_get()
clipboard.set_text(text)
clipboard.store()
copy_traceback_button.connect("clicked", clicked_cb, traceback)
copy_summary_button.connect("clicked", clicked_cb, summary)
if callable(callback):
def response_cb(widget, response):
if response < 0:
callback()
dialog.connect("response", response_cb)
dialog.show_all()
class ErrorDialog(Dialogs.BaseDialog, gtk.MessageDialog):
"""A MessageDialog window that shows an error.
"""
def __init__(self, parent, error_type, title, message="", url=None):
gtk.MessageDialog.__init__(self, type=gtk.MESSAGE_ERROR, message_format=title)
Dialogs.BaseDialog.__init__(self, parent)
self.__parent = parent
self.set_skip_taskbar_hint(False)
self.set_title("%s in %s" % (error_type, parent.meta["name"]))
self.hbox = gtk.HBox(spacing=6)
self.action_area.add(self.hbox)
close_button = gtk.Button(stock=gtk.STOCK_CLOSE)
close_button.connect("clicked", lambda w: self.response(gtk.RESPONSE_CLOSE))
self.hbox.add(close_button)
if len(message) > 0:
self.format_secondary_markup(message)
# Make texts non-selectable to stop unhelpful bug reports from stupid users
for i in self.get_message_area().get_children():
i.set_selectable(False)
if url is not None:
alignment = gtk.Alignment(xalign=0.5, xscale=0.0)
alignment.add(gtk.LinkButton(url, url))
self.vbox.pack_start(alignment, expand=False)
class Settings:
__setting_types = (bool, int, long, float, str, list, Color)
def __init__(self, parent):
"""Create a new Settings object. This object
can be used as a dictionary to retrieve and set values of
configuration keys. More importantly, this object provides
the methods get_binder() and load_bindings(), which should
be used to bind keys to their corresponding Gtk+ widgets,
and to make the keys available as GObject properties.
@param parent: The parent applet of the settings instance.
@type parent: L{Applet}
"""
type_parent = type(parent)
if type_parent in (AppletSimple, AppletMultiple, config.Client):
self.__folder = config.GROUP_DEFAULT
elif type_parent is str:
self.__folder = parent
parent = None
self.__client = self.ConfigClient(self.__folder, parent)
def get_binder(self, builder):
"""Return an object that can be used to bind keys to their
corresponding Gtk+ widgets, which are to be retrieved
via the given C{gtk.Builder} instance.
        @param builder: Instance of C{gtk.Builder}, used to retrieve Gtk+ widgets
        @type builder: C{gtk.Builder}
@return: An object that provides the method bind() to bind keys
@rtype: C{object}
"""
return self.__client.get_config_binder(builder)
def load_bindings(self, object):
"""Load the bindings by creating a C{gobject.GObject} from the
descriptions given by the given binder object. This object
should be an object that was returned by get_binder(). The
"props" value (instance of C{gobject.GProps}) of the GObject will
be returned.
        @param object: An object returned by get_binder()
        @type object: C{object}
@return: The "props" value of the created GObject
@rtype: C{gobject.GProps}
"""
return self.__client.load_bindings(object)
def __getitem__(self, key):
"""Get a key from the currect directory.
@param key: A relative path to the correct key
@type key: C{string}
@return: The value of the key
@rtype: C{object}
"""
value = self.__client.get(key)
if type(value) is str and value[:9] == "!pickle;\n":
value = pickle.loads(value[9:])
return value
def __setitem__(self, key, value):
"""Set or create a key from the currect directory.
@param key: A relative path to the correct key
@type key: C{string}
"""
unpickled_value = value
if type(value) not in self.__setting_types:
value = "!pickle;\n%s" % pickle.dumps(value)
elif type(value) is long:
value = int(value)
self.__client.set(key, value)
def __contains__(self, key):
"""Test if a key exists in the current directory.
@param key: A relative path to the correct key
@type key: C{string}
"""
return self.__client.contains(key)
class ConfigClient:
def __init__(self, folder, client=None):
"""Create a new config client.
If the client is an C{Applet}, config instances will
automatically be removed if the applet is deleted.
@param folder: Folder to start with.
@type folder: C{string}
@param client: Applet used to construct a corresponding
config.Client or a preconstructed config.Client
@type client: C{None,Applet,config.Client}
"""
self.__config_object = None
self.__parent = None
type_client = type(client)
if client is None:
self.__client = awn.config_get_default(awn.PANEL_ID_DEFAULT)
elif type_client in (AppletSimple, AppletMultiple):
self.__client = awn.config_get_default_for_applet(client)
def applet_deleted_cb(applet):
self.__client.remove_instance()
client.connect("applet-deleted", applet_deleted_cb)
self.__parent = client
elif type_client is config.Client:
self.__client = client
else:
raise RuntimeError("Parameter 'client' must be None, an Applet, or a config.Client")
self.__folder = folder
def get_config_binder(self, builder):
if not isinstance(builder, gtk.Builder):
raise RuntimeError("Builder must be an instance of gtk.Builder")
return configbinder.get_config_binder(self.__client, self.__folder, builder)
def load_bindings(self, binder):
if self.__config_object is not None:
raise RuntimeError("Configuration object already set")
self.__config_object = binder.create_gobject()
return self.__config_object.props
def set(self, key, value):
"""Set an existing key's value.
@param key: The name of the key, relative to the current folder.
@type key: C{string}
@param value: The value to set the key to.
@type value: C{bool}, C{int}, C{float}, or C{string}
"""
try:
self.__config_object.set_property(key, value)
except:
try:
self.__client.set_value(self.__folder, key, value)
except:
name = self.__parent.meta["name"] if self.__parent is not None else "UNKNOWN"
print "%s: Could not set new value for key '%s'" % (name, key)
raise
def get(self, key):
"""Get an existing key's value.
@param key: The name of the key, relative to the current folder.
@type key: C{string}
@return: The value of the key
@rtype: C{object}
"""
try:
return self.__config_object.get_property(key)
except:
try:
return self.__client.get_value(self.__folder, key)
except:
name = self.__parent.meta["name"] if self.__parent is not None else "UNKNOWN"
print "%s: key '%s' does not exist" % (name, key)
raise
def contains(self, key):
"""Test if the key maps to a value.
@param key: The name of the key, relative to the current folder.
@type key: C{string}
@return: True if the key maps to a value, False otherwise
@rtype: C{bool}
"""
r = False
if self.__config_object is not None:
r = key in gobject.list_properties(self.__config_object)
if r:
return r
try:
self.__client.get_value(self.__folder, key)
except Exception, e:
if str(e).split(":", 1)[0] == "Could not find the key specified":
return False
return True
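# Illustrative use of Settings (hypothetical applet code; keys should exist in
# the applet's configuration schema):
#   applet.settings["update-interval"] = 30
#   if "update-interval" in applet.settings:
#       interval = applet.settings["update-interval"]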
class Keyring:
def __init__(self, parent=None):
"""Create a new Keyring object. This includes importing the keyring
module and connecting to the daemon.
@param parent: The parent applet of the keyring instance.
@type parent: L{Applet}
"""
if parent is not None:
self.__parent = parent
self.__parent.errors.module(globals(), "gnomekeyring")
else:
awn.check_dependencies(globals(), "gnomekeyring")
if not gnomekeyring.is_available():
raise KeyringError("Keyring not available")
keyring_list = gnomekeyring.list_keyring_names_sync()
if len(keyring_list) == 0:
raise KeyringError("No keyrings available")
try:
gnomekeyring.get_default_keyring_sync()
except gnomekeyring.NoKeyringDaemonError:
raise KeyringError("Had trouble connecting to daemon")
def new(self, keyring=None, name=None, pwd=None, attrs={}, type="generic"):
"""Create a new keyring key.
@param keyring: The keyring holding the key. If omitted, the default
keyring is returned.
@type keyring: C{string}
@param name: The display name of the key. If omitted, an empty key is
returned.
@type name: C{string}
@param pwd: The password stored in the key. If omitted, empty key is
returned.
@type pwd: C{string}
@param attrs: Other attributes stored in the key. By default: {}
@type attrs: C{dict}
@param type: The type of key. By default: "generic"
@type type: C{string}; "generic", "network", or "note"
@return: A new L{Key} object
@rtype: L{Key}
"""
k = self.Key(keyring)
if name and pwd:
k.set(k.keyring, name, pwd, attrs, type)
return k
def from_token(self, keyring, token):
"""Load the key with the given token. Note: If keyring is None, the
default keyring is used. However, this is not recommended.
@param keyring: The keyring holding the key. If omitted, the default
keyring is used.
@type keyring: C{string}
@param token: The password token of the key
@type token: C{int} or C{long}
@return: A new L{Key} object
@rtype: L{Key}
"""
k = self.Key(keyring, token)
keys = gnomekeyring.list_item_ids_sync(k.keyring)
if k.token not in keys:
raise KeyringNoMatchError("Token does not exist")
return k
class Key(object):
def __init__(self, keyring=None, token=0):
"""Create a new key. If keyring is None, the default keyring or
"login" keyring is used. Note: self.keyring will hold the name of
the keyring eventually used. To identify a key unambiguously
keyring name and token are needed.
@param keyring: The keyring holding the key. If omitted, the
default keyring is used.
@type keyring: C{string}
@param token: The token of an already-existing key. Optional.
@type token: C{long}
"""
keyring_list = gnomekeyring.list_keyring_names_sync()
if keyring is None:
keyring = gnomekeyring.get_default_keyring_sync()
if keyring is None:
if "login" in keyring_list:
keyring = "login"
else:
raise KeyringError("No default keyring set")
if keyring not in keyring_list:
raise KeyringNoMatchError("Keyring does not exist")
self.keyring = keyring
self.token = token
def set(self, keyring, name, pwd, attrs={}, type="generic"):
"""Create a new keyring key. Note that if another key
exists with the same name, it will be overwritten.
@param keyring: The keyring holding the key.
@type keyring: C{string}
@param name: The display name of the key.
@type name: C{string}
@param pwd: The password stored in the key.
@type pwd: C{string}
@param attrs: Other attributes stored in the key. By default: {}
@type attrs: C{dict}
@param type: The type of key. By default: "generic"
@type type: C{string}; "generic", "network", or "note"
"""
if type == "network":
type = gnomekeyring.ITEM_NETWORK_PASSWORD
elif type == "note":
type = gnomekeyring.ITEM_NOTE
else: # Generic included
type = gnomekeyring.ITEM_GENERIC_SECRET
try:
self.token = gnomekeyring.item_create_sync(keyring, type, \
name, attrs, pwd, True)
self.keyring = keyring
except gnomekeyring.CancelledError:
self.token = 0
def __unlock(self):
"""Unlock the key's keyring."""
info = gnomekeyring.get_info_sync(self.keyring)
if not info.get_is_locked():
return
# The straight way would be:
# gnomekeyring.unlock_sync(self.keyring, None)
# But this results in a type error, see launchpad bugs #432882.
# We create a dummy key instead, this triggers a user dialog to
# unlock the keyring. We delete the dummy key then immediately.
try:
tmp = gnomekeyring.item_create_sync(self.keyring, \
gnomekeyring.ITEM_GENERIC_SECRET, "awn-extras dummy", \
{"dummy_attr": "none"}, "dummy_pwd", True)
except gnomekeyring.CancelledError:
raise KeyringCancelledError("Operation cancelled by user")
try:
gnomekeyring.item_delete_sync(self.keyring, tmp)
except gnomekeyring.BadArgumentsError:
# Race condition if several applets use this method at once
pass
def delete(self):
"""Delete the current key. Will also reset the token. Note that
"del [Key]" will not delete the key itself; that would be too
destructive. delete() MUST be called manually.
"""
self.__unlock()
gnomekeyring.item_delete_sync(self.keyring, self.token)
self.token = 0
def __get(self):
self.__unlock()
return gnomekeyring.item_get_info_sync(self.keyring, self.token)
def __getAttrs(self):
self.__unlock()
return gnomekeyring.item_get_attributes_sync(self.keyring, self.token)
def __setAttrs(self, a):
self.__unlock()
return gnomekeyring.item_set_attributes_sync(self.keyring, self.token, a)
def __getName(self):
return self.__get().get_display_name()
def __setName(self, name):
info = self.__get()
info.set_display_name(name)
return gnomekeyring.item_set_info_sync(self.keyring, self.token, info)
def __getPass(self):
return self.__get().get_secret()
def __setPass(self, passwd):
info = self.__get()
info.set_secret(passwd)
return gnomekeyring.item_set_info_sync(self.keyring, self.token, info)
attrs = property(__getAttrs, __setAttrs)
"""
@ivar: The other attributes stored in the Key. Can be used like any
property.
"""
name = property(__getName, __setName)
"""
@ivar: The display name of the Key. Can be used like any property
"""
password = property(__getPass, __setPass)
"""
@ivar: The password stored in the Key. Can be used like any property.
"""
class Timing:
"""Provides utilities to register a function to be called periodically
or once after a specified delay.
"""
def __init__(self, parent):
"""Create a new Timing object.
@param parent: The parent applet of the timing instance.
@type parent: L{Applet}
"""
self.__parent = parent
def register(self, callback, seconds, start=True):
"""Register a function to be called periodically.
@param callback: Function to be called.
@type callback: C{function}
@param seconds: Number of seconds within each call.
@type seconds: C{float} or C{int}
@param start: Whether to start the callback automatically
@type start: C{bool}
@return: A L{Callback} object for the C{callback} parameter
@rtype: L{Callback}
"""
def callback_wrapper():
callback()
return True
cb = self.Callback(callback_wrapper, seconds)
if start:
cb.start()
return cb
def delay(self, callback, seconds, start=True):
"""Delay the execution of the given callback.
@param callback: Function
@type callback: C{function}
@param seconds: Number of seconds to delay function call
@type seconds: C{float} or C{int}
@return: A L{Callback} object for the C{callback} parameter
@rtype: L{Callback}
"""
def callback_wrapper():
callback()
return False
cb = self.Callback(callback_wrapper, seconds)
if start:
cb.start()
return cb
class Callback:
"""Wrapper around a callback function to provide ways to start and
stop the function, to change the interval or to test if the callback
is scheduled to run.
"""
def __init__(self, callback, seconds):
"""Create a new C{Callback} object.
@param callback: The function to wrap the Callback around.
@type callback: C{function}
@param seconds: Number of seconds within each call.
@type seconds: C{float} or C{int}
"""
assert seconds > 0.0
self.__callback = callback
self.__seconds = seconds
self.__timer_id = None
def is_started(self):
"""Return True if the callback has been scheduled to run after
each interval, False if the callback is stopped.
@return: True if the callback has been scheduled, False otherwise
@rtype: L{bool}
"""
return self.__timer_id is not None
def start(self):
"""Start executing the callback periodically.
@return: True if the callback was started, False otherwise
@rtype: L{bool}
"""
if self.__timer_id is not None:
return False
if int(self.__seconds) == self.__seconds:
self.__timer_id = glib.timeout_add_seconds(int(self.__seconds), self.__callback)
else:
self.__timer_id = glib.timeout_add(int(self.__seconds * 1000), self.__callback)
return True
def stop(self):
"""Stop the callback from running again if it was scheduled
to run.
@return: True if the callback was stopped, False otherwise
@rtype: L{bool}
"""
if self.__timer_id is None:
return False
glib.source_remove(self.__timer_id)
self.__timer_id = None
return True
def change_interval(self, seconds):
"""Change the interval and restart the callback if it was scheduled
to run.
@param seconds: Number of seconds within each call.
@type seconds: C{float} or C{int}
"""
assert seconds > 0.0
self.__seconds = seconds
# Restart if the callback was scheduled to run
if self.stop():
self.start()
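# Illustrative use of Timing (hypothetical applet code):
#   def refresh():
#       pass                                     # hypothetical update function
#   cb = applet.timing.register(refresh, 30.0)   # call refresh() every 30 s
#   cb.change_interval(60.0)                     # later: slow it down
#   cb.stop()                                    # or stop it entirely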
class Notify:
def __init__(self, parent):
"""Create a new Notify object.
@param parent: The parent applet of the notify instance.
@type parent: L{Applet}
"""
self.__parent = parent
awn.check_dependencies(globals(), "pynotify")
pynotify.init(parent.meta["short"])
def __del__(self):
pynotify.uninit()
def send(self, *args, **kwargs):
"""Show a new notification via libnotify.
@param subject: The subject of your message. If blank, "Message from
[applet name]" is used.
@type subject: C{string}
@param body: The main body of your message. Blank by default.
@type body: C{string}
@param icon: The full absolute path to the name of the icon to use.
@type icon: C{string}
@param timeout: Timeout in seconds after which the message closes
@type timeout: C{int}
"""
notification = self.Notification(self.__parent, *args, **kwargs)
notification.show()
def create(self, *args, **kwargs):
"""Return a notification that can be shown via show().
@param subject: The subject of your message. If blank, "Message from
[applet name]" is used.
@type subject: C{string}
@param body: The main body of your message. Blank by default.
@type body: C{string}
@param icon: The full absolute path to the name of the icon to use.
@type icon: C{string}
@param timeout: Timeout in seconds after which the message closes
@type timeout: C{int}
@return: a notification object
@rtype: C{self.Notification}
"""
return self.Notification(self.__parent, *args, **kwargs)
class Notification:
"""An object that manages a libnotify notification.
"""
def __init__(self, parent, subject=None, body="", icon="", timeout=0):
if subject is None:
subject = '"Message From %s"' % parent.meta["name"]
self.__notification = pynotify.Notification(subject, body, icon)
if timeout > 0:
self.__notification.set_timeout(timeout * 1000)
def show(self):
try:
self.__notification.show()
except glib.GError:
pass # Ignore error when no reply has been received
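# Illustrative use of Notify (hypothetical applet code):
#   applet.notification.send("Download finished",
#                            body="All files were fetched successfully.",
#                            timeout=5)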
class Meta:
def __init__(self, parent, info={}, options=()):
"""Create a new Meta object.
@param parent: The parent applet of the meta instance.
@type parent: L{Applet}
@param info: Values for the meta dictionary
@type info: C{dict}
@param options: Options to set. Format:
(option", "option", ("option": True|False), ("option":
("suboption", "suboption", ("suboption": True|False), ...)))
"""
assert "name" in info
self.__parent = parent
self.__info = info
self.__options = options
def has_option(self, option):
"""Check if the applet has set a specific option.
@param option: Option to check
@type option: C{str}
"""
return option in self.__options
def __getitem__(self, key):
"""Get a key from the dictionary.
@param key: The key
@type key: C{string}
"""
return self.__info[key]
def keys(self):
"""Return a list of keys from the dictionary.
"""
return self.__info.keys()
def __contains__(self, key):
"""Return True if the dictionary contains the key, False otherwise.
@param key: The key
@type key: C{string}
"""
return key in self.__info
def _getmodule(module):
"""Return a getter that lazy-loads a module, represented by a
single instantiated class.
@param module: The class of the module to initialize and get
@type module: C{class}
"""
instance = {}
def getter(self):
key = (self, module)
if key not in instance:
instance[key] = module(self)
return instance[key]
return property(getter)
class Applet(object):
def __init__(self, meta, options):
"""Create a new instance of the Applet object.
@param meta: The meta information to be passed to the Meta constructor
@type meta: C{dict}
"""
# Create all required child-objects, others will be lazy-loaded
self.meta = Meta(self, meta, options)
def connect_size_changed(self, callback):
self.connect("size-changed", lambda w, e: callback())
settings = _getmodule(Settings)
timing = _getmodule(Timing)
keyring = _getmodule(Keyring)
notification = _getmodule(Notify)
class AppletSimple(awn.AppletSimple, Applet):
def __init__(self, uid, panel_id, meta={}, options=[]):
"""Create a new instance of the AppletSimple object.
@param uid: The unique identifier of the applet
@type uid: C{string}
@param panel_id: Identifier of the panel in which the applet resides.
@type panel_id: C{int}
"""
awn.AppletSimple.__init__(self, meta["short"], uid, panel_id)
Applet.__init__(self, meta, options)
# Create all required child-objects, others will be lazy-loaded
self.tooltip = Tooltip(self)
self.dialog = Dialogs(self)
self.icon = Icon(self)
self.dialog.connect_signals(self)
theme = _getmodule(Theme)
errors = _getmodule(Errors)
class AppletMultiple(awn.Applet, Applet):
def __init__(self, uid, panel_id, meta={}, options=[]):
"""Create a new instance of the AppletMultiple object.
@param uid: The unique identifier of the applet
@type uid: C{string}
@param panel_id: Identifier of the panel in which the applet resides.
@type panel_id: C{int}
"""
awn.Applet.__init__(self, meta["short"], uid, panel_id)
Applet.__init__(self, meta, options)
# Create all required child-objects, others will be lazy-loaded
self.icons = Icons(self)
self.dialog = Dialogs(self)
def init_start(applet_class, meta={}, options=[]):
"""Do the work to create a new applet, and then start the applet.
This makes the icon appear on the bar and starts GTK+.
The callable applet_class parameter is called and given an instance of
C{Applet}. It can then set an icon, tooltip, dialogs, and other things,
before GTK+ starts, which makes the icon appear on the AWN panel.
    @param applet_class: A callable, used to do some initialization
@type applet_class: C{callable}
@param meta: The meta-information to pass to the constructor
@type meta: C{dict}
@param options: Options to set for the new applet
@type options: C{list} or C{tuple}
@return: The newly created applet.
@rtype: L{Applet}
"""
assert callable(applet_class)
glib.threads_init()
awn.init(sys.argv[1:])
if "multiple-icons" in options:
applet = AppletMultiple(awn.uid, awn.panel_id, meta, options)
else:
applet = AppletSimple(awn.uid, awn.panel_id, meta, options)
try:
applet_class(applet)
except Exception, e:
# TODO don't know what to do for multiple-icons applets
if "multiple-icons" not in options:
applet.errors.set_error_icon_and_click_to_restart()
import traceback
traceback = traceback.format_exception(type(e), e, sys.exc_traceback)
applet.errors.general(e, traceback=traceback, callback=gtk.main_quit)
else:
raise
awn.embed_applet(applet)
gtk.main()
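# Illustrative usage sketch (not part of the original module): how an applet
# script would typically call init_start(). The metadata values below are
# placeholders; "name" and "short" are the keys required by Meta and
# AppletSimple above.
#
# def example_setup(applet):
#     print "Initializing", applet.meta["name"]
#
# init_start(example_setup,
#            meta={"name": "Example Applet", "short": "example-applet"})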
| p12tic/awn-extras | shared/python/awnlib.py | Python | gpl-2.0 | 54,312 |
##########################################################################
#
# Copyright 2012-2015 VMware, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
from d3d9 import *
HRESULT = MAKE_HRESULT(errors = [
"DXVA2_E_NOT_INITIALIZED",
"DXVA2_E_NEW_VIDEO_DEVICE",
"DXVA2_E_VIDEO_DEVICE_LOCKED",
"DXVA2_E_NOT_AVAILABLE",
])
DXVA2_SampleFormat = FakeEnum(UINT, [
"DXVA2_SampleUnknown",
"DXVA2_SampleProgressiveFrame",
"DXVA2_SampleFieldInterleavedEvenFirst",
"DXVA2_SampleFieldInterleavedOddFirst",
"DXVA2_SampleFieldSingleEven",
"DXVA2_SampleFieldSingleOdd",
"DXVA2_SampleSubStream",
])
DXVA2_VideoChromaSubSampling = FakeEnum(UINT, [
"DXVA2_VideoChromaSubsampling_Unknown",
"DXVA2_VideoChromaSubsampling_ProgressiveChroma",
"DXVA2_VideoChromaSubsampling_Horizontally_Cosited",
"DXVA2_VideoChromaSubsampling_Vertically_Cosited",
"DXVA2_VideoChromaSubsampling_Vertically_AlignedChromaPlanes",
"DXVA2_VideoChromaSubsampling_MPEG2",
"DXVA2_VideoChromaSubsampling_MPEG1",
"DXVA2_VideoChromaSubsampling_DV_PAL",
"DXVA2_VideoChromaSubsampling_Cosited",
])
DXVA2_NominalRange = FakeEnum(UINT, [
"DXVA2_NominalRange_Unknown",
"DXVA2_NominalRange_Normal",
"DXVA2_NominalRange_Wide",
"DXVA2_NominalRange_0_255",
"DXVA2_NominalRange_16_235",
"DXVA2_NominalRange_48_208",
])
DXVA2_VideoTransferMatrix = FakeEnum(UINT, [
"DXVA2_VideoTransferMatrix_Unknown",
"DXVA2_VideoTransferMatrix_BT709",
"DXVA2_VideoTransferMatrix_BT601",
"DXVA2_VideoTransferMatrix_SMPTE240M",
])
DXVA2_VideoLighting = FakeEnum(UINT, [
"DXVA2_VideoLighting_Unknown",
"DXVA2_VideoLighting_bright",
"DXVA2_VideoLighting_office",
"DXVA2_VideoLighting_dim",
"DXVA2_VideoLighting_dark",
])
DXVA2_VideoPrimaries = FakeEnum(UINT, [
"DXVA2_VideoPrimaries_Unknown",
"DXVA2_VideoPrimaries_reserved",
"DXVA2_VideoPrimaries_BT709",
"DXVA2_VideoPrimaries_BT470_2_SysM",
"DXVA2_VideoPrimaries_BT470_2_SysBG",
"DXVA2_VideoPrimaries_SMPTE170M",
"DXVA2_VideoPrimaries_SMPTE240M",
"DXVA2_VideoPrimaries_EBU3213",
"DXVA2_VideoPrimaries_SMPTE_C",
])
DXVA2_VideoTransferFunction = FakeEnum(UINT, [
"DXVA2_VideoTransFunc_Unknown",
"DXVA2_VideoTransFunc_10",
"DXVA2_VideoTransFunc_18",
"DXVA2_VideoTransFunc_20",
"DXVA2_VideoTransFunc_22",
"DXVA2_VideoTransFunc_709",
"DXVA2_VideoTransFunc_240M",
"DXVA2_VideoTransFunc_sRGB",
"DXVA2_VideoTransFunc_28",
])
DXVA2_ExtendedFormat = Struct("DXVA2_ExtendedFormat", [
(DXVA2_SampleFormat, "SampleFormat"),
(DXVA2_VideoChromaSubSampling, "VideoChromaSubsampling"),
(DXVA2_NominalRange, "NominalRange"),
(DXVA2_VideoTransferMatrix, "VideoTransferMatrix"),
(DXVA2_VideoLighting, "VideoLighting"),
(DXVA2_VideoPrimaries, "VideoPrimaries"),
(DXVA2_VideoTransferFunction, "VideoTransferFunction"),
])
DXVA2_Frequency = Struct("DXVA2_Frequency", [
(UINT, "Numerator"),
(UINT, "Denominator"),
])
DXVA2_VideoDesc = Struct("DXVA2_VideoDesc", [
(UINT, "SampleWidth"),
(UINT, "SampleHeight"),
(DXVA2_ExtendedFormat, "SampleFormat"),
(D3DFORMAT, "Format"),
(DXVA2_Frequency, "InputSampleFreq"),
(DXVA2_Frequency, "OutputFrameFreq"),
(UINT, "UABProtectionLevel"),
(UINT, "Reserved"),
])
DXVA2_DeinterlaceTech = Flags(UINT, [
"DXVA2_DeinterlaceTech_Unknown",
"DXVA2_DeinterlaceTech_BOBLineReplicate",
"DXVA2_DeinterlaceTech_BOBVerticalStretch",
"DXVA2_DeinterlaceTech_BOBVerticalStretch4Tap",
"DXVA2_DeinterlaceTech_MedianFiltering",
"DXVA2_DeinterlaceTech_EdgeFiltering",
"DXVA2_DeinterlaceTech_FieldAdaptive",
"DXVA2_DeinterlaceTech_PixelAdaptive",
"DXVA2_DeinterlaceTech_MotionVectorSteered",
"DXVA2_DeinterlaceTech_InverseTelecine",
])
DXVA2_Filter = Enum("DXVA2_Filter", [
"DXVA2_NoiseFilterLumaLevel",
"DXVA2_NoiseFilterLumaThreshold",
"DXVA2_NoiseFilterLumaRadius",
"DXVA2_NoiseFilterChromaLevel",
"DXVA2_NoiseFilterChromaThreshold",
"DXVA2_NoiseFilterChromaRadius",
"DXVA2_DetailFilterLumaLevel",
"DXVA2_DetailFilterLumaThreshold",
"DXVA2_DetailFilterLumaRadius",
"DXVA2_DetailFilterChromaLevel",
"DXVA2_DetailFilterChromaThreshold",
"DXVA2_DetailFilterChromaRadius",
])
DXVA2_NoiseFilterTech = Flags(UINT, [
"DXVA2_NoiseFilterTech_Unsupported",
"DXVA2_NoiseFilterTech_Unknown",
"DXVA2_NoiseFilterTech_Median",
"DXVA2_NoiseFilterTech_Temporal",
"DXVA2_NoiseFilterTech_BlockNoise",
"DXVA2_NoiseFilterTech_MosquitoNoise",
])
DXVA2_DetailFilterTech = Flags(UINT, [
"DXVA2_DetailFilterTech_Unsupported",
"DXVA2_DetailFilterTech_Unknown",
"DXVA2_DetailFilterTech_Edge",
"DXVA2_DetailFilterTech_Sharpening",
])
DXVA2_ProcAmp = Flags(UINT, [
"DXVA2_ProcAmp_None",
"DXVA2_ProcAmp_Brightness",
"DXVA2_ProcAmp_Contrast",
"DXVA2_ProcAmp_Hue",
"DXVA2_ProcAmp_Saturation",
])
DXVA2_VideoProcess = Flags(UINT, [
"DXVA2_VideoProcess_None",
"DXVA2_VideoProcess_YUV2RGB",
"DXVA2_VideoProcess_StretchX",
"DXVA2_VideoProcess_StretchY",
"DXVA2_VideoProcess_AlphaBlend",
"DXVA2_VideoProcess_SubRects",
"DXVA2_VideoProcess_SubStreams",
"DXVA2_VideoProcess_SubStreamsExtended",
"DXVA2_VideoProcess_YUV2RGBExtended",
"DXVA2_VideoProcess_AlphaBlendExtended",
"DXVA2_VideoProcess_Constriction",
"DXVA2_VideoProcess_NoiseFilter",
"DXVA2_VideoProcess_DetailFilter",
"DXVA2_VideoProcess_PlanarAlpha",
"DXVA2_VideoProcess_LinearScaling",
"DXVA2_VideoProcess_GammaCompensated",
"DXVA2_VideoProcess_MaintainsOriginalFieldData",
])
DXVA2_VPDev = Flags(UINT, [
"DXVA2_VPDev_HardwareDevice",
"DXVA2_VPDev_EmulatedDXVA1",
"DXVA2_VPDev_SoftwareDevice",
])
DXVA2_SampleData = Flags(UINT, [
"DXVA2_SampleData_RFF",
"DXVA2_SampleData_TFF",
"DXVA2_SampleData_RFF_TFF_Present",
])
DXVA2_DestData = Flags(UINT, [
"DXVA2_DestData_RFF",
"DXVA2_DestData_TFF",
"DXVA2_DestData_RFF_TFF_Present",
])
DXVA2_VideoProcessorCaps = Struct("DXVA2_VideoProcessorCaps", [
(DXVA2_VPDev, "DeviceCaps"),
(D3DPOOL, "InputPool"),
(UINT, "NumForwardRefSamples"),
(UINT, "NumBackwardRefSamples"),
(UINT, "Reserved"),
(DXVA2_DeinterlaceTech, "DeinterlaceTechnology"),
(DXVA2_ProcAmp, "ProcAmpControlCaps"),
(DXVA2_VideoProcess, "VideoProcessorOperations"),
(DXVA2_NoiseFilterTech, "NoiseFilterTechnology"),
(DXVA2_DetailFilterTech, "DetailFilterTechnology"),
])
DXVA2_Fixed32 = Struct("DXVA2_Fixed32", [
(USHORT, "Fraction"),
(SHORT, "Value"),
])
DXVA2_AYUVSample8 = Struct("DXVA2_AYUVSample8", [
(UCHAR, "Cr"),
(UCHAR, "Cb"),
(UCHAR, "Y"),
(UCHAR, "Alpha"),
])
DXVA2_AYUVSample16 = Struct("DXVA2_AYUVSample16", [
(USHORT, "Cr"),
(USHORT, "Cb"),
(USHORT, "Y"),
(USHORT, "Alpha"),
])
REFERENCE_TIME = Alias("REFERENCE_TIME", LONGLONG)
DXVA2_VideoSample = Struct("DXVA2_VideoSample", [
(REFERENCE_TIME, "Start"),
(REFERENCE_TIME, "End"),
(DXVA2_ExtendedFormat, "SampleFormat"),
(ObjPointer(IDirect3DSurface9), "SrcSurface"),
(RECT, "SrcRect"),
(RECT, "DstRect"),
(Array(DXVA2_AYUVSample8, 16), "Pal"),
(DXVA2_Fixed32, "PlanarAlpha"),
(DWORD, "SampleData"),
])
DXVA2_ValueRange = Struct("DXVA2_ValueRange", [
(DXVA2_Fixed32, "MinValue"),
(DXVA2_Fixed32, "MaxValue"),
(DXVA2_Fixed32, "DefaultValue"),
(DXVA2_Fixed32, "StepSize"),
])
DXVA2_ProcAmpValues = Struct("DXVA2_ProcAmpValues", [
(DXVA2_Fixed32, "Brightness"),
(DXVA2_Fixed32, "Contrast"),
(DXVA2_Fixed32, "Hue"),
(DXVA2_Fixed32, "Saturation"),
])
DXVA2_FilterValues = Struct("DXVA2_FilterValues", [
(DXVA2_Fixed32, "Level"),
(DXVA2_Fixed32, "Threshold"),
(DXVA2_Fixed32, "Radius"),
])
DXVA2_VideoProcessBltParams = Struct("DXVA2_VideoProcessBltParams", [
(REFERENCE_TIME, "TargetFrame"),
(RECT, "TargetRect"),
(SIZE, "ConstrictionSize"),
(UINT, "StreamingFlags"),
(DXVA2_AYUVSample16, "BackgroundColor"),
(DXVA2_ExtendedFormat, "DestFormat"),
(DXVA2_ProcAmpValues, "ProcAmpValues"),
(DXVA2_Fixed32, "Alpha"),
(DXVA2_FilterValues, "NoiseFilterLuma"),
(DXVA2_FilterValues, "NoiseFilterChroma"),
(DXVA2_FilterValues, "DetailFilterLuma"),
(DXVA2_FilterValues, "DetailFilterChroma"),
(DWORD, "DestData"),
])
DXVA2_BufferType = FakeEnum(UINT, [
"DXVA2_PictureParametersBufferType",
"DXVA2_MacroBlockControlBufferType",
"DXVA2_ResidualDifferenceBufferType",
"DXVA2_DeblockingControlBufferType",
"DXVA2_InverseQuantizationMatrixBufferType",
"DXVA2_SliceControlBufferType",
"DXVA2_BitStreamDateBufferType",
"DXVA2_MotionVectorBuffer",
"DXVA2_FilmGrainBuffer",
])
DXVA2_Type = FakeEnum(DWORD, [
"DXVA2_VideoDecoderRenderTarget",
"DXVA2_VideoProcessorRenderTarget",
"DXVA2_VideoSoftwareRenderTarget",
])
DXVA2_ConfigPictureDecode = Struct("DXVA2_ConfigPictureDecode", [
(GUID, "guidConfigBitstreamEncryption"),
(GUID, "guidConfigMBcontrolEncryption"),
(GUID, "guidConfigResidDiffEncryption"),
(UINT, "ConfigBitstreamRaw"),
(UINT, "ConfigMBcontrolRasterOrder"),
(UINT, "ConfigResidDiffHost"),
(UINT, "ConfigSpatialResid8"),
(UINT, "ConfigResid8Subtraction"),
(UINT, "ConfigSpatialHost8or9Clipping"),
(UINT, "ConfigSpatialResidInterleaved"),
(UINT, "ConfigIntraResidUnsigned"),
(UINT, "ConfigResidDiffAccelerator"),
(UINT, "ConfigHostInverseScan"),
(UINT, "ConfigSpecificIDCT"),
(UINT, "Config4GroupedCoefs"),
(USHORT, "ConfigMinRenderTargetBuffCount"),
(USHORT, "ConfigDecoderSpecific"),
])
DXVA2_AES_CTR_IV = Struct("DXVA2_AES_CTR_IV", [
(UINT64, "IV"),
(UINT64, "Count"),
])
DXVA2_DecodeBufferDesc = Struct("DXVA2_DecodeBufferDesc", [
(DXVA2_BufferType, "CompressedBufferType"),
(UINT, "BufferIndex"),
(UINT, "DataOffset"),
(UINT, "DataSize"),
(UINT, "FirstMBaddress"),
(UINT, "NumMBsInBuffer"),
(UINT, "Width"),
(UINT, "Height"),
(UINT, "Stride"),
(UINT, "ReservedBits"),
(Blob(VOID, "sizeof(DXVA2_AES_CTR_IV)"), "pvPVPState"),
])
DXVA2_DecodeExtensionData = Struct("DXVA2_DecodeExtensionData", [
(UINT, "Function"),
(Blob(Void, "{self}.PrivateInputDataSize"), "pPrivateInputData"),
(UINT, "PrivateInputDataSize"),
(Blob(Void, "{self}.PrivateOutputDataSize"), "pPrivateOutputData"),
(UINT, "PrivateOutputDataSize"),
])
DXVA2_DecodeExecuteParams = Struct("DXVA2_DecodeExecuteParams", [
(UINT, "NumCompBuffers"),
(Array(DXVA2_DecodeBufferDesc, "{self}.NumCompBuffers"), "pCompressedBuffers"),
(Pointer(DXVA2_DecodeExtensionData), "pExtensionData"),
])
RESET_TOKEN = Handle("resetToken", UINT)
IDirect3DDeviceManager9 = Interface("IDirect3DDeviceManager9", IUnknown)
IDirectXVideoAccelerationService = Interface("IDirectXVideoAccelerationService", IUnknown)
IDirectXVideoDecoderService = Interface("IDirectXVideoDecoderService", IDirectXVideoAccelerationService)
IDirectXVideoProcessorService = Interface("IDirectXVideoProcessorService", IDirectXVideoAccelerationService)
IDirectXVideoDecoder = Interface("IDirectXVideoDecoder", IUnknown)
IDirectXVideoProcessor = Interface("IDirectXVideoProcessor", IUnknown)
IDirect3DDeviceManager9.methods += [
StdMethod(HRESULT, "ResetDevice", [(ObjPointer(IDirect3DDevice9), "pDevice"), (RESET_TOKEN, "resetToken")]),
StdMethod(HRESULT, "OpenDeviceHandle", [Out(Pointer(HANDLE), "phDevice")]),
StdMethod(HRESULT, "CloseDeviceHandle", [(HANDLE, "hDevice")]),
StdMethod(HRESULT, "TestDevice", [(HANDLE, "hDevice")]),
StdMethod(HRESULT, "LockDevice", [(HANDLE, "hDevice"), Out(Pointer(ObjPointer(IDirect3DDevice9)), "ppDevice"), (BOOL, "fBlock")]),
StdMethod(HRESULT, "UnlockDevice", [(HANDLE, "hDevice"), (BOOL, "fSaveState")]),
StdMethod(HRESULT, "GetVideoService", [(HANDLE, "hDevice"), (REFIID, "riid"), Out(Pointer(ObjPointer(Void)), "ppService")]),
]
IDirectXVideoAccelerationService.methods += [
StdMethod(HRESULT, "CreateSurface", [(UINT, "Width"), (UINT, "Height"), (UINT, "BackBuffers"), (D3DFORMAT, "Format"), (D3DPOOL, "Pool"), (D3DUSAGE, "Usage"), (DXVA2_Type, "DxvaType"), Out(Array(ObjPointer(IDirect3DSurface9), "1 + BackBuffers"), "ppSurface"), Out(Pointer(HANDLE), "pSharedHandle")]),
]
IDirectXVideoDecoderService.methods += [
StdMethod(HRESULT, "GetDecoderDeviceGuids", [Out(Pointer(UINT), "pCount"), Out(Pointer(Array(GUID, "pCount ? *pCount : 0")), "pGuids")], sideeffects=False),
StdMethod(HRESULT, "GetDecoderRenderTargets", [(REFGUID, "Guid"), Out(Pointer(UINT), "pCount"), Out(Pointer(Array(D3DFORMAT, "pCount ? *pCount : 0")), "pFormats")], sideeffects=False),
StdMethod(HRESULT, "GetDecoderConfigurations", [(REFGUID, "Guid"), (Pointer(Const(DXVA2_VideoDesc)), "pVideoDesc"), (OpaquePointer(Void), "pReserved"), Out(Pointer(UINT), "pCount"), Out(Pointer(Array(DXVA2_ConfigPictureDecode, "pCount ? *pCount : 0")), "ppConfigs")], sideeffects=False),
StdMethod(HRESULT, "CreateVideoDecoder", [(REFGUID, "Guid"), (Pointer(Const(DXVA2_VideoDesc)), "pVideoDesc"), (Pointer(Const(DXVA2_ConfigPictureDecode)), "pConfig"), (Array(ObjPointer(IDirect3DSurface9), "NumRenderTargets"), "ppDecoderRenderTargets"), (UINT, "NumRenderTargets"), Out(Pointer(ObjPointer(IDirectXVideoDecoder)), "ppDecode")]),
]
IDirectXVideoProcessorService.methods += [
StdMethod(HRESULT, "RegisterVideoProcessorSoftwareDevice", [(OpaquePointer(Void), "pCallbacks")], sideeffects=False),
StdMethod(HRESULT, "GetVideoProcessorDeviceGuids", [(Pointer(Const(DXVA2_VideoDesc)), "pVideoDesc"), Out(Pointer(UINT), "pCount"), Out(Pointer(Array(GUID, "pCount ? *pCount : 0")), "pGuids")], sideeffects=False),
StdMethod(HRESULT, "GetVideoProcessorRenderTargets", [(REFGUID, "VideoProcDeviceGuid"), (Pointer(Const(DXVA2_VideoDesc)), "pVideoDesc"), Out(Pointer(UINT), "pCount"), Out(Pointer(Array(D3DFORMAT, "pCount ? *pCount : 0")), "pFormats")], sideeffects=False),
StdMethod(HRESULT, "GetVideoProcessorSubStreamFormats", [(REFGUID, "VideoProcDeviceGuid"), (Pointer(Const(DXVA2_VideoDesc)), "pVideoDesc"), (D3DFORMAT, "RenderTargetFormat"), Out(Pointer(UINT), "pCount"), Out(Pointer(Array(D3DFORMAT, "pCount ? *pCount : 0")), "pFormats")], sideeffects=False),
StdMethod(HRESULT, "GetVideoProcessorCaps", [(REFGUID, "VideoProcDeviceGuid"), (Pointer(Const(DXVA2_VideoDesc)), "pVideoDesc"), (D3DFORMAT, "RenderTargetFormat"), Out(Pointer(DXVA2_VideoProcessorCaps), "pCaps")], sideeffects=False),
StdMethod(HRESULT, "GetProcAmpRange", [(REFGUID, "VideoProcDeviceGuid"), (Pointer(Const(DXVA2_VideoDesc)), "pVideoDesc"), (D3DFORMAT, "RenderTargetFormat"), (UINT, "ProcAmpCap"), Out(Pointer(DXVA2_ValueRange), "pRange")], sideeffects=False),
StdMethod(HRESULT, "GetFilterPropertyRange", [(REFGUID, "VideoProcDeviceGuid"), (Pointer(Const(DXVA2_VideoDesc)), "pVideoDesc"), (D3DFORMAT, "RenderTargetFormat"), (UINT, "FilterSetting"), Out(Pointer(DXVA2_ValueRange), "pRange")], sideeffects=False),
StdMethod(HRESULT, "CreateVideoProcessor", [(REFGUID, "VideoProcDeviceGuid"), (Pointer(Const(DXVA2_VideoDesc)), "pVideoDesc"), (D3DFORMAT, "RenderTargetFormat"), (UINT, "MaxNumSubStreams"), Out(Pointer(ObjPointer(IDirectXVideoProcessor)), "ppVidProcess")]),
]
IDirectXVideoDecoder.methods += [
StdMethod(HRESULT, "GetVideoDecoderService", [Out(Pointer(ObjPointer(IDirectXVideoDecoderService)), "ppService")]),
StdMethod(HRESULT, "GetCreationParameters", [Out(Pointer(GUID), "pDeviceGuid"), Out(Pointer(DXVA2_VideoDesc), "pVideoDesc"), Out(Pointer(DXVA2_ConfigPictureDecode), "pConfig"), Out(Pointer(Pointer(ObjPointer(IDirect3DSurface9))), "pDecoderRenderTargets"), Out(Pointer(UINT), "pNumSurfaces")]),
StdMethod(HRESULT, "GetBuffer", [(DXVA2_BufferType, "BufferType"), Out(Pointer(LinearPointer(Void, "*pBufferSize")), "ppBuffer"), Out(Pointer(UINT), "pBufferSize")]),
StdMethod(HRESULT, "ReleaseBuffer", [(DXVA2_BufferType, "BufferType")]),
StdMethod(HRESULT, "BeginFrame", [(ObjPointer(IDirect3DSurface9), "pRenderTarget"), (Blob(Void, 16), "pvPVPData")]),
StdMethod(HRESULT, "EndFrame", [Out(Pointer(HANDLE), "pHandleComplete")]),
StdMethod(HRESULT, "Execute", [(Pointer(Const(DXVA2_DecodeExecuteParams)), "pExecuteParams")]),
]
IDirectXVideoProcessor.methods += [
StdMethod(HRESULT, "GetVideoProcessorService", [Out(Pointer(ObjPointer(IDirectXVideoProcessorService)), "ppService")]),
StdMethod(HRESULT, "GetCreationParameters", [Out(Pointer(GUID), "pDeviceGuid"), Out(Pointer(DXVA2_VideoDesc), "pVideoDesc"), Out(Pointer(D3DFORMAT), "pRenderTargetFormat"), Out(Pointer(UINT), "pMaxNumSubStreams")], sideeffects=False),
StdMethod(HRESULT, "GetVideoProcessorCaps", [Out(Pointer(DXVA2_VideoProcessorCaps), "pCaps")], sideeffects=False),
StdMethod(HRESULT, "GetProcAmpRange", [(UINT, "ProcAmpCap"), Out(Pointer(DXVA2_ValueRange), "pRange")], sideeffects=False),
StdMethod(HRESULT, "GetFilterPropertyRange", [(UINT, "FilterSetting"), Out(Pointer(DXVA2_ValueRange), "pRange")], sideeffects=False),
StdMethod(HRESULT, "VideoProcessBlt", [(ObjPointer(IDirect3DSurface9), "pRenderTarget"), (Pointer(Const(DXVA2_VideoProcessBltParams)), "pBltParams"), (Array(Const(DXVA2_VideoSample), "NumSamples"), "pSamples"), (UINT, "NumSamples"), Out(Pointer(HANDLE), "pHandleComplete")]),
]
DXVA2_SurfaceType = Enum("DXVA2_SurfaceType", [
"DXVA2_SurfaceType_DecoderRenderTarget",
"DXVA2_SurfaceType_ProcessorRenderTarget",
"DXVA2_SurfaceType_D3DRenderTargetTexture",
])
IDirectXVideoMemoryConfiguration = Interface("IDirectXVideoMemoryConfiguration", IUnknown)
IDirectXVideoMemoryConfiguration.methods += [
StdMethod(HRESULT, "GetAvailableSurfaceTypeByIndex", [(DWORD, "dwTypeIndex"), Out(Pointer(DXVA2_SurfaceType), "pdwType")], sideeffects=False),
StdMethod(HRESULT, "SetSurfaceType", [(DXVA2_SurfaceType, "dwType")]),
]
dxva2 = Module("dxva2")
dxva2.addInterfaces([
IDirectXVideoAccelerationService,
IDirectXVideoDecoderService,
IDirectXVideoProcessorService,
IDirectXVideoMemoryConfiguration,
])
dxva2.addFunctions([
StdFunction(HRESULT, "DXVA2CreateDirect3DDeviceManager9", [Out(Pointer(RESET_TOKEN), "pResetToken"), Out(Pointer(ObjPointer(IDirect3DDeviceManager9)), "ppDeviceManager")]),
StdFunction(HRESULT, "DXVA2CreateVideoService", [(ObjPointer(IDirect3DDevice9), "pDD"), (REFIID, "riid"), Out(Pointer(ObjPointer(Void)), "ppService")]),
])
| schulmar/apitrace | specs/dxva2.py | Python | mit | 19,676 |
"""
A number of Cocoa API's have a 'context' argument that is a plain 'void*'
in ObjC, and an Integer value in Python. The 'context' object defined here
allows you to get a unique integer number that can be used as the context
argument for any Python object, and retrieve that object later on using the
context number.
Usage::
...
ctx = objc.context.register(myContext)
someObject.observeValueForKeyPath_ofObject_change_context_(
kp, obj, {}, ctx)
...
and in the callback::
def observeValueForKeyPath_ofObject_change_context_(self,
kp, obj, change, ctx):
myContext = objc.context.get(ctx)
...
Use ``objc.context.unregister`` to remove the registration of ``myObject``
when you're done. The argument to unregister is the same object as was
passed in during registration.
"""
__all__ = ('context',)
class ContextRegistry (object):
def __init__(self):
self._registry = {}
def register(self, object):
uniq = id(object)
self._registry[uniq] = object
return uniq
def unregister(self, object):
try:
del self._registry[id(object)]
except KeyError:
pass
def get(self, uniq):
return self._registry[uniq]
context = ContextRegistry()
| albertz/music-player | mac/pyobjc-core/Lib/objc/_context.py | Python | bsd-2-clause | 1,283 |
# -*- coding: utf-8 -*-
"""
This file is part of SimpleFSM.
SimpleFSM is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SimpleFSM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Lesser GNU General Public License for more details.
You should have received a copy of the Lesser GNU General Public License
along with SimpleFSM. If not, see <http://www.gnu.org/licenses/>.
Copyright 2014 Lucas Liendo.
"""
from abc import ABCMeta, abstractmethod
from exceptions import *
class State(object):
"""
The State class models a defined state.
To create a new state an id must be supplied to identify it among other
states. Two other keyword arguments can be supplied to identify if the
state is a start state and/or a final state.
Note that at least a final state is needed between all states and just
only one start state must be established among all states.
"""
def __init__(self, id, start_state=False, final_state=False):
self._id = id
self._start_state = start_state
self._final_state = final_state
@property
def id(self):
"""Returns the id of the state."""
return self._id
@property
def start_state(self):
"""Returns True if the state is marked as a start state."""
return self._start_state
@start_state.setter
def start_state(self, start_state):
self._start_state = start_state
@property
def final_state(self):
"""Returns True if the state is marked as a final state."""
return self._final_state
@final_state.setter
def final_state(self, final_state):
self._final_state = final_state
def transit(self, fsm):
"""
This method is automatically called from SimpleFSM and performs
the transition from one state to another provided that a transition
match applies otherwise a FSMRejectedInput is raised.
"""
symbol = fsm.read_symbol()
try:
transition = [t for t in fsm.transitions if t.from_state.id == self.id and t.accepts(symbol)].pop()
except IndexError:
raise FSMRejectedInput([symbol])
fsm.current_state = transition.to_state
return symbol
def __eq__(self, other):
return self.id == other.id
class Transition(object):
"""
The Transition class models a transition between two given states.
To create a new transition three mandatory arguments must be supplied :
from_state : The state from which you want to transit.
to_state : The state you want to transit to.
transition_function : The function used to actually test if a symbol matches
the transition. This function must take only the symbol to be tested.
"""
def __init__(self, from_state, to_state, transition_function):
self._from_state = from_state
self._to_state = to_state
self._transition_function = transition_function
@property
def from_state(self):
"""Returns the state from which this transition should transit."""
return self._from_state
@property
def to_state(self):
"""Returns the state from which this transition should transit to."""
return self._to_state
@property
def transition_function(self):
"""Returns the transition function used by a Transition object."""
return self._transition_function
def accepts(self, symbol):
"""
Returns True if the read symbol is accepted by the transition function.
"""
return self._transition_function(symbol)
def __eq__(self, other):
return self.from_state == other.from_state \
and self.to_state == other.to_state \
and self.transition_function == other.transition_function
class SimpleFSM(object):
"""
The SimpleFSM class models a finite state machine. To use this class
you must create a custom class that inherits from SimpleFSM and implement
the read_symbol() method. This method is responsible for returning a symbol
each time is called. This symbol is then tested to check if it's actually
accepted by the FSM.
Typically you would instantiate a set of States and Transitions. After
this is done you instantiate your custom-implemented FSM and add all the
states and transitions.
After your custom-implemented FSM is built you should call the run()
method. If the word is recognized a list with all the accepted symbols
is returned otherwise a FSMRejectedInput is raised.
"""
__metaclass__ = ABCMeta
def __init__(self):
self._states = []
self._transitions = []
self._accepted_symbols = []
self._final_states = None
self._current_state = None
self._remaining_input = True
@property
def transitions(self):
"""Returns a list containing all the defined transitions for this FSM."""
return self._transitions
@property
def current_state(self):
return self._current_state
@current_state.setter
def current_state(self, state):
self._current_state = state
def add_state(self, state):
"""
Adds a new state to the FSM. If the supplied state already exists
a FSMDuplicatedState exception is raised.
"""
if state in self._states:
raise FSMDuplicatedState(state)
self._states.append(state)
def add_states(self, states):
"""
Adds a set of states to the FSM. If one of the states is already
present a FSMDuplicatedState exception is raised.
"""
[self.add_state(s) for s in states]
def add_transition(self, transition):
"""
Adds a new transition to this FSM. If the supplied transition already
exists a FSMDuplicatedTransition exception is raised.
"""
if transition in self._transitions:
raise FSMDuplicatedTransition(transition)
self._transitions.append(transition)
def add_transitions(self, transitions):
"""
Adds a set of transitions to the FSM. If one of the transitions is
already present a FSMDuplicatedTransition exception is raised.
"""
[self.add_transition(t) for t in transitions]
def pre_transit(self):
"""
This method is called just before a transition is performed.
You may optionally implement this method.
"""
pass
@abstractmethod
def read_symbol(self):
"""
Abstract method that must be implemented by the user. When there
is no more input a FSMEndOfInput exception should be raised
to notify the FSM that no more input is available.
"""
raise FSMNotImplementedInput()
def post_transit(self):
"""
        This method is called after a successful transition between two
states is performed. You may optionally implement this method.
"""
pass
def _set_initial_state(self):
start_state = [s for s in self._states if s.start_state]
if len(start_state) > 1:
raise FSMStartStatesError()
try:
self._current_state = start_state.pop()
except IndexError:
raise FSMNoStartStateError()
def _set_final_states(self):
self._final_states = [s for s in self._states if s.final_state]
if not self._final_states:
raise FSMFinalStateError()
def _set_states(self):
self._accepted_symbols = []
self._remaining_input = True
self._set_initial_state()
self._set_final_states()
def run(self):
"""
Starts the FSM. Returns a list containing the accepted symbols
otherwise a FSMRejectedInput exception is raised.
"""
self._set_states()
while self._remaining_input:
try:
self.pre_transit()
self._accepted_symbols.append(self._current_state.transit(self))
self.post_transit()
except FSMEndOfInput:
self._remaining_input = False
if self.current_state not in self._final_states:
raise FSMRejectedInput(self._accepted_symbols, type='string')
return self._accepted_symbols
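# Illustrative sketch (not part of the original module): a minimal FSM that
# accepts strings made only of the letter 'a', using the public API above.
# The list-based input handling is an assumption made for the example.
#
# class ExampleFSM(SimpleFSM):
#     def __init__(self, word):
#         super(ExampleFSM, self).__init__()
#         self._input = list(word)
#     def read_symbol(self):
#         if not self._input:
#             raise FSMEndOfInput()
#         return self._input.pop(0)
#
# s0 = State('s0', start_state=True, final_state=True)
# fsm = ExampleFSM("aaa")
# fsm.add_state(s0)
# fsm.add_transition(Transition(s0, s0, lambda symbol: symbol == 'a'))
# print(fsm.run())  # ['a', 'a', 'a']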
| lliendo/SimpleFSM | simplefsm/__init__.py | Python | lgpl-3.0 | 8,624 |
#!/usr/bin/python
import sys
def word_count():
#lines_words = []
new_list = []
file = sys.argv[1]
my_dict = {}
# try:
fh = open(file,'r')
for lines in fh:
#print lines
lines = lines.strip()
#lines = lines.lower()
words = lines.split()
for word in words:
my_dict[word] = my_dict.get(word, 0) + 1
lines_words = list(my_dict.items())
fh.close()
#print my_dict
for k,v in lines_words:
new_list.append((v,k))
#print new_list
new_list.sort()
my_rank = len(new_list)
for i in range(len(new_list)):
print my_rank, "-" , new_list[i][1] , "(times: %d) " %(new_list[i][0])
my_rank -= 1
word_count()
| hiteshagrawal/python | info/bkcom/pack.py | Python | gpl-2.0 | 626 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This file is part of XBMC Mega Pack Addon.
Copyright (C) 2014 Wolverine ([email protected])
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see http://www.gnu.org/licenses/gpl-3.0.html
"""
class Languages_Breton():
'''Class that manages this specific menu context.'''
def open(self, plugin, menu):
menu.add_xplugins(plugin.get_xplugins(dictionaries=["Channels",
"Events", "Live", "Movies", "Sports", "TVShows"],
languages=["Breton"])) | xbmcmegapack/plugin.video.megapack.dev | resources/lib/menus/home_languages_breton.py | Python | gpl-3.0 | 1,109 |
ConnectionError = None
| vsquare95/JiyuuBot | dummympd.py | Python | gpl-3.0 | 23 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def HostPlugStoreTopologyPath(vim, *args, **kwargs):
'''This data object type is an association class that describes a Path and its
associated Device. A Path may be claimed by at most one Device.'''
obj = vim.client.factory.create('ns0:HostPlugStoreTopologyPath')
# do some validation checking...
if (len(args) + len(kwargs)) < 2:
        raise IndexError('Expected at least 2 arguments got: %d' % len(args))
required = [ 'key', 'name' ]
optional = [ 'adapter', 'channelNumber', 'device', 'lunNumber', 'target', 'targetNumber',
'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
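# Illustrative call sketch (the vim connection object and the values below are
# placeholders): only 'key' and 'name' are required, the rest are optional.
# path = HostPlugStoreTopologyPath(vim, key='key-vim.host.PlugStoreTopology.Path-example',
#                                  name='vmhba0:C0:T0:L0')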
| xuru/pyvisdk | pyvisdk/do/host_plug_store_topology_path.py | Python | mit | 1,194 |
import scipy.stats as ss
import numpy as np
#import statsmodels.api as sm
### Tests on residuals
def normal_Kolmogorov_Smirnov(sample):
"""The moon illumination expressed as a percentage.
:param astropy sun: the sun ephemeris
:param astropy moon: the moon ephemeris
:return: a numpy array like indicated the moon illumination.
:rtype: array_like
"""
mu, sigma = ss.norm.fit(sample)
#use mu sigma for anomaly, 0,1 for rescaling???
KS_stat, KS_pvalue = ss.kstest(sample, 'norm', args=(0, 1))
    # the sample is likely Gaussian-like if KS_stat (~ maximum distance between sample and theoretical distribution) -> 0
    # the null hypothesis cannot be rejected (i.e. the sample comes from a Gaussian) if KS_pvalue -> 1
KS_judgement = 0
if KS_pvalue > 0.01:
KS_judgement = 1
if KS_pvalue > 0.05:
KS_judgement = 2
return KS_stat, KS_pvalue, KS_judgement
def normal_Anderson_Darling(sample):
"""Compute a Anderson-Darling test on the sample versus a normal distribution with mu = 0, sigma = 1
:param array_like sample: the sample you want to check the "Gaussianity"
    :returns: the Anderson-Darling statistic, the Anderson-Darling critical value associated with the smallest
    significance level (1 %) and the Anderson-Darling judgement
    :rtype: float, float, int
"""
AD_stat, AD_critical_values, AD_significance_levels = ss.anderson(sample)
    # the sample is likely Gaussian-like if AD_stat (~ maximum distance between sample and theoretical distribution) -> 0
    # the null hypothesis cannot be rejected (i.e. the sample comes from a Gaussian) if AD_stat stays below the critical values
AD_judgement = 0
if AD_stat < 2*AD_critical_values[-1]:
AD_judgement = 1
if AD_stat < AD_critical_values[-1]:
AD_judgement = 2
return AD_stat, AD_critical_values[-1], AD_judgement
def normal_Shapiro_Wilk(sample):
"""Compute a Shapiro-Wilk test on the sample versus a normal distribution with mu = 0, sigma = 1
:param array_like sample: the sample you want to check the "Gaussianity"
    :returns: the Shapiro-Wilk statistic, its related p_value and the Shapiro-Wilk judgement
    :rtype: float, float, int
"""
SW_stat, SW_pvalue = ss.shapiro(sample)
# the null hypothesis can not be rejected ( i.e the distribution of sample come from a Gaussian) if SW_stat -> 1
# the null hypothesis can not be rejected ( i.e the distribution of sample come from a Gaussian) if SW_pvalue -> 1
    # Judgement made on the STATISTIC because 'W test statistic is accurate but the p-value may not be' (see scipy doc)
SW_judgement = 0
if SW_pvalue > 0.01:
SW_judgement = 1
if SW_pvalue > 0.05:
SW_judgement = 2
return SW_stat, SW_pvalue, SW_judgement
### Statistics fit quality metrics
def normalized_chi2(chi2, n_data, n_parameters):
"""Compute the chi^2/dof
:param float chi2: the chi^2
:param int n_data: the number of data_points
:param int n_parameters: the number of model parameters
:returns: the chi^2/dof and the chi2dof_judgement
:rtype: float
"""
chi2_sur_dof = chi2/(n_data-n_parameters)
chi2dof_judgement = 0
if chi2_sur_dof < 2 :
chi2dof_judgement = 2
return chi2_sur_dof,chi2dof_judgement
def Bayesian_Information_Criterion(chi2, n_data, n_parameters):
"""Compute the BIC statistic.
:param float chi2: the chi^2
:param int n_data: the number of data_points
:param int n_parameters: the number of model parameters
    :returns: the BIC
:rtype: float
"""
BIC = chi2 + n_parameters*np.log(n_data)
return BIC
def Akaike_Information_Criterion(chi2, n_parameters):
"""Compute the BIC statistic.
:param float chi2: the chi^2
:param int n_parameters: the number of model parameters
:returns: the chi^2/dof
:rtype: float
"""
AIC = chi2 + 2*n_parameters
return AIC
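# Illustrative usage sketch (the numbers below are made up, not pyLIMA outputs):
# residuals = np.random.normal(0, 1, 1000)
# KS_stat, KS_pvalue, KS_judgement = normal_Kolmogorov_Smirnov(residuals)
# chi2_dof, chi2dof_judgement = normalized_chi2(chi2=950.0, n_data=1000, n_parameters=5)
# BIC = Bayesian_Information_Criterion(chi2=950.0, n_data=1000, n_parameters=5)
# AIC = Akaike_Information_Criterion(chi2=950.0, n_parameters=5)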
| ebachelet/pyLIMA | pyLIMA/microlstats.py | Python | gpl-3.0 | 4,184 |
# Generated by Django 2.2.13 on 2020-08-20 14:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tasks', '0029_sites_blank'),
]
operations = [
migrations.AddField(
model_name='task',
name='available',
field=models.BooleanField(default=True, help_text='Designates whether this task is generally available for projects.', verbose_name='Available'),
),
]
| rdmorganiser/rdmo | rdmo/tasks/migrations/0030_available.py | Python | apache-2.0 | 484 |
from __future__ import print_function
import os
import re
def get_version():
node_version_h = os.path.join(
os.path.dirname(__file__),
'..',
'src',
'node_version.h')
f = open(node_version_h)
regex = '^#define NODE_MODULE_VERSION [0-9]+'
for line in f:
if re.match(regex, line):
major = line.split()[2]
return major
raise Exception('Could not find pattern matching %s' % regex)
if __name__ == '__main__':
print(get_version())
| MTASZTAKI/ApertusVR | plugins/languageAPI/jsAPI/3rdParty/nodejs/10.1.0/source/tools/getmoduleversion.py | Python | mit | 477 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-07-07 14:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('salt_observer', '0005_auto_20160615_1439'),
]
operations = [
migrations.RemoveField(
model_name='minion',
name='grains',
),
migrations.AddField(
model_name='minion',
name='data',
field=models.TextField(default='{}'),
),
]
| hs-hannover/salt-observer | salt_observer/migrations/0006_auto_20160707_1629.py | Python | mit | 554 |
"""Modulo que contiene la clase directorio de funciones
-----------------------------------------------------------------
Compilers Design Project
Tec de Monterrey
Julio Cesar Aguilar Villanueva A01152537
Jose Fernando Davila Orta A00999281
-----------------------------------------------------------------
DOCUMENTATION: For complete Documentation see UserManual.pdf"""
from stack import Stack
from function import Function
from variable import Variable
def get_var_type(var_type):
    '''Returns the identifier for each variable type.'''
if var_type == 'int':
return 'i'
elif var_type == 'double':
return 'd'
elif var_type == 'string':
return 's'
elif var_type == 'bool':
return 'b'
def get_var_scope(scope):
    '''Returns the identifier for each scope type.'''
if scope == 'global':
return 'g'
elif scope == 'main':
return 'l'
else:
return 't'
def get_var_name(var_type, scope, var_name):
    '''Construct the name of a variable based on
    the type, scope and variable name.'''
name_type = get_var_type(var_type)
name_scope = get_var_scope(scope)
name = name_type + name_scope + var_name
return name
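# Examples (illustrative): get_var_name('int', 'global', 'x') returns 'igx',
# while get_var_name('double', 'main', 'total') returns 'dltotal'.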
class FunctionsDir(object):
    '''Functions are entries in the functions dictionary.
    Functions are objects with dictionaries of variables.
    The program's global scope starts with a global function
    without variables.
    Scope is the function_id of each function.'''
def __init__(self):
        '''Initialization method.'''
self.functions = {}
self.functions['global'] = Function()
self.scope = 'global'
        # Defines whether variable existence is being evaluated or variables are being added to the directory
self.evaluating = True
        # Indicates whether a function's parameter list needs to be updated
self.updating_params = False
        # Indicates whether a variable will be read with the read function
self.reading = False
        # Last ID token, used for read
self.last_id = Stack()
        # Last type token read by the functions directory
self.last_type = None
        '''Functions that are currently being called.
        A stack is used for nested function calls.'''
self.call_function = Stack()
        '''Number of arguments being used when calling a function.
        A stack is used for nested calls.'''
self.call_arguments = Stack()
self.last_read = Stack()
def add_function(self, function_id):
        '''Add function to functions directory. Verify if function already exists'''
if self.functions.get(function_id, None) is not None:
raise NameError('Error: 1001 Function already declared! Function: ' + str(function_id))
else:
self.functions[function_id] = Function()
def validate_function(self, function_id):
'''Validate function exists'''
if self.functions.get(function_id, None) is None:
raise NameError('Error: 1002 Function not declared! Name: ' + str(function_id))
def increase_expected_arguments(self):
        '''Calls the increase_expected_arguments method of the Function class.'''
self.functions[self.scope].increase_expected_arguments()
def update_function_params(self, var_id, var_type):
        '''Calls the update_params method of the Function class.'''
self.functions[self.scope].update_params(var_id, var_type)
def set_return_type(self, function_return_type):
        '''Calls the set_return_type method of the Function class.'''
self.functions[self.scope].set_return_type(function_return_type)
def set_func_quad(self, func_quad):
        '''Calls the set_func_quad method of the Function class.'''
self.functions[self.scope].set_func_quad(func_quad)
def set_scope(self, scope):
        '''Changes the current scope of the functions directory to the scope it receives.'''
self.scope = scope
def reset_scope(self):
        '''Resets the scope to the global scope.'''
self.scope = 'global'
# Add variable to current function scope
def add_var(self, variable_id, var_type, value=0, size=1):
        '''Adds a variable to a Function's variables dictionary.'''
if self.functions[self.scope].variables_dict.get(variable_id, None) is None:
var_name = get_var_name(var_type, self.scope, variable_id)
self.functions[self.scope].variables_dict[variable_id] = Variable(var_name, value, var_type, self.scope, size)
else:
variable_type = self.functions[self.scope].variables_dict[variable_id].get_type()
msg = 'Error 2001: Variable already declared! ' + str(variable_id) + '. TYPE: ' + variable_type
raise NameError(msg)
def add_for_var(self, variable_id, var_type):
        '''Adds a variable to the current scope's dictionary; if it already exists, its value is overwritten.
        Raises an error if it exists and is not of type int.'''
if self.functions[self.scope].variables_dict.get(variable_id, None) is None:
var_name = get_var_name(var_type, self.scope, variable_id)
self.functions[self.scope].variables_dict[variable_id] = Variable(var_name, -1, var_type, self.scope, 1)
else:
variable_type = self.functions[self.scope].variables_dict[variable_id].get_type()
if variable_type != 'int':
msg = 'Error 2001: Variable already declared! ' + str(variable_id) + '. TYPE: ' + variable_type
raise NameError(msg)
else:
self.functions[self.scope].variables_dict[variable_id].value = -1
def validate_variable(self, variable_id):
        '''Looks up a variable in the current scope.'''
if self.functions[self.scope].variables_dict.get(variable_id, None) is None:
            # Look up the variable in the global scope
if self.functions['global'].variables_dict.get(variable_id, None) is None:
raise NameError('Error 2002: Variable not declared! VAR: ' + variable_id)
def start_evaluating(self):
        '''Indicates that the functions directory is evaluating the existence of variables.'''
self.evaluating = True
def finish_evaluating(self):
        '''Indicates that the functions directory stops evaluating variables.'''
self.evaluating = False
def set_type(self, last_type):
        '''Sets the last type token that was read.'''
self.last_type = last_type
def get_func_dir(self):
        '''Returns the functions dictionary.'''
return self.functions
def get_var(self, variable_id):
        '''Gets the data of the variable from the
        functions dictionary in the current or the global scope.'''
if variable_id in self.functions[self.scope].variables_dict:
return self.functions[self.scope].variables_dict.get(variable_id)
elif variable_id in self.functions['global'].variables_dict:
return self.functions['global'].variables_dict.get(variable_id)
return None
def set_call_function(self, function_id):
        '''Sets the id of the function being called
        once its existence in the functions dictionary has been validated.'''
self.call_function.push(function_id)
self.call_arguments.push(0)
def increase_call_arguments(self):
        '''Increments the number of arguments being used to call a function.
        Gets the top of the stack, increments it and pushes it back onto the stack.'''
curr = self.call_arguments.pop()
curr += 1
self.call_arguments.push(curr)
def update_var_size(self, size):
        '''Updates the size of a variable in case it is dimensioned (an array).'''
if size <= 0:
raise ValueError('Error 7005: Array size must be a positive integer')
else:
self.functions[self.scope].variables_dict[self.last_id.top].size = size
self.functions[self.scope].variables_dict[self.last_id.top].is_dim = True
def validate_call_arguments(self):
        '''Validates that the number of arguments used in a function call
        equals the number of parameters the function expects to receive.'''
if self.functions[self.call_function.top].expected_arguments != self.call_arguments.top:
if self.functions[self.call_function.top].expected_arguments > self.call_arguments.top:
msg = 'Error 3001: Missing arguments in function call for function: ' + str(self.call_function)
elif self.functions[self.call_function.top].expected_arguments < self.call_arguments.top:
msg = 'Error 3002: Too many arguments in function call for function: ' + str(self.call_function)
msg += '. Expected arguments: ' + str(self.functions[self.call_function.top].expected_arguments) + '. Got: ' + str(self.call_arguments.top)
self.call_arguments.pop()
self.call_function.pop()
raise ValueError(msg)
else:
self.call_arguments.pop()
return self.call_function.pop()
def validate_arg_type(self, var_type):
        '''Validates that the type of the argument passed matches the expected type.'''
expected_type = self.functions[self.call_function.top].params[self.call_arguments.top - 1][1]
if var_type != expected_type:
msg = 'Error 3003: Expected type in function call ' + str(self.scope) + ': ' + expected_type
msg += '. Got: ' + var_type
raise ValueError(msg)
return self.functions[self.call_function.top].params[self.call_arguments.top - 1]
def verify_var_dim(self):
        '''Verifies that the variable id refers to a dimensioned (array) variable.'''
var = self.get_var(self.last_id.top)
if not var.is_dim:
raise ValueError('Error 7003: Variable is not array')
@property
def current_scope(self):
        '''Property of the functions directory to get the current scope.'''
return self.scope
def printeame(self):
        '''Helper function to print the contents of the functions directory.'''
print('************ Functions Directory ************\n')
for key, val in self.functions.iteritems():
print(str(val.return_type) + ' ' + str(key) + '('),
for var in val.params:
print(str(var[1]) + ' ' + str(var[0]) + ', '),
print('): quad_num ' + str(val.get_function_quad()))
for k, vals in val.variables_dict.iteritems():
print('\t' + vals.get_type() + ' ' + k + ' = ' + str(vals.get_value()) + ' size: ' + str(vals.get_size()))
print('')
print('*********************************************')
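# Illustrative usage sketch (identifiers below are placeholders):
# func_dir = FunctionsDir()
# func_dir.add_function('main')
# func_dir.set_scope('main')
# func_dir.add_var('x', 'int')        # stored with direction name 'ilx'
# func_dir.validate_variable('x')
# func_dir.reset_scope()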
| davilajose23/ProjectCobra | functions_dir.py | Python | mit | 10,907 |
# Copyright (c) 2016 NEC Technologies Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import network
L2_ADJACENCY = 'l2_adjacency'
ALIAS = L2_ADJACENCY
IS_SHIM_EXTENSION = False
IS_STANDARD_ATTR_EXTENSION = False
NAME = 'L2 Adjacency'
API_PREFIX = ''
DESCRIPTION = 'Display L2 Adjacency for Neutron Networks.'
UPDATED_TIMESTAMP = '2016-04-12T16:00:00-00:00'
RESOURCE_NAME = network.RESOURCE_NAME
COLLECTION_NAME = network.COLLECTION_NAME
RESOURCE_ATTRIBUTE_MAP = {
COLLECTION_NAME: {
L2_ADJACENCY: {
'allow_post': False,
'allow_put': False,
'is_visible': True
}
}
}
SUB_RESOURCE_ATTRIBUTE_MAP = {}
ACTION_MAP = {}
REQUIRED_EXTENSIONS = []
OPTIONAL_EXTENSIONS = []
ACTION_STATUS = {}
| openstack/neutron-lib | neutron_lib/api/definitions/l2_adjacency.py | Python | apache-2.0 | 1,334 |
from __future__ import unicode_literals
from django.apps import AppConfig
class D4S2ApiV1Config(AppConfig):
name = 'd4s2_api_v1'
| Duke-GCB/DukeDSHandoverService | d4s2_api_v1/apps.py | Python | mit | 136 |
"""
This module defines the decorator responsible for checking, in the
application's views, that the user trying to access the view is logged in
and has superuser permissions.
"""
__author__ = 'llerena'
| jmllerena/django-easydata | easydata/decorators/__init__.py | Python | gpl-3.0 | 236 |
#!/usr/bin/env python3
def parse_file(filename):
amino_acids=[]
for line in open(filename):
name, pk1, pk2, pk3 = line.strip().split()
if pk3=="na":
amino_acids.append((name, float(pk1),float(pk2)))
else:
amino_acids.append((name, float(pk1),float(pk2), float(pk3)))
return amino_acids
def determine_closeness(parameters, amino_acid):
closeness_parameter=0
for index,i in enumerate(parameters):
closeness_parameter+=(i-amino_acid[index+1])**2
return closeness_parameter/len(parameters)
def closest_amino_acids(parameters, amino_acids, count=3):
valid_amino_acids=[i for i in amino_acids if (len(i)-1)==len(parameters)]
valid_amino_acids.sort(key=lambda x:determine_closeness(parameters, x))
return [(i, determine_closeness(parameters, i)) for i in valid_amino_acids[:count]]
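# Illustrative usage (assumes a whitespace-separated data file with lines such
# as "Glycine 2.34 9.60 na"; the filename below is a placeholder):
# acids = parse_file("amino_acids.txt")
# print(closest_amino_acids((2.3, 9.6), acids))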
| Bolt64/my_code | Code Snippets/amino_acid.py | Python | mit | 870 |
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import datetime
import GithubException
import Consts
class _NotSetType:
def __repr__(self):
return "NotSet"
value = None
NotSet = _NotSetType()
class _ValuedAttribute:
def __init__(self, value):
self.value = value
class _BadAttribute:
def __init__(self, value, expectedType, exception=None):
self.__value = value
self.__expectedType = expectedType
self.__exception = exception
@property
def value(self):
raise GithubException.BadAttributeException(self.__value, self.__expectedType, self.__exception)
class GithubObject(object):
"""
Base class for all classes representing objects returned by the API.
"""
'''
A global debug flag to enable header validation by requester for all objects
'''
CHECK_AFTER_INIT_FLAG = False
@classmethod
def setCheckAfterInitFlag(cls, flag):
cls.CHECK_AFTER_INIT_FLAG = flag
def __init__(self, requester, headers, attributes, completed):
self._requester = requester
self._initAttributes()
self._storeAndUseAttributes(headers, attributes)
# Ask requester to do some checking, for debug and test purpose
# Since it's most handy to access and kinda all-knowing
if self.CHECK_AFTER_INIT_FLAG: # pragma no branch (Flag always set in tests)
requester.check_me(self)
def _storeAndUseAttributes(self, headers, attributes):
# Make sure headers are assigned before calling _useAttributes
# (Some derived classes will use headers in _useAttributes)
self._headers = headers
self._rawData = attributes
self._useAttributes(attributes)
@property
def raw_data(self):
"""
:type: dict
"""
self._completeIfNeeded()
return self._rawData
@property
def raw_headers(self):
"""
:type: dict
"""
self._completeIfNeeded()
return self._headers
@staticmethod
def _parentUrl(url):
return "/".join(url.split("/")[: -1])
@staticmethod
def __makeSimpleAttribute(value, type):
if value is None or isinstance(value, type):
return _ValuedAttribute(value)
else:
return _BadAttribute(value, type)
@staticmethod
def __makeSimpleListAttribute(value, type):
if isinstance(value, list) and all(isinstance(element, type) for element in value):
return _ValuedAttribute(value)
else:
return _BadAttribute(value, [type])
@staticmethod
def __makeTransformedAttribute(value, type, transform):
if value is None:
return _ValuedAttribute(None)
elif isinstance(value, type):
try:
return _ValuedAttribute(transform(value))
except Exception, e:
return _BadAttribute(value, type, e)
else:
return _BadAttribute(value, type)
@staticmethod
def _makeStringAttribute(value):
return GithubObject.__makeSimpleAttribute(value, (str, unicode))
@staticmethod
def _makeIntAttribute(value):
return GithubObject.__makeSimpleAttribute(value, (int, long))
@staticmethod
def _makeBoolAttribute(value):
return GithubObject.__makeSimpleAttribute(value, bool)
@staticmethod
def _makeDictAttribute(value):
return GithubObject.__makeSimpleAttribute(value, dict)
@staticmethod
def _makeTimestampAttribute(value):
return GithubObject.__makeTransformedAttribute(value, (int, long), datetime.datetime.utcfromtimestamp)
@staticmethod
def _makeDatetimeAttribute(value):
def parseDatetime(s):
if len(s) == 24: # pragma no branch (This branch was used only when creating a download)
# The Downloads API has been removed. I'm keeping this branch because I have no mean
# to check if it's really useless now.
return datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%S.000Z") # pragma no cover (This branch was used only when creating a download)
elif len(s) == 25:
return datetime.datetime.strptime(s[:19], "%Y-%m-%dT%H:%M:%S") + (1 if s[19] == '-' else -1) * datetime.timedelta(hours=int(s[20:22]), minutes=int(s[23:25]))
else:
return datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%SZ")
return GithubObject.__makeTransformedAttribute(value, (str, unicode), parseDatetime)
def _makeClassAttribute(self, klass, value):
return GithubObject.__makeTransformedAttribute(value, dict, lambda value: klass(self._requester, self._headers, value, completed=False))
@staticmethod
def _makeListOfStringsAttribute(value):
return GithubObject.__makeSimpleListAttribute(value, (str, unicode))
@staticmethod
def _makeListOfIntsAttribute(value):
return GithubObject.__makeSimpleListAttribute(value, int)
@staticmethod
def _makeListOfListOfStringsAttribute(value):
return GithubObject.__makeSimpleListAttribute(value, list)
def _makeListOfClassesAttribute(self, klass, value):
if isinstance(value, list) and all(isinstance(element, dict) for element in value):
return _ValuedAttribute([klass(self._requester, self._headers, element, completed=False) for element in value])
else:
return _BadAttribute(value, [dict])
def _makeDictOfStringsToClassesAttribute(self, klass, value):
if isinstance(value, dict) and all(isinstance(key, (str, unicode)) and isinstance(element, dict) for key, element in value.iteritems()):
return _ValuedAttribute(dict((key, klass(self._requester, self._headers, element, completed=False)) for key, element in value.iteritems()))
else:
return _BadAttribute(value, {(str, unicode): dict})
@property
def etag(self):
'''
:type: str
'''
return self._headers.get(Consts.RES_ETAG)
@property
def last_modified(self):
'''
:type: str
'''
return self._headers.get(Consts.RES_LAST_MODIFED)
class NonCompletableGithubObject(GithubObject):
def _completeIfNeeded(self):
pass
class CompletableGithubObject(GithubObject):
def __init__(self, requester, headers, attributes, completed):
GithubObject.__init__(self, requester, headers, attributes, completed)
self.__completed = completed
def __eq__(self, other):
return other.__class__ is self.__class__ and other._url.value == self._url.value
def __ne__(self, other):
return not self == other
def _completeIfNotSet(self, value):
if value is NotSet:
self._completeIfNeeded()
def _completeIfNeeded(self):
if not self.__completed:
self.__complete()
def __complete(self):
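        # Lazily fetch the full representation of this resource with a GET on its url,
        # then store the returned headers/attributes and mark the object as completed.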
headers, data = self._requester.requestJsonAndCheck(
"GET",
self._url.value
)
self._storeAndUseAttributes(headers, data)
self.__completed = True
def update(self):
'''
        Check and update the object with a conditional request
        :rtype: bool -- True if the object changed and was refreshed, False if the server returned 304 (unchanged)
'''
conditionalRequestHeader = dict()
if self.etag is not None:
conditionalRequestHeader[Consts.REQ_IF_NONE_MATCH] = self.etag
if self.last_modified is not None:
conditionalRequestHeader[Consts.REQ_IF_MODIFIED_SINCE] = self.last_modified
status, responseHeaders, output = self._requester.requestJson(
"GET",
self._url.value,
headers=conditionalRequestHeader
)
if status == 304:
return False
else:
headers, data = self._requester._Requester__check(status, responseHeaders, output)
self._storeAndUseAttributes(headers, data)
self.__completed = True
return True
| ARMmbed/yotta_osx_installer | workspace/lib/python2.7/site-packages/github/GithubObject.py | Python | apache-2.0 | 9,896 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2019, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# Commented out to avoid FLAKE8 issues with the import order
# of libraries that are not installed on the execution server
# TODO: requires a fix
# import os
# from mlflow.tracking import MlflowClient
# from ray.tune.logger import DEFAULT_LOGGERS, MLFLowLogger
# from nupic.research.archive.dynamic_sparse.common.utils import run_ray
# # alternative initialization based on configuration
# exp_config = dict(
# device="cuda",
# network="MLPHeb",
# dataset_name="MNIST",
# input_size=784,
# num_classes=10,
# model="SparseModel",
# data_dir="~/nta/data",
# on_perc=0.2,
# batch_size_train=10,
# batch_size_test=10,
# debug_sparse=True,
# )
# client = MlflowClient()
# exp_config["mlflow_experiment_id"] = client.create_experiment("test_mlflow5")
# # run
# tune_config = dict(
# name=__file__,
# num_samples=3,
# local_dir=os.path.expanduser("~/nta/results"),
# checkpoint_freq=0,
# checkpoint_at_end=False,
# stop={"training_iteration": 5},
# resources_per_trial={"cpu": 1, "gpu": 1},
# verbose=2,
# loggers=DEFAULT_LOGGERS + (MLFLowLogger,),
# )
# run_ray(tune_config, exp_config)
# # df = mlflow.search_runs([experiment_id])
| mrcslws/nupic.research | projects/archive/dynamic_sparse/runs/run_test_mlflow.py | Python | agpl-3.0 | 2,172 |
#
#
# This example shows the diffraction by a Si 111 crystal calculated in a variety of modes (see main):
#
# - make_plots( calculate_standard_interface() )
# using the standard interface via definition of a photon grid (DiffractionSetupSweeps) and
# the DiffractionResult object
#
# - calculate_with_complex_amplitude_photon(method=0 or 1)
# Calculates diffraction of many photons (0) or a photon bunch (1) using ComplexAmplitudePhoton,
#   i.e. a photon carrying an electric field amplitude.
#
# - calculate_with_polarized_photon(method=0 or 1)
# Calculates Stokes parameters after diffraction of many photons (0) or a photon bunch (1) using
#   PolarizedPhoton, i.e. photons carrying information on the Stokes parameters.
#
#
import numpy
# for plots
from srxraylib.plot.gol import plot
from crystalpy.diffraction.GeometryType import BraggDiffraction
from crystalpy.diffraction.DiffractionSetup import DiffractionSetup
from crystalpy.diffraction.DiffractionSetupSweeps import DiffractionSetupSweeps
from crystalpy.diffraction.Diffraction import Diffraction
from crystalpy.polarization.MuellerDiffraction import MuellerDiffraction
from crystalpy.util.StokesVector import StokesVector
from crystalpy.util.Vector import Vector
from crystalpy.util.Photon import Photon
from crystalpy.util.ComplexAmplitudePhoton import ComplexAmplitidePhoton
from crystalpy.util.PolarizedPhoton import PolarizedPhoton
from crystalpy.util.ComplexAmplitudePhotonBunch import ComplexAmplitudePhotonBunch
from crystalpy.util.PolarizedPhotonBunch import PolarizedPhotonBunch
def calculate_standard_interface():
# Create a diffraction setup.
print("\nCreating a diffraction setup...")
diffraction_setup = DiffractionSetupSweeps(geometry_type = BraggDiffraction(), # GeometryType object
crystal_name = "Si", # string
                                               thickness              = 1e-2,               # meters
miller_h = 1, # int
miller_k = 1, # int
miller_l = 1, # int
asymmetry_angle = 0,#10.0*numpy.pi/180., # radians
azimuthal_angle = 0.0, # radians
energy_min = 8000.0, # eV
energy_max = 8000.0, # eV
energy_points = 1, # int
angle_deviation_min = -100e-6, # radians
angle_deviation_max = 100e-6, # radians
angle_deviation_points = 500) # int
# Create a Diffraction object.
diffraction = Diffraction()
# Create a DiffractionResult object holding the results of the diffraction calculations.
print("\nCalculating the diffraction results...")
diffraction_result = diffraction.calculateDiffraction(diffraction_setup)
#
# Now the Mueller/Stokes calculation from the diffraction results
#
mueller_diffraction = MuellerDiffraction(diffraction_result,
StokesVector([1,0,1,0]),
inclination_angle=0.0) #np.pi*45/180)
# Create a MullerResult object.
print("\nCalculating the Stokes vector...")
mueller_result = mueller_diffraction.calculate_stokes()
return mueller_result
def make_plots(mueller_result):
#
# plots
#
diffraction_result = mueller_result.diffraction_result
photon_energies = diffraction_result.energies()
deviation_angles = diffraction_result.angleDeviations()
print("Number of energy points: %d"%photon_energies.size)
print("Number of angular points: %d"%deviation_angles.size)
print("_intensity shape: ",diffraction_result._intensities.shape)
print("_phases shape: ",diffraction_result._phases.shape)
from srxraylib.plot.gol import plot, four_plots
plot( 1e6*deviation_angles,diffraction_result._intensities[0,:,0],
1e6*deviation_angles,diffraction_result._intensities[0,:,1],
1e6*deviation_angles,diffraction_result._intensities[0,:,2],
title="Intensity for photon energy = %4.3f "%photon_energies[0],
xtitle="Deviation angle urad",ytitle="Reflectivity",
legend=['s-pol','p-pol','p/s ratio',],show=False)
plot( 1e6*deviation_angles,diffraction_result._phases[0,:,0],
1e6*deviation_angles,diffraction_result._phases[0,:,1],
1e6*deviation_angles,diffraction_result._phases[0,:,2],
title="Phase for photon energy = %4.3f "%photon_energies[0],
xtitle="Deviation angle urad",ytitle="Reflectivity",
legend=['s-pol','p-pol','p minus s pol'],show=False)
# Stokes
four_plots(1e6*deviation_angles,mueller_result._s0[0],
1e6*deviation_angles,mueller_result._s1[0],
1e6*deviation_angles,mueller_result._s2[0],
1e6*deviation_angles,mueller_result._s3[0],
title=["S0","S1","S2","S3"],xtitle="Deviation angle [urad]",
yrange=[-1,1],show=False)
# Plot the degree of circular polarization.
plot(1e6*deviation_angles,mueller_result._s3[0]/mueller_result._s0[0],yrange=[-1,1],
title="Circular Polarization S3/S0",xtitle="Deviation angle [urad]",ytitle="S3/S0",show=True)
#
#
#
def calculate_with_complex_amplitude_photon(method=0):
# Create a diffraction setup.
print("\nCreating a diffraction setup...")
diffraction_setup = DiffractionSetup(geometry_type = BraggDiffraction(), # GeometryType object
crystal_name = "Si", # string
thickness = 1e-2, # meters
miller_h = 1, # int
miller_k = 1, # int
miller_l = 1, # int
asymmetry_angle = 0,#10.0*numpy.pi/180., # radians
azimuthal_angle = 0.0) # radians # int
energy = 8000.0 # eV
angle_deviation_min = -100e-6 # radians
angle_deviation_max = 100e-6 # radians
angle_deviation_points = 500
angle_step = (angle_deviation_max-angle_deviation_min)/angle_deviation_points
bragg_angle = diffraction_setup.angleBragg(energy)
print("Bragg angle for E=%f eV is %f deg"%(energy,bragg_angle*180.0/numpy.pi))
# Create a Diffraction object.
diffraction = Diffraction()
#
# get wavevector with incident direction matching Bragg angle
#
K0 = diffraction_setup.getK0(energy)
K0unitary = K0.getNormalizedVector()
print("K0",K0.components())
# method = 0 # diffraction for individual photons
# method = 1 # diffraction for bunch
ZZ = numpy.zeros(angle_deviation_points)
if method == 0:
# deviations = numpy.zeros(angle_deviation_points)
intensityS = numpy.zeros(angle_deviation_points)
intensityP = numpy.zeros(angle_deviation_points)
bunch_out = ComplexAmplitudePhotonBunch()
for ia in range(angle_deviation_points):
deviation = angle_deviation_min + ia * angle_step
# angle = deviation + bragg_angle
# yy = numpy.cos(angle)
# zz = - numpy.abs(numpy.sin(angle))
# photon = ComplexAmplitidePhoton(energy_in_ev=energy,direction_vector=Vector(0.0,yy,zz))
            # minus sign in angle is to perform a clockwise rotation when deviation increases
Vin = K0unitary.rotateAroundAxis(Vector(1,0,0),-deviation)
photon = ComplexAmplitidePhoton(energy_in_ev=energy,direction_vector=Vin)
photon_out = diffraction.calculateDiffractedComplexAmplitudePhoton(diffraction_setup,photon)
bunch_out.addPhoton(photon_out)
ZZ[ia] = deviation
elif method == 1: # diffraction for bunch
bunch_in = ComplexAmplitudePhotonBunch()
for ia in range(angle_deviation_points):
deviation = angle_deviation_min + ia * angle_step
# angle = deviation + bragg_angle
# yy = numpy.cos(angle)
# zz = - numpy.abs(numpy.sin(angle))
# photon = ComplexAmplitidePhoton(energy_in_ev=energy,direction_vector=Vector(0.0,yy,zz))
            # minus sign in angle is to perform a clockwise rotation when deviation increases
Vin = K0unitary.rotateAroundAxis(Vector(1,0,0),-deviation)
photon = ComplexAmplitidePhoton(energy_in_ev=energy,direction_vector=Vin)
bunch_in.addPhoton( photon )
ZZ[ia] = angle_deviation_min + ia * angle_step
bunch_out = diffraction.calculateDiffractedComplexAmplitudePhotonBunch(diffraction_setup,bunch_in)
bunch_out_dict = bunch_out.toDictionary()
print(bunch_out_dict.keys())
plot(1e6*ZZ,bunch_out_dict["intensityS"],1e6*ZZ,bunch_out_dict["intensityP"],
xtitle="theta - thetaB [urad]",title="Reflectivity calculation using ComplexAmplitudePhoton method:%d"%method,
legend=["Sigma","Pi"])
#
#
#
def calculate_with_polarized_photon(method=0):
# Create a diffraction setup.
print("\nCreating a diffraction setup...")
diffraction_setup = DiffractionSetup(geometry_type = BraggDiffraction(), # GeometryType object
crystal_name = "Si", # string
                                         thickness              = 1e-2, # meters
miller_h = 1, # int
miller_k = 1, # int
miller_l = 1, # int
asymmetry_angle = 0,#10.0*numpy.pi/180., # radians
azimuthal_angle = 0.0) # radians # int
energy = 8000.0 # eV
angle_deviation_min = -100e-6 # radians
angle_deviation_max = 100e-6 # radians
angle_deviation_points = 500
angle_step = (angle_deviation_max-angle_deviation_min)/angle_deviation_points
bunch_in = PolarizedPhotonBunch()
bragg_angle = diffraction_setup.angleBragg(energy)
print("Bragg angle for E=%f eV is %f deg"%(energy,bragg_angle*180.0/numpy.pi))
# Create a Diffraction object.
diffraction = Diffraction()
#
# get wavevector with incident direction matching Bragg angle
#
K0 = diffraction_setup.getK0(energy)
K0unitary = K0.getNormalizedVector()
print("K0",K0.components())
# method = 0 # diffraction for individual photons
# method = 1 # diffraction for bunch
ZZ = numpy.zeros(angle_deviation_points)
if method == 0:
bunch_out = PolarizedPhotonBunch()
for ia in range(angle_deviation_points):
deviation = angle_deviation_min + ia * angle_step
# angle = deviation + bragg_angle
# yy = numpy.cos(angle)
# zz = - numpy.abs(numpy.sin(angle))
# photon = PolarizedPhoton(energy_in_ev=energy,direction_vector=Vector(0.0,yy,zz),
# stokes_vector=StokesVector([1,0,1,0]))
            # minus sign in angle is to perform a clockwise rotation when deviation increases
Vin = K0unitary.rotateAroundAxis(Vector(1,0,0),-deviation)
photon = PolarizedPhoton(energy_in_ev=energy,direction_vector=Vin,
stokes_vector=StokesVector([1,0,1,0]))
photon_out = diffraction.calculateDiffractedPolarizedPhoton(diffraction_setup,
incoming_polarized_photon=photon,
inclination_angle=0.0)
bunch_out.addPhoton( photon_out )
ZZ[ia] = angle_deviation_min + ia * angle_step
elif method == 1: # diffraction for bunch
for ia in range(angle_deviation_points):
deviation = angle_deviation_min + ia * angle_step
# angle = deviation + bragg_angle
# yy = numpy.cos(angle)
# zz = - numpy.abs(numpy.sin(angle))
# photon = PolarizedPhoton(energy_in_ev=energy,direction_vector=Vector(0.0,yy,zz),
# stokes_vector=StokesVector([1,0,1,0]))
            # minus sign in angle is to perform a clockwise rotation when deviation increases
Vin = K0unitary.rotateAroundAxis(Vector(1,0,0),-deviation)
photon = PolarizedPhoton(energy_in_ev=energy,direction_vector=Vin,
stokes_vector=StokesVector([1,0,1,0]))
bunch_in.addPhoton( photon )
ZZ[ia] = angle_deviation_min + ia * angle_step
bunch_out = diffraction.calculateDiffractedPolarizedPhotonBunch(diffraction_setup,bunch_in,0.0)
bunch_out_dict = bunch_out.toDictionary()
plot(1e6*ZZ,bunch_out_dict["s0"],1e6*ZZ,bunch_out_dict["s1"],legend=["S0","S1"],
xtitle="theta - thetaB [urad]",title="Polarized reflectivity calculation using method %d"%method)
#
# main
#
if __name__ == "__main__":
make_plots( calculate_standard_interface() )
calculate_with_complex_amplitude_photon(method=0)
calculate_with_complex_amplitude_photon(method=1)
calculate_with_polarized_photon(method=0)
calculate_with_polarized_photon(method=1) | edocappelli/crystalpy | crystalpy/examples/Si111.py | Python | mit | 15,040 |
# -*- coding:utf_8 -*-
from datetime import date
import sys
import Bilibili, Youku, Iqiyi, PPTV, AcFun
import Tools
class CLIApp:
def __init__(self, keywords):
self.errorCount = 0
self.bilibili = Bilibili.BiliBili()
self.acfun = AcFun.AcFun()
self.pptv = PPTV.PPTV()
self.iqiyi = Iqiyi.Iqiyi()
self.youku = Youku.Youku()
self.en_keywords = keywords
# For Chinese printing orz
self.charset = sys.getfilesystemencoding()
def printNotice(self):
"""Print notice info"""
print u"""
注意 Notice:
1. Bilibili 的番组在今天之前的两天显示的是正常的已经更新的内容,而今天之后标记的更新信息均为将要更新的内容,而不是已经更新的内容,今天则标记的是今天将要更新的内容,也可能是已经更新的,如:今天是周三,则周一、周二显示的是更新之后的内容,周四、周五、周六、周日显示的是即将更新的内容;
2. 不显示更新信息的番剧可能是大长篇,也有可能是平台自制的节目;
3. 今天是 {0}
""".format(date.today().strftime('%A'))
def cmdPrint(self, bclass):
"""Print out the Bangumi class"""
if bclass.errorFlag:
print (bclass.name + u' 出错啦 QAQ').encode(self.charset)
self.errorCount += 1
else:
print '--------------------------------------------------------------------------------'
print bclass.name.encode(self.charset).center(80)
print '--------------------------------------------------------------------------------'
for i in range(7):
print '===== {0} ====='.format(Tools.dow2string(i).encode(self.charset)).center(80)
for j in bclass.bangumi[i]:
print j[0].encode(self.charset).rjust(38), ' -- ', j[1].encode(self.charset)
def cmdSearch(self, bclass):
'''Print the result to the console.'''
keywords = []
for keyword in self.en_keywords:
keywords.append(keyword.decode(self.charset))
result = bclass.search(keywords)
if len(result) != 0:
print '===== {0} ====='.format(bclass.name.encode(self.charset)).center(80)
for b in result:
print (b[1] + ' - ' + b[2] + ' - ' + Tools.dow2string(b[0])).encode(self.charset)
def run(self):
'''Main process'''
# Title lol
print u'*************************************'.encode(self.charset).center(80)
print u'* Whoami 的中国视频网站动画番剧列表 *'.encode(self.charset).center(80)
print u'* 命令行版 *'.encode(self.charset).center(80)
print u'*************************************'.encode(self.charset).center(80)
if len(self.en_keywords) == 0:
# Print all
self.cmdPrint(self.acfun)
self.cmdPrint(self.bilibili)
self.cmdPrint(self.iqiyi)
self.cmdPrint(self.pptv)
self.cmdPrint(self.youku)
else:
# Print search result
self.cmdSearch(self.acfun)
self.cmdSearch(self.bilibili)
self.cmdSearch(self.iqiyi)
self.cmdSearch(self.pptv)
self.cmdSearch(self.youku)
# End statistics
print u'\n出错的网站数量:{}'.format(self.errorCount).encode(self.charset)
self.printNotice()
if __name__ == '__main__':
app = CLIApp(sys.argv[1:])
app.run()
| MrWhoami/WhoamiBangumi | cli.py | Python | mit | 3,577 |
import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-access-dev-stat',
version='0.1',
packages=['access_dev_stat'],
include_package_data=True,
license='BSD License', # example license
description='calculate the number of devices which request each api',
long_description=README,
url='http://www.example.com/',
author='jingping yi',
author_email='[email protected]',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License', # example license
'Operating System :: OS Independent',
'Programming Language :: Python',
# Replace these appropriately if you are stuck on Python 2.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7.3',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| yijingping/django-access-dev-stat | setup.py | Python | bsd-3-clause | 1,182 |
from skybeard.utils import setup_beard
setup_beard("dicebeard")
| nasfarley88/dicebeard | setup_beard.py | Python | unlicense | 65 |
import blaze
import numpy as np
import unittest
class TestDatashapeCreation(unittest.TestCase):
def test_raise_on_bad_input(self):
# Make sure it raises exceptions on a few nonsense inputs
self.assertRaises(TypeError, blaze.dshape, None)
self.assertRaises(TypeError, blaze.dshape, lambda x: x+1)
def test_atom_shapes(self):
self.assertEqual(blaze.dshape('bool'), blaze.bool_)
self.assertEqual(blaze.dshape('int8'), blaze.i1)
self.assertEqual(blaze.dshape('int16'), blaze.i2)
self.assertEqual(blaze.dshape('int32'), blaze.i4)
self.assertEqual(blaze.dshape('int64'), blaze.i8)
self.assertEqual(blaze.dshape('uint8'), blaze.u1)
self.assertEqual(blaze.dshape('uint16'), blaze.u2)
self.assertEqual(blaze.dshape('uint32'), blaze.u4)
self.assertEqual(blaze.dshape('uint64'), blaze.u8)
self.assertEqual(blaze.dshape('float32'), blaze.f4)
self.assertEqual(blaze.dshape('float64'), blaze.f8)
self.assertEqual(blaze.dshape('complex64'), blaze.c8)
self.assertEqual(blaze.dshape('complex128'), blaze.c16)
def test_atom_shape_errors(self):
self.assertRaises(TypeError, blaze.dshape, 'boot')
self.assertRaises(TypeError, blaze.dshape, 'int33')
self.assertRaises(TypeError, blaze.dshape, '12')
def test_type_decl(self):
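        # Declaring a parameterized type ('type X T = ...') is expected to fail;
        # a plain alias ('type X = ...') should equal the dshape it names.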
self.assertRaises(TypeError, blaze.dshape, 'type X T = 3, T')
self.assertEqual(blaze.dshape('3, int32'), blaze.dshape('type X = 3, int32'))
def test_string_atom(self):
self.assertEqual(blaze.dshape('string'), blaze.dshape("string('U8')"))
self.assertEqual(blaze.dshape("string('ascii')").encoding, 'A')
self.assertEqual(blaze.dshape("string('A')").encoding, 'A')
self.assertEqual(blaze.dshape("string('utf-8')").encoding, 'U8')
self.assertEqual(blaze.dshape("string('U8')").encoding, 'U8')
self.assertEqual(blaze.dshape("string('utf-16')").encoding, 'U16')
self.assertEqual(blaze.dshape("string('U16')").encoding, 'U16')
self.assertEqual(blaze.dshape("string('utf-32')").encoding, 'U32')
self.assertEqual(blaze.dshape("string('U32')").encoding, 'U32')
def test_struct_of_array(self):
self.assertEqual(str(blaze.dshape('5, int32')), '5, int32')
self.assertEqual(str(blaze.dshape('{field: 5, int32}')), '{ field : 5, int32 }')
self.assertEqual(str(blaze.dshape('{field: M, int32}')), '{ field : M, int32 }')
if __name__ == '__main__':
unittest.main()
| seibert/blaze-core | blaze/tests/test_datashape_creation.py | Python | bsd-2-clause | 2,553 |
from django.apps import AppConfig
class StudentConfig(AppConfig):
name = 'student'
| jianghc724/HappyXueTang | student/apps.py | Python | gpl-3.0 | 89 |
#!/usr/bin/env python
from django.conf import settings
from opencontext_py.libs.general import LastUpdatedOrderedDict
class Languages():
""" Useful methods for Open Context interacctions
with other APIs
"""
DEFAULT_LANGUAGE = 'en' # defaulting to English
    DEFAULT_SCRIPT = 'la'  # defaulting to the Latin script
def __init__(self):
self.codes = {
'ar': {
'label': 'Arabic',
'localized': 'العَرَبِية',
'script_code': 'ar',
'default_key': 'ar'
},
'de': {
'label': 'German',
'localized': 'Deutsch',
'script_code': 'la',
'default_key': 'de'
},
'el': {
'label': 'Greek',
'localized': 'ελληνικά',
'script_code': 'el',
'default_key': 'el'
},
'en': {
'label': 'English',
'localized': 'English',
'script_code': 'la',
'default_key': 'en'
},
'es': {
'label': 'Spanish',
'localized': 'Español',
'script_code': 'la',
'default_key': 'es'
},
'fa': {
'label': 'Persian',
'localized': 'فارسی',
'script_code': 'ar',
'default_key': 'fa'
},
'fr': {
'label': 'French',
'localized': 'Français',
'script_code': 'la',
'default_key': 'fr'
},
'he': {
'label': 'Hebrew',
'localized': 'עברית',
'script_code': 'he',
'default_key': 'he'
},
'it': {
'label': 'Italian',
'localized': 'Italiano',
'script_code': 'la',
'default_key': 'it'
},
'tr': {
'label': 'Turkish',
'localized': 'Türkçe',
'script_code': 'la',
'default_key': 'tr'
},
'zh': {
'label': 'Chinese',
'localized': '中文',
'script_code': 'zh',
'default_key': 'zh'
}}
def get_language_default_key(self, language):
""" gets a key for language to
express in a JSON-LD
object
"""
default_key = None
if language in self.codes:
default_key = self.codes[language]['default_key']
return default_key
def get_language_script_key(self, language, script):
""" gets a key for language to
express in a JSON-LD
object
"""
key = None
if language in self.codes:
l_dict = self.codes[language]
key = l_dict['default_key']
if isinstance(script, str):
if script != l_dict['script_code']:
# we're requesting a script that
# is not in the normal default for
# the language, so needs specification
key = language + '-' + script
return key
def modify_localization_json(self, localized_json, key, translation):
""" updates localization json with new text, or removes
a language key of the text is blank
"""
translation = translation.strip()
if not isinstance(localized_json, dict):
localized_json = LastUpdatedOrderedDict()
if key != self.DEFAULT_LANGUAGE:
# we will only modify localizations if the key is not
# the same as the default language
if len(translation) > 1:
# we have non-blank translation text
localized_json[key] = translation
else:
if key in localized_json:
# we're deleting the translation, since
# the translation text is blank
localized_json.pop(key, None)
return localized_json
def make_json_ld_value_obj(self, default_content, localized_json):
""" makes an value object for json_ld, which is either
just a string or is a dict object (container) for
localized_json
"""
output = default_content
if isinstance(localized_json, dict):
# ok, we have dict
if self.DEFAULT_LANGUAGE in localized_json:
# we do not allow the default language in the
# localized array
localized_json.pop(self.DEFAULT_LANGUAGE, None)
if len(localized_json) > 0:
# we have a non-empty dict
output = LastUpdatedOrderedDict()
# now add the default content to this dict
# the first key will always be the default language
output[self.DEFAULT_LANGUAGE] = default_content
# add the other content
for key, value in localized_json.items():
output[key] = value
return output
def get_default_value_str(self, value_obj):
""" gets the default value string from a
value object found in JSON-LD
"""
output = value_obj
if isinstance(value_obj, dict):
# ok, we have dict
if self.DEFAULT_LANGUAGE in value_obj:
output = value_obj[self.DEFAULT_LANGUAGE]
return output
def get_other_values_dict(self, value_obj):
""" gets a dictionary object for all the
        non-default localized / translated languages
as a key => value dict.
"""
output = None
if isinstance(value_obj, dict):
# ok, we have dict
output = LastUpdatedOrderedDict()
for lang_code, value in value_obj.items():
if lang_code != self.DEFAULT_LANGUAGE:
output[lang_code] = value
return output
def get_all_value_str(self, value_obj, delim=' \n '):
""" gets and concatenates all the localization values in
a value string or a value dict object found in JSON-LD
"""
output = value_obj
if isinstance(value_obj, dict):
# ok, we have dict
vals_list = []
for key, value in value_obj.items():
vals_list.append(value)
output = delim.join(vals_list)
return output
| ekansa/open-context-py | opencontext_py/libs/languages.py | Python | gpl-3.0 | 6,689 |
import re
import sys
import time
import numpy
import pprint
provideTimeByServer = dict()
inaugurationTimes = list()
inaugurationTimeByServer = dict()
errornousLines = list()
suspiciousInaugurationTimes = list()
lineCounter = 0
prevMonth = ""
hasYearPassed = False
def parseTime(timeStr):
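    # Journal timestamps carry no year; the script assumes the log starts in 2016 and uses
    # the first Dec -> Jan transition to decide whether a timestamp belongs to 2016 or 2017.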
global hasYearPassed, prevMonth
curMonth = timeStr[:3]
if not hasYearPassed and curMonth == "Jan" and prevMonth == "Dec":
hasYearPassed = True
prevMonth = curMonth
if hasYearPassed:
nrYearsPassed = 2017
else:
nrYearsPassed = 2016
timeTuple = time.strptime("%04d %s" % (nrYearsPassed, timeStr), "%Y %b %d %H:%M:%S")
return time.mktime(timeTuple)
if __name__ == "__main__":
if len(sys.argv) != 2:
print "First argument should be Rackattack's journal filename for the analyzed period"
sys.exit(1)
filename = sys.argv[1]
with open(filename) as log:
for line in log:
lineCounter += 1
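            # A "being provided" line marks the start of a server's inauguration; a later
            # "done" line for the same server marks its end, and the difference is recorded.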
matches = re.findall("([a-zA-Z]{3} \d{2} \d{2}:\d{2}:\d{2}) .* Node (rack\d\d-server\d\d) being", line)
if len(matches) == 1 and len(matches[0]) == 2:
provideTime, server = matches[0]
provideTimeByServer[server] = parseTime(provideTime)
continue
matches = re.findall("([a-zA-Z]{3} \d{2} \d{2}:\d{2}:\d{2}) .* (rack\d\d-server\d\d)[ \t]done", line)
if len(matches) == 1 and len(matches[0]) == 2:
doneTime, server = matches[0]
if server not in provideTimeByServer:
continue
doneTime = parseTime(doneTime)
inaugurationTime = doneTime - provideTimeByServer[server]
del provideTimeByServer[server]
if inaugurationTime < 0:
print server
print lineCounter
if inaugurationTime > 60 * 60:
suspiciousInaugurationTimes.append(inaugurationTime)
continue
inaugurationTimes.append(inaugurationTime)
inaugurationTimeByServer.setdefault(server, list()).append(inaugurationTime)
continue
errornousLines.append(line)
print "#Lines: ", lineCounter
print "#Bad lines: ", len(errornousLines)
print "#Suspicious inauguration times (too long): ", len(suspiciousInaugurationTimes)
print "average inauguration time ", sum(inaugurationTimes) / len(inaugurationTimes)
#print "average inauguration time per server "
#for server, times in inaugurationTimeByServer.iteritems():
# print str(sum(times) / len(times)), server
print "std ", numpy.std(inaugurationTimes)
| Stratoscale/rackattack-physical | racktools/calc-average-inauguration-time.py | Python | apache-2.0 | 2,686 |