Dataset schema (one entry per column: type and value range):

    text        string    lengths 6 .. 947k
    repo_name   string    lengths 5 .. 100
    path        string    lengths 4 .. 231
    language    string    1 class
    license     string    15 classes
    size        int64     6 .. 947k
    score       float64   0 .. 0.34
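The schema above is the per-record metadata exposed alongside each source file. As a rough sketch (not part of the dump), assuming the corpus is loadable through the Hugging Face `datasets` library, the columns could be inspected like this; the dataset name is a hypothetical placeholder, since the dump does not identify its source dataset:

    # Hypothetical sketch: "some-org/python-code-corpus" is a placeholder name,
    # not taken from the dump itself.
    from datasets import load_dataset

    ds = load_dataset("some-org/python-code-corpus", split="train")

    # Print the metadata columns listed in the schema above for a few records.
    for row in ds.select(range(3)):
        print(row["repo_name"], row["path"], row["language"],
              row["license"], row["size"], row["score"])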
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
__init__
---------

Contains testing helpers.
"""

import os
import shutil
import stat
import sys

if sys.version_info[:2] < (2, 7):
    import unittest2 as unittest
else:
    import unittest


def force_delete(func, path, exc_info):
    """
    Error handler for `shutil.rmtree()` equivalent to `rm -rf`
    Usage: `shutil.rmtree(path, onerror=force_delete)`
    From stackoverflow.com/questions/2656322
    """

    if not os.access(path, os.W_OK):
        # Is the error an access error?
        os.chmod(path, stat.S_IWUSR)
        func(path)
    else:
        raise


class CookiecutterCleanSystemTestCase(unittest.TestCase):
    """
    Test case that simulates a clean system with no config/cloned
    cookiecutters.

    During setUp:

    * Back up the `~/.cookiecutterrc` config file to `~/.cookiecutterrc.backup`
    * Back up the `~/.cookiecutters/` dir to `~/.cookiecutters.backup/`
    * Starts off a test case with no pre-existing `~/.cookiecutterrc` or
      `~/.cookiecutters/`

    During tearDown:

    * Delete `~/.cookiecutters/` only if a backup is present at
      `~/.cookiecutters.backup/`
    * Restore the `~/.cookiecutterrc` config file from
      `~/.cookiecutterrc.backup`
    * Restore the `~/.cookiecutters/` dir from `~/.cookiecutters.backup/`
    """

    def setUp(self):
        # If ~/.cookiecutterrc is pre-existing, move it to a temp location
        self.user_config_path = os.path.expanduser('~/.cookiecutterrc')
        self.user_config_path_backup = os.path.expanduser(
            '~/.cookiecutterrc.backup'
        )
        if os.path.exists(self.user_config_path):
            self.user_config_found = True
            shutil.copy(self.user_config_path, self.user_config_path_backup)
            os.remove(self.user_config_path)
        else:
            self.user_config_found = False

        # If the default cookiecutters_dir is pre-existing, move it to a
        # temp location
        self.cookiecutters_dir = os.path.expanduser('~/.cookiecutters')
        self.cookiecutters_dir_backup = os.path.expanduser('~/.cookiecutters.backup')
        if os.path.isdir(self.cookiecutters_dir):
            self.cookiecutters_dir_found = True
            # Remove existing backups before backing up. If they exist, they're stale.
            if os.path.isdir(self.cookiecutters_dir_backup):
                shutil.rmtree(self.cookiecutters_dir_backup)
            shutil.copytree(self.cookiecutters_dir, self.cookiecutters_dir_backup)
        else:
            self.cookiecutters_dir_found = False

    def tearDown(self):
        # If it existed, restore ~/.cookiecutterrc
        # We never write to ~/.cookiecutterrc, so this logic is simpler.
        if self.user_config_found and os.path.exists(self.user_config_path_backup):
            shutil.copy(self.user_config_path_backup, self.user_config_path)
            os.remove(self.user_config_path_backup)

        # Carefully delete the created ~/.cookiecutters dir only in certain
        # conditions.
        if self.cookiecutters_dir_found:
            # Delete the created ~/.cookiecutters dir as long as a backup exists
            if os.path.isdir(self.cookiecutters_dir) and os.path.isdir(self.cookiecutters_dir_backup):
                shutil.rmtree(self.cookiecutters_dir)
        else:
            # Delete the created ~/.cookiecutters dir.
            # There's no backup because it never existed
            if os.path.isdir(self.cookiecutters_dir):
                shutil.rmtree(self.cookiecutters_dir)

        # Restore the user's default cookiecutters_dir contents
        if os.path.isdir(self.cookiecutters_dir_backup):
            shutil.copytree(self.cookiecutters_dir_backup, self.cookiecutters_dir)
        if os.path.isdir(self.cookiecutters_dir):
            shutil.rmtree(self.cookiecutters_dir_backup)
ericholscher/cookiecutter
tests/__init__.py
Python
bsd-3-clause
3,893
0.003596
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

u"""
One way of implementing a default dictionary.
"""


class DefaultDict(dict):

    def __missing__(self, key):
        u"""
        Return the key itself as the default value when no value is stored
        for that dictionary key.
        """
        return key


if __name__ == "__main__":
    d = DefaultDict()
    print(d, type(d), d.keys())
    d['flop'] = 127
    print(d, type(d), d.keys())
    d['flip'] = 130
    print(d, type(d), d.keys())
    print(d['no_value'])
sjh/python
default_dict.py
Python
apache-2.0
477
0.002096
################################################################################################### # # PySpice - A Spice Package for Python # Copyright (C) 2014 Fabrice Salvaire # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # #################################################################################################### #################################################################################################### import logging #################################################################################################### from ..Tools.StringTools import join_list, join_dict from .NgSpice.Shared import NgSpiceShared from .Server import SpiceServer #################################################################################################### _module_logger = logging.getLogger(__name__) #################################################################################################### class CircuitSimulation: """Define and generate the spice instruction to perform a circuit simulation. .. warning:: In some cases NgSpice can perform several analyses one after the other. This case is partially supported. """ _logger = _module_logger.getChild('CircuitSimulation') ############################################## def __init__(self, circuit, temperature=27, nominal_temperature=27, pipe=True, ): self._circuit = circuit self._options = {} # .options self._initial_condition = {} # .ic self._saved_nodes = () self._analysis_parameters = {} self.temperature = temperature self.nominal_temperature = nominal_temperature if pipe: self.options('NOINIT') self.options(filetype='binary') ############################################## @property def circuit(self): return self._circuit ############################################## def options(self, *args, **kwargs): for item in args: self._options[str(item)] = None for key, value in kwargs.items(): self._options[str(key)] = str(value) ############################################## @property def temperature(self): return self._options['TEMP'] @temperature.setter def temperature(self, value): self._options['TEMP'] = value ############################################## @property def nominal_temperature(self): return self._options['TNOM'] @nominal_temperature.setter def nominal_temperature(self, value): self._options['TNOM'] = value ############################################## def initial_condition(self, **kwargs): """ Set initial condition for voltage nodes. Usage: initial_condition(node_name1=value, ...) """ for key, value in kwargs.items(): self._initial_condition['V({})'.format(str(key))] = str(value) # Fixme: .nodeset ############################################## def save(self, *args): # Fixme: pass Node for voltage node, Element for source branch current, ... """Set the list of saved vectors. If no *.save* line is given, then the default set of vectors is saved (node voltages and voltage source branch currents). 
If *.save* lines are given, only those vectors specified are saved. Node voltages may be saved by giving the node_name or *v(node_name)*. Currents through an independent voltage source (including inductor) are given by *i(source_name)* or *source_name#branch*. Internal device data are accepted as *@dev[param]*. If you want to save internal data in addition to the default vector set, add the parameter *all* to the additional vectors to be saved. """ self._saved_nodes = list(args) ############################################## @property def save_currents(self): """ Save all currents. """ return self._options.get('SAVECURRENTS', False) @save_currents.setter def save_currents(self, value): if value: self._options['SAVECURRENTS'] = True else: del self._options['SAVECURRENTS'] ############################################## def reset_analysis(self): self._analysis_parameters.clear() ############################################## def operating_point(self): """Compute the operating point of the circuit with capacitors open and inductors shorted.""" self._analysis_parameters['op'] = '' ############################################## def dc_sensitivity(self, output_variable): """Compute the sensitivity of the DC operating point of a node voltage or voltage-source branch current to all non-zero device parameters. General form: .. code:: .sens outvar Examples: .. code:: .SENS V(1, OUT) .SENS I(VTEST) """ self._analysis_parameters['sens'] = (output_variable,) ############################################## def ac_sensitivity(self, output_variable, start_frequency, stop_frequency, number_of_points, variation): """Compute the sensitivity of the AC values of a node voltage or voltage-source branch current to all non-zero device parameters. General form: .. code:: .sens outvar ac dec nd fstart fstop .sens outvar ac oct no fstart fstop .sens outvar ac lin np fstart fstop Examples: .. code:: .SENS V(OUT) AC DEC 10 100 100 k """ if variation not in ('dec', 'oct', 'lin'): raise ValueError("Incorrect variation type") self._analysis_parameters['sens'] = (output_variable, variation, number_of_points, start_frequency, stop_frequency) ############################################## def dc(self, **kwargs): """Compute the DC transfer fonction of the circuit with capacitors open and inductors shorted. General form: .. code:: .dc srcnam vstart vstop vincr [ src2 start2 stop2 incr2 ] *srcnam* is the name of an independent voltage or current source, a resistor or the circuit temperature. *vstart*, *vstop*, and *vincr* are the starting, final, and incrementing values respectively. A second source (*src2*) may optionally be specified with associated sweep parameters. In this case, the first source is swept over its range for each value of the second source. Examples: .. code:: .dc VIN 0 .2 5 5.0 0.25 .dc VDS 0 10 .5 VGS 0 5 1 .dc VCE 0 10 .2 5 IB 0 10U 1U .dc RLoad 1k 2k 100 .dc TEMP -15 75 5 """ parameters = [] for variable, value_slice in kwargs.items(): variable_lower = variable.lower() if variable_lower[0] in ('v', 'i', 'r') or variable_lower == 'temp': parameters += [variable, value_slice.start, value_slice.stop, value_slice.step] else: raise NameError('Sweep variable must be a voltage/current source, ' 'a resistor or the circuit temperature') self._analysis_parameters['dc'] = parameters ############################################## def ac(self, start_frequency, stop_frequency, number_of_points, variation): # fixme: concise keyword ? 
"""Perform a small-signal AC analysis of the circuit where all non-linear devices are linearized around their actual DC operating point. Note that in order for this analysis to be meaningful, at least one independent source must have been specified with an AC value. Typically it does not make much sense to specify more than one AC source. If you do, the result will be a superposition of all sources, thus difficult to interpret. Examples: .. code:: .ac dec nd fstart fstop .ac oct no fstart fstop .ac lin np fstart fstop The parameter *variation* must be either `dec`, `oct` or `lin`. """ if variation not in ('dec', 'oct', 'lin'): raise ValueError("Incorrect variation type") self._analysis_parameters['ac'] = (variation, number_of_points, start_frequency, stop_frequency) ############################################## def transient(self, step_time, end_time, start_time=None, max_time=None, use_initial_condition=False): """Perform a transient analysis of the circuit. General Form: .. code:: .tran tstep tstop <tstart <tmax>> <uic> """ if use_initial_condition: uic = 'uic' else: uic = None self._analysis_parameters['tran'] = (step_time, end_time, start_time, max_time, uic) ############################################## def __str__(self): netlist = str(self._circuit) if self.options: for key, value in self._options.items(): if value is not None: netlist += '.options {} = {}\n'.format(key, value) else: netlist += '.options {}\n'.format(key) if self.initial_condition: netlist += '.ic ' + join_dict(self._initial_condition) + '\n' if self._saved_nodes: netlist += '.save ' + join_list(self._saved_nodes) + '\n' for analysis, analysis_parameters in self._analysis_parameters.items(): netlist += '.' + analysis + ' ' + join_list(analysis_parameters) + '\n' netlist += '.end\n' return netlist #################################################################################################### class CircuitSimulator(CircuitSimulation): """ This class implements a circuit simulator. Each analysis mode is performed by a method that return the measured probes. For *ac* and *transient* analyses, the user must specify a list of nodes using the *probes* key argument. 
""" _logger = _module_logger.getChild('CircuitSimulator') ############################################## def _run(self, analysis_method, *args, **kwargs): self.reset_analysis() if 'probes' in kwargs: self.save(* kwargs.pop('probes')) method = getattr(CircuitSimulation, analysis_method) method(self, *args, **kwargs) self._logger.debug('desk\n' + str(self)) ############################################## def operating_point(self, *args, **kwargs): return self._run('operating_point', *args, **kwargs) ############################################## def dc(self, *args, **kwargs): return self._run('dc', *args, **kwargs) ############################################## def dc_sensitivity(self, *args, **kwargs): return self._run('dc_sensitivity', *args, **kwargs) ############################################## def ac(self, *args, **kwargs): return self._run('ac', *args, **kwargs) ############################################## def transient(self, *args, **kwargs): return self._run('transient', *args, **kwargs) #################################################################################################### class SubprocessCircuitSimulator(CircuitSimulator): _logger = _module_logger.getChild('SubprocessCircuitSimulator') ############################################## def __init__(self, circuit, temperature=27, nominal_temperature=27, spice_command='ngspice', ): # Fixme: kwargs super().__init__(circuit, temperature, nominal_temperature, pipe=True) self._spice_server = SpiceServer() ############################################## def _run(self, analysis_method, *args, **kwargs): super()._run(analysis_method, *args, **kwargs) raw_file = self._spice_server(str(self)) self.reset_analysis() # for field in raw_file.variables: # print field return raw_file.to_analysis(self._circuit) #################################################################################################### class NgSpiceSharedCircuitSimulator(CircuitSimulator): _logger = _module_logger.getChild('NgSpiceSharedCircuitSimulator') ############################################## def __init__(self, circuit, temperature=27, nominal_temperature=27, ngspice_shared=None, ): # Fixme: kwargs super().__init__(circuit, temperature, nominal_temperature, pipe=False) if ngspice_shared is None: self._ngspice_shared = NgSpiceShared(send_data=False) else: self._ngspice_shared = ngspice_shared ############################################## def _run(self, analysis_method, *args, **kwargs): super()._run(analysis_method, *args, **kwargs) self._ngspice_shared.load_circuit(str(self)) self._ngspice_shared.run() self._logger.debug(str(self._ngspice_shared.plot_names)) self.reset_analysis() if analysis_method == 'dc': plot_name = 'dc1' elif analysis_method == 'ac': plot_name = 'ac1' elif analysis_method == 'transient': plot_name = 'tran1' else: raise NotImplementedError return self._ngspice_shared.plot(plot_name).to_analysis() #################################################################################################### # # End # ####################################################################################################
thomaslima/PySpice
PySpice/Spice/Simulation.py
Python
gpl-3.0
14,801
0.004256
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Convolution blocks for mobilenet.""" import contextlib import functools import tensorflow as tf slim = tf.contrib.slim def _fixed_padding(inputs, kernel_size, rate=1): """Pads the input along the spatial dimensions independently of input size. Pads the input such that if it was used in a convolution with 'VALID' padding, the output would have the same dimensions as if the unpadded input was used in a convolution with 'SAME' padding. Args: inputs: A tensor of size [batch, height_in, width_in, channels]. kernel_size: The kernel to be used in the conv2d or max_pool2d operation. rate: An integer, rate for atrous convolution. Returns: output: A tensor of size [batch, height_out, width_out, channels] with the input, either intact (if kernel_size == 1) or padded (if kernel_size > 1). """ kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1), kernel_size[0] + (kernel_size[0] - 1) * (rate - 1)] pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1] pad_beg = [pad_total[0] // 2, pad_total[1] // 2] pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]] padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg[0], pad_end[0]], [pad_beg[1], pad_end[1]], [0, 0]]) return padded_inputs def _make_divisible(v, divisor, min_value=None): if min_value is None: min_value = divisor new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than 10%. if new_v < 0.9 * v: new_v += divisor return new_v def _split_divisible(num, num_ways, divisible_by=8): """Evenly splits num, num_ways so each piece is a multiple of divisible_by.""" assert num % divisible_by == 0 assert num / num_ways >= divisible_by # Note: want to round down, we adjust each split to match the total. base = num // num_ways // divisible_by * divisible_by result = [] accumulated = 0 for i in range(num_ways): r = base while accumulated + r < num * (i + 1) / num_ways: r += divisible_by result.append(r) accumulated += r assert accumulated == num return result @contextlib.contextmanager def _v1_compatible_scope_naming(scope): if scope is None: # Create uniqified separable blocks. with tf.variable_scope(None, default_name='separable') as s, \ tf.name_scope(s.original_name_scope): yield '' else: # We use scope_depthwise, scope_pointwise for compatibility with V1 ckpts. # which provide numbered scopes. scope += '_' yield scope @slim.add_arg_scope def split_separable_conv2d(input_tensor, num_outputs, scope=None, normalizer_fn=None, stride=1, rate=1, endpoints=None, use_explicit_padding=False): """Separable mobilenet V1 style convolution. Depthwise convolution, with default non-linearity, followed by 1x1 depthwise convolution. This is similar to slim.separable_conv2d, but differs in tha it applies batch normalization and non-linearity to depthwise. 
This matches the basic building of Mobilenet Paper (https://arxiv.org/abs/1704.04861) Args: input_tensor: input num_outputs: number of outputs scope: optional name of the scope. Note if provided it will use scope_depthwise for deptwhise, and scope_pointwise for pointwise. normalizer_fn: which normalizer function to use for depthwise/pointwise stride: stride rate: output rate (also known as dilation rate) endpoints: optional, if provided, will export additional tensors to it. use_explicit_padding: Use 'VALID' padding for convolutions, but prepad inputs so that the output dimensions are the same as if 'SAME' padding were used. Returns: output tesnor """ with _v1_compatible_scope_naming(scope) as scope: dw_scope = scope + 'depthwise' endpoints = endpoints if endpoints is not None else {} kernel_size = [3, 3] padding = 'SAME' if use_explicit_padding: padding = 'VALID' input_tensor = _fixed_padding(input_tensor, kernel_size, rate) net = slim.separable_conv2d( input_tensor, None, kernel_size, depth_multiplier=1, stride=stride, rate=rate, normalizer_fn=normalizer_fn, padding=padding, scope=dw_scope) endpoints[dw_scope] = net pw_scope = scope + 'pointwise' net = slim.conv2d( net, num_outputs, [1, 1], stride=1, normalizer_fn=normalizer_fn, scope=pw_scope) endpoints[pw_scope] = net return net def expand_input_by_factor(n, divisible_by=8): return lambda num_inputs, **_: _make_divisible(num_inputs * n, divisible_by) @slim.add_arg_scope def expanded_conv(input_tensor, num_outputs, expansion_size=expand_input_by_factor(6), stride=1, rate=1, kernel_size=(3, 3), residual=True, normalizer_fn=None, project_activation_fn=tf.identity, split_projection=1, split_expansion=1, split_divisible_by=8, expansion_transform=None, depthwise_location='expansion', depthwise_channel_multiplier=1, endpoints=None, use_explicit_padding=False, padding='SAME', scope=None): """Depthwise Convolution Block with expansion. Builds a composite convolution that has the following structure expansion (1x1) -> depthwise (kernel_size) -> projection (1x1) Args: input_tensor: input num_outputs: number of outputs in the final layer. expansion_size: the size of expansion, could be a constant or a callable. If latter it will be provided 'num_inputs' as an input. For forward compatibility it should accept arbitrary keyword arguments. Default will expand the input by factor of 6. stride: depthwise stride rate: depthwise rate kernel_size: depthwise kernel residual: whether to include residual connection between input and output. normalizer_fn: batchnorm or otherwise project_activation_fn: activation function for the project layer split_projection: how many ways to split projection operator (that is conv expansion->bottleneck) split_expansion: how many ways to split expansion op (that is conv bottleneck->expansion) ops will keep depth divisible by this value. split_divisible_by: make sure every split group is divisible by this number. expansion_transform: Optional function that takes expansion as a single input and returns output. depthwise_location: where to put depthwise covnvolutions supported values None, 'input', 'output', 'expansion' depthwise_channel_multiplier: depthwise channel multiplier: each input will replicated (with different filters) that many times. So if input had c channels, output will have c x depthwise_channel_multpilier. endpoints: An optional dictionary into which intermediate endpoints are placed. 
The keys "expansion_output", "depthwise_output", "projection_output" and "expansion_transform" are always populated, even if the corresponding functions are not invoked. use_explicit_padding: Use 'VALID' padding for convolutions, but prepad inputs so that the output dimensions are the same as if 'SAME' padding were used. padding: Padding type to use if `use_explicit_padding` is not set. scope: optional scope. Returns: Tensor of depth num_outputs Raises: TypeError: on inval """ with tf.variable_scope(scope, default_name='expanded_conv') as s, \ tf.name_scope(s.original_name_scope): prev_depth = input_tensor.get_shape().as_list()[3] if depthwise_location not in [None, 'input', 'output', 'expansion']: raise TypeError('%r is unknown value for depthwise_location' % depthwise_location) if use_explicit_padding: if padding != 'SAME': raise TypeError('`use_explicit_padding` should only be used with ' '"SAME" padding.') padding = 'VALID' depthwise_func = functools.partial( slim.separable_conv2d, num_outputs=None, kernel_size=kernel_size, depth_multiplier=depthwise_channel_multiplier, stride=stride, rate=rate, normalizer_fn=normalizer_fn, padding=padding, scope='depthwise') # b1 -> b2 * r -> b2 # i -> (o * r) (bottleneck) -> o input_tensor = tf.identity(input_tensor, 'input') net = input_tensor if depthwise_location == 'input': if use_explicit_padding: net = _fixed_padding(net, kernel_size, rate) net = depthwise_func(net, activation_fn=None) if callable(expansion_size): inner_size = expansion_size(num_inputs=prev_depth) else: inner_size = expansion_size if inner_size > net.shape[3]: net = split_conv( net, inner_size, num_ways=split_expansion, scope='expand', divisible_by=split_divisible_by, stride=1, normalizer_fn=normalizer_fn) net = tf.identity(net, 'expansion_output') if endpoints is not None: endpoints['expansion_output'] = net if depthwise_location == 'expansion': if use_explicit_padding: net = _fixed_padding(net, kernel_size, rate) net = depthwise_func(net) net = tf.identity(net, name='depthwise_output') if endpoints is not None: endpoints['depthwise_output'] = net if expansion_transform: net = expansion_transform(expansion_tensor=net, input_tensor=input_tensor) # Note in contrast with expansion, we always have # projection to produce the desired output size. net = split_conv( net, num_outputs, num_ways=split_projection, stride=1, scope='project', divisible_by=split_divisible_by, normalizer_fn=normalizer_fn, activation_fn=project_activation_fn) if endpoints is not None: endpoints['projection_output'] = net if depthwise_location == 'output': if use_explicit_padding: net = _fixed_padding(net, kernel_size, rate) net = depthwise_func(net, activation_fn=None) if callable(residual): # custom residual net = residual(input_tensor=input_tensor, output_tensor=net) elif (residual and # stride check enforces that we don't add residuals when spatial # dimensions are None stride == 1 and # Depth matches net.get_shape().as_list()[3] == input_tensor.get_shape().as_list()[3]): net += input_tensor return tf.identity(net, name='output') def split_conv(input_tensor, num_outputs, num_ways, scope, divisible_by=8, **kwargs): """Creates a split convolution. Split convolution splits the input and output into 'num_blocks' blocks of approximately the same size each, and only connects $i$-th input to $i$ output. Args: input_tensor: input tensor num_outputs: number of output filters num_ways: num blocks to split by. scope: scope for all the operators. divisible_by: make sure that every part is divisiable by this. 
**kwargs: will be passed directly into conv2d operator Returns: tensor """ b = input_tensor.get_shape().as_list()[3] if num_ways == 1 or min(b // num_ways, num_outputs // num_ways) < divisible_by: # Don't do any splitting if we end up with less than 8 filters # on either side. return slim.conv2d(input_tensor, num_outputs, [1, 1], scope=scope, **kwargs) outs = [] input_splits = _split_divisible(b, num_ways, divisible_by=divisible_by) output_splits = _split_divisible( num_outputs, num_ways, divisible_by=divisible_by) inputs = tf.split(input_tensor, input_splits, axis=3, name='split_' + scope) base = scope for i, (input_tensor, out_size) in enumerate(zip(inputs, output_splits)): scope = base + '_part_%d' % (i,) n = slim.conv2d(input_tensor, out_size, [1, 1], scope=scope, **kwargs) n = tf.identity(n, scope + '_output') outs.append(n) return tf.concat(outs, 3, name=scope + '_concat')
derekjchow/models
research/slim/nets/mobilenet/conv_blocks.py
Python
apache-2.0
13,351
0.005243
class Solution(object):
    def firstMissingPositive(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        l = len(nums)
        # Cycle placement: move each value v in the range [1, l] into slot v - 1.
        for i in range(0, l):
            cur = nums[i]
            while cur >= 1 and cur <= l and nums[cur - 1] != cur:
                tmp = nums[cur - 1]
                nums[cur - 1] = cur
                cur = tmp
        # The first slot whose value is not i + 1 marks the missing positive.
        for i in range(0, l):
            if nums[i] != i + 1:
                return i + 1
        return l + 1
hawkphantomnet/leetcode
FirstMissingPositive/Solution.py
Python
mit
495
0.00202
import copy import datetime import decimal import math import warnings from itertools import tee from django.db import connection from django.db.models.query_utils import QueryWrapper from django.conf import settings from django import forms from django.core import exceptions, validators from django.utils.datastructures import DictWrapper from django.utils.dateparse import parse_date, parse_datetime, parse_time from django.utils.functional import curry from django.utils.text import capfirst from django.utils import timezone from django.utils.translation import ugettext_lazy as _ from django.utils.encoding import smart_unicode, force_unicode, smart_str from django.utils.ipv6 import clean_ipv6_address class NOT_PROVIDED: pass # The values to use for "blank" in SelectFields. Will be appended to the start # of most "choices" lists. BLANK_CHOICE_DASH = [("", "---------")] BLANK_CHOICE_NONE = [("", "None")] class FieldDoesNotExist(Exception): pass # A guide to Field parameters: # # * name: The name of the field specifed in the model. # * attname: The attribute to use on the model object. This is the same as # "name", except in the case of ForeignKeys, where "_id" is # appended. # * db_column: The db_column specified in the model (or None). # * column: The database column for this field. This is the same as # "attname", except if db_column is specified. # # Code that introspects values, or does other dynamic things, should use # attname. For example, this gets the primary key value of object "obj": # # getattr(obj, opts.pk.attname) class Field(object): """Base class for all field types""" # Designates whether empty strings fundamentally are allowed at the # database level. empty_strings_allowed = True # These track each time a Field instance is created. Used to retain order. # The auto_creation_counter is used for fields that Django implicitly # creates, creation_counter is used for all user-specified fields. creation_counter = 0 auto_creation_counter = -1 default_validators = [] # Default set of validators default_error_messages = { 'invalid_choice': _(u'Value %r is not a valid choice.'), 'null': _(u'This field cannot be null.'), 'blank': _(u'This field cannot be blank.'), 'unique': _(u'%(model_name)s with this %(field_label)s ' u'already exists.'), } # Generic field type description, usually overriden by subclasses def _description(self): return _(u'Field of type: %(field_type)s') % { 'field_type': self.__class__.__name__ } description = property(_description) def __init__(self, verbose_name=None, name=None, primary_key=False, max_length=None, unique=False, blank=False, null=False, db_index=False, rel=None, default=NOT_PROVIDED, editable=True, serialize=True, unique_for_date=None, unique_for_month=None, unique_for_year=None, choices=None, help_text='', db_column=None, db_tablespace=None, auto_created=False, validators=[], error_messages=None): self.name = name self.verbose_name = verbose_name self.primary_key = primary_key self.max_length, self._unique = max_length, unique self.blank, self.null = blank, null # Oracle treats the empty string ('') as null, so coerce the null # option whenever '' is a possible value. 
if (self.empty_strings_allowed and connection.features.interprets_empty_strings_as_nulls): self.null = True self.rel = rel self.default = default self.editable = editable self.serialize = serialize self.unique_for_date, self.unique_for_month = (unique_for_date, unique_for_month) self.unique_for_year = unique_for_year self._choices = choices or [] self.help_text = help_text self.db_column = db_column self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE self.auto_created = auto_created # Set db_index to True if the field has a relationship and doesn't # explicitly set db_index. self.db_index = db_index # Adjust the appropriate creation counter, and save our local copy. if auto_created: self.creation_counter = Field.auto_creation_counter Field.auto_creation_counter -= 1 else: self.creation_counter = Field.creation_counter Field.creation_counter += 1 self.validators = self.default_validators + validators messages = {} for c in reversed(self.__class__.__mro__): messages.update(getattr(c, 'default_error_messages', {})) messages.update(error_messages or {}) self.error_messages = messages def __cmp__(self, other): # This is needed because bisect does not take a comparison function. return cmp(self.creation_counter, other.creation_counter) def __deepcopy__(self, memodict): # We don't have to deepcopy very much here, since most things are not # intended to be altered after initial creation. obj = copy.copy(self) if self.rel: obj.rel = copy.copy(self.rel) memodict[id(self)] = obj return obj def to_python(self, value): """ Converts the input value into the expected Python data type, raising django.core.exceptions.ValidationError if the data can't be converted. Returns the converted value. Subclasses should override this. """ return value def run_validators(self, value): if value in validators.EMPTY_VALUES: return errors = [] for v in self.validators: try: v(value) except exceptions.ValidationError, e: if hasattr(e, 'code') and e.code in self.error_messages: message = self.error_messages[e.code] if e.params: message = message % e.params errors.append(message) else: errors.extend(e.messages) if errors: raise exceptions.ValidationError(errors) def validate(self, value, model_instance): """ Validates value and throws ValidationError. Subclasses should override this to provide validation logic. """ if not self.editable: # Skip validation for non-editable fields. return if self._choices and value: for option_key, option_value in self.choices: if isinstance(option_value, (list, tuple)): # This is an optgroup, so look inside the group for # options. for optgroup_key, optgroup_value in option_value: if value == optgroup_key: return elif value == option_key: return msg = self.error_messages['invalid_choice'] % value raise exceptions.ValidationError(msg) if value is None and not self.null: raise exceptions.ValidationError(self.error_messages['null']) if not self.blank and value in validators.EMPTY_VALUES: raise exceptions.ValidationError(self.error_messages['blank']) def clean(self, value, model_instance): """ Convert the value's type and run validation. Validation errors from to_python and validate are propagated. The correct value is returned if no error is raised. """ value = self.to_python(value) self.validate(value, model_instance) self.run_validators(value) return value def db_type(self, connection): """ Returns the database column data type for this field, for the provided connection. 
""" # The default implementation of this method looks at the # backend-specific DATA_TYPES dictionary, looking up the field by its # "internal type". # # A Field class can implement the get_internal_type() method to specify # which *preexisting* Django Field class it's most similar to -- i.e., # a custom field might be represented by a TEXT column type, which is # the same as the TextField Django field type, which means the custom # field's get_internal_type() returns 'TextField'. # # But the limitation of the get_internal_type() / data_types approach # is that it cannot handle database column types that aren't already # mapped to one of the built-in Django field types. In this case, you # can implement db_type() instead of get_internal_type() to specify # exactly which wacky database column type you want to use. data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_") try: return (connection.creation.data_types[self.get_internal_type()] % data) except KeyError: return None @property def unique(self): return self._unique or self.primary_key def set_attributes_from_name(self, name): if not self.name: self.name = name self.attname, self.column = self.get_attname_column() if self.verbose_name is None and self.name: self.verbose_name = self.name.replace('_', ' ') def contribute_to_class(self, cls, name): self.set_attributes_from_name(name) self.model = cls cls._meta.add_field(self) if self.choices: setattr(cls, 'get_%s_display' % self.name, curry(cls._get_FIELD_display, field=self)) def get_attname(self): return self.name def get_attname_column(self): attname = self.get_attname() column = self.db_column or attname return attname, column def get_cache_name(self): return '_%s_cache' % self.name def get_internal_type(self): return self.__class__.__name__ def pre_save(self, model_instance, add): """ Returns field's value just before saving. """ return getattr(model_instance, self.attname) def get_prep_value(self, value): """ Perform preliminary non-db specific value checks and conversions. """ return value def get_db_prep_value(self, value, connection, prepared=False): """Returns field's value prepared for interacting with the database backend. Used by the default implementations of ``get_db_prep_save``and `get_db_prep_lookup``` """ if not prepared: value = self.get_prep_value(value) return value def get_db_prep_save(self, value, connection): """ Returns field's value prepared for saving into a database. """ return self.get_db_prep_value(value, connection=connection, prepared=False) def get_prep_lookup(self, lookup_type, value): """ Perform preliminary non-db specific lookup checks and conversions """ if hasattr(value, 'prepare'): return value.prepare() if hasattr(value, '_prepare'): return value._prepare() if lookup_type in ( 'regex', 'iregex', 'month', 'day', 'week_day', 'search', 'contains', 'icontains', 'iexact', 'startswith', 'istartswith', 'endswith', 'iendswith', 'isnull' ): return value elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'): return self.get_prep_value(value) elif lookup_type in ('range', 'in'): return [self.get_prep_value(v) for v in value] elif lookup_type == 'year': try: return int(value) except ValueError: raise ValueError("The __year lookup type requires an integer " "argument") raise TypeError("Field has invalid lookup: %s" % lookup_type) def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False): """ Returns field's value prepared for database lookup. 
""" if not prepared: value = self.get_prep_lookup(lookup_type, value) if hasattr(value, 'get_compiler'): value = value.get_compiler(connection=connection) if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'): # If the value has a relabel_aliases method, it will need to # be invoked before the final SQL is evaluated if hasattr(value, 'relabel_aliases'): return value if hasattr(value, 'as_sql'): sql, params = value.as_sql() else: sql, params = value._as_sql(connection=connection) return QueryWrapper(('(%s)' % sql), params) if lookup_type in ('regex', 'iregex', 'month', 'day', 'week_day', 'search'): return [value] elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'): return [self.get_db_prep_value(value, connection=connection, prepared=prepared)] elif lookup_type in ('range', 'in'): return [self.get_db_prep_value(v, connection=connection, prepared=prepared) for v in value] elif lookup_type in ('contains', 'icontains'): return ["%%%s%%" % connection.ops.prep_for_like_query(value)] elif lookup_type == 'iexact': return [connection.ops.prep_for_iexact_query(value)] elif lookup_type in ('startswith', 'istartswith'): return ["%s%%" % connection.ops.prep_for_like_query(value)] elif lookup_type in ('endswith', 'iendswith'): return ["%%%s" % connection.ops.prep_for_like_query(value)] elif lookup_type == 'isnull': return [] elif lookup_type == 'year': if self.get_internal_type() == 'DateField': return connection.ops.year_lookup_bounds_for_date_field(value) else: return connection.ops.year_lookup_bounds(value) def has_default(self): """ Returns a boolean of whether this field has a default value. """ return self.default is not NOT_PROVIDED def get_default(self): """ Returns the default value for this field. """ if self.has_default(): if callable(self.default): return self.default() return force_unicode(self.default, strings_only=True) if (not self.empty_strings_allowed or (self.null and not connection.features.interprets_empty_strings_as_nulls)): return None return "" def get_validator_unique_lookup_type(self): return '%s__exact' % self.name def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH): """Returns choices with a default blank choices included, for use as SelectField choices for this field.""" first_choice = include_blank and blank_choice or [] if self.choices: return first_choice + list(self.choices) rel_model = self.rel.to if hasattr(self.rel, 'get_related_field'): lst = [(getattr(x, self.rel.get_related_field().attname), smart_unicode(x)) for x in rel_model._default_manager.complex_filter( self.rel.limit_choices_to)] else: lst = [(x._get_pk_val(), smart_unicode(x)) for x in rel_model._default_manager.complex_filter( self.rel.limit_choices_to)] return first_choice + lst def get_choices_default(self): return self.get_choices() def get_flatchoices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH): """ Returns flattened choices with a default blank choice included. """ first_choice = include_blank and blank_choice or [] return first_choice + list(self.flatchoices) def _get_val_from_obj(self, obj): if obj is not None: return getattr(obj, self.attname) else: return self.get_default() def value_to_string(self, obj): """ Returns a string value of this field from the passed obj. This is used by the serialization framework. 
""" return smart_unicode(self._get_val_from_obj(obj)) def bind(self, fieldmapping, original, bound_field_class): return bound_field_class(self, fieldmapping, original) def _get_choices(self): if hasattr(self._choices, 'next'): choices, self._choices = tee(self._choices) return choices else: return self._choices choices = property(_get_choices) def _get_flatchoices(self): """Flattened version of choices tuple.""" flat = [] for choice, value in self.choices: if isinstance(value, (list, tuple)): flat.extend(value) else: flat.append((choice,value)) return flat flatchoices = property(_get_flatchoices) def save_form_data(self, instance, data): setattr(instance, self.name, data) def formfield(self, form_class=forms.CharField, **kwargs): """ Returns a django.forms.Field instance for this database Field. """ defaults = {'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text} if self.has_default(): if callable(self.default): defaults['initial'] = self.default defaults['show_hidden_initial'] = True else: defaults['initial'] = self.get_default() if self.choices: # Fields with choices get special treatment. include_blank = (self.blank or not (self.has_default() or 'initial' in kwargs)) defaults['choices'] = self.get_choices(include_blank=include_blank) defaults['coerce'] = self.to_python if self.null: defaults['empty_value'] = None form_class = forms.TypedChoiceField # Many of the subclass-specific formfield arguments (min_value, # max_value) don't apply for choice fields, so be sure to only pass # the values that TypedChoiceField will understand. for k in kwargs.keys(): if k not in ('coerce', 'empty_value', 'choices', 'required', 'widget', 'label', 'initial', 'help_text', 'error_messages', 'show_hidden_initial'): del kwargs[k] defaults.update(kwargs) return form_class(**defaults) def value_from_object(self, obj): """ Returns the value of this field in the given model instance. """ return getattr(obj, self.attname) def __repr__(self): """ Displays the module, class and name of the field. """ path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__) name = getattr(self, 'name', None) if name is not None: return '<%s: %s>' % (path, name) return '<%s>' % path class AutoField(Field): description = _("Automatic key") empty_strings_allowed = False def __init__(self, *args, **kwargs): assert kwargs.get('primary_key', False) is True, \ "%ss must have primary_key=True." % self.__class__.__name__ kwargs['blank'] = True Field.__init__(self, *args, **kwargs) def get_internal_type(self): return "AutoField" def validate(self, value, model_instance): pass def get_db_prep_value(self, value, connection, prepared=False): if value is None: return value return connection.ops.value_to_db_auto(value) def contribute_to_class(self, cls, name): assert not cls._meta.has_auto_field, \ "A model can't have more than one AutoField." 
super(AutoField, self).contribute_to_class(cls, name) cls._meta.has_auto_field = True cls._meta.auto_field = self def formfield(self, **kwargs): return None class BooleanField(Field): empty_strings_allowed = False default_error_messages = { 'invalid': _(u"'%s' value must be either True or False."), } description = _("Boolean (Either True or False)") def __init__(self, *args, **kwargs): kwargs['blank'] = True if 'default' not in kwargs and not kwargs.get('null'): kwargs['default'] = False Field.__init__(self, *args, **kwargs) def get_internal_type(self): return "BooleanField" def to_python(self, value): if value in (True, False): # if value is 1 or 0 than it's equal to True or False, but we want # to return a true bool for semantic reasons. return bool(value) if value in ('t', 'True', '1'): return True if value in ('f', 'False', '0'): return False msg = self.error_messages['invalid'] % str(value) raise exceptions.ValidationError(msg) def get_prep_lookup(self, lookup_type, value): # Special-case handling for filters coming from a Web request (e.g. the # admin interface). Only works for scalar values (not lists). If you're # passing in a list, you might as well make things the right type when # constructing the list. if value in ('1', '0'): value = bool(int(value)) return super(BooleanField, self).get_prep_lookup(lookup_type, value) def get_prep_value(self, value): if value is None: return None return bool(value) def formfield(self, **kwargs): # Unlike most fields, BooleanField figures out include_blank from # self.null instead of self.blank. if self.choices: include_blank = (self.null or not (self.has_default() or 'initial' in kwargs)) defaults = {'choices': self.get_choices( include_blank=include_blank)} else: defaults = {'form_class': forms.BooleanField} defaults.update(kwargs) return super(BooleanField, self).formfield(**defaults) class CharField(Field): description = _("String (up to %(max_length)s)") def __init__(self, *args, **kwargs): super(CharField, self).__init__(*args, **kwargs) self.validators.append(validators.MaxLengthValidator(self.max_length)) def get_internal_type(self): return "CharField" def to_python(self, value): if isinstance(value, basestring) or value is None: return value return smart_unicode(value) def get_prep_value(self, value): return self.to_python(value) def formfield(self, **kwargs): # Passing max_length to forms.CharField means that the value's length # will be validated twice. This is considered acceptable since we want # the value in the form field (to pass into widget for example). defaults = {'max_length': self.max_length} defaults.update(kwargs) return super(CharField, self).formfield(**defaults) # TODO: Maybe move this into contrib, because it's specialized. class CommaSeparatedIntegerField(CharField): default_validators = [validators.validate_comma_separated_integer_list] description = _("Comma-separated integers") def formfield(self, **kwargs): defaults = { 'error_messages': { 'invalid': _(u'Enter only digits separated by commas.'), } } defaults.update(kwargs) return super(CommaSeparatedIntegerField, self).formfield(**defaults) class DateField(Field): empty_strings_allowed = False default_error_messages = { 'invalid': _(u"'%s' value has an invalid date format. 
It must be " u"in YYYY-MM-DD format."), 'invalid_date': _(u"'%s' value has the correct format (YYYY-MM-DD) " u"but it is an invalid date."), } description = _("Date (without time)") def __init__(self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs): self.auto_now, self.auto_now_add = auto_now, auto_now_add if auto_now or auto_now_add: kwargs['editable'] = False kwargs['blank'] = True Field.__init__(self, verbose_name, name, **kwargs) def get_internal_type(self): return "DateField" def to_python(self, value): if value is None: return value if isinstance(value, datetime.datetime): return value.date() if isinstance(value, datetime.date): return value value = smart_str(value) try: parsed = parse_date(value) if parsed is not None: return parsed except ValueError: msg = self.error_messages['invalid_date'] % value raise exceptions.ValidationError(msg) msg = self.error_messages['invalid'] % value raise exceptions.ValidationError(msg) def pre_save(self, model_instance, add): if self.auto_now or (self.auto_now_add and add): value = datetime.date.today() setattr(model_instance, self.attname, value) return value else: return super(DateField, self).pre_save(model_instance, add) def contribute_to_class(self, cls, name): super(DateField,self).contribute_to_class(cls, name) if not self.null: setattr(cls, 'get_next_by_%s' % self.name, curry(cls._get_next_or_previous_by_FIELD, field=self, is_next=True)) setattr(cls, 'get_previous_by_%s' % self.name, curry(cls._get_next_or_previous_by_FIELD, field=self, is_next=False)) def get_prep_lookup(self, lookup_type, value): # For "__month", "__day", and "__week_day" lookups, convert the value # to an int so the database backend always sees a consistent type. if lookup_type in ('month', 'day', 'week_day'): return int(value) return super(DateField, self).get_prep_lookup(lookup_type, value) def get_prep_value(self, value): return self.to_python(value) def get_db_prep_value(self, value, connection, prepared=False): # Casts dates into the format expected by the backend if not prepared: value = self.get_prep_value(value) return connection.ops.value_to_db_date(value) def value_to_string(self, obj): val = self._get_val_from_obj(obj) return '' if val is None else val.isoformat() def formfield(self, **kwargs): defaults = {'form_class': forms.DateField} defaults.update(kwargs) return super(DateField, self).formfield(**defaults) class DateTimeField(DateField): empty_strings_allowed = False default_error_messages = { 'invalid': _(u"'%s' value has an invalid format. It must be in " u"YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."), 'invalid_date': _(u"'%s' value has the correct format " u"(YYYY-MM-DD) but it is an invalid date."), 'invalid_datetime': _(u"'%s' value has the correct format " u"(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) " u"but it is an invalid date/time."), } description = _("Date (with time)") # __init__ is inherited from DateField def get_internal_type(self): return "DateTimeField" def to_python(self, value): if value is None: return value if isinstance(value, datetime.datetime): return value if isinstance(value, datetime.date): value = datetime.datetime(value.year, value.month, value.day) if settings.USE_TZ: # For backwards compatibility, interpret naive datetimes in # local time. This won't work during DST change, but we can't # do much about it, so we let the exceptions percolate up the # call stack. warnings.warn(u"DateTimeField received a naive datetime (%s)" u" while time zone support is active." 
% value, RuntimeWarning) default_timezone = timezone.get_default_timezone() value = timezone.make_aware(value, default_timezone) return value value = smart_str(value) try: parsed = parse_datetime(value) if parsed is not None: return parsed except ValueError: msg = self.error_messages['invalid_datetime'] % value raise exceptions.ValidationError(msg) try: parsed = parse_date(value) if parsed is not None: return datetime.datetime(parsed.year, parsed.month, parsed.day) except ValueError: msg = self.error_messages['invalid_date'] % value raise exceptions.ValidationError(msg) msg = self.error_messages['invalid'] % value raise exceptions.ValidationError(msg) def pre_save(self, model_instance, add): if self.auto_now or (self.auto_now_add and add): value = timezone.now() setattr(model_instance, self.attname, value) return value else: return super(DateTimeField, self).pre_save(model_instance, add) # contribute_to_class is inherited from DateField, it registers # get_next_by_FOO and get_prev_by_FOO # get_prep_lookup is inherited from DateField def get_prep_value(self, value): value = self.to_python(value) if value is not None and settings.USE_TZ and timezone.is_naive(value): # For backwards compatibility, interpret naive datetimes in local # time. This won't work during DST change, but we can't do much # about it, so we let the exceptions percolate up the call stack. warnings.warn(u"DateTimeField received a naive datetime (%s)" u" while time zone support is active." % value, RuntimeWarning) default_timezone = timezone.get_default_timezone() value = timezone.make_aware(value, default_timezone) return value def get_db_prep_value(self, value, connection, prepared=False): # Casts datetimes into the format expected by the backend if not prepared: value = self.get_prep_value(value) return connection.ops.value_to_db_datetime(value) def value_to_string(self, obj): val = self._get_val_from_obj(obj) return '' if val is None else val.isoformat() def formfield(self, **kwargs): defaults = {'form_class': forms.DateTimeField} defaults.update(kwargs) return super(DateTimeField, self).formfield(**defaults) class DecimalField(Field): empty_strings_allowed = False default_error_messages = { 'invalid': _(u"'%s' value must be a decimal number."), } description = _("Decimal number") def __init__(self, verbose_name=None, name=None, max_digits=None, decimal_places=None, **kwargs): self.max_digits, self.decimal_places = max_digits, decimal_places Field.__init__(self, verbose_name, name, **kwargs) def get_internal_type(self): return "DecimalField" def to_python(self, value): if value is None: return value try: return decimal.Decimal(value) except decimal.InvalidOperation: msg = self.error_messages['invalid'] % str(value) raise exceptions.ValidationError(msg) def _format(self, value): if isinstance(value, basestring) or value is None: return value else: return self.format_number(value) def format_number(self, value): """ Formats a number into a string with the requisite number of digits and decimal places. """ # Method moved to django.db.backends.util. # # It is preserved because it is used by the oracle backend # (django.db.backends.oracle.query), and also for # backwards-compatibility with any external code which may have used # this method. 
from django.db.backends import util return util.format_number(value, self.max_digits, self.decimal_places) def get_db_prep_save(self, value, connection): return connection.ops.value_to_db_decimal(self.to_python(value), self.max_digits, self.decimal_places) def get_prep_value(self, value): return self.to_python(value) def formfield(self, **kwargs): defaults = { 'max_digits': self.max_digits, 'decimal_places': self.decimal_places, 'form_class': forms.DecimalField, } defaults.update(kwargs) return super(DecimalField, self).formfield(**defaults) class EmailField(CharField): default_validators = [validators.validate_email] description = _("E-mail address") def __init__(self, *args, **kwargs): kwargs['max_length'] = kwargs.get('max_length', 75) CharField.__init__(self, *args, **kwargs) def formfield(self, **kwargs): # As with CharField, this will cause email validation to be performed # twice. defaults = { 'form_class': forms.EmailField, } defaults.update(kwargs) return super(EmailField, self).formfield(**defaults) class FilePathField(Field): description = _("File path") def __init__(self, verbose_name=None, name=None, path='', match=None, recursive=False, **kwargs): self.path, self.match, self.recursive = path, match, recursive kwargs['max_length'] = kwargs.get('max_length', 100) Field.__init__(self, verbose_name, name, **kwargs) def get_prep_value(self, value): value = super(FilePathField, self).get_prep_value(value) if value is None: return None return smart_unicode(value) def formfield(self, **kwargs): defaults = { 'path': self.path, 'match': self.match, 'recursive': self.recursive, 'form_class': forms.FilePathField, } defaults.update(kwargs) return super(FilePathField, self).formfield(**defaults) def get_internal_type(self): return "FilePathField" class FloatField(Field): empty_strings_allowed = False default_error_messages = { 'invalid': _("'%s' value must be a float."), } description = _("Floating point number") def get_prep_value(self, value): if value is None: return None return float(value) def get_internal_type(self): return "FloatField" def to_python(self, value): if value is None: return value try: return float(value) except (TypeError, ValueError): msg = self.error_messages['invalid'] % str(value) raise exceptions.ValidationError(msg) def formfield(self, **kwargs): defaults = {'form_class': forms.FloatField} defaults.update(kwargs) return super(FloatField, self).formfield(**defaults) class IntegerField(Field): empty_strings_allowed = False default_error_messages = { 'invalid': _("'%s' value must be an integer."), } description = _("Integer") def get_prep_value(self, value): if value is None: return None return int(value) def get_prep_lookup(self, lookup_type, value): if ((lookup_type == 'gte' or lookup_type == 'lt') and isinstance(value, float)): value = math.ceil(value) return super(IntegerField, self).get_prep_lookup(lookup_type, value) def get_internal_type(self): return "IntegerField" def to_python(self, value): if value is None: return value try: return int(value) except (TypeError, ValueError): msg = self.error_messages['invalid'] % str(value) raise exceptions.ValidationError(msg) def formfield(self, **kwargs): defaults = {'form_class': forms.IntegerField} defaults.update(kwargs) return super(IntegerField, self).formfield(**defaults) class BigIntegerField(IntegerField): empty_strings_allowed = False description = _("Big (8 byte) integer") MAX_BIGINT = 9223372036854775807 def get_internal_type(self): return "BigIntegerField" def formfield(self, **kwargs): defaults = 
{'min_value': -BigIntegerField.MAX_BIGINT - 1, 'max_value': BigIntegerField.MAX_BIGINT} defaults.update(kwargs) return super(BigIntegerField, self).formfield(**defaults) class IPAddressField(Field): empty_strings_allowed = False description = _("IPv4 address") def __init__(self, *args, **kwargs): kwargs['max_length'] = 15 Field.__init__(self, *args, **kwargs) def get_prep_value(self, value): value = super(IPAddressField, self).get_prep_value(value) if value is None: return None return smart_unicode(value) def get_internal_type(self): return "IPAddressField" def formfield(self, **kwargs): defaults = {'form_class': forms.IPAddressField} defaults.update(kwargs) return super(IPAddressField, self).formfield(**defaults) class GenericIPAddressField(Field): empty_strings_allowed = True description = _("IP address") default_error_messages = {} def __init__(self, verbose_name=None, name=None, protocol='both', unpack_ipv4=False, *args, **kwargs): self.unpack_ipv4 = unpack_ipv4 self.default_validators, invalid_error_message = \ validators.ip_address_validators(protocol, unpack_ipv4) self.default_error_messages['invalid'] = invalid_error_message kwargs['max_length'] = 39 Field.__init__(self, verbose_name, name, *args, **kwargs) def get_internal_type(self): return "GenericIPAddressField" def to_python(self, value): if value and ':' in value: return clean_ipv6_address(value, self.unpack_ipv4, self.error_messages['invalid']) return value def get_db_prep_value(self, value, connection, prepared=False): if not prepared: value = self.get_prep_value(value) return value or None def get_prep_value(self, value): if value is None: return value if value and ':' in value: try: return clean_ipv6_address(value, self.unpack_ipv4) except exceptions.ValidationError: pass return smart_unicode(value) def formfield(self, **kwargs): defaults = {'form_class': forms.GenericIPAddressField} defaults.update(kwargs) return super(GenericIPAddressField, self).formfield(**defaults) class NullBooleanField(Field): empty_strings_allowed = False default_error_messages = { 'invalid': _("'%s' value must be either None, True or False."), } description = _("Boolean (Either True, False or None)") def __init__(self, *args, **kwargs): kwargs['null'] = True kwargs['blank'] = True Field.__init__(self, *args, **kwargs) def get_internal_type(self): return "NullBooleanField" def to_python(self, value): if value is None: return None if value in (True, False): return bool(value) if value in ('None',): return None if value in ('t', 'True', '1'): return True if value in ('f', 'False', '0'): return False msg = self.error_messages['invalid'] % str(value) raise exceptions.ValidationError(msg) def get_prep_lookup(self, lookup_type, value): # Special-case handling for filters coming from a Web request (e.g. the # admin interface). Only works for scalar values (not lists). If you're # passing in a list, you might as well make things the right type when # constructing the list. 
if value in ('1', '0'): value = bool(int(value)) return super(NullBooleanField, self).get_prep_lookup(lookup_type, value) def get_prep_value(self, value): if value is None: return None return bool(value) def formfield(self, **kwargs): defaults = { 'form_class': forms.NullBooleanField, 'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text} defaults.update(kwargs) return super(NullBooleanField, self).formfield(**defaults) class PositiveIntegerField(IntegerField): description = _("Positive integer") def get_internal_type(self): return "PositiveIntegerField" def formfield(self, **kwargs): defaults = {'min_value': 0} defaults.update(kwargs) return super(PositiveIntegerField, self).formfield(**defaults) class PositiveSmallIntegerField(IntegerField): description = _("Positive small integer") def get_internal_type(self): return "PositiveSmallIntegerField" def formfield(self, **kwargs): defaults = {'min_value': 0} defaults.update(kwargs) return super(PositiveSmallIntegerField, self).formfield(**defaults) class SlugField(CharField): description = _("Slug (up to %(max_length)s)") def __init__(self, *args, **kwargs): kwargs['max_length'] = kwargs.get('max_length', 50) # Set db_index=True unless it's been set manually. if 'db_index' not in kwargs: kwargs['db_index'] = True super(SlugField, self).__init__(*args, **kwargs) def get_internal_type(self): return "SlugField" def formfield(self, **kwargs): defaults = {'form_class': forms.SlugField} defaults.update(kwargs) return super(SlugField, self).formfield(**defaults) class SmallIntegerField(IntegerField): description = _("Small integer") def get_internal_type(self): return "SmallIntegerField" class TextField(Field): description = _("Text") def get_internal_type(self): return "TextField" def get_prep_value(self, value): if isinstance(value, basestring) or value is None: return value return smart_unicode(value) def formfield(self, **kwargs): defaults = {'widget': forms.Textarea} defaults.update(kwargs) return super(TextField, self).formfield(**defaults) class TimeField(Field): empty_strings_allowed = False default_error_messages = { 'invalid': _(u"'%s' value has an invalid format. It must be in " u"HH:MM[:ss[.uuuuuu]] format."), 'invalid_time': _(u"'%s' value has the correct format " u"(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."), } description = _("Time") def __init__(self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs): self.auto_now, self.auto_now_add = auto_now, auto_now_add if auto_now or auto_now_add: kwargs['editable'] = False kwargs['blank'] = True Field.__init__(self, verbose_name, name, **kwargs) def get_internal_type(self): return "TimeField" def to_python(self, value): if value is None: return None if isinstance(value, datetime.time): return value if isinstance(value, datetime.datetime): # Not usually a good idea to pass in a datetime here (it loses # information), but this can be a side-effect of interacting with a # database backend (e.g. Oracle), so we'll be accommodating. 
return value.time() value = smart_str(value) try: parsed = parse_time(value) if parsed is not None: return parsed except ValueError: msg = self.error_messages['invalid_time'] % value raise exceptions.ValidationError(msg) msg = self.error_messages['invalid'] % value raise exceptions.ValidationError(msg) def pre_save(self, model_instance, add): if self.auto_now or (self.auto_now_add and add): value = datetime.datetime.now().time() setattr(model_instance, self.attname, value) return value else: return super(TimeField, self).pre_save(model_instance, add) def get_prep_value(self, value): return self.to_python(value) def get_db_prep_value(self, value, connection, prepared=False): # Casts times into the format expected by the backend if not prepared: value = self.get_prep_value(value) return connection.ops.value_to_db_time(value) def value_to_string(self, obj): val = self._get_val_from_obj(obj) return '' if val is None else val.isoformat() def formfield(self, **kwargs): defaults = {'form_class': forms.TimeField} defaults.update(kwargs) return super(TimeField, self).formfield(**defaults) class URLField(CharField): description = _("URL") def __init__(self, verbose_name=None, name=None, verify_exists=False, **kwargs): kwargs['max_length'] = kwargs.get('max_length', 200) CharField.__init__(self, verbose_name, name, **kwargs) self.validators.append( validators.URLValidator(verify_exists=verify_exists)) def formfield(self, **kwargs): # As with CharField, this will cause URL validation to be performed # twice. defaults = { 'form_class': forms.URLField, } defaults.update(kwargs) return super(URLField, self).formfield(**defaults)
klnprj/testapp
django/db/models/fields/__init__.py
Python
bsd-3-clause
47,219
0.000911
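The field classes in the record above (DecimalField, GenericIPAddressField, SlugField, NullBooleanField and so on) are the implementation side of Django's model fields. As a point of reference, here is a minimal sketch of how they are typically declared on a model; the model name and field names below are illustrative assumptions, not part of the record.

# Hypothetical models.py using several of the field types implemented above.
# Assumes a configured Django project of the same era; all names are invented.
from django.db import models


class Listing(models.Model):
    slug = models.SlugField()  # SlugField sets db_index=True unless overridden
    price = models.DecimalField(max_digits=8, decimal_places=2)
    contact_email = models.EmailField(blank=True)  # defaults to max_length=75 here
    client_ip = models.GenericIPAddressField(protocol="both", unpack_ipv4=True,
                                             null=True, blank=True)
    published = models.NullBooleanField()
    created = models.DateTimeField(auto_now_add=True)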
from django.contrib.auth.models import User
from django.db import models
from django.utils import timezone

import jsonfield

from .hooks import hookset
from .utils import load_path_attr


class UserState(models.Model):
    """
    this stores the overall state of a particular user.
    """

    user = models.OneToOneField(User, null=True, on_delete=models.SET_NULL)
    data = jsonfield.JSONField(default=dict, blank=True)

    @classmethod
    def for_user(cls, user):
        assert user.is_authenticated(), "user must be authenticated"
        user_state, _ = cls.objects.get_or_create(user=user)
        return user_state

    def get(self, key):
        return self.data.get(key)

    def set(self, key, value):
        self.data[key] = value
        self.save()


class ActivityState(models.Model):
    """
    this stores the overall state of a particular user doing a particular
    activity across all sessions of that activity.
    """

    user = models.ForeignKey(User, null=True, on_delete=models.SET_NULL)
    activity_key = models.CharField(max_length=300)
    activity_class_path = models.CharField(max_length=300)

    # how many sessions have been completed by this user
    completed_count = models.IntegerField(default=0)

    data = jsonfield.JSONField(default=dict, blank=True)

    class Meta:
        unique_together = [("user", "activity_key")]

    @property
    def activity_class(self):
        return load_path_attr(self.activity_class_path)

    @property
    def in_progress(self):
        return next(iter(self.sessions.filter(completed=None)), None)

    @property
    def latest(self):
        session, _ = self.sessions.get_or_create(completed=None)
        return session

    @property
    def last_completed(self):
        return self.sessions.filter(completed__isnull=False).order_by("-started").first()

    @property
    def all_sessions(self):
        return self.sessions.order_by("started")

    @classmethod
    def state_for_user(cls, user, activity_key):
        assert user.is_authenticated(), "user must be authenticated"
        return cls.objects.filter(user=user, activity_key=activity_key).first()

    @property
    def progression(self):
        if self.in_progress:
            return "continue"
        elif self.activity_class.repeatable:
            return "repeat"
        else:
            return "completed"


class ActivitySessionState(models.Model):
    """
    this stores the state of a particular session of a particular user
    doing a particular activity.
""" activity_state = models.ForeignKey(ActivityState, related_name="sessions", on_delete=models.CASCADE) started = models.DateTimeField(default=timezone.now) completed = models.DateTimeField(null=True) # NULL means in progress data = jsonfield.JSONField(default=dict, blank=True) class Meta: unique_together = [("activity_state", "started")] def mark_completed(self): self.completed = timezone.now() self.save() self.activity_state.completed_count = models.F("completed_count") + 1 self.activity_state.save() def activities_for_user(user): activities = { "available": [], "inprogress": [], "completed": [], "repeatable": [] } for key, activity_class_path in hookset.all_activities(): activity = load_path_attr(activity_class_path) state = ActivityState.state_for_user(user, key) user_num_completions = ActivitySessionState.objects.filter( user=user, activity_key=key, completed__isnull=False ).count() activity_entry = { "activity_key": key, "title": activity.title, "description": activity.description, "state": state, "user_num_completions": user_num_completions, "repeatable": activity.repeatable, } if state: if state.in_progress: activities["inprogress"].append(activity_entry) elif activity.repeatable: activities["repeatable"].append(activity_entry) else: activities["completed"].append(activity_entry) else: activities["available"].append(activity_entry) return activities
pinax/pinax-lms-activities
pinax/lms/activities/models.py
Python
mit
4,304
0.000465
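The pinax-lms-activities models above track per-user activity state and the sessions of each activity. A short usage sketch follows, assuming an existing Django user and an activity registered through hookset; the key "quiz-1", the class path "myapp.activities.Quiz" and the helper function name are assumptions for illustration only.

# Illustrative only: the activity key, class path and username are assumptions.
from django.contrib.auth.models import User

from pinax.lms.activities.models import (
    ActivityState, UserState, activities_for_user
)


def complete_one_session(username, activity_key="quiz-1"):
    user = User.objects.get(username=username)

    # Per-user scratch storage
    UserState.for_user(user).set("last_activity", activity_key)

    # Overall state for this user/activity pair; activity_class_path is required.
    state, _ = ActivityState.objects.get_or_create(
        user=user,
        activity_key=activity_key,
        defaults={"activity_class_path": "myapp.activities.Quiz"},
    )

    # `latest` get_or_creates the open (completed=None) session.
    session = state.latest
    session.mark_completed()  # stamps `completed` and bumps completed_count

    # Buckets of available / inprogress / completed / repeatable activities.
    return activities_for_user(user)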
""" Fichier main qui lance le programme """ from Game import * game = Game() game.play()
Vodak/SINS
src/main.py
Python
gpl-3.0
91
0
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import sqlalchemy


def upgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)

    stack = sqlalchemy.Table('stack', meta, autoload=True)
    name_index = sqlalchemy.Index('ix_stack_owner_id',
                                  stack.c.owner_id,
                                  mysql_length=36)
    name_index.create(migrate_engine)
cwolferh/heat-scratch
heat/db/sqlalchemy/migrate_repo/versions/071_stack_owner_id_index.py
Python
apache-2.0
897
0
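The Heat migration above only adds the ix_stack_owner_id index. For symmetry, here is a sketch of what the inverse operation could look like under sqlalchemy-migrate conventions; Heat's actual repository may not ship a downgrade for this revision, so this is an assumption, not part of the record.

# Hypothetical downgrade matching the upgrade() above; not taken from the record.
import sqlalchemy


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)

    stack = sqlalchemy.Table('stack', meta, autoload=True)
    name_index = sqlalchemy.Index('ix_stack_owner_id',
                                  stack.c.owner_id,
                                  mysql_length=36)
    name_index.drop(migrate_engine)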
""" The main QuerySet implementation. This provides the public API for the ORM. """ import copy import itertools import sys from django.core import exceptions from django.db import connections, router, transaction, IntegrityError from django.db.models.fields import AutoField from django.db.models.query_utils import (Q, select_related_descend, deferred_class_factory, InvalidQuery) from django.db.models.deletion import Collector from django.db.models import sql from django.utils.functional import partition # Used to control how many objects are worked with at once in some cases (e.g. # when deleting objects). CHUNK_SIZE = 100 ITER_CHUNK_SIZE = CHUNK_SIZE # The maximum number of items to display in a QuerySet.__repr__ REPR_OUTPUT_SIZE = 20 # Pull into this namespace for backwards compatibility. EmptyResultSet = sql.EmptyResultSet class QuerySet(object): """ Represents a lazy database lookup for a set of objects. """ def __init__(self, model=None, query=None, using=None): self.model = model # EmptyQuerySet instantiates QuerySet with model as None self._db = using self.query = query or sql.Query(self.model) self._result_cache = None self._iter = None self._sticky_filter = False self._for_write = False self._prefetch_related_lookups = [] self._prefetch_done = False self._known_related_object = None # (attname, rel_obj) ######################## # PYTHON MAGIC METHODS # ######################## def __deepcopy__(self, memo): """ Deep copy of a QuerySet doesn't populate the cache """ obj = self.__class__() for k,v in self.__dict__.items(): if k in ('_iter','_result_cache'): obj.__dict__[k] = None else: obj.__dict__[k] = copy.deepcopy(v, memo) return obj def __getstate__(self): """ Allows the QuerySet to be pickled. """ # Force the cache to be fully populated. len(self) obj_dict = self.__dict__.copy() obj_dict['_iter'] = None return obj_dict def __repr__(self): data = list(self[:REPR_OUTPUT_SIZE + 1]) if len(data) > REPR_OUTPUT_SIZE: data[-1] = "...(remaining elements truncated)..." return repr(data) def __len__(self): # Since __len__ is called quite frequently (for example, as part of # list(qs), we make some effort here to be as efficient as possible # whilst not messing up any existing iterators against the QuerySet. if self._result_cache is None: if self._iter: self._result_cache = list(self._iter) else: self._result_cache = list(self.iterator()) elif self._iter: self._result_cache.extend(self._iter) if self._prefetch_related_lookups and not self._prefetch_done: self._prefetch_related_objects() return len(self._result_cache) def __iter__(self): if self._prefetch_related_lookups and not self._prefetch_done: # We need all the results in order to be able to do the prefetch # in one go. To minimize code duplication, we use the __len__ # code path which also forces this, and also does the prefetch len(self) if self._result_cache is None: self._iter = self.iterator() self._result_cache = [] if self._iter: return self._result_iter() # Python's list iterator is better than our version when we're just # iterating over the cache. return iter(self._result_cache) def _result_iter(self): pos = 0 while 1: upper = len(self._result_cache) while pos < upper: yield self._result_cache[pos] pos = pos + 1 if not self._iter: raise StopIteration if len(self._result_cache) <= pos: self._fill_cache() def __nonzero__(self): if self._prefetch_related_lookups and not self._prefetch_done: # We need all the results in order to be able to do the prefetch # in one go. 
To minimize code duplication, we use the __len__ # code path which also forces this, and also does the prefetch len(self) if self._result_cache is not None: return bool(self._result_cache) try: next(iter(self)) except StopIteration: return False return True def __contains__(self, val): # The 'in' operator works without this method, due to __iter__. This # implementation exists only to shortcut the creation of Model # instances, by bailing out early if we find a matching element. pos = 0 if self._result_cache is not None: if val in self._result_cache: return True elif self._iter is None: # iterator is exhausted, so we have our answer return False # remember not to check these again: pos = len(self._result_cache) else: # We need to start filling the result cache out. The following # ensures that self._iter is not None and self._result_cache is not # None it = iter(self) # Carry on, one result at a time. while True: if len(self._result_cache) <= pos: self._fill_cache(num=1) if self._iter is None: # we ran out of items return False if self._result_cache[pos] == val: return True pos += 1 def __getitem__(self, k): """ Retrieves an item or slice from the set of results. """ if not isinstance(k, (slice, int, long)): raise TypeError assert ((not isinstance(k, slice) and (k >= 0)) or (isinstance(k, slice) and (k.start is None or k.start >= 0) and (k.stop is None or k.stop >= 0))), \ "Negative indexing is not supported." if self._result_cache is not None: if self._iter is not None: # The result cache has only been partially populated, so we may # need to fill it out a bit more. if isinstance(k, slice): if k.stop is not None: # Some people insist on passing in strings here. bound = int(k.stop) else: bound = None else: bound = k + 1 if len(self._result_cache) < bound: self._fill_cache(bound - len(self._result_cache)) return self._result_cache[k] if isinstance(k, slice): qs = self._clone() if k.start is not None: start = int(k.start) else: start = None if k.stop is not None: stop = int(k.stop) else: stop = None qs.query.set_limits(start, stop) return k.step and list(qs)[::k.step] or qs try: qs = self._clone() qs.query.set_limits(k, k + 1) return list(qs)[0] except self.model.DoesNotExist as e: raise IndexError(e.args) def __and__(self, other): self._merge_sanity_check(other) if isinstance(other, EmptyQuerySet): return other._clone() combined = self._clone() combined.query.combine(other.query, sql.AND) return combined def __or__(self, other): self._merge_sanity_check(other) combined = self._clone() if isinstance(other, EmptyQuerySet): return combined combined.query.combine(other.query, sql.OR) return combined #################################### # METHODS THAT DO DATABASE QUERIES # #################################### def iterator(self): """ An iterator over the results from applying this QuerySet to the database. """ fill_cache = False if connections[self.db].features.supports_select_related: fill_cache = self.query.select_related if isinstance(fill_cache, dict): requested = fill_cache else: requested = None max_depth = self.query.max_depth extra_select = self.query.extra_select.keys() aggregate_select = self.query.aggregate_select.keys() only_load = self.query.get_loaded_field_names() if not fill_cache: fields = self.model._meta.fields load_fields = [] # If only/defer clauses have been specified, # build the list of fields that are to be loaded. 
if only_load: for field, model in self.model._meta.get_fields_with_model(): if model is None: model = self.model try: if field.name in only_load[model]: # Add a field that has been explicitly included load_fields.append(field.name) except KeyError: # Model wasn't explicitly listed in the only_load table # Therefore, we need to load all fields from this model load_fields.append(field.name) index_start = len(extra_select) aggregate_start = index_start + len(load_fields or self.model._meta.fields) skip = None if load_fields and not fill_cache: # Some fields have been deferred, so we have to initialise # via keyword arguments. skip = set() init_list = [] for field in fields: if field.name not in load_fields: skip.add(field.attname) else: init_list.append(field.attname) model_cls = deferred_class_factory(self.model, skip) # Cache db, model and known_related_object outside the loop db = self.db model = self.model kro_attname, kro_instance = self._known_related_object or (None, None) compiler = self.query.get_compiler(using=db) if fill_cache: klass_info = get_klass_info(model, max_depth=max_depth, requested=requested, only_load=only_load) for row in compiler.results_iter(): if fill_cache: obj, _ = get_cached_row(row, index_start, db, klass_info, offset=len(aggregate_select)) else: # Omit aggregates in object creation. row_data = row[index_start:aggregate_start] if skip: obj = model_cls(**dict(zip(init_list, row_data))) else: obj = model(*row_data) # Store the source database of the object obj._state.db = db # This object came from the database; it's not being added. obj._state.adding = False if extra_select: for i, k in enumerate(extra_select): setattr(obj, k, row[i]) # Add the aggregates to the model if aggregate_select: for i, aggregate in enumerate(aggregate_select): setattr(obj, aggregate, row[i + aggregate_start]) # Add the known related object to the model, if there is one if kro_instance: setattr(obj, kro_attname, kro_instance) yield obj def aggregate(self, *args, **kwargs): """ Returns a dictionary containing the calculations (aggregation) over the current queryset If args is present the expression is passed as a kwarg using the Aggregate object's default alias. """ if self.query.distinct_fields: raise NotImplementedError("aggregate() + distinct(fields) not implemented.") for arg in args: kwargs[arg.default_alias] = arg query = self.query.clone() for (alias, aggregate_expr) in kwargs.items(): query.add_aggregate(aggregate_expr, self.model, alias, is_summary=True) return query.get_aggregation(using=self.db) def count(self): """ Performs a SELECT COUNT() and returns the number of records as an integer. If the QuerySet is already fully cached this simply returns the length of the cached results set to avoid multiple SELECT COUNT(*) calls. """ if self._result_cache is not None and not self._iter: return len(self._result_cache) return self.query.get_count(using=self.db) def get(self, *args, **kwargs): """ Performs the query and returns a single object matching the given keyword arguments. """ clone = self.filter(*args, **kwargs) if self.query.can_filter(): clone = clone.order_by() num = len(clone) if num == 1: return clone._result_cache[0] if not num: raise self.model.DoesNotExist( "%s matching query does not exist. " "Lookup parameters were %s" % (self.model._meta.object_name, kwargs)) raise self.model.MultipleObjectsReturned( "get() returned more than one %s -- it returned %s! 
" "Lookup parameters were %s" % (self.model._meta.object_name, num, kwargs)) def create(self, **kwargs): """ Creates a new object with the given kwargs, saving it to the database and returning the created object. """ obj = self.model(**kwargs) self._for_write = True obj.save(force_insert=True, using=self.db) return obj def bulk_create(self, objs): """ Inserts each of the instances into the database. This does *not* call save() on each of the instances, does not send any pre/post save signals, and does not set the primary key attribute if it is an autoincrement field. """ # So this case is fun. When you bulk insert you don't get the primary # keys back (if it's an autoincrement), so you can't insert into the # child tables which references this. There are two workarounds, 1) # this could be implemented if you didn't have an autoincrement pk, # and 2) you could do it by doing O(n) normal inserts into the parent # tables to get the primary keys back, and then doing a single bulk # insert into the childmost table. We're punting on these for now # because they are relatively rare cases. if self.model._meta.parents: raise ValueError("Can't bulk create an inherited model") if not objs: return objs self._for_write = True connection = connections[self.db] fields = self.model._meta.local_fields if not transaction.is_managed(using=self.db): transaction.enter_transaction_management(using=self.db) forced_managed = True else: forced_managed = False try: if (connection.features.can_combine_inserts_with_and_without_auto_increment_pk and self.model._meta.has_auto_field): self.model._base_manager._insert(objs, fields=fields, using=self.db) else: objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs) if objs_with_pk: self.model._base_manager._insert(objs_with_pk, fields=fields, using=self.db) if objs_without_pk: self.model._base_manager._insert(objs_without_pk, fields=[f for f in fields if not isinstance(f, AutoField)], using=self.db) if forced_managed: transaction.commit(using=self.db) else: transaction.commit_unless_managed(using=self.db) finally: if forced_managed: transaction.leave_transaction_management(using=self.db) return objs def get_or_create(self, **kwargs): """ Looks up an object with the given kwargs, creating one if necessary. Returns a tuple of (object, created), where created is a boolean specifying whether an object was created. """ assert kwargs, \ 'get_or_create() must be passed at least one keyword argument' defaults = kwargs.pop('defaults', {}) lookup = kwargs.copy() for f in self.model._meta.fields: if f.attname in lookup: lookup[f.name] = lookup.pop(f.attname) try: self._for_write = True return self.get(**lookup), False except self.model.DoesNotExist: try: params = dict([(k, v) for k, v in kwargs.items() if '__' not in k]) params.update(defaults) obj = self.model(**params) sid = transaction.savepoint(using=self.db) obj.save(force_insert=True, using=self.db) transaction.savepoint_commit(sid, using=self.db) return obj, True except IntegrityError as e: transaction.savepoint_rollback(sid, using=self.db) exc_info = sys.exc_info() try: return self.get(**lookup), False except self.model.DoesNotExist: # Re-raise the IntegrityError with its original traceback. raise exc_info[1], None, exc_info[2] def latest(self, field_name=None): """ Returns the latest object, according to the model's 'get_latest_by' option or optional given field_name. 
""" latest_by = field_name or self.model._meta.get_latest_by assert bool(latest_by), "latest() requires either a field_name parameter or 'get_latest_by' in the model" assert self.query.can_filter(), \ "Cannot change a query once a slice has been taken." obj = self._clone() obj.query.set_limits(high=1) obj.query.clear_ordering() obj.query.add_ordering('-%s' % latest_by) return obj.get() def in_bulk(self, id_list): """ Returns a dictionary mapping each of the given IDs to the object with that ID. """ assert self.query.can_filter(), \ "Cannot use 'limit' or 'offset' with in_bulk" if not id_list: return {} qs = self._clone() qs.query.add_filter(('pk__in', id_list)) qs.query.clear_ordering(force_empty=True) return dict([(obj._get_pk_val(), obj) for obj in qs]) def delete(self): """ Deletes the records in the current QuerySet. """ assert self.query.can_filter(), \ "Cannot use 'limit' or 'offset' with delete." del_query = self._clone() # The delete is actually 2 queries - one to find related objects, # and one to delete. Make sure that the discovery of related # objects is performed on the same database as the deletion. del_query._for_write = True # Disable non-supported fields. del_query.query.select_for_update = False del_query.query.select_related = False del_query.query.clear_ordering() collector = Collector(using=del_query.db) collector.collect(del_query) collector.delete() # Clear the result cache, in case this QuerySet gets reused. self._result_cache = None delete.alters_data = True def update(self, **kwargs): """ Updates all elements in the current QuerySet, setting all the given fields to the appropriate values. """ assert self.query.can_filter(), \ "Cannot update a query once a slice has been taken." self._for_write = True query = self.query.clone(sql.UpdateQuery) query.add_update_values(kwargs) if not transaction.is_managed(using=self.db): transaction.enter_transaction_management(using=self.db) forced_managed = True else: forced_managed = False try: rows = query.get_compiler(self.db).execute_sql(None) if forced_managed: transaction.commit(using=self.db) else: transaction.commit_unless_managed(using=self.db) finally: if forced_managed: transaction.leave_transaction_management(using=self.db) self._result_cache = None return rows update.alters_data = True def _update(self, values): """ A version of update that accepts field objects instead of field names. Used primarily for model saving and not intended for use by general code (it requires too much poking around at model internals to be useful at that level). """ assert self.query.can_filter(), \ "Cannot update a query once a slice has been taken." query = self.query.clone(sql.UpdateQuery) query.add_update_fields(values) self._result_cache = None return query.get_compiler(self.db).execute_sql(None) _update.alters_data = True def exists(self): if self._result_cache is None: return self.query.has_results(using=self.db) return bool(self._result_cache) def _prefetch_related_objects(self): # This method can only be called once the result cache has been filled. 
prefetch_related_objects(self._result_cache, self._prefetch_related_lookups) self._prefetch_done = True ################################################## # PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS # ################################################## def values(self, *fields): return self._clone(klass=ValuesQuerySet, setup=True, _fields=fields) def values_list(self, *fields, **kwargs): flat = kwargs.pop('flat', False) if kwargs: raise TypeError('Unexpected keyword arguments to values_list: %s' % (kwargs.keys(),)) if flat and len(fields) > 1: raise TypeError("'flat' is not valid when values_list is called with more than one field.") return self._clone(klass=ValuesListQuerySet, setup=True, flat=flat, _fields=fields) def dates(self, field_name, kind, order='ASC'): """ Returns a list of datetime objects representing all available dates for the given field_name, scoped to 'kind'. """ assert kind in ("month", "year", "day"), \ "'kind' must be one of 'year', 'month' or 'day'." assert order in ('ASC', 'DESC'), \ "'order' must be either 'ASC' or 'DESC'." return self._clone(klass=DateQuerySet, setup=True, _field_name=field_name, _kind=kind, _order=order) def none(self): """ Returns an empty QuerySet. """ return self._clone(klass=EmptyQuerySet) ################################################################## # PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET # ################################################################## def all(self): """ Returns a new QuerySet that is a copy of the current one. This allows a QuerySet to proxy for a model manager in some cases. """ return self._clone() def filter(self, *args, **kwargs): """ Returns a new QuerySet instance with the args ANDed to the existing set. """ return self._filter_or_exclude(False, *args, **kwargs) def exclude(self, *args, **kwargs): """ Returns a new QuerySet instance with NOT (args) ANDed to the existing set. """ return self._filter_or_exclude(True, *args, **kwargs) def _filter_or_exclude(self, negate, *args, **kwargs): if args or kwargs: assert self.query.can_filter(), \ "Cannot filter a query once a slice has been taken." clone = self._clone() if negate: clone.query.add_q(~Q(*args, **kwargs)) else: clone.query.add_q(Q(*args, **kwargs)) return clone def complex_filter(self, filter_obj): """ Returns a new QuerySet instance with filter_obj added to the filters. filter_obj can be a Q object (or anything with an add_to_query() method) or a dictionary of keyword lookup arguments. This exists to support framework features such as 'limit_choices_to', and usually it will be more natural to use other methods. """ if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'): clone = self._clone() clone.query.add_q(filter_obj) return clone else: return self._filter_or_exclude(None, **filter_obj) def select_for_update(self, **kwargs): """ Returns a new QuerySet instance that will select objects with a FOR UPDATE lock. """ # Default to false for nowait nowait = kwargs.pop('nowait', False) obj = self._clone() obj.query.select_for_update = True obj.query.select_for_update_nowait = nowait return obj def select_related(self, *fields, **kwargs): """ Returns a new QuerySet instance that will select related objects. If fields are specified, they must be ForeignKey fields and only those related objects are included in the selection. 
""" depth = kwargs.pop('depth', 0) if kwargs: raise TypeError('Unexpected keyword arguments to select_related: %s' % (kwargs.keys(),)) obj = self._clone() if fields: if depth: raise TypeError('Cannot pass both "depth" and fields to select_related()') obj.query.add_select_related(fields) else: obj.query.select_related = True if depth: obj.query.max_depth = depth return obj def prefetch_related(self, *lookups): """ Returns a new QuerySet instance that will prefetch the specified Many-To-One and Many-To-Many related objects when the QuerySet is evaluated. When prefetch_related() is called more than once, the list of lookups to prefetch is appended to. If prefetch_related(None) is called, the the list is cleared. """ clone = self._clone() if lookups == (None,): clone._prefetch_related_lookups = [] else: clone._prefetch_related_lookups.extend(lookups) return clone def dup_select_related(self, other): """ Copies the related selection status from the QuerySet 'other' to the current QuerySet. """ self.query.select_related = other.query.select_related def annotate(self, *args, **kwargs): """ Return a query set in which the returned objects have been annotated with data aggregated from related fields. """ for arg in args: if arg.default_alias in kwargs: raise ValueError("The named annotation '%s' conflicts with the " "default name for another annotation." % arg.default_alias) kwargs[arg.default_alias] = arg names = getattr(self, '_fields', None) if names is None: names = set(self.model._meta.get_all_field_names()) for aggregate in kwargs: if aggregate in names: raise ValueError("The annotation '%s' conflicts with a field on " "the model." % aggregate) obj = self._clone() obj._setup_aggregate_query(kwargs.keys()) # Add the aggregates to the query for (alias, aggregate_expr) in kwargs.items(): obj.query.add_aggregate(aggregate_expr, self.model, alias, is_summary=False) return obj def order_by(self, *field_names): """ Returns a new QuerySet instance with the ordering changed. """ assert self.query.can_filter(), \ "Cannot reorder a query once a slice has been taken." obj = self._clone() obj.query.clear_ordering() obj.query.add_ordering(*field_names) return obj def distinct(self, *field_names): """ Returns a new QuerySet instance that will select only distinct results. """ assert self.query.can_filter(), \ "Cannot create distinct fields once a slice has been taken." obj = self._clone() obj.query.add_distinct_fields(*field_names) return obj def extra(self, select=None, where=None, params=None, tables=None, order_by=None, select_params=None): """ Adds extra SQL fragments to the query. """ assert self.query.can_filter(), \ "Cannot change a query once a slice has been taken" clone = self._clone() clone.query.add_extra(select, select_params, where, params, tables, order_by) return clone def reverse(self): """ Reverses the ordering of the QuerySet. """ clone = self._clone() clone.query.standard_ordering = not clone.query.standard_ordering return clone def defer(self, *fields): """ Defers the loading of data for certain fields until they are accessed. The set of fields to defer is added to any existing set of deferred fields. The only exception to this is if None is passed in as the only parameter, in which case all deferrals are removed (None acts as a reset option). """ clone = self._clone() if fields == (None,): clone.query.clear_deferred_loading() else: clone.query.add_deferred_loading(fields) return clone def only(self, *fields): """ Essentially, the opposite of defer. 
Only the fields passed into this method and that are not already specified as deferred are loaded immediately when the queryset is evaluated. """ if fields == (None,): # Can only pass None to defer(), not only(), as the rest option. # That won't stop people trying to do this, so let's be explicit. raise TypeError("Cannot pass None as an argument to only().") clone = self._clone() clone.query.add_immediate_loading(fields) return clone def using(self, alias): """ Selects which database this QuerySet should excecute its query against. """ clone = self._clone() clone._db = alias return clone ################################### # PUBLIC INTROSPECTION ATTRIBUTES # ################################### def ordered(self): """ Returns True if the QuerySet is ordered -- i.e. has an order_by() clause or a default ordering on the model. """ if self.query.extra_order_by or self.query.order_by: return True elif self.query.default_ordering and self.query.model._meta.ordering: return True else: return False ordered = property(ordered) @property def db(self): "Return the database that will be used if this query is executed now" if self._for_write: return self._db or router.db_for_write(self.model) return self._db or router.db_for_read(self.model) ################### # PRIVATE METHODS # ################### def _clone(self, klass=None, setup=False, **kwargs): if klass is None: klass = self.__class__ query = self.query.clone() if self._sticky_filter: query.filter_is_sticky = True c = klass(model=self.model, query=query, using=self._db) c._for_write = self._for_write c._prefetch_related_lookups = self._prefetch_related_lookups[:] c._known_related_object = self._known_related_object c.__dict__.update(kwargs) if setup and hasattr(c, '_setup_query'): c._setup_query() return c def _fill_cache(self, num=None): """ Fills the result cache with 'num' more entries (or until the results iterator is exhausted). """ if self._iter: try: for i in range(num or ITER_CHUNK_SIZE): self._result_cache.append(next(self._iter)) except StopIteration: self._iter = None def _next_is_sticky(self): """ Indicates that the next filter call and the one following that should be treated as a single filter. This is only important when it comes to determining when to reuse tables for many-to-many filters. Required so that we can filter naturally on the results of related managers. This doesn't return a clone of the current QuerySet (it returns "self"). The method is only used internally and should be immediately followed by a filter() that does create a clone. """ self._sticky_filter = True return self def _merge_sanity_check(self, other): """ Checks that we are merging two comparable QuerySet classes. By default this does nothing, but see the ValuesQuerySet for an example of where it's useful. """ pass def _setup_aggregate_query(self, aggregates): """ Prepare the query for computing a result that contains aggregate annotations. """ opts = self.model._meta if self.query.group_by is None: field_names = [f.attname for f in opts.fields] self.query.add_fields(field_names, False) self.query.set_group_by() def _prepare(self): return self def _as_sql(self, connection): """ Returns the internal query's SQL and parameters (as a tuple). """ obj = self.values("pk") if obj._db is None or connection == connections[obj._db]: return obj.query.get_compiler(connection=connection).as_nested_sql() raise ValueError("Can't do subqueries with queries on different DBs.") # When used as part of a nested query, a queryset will never be an "always # empty" result. 
value_annotation = True class ValuesQuerySet(QuerySet): def __init__(self, *args, **kwargs): super(ValuesQuerySet, self).__init__(*args, **kwargs) # select_related isn't supported in values(). (FIXME -#3358) self.query.select_related = False # QuerySet.clone() will also set up the _fields attribute with the # names of the model fields to select. def iterator(self): # Purge any extra columns that haven't been explicitly asked for extra_names = self.query.extra_select.keys() field_names = self.field_names aggregate_names = self.query.aggregate_select.keys() names = extra_names + field_names + aggregate_names for row in self.query.get_compiler(self.db).results_iter(): yield dict(zip(names, row)) def _setup_query(self): """ Constructs the field_names list that the values query will be retrieving. Called by the _clone() method after initializing the rest of the instance. """ self.query.clear_deferred_loading() self.query.clear_select_fields() if self._fields: self.extra_names = [] self.aggregate_names = [] if not self.query.extra and not self.query.aggregates: # Short cut - if there are no extra or aggregates, then # the values() clause must be just field names. self.field_names = list(self._fields) else: self.query.default_cols = False self.field_names = [] for f in self._fields: # we inspect the full extra_select list since we might # be adding back an extra select item that we hadn't # had selected previously. if f in self.query.extra: self.extra_names.append(f) elif f in self.query.aggregate_select: self.aggregate_names.append(f) else: self.field_names.append(f) else: # Default to all fields. self.extra_names = None self.field_names = [f.attname for f in self.model._meta.fields] self.aggregate_names = None self.query.select = [] if self.extra_names is not None: self.query.set_extra_mask(self.extra_names) self.query.add_fields(self.field_names, True) if self.aggregate_names is not None: self.query.set_aggregate_mask(self.aggregate_names) def _clone(self, klass=None, setup=False, **kwargs): """ Cloning a ValuesQuerySet preserves the current fields. """ c = super(ValuesQuerySet, self)._clone(klass, **kwargs) if not hasattr(c, '_fields'): # Only clone self._fields if _fields wasn't passed into the cloning # call directly. c._fields = self._fields[:] c.field_names = self.field_names c.extra_names = self.extra_names c.aggregate_names = self.aggregate_names if setup and hasattr(c, '_setup_query'): c._setup_query() return c def _merge_sanity_check(self, other): super(ValuesQuerySet, self)._merge_sanity_check(other) if (set(self.extra_names) != set(other.extra_names) or set(self.field_names) != set(other.field_names) or self.aggregate_names != other.aggregate_names): raise TypeError("Merging '%s' classes must involve the same values in each case." % self.__class__.__name__) def _setup_aggregate_query(self, aggregates): """ Prepare the query for computing a result that contains aggregate annotations. """ self.query.set_group_by() if self.aggregate_names is not None: self.aggregate_names.extend(aggregates) self.query.set_aggregate_mask(self.aggregate_names) super(ValuesQuerySet, self)._setup_aggregate_query(aggregates) def _as_sql(self, connection): """ For ValueQuerySet (and subclasses like ValuesListQuerySet), they can only be used as nested queries if they're already set up to select only a single field (in which case, that is the field column that is returned). This differs from QuerySet.as_sql(), where the column to select is set up by Django. 
""" if ((self._fields and len(self._fields) > 1) or (not self._fields and len(self.model._meta.fields) > 1)): raise TypeError('Cannot use a multi-field %s as a filter value.' % self.__class__.__name__) obj = self._clone() if obj._db is None or connection == connections[obj._db]: return obj.query.get_compiler(connection=connection).as_nested_sql() raise ValueError("Can't do subqueries with queries on different DBs.") def _prepare(self): """ Validates that we aren't trying to do a query like value__in=qs.values('value1', 'value2'), which isn't valid. """ if ((self._fields and len(self._fields) > 1) or (not self._fields and len(self.model._meta.fields) > 1)): raise TypeError('Cannot use a multi-field %s as a filter value.' % self.__class__.__name__) return self class ValuesListQuerySet(ValuesQuerySet): def iterator(self): if self.flat and len(self._fields) == 1: for row in self.query.get_compiler(self.db).results_iter(): yield row[0] elif not self.query.extra_select and not self.query.aggregate_select: for row in self.query.get_compiler(self.db).results_iter(): yield tuple(row) else: # When extra(select=...) or an annotation is involved, the extra # cols are always at the start of the row, and we need to reorder # the fields to match the order in self._fields. extra_names = self.query.extra_select.keys() field_names = self.field_names aggregate_names = self.query.aggregate_select.keys() names = extra_names + field_names + aggregate_names # If a field list has been specified, use it. Otherwise, use the # full list of fields, including extras and aggregates. if self._fields: fields = list(self._fields) + filter(lambda f: f not in self._fields, aggregate_names) else: fields = names for row in self.query.get_compiler(self.db).results_iter(): data = dict(zip(names, row)) yield tuple([data[f] for f in fields]) def _clone(self, *args, **kwargs): clone = super(ValuesListQuerySet, self)._clone(*args, **kwargs) if not hasattr(clone, "flat"): # Only assign flat if the clone didn't already get it from kwargs clone.flat = self.flat return clone class DateQuerySet(QuerySet): def iterator(self): return self.query.get_compiler(self.db).results_iter() def _setup_query(self): """ Sets up any special features of the query attribute. Called by the _clone() method after initializing the rest of the instance. """ self.query.clear_deferred_loading() self.query = self.query.clone(klass=sql.DateQuery, setup=True) self.query.select = [] self.query.add_date_select(self._field_name, self._kind, self._order) def _clone(self, klass=None, setup=False, **kwargs): c = super(DateQuerySet, self)._clone(klass, False, **kwargs) c._field_name = self._field_name c._kind = self._kind if setup and hasattr(c, '_setup_query'): c._setup_query() return c class EmptyQuerySet(QuerySet): def __init__(self, model=None, query=None, using=None): super(EmptyQuerySet, self).__init__(model, query, using) self._result_cache = [] def __and__(self, other): return self._clone() def __or__(self, other): return other._clone() def count(self): return 0 def delete(self): pass def _clone(self, klass=None, setup=False, **kwargs): c = super(EmptyQuerySet, self)._clone(klass, setup=setup, **kwargs) c._result_cache = [] return c def iterator(self): # This slightly odd construction is because we need an empty generator # (it raises StopIteration immediately). yield next(iter([])) def all(self): """ Always returns EmptyQuerySet. """ return self def filter(self, *args, **kwargs): """ Always returns EmptyQuerySet. 
""" return self def exclude(self, *args, **kwargs): """ Always returns EmptyQuerySet. """ return self def complex_filter(self, filter_obj): """ Always returns EmptyQuerySet. """ return self def select_related(self, *fields, **kwargs): """ Always returns EmptyQuerySet. """ return self def annotate(self, *args, **kwargs): """ Always returns EmptyQuerySet. """ return self def order_by(self, *field_names): """ Always returns EmptyQuerySet. """ return self def distinct(self, fields=None): """ Always returns EmptyQuerySet. """ return self def extra(self, select=None, where=None, params=None, tables=None, order_by=None, select_params=None): """ Always returns EmptyQuerySet. """ assert self.query.can_filter(), \ "Cannot change a query once a slice has been taken" return self def reverse(self): """ Always returns EmptyQuerySet. """ return self def defer(self, *fields): """ Always returns EmptyQuerySet. """ return self def only(self, *fields): """ Always returns EmptyQuerySet. """ return self def update(self, **kwargs): """ Don't update anything. """ return 0 def aggregate(self, *args, **kwargs): """ Return a dict mapping the aggregate names to None """ for arg in args: kwargs[arg.default_alias] = arg return dict([(key, None) for key in kwargs]) # EmptyQuerySet is always an empty result in where-clauses (and similar # situations). value_annotation = False def get_klass_info(klass, max_depth=0, cur_depth=0, requested=None, only_load=None, local_only=False): """ Helper function that recursively returns an information for a klass, to be used in get_cached_row. It exists just to compute this information only once for entire queryset. Otherwise it would be computed for each row, which leads to poor perfomance on large querysets. Arguments: * klass - the class to retrieve (and instantiate) * max_depth - the maximum depth to which a select_related() relationship should be explored. * cur_depth - the current depth in the select_related() tree. Used in recursive calls to determin if we should dig deeper. * requested - A dictionary describing the select_related() tree that is to be retrieved. keys are field names; values are dictionaries describing the keys on that related object that are themselves to be select_related(). * only_load - if the query has had only() or defer() applied, this is the list of field names that will be returned. If None, the full field list for `klass` can be assumed. * local_only - Only populate local fields. This is used when following reverse select-related relations """ if max_depth and requested is None and cur_depth > max_depth: # We've recursed deeply enough; stop now. return None if only_load: load_fields = only_load.get(klass) or set() # When we create the object, we will also be creating populating # all the parent classes, so traverse the parent classes looking # for fields that must be included on load. for parent in klass._meta.get_parent_list(): fields = only_load.get(parent) if fields: load_fields.update(fields) else: load_fields = None if load_fields: # Handle deferred fields. 
skip = set() init_list = [] # Build the list of fields that *haven't* been requested for field, model in klass._meta.get_fields_with_model(): if field.name not in load_fields: skip.add(field.name) elif local_only and model is not None: continue else: init_list.append(field.attname) # Retrieve all the requested fields field_count = len(init_list) if skip: klass = deferred_class_factory(klass, skip) field_names = init_list else: field_names = () else: # Load all fields on klass # We trying to not populate field_names variable for perfomance reason. # If field_names variable is set, it is used to instantiate desired fields, # by passing **dict(zip(field_names, fields)) as kwargs to Model.__init__ method. # But kwargs version of Model.__init__ is slower, so we should avoid using # it when it is not really neccesary. if local_only and len(klass._meta.local_fields) != len(klass._meta.fields): field_count = len(klass._meta.local_fields) field_names = [f.attname for f in klass._meta.local_fields] else: field_count = len(klass._meta.fields) field_names = () restricted = requested is not None related_fields = [] for f in klass._meta.fields: if select_related_descend(f, restricted, requested): if restricted: next = requested[f.name] else: next = None klass_info = get_klass_info(f.rel.to, max_depth=max_depth, cur_depth=cur_depth+1, requested=next, only_load=only_load) related_fields.append((f, klass_info)) reverse_related_fields = [] if restricted: for o in klass._meta.get_all_related_objects(): if o.field.unique and select_related_descend(o.field, restricted, requested, reverse=True): next = requested[o.field.related_query_name()] klass_info = get_klass_info(o.model, max_depth=max_depth, cur_depth=cur_depth+1, requested=next, only_load=only_load, local_only=True) reverse_related_fields.append((o.field, klass_info)) return klass, field_names, field_count, related_fields, reverse_related_fields def get_cached_row(row, index_start, using, klass_info, offset=0): """ Helper function that recursively returns an object with the specified related attributes already populated. This method may be called recursively to populate deep select_related() clauses. Arguments: * row - the row of data returned by the database cursor * index_start - the index of the row at which data for this object is known to start * offset - the number of additional fields that are known to exist in row for `klass`. This usually means the number of annotated results on `klass`. * using - the database alias on which the query is being executed. * klass_info - result of the get_klass_info function """ if klass_info is None: return None klass, field_names, field_count, related_fields, reverse_related_fields = klass_info fields = row[index_start : index_start + field_count] # If all the select_related columns are None, then the related # object must be non-existent - set the relation to None. # Otherwise, construct the related object. if fields == (None,) * field_count: obj = None else: if field_names: obj = klass(**dict(zip(field_names, fields))) else: obj = klass(*fields) # If an object was retrieved, set the database state. 
if obj: obj._state.db = using obj._state.adding = False # Instantiate related fields index_end = index_start + field_count + offset # Iterate over each related object, populating any # select_related() fields for f, klass_info in related_fields: # Recursively retrieve the data for the related object cached_row = get_cached_row(row, index_end, using, klass_info) # If the recursive descent found an object, populate the # descriptor caches relevant to the object if cached_row: rel_obj, index_end = cached_row if obj is not None: # If the base object exists, populate the # descriptor cache setattr(obj, f.get_cache_name(), rel_obj) if f.unique and rel_obj is not None: # If the field is unique, populate the # reverse descriptor cache on the related object setattr(rel_obj, f.related.get_cache_name(), obj) # Now do the same, but for reverse related objects. # Only handle the restricted case - i.e., don't do a depth # descent into reverse relations unless explicitly requested for f, klass_info in reverse_related_fields: # Recursively retrieve the data for the related object cached_row = get_cached_row(row, index_end, using, klass_info) # If the recursive descent found an object, populate the # descriptor caches relevant to the object if cached_row: rel_obj, index_end = cached_row if obj is not None: # If the field is unique, populate the # reverse descriptor cache setattr(obj, f.related.get_cache_name(), rel_obj) if rel_obj is not None: # If the related object exists, populate # the descriptor cache. setattr(rel_obj, f.get_cache_name(), obj) # Now populate all the non-local field values # on the related object for rel_field, rel_model in rel_obj._meta.get_fields_with_model(): if rel_model is not None: setattr(rel_obj, rel_field.attname, getattr(obj, rel_field.attname)) # populate the field cache for any related object # that has already been retrieved if rel_field.rel: try: cached_obj = getattr(obj, rel_field.get_cache_name()) setattr(rel_obj, rel_field.get_cache_name(), cached_obj) except AttributeError: # Related object hasn't been cached yet pass return obj, index_end class RawQuerySet(object): """ Provides an iterator which converts the results of raw SQL queries into annotated model instances. """ def __init__(self, raw_query, model=None, query=None, params=None, translations=None, using=None): self.raw_query = raw_query self.model = model self._db = using self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params) self.params = params or () self.translations = translations or {} def __iter__(self): # Mapping of attrnames to row column positions. Used for constructing # the model using kwargs, needed when not all model's fields are present # in the query. model_init_field_names = {} # A list of tuples of (column name, column position). Used for # annotation fields. annotation_fields = [] # Cache some things for performance reasons outside the loop. db = self.db compiler = connections[db].ops.compiler('SQLCompiler')( self.query, connections[db], db ) need_resolv_columns = hasattr(compiler, 'resolve_columns') query = iter(self.query) # Find out which columns are model's fields, and which ones should be # annotated to the model. for pos, column in enumerate(self.columns): if column in self.model_fields: model_init_field_names[self.model_fields[column].attname] = pos else: annotation_fields.append((column, pos)) # Find out which model's fields are not present in the query. 
skip = set() for field in self.model._meta.fields: if field.attname not in model_init_field_names: skip.add(field.attname) if skip: if self.model._meta.pk.attname in skip: raise InvalidQuery('Raw query must include the primary key') model_cls = deferred_class_factory(self.model, skip) else: model_cls = self.model # All model's fields are present in the query. So, it is possible # to use *args based model instantation. For each field of the model, # record the query column position matching that field. model_init_field_pos = [] for field in self.model._meta.fields: model_init_field_pos.append(model_init_field_names[field.attname]) if need_resolv_columns: fields = [self.model_fields.get(c, None) for c in self.columns] # Begin looping through the query values. for values in query: if need_resolv_columns: values = compiler.resolve_columns(values, fields) # Associate fields to values if skip: model_init_kwargs = {} for attname, pos in model_init_field_names.iteritems(): model_init_kwargs[attname] = values[pos] instance = model_cls(**model_init_kwargs) else: model_init_args = [values[pos] for pos in model_init_field_pos] instance = model_cls(*model_init_args) if annotation_fields: for column, pos in annotation_fields: setattr(instance, column, values[pos]) instance._state.db = db instance._state.adding = False yield instance def __repr__(self): return "<RawQuerySet: %r>" % (self.raw_query % tuple(self.params)) def __getitem__(self, k): return list(self)[k] @property def db(self): "Return the database that will be used if this query is executed now" return self._db or router.db_for_read(self.model) def using(self, alias): """ Selects which database this Raw QuerySet should excecute it's query against. """ return RawQuerySet(self.raw_query, model=self.model, query=self.query.clone(using=alias), params=self.params, translations=self.translations, using=alias) @property def columns(self): """ A list of model field names in the order they'll appear in the query results. """ if not hasattr(self, '_columns'): self._columns = self.query.get_columns() # Adjust any column names which don't match field names for (query_name, model_name) in self.translations.items(): try: index = self._columns.index(query_name) self._columns[index] = model_name except ValueError: # Ignore translations for non-existant column names pass return self._columns @property def model_fields(self): """ A dict mapping column names to model field names. """ if not hasattr(self, '_model_fields'): converter = connections[self.db].introspection.table_name_converter self._model_fields = {} for field in self.model._meta.fields: name, column = field.get_attname_column() self._model_fields[converter(column)] = field return self._model_fields def insert_query(model, objs, fields, return_id=False, raw=False, using=None): """ Inserts a new record for the given model. This provides an interface to the InsertQuery class and is how Model.save() is implemented. It is not part of the public API. 
""" query = sql.InsertQuery(model) query.insert_values(fields, objs, raw=raw) return query.get_compiler(using=using).execute_sql(return_id) def prefetch_related_objects(result_cache, related_lookups): """ Helper function for prefetch_related functionality Populates prefetched objects caches for a list of results from a QuerySet """ from django.db.models.sql.constants import LOOKUP_SEP if len(result_cache) == 0: return # nothing to do model = result_cache[0].__class__ # We need to be able to dynamically add to the list of prefetch_related # lookups that we look up (see below). So we need some book keeping to # ensure we don't do duplicate work. done_lookups = set() # list of lookups like foo__bar__baz done_queries = {} # dictionary of things like 'foo__bar': [results] auto_lookups = [] # we add to this as we go through. followed_descriptors = set() # recursion protection all_lookups = itertools.chain(related_lookups, auto_lookups) for lookup in all_lookups: if lookup in done_lookups: # We've done exactly this already, skip the whole thing continue done_lookups.add(lookup) # Top level, the list of objects to decorate is the the result cache # from the primary QuerySet. It won't be for deeper levels. obj_list = result_cache attrs = lookup.split(LOOKUP_SEP) for level, attr in enumerate(attrs): # Prepare main instances if len(obj_list) == 0: break good_objects = True for obj in obj_list: if not hasattr(obj, '_prefetched_objects_cache'): try: obj._prefetched_objects_cache = {} except AttributeError: # Must be in a QuerySet subclass that is not returning # Model instances, either in Django or 3rd # party. prefetch_related() doesn't make sense, so quit # now. good_objects = False break else: # We already did this list break if not good_objects: break # Descend down tree # We assume that objects retrieved are homogenous (which is the premise # of prefetch_related), so what applies to first object applies to all. first_obj = obj_list[0] prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, attr) if not attr_found: raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid " "parameter to prefetch_related()" % (attr, first_obj.__class__.__name__, lookup)) if level == len(attrs) - 1 and prefetcher is None: # Last one, this *must* resolve to something that supports # prefetching, otherwise there is no point adding it and the # developer asking for it has made a mistake. raise ValueError("'%s' does not resolve to a item that supports " "prefetching - this is an invalid parameter to " "prefetch_related()." % lookup) if prefetcher is not None and not is_fetched: # Check we didn't do this already current_lookup = LOOKUP_SEP.join(attrs[0:level+1]) if current_lookup in done_queries: obj_list = done_queries[current_lookup] else: obj_list, additional_prl = prefetch_one_level(obj_list, prefetcher, attr) # We need to ensure we don't keep adding lookups from the # same relationships to stop infinite recursion. So, if we # are already on an automatically added lookup, don't add # the new lookups from relationships we've seen already. if not (lookup in auto_lookups and descriptor in followed_descriptors): for f in additional_prl: new_prl = LOOKUP_SEP.join([current_lookup, f]) auto_lookups.append(new_prl) done_queries[current_lookup] = obj_list followed_descriptors.add(descriptor) else: # Either a singly related object that has already been fetched # (e.g. via select_related), or hopefully some other property # that doesn't support prefetching but needs to be traversed. 
# We replace the current list of parent objects with the list # of related objects, filtering out empty or missing values so # that we can continue with nullable or reverse relations. new_obj_list = [] for obj in obj_list: try: new_obj = getattr(obj, attr) except exceptions.ObjectDoesNotExist: continue if new_obj is None: continue new_obj_list.append(new_obj) obj_list = new_obj_list def get_prefetcher(instance, attr): """ For the attribute 'attr' on the given instance, finds an object that has a get_prefetch_query_set(). Returns a 4 tuple containing: (the object with get_prefetch_query_set (or None), the descriptor object representing this relationship (or None), a boolean that is False if the attribute was not found at all, a boolean that is True if the attribute has already been fetched) """ prefetcher = None attr_found = False is_fetched = False # For singly related objects, we have to avoid getting the attribute # from the object, as this will trigger the query. So we first try # on the class, in order to get the descriptor object. rel_obj_descriptor = getattr(instance.__class__, attr, None) if rel_obj_descriptor is None: try: rel_obj = getattr(instance, attr) attr_found = True except AttributeError: pass else: attr_found = True if rel_obj_descriptor: # singly related object, descriptor object has the # get_prefetch_query_set() method. if hasattr(rel_obj_descriptor, 'get_prefetch_query_set'): prefetcher = rel_obj_descriptor if rel_obj_descriptor.is_cached(instance): is_fetched = True else: # descriptor doesn't support prefetching, so we go ahead and get # the attribute on the instance rather than the class to # support many related managers rel_obj = getattr(instance, attr) if hasattr(rel_obj, 'get_prefetch_query_set'): prefetcher = rel_obj return prefetcher, rel_obj_descriptor, attr_found, is_fetched def prefetch_one_level(instances, prefetcher, attname): """ Helper function for prefetch_related_objects Runs prefetches on all instances using the prefetcher object, assigning results to relevant caches in instance. The prefetched objects are returned, along with any additional prefetches that must be done due to prefetch_related lookups found from default managers. """ # prefetcher must have a method get_prefetch_query_set() which takes a list # of instances, and returns a tuple: # (queryset of instances of self.model that are related to passed in instances, # callable that gets value to be matched for returned instances, # callable that gets value to be matched for passed in instances, # boolean that is True for singly related objects, # cache name to assign to). # The 'values to be matched' must be hashable as they will be used # in a dictionary. rel_qs, rel_obj_attr, instance_attr, single, cache_name =\ prefetcher.get_prefetch_query_set(instances) # We have to handle the possibility that the default manager itself added # prefetch_related lookups to the QuerySet we just got back. We don't want to # trigger the prefetch_related functionality by evaluating the query. # Rather, we need to merge in the prefetch_related lookups. additional_prl = getattr(rel_qs, '_prefetch_related_lookups', []) if additional_prl: # Don't need to clone because the manager should have given us a fresh # instance, so we access an internal instead of using public interface # for performance reasons. 
rel_qs._prefetch_related_lookups = [] all_related_objects = list(rel_qs) rel_obj_cache = {} for rel_obj in all_related_objects: rel_attr_val = rel_obj_attr(rel_obj) rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj) for obj in instances: instance_attr_val = instance_attr(obj) vals = rel_obj_cache.get(instance_attr_val, []) if single: # Need to assign to single cache on instance setattr(obj, cache_name, vals[0] if vals else None) else: # Multi, attribute represents a manager with an .all() method that # returns a QuerySet qs = getattr(obj, attname).all() qs._result_cache = vals # We don't want the individual qs doing prefetch_related now, since we # have merged this into the current work. qs._prefetch_done = True obj._prefetched_objects_cache[cache_name] = qs return all_related_objects, additional_prl
kennethlove/django
django/db/models/query.py
Python
bsd-3-clause
70,273
0.001352
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, absolute_import,
                        division, print_function)
import sys
import os
import pytest
from contextlib import contextmanager

import genpac
from genpac._compat import string_types, iterkeys, iteritems

parametrize = pytest.mark.parametrize
skipif = pytest.mark.skipif
xfail = pytest.mark.xfail

_ETC_DIR = os.path.join(os.path.dirname(__file__), 'etc')
_TMP_DIR = os.path.join(os.path.dirname(__file__), 'tmp')

# Whether this is my own machine
is_own = sys.platform.startswith('darwin') and \
    ''.join(os.environ.values()).find('JinnLynn') >= 0
is_not_own = not is_own


def join_etc(*args):
    return os.path.join(_ETC_DIR, *args)


def join_tmp(*args):
    return os.path.join(_TMP_DIR, *args)


@contextmanager
def buildenv(envs=None, argv=None, **kwargs):
    envs = envs or {}
    argv = argv or []
    if isinstance(argv, string_types):
        argv = argv.split(' ')
    if not argv or argv[0] != 'genpac':
        argv.insert(0, 'genpac')

    envs.setdefault('GENPAC_TEST_TMP', _TMP_DIR)
    envs.setdefault('GENPAC_TEST_ETC', _ETC_DIR)
    for k, v in iteritems(envs):
        os.environ[k] = v

    old_argv = sys.argv
    sys.argv = argv

    yield

    genpac.Generator._gfwlists.clear()

    for k in iterkeys(envs):
        if k in os.environ:
            del os.environ[k]
    sys.argv = old_argv
JinnLynn/genpac
tests/util.py
Python
mit
1,389
0.002185
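A minimal sketch of how the buildenv() helper in the genpac test util above might be used; the tests.util import path, the GENPAC_DEBUG variable and the --help flag are illustrative assumptions, not part of the original module.

import os
import sys

from tests.util import buildenv  # assumed import path for the module above


def test_argv_and_env_are_patched():
    with buildenv(envs={'GENPAC_DEBUG': '1'}, argv='--help'):
        # buildenv splits a string argv and prepends 'genpac' automatically
        assert sys.argv == ['genpac', '--help']
        assert os.environ['GENPAC_DEBUG'] == '1'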
#!/usr/bin/env python # -*- coding: utf-8 -*- import re from module.plugins.Hoster import Hoster from module.plugins.internal.CaptchaService import ReCaptcha class FreakshareCom(Hoster): __name__ = "FreakshareCom" __type__ = "hoster" __pattern__ = r"http://(?:www\.)?freakshare\.(net|com)/files/\S*?/" __version__ = "0.38" __description__ = """Freakshare.com Download Hoster""" __author_name__ = ("sitacuisses", "spoob", "mkaay", "Toilal") __author_mail__ = ("[email protected]", "[email protected]", "[email protected]", "[email protected]") def setup(self): self.multiDL = False self.req_opts = [] def process(self, pyfile): self.pyfile = pyfile pyfile.url = pyfile.url.replace("freakshare.net/", "freakshare.com/") if self.account: self.html = self.load(pyfile.url, cookies=False) pyfile.name = self.get_file_name() self.download(pyfile.url) else: self.prepare() self.get_file_url() self.download(self.pyfile.url, post=self.req_opts) check = self.checkDownload({"bad": "bad try", "paralell": "> Sorry, you cant download more then 1 files at time. <", "empty": "Warning: Unknown: Filename cannot be empty", "wrong_captcha": "Wrong Captcha!", "downloadserver": "No Downloadserver. Please try again later!"}) if check == "bad": self.fail("Bad Try.") elif check == "paralell": self.setWait(300, True) self.wait() self.retry() elif check == "empty": self.fail("File not downloadable") elif check == "wrong_captcha": self.invalidCaptcha() self.retry() elif check == "downloadserver": self.retry(5, 900, 'No Download server') def prepare(self): pyfile = self.pyfile self.wantReconnect = False self.download_html() if not self.file_exists(): self.offline() self.setWait(self.get_waiting_time()) pyfile.name = self.get_file_name() pyfile.size = self.get_file_size() self.wait() return True def download_html(self): self.load("http://freakshare.com/index.php", {"language": "EN"}) # Set english language in server session self.html = self.load(self.pyfile.url) def get_file_url(self): """ returns the absolute downloadable filepath """ if self.html is None: self.download_html() if not self.wantReconnect: self.req_opts = self.get_download_options() # get the Post options for the Request #file_url = self.pyfile.url #return file_url else: self.offline() def get_file_name(self): if self.html is None: self.download_html() if not self.wantReconnect: file_name = re.search(r"<h1\sclass=\"box_heading\"\sstyle=\"text-align:center;\">([^ ]+)", self.html) if file_name is not None: file_name = file_name.group(1) else: file_name = self.pyfile.url return file_name else: return self.pyfile.url def get_file_size(self): size = 0 if self.html is None: self.download_html() if not self.wantReconnect: file_size_check = re.search( r"<h1\sclass=\"box_heading\"\sstyle=\"text-align:center;\">[^ ]+ - ([^ ]+) (\w\w)yte", self.html) if file_size_check is not None: units = float(file_size_check.group(1).replace(",", "")) pow = {'KB': 1, 'MB': 2, 'GB': 3}[file_size_check.group(2)] size = int(units * 1024 ** pow) return size def get_waiting_time(self): if self.html is None: self.download_html() if "Your Traffic is used up for today" in self.html: self.wantReconnect = True return 24 * 3600 timestring = re.search('\s*var\s(?:downloadWait|time)\s=\s(\d*)[.\d]*;', self.html) if timestring: return int(timestring.group(1)) + 1 # add 1 sec as tenths of seconds are cut off else: return 60 def file_exists(self): """ returns True or False """ if self.html is None: self.download_html() if re.search(r"This file does not exist!", self.html) is not None: return False else: 
return True def get_download_options(self): re_envelope = re.search(r".*?value=\"Free\sDownload\".*?\n*?(.*?<.*?>\n*)*?\n*\s*?</form>", self.html).group(0) # get the whole request to_sort = re.findall(r"<input\stype=\"hidden\"\svalue=\"(.*?)\"\sname=\"(.*?)\"\s\/>", re_envelope) request_options = dict((n, v) for (v, n) in to_sort) herewego = self.load(self.pyfile.url, None, request_options) # the actual download-Page # comment this in, when it doesnt work # with open("DUMP__FS_.HTML", "w") as fp: # fp.write(herewego) to_sort = re.findall(r"<input\stype=\".*?\"\svalue=\"(\S*?)\".*?name=\"(\S*?)\"\s.*?\/>", herewego) request_options = dict((n, v) for (v, n) in to_sort) # comment this in, when it doesnt work as well #print "\n\n%s\n\n" % ";".join(["%s=%s" % x for x in to_sort]) challenge = re.search(r"http://api\.recaptcha\.net/challenge\?k=([0-9A-Za-z]+)", herewego) if challenge: re_captcha = ReCaptcha(self) (request_options["recaptcha_challenge_field"], request_options["recaptcha_response_field"]) = re_captcha.challenge(challenge.group(1)) return request_options
wangjun/pyload
module/plugins/hoster/FreakshareCom.py
Python
gpl-3.0
6,038
0.004306
class TaskMappingSchemesFullyDyn:
    TASKMAPPINGSCHEMESFULLYDYN_NONE = 0  # this will give an error
    TASKMAPPINGSCHEMESFULLYDYN_RANDOM = 1
    TASKMAPPINGSCHEMESFULLYDYN_LOWESTUTIL_NEARESTPARENT = 2
roshantha9/AbstractManycoreSim
src/libMappingAndScheduling/FullyDynamic/TaskMappingSchemesFullyDyn.py
Python
gpl-3.0
320
0.028125
################################################################################ ## ## ## This file is a part of TADEK. ## ## ## ## TADEK - Test Automation in a Distributed Environment ## ## (http://tadek.comarch.com) ## ## ## ## Copyright (C) 2011 Comarch S.A. ## ## All rights reserved. ## ## ## ## TADEK is free software for non-commercial purposes. For commercial ones ## ## we offer a commercial license. Please check http://tadek.comarch.com for ## ## details or write to [email protected] ## ## ## ## You can redistribute it and/or modify it under the terms of the ## ## GNU General Public License as published by the Free Software Foundation, ## ## either version 3 of the License, or (at your option) any later version. ## ## ## ## TADEK is distributed in the hope that it will be useful, ## ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## ## GNU General Public License for more details. ## ## ## ## You should have received a copy of the GNU General Public License ## ## along with TADEK bundled with this file in the file LICENSE. ## ## If not, see http://www.gnu.org/licenses/. ## ## ## ## Please notice that Contributor Agreement applies to any contribution ## ## you make to TADEK. The Agreement must be completed, signed and sent ## ## to Comarch before any contribution is made. You should have received ## ## a copy of Contribution Agreement along with TADEK bundled with this file ## ## in the file CONTRIBUTION_AGREEMENT.pdf or see http://tadek.comarch.com ## ## or write to [email protected] ## ## ## ################################################################################ import os import sys from tadek import models from tadek import teststeps from tadek import testcases from tadek import testsuites from tadek.core import locale from tadek.core.structs import ErrorBox _DIRS_MAP = { "models": models, "teststeps": teststeps, "testcases": testcases, "testsuites": testsuites } _LOCALE_DIR = "locale" class NameConflictError(Exception): ''' Raised when a name conflict module takes place inside some. ''' def __init__(self, module, name): Exception.__init__(self, '.'.join([module.__name__, name])) def add(path, enabled=True): ''' Adds a location of models and test cases specified by the path. :param path: A path to a location directory :type path: string :param enabled: True if an added location should be enabled, False otherwise :type enabled: boolean ''' path = os.path.abspath(path) if path in _cache: return None _cache[path] = enabled if enabled: return enable(path) return [] def remove(path): ''' Removes a location of models and test cases specified by the path. :param path: A path to a location directory :type path: string ''' path = os.path.abspath(path) if path not in _cache: return disable(path) del _cache[path] def get(enabled=None): ''' Gets a list of all locations. ''' if enabled is None: return _cache.keys() elif enabled: return [path for path in _cache if _cache[path]] else: return [path for path in _cache if not _cache[path]] def enable(path): ''' Enables a location of models and test cases specified by the path. 
:param path: A path to a location directory :type path: string ''' path = os.path.abspath(path) if path not in _cache: return None _cache[path] = True errors = [] for dirname, module in _DIRS_MAP.iteritems(): errors.extend(_addModuleDir(module, os.path.join(path, dirname))) # Add a corresponding locale locale.add(os.path.join(path, _LOCALE_DIR)) if errors: disable(path) return errors def disable(path): ''' Disables a location of models and test cases specified by the path. :param path: A path to a location directory :type path: string ''' path = os.path.abspath(path) for dirname, module in _DIRS_MAP.iteritems(): _removeModuleDir(module, os.path.join(path, dirname)) # Remove a corresponding locale locale.remove(os.path.join(path, _LOCALE_DIR)) _cache[path] = False def clear(): ''' Clears imported modules from all locations. ''' for module in _DIRS_MAP.itervalues(): _clearModule(module) # A locations cache _cache = {} # Location directories oriented functions: def getModels(): ''' Gets a dictionary containing all currently avalaible models modules. :return: A dictionary with models modules :rtype: dictionary ''' content = _getModuleContent(models) content.pop("__init__", None) return content def getSteps(): ''' Gets a dictionary containing all currently avalaible root test steps modules. :return: A dictionary with test steps modules :rtype: dictionary ''' content = _getModuleContent(teststeps) content.pop("__init__", None) return content def getCases(): ''' Gets a dictionary containing all currently avalaible root test cases modules. :return: A dictionary with test cases modules :rtype: dictionary ''' content = _getModuleContent(testcases) content.pop("__init__", None) return content def getSuites(): ''' Gets a dictionary containing all currently avalaible root test suites modules. :return: A dictionary with test suites modules :rtype: dictionary ''' content = _getModuleContent(testsuites) content.pop("__init__", None) return content _MODULE_EXTS = (".py", ".pyc", ".pyo") def _getDirContent(dir, package=None): ''' Gets content of the given directory. ''' content = {} for file in sorted(os.listdir(dir)): name = None path = os.path.join(dir, file) if os.path.isfile(path): name, ext = os.path.splitext(file) if ext not in _MODULE_EXTS or (package and name == "__init__"): continue name = '.'.join([package, name]) if package else name elif os.path.isdir(path): pkg = False for ext in _MODULE_EXTS: if os.path.exists(os.path.join(path, "__init__" + ext)): pkg = True break if not pkg: continue name = '.'.join([package, file]) if package else file content.update(_getDirContent(path, name)) path = os.path.join(path, "__init__" + ext) if name and name not in content: content[name] = path return content def _getModuleContent(module): ''' Gets content of the given module from the specified directory. ''' content = {} for path in module.__path__: for name, path in _getDirContent(path).iteritems(): if name not in content: content[name] = path return content def _addModuleDir(module, path): ''' Adds a directory of the given path to the specified module object. ''' errors = [] if not os.path.isdir(path) or path is module.__path__: return errors content = _getModuleContent(module) for name in _getDirContent(path): try: if name in content: raise NameConflictError(module, name) except NameConflictError: errors.append(ErrorBox(name=name, path=path)) if not errors: module.__path__.append(path) return errors def _clearModule(module, path=None): ''' Clears the imported module. 
''' patterns = [] if not path: patterns.append(module.__name__ + '.') elif path in module.__path__: for name in _getDirContent(path): patterns.append('.'.join([module.__name__, name])) for name in sys.modules.keys(): for pattern in patterns: if pattern in name: del sys.modules[name] break def _removeModuleDir(module, path): ''' Removes a directory of the given path from the specified module object. ''' if path not in module.__path__ or path == module.__path__[0]: return _clearModule(module, path) module.__path__.remove(path)
tadek-project/tadek-common
tadek/core/location.py
Python
gpl-3.0
9,460
0.008985
import numpy as np from menpo.model import PCAModel from menpo.visualize import print_progress def prune(weights, n_retained=50): w_norm = (weights[:, :n_retained] ** 2).sum(axis=1) # High weights here suggest problematic samples bad_to_good_index = np.argsort(w_norm)[::-1] return w_norm, bad_to_good_index def pca_and_weights(meshes, retain_eig_cum_val=0.997, verbose=False): model = PCAModel(meshes, verbose=verbose) n_comps_retained = (model.eigenvalues_cumulative_ratio() < retain_eig_cum_val).sum() if verbose: print('\nRetaining {:.2%} of eigenvalues keeps {} components'.format( retain_eig_cum_val, n_comps_retained)) model.trim_components(retain_eig_cum_val) if verbose: meshes = print_progress(meshes, prefix='Calculating weights') weights = (np.vstack([model.project(m) for m in meshes]) / np.sqrt(model.eigenvalues)) return model, weights
menpo/lsfm
lsfm/model.py
Python
bsd-3-clause
968
0
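A hedged usage sketch for pca_and_weights() from the lsfm model module above; the random point clouds stand in for real meshes, and menpo plus numpy are assumed to be installed.

import numpy as np
from menpo.shape import PointCloud

from lsfm.model import pca_and_weights  # assumed import path

# Twenty synthetic "meshes" represented as random 3D point clouds.
meshes = [PointCloud(np.random.rand(100, 3)) for _ in range(20)]
model, weights = pca_and_weights(meshes, verbose=True)
print(weights.shape)  # (20, number of retained components)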
from django.conf.urls import patterns, include, url # Uncomment the next two lines to enable the admin: from django.contrib import admin admin.autodiscover() from django.views.generic import TemplateView urlpatterns = patterns('', # Examples: url(r'^$', TemplateView.as_view(template_name='index.html'), name='index'), # url(r'^django_example/', include('django_example.foo.urls')), # Uncomment the admin/doc line below to enable admin documentation: # url(r'^admin/doc/', include('django.contrib.admindocs.urls')), # Uncomment the next line to enable the admin: url(r'^admin/', include(admin.site.urls)), )
fatrix/django-golive
project_examples/django_example/django_example/urls.py
Python
bsd-2-clause
801
0.007491
import urllib from django import template from django.utils.safestring import mark_safe register = template.Library() @register.tag def query_string(parser, token): """ Allows to manipulate the query string of a page by adding and removing keywords. If a given value is a context variable it will resolve it. Usage: http://www.url.com/{% query_string "param_to_add=value, param_to_add=value" "param_to_remove, params_to_remove" %} """ try: tag_name, add_string, remove_string = token.split_contents() except ValueError: raise template.TemplateSyntaxError, "%r tag requires two arguments" % token.contents.split()[0] if not (add_string[0] == add_string[-1] and add_string[0] in ('"', "'")) or not (remove_string[0] == remove_string[-1] and remove_string[0] in ('"', "'")): raise template.TemplateSyntaxError, "%r tag's argument should be in quotes" % tag_name add = string_to_dict(add_string[1:-1]) remove = string_to_list(remove_string[1:-1]) return QueryStringNode(add, remove) class QueryStringNode(template.Node): def __init__(self, add, remove): self.add = add self.remove = remove def render(self, context): p = {} for k, v in context["request"].GET.items(): p[k] = v return get_query_string(p, self.add, self.remove, context) def get_query_string(p, new_params, remove, context): """ Add and remove query parameters. From `django.contrib.admin`. """ for r in remove: for k in p.keys(): if k.startswith(r): del p[k] for k, v in new_params.items(): if k in p and v is None: del p[k] elif v is not None: p[k] = v for k, v in p.items(): try: p[k] = template.Variable(v).resolve(context) except: p[k] = v return mark_safe('?' + '&amp;'.join([u'%s=%s' % (urllib.quote_plus(str(k)), urllib.quote_plus(str(v))) for k, v in p.items()])) # Taken from lib/utils.py def string_to_dict(string): kwargs = {} if string: string = str(string) if ',' not in string: # ensure at least one ',' string += ',' for arg in string.split(','): arg = arg.strip() if arg == '': continue kw, val = arg.split('=', 1) kwargs[kw] = val return kwargs def string_to_list(string): args = [] if string: string = str(string) if ',' not in string: # ensure at least one ',' string += ',' for arg in string.split(','): arg = arg.strip() if arg == '': continue args.append(arg) return args
DigitalCampus/django-nurhi-oppia
oppia/templatetags/query_string.py
Python
gpl-3.0
2,788
0.002511
class Invalid_IP_exception(Exception): pass
mikesligo/distributed-search
Exceptions/Invalid_IP_exception.py
Python
mit
48
0
import os from setuptools import setup # Utility function to read the README file. # Used for the long_description. It's nice, because now 1) we have a top level # README file and 2) it's easier to type in the README file than to put a raw # string in below ... def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() setup( name = "PyAnno", version = "0.1a", author = "Whatever", author_email = "[email protected]", description = "Here we describe what we put in the github repo description ", license = "BSD", keywords = "labels voting annotation", url = "my own webpage", packages=['pyanno'], # This what is really needed, the rest is optional long_description=read('README.md'), classifiers=[ "Development Status :: 3 - Alpha", "Topic :: Utilities", "License :: OSI Approved :: BSD License", ], )
eemiliosl/pyanno_voting
setup.py
Python
bsd-2-clause
916
0.021834
''' WikiLinks Extension for Python-Markdown ====================================== Converts [[WikiLinks]] to relative links. See <https://pythonhosted.org/Markdown/extensions/wikilinks.html> for documentation. Original code Copyright [Waylan Limberg](http://achinghead.com/). All changes Copyright The Python Markdown Project License: [BSD](http://www.opensource.org/licenses/bsd-license.php) ''' from __future__ import absolute_import from __future__ import unicode_literals from . import Extension from ..inlinepatterns import Pattern from ..util import etree import re def build_url(label, base, end): """ Build a url from the label, a base, and an end. """ clean_label = re.sub(r'([ ]+_)|(_[ ]+)|([ ]+)', '_', label) return '%s%s%s'% (base, clean_label, end) class WikiLinkExtension(Extension): def __init__ (self, *args, **kwargs): self.config = { 'base_url' : ['/', 'String to append to beginning or URL.'], 'end_url' : ['/', 'String to append to end of URL.'], 'html_class' : ['wikilink', 'CSS hook. Leave blank for none.'], 'build_url' : [build_url, 'Callable formats URL from label.'], } super(WikiLinkExtension, self).__init__(*args, **kwargs) def extendMarkdown(self, md, md_globals): self.md = md # append to end of inline patterns WIKILINK_RE = r'\[\[([\w0-9_ -]+)\]\]' wikilinkPattern = WikiLinks(WIKILINK_RE, self.getConfigs()) wikilinkPattern.md = md md.inlinePatterns.add('wikilink', wikilinkPattern, "<not_strong") class WikiLinks(Pattern): def __init__(self, pattern, config): super(WikiLinks, self).__init__(pattern) self.config = config def handleMatch(self, m): if m.group(2).strip(): base_url, end_url, html_class = self._getMeta() label = m.group(2).strip() url = self.config['build_url'](label, base_url, end_url) a = etree.Element('a') a.text = label a.set('href', url) if html_class: a.set('class', html_class) else: a = '' return a def _getMeta(self): """ Return meta data or config data. """ base_url = self.config['base_url'] end_url = self.config['end_url'] html_class = self.config['html_class'] if hasattr(self.md, 'Meta'): if 'wiki_base_url' in self.md.Meta: base_url = self.md.Meta['wiki_base_url'][0] if 'wiki_end_url' in self.md.Meta: end_url = self.md.Meta['wiki_end_url'][0] if 'wiki_html_class' in self.md.Meta: html_class = self.md.Meta['wiki_html_class'][0] return base_url, end_url, html_class def makeExtension(*args, **kwargs) : return WikiLinkExtension(*args, **kwargs)
andela-bojengwa/talk
venv/lib/python2.7/site-packages/markdown/extensions/wikilinks.py
Python
mit
2,901
0.005515
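A hedged usage sketch for the WikiLinks extension above; it assumes a Markdown release whose extension API matches this file (2.5 or later) and that the module is importable from markdown.extensions.wikilinks.

import markdown
from markdown.extensions.wikilinks import WikiLinkExtension

html = markdown.markdown(
    "See [[Main Page]] for details.",
    extensions=[WikiLinkExtension(base_url='/wiki/', end_url='.html')])
# Expected to render roughly:
# <p>See <a class="wikilink" href="/wiki/Main_Page.html">Main Page</a> for details.</p>
print(html)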
from flask import (Module, request, abort, current_app, session, flash, redirect, render_template) import re mod = Module(__name__, name="auth") def check_next(next): """return the value of next if next is a valid next param, it returns None if the next param is invalid""" # security check stolen from Django, thanks Django! # Try to get the next param # Light security check -- make sure redirect_to isn't garbage. if ' ' in next: return None # Heavier security check -- redirects to http://example.com should # not be allowed, but things like /view/?param=http://example.com # should be allowed. This regex checks if there is a '//' *before* a # question mark. elif '//' in next and re.match(r'[^\?]*//', next): return None else: return next @mod.route("/login/", methods=["GET", "POST"]) def login(): backend = current_app.config['AUTH_BACKEND'] next = request.args.get("next", "") next = check_next(next) # Try to authenticate error = None if request.method == "POST": # Try to authenticate based on the form data result = backend.authenticate(request.form) # If something is returned, use that as the auth_key in the session if result is not None: session["auth_key"] = result flash("Login successful.") if next: return redirect(next) else: flash("Login Invalid", "error") return render_template("auth/login.html") @mod.route("/logout/") def logout(): # Get the AUTH_BACKEND backend = current_app.config['AUTH_BACKEND'] auth_key = session.get("auth_key") if auth_key: next = request.args.get("next", "/") # Let the backend know about the logout backend.logout(auth_key) # Throw away the auth_key session.pop("auth_key", None) # Flash a pretty message flash("You are now logged out") return redirect(next)
ericmoritz/flask-auth
flaskext/auth/views.py
Python
bsd-2-clause
2,020
0.00198
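The behaviour of check_next() in the auth views above can be illustrated with a few assumed redirect values; the URLs are examples only, and the import path presumes the Flask version this extension was written for.

from flaskext.auth.views import check_next  # assumed import path

# A relative redirect is accepted unchanged.
assert check_next('/dashboard/?page=2') == '/dashboard/?page=2'
# A value containing a space is rejected.
assert check_next('bad next') is None
# An absolute URL is rejected because '//' appears before any '?'.
assert check_next('http://example.com/') is None
# '//' only inside the query string is still allowed.
assert check_next('/view/?param=http://example.com') == '/view/?param=http://example.com'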
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilities for loading the WikiSQL dataset.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import json from language.xsp.data_preprocessing import abstract_sql from language.xsp.data_preprocessing import abstract_sql_converters from language.xsp.data_preprocessing.nl_to_sql_example import NLToSQLExample from language.xsp.data_preprocessing.nl_to_sql_example import populate_utterance from language.xsp.data_preprocessing.sql_parsing import ParseError from language.xsp.data_preprocessing.sql_parsing import populate_sql from language.xsp.data_preprocessing.sql_utils import preprocess_sql import sqlparse import tensorflow.compat.v1.gfile as gfile def normalize_sql(sql, replace_period=True): """Normalizes WikiSQL SQL queries.""" sql = sql.replace('_/_', '_OR_') sql = sql.replace('/', '_OR_') sql = sql.replace('?', '') if replace_period: sql = sql.replace('.', '') sql = sql.replace('(', '') sql = sql.replace(')', '') sql = sql.replace('%', '') return sql def normalize_entities(entity_name): """Normalizes database entities (table and column names).""" entity_name = normalize_sql(entity_name) entity_name = entity_name.replace(' ', '_').upper() return entity_name def convert_wikisql(input_example, schema, tokenizer, generate_sql, anonymize_values, use_abstract_sql, tables_schema=None, allow_value_generation=False): """Converts a WikiSQL example into a NLToSQLExample.""" example = NLToSQLExample() try: try: example = populate_utterance(example, input_example[0], schema, tokenizer) except ValueError as e: print(e) return None # WikiSQL databases have a single table. assert len(schema) == 1 # Some preprocessing of the WikiSQL SQL queries. sql = input_example[1].rstrip('; ') sql = sql.replace('TABLE', list(schema.keys())[0]) sql = sql.replace('_FIELD', '') string_split_sql = sql.split(' ') if string_split_sql[1].lower() in {'count', 'min', 'max', 'avg', 'sum'}: # Add parentheses around the column that's an argument of any of these # aggregate functions (because gold annotations don't have it). 
sql = ' '.join(string_split_sql[0:2] + ['(', string_split_sql[2], ')'] + string_split_sql[3:]) sql = normalize_sql(sql, replace_period=False) try: sql = preprocess_sql(sql) except UnicodeDecodeError as e: return None sql = sql.lower() parsed_sql = sqlparse.parse(sql)[0] successful_copy = True if generate_sql: try: if use_abstract_sql: successful_copy = abstract_sql_converters.populate_abstract_sql( example, sql, tables_schema, anonymize_values) else: successful_copy = populate_sql(parsed_sql, example, anonymize_values) except (ParseError, ValueError, AssertionError, KeyError, IndexError, abstract_sql.ParseError, abstract_sql.UnsupportedSqlError) as e: return None if not successful_copy and not allow_value_generation: return None if not example.gold_sql_query.actions: return None elif example.gold_sql_query.actions[-1].symbol == '=': return None except UnicodeEncodeError as e: print(e) return None return example def load_wikisql_tables(filepath): """Loads the WikiSQL tables from a path and reformats as the format.""" dbs = dict() with gfile.Open(filepath) as infile: tables = [json.loads(line) for line in infile if line] for table in tables: db_dict = dict() table_name = table['section_title'] if 'section_title' in table and table[ 'section_title'] else ( table['name'] if 'name' in table else table['page_title']) table_name = normalize_entities(table_name) db_dict[table_name] = list() for column_name, column_type in zip(table['header'], table['types']): if column_type == 'real': column_type = 'number' assert column_type in {'text', 'number'}, column_type column_name = normalize_entities(column_name) db_dict[table_name].append({ 'field name': column_name, 'is primary key': False, 'is foreign key': False, 'type': column_type }) if table['id'] not in dbs: dbs[table['id']] = db_dict return dbs
google-research/language
language/xsp/data_preprocessing/wikisql_preprocessing.py
Python
apache-2.0
5,129
0.008384
#!/usr/bin/python # -*- coding: utf-8 -*- class MiClase: @staticmethod def metodo(entrada): return entrada objeto = MiClase print objeto.metodo(5)
psicobyte/ejemplos-python
ApendiceI/p202.py
Python
gpl-3.0
167
0.005988
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Load plugin assets from disk.""" import os.path from tensorboard.compat import tf _PLUGINS_DIR = "plugins" def _IsDirectory(parent, item): """Helper that returns if parent/item is a directory.""" return tf.io.gfile.isdir(os.path.join(parent, item)) def PluginDirectory(logdir, plugin_name): """Returns the plugin directory for plugin_name.""" return os.path.join(logdir, _PLUGINS_DIR, plugin_name) def ListPlugins(logdir): """List all the plugins that have registered assets in logdir. If the plugins_dir does not exist, it returns an empty list. This maintains compatibility with old directories that have no plugins written. Args: logdir: A directory that was created by a TensorFlow events writer. Returns: a list of plugin names, as strings """ plugins_dir = os.path.join(logdir, _PLUGINS_DIR) try: entries = tf.io.gfile.listdir(plugins_dir) except tf.errors.NotFoundError: return [] # Strip trailing slashes, which listdir() includes for some filesystems # for subdirectories, after using them to bypass IsDirectory(). return [ x.rstrip("/") for x in entries if x.endswith("/") or _IsDirectory(plugins_dir, x) ] def ListAssets(logdir, plugin_name): """List all the assets that are available for given plugin in a logdir. Args: logdir: A directory that was created by a TensorFlow summary.FileWriter. plugin_name: A string name of a plugin to list assets for. Returns: A string list of available plugin assets. If the plugin subdirectory does not exist (either because the logdir doesn't exist, or because the plugin didn't register) an empty list is returned. """ plugin_dir = PluginDirectory(logdir, plugin_name) try: # Strip trailing slashes, which listdir() includes for some filesystems. return [x.rstrip("/") for x in tf.io.gfile.listdir(plugin_dir)] except tf.errors.NotFoundError: return [] def RetrieveAsset(logdir, plugin_name, asset_name): """Retrieve a particular plugin asset from a logdir. Args: logdir: A directory that was created by a TensorFlow summary.FileWriter. plugin_name: The plugin we want an asset from. asset_name: The name of the requested asset. Returns: string contents of the plugin asset. Raises: KeyError: if the asset does not exist. """ asset_path = os.path.join(PluginDirectory(logdir, plugin_name), asset_name) try: with tf.io.gfile.GFile(asset_path, "r") as f: return f.read() except tf.errors.NotFoundError: raise KeyError("Asset path %s not found" % asset_path) except tf.errors.OpError as e: raise KeyError( "Couldn't read asset path: %s, OpError %s" % (asset_path, e) )
tensorflow/tensorboard
tensorboard/backend/event_processing/plugin_asset_util.py
Python
apache-2.0
3,555
0.000281
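A small sketch of how the plugin asset helpers above might be combined to dump every registered asset for a run; the logdir value is a placeholder.

from tensorboard.backend.event_processing import plugin_asset_util as pau

logdir = '/tmp/example_run'  # hypothetical directory written by a summary writer

for plugin in pau.ListPlugins(logdir):
    for asset in pau.ListAssets(logdir, plugin):
        contents = pau.RetrieveAsset(logdir, plugin, asset)
        print('%s/%s: %d bytes' % (plugin, asset, len(contents)))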
#!/usr/bin/python # # Copyright (c) 2011 The Chromium OS Authors. # # See file CREDITS for list of people who contributed to this # project. # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of # the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, # MA 02111-1307 USA # """See README for more information""" from optparse import OptionParser import os import re import sys import unittest # Our modules import checkpatch import command import gitutil import patchstream import project import settings import terminal import test parser = OptionParser() parser.add_option('-a', '--no-apply', action='store_false', dest='apply_patches', default=True, help="Don't test-apply patches with git am") parser.add_option('-H', '--full-help', action='store_true', dest='full_help', default=False, help='Display the README file') parser.add_option('-c', '--count', dest='count', type='int', default=-1, help='Automatically create patches from top n commits') parser.add_option('-i', '--ignore-errors', action='store_true', dest='ignore_errors', default=False, help='Send patches email even if patch errors are found') parser.add_option('-n', '--dry-run', action='store_true', dest='dry_run', default=False, help="Do a dry run (create but don't email patches)") parser.add_option('-p', '--project', default=project.DetectProject(), help="Project name; affects default option values and " "aliases [default: %default]") parser.add_option('-r', '--in-reply-to', type='string', action='store', help="Message ID that this series is in reply to") parser.add_option('-s', '--start', dest='start', type='int', default=0, help='Commit to start creating patches from (0 = HEAD)') parser.add_option('-t', '--ignore-bad-tags', action='store_true', default=False, help='Ignore bad tags / aliases') parser.add_option('--test', action='store_true', dest='test', default=False, help='run tests') parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False, help='Verbose output of errors and warnings') parser.add_option('--cc-cmd', dest='cc_cmd', type='string', action='store', default=None, help='Output cc list for patch file (used by git)') parser.add_option('--no-check', action='store_false', dest='check_patch', default=True, help="Don't check for patch compliance") parser.add_option('--no-tags', action='store_false', dest='process_tags', default=True, help="Don't process subject tags as aliaes") parser.usage = """patman [options] Create patches from commits in a branch, check them and email them as specified by tags you place in the commits. Use -n to do a dry run first.""" # Parse options twice: first to get the project and second to handle # defaults properly (which depends on project). 
(options, args) = parser.parse_args() settings.Setup(parser, options.project, '') (options, args) = parser.parse_args() # Run our meagre tests if options.test: import doctest sys.argv = [sys.argv[0]] suite = unittest.TestLoader().loadTestsFromTestCase(test.TestPatch) result = unittest.TestResult() suite.run(result) for module in ['gitutil', 'settings']: suite = doctest.DocTestSuite(module) suite.run(result) # TODO: Surely we can just 'print' result? print result for test, err in result.errors: print err for test, err in result.failures: print err # Called from git with a patch filename as argument # Printout a list of additional CC recipients for this patch elif options.cc_cmd: fd = open(options.cc_cmd, 'r') re_line = re.compile('(\S*) (.*)') for line in fd.readlines(): match = re_line.match(line) if match and match.group(1) == args[0]: for cc in match.group(2).split(', '): cc = cc.strip() if cc: print cc fd.close() elif options.full_help: pager = os.getenv('PAGER') if not pager: pager = 'more' fname = os.path.join(os.path.dirname(sys.argv[0]), 'README') command.Run(pager, fname) # Process commits, produce patches files, check them, email them else: gitutil.Setup() if options.count == -1: # Work out how many patches to send if we can options.count = gitutil.CountCommitsToBranch() - options.start col = terminal.Color() if not options.count: str = 'No commits found to process - please use -c flag' print col.Color(col.RED, str) sys.exit(1) # Read the metadata from the commits if options.count: series = patchstream.GetMetaData(options.start, options.count) cover_fname, args = gitutil.CreatePatches(options.start, options.count, series) # Fix up the patch files to our liking, and insert the cover letter series = patchstream.FixPatches(series, args) if series and cover_fname and series.get('cover'): patchstream.InsertCoverLetter(cover_fname, series, options.count) # Do a few checks on the series series.DoChecks() # Check the patches, and run them through 'git am' just to be sure if options.check_patch: ok = checkpatch.CheckPatches(options.verbose, args) else: ok = True if options.apply_patches: if not gitutil.ApplyPatches(options.verbose, args, options.count + options.start): ok = False cc_file = series.MakeCcFile(options.process_tags, cover_fname, not options.ignore_bad_tags) # Email the patches out (giving the user time to check / cancel) cmd = '' if ok or options.ignore_errors: cmd = gitutil.EmailPatches(series, cover_fname, args, options.dry_run, not options.ignore_bad_tags, cc_file, in_reply_to=options.in_reply_to) # For a dry run, just show our actions as a sanity check if options.dry_run: series.ShowActions(args, cmd, options.process_tags) os.remove(cc_file)
renesas-rz/u-boot-2013.04
tools/patman/patman.py
Python
gpl-2.0
6,763
0.001774
""" Django settings """ from django.core.urlresolvers import reverse_lazy DEBUG = True TEMPLATE_DEBUG = DEBUG SESSION_COOKIE_SECURE = False CSRF_COOKIE_SECURE = False TEMPLATE_STRING_IF_INVALID = '**** INVALID EXPRESSION: %s ****' ADMINS = ( ('admin', '[email protected]'), ) MANAGERS = ADMINS # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # On Unix systems, a value of None will cause Django to use the same # timezone as the operating system. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'Europe/London' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-gb' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = False # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/home/media/media.lawrence.com/media/" MEDIA_ROOT = '' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://media.lawrence.com/media/", "http://example.com/media/" MEDIA_URL = '' # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/home/media/media.lawrence.com/static/" STATIC_ROOT = 'web_static/' # URL prefix for static files. # Example: "http://media.lawrence.com/static/" STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', #'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # Make this unique, and don't share it with anybody. SECRET_KEY = 'w@t8%tdwyi-n$u_s#4_+cwnq&6)1n)l3p-qe(ziala0j^vo12d' # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', #'django.template.loaders.eggs.Loader', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'reversion.middleware.RevisionMiddleware', # 'debug_toolbar.middleware.DebugToolbarMiddleware', ) ROOT_URLCONF = 'example.urls' # Python dotted path to the WSGI application used by Django's runserver. WSGI_APPLICATION = 'example.wsgi.application' TEMPLATE_DIRS = ( # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". 
# Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. ) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', # Uncomment the next line to enable the admin: 'django.contrib.admin', # Uncomment the next line to enable admin documentation: # 'django.contrib.admindocs', 'reversion', 'south', 'example', 'base', 'login', 'moderate', ) # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } } # User Profile. AUTH_PROFILE_MODULE = 'example.UserProfile' # URL where requests are redirected after login when the contrib.auth.login # view gets no next parameter. LOGIN_REDIRECT_URL = reverse_lazy('project.home') # Login URL. Used with login_required decorators when a user # must be logged in before accessing the view otherwise this URL # will be called. LOGIN_URL = reverse_lazy('login.login') # Login URL. Used with login_required decorators when a user # must be logged in before accessing the view otherwise this URL # will be called. # LOGIN_URL = reverse_lazy('login.login') # https://github.com/johnsensible/django-sendfile SENDFILE_BACKEND = 'sendfile.backends.development' SENDFILE_ROOT = 'media-private' FTP_STATIC_DIR = None FTP_STATIC_URL = None
pkimber/old_moderate
example/base.py
Python
apache-2.0
5,826
0.000515
from chainer.functions.activation import relu from chainer import link from chainer.links.connection import convolution_2d class MLPConvolution2D(link.ChainList): """Two-dimensional MLP convolution layer of Network in Network. This is an "mlpconv" layer from the Network in Network paper. This layer is a two-dimensional convolution layer followed by 1x1 convolution layers and interleaved activation functions. Note that it does not apply the activation function to the output of the last 1x1 convolution layer. Args: in_channels (int): Number of channels of input arrays. out_channels (tuple of ints): Tuple of number of channels. The i-th integer indicates the number of filters of the i-th convolution. ksize (int or pair of ints): Size of filters (a.k.a. kernels) of the first convolution layer. ``ksize=k`` and ``ksize=(k, k)`` are equivalent. stride (int or pair of ints): Stride of filter applications at the first convolution layer. ``stride=s`` and ``stride=(s, s)`` are equivalent. pad (int or pair of ints): Spatial padding width for input arrays at the first convolution layer. ``pad=p`` and ``pad=(p, p)`` are equivalent. activation (function): Activation function for internal hidden units. Note that this function is not applied to the output of this link. use_cudnn (bool): If ``True``, then this link uses cuDNN if available. conv_init: An initializer of weight matrices passed to the convolution layers. bias_init: An initializer of bias vectors passed to the convolution layers. See: `Network in Network <http://arxiv.org/abs/1312.4400v3>`. Attributes: activation (function): Activation function. """ def __init__(self, in_channels, out_channels, ksize, stride=1, pad=0, wscale=1, activation=relu.relu, use_cudnn=True, conv_init=None, bias_init=None): assert len(out_channels) > 0 convs = [convolution_2d.Convolution2D( in_channels, out_channels[0], ksize, stride, pad, wscale=wscale, use_cudnn=use_cudnn, initialW=conv_init, initial_bias=bias_init)] for n_in, n_out in zip(out_channels, out_channels[1:]): convs.append(convolution_2d.Convolution2D( n_in, n_out, 1, wscale=wscale, initialW=conv_init, initial_bias=bias_init, use_cudnn=use_cudnn)) super(MLPConvolution2D, self).__init__(*convs) self.activation = activation def __call__(self, x): """Computes the output of the mlpconv layer. Args: x (~chainer.Variable): Input image. Returns: ~chainer.Variable: Output of the mlpconv layer. """ f = self.activation for l in self[:-1]: x = f(l(x)) return self[-1](x)
benob/chainer
chainer/links/connection/mlp_convolution_2d.py
Python
mit
3,009
0.000332
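A hedged usage sketch for the MLPConvolution2D link above; the channel counts, kernel size and input shape are illustrative and assume chainer and numpy are available.

import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L

# An 11x11 convolution followed by two 1x1 convolutions, all with 96 filters.
mlpconv = L.MLPConvolution2D(3, (96, 96, 96), 11, stride=4, activation=F.relu)

x = chainer.Variable(np.zeros((1, 3, 227, 227), dtype=np.float32))
y = mlpconv(x)  # the activation is applied between, but not after, the stacked layers
print(y.data.shape)  # (1, 96, 55, 55) for this input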
import pygame from fish import Fish from seaweed import Seaweed class Water: def __init__(self): # color, pos_x, pos_y, width, height self.mOrangeFish = Fish((255, 152, 0), 50, 175, 175, 100) self.mGreyFish = Fish((96, 125, 139), 350, 130, 125, 200) self.mRedFish = Fish((183, 28, 28), 200, 300, 175, 500) # color, start-point x, start-point y, end-point x, end-point y, width self.mSeaweed = Seaweed((104, 159, 56), 450, 450, 450, 500, 3) self.mSeaweed2 = Seaweed((104, 159, 56), 400, 450, 400, 500, 3) self.mSeaweed3 = Seaweed((104, 159, 56), 370, 430, 370, 500, 3) self.mSeaweed4 = Seaweed((104, 159, 56), 390, 430, 390, 500, 3) self.mSeaweed5 = Seaweed((104, 159, 56), 320, 450, 320, 500, 3) return def draw(self, surface): color = (1, 87, 155) pointlist = [(0, 80), (100, 100), (200, 80), (300, 100), (400, 80), (500, 100), (600, 80), (600, 500), (0, 500)] pygame.draw.polygon(surface, color, pointlist, 0) self.mOrangeFish.draw(surface) self.mGreyFish.draw(surface) self.mRedFish.draw(surface) self.mSeaweed.draw(surface) self.mSeaweed2.draw(surface) self.mSeaweed3.draw(surface) self.mSeaweed4.draw(surface) self.mSeaweed5.draw(surface) return
joshl8n/school-projects
illustrate/water.py
Python
gpl-3.0
1,287
0.034188
""" Tests for module recommendation. """
hypermindr/barbante
barbante/recommendation/tests/__init__.py
Python
mit
41
0
from pycket import values from pycket.error import SchemeException from pycket.hash.base import ( W_MutableHashTable, W_ImmutableHashTable, w_missing, get_dict_item) from pycket.hash.persistent_hash_map import make_persistent_hash_type from rpython.rlib.objectmodel import compute_hash, r_dict, specialize from rpython.rlib.rarithmetic import r_uint @specialize.arg(0) def make_simple_mutable_table(cls, keys=None, vals=None): data = r_dict(cls.cmp_value, cls.hash_value, force_non_null=True) if keys is not None and vals is not None: assert len(keys) == len(vals) for i, k in enumerate(keys): data[k] = vals[i] return cls(data) @specialize.arg(0) def make_simple_mutable_table_assocs(cls, assocs, who): if not assocs.is_proper_list(): raise SchemeException("%s: not given proper list" % who) data = r_dict(cls.cmp_value, cls.hash_value, force_non_null=True) while isinstance(assocs, values.W_Cons): entry, assocs = assocs.car(), assocs.cdr() if not isinstance(entry, values.W_Cons): raise SchemeException("%s: expected list of pairs" % who) key, val = entry.car(), entry.cdr() data[key] = val return cls(data) @specialize.arg(0) def make_simple_immutable_table(cls, keys=None, vals=None): table = cls.EMPTY if keys is not None and vals is not None: assert len(keys) == len(vals) for i, k in enumerate(keys): table = table.assoc(k, vals[i]) return table @specialize.arg(0) def make_simple_immutable_table_assocs(cls, assocs, who): if not assocs.is_proper_list(): raise SchemeException("%s: not given proper list" % who) table = cls.EMPTY while isinstance(assocs, values.W_Cons): entry, assocs = assocs.car(), assocs.cdr() if not isinstance(entry, values.W_Cons): raise SchemeException("%s: expected list of pairs" % who) key, val = entry.car(), entry.cdr() table = table.assoc(key, val) return table class W_SimpleMutableHashTable(W_MutableHashTable): _attrs_ = ['data'] _immutable_fields_ = ["data"] @staticmethod def hash_value(v): raise NotImplementedError("abstract method") @staticmethod def cmp_value(a, b): raise NotImplementedError("abstract method") def __init__(self, data): self.data = data def make_copy(self): raise NotImplementedError("abstract method") def hash_items(self): return self.data.items() def tostring(self): lst = [values.W_Cons.make(k, v).tostring() for k, v in self.data.iteritems()] return "#hash(%s)" % " ".join(lst) def hash_set(self, k, v, env, cont): from pycket.interpreter import return_value self.data[k] = v return return_value(values.w_void, env, cont) def hash_remove_inplace(self, k, env, cont): from pycket.interpreter import return_value del self.data[k] return return_value(values.w_void, env, cont) def hash_ref(self, k, env, cont): from pycket.interpreter import return_value return return_value(self.data.get(k, w_missing), env, cont) def length(self): return len(self.data) class W_EqvMutableHashTable(W_SimpleMutableHashTable): def make_empty(self): return make_simple_mutable_table(W_EqvMutableHashTable) def make_copy(self): return W_EqvMutableHashTable(self.data.copy(), immutable=False) @staticmethod def hash_value(k): return k.hash_eqv() @staticmethod def cmp_value(a, b): return a.eqv(b) def get_item(self, i): return get_dict_item(self.data, i) class W_EqMutableHashTable(W_SimpleMutableHashTable): def make_copy(self): return W_EqMutableHashTable(self.data.copy()) def make_empty(self): return make_simple_mutable_table(W_EqMutableHashTable) @staticmethod def hash_value(k): if isinstance(k, values.W_Fixnum): return compute_hash(k.value) if isinstance(k, values.W_Character): return ord(k.value) 
return compute_hash(k) @staticmethod def cmp_value(a, b): from pycket.prims.equal import eqp_logic return eqp_logic(a, b) def get_item(self, i): return get_dict_item(self.data, i) W_EqvImmutableHashTable = make_persistent_hash_type( super=W_ImmutableHashTable, keytype=values.W_Object, valtype=values.W_Object, name="W_EqvImmutableHashTable", hashfun=lambda x: r_uint(W_EqvMutableHashTable.hash_value(x)), equal=W_EqvMutableHashTable.cmp_value) W_EqImmutableHashTable = make_persistent_hash_type( super=W_ImmutableHashTable, keytype=values.W_Object, valtype=values.W_Object, name="W_EqImmutableHashTable", hashfun=lambda x: r_uint(W_EqMutableHashTable.hash_value(x)), equal=W_EqMutableHashTable.cmp_value) class __extend__(W_EqvImmutableHashTable): def length(self): return len(self) def make_copy(self): return self def make_empty(self): return W_EqvImmutableHashTable.EMPTY def hash_ref(self, k, env, cont): from pycket.interpreter import return_value result = self.val_at(k, w_missing) return return_value(result, env, cont) def hash_remove(self, key, env, cont): from pycket.interpreter import return_value removed = self.without(key) return return_value(removed, env, cont) def tostring(self): assert type(self) is W_EqvImmutableHashTable entries = [None] * len(self) i = 0 for k, v in self.iteritems(): entries[i] = "(%s . %s)" % (k.tostring(), v.tostring()) i += 1 return "#hasheqv(%s)" % " ".join(entries) class __extend__(W_EqImmutableHashTable): def length(self): return len(self) def make_copy(self): return self def make_empty(self): return W_EqImmutableHashTable.EMPTY def hash_ref(self, key, env, cont): from pycket.interpreter import return_value result = self.val_at(key, w_missing) return return_value(result, env, cont) def hash_remove(self, key, env, cont): from pycket.interpreter import return_value removed = self.without(key) return return_value(removed, env, cont) def tostring(self): assert type(self) is W_EqImmutableHashTable entries = [None] * len(self) i = 0 for k, v in self.iteritems(): entries[i] = "(%s . %s)" % (k.tostring(), v.tostring()) i += 1 return "#hasheq(%s)" % " ".join(entries)
magnusmorton/pycket
pycket/hash/simple.py
Python
mit
6,811
0.002496
import sys

if sys.version_info[0] == 2:
    from urlparse import urljoin
    string_types = basestring,
else:
    from urllib.parse import urljoin
    string_types = str,


def atoi(string, default=0):
    if (isinstance(string, int)):
        return string
    try:
        return int(string)
    except (TypeError, ValueError):
        return default
ZipFile/papi.py
papi/helpers.py
Python
bsd-2-clause
356
0
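A brief usage sketch for the `atoi` helper above (assuming the module is importable as `papi.helpers`; the sample values are illustrative, not part of the original file):

from papi.helpers import atoi

print(atoi("42"))              # 42
print(atoi("not a number"))    # 0  (ValueError -> default)
print(atoi(None, default=-1))  # -1 (TypeError -> default)
print(atoi(7))                 # 7  (ints pass through unchanged)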
import hmac

import config
from jinja2 import Environment, FileSystemLoader

jinja2_env = Environment(loader=FileSystemLoader(
    config.TEMPLATE_DIRS), autoescape=True)


class BaseHandler(object):
    def __init__(self):
        self.request = None
        self.response = None

    def make_secure_value(self, value):
        return "{}|{}".format(value, hmac.new(
            config.SECRET_KEY.encode(), value.encode()).hexdigest())

    def check_secure_value(self, secure_value):
        val, hashed = secure_value.split('|', 1)
        if secure_value == self.make_secure_value(val):
            return True
        return False

    def set_secure_cookie(self, name, val):
        cookie_val = self.make_secure_value(val)
        self.response.set_cookie(name, cookie_val)

    def get_secure_cookie(self, name):
        cookie_val = self.request.cookies.get(name)
        if cookie_val and self.check_secure_value(cookie_val):
            return cookie_val.split('|', 1)[0]
        return None

    def login(self, user):
        self.set_secure_cookie('username', str(user['username']))

    def logout(self, user):
        self.response.delete_cookie('username')

    def write(self, text):
        self.response.write(text)

    def redirect(self, url, status=301):
        self.response.status = status
        self.response.location = url

    def render(self, filename, **context):
        template = jinja2_env.get_template(filename)
        self.write(template.render(**context))

    def __call__(self, request, response):
        self.request = request
        self.response = response
        action = request.method.lower()
        try:
            method = getattr(self, action)
        except AttributeError:
            raise AttributeError("No action for {}".format(action))
        method(**request.urlvars)
xstrengthofonex/code-live-tutorials
python_web_development/database/handlers/base_handler.py
Python
mit
1,866
0.000536
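The cookie handling in `BaseHandler` above boils down to an HMAC over the cookie value. A standalone sketch of that scheme follows; the secret key here is a hypothetical stand-in for `config.SECRET_KEY`, and `digestmod` is spelled out because Python 3.8+ requires it, whereas the original Python 2 era code fell back to MD5 implicitly:

import hashlib
import hmac

SECRET_KEY = "change-me"  # hypothetical stand-in for config.SECRET_KEY


def make_secure_value(value):
    # Same "value|hexdigest" format as BaseHandler.make_secure_value
    digest = hmac.new(SECRET_KEY.encode(), value.encode(),
                      digestmod=hashlib.md5).hexdigest()
    return "{}|{}".format(value, digest)


def check_secure_value(secure_value):
    # Recompute the signature from the plain value and compare
    val, _hashed = secure_value.split('|', 1)
    return secure_value == make_secure_value(val)


signed = make_secure_value("alice")
assert check_secure_value(signed)
assert not check_secure_value("alice|forged-digest")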
""" Tests third_party_auth admin views """ import unittest from django.contrib.admin.sites import AdminSite from django.core.files.uploadedfile import SimpleUploadedFile from django.core.urlresolvers import reverse from django.forms import models from student.tests.factories import UserFactory from third_party_auth.admin import OAuth2ProviderConfigAdmin from third_party_auth.models import OAuth2ProviderConfig from third_party_auth.tests import testutil # This is necessary because cms does not implement third party auth @unittest.skipUnless(testutil.AUTH_FEATURE_ENABLED, testutil.AUTH_FEATURES_KEY + ' not enabled') class Oauth2ProviderConfigAdminTest(testutil.TestCase): """ Tests for oauth2 provider config admin """ def test_oauth2_provider_edit_icon_image(self): """ Test that we can update an OAuth provider's icon image from the admin form. OAuth providers are updated using KeyedConfigurationModelAdmin, which updates models by adding a new instance that replaces the old one, instead of editing the old instance directly. Updating the icon image is tricky here because KeyedConfigurationModelAdmin copies data over from the previous version by injecting its attributes into request.GET, but the icon ends up in request.FILES. We need to ensure that the value is prepopulated correctly, and that we can clear and update the image. """ # Login as a super user user = UserFactory.create(is_staff=True, is_superuser=True) user.save() self.client.login(username=user.username, password='test') # Get baseline provider count providers = OAuth2ProviderConfig.objects.all() pcount = len(providers) # Create a provider provider1 = self.configure_dummy_provider( enabled=True, icon_class='', icon_image=SimpleUploadedFile('icon.svg', '<svg><rect width="50" height="100"/></svg>'), ) # Get the provider instance with active flag providers = OAuth2ProviderConfig.objects.all() self.assertEquals(len(providers), 1) self.assertEquals(providers[pcount].id, provider1.id) # Edit the provider via the admin edit link admin = OAuth2ProviderConfigAdmin(provider1, AdminSite()) # pylint: disable=protected-access update_url = reverse('admin:{}_{}_add'.format(admin.model._meta.app_label, admin.model._meta.model_name)) update_url += "?source={}".format(provider1.pk) # Remove the icon_image from the POST data, to simulate unchanged icon_image post_data = models.model_to_dict(provider1) del post_data['icon_image'] # Remove max_session_length; it has a default null value which must be POSTed # back as an absent value, rather than as a "null-like" included value. del post_data['max_session_length'] # Change the name, to verify POST post_data['name'] = 'Another name' # Post the edit form: expecting redirect response = self.client.post(update_url, post_data) self.assertEquals(response.status_code, 302) # Editing the existing provider creates a new provider instance providers = OAuth2ProviderConfig.objects.all() self.assertEquals(len(providers), pcount + 2) self.assertEquals(providers[pcount].id, provider1.id) provider2 = providers[pcount + 1] # Ensure the icon_image was preserved on the new provider instance self.assertEquals(provider2.icon_image, provider1.icon_image) self.assertEquals(provider2.name, post_data['name'])
lduarte1991/edx-platform
common/djangoapps/third_party_auth/tests/test_admin.py
Python
agpl-3.0
3,696
0.001353
# Copyright (C) 2019-2021 Dmitry Marakasov <[email protected]>
#
# This file is part of repology
#
# repology is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# repology is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with repology. If not, see <http://www.gnu.org/licenses/>.

import os
import re
from collections import defaultdict
from typing import Iterable

from repology.logger import Logger
from repology.package import PackageFlags
from repology.packagemaker import NameType, PackageFactory, PackageMaker
from repology.parsers import Parser
from repology.parsers.maintainers import extract_maintainers
from repology.parsers.patches import add_patch_files
from repology.parsers.walk import walk_tree


def _parse_descfile(path: str, logger: Logger) -> dict[str, list[str]]:
    data: dict[str, list[str]] = defaultdict(list)

    # http://t2sde.org/handbook/html/t2.package.desc.html
    tag_map = {
        'i': 'title',
        't': 'text',
        'u': 'url',
        'a': 'author',
        'm': 'maintainer',
        'c': 'category',
        'f': 'flag',
        'r': 'architecture',
        'arch': 'architecture',
        'k': 'kernel',
        'kern': 'kernel',
        'e': 'dependency',
        'dep': 'dependency',
        'l': 'license',
        's': 'status',
        'v': 'version',
        'ver': 'version',
        'p': 'priority',
        'pri': 'priority',
        'o': 'conf',
        'd': 'download',
        'down': 'download',
        #'s': 'source',  # duplicate - documentation is incorrect?
        'src': 'source',
    }

    with open(path, 'r', encoding='latin1') as descfile:
        for line in descfile:
            line = line.strip()
            if line.startswith('#'):
                continue

            match = re.fullmatch(r'\[([^\[\]]+)\]\s*(.*?)', line, re.DOTALL)
            if match:
                tag = match.group(1).lower()
                tag = tag_map.get(tag, tag)
                data[tag].append(match.group(2))
            elif line:
                logger.log('unexpected line "{}"'.format(line), Logger.WARNING)

    return data


class T2DescParser(Parser):
    def iter_parse(self, path: str, factory: PackageFactory) -> Iterable[PackageMaker]:
        for desc_path in walk_tree(path, suffix='.desc'):
            rel_desc_path = os.path.relpath(desc_path, path)

            with factory.begin(rel_desc_path) as pkg:
                pkgpath = os.path.dirname(rel_desc_path)
                name = os.path.basename(pkgpath)

                if name + '.desc' != os.path.basename(rel_desc_path):
                    raise RuntimeError('Path inconsistency (expected .../foo/foo.desc)')

                data = _parse_descfile(desc_path, pkg)

                pkg.add_name(name, NameType.T2_NAME)
                pkg.add_name(pkgpath, NameType.T2_FULL_NAME)
                pkg.set_version(data['version'][0])
                pkg.set_summary(data['title'][0])
                pkg.add_homepages((url.split()[0] for url in data.get('url', []) if url))
                #pkg.add_homepages(data.get('cv-url'))  # url used by version checker; may be garbage
                pkg.add_licenses(data['license'])
                pkg.add_maintainers(map(extract_maintainers, data['maintainer']))
                pkg.add_categories(data['category'])

                for cksum, filename, url, *rest in (line.split() for line in data.get('download', [])):
                    url = url.lstrip('-!')

                    if url.endswith('/'):
                        url += filename

                    if url.startswith('cvs') or url.startswith('git') or url.startswith('svn') or url.startswith('hg'):
                        # snapshots basically
                        pkg.set_flags(PackageFlags.UNTRUSTED)

                    pkg.add_downloads(url)

                add_patch_files(pkg, os.path.dirname(desc_path), '*.patch')

                yield pkg
repology/repology
repology/parsers/parsers/t2.py
Python
gpl-3.0
4,354
0.002067
# -*- coding: utf-8 -*- # Copyright (c) 2015, Frappe Technologies and contributors # For license information, please see license.txt """ # Integrating PayPal ### 1. Validate Currency Support Example: from frappe.integrations.utils import get_payment_gateway_controller controller = get_payment_gateway_controller("PayPal") controller().validate_transaction_currency(currency) ### 2. Redirect for payment Example: payment_details = { "amount": 600, "title": "Payment for bill : 111", "description": "payment via cart", "reference_doctype": "Payment Request", "reference_docname": "PR0001", "payer_email": "[email protected]", "payer_name": "Nuran Verkleij", "order_id": "111", "currency": "USD", "payment_gateway": "Razorpay", "subscription_details": { "plan_id": "plan_12313", # if Required "start_date": "2018-08-30", "billing_period": "Month" #(Day, Week, SemiMonth, Month, Year), "billing_frequency": 1, "customer_notify": 1, "upfront_amount": 1000 } } # redirect the user to this url url = controller().get_payment_url(**payment_details) ### 3. On Completion of Payment Write a method for `on_payment_authorized` in the reference doctype Example: def on_payment_authorized(payment_status): # your code to handle callback ##### Note: payment_status - payment gateway will put payment status on callback. For paypal payment status parameter is one from: [Completed, Cancelled, Failed] More Details: <div class="small">For details on how to get your API credentials, follow this link: <a href="https://developer.paypal.com/docs/classic/api/apiCredentials/" target="_blank">https://developer.paypal.com/docs/classic/api/apiCredentials/</a></div> """ from __future__ import unicode_literals import frappe import json from frappe import _ from datetime import datetime from frappe.utils import get_url, call_hook_method, cint, get_timestamp, cstr, now, date_diff, get_datetime from six.moves.urllib.parse import urlencode from frappe.model.document import Document from frappe.integrations.utils import create_request_log, make_post_request, create_payment_gateway api_path = '/api/method/frappe.integrations.doctype.paypal_settings.paypal_settings' class PayPalSettings(Document): supported_currencies = ["AUD", "BRL", "CAD", "CZK", "DKK", "EUR", "HKD", "HUF", "ILS", "JPY", "MYR", "MXN", "TWD", "NZD", "NOK", "PHP", "PLN", "GBP", "RUB", "SGD", "SEK", "CHF", "THB", "TRY", "USD"] def __setup__(self): setattr(self, "use_sandbox", 0) def setup_sandbox_env(self, token): data = json.loads(frappe.db.get_value("Integration Request", token, "data")) setattr(self, "use_sandbox", cint(frappe._dict(data).use_sandbox) or 0) def validate(self): create_payment_gateway("PayPal") call_hook_method('payment_gateway_enabled', gateway="PayPal") if not self.flags.ignore_mandatory: self.validate_paypal_credentails() def on_update(self): pass def validate_transaction_currency(self, currency): if currency not in self.supported_currencies: frappe.throw(_("Please select another payment method. 
PayPal does not support transactions in currency '{0}'").format(currency)) def get_paypal_params_and_url(self): params = { "USER": self.api_username, "PWD": self.get_password(fieldname="api_password", raise_exception=False), "SIGNATURE": self.signature, "VERSION": "98", "METHOD": "GetPalDetails" } if hasattr(self, "use_sandbox") and self.use_sandbox: params.update({ "USER": frappe.conf.sandbox_api_username, "PWD": frappe.conf.sandbox_api_password, "SIGNATURE": frappe.conf.sandbox_signature }) api_url = "https://api-3t.sandbox.paypal.com/nvp" if (self.paypal_sandbox or self.use_sandbox) else "https://api-3t.paypal.com/nvp" return params, api_url def validate_paypal_credentails(self): params, url = self.get_paypal_params_and_url() params = urlencode(params) try: res = make_post_request(url=url, data=params.encode("utf-8")) if res["ACK"][0] == "Failure": raise Exception except Exception: frappe.throw(_("Invalid payment gateway credentials")) def get_payment_url(self, **kwargs): setattr(self, "use_sandbox", cint(kwargs.get("use_sandbox", 0))) response = self.execute_set_express_checkout(**kwargs) if self.paypal_sandbox or self.use_sandbox: return_url = "https://www.sandbox.paypal.com/cgi-bin/webscr?cmd=_express-checkout&token={0}" else: return_url = "https://www.paypal.com/cgi-bin/webscr?cmd=_express-checkout&token={0}" kwargs.update({ "token": response.get("TOKEN")[0], "correlation_id": response.get("CORRELATIONID")[0] }) self.integration_request = create_request_log(kwargs, "Remote", "PayPal", response.get("TOKEN")[0]) return return_url.format(kwargs["token"]) def execute_set_express_checkout(self, **kwargs): params, url = self.get_paypal_params_and_url() params.update({ "METHOD": "SetExpressCheckout", "returnUrl": get_url("{0}.get_express_checkout_details".format(api_path)), "cancelUrl": get_url("/payment-cancel"), "PAYMENTREQUEST_0_PAYMENTACTION": "SALE", "PAYMENTREQUEST_0_AMT": kwargs['amount'], "PAYMENTREQUEST_0_CURRENCYCODE": kwargs['currency'].upper() }) if kwargs.get('subscription_details'): self.configure_recurring_payments(params, kwargs) params = urlencode(params) response = make_post_request(url, data=params.encode("utf-8")) if response.get("ACK")[0] != "Success": frappe.throw(_("Looks like something is wrong with this site's Paypal configuration.")) return response def configure_recurring_payments(self, params, kwargs): # removing the params as we have to setup rucurring payments for param in ('PAYMENTREQUEST_0_PAYMENTACTION', 'PAYMENTREQUEST_0_AMT', 'PAYMENTREQUEST_0_CURRENCYCODE'): del params[param] params.update({ "L_BILLINGTYPE0": "RecurringPayments", #The type of billing agreement "L_BILLINGAGREEMENTDESCRIPTION0": kwargs['description'] }) def get_paypal_and_transaction_details(token): doc = frappe.get_doc("PayPal Settings") doc.setup_sandbox_env(token) params, url = doc.get_paypal_params_and_url() integration_request = frappe.get_doc("Integration Request", token) data = json.loads(integration_request.data) return data, params, url def setup_redirect(data, redirect_url, custom_redirect_to=None, redirect=True): redirect_to = data.get('redirect_to') or None redirect_message = data.get('redirect_message') or None if custom_redirect_to: redirect_to = custom_redirect_to if redirect_to: redirect_url += '?' 
+ urlencode({'redirect_to': redirect_to}) if redirect_message: redirect_url += '&' + urlencode({'redirect_message': redirect_message}) # this is done so that functions called via hooks can update flags.redirect_to if redirect: frappe.local.response["type"] = "redirect" frappe.local.response["location"] = get_url(redirect_url) @frappe.whitelist(allow_guest=True, xss_safe=True) def get_express_checkout_details(token): try: doc = frappe.get_doc("PayPal Settings") doc.setup_sandbox_env(token) params, url = doc.get_paypal_params_and_url() params.update({ "METHOD": "GetExpressCheckoutDetails", "TOKEN": token }) response = make_post_request(url, data=params) if response.get("ACK")[0] != "Success": frappe.respond_as_web_page(_("Something went wrong"), _("Looks like something went wrong during the transaction. Since we haven't confirmed the payment, Paypal will automatically refund you this amount. If it doesn't, please send us an email and mention the Correlation ID: {0}.").format(response.get("CORRELATIONID", [None])[0]), indicator_color='red', http_status_code=frappe.ValidationError.http_status_code) return doc = frappe.get_doc("Integration Request", token) update_integration_request_status(token, { "payerid": response.get("PAYERID")[0], "payer_email": response.get("EMAIL")[0] }, "Authorized", doc=doc) frappe.local.response["type"] = "redirect" frappe.local.response["location"] = get_redirect_uri(doc, token, response.get("PAYERID")[0]) except Exception: frappe.log_error(frappe.get_traceback()) @frappe.whitelist(allow_guest=True, xss_safe=True) def confirm_payment(token): try: custom_redirect_to = None data, params, url = get_paypal_and_transaction_details(token) params.update({ "METHOD": "DoExpressCheckoutPayment", "PAYERID": data.get("payerid"), "TOKEN": token, "PAYMENTREQUEST_0_PAYMENTACTION": "SALE", "PAYMENTREQUEST_0_AMT": data.get("amount"), "PAYMENTREQUEST_0_CURRENCYCODE": data.get("currency").upper() }) response = make_post_request(url, data=params) if response.get("ACK")[0] == "Success": update_integration_request_status(token, { "transaction_id": response.get("PAYMENTINFO_0_TRANSACTIONID")[0], "correlation_id": response.get("CORRELATIONID")[0] }, "Completed") if data.get("reference_doctype") and data.get("reference_docname"): custom_redirect_to = frappe.get_doc(data.get("reference_doctype"), data.get("reference_docname")).run_method("on_payment_authorized", "Completed") frappe.db.commit() redirect_url = '/integrations/payment-success' else: redirect_url = "/integrations/payment-failed" setup_redirect(data, redirect_url, custom_redirect_to) except Exception: frappe.log_error(frappe.get_traceback()) @frappe.whitelist(allow_guest=True, xss_safe=True) def create_recurring_profile(token, payerid): try: custom_redirect_to = None updating = False data, params, url = get_paypal_and_transaction_details(token) addons = data.get("addons") subscription_details = data.get("subscription_details") if data.get('subscription_id') and addons: updating = True manage_recurring_payment_profile_status(data['subscription_id'], 'Cancel', params, url) params.update({ "METHOD": "CreateRecurringPaymentsProfile", "PAYERID": payerid, "TOKEN": token, "DESC": data.get("description"), "BILLINGPERIOD": subscription_details.get("billing_period"), "BILLINGFREQUENCY": subscription_details.get("billing_frequency"), "AMT": data.get("amount") if data.get("subscription_amount") == data.get("amount") else data.get("subscription_amount"), "CURRENCYCODE": data.get("currency").upper(), "INITAMT": data.get("upfront_amount") }) 
starts_at = get_datetime(subscription_details.get("start_date")) or frappe.utils.now_datetime() status_changed_to = 'Completed' if data.get("starting_immediately") or updating else 'Verified' #"PROFILESTARTDATE": datetime.utcfromtimestamp(get_timestamp(starts_at)).isoformat() params.update({ "PROFILESTARTDATE": starts_at.isoformat() }) response = make_post_request(url, data=params) if response.get("ACK")[0] == "Success": update_integration_request_status(token, { "profile_id": response.get("PROFILEID")[0], }, "Completed") if data.get("reference_doctype") and data.get("reference_docname"): data['subscription_id'] = response.get("PROFILEID")[0] frappe.flags.data = data custom_redirect_to = frappe.get_doc(data.get("reference_doctype"), data.get("reference_docname")).run_method("on_payment_authorized", status_changed_to) frappe.db.commit() redirect_url = '/integrations/payment-success' else: redirect_url = "/integrations/payment-failed" setup_redirect(data, redirect_url, custom_redirect_to) except Exception: frappe.log_error(frappe.get_traceback()) def update_integration_request_status(token, data, status, error=False, doc=None): if not doc: doc = frappe.get_doc("Integration Request", token) doc.update_status(data, status) def get_redirect_uri(doc, token, payerid): data = json.loads(doc.data) if data.get("subscription_details") or data.get("subscription_id"): return get_url("{0}.create_recurring_profile?token={1}&payerid={2}".format(api_path, token, payerid)) else: return get_url("{0}.confirm_payment?token={1}".format(api_path, token)) def manage_recurring_payment_profile_status(profile_id, action, args, url): args.update({ "METHOD": "ManageRecurringPaymentsProfileStatus", "PROFILEID": profile_id, "ACTION": action }) response = make_post_request(url, data=args) if response.get("ACK")[0] != "Success": frappe.throw(_("Failed while amending subscription"))
ESS-LLP/frappe
frappe/integrations/doctype/paypal_settings/paypal_settings.py
Python
mit
12,324
0.022558
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: vmware_guest_find
short_description: Find the folder path(s) for a virtual machine by name or UUID
description:
    - Find the folder path(s) for a virtual machine by name or UUID
version_added: 2.4
author:
    - James Tanner <[email protected]>
    - Abhijeet Kasurde <[email protected]>
notes:
    - Tested on vSphere 6.5
requirements:
    - "python >= 2.6"
    - PyVmomi
options:
   name:
     description:
     - Name of the VM to work with.
     - This is required if C(uuid) parameter is not supplied.
   uuid:
     description:
     - UUID of the instance to manage if known, this is VMware's BIOS UUID.
     - This is required if C(name) parameter is not supplied.
   datacenter:
     description:
     - Destination datacenter for the find operation.
     - Deprecated in 2.5, will be removed in 2.9 release.
extends_documentation_fragment: vmware.documentation
'''

EXAMPLES = r'''
- name: Find Guest's Folder using name
  vmware_guest_find:
    hostname: 192.168.1.209
    username: [email protected]
    password: vmware
    validate_certs: no
    name: testvm
  register: vm_folder

- name: Find Guest's Folder using UUID
  vmware_guest_find:
    hostname: 192.168.1.209
    username: [email protected]
    password: vmware
    validate_certs: no
    uuid: 38c4c89c-b3d7-4ae6-ae4e-43c5118eae49
  register: vm_folder
'''

RETURN = r"""
folders:
    description: List of folders for user specified virtual machine
    returned: on success
    type: list
    sample: [
        '/DC0/vm',
    ]
"""

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.vmware import PyVmomi, get_all_objs, vmware_argument_spec

try:
    from pyVmomi import vim
except ImportError:
    pass


class PyVmomiHelper(PyVmomi):
    def __init__(self, module):
        super(PyVmomiHelper, self).__init__(module)
        self.name = self.params['name']
        self.uuid = self.params['uuid']

    def getvm_folder_paths(self):
        results = []

        # compare the folder path of each VM against the search path
        vmList = get_all_objs(self.content, [vim.VirtualMachine])
        for item in vmList.items():
            vobj = item[0]
            if not isinstance(vobj.parent, vim.Folder):
                continue
            # Match by name or uuid
            if vobj.config.name == self.name or vobj.config.uuid == self.uuid:
                folderpath = self.get_vm_path(self.content, vobj)
                results.append(folderpath)

        return results


def main():
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str'),
        uuid=dict(type='str'),
        datacenter=dict(removed_in_version=2.9, type='str')
    )

    module = AnsibleModule(argument_spec=argument_spec,
                           required_one_of=[['name', 'uuid']],
                           mutually_exclusive=[['name', 'uuid']],
                           )

    pyv = PyVmomiHelper(module)
    # Check if the VM exists before continuing
    folders = pyv.getvm_folder_paths()

    # VM already exists
    if folders:
        try:
            module.exit_json(folders=folders)
        except Exception as exc:
            module.fail_json(msg="Folder enumeration failed with exception %s" % to_native(exc))
    else:
        module.fail_json(msg="Unable to find folders for virtual machine %s" % (module.params.get('name') or
                                                                                module.params.get('uuid')))


if __name__ == '__main__':
    main()
dataxu/ansible
lib/ansible/modules/cloud/vmware/vmware_guest_find.py
Python
gpl-3.0
4,032
0.002232
#coding=utf-8
from docker import Client
import time
import logging
from envir import config
import ast
import re

log = logging.getLogger(__name__)


class DockerOpt:
    def __init__(self):
        app_config = config.read_app_config()
        self.app = app_config
        self.url = app_config['docker']['url']
        log.info("create docker with %s", self.url)

    def gen_tag(self, branch, app_version, api_version):
        now = time.localtime()
        now_str = time.strftime("%Y%m%d%H%M%S", now)
        if str(branch).startswith("develop"):
            tag_name = api_version + "-" + app_version + "-d" + now_str
        elif str(branch).startswith("feature/"):
            tag_name = api_version + "-" + app_version + "-f" + now_str
        elif str(branch).startswith("release/"):
            tag_name = api_version + "-" + app_version + "-r" + now_str
        elif str(branch).startswith("hotfix/"):
            tag_name = api_version + "-" + app_version + "-h" + now_str
        else:
            raise Exception('unsupported branch')
        return tag_name

    def gen_repository(self, registry, project_key, app_name):
        return str(registry) + "/" + str(project_key) + "/" + str(app_name)

    def build(self, path, tag):
        """
        Similar to `docker build`: build a docker image.

        :param path: context path that includes the Dockerfile
        :param tag: image's tag
        :return: None
        """
        self.read_port()
        version = self.app['docker']['api']['version']
        cli = Client(base_url=self.url, version=str(version))
        response = cli.build(path, tag, rm=True)
        for line in response:
            rp = {key: str(item.strip().decode('unicode_escape'))
                  for key, item in ast.literal_eval(line).items()}
            log.info(rp)
        log.info("successful build image with dockerImageTag=%s", str(tag).split(':')[1])

    def push_images(self, repository, tag=None):
        version = self.app['docker']['api']['version']
        cli = Client(base_url=self.url, version=str(version))
        response = cli.push(repository, tag=tag, stream=True)
        for line in response:
            log.info(line)

    def read_port(self):
        with open('Dockerfile') as s:
            con = s.read()
            m = re.search(r'EXPOSE\s(.+)', con)
            if m:
                port = m.group(1)
                ports = ','.join(port.split(' '))
                log.info('read portsSymbol=%s', ports)
            else:
                raise Exception('Docker file not exists')

    def rm_image(self, repo):
        version = self.app['docker']['api']['version']
        cli = Client(base_url=self.url, version=str(version))
        cli.remove_image(repo)
xyalan/build-interpreter
docker_tools/docker_opt.py
Python
apache-2.0
2,738
0.002191
#!python2
from random import randint
from boids import *
import sys,pygame,time,copy

screenx = 800
screeny = 600
ticktime = 0.01
fps = 80
clock = pygame.time.Clock()

size = screenx,screeny

pygame.init()
screen = pygame.display.set_mode(size)
time = 0

def gen_boids(x,y,low,upper):
    nboids = randint(low,upper)
    boids = []
    while nboids > 0:
        boids.append(Boid(nboids, complex(randint(0, x), randint(0, y)),
                          complex(randint(-100, 100),randint(-100,100)), 100, 100, 50))
        nboids -= 1
    return boids

boids = gen_boids(screenx,screeny,100,200)

while 1:
    n=clock.tick(fps)
    background = pygame.Surface(screen.get_size())
    background = background.convert()
    background.fill((250,250,250))

    for i in xrange(len(boids)):
        # step the boids
        boidaccel = boids[i].step(boids,ticktime)  # returns the accel
        boids[i].pos = complex(boids[i].pos.real % screenx, boids[i].pos.imag % screeny)  # wrap around

        # draw the acceleration vector
        #thisarect = pygame.draw.aaline(background, (0,0,255), (boids[i].pos.real, boids[i].pos.imag),
        #                               (boids[i].pos.real + boidaccel.real, boids[i].pos.imag + boidaccel.imag))

        drawx = int(boids[i].pos.real)
        drawy = int(boids[i].pos.imag)
        endp = 9*(boids[i].v/abs(boids[i].v))
        thisrect = pygame.draw.line(background, (0,0,0), (drawx,drawy),
                                    (drawx+endp.real,drawy+endp.imag), 3)  # draw the boid

        # draw all flock relationships
        #for flockboid in boids[i].flock:
        #    linerect = pygame.draw.aaline(background, (255,0,0), (drawx, drawy),
        #                                  ((flockboid.pos.real),(flockboid.pos.imag)))

    screen.blit(background, (0,0))
    pygame.display.update()

x= pygame.quit()
jsfyfield/pyboids
gfx_boids.py
Python
gpl-2.0
1,867
0.024103
from datetime import datetime from casexml.apps.stock.models import StockReport, StockTransaction from corehq.apps.commtrack.const import RequisitionStatus from corehq.apps.commtrack.models import RequisitionCase from casexml.apps.case.models import CommCareCase from corehq.apps.commtrack.tests.util import CommTrackTest, bootstrap_user, FIXED_USER, ROAMING_USER from corehq.apps.commtrack.sms import handle, SMSError class StockReportTest(CommTrackTest): user_definitions = [ROAMING_USER, FIXED_USER] def setUp(self): super(StockReportTest, self).setUp() def testStockReportRoaming(self): self.assertEqual(0, len(self.get_commtrack_forms())) amounts = { 'pp': 10, 'pq': 20, 'pr': 30, } # soh loc1 pp 10 pq 20... handled = handle(self.users[0].get_verified_number(), 'soh {loc} {report}'.format( loc='loc1', report=' '.join('%s %s' % (k, v) for k, v in amounts.items()) )) self.assertTrue(handled) forms = list(self.get_commtrack_forms()) self.assertEqual(1, len(forms)) self.assertEqual(_get_location_from_sp(self.sp), _get_location_from_form(forms[0])) # todo: right now this makes one report per balance when really they should all be in the same one self.assertEqual(3, StockReport.objects.count()) for report in StockReport.objects.all(): self.assertEqual(forms[0]._id, report.form_id) self.assertEqual('balance', report.type) self.assertEqual(1, report.stocktransaction_set.count()) for code, amt in amounts.items(): [product] = filter(lambda p: p.code_ == code, self.products) trans = StockTransaction.objects.get(product_id=product._id) self.assertEqual(self.sp._id, trans.case_id) self.assertEqual(0, trans.quantity) self.assertEqual(amt, trans.stock_on_hand) def testStockReportFixed(self): self.assertEqual(0, len(self.get_commtrack_forms())) amounts = { 'pp': 10, 'pq': 20, 'pr': 30, } # soh loc1 pp 10 pq 20... handled = handle(self.users[1].get_verified_number(), 'soh {report}'.format( report=' '.join('%s %s' % (k, v) for k, v in amounts.items()) )) self.assertTrue(handled) forms = list(self.get_commtrack_forms()) self.assertEqual(1, len(forms)) self.assertEqual(_get_location_from_sp(self.sp), _get_location_from_form(forms[0])) for code, amt in amounts.items(): [product] = filter(lambda p: p.code_ == code, self.products) trans = StockTransaction.objects.get(product_id=product._id) self.assertEqual(self.sp._id, trans.case_id) self.assertEqual(0, trans.quantity) self.assertEqual(amt, trans.stock_on_hand) class StockRequisitionTest(object): requisitions_enabled = True user_definitions = [ROAMING_USER] def setUp(self): super(CommTrackTest, self).setUp() self.user = self.users[0] def testRequisition(self): self.assertEqual(0, len(RequisitionCase.open_for_location(self.domain.name, self.loc._id))) self.assertEqual(0, len(self.get_commtrack_forms())) amounts = { 'pp': 10, 'pq': 20, 'pr': 30, } # req loc1 pp 10 pq 20... 
handled = handle(self.user.get_verified_number(), 'req {loc} {report}'.format( loc='loc1', report=' '.join('%s %s' % (k, v) for k, v in amounts.items()) )) self.assertTrue(handled) # make sure we got the updated requisitions reqs = RequisitionCase.open_for_location(self.domain.name, self.loc._id) self.assertEqual(3, len(reqs)) forms = list(self.get_commtrack_forms()) self.assertEqual(1, len(forms)) self.assertEqual(self.sp.location_, forms[0].location_) # check updated status for code, amt in amounts.items(): spp = CommCareCase.get(self.spps[code]._id) # make sure the index was created [req_ref] = spp.reverse_indices req_case = RequisitionCase.get(req_ref.referenced_id) self.assertEqual(str(amt), req_case.amount_requested) self.assertEqual(self.user._id, req_case.requested_by) self.assertEqual(req_case.location_, self.sp.location_) self.assertTrue(req_case._id in reqs) self.assertEqual(spp._id, req_case.get_product_case()._id) def testApprovalBadLocations(self): self.testRequisition() try: handle(self.user.get_verified_number(), 'approve') self.fail("empty locations should fail") except SMSError, e: self.assertEqual('must specify a location code', str(e)) try: handle(self.user.get_verified_number(), 'approve notareallocation') self.fail("unknown locations should fail") except SMSError, e: self.assertTrue('invalid location code' in str(e)) def testSimpleApproval(self): self.testRequisition() # approve loc1 handled = handle(self.user.get_verified_number(), 'approve {loc}'.format( loc='loc1', )) self.assertTrue(handled) reqs = RequisitionCase.open_for_location(self.domain.name, self.loc._id) self.assertEqual(3, len(reqs)) for req_id in reqs: req_case = RequisitionCase.get(req_id) self.assertEqual(RequisitionStatus.APPROVED, req_case.requisition_status) self.assertEqual(req_case.amount_requested, req_case.amount_approved) self.assertEqual(self.user._id, req_case.approved_by) self.assertIsNotNone(req_case.approved_on) self.assertTrue(isinstance(req_case.approved_on, datetime)) self.assertEqual(req_case.product_id, req_case.get_product_case().product) def testSimplePack(self): self.testRequisition() # pack loc1 handled = handle(self.user.get_verified_number(), 'pack {loc}'.format( loc='loc1', )) self.assertTrue(handled) reqs = RequisitionCase.open_for_location(self.domain.name, self.loc._id) self.assertEqual(3, len(reqs)) for req_id in reqs: req_case = RequisitionCase.get(req_id) self.assertEqual(RequisitionStatus.PACKED, req_case.requisition_status) self.assertEqual(req_case.amount_requested, req_case.amount_packed) self.assertEqual(self.user._id, req_case.packed_by) self.assertIsNotNone(req_case.packed_on) self.assertTrue(isinstance(req_case.packed_on, datetime)) self.assertEqual(req_case.product_id, req_case.get_product_case().product) def testReceipts(self): # this tests the requisition specific receipt keyword. not to be confused # with the standard stock receipt keyword self.testRequisition() reqs = RequisitionCase.open_for_location(self.domain.name, self.loc._id) self.assertEqual(3, len(reqs)) req_ids_by_product_code = dict(((RequisitionCase.get(id).get_product().code, id) for id in reqs)) rec_amounts = { 'pp': 30, 'pq': 20, 'pr': 10, } # rec loc1 pp 10 pq 20... 
handled = handle(self.user.get_verified_number(), 'rec {loc} {report}'.format( loc='loc1', report=' '.join('%s %s' % (k, v) for k, v in rec_amounts.items()) )) self.assertTrue(handled) # we should have closed the requisitions self.assertEqual(0, len(RequisitionCase.open_for_location(self.domain.name, self.loc._id))) forms = list(self.get_commtrack_forms()) self.assertEqual(2, len(forms)) self.assertEqual(self.sp.location_, forms[1].location_) # check updated status for code, amt in rec_amounts.items(): req_case = RequisitionCase.get(req_ids_by_product_code[code]) self.assertTrue(req_case.closed) self.assertEqual(str(amt), req_case.amount_received) self.assertEqual(self.user._id, req_case.received_by) self.assertTrue(req_case._id in reqs, 'requisition %s should be in %s' % (req_case._id, reqs)) def testReceiptsWithNoOpenRequisition(self): # make sure we don't have any open requisitions self.assertEqual(0, len(RequisitionCase.open_for_location(self.domain.name, self.loc._id))) rec_amounts = { 'pp': 30, 'pq': 20, 'pr': 10, } # rec loc1 pp 10 pq 20... handled = handle(self.user.get_verified_number(), 'rec {loc} {report}'.format( loc='loc1', report=' '.join('%s %s' % (k, v) for k, v in rec_amounts.items()) )) self.assertTrue(handled) # should still be no open requisitions self.assertEqual(0, len(RequisitionCase.open_for_location(self.domain.name, self.loc._id))) def _get_location_from_form(form): return form.form['location'] def _get_location_from_sp(sp): return sp.location_[-1]
gmimano/commcaretest
corehq/apps/commtrack/tests/test_sms_reporting.py
Python
bsd-3-clause
9,297
0.003442
__author__ = 'Dominik Krupke, dserv01.de' # # While you want to listen to lossless music on your computer you may not be able to also listen to it mobile because # it takes too much space. A 32GB-SDCard does not suffice for your full music library so you only have the options to # either only hearing to a subset mobile or converting the music to a lossy format. This script is for the second # option. # * Of course you don't want the lossy files on your computer because you already have your perfect lossless music there. # * If you extend your music library you want a simple way to also have them mobile. # * You don't want to convert already converted music twice # This script synchronizes a lossless library folder (e.g. your music folder on your computer) to a lossy library # folder (e.g. the music folder of your mobile device) by checking if for all music files in your lossless folder there # is a converted version in your lossy folder. If this is not the case the file is converted. On the other side it # checks if you still have the lossless file for each lossy file in your library, otherwise this file is removed (so # removing a file from your lossless library also removes it from your lossy library) # # You can use your own commands for converting specific files. These commands have to convert a single file (check the # commands-array). # # The configuration is done with the from_path and the to_path, as well as with the commands-array. import os import subprocess ##### CONFIGURATION ########################################################################################### # This is the path of your lossless libray, e.g. '/home/YOURNAME/Music/' FROM_PATH = '/home/doms/THINKPAD-L450/Music/' # This is the path of your lossy library, e.g. /mnt/SDCARD0/Music/' TO_PATH = '/home/foldersync/MotoX-Play/Music/' # Use [INPUT] and [OUTPUT] to build your commands. Both will be replaced by the full path but without the file extension, # e.g. 
/home/doms/Music/Beethoven/FuerElise.flac -> /home/doms/Music/Beethoven/FuerElise # You need to add the new and old fileextension for checking if the file is already converted and to remove old files COMMANDS = [['flac', 'ogg', 'oggenc -q 8 [INPUT].flac -o [OUTPUT].ogg'], ['mp3', 'mp3', 'cp [INPUT].mp3 [OUTPUT].mp3'] # ,['jpg', 'jpg', 'cp [INPUT].jpg [OUTPUT].jpg'] ] # Remove files that are not in the original library SYNC_DELETIONS = True ASK_BEFORE_DELETE = False ############################################################################################################### # Check if vorbis-tools are installed output = subprocess.check_output("whereis oggenc", shell=True) if (len(output) < 10): print "You need to install vorbis-tools first (Debian/Ubuntu: sudo apt-get install vorbis-tools)" print "If you don't use it, remove this check from the code" exit(1) # Check path format if (FROM_PATH[-1] != '/' or TO_PATH[-1] != '/'): print "Paths should end with \'/\'" exit(1) # Create library paths if not existence try: if (not os.path.exists(TO_PATH)): os.makedirs(TO_PATH) elif (os.path.isfile(TO_PATH)): raise Exception("Directory is file?!") except Exception as e: print "Could not create " + TO_PATH + " because " + str(e) print "Aborting" exit(1) # Create folders if not existing def createFolder(subpath): if (os.path.exists(TO_PATH + subpath) and os.path.isdir(TO_PATH + subpath)): return True try: os.makedirs(TO_PATH + subpath) return True except Exception as e: print "Could not create directory " + subpath+" because "+str(e) return False # Escape the paths for the os.system def escapePath(s): return s.replace(" ", "\ ").replace(")", "\)").replace("(", "\(").replace("&", "\&").replace("'", "\\\'") # Go through all files and convert for root, dirs, files in os.walk(FROM_PATH, topdown=False): subpath = root[len(FROM_PATH):] + "/" if (createFolder(subpath)): for name in files: filename_without_extension = os.path.splitext(name)[0] file_extension = os.path.splitext(name)[1][1:] source_path_without_extension = FROM_PATH + subpath + filename_without_extension converted_path_without_extension = TO_PATH + subpath + filename_without_extension # Get command tripple - sure you can do this more efficient with a hashmap but there will only be a few entries command_tripple = None for tripple in COMMANDS: if (tripple[0] == file_extension): command_tripple = tripple break if (not command_tripple): continue source_path = source_path_without_extension + "." + command_tripple[0] goal_path = converted_path_without_extension + "." 
+ command_tripple[1] if (os.path.isfile(source_path)): # If goal file does not exists or is older than source if (not os.path.exists(goal_path) or os.path.getctime(source_path) > os.path.getctime(goal_path)): print "Processing " + subpath + name os.system(command_tripple[2].replace("[INPUT]", escapePath(source_path_without_extension)).replace( "[OUTPUT]", escapePath(converted_path_without_extension))) else: print "Could not find " + subpath + name # Remove old files if (SYNC_DELETIONS): for root, dirs, files in os.walk(TO_PATH, topdown=False): subpath = root[len(TO_PATH):] + "/" for name in files: filename_without_extension = os.path.splitext(name)[0] file_extension = os.path.splitext(name)[1][1:] source_path_without_extension = FROM_PATH + subpath + filename_without_extension converted_path_without_extension = TO_PATH + subpath + filename_without_extension original_exists = False for tripple in COMMANDS: if (tripple[1] == file_extension and os.path.exists(source_path_without_extension + "." + tripple[0])): original_exists = True break if (not original_exists): filepath_to_delete = escapePath(converted_path_without_extension) + "." + file_extension print "Deleting "+filepath_to_delete os.system("rm " + ("-i " if ASK_BEFORE_DELETE else "") + filepath_to_delete) # Remove old empty folders for folder in dirs: subpath = root[len(TO_PATH):] + "/" if not os.path.exists(FROM_PATH + subpath + folder): os.system("rmdir " + escapePath(TO_PATH + subpath + folder))
dserv01/SyncLosslessToLossyMusicLibrary
SyncLosslessToLossyMusicLibrary.py
Python
gpl-2.0
6,785
0.005601
# -*- coding: utf-8 -*- # # This file is part of the bliss project # # Copyright (c) 2016 Beamline Control Unit, ESRF # Distributed under the GNU LGPLv3. See LICENSE for more info. import sys import time import numpy import struct import logging import threading # tango imports import tango from tango import GreenMode from tango import DebugIt from tango.server import run from tango.server import Device from tango.server import attribute, command from tango.server import device_property # Add additional imports import gevent from gevent import lock from functools import wraps from bliss.controllers.nano_bpm import NanoBpm as nanoBpm def is_cmd_allowed(fisallowed): def is_allowed(func): @wraps(func) def rfunc(self, *args, **keys): if getattr(self, fisallowed)(): return func(self, *args, **keys) else: raise Exception("Command not allowed") return rfunc return is_allowed class NanoBpm(Device): CONTINUOUS, STREAMING = range(2) BPP8, BPP16, BPP32 = range(3) # ------------------------------------------------------------------------- # Device Properties # ------------------------------------------------------------------------- CommandUrl = device_property(dtype=str,doc='use socket://192.999.999.999:2222') ControlUrl = device_property(dtype=str, doc='use socket://192.999.999.999:2223') Name = device_property(dtype=str, default_value="NanoBpm") # ------------------------------------------------------------------------- # General methods # ------------------------------------------------------------------------- def __init__(self, *args, **kwargs): self.__nanobpm = None Device.__init__(self, *args, **kwargs) @DebugIt() def delete_device(self): self._nanobpm = None @DebugIt() def init_device(self): Device.init_device(self) kwargs = { 'command_url': self.CommandUrl, 'control_url': self.ControlUrl, } self._nanoBpm = nanoBpm(self.Name, kwargs) self._AcqMode2String = { self.CONTINUOUS : 'continuous', self.STREAMING : 'stream' } self.imageDepth2String = { self.BPP8 : "bpp8", self.BPP16 : "bpp16", self.BPP32 : "bpp32" } self._logger = logging.getLogger(str(self)) logging.basicConfig(level=logging.INFO) self._logger.setLevel(logging.DEBUG) self._imageDepth = self.BPP8 self._imageData = None self._lock = lock.Semaphore() self._acqMode = self.STREAMING self._CoG = None self._xprofile = None self._yprofile = None self._xfit = None self._yfit = None # set up change events for Tango clients self.set_change_event("Centre", True, False) self.set_change_event("Xprofile", True, False) self.set_change_event("Yprofile", True, False) self.set_change_event("Xfit", True, False) self.set_change_event("Yfit", True, False) self.set_change_event("ReadImage8", True, False) self.set_change_event("ReadImage16", True, False) self.set_change_event("ReadImage32", True, False) self._nanoBpm.subscribe(self.bpmCallback) attr = self.get_device_attr().get_attr_by_name("acqMode") attr.set_write_value(self._AcqMode2String[self._acqMode]) attr = self.get_device_attr().get_attr_by_name("imageDepth") attr.set_write_value(self.imageDepth2String[self._imageDepth]) if self._nanoBpm is not None: attr = self.get_device_attr().get_attr_by_name("gain") attr.set_write_value(self._nanoBpm.GAIN) attr = self.get_device_attr().get_attr_by_name("offset") attr.set_write_value(self._nanoBpm.OFFSET) attr = self.get_device_attr().get_attr_by_name("horizMinAmp") attr.set_write_value(self._nanoBpm.H_MINAMP) attr = self.get_device_attr().get_attr_by_name("vertMinAmp") attr.set_write_value(self._nanoBpm.V_MINAMP) attr = 
self.get_device_attr().get_attr_by_name("vertMinRSQ") attr.set_write_value(self._nanoBpm.V_MINRSQ) attr = self.get_device_attr().get_attr_by_name("horizMinRSQ") attr.set_write_value(self._nanoBpm.H_MINRSQ) attr = self.get_device_attr().get_attr_by_name("maxIter") attr.set_write_value(self._nanoBpm.MAXITER) self.set_state(tango.DevState.ON) def always_executed_hook(self): pass # ------------------------------------------------------------------------- # Attributes # ------------------------------------------------------------------------- @attribute(label="AcqMode", dtype=str, description="Acquisition mode (continuous/stream)") @DebugIt() def acqMode(self): return self._AcqMode2String[self._acqMode] @acqMode.write @DebugIt() def acqMode(self, mode): ind = self._AcqMode2String.values().index(mode) self._acqMode = self._AcqMode2String.keys()[ind] @attribute(label="Integration time", dtype=float, unit="s", min_value="0.0", memorized=True, description="Integration time in seconds", fisallowed="is_attr_rw_allowed") @DebugIt() def integrationTime(self): return self._nanoBpm.getIntegrationTime() @integrationTime.write @DebugIt() def integrationTime(self, time): self._nanoBpm.setIntegrationTime(time) @attribute(label=" Subtract Background", dtype=bool, memorized=True, fisallowed="is_attr_rw_allowed", description="To activate background subtraction (true = ON)") @DebugIt() def subtractBackground(self): return self._nanoBpm.SUBTRACTDARK @subtractBackground.write @DebugIt() def subtractBackground(self, enable): self._nanoBpm.SUBTRACTDARK = 1 if enable else 0 @attribute(label="NbFramesToSum", dtype=int, hw_memorized=False, memorized=True, fisallowed="is_attr_rw_allowed", description="Number frames to average or sum (must be power of 2. default=4") @DebugIt() def nbFramesToSum(self): return self._nanoBpm.nbFramesToSum @nbFramesToSum.write @DebugIt() def nbFramesToSum(self, num): self._nanoBpm.nbFramesToSum = num @attribute(label="Gain", dtype=int, fisallowed="is_attr_rw_allowed", description="Gain of the device") def gain(self): return self._nanoBpm.GAIN @gain.write def gain(self, val): self._nanoBpm.GAIN = val @attribute(label="Offset", dtype=int, fisallowed="is_attr_rw_allowed", description="Offset of the device") def offset(self): return self._nanoBpm.OFFSET @offset.write def offset(self, val): self._nanoBpm.OFFSET = val @attribute(label="Maximum Iterations", dtype=int, fisallowed="is_attr_rw_allowed", description="Maximum number of iterations for the fitting algorithm") def maxIter(self): return self._nanoBpm.MAXITER @maxIter.write def maxIter(self, val): self._nanoBpm.MAXITER = val @attribute(label="Horizontal Minimum Amplitude", dtype=float, fisallowed="is_attr_rw_allowed", description="") def horizMinAmp(self): return self._nanoBpm.H_MINAMP @horizMinAmp.write def horizMinAmp(self, val): self._nanoBpm.H_MINAMP = val @attribute(label="Vertical Minimum Amplitude", dtype=float, fisallowed="is_attr_rw_allowed", description="Fitting minimum amplitude in vertical direction") def vertMinAmp(self): return self._nanoBpm.V_MINAMP @vertMinAmp.write def vertMinAmp(self, val): self._nanoBpm.V_MINAMP = val @attribute(label="Vertical Minimum Chi-squared", dtype=float, fisallowed="is_attr_rw_allowed", description="Minimum chi-squared value for fitting in vertical direction") def vertMinRSQ(self): return self._nanoBpm.V_MINRSQ @vertMinRSQ.write def vertMinRSQ(self, val): self._nanoBpm.V_MINRSQ = val @attribute(label="Horizontal Minimum Chi-squared", dtype=float, fisallowed="is_attr_rw_allowed", 
description="Minimum chi-squared value for fitting in horizontal direction") def horizMinRSQ(self): return self._nanoBpm.H_MINRSQ @horizMinRSQ.write def horizMinRSQ(self, val): self._nanoBpm.H_MINRSQ = val # @attribute(label="Last frame number acquired", dtype=int, fisallowed="is_attr_allowed", # description="") # @DebugIt() # def last_image_acquired(self): # return -1 if self._imageData is None else self._imageData[0] @attribute(label="Image depth",dtype=str, fisallowed="is_attr_allowed", description="") @DebugIt() def imageDepth(self): return self.imageDepth2String[self._imageDepth] @imageDepth.write def imageDepth(self, depth): try: ind = self.imageDepth2String.values().index(depth) self._imageDepth = self.imageDepth2String.keys()[ind] except ValueError: pass @attribute(label="Centre",dtype=[float,], fisallowed="is_attr_allowed",max_dim_x=2, max_dim_y=1, description="Centre of Gravity [x,y]") @DebugIt() def centre(self): if self._CoG is None: raise AttributeError("No valid centre of gravity has been collected") return self._CoG @attribute(label="XProfile",dtype=[float,], fisallowed="is_attr_allowed",max_dim_x=2000, max_dim_y=1, description="X Profile") @DebugIt() def xprofile(self): if self._yprofile is None: raise AttributeError("No valid x profile has been collected") return self._xprofile @attribute(label="YProfile",dtype=[float,], fisallowed="is_attr_allowed",max_dim_x=2000, max_dim_y=1, description="Y Profile") @DebugIt() def yprofile(self): if self._yprofile is None: raise AttributeError("No valid y profile has been collected") return self._yprofile @attribute(label="XFit",dtype=[float,], fisallowed="is_attr_allowed",max_dim_x=20, max_dim_y=1, description="X fit gaussian parameters") @DebugIt() def xfit(self): if self._xfit is None: raise AttributeError("No valid x fit has been collected") return self._xfit @attribute(label="YFit",dtype=[float,], fisallowed="is_attr_allowed",max_dim_x=20, max_dim_y=1, description="Y Fit gaussian parameters") @DebugIt() def yfit(self): if self._yfit is None: raise AttributeError("No valid y fit has been collected") return self._yfit @attribute(label="Image8",dtype=[['byte']], fisallowed="is_attr_allowed", max_dim_x=10000, max_dim_y=10000, description="") @DebugIt() def readImage8(self): if self._imageData is None: raise AttributeError("No valid image collected") if self._imageData[0] != self.BPP8: raise AttributeError("This is not a 8 bit image") return self._imageData[1] @attribute(label="Image16",dtype=[['uint16']], fisallowed="is_attr_allowed", max_dim_x=2000, max_dim_y=2000, description="") @DebugIt() def readImage16(self): if self._imageData is None: raise AttributeError("No valid image collected") if self._imageData[0] != self.BPP16: raise AttributeError("This is not a 16 bit image") return self._imageData[1] @attribute(label="Image32",dtype=[['uint32']], fisallowed="is_attr_allowed", max_dim_x=2000, max_dim_y=2000, description="") @DebugIt() def readImage32(self): if self._imageData is None: raise AttributeError("No valid image collected") if self._imageData[0] != self.BPP32: raise AttributeError("This is not a 16 bit image") return self._imageData[1] @DebugIt() def is_attr_allowed(self, attr): """ Allow reading but not writing of attributes whilst running """ if attr==tango.AttReqType.READ_REQ: return self.get_state() not in [tango.DevState.UNKNOWN, tango.DevState.FAULT] else: return self.get_state() not in [tango.DevState.UNKNOWN, tango.DevState.FAULT, tango.DevState.RUNNING] @DebugIt() def is_attr_rw_allowed(self, attr): """ Prohibit 
reading & writing of attributes whilst running """ if attr==tango.AttReqType.READ_REQ: return self.get_state() not in [tango.DevState.UNKNOWN, tango.DevState.FAULT, tango.DevState.RUNNING] else: return self.get_state() not in [tango.DevState.UNKNOWN, tango.DevState.FAULT, tango.DevState.RUNNING] def bpmCallback(self, cog, xprofile, yprofile, xfit, yfit, imageData): if cog is not None : if self._CoG is None or int(self._CoG[0]) != int(cog[0]) or int(self._CoG[1]) != int(cog[1]): self._logger.debug("bpmCallback(): pushing COG {0}".format(cog)) self.push_change_event("Centre", cog) with self._lock: self._CoG = cog else: self._logger.debug("bpmCallback(): CoG is the same {0}".format(cog)) if xprofile is not None: xp = [float(p) for p in xprofile] self.push_change_event("XProfile", xp) with self._lock: self._xprofile = xp if yprofile is not None: yp = [float(p) for p in yprofile] self.push_change_event("YProfile", yp) with self._lock: self._yprofile = yp if xfit is not None: self.push_change_event("Xfit", xfit) with self._lock: self._xfit = xfit if yfit is not None: self.push_change_event("Yfit", yfit) with self._lock: self._yfit = yfit if imageData is not None: depth = imageData[0] image = imageData[1] if depth == self.BPP32: self.push_change_event("ReadImage32",image) elif depth == self.BPP16: self.push_change_event("ReadImage16",image) else: self.push_change_event("ReadImage8",image) with self._lock: self._imageData = imageData # ------------------------------------------------------------------------- # commands # ------------------------------------------------------------------------- @command @DebugIt() def Reset(self): """ Reset will force a stop, reload the last saved configuration. """ self._nanoBpm.deviceReset() @command(dtype_out=(str,), doc_out="Get the hardware and software configuration of the device") @DebugIt() @is_cmd_allowed("is_command_allowed") def GetDeviceInfo(self): """ Get the hardware and software configuration of the device. """ deviceInfo = self._nanoBpm.getDeviceInfo() return ["{0}={1}".format(key,value) for key, value in deviceInfo.iteritems()] @command(dtype_out=(str,), doc_out="Get the current device configuration") @DebugIt() @is_cmd_allowed("is_command_allowed") def GetDeviceConfig(self): """ Get the current device configuration. """ deviceConfig = self._nanoBpm.getDeviceConfig() return ["{0}={1}".format(key,value) for key, value in deviceConfig.iteritems()] @command(dtype_out=(str,), doc_out="Get the current device parameters") @DebugIt() @is_cmd_allowed("is_command_allowed") def GetDeviceParameters(self): """ Get the current device parameters. """ deviceParameters = self._nanoBpm.getDeviceParameters() return ["{0}={1}".format(key,value) for key, value in deviceParameters.iteritems()] @command @DebugIt() @is_cmd_allowed("is_command_allowed") def CollectDark(self): """ Collect and store a dark current image. 
""" self.set_state(tango.DevState.RUNNING) gevent.spawn(self._doCollectDark) def _doCollectDark(self): self._logger.info("CollectDark(): Starting dark current image collection") self._nanoBpm.storeDark = True self._nanoBpm.readAve16Sum32() self._nanoBpm.storeDark = False self._logger.info("CollectDark(): Dark current image collection complete") with self._lock: if self._imageData is not None: self.set_state(tango.DevState.ON) else: self.set_state(tango.DevState.FAULT) @command @DebugIt() @is_cmd_allowed("is_command_allowed") def Collect(self): self.set_state(tango.DevState.RUNNING) gevent.spawn(self._doCollect) def _doCollect(self): if self._imageDepth == self.BPP32: self._logger.info("Collect(): collecting Ave16/sum32 image") self._nanoBpm.readAve16Sum32() elif self._imageDepth == self.BPP16: self._logger.info("Collect(): collecting 16 bit image") self._nanoBpm.readImage16() else: self._logger.info("Collect(): collecting 8 bit image") self._nanoBpm.readImage8() self._logger.info("Collect(): collection complete") with self._lock: if self._imageData is not None: self.set_state(tango.DevState.ON) else: self.set_state(tango.DevState.FAULT) @command @DebugIt() @is_cmd_allowed("is_command_allowed") def Start(self): self.set_state(tango.DevState.RUNNING) if self._acqMode == self.CONTINUOUS: self._nanoBpm.startContinuousFrame() else: self._nanoBpm.startDataStreaming() @command @DebugIt() def Stop(self): if self._acqMode == self.CONTINUOUS: self._nanoBpm.stopContinuousFrame() else: self._nanoBpm.stopDataStreaming() self.set_state(tango.DevState.ON) @DebugIt() def is_command_allowed(self): return self.get_state() not in [tango.DevState.UNKNOWN, tango.DevState.FAULT, tango.DevState.RUNNING] # ------------------------------------------------------------------------- # Run server # ------------------------------------------------------------------------- def main(): from tango import GreenMode from tango.server import run run([NanoBpm,], green_mode=GreenMode.Gevent) if __name__ == "__main__": main()
tiagocoutinho/bliss
bliss/tango/servers/nanobpm_ds.py
Python
lgpl-3.0
19,010
0.00526
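The NanoBpm server above uses PyTango's high-level API throughout (attribute/command decorators, set_state, a Gevent green-mode main). As a reference point, here is that pattern reduced to a minimal sketch; DemoDevice, its value attribute and its Reset command are illustrative names only, not part of the NanoBpm server.

from tango import DevState, GreenMode
from tango.server import Device, attribute, command, run


class DemoDevice(Device):
    """Tiny device showing the attribute/command style used by NanoBpm."""

    def init_device(self):
        Device.init_device(self)
        self._value = 0.0
        self.set_state(DevState.ON)

    @attribute(dtype=float, label="Value", description="Cached demo value")
    def value(self):
        return self._value

    @value.write
    def value(self, new_value):
        self._value = new_value

    @command
    def Reset(self):
        # Clear the cached value and report a healthy state.
        self._value = 0.0
        self.set_state(DevState.ON)


if __name__ == "__main__":
    run([DemoDevice], green_mode=GreenMode.Gevent)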
import os import re from os import system, popen, path as os_path, listdir from Screens.Screen import Screen from Components.Harddisk import * from Components.Sources.StaticText import StaticText from Components.ActionMap import ActionMap, NumberActionMap from FactoryTestPublic import * import time from enigma import eTimer from Components.Network import Network,iNetwork from Components.Label import Label,MultiColorLabel from Components.Pixmap import Pixmap,MultiPixmap class NetworkTest(Screen): skin = """ <screen name="About" position="220,57" size="840,605" title="About" flags="wfNoBorder"> <ePixmap position="0,0" zPosition="-10" size="1100,605" pixmap="DMConcinnity-HD-Transp/menu/setupbg.png" /> <widget source="global.CurrentTime" render="Label" position="20,20" size="80,25" font="Regular;23" foregroundColor="black" backgroundColor="grey" transparent="1"> <convert type="ClockToText">Default</convert> </widget> <widget source="global.CurrentTime" render="Label" position="110,20" size="140,25" font="Regular;23" foregroundColor="blue" backgroundColor="grey" transparent="1"> <convert type="ClockToText">Format:%d.%m.%Y</convert> </widget> <eLabel text="Network Test" position="270,20" size="540,43" font="Regular;35" halign="right" foregroundColor="black" backgroundColor="grey" transparent="1" /> <widget source="workstatus" render="Label" position="110,145" size="700,70" font="Regular;26" foregroundColor="yellow" backgroundColor="transpBlack" transparent="1" /> <widget source="testinfo" render="Label" position="120,230" size="660,330" font="Regular;15" backgroundColor="transpBlack" transparent="1" /> </screen>""" def __init__(self,session,testitem): Screen.__init__(self, session) self.testserver = "8.8.8.8" self.testitem = testitem self._runing = False self.result = False self["workstatus"] = StaticText("Check the network cable is connected,Press OK\n key Start Test") self["testinfo"] = StaticText("") self["teststatus"] = StaticText("Start") self["actions"] = ActionMap(["SetupActions", "ColorActions"], { "cancel": self.doclose, "ok": self.startTest }) self.testTimer = eTimer() self.testTimer.callback.append(self.pingServer) def startTest(self): print "key green" if self._runing == False: self["workstatus"].setText("Network Connecting ....") self["testinfo"].setText("") self.testTimer.start(1000) def doclose(self): self.updeteResult() self.close() def updeteResult(self): if self.result: self.testitem.setTestResult(FactoryTestItem.TESTRESULT_OK) else: self.testitem.setTestResult(FactoryTestItem.TESTRESULT_ERROR) def dhcp(self): cmd = "ifconfig eth0 down" p = os.popen(cmd) info = p.read() print "info read",info def pingServer(self): self.testTimer.stop() self._runing = True lost_packet = 100 try: cmd = "ping -c 3 %s" % self.testserver p = os.popen(cmd) info = p.read() print "info read",info p.close() self["testinfo"].setText(info) except: print "exception" print "Network Connection Error!!! Check cable and Hardware" self.result = False if info == "": self["workstatus"].setText("Network Connection Error!!! Check cable and Hardware") print "Network Connection Error!!! Check cable and Hardware" self.result = False else: try: re_lost_str = '(\d+)% packet loss' lost_packet = int(re.search(re_lost_str,info).group(1)) print "lost package is :",lost_packet except: self["workstatus"].setText("Network Connection Error!!! Check cable and Hardware") print "Network Connection Error!!! Check cable and Hardware" self.result = False if lost_packet == 100: self["workstatus"].setText("Network Connection Error!!! 
Check cable and Hardware") print "Network Connection Error!!! Check cable and Hardware" self.result = False else: self["workstatus"].setText("Network Connection OK") print "Network Connection OK" self.result = True self._runing = False return self.result class WifiTest(Screen): skin = """ <screen name="WifiTest" position="220,57" size="840,605" title="WifiTest" flags="wfNoBorder"> <ePixmap position="0,0" zPosition="-10" size="1100,605" pixmap="DMConcinnity-HD-Transp/menu/setupbg.png" /> <widget source="global.CurrentTime" render="Label" position="20,20" size="80,25" font="Regular;23" foregroundColor="black" backgroundColor="grey" transparent="1"> <convert type="ClockToText">Default</convert> </widget> <widget source="global.CurrentTime" render="Label" position="110,20" size="140,25" font="Regular;23" foregroundColor="blue" backgroundColor="grey" transparent="1"> <convert type="ClockToText">Format:%d.%m.%Y</convert> </widget> <eLabel text="Network test" position="270,20" size="540,43" font="Regular;35" halign="right" foregroundColor="black" backgroundColor="grey" transparent="1" /> <widget name="ConfigWifiText" position="70,100" size="400,25" zPosition="1" font="Regular;22" backgroundColor="transpBlack" transparent="1" /> <widget name="ConfigTestInfo" position="70,130" size="600,25" zPosition="1" font="Regular;22" foregroundColors="#8c8c93,#f23d21,#1cff1c" backgroundColor="transpBlack" transparent="1" /> <widget name="ConfigTest_OK" position="730,100" size="35,27" pixmaps="DMConcinnity-HD-Transp/buttons/green.png,DMConcinnity-HD-Transp/buttons/red.png" zPosition="2" alphatest="blend" /> <eLabel position="70,168" size="700,2" backgroundColor="darkgrey" /> <widget name="DhcpText" position="70,180" size="400,25" zPosition="1" font="Regular;22" backgroundColor="transpBlack" transparent="1" /> <widget name="DhcpTestInfo" position="70,210" size="600,55" zPosition="1" font="Regular;22" foregroundColors="#8c8c93,#f23d21,#1cff1c" backgroundColor="transpBlack" transparent="1" /> <widget name="DhcpTest_OK" position="730,180" size="35,27" pixmaps="DMConcinnity-HD-Transp/buttons/green.png,DMConcinnity-HD-Transp/buttons/red.png" zPosition="2" alphatest="blend" /> <eLabel position="70,278" size="700,2" backgroundColor="darkgrey" /> <widget name="connectText" position="70,290" size="400,25" zPosition="1" font="Regular;22" backgroundColor="transpBlack" transparent="1" /> <widget name="connectTestInfo" position="70,320" size="600,25" zPosition="1" font="Regular;22" foregroundColors="#8c8c93,#f23d21,#1cff1c" backgroundColor="transpBlack" transparent="1" /> <widget name="connectTest_OK" position="730,290" size="35,27" pixmaps="DMConcinnity-HD-Transp/buttons/green.png,DMConcinnity-HD-Transp/buttons/red.png" zPosition="2" alphatest="blend" /> <eLabel position="70,358" size="700,2" backgroundColor="darkgrey" /> <widget name="opreateInfo" position="170,450" size="400,200" zPosition="1" font="Regular;22" foregroundColors="#8c8c93,#f23d21,#1cff1c" backgroundColor="transpBlack" transparent="1" /> </screen>""" def __init__(self,session,testitem,testiface): Screen.__init__(self, session) self.testitem = testitem self._runing = False self.result = False self.ipConsole = Console() self.testiface = testiface self.testitem = testitem self.interfacename = "/etc/network/interfaces" self.interfacebackupname = "/etc/network/interfacesbackup" self.wlanconfigname = "/etc/wpa_supplicant.wlan0.test.conf" self.testitem.setTestResult(FactoryTestItem.TESTRESULT_ERROR) self.creatLables() self.teststep = 0 self.retest = False 
self["actions"] = ActionMap(["SetupActions", "ColorActions"], { "cancel": self.doclose, "ok": self.startTest }) self.testTimer = eTimer() self.testTimer.callback.append(self.doTest) def creatLables(self): if iNetwork.isWirelessInterface(self.testiface): self["ConfigWifiText"] = MultiColorLabel(_("WLAN connection config"))# else: self["ConfigWifiText"] = MultiColorLabel(_("LAN connection config"))# self["ConfigTestInfo"] = MultiColorLabel()#Teststatus # self["ConfigTestInfo"] = StaticText("") self["ConfigTest_OK"] = MultiPixmap()#testicon self["ConfigTest_OK"].hide() self["DhcpText"] = MultiColorLabel(_("DHCP"))# self["DhcpTestInfo"] = MultiColorLabel()#Teststatus self["DhcpTest_OK"] = MultiPixmap()#testicon self["DhcpTest_OK"].hide() self["connectText"] = MultiColorLabel(_("Network Connection Test"))# self["connectTestInfo"] = MultiColorLabel()#Teststatus self["connectTest_OK"] = MultiPixmap()#testicon self["connectTest_OK"].hide() self["opreateInfo"] = Label(_("Press 'OK' start")) def initLables(self): self.checkip = False self.teststep = 0 self["ConfigTestInfo"].setText(_(" ")) self["ConfigTestInfo"].setForegroundColorNum(0) self["ConfigTest_OK"].hide() self["DhcpTestInfo"].setText(_(" ")) self["DhcpTestInfo"].setForegroundColorNum(0) self["DhcpTest_OK"].hide() self["connectTestInfo"].setText(_(" ")) self["connectTestInfo"].setForegroundColorNum(0) self["connectTest_OK"].hide() self["opreateInfo"].setText(_("please wait,network testing .....")) def doclose(self): if self._runing == False: # self.updeteResult() self.close() # self.testTimer = eTimer() # self.testTimer.callback.append(self.pingServer) def startTest(self): if self._runing == False: self._runing = True self.initLables() self.doTest() else: pass def doTest(self): print "timer runin doTest()" self.testTimer.stop() self.runTest() def runTest(self): print "tun in runTest()" step = self.teststep if step == 0: self.test() elif step == 1: self["ConfigTestInfo"].setText(_("config Wifi network,please wait...")) self["ConfigTestInfo"].setForegroundColorNum(0) self.test() elif step == 2: self["DhcpTestInfo"].setText(_("dhcp get ip address,please wait...")) self["DhcpTestInfo"].setForegroundColorNum(0) self.test() elif step == 3: self["connectTestInfo"].setText(_("ping network server,please wait...")) self["connectTestInfo"].setForegroundColorNum(0) self.test() else: self._runing = False self.teststep = 0 self.teststep += 1 def test(self): if self.teststep == 0: self.testTimer.start(100) if self.teststep == 1: self.doNetworkconfig() if self.teststep == 2: self.checkipaddr() if self.teststep == 3: self.pingserver() else: pass def doNetworkconfig(self): if iNetwork.isWirelessInterface(self.testiface): #back up /etc/network/interfaces ret = os.system("cp "+self.interfacename+" "+self.interfacebackupname) if ret != 0: print "backup interfaces file fail ret:\n",ret self["ConfigTestInfo"].setText("backup file(interfaces) fail,Wifi test stop!!") self["ConfigTestInfo"].setForegroundColorNum(1) self["ConfigTest_OK"].setPixmapNum(1) self["ConfigTest_OK"].show() return "backup interfaces file fail" #config interfaces self.writeTestInterfaces() interfaces = ["eth0"] self.writeWifiConfig(self.testiface) iNetwork.deactivateInterface(interfaces,self.activeNetworkCB) else: interfaces = ["wlan0"] iNetwork.deactivateInterface(interfaces,self.activeNetworkCB) # self.doLanconfig(true) def writeTestInterfaces(self): fp = file(self.interfacename, 'w') fp.write("# automatically generated by enigma 2\n# do NOT change manually!\n\n") fp.write("auto lo\n") 
fp.write("iface lo inet loopback\n\n") fp.write("iface wlan0 inet dhcp\n") fp.write("\tpre-up wpa_supplicant -i wlan0 -c /etc/wpa_supplicant.wlan0.test.conf -B -dd -D wext || true\n") fp.write("\tpre-down wpa_cli -i wlan0 terminate || true\n\n") fp.write("auto eth0\n") fp.write("iface eth0 inet dhcp\n") fp.close() def getWlanConfigName(self,iface): return self.wlanconfigname def writeWifiConfig(self, iface): print "do write wifi config(wpa_supplicant.wlan.config)..." essid = "wifi-test" hiddenessid = False encrypted = True encryption = "WPA/WPA2" wepkeytype = "ASCII" psk = "12345678" fp = file(self.getWlanConfigName(iface), 'w') fp.write('#WPA Supplicant Configuration by enigma2\n') fp.write('ctrl_interface=/var/run/wpa_supplicant\n') fp.write('eapol_version=1\n') fp.write('fast_reauth=1\n') if hiddenessid: fp.write('ap_scan=2\n') else: fp.write('ap_scan=1\n') fp.write('network={\n') fp.write('\tssid="'+essid+'"\n') fp.write('\tscan_ssid=0\n') if encrypted: if encryption in ('WPA', 'WPA2', 'WPA/WPA2'): fp.write('\tkey_mgmt=WPA-PSK\n') if encryption == 'WPA': fp.write('\tproto=WPA\n') fp.write('\tpairwise=TKIP\n') fp.write('\tgroup=TKIP\n') elif encryption == 'WPA2': fp.write('\tproto=RSN\n') fp.write('\tpairwise=CCMP\n') fp.write('\tgroup=CCMP\n') else: fp.write('\tproto=WPA RSN\n') fp.write('\tpairwise=CCMP TKIP\n') fp.write('\tgroup=CCMP TKIP\n') fp.write('\tpsk="'+psk+'"\n') elif encryption == 'WEP': fp.write('\tkey_mgmt=NONE\n') if wepkeytype == 'ASCII': fp.write('\twep_key0="'+psk+'"\n') else: fp.write('\twep_key0='+psk+'\n') else: fp.write('\tkey_mgmt=NONE\n') fp.write('}') fp.write('\n') fp.close() #system('cat ' + getWlanConfigName(iface)) def activeNetwork(self,data): if data is True: iNetwork.activateInterface(self.testiface,self.activeNetworkCB) def activeNetworkCB(self,data): if data is True: print "active network done" self["ConfigTestInfo"].setText("Network config ok") self["ConfigTestInfo"].setForegroundColorNum(2) self["ConfigTest_OK"].setPixmapNum(0) self["ConfigTest_OK"].show() self.testTimer.start(500) else: print "active network fail" #####DHCP def checkipaddr(self): cmd = "ifconfig " + self.testiface self.ipConsole.ePopen(cmd, self.checkipaddrCB) def checkipaddrCB(self, result, retval, extra_args): ipadd = None for line in result.splitlines(): line = line.strip() if "inet addr" in line: ipadd = line self["DhcpTestInfo"].setText(ipadd) self["DhcpTestInfo"].setForegroundColorNum(2) self["DhcpTest_OK"].setPixmapNum(0) self["DhcpTest_OK"].show() self.testTimer.start(1000) break if ipadd is None: if self.checkip is False: cmds = [] cmds.append("ifdown " + self.testiface) cmds.append("ip addr flush dev " + self.testiface) # HACK: wpa_supplicant sometimes doesn't quit properly on SIGTERM if os_path.exists('/var/run/wpa_supplicant/'+ self.testiface): cmds.append("wpa_cli -i" + self.testiface + " terminate") cmds.append("ifup " + self.testiface) self.ipConsole.eBatch(cmds, self.getipaddrCB) else: self["DhcpTestInfo"].setText("Get ip fail! 
") self["DhcpTestInfo"].setForegroundColorNum(1) self["DhcpTest_OK"].setPixmapNum(1) self["DhcpTest_OK"].show() self.testTimer.start(1000) def getipaddrCB(self, result, retval=None, extra_args=None): self.checkip = True cmd = "ifconfig " + self.testiface self.ipConsole.ePopen(cmd, self.checkipaddrCB) def pingserver(self): iNetwork.checkNetworkState(self.checkNetworkCB) def checkNetworkCB(self,data): if data <= 2:#OK self["connectTestInfo"].setText(_("Network connection ok ")) self["connectTestInfo"].setForegroundColorNum(2) self["connectTest_OK"].setPixmapNum(0) self["connectTest_OK"].show() self.testitem.setTestResult(FactoryTestItem.TESTRESULT_OK) self["opreateInfo"].setText(_("Press 'OK' restart Press 'Exit' exit")) self._runing = False else: self["connectTestInfo"].setText(_("ping server fail! check network and check hardware!")) self["connectTestInfo"].setForegroundColorNum(1) self["connectTest_OK"].setPixmapNum(1) self["connectTest_OK"].show() self["opreateInfo"].setText(_("Press 'OK' restart Press 'Exit' exit")) self._runing = False if iNetwork.isWirelessInterface(self.testiface): os.system("mv "+self.interfacebackupname+" "+self.interfacename) os.system("rm "+self.wlanconfigname) def checkWifiIface(): wiressface = [] interfaces = iNetwork.getAdapterList() for face in interfaces: if iNetwork.isWirelessInterface(face): wiressface.append(face) return wiressface
openpli-arm/enigma2-arm
lib/python/Plugins/Extensions/FactoryTest/NetworkTest.py
Python
gpl-2.0
16,179
0.033191
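NetworkTest.pingServer() above shells out to ping with os.popen and scrapes the "% packet loss" figure with a regular expression. The same check can be written as a small standalone helper; the sketch below uses subprocess instead of os.popen, and the 8.8.8.8 test host simply mirrors the class default rather than anything required by the plugin.

import re
import subprocess


def ping_packet_loss(host="8.8.8.8", count=3, timeout=30):
    """Return the packet-loss percentage reported by ping, or None on failure."""
    try:
        completed = subprocess.run(
            ["ping", "-c", str(count), host],
            capture_output=True, text=True, timeout=timeout,
        )
    except (OSError, subprocess.TimeoutExpired):
        return None
    match = re.search(r"(\d+)% packet loss", completed.stdout)
    return int(match.group(1)) if match else None


if __name__ == "__main__":
    loss = ping_packet_loss()
    if loss == 0:
        print("Network Connection OK")
    else:
        print("Network Connection Error (loss=%s)" % loss)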
import os path = os.path.dirname(os.path.realpath(__file__)) sbmlFilePath = os.path.join(path, 'BIOMD0000000370.xml') with open(sbmlFilePath,'r') as f: sbmlString = f.read() def module_exists(module_name): try: __import__(module_name) except ImportError: return False else: return True if module_exists('libsbml'): import libsbml sbml = libsbml.readSBMLFromString(sbmlString)
biomodels/BIOMD0000000370
BIOMD0000000370/model.py
Python
cc0-1.0
427
0.009368
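model.py above only loads the SBML string and, when python-libsbml is importable, parses it into an SBMLDocument bound to the module-level name sbml. A caller might inspect that document along the following lines; importing the file as "model" and the guard around the optional attribute are assumptions about how the package is laid out, not part of the original file.

try:
    import model  # the BIOMD0000000370/model.py shown above
except ImportError:
    model = None

if model is not None and getattr(model, "sbml", None) is not None:
    document = model.sbml  # a libsbml.SBMLDocument
    m = document.getModel()
    if m is not None:
        print(m.getId(), "-", m.getNumSpecies(), "species,",
              m.getNumReactions(), "reactions")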
#!/usr/bin/env python """ Regular Expression Matching Implement regular expression matching with support for '.' and '*'. '.' Matches any single character. '*' Matches zero or more of the preceding element. The matching should cover the entire input string (not partial). The function prototype should be: bool isMatch(const char *s, const char *p) Some examples: isMatch("aa","a") → false isMatch("aa","aa") → true isMatch("aaa","aa") → false isMatch("aa", "a*") → true isMatch("aa", ".*") → true isMatch("ab", ".*") → true isMatch("aab", "c*a*b") → true """ class Solution(object): """ O(n^2) """ def isMatch(self, s, p): """ :type s: str :type p: str :rtype: bool """ m, n = len(p), len(s) table = [[False for j in xrange(n + 1)] for i in xrange(m + 1)] table[0][0] = True # assue * is zero for i in range(2, m + 1): if p[i - 1] == '*': table[i][0] = table[i - 2][0] for i in range(1, m + 1): for j in range(1, n + 1): if p[i - 1] != '*': if p[i - 1] == '.' or p[i - 1] == s[j - 1]: table[i][j] = table[i - 1][j - 1] else: if table[i - 2][j] == True: table[i][j] = True else: table[i][j] = table[i - 1][j] if p[i - 2] == s[j - 1] or p[i - 2] == '.': table[i][j] |= table[i][j - 1] return table[-1][-1]
weixsong/algorithm
leetcode/10.py
Python
mit
1,638
0.002463
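The Solution class above targets Python 2 (it uses xrange). For quick experimentation, the same O(m*n) table can be written as a standalone Python 3 function with the recurrence spelled out; this is a sketch of the same idea, not a replacement for the original class.

def is_match(s, p):
    """Return True if pattern p (with '.' and '*') matches all of s."""
    m, n = len(p), len(s)
    table = [[False] * (n + 1) for _ in range(m + 1)]
    table[0][0] = True
    # A leading run such as "a*b*c*" may match the empty string.
    for i in range(2, m + 1):
        if p[i - 1] == '*':
            table[i][0] = table[i - 2][0]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if p[i - 1] != '*':
                if p[i - 1] == '.' or p[i - 1] == s[j - 1]:
                    table[i][j] = table[i - 1][j - 1]
            else:
                # '*' taken as zero or one occurrence of the preceding element...
                table[i][j] = table[i - 2][j] or table[i - 1][j]
                # ...or as an extension of an existing run of that element.
                if p[i - 2] == '.' or p[i - 2] == s[j - 1]:
                    table[i][j] = table[i][j] or table[i][j - 1]
    return table[m][n]


if __name__ == "__main__":
    assert is_match("aa", "a*") and is_match("aab", "c*a*b") and is_match("ab", ".*")
    assert not is_match("aa", "a") and not is_match("aaa", "aa")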
# nvprof --print-gpu-trace python examples/stream/thrust.py import cupy x = cupy.array([1, 3, 2]) expected = x.sort() cupy.cuda.Device().synchronize() stream = cupy.cuda.stream.Stream() with stream: y = x.sort() stream.synchronize() cupy.testing.assert_array_equal(y, expected) stream = cupy.cuda.stream.Stream() stream.use() y = x.sort() stream.synchronize() cupy.testing.assert_array_equal(y, expected)
cupy/cupy
examples/stream/thrust.py
Python
mit
412
0
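The stream example above only checks correctness. A common next step is timing work submitted to a non-default stream with CUDA events; the sketch below assumes a CuPy installation with a working GPU, and the array size is arbitrary.

import cupy

x = cupy.random.random(1_000_000)
stream = cupy.cuda.stream.Stream()
start, stop = cupy.cuda.Event(), cupy.cuda.Event()

with stream:
    start.record(stream)
    y = cupy.sort(x)
    stop.record(stream)

stop.synchronize()
print("sort on stream took %.3f ms" % cupy.cuda.get_elapsed_time(start, stop))
cupy.testing.assert_array_equal(y, cupy.sort(x))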
import logging import os import site import time import typing from argparse import ArgumentParser import waitress from flask import Flask import cauldron as cd from cauldron import environ from cauldron import templating from cauldron.render.encoding import ComplexFlaskJsonEncoder from cauldron.session import writing APPLICATION = Flask('Cauldron') APPLICATION.json_encoder = ComplexFlaskJsonEncoder SERVER_VERSION = [0, 0, 1, 1] try: site_packages = list(site.getsitepackages()) except Exception: # pragma: no cover site_packages = [] active_execution_responses = dict() # type: typing.Dict[str, environ.Response] server_data = dict( version=SERVER_VERSION, user=os.environ.get('USER'), test=1, pid=os.getpid() ) authorization = {'code': ''} def get_server_data() -> dict: """...""" out = dict( uptime=environ.run_time().total_seconds(), cauldron_settings=environ.package_settings ) out.update(server_data) out.update(environ.systems.get_system_data()) return out def get_running_step_changes(write: bool = False) -> list: """...""" project = cd.project.get_internal_project() running_steps = list(filter( lambda step: step.is_running, project.steps )) def get_changes(step): step_data = writing.step_writer.serialize(step) if write: writing.save(project, step_data.file_writes) return dict( name=step.definition.name, action='updated', step=step_data._asdict(), timestamp=time.time(), written=write ) return [get_changes(step) for step in running_steps] def parse( args: typing.List[str] = None, arg_parser: ArgumentParser = None ) -> dict: """Parses the arguments for the cauldron server""" parser = arg_parser or create_parser() return vars(parser.parse_args(args)) def create_parser(arg_parser: ArgumentParser = None) -> ArgumentParser: """ Creates an argument parser populated with the arg formats for the server command. """ parser = arg_parser or ArgumentParser() parser.description = 'Cauldron kernel server' parser.add_argument( '-p', '--port', dest='port', type=int, default=5010 ) parser.add_argument( '-d', '--debug', dest='debug', default=False, action='store_true' ) parser.add_argument( '-v', '--version', dest='version', default=False, action='store_true' ) parser.add_argument( '-c', '--code', dest='authentication_code', type=str, default='' ) parser.add_argument( '-n', '--name', dest='host', type=str, default=None ) parser.add_argument( '--basic', action='store_true', help=""" When specified a basic Flask server will be used to serve the kernel instead of a waitress WSGI server. Use only when necessary as the Flask server isn't as robust. 
""" ) return parser def create_application( port: int = 5010, debug: bool = False, public: bool = False, host=None, authentication_code: str = '', quiet: bool = False, **kwargs ) -> dict: """...""" if kwargs.get('version'): environ.log('VERSION: {}'.format(environ.version)) return environ.systems.end(0) if host is None and public: host = '0.0.0.0' server_data['host'] = host server_data['port'] = port server_data['debug'] = debug server_data['id'] = environ.start_time.isoformat() authorization['code'] = authentication_code if authentication_code else '' if not debug: log = logging.getLogger('werkzeug') log.setLevel(logging.ERROR) if not quiet: templating.render_splash() environ.modes.add(environ.modes.INTERACTIVE) return {'application': APPLICATION, **server_data} def execute( port: int = 5010, debug: bool = False, public: bool = False, host=None, authentication_code: str = '', quiet: bool = False, **kwargs ): """...""" populated_server_data = create_application( port=port, debug=debug, public=public, host=host, authentication_code=authentication_code, quiet=quiet, **kwargs ) app = populated_server_data['application'] if kwargs.get('basic'): app.run(port=port, debug=debug, host=host) else: waitress.serve(app, port=port, host=host or 'localhost') environ.modes.remove(environ.modes.INTERACTIVE)
sernst/cauldron
cauldron/cli/server/run.py
Python
mit
4,787
0
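execute() above chooses between Flask's built-in server (the "basic" flag) and waitress for the same WSGI application. That choice, reduced to a self-contained sketch with an illustrative /ping route (not one of Cauldron's endpoints), looks like this:

import waitress
from flask import Flask

app = Flask("demo")


@app.route("/ping")
def ping():
    return {"status": "ok"}


def serve(port=5010, host=None, basic=False, debug=False):
    # Flask's development server only when explicitly requested; waitress otherwise.
    if basic:
        app.run(port=port, debug=debug, host=host)
    else:
        waitress.serve(app, port=port, host=host or "localhost")


if __name__ == "__main__":
    serve(basic=True, debug=True)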
"""User-friendly exception handler for swood.""" import http.client import traceback import sys import os __file__ = os.path.abspath(__file__) class ComplainToUser(Exception): """When used with ComplaintFormatter, tells the user what error (of theirs) caused the failure and exits.""" pass def can_submit(): if not os.path.isdir(os.path.expanduser("~/.swood")): os.mkdir(os.path.expanduser("~/.swood")) sbpath = os.path.expanduser("~/.swood/submit-bugs") if os.path.isfile(sbpath): try: with open(sbpath) as sb: resp = sb.read(1) if resp == "1": return 1 elif resp == "0": return 0 except: pass while True: resp = input( "Something went wrong. Do you want to send an anonymous bug report? (Type Y or N): ").lower() if resp in ("yes", "y", "true"): try: with open(sbpath, "w") as sb: sb.write("1") except: pass return 1 elif resp in ("no", "n", "false"): try: with open(sbpath, "w") as sb: sb.write("0") except: pass return 0 class ComplaintFormatter: """Notifies the user when the program fails predictably and uploads bug reports. When used in a with statement, ComplaintFormatter catches all exceptions. If the exception is a ComplainToUser exception, it will simply print the error message and exit (with an exit code of 1). If the exception is something else (i.e. an actual, unexpected exception), it will upload the traceback to the swood debug server (unless the user has opted out of sending bug reports.) """ def __init__(self, version=None): self.version = version def __enter__(self): pass def __exit__(self, exc_type, exc, tb): if isinstance(exc, ComplainToUser): print("Error: {}".format(exc), file=sys.stderr) sys.exit(1) elif isinstance(exc, Exception): # scrub stack of full path names for extra privacy # also normalizes the paths, helping to detect dupes scrubbed_stack = traceback.extract_tb(tb) # cut off traces of stuff that isn't ours others_cutoff = next(idx for idx, fs in enumerate(scrubbed_stack) if os.path.samefile( os.path.dirname(fs.filename), os.path.dirname(__file__))) scrubbed_stack = scrubbed_stack[others_cutoff:] # rewrite paths so they contain only relative directories # (hides username on Windows and Linux) dirstart = os.path.abspath( os.path.join(os.path.dirname(__file__), "..")) for fs in scrubbed_stack: fs.filename = os.path.relpath( fs.filename, start=dirstart).replace("\\", "/") str_tb = "Traceback (most recent call last):\n" + \ "".join(traceback.format_list(scrubbed_stack)) + \ "".join(traceback.format_exception_only(exc_type, exc)) if self.version is not None: str_tb = "# " + self.version + "\n" + str_tb if "--optout" in sys.argv or "-o" in sys.argv: print( "Something went wrong. A bug report will not be sent because of your command-line flag.", file=sys.stderr) return False elif os.environ.get("SWOOD_OPTOUT") == "1": print( "Something went wrong. A bug report will not be sent because of your environment variable.", file=sys.stderr) return False elif not can_submit(): print( "Something went wrong. A bug report will not be sent because of your config setting.", file=sys.stderr) return False else: print( "Something went wrong. A bug report will be sent to help figure it out. 
(see --optout)", file=sys.stderr) try: conn = http.client.HTTPSConnection("meme.institute") conn.request("POST", "/swood/bugs/submit", str_tb) resp = conn.getresponse().read().decode("utf-8") if resp == "done": print("New bug submitted!", file=sys.stderr) elif resp == "dupe": print( "This bug is already in the queue to be fixed.", file=sys.stderr) else: raise Exception except Exception: print("Submission of bug report failed.", file=sys.stderr) traceback.print_exc() return True
milkey-mouse/swood
swood/complain.py
Python
mit
4,905
0.003262
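The heart of ComplaintFormatter is its __exit__ hook: known, user-facing errors are printed and turned into an exit code, while anything unexpected is handled separately. A minimal version of that pattern, without the bug-report upload, is sketched below; UserError and Complain are illustrative names.

import sys


class UserError(Exception):
    """An error caused by the user's input rather than a program bug."""
    pass


class Complain:
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        if isinstance(exc, UserError):
            # Report the user's mistake briefly and exit with a failure code.
            print("Error: {}".format(exc), file=sys.stderr)
            sys.exit(1)
        return False  # let unexpected exceptions propagate


if __name__ == "__main__":
    with Complain():
        raise UserError("input file not found")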
# Mantid Repository : https://github.com/mantidproject/mantid # # Copyright &copy; 2018 ISIS Rutherford Appleton Laboratory UKRI, # NScD Oak Ridge National Laboratory, European Spallation Source # & Institut Laue - Langevin # SPDX - License - Identifier: GPL - 3.0 + #pylint: disable=no-init,attribute-defined-outside-init import systemtesting from mantid.simpleapi import * from reduction_workflow.instruments.sans.sns_command_interface import * from reduction_workflow.instruments.sans.hfir_command_interface import * FILE_LOCATION = "/SNS/EQSANS/IPTS-5636/data/" class EQSANSFlatTest(systemtesting.MantidSystemTest): def requiredFiles(self): files = [] files.append(FILE_LOCATION+"EQSANS_5704_event.nxs") files.append(FILE_LOCATION+"EQSANS_5734_event.nxs") files.append(FILE_LOCATION+"EQSANS_5732_event.nxs") files.append(FILE_LOCATION+"EQSANS_5738_event.nxs") files.append(FILE_LOCATION+"EQSANS_5729_event.nxs") files.append(FILE_LOCATION+"EQSANS_5737_event.nxs") files.append(FILE_LOCATION+"EQSANS_5703_event.nxs") files.append("bl6_flux_at_sample") return files def runTest(self): """ System test for EQSANS. This test is meant to be run at SNS and takes a long time. It is used to verify that the complete reduction chain works and reproduces reference results. """ configI = ConfigService.Instance() configI["facilityName"]='SNS' EQSANS() SolidAngle() DarkCurrent(FILE_LOCATION+"EQSANS_5704_event.nxs") TotalChargeNormalization(beam_file="bl6_flux_at_sample") AzimuthalAverage(n_bins=100, n_subpix=1, log_binning=False) IQxQy(nbins=100) UseConfigTOFTailsCutoff(True) PerformFlightPathCorrection(True) UseConfigMask(True) SetBeamCenter(89.6749, 129.693) SensitivityCorrection(FILE_LOCATION+'EQSANS_5703_event.nxs', min_sensitivity=0.5, max_sensitivity=1.5, use_sample_dc=True) DirectBeamTransmission(FILE_LOCATION+"EQSANS_5734_event.nxs", FILE_LOCATION+"EQSANS_5738_event.nxs", beam_radius=3) ThetaDependentTransmission(False) AppendDataFile([FILE_LOCATION+"EQSANS_5729_event.nxs"]) CombineTransmissionFits(True) Background(FILE_LOCATION+"EQSANS_5732_event.nxs") BckDirectBeamTransmission(FILE_LOCATION+"EQSANS_5737_event.nxs", FILE_LOCATION+"EQSANS_5738_event.nxs", beam_radius=3) BckThetaDependentTransmission(False) BckCombineTransmissionFits(True) SaveIqAscii(process='None') SetAbsoluteScale(277.781) Reduce1D() # This reference is old, ignore the first non-zero point and # give the comparison a reasonable tolerance (less than 0.5%). mtd['EQSANS_5729_event_frame1_Iq'].dataY(0)[1] = 856.30028119108 def validate(self): self.tolerance = 5.0 self.disableChecking.append('Instrument') self.disableChecking.append('Sample') self.disableChecking.append('SpectraMap') self.disableChecking.append('Axes') return "EQSANS_5729_event_frame1_Iq", 'EQSANSFlatTest.nxs'
mganeva/mantid
Testing/SystemTests/tests/analysis/EQSANSFlatTestAPIv2.py
Python
gpl-3.0
3,330
0.001201
"""Functions for workloads.""" from utils.conf import cfme_performance def get_capacity_and_utilization_replication_scenarios(): if 'test_cap_and_util_rep' in cfme_performance.get('tests', {}).get('workloads', []): if (cfme_performance['tests']['workloads']['test_cap_and_util_rep']['scenarios'] and len( cfme_performance['tests']['workloads']['test_cap_and_util_rep']['scenarios']) > 0): # Add Replication Master into Scenario(s): for scn in cfme_performance['tests']['workloads']['test_cap_and_util_rep']['scenarios']: scn['replication_master'] = cfme_performance['replication_master'] return cfme_performance['tests']['workloads']['test_cap_and_util_rep']['scenarios'] return [] def get_capacity_and_utilization_scenarios(): if 'test_cap_and_util' in cfme_performance.get('tests', {}).get('workloads', []): if (cfme_performance['tests']['workloads']['test_cap_and_util']['scenarios'] and len(cfme_performance['tests']['workloads']['test_cap_and_util']['scenarios']) > 0): return cfme_performance['tests']['workloads']['test_cap_and_util']['scenarios'] return [] def get_idle_scenarios(): if 'test_idle' in cfme_performance.get('tests', {}).get('workloads', []): if(cfme_performance['tests']['workloads']['test_idle']['scenarios'] and len(cfme_performance['tests']['workloads']['test_idle']['scenarios']) > 0): return cfme_performance['tests']['workloads']['test_idle']['scenarios'] return [] def get_provisioning_scenarios(): if 'test_provisioning' in cfme_performance.get('tests', {}).get('workloads', []): if(cfme_performance['tests']['workloads']['test_provisioning']['scenarios'] and len(cfme_performance['tests']['workloads']['test_provisioning']['scenarios']) > 0): return cfme_performance['tests']['workloads']['test_provisioning']['scenarios'] return [] def get_refresh_providers_scenarios(): if 'test_refresh_providers' in cfme_performance.get('tests', {}).get('workloads', []): if (cfme_performance['tests']['workloads']['test_refresh_providers']['scenarios'] and len( cfme_performance['tests']['workloads']['test_refresh_providers']['scenarios']) > 0): return cfme_performance['tests']['workloads']['test_refresh_providers']['scenarios'] return [] def get_refresh_vms_scenarios(): if 'test_refresh_vms' in cfme_performance.get('tests', {}).get('workloads', []): if (cfme_performance['tests']['workloads']['test_refresh_vms']['scenarios'] and len(cfme_performance['tests']['workloads']['test_refresh_vms']['scenarios']) > 0): return cfme_performance['tests']['workloads']['test_refresh_vms']['scenarios'] return [] def get_smartstate_analysis_scenarios(): if 'test_smartstate' in cfme_performance.get('tests', {}).get('workloads', []): if(cfme_performance['tests']['workloads']['test_smartstate']['scenarios'] and len(cfme_performance['tests']['workloads']['test_smartstate']['scenarios']) > 0): return cfme_performance['tests']['workloads']['test_smartstate']['scenarios'] return [] def get_ui_single_page_scenarios(): if 'test_ui_single_page' in cfme_performance.get('tests', {}).get('ui_workloads', []): if(cfme_performance['tests']['ui_workloads']['test_ui_single_page']['scenarios'] and len(cfme_performance['tests']['ui_workloads']['test_ui_single_page']['scenarios']) > 0): return cfme_performance['tests']['ui_workloads']['test_ui_single_page']['scenarios'] return []
dajohnso/cfme_tests
utils/workloads.py
Python
gpl-2.0
3,720
0.008602
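Each accessor above repeats the same three-level lookup into cfme_performance. One way to factor that out is a single helper that the per-workload functions could wrap; the sketch below operates on a plain dict so it runs stand-alone, and the sample configuration is made up.

def _get_scenarios(conf, test_name, section="workloads"):
    """Return the scenario list for one workload, or [] when missing/empty."""
    workloads = conf.get("tests", {}).get(section, {})
    scenarios = workloads.get(test_name, {}).get("scenarios")
    return list(scenarios) if scenarios else []


if __name__ == "__main__":
    sample_conf = {
        "tests": {"workloads": {"test_idle": {"scenarios": [{"name": "idle-default"}]}}}
    }
    assert _get_scenarios(sample_conf, "test_idle") == [{"name": "idle-default"}]
    assert _get_scenarios(sample_conf, "test_provisioning") == []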
# -*- coding: utf-8 -*- # # This class was auto-generated from the API references found at # https://epayments-api.developer-ingenico.com/s2sapi/v1/ # from ingenico.connect.sdk.data_object import DataObject from ingenico.connect.sdk.domain.payment.definitions.customer_account_authentication import CustomerAccountAuthentication from ingenico.connect.sdk.domain.payment.definitions.customer_payment_activity import CustomerPaymentActivity from ingenico.connect.sdk.domain.payment.definitions.payment_account_on_file import PaymentAccountOnFile class CustomerAccount(DataObject): """ | Object containing data related to the account the customer has with you """ __authentication = None __change_date = None __changed_during_checkout = None __create_date = None __had_suspicious_activity = None __has_forgotten_password = None __has_password = None __password_change_date = None __password_changed_during_checkout = None __payment_account_on_file = None __payment_account_on_file_type = None __payment_activity = None @property def authentication(self): """ | Object containing data on the authentication used by the customer to access their account Type: :class:`ingenico.connect.sdk.domain.payment.definitions.customer_account_authentication.CustomerAccountAuthentication` """ return self.__authentication @authentication.setter def authentication(self, value): self.__authentication = value @property def change_date(self): """ | The last date (YYYYMMDD) on which the customer made changes to their account with you. These are changes to billing & shipping address details, new payment account (tokens), or new users(s) added. Type: str """ return self.__change_date @change_date.setter def change_date(self, value): self.__change_date = value @property def changed_during_checkout(self): """ | true = the customer made changes to their account during this checkout | false = the customer didn't change anything to their account during this checkout/n | The changes ment here are changes to billing & shipping address details, new payment account (tokens), or new users(s) added. 
Type: bool """ return self.__changed_during_checkout @changed_during_checkout.setter def changed_during_checkout(self, value): self.__changed_during_checkout = value @property def create_date(self): """ | The date (YYYYMMDD) on which the customer created their account with you Type: str """ return self.__create_date @create_date.setter def create_date(self, value): self.__create_date = value @property def had_suspicious_activity(self): """ | Specifies if you have experienced suspicious activity on the account of the customer | true = you have experienced suspicious activity (including previous fraud) on the customer account used for this transaction | false = you have experienced no suspicious activity (including previous fraud) on the customer account used for this transaction Type: bool """ return self.__had_suspicious_activity @had_suspicious_activity.setter def had_suspicious_activity(self, value): self.__had_suspicious_activity = value @property def has_forgotten_password(self): """ | Specifies if the customer (initially) had forgotten their password * true - The customer has forgotten their password * false - The customer has not forgotten their password Type: bool """ return self.__has_forgotten_password @has_forgotten_password.setter def has_forgotten_password(self, value): self.__has_forgotten_password = value @property def has_password(self): """ | Specifies if the customer entered a password to gain access to an account registered with the you * true - The customer has used a password to gain access * false - The customer has not used a password to gain access Type: bool """ return self.__has_password @has_password.setter def has_password(self, value): self.__has_password = value @property def password_change_date(self): """ | The last date (YYYYMMDD) on which the customer changed their password for the account used in this transaction Type: str """ return self.__password_change_date @password_change_date.setter def password_change_date(self, value): self.__password_change_date = value @property def password_changed_during_checkout(self): """ | Indicates if the password of an account is changed during this checkout | true = the customer made changes to their password of the account used during this checkout | alse = the customer didn't change anything to their password of the account used during this checkout Type: bool """ return self.__password_changed_during_checkout @password_changed_during_checkout.setter def password_changed_during_checkout(self, value): self.__password_changed_during_checkout = value @property def payment_account_on_file(self): """ | Object containing information on the payment account data on file (tokens) Type: :class:`ingenico.connect.sdk.domain.payment.definitions.payment_account_on_file.PaymentAccountOnFile` """ return self.__payment_account_on_file @payment_account_on_file.setter def payment_account_on_file(self, value): self.__payment_account_on_file = value @property def payment_account_on_file_type(self): """ | Indicates the type of account. For example, for a multi-account card product. 
* not-applicable = the card used doesn't support multiple card products * credit = the card used is a credit card * debit = the card used is a debit card Type: str """ return self.__payment_account_on_file_type @payment_account_on_file_type.setter def payment_account_on_file_type(self, value): self.__payment_account_on_file_type = value @property def payment_activity(self): """ | Object containing data on the purchase history of the customer with you Type: :class:`ingenico.connect.sdk.domain.payment.definitions.customer_payment_activity.CustomerPaymentActivity` """ return self.__payment_activity @payment_activity.setter def payment_activity(self, value): self.__payment_activity = value def to_dictionary(self): dictionary = super(CustomerAccount, self).to_dictionary() if self.authentication is not None: dictionary['authentication'] = self.authentication.to_dictionary() if self.change_date is not None: dictionary['changeDate'] = self.change_date if self.changed_during_checkout is not None: dictionary['changedDuringCheckout'] = self.changed_during_checkout if self.create_date is not None: dictionary['createDate'] = self.create_date if self.had_suspicious_activity is not None: dictionary['hadSuspiciousActivity'] = self.had_suspicious_activity if self.has_forgotten_password is not None: dictionary['hasForgottenPassword'] = self.has_forgotten_password if self.has_password is not None: dictionary['hasPassword'] = self.has_password if self.password_change_date is not None: dictionary['passwordChangeDate'] = self.password_change_date if self.password_changed_during_checkout is not None: dictionary['passwordChangedDuringCheckout'] = self.password_changed_during_checkout if self.payment_account_on_file is not None: dictionary['paymentAccountOnFile'] = self.payment_account_on_file.to_dictionary() if self.payment_account_on_file_type is not None: dictionary['paymentAccountOnFileType'] = self.payment_account_on_file_type if self.payment_activity is not None: dictionary['paymentActivity'] = self.payment_activity.to_dictionary() return dictionary def from_dictionary(self, dictionary): super(CustomerAccount, self).from_dictionary(dictionary) if 'authentication' in dictionary: if not isinstance(dictionary['authentication'], dict): raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['authentication'])) value = CustomerAccountAuthentication() self.authentication = value.from_dictionary(dictionary['authentication']) if 'changeDate' in dictionary: self.change_date = dictionary['changeDate'] if 'changedDuringCheckout' in dictionary: self.changed_during_checkout = dictionary['changedDuringCheckout'] if 'createDate' in dictionary: self.create_date = dictionary['createDate'] if 'hadSuspiciousActivity' in dictionary: self.had_suspicious_activity = dictionary['hadSuspiciousActivity'] if 'hasForgottenPassword' in dictionary: self.has_forgotten_password = dictionary['hasForgottenPassword'] if 'hasPassword' in dictionary: self.has_password = dictionary['hasPassword'] if 'passwordChangeDate' in dictionary: self.password_change_date = dictionary['passwordChangeDate'] if 'passwordChangedDuringCheckout' in dictionary: self.password_changed_during_checkout = dictionary['passwordChangedDuringCheckout'] if 'paymentAccountOnFile' in dictionary: if not isinstance(dictionary['paymentAccountOnFile'], dict): raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['paymentAccountOnFile'])) value = PaymentAccountOnFile() self.payment_account_on_file = 
value.from_dictionary(dictionary['paymentAccountOnFile']) if 'paymentAccountOnFileType' in dictionary: self.payment_account_on_file_type = dictionary['paymentAccountOnFileType'] if 'paymentActivity' in dictionary: if not isinstance(dictionary['paymentActivity'], dict): raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['paymentActivity'])) value = CustomerPaymentActivity() self.payment_activity = value.from_dictionary(dictionary['paymentActivity']) return self
Ingenico-ePayments/connect-sdk-python2
ingenico/connect/sdk/domain/payment/definitions/customer_account.py
Python
mit
10,943
0.005209
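CustomerAccount is a plain data object whose to_dictionary/from_dictionary methods map snake_case attributes to the camelCase keys of the API payload. A round trip through those methods might look like the following, assuming the connect-sdk-python2 package is installed; the field values are arbitrary.

from ingenico.connect.sdk.domain.payment.definitions.customer_account import CustomerAccount

account = CustomerAccount()
account.create_date = "20210101"
account.has_password = True

payload = account.to_dictionary()  # {'createDate': '20210101', 'hasPassword': True}
restored = CustomerAccount().from_dictionary(payload)
assert restored.create_date == account.create_date
assert restored.has_password is True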
from sympy import Basic from sympy.printing.mathml import mathml import tempfile import os def print_gtk(x, start_viewer=True): """Print to Gtkmathview, a gtk widget capable of rendering MathML. Needs libgtkmathview-bin""" from sympy.utilities.mathml import c2p tmp = tempfile.mktemp() # create a temp file to store the result file = open(tmp, 'wb') file.write( c2p(mathml(x), simple=True) ) file.close() if start_viewer: os.system("mathmlviewer " + tmp)
hazelnusse/sympy-old
sympy/printing/gtk.py
Python
bsd-3-clause
498
0.008032
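print_gtk above relies on tempfile.mktemp, which is documented as race-prone. A safer variant of the same write-then-launch flow could use NamedTemporaryFile; this is only a sketch of the file handling, not a change to sympy's API, and it takes already-rendered bytes so it stays independent of the MathML helpers.

import os
import tempfile


def view_mathml(rendered_bytes, start_viewer=True):
    """Write rendered MathML/XHTML bytes to a temp file and open a viewer."""
    with tempfile.NamedTemporaryFile(suffix=".xhtml", delete=False) as f:
        f.write(rendered_bytes)
        tmp = f.name
    if start_viewer:
        os.system("mathmlviewer " + tmp)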
"""Add rtp_task_multiple_process_event table Revision ID: 5feda4ca9935 Revises: 9d9af47e64c8 Create Date: 2021-09-30 16:22:30.118641+00:00 """ from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import postgresql # revision identifiers, used by Alembic. revision = "5feda4ca9935" down_revision = "9d9af47e64c8" branch_labels = None depends_on = None def upgrade(): op.create_table( "rtp_task_multiple_process_event", sa.Column("time", sa.BigInteger(), nullable=False), sa.Column("obsid_start", sa.BigInteger(), nullable=False), sa.Column("task_name", sa.Text(), nullable=False), sa.Column( "event", sa.Enum( "started", "finished", "error", name="rtp_task_multiple_process_enum" ), nullable=False, ), sa.ForeignKeyConstraint( ["obsid_start"], ["hera_obs.obsid"], ), sa.PrimaryKeyConstraint("time", "obsid_start", "task_name"), ) def downgrade(): op.drop_table("rtp_task_multiple_process_event")
HERA-Team/hera_mc
alembic/versions/5feda4ca9935_add_rtp_task_multiple_process_event_table.py
Python
bsd-2-clause
1,093
0.000915
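One detail worth noting about this revision: on PostgreSQL, op.drop_table does not remove the ENUM type that the upgrade created implicitly, so repeated downgrade/upgrade cycles can fail on the pre-existing type. A downgrade that also cleans up the type might look like the sketch below; it reuses the op/sa imports from the revision above and is a common Alembic pattern, not part of the original migration.

def downgrade_with_enum_cleanup():
    op.drop_table("rtp_task_multiple_process_event")
    # Explicitly drop the implicitly created PostgreSQL ENUM type.
    sa.Enum(name="rtp_task_multiple_process_enum").drop(
        op.get_bind(), checkfirst=True
    )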
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import TYPE_CHECKING import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.paging import ItemPaged from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpRequest, HttpResponse from azure.core.polling import LROPoller, NoPolling, PollingMethod from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.arm_polling import ARMPolling from .. import models as _models if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class AzureFirewallsOperations(object): """AzureFirewallsOperations operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.network.v2019_08_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config def _delete_initial( self, resource_group_name, # type: str azure_firewall_name, # type: str **kwargs # type: Any ): # type: (...) 
-> None cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-08-01" # Construct URL url = self._delete_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore def begin_delete( self, resource_group_name, # type: str azure_firewall_name, # type: str **kwargs # type: Any ): # type: (...) -> LROPoller[None] """Deletes the specified Azure Firewall. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param azure_firewall_name: The name of the Azure Firewall. :type azure_firewall_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of LROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = self._delete_initial( resource_group_name=resource_group_name, azure_firewall_name=azure_firewall_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = NoPolling() else: polling_method = polling if cont_token: return LROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return LROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore def get( self, resource_group_name, # type: str azure_firewall_name, # type: str **kwargs # type: Any ): # type: (...) -> "_models.AzureFirewall" """Gets the specified Azure Firewall. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param azure_firewall_name: The name of the Azure Firewall. 
:type azure_firewall_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: AzureFirewall, or the result of cls(response) :rtype: ~azure.mgmt.network.v2019_08_01.models.AzureFirewall :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewall"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-08-01" accept = "application/json" # Construct URL url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('AzureFirewall', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore def _create_or_update_initial( self, resource_group_name, # type: str azure_firewall_name, # type: str parameters, # type: "_models.AzureFirewall" **kwargs # type: Any ): # type: (...) 
-> "_models.AzureFirewall" cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewall"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-08-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self._create_or_update_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(parameters, 'AzureFirewall') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('AzureFirewall', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('AzureFirewall', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore def begin_create_or_update( self, resource_group_name, # type: str azure_firewall_name, # type: str parameters, # type: "_models.AzureFirewall" **kwargs # type: Any ): # type: (...) -> LROPoller["_models.AzureFirewall"] """Creates or updates the specified Azure Firewall. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param azure_firewall_name: The name of the Azure Firewall. :type azure_firewall_name: str :param parameters: Parameters supplied to the create or update Azure Firewall operation. :type parameters: ~azure.mgmt.network.v2019_08_01.models.AzureFirewall :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of LROPoller that returns either AzureFirewall or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_08_01.models.AzureFirewall] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewall"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = self._create_or_update_initial( resource_group_name=resource_group_name, azure_firewall_name=azure_firewall_name, parameters=parameters, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('AzureFirewall', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = NoPolling() else: polling_method = polling if cont_token: return LROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return LROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore def update_tags( self, resource_group_name, # type: str azure_firewall_name, # type: str parameters, # type: "_models.AzureFirewall" **kwargs # type: Any ): # type: (...) -> "_models.AzureFirewall" """Updates tags for an Azure Firewall resource. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param azure_firewall_name: The name of the Azure Firewall. :type azure_firewall_name: str :param parameters: Parameters supplied to the create or update Azure Firewall operation. 
:type parameters: ~azure.mgmt.network.v2019_08_01.models.AzureFirewall :keyword callable cls: A custom type or function that will be passed the direct response :return: AzureFirewall, or the result of cls(response) :rtype: ~azure.mgmt.network.v2019_08_01.models.AzureFirewall :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewall"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-08-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self.update_tags.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(parameters, 'AzureFirewall') body_content_kwargs['content'] = body_content request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('AzureFirewall', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore def list( self, resource_group_name, # type: str **kwargs # type: Any ): # type: (...) -> Iterable["_models.AzureFirewallListResult"] """Lists all Azure Firewalls in a resource group. :param resource_group_name: The name of the resource group. 
:type resource_group_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either AzureFirewallListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_08_01.models.AzureFirewallListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewallListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-08-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request def extract_data(pipeline_response): deserialized = self._deserialize('AzureFirewallListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls'} # type: ignore def list_all( self, **kwargs # type: Any ): # type: (...) -> Iterable["_models.AzureFirewallListResult"] """Gets all the Azure Firewalls in a subscription. 
:keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either AzureFirewallListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_08_01.models.AzureFirewallListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewallListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-08-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list_all.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request def extract_data(pipeline_response): deserialized = self._deserialize('AzureFirewallListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/azureFirewalls'} # type: ignore
Azure/azure-sdk-for-python
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_08_01/operations/_azure_firewalls_operations.py
Python
mit
26,909
0.004645
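A hedged usage sketch for the generated operations above: in practice they are reached through the versioned NetworkManagementClient rather than instantiated directly. The subscription id, resource group, firewall name and tags below are placeholder assumptions, not values from the source.

from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient

# Placeholder identifiers; substitute real values.
client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
poller = client.azure_firewalls.begin_create_or_update(
    "example-rg",          # resource_group_name
    "example-firewall",    # azure_firewall_name
    {"location": "westeurope", "tags": {"env": "test"}},
)
firewall = poller.result()   # LROPoller blocks here until provisioning finishes
print(firewall.name, firewall.provisioning_state)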
#!/usr/bin/env python # -*- coding: utf-8 -*- # from httpwookiee.config import ConfigFactory from httpwookiee.core.tools import Tools, outmsg, inmsg from httpwookiee.http.parser.responses import Responses import socket import ipaddress import ssl import six class ClosedSocketError(Exception): """Raise this when the tcp/ip connection is unexpectedly closed.""" class Client(object): """Main HTTP Client, HTTP request launcher.""" hostip = None port = None host = b'' https = False _sock = None _hostip = False def __init__(self, host=None, port=None, hostip=None): """Ensure settings are ready.""" self.config = ConfigFactory.getConfig() if host is None: self.host = self.config.get('SERVER_HOST') else: self.host = host if port is None: self.port = self.config.getint('SERVER_PORT') else: self.port = port self.hostip = hostip if self.hostip is None and '' != self.config.get('SERVER_IP'): self.hostip = self.config.get('SERVER_IP') self.https = self.config.getboolean('SERVER_SSL') self._sock = None def __enter__(self): """Launch the socket opening.""" self.open() return self def __exit__(self, exc_type, exc_val, exc_tb): """Send a socket close.""" return self.close() def open(self): """Open client socket connection.""" if self.hostip is None: outmsg('# searching host IP (DNS) for {0} '.format(self.host)) self.hostip = socket.getaddrinfo(self.host, self.port)[0][4][0] self._ci() try: if not self._hostip: raise Exception(u'\u0262\u0046\u0059') outmsg( '# Connecting to Host: {0} IP: {1} PORT: {2}'.format( self.host, self.hostip, self.port)) self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._sock.settimeout(10) except socket.error as msg: outmsg("[ERROR] {0}".format(str(msg))) raise Exception('error creating socket') outmsg('# socket ok') if self.https: try: outmsg('# Establishing SSL layer') self._sock = ssl.wrap_socket(self._sock, cert_reqs=ssl.CERT_NONE) except: outmsg("[SSL ERROR]") raise Exception('error establishing SSL connection') try: self._sock.connect((self.hostip, self.port)) except socket.error as msg: outmsg("[ERROR] {0}".format(str(msg))) raise Exception('error establishing socket connect') outmsg('# client connection established.') def close_sending(self): """First closing step, cut the sending part of the socket.""" try: outmsg('# closing client connection send canal ' '(can still receive).') self._sock.shutdown(socket.SHUT_WR) except OSError: raise ClosedSocketError('closed socket detected on send close') def close(self): """Ensure the tcp/ip socket is really closed.""" if self._sock is not None: outmsg('# closing client connection.') try: self._sock.shutdown(socket.SHUT_RDWR) except Exception: # already closed pass self._sock.close() self._sock = None def _ci(self): self._hostip = ipaddress.ip_address(self.hostip).is_private def send(self, request): """Send given request on the socket, support delayed emission.""" msg = request.getBytesStream() msglen = len(msg) outmsg('# SENDING ({0}) =====>'.format(msglen)) # here we use the not-so real format (special bytes are not # replaced in str(), only in getBytesStream()) Tools.print_message(six.text_type(request), cleanup=True) try: self._socket_send(msg) except socket.error as errmsg: outmsg('#<====ABORTED COMMUNICATION WHILE' ' SENDING {0}\n#{1}'.format(six.text_type(msg), errmsg)) return while request.is_delayed: msg = request.getDelayedOutput() msglen = len(msg) outmsg('# SENDING Delayed ({0}) =====>'.format(msglen)) # hopefully we do not use strange bytes in delayed chunks for now Tools.print_message(six.text_type(msg), 
cleanup=True) try: self._socket_send(msg) except socket.error as errmsg: outmsg('#<====ABORTED COMMUNICATION WHILE' ' SENDING (delayed) ' '{0}\r\n#{1}'.format(six.text_type(msg), errmsg)) return def read_all(self, timeout=None, buffsize=None): """Read all the stream, waiting for EOS, return all responses.""" output = '' if timeout is None: timeout = float(self.config.getint( 'CLIENT_SOCKET_READ_TIMEOUT_MS')) timeout = timeout / 1000 if buffsize is None: buffsize = self.config.getint('CLIENT_SOCKET_READ_SIZE') try: output = self._socket_read(timeout, buffsize) except socket.error as msg: inmsg('#<====ABORTED RESPONSE WHILE READING: {0}'.format(str(msg))) inmsg('# <====FINAL RESPONSE===============') inmsg(output) responses = Responses().parse(output) return responses def _socket_send(self, message): msglen = len(message) totalsent = 0 outmsg('# ====================>') while totalsent < msglen: outmsg('# ...') sent = self._sock.send(message[totalsent:]) if sent == 0: raise RuntimeError("socket connection broken") totalsent = totalsent + sent def _socket_read(self, timeout, buffsize): inmsg('# <==== READING <===========') read = b'' # we use blocking socket, set short timeouts if you want # to detect end of response streams if 0 == timeout: self._sock.settimeout(None) else: self._sock.settimeout(timeout) try: # blocking read data = self._sock.recv(buffsize) while (len(data)): inmsg('# ...') read += data data = self._sock.recv(buffsize) except socket.timeout: inmsg('# read timeout({0}), nothing more is coming'.format( timeout)) return read
regilero/HTTPWookiee
httpwookiee/http/client.py
Python
gpl-3.0
6,832
0.000146
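A minimal sketch of driving the Client above end to end; `request` is a hypothetical stand-in for any httpwookiee request object exposing the getBytesStream()/getDelayedOutput()/is_delayed interface that send() consumes. Note that open() deliberately refuses targets whose resolved IP is not private.

from httpwookiee.http.client import Client

def probe(request):
    # 'request' is a placeholder: anything implementing the interface used by send().
    with Client(host='target.example', port=8080) as cli:
        cli.send(request)
        responses = cli.read_all()   # blocks until the read timeout, then parses the stream
    return responses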
from nose.tools import * # flake8: noqa from api.base import settings from tests.base import ApiTestCase # The versions below are specifically for testing purposes and do not reflect the actual versioning of the API. # If changes are made to this list, or to DEFAULT_VERSION, please reflect those changes in: # api/base/settings/local-travis.py so that travis tests will pass. TESTING_ALLOWED_VERSIONS = ( '2.0', '2.0.1', '2.1', '2.2', '3.0', '3.0.1', ) DEFAULT_VERSION = '2.0' class VersioningTestCase(ApiTestCase): def setUp(self): super(VersioningTestCase, self).setUp() self.valid_url_path_version = '2.0' self.valid_header_version = '2.0.1' self.valid_query_parameter_version = '2.1' self.invalid_url_path_version = '1.0' self.invalid_header_version = '1.0.1' self.invalid_query_parameter_version = '1.1' self.valid_url_path_version_url = '/v2/' self.invalid_url_path_version_url = '/v1/' self.valid_query_parameter_version_url = '/v2/?version={}'.format(self.valid_query_parameter_version) self.invalid_query_parameter_version_url = '/v2/?version={}'.format(self.invalid_query_parameter_version) self._ALLOWED_VERSIONS = settings.REST_FRAMEWORK['ALLOWED_VERSIONS'] self._DEFAULT_VERSION = settings.REST_FRAMEWORK['DEFAULT_VERSION'] settings.REST_FRAMEWORK['ALLOWED_VERSIONS'] = TESTING_ALLOWED_VERSIONS settings.REST_FRAMEWORK['DEFAULT_VERSION'] = DEFAULT_VERSION def tearDown(self): super(VersioningTestCase, self).tearDown() settings.REST_FRAMEWORK['ALLOWED_VERSIONS'] = self._ALLOWED_VERSIONS settings.REST_FRAMEWORK['DEFAULT_VERSION'] = self._DEFAULT_VERSION class TestBaseVersioning(VersioningTestCase): def setUp(self): super(TestBaseVersioning, self).setUp() def test_url_path_version(self): res = self.app.get(self.valid_url_path_version_url) assert_equal(res.status_code, 200) assert_equal(res.json['meta']['version'], self.valid_url_path_version) def test_header_version(self): headers = {'accept': 'application/vnd.api+json;version={}'.format(self.valid_header_version)} res = self.app.get(self.valid_url_path_version_url, headers=headers) assert_equal(res.status_code, 200) assert_equal(res.json['meta']['version'], self.valid_header_version) def test_query_param_version(self): res = self.app.get(self.valid_query_parameter_version_url) assert_equal(res.status_code, 200) assert_equal(res.json['meta']['version'], self.valid_query_parameter_version) def test_url_path_version_not_in_allowed_versions(self): res = self.app.get(self.invalid_url_path_version_url, expect_errors=True) assert_equal(res.status_code, 404) def test_header_version_not_in_allowed_versions(self): headers = {'accept': 'application/vnd.api+json;version={}'.format(self.invalid_header_version)} res = self.app.get(self.valid_url_path_version_url, headers=headers, expect_errors=True) assert_equal(res.status_code, 406) assert_equal(res.json['errors'][0]['detail'], 'Invalid version in "Accept" header.') def test_query_param_version_not_in_allowed_versions(self): res = self.app.get(self.invalid_query_parameter_version_url, expect_errors=True) assert_equal(res.status_code, 404) assert_equal(res.json['errors'][0]['detail'], 'Invalid version in query parameter.') def test_query_parameter_version_not_within_url_path_major_version(self): url = '/v2/?version=3.0.1' res = self.app.get(url, expect_errors=True) assert_equal(res.status_code, 409) assert_equal( res.json['errors'][0]['detail'], 'Version {} specified in query parameter does not fall within URL path version {}'.format( '3.0.1', self.valid_url_path_version ) ) def 
test_header_version_not_within_url_path_major_version(self): headers = {'accept': 'application/vnd.api+json;version=3.0.1'} res = self.app.get(self.valid_url_path_version_url, headers=headers, expect_errors=True) assert_equal(res.status_code, 409) assert_equal( res.json['errors'][0]['detail'], 'Version {} specified in "Accept" header does not fall within URL path version {}'.format( '3.0.1', self.valid_url_path_version ) ) def test_header_version_and_query_parameter_version_match(self): headers = {'accept': 'application/vnd.api+json;version={}'.format(self.valid_header_version)} url = '/v2/?version={}'.format(self.valid_header_version) res = self.app.get(url, headers=headers) assert_equal(res.status_code, 200) assert_equal(res.json['meta']['version'], self.valid_header_version) def test_header_version_and_query_parameter_version_mismatch(self): headers = {'accept': 'application/vnd.api+json;version={}'.format(self.valid_header_version)} url = '/v2/?version={}'.format(self.valid_query_parameter_version) res = self.app.get(url, headers=headers, expect_errors=True) assert_equal(res.status_code, 409) assert_equal( res.json['errors'][0]['detail'], 'Version {} specified in "Accept" header does not match version {} specified in query parameter'.format( self.valid_header_version, self.valid_query_parameter_version ) ) def test_header_version_bad_format(self): headers = {'accept': 'application/vnd.api+json;version=not_at_all_a_version'} res = self.app.get(self.valid_url_path_version_url, headers=headers, expect_errors=True) assert_equal(res.status_code, 406) assert_equal(res.json['errors'][0]['detail'], 'Invalid version in "Accept" header.') def test_query_version_bad_format(self): url = '/v2/?version=not_at_all_a_version' res = self.app.get(url, expect_errors=True) assert_equal(res.status_code, 404) assert_equal(res.json['errors'][0]['detail'], 'Invalid version in query parameter.')
acshi/osf.io
api_tests/base/test_versioning.py
Python
apache-2.0
6,291
0.003656
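For orientation, the three version-pinning mechanisms exercised by these tests correspond to client requests roughly like the following; the host is an illustrative assumption.

import requests

BASE = 'https://api.osf.example'  # placeholder host

requests.get(BASE + '/v2/')                                    # URL path version only
requests.get(BASE + '/v2/', params={'version': '2.1'})         # query parameter override
requests.get(BASE + '/v2/',
             headers={'Accept': 'application/vnd.api+json;version=2.0.1'})  # header override
# Overrides that leave the URL path's major version (e.g. version=3.0.1 on /v2/) return 409.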
from django.conf import settings as dj_settings from django.db import models, transaction from django.core.signals import got_request_exception from django.http import Http404 from django.utils.encoding import smart_unicode from django.utils.translation import ugettext_lazy as _ from djangodblog import settings from djangodblog.manager import DBLogManager, ErrorBatchManager from djangodblog.utils import JSONDictField from djangodblog.helpers import construct_checksum import datetime import warnings import logging import sys try: from idmapper.models import SharedMemoryModel as Model except ImportError: Model = models.Model logger = logging.getLogger('dblog') __all__ = ('Error', 'ErrorBatch') LOG_LEVELS = ( (logging.INFO, _('info')), (logging.WARNING, _('warning')), (logging.DEBUG, _('debug')), (logging.ERROR, _('error')), (logging.FATAL, _('fatal')), ) STATUS_LEVELS = ( (0, _('unresolved')), (1, _('resolved')), ) class ErrorBase(Model): logger = models.CharField(max_length=64, blank=True, default='root', db_index=True) class_name = models.CharField(_('type'), max_length=128, blank=True, null=True, db_index=True) level = models.PositiveIntegerField(choices=LOG_LEVELS, default=logging.ERROR, blank=True, db_index=True) message = models.TextField() traceback = models.TextField(blank=True, null=True) url = models.URLField(null=True, blank=True) server_name = models.CharField(max_length=128, db_index=True) checksum = models.CharField(max_length=32, db_index=True) objects = DBLogManager() class Meta: abstract = True def get_absolute_url(self): return self.url def shortened_url(self): if not self.url: return _('no data') url = self.url if len(url) > 60: url = url[:60] + '...' return url shortened_url.short_description = _('url') shortened_url.admin_order_field = 'url' def full_url(self): return self.data.get('url') or self.url full_url.short_description = _('url') full_url.admin_order_field = 'url' def error(self): message = smart_unicode(self.message) if len(message) > 100: message = message[:97] + '...' 
if self.class_name: return "%s: %s" % (self.class_name, message) return message error.short_description = _('error') def description(self): return self.traceback or '' description.short_description = _('description') class ErrorBatch(ErrorBase): # XXX: We're using the legacy column for `is_resolved` for status status = models.PositiveIntegerField(default=0, db_column="is_resolved", choices=STATUS_LEVELS) times_seen = models.PositiveIntegerField(default=1) last_seen = models.DateTimeField(default=datetime.datetime.now, db_index=True) first_seen = models.DateTimeField(default=datetime.datetime.now, db_index=True) objects = ErrorBatchManager() class Meta: unique_together = (('logger', 'server_name', 'checksum'),) verbose_name_plural = _('summaries') verbose_name = _('summary') def __unicode__(self): return "(%s) %s: %s" % (self.times_seen, self.class_name, self.error()) def natural_key(self): return (self.logger, self.server_name, self.checksum) @staticmethod @transaction.commit_on_success def handle_exception(sender, request=None, **kwargs): try: exc_type, exc_value, traceback = sys.exc_info() if not settings.CATCH_404_ERRORS \ and issubclass(exc_type, Http404): return if dj_settings.DEBUG or getattr(exc_type, 'skip_dblog', False): return if transaction.is_dirty(): transaction.rollback() if request: data = dict( META=request.META, POST=request.POST, GET=request.GET, COOKIES=request.COOKIES, ) else: data = dict() extra = dict( url=request and request.build_absolute_uri() or None, data=data, ) if settings.USE_LOGGING: logging.getLogger('dblog').critical(exc_value, exc_info=sys.exc_info(), extra=extra) else: Error.objects.create_from_exception(**extra) except Exception, exc: try: logger.exception(u'Unable to process log entry: %s' % (exc,)) except Exception, exc: warnings.warn(u'Unable to process log entry: %s' % (exc,)) class Error(ErrorBase): datetime = models.DateTimeField(default=datetime.datetime.now, db_index=True) data = JSONDictField(blank=True, null=True) class Meta: verbose_name = _('message') verbose_name_plural = _('messages') def __unicode__(self): return "%s: %s" % (self.class_name, smart_unicode(self.message)) def save(self, *args, **kwargs): if not self.checksum: self.checksum = construct_checksum(self) super(Error, self).save(*args, **kwargs) got_request_exception.connect(ErrorBatch.handle_exception)
alvinkatojr/django-db-log
djangodblog/models.py
Python
bsd-3-clause
5,442
0.006248
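A hedged sketch of logging an error explicitly through the same manager call the signal handler above uses; the surrounding view and do_work() are hypothetical.

from djangodblog.models import Error

def fragile_view(request):
    try:
        do_work()  # hypothetical callable that may raise
    except Exception:
        Error.objects.create_from_exception(
            url=request.build_absolute_uri(),
            data={'GET': dict(request.GET)},
        )
        raise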
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class PyNestAsyncio(PythonPackage):
    """Patch asyncio to allow nested event loops."""

    homepage = "https://github.com/erdewit/nest_asyncio"
    url      = "https://pypi.io/packages/source/n/nest-asyncio/nest_asyncio-1.4.0.tar.gz"

    version('1.4.0', sha256='5773054bbc14579b000236f85bc01ecced7ffd045ec8ca4a9809371ec65a59c8')

    depends_on('python@3.5:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
iulian787/spack
var/spack/repos/builtin/packages/py-nest-asyncio/package.py
Python
lgpl-2.1
633
0.004739
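For reference, the library this package installs is consumed roughly as follows; a minimal sketch of nest_asyncio's documented entry point.

import asyncio
import nest_asyncio

nest_asyncio.apply()   # patch the event loop so run_until_complete() can nest

async def inner():
    return 42

# Re-entering a running loop (e.g. inside Jupyter) is normally illegal; allowed after apply().
print(asyncio.get_event_loop().run_until_complete(inner()))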
#! /usr/bin/env python ''' Generates Inkscape SVG file containing box components needed to create several different types of laser cut tabbed boxes. Derived from original version authored by elliot white - [email protected] This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ''' __version__ = "0.1" ### please report bugs at https://github.com/zackurtz/box-maker/issues ### import sys import inkex, simplestyle, gettext _ = gettext.gettext def drawS(XYstring): # Draw lines from a list name='part' style = { 'stroke': '#000000', 'fill': 'none' } drw = { 'style':simplestyle.formatStyle(style), inkex.addNS('label','inkscape'):name, 'd':XYstring} inkex.etree.SubElement(parent, inkex.addNS('path','svg'), drw ) return class BoxMaker(inkex.Effect): def __init__(self): # Call the base class constructor. inkex.Effect.__init__(self) # Define options self.OptionParser.add_option('--unit',action='store',type='string', dest='unit',default='mm',help='Measure Units') self.OptionParser.add_option('--inside',action='store',type='int', dest='inside',default=0,help='Int/Ext Dimension') self.OptionParser.add_option('--length',action='store',type='float', dest='length',default=100,help='Length of Box') self.OptionParser.add_option('--width',action='store',type='float', dest='width',default=100,help='Width of Box') self.OptionParser.add_option('--depth',action='store',type='float', dest='height',default=100,help='Height of Box') self.OptionParser.add_option('--tab',action='store',type='float', dest='tab',default=25,help='Nominal Tab Width') self.OptionParser.add_option('--equal',action='store',type='int', dest='equal',default=0,help='Equal/Prop Tabs') self.OptionParser.add_option('--thickness',action='store',type='float', dest='thickness',default=10,help='Thickness of Material') self.OptionParser.add_option('--kerf',action='store',type='float', dest='kerf',default=0.5,help='Kerf (width) of cut') self.OptionParser.add_option('--clearance',action='store',type='float', dest='clearance',default=0.01,help='Clearance of joints') self.OptionParser.add_option('--style',action='store',type='int', dest='style',default=25,help='Layout/Style') self.OptionParser.add_option('--spacing',action='store',type='float', dest='spacing',default=25,help='Part Spacing') def tabbed_side(self, (rx,ry), (sox,soy), (eox,eoy), tabVec, length, (dirx,diry), isTab): # root startOffset endOffset tabVec length direction isTab num_divisions = int(length/nomTab) # divisions if num_divisions % 2 == 0: num_divisions -= 1 # make divs odd num_divisions = float(num_divisions) tabs = (num_divisions-1)/2 # tabs for side if equalTabs: gapWidth = tabWidth = length/num_divisions else: tabWidth = nomTab gapWidth = (length-tabs*nomTab)/(num_divisions-tabs) # kerf correction if isTab: gapWidth -= correction tabWidth += correction first = correction/2 else: gapWidth += correction tabWidth -= correction first =- correction/2 s = [] firstVec = 0 secondVec = tabVec # used to select operation on x or y dirxN = 0 if dirx 
else 1 diryN = 0 if diry else 1 (Vx, Vy) = (rx+sox*self.thickness,ry+soy*self.thickness) s = 'M ' + str(Vx) + ',' + str(Vy) + ' ' if dirxN: Vy = ry # set correct line start if diryN: Vx = rx # generate line as tab or hole using: # last co-ord:Vx,Vy ; tab dir:tabVec ; direction:dirx,diry ; thickness:thickness # divisions:num_divisions ; gap width:gapWidth ; tab width:tabWidth for n in range(1, int(num_divisions)): if n % 2 == 1: Vx = Vx + dirx*gapWidth + dirxN*firstVec + first*dirx Vy = Vy + diry*gapWidth + diryN*firstVec + first*diry s += 'L ' + str(Vx) + ',' + str(Vy) + ' ' Vx = Vx + dirxN*secondVec Vy = Vy + diryN*secondVec s += 'L ' + str(Vx) + ',' + str(Vy) + ' ' else: Vx = Vx+dirx*tabWidth+dirxN*firstVec Vy = Vy+diry*tabWidth+diryN*firstVec s += 'L ' + str(Vx) + ',' + str(Vy) + ' ' Vx = Vx + dirxN*secondVec Vy = Vy + diryN*secondVec s += 'L ' + str(Vx) + ',' + str(Vy) + ' ' (secondVec,firstVec) = (-secondVec,-firstVec) # swap tab direction first = 0 s += 'L ' + str(rx+eox*self.thickness+dirx*length) + ',' + str(ry+eoy*self.thickness+diry*length) + ' ' return s def flat_side(self, root, start_offset, end_offset, direction, length): current_x = root[0] + start_offset[0]*self.thickness current_y = root[1] + start_offset[1]*self.thickness draw_cmd = 'M' + str(current_x) + ',' + str(current_y) + ' ' draw_cmd += 'L ' + str(root[0] + end_offset[0]*self.thickness+direction[0]*length) + ',' + str(root[1] + end_offset[1]*self.thickness+direction[1]*length) + ' ' return draw_cmd def draw_pieces(self, pieces, thickness, spacing): for piece in pieces: # generate and draw each piece of the box (xs,xx,xy,xz) = piece[0] (ys,yx,yy,yz) = piece[1] x = xs*spacing + xx*self.x_dim + xy*self.y_dim + xz*self.z_dim # root x co-ord for piece y = ys*spacing + yx*self.x_dim +yy*self.y_dim + yz*self.z_dim # root y co-ord for piece dx = piece[2] dy = piece[3] tabs = piece[4] # extract tab status for each side a = tabs>>3 & 1 b= tabs>>2 & 1 c= tabs>>1 & 1 d= tabs & 1 # generate and draw the sides of each piece drawS(self.tabbed_side((x,y), (d,a), (-b,a), -thickness if a else thickness, dx, (1,0), a)) # side a drawS(self.tabbed_side((x+dx,y), (-b,a), (-b,-c), thickness if b else -thickness, dy, (0,1), b)) # side b drawS(self.tabbed_side((x+dx,y+dy), (-b,-c), (d,-c), thickness if c else -thickness, dx, (-1,0), c)) # side c drawS(self.tabbed_side((x,y+dy), (d,-c), (d,a), -thickness if d else thickness, dy, (0,-1), d)) # side d def effect(self): global parent, nomTab, equalTabs, correction # Get access to main SVG document element and get its dimensions. svg = self.document.getroot() # Get the attibutes: widthDoc = inkex.unittouu(svg.get('width')) heightDoc = inkex.unittouu(svg.get('height')) # Create a new layer. layer = inkex.etree.SubElement(svg, 'g') layer.set(inkex.addNS('label', 'inkscape'), 'newlayer') layer.set(inkex.addNS('groupmode', 'inkscape'), 'layer') parent = self.current_layer # Get script's option values. 
unit = self.options.unit inside = self.options.inside self.x_dim = inkex.unittouu( str(self.options.length) + unit ) self.y_dim = inkex.unittouu( str(self.options.width) + unit ) self.z_dim = inkex.unittouu( str(self.options.height) + unit ) thickness = inkex.unittouu( str(self.options.thickness) + unit ) nomTab = inkex.unittouu( str(self.options.tab) + unit ) equalTabs = self.options.equal kerf = inkex.unittouu( str(self.options.kerf) + unit ) clearance = inkex.unittouu( str(self.options.clearance) + unit ) layout = self.options.style spacing = inkex.unittouu( str(self.options.spacing) + unit ) self.thickness = thickness if inside: # convert inside dimension to outside dimension self.x_dim += thickness*2 self.y_dim += thickness*2 self.z_dim += thickness*2 correction = kerf - clearance # check input values mainly to avoid python errors # TODO restrict values to *correct* solutions error = 0 X = self.x_dim Y = self.y_dim Z = self.z_dim if min(X,Y,Z) == 0: inkex.errormsg(_('Error: Dimensions must be non zero')) error = 1 if max(X,Y,Z) > max(widthDoc,heightDoc)*10: # crude test inkex.errormsg(_('Error: Dimensions Too Large')) error = 1 if min(X,Y,Z) < 3*nomTab: inkex.errormsg(_('Error: Tab size too large')) error = 1 if nomTab < thickness: inkex.errormsg(_('Error: Tab size too small')) error = 1 if thickness == 0: inkex.errormsg(_('Error: Thickness is zero')) error = 1 if thickness > min(X,Y,Z)/3: # crude test inkex.errormsg(_('Error: Material too thick')) error = 1 if correction > min(X,Y,Z)/3: # crude test inkex.errormsg(_('Error: Kerf/Clearence too large')) error = 1 if spacing > max(X,Y,Z)*10: # crude test inkex.errormsg(_('Error: Spacing too large')) error = 1 if spacing < kerf: inkex.errormsg(_('Error: Spacing too small')) error = 1 if error: exit() # layout format:(rootx),(rooty),Xlength,Ylength,tabInfo # root= (spacing,X,Y,Z) * values in tuple # tabInfo= <abcd> 0=holes 1=tabs if layout==1: # Diagramatic Layout pieces=[[(2,0,0,1),(3,0,1,1),X,Z,0b1010], [(1,0,0,0),(2,0,0,1),Z,Y,0b1111], [(2,0,0,1),(2,0,0,1),X,Y,0b0000], [(3,1,0,1),(2,0,0,1),Z,Y,0b1111], [(4,1,0,2),(2,0,0,1),X,Y,0b0000], [(2,0,0,1),(1,0,0,0),X,Z,0b1010]] elif layout==2: # 3 Piece Layout pieces=[[(2,0,0,1),(2,0,1,0),X,Z,0b1010], [(1,0,0,0),(1,0,0,0),Z,Y,0b1111], [(2,0,0,1),(1,0,0,0),X,Y,0b0000]] elif layout==3: # Inline(compact) Layout pieces=[[(1,0,0,0),(1,0,0,0),X,Y,0b0000], [(2,1,0,0),(1,0,0,0),X,Y,0b0000], [(3,2,0,0),(1,0,0,0),Z,Y,0b0101], [(4,2,0,1),(1,0,0,0),Z,Y,0b0101], [(5,2,0,2),(1,0,0,0),X,Z,0b1111], [(6,3,0,2),(1,0,0,0),X,Z,0b1111]] elif layout==4: # Diagramatic Layout with Alternate Tab Arrangement pieces=[[(2,0,0,1),(3,0,1,1),X,Z,0b1001], [(1,0,0,0),(2,0,0,1),Z,Y,0b1100], [(2,0,0,1),(2,0,0,1),X,Y,0b1100], [(3,1,0,1),(2,0,0,1),Z,Y,0b0110], [(4,1,0,2),(2,0,0,1),X,Y,0b0110], [(2,0,0,1),(1,0,0,0),X,Z,0b1100]] self.draw_pieces(pieces, thickness, spacing) # Create effect instance and apply it. effect = BoxMaker() effect.affect()
zackurtz/box-maker
boxmaker.py
Python
gpl-3.0
10,763
0.047849
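A standalone restatement of the tab-division arithmetic inside tabbed_side() above, useful for checking edge layouts without Inkscape; kerf/clearance correction is intentionally omitted.

def tab_layout(length, nom_tab, equal_tabs):
    """Mirror of tabbed_side(): an odd division count gives alternating gap/tab segments."""
    divisions = int(length / nom_tab)
    if divisions % 2 == 0:
        divisions -= 1                 # keep the division count odd
    tabs = (divisions - 1) // 2        # tabs per side
    if equal_tabs:
        gap_width = tab_width = length / divisions
    else:
        tab_width = nom_tab
        gap_width = (length - tabs * nom_tab) / (divisions - tabs)
    return divisions, tab_width, gap_width

# Example: tab_layout(100, 25, False) -> (3, 25, 37.5)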
from __future__ import print_function, absolute_import
import cv2
import numpy as np
import sys, os

# Make the sibling visual_auxiliary module importable regardless of the caller's cwd.
path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(path)
import visual_auxiliary as va


def is_center_blue_line(lbot):
    """Return True if the strongest blue-line response falls in the centre region of view."""
    frame = lbot.getImage()
    if frame is not None:
        rois = va.detect_blue_line(frame)
        maxIndex = np.argmax(rois)
        # Index 1 is the central ROI; 20 is the minimum activation to count as a detection.
        if maxIndex == 1 and rois[maxIndex] > 20:
            return True
    return False
robocomp/learnbot
learnbot_dsl/functions/perceptual/camera/is_center_blue_line.py
Python
gpl-3.0
418
0.028708
# -*- coding: utf-8 -*- """ Internal function called by glmnet. See also glmnet, cvglmnet """ # import packages/methods import scipy import ctypes from loadGlmLib import loadGlmLib def elnet(x, is_sparse, irs, pcs, y, weights, offset, gtype, parm, lempty, nvars, jd, vp, cl, ne, nx, nlam, flmin, ulam, thresh, isd, intr, maxit, family): # load shared fortran library glmlib = loadGlmLib() # pre-process data ybar = scipy.dot(scipy.transpose(y), weights) ybar = ybar/sum(weights) nulldev = (y - ybar)**2 * weights # ka lst = ['covariance', 'naive'] ka = [i for i in range(len(lst)) if lst[i] == gtype] if len(ka) == 0: raise ValueError('unrecognized type for ka'); else: ka = ka[0] + 1 # convert from 0-based to 1-based index for fortran # offset if len(offset) == 0: offset = y*0 is_offset = False else: is_offset = True # remove offset from y y = y - offset # now convert types and allocate memory before calling # glmnet fortran library ###################################### # --------- PROCESS INPUTS ----------- ###################################### # force inputs into fortran order and into the correct scipy datatype copyFlag = False x = x.astype(dtype = scipy.float64, order = 'F', copy = copyFlag) irs = irs.astype(dtype = scipy.int32, order = 'F', copy = copyFlag) pcs = pcs.astype(dtype = scipy.int32, order = 'F', copy = copyFlag) y = y.astype(dtype = scipy.float64, order = 'F', copy = copyFlag) weights = weights.astype(dtype = scipy.float64, order = 'F', copy = copyFlag) jd = jd.astype(dtype = scipy.int32, order = 'F', copy = copyFlag) vp = vp.astype(dtype = scipy.float64, order = 'F', copy = copyFlag) cl = cl.astype(dtype = scipy.float64, order = 'F', copy = copyFlag) ulam = ulam.astype(dtype = scipy.float64, order = 'F', copy = copyFlag) ###################################### # --------- ALLOCATE OUTPUTS --------- ###################################### # lmu lmu = -1 lmu_r = ctypes.c_int(lmu) # a0 a0 = scipy.zeros([nlam], dtype = scipy.float64) a0 = a0.astype(dtype = scipy.float64, order = 'F', copy = False) a0_r = a0.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) # ca ca = scipy.zeros([nx, nlam], dtype = scipy.float64) ca = ca.astype(dtype = scipy.float64, order = 'F', copy = False) ca_r = ca.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) # ia ia = -1*scipy.ones([nx], dtype = scipy.int32) ia = ia.astype(dtype = scipy.int32, order = 'F', copy = False) ia_r = ia.ctypes.data_as(ctypes.POINTER(ctypes.c_int)) # nin nin = -1*scipy.ones([nlam], dtype = scipy.int32) nin = nin.astype(dtype = scipy.int32, order = 'F', copy = False) nin_r = nin.ctypes.data_as(ctypes.POINTER(ctypes.c_int)) # rsq rsq = -1*scipy.ones([nlam], dtype = scipy.float64) rsq = rsq.astype(dtype = scipy.float64, order = 'F', copy = False) rsq_r = rsq.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) # alm alm = -1*scipy.ones([nlam], dtype = scipy.float64) alm = alm.astype(dtype = scipy.float64, order = 'F', copy = False) alm_r = alm.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) # nlp nlp = -1 nlp_r = ctypes.c_int(nlp) # jerr jerr = -1 jerr_r = ctypes.c_int(jerr) # ################################### # main glmnet fortran caller # ################################### if is_sparse: # sparse elnet glmlib.spelnet_( ctypes.byref(ctypes.c_int(ka)), ctypes.byref(ctypes.c_double(parm)), ctypes.byref(ctypes.c_int(len(weights))), ctypes.byref(ctypes.c_int(nvars)), x.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), pcs.ctypes.data_as(ctypes.POINTER(ctypes.c_int)), irs.ctypes.data_as(ctypes.POINTER(ctypes.c_int)), 
y.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), weights.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), jd.ctypes.data_as(ctypes.POINTER(ctypes.c_int)), vp.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), cl.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), ctypes.byref(ctypes.c_int(ne)), ctypes.byref(ctypes.c_int(nx)), ctypes.byref(ctypes.c_int(nlam)), ctypes.byref(ctypes.c_double(flmin)), ulam.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), ctypes.byref(ctypes.c_double(thresh)), ctypes.byref(ctypes.c_int(isd)), ctypes.byref(ctypes.c_int(intr)), ctypes.byref(ctypes.c_int(maxit)), ctypes.byref(lmu_r), a0_r, ca_r, ia_r, nin_r, rsq_r, alm_r, ctypes.byref(nlp_r), ctypes.byref(jerr_r) ) else: # call fortran elnet routine glmlib.elnet_( ctypes.byref(ctypes.c_int(ka)), ctypes.byref(ctypes.c_double(parm)), ctypes.byref(ctypes.c_int(len(weights))), ctypes.byref(ctypes.c_int(nvars)), x.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), y.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), weights.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), jd.ctypes.data_as(ctypes.POINTER(ctypes.c_int)), vp.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), cl.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), ctypes.byref(ctypes.c_int(ne)), ctypes.byref(ctypes.c_int(nx)), ctypes.byref(ctypes.c_int(nlam)), ctypes.byref(ctypes.c_double(flmin)), ulam.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), ctypes.byref(ctypes.c_double(thresh)), ctypes.byref(ctypes.c_int(isd)), ctypes.byref(ctypes.c_int(intr)), ctypes.byref(ctypes.c_int(maxit)), ctypes.byref(lmu_r), a0_r, ca_r, ia_r, nin_r, rsq_r, alm_r, ctypes.byref(nlp_r), ctypes.byref(jerr_r) ) # ################################### # post process results # ################################### # check for error if (jerr_r.value > 0): raise ValueError("Fatal glmnet error in library call : error code = ", jerr_r.value) elif (jerr_r.value < 0): print("Warning: Non-fatal error in glmnet library call: error code = ", jerr_r.value) print("Check results for accuracy. Partial or no results returned.") # clip output to correct sizes lmu = lmu_r.value a0 = a0[0:lmu] ca = ca[0:nx, 0:lmu] ia = ia[0:nx] nin = nin[0:lmu] rsq = rsq[0:lmu] alm = alm[0:lmu] # ninmax ninmax = max(nin) # fix first value of alm (from inf to correct value) if lempty: t1 = scipy.log(alm[1]) t2 = scipy.log(alm[2]) alm[0] = scipy.exp(2*t1 - t2) # create return fit dictionary if ninmax > 0: ca = ca[0:ninmax, :] df = scipy.sum(scipy.absolute(ca) > 0, axis=0) ja = ia[0:ninmax] - 1 # ia is 1-indexed in fortran oja = scipy.argsort(ja) ja1 = ja[oja] beta = scipy.zeros([nvars, lmu], dtype = scipy.float64) beta[ja1, :] = ca[oja, :] else: beta = scipy.zeros([nvars, lmu], dtype = scipy.float64) df = scipy.zeros([1, lmu], dtype = scipy.float64) fit = dict() fit['a0'] = a0 fit['beta'] = beta fit['dev'] = rsq fit['nulldev'] = nulldev fit['df']= df fit['lambdau'] = alm fit['npasses'] = nlp_r.value fit['jerr'] = jerr_r.value fit['dim'] = scipy.array([nvars, lmu], dtype = scipy.integer) fit['offset'] = is_offset fit['class'] = 'elnet' # ################################### # return to caller # ################################### return fit #----------------------------------------- # end of method elmnet #-----------------------------------------
hanfang/glmnet_python
glmnet_python/elnet.py
Python
gpl-2.0
8,525
0.029795
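elnet() is internal; a hedged sketch of reaching it through the public glmnet() wrapper, following the project's own README conventions (this codebase uses scipy for array handling, where scipy.random aliases numpy's random module).

import scipy
from glmnet_python import glmnet, glmnetCoef

x = scipy.random.rand(100, 10)
y = scipy.random.rand(100, 1)
fit = glmnet(x=x, y=y, family='gaussian')         # gaussian family dispatches to elnet()
coefs = glmnetCoef(fit, s=scipy.float64([0.05]))  # coefficients at lambda = 0.05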
from django.shortcuts import get_object_or_404
from django.views.generic import DetailView, ListView
from braces.views import OrderableListMixin
from .models import Post, Tag

ORDER_FIELD = {'title': 'title'}


class PermissionMixin(object):
    def get_queryset(self, *args, **kwargs):
        qs = super(PermissionMixin, self).get_queryset(*args, **kwargs)
        return qs.for_user(self.request.user)


class PostDetailView(PermissionMixin, DetailView):
    model = Post


class PostListView(PermissionMixin, OrderableListMixin, ListView):
    model = Post
    paginate_by = 10
    orderable_columns = ("pk", "name", "city")
    orderable_columns_default = "created_on"

    def get_queryset(self, *args, **kwargs):
        qs = super(PostListView, self).get_queryset(*args, **kwargs)
        if 'tag_slug' in self.kwargs:
            self.tag = get_object_or_404(Tag, slug=self.kwargs['tag_slug'])
            qs = qs.filter(tags=self.tag)
        return qs

    def get_context_data(self, **kwargs):
        context = super(PostListView, self).get_context_data(**kwargs)
        if hasattr(self, 'tag'):
            context['object'] = self.tag
        return context
ad-m/pru
pru/blog/views.py
Python
bsd-3-clause
1,186
0
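A hedged wiring sketch for the views above; the URL patterns, regexes and module path are assumptions since the project's urls.py is not shown, but the tag_slug keyword matches what get_queryset() expects.

from django.conf.urls import url

from blog.views import PostDetailView, PostListView

urlpatterns = [
    url(r'^$', PostListView.as_view(), name='post_list'),
    url(r'^tag/(?P<tag_slug>[\w-]+)/$', PostListView.as_view(), name='post_list_by_tag'),
    url(r'^(?P<pk>\d+)/$', PostDetailView.as_view(), name='post_detail'),
]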
import random import numpy as np from tpg.learner import Learner from tpg.action_object import ActionObject from tpg.program import Program from tpg.team import Team dummy_init_params = { 'generation': 0, 'actionCodes':[ 0,1,2,3,4,5,6,7,8,9,10,11 ] } dummy_mutate_params = { 'pProgMut': 0.5, 'pActMut': 0.5, 'pActAtom': 0.5, 'pProgMut': 0.5, 'pInstDel': 0.5, 'pInstMut': 0.5, 'pInstAdd': 0.5, 'pLrnDel': 0.5, 'pLrnAdd': 0.5, 'pLrnMut': 0.5, 'nOperations': 8, 'nDestinations': 8, 'inputSize': 8, 'actionCodes':[ 0,1,2,3,4,5,6,7,8,9,10,11 ], 'pInstSwp':0.5, 'generation': 1 } ''' Dummy Creates These should be used to test constructs other than the ones being created by the function. For example, to test a Team you would create dummy programs and learners. But you wouldn't use the create_dummy_team function to test the creation of a team. This is because these methods verify nothing about the init procedure of the class they're returning an object of. ''' ''' Create a dummy program with some preset values ''' def create_dummy_program(): program = Program( maxProgramLength=128, nOperations=7, nDestinations=8, inputSize=100, initParams = dummy_init_params ) return program ''' Create dummy team with some number of learners. Returns the team and the learners added to it ''' def create_dummy_team(num_learners=2): team = Team(dummy_init_params) learners = [] for x in range(0, num_learners): learner = create_dummy_learner() learners.append(learner) team.addLearner(learner) return team, learners ''' Create a dummy action object ''' def create_dummy_action_object(): action_object = ActionObject(action=random.randint(0,10), initParams=dummy_init_params) return action_object ''' Create a dummy action object with a given team ''' def create_dummy_team_action(team): action_object = ActionObject(team, initParams=dummy_init_params) return action_object ''' Create a dummy learner with some preset values ''' def create_dummy_learner(): learner = Learner( dummy_init_params, program=create_dummy_program(), actionObj=create_dummy_action_object(), numRegisters=8 ) return learner ''' Create a list of dummy learners ''' def create_dummy_learners(num_learners=100): learners = [] for i in range(num_learners): learners.append(create_dummy_learner()) return learners """ Transform visual input from ALE to flat vector. inState should be made int32 before passing in. """ def getStateALE(inState): # each row is all 1 color rgbRows = np.reshape(inState,(len(inState[0])*len(inState), 3)).T # add each with appropriate shifting # get RRRRRRRR GGGGGGGG BBBBBBBB return np.add(np.left_shift(rgbRows[0], 16), np.add(np.left_shift(rgbRows[1], 8), rgbRows[2]))
Ryan-Amaral/PyTPG
tpg_tests/test_utils.py
Python
mit
2,999
0.015005
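As the module docstring notes, these helpers exist to test other constructs; a minimal hedged example of consuming them from a unittest:

import unittest
from tpg_tests.test_utils import create_dummy_team

class DummyHelpersTest(unittest.TestCase):
    def test_team_receives_requested_learner_count(self):
        team, learners = create_dummy_team(num_learners=5)
        self.assertEqual(len(learners), 5)

if __name__ == '__main__':
    unittest.main()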
# # Generated by the Open ERP module recorder ! #
avanzosc/avanzosc6.1
steel_quality_test/__init__.py
Python
agpl-3.0
50
0
"""Webhook handlers for mobile_app.""" import asyncio from functools import wraps import logging import secrets from aiohttp.web import HTTPBadRequest, Request, Response, json_response from nacl.secret import SecretBox import voluptuous as vol from homeassistant.components import notify as hass_notify, tag from homeassistant.components.binary_sensor import ( DEVICE_CLASSES as BINARY_SENSOR_CLASSES, ) from homeassistant.components.camera import SUPPORT_STREAM as CAMERA_SUPPORT_STREAM from homeassistant.components.device_tracker import ( ATTR_BATTERY, ATTR_GPS, ATTR_GPS_ACCURACY, ATTR_LOCATION_NAME, ) from homeassistant.components.frontend import MANIFEST_JSON from homeassistant.components.sensor import DEVICE_CLASSES as SENSOR_CLASSES from homeassistant.components.zone.const import DOMAIN as ZONE_DOMAIN from homeassistant.const import ( ATTR_DOMAIN, ATTR_SERVICE, ATTR_SERVICE_DATA, CONF_WEBHOOK_ID, HTTP_BAD_REQUEST, HTTP_CREATED, ) from homeassistant.core import EventOrigin from homeassistant.exceptions import HomeAssistantError, ServiceNotFound, TemplateError from homeassistant.helpers import config_validation as cv, device_registry as dr from homeassistant.helpers.dispatcher import async_dispatcher_send from homeassistant.helpers.template import attach from homeassistant.helpers.typing import HomeAssistantType from homeassistant.util.decorator import Registry from .const import ( ATTR_ALTITUDE, ATTR_APP_DATA, ATTR_APP_VERSION, ATTR_CAMERA_ENTITY_ID, ATTR_COURSE, ATTR_DEVICE_ID, ATTR_DEVICE_NAME, ATTR_EVENT_DATA, ATTR_EVENT_TYPE, ATTR_MANUFACTURER, ATTR_MODEL, ATTR_OS_VERSION, ATTR_SENSOR_ATTRIBUTES, ATTR_SENSOR_DEVICE_CLASS, ATTR_SENSOR_ICON, ATTR_SENSOR_NAME, ATTR_SENSOR_STATE, ATTR_SENSOR_TYPE, ATTR_SENSOR_TYPE_BINARY_SENSOR, ATTR_SENSOR_TYPE_SENSOR, ATTR_SENSOR_UNIQUE_ID, ATTR_SENSOR_UOM, ATTR_SPEED, ATTR_SUPPORTS_ENCRYPTION, ATTR_TEMPLATE, ATTR_TEMPLATE_VARIABLES, ATTR_VERTICAL_ACCURACY, ATTR_WEBHOOK_DATA, ATTR_WEBHOOK_ENCRYPTED, ATTR_WEBHOOK_ENCRYPTED_DATA, ATTR_WEBHOOK_TYPE, CONF_CLOUDHOOK_URL, CONF_REMOTE_UI_URL, CONF_SECRET, DATA_CONFIG_ENTRIES, DATA_DELETED_IDS, DATA_STORE, DOMAIN, ERR_ENCRYPTION_ALREADY_ENABLED, ERR_ENCRYPTION_NOT_AVAILABLE, ERR_ENCRYPTION_REQUIRED, ERR_INVALID_FORMAT, ERR_SENSOR_NOT_REGISTERED, SIGNAL_LOCATION_UPDATE, SIGNAL_SENSOR_UPDATE, ) from .helpers import ( _decrypt_payload, empty_okay_response, error_response, registration_context, safe_registration, savable_state, supports_encryption, webhook_response, ) _LOGGER = logging.getLogger(__name__) DELAY_SAVE = 10 WEBHOOK_COMMANDS = Registry() COMBINED_CLASSES = set(BINARY_SENSOR_CLASSES + SENSOR_CLASSES) SENSOR_TYPES = [ATTR_SENSOR_TYPE_BINARY_SENSOR, ATTR_SENSOR_TYPE_SENSOR] WEBHOOK_PAYLOAD_SCHEMA = vol.Schema( { vol.Required(ATTR_WEBHOOK_TYPE): cv.string, vol.Required(ATTR_WEBHOOK_DATA, default={}): vol.Any(dict, list), vol.Optional(ATTR_WEBHOOK_ENCRYPTED, default=False): cv.boolean, vol.Optional(ATTR_WEBHOOK_ENCRYPTED_DATA): cv.string, } ) def validate_schema(schema): """Decorate a webhook function with a schema.""" if isinstance(schema, dict): schema = vol.Schema(schema) def wrapper(func): """Wrap function so we validate schema.""" @wraps(func) async def validate_and_run(hass, config_entry, data): """Validate input and call handler.""" try: data = schema(data) except vol.Invalid as ex: err = vol.humanize.humanize_error(data, ex) _LOGGER.error("Received invalid webhook payload: %s", err) return empty_okay_response() return await func(hass, config_entry, data) return validate_and_run return wrapper async 
def handle_webhook( hass: HomeAssistantType, webhook_id: str, request: Request ) -> Response: """Handle webhook callback.""" if webhook_id in hass.data[DOMAIN][DATA_DELETED_IDS]: return Response(status=410) config_entry = hass.data[DOMAIN][DATA_CONFIG_ENTRIES][webhook_id] device_name = config_entry.data[ATTR_DEVICE_NAME] try: req_data = await request.json() except ValueError: _LOGGER.warning("Received invalid JSON from mobile_app device: %s", device_name) return empty_okay_response(status=HTTP_BAD_REQUEST) if ( ATTR_WEBHOOK_ENCRYPTED not in req_data and config_entry.data[ATTR_SUPPORTS_ENCRYPTION] ): _LOGGER.warning( "Refusing to accept unencrypted webhook from %s", device_name, ) return error_response(ERR_ENCRYPTION_REQUIRED, "Encryption required") try: req_data = WEBHOOK_PAYLOAD_SCHEMA(req_data) except vol.Invalid as ex: err = vol.humanize.humanize_error(req_data, ex) _LOGGER.error( "Received invalid webhook from %s with payload: %s", device_name, err ) return empty_okay_response() webhook_type = req_data[ATTR_WEBHOOK_TYPE] webhook_payload = req_data.get(ATTR_WEBHOOK_DATA, {}) if req_data[ATTR_WEBHOOK_ENCRYPTED]: enc_data = req_data[ATTR_WEBHOOK_ENCRYPTED_DATA] webhook_payload = _decrypt_payload(config_entry.data[CONF_SECRET], enc_data) if webhook_type not in WEBHOOK_COMMANDS: _LOGGER.error( "Received invalid webhook from %s of type: %s", device_name, webhook_type ) return empty_okay_response() _LOGGER.debug( "Received webhook payload from %s for type %s: %s", device_name, webhook_type, webhook_payload, ) # Shield so we make sure we finish the webhook, even if sender hangs up. return await asyncio.shield( WEBHOOK_COMMANDS[webhook_type](hass, config_entry, webhook_payload) ) @WEBHOOK_COMMANDS.register("call_service") @validate_schema( { vol.Required(ATTR_DOMAIN): cv.string, vol.Required(ATTR_SERVICE): cv.string, vol.Optional(ATTR_SERVICE_DATA, default={}): dict, } ) async def webhook_call_service(hass, config_entry, data): """Handle a call service webhook.""" try: await hass.services.async_call( data[ATTR_DOMAIN], data[ATTR_SERVICE], data[ATTR_SERVICE_DATA], blocking=True, context=registration_context(config_entry.data), ) except (vol.Invalid, ServiceNotFound, Exception) as ex: _LOGGER.error( "Error when calling service during mobile_app " "webhook (device name: %s): %s", config_entry.data[ATTR_DEVICE_NAME], ex, ) raise HTTPBadRequest() from ex return empty_okay_response() @WEBHOOK_COMMANDS.register("fire_event") @validate_schema( { vol.Required(ATTR_EVENT_TYPE): cv.string, vol.Optional(ATTR_EVENT_DATA, default={}): dict, } ) async def webhook_fire_event(hass, config_entry, data): """Handle a fire event webhook.""" event_type = data[ATTR_EVENT_TYPE] hass.bus.async_fire( event_type, data[ATTR_EVENT_DATA], EventOrigin.remote, context=registration_context(config_entry.data), ) return empty_okay_response() @WEBHOOK_COMMANDS.register("stream_camera") @validate_schema({vol.Required(ATTR_CAMERA_ENTITY_ID): cv.string}) async def webhook_stream_camera(hass, config_entry, data): """Handle a request to HLS-stream a camera.""" camera = hass.states.get(data[ATTR_CAMERA_ENTITY_ID]) if camera is None: return webhook_response( {"success": False}, registration=config_entry.data, status=HTTP_BAD_REQUEST, ) resp = {"mjpeg_path": "/api/camera_proxy_stream/%s" % (camera.entity_id)} if camera.attributes["supported_features"] & CAMERA_SUPPORT_STREAM: try: resp["hls_path"] = await hass.components.camera.async_request_stream( camera.entity_id, "hls" ) except HomeAssistantError: resp["hls_path"] = None else: 
resp["hls_path"] = None return webhook_response(resp, registration=config_entry.data) @WEBHOOK_COMMANDS.register("render_template") @validate_schema( { str: { vol.Required(ATTR_TEMPLATE): cv.template, vol.Optional(ATTR_TEMPLATE_VARIABLES, default={}): dict, } } ) async def webhook_render_template(hass, config_entry, data): """Handle a render template webhook.""" resp = {} for key, item in data.items(): try: tpl = item[ATTR_TEMPLATE] attach(hass, tpl) resp[key] = tpl.async_render(item.get(ATTR_TEMPLATE_VARIABLES)) except TemplateError as ex: resp[key] = {"error": str(ex)} return webhook_response(resp, registration=config_entry.data) @WEBHOOK_COMMANDS.register("update_location") @validate_schema( { vol.Optional(ATTR_LOCATION_NAME): cv.string, vol.Required(ATTR_GPS): cv.gps, vol.Required(ATTR_GPS_ACCURACY): cv.positive_int, vol.Optional(ATTR_BATTERY): cv.positive_int, vol.Optional(ATTR_SPEED): cv.positive_int, vol.Optional(ATTR_ALTITUDE): vol.Coerce(float), vol.Optional(ATTR_COURSE): cv.positive_int, vol.Optional(ATTR_VERTICAL_ACCURACY): cv.positive_int, } ) async def webhook_update_location(hass, config_entry, data): """Handle an update location webhook.""" hass.helpers.dispatcher.async_dispatcher_send( SIGNAL_LOCATION_UPDATE.format(config_entry.entry_id), data ) return empty_okay_response() @WEBHOOK_COMMANDS.register("update_registration") @validate_schema( { vol.Optional(ATTR_APP_DATA, default={}): dict, vol.Required(ATTR_APP_VERSION): cv.string, vol.Required(ATTR_DEVICE_NAME): cv.string, vol.Required(ATTR_MANUFACTURER): cv.string, vol.Required(ATTR_MODEL): cv.string, vol.Optional(ATTR_OS_VERSION): cv.string, } ) async def webhook_update_registration(hass, config_entry, data): """Handle an update registration webhook.""" new_registration = {**config_entry.data, **data} device_registry = await dr.async_get_registry(hass) device_registry.async_get_or_create( config_entry_id=config_entry.entry_id, identifiers={(DOMAIN, config_entry.data[ATTR_DEVICE_ID])}, manufacturer=new_registration[ATTR_MANUFACTURER], model=new_registration[ATTR_MODEL], name=new_registration[ATTR_DEVICE_NAME], sw_version=new_registration[ATTR_OS_VERSION], ) hass.config_entries.async_update_entry(config_entry, data=new_registration) await hass_notify.async_reload(hass, DOMAIN) return webhook_response( safe_registration(new_registration), registration=new_registration, ) @WEBHOOK_COMMANDS.register("enable_encryption") async def webhook_enable_encryption(hass, config_entry, data): """Handle a encryption enable webhook.""" if config_entry.data[ATTR_SUPPORTS_ENCRYPTION]: _LOGGER.warning( "Refusing to enable encryption for %s because it is already enabled!", config_entry.data[ATTR_DEVICE_NAME], ) return error_response( ERR_ENCRYPTION_ALREADY_ENABLED, "Encryption already enabled" ) if not supports_encryption(): _LOGGER.warning( "Unable to enable encryption for %s because libsodium is unavailable!", config_entry.data[ATTR_DEVICE_NAME], ) return error_response(ERR_ENCRYPTION_NOT_AVAILABLE, "Encryption is unavailable") secret = secrets.token_hex(SecretBox.KEY_SIZE) data = {**config_entry.data, ATTR_SUPPORTS_ENCRYPTION: True, CONF_SECRET: secret} hass.config_entries.async_update_entry(config_entry, data=data) return json_response({"secret": secret}) @WEBHOOK_COMMANDS.register("register_sensor") @validate_schema( { vol.Optional(ATTR_SENSOR_ATTRIBUTES, default={}): dict, vol.Optional(ATTR_SENSOR_DEVICE_CLASS): vol.All( vol.Lower, vol.In(COMBINED_CLASSES) ), vol.Required(ATTR_SENSOR_NAME): cv.string, vol.Required(ATTR_SENSOR_TYPE): 
vol.In(SENSOR_TYPES), vol.Required(ATTR_SENSOR_UNIQUE_ID): cv.string, vol.Optional(ATTR_SENSOR_UOM): cv.string, vol.Optional(ATTR_SENSOR_STATE, default=None): vol.Any( None, bool, str, int, float ), vol.Optional(ATTR_SENSOR_ICON, default="mdi:cellphone"): cv.icon, } ) async def webhook_register_sensor(hass, config_entry, data): """Handle a register sensor webhook.""" entity_type = data[ATTR_SENSOR_TYPE] unique_id = data[ATTR_SENSOR_UNIQUE_ID] device_name = config_entry.data[ATTR_DEVICE_NAME] unique_store_key = f"{config_entry.data[CONF_WEBHOOK_ID]}_{unique_id}" existing_sensor = unique_store_key in hass.data[DOMAIN][entity_type] data[CONF_WEBHOOK_ID] = config_entry.data[CONF_WEBHOOK_ID] # If sensor already is registered, update current state instead if existing_sensor: _LOGGER.debug( "Re-register for %s of existing sensor %s", device_name, unique_id ) entry = hass.data[DOMAIN][entity_type][unique_store_key] data = {**entry, **data} hass.data[DOMAIN][entity_type][unique_store_key] = data hass.data[DOMAIN][DATA_STORE].async_delay_save( lambda: savable_state(hass), DELAY_SAVE ) if existing_sensor: async_dispatcher_send(hass, SIGNAL_SENSOR_UPDATE, data) else: register_signal = f"{DOMAIN}_{data[ATTR_SENSOR_TYPE]}_register" async_dispatcher_send(hass, register_signal, data) return webhook_response( {"success": True}, registration=config_entry.data, status=HTTP_CREATED, ) @WEBHOOK_COMMANDS.register("update_sensor_states") @validate_schema( vol.All( cv.ensure_list, [ # Partial schema, enough to identify schema. # We don't validate everything because otherwise 1 invalid sensor # will invalidate all sensors. vol.Schema( { vol.Required(ATTR_SENSOR_TYPE): vol.In(SENSOR_TYPES), vol.Required(ATTR_SENSOR_UNIQUE_ID): cv.string, }, extra=vol.ALLOW_EXTRA, ) ], ) ) async def webhook_update_sensor_states(hass, config_entry, data): """Handle an update sensor states webhook.""" sensor_schema_full = vol.Schema( { vol.Optional(ATTR_SENSOR_ATTRIBUTES, default={}): dict, vol.Optional(ATTR_SENSOR_ICON, default="mdi:cellphone"): cv.icon, vol.Required(ATTR_SENSOR_STATE): vol.Any(None, bool, str, int, float), vol.Required(ATTR_SENSOR_TYPE): vol.In(SENSOR_TYPES), vol.Required(ATTR_SENSOR_UNIQUE_ID): cv.string, } ) device_name = config_entry.data[ATTR_DEVICE_NAME] resp = {} for sensor in data: entity_type = sensor[ATTR_SENSOR_TYPE] unique_id = sensor[ATTR_SENSOR_UNIQUE_ID] unique_store_key = f"{config_entry.data[CONF_WEBHOOK_ID]}_{unique_id}" if unique_store_key not in hass.data[DOMAIN][entity_type]: _LOGGER.error( "Refusing to update %s non-registered sensor: %s", device_name, unique_store_key, ) err_msg = f"{entity_type} {unique_id} is not registered" resp[unique_id] = { "success": False, "error": {"code": ERR_SENSOR_NOT_REGISTERED, "message": err_msg}, } continue entry = hass.data[DOMAIN][entity_type][unique_store_key] try: sensor = sensor_schema_full(sensor) except vol.Invalid as err: err_msg = vol.humanize.humanize_error(sensor, err) _LOGGER.error( "Received invalid sensor payload from %s for %s: %s", device_name, unique_id, err_msg, ) resp[unique_id] = { "success": False, "error": {"code": ERR_INVALID_FORMAT, "message": err_msg}, } continue new_state = {**entry, **sensor} hass.data[DOMAIN][entity_type][unique_store_key] = new_state async_dispatcher_send(hass, SIGNAL_SENSOR_UPDATE, new_state) resp[unique_id] = {"success": True} hass.data[DOMAIN][DATA_STORE].async_delay_save( lambda: savable_state(hass), DELAY_SAVE ) return webhook_response(resp, registration=config_entry.data) @WEBHOOK_COMMANDS.register("get_zones") 
async def webhook_get_zones(hass, config_entry, data): """Handle a get zones webhook.""" zones = [ hass.states.get(entity_id) for entity_id in sorted(hass.states.async_entity_ids(ZONE_DOMAIN)) ] return webhook_response(zones, registration=config_entry.data) @WEBHOOK_COMMANDS.register("get_config") async def webhook_get_config(hass, config_entry, data): """Handle a get config webhook.""" hass_config = hass.config.as_dict() resp = { "latitude": hass_config["latitude"], "longitude": hass_config["longitude"], "elevation": hass_config["elevation"], "unit_system": hass_config["unit_system"], "location_name": hass_config["location_name"], "time_zone": hass_config["time_zone"], "components": hass_config["components"], "version": hass_config["version"], "theme_color": MANIFEST_JSON["theme_color"], } if CONF_CLOUDHOOK_URL in config_entry.data: resp[CONF_CLOUDHOOK_URL] = config_entry.data[CONF_CLOUDHOOK_URL] try: resp[CONF_REMOTE_UI_URL] = hass.components.cloud.async_remote_ui_url() except hass.components.cloud.CloudNotAvailable: pass return webhook_response(resp, registration=config_entry.data) @WEBHOOK_COMMANDS.register("scan_tag") @validate_schema({vol.Required("tag_id"): cv.string}) async def webhook_scan_tag(hass, config_entry, data): """Handle a fire event webhook.""" await tag.async_scan_tag( hass, data["tag_id"], config_entry.data[ATTR_DEVICE_ID], registration_context(config_entry.data), ) return empty_okay_response()
tchellomello/home-assistant
homeassistant/components/mobile_app/webhook.py
Python
apache-2.0
18,676
0.00075
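The two handlers above consume the mobile_app webhook payload format, a JSON body of the form {"type": <command>, "data": ...} posted to the integration's webhook URL. A hedged sketch of the sensor payloads a client might send; the host, webhook id, and field values are placeholders, and the exact set of required registration fields is defined by the (partially shown) registration schema:

import requests

# Placeholder webhook endpoint issued when the device was registered.
WEBHOOK_URL = "http://homeassistant.local:8123/api/webhook/abcdef123456"

# Handled by webhook_register_sensor above.
requests.post(WEBHOOK_URL, json={
    "type": "register_sensor",
    "data": {
        "type": "sensor",
        "unique_id": "battery_level",
        "name": "Battery Level",
        "state": 87,
        "unit_of_measurement": "%",
        "icon": "mdi:battery",
    },
})

# Handled by webhook_update_sensor_states above; unregistered ids are rejected
# with ERR_SENSOR_NOT_REGISTERED.
requests.post(WEBHOOK_URL, json={
    "type": "update_sensor_states",
    "data": [
        {"type": "sensor", "unique_id": "battery_level", "state": 82, "icon": "mdi:battery-80"},
    ],
})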
#!/usr/bin/python2.7 # -*- encoding=utf-8 -*- from argparse import ArgumentParser, RawTextHelpFormatter import codecs import gevent from gevent import monkey import json from types import UnicodeType from crawlers import Crawler from crawlers.local.static import get_election_type_name from utils import check_dir def print_json(filename, data): with open(filename, 'w') as f: json.dump(data, f, encoding="UTF-8", indent=2) def print_csv(filename, data): def transform(txt): if isinstance(txt, int): txt = str(txt) if isinstance(txt, list): txt = '||'.join(txt) txt = txt.replace(',', '|') if isinstance(txt, UnicodeType): txt = txt.encode('utf8') return txt attrs = ['assembly_no', 'district', 'cand_no', 'party', 'name_kr', 'name_cn', 'sex', 'birthyear', 'birthmonth', 'birthday', 'address', 'job', 'education', 'experience', 'recommend_priority', 'votenum', 'voterate', 'elected'] with open(filename, 'w') as f: f.write(codecs.BOM_UTF8) f.write(','.join(attrs)) f.write('\n') for cand in data: values = (cand[attr] if attr in cand else '' for attr in attrs) values = (transform(value) for value in values) f.write(','.join(values)) f.write('\n') def crawl(target, _type, nth, printer, filename, level=None): crawler = Crawler(target, _type, nth, level) cand_list = crawler.crawl() printer(filename, cand_list) def create_parser(): parser = ArgumentParser(formatter_class=RawTextHelpFormatter) parser.add_argument('target', choices=['assembly', 'local', 'president'],\ help="name of target election") parser.add_argument('type', choices=['candidates', 'elected', 'precandidates'], help="type of person") parser.add_argument('start', help="starting election id", type=float) parser.add_argument('end', help="ending election id", type=float,\ nargs='?', default=None) parser.add_argument('-t', '--test', dest='test', action='store_true', help="assign datatype to csv instead of json") parser.add_argument('-d', dest='directory', help="specify data directory") # TODO: change to subparser parser.add_argument('-l', choices=['pg', 'pm', 'pp', 'mg', 'mm', 'mp', 'eg', 'em'], dest="level", help="specify level for local elections.\n" "- 1st char: {p:province, m:municipality, e:education},\n" "- 2nd char: {g: governor, m: member}") return parser def main(args): printer = print_csv if args.test else print_json filetype = 'csv' if args.test else 'json' datadir = args.directory if args.directory else '.' 
check_dir(datadir) if args.target=='local': if args.end: jobs = [] args.level = get_election_type_name(args.level) for n in xrange(args.start, args.end+1): filename = '%s/%s-%s-%s-%d.%s'\ % (datadir, args.target, args.level, args.type, n, filetype) job = gevent.spawn(crawl, target=args.target, level=args.level,\ _type=args.type, nth=n, filename=filename, printer=printer) jobs.append(job) gevent.joinall(jobs) else: n = args.start args.level = get_election_type_name(args.level) filename = '%s/%s-%s-%s-%.01f.%s' %\ (datadir, args.target, args.level, args.type, n, filetype) crawl(target=args.target, level=args.level, _type=args.type, nth=n,\ filename=filename, printer=printer) else: if args.end: jobs = [] for n in xrange(args.start, args.end+1): filename = '%s/%s-%s-%d.%s'\ % (datadir, args.target, args.type, n, filetype) job = gevent.spawn(crawl, target=args.target, _type=args.type, nth=n,\ filename=filename, printer=printer) jobs.append(job) gevent.joinall(jobs) else: n = args.start filename = '%s/%s-%s-%.01f.%s' %\ (datadir, args.target, args.type, n, filetype) crawl(target=args.target, _type=args.type, nth=n,\ filename=filename, printer=printer) print 'Data written to %s' % filename if __name__ == '__main__': monkey.patch_all() parser = create_parser() args = parser.parse_args() main(args)
teampopong/crawlers
election_commission/main.py
Python
agpl-3.0
4,602
0.007388
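The crawler above is a Python 2 command-line tool driven entirely by its argparse front end. A small sketch of invoking it programmatically under the same interpreter; the election number and output directory are made up, and the repo's crawlers package must be importable:

# Equivalent to: python2 main.py assembly elected 19 -d data
from main import create_parser, main as run_crawler

parser = create_parser()
args = parser.parse_args(['assembly', 'elected', '19', '-d', 'data'])
run_crawler(args)   # writes data/assembly-elected-19.0.json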
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # This is a virtual module that is entirely implemented as an action plugin and runs on the controller from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'core'} DOCUMENTATION = r''' --- module: template version_added: historical options: follow: description: - Determine whether symbolic links should be followed. - When set to C(yes) symbolic links will be followed, if they exist. - When set to C(no) symbolic links will not be followed. - Previous to Ansible 2.4, this was hardcoded as C(yes). type: bool default: no version_added: '2.4' notes: - You can use the M(copy) module with the C(content:) option if you prefer the template inline, as part of the playbook. - For Windows you can use M(win_template) which uses '\\r\\n' as C(newline_sequence) by default. seealso: - module: copy - module: win_copy - module: win_template author: - Ansible Core Team - Michael DeHaan extends_documentation_fragment: - backup - files - template_common - validate ''' EXAMPLES = r''' - name: Template a file to /etc/files.conf template: src: /mytemplates/foo.j2 dest: /etc/file.conf owner: bin group: wheel mode: '0644' - name: Template a file, using symbolic modes (equivalent to 0644) template: src: /mytemplates/foo.j2 dest: /etc/file.conf owner: bin group: wheel mode: u=rw,g=r,o=r - name: Copy a version of named.conf that is dependent on the OS. setype obtained by doing ls -Z /etc/named.conf on original file template: src: named.conf_{{ ansible_os_family}}.j2 dest: /etc/named.conf group: named setype: named_conf_t mode: 0640 - name: Create a DOS-style text file from a template template: src: config.ini.j2 dest: /share/windows/config.ini newline_sequence: '\r\n' - name: Copy a new sudoers file into place, after passing validation with visudo template: src: /mine/sudoers dest: /etc/sudoers validate: /usr/sbin/visudo -cf %s - name: Update sshd configuration safely, avoid locking yourself out template: src: etc/ssh/sshd_config.j2 dest: /etc/ssh/sshd_config owner: root group: root mode: '0600' validate: /usr/sbin/sshd -t -f %s backup: yes '''
Dhivyap/ansible
lib/ansible/modules/files/template.py
Python
gpl-3.0
2,564
0.00234
import os


def run(name='test1.py'):
    # Join with the current working directory; plain concatenation
    # (os.getcwd() + name) drops the path separator for relative names.
    filename = os.path.join(os.getcwd(), name)
    exec(compile(open(filename).read(), filename, 'exec'))
karljakoblarsson/Rattan-Geometry
Utils.py
Python
mit
130
0.007692
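A one-line usage sketch for the helper above; the script name is hypothetical:

from Utils import run

run('test1.py')   # compile and execute ./test1.py from the current working directory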
from selenium.webdriver.common.by import By

from SeleniumPythonFramework.src.main.Pages.CommonPage import CommonPage

# Production locations
TRY_TEXT = {"by": By.ID, "locator": "url-input"}
TRY_BUTTON = {"by": By.ID, "locator": "get-data"}
PATH = ""


class HomePage(CommonPage):
    def __init__(self, **kwargs):
        super(HomePage, self).__init__(page_url=PATH, **kwargs)

    def try_url_text(self):
        return self.get_element(TRY_TEXT)

    def try_url_button(self):
        return self.get_element(TRY_BUTTON)

    def try_url(self, url):
        self.try_url_text().send_keys(url)
        try_button = self.try_url_button()
        with self.wait_for_page_load:
            try_button.click()
GinoGalotti/python-selenium-utils
SeleniumPythonFramework/src/main/Pages/HomePage.py
Python
apache-2.0
710
0
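A minimal sketch of driving the page object above. CommonPage is not shown here, so passing the WebDriver as a keyword argument is an assumption about its constructor, as is the target URL:

from selenium import webdriver

from SeleniumPythonFramework.src.main.Pages.HomePage import HomePage

driver = webdriver.Chrome()
try:
    home = HomePage(driver=driver)        # kwargs are forwarded to CommonPage
    home.try_url("https://example.com")   # types the URL and clicks the "get data" button
finally:
    driver.quit()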
# Encas Sales Management Server
# Copyright 2013 - Hugo Caille
#
# This file is part of Encas.
#
# Encas is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Encas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Encas. If not, see <http://www.gnu.org/licenses/>.


from functools import wraps
from flask import jsonify
from sqlalchemy.exc import OperationalError


class ApiError(Exception):
    def __init__(self, reason):
        self.reason = reason

    def __str__(self):
        return repr(self.reason)

    def serialize(self):
        return {'error' : True, 'reason' : self.reason}


class MissingFieldsError(Exception):
    def __init__(self, fields):
        self.fields = fields
        self.fields.sort()

    def reason(self):
        fields_len = len(self.fields)
        i = 0
        msg = "Missing fields: "
        for field in self.fields:
            msg += str(field)
            if i < fields_len - 1:
                msg += ", "
            else:
                msg += "."
            i += 1
        return msg

    def __str__(self):
        return self.reason()

    def serialize(self):
        return {'error' : True, 'reason' : self.reason()}


def errorhandler(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            data = func(*args, **kwargs)
            result = {'error' : False}
            if data is not None:
                result['data'] = data
            return jsonify(result)
        except MissingFieldsError as e:
            return jsonify(e.serialize())
        except ApiError as e:
            return jsonify(e.serialize())
        except OperationalError as e:
            return jsonify({'error' : True, 'reason' : "Cannot access database"})
        except ValueError:
            return jsonify({'error' : True, 'reason' : "Invalid input"})
    return wrapper
hugoatease/encas
errors.py
Python
gpl-3.0
2,359
0.010598
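A sketch of how the decorator and exception classes above would typically be used in a Flask view; the route, fields, and validation rule are illustrative:

from flask import Flask, request

from errors import ApiError, MissingFieldsError, errorhandler

app = Flask(__name__)


@app.route('/account', methods=['POST'])
@errorhandler
def create_account():
    data = request.get_json() or {}
    missing = [field for field in ('name', 'number') if field not in data]
    if missing:
        raise MissingFieldsError(missing)          # -> {"error": true, "reason": "Missing fields: ..."}
    if len(str(data['number'])) > 6:
        raise ApiError("Account number too long")  # -> {"error": true, "reason": "..."}
    return data                                    # -> {"error": false, "data": {...}}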
from __future__ import print_function import os import sys import subprocess import pkg_resources try: import pkg_resources _has_pkg_resources = True except: _has_pkg_resources = False try: import svn.local _has_svn_local = True except: _has_svn_local = False def test_helper(): return "test helper text" def dict_to_str(d): """ Given a dictionary d, return a string with each entry in the form 'key: value' and entries separated by newlines. """ vals = [] for k in d.keys(): vals.append('{}: {}'.format(k, d[k])) v = '\n'.join(vals) return v def module_version(module, label=None): """ Helper function for getting the module ("module") in the current namespace and their versions. The optional argument 'label' allows you to set the string used as the dictionary key in the returned dictionary. By default the key is '[module] version'. """ if not _has_pkg_resources: return {} version = pkg_resources.get_distribution(module).version if label: k = '{}'.format(label) else: k = '{} version'.format(module) return {k: '{}'.format(version)} def file_contents(filename, label=None): """ Helper function for getting the contents of a file, provided the filename. Returns a dictionary keyed (by default) with the filename where the value is a string containing the contents of the file. The optional argument 'label' allows you to set the string used as the dictionary key in the returned dictionary. """ if not os.path.isfile(filename): print('ERROR: {} NOT FOUND.'.format(filename)) return {} else: fin = open(filename, 'r') contents = '' for l in fin: contents += l if label: d = {'{}'.format(label): contents} else: d = {filename: contents} return d def svn_information(svndir=None, label=None): """ Helper function for obtaining the SVN repository information for the current directory (default) or the directory supplied in the svndir argument. Returns a dictionary keyed (by default) as 'SVN INFO' where the value is a string containing essentially what is returned by 'svn info'. The optional argument 'label' allows you to set the string used as the dictionary key in the returned dictionary. """ if not _has_svn_local: print('SVN information unavailable.') print('You do not have the "svn" package installed.') print('Install "svn" from pip using "pip install svn"') return {} if svndir: repo = svn.local.LocalClient(svndir) else: repo = svn.local.LocalClient(os.getcwd()) try: # Get a dictionary of the SVN repository information info = repo.info() except: print('ERROR: WORKING DIRECTORY NOT AN SVN REPOSITORY.') return {} v = dict_to_str(info) if label: k = '{}'.format(label) else: k = 'SVN INFO' return {k: v} def get_git_hash(gitpath=None, label=None): """ Helper function for obtaining the git repository hash. for the current directory (default) or the directory supplied in the gitpath argument. Returns a dictionary keyed (by default) as 'GIT HASH' where the value is a string containing essentially what is returned by subprocess. The optional argument 'label' allows you to set the string used as the dictionary key in the returned dictionary. """ if gitpath: thisdir = os.getcwd() os.chdir(gitpath) try: sha = subprocess.check_output(['git','rev-parse','HEAD'],shell=False).strip() except subprocess.CalledProcessError as e: print("ERROR: WORKING DIRECTORY NOT A GIT REPOSITORY") return {} if label: l = '{}'.format(label) else: l = 'GIT HASH' return {l:sha} def get_source_code(scode,sourcepath=None, label=None): """ Helper function for obtaining the source code. 
for the current directory (default) or the directory supplied in the sourcepath argument. Returns a dictionary keyed (by default) as 'source code' where the value is a string containing the source code. The optional argument 'label' allows you to set the string used as the dictionary key in the returned dictionary. """ if sourcepath: os.chdir(sourcepath) if not os.path.isfile(scode): print('ERROR: {} NOT FOUND.'.format(scode)) return {} else: with open(scode,'r') as f: s = f.read() if label: n = {'{}'.format(label):s} else: n = {'source code':s} return n
MetaPlot/MetaPlot
metaplot/helpers.py
Python
mit
4,900
0.008776
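A short sketch of combining the provenance helpers above into one metadata record; it assumes the package is importable as metaplot.helpers, numpy is installed, and the working directory is a git checkout containing the named config file:

from metaplot import helpers

meta = {}
meta.update(helpers.module_version('numpy'))              # {'numpy version': '...'}
meta.update(helpers.get_git_hash(label='analysis repo'))  # {'analysis repo': '<sha>'}
meta.update(helpers.file_contents('run_config.yaml', label='config'))

print(helpers.dict_to_str(meta))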
""" TAGME implementation @author: Faegheh Hasibi ([email protected]) """ import argparse import math from nordlys.config import OUTPUT_DIR from nordlys.tagme import config from nordlys.tagme import test_coll from nordlys.tagme.query import Query from nordlys.tagme.mention import Mention from nordlys.tagme.lucene_tools import Lucene ENTITY_INDEX = Lucene(config.INDEX_PATH) ANNOT_INDEX = Lucene(config.INDEX_ANNOT_PATH, use_ram=True) # ENTITY_INDEX = IndexCache("/data/wikipedia-indices/20120502-index1") # ANNOT_INDEX = IndexCache("/data/wikipedia-indices/20120502-index1-annot/", use_ram=True) ENTITY_INDEX.open_searcher() ANNOT_INDEX.open_searcher() class Tagme(object): DEBUG = 0 def __init__(self, query, rho_th, sf_source="wiki"): self.query = query self.rho_th = rho_th self.sf_source = sf_source # TAMGE params self.link_prob_th = 0.001 self.cmn_th = 0.02 self.k_th = 0.3 self.link_probs = {} self.in_links = {} self.rel_scores = {} # dictionary {men: {en: rel_score, ...}, ...} self.disamb_ens = {} def parse(self): """ Parses the query and returns all candidate mention-entity pairs. :return: candidate entities {men:{en:cmn, ...}, ...} """ ens = {} for ngram in self.query.get_ngrams(): mention = Mention(ngram) # performs mention filtering (based on the paper) if (len(ngram) == 1) or (ngram.isdigit()) or (mention.wiki_occurrences < 2) or (len(ngram.split()) > 6): continue link_prob = self.__get_link_prob(mention) if link_prob < self.link_prob_th: continue # These mentions will be kept self.link_probs[ngram] = link_prob # Filters entities by cmn threshold 0.001; this was only in TAGME source code and speeds up the process. # TAGME source code: it.acubelab.tagme.anchor (lines 279-284) ens[ngram] = mention.get_men_candidate_ens(0.001) # filters containment mentions (based on paper) candidate_entities = {} sorted_mentions = sorted(ens.keys(), key=lambda item: len(item.split())) # sorts by mention length for i in range(0, len(sorted_mentions)): m_i = sorted_mentions[i] ignore_m_i = False for j in range(i+1, len(sorted_mentions)): m_j = sorted_mentions[j] if (m_i in m_j) and (self.link_probs[m_i] < self.link_probs[m_j]): ignore_m_i = True break if not ignore_m_i: candidate_entities[m_i] = ens[m_i] return candidate_entities def disambiguate(self, candidate_entities): """ Performs disambiguation and link each mention to a single entity. 
:param candidate_entities: {men:{en:cmn, ...}, ...} :return: disambiguated entities {men:en, ...} """ # Gets the relevance score rel_scores = {} for m_i in candidate_entities.keys(): if self.DEBUG: print "********************", m_i, "********************" rel_scores[m_i] = {} for e_m_i in candidate_entities[m_i].keys(): if self.DEBUG: print "-- ", e_m_i rel_scores[m_i][e_m_i] = 0 for m_j in candidate_entities.keys(): # all other mentions if (m_i == m_j) or (len(candidate_entities[m_j].keys()) == 0): continue vote_e_m_j = self.__get_vote(e_m_i, candidate_entities[m_j]) rel_scores[m_i][e_m_i] += vote_e_m_j if self.DEBUG: print m_j, vote_e_m_j # pruning uncommon entities (based on the paper) self.rel_scores = {} for m_i in rel_scores: for e_m_i in rel_scores[m_i]: cmn = candidate_entities[m_i][e_m_i] if cmn >= self.cmn_th: if m_i not in self.rel_scores: self.rel_scores[m_i] = {} self.rel_scores[m_i][e_m_i] = rel_scores[m_i][e_m_i] # DT pruning disamb_ens = {} for m_i in self.rel_scores: if len(self.rel_scores[m_i].keys()) == 0: continue top_k_ens = self.__get_top_k(m_i) best_cmn = 0 best_en = None for en in top_k_ens: cmn = candidate_entities[m_i][en] if cmn >= best_cmn: best_en = en best_cmn = cmn disamb_ens[m_i] = best_en return disamb_ens def prune(self, dismab_ens): """ Performs AVG pruning. :param dismab_ens: {men: en, ... } :return: {men: (en, score), ...} """ linked_ens = {} for men, en in dismab_ens.iteritems(): coh_score = self.__get_coherence_score(men, en, dismab_ens) rho_score = (self.link_probs[men] + coh_score) / 2.0 if rho_score >= self.rho_th: linked_ens[men] = (en, rho_score) return linked_ens def __get_link_prob(self, mention): """ Gets link probability for the given mention. Here, in fact, we are computing key-phraseness. """ pq = ENTITY_INDEX.get_phrase_query(mention.text, Lucene.FIELDNAME_CONTENTS) mention_freq = ENTITY_INDEX.searcher.search(pq, 1).totalHits if mention_freq == 0: return 0 if self.sf_source == "wiki": link_prob = mention.wiki_occurrences / float(mention_freq) # This is TAGME implementation, from source code: # link_prob = float(mention.wiki_occurrences) / max(mention_freq, mention.wiki_occurrences) elif self.sf_source == "facc": link_prob = mention.facc_occurrences / float(mention_freq) return link_prob def __get_vote(self, entity, men_cand_ens): """ vote_e = sum_e_i(mw_rel(e, e_i) * cmn(e_i)) / i :param entity: en :param men_cand_ens: {en: cmn, ...} :return: voting score """ entity = entity if self.sf_source == "wiki" else entity[0] vote = 0 for e_i, cmn in men_cand_ens.iteritems(): e_i = e_i if self.sf_source == "wiki" else e_i[0] mw_rel = self.__get_mw_rel(entity, e_i) # print "\t", e_i, "cmn:", cmn, "mw_rel:", mw_rel vote += cmn * mw_rel vote /= float(len(men_cand_ens)) return vote def __get_mw_rel(self, e1, e2): """ Calculates Milne & Witten relatedness for two entities. This implementation is based on Dexter implementation (which is similar to TAGME implementation). 
- Dexter implementation: https://github.com/dexter/dexter/blob/master/dexter-core/src/main/java/it/cnr/isti/hpc/dexter/relatedness/MilneRelatedness.java - TAGME: it.acubelab.tagme.preprocessing.graphs.OnTheFlyArrayMeasure """ if e1 == e2: # to speed-up return 1.0 en_uris = tuple(sorted({e1, e2})) ens_in_links = [self.__get_in_links([en_uri]) for en_uri in en_uris] if min(ens_in_links) == 0: return 0 conj = self.__get_in_links(en_uris) if conj == 0: return 0 numerator = math.log(max(ens_in_links)) - math.log(conj) denominator = math.log(ANNOT_INDEX.num_docs()) - math.log(min(ens_in_links)) rel = 1 - (numerator / denominator) if rel < 0: return 0 return rel def __get_in_links(self, en_uris): """ returns "and" occurrences of entities in the corpus. :param en_uris: list of dbp_uris """ en_uris = tuple(sorted(set(en_uris))) if en_uris in self.in_links: return self.in_links[en_uris] term_queries = [] for en_uri in en_uris: term_queries.append(ANNOT_INDEX.get_id_lookup_query(en_uri, Lucene.FIELDNAME_CONTENTS)) and_query = ANNOT_INDEX.get_and_query(term_queries) self.in_links[en_uris] = ANNOT_INDEX.searcher.search(and_query, 1).totalHits return self.in_links[en_uris] def __get_coherence_score(self, men, en, dismab_ens): """ coherence_score = sum_e_i(rel(e_i, en)) / len(ens) - 1 :param en: entity :param dismab_ens: {men: (dbp_uri, fb_id), ....} """ coh_score = 0 for m_i, e_i in dismab_ens.iteritems(): if m_i == men: continue coh_score += self.__get_mw_rel(e_i, en) coh_score = coh_score / float(len(dismab_ens.keys()) - 1) if len(dismab_ens.keys()) - 1 != 0 else 0 return coh_score def __get_top_k(self, mention): """Returns top-k percent of the entities based on rel score.""" k = int(round(len(self.rel_scores[mention].keys()) * self.k_th)) k = 1 if k == 0 else k sorted_rel_scores = sorted(self.rel_scores[mention].items(), key=lambda item: item[1], reverse=True) top_k_ens = [] count = 1 prev_rel_score = sorted_rel_scores[0][1] for en, rel_score in sorted_rel_scores: if rel_score != prev_rel_score: count += 1 if count > k: break top_k_ens.append(en) prev_rel_score = rel_score return top_k_ens def main(): parser = argparse.ArgumentParser() parser.add_argument("-th", "--threshold", help="score threshold", type=float, default=0) parser.add_argument("-data", help="Data set name", choices=['y-erd', 'erd-dev', 'wiki-annot30', 'wiki-disamb30']) args = parser.parse_args() if args.data == "erd-dev": queries = test_coll.read_erd_queries() elif args.data == "y-erd": queries = test_coll.read_yerd_queries() elif args.data == "wiki-annot30": queries = test_coll.read_tagme_queries(config.WIKI_ANNOT30_SNIPPET) elif args.data == "wiki-disamb30": queries = test_coll.read_tagme_queries(config.WIKI_DISAMB30_SNIPPET) out_file_name = OUTPUT_DIR + "/" + args.data + "_tagme_wiki10.txt" open(out_file_name, "w").close() out_file = open(out_file_name, "a") # process the queries for qid, query in sorted(queries.items(), key=lambda item: int(item[0]) if item[0].isdigit() else item[0]): print "[" + qid + "]", query tagme = Tagme(Query(qid, query), args.threshold) print " parsing ..." cand_ens = tagme.parse() print " disambiguation ..." disamb_ens = tagme.disambiguate(cand_ens) print " pruning ..." linked_ens = tagme.prune(disamb_ens) out_str = "" for men, (en, score) in linked_ens.iteritems(): out_str += str(qid) + "\t" + str(score) + "\t" + en + "\t" + men + "\tpage-id" + "\n" print out_str, "-----------\n" out_file.write(out_str) print "output:", out_file_name if __name__ == "__main__": main()
hasibi/TAGME-Reproducibility
nordlys/tagme/tagme.py
Python
mit
11,198
0.001875
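The end-to-end flow in the module above is parse, then disambiguate, then prune. A hedged sketch of that pipeline under Python 2, assuming the Wikipedia and annotation Lucene indices configured in nordlys.tagme.config are available; the query id and text are made up:

from nordlys.tagme.query import Query
from nordlys.tagme.tagme import Tagme

tagme = Tagme(Query("1", "obama visits the white house"), rho_th=0.1)
cand_ens = tagme.parse()                    # {mention: {entity: commonness, ...}, ...}
disamb_ens = tagme.disambiguate(cand_ens)   # {mention: entity, ...}
linked_ens = tagme.prune(disamb_ens)        # {mention: (entity, rho_score), ...}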
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # this is a windows documentation stub. actual code lives in the .ps1 # file of the same name ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: win_xml version_added: "2.7" short_description: Add XML fragment to an XML parent description: - Adds XML fragments formatted as strings to existing XML on remote servers. - For non-Windows targets, use the M(xml) module instead. options: path: description: - The path of remote servers XML. type: path required: true aliases: [ dest, file ] fragment: description: - The string representation of the XML fragment to be added. type: str required: true aliases: [ xmlstring ] xpath: description: - The node of the remote server XML where the fragment will go. type: str required: true backup: description: - Determine whether a backup should be created. - When set to C(yes), create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly. type: bool default: no type: description: - The type of XML you are working with. type: str required: yes default: element choices: [ attribute, element, text ] attribute: description: - The attribute name if the type is 'attribute'. - Required if C(type=attribute). type: str author: - Richard Levenberg (@richardcs) ''' EXAMPLES = r''' - name: Apply our filter to Tomcat web.xml win_xml: path: C:\apache-tomcat\webapps\myapp\WEB-INF\web.xml fragment: '<filter><filter-name>MyFilter</filter-name><filter-class>com.example.MyFilter</filter-class></filter>' xpath: '/*' - name: Apply sslEnabledProtocols to Tomcat's server.xml win_xml: path: C:\Tomcat\conf\server.xml xpath: '//Server/Service[@name="Catalina"]/Connector[@port="9443"]' attribute: 'sslEnabledProtocols' fragment: 'TLSv1,TLSv1.1,TLSv1.2' type: attribute ''' RETURN = r''' backup_file: description: Name of the backup file that was created. returned: if backup=yes type: str sample: C:\Path\To\File.txt.11540.20150212-220915.bak msg: description: What was done. returned: always type: str sample: "xml added" err: description: XML comparison exceptions. returned: always, for type element and -vvv or more type: list sample: attribute mismatch for actual=string '''
alxgu/ansible
lib/ansible/modules/windows/win_xml.py
Python
gpl-3.0
2,841
0.00176
import functools import re import pytest from autoray import do, lazy, to_numpy, infer_backend, get_dtype_name, astype from numpy.testing import assert_allclose from .test_autoray import BACKENDS, gen_rand def test_manual_construct(): def foo(a, b, c): a1, a2 = a b1 = b['1'] c1, c2 = c['sub'] return do('sum', do('stack', (a1, a2, b1, c1, c2)), axis=0) x = do('random.uniform', size=(5, 7), like='numpy') x0 = lazy.array(x[0, :]) x1 = lazy.array(x[1, :]) x2 = lazy.array(x[2, :]) x3 = lazy.array(x[3, :]) x4 = lazy.array(x[4, :]) y = lazy.LazyArray( backend=infer_backend(x), fn=foo, args=((x0, x1), {'1': x2}), kwargs=dict(c={'sub': (x3, x4)}), shape=(7,), dtype='float64', ) assert y.deps == (x0, x1, x2, x3, x4) assert re.match( r'x\d+ = foo\d+\(\(x\d+, x\d+,\), ' r'{1: x\d+}, c: {sub: \(x\d+, x\d+,\)}\)', y.get_source() ) assert_allclose(y.compute(), x.sum(0)) def modified_gram_schmidt(X): Q = [] for j in range(0, X.shape[0]): q = X[j, :] for i in range(0, j): rij = do("tensordot", do("conj", Q[i]), q, axes=1) q = q - rij * Q[i] rjj = do("linalg.norm", q, 2) Q.append(q / rjj) return do("stack", tuple(Q), axis=0) def wrap_strict_check(larray): fn_orig = larray._fn @functools.wraps(fn_orig) def checked(*args, **kwargs): data = fn_orig(*args, **kwargs) assert tuple(data.shape) == larray.shape assert get_dtype_name(data) == larray.dtype assert infer_backend(data) == larray.backend return data return checked def make_strict(larray): for node in larray: larray._fn = wrap_strict_check(larray) @pytest.mark.parametrize("backend", BACKENDS) def test_lazy_mgs(backend): if backend == "sparse": pytest.xfail("Sparse doesn't support 'linalg.norm' yet...") x = gen_rand((5, 5), backend) lx = lazy.array(x) ly = modified_gram_schmidt(lx) make_strict(ly) assert str(ly) == ( f"<LazyArray(fn=stack, shape=(5, 5), " f"dtype=float64, backend='{backend}')>" ) assert isinstance(ly, lazy.LazyArray) hmax = ly.history_max_size() hpeak = ly.history_peak_size() htot = ly.history_total_size() assert hmax == 25 assert 25 < hpeak < htot assert len(tuple(ly)) == 57 assert len({node.fn_name for node in ly}) == 9 assert_allclose(to_numpy(ly.compute()), to_numpy(modified_gram_schmidt(x))) with lazy.shared_intermediates(): ly = modified_gram_schmidt(lx) make_strict(ly) assert len(tuple(ly)) == 51 assert len({node.fn_name for node in ly}) == 9 assert_allclose(to_numpy(ly.compute()), to_numpy(modified_gram_schmidt(x))) def test_partial_evaluation(): la = lazy.array(gen_rand((10, 10), "numpy")) lb = lazy.array(gen_rand((10, 10), "numpy")) lc = lazy.array(gen_rand((10, 10), "numpy")) ld = lazy.array(gen_rand((10, 10), "numpy")) lab = do("tanh", la @ lb) lcd = lc @ ld ls = lab + lcd ld = do("abs", lab / lcd) le = do("einsum", "ab,ba->a", ls, ld) lf = do("sum", le) make_strict(lf) assert len(tuple(lf)) == 12 lf.compute_constants(variables=[lc, ld]) # constants = [la, lb] assert len(tuple(lf)) == 9 assert "tanh" not in {node.fn_name for node in lf} lf.compute() def test_plot(): import matplotlib matplotlib.use("Template") la = lazy.array(gen_rand((10, 10), "numpy")) lb = lazy.array(gen_rand((10, 10), "numpy")) lc = lazy.array(gen_rand((10, 10), "numpy")) ld = lazy.array(gen_rand((10, 10), "numpy")) lab = do("tanh", la @ lb) lcd = lc @ ld ls = lab + lcd ld = do("abs", lab / lcd) le = do("einsum", "ab,ba->a", ls, ld) lf = do("sum", le) lf.plot() lf.plot(variables=[lc, ld]) lf.plot_history_size_footprint() def test_share_intermediates(): la = lazy.array(gen_rand((10, 10), "numpy")) lb = lazy.array(gen_rand((10, 10), "numpy")) l1 = do("tanh", 
la @ lb) l2 = do("tanh", la @ lb) ly = l1 + l2 assert len(tuple(ly)) == 7 y1 = ly.compute() with lazy.shared_intermediates(): l1 = do("tanh", la @ lb) l2 = do("tanh", la @ lb) ly = l1 + l2 assert len(tuple(ly)) == 5 y2 = ly.compute() assert_allclose(y1, y2) @pytest.mark.parametrize("backend", BACKENDS) def test_transpose_chain(backend): lx = lazy.array(gen_rand((2, 3, 4, 5, 6), backend)) l1 = do("transpose", lx, (1, 0, 3, 2, 4)) l2 = do("transpose", l1, (1, 0, 3, 2, 4)) assert l2.args[0] is lx assert l2.deps == (lx,) assert len(tuple(l1)) == 2 assert len(tuple(l2)) == 2 assert_allclose( to_numpy(lx.compute()), to_numpy(l2.compute()), ) @pytest.mark.parametrize("backend", BACKENDS) def test_reshape_chain(backend): lx = lazy.array(gen_rand((2, 3, 4, 5, 6), backend)) l1 = do("reshape", lx, (6, 4, 30)) l2 = do("reshape", l1, (-1,)) assert len(tuple(l1)) == 2 assert len(tuple(l2)) == 2 assert l2.args[0] is lx assert l2.deps == (lx,) assert_allclose( to_numpy(lx.compute()).flatten(), to_numpy(l2.compute()), ) @pytest.mark.parametrize("backend", BACKENDS) @pytest.mark.parametrize("dtype", ["float64", "complex128"]) def test_svd(backend, dtype): if backend == "sparse": pytest.xfail("Sparse doesn't support 'linalg.svd' yet...") x = lazy.array(gen_rand((4, 5), backend, dtype)) U, s, VH = do("linalg.svd", x) assert U.shape == (4, 4) assert s.shape == (4,) assert VH.shape == (4, 5) s = astype(s, dtype) ly = U @ (do("reshape", s, (-1, 1)) * VH) make_strict(ly) assert_allclose( to_numpy(x.compute()), to_numpy(ly.compute()), ) @pytest.mark.parametrize("backend", BACKENDS) def test_qr(backend): if backend == "sparse": pytest.xfail("Sparse doesn't support 'linalg.qr' yet...") x = lazy.array(gen_rand((4, 5), backend)) Q, R = do("linalg.qr", x) assert Q.shape == (4, 4) assert R.shape == (4, 5) ly = Q @ R make_strict(ly) assert_allclose( to_numpy(x.compute()), to_numpy(ly.compute()), ) @pytest.mark.parametrize("backend", BACKENDS) @pytest.mark.parametrize("dtype", ["float64", "complex128"]) def test_eig_inv(backend, dtype): if backend in ("cupy", "dask", "torch", "mars", "sparse"): pytest.xfail(f"{backend} doesn't support 'linalg.eig' yet...") # N.B. 
the prob that a real gaussian matrix has all real eigenvalues is # ``2**(-d * (d - 1) / 4)`` - see Edelman 1997 - so need ``d >> 5`` d = 20 x = lazy.array(gen_rand((d, d), backend, dtype)) el, ev = do("linalg.eig", x) assert el.shape == (d,) assert ev.shape == (d, d) ly = ev @ (do("reshape", el, (-1, 1)) * do("linalg.inv", ev)) make_strict(ly) assert_allclose( to_numpy(x.compute()), to_numpy(ly.compute()), ) @pytest.mark.parametrize("backend", BACKENDS) @pytest.mark.parametrize("dtype", ["float64", "complex128"]) def test_eigh(backend, dtype): if backend in ("dask", "mars", "sparse",): pytest.xfail(f"{backend} doesn't support 'linalg.eig' yet...") x = lazy.array(gen_rand((5, 5), backend, dtype)) x = x + x.H el, ev = do("linalg.eigh", x) assert get_dtype_name(ev) == dtype assert el.shape == (5,) assert ev.shape == (5, 5) ly = ev @ (do("reshape", el, (-1, 1)) * ev.H) make_strict(ly) assert_allclose( to_numpy(x.compute()), to_numpy(ly.compute()), ) @pytest.mark.parametrize("backend", BACKENDS) @pytest.mark.parametrize("dtype", ["float64", "complex128"]) def test_cholesky(backend, dtype): if backend in ("sparse",): pytest.xfail(f"{backend} doesn't support 'linalg.cholesky' yet...") x = lazy.array(gen_rand((5, 5), backend, dtype)) x = x @ x.H C = do("linalg.cholesky", x) assert C.shape == (5, 5) ly = C @ C.H make_strict(ly) assert_allclose( to_numpy(x.compute()), to_numpy(ly.compute()), ) @pytest.mark.parametrize("backend", BACKENDS) @pytest.mark.parametrize("dtype", ["float64", "complex128"]) def test_solve(backend, dtype): if backend in ("sparse",): pytest.xfail(f"{backend} doesn't support 'linalg.solve' yet...") A = lazy.array(gen_rand((5, 5), backend, dtype)) y = lazy.array(gen_rand((5,), backend, dtype)) x = do("linalg.solve", A, y) assert x.shape == (5,) # tensorflow e.g. doesn't allow ``A @ x`` for vector x ... 
ly = do("tensordot", A, x, axes=1) make_strict(ly) assert_allclose( to_numpy(y.compute()), to_numpy(ly.compute()), ) def test_dunder_magic(): a = do('random.uniform', size=(), like='numpy') b = lazy.array(a) x, y, z = do('random.uniform', size=(3), like='numpy') a = x * a b = x * b a = a * y b = b * y a *= z b *= z assert_allclose(a, b.compute()) a = do('random.uniform', size=(), like='numpy') b = lazy.array(a) x, y, z = do('random.uniform', size=(3), like='numpy') a = x + a b = x + b a = a + y b = b + y a += z b += z assert_allclose(a, b.compute()) a = do('random.uniform', size=(), like='numpy') b = lazy.array(a) x, y, z = do('random.uniform', size=(3), like='numpy') a = x - a b = x - b a = a - y b = b - y a -= z b -= z assert_allclose(a, b.compute()) a = do('random.uniform', size=(), like='numpy') b = lazy.array(a) x, y, z = do('random.uniform', size=(3), like='numpy') a = x / a b = x / b a = a / y b = b / y a /= z b /= z assert_allclose(a, b.compute()) a = do('random.uniform', size=(), like='numpy') b = lazy.array(a) x, y, z = do('random.uniform', size=(3), like='numpy') a = x // a b = x // b a = a // y b = b // y a //= z b //= z assert_allclose(a, b.compute()) a = do('random.uniform', size=(), like='numpy') b = lazy.array(a) x, y, z = do('random.uniform', size=(3), like='numpy') a = x ** a b = x ** b a = a ** y b = b ** y a **= z b **= z assert_allclose(a, b.compute()) a = do('random.uniform', size=(3, 3), like='numpy') b = lazy.array(a) x, y, z = do('random.uniform', size=(3, 3, 3), like='numpy') a = x @ a b = x @ b a = a @ y b = b @ y a = a @ z b @= z assert_allclose(a, b.compute()) def test_indexing(): a = do('random.uniform', size=(2, 3, 4, 5), like='numpy') b = lazy.array(a) for key in [ 0, (1, ..., -1), (0, 1, slice(None), -2) ]: assert_allclose(a[key], b[key].compute()) def test_einsum(): a = do('random.uniform', size=(2, 3, 4, 5), like='numpy') b = do('random.uniform', size=(4, 5), like='numpy') c = do('random.uniform', size=(6, 2, 3), like='numpy') eq = 'abcd,cd,fab->fd' x1 = do('einsum', eq, a, b, c) la, lb, lc = map(lazy.array, (a, b, c)) x2 = do('einsum', eq, la, lb, lc) assert_allclose(x1, x2.compute()) def test_tensordot(): a = do('random.uniform', size=(7, 3, 4, 5), like='numpy') b = do('random.uniform', size=(5, 6, 3, 2), like='numpy') x1 = do('tensordot', a, b, axes=[(1, 3), (2, 0)]) la, lb = map(lazy.array, (a, b)) x2 = do('tensordot', la, lb, axes=[(1, 3), (2, 0)]) assert_allclose(x1, x2.compute()) def test_use_variable_to_trace_function(): a = lazy.Variable(shape=(2, 3), backend='numpy') b = lazy.Variable(shape=(3, 4), backend='numpy') c = do('tanh', a @ b) f = c.get_function([a, b]) x = do('random.uniform', size=(2, 3), like='numpy') y = do('random.uniform', size=(3, 4), like='numpy') z = f([x, y]) assert z.shape == (2, 4)
jcmgray/autoray
tests/test_lazy.py
Python
apache-2.0
11,880
0
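The tests above exercise autoray's lazy module; a compact sketch of the basic pattern they rely on: wrap concrete arrays, build the computation symbolically, then materialize it:

from autoray import do, lazy

x = do('random.uniform', size=(4, 4), like='numpy')

lx = lazy.array(x)          # wrap the concrete array in a lazy node
ly = do('tanh', lx @ lx)    # operations are recorded, nothing is computed yet
lz = do('sum', ly)          # still symbolic

result = lz.compute()       # evaluate the recorded graph with the numpy backend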
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import sys
import os
import curses
import cumodoro.config as config
import cumodoro.interface as interface
import cumodoro.globals as globals
from cumodoro.cursest import Refresher

import logging
log = logging.getLogger('cumodoro')


def set_title(msg):
    print("\x1B]0;%s\x07" % msg)


def get_title():
    print("\x1B[23t")
    return sys.stdin.read()


def save_title():
    print("\x1B[22t")


def restore_title():
    print("\x1B[23t")


def main():
    globals.refresher = Refresher()
    globals.refresher.start()

    globals.database.create()
    globals.database.load_tasks()

    os.environ["ESCDELAY"] = "25"

    save_title()
    set_title("Cumodoro")
    curses.wrapper(interface.main)
    restore_title()
gisodal/cumodoro
cumodoro/main.py
Python
mit
765
0.007843
""" Stores application data. """ # standard libraries import copy import json import pathlib import threading import typing # third party libraries from nion.swift.model import Utility from nion.utils import Event from nion.utils import StructuredModel class ApplicationData: """Application data is a singleton that stores application data.""" def __init__(self, file_path: typing.Optional[pathlib.Path] = None) -> None: self.__lock = threading.RLock() self.__file_path = file_path self.__data_dict: typing.Optional[typing.Dict[str, typing.Any]] = None self.data_changed_event = Event.Event() @property def file_path(self) -> typing.Optional[pathlib.Path]: return self.__file_path @file_path.setter def file_path(self, value: pathlib.Path) -> None: self.__file_path = value def get_data_dict(self) -> typing.Dict[str, typing.Any]: with self.__lock: data_changed = self.__read_data_dict() result = copy.deepcopy(self.__data_dict) if self.__data_dict else dict() if data_changed: self.data_changed_event.fire() return result def set_data_dict(self, d: typing.Mapping[str, typing.Any]) -> None: with self.__lock: self.__data_dict = dict(d) self.__write_data_dict() self.data_changed_event.fire() def __read_data_dict(self) -> bool: if self.__data_dict is None and self.__file_path and self.__file_path.exists(): with open(self.__file_path) as f: self.__data_dict = json.load(f) return True return False def __write_data_dict(self) -> None: if self.__file_path: with Utility.AtomicFileWriter(self.__file_path) as fp: json.dump(self.__data_dict, fp, skipkeys=True, indent=4) __application_data = ApplicationData() def set_file_path(file_path: pathlib.Path) -> None: __application_data.file_path = file_path def get_data() -> typing.Dict[str, typing.Any]: return __application_data.get_data_dict() def set_data(d: typing.Mapping[str, typing.Any]) -> None: __application_data.set_data_dict(d) # class SessionMetadata: """Session data is a singleton that stores application data via the ApplicationData singleton.""" def __init__(self) -> None: site_field = StructuredModel.define_field("site", StructuredModel.STRING) instrument_field = StructuredModel.define_field("instrument", StructuredModel.STRING) task_field = StructuredModel.define_field("task", StructuredModel.STRING) microscopist_field = StructuredModel.define_field("microscopist", StructuredModel.STRING) sample_field = StructuredModel.define_field("sample", StructuredModel.STRING) sample_area_field = StructuredModel.define_field("sample_area", StructuredModel.STRING) schema = StructuredModel.define_record("SessionMetadata", [site_field, instrument_field, task_field, microscopist_field, sample_field, sample_area_field]) self.__model = StructuredModel.build_model(schema, value=get_data().get("session_metadata", dict())) def model_changed() -> None: data = get_data() data["session_metadata"] = self.__model.to_dict_value() set_data(data) self.__model_changed_listener = self.__model.model_changed_event.listen(model_changed) @property def model(self) -> StructuredModel.RecordModel: return typing.cast(StructuredModel.RecordModel, self.__model) __session_metadata = SessionMetadata() def get_session_metadata_model() -> StructuredModel.RecordModel: return __session_metadata.model def get_session_metadata_dict() -> typing.Dict[str, typing.Any]: return dict(typing.cast(typing.Mapping[str, typing.Any], __session_metadata.model.to_dict_value()))
nion-software/nionswift
nion/swift/model/ApplicationData.py
Python
gpl-3.0
3,883
0.004378
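A small usage sketch for the module-level helpers above, assuming nionswift is importable; the backing file path and the keys written into it are placeholders:

import pathlib

from nion.swift.model import ApplicationData

# Point the singleton at a backing file before reading or writing.
ApplicationData.set_file_path(pathlib.Path("/tmp/app_data.json"))

data = ApplicationData.get_data()
data["recent_projects"] = ["/tmp/project_a.nsproj"]
ApplicationData.set_data(data)   # written atomically via Utility.AtomicFileWriter

# Session metadata is kept in the "session_metadata" section of the same file.
print(ApplicationData.get_session_metadata_dict())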
beta = 5 # "beta" value in adiabatic correction to wind profile Cd = 840.0 # heat capacity of mineral component of soil, J/kg/K Co = 1920.0 # heat capacity of organic component of soil, J/kg/K Cp = 1004.67 # specific heat of dry air at constant pressure, J/kg-K Cpd = 1004.67 # specific heat of dry air at constant pressure, J/kg-K Cw = 4180.0 # heat capacity of water, J/kg/K D0 = 10. # specific humidity deficit threshold for Lasslop et al (2010) NEE expression E0_long = 100 # long term activation energy, default value eps = 0.0000001 # a small number for comparing floats g = 9.81 # gravitation constant gamma = 28 # "gamma" value in adiabatic correction to wind profile g2kg = 1E-3 # convert grams to kilograms k = 0.4 # von Karmans constant Lv = 2453600 # latent heat of vapourisation, J/kg Mc = 0.0120107 # molecular weight of carbon, kg/mol Mco2 = 0.04401 # molecular weight of carbon dioxide, kg/mol Md = 0.02897 # molecular weight of dry air, kg/mol missing_value = -9999 # missing data value large_value = 1E35 # large value small_value = -1E35 # small value Mv = 0.01802 # molecular weight of water vapour, kg/mol mu = Md/Mv # ratio of dry air molecular weight to water vapour molecular weight rho_water = 1000.0 # density of water, kg/m^3 R = 8.314 # universal gas constant, J/mol.K Rd = 287.04 # gas constant for dry air, J/kg/K Rv = 461.5 # gas constant for water vapour, J/kg/K Pi = 3.14159 # Pi sb = 5.6704E-8 # Stefan-Boltzman constant, W/m^2/K^4 Tref = 15.0 # reference temperature in the Lloyd-Taylor respiration equation, degC T0 = -46.02 # zero temp[erature in the Lloyd-Taylor respiration equation, degC lwVert = 0.115 # vertical path length of CSAT3, m lwHor = 0.058 # horizontal path length of CSAT3, m lTv = 0.115 # path length of sonic virtual temperature, m dIRGA = 0.0095 # path diameter of LI7500 IRGA, m lIRGA = 0.127 # path length of LI7500 IRGA, m Tb = 1800 # 30-min period, seconds C2K = 273.15 # convert degrees celsius to kelvin # dictionary of site names and time zones tz_dict = {"adelaideriver":"Australia/Darwin", "alicespringsmulga":"Australia/Darwin", "arcturus":"Australia/Brisbane", "calperum":"Australia/Adelaide", "capetribulation":"Australia/Brisbane", "cowbay":"Australia/Brisbane", "cumberlandplains":"Australia/Sydney", "cup_ec":"Australia/Sydney", "daintree":"Australia/Brisbane", "dalypasture":"Australia/Darwin", "dalyregrowth":"Australia/Darwin", "dalyuncleared":"Australia/Darwin", "dargo":"Australia/Melbourne", "dryriver":"Australia/Darwin", "foggdam":"Australia/Darwin", "gingin":"Australia/Perth", "greatwestern":"Australia/Perth", "gww":"Australia/Perth", "howardsprings":"Australia/Darwin", "litchfield":"Australia/Darwin", "nimmo":"Australia/Sydney", "reddirt":"Australia/Darwin", "riggs":"Australia/Melbourne", "robson":"Australia/Brisbane", "samford":"Australia/Brisbane", "sturtplains":"Australia/Darwin", "titreeeast":"Australia/Darwin", "tumbarumba":"Australia/Canberra", "wallaby":"Australia/Melbourne", "warra":"Australia/Hobart", "whroo":"Australia/Melbourne", "wombat":"Australia/Melbourne", "yanco_jaxa":"Australia/Sydney"} units_synonyms = {"Fsd":["W/m^2","W+1m-2"], "Fsu":["W/m^2","W+1m-2"], "Fld":["W/m^2","W+1m-2"], "Flu":["W/m^2","W+1m-2"], "Fn":["W/m^2","W+1m-2"], "Fg":["W/m^2","W+1m-2"], "Precip":["mm"], "ps":["kPa"], "RH":["%","percent"], "Sws":["frac","m^3/m^3","m+3m-3"], "Ta":["C","degC"], "Ts":["C","degC"], "Wd":["degT","deg","degrees"], "Ws":["m/s","m+1s-1"]}
OzFlux/PyFluxPro
scripts/constants.py
Python
gpl-3.0
4,163
0.017535
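A short sketch of the kinds of lookups the constants module above supports; the import assumes PyFluxPro's scripts/ directory is on sys.path, and the numbers are illustrative:

import constants as c

# Temperature conversion using the defined constant.
Ta_C = 25.0
Ta_K = Ta_C + c.C2K                        # 298.15 K

# Check a units string against the accepted synonyms for a variable.
print("W/m^2" in c.units_synonyms["Fsd"])  # True

# Time zone lookup for a site name.
print(c.tz_dict["calperum"])               # Australia/Adelaide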
#!/usr/bin/python # Copyright (c) 2015 VMware, Inc. All Rights Reserved. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: vca_fw short_description: add remove firewall rules in a gateway in a vca description: - Adds or removes firewall rules from a gateway in a vca environment version_added: "2.0" options: username: description: - The vca username or email address, if not set the environment variable VCA_USER is checked for the username. required: false default: None password: description: - The vca password, if not set the environment variable VCA_PASS is checked for the password required: false default: None org: description: - The org to login to for creating vapp, mostly set when the service_type is vdc. required: false default: None service_id: description: - The service id in a vchs environment to be used for creating the vapp required: false default: None host: description: - The authentication host to be used when service type is vcd. required: false default: None api_version: description: - The api version to be used with the vca required: false default: "5.7" service_type: description: - The type of service we are authenticating against required: false default: vca choices: [ "vca", "vchs", "vcd" ] state: description: - if the object should be added or removed required: false default: present choices: [ "present", "absent" ] verify_certs: description: - If the certificates of the authentication is to be verified required: false default: True vdc_name: description: - The name of the vdc where the gateway is located. 
required: false default: None gateway_name: description: - The name of the gateway of the vdc where the rule should be added required: false default: gateway fw_rules: description: - A list of firewall rules to be added to the gateway, Please see examples on valid entries required: True default: false ''' EXAMPLES = ''' #Add a set of firewall rules - hosts: localhost connection: local tasks: - vca_fw: instance_id: 'b15ff1e5-1024-4f55-889f-ea0209726282' vdc_name: 'benz_ansible' state: 'absent' fw_rules: - description: "ben testing" source_ip: "Any" dest_ip: 192.168.2.11 - description: "ben testing 2" source_ip: 192.168.2.100 source_port: "Any" dest_port: "22" dest_ip: 192.168.2.13 is_enable: "true" enable_logging: "false" protocol: "Tcp" policy: "allow" ''' import time, json, xmltodict HAS_PYVCLOUD = False try: from pyvcloud.vcloudair import VCA from pyvcloud.schema.vcd.v1_5.schemas.vcloud.networkType import ProtocolsType HAS_PYVCLOUD = True except ImportError: pass SERVICE_MAP = {'vca': 'ondemand', 'vchs': 'subscription', 'vcd': 'vcd'} LOGIN_HOST = {} LOGIN_HOST['vca'] = 'vca.vmware.com' LOGIN_HOST['vchs'] = 'vchs.vmware.com' VALID_RULE_KEYS = ['policy', 'is_enable', 'enable_logging', 'description', 'dest_ip', 'dest_port', 'source_ip', 'source_port', 'protocol'] def vca_login(module=None): service_type = module.params.get('service_type') username = module.params.get('username') password = module.params.get('password') instance = module.params.get('instance_id') org = module.params.get('org') service = module.params.get('service_id') vdc_name = module.params.get('vdc_name') version = module.params.get('api_version') verify = module.params.get('verify_certs') if not vdc_name: if service_type == 'vchs': vdc_name = module.params.get('service_id') if not org: if service_type == 'vchs': if vdc_name: org = vdc_name else: org = service if service_type == 'vcd': host = module.params.get('host') else: host = LOGIN_HOST[service_type] if not username: if 'VCA_USER' in os.environ: username = os.environ['VCA_USER'] if not password: if 'VCA_PASS' in os.environ: password = os.environ['VCA_PASS'] if not username or not password: module.fail_json(msg = "Either the username or password is not set, please check") if service_type == 'vchs': version = '5.6' if service_type == 'vcd': if not version: version == '5.6' vca = VCA(host=host, username=username, service_type=SERVICE_MAP[service_type], version=version, verify=verify) if service_type == 'vca': if not vca.login(password=password): module.fail_json(msg = "Login Failed: Please check username or password", error=vca.response.content) if not vca.login_to_instance(password=password, instance=instance, token=None, org_url=None): s_json = serialize_instances(vca.instances) module.fail_json(msg = "Login to Instance failed: Seems like instance_id provided is wrong .. 
Please check",\ valid_instances=s_json) if not vca.login_to_instance(instance=instance, password=None, token=vca.vcloud_session.token, org_url=vca.vcloud_session.org_url): module.fail_json(msg = "Error logging into org for the instance", error=vca.response.content) return vca if service_type == 'vchs': if not vca.login(password=password): module.fail_json(msg = "Login Failed: Please check username or password", error=vca.response.content) if not vca.login(token=vca.token): module.fail_json(msg = "Failed to get the token", error=vca.response.content) if not vca.login_to_org(service, org): module.fail_json(msg = "Failed to login to org, Please check the orgname", error=vca.response.content) return vca if service_type == 'vcd': if not vca.login(password=password, org=org): module.fail_json(msg = "Login Failed: Please check username or password or host parameters") if not vca.login(password=password, org=org): module.fail_json(msg = "Failed to get the token", error=vca.response.content) if not vca.login(token=vca.token, org=org, org_url=vca.vcloud_session.org_url): module.fail_json(msg = "Failed to login to org", error=vca.response.content) return vca def validate_fw_rules(module=None, fw_rules=None): VALID_PROTO = ['Tcp', 'Udp', 'Icmp', 'Any'] for rule in fw_rules: if not isinstance(rule, dict): module.fail_json(msg="Firewall rules must be a list of dictionaries, Please check", valid_keys=VALID_RULE_KEYS) for k in rule.keys(): if k not in VALID_RULE_KEYS: module.fail_json(msg="%s is not a valid key in fw rules, Please check above.." %k, valid_keys=VALID_RULE_KEYS) rule['dest_port'] = rule.get('dest_port', 'Any') rule['dest_ip'] = rule.get('dest_ip', 'Any') rule['source_port'] = rule.get('source_port', 'Any') rule['source_ip'] = rule.get('source_ip', 'Any') rule['protocol'] = rule.get('protocol', 'Any') rule['policy'] = rule.get('policy', 'allow') rule['is_enable'] = rule.get('is_enable', 'true') rule['enable_logging'] = rule.get('enable_logging', 'false') rule['description'] = rule.get('description', 'rule added by Ansible') if not rule['protocol'] in VALID_PROTO: module.fail_json(msg="the value in protocol is not valid, valid values are as above", valid_proto=VALID_PROTO) return fw_rules def create_protocol_list(protocol): plist = [] plist.append(protocol.get_Tcp()) plist.append(protocol.get_Any()) plist.append(protocol.get_Tcp()) plist.append(protocol.get_Udp()) plist.append(protocol.get_Icmp()) plist.append(protocol.get_Other()) return plist def create_protocols_type(protocol): all_protocols = {"Tcp": None, "Udp": None, "Icmp": None, "Any": None} all_protocols[protocol] = True return ProtocolsType(**all_protocols) def main(): module = AnsibleModule( argument_spec=dict( username = dict(default=None), password = dict(default=None), org = dict(default=None), service_id = dict(default=None), instance_id = dict(default=None), host = dict(default=None), api_version = dict(default='5.7'), service_type = dict(default='vca', choices=['vchs', 'vca', 'vcd']), state = dict(default='present', choices = ['present', 'absent']), vdc_name = dict(default=None), gateway_name = dict(default='gateway'), fw_rules = dict(required=True, default=None, type='list'), ) ) vdc_name = module.params.get('vdc_name') org = module.params.get('org') service = module.params.get('service_id') state = module.params.get('state') service_type = module.params.get('service_type') host = module.params.get('host') instance_id = module.params.get('instance_id') fw_rules = module.params.get('fw_rules') gateway_name = 
module.params.get('gateway_name') verify_certs = dict(default=True, type='bool'), if not HAS_PYVCLOUD: module.fail_json(msg="python module pyvcloud is needed for this module") if service_type == 'vca': if not instance_id: module.fail_json(msg="When service type is vca the instance_id parameter is mandatory") if not vdc_name: module.fail_json(msg="When service type is vca the vdc_name parameter is mandatory") if service_type == 'vchs': if not service: module.fail_json(msg="When service type vchs the service_id parameter is mandatory") if not org: org = service if not vdc_name: vdc_name = service if service_type == 'vcd': if not host: module.fail_json(msg="When service type is vcd host parameter is mandatory") vca = vca_login(module) vdc = vca.get_vdc(vdc_name) if not vdc: module.fail_json(msg = "Error getting the vdc, Please check the vdc name") mod_rules = validate_fw_rules(module, fw_rules) gateway = vca.get_gateway(vdc_name, gateway_name) if not gateway: module.fail_json(msg="Not able to find the gateway %s, please check the gateway_name param" %gateway_name) rules = gateway.get_fw_rules() existing_rules = [] del_rules = [] for rule in rules: current_trait = (create_protocol_list(rule.get_Protocols()), rule.get_DestinationPortRange(), rule.get_DestinationIp(), rule.get_SourcePortRange(), rule.get_SourceIp()) for idx, val in enumerate(mod_rules): trait = (create_protocol_list(create_protocols_type(val['protocol'])), val['dest_port'], val['dest_ip'], val['source_port'], val['source_ip']) if current_trait == trait: del_rules.append(mod_rules[idx]) mod_rules.pop(idx) existing_rules.append(current_trait) if state == 'absent': if len(del_rules) < 1: module.exit_json(changed=False, msg="Nothing to delete", delete_rules=mod_rules) else: for i in del_rules: gateway.delete_fw_rule(i['protocol'], i['dest_port'], i['dest_ip'], i['source_port'], i['source_ip']) task = gateway.save_services_configuration() if not task: module.fail_json(msg="Unable to Delete Rule, please check above error", error=gateway.response.content) if not vca.block_until_completed(task): module.fail_json(msg="Error while waiting to remove Rule, please check above error", error=gateway.response.content) module.exit_json(changed=True, msg="Rules Deleted", deleted_rules=del_rules) if len(mod_rules) < 1: module.exit_json(changed=False, rules=existing_rules) if len(mod_rules) >= 1: for i in mod_rules: gateway.add_fw_rule(i['is_enable'], i['description'], i['policy'], i['protocol'], i['dest_port'], i['dest_ip'], i['source_port'], i['source_ip'], i['enable_logging']) task = gateway.save_services_configuration() if not task: module.fail_json(msg="Unable to Add Rule, please check above error", error=gateway.response.content) if not vca.block_until_completed(task): module.fail_json(msg="Failure in waiting for adding firewall rule", error=gateway.response.content) module.exit_json(changed=True, rules=mod_rules) # import module snippets from ansible.module_utils.basic import * if __name__ == '__main__': main()
muffl0n/ansible-modules-extras
cloud/vmware/vca_fw.py
Python
gpl-3.0
14,207
0.011755
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
from nose.tools import with_setup
import subprocess
import requests
import os
from .. import util
import time
import json

# ------------------------------------------------------------------------------
# Constants
# ------------------------------------------------------------------------------
G_TEST_HOST = 'http://127.0.0.1:12345/'

# ------------------------------------------------------------------------------
# globals
# ------------------------------------------------------------------------------
g_server_pid = -1

# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
def setup_func():
    global g_server_pid
    l_subproc = subprocess.Popen(["../../build/examples/basic"])
    g_server_pid = l_subproc.pid
    time.sleep(0.2)

# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
def teardown_func():
    global g_server_pid
    l_code, l_out, l_err = util.run_command('kill -9 %d'%(g_server_pid))
    time.sleep(0.2)

# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
@with_setup(setup_func, teardown_func)
def bb_test_basic_001():
    # Unimplemented request
    l_e = G_TEST_HOST + 'bleep/bloop/blop'
    l_r = requests.get(l_e)
    assert l_r.status_code == 501
    l_r_json = json.loads(l_r.content)
    assert l_r_json != None
    assert len(l_r_json['errors']) > 0
    assert l_r_json['errors'][0]['code'] == 501

    # Valid request
    l_e = G_TEST_HOST + 'bananas'
    l_r = requests.get(l_e)
    assert l_r.status_code == 200
    assert 'Hello World' in l_r.content
Verizon/hlx
tests/blackbox/examples/bb_test_basic.py
Python
apache-2.0
1,993
0.002509
import _plotly_utils.basevalidators class TokenValidator(_plotly_utils.basevalidators.StringValidator): def __init__(self, plotly_name="token", parent_name="histogram2d.stream", **kwargs): super(TokenValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "calc"), no_blank=kwargs.pop("no_blank", True), strict=kwargs.pop("strict", True), **kwargs )
plotly/plotly.py
packages/python/plotly/plotly/validators/histogram2d/stream/_token.py
Python
mit
499
0.002004
"""New song class to work with a plain text song file format""" import os import re chord_regex = re.compile("[A-G][1-9#bminajsugd]*[/]*[A-G]*[1-9#bminajsugd]*") valid_chords = "ABCDEFGb#minajsugd123456789" not_chords = "HJKLOPQRTVWXYZ\n" class Chord(object): """Represents a single chord within a song file""" def __init__(self, chord): self.text = chord def __repr__(self): return "Chord({})".format(self.text) class Chordline(object): """Represents multiple chords that are on a separate line""" def __init__(self, chords): self.text = chords def __repr__(self): return "Chordline({})".format(self.text) class Text(object): """Represents plain text, such as lyrics, within a song file""" def __init__(self, text): self.text = text def __repr__(self): return "Text({})".format(self.text) def combine(chord_line, lyrics): """Combines a line of chords with its associated lyrics""" # make sure the lyrics line is long enough to hold chords if(len(chord_line) > len(lyrics)): lyrics = lyrics.ljust(len(chord_line)) # find valid chords matches = chord_regex.finditer(chord_line) # list of (location, chord) chords = list(zip([match.start() for match in matches], chord_line.split())) # insert chords in verse order since insertion shifts positions of subsequent chords combined = [] chords.reverse() for chord in chords: loc, ch = chord combined.append(Text(lyrics[loc:])) combined.append(Chord(ch)) lyrics = lyrics[:loc] if len(lyrics) > 0: # handle any leftover text before first chord combined.append(Text(lyrics)) combined.reverse() return combined def is_chord_line(line): """Checks if the line contains chords""" if contains_only(line, valid_chords) and not contains_any(line, not_chords): return True else: return False def find_commands(text): """Returns a list of line numbers which contain a colon, representing a command""" line_numbers = [] num = 0 for line in text: if ":" in line: line_numbers.append(num) num += 1 return line_numbers def contains_any(line, letters): """Check if any of the letters are in the line""" for letter in letters: if letter in line: return True return False def contains_only(line, letters): """Check if the line only contains these letters""" for c in line: if c.isalnum(): # if character is alphanumeric if c in letters: continue else: # character not found in letters return False else: # ignore non-alphanumeric characters continue return True if __name__ == '__main__': s = Song('songs/10000Reasons.txt') c = s.attributes['chorus 1']
brownjm/praisetex
song.py
Python
gpl-3.0
2,932
0.006139
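combine() in song.py above relies on two details worth seeing in isolation: chord positions come from regex match offsets in the chord line, and the chord/lyric pairs are built right-to-left so that slicing the lyric string never shifts offsets that are still to be used. A standalone illustration of that idea (the chord and lyric lines are made up):

import re

chord_line = "G       D/F#      Em"
lyric_line = "Bless the Lord O my soul"

# Offsets of each chord in the chord line, via regex match positions.
positions = [m.start() for m in re.finditer(r"\S+", chord_line)]
chords = chord_line.split()

# Walk right-to-left: slicing off the tail never invalidates the earlier offsets.
pairs = []
for pos, chord in reversed(list(zip(positions, chords))):
    pairs.append((chord, lyric_line[pos:]))
    lyric_line = lyric_line[:pos]
pairs.reverse()
print(pairs)   # each chord paired with the lyric slice that starts beneath it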
# -*- coding: utf-8 -*- """ Django settings for pfa project. For more information on this file, see https://docs.djangoproject.com/en/dev/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/dev/ref/settings/ """ from __future__ import absolute_import, unicode_literals import environ ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /) APPS_DIR = ROOT_DIR.path('pfa') env = environ.Env() # APP CONFIGURATION # ------------------------------------------------------------------------------ DJANGO_APPS = ( # Default Django apps: 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', # Useful template tags: # 'django.contrib.humanize', # Admin 'django.contrib.admin', ) THIRD_PARTY_APPS = ( 'session_security', # session timeout management 'crispy_forms', # Form layouts 'allauth', # registration 'allauth.account', # registration 'allauth.socialaccount', # registration ) # Apps specific for this project go here. LOCAL_APPS = ( 'pfa.users', # custom users app 'pfa.postfix', 'pfa.api', ) # See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS # MIDDLEWARE CONFIGURATION # ------------------------------------------------------------------------------ MIDDLEWARE_CLASSES = ( # Make sure djangosecure.middleware.SecurityMiddleware is listed first 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', # Session inactivity management 'session_security.middleware.SessionSecurityMiddleware', ) # MIGRATIONS CONFIGURATION # ------------------------------------------------------------------------------ MIGRATION_MODULES = { 'sites': 'pfa.contrib.sites.migrations' } # DEBUG # ------------------------------------------------------------------------------ # See: https://docs.djangoproject.com/en/dev/ref/settings/#debug DEBUG = env.bool("DJANGO_DEBUG", False) # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug TEMPLATE_DEBUG = DEBUG # FIXTURE CONFIGURATION # ------------------------------------------------------------------------------ # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS FIXTURE_DIRS = ( str(APPS_DIR.path('fixtures')), ) # EMAIL CONFIGURATION # ------------------------------------------------------------------------------ EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend') # MANAGER CONFIGURATION # ------------------------------------------------------------------------------ # See: https://docs.djangoproject.com/en/dev/ref/settings/#admins ADMINS = ( ("""Frederic Tschannen""", '[email protected]'), ) # See: https://docs.djangoproject.com/en/dev/ref/settings/#managers MANAGERS = ADMINS # DATABASE CONFIGURATION # ------------------------------------------------------------------------------ # See: https://docs.djangoproject.com/en/dev/ref/settings/#databases DATABASES = { # Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ 'default': env.db("DATABASE_URL", default="postgres://localhost/pfa"), } DATABASES['default']['ATOMIC_REQUESTS'] = True # GENERAL CONFIGURATION # 
------------------------------------------------------------------------------ # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # In a Windows environment this must be set to your system time zone. TIME_ZONE = 'UTC' # See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code LANGUAGE_CODE = 'en-us' # See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id SITE_ID = 1 # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n USE_I18N = True # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n USE_L10N = True # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz USE_TZ = True # TEMPLATE CONFIGURATION # ------------------------------------------------------------------------------ # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors TEMPLATE_CONTEXT_PROCESSORS = ( 'django.contrib.auth.context_processors.auth', 'django.core.context_processors.debug', 'django.core.context_processors.i18n', 'django.core.context_processors.media', 'django.core.context_processors.static', 'django.core.context_processors.tz', 'django.contrib.messages.context_processors.messages', 'django.core.context_processors.request', # Your stuff: custom template context processors go here ) # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs TEMPLATE_DIRS = ( str(APPS_DIR.path('templates')), ) TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ) # See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs CRISPY_TEMPLATE_PACK = 'bootstrap3' # STATIC FILE CONFIGURATION # ------------------------------------------------------------------------------ # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root STATIC_ROOT = str(ROOT_DIR('staticfiles')) # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url STATIC_URL = '/static/' # See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS STATICFILES_DIRS = ( str(APPS_DIR.path('static')), ) # See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', ) # MEDIA CONFIGURATION # ------------------------------------------------------------------------------ # See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root MEDIA_ROOT = str(APPS_DIR('media')) # See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url MEDIA_URL = '/media/' # URL Configuration # ------------------------------------------------------------------------------ ROOT_URLCONF = 'config.urls' # See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application WSGI_APPLICATION = 'config.wsgi.application' # AUTHENTICATION CONFIGURATION # ------------------------------------------------------------------------------ AUTHENTICATION_BACKENDS = ( 'django.contrib.auth.backends.ModelBackend', 'allauth.account.auth_backends.AuthenticationBackend', ) # Some really nice defaults ACCOUNT_AUTHENTICATION_METHOD = 'username' ACCOUNT_EMAIL_REQUIRED = True ACCOUNT_EMAIL_VERIFICATION = 'mandatory' # Custom user app defaults # Select the correct user model AUTH_USER_MODEL = 'users.User' LOGIN_REDIRECT_URL = 'users:redirect' LOGIN_URL = 
'account_login' # SLUGLIFIER AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify' # LOGGING CONFIGURATION # ------------------------------------------------------------------------------ # See: https://docs.djangoproject.com/en/dev/ref/settings/#logging # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } } # Your common stuff: Below this line define 3rd party library settings
fretscha/pfa
config/settings/common.py
Python
bsd-3-clause
8,513
0.000822
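The settings module above leans on django-environ for everything environment-specific. A short sketch of how those lookups behave on their own (the variable values and database URL are illustrative, not taken from the project):

import os
import environ

# Illustrative values only; in the project these come from the real environment.
os.environ.setdefault("DJANGO_DEBUG", "no")
os.environ.setdefault("DATABASE_URL", "postgres://pfa:secret@localhost:5432/pfa")

env = environ.Env()
DEBUG = env.bool("DJANGO_DEBUG", False)          # "no" -> False
DATABASES = {"default": env.db("DATABASE_URL")}  # URL parsed into a settings dict
print(DEBUG, sorted(DATABASES["default"]))       # keys such as ENGINE, HOST, NAME, PORT, ...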
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2013-2014, Martín Gaitán # Copyright (c) 2012-2013, Alexander Jung-Loddenkemper # This file is part of Waliki (http://waliki.nqnwebs.com/) # License: BSD (https://github.com/mgaitan/waliki/blob/master/LICENSE) #=============================================================================== # DOCS #=============================================================================== """Plugin for upload files to waliki webpages""" #=============================================================================== # IMPORTS #=============================================================================== import os.path import imghdr from flaskext.uploads import ( UploadSet, ALL, configure_uploads, patch_request_class ) from flask import (render_template, flash, request, Blueprint, current_app, abort, send_file, url_for, jsonify) #=============================================================================== # CONSTANTS #=============================================================================== CLOSE_WINDOW_HTML = """ <html> <head> <script type="text/javascript"> window.close(); </script> </head> <body> </body> </html>""" #=============================================================================== # BLUEPRINT AND BASE SETUP #=============================================================================== def default_dest(app): return os.path.join(app.config.get('CONTENT_DIR'), 'uploads') media = UploadSet('media', ALL, default_dest=default_dest) uploads = Blueprint('uploads', __name__, template_folder='templates') #=============================================================================== # SLOT #=============================================================================== def extra_actions(page, **extra): context = extra['extra_context'] actions = context.get('extra_actions', []) actions.append(('Attachments', url_for('uploads.upload', url=extra.get('url')))) context['extra_actions'] = actions #=============================================================================== # INITIALIZER #=============================================================================== REQUIREMENTS = ["Flask-Uploads"] def init(app): app.register_blueprint(uploads) configure_uploads(app, media) app.signals.signal('pre-edit').connect(extra_actions) patch_request_class(app, 32 * 1024 * 1024) # limit 32mb #=============================================================================== # ROUTES #=============================================================================== @uploads.route('/<path:url>/_upload', methods=['GET', 'POST']) def upload(url): last_attached = None page = current_app.wiki.get_or_404(url) if request.method == 'POST' and 'attach' in request.files: last_attached = request.files['attach'] media.save(last_attached, folder=page.url) flash('"%s" was attached succesfully to /%s' % (last_attached.filename, page.url)) try: files = os.listdir(os.path.join(current_app.config.get('CONTENT_DIR'), 'uploads', page.url)) except OSError: files = [] return render_template('upload.html', page=page, files=files, markup=current_app.wiki.markup) def _base_file(url, filename): page = current_app.wiki.get_or_404(url) directory = os.path.join(current_app.config.get('CONTENT_DIR'), 'uploads', url) try: files = os.listdir(directory) except OSError: files = [] if not filename in files: abort(404) outfile = os.path.join(directory, filename) return outfile @uploads.route('/<path:url>/_attachment/<filename>') def get_file(url, filename): outfile = 
_base_file(url, filename) # by default only images are embeddable. as_attachment = ((not imghdr.what(outfile) and 'embed' not in request.args) or 'as_attachment' in request.args) return send_file(outfile, as_attachment=as_attachment) @uploads.route('/<path:url>/_remove/<filename>', methods=['POST', 'DELETE']) def remove_file(url, filename): outfile = _base_file(url, filename) try: os.remove(outfile) finally: return jsonify({'removed': filename}) return jsonify({'removed': None}) #=============================================================================== # MAIN #=============================================================================== if __name__ == "__main__": print(__doc__)
mgaitan/waliki_flask
waliki/extensions/uploads.py
Python
bsd-3-clause
4,692
0.008316
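The waliki extension above is built on Flask-Uploads' UploadSet/configure_uploads flow. A pared-down sketch of that flow outside waliki (the route, paths and folder name are illustrative; the import location varies between flaskext.uploads and flask_uploads depending on the installed version):

from flask import Flask, request
from flaskext.uploads import UploadSet, ALL, configure_uploads

app = Flask(__name__)

# default_dest receives the app and returns the base upload directory.
media = UploadSet('media', ALL, default_dest=lambda application: '/tmp/uploads')
configure_uploads(app, media)

@app.route('/attach/<page>', methods=['POST'])
def attach(page):
    # Saves to <default_dest>/<page>/<filename>, mirroring media.save(..., folder=page.url) above.
    saved = media.save(request.files['attach'], folder=page)
    return saved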
no_inputs = int(raw_input()) for i in range (0, no_inputs): n, k, t, f = map(int, raw_input().split()) answer = n + k*((f-n)/(k-1)) print answer
prabodhprakash/problemsolving
spoj/EBOXES.py
Python
mit
148
0.033784
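The whole EBOXES solution above is the single closed-form expression n + k*((f-n)/(k-1)); under Python 2 the / between ints floors, and t is read but never used in the formula. Working the expression through with arbitrary sample values (not judge data):

# Same expression, with the floor division written explicitly.
n, k, t, f = 4, 3, 2, 10
answer = n + k * ((f - n) // (k - 1))   # (10 - 4) // 2 = 3, so 4 + 3*3 = 13
print(answer)                           # 13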
#! /usr/bin/env python # coding:utf8 from argparse import ArgumentParser import os import sys PATH_OF_THIS_SCRIPT = os.path.split(os.path.realpath(__file__))[0] sys.path.insert(0, os.path.join(PATH_OF_THIS_SCRIPT, "..")) import GetOrganelleLib from GetOrganelleLib.pipe_control_func import * from GetOrganelleLib.seq_parser import * from GetOrganelleLib.sam_parser import * from GetOrganelleLib.statistical_func import * from GetOrganelleLib.versions import get_versions PATH_OF_THIS_SCRIPT = os.path.split(os.path.realpath(__file__))[0] from sympy import Interval import sys import platform SYSTEM_NAME = "" if platform.system() == "Linux": SYSTEM_NAME = "linux" elif platform.system() == "Darwin": SYSTEM_NAME = "macOS" else: sys.stdout.write("Error: currently GetOrganelle is not supported for " + platform.system() + "! ") exit() GO_LIB_PATH = os.path.split(GetOrganelleLib.__file__)[0] GO_DEP_PATH = os.path.realpath(os.path.join(GO_LIB_PATH, "..", "GetOrganelleDep", SYSTEM_NAME)) try: # python2 UnicodeDecodeError ± reload(sys) sys.setdefaultencoding('utf8') except NameError: pass def get_options(): parser = ArgumentParser("evaluate_assembly_using_mapping.py -f fasta_file -1 RAW_1.fq -2 RAW_2.fq -o output") parser.add_argument("-f", dest="fasta", help="input assembly fasta file.") parser.add_argument("-1", dest="original_fq_1") parser.add_argument("-2", dest="original_fq_2") parser.add_argument("-u", dest="unpaired_fq_files", default="", help="Input file(s) with unpaired (single-end) reads to be added to the pool. " "files could be comma-separated lists such as 'seq1,seq2'.") parser.add_argument("-X", "--max-lib-len", dest="max_lib_len", type=int, default=1200, help="Corresponding to '-X' option in Bowtie2. Default: %(default)s.") parser.add_argument("-c", dest="is_circular", default="auto", help="(yes/no/auto) input fasta is circular. " "If auto was chosen, the input fasta would be treated as circular when the sequence name " "ends with '(circular)'. " "Default: auto") parser.add_argument("-o", dest="output_base", help="output folder.") parser.add_argument("-t", dest="threads", type=int, default=2, help="threads.") parser.add_argument("--continue", dest="resume", default=False, action="store_true") parser.add_argument("--seed", dest="random_seed", default=12345, type=int, help="Seed for random number generator. Default: %(default)s") parser.add_argument("--draw", dest="draw_plot", default=False, action="store_true", help="Draw density plot using matplotlib, which should be installed.") parser.add_argument("--plot-format", dest="plot_format", default="pdf,png", help='Default: pdf,png') parser.add_argument("--plot-title", dest="plot_title", help="Default: `the file name of the input fasta`") parser.add_argument("--plot-subtitle", dest="plot_subtitle", default="", help="A 4-space indicates a line break. Default: None") parser.add_argument("--plot-transparent", dest="plot_transparent", default=False, action="store_true", help="Default: False") parser.add_argument("--plot-x-density", dest="plot_x_density", default=12000., type=float, help="Default: %(default)s") # parser.add_argument("--plot-x-sliding-window", dest="sliding_window_size", default=1, type=int, # help="Default: %(default)s") parser.add_argument("--plot-x-gap-dots", dest="gap_len", default=3000, type=int, help="Number of sites added in-between isolated contigs. 
Default: %(default)s") parser.add_argument("--plot-figure-height", dest="figure_height", default=5., type=float, help="Default: %(default)s") parser.add_argument("--plot-y-lim", dest="y_lim", type=float, help="Y axis value limit. ") # parser.add_argument("--plot-figure-extra-width", dest="extra_width", default=3., type=float, # help="Default: %(default)s") parser.add_argument("--plot-font", dest="plot_font", default=None, help="For plot of unicode characters in some environments. Use 'Times New Roman','Arial' etc. " "Default: %(default)s.") parser.add_argument("--disable-customized-error-rate", dest="customized_error_rate", default=True, action="store_true") parser.add_argument("--which-bowtie2", dest="which_bowtie2", default="", help="Assign the path to Bowtie2 binary files if not added to the path. " "Default: try GetOrganelleDep/" + SYSTEM_NAME + "/bowtie2 first, then $PATH") parser.add_argument("--bowtie2-mode", dest="bowtie2_mode", default="--sensitive", help="Default: %(default)s") parser.add_argument("--bowtie2-options", dest="other_bowtie2_options", default="--no-discordant --dovetail", help="Default: %(default)s") parser.add_argument("--stat-mode", dest="stat_mode", default="best", help="Statistical mode for counting multiple hits of a single read: best/all. " "The all mode is meaningful only when '-k <INT>' was included in '--bowtie2-options'. " "Default: %(default)s") parser.add_argument("--debug", dest="debug_mode", default=False, action="store_true", help="Turn on debug mode.") parser.add_argument("-v", "--version", action="version", version="GetOrganelle v{version}".format(version=get_versions())) options = parser.parse_args() if not (options.fasta and ((options.original_fq_1 and options.original_fq_2) or options.unpaired_fq_files) and options.output_base): sys.stderr.write("Insufficient arguments!\n") sys.exit() if not os.path.isdir(options.output_base): os.mkdir(options.output_base) if options.debug_mode: log_level = "DEBUG" else: log_level = "INFO" assert options.stat_mode in ("best", "all") log_handler = simple_log(logging.getLogger(), options.output_base, "", log_level=log_level) log_handler.info("") log_handler.info("Python " + str(sys.version).replace("\n", " ")) log_handler.info("PLATFORM: " + " ".join(platform.uname())) # log versions of python libs lib_versions_info = [] if options.draw_plot: try: import matplotlib except ImportError: pass else: lib_versions_info.append("matplotlib " + matplotlib.__version__) lib_versions_info.append("GetOrganelleLib " + GetOrganelleLib.__version__) log_handler.info("PYTHON LIBS: " + "; ".join(lib_versions_info)) # log versions of dependencies dep_versions_info = [] if not options.which_bowtie2: try_this_bin = os.path.join(GO_DEP_PATH, "bowtie2", "bowtie2") if os.path.isfile(try_this_bin) and executable(try_this_bin): options.which_bowtie2 = os.path.split(try_this_bin)[0] if not executable(os.path.join(options.which_bowtie2, "bowtie2")): log_handler.error(os.path.join(options.which_bowtie2, "bowtie2") + " not accessible!") exit() else: output, err = subprocess.Popen( os.path.join(options.which_bowtie2, "bowtie2") + " --version", stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True).communicate() this_lines = output.decode("utf8").split("\n")[:3] dep_versions_info.append("Bowtie2 " + this_lines[0].split()[-1].strip()) if not executable(os.path.join(options.which_bowtie2, "bowtie2-build") + " --large-index"): log_handler.error(os.path.join(options.which_bowtie2, "bowtie2-build") + " not accessible!") exit() 
log_handler.info("DEPENDENCIES: " + "; ".join(dep_versions_info)) log_handler.info("WORKING DIR: " + os.getcwd()) # if not executable(os.path.join(options.which_bowtie2, "bowtie2-build-l")): # log_handler.error(os.path.join(options.which_bowtie2, "bowtie2-build-l") + " not accessible!") # exit() log_handler.info(" ".join(["\"" + arg + "\"" if " " in arg else arg for arg in sys.argv]) + "\n") log_handler = timed_log(log_handler, options.output_base, "", log_level=log_level) return options, log_handler def modify_fasta(original_fasta, new_fasta, is_circular, max_lib_len): # duplicated seq names would cause error in downstream analysis count_name_freq = {} fasta_ob = SequenceList(original_fasta) for record in fasta_ob: if record.label in count_name_freq: count_name_freq[record.label] += 1 else: count_name_freq[record.label] = 1 record.seq = record.seq.replace("*", "N").replace("-", "N") duplicated_name_go = {seq_name: 0 for seq_name in count_name_freq if count_name_freq[seq_name] > 1} # if is_circular == "yes": for record in fasta_ob: if len(record.seq): record.seq = re_linear_circular_seqs(record.seq) to_add = record.seq[:max_lib_len] added_len = len(to_add) record.seq += to_add # in case ref is extremely short while added_len < max_lib_len: to_add = record.seq[:(max_lib_len - added_len)] added_len += len(to_add) record.seq += to_add if record.label in duplicated_name_go: duplicated_name_go[record.label] += 1 record.label = record.label.split(" ")[0] + "--" + str(duplicated_name_go[record.label]) # \ # + " ".join(record.label.split(" ")[1:]) fasta_ob.write_fasta(new_fasta) elif is_circular == "auto": for record in fasta_ob: if len(record.seq) and record.label.endswith("(circular)"): record.seq = re_linear_circular_seqs(record.seq) to_add = record.seq[:max_lib_len] added_len = len(to_add) record.seq += to_add # in case ref is extremely short while added_len < max_lib_len: to_add = record.seq[:(max_lib_len - added_len)] added_len += len(to_add) record.seq += to_add if record.label in duplicated_name_go: duplicated_name_go[record.label] += 1 record.label = record.label.split(" ")[0] + "--" + str(duplicated_name_go[record.label]) # \ # + " ".join(record.label.split(" ")[1:]) fasta_ob.write_fasta(new_fasta) else: for record in fasta_ob: if record.label in duplicated_name_go: duplicated_name_go[record.label] += 1 record.label = record.label.split(" ")[0] + "--" + str(duplicated_name_go[record.label]) # \ # + " ".join(record.label.split(" ")[1:]) fasta_ob.write_fasta(new_fasta) def get_lengths_with_seq_names_modified(raw_fasta_file, log_handler=None): # duplicated seq names would cause error in downstream analysis count_name_freq = {} fasta_ob = SequenceList(raw_fasta_file) for record in fasta_ob: if record.label in count_name_freq: count_name_freq[record.label] += 1 else: count_name_freq[record.label] = 1 duplicated_name_go = {seq_name: 0 for seq_name in count_name_freq if count_name_freq[seq_name] > 1} for record in fasta_ob: if record.label in duplicated_name_go: duplicated_name_go[record.label] += 1 record.label = record.label.split(" ")[0] + "--" + str(duplicated_name_go[record.label]) else: record.label = record.label.split(" ")[0] if log_handler: lengths = [len(rc.seq) for rc in fasta_ob] log_handler.info("Reference length: " + str(sum(lengths)) + " (" + ", ".join([str(l) for l in lengths]) + ")") return {record.label: len(record.seq) for record in fasta_ob} def adjust_vertically_in_one_line(max_val, min_val=0, soft_min_gap=20., *y_positions): record_original_order = [[this_y_pos, raw_order] 
for raw_order, this_y_pos in enumerate(y_positions)] record_original_order.sort() len_val = len(record_original_order) go_val = 0 while go_val + 1 < len_val: if record_original_order[go_val][0] + soft_min_gap > record_original_order[go_val + 1][0]: record_original_order[go_val + 1][0] = record_original_order[go_val][0] + soft_min_gap go_change = go_val + 2 while go_change < len_val: if record_original_order[go_change - 1][0] + soft_min_gap > record_original_order[go_change][0]: record_original_order[go_change][0] = record_original_order[go_change - 1][0] + soft_min_gap go_change += 1 else: break go_val += 1 # push out if record_original_order[-1][0] > max_val: record_original_order[-1][0] = max_val record_original_order.sort(reverse=True) go_val = 0 while go_val + 1 < len_val: if record_original_order[go_val][0] - soft_min_gap < record_original_order[go_val + 1][0]: record_original_order[go_val + 1][0] = record_original_order[go_val][0] - soft_min_gap go_change = go_val + 2 while go_change < len_val: if record_original_order[go_change - 1][0] - soft_min_gap < record_original_order[go_change][0]: record_original_order[go_change][0] = record_original_order[go_change - 1][0] - soft_min_gap go_change += 1 else: break go_val += 1 # push back, mean if record_original_order[0][0] < min_val: mean_dist = float(max_val - min_val) / (len_val - 1) record_original_order[0][0] = max_val record_original_order[-1][0] = min_val for go_val in range(1, len_val - 1): record_original_order[go_val][0] = max_val - go_val * mean_dist # sort by original order record_original_order.sort(key=lambda x: x[1]) return [new_val[0] for new_val in record_original_order] def adjust_vertically_in_different_lines(middle_y, min_graph_dist, x_factor=1., y_factor=1., *sorted_x_y_positions): x_y_positions = deepcopy(sorted_x_y_positions) for go_p, (x_pos, y_pos) in enumerate(x_y_positions): if go_p > 0 and (x_pos - x_y_positions[go_p - 1][0]) * x_factor < min_graph_dist: go_back = go_p - 1 constraints = Interval(-inf, inf) while go_back >= 0 and (x_pos - x_y_positions[go_back][0]) * x_factor < min_graph_dist: prev_x = x_y_positions[go_back][0] if len(x_y_positions[go_back][1]): prev_y = sum(x_y_positions[go_back][1]) / len(x_y_positions[go_back][1]) else: prev_y = 0. y_move = (min_graph_dist ** 2 - ((x_pos - prev_x) * x_factor) ** 2) ** 0.5 / y_factor constraints &= (Interval(-inf, prev_y - y_move) | Interval(prev_y + y_move, inf)) go_back -= 1 if middle_y not in constraints: new_average_y = sorted(constraints.boundary, key=lambda cons_y: abs(cons_y-middle_y))[0] if len(y_pos): old_y = sum(y_pos) / len(y_pos) else: old_y = 0. 
x_y_positions[go_p][1] = x_y_positions[go_p][1] - (old_y - new_average_y) return x_y_positions def main(): options, log_handler = get_options() try: new_fasta = os.path.join(options.output_base, "modified.fasta") if not (options.resume and os.path.exists(new_fasta)): modify_fasta(options.fasta, new_fasta, options.is_circular, max_lib_len=options.max_lib_len) unpaired_fq_files = [] if options.unpaired_fq_files: unpaired_fq_files = options.unpaired_fq_files.split(",") other_bowtie2_options = " -X " + str(options.max_lib_len) + " " + options.other_bowtie2_options + " " bowtie2_mode = " " + options.bowtie2_mode + " " map_with_bowtie2(seed_file=new_fasta, original_fq_files=unpaired_fq_files, bowtie_out=os.path.join(options.output_base, "check"), resume=options.resume, threads=options.threads, random_seed=options.random_seed, silent=False or options.debug_mode, log_handler=log_handler, verbose_log=options.debug_mode, which_bowtie2=options.which_bowtie2, bowtie2_other_options=other_bowtie2_options, fq_1=options.original_fq_1, fq_2=options.original_fq_2, bowtie2_mode=bowtie2_mode) ref_lengths = get_lengths_with_seq_names_modified(options.fasta, log_handler) mapping_records = MapRecords(sam_file=os.path.join(options.output_base, "check.sam"), ref_real_len_dict=ref_lengths) sequence_statistics = \ mapping_records.get_customized_mapping_characteristics(multiple_hits_mode=options.stat_mode) num_mapped_reads = mapping_records.get_number_of_mapped_reads() num_paired, num_single = num_mapped_reads["paired"], num_mapped_reads["single"] if options.draw_plot: import matplotlib matplotlib.use('Agg') if options.plot_font: matplotlib.rc('font', family=options.plot_font) import matplotlib.pyplot as plt else: plt = None # make data and default settings gap_len = options.gap_len extra_width = 3. 
# options.extra_width sliding_w_size = 1 # options.sliding_window_size x_data_len = gap_len * (len(mapping_records.references) - 1) \ + sum([mapping_records.references[ref]["real_len"] for ref in mapping_records.references]) fig_width = extra_width + x_data_len / options.plot_x_density fig_height = options.figure_height extra_percent = extra_width / fig_width title_height_percent = 0.09 add_extra_to_left = 0.27 # for extra_width==3 plot_area_l, plot_area_r = extra_percent * add_extra_to_left, 1 - extra_percent * (1 - add_extra_to_left) plot_area_b, plot_area_t = title_height_percent, 1 - title_height_percent cigar_chars = ["M", "X", "I", "D"] cigar_char_dict = {"M": "Matched", "X": "Mismatched", "I": "Inserted", "D": "Deleted"} color_used = {"M": [(0.133, 0.616, 0.361), 0.5], "X": [(0.145, 0.651, 0.961), 0.3], "I": [(0.996, 0.804, 0.322), 0.8], "D": [(0.831, 0.310, 0.275), 0.5]} x_data = np.array(range(x_data_len)) y_data = {} # log mean and std y_stat = dict() max_y_dat = 0 # start @20190304: add extra error rate @20190304 err_all_cover = np.array([]) err_subset_cover = [np.array([]) for foo in range(len(sequence_statistics["M"]))] # end @20190304 for cigar_char in cigar_chars: y_stat[cigar_char] = {"subset": [], "all": []} this_cover = [] this_cover_for_stat = [] ref_list = sorted(list(sequence_statistics[cigar_char])) go_to_subset = 0 while ref_list: ref = ref_list.pop(0) smoothed_cover_per_site = sequence_statistics[cigar_char][ref] this_mean, this_std = float(np.average(smoothed_cover_per_site)), float(np.std(smoothed_cover_per_site)) y_stat[cigar_char]["subset"].append((this_mean, this_std, len(smoothed_cover_per_site))) this_cover_for_stat.extend(smoothed_cover_per_site) # start @20190304 if options.customized_error_rate: if cigar_char in {"X", "I", "D"}: if len(err_subset_cover[go_to_subset]): err_subset_cover[go_to_subset] += np.array(smoothed_cover_per_site) else: err_subset_cover[go_to_subset] = np.array(smoothed_cover_per_site) # end @20190304 if sliding_w_size != 1: new_averaged_cover = [] for j in range(len(smoothed_cover_per_site)): if j % sliding_w_size: new_averaged_cover.append(0) else: new_averaged_cover.append(np.average(smoothed_cover_per_site[j: j + sliding_w_size])) smoothed_cover_per_site = np.array(new_averaged_cover) this_cover.extend(smoothed_cover_per_site) max_y_dat = max(max(smoothed_cover_per_site), max_y_dat) if ref_list: this_cover.extend([0] * gap_len) go_to_subset += 1 y_data[cigar_char] = np.ma.masked_where(np.array(this_cover) <= 0, this_cover) y_stat[cigar_char]["all"] = float(np.average(this_cover_for_stat)), float(np.std(this_cover_for_stat)) # start @20190304 if options.customized_error_rate: if cigar_char in {"X", "I", "D"}: if len(err_all_cover): err_all_cover += np.array(this_cover_for_stat) else: err_all_cover = np.array(this_cover_for_stat) # end @20190304 if not max_y_dat: raise ValueError("No mapped reads found!") if options.y_lim: max_y_dat = options.y_lim # start @20190304 if options.customized_error_rate: y_stat["error"] = {"all": [np.average(err_all_cover) / y_stat["M"]["all"][0], np.std(err_all_cover) / y_stat["M"]["all"][0]], "subset": [[np.average(err_subset_cover[go_to_sb]) / y_stat["M"]["subset"][go_to_sb][0], np.std(err_subset_cover[go_to_sb]) / y_stat["M"]["subset"][go_to_sb][0], y_stat["M"]["subset"][go_to_sb][2]] for go_to_sb in range(len(sequence_statistics["M"]))]} # end @20190304 # create figure if options.draw_plot: fig, ax = plt.subplots(1, 1, figsize=(fig_width, fig_height)) ax.spines['top'].set_visible(False) # 
ax.spines['bottom'].set_visible(False) ax.spines['right'].set_visible(False) # ax.spines['left'].set_visible(False) plt.setp(ax.get_xticklabels(), fontsize=12) plt.setp(ax.get_yticklabels(), fontsize=12) fig.subplots_adjust(left=plot_area_l, right=plot_area_r, bottom=plot_area_b, top=plot_area_t) lines = plt.plot(x_data, y_data["M"], 'o', x_data, y_data["X"], 'o', x_data, y_data["I"], 'o', x_data, y_data["D"], 'o') for go_to, this_char in enumerate(cigar_chars): plt.setp(lines[go_to], color=color_used[this_char][0], alpha=color_used[this_char][1], markersize=0.2) plt.title(" " + (options.plot_title if options.plot_title else options.fasta), fontsize=18, loc='left') subtitle_x_pos = x_data_len + (1 - plot_area_r) * fig_width * options.plot_x_density subtitle_y_pos = max_y_dat * (1 + (1. / (1 - 2 * title_height_percent) - 1) / 8) mapped_str = [] if num_paired: mapped_str.append("# mapped pairs: " + str(int(num_paired / 2))) if num_single: mapped_str.append("# mapped reads: " + str(int(num_paired / 2)) + "×2+" + str(num_single)) for subtitle_str in [sub_str.strip() for sub_str in options.plot_subtitle.split(" ")] + mapped_str: plt.text(subtitle_x_pos, subtitle_y_pos, subtitle_str, fontsize=12, alpha=0.7, horizontalalignment='right', verticalalignment='center', multialignment='center') subtitle_y_pos -= max_y_dat / options.figure_height / 4. # for fontsize==2 # write to log line_labels = {c_: full_name + ": " + "%.2f" % y_stat[c_]["all"][0] + "±" + "%.2f" % y_stat[c_]["all"][1] for c_, full_name in cigar_char_dict.items()} echo_statistics = [ line_labels[cigar_char] + " (" + ", ".join(["%.2f" % here_mean + "±" + "%.2f" % here_std for here_mean, here_std, here_len in y_stat[cigar_char]["subset"]]) + ")" for cigar_char in cigar_chars] if options.customized_error_rate: echo_statistics.append("Customized error rate: " + "%.4f" % y_stat["error"]["all"][0] + "±" + "%.4f" % y_stat["error"]["all"][1] + " (" + ", ".join(["%.4f" % here_mean + "±" + "%.4f" % here_std for here_mean, here_std, here_len in y_stat["error"]["subset"]]) + ")") echo_statistics.insert(0, "# mapped pairs: " + str(int(num_paired / 2))) echo_statistics.insert(0, "# mapped reads: " + str(int(num_paired / 2)) + "×2+" + str(num_single)) for log_line in echo_statistics: log_handler.info(log_line) # plot txt if options.draw_plot: new_y_pos = adjust_vertically_in_one_line(max_y_dat, 0, max_y_dat / 20, y_stat["M"]["subset"][-1][0], y_stat["X"]["subset"][-1][0], y_stat["I"]["subset"][-1][0], y_stat["D"]["subset"][-1][0]) for go_to, this_char in enumerate(cigar_chars): plt.text(x_data_len + 0.05 * options.plot_x_density, new_y_pos[go_to], line_labels[this_char], color=color_used[this_char][0], fontsize=12) if y_stat["M"]["subset"][-1][0] and max_y_dat / y_stat["M"]["subset"][-1][0] > 2.: middle_pos = (y_stat["M"]["subset"][-1][0] + y_stat["M"]["subset"][-1][1] + max_y_dat) / 2 else: middle_pos = (y_stat["M"]["subset"][-1][0] - y_stat["M"]["subset"][-1][1] + y_stat["X"]["subset"][-1][0] + y_stat["X"]["subset"][-1][1]) / 2 middle_set = np.array([middle_pos + max_y_dat/15, middle_pos + max_y_dat/45, middle_pos - max_y_dat/45, middle_pos - max_y_dat/15]) x_y_pos = [] this_accumulated_pos = 0 for this_mean_cov, this_std_cov, this_len in y_stat["M"]["subset"]: x_y_pos.append([this_accumulated_pos + this_len / 2, deepcopy(middle_set)]) this_accumulated_pos += this_len + gap_len width_over_x = (fig_width - extra_width) / x_data_len height_over_y = fig_height / max_y_dat new_x_y_pos = adjust_vertically_in_different_lines(np.average(middle_set), 
(fig_height**2 + (fig_width - extra_width)**2) ** 0.5 / 10, width_over_x, height_over_y, *x_y_pos) label_x_offset = -0.3 / width_over_x for go_to, this_char in enumerate(cigar_chars): # accumulated_pos = 0 for go_subset, (this_mean_cov, this_std_cov, this_len) in enumerate(y_stat[this_char]["subset"]): this_x, this_y = new_x_y_pos[go_subset] plt.text(this_x + label_x_offset, this_y[go_to], "%.2f" % this_mean_cov + "±" + "%.2f" % this_std_cov, color=color_used[this_char][0], fontsize=9) # accumulated_pos += this_len + gap_len for go_subset, (this_mean_cov, this_std_cov, this_len) in enumerate(y_stat["M"]["subset"]): this_x, this_y = new_x_y_pos[go_subset] plt.text(this_x + label_x_offset, this_y[0] + max_y_dat * 2/45, str(this_len) + " bp", color="black", fontsize=9) # density plot could also be added follow this # a = plt.axes([.65, .6, .2, .2], facecolor='k') # n, bins, patches = plt.hist(s, 400, density=True) # plt.title('Probability') # plt.xticks([]) # plt.yticks([]) for plot_format in options.plot_format.split(","): plt.savefig(os.path.join(options.output_base, "mapping." + plot_format), transparent=options.plot_transparent, dpi=300) log_handler = simple_log(log_handler, options.output_base, "") log_handler.info("") except Exception as e: log_handler.exception(str(e)) logging.shutdown() if __name__ == '__main__': main()
Kinggerm/GetOrganelle
Utilities/evaluate_assembly_using_mapping.py
Python
gpl-3.0
29,914
0.005517
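One detail in modify_fasta above that is easy to miss: for circular references it re-linearizes the sequence and then appends (roughly) its first max_lib_len bases to the end, presumably so that read pairs spanning the origin can still map contiguously. A toy illustration of that wrap-around padding (not GetOrganelle code):

def wrap_circular(seq, max_lib_len):
    """Pad a circular sequence so fragments crossing the origin map contiguously."""
    if not seq:
        return seq
    out, added = seq, 0
    while added < max_lib_len:          # loop in case the sequence is shorter than the pad
        chunk = seq[:max_lib_len - added]
        out += chunk
        added += len(chunk)
    return out

print(wrap_circular("ACGTACGTAA", 4))   # ACGTACGTAAACGT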
# -*- coding: utf-8 -*- # Generated by Django 1.10.1 on 2016-10-18 15:02 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("froide_campaign", "0007_campaign_subject_template"), ] operations = [ migrations.CreateModel( name="CampaignPage", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("title", models.CharField(max_length=255)), ("slug", models.SlugField()), ("description", models.TextField(blank=True)), ("public", models.BooleanField(default=False)), ("campaigns", models.ManyToManyField(to="froide_campaign.Campaign")), ], options={ "verbose_name": "Campaign page", "verbose_name_plural": "Campaign pages", }, ), ]
okfde/froide-campaign
froide_campaign/migrations/0008_campaignpage.py
Python
mit
1,181
0.000847
""" Multiple stacked lstm implemeation on the lip movement data. Akm Ashiquzzaman [email protected] Fall 2016 """ from __future__ import print_function import numpy as np np.random.seed(1337) #random seed fixing for reproducibility #data load & preprocessing X_train = np.load('../data/videopart43.npy').astype('float32') Y_train = np.load('../data/audiopart43.npy').astype('float32') #normalizing data X_train = X_train/255 Y_train = Y_train/32767 X_train = X_train.reshape((826,13,1,53,53)).astype('float32') Y_train = Y_train.reshape((826,13*4702)).astype('float32') from keras.models import Sequential from keras.layers import Dense,Activation,Dropout,TimeDistributed,LSTM,Bidirectional from keras.layers import Convolution2D,Flatten,MaxPooling2D import time print("Building Model.....") model_time = time.time() model = Sequential() model.add(TimeDistributed(Convolution2D(64, 3, 3,border_mode='valid'),batch_input_shape=(14,13,1,53,53),input_shape=(13,1,53,53))) model.add(Activation('tanh')) model.add(Dropout(0.25)) model.add(TimeDistributed(Convolution2D(32, 2, 2, border_mode='valid'))) model.add(Activation('tanh')) model.add(TimeDistributed(Flatten())) model.add(Bidirectional(LSTM(256,return_sequences=True,stateful=True))) model.add(Dropout(0.20)) model.add(Bidirectional(LSTM(128,return_sequences=True,stateful=True))) model.add(Dropout(0.20)) model.add((LSTM(64,stateful=True))) model.add(Dropout(0.20)) model.add((Dense(512))) model.add(Activation('tanh')) model.add(Dropout(0.5)) model.add((Dense(13*4702))) model.add(Activation('tanh')) model.compile(loss='mse', optimizer='rmsprop', metrics=['accuracy']) #checkpoint import from keras.callbacks import ModelCheckpoint from os.path import isfile, join #weight file name weight_file = '../weights/time-dis-cnn_weight.h5' #loading previous weight file for resuming training if isfile(weight_file): model.load_weights(weight_file) #weight-checkmark checkpoint = ModelCheckpoint(weight_file, monitor='acc', verbose=1, save_best_only=True, mode='max') callbacks_list = [checkpoint] print("model compile time: "+str(time.time()-model_time)+'s') # fit the model model.fit(X_train,Y_train, nb_epoch=1, batch_size=14,callbacks=callbacks_list) pred = model.predict(X_train,batch_size=14,verbose=1) pred = pred*32767 pred = pred.reshape(826*13,4702) print('pred shape',pred.shape) print('pred dtype',pred.dtype) np.save('../predictions/pred-time-cnn.npy',pred)
zamanashiq3/code-DNN
time_dis_cnn.py
Python
mit
2,449
0.02205
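A constraint behind the hard-coded numbers in the Keras script above: with stateful=True the batch size is fixed by batch_input_shape, so the number of samples has to divide evenly by it, which is why 826 samples are paired with batch_size=14. A two-line sanity check of that relationship:

n_samples, batch_size = 826, 14
assert n_samples % batch_size == 0, "a stateful model would fail on the last partial batch"
print(n_samples // batch_size, "full batches per epoch")   # 59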
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar) # All Rights Reserved. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'TAG Project Stage', 'version': '1.0', 'category': 'Projects & Services', 'sequence': 14, 'summary': '', 'description': """ Project Stage ===================== """, 'author': 'ADHOC SA', 'website': 'www.adhoc.com.ar', 'images': [ ], 'depends': [ 'project', ], 'data': [ 'view/project_view.xml', 'view/menu_item.xml', 'security/ir.model.access.csv', ], 'demo': [ ], 'test': [ ], 'installable': True, 'auto_install': False, 'application': False, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
smartforceplus/SmartForceplus
openerp/addons/project_stage/__openerp__.py
Python
agpl-3.0
1,603
0
from datetime import datetime
import csv
import pandas
import os
import sys

os.chdir(sys.argv[1])
ticker_f = open(sys.argv[2], "rb")
ticker_reader = csv.reader(ticker_f)
tickers = [r[0] for r in ticker_reader][1:]
ticker_f.close()
tln = len(tickers)
t_1 = datetime.now()

# build full data frame
res = None
for i, t in enumerate(tickers):
    t_n = t.split("/")[1]
    df = pandas.io.parsers.read_csv("%s.csv" % t_n)
    df[t_n] = (df["Close"].shift(1) - df["Close"]) / df["Close"]
    df = df[["Date", t_n]]
    df.set_index("Date")
    if res is None:
        res = df
    else:
        res = res.merge(df, on="Date", how="outer")
    print i, i * 100. / tln, datetime.now() - t_1

res = res.dropna(axis=0, thresh=int(sys.argv[3]))  # drop rows with many missing obs
res = res.dropna(axis=1, thresh=int(sys.argv[4]))  # drop columns with many missing vars
res = res.dropna()
res.to_csv(sys.argv[5])
lbybee/NVLDA
code/build_dataset.py
Python
gpl-2.0
865
0.002312
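The pruning step in build_dataset.py above hinges on pandas' dropna(thresh=...), which keeps only the rows (axis=0) or columns (axis=1) with at least that many non-missing values. A small demonstration on a throwaway frame:

import numpy as np
import pandas as pd

df = pd.DataFrame({"A": [1.0, np.nan, 3.0],
                   "B": [np.nan, np.nan, 2.0],
                   "C": [5.0, 6.0, 7.0]})
print(df.dropna(axis=0, thresh=2))   # drops the middle row (only one non-NaN value)
print(df.dropna(axis=1, thresh=3))   # keeps only column C (three non-NaN values)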
#!/usr/bin/env python #want to display file contents #testing display code import pyperclip import re import subprocess import os,sys,time counter=1 already_checked='' def get_extension(file_name): if file_name.find('.')!=-1: ext = file_name.split('.') return (ext[1]) else: return 'txt' def cut(str, len1): return str[len1 + 1:] #to remove first line which is meant for reading from which file #for displaying contents def find(name, path): for root, dirs, files in os.walk(path): if name in files: return os.path.join(root, name) #ubuntu notification (message sending) def sendmessage(message): subprocess.Popen(['notify-send', message]) return while(True): str = pyperclip.paste() #for bring the content of clipboard to str variable str_low= str.lower() str_lower =str_low.split("\n") if(str_lower[0]=="stop -safe"): sendmessage("Stopped the background process for code snippet management...byebye") os.exit() if (str_lower[0].find("from")!=-1 and str_lower[0].find("code") !=-1 and str_lower[0].find("snippet") !=-1 and str_lower[0].find("-safe") !=-1): if (re.search(r'\w+\.[a-z,A-Z,0-9]',str_lower[0])!=None and str_lower[0].find("-deep")==-1): #filename is given str1=str.split(' ') #split() returns a list [ ] file_str=str1[(len(str1)-2)] #in the first line take the last seconde element of list file_str=file_str.replace(".txt"," ") #if filename.txt is given remove txt and search --for all other we need extension if(file_str==already_checked): continue str2= find(file_str,"/home/nikhil/code_snippets") #finding the directory of the file from where code to be copied try: file1= open(str2,"r") except: print "ohhh mann" sendmessage("file not found in codesnippets sorry") already_checked=file_str continue pyperclip.copy(file1.read()) sendmessage("Fetched press ctrl+v") elif (str_lower[0].find("-deep")!=-1 and re.search("\'[a-z,A-Z,0-9, ]+\'",str_lower[0])!=None):#text is given and need to grep it search_string= re.search("\'[a-z,A-Z,0-9, ]+\'",str_lower[0]) if search_string!=None: entered_string = search_string.group() final_search_string=entered_string[1:len(entered_string)-1] try: hosts = subprocess.check_output("grep '%s' /home/nikhil/code_snippets -r" % (final_search_string), shell=True) #http://stackoverflow.com/questions/12809467/how-to-get-output-of-grep-command-python lists=re.findall(r"/[a-z,A-Z,0-9]+\.[a-z]+",hosts) #befor using below line e.g:- /ooh.py s=lists[0][1:] #after using above line e.g:-ooh.py extension=get_extension(s) print extension file_obj=open('/home/nikhil/code_snippets/'+extension.upper()+'/'+s,'r') pyperclip.copy(file_obj.read()) sendmessage("Success..Fetched!") except: sendmessage("unable to find") else: sendmessage("You Entered Non Existing Search String..") os.system('python /home/nikhil/Desktop/haha.py') #todo file not found exception is being raised --unlimited times #same comment in multiple files means it is showing only first file with that comment --handle this
nikhilponnuru/codeCrumbs
code/code_display.py
Python
mit
3,605
0.035229
import os,sys,django sys.path.append(os.path.dirname(os.path.abspath('.'))) os.environ["DJANGO_SETTINGS_MODULE"] = 'skill_huddle.settings' django.setup() from sh_app.models import SH_User,League,Suggestion,Huddle from django.contrib.auth.models import User from django_countries import countries from localflavor.us.us_states import STATE_CHOICES from django.utils import timezone import random def createUsers(): random.seed() with open('adjectives.txt','r') as adjs,\ open('nouns.txt','r') as nouns: list_adjs = adjs.readlines() list_nouns = nouns.readlines() for i in range(1,100): #create and save user object #random user name first_name = list_adjs[random.randint(0,len(list_adjs))].replace('\n','') last_name = list_nouns[random.randint(0,len(list_nouns))].replace('\n','') usrname = (first_name + '_' + last_name)[:30] usr = User(username = usrname,email = "[email protected]") usr.set_password("zxcvbnm,") usr.first_name = first_name usr.last_name = last_name sh = SH_User() try: usr.save() #create and save sh user sh.user = usr sh.first_name = first_name sh.last_name = last_name sh.save() except: mesage = "failed to create user:%s" % usrname print(mesage) def createLeagues(): random.seed() sh_users = SH_User.objects.all() list_countries = list(map(lambda x: x[0],list(countries))) list_states = list(map(lambda x: x[0],list(STATE_CHOICES))) string_book = '' with open('aristotle.txt','r') as fi: string_book = fi.read() for i in range(1,10): new_league = League() with open('adjectives.txt','r') as adjs,open('nouns.txt','r') as nouns: list_adjs = adjs.readlines() list_nouns = nouns.readlines() name = "%s %s league" % (list_adjs[random.randint(0,len(list_adjs)-1)].replace('\n',''), list_nouns[random.randint(0,len(list_nouns))].replace('\n','')) desc_start = random.randint(0,82824 - 300) description = string_book[desc_start : desc_start + 160] country = list_countries[random.randint(0,len(list_countries) -1)] if country == 'US': new_league.state = list_states[random.randint(0,len(list_states) -1)] city = list_nouns[random.randint(0,len(list_nouns))].replace('\n','') new_league.city = city new_league.name = name.capitalize() new_league.decription = description new_league.country = country new_league.head_official = sh_users[random.randint(0,len(sh_users) - 1)] try: new_league.save() new_league.officials.add(new_league.head_official) new_league.members.add(new_league.head_official) except: errormsg = 'Failed to create league: %s' % new_league.name print(errormsg) def addLeagueMembers(): random.seed() #add sh_users to the list for league in League.objects.all(): usrs = list(SH_User.objects.all()) usrs.remove(league.head_official) for i in range(0,25): new_member = usrs[random.randint(0,len(usrs) - 1)] usrs.remove(new_member) try: league.members.add(new_member) except: errormsg = "Failed to add member: %s" % new_member print(errormsg) def addLeagueOfficials(): random.seed() for league in League.objects.all(): list_members = list(league.members.all()) list_members.remove(league.head_official) for i in range(0,3): new_official = list_members[random.randint(0,len(list_members) -1)] list_members.remove(new_official) try: league.officials.add(new_official) except: errormsg = "Feiled to add official: %s" % new_official def createSuggestions(): random.seed() with open('adjectives.txt','r') as adjs,\ open('nouns.txt','r') as nouns: list_adjs = adjs.readlines() list_nouns = nouns.readlines() string_book = '' with open('aristotle.txt','r') as fi: string_book = fi.read() for league in League.objects.all(): for i in 
range(0,10): tot_members = league.members.count() rand_user = league.members.all()[random.randint(0,tot_members -1)] name = list_adjs[random.randint(0,len(list_adjs)-1)].strip('\n') +\ " " + list_nouns[random.randint(0,len(list_nouns)-1)].strip('\n') +\ " " + list_nouns[random.randint(0,len(list_nouns)-1)] + " suggestion" desc_start = random.randint(0,82824 - 300) description = string_book[desc_start: desc_start + 200] new_suggestion = Suggestion() new_suggestion.name = name.capitalize() new_suggestion.suggested_by = rand_user new_suggestion.description = description new_suggestion.voting_starts = timezone.now() -\ timezone.timedelta(days=random.randint(0,10)) new_suggestion.voting_ends = new_suggestion.voting_starts +\ timezone.timedelta(days=random.randint(1,10)) try: new_suggestion.league = league new_suggestion.save() if new_suggestion.voting_ends < timezone.now(): random_int = random.randint(0, 2) if random_int == 0: for sh_user in league.members.all(): new_suggestion.upvotes.add(sh_user) new_suggestion.is_accepted = True new_suggestion.save() except: errormsg = "Failed to add Suggestion: %s" % new_suggestion print(errormsg) def voteOnSuggestions(): random.seed() for league in League.objects.all(): for suggestion in league.suggestions.all(): for member in league.members.all(): votetype = random.randint(0,2) if votetype > 0: if votetype == 1: #upvote try: suggestion.upvotes.add(member) except: errormsg = "Failed to add upvoter %s" % member print(errormsg) else: #downvote try: suggestion.downvotes.add(member) except: errormsg = "Failed to add downvoter %s" % member print(errormsg) def clearVotes(): for league in League.objects.all(): for suggestion in league.suggestions.all(): try: suggestion.upvotes.clear() except: errormsg = "Failed to clear upvotes for %s" % suggestion print(errormsg) try: suggestion.downvotes.clear() except: errormsg = "Failed to clear downvotes for %s" % suggestion print(errormsg) def createHuddles(): random.seed() list_adjs = [] list_nouns = [] list_roadtype = ['Avenue','Road','Street','Drive'] string_book = '' with open('adjectives.txt','r') as adjs,open('nouns.txt','r') as nouns,\ open('aristotle.txt','r') as fi: list_adjs = adjs.readlines() list_nouns = nouns.readlines() string_book = fi.read() for league in League.objects.all(): for i in range(0,10): name = list_adjs[random.randint(1,len(list_adjs))-1].strip('\n') + " " + list_nouns[random.randint(1,len(list_nouns))-1].strip('\n') + " huddle" address = str(random.randint(1,1000)) +\ " " + list_nouns[random.randint(1,len(list_nouns))-1].strip('\n') +\ " " + list_roadtype[random.randint(1,len(list_roadtype))-1] desc_start = random.randint(0,82824 - 300) description = string_book[desc_start : desc_start + 160] date = timezone.now() + timezone.timedelta(days=random.randint(-20,20)) new_huddle = Huddle() new_huddle.name = name.capitalize() new_huddle.address = address new_huddle.description = description new_huddle.league = league new_huddle.date = date list_officials = list(league.officials.all()) try: new_huddle.save() for j in range(0,3): expert = list_officials[random.randint(0,len(list_officials)-1)] new_huddle.experts.add(expert) list_officials.remove(expert) except: errormsg = "Failed to create: %s" % new_huddle def clearHuddles(): for league in League.objects.all(): for huddle in league.huddles.all(): huddle.delete() def attendHuddles(): random.seed() for league in League.objects.all(): for huddle in league.huddles.all(): for member in league.members.all(): attendance = random.randint(0,10) if attendance 
==0: try: huddle.attendants.add(member) except: errormsg = "Failed to add attendee: %s to huddle %s" % (member,huddle) print(errormsg) if __name__ == '__main__': createUsers() createLeagues() addLeagueMembers() addLeagueOfficials() createSuggestions() voteOnSuggestions() createHuddles() attendHuddles()
skill-huddle/skill-huddle
dummy_data/populatedb.py
Python
mit
10,113
0.014734
## # Copyright 2016 DECaF Project Group, University of Paderborn # This file is part of the decaf orchestration framework # All Rights Reserved. # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. ## import uuid from twisted.internet.defer import Deferred, inlineCallbacks from multiprocessing.dummy import Pool from decaf_utils_protocol_stack import JsonRpcMessageApplication from decaf_utils_protocol_stack.json import JsonRPCCall, JsonRPCNotify, JsonRPCError, JsonRPCResult from decaf_utils_protocol_stack.rpc.sync_result import sync class RpcLayer(JsonRpcMessageApplication): def __init__(self, host_url=u"amqp://127.0.0.1", ioloop=None, **params): super(RpcLayer, self).__init__(host_url=host_url, ioloop=ioloop, **params) self._pending = dict() self._pool = Pool() self.msg_handler = None def send_result(self, receiver, result, id): exchange = str(receiver).split(".")[0] self.route(receiver, JsonRPCResult(result=result, id=id), exchange=exchange) def send_error(self, receiver, code, id, *args, **kwargs): self.route(receiver, JsonRPCError(code=code, id=id, args=args, kwargs=kwargs)) def call(self, method, *args, **kwargs): corr_id = str(uuid.uuid4()) ret = Deferred() self._pending[corr_id] = ret self.route(method, JsonRPCCall(method=method, args=args, kwargs=kwargs, id=corr_id)) return ret def publish(self, routing_key, msg, **params): corr_id = str(uuid.uuid4()) ret = Deferred() self._pending[corr_id] = ret self.route(routing_key, JsonRPCNotify(method=routing_key, args=(msg,)), **params) return ret def callSync(self, timeout, rpc_name, *args, **kwargs): @sync(timeout=timeout) def wrap(): return self.call(rpc_name, *args, **kwargs) return wrap() def notify(self, method, *args, **kwargs): self.publish(method, JsonRPCNotify(method=method, args=args, kwargs=kwargs)) def deregister(self, routing_key, **params): params["method"] = "anycast" super(RpcLayer, self).unsubscribe(routing_key, **params) def unsubscribe(self, routing_key, **params): super(RpcLayer, self).unsubscribe(routing_key, **params) def subscribe(self, routing_key, function_pointer, frame=False, **params): self.logger.debug("Subscribing to %s with params: %s" % (routing_key, params)) if function_pointer is None: function_pointer = self.receive else: if not frame: function_pointer = self._make_handler(function_pointer) function_pointer = self.apply_in_pool(function_pointer) super(RpcLayer, self).subscribe(routing_key, function_pointer=function_pointer, **params) def register_direct(self, routing_key, msg_handler): pass def register(self, routing_key, function_pointer=None, **params): if function_pointer is None: function_pointer = self.receive else: function_pointer = self._make_handler(function_pointer) function_pointer = self.apply_in_pool(function_pointer) params = params or dict() params["method"] = "anycast" self._top_layer.subscribe(routing_key, function_pointer=function_pointer, **params) def receive(self, *args, **kwargs): self._pool.apply_async(func=self._receive, args=args, kwds=kwargs) def apply_in_pool(self, function): def apply_f(*args, **kwargs): self._pool.apply_async(func=function, args=args, kwds=kwargs) apply_f.func_name = function.func_name return apply_f def _make_handler(self, function): """ This method creates a wrapper for the given "function". This serves two purposes: A) Send the result back to the caller. 
B) Create an environment for asynchronous RPC within function. :param function: :param reply_to: :param corr_id: :return: """ # ----------------- INTERNAL FUNCTION ------------------------------------------------------------ @inlineCallbacks def on_call(routing_key, message, sender=None, **params): assert self.logger if isinstance(message, JsonRPCCall): try: self.logger.info("-------------------CALL TO COMPONENT-----------------------") self.logger.info("Executing function '%s' with argument(s) %s and %s", function.func_name, message.get_args, message.get_kwargs) res = yield function(*message.get_args, **message.get_kwargs) # self._out_channel.basic_ack(delivery_tag=delivery_tag) self.send_result(result=res, receiver=sender, id=message.get_id) except BaseException as e: self.logger.info("----------------CALL TO COMPONENT FAILED---------------------") self.logger.exception("Message: \n %s \n caused an Error: \n %s" % (message, e)) self.send_error(code=1, message=e.message, receiver=sender, id=message.get_id, args=e.args) except: self.logger.info("-----------------CALL TO COMPONENT FAILED---------------------") self.logger.exception("Message: \n %s \n caused an Error" % (message)) self.send_error(code=1, receiver=sender, id=message.get_id) if isinstance(message, JsonRPCNotify): try: self.logger.info("--------------DELIVER EVENT TO COMPONENT---------------------------") self.logger.info("Executing function '%s' with argument(s) %s and %s", function.func_name, message.get_args, message.get_kwargs) function(*message.get_args, **message.get_kwargs) except BaseException as e: self.logger.info("--------------DELIVER EVENT TO COMPONENT FAILED---------------------") self.logger.exception("Message: \n %s \n caused an Error: \n %s" % (message, e)) # ----------------- INTERNAL FUNCTION ------------------------------------------------------------ return on_call def _receive(self, routing_key, message, sender=None, **params): if isinstance(message, JsonRPCResult): self.logger.info("----------------RECEIVED A RESULT---------------------") self.logger.info("Result received: \n %s" % (message)) corr_id = message.get_id deferred = self._pending.get(corr_id, None) if deferred: deferred.callback(message.get_result) del self._pending[corr_id] if isinstance(message, JsonRPCError): self.logger.info("----------------RECEIVED AN ERROR---------------------") self.logger.exception("Error received: \n %s" % (message)) corr_id = message.get_id deferred = self._pending.get(corr_id, None) if deferred: deferred.errback(message) del self._pending[corr_id] if self.msg_handler: self.msg_handler(routing_key, message, sender, **params) pass def get_transport_layer(self): return super(RpcLayer, self).get_transport_layer() def set_msg_handler(self, msg_handler): self.msg_handler = msg_handler
CN-UPB/OpenBarista
utils/decaf-utils-protocol-stack/decaf_utils_protocol_stack/rpc/json_rpc_application.py
Python
mpl-2.0
7,693
0.0039
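The RpcLayer above only provides the transport plumbing; the short sketch below shows how a component might use it. The routing key "math.add", the handler, and the broker URL are illustrative assumptions and not part of OpenBarista; only the methods used (register, call, callSync, notify) come from the class shown above, and the import path is inferred from the file's location in the repo.

# Hypothetical usage sketch for the RpcLayer defined above; names are assumptions.
from decaf_utils_protocol_stack.rpc.json_rpc_application import RpcLayer  # assumed import path


def add(a, b):
    # Plain function; RpcLayer's handler wrapper routes the return value back to the caller.
    return a + b


def on_result(result):
    print(result)


rpc = RpcLayer(host_url=u"amqp://127.0.0.1")

# Expose the function under a routing key ("anycast" delivery is set internally by register()).
rpc.register("math.add", function_pointer=add)

# Asynchronous call: returns a twisted Deferred that fires with the remote result.
d = rpc.call("math.add", 2, 3)
d.addCallback(on_result)

# Or block for up to 5 seconds using the synchronous wrapper.
print(rpc.callSync(5, "math.add", 2, 3))

# Fire-and-forget notification to any subscribers of the key.
rpc.notify("events.started", component="example")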
import pytest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys


@pytest.fixture(scope='function')
def browser(request):
    browser_ = webdriver.Firefox()

    def fin():
        browser_.quit()
    request.addfinalizer(fin)
    return browser_


def test_can_show_a_relevant_code_snippet(browser):
    # Jan visits the site
    browser.get('http://localhost:8000')

    # He notices the title and header reference the site name
    site_name = 'Scout'
    assert site_name in browser.title
    header_text = browser.find_element_by_tag_name('h1').text
    assert site_name in header_text

    # He is invited to search for code snippets
    expected_search_prompt = 'Enter some code-related keywords'
    search_box = browser.find_element_by_id('id_search_box')
    actual_search_prompt = search_box.get_attribute('placeholder')
    assert actual_search_prompt == expected_search_prompt

    # He searches "python yield"
    search_box.send_keys('python yield')
    search_box.send_keys(Keys.ENTER)

    # The page updates, and now the page shows a code snippet
    # that uses the dummy variables "mylist" and "mygenerator"
    # (the highest-voted python page on StackOverflow.com is
    #  /questions/231767/what-does-the-yield-keyword-do-in-python)
    snippets = browser.find_elements_by_tag_name('code')
    assert any(['mylist' in snippet.text and 'mygenerator' in snippet.text
                for snippet in snippets])
jvanbrug/scout
functional_tests.py
Python
mit
1,460
0
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc.  Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------

"""
Tests the following set of sequences:

z-a-b-c:   (1X)
a-b-c:     (6X)
a-d-e:     (2X)
a-f-g-a-h: (1X)

We want to ensure that when we see 'a', we predict 'b' with the highest
confidence, then 'd', then 'f' and 'h' with equally low confidence.

We expect the following prediction scores:

inputPredScore_at1 : 0.7
inputPredScore_at2 : 1.0
inputPredScore_at3 : 1.0
inputPredScore_at4 : 1.0
"""

from nupic.frameworks.prediction.helpers import importBaseDescription

config = dict(
    sensorVerbosity=0,
    spVerbosity=0,
    tpVerbosity=0,
    ppVerbosity=2,

    filenameTrain = 'confidence/confidence2.csv',
    filenameTest = 'confidence/confidence2.csv',

    iterationCountTrain=None,
    iterationCountTest=None,
    trainTPRepeats = 5,

    trainTP=True,
)

mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
tkaitchuck/nupic
examples/prediction/experiments/confidenceTest/2/description.py
Python
gpl-3.0
2,040
0.005392
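One plausible reading of the expected 0.7 first-step score, offered only as a rough sanity check and not taken from the experiment code itself: of the 10 training sequences, 'b' follows the first 'a' in the single z-a-b-c sequence and in the 6 a-b-c sequences.

# Rough arithmetic check of the at-1 score under the interpretation described above.
sequence_counts = {'z-a-b-c': 1, 'a-b-c': 6, 'a-d-e': 2, 'a-f-g-a-h': 1}
total = sum(sequence_counts.values())                                     # 10
b_after_first_a = sequence_counts['z-a-b-c'] + sequence_counts['a-b-c']   # 7
print(b_after_first_a / float(total))                                     # 0.7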
from flask.ext.restplus import Namespace

from app.models.track import Track as TrackModel
from .helpers import custom_fields as fields
from .helpers.helpers import (
    can_create,
    can_update,
    can_delete,
    requires_auth
)
from .helpers.utils import PAGINATED_MODEL, PaginatedResourceBase, ServiceDAO, \
    PAGE_PARAMS, POST_RESPONSES, PUT_RESPONSES, SERVICE_RESPONSES
from .helpers.utils import Resource, ETAG_HEADER_DEFN

api = Namespace('tracks', description='Tracks', path='/')

TRACK_SESSION = api.model('TrackSession', {
    'id': fields.Integer(required=True),
    'title': fields.String(),
})

TRACK = api.model('Track', {
    'id': fields.Integer(required=True),
    'name': fields.String(required=True),
    'description': fields.String(),
    'color': fields.Color(required=True),
    'track_image_url': fields.Upload(),
    'location': fields.String(),
    'sessions': fields.List(fields.Nested(TRACK_SESSION)),
})

TRACK_PAGINATED = api.clone('TrackPaginated', PAGINATED_MODEL, {
    'results': fields.List(fields.Nested(TRACK))
})

TRACK_POST = api.clone('TrackPost', TRACK)
del TRACK_POST['id']
del TRACK_POST['sessions']


# Create DAO
class TrackDAO(ServiceDAO):
    version_key = 'tracks_ver'


DAO = TrackDAO(TrackModel, TRACK_POST)


@api.route('/events/<int:event_id>/tracks/<int:track_id>')
@api.doc(responses=SERVICE_RESPONSES)
class Track(Resource):
    @api.doc('get_track')
    @api.header(*ETAG_HEADER_DEFN)
    @api.marshal_with(TRACK)
    def get(self, event_id, track_id):
        """Fetch a track given its id"""
        return DAO.get(event_id, track_id)

    @requires_auth
    @can_delete(DAO)
    @api.doc('delete_track')
    @api.marshal_with(TRACK)
    def delete(self, event_id, track_id):
        """Delete a track given its id"""
        return DAO.delete(event_id, track_id)

    @requires_auth
    @can_update(DAO)
    @api.doc('update_track', responses=PUT_RESPONSES)
    @api.marshal_with(TRACK)
    @api.expect(TRACK_POST)
    def put(self, event_id, track_id):
        """Update a track given its id"""
        return DAO.update(event_id, track_id, self.api.payload)


@api.route('/events/<int:event_id>/tracks')
class TrackList(Resource):
    @api.doc('list_tracks')
    @api.header(*ETAG_HEADER_DEFN)
    @api.marshal_list_with(TRACK)
    def get(self, event_id):
        """List all tracks"""
        return DAO.list(event_id)

    @requires_auth
    @can_create(DAO)
    @api.doc('create_track', responses=POST_RESPONSES)
    @api.marshal_with(TRACK)
    @api.expect(TRACK_POST)
    def post(self, event_id):
        """Create a track"""
        return DAO.create(
            event_id,
            self.api.payload,
            self.api.url_for(self, event_id=event_id)
        )


@api.route('/events/<int:event_id>/tracks/page')
class TrackListPaginated(Resource, PaginatedResourceBase):
    @api.doc('list_tracks_paginated', params=PAGE_PARAMS)
    @api.header(*ETAG_HEADER_DEFN)
    @api.marshal_with(TRACK_PAGINATED)
    def get(self, event_id):
        """List tracks in a paginated manner"""
        args = self.parser.parse_args()
        return DAO.paginated_list(args=args, event_id=event_id)
gaeun/open-event-orga-server
app/api/tracks.py
Python
gpl-3.0
3,167
0.000316
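The resource classes above map onto three URL patterns. The sketch below is a hypothetical client-side view of those routes; the server address, event id, URL prefix, and the basic-auth credentials are assumptions (the mechanism behind requires_auth is not shown in the file), while the paths and the required TRACK_POST fields (name, color) come from the code above.

import requests

BASE = 'http://localhost:5000'                 # assumed server address and prefix
AUTH = ('organizer@example.com', 'password')   # illustrative credentials for requires_auth

# List all tracks of event 1 (TrackList.get)
tracks = requests.get(BASE + '/events/1/tracks').json()

# Create a track (TrackList.post); the body mirrors the TRACK_POST model
new_track = {'name': 'Security', 'color': '#ff0000'}
resp = requests.post(BASE + '/events/1/tracks', json=new_track, auth=AUTH)

# Fetch, update, and delete a single track (Track resource)
track = requests.get(BASE + '/events/1/tracks/2').json()
requests.put(BASE + '/events/1/tracks/2',
             json={'name': 'AppSec', 'color': '#00ff00'}, auth=AUTH)
requests.delete(BASE + '/events/1/tracks/2', auth=AUTH)

# Paginated listing (TrackListPaginated.get)
page = requests.get(BASE + '/events/1/tracks/page', params={'page': 1}).json()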
from gpaw import GPAW
from gpaw.lrtddft import LrTDDFT

c = GPAW('Be_gs_8bands.gpw')

dE = 10  # maximal Kohn-Sham transition energy to consider in eV

lr = LrTDDFT(c, xc='LDA', energy_range=dE)
lr.write('lr_dE.dat.gz')
robwarm/gpaw-symm
doc/documentation/tddft/Be_8bands_lrtddft_dE.py
Python
gpl-3.0
218
0.004587
############################################################################## # Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, [email protected], All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## """\ Test that Spack's shebang filtering works correctly. """ import os import stat import pytest import tempfile import shutil import filecmp from llnl.util.filesystem import mkdirp import spack from spack.hooks.sbang import shebang_too_long, filter_shebangs_in_directory from spack.util.executable import which short_line = "#!/this/is/short/bin/bash\n" long_line = "#!/this/" + ('x' * 200) + "/is/long\n" lua_line = "#!/this/" + ('x' * 200) + "/is/lua\n" lua_in_text = ("line\n") * 100 + "lua\n" + ("line\n" * 100) lua_line_patched = "--!/this/" + ('x' * 200) + "/is/lua\n" node_line = "#!/this/" + ('x' * 200) + "/is/node\n" node_in_text = ("line\n") * 100 + "lua\n" + ("line\n" * 100) node_line_patched = "//!/this/" + ('x' * 200) + "/is/node\n" sbang_line = '#!/bin/bash %s/bin/sbang\n' % spack.spack_root last_line = "last!\n" class ScriptDirectory(object): """Directory full of test scripts to run sbang instrumentation on.""" def __init__(self): self.tempdir = tempfile.mkdtemp() self.directory = os.path.join(self.tempdir, 'dir') mkdirp(self.directory) # Script with short shebang self.short_shebang = os.path.join(self.tempdir, 'short') with open(self.short_shebang, 'w') as f: f.write(short_line) f.write(last_line) # Script with long shebang self.long_shebang = os.path.join(self.tempdir, 'long') with open(self.long_shebang, 'w') as f: f.write(long_line) f.write(last_line) # Lua script with long shebang self.lua_shebang = os.path.join(self.tempdir, 'lua') with open(self.lua_shebang, 'w') as f: f.write(lua_line) f.write(last_line) # Lua script with long shebang self.lua_textbang = os.path.join(self.tempdir, 'lua_in_text') with open(self.lua_textbang, 'w') as f: f.write(short_line) f.write(lua_in_text) f.write(last_line) # Node script with long shebang self.node_shebang = os.path.join(self.tempdir, 'node') with open(self.node_shebang, 'w') as f: f.write(node_line) f.write(last_line) # Node script with long shebang self.node_textbang = os.path.join(self.tempdir, 'node_in_text') with open(self.node_textbang, 'w') as f: f.write(short_line) f.write(node_in_text) f.write(last_line) # Script already using sbang. self.has_sbang = os.path.join(self.tempdir, 'shebang') with open(self.has_sbang, 'w') as f: f.write(sbang_line) f.write(long_line) f.write(last_line) # Fake binary file. 
self.binary = os.path.join(self.tempdir, 'binary') tar = which('tar', required=True) tar('czf', self.binary, self.has_sbang) def destroy(self): shutil.rmtree(self.tempdir, ignore_errors=True) @pytest.fixture def script_dir(): sdir = ScriptDirectory() yield sdir sdir.destroy() def test_shebang_handling(script_dir): assert shebang_too_long(script_dir.lua_shebang) assert shebang_too_long(script_dir.long_shebang) assert not shebang_too_long(script_dir.short_shebang) assert not shebang_too_long(script_dir.has_sbang) assert not shebang_too_long(script_dir.binary) assert not shebang_too_long(script_dir.directory) filter_shebangs_in_directory(script_dir.tempdir) # Make sure this is untouched with open(script_dir.short_shebang, 'r') as f: assert f.readline() == short_line assert f.readline() == last_line # Make sure this got patched. with open(script_dir.long_shebang, 'r') as f: assert f.readline() == sbang_line assert f.readline() == long_line assert f.readline() == last_line # Make sure this got patched. with open(script_dir.lua_shebang, 'r') as f: assert f.readline() == sbang_line assert f.readline() == lua_line_patched assert f.readline() == last_line # Make sure this got patched. with open(script_dir.node_shebang, 'r') as f: assert f.readline() == sbang_line assert f.readline() == node_line_patched assert f.readline() == last_line assert filecmp.cmp(script_dir.lua_textbang, os.path.join(script_dir.tempdir, 'lua_in_text')) assert filecmp.cmp(script_dir.node_textbang, os.path.join(script_dir.tempdir, 'node_in_text')) # Make sure this is untouched with open(script_dir.has_sbang, 'r') as f: assert f.readline() == sbang_line assert f.readline() == long_line assert f.readline() == last_line def test_shebang_handles_non_writable_files(script_dir): # make a file non-writable st = os.stat(script_dir.long_shebang) not_writable_mode = st.st_mode & ~stat.S_IWRITE os.chmod(script_dir.long_shebang, not_writable_mode) test_shebang_handling(script_dir) st = os.stat(script_dir.long_shebang) assert oct(not_writable_mode) == oct(st.st_mode)
EmreAtes/spack
lib/spack/spack/test/sbang.py
Python
lgpl-2.1
6,406
0.001405
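The test above relies on shebang_too_long flagging interpreter lines that exceed the kernel's shebang limit, which is why the "long" fixtures use 200-character paths. The snippet below is only an illustration of that kind of length check, not Spack's actual implementation; the 127-character figure is the traditional Linux limit and is stated here as an assumption about what the test is exercising.

# Illustrative sketch only, not the real spack.hooks.sbang code.
SHEBANG_LIMIT = 127  # traditional Linux limit on the interpreter line


def shebang_too_long_sketch(path):
    """Return True if the file starts with '#!' and its first line exceeds the limit."""
    with open(path, 'rb') as f:
        first_line = f.readline()
    return first_line.startswith(b'#!') and len(first_line) > SHEBANG_LIMIT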
# -*- coding: utf-8 -*-
import unittest

from khayyam import algorithms_c as alg_c
from khayyam import algorithms_pure as alg_p

__author__ = 'vahid'


# TODO: test with negative values

class TestCAlgorithms(unittest.TestCase):

    def test_get_julian_day_from_gregorian(self):
        self.assertRaises(ValueError, alg_p.get_julian_day_from_gregorian_date, 2016, 2, 30)
        self.assertRaises(ValueError, alg_p.get_julian_day_from_gregorian_date, 2015, 2, 29)
        self.assertRaises(ValueError, alg_c.get_julian_day_from_gregorian_date, 2016, 2, 30)
        self.assertRaises(ValueError, alg_c.get_julian_day_from_gregorian_date, 2015, 2, 29)
        self.assertRaises(ValueError, alg_c.get_julian_day_from_gregorian_date, -4713, 2, 30)
        self.assertRaises(ValueError, alg_c.get_julian_day_from_gregorian_date, -4713, 2, 29)

        self.assertEqual(
            alg_c.get_julian_day_from_gregorian_date(-4713, 11, 25),
            alg_p.get_julian_day_from_gregorian_date(-4713, 11, 25)
        )

        for i in range(3000):
            self.assertEqual(
                alg_c.get_julian_day_from_gregorian_date(i, 1, 1),
                alg_p.get_julian_day_from_gregorian_date(i, 1, 1)
            )

    def test_is_leap_year(self):
        for i in range(3000):
            self.assertEqual(
                alg_c.is_jalali_leap_year(i),
                alg_p.is_jalali_leap_year(i)
            )

    def test_days_in_year(self):
        for i in range(3000):
            self.assertEqual(
                alg_c.get_days_in_jalali_year(i),
                alg_p.get_days_in_jalali_year(i)
            )

    def test_days_in_month(self):
        for i in range(3000):
            for m in range(1, 13):
                c = alg_c.get_days_in_jalali_month(i, m)
                p = alg_p.get_days_in_jalali_month(i, m)
                self.assertEqual(c, p, "year: %s, month: %s, results: {c: %s, py: %s}" % (i, m, c, p))

    def test_julian_day_from_jalali_date(self):
        for y in range(303):
            for m in range(1, 13):
                for d in range(1, alg_c.get_days_in_jalali_month(y, m) + 1):
                    self.assertEqual(
                        alg_c.get_julian_day_from_jalali_date(y, m, d),
                        alg_p.get_julian_day_from_jalali_date(y, m, d),
                        "year: %s, month: %s, day: %s" % (y, m, d)
                    )

    def test_jalali_date_from_julian_day(self):
        jd = 0
        while jd < 365 * 1000:
            jd += 1
            c = alg_c.get_jalali_date_from_julian_day(jd)
            p = alg_p.get_jalali_date_from_julian_day(jd)
            self.assertEqual(c, p, "Julian day: %s\t%s <> %s" % (jd, c, p))

    def test_gregorian_date_from_julian_day(self):
        jd = 0
        self.assertRaises(ValueError, alg_c.get_gregorian_date_from_julian_day, jd)
        self.assertRaises(ValueError, alg_p.get_gregorian_date_from_julian_day, jd)
        while jd < 365 * 200:
            jd += 1
            self.assertEqual(
                alg_c.get_gregorian_date_from_julian_day(jd),
                alg_p.get_gregorian_date_from_julian_day(jd)
            )

    def test_jalali_date_from_gregorian_date(self):
        jd = 0
        while jd < 365 * 200:
            jd += 1
            cd = alg_c.get_gregorian_date_from_julian_day(jd)
            pd = alg_p.get_gregorian_date_from_julian_day(jd)
            c = alg_c.get_jalali_date_from_gregorian_date(*cd)
            p = alg_p.get_jalali_date_from_gregorian_date(*pd)
            self.assertEqual(c, p, 'jd: %s c: %s py: %s cdate: %s pydate: %s' % (jd, c, p, cd, pd))

    def test_algorithm_import(self):
        from khayyam import algorithms
        self.assertTrue(hasattr(algorithms, 'is_jalali_leap_year'))
        self.assertTrue(hasattr(algorithms, 'get_days_in_jalali_year'))
        self.assertTrue(hasattr(algorithms, 'get_days_in_jalali_month'))
        self.assertTrue(hasattr(algorithms, 'get_julian_day_from_gregorian_date'))
        self.assertTrue(hasattr(algorithms, 'get_julian_day_from_jalali_date'))
        self.assertTrue(hasattr(algorithms, 'get_jalali_date_from_julian_day'))
        self.assertTrue(hasattr(algorithms, 'get_jalali_date_from_gregorian_date'))
        self.assertTrue(hasattr(algorithms, 'get_gregorian_date_from_julian_day'))


if __name__ == '__main__':  # pragma: no cover
    unittest.main()
pylover/khayyam
khayyam/tests/test_algorithms.py
Python
gpl-3.0
4,401
0.002954
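The suite above only cross-checks the C and pure-Python back-ends against each other in bulk. The sketch below calls the same conversion helpers directly; the (year, month, day) tuple shape of the result is inferred from how the tests unpack values with *cd, so treat it as an assumption rather than documented behaviour.

# Sketch: convert one Gregorian date with both back-ends and check they agree,
# mirroring what the test suite does over large ranges.
from khayyam import algorithms_c as alg_c
from khayyam import algorithms_pure as alg_p

gregorian = (2016, 3, 20)  # around Nowruz; the exact Jalali output is left to the library
jalali_c = alg_c.get_jalali_date_from_gregorian_date(*gregorian)
jalali_py = alg_p.get_jalali_date_from_gregorian_date(*gregorian)
assert jalali_c == jalali_py
print(jalali_c)  # expected to be a (year, month, day)-style value in the Jalali calendar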
import pynamics
import numpy
import logging

logger = logging.getLogger('pynamics.integration')


def integrate(*args, **kwargs):
    if pynamics.integrator == 0:
        return integrate_odeint(*args, **kwargs)
    elif pynamics.integrator == 1:
        newargs = args[0], args[2][0], args[1], args[2][-1]
        return integrate_rk(*newargs, **kwargs)


def integrate_odeint(*arguments, **keyword_arguments):
    import scipy.integrate
    logger.info('beginning integration')
    result = scipy.integrate.odeint(*arguments, **keyword_arguments)
    logger.info('finished integration')
    return result


def integrate_rk(*arguments, **keyword_arguments):
    import scipy.integrate
    logger.info('beginning integration')
    try:
        result = scipy.integrate.RK45(*arguments, **keyword_arguments)
        y = [result.y]
        while True:
            result.step()
            y.append(result.y)
    except RuntimeError:
        pass
    logger.info('finished integration')
    return y
idealabasu/code_pynamics
python/pynamics/integration.py
Python
mit
1,030
0.017476
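integrate() simply dispatches on the module-level pynamics.integrator flag and, for the RK45 branch, reorders the odeint-style (function, y0, t) arguments into RK45's (function, t0, y0, t_bound). The sketch below drives it with a toy decay equation; setting pynamics.integrator by hand and the example ODE itself are assumptions made for illustration only.

# Sketch: integrate dy/dt = -y over 0..5 s through the dispatcher above.
import numpy
import pynamics
from pynamics.integration import integrate


def decay(y, t):
    # odeint-style signature: state first, time second.
    # Note: the RK45 backend would instead expect fun(t, y); the dispatcher
    # only reorders the positional arguments, not the callable's signature.
    return [-y[0]]


pynamics.integrator = 0               # 0 selects the scipy.integrate.odeint backend
t = numpy.linspace(0, 5, 100)
states = integrate(decay, [1.0], t)   # same (func, y0, t) ordering odeint expects
print(states[-1])                     # roughly exp(-5)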
from .nes import Nes
from .bus.devices.cartridge import CartridgeFactory
Hexadorsimal/pynes
nes/__init__.py
Python
mit
73
0
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2012, Flowroute LLC # Written by Matthew Williams <[email protected]> # Based on yum module written by Seth Vidal <skvidal at fedoraproject.org> # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['stableinterface'], 'supported_by': 'core'} DOCUMENTATION = ''' --- module: apt short_description: Manages apt-packages description: - Manages I(apt) packages (such as for Debian/Ubuntu). version_added: "0.0.2" options: name: description: - A package name, like C(foo), or package specifier with version, like C(foo=1.0). Name wildcards (fnmatch) like C(apt*) and version wildcards like C(foo=1.0*) are also supported. Note that the apt-get commandline supports implicit regex matches here but we do not because it can let typos through easier (If you typo C(foo) as C(fo) apt-get would install packages that have "fo" in their name with a warning and a prompt for the user. Since we don't have warnings and prompts before installing we disallow this. Use an explicit fnmatch pattern if you want wildcarding) required: false default: null aliases: [ 'pkg', 'package' ] state: description: - Indicates the desired package state. C(latest) ensures that the latest version is installed. C(build-dep) ensures the package build dependencies are installed. required: false default: present choices: [ "latest", "absent", "present", "build-dep" ] update_cache: description: - Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as a separate step. required: false default: no choices: [ "yes", "no" ] cache_valid_time: description: - Update the apt cache if its older than the I(cache_valid_time). This option is set in seconds. required: false default: 0 purge: description: - Will force purging of configuration files if the module state is set to I(absent). required: false default: no choices: [ "yes", "no" ] default_release: description: - Corresponds to the C(-t) option for I(apt) and sets pin priorities required: false default: null install_recommends: description: - Corresponds to the C(--no-install-recommends) option for I(apt). C(yes) installs recommended packages. C(no) does not install recommended packages. By default, Ansible will use the same defaults as the operating system. Suggested packages are never installed. required: false default: null choices: [ "yes", "no" ] force: description: - If C(yes), force installs/removes. required: false default: "no" choices: [ "yes", "no" ] allow_unauthenticated: description: - Ignore if packages cannot be authenticated. This is useful for bootstrapping environments that manage their own apt-key setup. required: false default: "no" choices: [ "yes", "no" ] version_added: "2.1" upgrade: description: - 'If yes or safe, performs an aptitude safe-upgrade.' - 'If full, performs an aptitude full-upgrade.' 
- 'If dist, performs an apt-get dist-upgrade.' - 'Note: This does not upgrade a specific package, use state=latest for that.' version_added: "1.1" required: false default: "no" choices: [ "no", "yes", "safe", "full", "dist"] dpkg_options: description: - Add dpkg options to apt command. Defaults to '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"' - Options should be supplied as comma separated list required: false default: 'force-confdef,force-confold' deb: description: - Path to a .deb package on the remote machine. - If :// in the path, ansible will attempt to download deb before installing. (Version added 2.1) required: false version_added: "1.6" autoremove: description: - If C(yes), remove unused dependency packages for all module states except I(build-dep). It can also be used as the only option. required: false default: no choices: [ "yes", "no" ] aliases: [ 'autoclean'] version_added: "2.1" only_upgrade: description: - Only upgrade a package if it is already installed. required: false default: false version_added: "2.1" requirements: - python-apt (python 2) - python3-apt (python 3) - aptitude author: "Matthew Williams (@mgwilliams)" notes: - Three of the upgrade modes (C(full), C(safe) and its alias C(yes)) require C(aptitude), otherwise C(apt-get) suffices. ''' EXAMPLES = ''' - name: Update repositories cache and install "foo" package apt: name: foo update_cache: yes - name: Remove "foo" package apt: name: foo state: absent - name: Install the package "foo" apt: name: foo state: present - name: Install the version '1.00' of package "foo" apt: name: foo=1.00 state: present - name: Update the repository cache and update package "nginx" to latest version using default release squeeze-backport apt: name: nginx state: latest default_release: squeeze-backports update_cache: yes - name: Install latest version of "openjdk-6-jdk" ignoring "install-recommends" apt: name: openjdk-6-jdk state: latest install_recommends: no - name: Update all packages to the latest version apt: upgrade: dist - name: Run the equivalent of "apt-get update" as a separate step apt: update_cache: yes - name: Only run "update_cache=yes" if the last one is more than 3600 seconds ago apt: update_cache: yes cache_valid_time: 3600 - name: Pass options to dpkg on run apt: upgrade: dist update_cache: yes dpkg_options: 'force-confold,force-confdef' - name: Install a .deb package apt: deb: /tmp/mypackage.deb - name: Install the build dependencies for package "foo" apt: pkg: foo state: build-dep - name: Install a .deb package from the internet. apt: deb: https://example.com/python-ppq_0.1-1_all.deb ''' RETURN = ''' cache_updated: description: if the cache was updated or not returned: success, in some cases type: boolean sample: True cache_update_time: description: time of the last cache update (0 if unknown) returned: success, in some cases type: int sample: 1425828348000 stdout: description: output from apt returned: success, when needed type: string sample: "Reading package lists...\nBuilding dependency tree...\nReading state information...\nThe following extra packages will be installed:\n apache2-bin ..." stderr: description: error output from apt returned: success, when needed type: string sample: "AH00558: apache2: Could not reliably determine the server's fully qualified domain name, using 127.0.1.1. Set the 'ServerName' directive globally to ..." 
''' # NOQA # added to stave off future warnings about apt api import warnings warnings.filterwarnings('ignore', "apt API not stable yet", FutureWarning) import datetime import fnmatch import itertools import os import re import sys import time from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.pycompat24 import get_exception from ansible.module_utils._text import to_bytes, to_native from ansible.module_utils.urls import fetch_url # APT related constants APT_ENV_VARS = dict( DEBIAN_FRONTEND = 'noninteractive', DEBIAN_PRIORITY = 'critical', # We screenscrape apt-get and aptitude output for information so we need # to make sure we use the C locale when running commands LANG = 'C', LC_ALL = 'C', LC_MESSAGES = 'C', LC_CTYPE = 'C', ) DPKG_OPTIONS = 'force-confdef,force-confold' APT_GET_ZERO = "\n0 upgraded, 0 newly installed" APTITUDE_ZERO = "\n0 packages upgraded, 0 newly installed" APT_LISTS_PATH = "/var/lib/apt/lists" APT_UPDATE_SUCCESS_STAMP_PATH = "/var/lib/apt/periodic/update-success-stamp" HAS_PYTHON_APT = True try: import apt import apt.debfile import apt_pkg except ImportError: HAS_PYTHON_APT = False if sys.version_info[0] < 3: PYTHON_APT = 'python-apt' else: PYTHON_APT = 'python3-apt' def package_split(pkgspec): parts = pkgspec.split('=', 1) if len(parts) > 1: return parts[0], parts[1] else: return parts[0], None def package_versions(pkgname, pkg, pkg_cache): try: versions = set(p.version for p in pkg.versions) except AttributeError: # assume older version of python-apt is installed # apt.package.Package#versions require python-apt >= 0.7.9. pkg_cache_list = (p for p in pkg_cache.Packages if p.Name == pkgname) pkg_versions = (p.VersionList for p in pkg_cache_list) versions = set(p.VerStr for p in itertools.chain(*pkg_versions)) return versions def package_version_compare(version, other_version): try: return apt_pkg.version_compare(version, other_version) except AttributeError: return apt_pkg.VersionCompare(version, other_version) def package_status(m, pkgname, version, cache, state): try: # get the package from the cache, as well as the # low-level apt_pkg.Package object which contains # state fields not directly accessible from the # higher-level apt.package.Package object. 
pkg = cache[pkgname] ll_pkg = cache._cache[pkgname] # the low-level package object except KeyError: if state == 'install': try: provided_packages = cache.get_providing_packages(pkgname) if provided_packages: is_installed = False upgradable = False # when virtual package providing only one package, look up status of target package if cache.is_virtual_package(pkgname) and len(provided_packages) == 1: package = provided_packages[0] installed, upgradable, has_files = package_status(m, package.name, version, cache, state='install') if installed: is_installed = True return is_installed, upgradable, False m.fail_json(msg="No package matching '%s' is available" % pkgname) except AttributeError: # python-apt version too old to detect virtual packages # mark as upgradable and let apt-get install deal with it return False, True, False else: return False, False, False try: has_files = len(pkg.installed_files) > 0 except UnicodeDecodeError: has_files = True except AttributeError: has_files = False # older python-apt cannot be used to determine non-purged try: package_is_installed = ll_pkg.current_state == apt_pkg.CURSTATE_INSTALLED except AttributeError: # python-apt 0.7.X has very weak low-level object try: # might not be necessary as python-apt post-0.7.X should have current_state property package_is_installed = pkg.is_installed except AttributeError: # assume older version of python-apt is installed package_is_installed = pkg.isInstalled if version: versions = package_versions(pkgname, pkg, cache._cache) avail_upgrades = fnmatch.filter(versions, version) if package_is_installed: try: installed_version = pkg.installed.version except AttributeError: installed_version = pkg.installedVersion # Only claim the package is installed if the version is matched as well package_is_installed = fnmatch.fnmatch(installed_version, version) # Only claim the package is upgradable if a candidate matches the version package_is_upgradable = False for candidate in avail_upgrades: if package_version_compare(candidate, installed_version) > 0: package_is_upgradable = True break else: package_is_upgradable = bool(avail_upgrades) else: try: package_is_upgradable = pkg.is_upgradable except AttributeError: # assume older version of python-apt is installed package_is_upgradable = pkg.isUpgradable return package_is_installed, package_is_upgradable, has_files def expand_dpkg_options(dpkg_options_compressed): options_list = dpkg_options_compressed.split(',') dpkg_options = "" for dpkg_option in options_list: dpkg_options = '%s -o "Dpkg::Options::=--%s"' \ % (dpkg_options, dpkg_option) return dpkg_options.strip() def expand_pkgspec_from_fnmatches(m, pkgspec, cache): # Note: apt-get does implicit regex matching when an exact package name # match is not found. Something like this: # matches = [pkg.name for pkg in cache if re.match(pkgspec, pkg.name)] # (Should also deal with the ':' for multiarch like the fnmatch code below) # # We have decided not to do similar implicit regex matching but might take # a PR to add some sort of explicit regex matching: # https://github.com/ansible/ansible-modules-core/issues/1258 new_pkgspec = [] if pkgspec: for pkgspec_pattern in pkgspec: pkgname_pattern, version = package_split(pkgspec_pattern) # note that none of these chars is allowed in a (debian) pkgname if frozenset('*?[]!').intersection(pkgname_pattern): # handle multiarch pkgnames, the idea is that "apt*" should # only select native packages. 
But "apt*:i386" should still work if ":" not in pkgname_pattern: # Filter the multiarch packages from the cache only once try: pkg_name_cache = _non_multiarch except NameError: pkg_name_cache = _non_multiarch = [pkg.name for pkg in cache if ':' not in pkg.name] # noqa: F841 else: # Create a cache of pkg_names including multiarch only once try: pkg_name_cache = _all_pkg_names except NameError: pkg_name_cache = _all_pkg_names = [pkg.name for pkg in cache] # noqa: F841 matches = fnmatch.filter(pkg_name_cache, pkgname_pattern) if len(matches) == 0: m.fail_json(msg="No package(s) matching '%s' available" % str(pkgname_pattern)) else: new_pkgspec.extend(matches) else: # No wildcards in name new_pkgspec.append(pkgspec_pattern) return new_pkgspec def parse_diff(output): diff = to_native(output).splitlines() try: # check for start marker from aptitude diff_start = diff.index('Resolving dependencies...') except ValueError: try: # check for start marker from apt-get diff_start = diff.index('Reading state information...') except ValueError: # show everything diff_start = -1 try: # check for end marker line from both apt-get and aptitude diff_end = next(i for i, item in enumerate(diff) if re.match('[0-9]+ (packages )?upgraded', item)) except StopIteration: diff_end = len(diff) diff_start += 1 diff_end += 1 return {'prepared': '\n'.join(diff[diff_start:diff_end])} def install(m, pkgspec, cache, upgrade=False, default_release=None, install_recommends=None, force=False, dpkg_options=expand_dpkg_options(DPKG_OPTIONS), build_dep=False, autoremove=False, only_upgrade=False, allow_unauthenticated=False): pkg_list = [] packages = "" pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache) for package in pkgspec: if build_dep: # Let apt decide what to install pkg_list.append("'%s'" % package) continue name, version = package_split(package) installed, upgradable, has_files = package_status(m, name, version, cache, state='install') if not installed or (upgrade and upgradable): pkg_list.append("'%s'" % package) if installed and upgradable and version: # This happens when the package is installed, a newer version is # available, and the version is a wildcard that matches both # # We do not apply the upgrade flag because we cannot specify both # a version and state=latest. 
(This behaviour mirrors how apt # treats a version with wildcard in the package) pkg_list.append("'%s'" % package) packages = ' '.join(pkg_list) if len(packages) != 0: if force: force_yes = '--force-yes' else: force_yes = '' if m.check_mode: check_arg = '--simulate' else: check_arg = '' if autoremove: autoremove = '--auto-remove' else: autoremove = '' if only_upgrade: only_upgrade = '--only-upgrade' else: only_upgrade = '' if build_dep: cmd = "%s -y %s %s %s %s build-dep %s" % (APT_GET_CMD, dpkg_options, only_upgrade, force_yes, check_arg, packages) else: cmd = "%s -y %s %s %s %s %s install %s" % (APT_GET_CMD, dpkg_options, only_upgrade, force_yes, autoremove, check_arg, packages) if default_release: cmd += " -t '%s'" % (default_release,) if install_recommends is False: cmd += " -o APT::Install-Recommends=no" elif install_recommends is True: cmd += " -o APT::Install-Recommends=yes" # install_recommends is None uses the OS default if allow_unauthenticated: cmd += " --allow-unauthenticated" rc, out, err = m.run_command(cmd) if m._diff: diff = parse_diff(out) else: diff = {} if rc: return (False, dict(msg="'%s' failed: %s" % (cmd, err), stdout=out, stderr=err, rc=rc)) else: return (True, dict(changed=True, stdout=out, stderr=err, diff=diff)) else: return (True, dict(changed=False)) def get_field_of_deb(m, deb_file, field="Version"): cmd_dpkg = m.get_bin_path("dpkg", True) cmd = cmd_dpkg + " --field %s %s" % (deb_file, field) rc, stdout, stderr = m.run_command(cmd) if rc != 0: m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr) return to_native(stdout).strip('\n') def install_deb(m, debs, cache, force, install_recommends, allow_unauthenticated, dpkg_options): changed=False deps_to_install = [] pkgs_to_install = [] for deb_file in debs.split(','): try: pkg = apt.debfile.DebPackage(deb_file) pkg_name = get_field_of_deb(m, deb_file, "Package") pkg_version = get_field_of_deb(m, deb_file, "Version") try: installed_pkg = apt.Cache()[pkg_name] installed_version = installed_pkg.installed.version if package_version_compare(pkg_version, installed_version) == 0: # Does not need to down-/upgrade, move on to next package continue except Exception: # Must not be installed, continue with installation pass # Check if package is installable if not pkg.check() and not force: m.fail_json(msg=pkg._failure_string) # add any missing deps to the list of deps we need # to install so they're all done in one shot deps_to_install.extend(pkg.missing_deps) except Exception: e = get_exception() m.fail_json(msg="Unable to install package: %s" % str(e)) # and add this deb to the list of packages to install pkgs_to_install.append(deb_file) # install the deps through apt retvals = {} if len(deps_to_install) > 0: (success, retvals) = install(m=m, pkgspec=deps_to_install, cache=cache, install_recommends=install_recommends, dpkg_options=expand_dpkg_options(dpkg_options)) if not success: m.fail_json(**retvals) changed = retvals.get('changed', False) if len(pkgs_to_install) > 0: options = ' '.join(["--%s"% x for x in dpkg_options.split(",")]) if m.check_mode: options += " --simulate" if force: options += " --force-all" cmd = "dpkg %s -i %s" % (options, " ".join(pkgs_to_install)) rc, out, err = m.run_command(cmd) if "stdout" in retvals: stdout = retvals["stdout"] + out else: stdout = out if "diff" in retvals: diff = retvals["diff"] if 'prepared' in diff: diff['prepared'] += '\n\n' + out else: diff = parse_diff(out) if "stderr" in retvals: stderr = retvals["stderr"] + err else: stderr = err if rc == 0: 
m.exit_json(changed=True, stdout=stdout, stderr=stderr, diff=diff) else: m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr) else: m.exit_json(changed=changed, stdout=retvals.get('stdout',''), stderr=retvals.get('stderr',''), diff=retvals.get('diff', '')) def remove(m, pkgspec, cache, purge=False, force=False, dpkg_options=expand_dpkg_options(DPKG_OPTIONS), autoremove=False): pkg_list = [] pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache) for package in pkgspec: name, version = package_split(package) installed, upgradable, has_files = package_status(m, name, version, cache, state='remove') if installed or (has_files and purge): pkg_list.append("'%s'" % package) packages = ' '.join(pkg_list) if len(packages) == 0: m.exit_json(changed=False) else: if force: force_yes = '--force-yes' else: force_yes = '' if purge: purge = '--purge' else: purge = '' if autoremove: autoremove = '--auto-remove' else: autoremove = '' if m.check_mode: check_arg = '--simulate' else: check_arg = '' cmd = "%s -q -y %s %s %s %s %s remove %s" % (APT_GET_CMD, dpkg_options, purge, force_yes ,autoremove, check_arg, packages) rc, out, err = m.run_command(cmd) if m._diff: diff = parse_diff(out) else: diff = {} if rc: m.fail_json(msg="'apt-get remove %s' failed: %s" % (packages, err), stdout=out, stderr=err, rc=rc) m.exit_json(changed=True, stdout=out, stderr=err, diff=diff) def upgrade(m, mode="yes", force=False, default_release=None, dpkg_options=expand_dpkg_options(DPKG_OPTIONS)): if m.check_mode: check_arg = '--simulate' else: check_arg = '' apt_cmd = None prompt_regex = None if mode == "dist": # apt-get dist-upgrade apt_cmd = APT_GET_CMD upgrade_command = "dist-upgrade" elif mode == "full": # aptitude full-upgrade apt_cmd = APTITUDE_CMD upgrade_command = "full-upgrade" else: # aptitude safe-upgrade # mode=yes # default apt_cmd = APTITUDE_CMD upgrade_command = "safe-upgrade" prompt_regex = r"(^Do you want to ignore this warning and proceed anyway\?|^\*\*\*.*\[default=.*\])" if force: if apt_cmd == APT_GET_CMD: force_yes = '--force-yes' else: force_yes = '--assume-yes --allow-untrusted' else: force_yes = '' apt_cmd_path = m.get_bin_path(apt_cmd, required=True) cmd = '%s -y %s %s %s %s' % (apt_cmd_path, dpkg_options, force_yes, check_arg, upgrade_command) if default_release: cmd += " -t '%s'" % (default_release,) rc, out, err = m.run_command(cmd, prompt_regex=prompt_regex) if m._diff: diff = parse_diff(out) else: diff = {} if rc: m.fail_json(msg="'%s %s' failed: %s" % (apt_cmd, upgrade_command, err), stdout=out, rc=rc) if (apt_cmd == APT_GET_CMD and APT_GET_ZERO in out) or (apt_cmd == APTITUDE_CMD and APTITUDE_ZERO in out): m.exit_json(changed=False, msg=out, stdout=out, stderr=err) m.exit_json(changed=True, msg=out, stdout=out, stderr=err, diff=diff) def download(module, deb): tempdir = os.path.dirname(__file__) package = os.path.join(tempdir, str(deb.rsplit('/', 1)[1])) # When downloading a deb, how much of the deb to download before # saving to a tempfile (64k) BUFSIZE = 65536 try: rsp, info = fetch_url(module, deb, method='GET') if info['status'] != 200: module.fail_json(msg="Failed to download %s, %s" % (deb, info['msg'])) # Ensure file is open in binary mode for Python 3 f = open(package, 'wb') # Read 1kb at a time to save on ram while True: data = rsp.read(BUFSIZE) data = to_bytes(data, errors='surrogate_or_strict') if len(data) < 1: break # End of file, break while loop f.write(data) f.close() deb = package except Exception: e = get_exception() module.fail_json(msg="Failure downloading %s, %s" 
% (deb, e)) return deb def get_cache_mtime(): """Return mtime of a valid apt cache file. Stat the apt cache file and if no cache file is found return 0 :returns: ``int`` """ if os.path.exists(APT_UPDATE_SUCCESS_STAMP_PATH): return os.stat(APT_UPDATE_SUCCESS_STAMP_PATH).st_mtime elif os.path.exists(APT_LISTS_PATH): return os.stat(APT_LISTS_PATH).st_mtime else: return 0 def get_updated_cache_time(): """Return the mtime time stamp and the updated cache time. Always retrieve the mtime of the apt cache or set the `cache_mtime` variable to 0 :returns: ``tuple`` """ cache_mtime = get_cache_mtime() mtimestamp = datetime.datetime.fromtimestamp(cache_mtime) updated_cache_time = int(time.mktime(mtimestamp.timetuple())) return mtimestamp, updated_cache_time # https://github.com/ansible/ansible-modules-core/issues/2951 def get_cache(module): '''Attempt to get the cache object and update till it works''' cache = None try: cache = apt.Cache() except SystemError: e = get_exception() if '/var/lib/apt/lists/' in str(e).lower(): # update cache until files are fixed or retries exceeded retries = 0 while retries < 2: (rc, so, se) = module.run_command(['apt-get', 'update', '-q']) retries += 1 if rc == 0: break if rc != 0: module.fail_json(msg='Updating the cache to correct corrupt package lists failed:\n%s\n%s' % (str(e), str(so) + str(se)), rc=rc) # try again cache = apt.Cache() else: module.fail_json(msg=str(e)) return cache def main(): module = AnsibleModule( argument_spec = dict( state = dict(default='present', choices=['installed', 'latest', 'removed', 'absent', 'present', 'build-dep']), update_cache = dict(aliases=['update-cache'], type='bool'), cache_valid_time = dict(type='int', default=0), purge = dict(default=False, type='bool'), package = dict(default=None, aliases=['pkg', 'name'], type='list'), deb = dict(default=None, type='path'), default_release = dict(default=None, aliases=['default-release']), install_recommends = dict(default=None, aliases=['install-recommends'], type='bool'), force = dict(default='no', type='bool'), upgrade = dict(choices=['no', 'yes', 'safe', 'full', 'dist']), dpkg_options = dict(default=DPKG_OPTIONS), autoremove = dict(type='bool', aliases=['autoclean']), only_upgrade = dict(type='bool', default=False), allow_unauthenticated = dict(default='no', aliases=['allow-unauthenticated'], type='bool'), ), mutually_exclusive = [['package', 'upgrade', 'deb']], required_one_of = [['package', 'upgrade', 'update_cache', 'deb', 'autoremove']], supports_check_mode = True ) module.run_command_environ_update = APT_ENV_VARS if not HAS_PYTHON_APT: if module.check_mode: module.fail_json(msg="%s must be installed to use check mode. " "If run normally this module can auto-install it." % PYTHON_APT) try: module.run_command(['apt-get', 'update'], check_rc=True) module.run_command(['apt-get', 'install', PYTHON_APT, '-y', '-q'], check_rc=True) global apt, apt_pkg import apt import apt.debfile import apt_pkg except ImportError: module.fail_json(msg="Could not import python modules: apt, apt_pkg. " "Please install %s package." % PYTHON_APT) global APTITUDE_CMD APTITUDE_CMD = module.get_bin_path("aptitude", False) global APT_GET_CMD APT_GET_CMD = module.get_bin_path("apt-get") p = module.params if p['upgrade'] == 'no': p['upgrade'] = None if not APTITUDE_CMD and p.get('upgrade', None) in [ 'full', 'safe', 'yes' ]: module.fail_json(msg="Could not find aptitude. 
Please ensure it is installed.") updated_cache = False updated_cache_time = 0 install_recommends = p['install_recommends'] allow_unauthenticated = p['allow_unauthenticated'] dpkg_options = expand_dpkg_options(p['dpkg_options']) autoremove = p['autoremove'] # Deal with deprecated aliases if p['state'] == 'installed': p['state'] = 'present' if p['state'] == 'removed': p['state'] = 'absent' # Get the cache object cache = get_cache(module) try: if p['default_release']: try: apt_pkg.config['APT::Default-Release'] = p['default_release'] except AttributeError: apt_pkg.Config['APT::Default-Release'] = p['default_release'] # reopen cache w/ modified config cache.open(progress=None) mtimestamp, updated_cache_time = get_updated_cache_time() # Cache valid time is default 0, which will update the cache if # needed and `update_cache` was set to true updated_cache = False if p['update_cache']: now = datetime.datetime.now() tdelta = datetime.timedelta(seconds=p['cache_valid_time']) if not mtimestamp + tdelta >= now: # Retry to update the cache up to 3 times for retry in range(3): try: cache.update() break except apt.cache.FetchFailedException: pass else: module.fail_json(msg='Failed to update apt cache.') cache.open(progress=None) updated_cache = True mtimestamp, updated_cache_time = get_updated_cache_time() # If there is nothing else to do exit. This will set state as # changed based on if the cache was updated. if not p['package'] and not p['upgrade'] and not p['deb']: module.exit_json( changed=updated_cache, cache_updated=updated_cache, cache_update_time=updated_cache_time ) force_yes = p['force'] if p['upgrade']: upgrade(module, p['upgrade'], force_yes, p['default_release'], dpkg_options) if p['deb']: if p['state'] != 'present': module.fail_json(msg="deb only supports state=present") if '://' in p['deb']: p['deb'] = download(module, p['deb']) install_deb(module, p['deb'], cache, install_recommends=install_recommends, allow_unauthenticated=allow_unauthenticated, force=force_yes, dpkg_options=p['dpkg_options']) packages = p['package'] latest = p['state'] == 'latest' if packages: for package in packages: if package.count('=') > 1: module.fail_json(msg="invalid package spec: %s" % package) if latest and '=' in package: module.fail_json(msg='version number inconsistent with state=latest: %s' % package) if p['state'] in ('latest', 'present', 'build-dep'): state_upgrade = False state_builddep = False if p['state'] == 'latest': state_upgrade = True if p['state'] == 'build-dep': state_builddep = True success, retvals = install( module, packages, cache, upgrade=state_upgrade, default_release=p['default_release'], install_recommends=install_recommends, force=force_yes, dpkg_options=dpkg_options, build_dep=state_builddep, autoremove=autoremove, only_upgrade=p['only_upgrade'], allow_unauthenticated=allow_unauthenticated ) # Store if the cache has been updated retvals['cache_updated'] = updated_cache # Store when the update time was last retvals['cache_update_time'] = updated_cache_time if success: module.exit_json(**retvals) else: module.fail_json(**retvals) elif p['state'] == 'absent': remove(module, packages, cache, p['purge'], force=force_yes, dpkg_options=dpkg_options, autoremove=autoremove) except apt.cache.LockFailedException: module.fail_json(msg="Failed to lock apt for exclusive operation") except apt.cache.FetchFailedException: module.fail_json(msg="Could not fetch updated apt files") if __name__ == "__main__": main()
gptech/ansible
lib/ansible/modules/packaging/os/apt.py
Python
gpl-3.0
35,672
0.003644