| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| string (lengths 6–947k) | string (lengths 5–100) | string (lengths 4–231) | string (1 class) | string (15 classes) | int64 (6–947k) | float64 (0–0.34) |
import os
# We'll render HTML templates and access data sent by POST
# using the request object from flask. Redirect and url_for
# will be used to redirect the user once the upload is done
# and send_from_directory will help us to send/show on the
# browser the file that the user just uploaded
from flask import Flask, render_template, request, flash, redirect, url_for, send_from_directory
from app import app
from werkzeug.utils import secure_filename


# For a given file, return whether it's an allowed type or not
def allowed_file(filename):
    return '.' in filename and \
        filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS']


@app.route('/', methods=['GET', 'POST'])
@app.route('/index', methods=['GET', 'POST'])
def index():
    if request.method == 'POST':
        # Check that the POST request has both file parts
        filename = []
        for upfile in ['filewohr', 'filewhr']:
            if upfile not in request.files:
                flash('No file part')
                return redirect(request.url)
            # Get the name of the uploaded file
            file = request.files[upfile]
            # If the user does not select a file, the browser also
            # submits an empty part without a filename
            if file.filename == '':
                flash('No selected file')
                return redirect(request.url)
            # Check if the file is one of the allowed types/extensions
            if file and allowed_file(file.filename):
                # Make the filename safe, remove unsupported chars
                filename.append(secure_filename(file.filename))
                # Move the file from the temporary folder to the upload folder
                file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename[-1]))
            else:
                flash('Not allowed file')
                return redirect(request.url)
        # Render the file template
        return render_template('file.html',
                               folder=app.config['UPLOAD_FOLDER'],
                               filenamewohr=filename[0],
                               filenamewhr=filename[1],
                               scroll='results')
    return render_template('index.html')
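# The view above assumes that UPLOAD_FOLDER and ALLOWED_EXTENSIONS are set when
# the shared `app` object is created. A minimal sketch of such an app/__init__.py
# follows; the folder path and extension set are illustrative assumptions, not
# taken from the repository.
#
# import os
# from flask import Flask
#
# app = Flask(__name__)
# app.secret_key = os.urandom(24)                           # required because the view calls flash()
# app.config['UPLOAD_FOLDER'] = 'app/static/uploads'        # assumed location
# app.config['ALLOWED_EXTENSIONS'] = {'csv', 'txt', 'mat'}  # assumed extension set
#
# from app import views  # noqa: E402,F401  (registers the routes defined above)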
| davidbradway/fusefit | webui/app/views.py | Python | mit | 2,160 | 0.008333 |
import time

from lib.DomainObjects.EyeosCard import EyeosCard
from lib.Errors.EyeosAPIError import EyeosAPIError
from lib.EyeosApi.EyeosApiCall import EyeosApiCall
from lib.Settings import Settings
from lib.Wrappers.Logger import Logger


class Logout:
    def __init__(self, injected_proxy_ip=None, injected_eyeos_api_call=None):
        self.settings = Settings().getSettings()
        self.proxy_ip = injected_proxy_ip or self.settings['general']['public_hostname']
        self.logger = Logger(__name__)
        self.eyeos_api_call = injected_eyeos_api_call or EyeosApiCall()

    def logout(self, card):
        self.logger.info("Retrieving a valid card...")
        data = {
            'timestamp': int(time.time())
        }
        logout_url = "https://{0}/relay/presence/v1/routingKey/logout/userEvent/logout".format(self.proxy_ip)
        self.logger.debug('POST request to: {0}'.format(logout_url))
        req = self.eyeos_api_call.post(logout_url, verify=False, data=data, card=card)
        if req.status_code != 200:
            raise ValueError("Error logging out with user")
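# Because both collaborators are injectable, the class can be exercised without a
# live proxy. A minimal sketch, assuming Settings and Logger can be constructed in
# the local environment; the stub and the card value are made up for illustration.
#
# class StubApiCall:
#     def post(self, url, verify=False, data=None, card=None):
#         class Response:
#             status_code = 200
#         print('POST', url, data)
#         return Response()
#
# logout = Logout(injected_proxy_ip='10.0.0.1', injected_eyeos_api_call=StubApiCall())
# logout.logout(card=None)  # a real call would pass an EyeosCard instance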
| Open365/Open365 | lib/EyeosApi/Logout.py | Python | agpl-3.0 | 1,097 | 0.002735 |
# LIPGLOSS - Graphical user interface for constructing glaze recipes
# Copyright (C) 2017 Pieter Mostert
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# version 3 along with this program (see LICENCE.txt). If not, see
# <http://www.gnu.org/licenses/>.
# Contact: [email protected]
import tkinter
from tkinter import messagebox # eliminate
from view.pretty_names import prettify # eliminate
from functools import partial
import shelve
import copy
from .core_data import CoreData
from .recipes import restr_keys
from .pulp2dim import *
from pulp import *
import time
solver = GLPK(msg=0)
#solver = PULP_CBC_CMD(msg=0)
#solver = None
# Based on https://scaron.info/blog/linear-programming-in-python-with-cvxopt.html,
# it seems the glpk solver provided by cvxopt is much faster than the one provided by pulp.
# Unfortunately I can't get cvxopt to run on Windows 7 with Python 3.6, so I'm sticking with
# pulp for now.
class LpRecipeProblem(LpProblem):
def __init__(self, name, max_or_min, core_data):
'''Basic LP problem constraints that always hold'''
#super().__init__()
LpProblem.__init__(self, name, max_or_min)
#CoreData.__init__(self)
self.ingredient_dict = core_data.ingredient_dict
self.oxide_dict = core_data.oxide_dict
self.other_dict = core_data.other_dict
self.ingredient_analyses = core_data.ingredient_analyses
self.other_attr_dict = core_data.other_attr_dict
self.lp_var = {} # self.lp_var is a dictionary for the variables in the linear programming problem
# Create variables used to normalize:
for total in ['ingredient_total', 'fluxes_total', 'ox_mass_total', 'ox_mole_total']:
self.lp_var[total] = pulp.LpVariable(total, 0, None, pulp.LpContinuous)
for ox in self.oxide_dict:
self.lp_var['mole_'+ox] = pulp.LpVariable('mole_'+ox, 0, None, pulp.LpContinuous)
self.lp_var['mass_'+ox] = pulp.LpVariable('mass_'+ox, 0, None, pulp.LpContinuous)
# Relate mole percent and unity:
self += self.lp_var['mole_'+ox] * self.oxide_dict[ox].molar_mass == self.lp_var['mass_'+ox]
self += self.lp_var['fluxes_total'] == sum(self.oxide_dict[ox].flux * self.lp_var['mole_'+ox] for ox in self.oxide_dict)
self += self.lp_var['ox_mass_total'] == sum(self.lp_var['mass_'+ox] for ox in self.oxide_dict)
self += self.lp_var['ox_mole_total'] == sum(self.lp_var['mole_'+ox] for ox in self.oxide_dict)
for i in self.other_attr_dict:
self.lp_var['other_attr_'+i] = pulp.LpVariable('other_attr_'+i, 0, None, pulp.LpContinuous)
# May move the next section out of __init__
for index in self.ingredient_dict:
ing = 'ingredient_'+index
self.lp_var[ing] = pulp.LpVariable(ing, 0, None, pulp.LpContinuous)
# Relate ingredients, oxides and other attributes:
self.update_ingredient_analyses()
self += self.lp_var['ingredient_total'] == sum(self.lp_var['ingredient_'+index] for index in self.ingredient_dict), 'ing_total'
for index in self.other_dict:
ot = 'other_'+index
coefs = self.other_dict[index].numerator_coefs
linear_combo = [(self.lp_var[key], coefs[key]) for key in coefs]
self.lp_var[ot] = pulp.LpVariable(ot, 0, None, pulp.LpContinuous)
# Relate this variable to the other variables:
self += self.lp_var[ot] == LpAffineExpression(linear_combo), ot
def update_ingredient_analyses(self):
"To be run when the composition of any ingredient is changed. May be better to do this for a specific ingredient"
for ox in self.oxide_dict:
self.constraints[ox] = sum(self.ingredient_analyses[j][ox] * self.lp_var['ingredient_'+j]/100 \
for j in self.ingredient_dict if ox in self.ingredient_analyses[j]) \
== self.lp_var['mass_'+ox]
for i in self.other_attr_dict:
self.constraints['other_attr_'+i] = sum(self.ingredient_dict[j].other_attributes[i] * self.lp_var['ingredient_'+j]/100 \
for j in self.ingredient_dict if i in self.ingredient_dict[j].other_attributes) \
== self.lp_var['other_attr_'+i]
def remove_ingredient(self, i, core_data):
try:
core_data.remove_ingredient(i)
except:
pass
## self._variables.remove(self.lp_var['ingredient_'+i])
# The commented-out line above doesn't work in general since self.lp_var['ingredient_'+i] is regarded as
# being equal to all entries of self._variables, so it removes the first entry. Instead, we need to use 'is'.
for k, j in enumerate(self._variables):
if j is self.lp_var['ingredient_'+i]:
del self._variables[k]
try:
del self.constraints['ingredient_'+i+'_lower'] # Is this necessary?
del self.constraints['ingredient_'+i+'_upper'] # Is this necessary?
except:
pass
self.constraints['ing_total'] = self.lp_var['ingredient_total'] == \
sum(self.lp_var['ingredient_'+j] for j in self.ingredient_dict)
self.update_ingredient_analyses()
def add_ingredient(self, i, core_data):
pass
def update_other_restrictions(self):
"To be run when CoreData.other_dict is changed. May be better to do this for a specific other restriction"
for i in self.other_dict:
ot = 'other_'+i
coefs = self.other_dict[i].numerator_coefs
linear_combo = [(self.lp_var[key], coefs[key]) for key in coefs]
self.constraints[ot] = self.lp_var[ot] == LpAffineExpression(linear_combo)
def remove_other_restriction(self, i, core_data):
try:
core_data.remove_other_restriction(i)
except:
pass
## self._variables.remove(self.lp_var['other_'+i])
# The commented-out line above doesn't work in general since self.lp_var['other_'+i] is regarded as
# being equal to all entries of self._variables, so it removes the first entry. Instead, we need to use 'is'.
ot = 'other_'+i
try:
del self.constraints[ot]
except:
pass
for k, j in enumerate(self._variables):
if j is self.lp_var[ot]:
del self._variables[k]
try:
del self.constraints[ot+'_lower'] # Is this necessary?
del self.constraints[ot+'_upper'] # Is this necessary?
except:
pass
##Proposed rearrangement: Move variables and constraints relating to ingredients and other restrictions
##from LpRecipeProblem.__init__ to LpRecipeProblem.calc_restrictions.
##Add default bounds to recipe initialization
##Change format of normalizations
def calc_restrictions(self, recipe, restr_dict): # first update recipe.
# Should be able to construct a reduced restr_dict from recipe
t0 = time.process_time()
# First, test for obvious errors
if sum(self.oxide_dict[ox].flux for ox in recipe.oxides) == 0:
messagebox.showerror(" ", 'No flux! You have to give a flux.')
return
# Run tests to see if the denominators of other restrictions are identically zero?
for key in recipe.restriction_keys:
if recipe.lower_bounds[key] > recipe.upper_bounds[key]:
res = restr_dict[key]
messagebox.showerror(" ", 'Incompatible ' + print_res_type(res.normalization) + 'bounds on ' + prettify(res.name))
return
delta = 0.1**9
selected_fluxes = recipe.fluxes()
sum_UMF_low = sum(recipe.lower_bounds['umf_'+ox] for ox in selected_fluxes)
if sum_UMF_low > 1 + delta:
messagebox.showerror(" ", 'The sum of the UMF flux lower bounds is '+str(sum_UMF_low)
+'. It should be at most 1. Decrease one of the lower bounds by '+str(sum_UMF_low-1)
+' or more.') #will be a problem if they're all < sum_UMF_low-1))
return
sum_UMF_upp = sum(recipe.upper_bounds['umf_'+ox] for ox in selected_fluxes)
if sum_UMF_upp < 1 - delta:
messagebox.showerror(" ", 'The sum of the UMF flux upper bounds is '+str(sum_UMF_upp)
+'. It should be at least 1. Increase one of the upper bounds by '+str(1-sum_UMF_upp)
+' or more.')
return
for t in ['mass_perc_', 'mole_perc_']:
sum_t_low = sum(recipe.lower_bounds[t+ox] for ox in recipe.oxides)
if sum_t_low > 100 + delta:
messagebox.showerror(" ", 'The sum of the ' + prettify(t) + ' lower bounds is '+str(sum_t_low)
+'. It should be at most 100. Decrease one of the lower bounds by '+str(sum_t_low-100)
+' or more.') #will be a problem if they're all < sum_t_low-100)
return
sum_t_upp = sum(recipe.upper_bounds[t+ox] for ox in recipe.oxides)
if sum_t_upp < 100 - delta:
messagebox.showerror(" ", 'The sum of the ' + prettify(t) + ' upper bounds is '+str(sum_t_upp)
+'. It should be at least 100. Increase one of the upper bounds by '+str(100-sum_t_upp)
+' or more.')
return
sum_ing_low = sum(recipe.lower_bounds['ingredient_'+index] for index in recipe.ingredients)
if sum_ing_low > 100 + delta:
messagebox.showerror(" ", 'The sum of the ingredient lower bounds is '+str(sum_ing_low)
+'. It should be at most 100. Decrease one of the lower bounds by '+str(sum_ing_low-100)
+' or more.') #will be a problem if they're all < sum_ing_low-100)
return
sum_ing_upp = sum(recipe.upper_bounds['ingredient_'+index] for index in recipe.ingredients)
if sum_ing_upp < 100 - delta:
messagebox.showerror(" ", 'The sum of the ingredient upper bounds is '+str(sum_ing_upp)
+'. It should be at least 100. Increase one of the upper bounds by '+str(100-sum_ing_upp)
+' or more.')
return
#t0 = time.process_time()
for index in self.ingredient_dict:
ing = 'ingredient_'+index
if index in recipe.ingredients:
ing_low = 0.01*recipe.lower_bounds[ing]
ing_upp = 0.01*recipe.upper_bounds[ing]
else:
ing_low = 0
ing_upp = 0
self.constraints[ing+'_lower'] = self.lp_var[ing] >= ing_low*self.lp_var['ingredient_total'] # ingredient lower bounds
self.constraints[ing+'_upper'] = self.lp_var[ing] <= ing_upp*self.lp_var['ingredient_total'] # ingredient upper bounds
t1 = time.process_time() # The next section takes a while, perhaps because the dictionary self.lp_var is long.
# May be better to split it.
for ox in self.oxide_dict:
if ox in recipe.oxides:
self.constraints[ox+'_umf_lower'] = self.lp_var['mole_'+ox] >= recipe.lower_bounds['umf_'+ox]*self.lp_var['fluxes_total'] # oxide UMF lower bounds
self.constraints[ox+'_umf_upper'] = self.lp_var['mole_'+ox] <= recipe.upper_bounds['umf_'+ox]*self.lp_var['fluxes_total'] # oxide UMF upper bounds
self.constraints[ox+'_wt_%_lower'] = self.lp_var['mass_'+ox] >= 0.01*recipe.lower_bounds['mass_perc_'+ox]*self.lp_var['ox_mass_total'] # oxide weight % lower bounds
self.constraints[ox+'_wt_%_upper'] = self.lp_var['mass_'+ox] <= 0.01*recipe.upper_bounds['mass_perc_'+ox]*self.lp_var['ox_mass_total'] # oxide weight % upper bounds
self.constraints[ox+'_mol_%_lower'] = self.lp_var['mole_'+ox] >= 0.01*recipe.lower_bounds['mole_perc_'+ox]*self.lp_var['ox_mole_total'] # oxide mol % lower bounds
self.constraints[ox+'_mol_%_upper'] = self.lp_var['mole_'+ox] <= 0.01*recipe.upper_bounds['mole_perc_'+ox]*self.lp_var['ox_mole_total'] # oxide mol % upper bounds
else:
try:
del self.constraints[ox+'_umf_lower']
del self.constraints[ox+'_umf_upper']
del self.constraints[ox+'_wt_%_lower']
del self.constraints[ox+'_wt_%_upper']
del self.constraints[ox+'_mol_%_lower']
del self.constraints[ox+'_mol_%_upper']
except:
pass
## if 'KNaO' in self.oxides:
## prob += self.lp_var['KNaO_umf'] == self.lp_var['K2O_umf'] + self.lp_var['Na2O_umf']
## prob += self.lp_var['KNaO_wt_%'] == lp_var['K2O_wt_%'] + lp_var['Na2O_wt_%']
for index in self.other_dict:
if index in recipe.other:
other_norm = self.linear_combination(self.other_dict[index].normalization)
self.constraints['other_'+index+'_lower'] = self.lp_var['other_'+index] >= recipe.lower_bounds['other_'+index]*other_norm # lower bound
self.constraints['other_'+index+'_upper'] = self.lp_var['other_'+index] <= recipe.upper_bounds['other_'+index]*other_norm # upper bound
else:
try:
del self.constraints['other_'+index+'_lower']
del self.constraints['other_'+index+'_upper']
except:
pass
# Finally, we're ready to calculate the upper and lower bounds imposed on all the variables
calc_bounds = {-1:{}, 1:{}}
for key in recipe.restriction_keys:
res = restr_dict[key]
norm = self.linear_combination(res.normalization)
self.constraints['normalization'] = norm == 1 # Apply the normalization of the restriction in question
# Apparently this doesn't slow things down a whole lot
for eps in [1, -1]: # calculate lower and upper bounds.
self += eps*self.lp_var[res.objective_func], res.name
self.writeLP('constraints.lp')
self.solve(solver)
if self.status == 1:
calc_bounds[eps][key] = eps*pulp.value(self.objective)
#prob.writeLP('constraints.lp')
else:
messagebox.showerror(" ", LpStatus[self.status])
self.writeLP('constraints.lp')
return
t2 = time.process_time()
#print(t2 - t0)
return {'lower':calc_bounds[-1], 'upper':calc_bounds[1]}
def calc_2d_projection(self, recipe, restr_dict): # This is designed to be run when only the x and y variables have changed; it does not take
# into account changes to upper and lower bounds. It should be possible to detect when the
# user has clicked in one of the entry boxes since the last time calc_restrictions was run,
# and give a warning in this case. Something like, if you have changed any bounds, click
# 'Calculate restrictions' to apply them.
if len(recipe.variables) == 2:
x_var = restr_dict[recipe.variables['x']]
y_var = restr_dict[recipe.variables['y']]
x_norm = self.linear_combination(x_var.normalization)
y_norm = self.linear_combination(y_var.normalization)
vertices = self.two_dim_projection(self.lp_var[x_var.objective_func], self.lp_var[y_var.objective_func], x_norm, y_norm, solver) # defined in pulp2dim file
return vertices
else:
print("Select two variables first")
def linear_combination(self, coefs):
"""Returns a linear combination of lp_vars, based on the dictionary coefs"""
linear_combo = [(self.lp_var[key], val) for key, val in coefs.items() if val != 0]
return LpAffineExpression(linear_combo)
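# The bound calculation in calc_restrictions repeatedly fixes a normalization
# (norm == 1) and then maximizes/minimizes a single variable. A stripped-down,
# standalone sketch of that pattern with plain pulp follows; the variables and
# numbers are illustrative, not the glaze chemistry above.

import pulp

prob = pulp.LpProblem('bounds', pulp.LpMaximize)
x = pulp.LpVariable('x', 0)
y = pulp.LpVariable('y', 0)
prob += x + y == 1, 'normalization'   # fix the normalization, like norm == 1 above
prob += x <= 2 * y                    # an example restriction
bounds = {}
for eps in (1, -1):                   # maximize, then minimize, the same objective
    prob.setObjective(eps * x)
    prob.solve(pulp.PULP_CBC_CMD(msg=0))
    bounds['upper' if eps == 1 else 'lower'] = eps * pulp.value(prob.objective)
print(bounds)   # e.g. {'upper': 0.666..., 'lower': 0.0}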
| PieterMostert/Lipgloss | model/lipgloss/lp_recipe_problem.py | Python | gpl-3.0 | 17,317 | 0.009875 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 Isaku Yamahata <yamahata at private email ne jp>
#                               <yamahata at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from ryu.app.client import OFPClient

from nova import flags
from nova import log as logging
from nova.network import linux_net
from nova.openstack.common import cfg
from nova import utils

LOG = logging.getLogger(__name__)

ryu_linux_net_opt = cfg.StrOpt('linuxnet_ovs_ryu_api_host',
                               default='127.0.0.1:8080',
                               help='Openflow Ryu REST API host:port')

FLAGS = flags.FLAGS
FLAGS.register_opt(ryu_linux_net_opt)


def _get_datapath_id(bridge_name):
    out, _err = utils.execute('ovs-vsctl', 'get', 'Bridge',
                              bridge_name, 'datapath_id', run_as_root=True)
    return out.strip().strip('"')


def _get_port_no(dev):
    out, _err = utils.execute('ovs-vsctl', 'get', 'Interface', dev,
                              'ofport', run_as_root=True)
    return int(out.strip())


class LinuxOVSRyuInterfaceDriver(linux_net.LinuxOVSInterfaceDriver):
    def __init__(self):
        super(LinuxOVSRyuInterfaceDriver, self).__init__()

        LOG.debug('ryu rest host %s', FLAGS.linuxnet_ovs_ryu_api_host)
        self.ryu_client = OFPClient(FLAGS.linuxnet_ovs_ryu_api_host)
        self.datapath_id = _get_datapath_id(
            FLAGS.linuxnet_ovs_integration_bridge)

        if linux_net.binary_name == 'nova-network':
            for tables in [linux_net.iptables_manager.ipv4,
                           linux_net.iptables_manager.ipv6]:
                tables['filter'].add_rule(
                    'FORWARD',
                    '--in-interface gw-+ --out-interface gw-+ -j DROP')
            linux_net.iptables_manager.apply()

    def plug(self, network, mac_address, gateway=True):
        LOG.debug("network %s mac_address %s gateway %s",
                  network, mac_address, gateway)
        ret = super(LinuxOVSRyuInterfaceDriver, self).plug(
            network, mac_address, gateway)

        port_no = _get_port_no(self.get_dev(network))
        self.ryu_client.create_port(network['uuid'], self.datapath_id, port_no)
        return ret
| savi-dev/quantum | quantum/plugins/ryu/nova/linux_net.py | Python | apache-2.0 | 2,805 | 0 |
class Operation:
    """
    Operation, a single operation within an execution plan.
    """

    def __init__(self, name, args=None):
        """
        Create a new operation.

        Args:
            name: string that represents the name of the operation
            args: operation arguments
        """
        self.name = name
        self.args = args
        self.children = []

    def append_child(self, child):
        if not isinstance(child, Operation) or self is child:
            raise Exception("child must be Operation")

        self.children.append(child)
        return self

    def child_count(self):
        return len(self.children)

    def __eq__(self, o: object) -> bool:
        if not isinstance(o, Operation):
            return False

        return (self.name == o.name and self.args == o.args)

    def __str__(self) -> str:
        args_str = "" if self.args is None else f" | {self.args}"
        return f"{self.name}{args_str}"


class ExecutionPlan:
    """
    ExecutionPlan, a collection of operations.
    """

    def __init__(self, plan):
        """
        Create a new execution plan.

        Args:
            plan: array of strings that represents the collection of operations,
                  i.e. the output from GRAPH.EXPLAIN
        """
        if not isinstance(plan, list):
            raise Exception("plan must be an array")

        self.plan = plan
        self.structured_plan = self._operation_tree()

    def _compare_operations(self, root_a, root_b):
        """
        Compare execution plan operation trees.

        Return: True if operation trees are equal, False otherwise
        """
        # compare current roots
        if root_a != root_b:
            return False

        # make sure the roots have the same number of children
        if root_a.child_count() != root_b.child_count():
            return False

        # recursively compare children
        for i in range(root_a.child_count()):
            if not self._compare_operations(root_a.children[i], root_b.children[i]):
                return False

        return True

    def __str__(self) -> str:
        def aggregate_str(str_children):
            return "\n".join([" " + line for str_child in str_children for line in str_child.splitlines()])

        def combine_str(x, y):
            return f"{x}\n{y}"

        return self._operation_traverse(self.structured_plan, str, aggregate_str, combine_str)

    def __eq__(self, o: object) -> bool:
        """
        Compare two execution plans.

        Return: True if the two plans are equal, False otherwise
        """
        # make sure 'o' is an execution plan
        if not isinstance(o, ExecutionPlan):
            return False

        # get the root of both plans
        root_a = self.structured_plan
        root_b = o.structured_plan

        # compare execution trees
        return self._compare_operations(root_a, root_b)

    def _operation_traverse(self, op, op_f, aggregate_f, combine_f):
        """
        Traverse the operation tree recursively, applying functions.

        Args:
            op: operation to traverse
            op_f: function applied for each operation
            aggregate_f: aggregation function applied for all children of a single operation
            combine_f: combine function applied for the operation result and the children result
        """
        # apply op_f for each operation
        op_res = op_f(op)
        if len(op.children) == 0:
            return op_res  # no children, return
        else:
            # apply _operation_traverse recursively
            children = [self._operation_traverse(child, op_f, aggregate_f, combine_f) for child in op.children]
            # combine the operation result with the children's aggregated result
            return combine_f(op_res, aggregate_f(children))

    def _operation_tree(self):
        """Build the operation tree from the string representation."""
        # initial state
        i = 0
        level = 0
        stack = []
        current = None

        # iterate over plan operations
        while i < len(self.plan):
            current_op = self.plan[i]
            op_level = current_op.count(" ")
            if op_level == level:
                # if the operation level is equal to the current level,
                # set the current operation and move on
                args = current_op.split("|")
                current = Operation(args[0].strip(), None if len(args) == 1 else args[1].strip())
                i += 1
            elif op_level == level + 1:
                # if the operation is a child of the current operation,
                # add it as a child and set it as the current operation
                args = current_op.split("|")
                child = Operation(args[0].strip(), None if len(args) == 1 else args[1].strip())
                current.append_child(child)
                stack.append(current)
                current = child
                level += 1
                i += 1
            elif op_level < level:
                # if the operation is not a child of the current operation,
                # go back to its parent operation
                levels_back = level - op_level + 1
                for _ in range(levels_back):
                    current = stack.pop()
                level -= levels_back
            else:
                raise Exception("corrupted plan")

        return stack[0]
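# A small usage sketch. The plan strings below are hypothetical and use one
# leading space per nesting level (and no spaces inside names), which is what
# the space-counting in _operation_tree above expects.
#
# plan = ExecutionPlan([
#     "Results",
#     " Project",
#     "  Scan|(p:Person)",
# ])
# print(plan)                           # pretty-printed operation tree
# root = plan.structured_plan
# print(root.name, root.child_count())  # Results 1
# print(root.children[0].children[0])   # Scan | (p:Person)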
| swilly22/redisgraph-py | redisgraph/execution_plan.py | Python | bsd-2-clause | 5,416 | 0.001477 |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains a client to communicate with the Blogger servers.
For documentation on the Blogger API, see:
http://code.google.com/apis/blogger/
"""
__author__ = '[email protected] (Jeff Scudder)'
import gdata.client
import gdata.gauth
import gdata.blogger.data
import atom.data
import atom.http_core
# List user's blogs, takes a user ID, or 'default'.
BLOGS_URL = 'http://www.blogger.com/feeds/%s/blogs'
# Takes a blog ID.
BLOG_POST_URL = 'http://www.blogger.com/feeds/%s/posts/default'
# Takes a blog ID.
BLOG_PAGE_URL = 'http://www.blogger.com/feeds/%s/pages/default'
# Takes a blog ID and post ID.
BLOG_POST_COMMENTS_URL = 'http://www.blogger.com/feeds/%s/%s/comments/default'
# Takes a blog ID.
BLOG_COMMENTS_URL = 'http://www.blogger.com/feeds/%s/comments/default'
# Takes a blog ID.
BLOG_ARCHIVE_URL = 'http://www.blogger.com/feeds/%s/archive/full'
class BloggerClient(gdata.client.GDClient):
api_version = '2'
auth_service = 'blogger'
auth_scopes = gdata.gauth.AUTH_SCOPES['blogger']
def get_blogs(self, user_id='default', auth_token=None,
desired_class=gdata.blogger.data.BlogFeed, **kwargs):
return self.get_feed(BLOGS_URL % user_id, auth_token=auth_token,
desired_class=desired_class, **kwargs)
GetBlogs = get_blogs
def get_posts(self, blog_id, auth_token=None,
desired_class=gdata.blogger.data.BlogPostFeed, query=None,
**kwargs):
return self.get_feed(BLOG_POST_URL % blog_id, auth_token=auth_token,
desired_class=desired_class, query=query, **kwargs)
GetPosts = get_posts
def get_pages(self, blog_id, auth_token=None,
desired_class=gdata.blogger.data.BlogPageFeed, query=None,
**kwargs):
return self.get_feed(BLOG_PAGE_URL % blog_id, auth_token=auth_token,
desired_class=desired_class, query=query, **kwargs)
GetPages = get_pages
def get_post_comments(self, blog_id, post_id, auth_token=None,
desired_class=gdata.blogger.data.CommentFeed,
query=None, **kwargs):
return self.get_feed(BLOG_POST_COMMENTS_URL % (blog_id, post_id),
auth_token=auth_token, desired_class=desired_class,
query=query, **kwargs)
GetPostComments = get_post_comments
def get_blog_comments(self, blog_id, auth_token=None,
desired_class=gdata.blogger.data.CommentFeed,
query=None, **kwargs):
return self.get_feed(BLOG_COMMENTS_URL % blog_id, auth_token=auth_token,
desired_class=desired_class, query=query, **kwargs)
GetBlogComments = get_blog_comments
def get_blog_archive(self, blog_id, auth_token=None, **kwargs):
return self.get_feed(BLOG_ARCHIVE_URL % blog_id, auth_token=auth_token,
**kwargs)
GetBlogArchive = get_blog_archive
def add_post(self, blog_id, title, body, labels=None, draft=False,
auth_token=None, title_type='text', body_type='html', **kwargs):
# Construct an atom Entry for the blog post to be sent to the server.
new_entry = gdata.blogger.data.BlogPost(
title=atom.data.Title(text=title, type=title_type),
content=atom.data.Content(text=body, type=body_type))
if labels:
for label in labels:
new_entry.add_label(label)
if draft:
new_entry.control = atom.data.Control(draft=atom.data.Draft(text='yes'))
return self.post(new_entry, BLOG_POST_URL % blog_id, auth_token=auth_token, **kwargs)
AddPost = add_post
def add_page(self, blog_id, title, body, draft=False, auth_token=None,
title_type='text', body_type='html', **kwargs):
new_entry = gdata.blogger.data.BlogPage(
title=atom.data.Title(text=title, type=title_type),
content=atom.data.Content(text=body, type=body_type))
if draft:
new_entry.control = atom.data.Control(draft=atom.data.Draft(text='yes'))
return self.post(new_entry, BLOG_PAGE_URL % blog_id, auth_token=auth_token, **kwargs)
AddPage = add_page
def add_comment(self, blog_id, post_id, body, auth_token=None,
title_type='text', body_type='html', **kwargs):
new_entry = gdata.blogger.data.Comment(
content=atom.data.Content(text=body, type=body_type))
return self.post(new_entry, BLOG_POST_COMMENTS_URL % (blog_id, post_id),
auth_token=auth_token, **kwargs)
AddComment = add_comment
def update(self, entry, auth_token=None, **kwargs):
# The Blogger API does not currently support ETags, so for now remove
# the ETag before performing an update.
old_etag = entry.etag
entry.etag = None
response = gdata.client.GDClient.update(self, entry,
auth_token=auth_token, **kwargs)
entry.etag = old_etag
return response
Update = update
def delete(self, entry_or_uri, auth_token=None, **kwargs):
if isinstance(entry_or_uri, (str, atom.http_core.Uri)):
return gdata.client.GDClient.delete(self, entry_or_uri,
auth_token=auth_token, **kwargs)
# The Blogger API does not currently support ETags, so for now remove
# the ETag before performing a delete.
old_etag = entry_or_uri.etag
entry_or_uri.etag = None
response = gdata.client.GDClient.delete(self, entry_or_uri,
auth_token=auth_token, **kwargs)
# TODO: if GDClient.delete raises an exception, the entry's etag may be
# left as None. Should revisit this logic.
entry_or_uri.etag = old_etag
return response
Delete = delete
class Query(gdata.client.Query):
def __init__(self, order_by=None, **kwargs):
gdata.client.Query.__init__(self, **kwargs)
self.order_by = order_by
def modify_request(self, http_request):
gdata.client._add_query_param('orderby', self.order_by, http_request)
gdata.client.Query.modify_request(self, http_request)
ModifyRequest = modify_request
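# A usage sketch, assuming the surrounding gdata package is installed; the
# credentials, blog content, and accessor names below are illustrative and
# follow the typical gdata sample pattern rather than this file specifically.
#
# import gdata.blogger.client
#
# client = gdata.blogger.client.BloggerClient()
# client.client_login('user@example.com', 'app-password',
#                     source='example-agent', service='blogger')
# feed = client.get_blogs()                  # blogs owned by the authenticated user
# blog_id = feed.entry[0].get_blog_id()      # helper on gdata.blogger.data.Blog
# post = client.add_post(blog_id, title='Hello', body='<p>First post</p>',
#                        labels=['test'], draft=True)
# print(post.title.text)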
| webmedic/booker | src/gdata/blogger/client.py | Python | mit | 6,686 | 0.005085 |
from .spec import BASIC_PROPS_SET, encode_basic_properties


def encode_message(frame, headers, body, frame_size):
    """Encode message headers and body as a sequence of frames."""
    for f in frame.encode():
        yield f

    props, headers = split_headers(headers, BASIC_PROPS_SET)
    if headers:
        props['headers'] = headers

    yield encode_basic_properties(len(body), props)

    for chunk in encode_body(body, frame_size):
        yield chunk


def split_headers(user_headers, properties_set):
    """Split bitfield properties from named headers."""
    props = {}
    headers = {}
    for key, value in user_headers.iteritems():
        if key in properties_set:
            props[key] = value
        else:
            headers[key] = value
    return props, headers


def encode_body(body, frame_size):
    """Generate a sequence of chunks for body where each chunk is less than frame_size."""
    limit = frame_size - 7 - 1  # spec is broken...
    while body:
        payload, body = body[:limit], body[limit:]
        yield (0x03, payload)
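# The chunking rule in encode_body can be checked on its own: the usable payload
# per frame is frame_size - 8 bytes. A quick sketch with an arbitrary frame size:
#
# chunks = list(encode_body(b'abcdefghij', frame_size=12))
# print(chunks)   # [(3, b'abcd'), (3, b'efgh'), (3, b'ij')]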
| seatme/nucleon.amqp | nucleon/amqp/encoding.py | Python | lgpl-3.0 | 1,062 | 0.001883 |
from django.contrib.auth.models import User
from django.contrib.auth.admin import UserAdmin
from django.contrib import admin
from django.contrib.admin.sites import NotRegistered

from models import UserOpenidAssociation


class OpenIDInline(admin.StackedInline):
    model = UserOpenidAssociation


class UserAdminWithOpenIDs(UserAdmin):
    inlines = [OpenIDInline]


# Add OpenIDs to the user admin, but only if User has been registered
try:
    admin.site.unregister(User)
    admin.site.register(User, UserAdminWithOpenIDs)
except NotRegistered:
    pass

#from models import Nonce, Association
#admin.site.register(Nonce)
#admin.site.register(Association)
| indro/t2c | apps/external_apps/django_openid/admin.py | Python | mit | 658 | 0.009119 |
# (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2019

import os
import sys
import time
import threading

if 'GEVENT_TEST' not in os.environ and 'CASSANDRA_TEST' not in os.environ and sys.version_info >= (3, 5, 3):
    # Background RPC application
    #
    # Spawn the background RPC app that the tests will throw
    # requests at.
    import tests.apps.grpc_server
    from .stan_server import StanServicer

    stan_servicer = StanServicer()
    rpc_server_thread = threading.Thread(target=stan_servicer.start_server)
    rpc_server_thread.daemon = True
    rpc_server_thread.name = "Background RPC app"
    print("Starting background RPC app...")
    rpc_server_thread.start()
    time.sleep(1)
time.sleep(1) | instana/python-sensor | tests/apps/grpc_server/__init__.py | Python | mit | 709 | 0.002821 |
from flow.commands.service import ServiceCommand
from flow.configuration.inject.broker import BrokerConfiguration
from flow.configuration.inject.redis_conf import RedisConfiguration
from flow.configuration.inject.service_locator import ServiceLocatorConfiguration
from flow.shell_command.fork.handler import ForkShellCommandMessageHandler

import logging

LOG = logging.getLogger(__name__)


class ForkShellCommand(ServiceCommand):
    injector_modules = [
        BrokerConfiguration,
        RedisConfiguration,
        ServiceLocatorConfiguration,
    ]

    def _setup(self, *args, **kwargs):
        self.handlers = [self.injector.get(ForkShellCommandMessageHandler)]
        return ServiceCommand._setup(self, *args, **kwargs)
| genome/flow-core | flow/shell_command/fork/commands/service.py | Python | agpl-3.0 | 747 | 0.001339 |
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from neutron.agent.common import config
from neutron.plugins.common import constants as p_const
from neutron.plugins.openvswitch.common import constants
DEFAULT_BRIDGE_MAPPINGS = []
DEFAULT_VLAN_RANGES = []
DEFAULT_TUNNEL_RANGES = []
DEFAULT_TUNNEL_TYPES = []
ovs_opts = [
cfg.StrOpt('integration_bridge', default='br-int',
help=_("Integration bridge to use.")),
cfg.BoolOpt('enable_tunneling', default=False,
help=_("Enable tunneling support.")),
cfg.StrOpt('tunnel_bridge', default='br-tun',
help=_("Tunnel bridge to use.")),
cfg.StrOpt('int_peer_patch_port', default='patch-tun',
help=_("Peer patch port in integration bridge for tunnel "
"bridge.")),
cfg.StrOpt('tun_peer_patch_port', default='patch-int',
help=_("Peer patch port in tunnel bridge for integration "
"bridge.")),
cfg.StrOpt('local_ip', default='',
help=_("Local IP address of GRE tunnel endpoints.")),
cfg.ListOpt('bridge_mappings',
default=DEFAULT_BRIDGE_MAPPINGS,
help=_("List of <physical_network>:<bridge>. "
"Deprecated for ofagent.")),
cfg.BoolOpt('use_veth_interconnection', default=False,
help=_("Use veths instead of patch ports to interconnect the "
"integration bridge to physical bridges.")),
]
agent_opts = [
cfg.IntOpt('polling_interval', default=2,
help=_("The number of seconds the agent will wait between "
"polling for local device changes.")),
cfg.BoolOpt('minimize_polling',
default=True,
help=_("Minimize polling by monitoring ovsdb for interface "
"changes.")),
cfg.IntOpt('ovsdb_monitor_respawn_interval',
default=constants.DEFAULT_OVSDBMON_RESPAWN,
help=_("The number of seconds to wait before respawning the "
"ovsdb monitor after losing communication with it.")),
cfg.ListOpt('tunnel_types', default=DEFAULT_TUNNEL_TYPES,
help=_("Network types supported by the agent "
"(gre and/or vxlan).")),
cfg.IntOpt('vxlan_udp_port', default=p_const.VXLAN_UDP_PORT,
help=_("The UDP port to use for VXLAN tunnels.")),
cfg.IntOpt('veth_mtu',
help=_("MTU size of veth interfaces")),
cfg.BoolOpt('l2_population', default=False,
help=_("Use ML2 l2population mechanism driver to learn "
"remote MAC and IPs and improve tunnel scalability.")),
cfg.BoolOpt('arp_responder', default=False,
help=_("Enable local ARP responder if it is supported. "
"Requires OVS 2.1 and ML2 l2population driver. "
"Allows the switch (when supporting an overlay) "
"to respond to an ARP request locally without "
"performing a costly ARP broadcast into the overlay.")),
cfg.BoolOpt('dont_fragment', default=True,
help=_("Set or un-set the don't fragment (DF) bit on "
"outgoing IP packet carrying GRE/VXLAN tunnel.")),
cfg.BoolOpt('enable_distributed_routing', default=False,
help=_("Make the l2 agent run in DVR mode.")),
]
cfg.CONF.register_opts(ovs_opts, "OVS")
cfg.CONF.register_opts(agent_opts, "AGENT")
config.register_agent_state_opts_helper(cfg.CONF)
config.register_root_helper(cfg.CONF)
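# Once registered, these options are read elsewhere in the agent through
# oslo.config's global CONF object under the group names used above. A minimal
# sketch (the importing module is hypothetical):
#
# from oslo.config import cfg
# from neutron.plugins.openvswitch.common import config  # noqa: registers the options above
#
# print(cfg.CONF.OVS.integration_bridge)   # 'br-int' unless overridden in [OVS]
# print(cfg.CONF.AGENT.polling_interval)   # 2 unless overridden in [AGENT]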
| uni2u/neutron | neutron/plugins/openvswitch/common/config.py | Python | apache-2.0 | 4,210 | 0 |
#! /usr/bin/env python3
# Written by Martin v. Löwis <[email protected]>
"""Generate binary message catalog from textual translation description.
This program converts a textual Uniforum-style message catalog (.po file) into
a binary GNU catalog (.mo file). This is essentially the same function as the
GNU msgfmt program, however, it is a simpler implementation.
Usage: msgfmt.py [OPTIONS] filename.po
Options:
-o file
--output-file=file
Specify the output file to write to. If omitted, output will go to a
file named filename.mo (based off the input file name).
-h
--help
Print this message and exit.
-V
--version
Display version information and exit.
"""
import os
import sys
import ast
import getopt
import struct
import array
from email.parser import HeaderParser
__version__ = "1.1"
MESSAGES = {}
def usage(code, msg=''):
print(__doc__, file=sys.stderr)
if msg:
print(msg, file=sys.stderr)
sys.exit(code)
def add(id, str, fuzzy):
"Add a non-fuzzy translation to the dictionary."
global MESSAGES
if not fuzzy and str:
MESSAGES[id] = str
def generate():
"Return the generated output."
global MESSAGES
# the keys are sorted in the .mo file
keys = sorted(MESSAGES.keys())
offsets = []
ids = strs = b''
for id in keys:
# For each string, we need size and file offset. Each string is NUL
# terminated; the NUL does not count into the size.
offsets.append((len(ids), len(id), len(strs), len(MESSAGES[id])))
ids += id + b'\0'
strs += MESSAGES[id] + b'\0'
output = ''
# The header is 7 32-bit unsigned integers. We don't use hash tables, so
# the keys start right after the index tables.
# translated string.
keystart = 7*4+16*len(keys)
# and the values start after the keys
valuestart = keystart + len(ids)
koffsets = []
voffsets = []
# The string table first has the list of keys, then the list of values.
# Each entry has first the size of the string, then the file offset.
for o1, l1, o2, l2 in offsets:
koffsets += [l1, o1+keystart]
voffsets += [l2, o2+valuestart]
offsets = koffsets + voffsets
output = struct.pack("Iiiiiii",
0x950412de, # Magic
0, # Version
len(keys), # # of entries
7*4, # start of key index
7*4+len(keys)*8, # start of value index
0, 0) # size and offset of hash table
output += array.array("i", offsets).tostring()
output += ids
output += strs
return output
def make(filename, outfile):
ID = 1
STR = 2
# Compute .mo name from .po name and arguments
if filename.endswith('.po'):
infile = filename
else:
infile = filename + '.po'
if outfile is None:
outfile = os.path.splitext(infile)[0] + '.mo'
try:
lines = open(infile, 'rb').readlines()
except IOError as msg:
print(msg, file=sys.stderr)
sys.exit(1)
section = None
fuzzy = 0
# Start off assuming Latin-1, so everything decodes without failure,
# until we know the exact encoding
encoding = 'latin-1'
# Parse the catalog
lno = 0
for l in lines:
l = l.decode(encoding)
lno += 1
# If we get a comment line after a msgstr, this is a new entry
if l[0] == '#' and section == STR:
add(msgid, msgstr, fuzzy)
section = None
fuzzy = 0
# Record a fuzzy mark
if l[:2] == '#,' and 'fuzzy' in l:
fuzzy = 1
# Skip comments
if l[0] == '#':
continue
# Now we are in a msgid section, output previous section
if l.startswith('msgid') and not l.startswith('msgid_plural'):
if section == STR:
add(msgid, msgstr, fuzzy)
if not msgid:
# See whether there is an encoding declaration
p = HeaderParser()
charset = p.parsestr(msgstr.decode(encoding)).get_content_charset()
if charset:
encoding = charset
section = ID
l = l[5:]
msgid = msgstr = b''
is_plural = False
# This is a message with plural forms
elif l.startswith('msgid_plural'):
if section != ID:
print('msgid_plural not preceded by msgid on %s:%d' % (infile, lno),
file=sys.stderr)
sys.exit(1)
l = l[12:]
msgid += b'\0' # separator of singular and plural
is_plural = True
# Now we are in a msgstr section
elif l.startswith('msgstr'):
section = STR
if l.startswith('msgstr['):
if not is_plural:
print('plural without msgid_plural on %s:%d' % (infile, lno),
file=sys.stderr)
sys.exit(1)
l = l.split(']', 1)[1]
if msgstr:
msgstr += b'\0' # Separator of the various plural forms
else:
if is_plural:
print('indexed msgstr required for plural on %s:%d' % (infile, lno),
file=sys.stderr)
sys.exit(1)
l = l[6:]
# Skip empty lines
l = l.strip()
if not l:
continue
l = ast.literal_eval(l)
if section == ID:
msgid += l.encode(encoding)
elif section == STR:
msgstr += l.encode(encoding)
else:
print('Syntax error on %s:%d' % (infile, lno), \
'before:', file=sys.stderr)
print(l, file=sys.stderr)
sys.exit(1)
# Add last entry
if section == STR:
add(msgid, msgstr, fuzzy)
# Compute output
output = generate()
try:
open(outfile,"wb").write(output)
except IOError as msg:
print(msg, file=sys.stderr)
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], 'hVo:',
['help', 'version', 'output-file='])
except getopt.error as msg:
usage(1, msg)
outfile = None
# parse options
for opt, arg in opts:
if opt in ('-h', '--help'):
usage(0)
elif opt in ('-V', '--version'):
print("msgfmt.py", __version__, file=sys.stderr)
sys.exit(0)
elif opt in ('-o', '--output-file'):
outfile = arg
# do it
if not args:
print('No input file given', file=sys.stderr)
print("Try `msgfmt --help' for more information.", file=sys.stderr)
return
for filename in args:
make(filename, outfile)
if __name__ == '__main__':
main()
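# --- illustrative usage sketch (not part of the original script) ---
# Besides the command line described in the module docstring, make() can be
# driven directly, under the older Python 3 versions this script targets. The
# file names and catalog contents below are arbitrary.
#
# po_text = '''msgid ""
# msgstr "Content-Type: text/plain; charset=UTF-8\\n"
#
# msgid "Hello"
# msgstr "Bonjour"
# '''
# with open('demo.po', 'w', encoding='utf-8') as f:
#     f.write(po_text)
#
# make('demo.po', 'demo.mo')   # equivalent to: msgfmt.py -o demo.mo demo.po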
| LaoZhongGu/kbengine | kbe/src/lib/python/Tools/i18n/msgfmt.py | Python | lgpl-3.0 | 7,051 | 0.002979 |
from __future__ import absolute_import

import zope.interface.interface
from zope.interface.adapter import AdapterLookup as _AdapterLookup
from zope.interface.adapter import AdapterRegistry as _AdapterRegistry
from zope.interface.registry import Components, ComponentLookupError

__all__ = ('Registry',)

NO_CONTRACTS = 0
USE_CONTRACTS = 1
USE_CONTRACTS_WARN = 2


class AdapterLookup(_AdapterLookup):

    def lookup(self, required, provided, name=u'', default=None):
        factory = super(AdapterLookup, self).lookup(
            required, provided, name=name, default=default)
        if factory is None or self._registry.level == NO_CONTRACTS:
            return factory

        contract = getattr(provided, '__contract__', None)
        if contract is not None:
            return contract.bind_adapter(factory, self._registry.logger)

        return factory


class AdapterRegistry(_AdapterRegistry):
    level = NO_CONTRACTS
    logger = None
    LookupClass = AdapterLookup

    def __init__(self, bases=(), logger=None):
        self.logger = logger
        super(AdapterRegistry, self).__init__(bases=bases)

    def enable_contracts(self, level):
        self.level = level


class Registry(Components):
    """ Registry """

    def __init__(self, name='', bases=(),
                 use_contracts=NO_CONTRACTS, flavor=None, logger=None):
        self._use_contracts = use_contracts
        self._flavor = flavor
        self._logger = logger
        super(Registry, self).__init__(name, bases)

    def _init_registries(self):
        self.adapters = AdapterRegistry(logger=self._logger)
        self.utilities = AdapterRegistry(logger=self._logger)

    @property
    def flavor(self):
        return self._flavor

    def enable_contracts(self, warn_only=False):
        if warn_only:
            self._use_contracts = USE_CONTRACTS_WARN
            self.adapters.enable_contracts(USE_CONTRACTS_WARN)
        else:
            self._use_contracts = USE_CONTRACTS
            self.adapters.enable_contracts(USE_CONTRACTS)

    def _adapter_hook(self, interface, object, name='', default=None):
        return self.queryAdapter(object, interface, name, default)

    def install(self, use_contracts=False):
        zope.interface.interface.adapter_hooks.append(self._adapter_hook)
        if use_contracts:
            self.enable_contracts()

    def uninstall(self):
        if self._adapter_hook in zope.interface.interface.adapter_hooks:
            zope.interface.interface.adapter_hooks.remove(self._adapter_hook)

    def queryAdapter(self, object, interface, name=u'', default=None):
        if isinstance(object, (tuple, list)):
            adapter = self.adapters.queryMultiAdapter(
                object, interface, name, default)
        else:
            adapter = self.adapters.queryAdapter(
                object, interface, name, default)

        if self._use_contracts == NO_CONTRACTS:
            return adapter

        contract = getattr(interface, 'contract', None)
        if contract and adapter is not None:
            return contract(adapter, logger=self._logger)

        return adapter

    def getAdapter(self, object, interface, name=u''):
        adapter = self.adapters.queryAdapter(object, interface, name)
        if adapter is None:
            raise ComponentLookupError(object, interface, name)

        if self._use_contracts == NO_CONTRACTS:
            return adapter

        contract = getattr(interface, 'contract', None)
        if contract:
            return contract(adapter, logger=self._logger)

        return adapter

    def __enter__(self):
        self.install()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.uninstall()
        return False
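# In use, the class behaves like a zope.interface component registry with
# optional contract checking. A brief sketch; the interfaces and adapter below
# are made up for illustration.
#
# from zope.interface import Interface, implementer
#
# class IRequest(Interface):
#     pass
#
# class IView(Interface):
#     pass
#
# @implementer(IView)
# class View(object):
#     def __init__(self, request):
#         self.request = request
#
# @implementer(IRequest)
# class Request(object):
#     pass
#
# registry = Registry('demo')
# registry.registerAdapter(View, (IRequest,), IView)
# view = registry.queryAdapter(Request(), IView)
# print(type(view).__name__)   # View
#
# with registry:               # install()/uninstall() via the context-manager protocol
#     pass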
| fafhrd91/mdl | mdl/registry.py | Python | apache-2.0 | 3,760 | 0 |
import json
import os


class TestFixtureMixin(object):

    def get_json_data(self, filename):
        import environ
        full_path = (environ.Path(__file__) - 1).root
        fixture_path = None
        max_levels = 4
        current_level = 1
        while fixture_path is None:
            new_path = '{}{}{}'.format(full_path, os.sep, 'fixtures')
            if os.path.exists(new_path):
                fixture_path = new_path
            else:
                full_path = os.path.split(full_path)[0]
            if current_level == max_levels:
                break
            current_level += 1
        if fixture_path is None:
            started_at = (environ.Path(__file__) - 1).root
            raise ValueError('Could not find fixtures folder in {}'.format(started_at))
        json_filename = '{}{}{}'.format(fixture_path, os.sep, filename)
        with open(json_filename, 'r', encoding='utf-8') as jfile:
            json_data = json.load(jfile)
        return json_data
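# A typical use is mixing this into a test case whose JSON fixtures live in a
# fixtures/ directory up to four levels above the test file. The test class and
# fixture name below are hypothetical.
#
# from django.test import TestCase
#
# class UserImportTest(TestFixtureMixin, TestCase):
#     def test_loads_fixture(self):
#         data = self.get_json_data('users.json')   # resolved against <parent>/fixtures/
#         self.assertIsInstance(data, (list, dict))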
| luiscberrocal/django-test-tools | tests/mixins.py | Python | mit | 993 | 0.001007 |
from Foundation import *
from PyObjCTools.TestSupport import *

try:
    unicode
except NameError:
    unicode = str


class TestNSMetaData (TestCase):
    def testConstants(self):
        self.assertIsInstance(NSMetadataQueryDidStartGatheringNotification, unicode)
        self.assertIsInstance(NSMetadataQueryGatheringProgressNotification, unicode)
        self.assertIsInstance(NSMetadataQueryDidFinishGatheringNotification, unicode)
        self.assertIsInstance(NSMetadataQueryDidUpdateNotification, unicode)
        self.assertIsInstance(NSMetadataQueryResultContentRelevanceAttribute, unicode)
        self.assertIsInstance(NSMetadataQueryUserHomeScope, unicode)
        self.assertIsInstance(NSMetadataQueryLocalComputerScope, unicode)
        self.assertIsInstance(NSMetadataQueryNetworkScope, unicode)

    @min_os_level('10.7')
    def testConstants10_7(self):
        self.assertIsInstance(NSMetadataQueryLocalDocumentsScope, unicode)
        self.assertIsInstance(NSMetadataQueryUbiquitousDocumentsScope, unicode)
        self.assertIsInstance(NSMetadataQueryUbiquitousDataScope, unicode)
        self.assertIsInstance(NSMetadataItemFSNameKey, unicode)
        self.assertIsInstance(NSMetadataItemDisplayNameKey, unicode)
        self.assertIsInstance(NSMetadataItemURLKey, unicode)
        self.assertIsInstance(NSMetadataItemPathKey, unicode)
        self.assertIsInstance(NSMetadataItemFSSizeKey, unicode)
        self.assertIsInstance(NSMetadataItemFSCreationDateKey, unicode)
        self.assertIsInstance(NSMetadataItemFSContentChangeDateKey, unicode)
        self.assertIsInstance(NSMetadataItemIsUbiquitousKey, unicode)
        self.assertIsInstance(NSMetadataUbiquitousItemHasUnresolvedConflictsKey, unicode)
        self.assertIsInstance(NSMetadataUbiquitousItemIsDownloadedKey, unicode)
        self.assertIsInstance(NSMetadataUbiquitousItemIsDownloadingKey, unicode)
        self.assertIsInstance(NSMetadataUbiquitousItemIsUploadedKey, unicode)
        self.assertIsInstance(NSMetadataUbiquitousItemIsUploadingKey, unicode)
        self.assertIsInstance(NSMetadataUbiquitousItemPercentDownloadedKey, unicode)
        self.assertIsInstance(NSMetadataUbiquitousItemPercentUploadedKey, unicode)

    def testMethods(self):
        self.assertResultIsBOOL(NSMetadataQuery.startQuery)
        self.assertResultIsBOOL(NSMetadataQuery.isStarted)
        self.assertResultIsBOOL(NSMetadataQuery.isGathering)
        self.assertResultIsBOOL(NSMetadataQuery.isStopped)


if __name__ == "__main__":
    main()
| albertz/music-player | mac/pyobjc-framework-Cocoa/PyObjCTest/test_nsmetadata.py | Python | bsd-2-clause | 2,524 | 0.003566 |
"""Support for RESTful binary sensors."""
import logging
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
import voluptuous as vol
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES_SCHEMA,
PLATFORM_SCHEMA,
BinarySensorDevice,
)
from homeassistant.const import (
CONF_AUTHENTICATION,
CONF_DEVICE_CLASS,
CONF_HEADERS,
CONF_METHOD,
CONF_NAME,
CONF_PASSWORD,
CONF_PAYLOAD,
CONF_RESOURCE,
CONF_TIMEOUT,
CONF_USERNAME,
CONF_VALUE_TEMPLATE,
CONF_VERIFY_SSL,
HTTP_BASIC_AUTHENTICATION,
HTTP_DIGEST_AUTHENTICATION,
)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from .sensor import RestData
_LOGGER = logging.getLogger(__name__)
DEFAULT_METHOD = "GET"
DEFAULT_NAME = "REST Binary Sensor"
DEFAULT_VERIFY_SSL = True
DEFAULT_TIMEOUT = 10
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_RESOURCE): cv.url,
vol.Optional(CONF_AUTHENTICATION): vol.In(
[HTTP_BASIC_AUTHENTICATION, HTTP_DIGEST_AUTHENTICATION]
),
vol.Optional(CONF_HEADERS): {cv.string: cv.string},
vol.Optional(CONF_METHOD, default=DEFAULT_METHOD): vol.In(["POST", "GET"]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PAYLOAD): cv.string,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the REST binary sensor."""
name = config.get(CONF_NAME)
resource = config.get(CONF_RESOURCE)
method = config.get(CONF_METHOD)
payload = config.get(CONF_PAYLOAD)
verify_ssl = config.get(CONF_VERIFY_SSL)
timeout = config.get(CONF_TIMEOUT)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
headers = config.get(CONF_HEADERS)
device_class = config.get(CONF_DEVICE_CLASS)
value_template = config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
value_template.hass = hass
if username and password:
if config.get(CONF_AUTHENTICATION) == HTTP_DIGEST_AUTHENTICATION:
auth = HTTPDigestAuth(username, password)
else:
auth = HTTPBasicAuth(username, password)
else:
auth = None
rest = RestData(method, resource, auth, headers, payload, verify_ssl, timeout)
rest.update()
if rest.data is None:
raise PlatformNotReady
# No need to update the sensor now because it will determine its state
    # based on the REST resource that has just been retrieved.
add_entities([RestBinarySensor(hass, rest, name, device_class, value_template)])
class RestBinarySensor(BinarySensorDevice):
"""Representation of a REST binary sensor."""
def __init__(self, hass, rest, name, device_class, value_template):
"""Initialize a REST binary sensor."""
self._hass = hass
self.rest = rest
self._name = name
self._device_class = device_class
self._state = False
self._previous_data = None
self._value_template = value_template
@property
def name(self):
"""Return the name of the binary sensor."""
return self._name
@property
def device_class(self):
"""Return the class of this sensor."""
return self._device_class
@property
def available(self):
"""Return the availability of this sensor."""
return self.rest.data is not None
@property
def is_on(self):
"""Return true if the binary sensor is on."""
if self.rest.data is None:
return False
response = self.rest.data
if self._value_template is not None:
response = self._value_template.async_render_with_possible_json_value(
self.rest.data, False
)
try:
return bool(int(response))
except ValueError:
return {"true": True, "on": True, "open": True, "yes": True}.get(
response.lower(), False
)
def update(self):
"""Get the latest data from REST API and updates the state."""
self.rest.update()
| fbradyirl/home-assistant | homeassistant/components/rest/binary_sensor.py | Python | apache-2.0 | 4,542 | 0.000881 |
import json
import logging
import asyncio
import random
import socket
from hbmqtt.client import MQTTClient, ClientException
from hbmqtt.mqtt.constants import QOS_1
logging.basicConfig(format='%(asctime)s - %(name)14s - '
'%(levelname)5s - %(message)s')
logger = logging.getLogger("mqtt_test_node")
MQTT_URL = 'mqtt://localhost:1886/'
NODE_ID = 'mqtt_test_node'
LED_VALUE = '0'
DELAY_CHECK = 30 # seconds
def pressure_value():
return '{}°hPa'.format(random.randrange(990, 1015, 1))
NODE_RESOURCES = {'name': {'delay': 0,
'value': lambda x=None: "MQTT test node"},
'os': {'delay': 0,
'value': lambda x=None: "riot"},
'ip': {'delay': 0,
'value': (lambda x=None:
socket.gethostbyname(
socket.gethostname()))},
'board': {'delay': 0, 'value': lambda x=None: "HP"},
'led': {'delay': 0,
'value': lambda x=None: LED_VALUE},
'temperature': {'delay': 5,
'value': (lambda x=None:
'{}°C'
.format(random.randrange(
20, 30, 1)))},
'pressure': {'delay': 10,
'value': (lambda x=None:
'{}hPa'
.format(random.randrange(
990, 1015, 1)))}
}
async def send_check(mqtt_client):
while True:
check_data = json.dumps({'id': NODE_ID})
asyncio.get_event_loop().create_task(publish(
mqtt_client, 'node/check', check_data))
await asyncio.sleep(DELAY_CHECK)
def send_values(mqtt_client):
for resource in NODE_RESOURCES:
topic = 'node/{}/{}'.format(NODE_ID, resource)
delay = NODE_RESOURCES[resource]['delay']
value = NODE_RESOURCES[resource]['value']
asyncio.get_event_loop().create_task(
publish_continuous(mqtt_client, topic, value, delay))
async def start_client():
"""Connect to MQTT broker and subscribe to node check resource."""
    global LED_VALUE
mqtt_client = MQTTClient()
await mqtt_client.connect(MQTT_URL)
# Subscribe to 'gateway/check' with QOS=1
await mqtt_client.subscribe([('gateway/{}/discover'
.format(NODE_ID), QOS_1)])
await mqtt_client.subscribe([('gateway/{}/led/set'
.format(NODE_ID), QOS_1)])
asyncio.get_event_loop().create_task(send_check(mqtt_client))
asyncio.get_event_loop().create_task(send_values(mqtt_client))
while True:
try:
logger.debug("Waiting for incoming MQTT messages from gateway")
# Blocked here until a message is received
message = await mqtt_client.deliver_message()
except ClientException as ce:
logger.error("Client exception: {}".format(ce))
break
except Exception as exc:
logger.error("General exception: {}".format(exc))
break
packet = message.publish_packet
topic_name = packet.variable_header.topic_name
data = packet.payload.data.decode()
logger.debug("Received message from gateway: {} => {}"
.format(topic_name, data))
if topic_name.endswith("/discover"):
if data == "resources":
topic = 'node/{}/resources'.format(NODE_ID)
value = json.dumps(list(NODE_RESOURCES.keys())).encode()
asyncio.get_event_loop().create_task(
publish(mqtt_client, topic, value))
else:
for resource in NODE_RESOURCES:
topic = 'node/{}/{}'.format(NODE_ID, resource)
value = NODE_RESOURCES[resource]['value']
msg = json.dumps({'value': value()})
asyncio.get_event_loop().create_task(
publish(mqtt_client, topic, msg))
elif topic_name.endswith("/led/set"):
LED_VALUE = data
topic = 'node/{}/led'.format(NODE_ID)
data = json.dumps({'value': data}, ensure_ascii=False)
asyncio.get_event_loop().create_task(
publish(mqtt_client, topic, data.encode()))
else:
logger.debug("Topic not supported: {}".format(topic_name))
async def publish(mqtt_client, topic, value):
if hasattr(value, 'encode'):
value = value.encode()
await mqtt_client.publish(topic, value, qos=QOS_1)
logger.debug("Published '{}' to topic '{}'".format(value.decode(), topic))
async def publish_continuous(mqtt_client, topic, value, delay=0):
while True:
data = json.dumps({'value': value()}, ensure_ascii=False)
await mqtt_client.publish(topic, data.encode('utf-8'), qos=QOS_1)
logger.debug("Published '{}' to topic '{}'".format(data, topic))
if delay == 0:
break
await asyncio.sleep(delay)
if __name__ == '__main__':
logger.setLevel(logging.DEBUG)
try:
asyncio.get_event_loop().run_until_complete(start_client())
except KeyboardInterrupt:
logger.info("Exiting")
asyncio.get_event_loop().stop()
| pyaiot/pyaiot | utils/mqtt/mqtt-test-node.py | Python | bsd-3-clause | 5,525 | 0 |
#!/usr/bin/env python
#Copyright (C) 2011 by Benedict Paten ([email protected])
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import time
from toil.batchSystems.abstractBatchSystem import AbstractBatchSystem
class CombinedBatchSystem(AbstractBatchSystem):
"""Takes two batch systems and a choice function to decide which to issue to.
"""
def __init__(self, config, batchSystem1, batchSystem2, batchSystemChoiceFn):
AbstractBatchSystem.__init__(self, config, 0, 0) #Call the parent constructor
self.batchSystem1 = batchSystem1
self.batchSystem2 = batchSystem2
self.batchSystemChoiceFn = batchSystemChoiceFn
def _jobIDForBatchSystem1(self, id):
return (1, id)
def _isJobIDForBatchSystem1(self, id):
return id[0] == 1
def _jobIDForBatchSystem2(self, id):
return (2, id)
def _isJobIDForBatchSystem2(self, id):
return id[0] == 2
def _strip(self, id):
return id[1]
def issueBatchJob(self, command, memory, cpu):
if self.batchSystemChoiceFn(command, memory, cpu):
return self._jobIDForBatchSystem1(self.batchSystem1.issueBatchJob(command, memory, cpu))
else:
return self._jobIDForBatchSystem2(self.batchSystem2.issueBatchJob(command, memory, cpu))
def killBatchJobs(self, jobIDs):
l, l2 = [], []
for jobID in jobIDs:
if self._isJobIDForBatchSystem1(jobID):
l.append(self._strip(jobID))
else:
assert self._isJobIDForBatchSystem2(jobID)
l2.append(self._strip(jobID))
self.batchSystem1.killBatchJobs(l)
self.batchSystem2.killBatchJobs(l2)
def getIssuedBatchJobIDs(self):
return [ self._jobIDForBatchSystem1(id) for id in self.batchSystem1.getIssuedBatchJobIDs() ] + [ self._jobIDForBatchSystem2(id) for id in self.batchSystem2.getIssuedBatchJobIDs() ]
def getRunningBatchJobIDs(self):
return [ self._jobIDForBatchSystem1(id) for id in self.batchSystem1.getRunningBatchJobIDs() ] + [ self._jobIDForBatchSystem2(id) for id in self.batchSystem2.getRunningBatchJobIDs() ]
def getUpdatedBatchJob(self, maxWait):
endTime = time.time() + maxWait
while 1:
updatedJob = self.batchSystem2.getUpdatedBatchJob(0) #Small positive values of wait seem to
if updatedJob != None:
return (self._jobIDForBatchSystem2(updatedJob[0]), updatedJob[1])
updatedJob = self.batchSystem1.getUpdatedBatchJob(0)
if updatedJob != None:
return (self._jobIDForBatchSystem1(updatedJob[0]), updatedJob[1])
remaining = endTime - time.time()
if remaining <= 0:
return None
time.sleep(0.01)
# FIXME: This should be a static method
def getRescueBatchJobFrequency(self):
return min(self.batchSystem1.getRescueBatchJobFrequency(), self.batchSystem2.getRescueBatchJobFrequency())
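# A rough usage sketch (hypothetical, not part of the original module): route
# small jobs to one batch system and everything else to another. `config`,
# `localBatch` and `clusterBatch` are placeholders for a real config object and
# two AbstractBatchSystem instances.
#
# def preferLocal(command, memory, cpu):
#     return memory <= 2 * 1024 ** 3 and cpu <= 1
#
# combined = CombinedBatchSystem(config, localBatch, clusterBatch, preferLocal)
# jobID = combined.issueBatchJob("echo hello", memory=1024 ** 3, cpu=1)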
| BD2KGenomics/toil-old | src/toil/batchSystems/combinedBatchSystem.py | Python | mit | 4,039 | 0.013122 |
from botapi.settings import *
DEBUG = True
ALLOWED_HOSTS = ['*']
| naelstrof/PugBot-Discord-Django | botapi/apache/override.py | Python | mit | 66 | 0 |
import tweepy
from pymongo import MongoClient
class RaffleSlave:
"The class responsbile for a single raffle, a new instance for each individual raffle"
Params = None
api = None
alive = True
def __init__(self, hashtag, max, id, owner ):
self.Params = {}
self.Params['max'] = max
self.Params['hashtag'] = hashtag
self.Params[ '_id' ] = id
self.Params[ 'owner' ] = owner
auth = tweepy.OAuthHandler( '5Xr8HX71XetZYmGV86AmcEgVo', '85ql1GsrOLTRre0AqqprX9Xtm5SkMOWzJk9OVJPRiLM8bm72JA' )
auth.set_access_token( '832250876551110658-MLGfJUjJH6Ktwlf51AQQlSO9QPcp3ew', 'UvCcyNqwH3X7u2KfRWeYvlOWxN2k1ONfjrlpxRK1Shj33' )
self.api = tweepy.API( auth )
def update(self):
public_tweets = self.api.search( '@'+self.Params['owner']+' #'+self.Params['hashtag'] )
client = MongoClient()
db = client.raftl
tweetcollection = db.tweets
followers = self.api.followers_ids(self.Params['owner'])
for tweet in public_tweets:
tweetcollection.update_one( {'_id':tweet.id}, {'$set': {'_id':tweet.id, 'user_id':tweet.author.id, 'following':tweet.author.id in followers,'raffle_id':self.Params['_id'], 'body':tweet.text, 'username':tweet.author.screen_name, 'profile_img':tweet.author.profile_image_url_https } }, True )
#tweetcollection.update_one( {'_id':tweet.id}, {'$set': {'_id':tweet.id, 'user_id':tweet.author.id, 'following':True,'raffle_id':self.Params['_id'], 'body':tweet.text },'$unset':{'drawn':"" } }, True )
def getParams(self):
return self.Params
def checkAlive(self):
return self.alive
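# Rough usage sketch (hypothetical values, not part of the original module):
# poll Twitter for entries that mention the owner with the raffle hashtag and
# store them in the local MongoDB via update().
#
# slave = RaffleSlave(hashtag="myraffle", max=100, id=1, owner="some_account")
# slave.update()
# print(slave.getParams())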
| asmi92/odu-acm | raffleslave.py | Python | mit | 1,668 | 0.020983 |
#
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
# pylint: disable=unused-import
import six
from . import Command, Method, Object
from ipalib import api, parameters, output
from ipalib.parameters import DefaultFrom
from ipalib.plugable import Registry
from ipalib.text import _
from ipapython.dn import DN
from ipapython.dnsutil import DNSName
if six.PY3:
unicode = str
__doc__ = _("""
Set a user's password
If someone other than a user changes that user's password (e.g., Helpdesk
resets it) then the password will need to be changed the first time it
is used. This is so the end-user is the only one who knows the password.
The IPA password policy controls how often a password may be changed,
what strength requirements exist, and the length of the password history.
EXAMPLES:
To reset your own password:
ipa passwd
To change another user's password:
ipa passwd tuser1
""")
register = Registry()
@register()
class passwd(Command):
__doc__ = _("Set a user's password.")
takes_args = (
parameters.Str(
'principal',
cli_name='user',
label=_(u'User name'),
default_from=DefaultFrom(lambda : None),
# FIXME:
# lambda: krb_utils.get_principal()
autofill=True,
no_convert=True,
),
parameters.Password(
'password',
label=_(u'New Password'),
confirm=True,
),
parameters.Password(
'current_password',
label=_(u'Current Password'),
default_from=DefaultFrom(lambda principal: None, 'principal'),
# FIXME:
# lambda principal: get_current_password(principal)
autofill=True,
),
)
takes_options = (
parameters.Password(
'otp',
required=False,
label=_(u'OTP'),
doc=_(u'One Time Password'),
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Output(
'result',
bool,
doc=_(u'True means the operation was successful'),
),
output.PrimaryKey(
'value',
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
| redhatrises/freeipa | ipaclient/remote_plugins/2_164/passwd.py | Python | gpl-3.0 | 2,428 | 0.000824 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import rest.models
from .base import from_django_model
WsAuthGroup = from_django_model(rest.models.WsAuthGroup)
| lavalamp-/ws-backend-community | lib/sqlalchemy/models/auth.py | Python | gpl-3.0 | 178 | 0 |
""" Parser for the negative goals.
Allows to parse sets of the form, e.g., {i-j-k : i!=j & j!=k & i!=k} U
{i,j : i<3 & j<=3}, meaning that i should not know whether j knows the secret
of k, for i, j, k distinct, and that i should not know the secret of j for
either i = 1, 2 and j = 1, 2, 3.
Also allows instantiated negative goals of the form {1-2-3, 1-3}, meaning that 1
should not know whether 2 knows the secret of 3 and the secret of 3 (equivalent
to {i-j-k : i=1 & j=2 & k=3} U {i-j : i=1 & j=3}).
"""
from pypeg2 import *
import re
""" Description of the grammar of negative goals.
Comp ::= = | != | <= | >= | < | >
Int ::= <integer>
AgtName ::= <lower-case letter> | AgtName<lower-case letter> | AgtName<digit>
Cst ::= AgtName Comp AgtName | AgtName Comp Int
Csts ::= Cst | Csts & Csts
Agts ::= AgtName | Agts-Agts
AgtsInst ::= Int | AgtsInst-AgtsInst
AgtsInsts ::= AgtsInst | AgtsInsts, AgtsInsts
Set ::= {Agts : Csts} | {AgtsInsts}
Sets ::= Set | Sets U Sets
"""
""" Comparison operator.
"""
class Comp(str):
grammar = re.compile(r'(=|!=|<=|>=|<|>)')
""" Integer.
"""
class Int(int):
grammar = attr('nb', re.compile(r'[1-9]\d*'))
""" Name of an agent: a lower case letter possibly followed by lower case
letters and numbers.
"""
class AgtName(str):
grammar = attr('name', re.compile(r'[a-z]([a-z]|[0-9])*'))
""" Simple constraint: a comparison between two agents or an agent name and an
integer.
"""
class Cst(List):
grammar = AgtName, Comp, [AgtName, Int]
def __repr__(self):
return self[0].name + ' ' + self[1] + ' ' + \
(self[2].name if type(self[2]) == AgtName else self[2].nb)
""" Conjunction of constraints, separared by '&'.
"""
class Csts(List):
grammar = csl(Cst, separator='&')
def __repr__(self):
return ' & '.join(str(cst) for cst in self)
""" Sequence of agents, separated by '-'.
"""
class Agts(List):
grammar = csl(AgtName, separator='-')
def __repr__(self):
return '-'.join(i.name for i in self)
""" Sequence of 'instantiated' agents (that is, integers), separated by '-'.
"""
class AgtsInst(List):
grammar = csl(Int, separator='-')
def __repr__(self):
return '-'.join(i.nb for i in self)
""" Several sequences of instantiated agents, separated by ','.
"""
class AgtsInsts(List):
grammar = csl(AgtsInst, separator=',')
def __repr__(self):
return ', '.join(str(ai) for ai in self)
""" Set: either agents followed by constraints (specified by ':'), or sequences
of instantiated agents, separated by ','.
"""
class Set(List):
grammar = '{', [(Agts, ':', Csts), AgtsInsts], '}'
def __repr__(self):
return '{' + str(self[0]) + \
(' : ' + str(self[1]) if type(self[0]) == Agts else '') + '}'
""" Union of sets, separated by 'U'.
"""
class Sets(List):
grammar = csl(Set, separator='U')
def __repr__(self):
return ' U '.join(str(s) for s in self)
""" Parses a string as Sets.
"""
def parseSet(s):
try:
res = parse(s, Sets)
except SyntaxError:
print('Error: syntax error in negative goals.')
sys.exit(1)
return res
# test1 = '{i1-j-k : i1>=1 & j<2} U {i-j: i!=j} U {i}'
# test2 = '{1-10-6}'
# test3 = test1 + ' U ' + test2
# ast = parse(test3, Sets)
# print(ast)
| FaustineMaffre/GossipProblem-PDDL-generator | neggoalsparser/parser.py | Python | mit | 3,303 | 0.003633 |
from mock import patch
from django.test import TestCase
from django.core.urlresolvers import reverse
from ..models import Contact
class ContactViewTest(TestCase):
def setUp(self):
self.url = reverse('contact')
self.response = self.client.get(self.url)
def test_contact_view_is_accessible(self):
self.assertEqual(self.response.status_code, 200)
def test_contact_view_should_have_form_tag(self):
expected = '<form action="." method="post">'
self.assertContains(self.response, expected, status_code=200)
def test_contact_view_should_have_firstname_input(self):
expected = '<label>Firstname: '
self.assertContains(self.response, expected, status_code=200)
expected = '<input id="id_firstname" maxlength="100" name="firstname" '
expected += 'type="text" />'
self.assertContains(self.response, expected, status_code=200)
def test_contact_view_should_have_lastname_and_input(self):
expected = '<label>Last Name:</label>'
self.assertContains(self.response, expected, status_code=200)
expected = '<input id="id_lastname" maxlength="100" name="lastname" '
expected += 'type="text" />'
self.assertContains(self.response, expected, status_code=200)
def test_contact_view_should_have_email_and_input(self):
expected = '<label>Email:</label>'
self.assertContains(self.response, expected, status_code=200)
expected = '<input id="id_email" maxlength="100" name="email" '
expected += 'type="email" />'
self.assertContains(self.response, expected, status_code=200)
def test_contact_view_should_have_submit_button(self):
expected = '<input type="submit" value="Submit">'
self.assertContains(self.response, expected, status_code=200)
def test_contact_view_should_accessible_by_post(self):
response = self.client.post(self.url)
self.assertEqual(response.status_code, 200)
@patch('contacts.views.GeoIP')
def test_submit_contact_data_successfully(self, mock):
mock.return_value.getGeoIP.return_value = {
"longitude": 100.5014,
"latitude": 13.754,
"asn": "AS4750",
"offset": "7",
"ip": "58.137.162.34",
"area_code": "0",
"continent_code": "AS",
"dma_code": "0",
"city": "Bangkok",
"timezone": "Asia/Bangkok",
"region": "Krung Thep",
"country_code": "TH",
"isp": "CS LOXINFO PUBLIC COMPANY LIMITED",
"country": "Thailand",
"country_code3": "THA",
"region_code": "40"
}
data = {
'firstname': 'John',
'lastname': 'Smith',
'email': '[email protected]'
}
self.client.post(self.url, data=data)
contact = Contact.objects.get(firstname='John')
self.assertEqual(contact.firstname, 'John')
self.assertEqual(contact.lastname, 'Smith')
self.assertEqual(contact.email, '[email protected]')
self.assertEqual(contact.ip, '58.137.162.34')
self.assertEqual(contact.lat, '13.754')
self.assertEqual(contact.lng, '100.5014')
def test_submit_contact_data_without_firstname_should_not_save_data(self):
data = {
'firstname': '',
'lastname': 'Smith',
'email': '[email protected]'
}
self.client.post(self.url, data=data)
contact_count = Contact.objects.filter(lastname='Smith').count()
self.assertEqual(contact_count, 0)
def test_submit_contact_data_without_lastname_should_not_save_data(self):
data = {
'firstname': 'John',
'lastname': '',
'email': '[email protected]'
}
self.client.post(self.url, data=data)
contact_count = Contact.objects.all().count()
self.assertEqual(contact_count, 0)
def test_submit_contact_data_without_email_should_not_save_data(self):
data = {
'firstname': 'John',
'lastname': 'Smith',
'email': ''
}
self.client.post(self.url, data=data)
contact_count = Contact.objects.filter(lastname='Smith').count()
self.assertEqual(contact_count, 0)
def test_submit_contact_data_without_firstname_should_get_error_message(
self):
data = {
'firstname': '',
'lastname': 'Smith',
'email': '[email protected]'
}
response = self.client.post(self.url, data=data)
expected = 'This field is required.'
self.assertContains(response, expected, status_code=200)
def test_submit_contact_data_without_email_should_get_error_message(
self
):
data = {
'firstname': 'John',
'lastname': 'Smith',
'email': ''
}
response = self.client.post(self.url, data=data)
expected = 'This field is required.'
self.assertContains(response, expected, status_code=200)
def test_submit_contact_data_without_lastname_should_get_error_message(
self
):
data = {
'firstname': 'John',
'lastname': '',
'email': '[email protected]'
}
response = self.client.post(self.url, data=data)
expected = 'This field is required.'
self.assertContains(response, expected, status_code=200)
@patch('contacts.views.GeoIP')
def test_redirect_to_thank_you_page_successfully(self, mock):
mock.return_value.getGeoIP.return_value = {
"longitude": 100.5014,
"latitude": 13.754,
"asn": "AS4750",
"offset": "7",
"ip": "58.137.162.34",
"area_code": "0",
"continent_code": "AS",
"dma_code": "0",
"city": "Bangkok",
"timezone": "Asia/Bangkok",
"region": "Krung Thep",
"country_code": "TH",
"isp": "CS LOXINFO PUBLIC COMPANY LIMITED",
"country": "Thailand",
"country_code3": "THA",
"region_code": "40"
}
data = {
'firstname': 'John',
'lastname': 'Smith',
'email': '[email protected]'
}
response = self.client.post(
self.url,
data=data
)
self.assertRedirects(
response,
'/thankyou/?firstname=John',
status_code=302,
target_status_code=200
)
@patch('contacts.views.GeoIP')
def test_redirected_page_should_contain_firstname(self, mock):
mock.return_value.getGeoIP.return_value = {
"longitude": 100.5014,
"latitude": 13.754,
"asn": "AS4750",
"offset": "7",
"ip": "58.137.162.34",
"area_code": "0",
"continent_code": "AS",
"dma_code": "0",
"city": "Bangkok",
"timezone": "Asia/Bangkok",
"region": "Krung Thep",
"country_code": "TH",
"isp": "CS LOXINFO PUBLIC COMPANY LIMITED",
"country": "Thailand",
"country_code3": "THA",
"region_code": "40"
}
data = {
'firstname': 'John',
'lastname': 'Smith',
'email': '[email protected]'
}
response = self.client.post(
self.url,
data=data,
follow=True
)
expected = 'Firstname: John'
self.assertContains(response, expected, status_code=200)
@patch('contacts.views.GeoIP')
def test_thank_you_page_should_contain_lastname(self, mock):
mock.return_value.getGeoIP.return_value = {
"longitude": 100.5014,
"latitude": 13.754,
"asn": "AS4750",
"offset": "7",
"ip": "58.137.162.34",
"area_code": "0",
"continent_code": "AS",
"dma_code": "0",
"city": "Bangkok",
"timezone": "Asia/Bangkok",
"region": "Krung Thep",
"country_code": "TH",
"isp": "CS LOXINFO PUBLIC COMPANY LIMITED",
"country": "Thailand",
"country_code3": "THA",
"region_code": "40"
}
data = {
'firstname': 'lnwBoss',
'lastname': 'yong',
'email': '[email protected]'
}
response = self.client.post(self.url, data=data, follow=True)
expected = 'Lastname: yong'
self.assertContains(response, expected, status_code=200)
@patch('contacts.views.GeoIP')
def test_call_geoip_api_successfully(self, mock):
mock.return_value.getGeoIP.return_value = {
"longitude": 100.5014,
"latitude": 13.754,
"asn": "AS4750",
"offset": "7",
"ip": "58.137.162.34",
"area_code": "0",
"continent_code": "AS",
"dma_code": "0",
"city": "Bangkok",
"timezone": "Asia/Bangkok",
"region": "Krung Thep",
"country_code": "TH",
"isp": "CS LOXINFO PUBLIC COMPANY LIMITED",
"country": "Thailand",
"country_code3": "THA",
"region_code": "40"
}
data = {
'firstname': 'John',
'lastname': 'Smith',
'email': '[email protected]'
}
response = self.client.post(
self.url,
data=data
)
        self.assertTrue(mock.return_value.getGeoIP.called)
@patch('contacts.views.GeoIP')
def test_thank_you_page_should_contain_ip(self, mock):
mock.return_value.getGeoIP.return_value = {
"longitude": 100.5014,
"latitude": 13.754,
"asn": "AS4750",
"offset": "7",
"ip": "58.137.162.34",
"area_code": "0",
"continent_code": "AS",
"dma_code": "0",
"city": "Bangkok",
"timezone": "Asia/Bangkok",
"region": "Krung Thep",
"country_code": "TH",
"isp": "CS LOXINFO PUBLIC COMPANY LIMITED",
"country": "Thailand",
"country_code3": "THA",
"region_code": "40"
}
data = {
'firstname': 'lnwBoss',
'lastname': 'yong',
'email': '[email protected]'
}
response = self.client.post(self.url, data=data, follow=True)
expected = 'IP: 58.137.162.34'
self.assertContains(response, expected, status_code=200)
@patch('contacts.views.GeoIP')
def test_thank_you_page_should_contain_lat(self, mock):
mock.return_value.getGeoIP.return_value = {
"longitude": 100.5014,
"latitude": 13.754,
"asn": "AS4750",
"offset": "7",
"ip": "58.137.162.34",
"area_code": "0",
"continent_code": "AS",
"dma_code": "0",
"city": "Bangkok",
"timezone": "Asia/Bangkok",
"region": "Krung Thep",
"country_code": "TH",
"isp": "CS LOXINFO PUBLIC COMPANY LIMITED",
"country": "Thailand",
"country_code3": "THA",
"region_code": "40"
}
data = {
'firstname': 'lnwBoss',
'lastname': 'yong',
'email': '[email protected]'
}
response = self.client.post(self.url, data=data, follow=True)
expected = 'Lat: 13.754'
self.assertContains(response, expected, status_code=200)
@patch('contacts.views.GeoIP')
def test_thank_you_page_should_contain_lng(self, mock):
mock.return_value.getGeoIP.return_value = {
"longitude": 100.5014,
"latitude": 13.754,
"asn": "AS4750",
"offset": "7",
"ip": "58.137.162.34",
"area_code": "0",
"continent_code": "AS",
"dma_code": "0",
"city": "Bangkok",
"timezone": "Asia/Bangkok",
"region": "Krung Thep",
"country_code": "TH",
"isp": "CS LOXINFO PUBLIC COMPANY LIMITED",
"country": "Thailand",
"country_code3": "THA",
"region_code": "40"
}
data = {
'firstname': 'lnwBoss',
'lastname': 'yong',
'email': '[email protected]'
}
response = self.client.post(self.url, data=data, follow=True)
expected = 'Lng: 100.5014'
self.assertContains(response, expected, status_code=200)
@patch('contacts.views.GeoIP')
def test_thank_you_page_should_contain_email(self, mock):
mock.return_value.getGeoIP.return_value = {
"longitude": 100.5014,
"latitude": 13.754,
"asn": "AS4750",
"offset": "7",
"ip": "58.137.162.34",
"area_code": "0",
"continent_code": "AS",
"dma_code": "0",
"city": "Bangkok",
"timezone": "Asia/Bangkok",
"region": "Krung Thep",
"country_code": "TH",
"isp": "CS LOXINFO PUBLIC COMPANY LIMITED",
"country": "Thailand",
"country_code3": "THA",
"region_code": "40"
}
data = {
'firstname': 'lnwBoss',
'lastname': 'yong',
'email': '[email protected]'
}
response = self.client.post(self.url, data=data, follow=True)
expected = 'Email: [email protected]'
self.assertContains(response, expected, status_code=200)
class ThankYouViewTest(TestCase):
def setUp(self):
self.url = reverse('thankyou')
self.response = self.client.get(self.url)
def test_thank_you_view_is_accessible(self):
self.assertEqual(self.response.status_code, 200)
def test_thank_you_page_should_contain_title_thank_you(self):
expected = '<h1>Thank you</h1>'
self.assertContains(self.response, expected, status_code=200)
| prontointern/django-contact-form | django_contact_form_project/contacts/tests/test_views.py | Python | mit | 14,146 | 0.000141 |
import turtle
import random
# Let's create our turtle and call him Simon!
simon = turtle.Turtle()
# We'll set the background to black
turtle.bgcolor("black")
# This is our list of colours
colors = ["red", "green", "blue"]
# We need to ask the user their name
name = turtle.textinput("Name", "What is your name?")
simon.penup()
for number in range(30): # We'll draw the name 30 times
simon.forward(number * 10)
simon.write(name, font=("Arial", 12, "bold")) # This writes the name and chooses a font
simon.right(92)
simon.pencolor(random.choice(colors)) # This chooses a random colour
| SimonDevon/simple-python-shapes | name-draw1.py | Python | mit | 605 | 0.006612 |
# -*- coding: utf-8; Mode: Python; indent-tabs-mode: nil; tab-width: 4 -*-
# Miscellaneous validation of user-entered data
#
# Copyright (C) 2005 Junta de Andalucía
# Copyright (C) 2005, 2006, 2007, 2008 Canonical Ltd.
# Copyright (C) 2015 Manjaro (http://manjaro.org)
#
# Validation library.
# Created by Antonio Olmo <aolmo#emergya._info> on 26 jul 2005.
from gi.repository import Gtk
def check_grub_device(device):
"""Check that the user entered a valid boot device.
@return True if the device is valid, False if it is not."""
import re
import os
regex = re.compile(r'^/dev/([a-zA-Z0-9]+|mapper/[a-zA-Z0-9_]+)$')
if regex.search(device):
if not os.path.exists(device):
return False
return True
# (device[,part-num])
regex = re.compile(r'^\((hd|fd)[0-9]+(,[0-9]+)*\)$')
if regex.search(device):
return True
else:
return False
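# Illustrative results of the checks above:
# check_grub_device('/dev/sda') -> True only if /dev/sda exists
# check_grub_device('(hd0)') -> True
# check_grub_device('(hd0,1)') -> True
# check_grub_device('hd0') -> False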
NAME_LENGTH = 1
NAME_BADCHAR = 2
NAME_BADHYPHEN = 3
NAME_BADDOTS = 4
def check(element, value):
if element == 'username':
return check_username(value)
if element == 'hostname':
return check_hostname(value)
def check_username(name):
""" Check the correctness of a proposed user name.
@return empty list (valid) or list of:
- C{NAME_LENGTH} wrong length.
- C{NAME_BADCHAR} contains invalid characters.
- C{NAME_BADHYPHEN} starts or ends with a hyphen.
- C{NAME_BADDOTS} contains consecutive/initial/final dots."""
import re
result = set()
if len(name) < 1 or len(name) > 40:
result.add(NAME_LENGTH)
regex = re.compile(r'^[a-z0-9.\-]+$')
if not regex.search(name):
result.add(NAME_BADCHAR)
if name.startswith('-') or name.endswith('-'):
result.add(NAME_BADHYPHEN)
if '.' in name:
result.add(NAME_BADDOTS)
return sorted(result)
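# Illustrative results of the checks above:
# check_username('john-doe') -> [] (valid)
# check_username('John') -> [NAME_BADCHAR] (upper case is rejected)
# check_username('-bob') -> [NAME_BADHYPHEN]
# check_username('bob.smith') -> [NAME_BADDOTS]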
def check_hostname(name):
""" Check the correctness of a proposed host name.
@return empty list (valid) or list of:
- C{NAME_LENGTH} wrong length.
- C{NAME_BADCHAR} contains invalid characters.
- C{NAME_BADHYPHEN} starts or ends with a hyphen.
- C{NAME_BADDOTS} contains consecutive/initial/final dots."""
import re
result = set()
if len(name) < 1 or len(name) > 63:
result.add(NAME_LENGTH)
regex = re.compile(r'^[a-zA-Z0-9.-]+$')
if not regex.search(name):
result.add(NAME_BADCHAR)
if name.startswith('-') or name.endswith('-'):
result.add(NAME_BADHYPHEN)
if '..' in name or name.startswith('.') or name.endswith('.'):
result.add(NAME_BADDOTS)
return sorted(result)
# Based on setPasswordStrength() in Mozilla Seamonkey, which is tri-licensed
# under MPL 1.1, GPL 2.0, and LGPL 2.1.
def password_strength(password):
upper = lower = digit = symbol = 0
for char in password:
if char.isdigit():
digit += 1
elif char.islower():
lower += 1
elif char.isupper():
upper += 1
else:
symbol += 1
length = len(password)
if length > 5:
length = 5
if digit > 3:
digit = 3
if upper > 3:
upper = 3
if symbol > 3:
symbol = 3
strength = (
((length * 0.1) - 0.2) +
(digit * 0.1) +
(symbol * 0.15) +
(upper * 0.1))
if strength > 1:
strength = 1
if strength < 0:
strength = 0
return strength
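# Worked examples of the scoring above:
# password_strength('abc') -> about 0.1 (short, lower case only)
# password_strength('Abc123!') -> about 0.85 (mixed case, digits and a symbol)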
def human_password_strength(password):
strength = password_strength(password)
length = len(password)
if length == 0:
hint = ''
color = ''
elif length < 6:
hint = _('Password is too short')
color = 'darkred'
elif strength < 0.5:
hint = _('Weak password')
color = 'darkred'
elif strength < 0.75:
hint = _('Fair password')
color = 'darkorange'
elif strength < 0.9:
hint = _('Good password')
color = 'darkgreen'
else:
hint = _('Strong password')
color = 'darkgreen'
return hint, color
def check_password(password, verified_password, password_ok,
password_false, password_error_label,
password_strength, icon_ok, icon_warning,
allow_empty=False):
complete = True
passw = password.get_text()
vpassw = verified_password.get_text()
if passw != vpassw:
complete = False
password_ok.hide()
if passw and (len(vpassw) / float(len(passw)) > 0.8):
txt = _("Passwords do not match")
txt = '<small><span foreground="darkred"><b>{0}</b></span></small>'.format(txt)
password_error_label.set_markup(txt)
password_error_label.show()
else:
password_error_label.hide()
if allow_empty:
password_strength.hide()
elif not passw:
password_strength.hide()
complete = False
else:
(txt, color) = human_password_strength(passw)
txt = '<small><span foreground="{0}"><b>{1}</b></span></small>'.format(color, txt)
password_strength.set_markup(txt)
password_strength.show()
if passw == vpassw:
password_ok.set_from_icon_name(icon_ok, Gtk.IconSize.LARGE_TOOLBAR)
password_ok.show()
password_false.hide()
else:
password_false.set_from_icon_name(icon_warning, Gtk.IconSize.LARGE_TOOLBAR)
password_false.show()
password_ok.hide()
return complete
| manjaro/thus | thus/misc/validation.py | Python | gpl-3.0 | 5,581 | 0.000896 |
#!/usr/bin/python
# coding: utf-8
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Retrieves a saved report, or a report for the specified ad client.
To get ad clients, run get_all_ad_clients.py.
Tags: reports.generate
"""
__author__ = '[email protected] (Dean Lukies)'
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
'--ad_client_id',
help='The ID of the ad client for which to generate a report')
argparser.add_argument(
'--report_id',
help='The ID of the saved report to generate')
def main(argv):
# Authenticate and construct service.
service, flags = sample_tools.init(
argv, 'adexchangeseller', 'v2.0', __doc__, __file__, parents=[argparser],
scope='https://www.googleapis.com/auth/adexchange.seller.readonly')
# Process flags and read their values.
ad_client_id = flags.ad_client_id
saved_report_id = flags.report_id
try:
# Retrieve report.
if saved_report_id:
result = service.accounts().reports().saved().generate(
savedReportId=saved_report_id, accountId='myaccount').execute()
elif ad_client_id:
result = service.accounts().reports().generate(
accountId='myaccount',
startDate='2014-07-01', endDate='2014-08-01',
filter=['AD_CLIENT_ID==' + ad_client_id],
metric=['PAGE_VIEWS', 'AD_REQUESTS', 'AD_REQUESTS_COVERAGE',
'CLICKS', 'AD_REQUESTS_CTR', 'COST_PER_CLICK',
'AD_REQUESTS_RPM', 'EARNINGS'],
dimension=['DATE'],
sort=['+DATE']).execute()
else:
argparser.print_help()
sys.exit(1)
# Display headers.
for header in result['headers']:
print '%25s' % header['name'],
print
# Display results.
for row in result['rows']:
for column in row:
print '%25s' % column,
print
except client.AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
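# Example invocations (the IDs below are placeholders, not real accounts):
# python generate_report.py --ad_client_id ca-pub-1234567890
# python generate_report.py --report_id 1234567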
| googleads/googleads-adxseller-examples | python/v2.0/generate_report.py | Python | apache-2.0 | 2,740 | 0.006204 |
"""overflow URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
# from rest_framework import routers
from rest_framework_nested import routers
from stackoverflow import views
router = routers.SimpleRouter()
router.register(r'questions', views.QuestionViewSet)
router.register(r'users', views.UserViewSet)
questions_router = routers.NestedSimpleRouter(router, r'questions', lookup='question')
questions_router.register(r'answers', views.AnswerViewSet)
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^api/', include(router.urls)),
url(r'^api/', include(questions_router.urls)),
url(r'^docs/', include('rest_framework_swagger.urls')),
url(r'^api-auth/', include('rest_framework.urls',
namespace='rest_framework')),
]
| TIY-Durham/TIY-Overflow | api/overflow/urls.py | Python | gpl-3.0 | 1,416 | 0.000706 |
from thinglang.compiler.opcodes import OpcodeJump
from thinglang.parser.blocks.conditional import Conditional
from thinglang.parser.blocks.loop import Loop
from thinglang.parser.nodes import BaseNode
class BreakStatement(BaseNode):
"""
Jumps to the end of the currently executing loop or conditional
"""
EMITTABLE = True
MUST_CLOSE = False
def __init__(self, raw, source_ref):
super().__init__([])
self.source_ref = source_ref
def compile(self, context): # TODO: assert no children
container = self.ascend(Loop)
if not container:
raise Exception('Cannot break outside of loop') # TODO: should be StructureError
context.append(OpcodeJump(context.jump_out[container]), self.source_ref)
| ytanay/thinglang | thinglang/parser/statements/break_statement.py | Python | mit | 770 | 0.003896 |
from .DiscreteFactor import State, DiscreteFactor
from .CPD import TabularCPD
from .JointProbabilityDistribution import JointProbabilityDistribution
__all__ = ['TabularCPD',
'DiscreteFactor',
'State'
]
| khalibartan/pgmpy | pgmpy/factors/discrete/__init__.py | Python | mit | 236 | 0 |
from django.apps import AppConfig
class NexusFrontEndConfig(AppConfig):
name = 'nexus_front_end'
| utarsuno/urbtek | nexus_django/nexus_front_end/apps.py | Python | apache-2.0 | 103 | 0 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Sale Exceptions Ingore Approve Directly",
'version': '8.0.1.0.0',
'category': 'Product',
'sequence': 14,
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'license': 'AGPL-3',
    'summary': 'When ignoring a sale exception, approve the sale order directly',
"description": """
Sale Exceptions Ignore Approve Directly
=======================================
When ignoring a sale exception, approve the sale order directly
""",
"depends": [
"sale_exceptions",
],
'external_dependencies': {
},
"data": [
'wizard/sale_exception_confirm_view.xml',
'views/sale_view.xml',
],
'demo': [
],
'test': [
],
"installable": True,
'auto_install': False,
'application': False,
}
| sysadminmatmoz/ingadhoc | sale_exceptions_ignore_approve/__openerp__.py | Python | agpl-3.0 | 1,775 | 0 |
# Copyright (c) 2015 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from datetime import date, timedelta
from decimal import Decimal
from six.moves import zip
from django.test import TestCase
from silver.models import DocumentEntry, Proforma, Invoice
from silver.tests.factories import (ProformaFactory, InvoiceFactory,
DocumentEntryFactory, CustomerFactory)
class TestInvoice(TestCase):
def test_pay_invoice_related_proforma_state_change_to_paid(self):
proforma = ProformaFactory.create()
proforma.issue()
proforma.create_invoice()
assert proforma.related_document.state == Invoice.STATES.ISSUED
proforma.related_document.pay()
assert proforma.related_document.state == Invoice.STATES.PAID
assert proforma.state == Proforma.STATES.PAID
def test_clone_invoice_into_draft(self):
invoice = InvoiceFactory.create()
invoice.issue()
invoice.pay()
entries = DocumentEntryFactory.create_batch(3)
invoice.invoice_entries.add(*entries)
clone = invoice.clone_into_draft()
assert clone.state == Invoice.STATES.DRAFT
assert clone.paid_date is None
assert clone.issue_date is None
assert clone.related_document is None
assert (clone.series != invoice.series or
clone.number != invoice.number)
assert clone.sales_tax_percent == invoice.sales_tax_percent
assert clone.sales_tax_name == invoice.sales_tax_name
assert not clone.archived_customer
assert not clone.archived_provider
assert clone.customer == invoice.customer
assert clone.provider == invoice.provider
assert clone.currency == invoice.currency
assert clone._last_state == clone.state
assert clone.pk != invoice.pk
assert clone.id != invoice.id
assert not clone.pdf
assert clone.invoice_entries.count() == 3
assert invoice.invoice_entries.count() == 3
entry_fields = [entry.name for entry in DocumentEntry._meta.get_fields()]
for clone_entry, original_entry in zip(clone.invoice_entries.all(),
invoice.invoice_entries.all()):
for entry in entry_fields:
if entry not in ('id', 'proforma', 'invoice'):
assert getattr(clone_entry, entry) == \
getattr(original_entry, entry)
assert invoice.state == Invoice.STATES.PAID
def test_cancel_issued_invoice_with_related_proforma(self):
proforma = ProformaFactory.create()
proforma.issue()
if not proforma.related_document:
proforma.create_invoice()
proforma.related_document.cancel()
assert proforma.related_document.state == proforma.state == Invoice.STATES.CANCELED
def _get_decimal_places(self, number):
return max(0, -number.as_tuple().exponent)
def test_invoice_total_decimal_points(self):
invoice_entries = DocumentEntryFactory.create_batch(3)
invoice = InvoiceFactory.create(invoice_entries=invoice_entries)
assert self._get_decimal_places(invoice.total) == 2
def test_invoice_total_before_tax_decimal_places(self):
invoice_entries = DocumentEntryFactory.create_batch(3)
invoice = InvoiceFactory.create(invoice_entries=invoice_entries)
invoice.sales_tax_percent = Decimal('20.00')
assert self._get_decimal_places(invoice.total_before_tax) == 2
def test_invoice_tax_value_decimal_places(self):
invoice_entries = DocumentEntryFactory.create_batch(3)
invoice = InvoiceFactory.create(invoice_entries=invoice_entries)
invoice.sales_tax_percent = Decimal('20.00')
assert self._get_decimal_places(invoice.tax_value) == 2
def test_invoice_total_with_tax_integrity(self):
invoice_entries = DocumentEntryFactory.create_batch(5)
invoice = InvoiceFactory.create(invoice_entries=invoice_entries)
invoice.sales_tax_percent = Decimal('20.00')
self.assertEqual(invoice.total, invoice.total_before_tax + invoice.tax_value)
def test_draft_invoice_series_number(self):
invoice = InvoiceFactory.create()
invoice.number = None
assert invoice.series_number == '%s-draft-id:%d' % (invoice.series,
invoice.pk)
invoice.series = None
assert invoice.series_number == 'draft-id:%d' % invoice.pk
def test_issues_invoice_series_number(self):
invoice = InvoiceFactory.create(state=Invoice.STATES.ISSUED)
assert invoice.series_number == '%s-%s' % (invoice.series,
invoice.number)
def test_invoice_due_today_queryset(self):
invoices = InvoiceFactory.create_batch(5)
invoices[0].due_date = date.today()
invoices[0].save()
invoices[1].due_date = date.today()
invoices[1].issue()
invoices[2].due_date = date.today() - timedelta(days=1)
invoices[2].issue()
invoices[2].pay()
invoices[3].due_date = date.today()
invoices[3].issue()
invoices[3].cancel()
invoices[4].due_date = date.today() + timedelta(days=1)
invoices[4].issue()
queryset = Invoice.objects.due_today()
assert queryset.count() == 1
assert invoices[1] in queryset
def test_invoice_due_this_month_queryset(self):
invoices = InvoiceFactory.create_batch(4)
invoices[0].due_date = date.today().replace(day=20)
invoices[0].issue()
invoices[1].due_date = date.today().replace(day=1)
invoices[1].issue()
invoices[2].due_date = date.today() - timedelta(days=31)
invoices[2].issue()
invoices[3].issue()
invoices[3].cancel()
queryset = Invoice.objects.due_this_month()
assert queryset.count() == 2
for invoice in invoices[:2]:
assert invoice in queryset
def test_invoice_overdue_queryset(self):
invoices = InvoiceFactory.create_batch(3)
invoices[0].due_date = date.today() - timedelta(days=1)
invoices[0].issue()
invoices[1].due_date = date.today() - timedelta(days=3)
invoices[1].issue()
invoices[2].due_date = date.today() - timedelta(days=31)
invoices[2].issue()
invoices[2].pay()
queryset = Invoice.objects.overdue()
assert queryset.count() == 2
for invoice in invoices[:2]:
assert invoice in queryset
def test_invoice_overdue_since_last_month_queryset(self):
invoices = InvoiceFactory.create_batch(3)
invoices[0].due_date = date.today().replace(day=1)
invoices[0].issue()
invoices[1].due_date = date.today() - timedelta(days=31)
invoices[1].issue()
queryset = Invoice.objects.overdue_since_last_month()
assert queryset.count() == 1
assert invoices[1] in queryset
def test_customer_currency_used_for_transaction_currency(self):
customer = CustomerFactory.create(currency='EUR')
invoice = InvoiceFactory.create(customer=customer,
transaction_currency=None)
self.assertEqual(invoice.transaction_currency, 'EUR')
def test_invoice_currency_used_for_transaction_currency(self):
customer = CustomerFactory.create(currency=None)
invoice = InvoiceFactory.create(customer=customer,
currency='EUR',
transaction_currency=None)
self.assertEqual(invoice.transaction_currency, 'EUR')
| PressLabs/silver | silver/tests/unit/test_invoice.py | Python | apache-2.0 | 8,323 | 0.00036 |
#!/usr/bin/env python3
"""
Find characters deep in the expanded string, for fun.
"""
import sys
from collections import Counter
def real_step(s, rules):
out = ""
for i in range(len(s)):
out += s[i]
k = s[i:i+2]
if k in rules:
out += rules[k]
return out
def step(cnt, rules):
ncnt = Counter()
for k, v in cnt.items():
if k in rules:
c = rules[k]
ncnt[k[0] + c] += v
ncnt[c + k[1]] += v
else:
ncnt[k] += v
return ncnt
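# For example, with rules {"AB": "C"}, step(Counter({"AB": 1}), rules) returns
# Counter({"AC": 1, "CB": 1}): each "AB" pair is replaced by the two pairs that
# surround the inserted "C", without materialising the full string.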
def size(s, n, rules):
cnt = Counter(s[i:i+2] for i in range(len(s)-1))
for _ in range(n):
cnt = step(cnt, rules)
lcnt = Counter(s[0])
for k, v in cnt.items():
lcnt[k[1]] += v
return sum(lcnt.values())
def get_char(s, idx, iters, rules):
for i in range(iters):
h = len(s) // 2
first = s[:h+1]
sz = size(first, iters - i, rules)
if idx < sz:
s = real_step(first, rules)
else:
s = real_step(s[h:], rules)
idx -= sz - 1
return s[idx]
def main(args):
data = [s.strip() for s in sys.stdin]
s = data[0]
rules = dict(x.split(" -> ") for x in data[2:])
# Make sure it works
t = s
for i in range(4):
t = real_step(t, rules)
for idx in range(len(t)):
c = get_char(s, idx, 4, rules)
assert t[idx] == c
# find some random characters deep into it
print(size(s, 40, rules))
start = 7311752324710
out = ""
for i in range(10):
out += get_char(s, start + i, 40, rules)
print(out)
if __name__ == '__main__':
main(sys.argv)
| msullivan/advent-of-code | 2021/14balt.py | Python | mit | 1,674 | 0 |
#!/usr/bin/env python
from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
readme = path.join(here, 'README.md')
try:
from pypandoc import convert
long_description = convert(readme, 'rst')
except ImportError:
print("warning: pypandoc module not found, could not convert Markdown to RST")
with open(readme, 'r', encoding='utf-8') as f:
long_description = f.read()
setup(
name='callipy',
description='Calling IPython notebooks with arguments',
long_description=long_description,
version='0.3.2',
author='Damien Drix',
author_email='[email protected]',
url='https://github.com/damiendr/callipy',
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: IPython',
],
py_modules=['callipy'],
install_requires=[
"runipy",
"ipython",
],
)
| damiendr/callipy | setup.py | Python | bsd-2-clause | 1,056 | 0.004735 |
# coding: utf8
# ! /usr/env/python
"""profiler.py component to create profiles with user-defined endpoints."""
from collections import OrderedDict
import numpy as np
from matplotlib import cm, colors, pyplot as plt
from landlab.components.profiler.base_profiler import _BaseProfiler
class Profiler(_BaseProfiler):
"""Extract and plot profiles set up using points within a grid.
The profile is constructed from the first to final point in ``endpoints``.
Endpoints are located at grid nodes. Two successive endpoints bound a
profile segment. A profile with one segment is a straight line. The
segments of a profile with multiple segments meet at endpoints. The grid
nodes along the profile are sampled, including the segment endpoints. The
extracted quantity of the node is retained. No interpolation is conducted
even for profile traces that reside between nodes.
The structure of the profile in a model grid is diagrammed below. The grid
contains nine columns and nine rows. The profile is constructed from three
endpoints that bound two segments. Here, ``o`` indicates a segment
endpoint, ``.`` and ``*`` are sample nodes of the first and second segment,
respectively. ``X`` are nodes not included in the profile. The first
segment begins in the lower-left and continues horizontally and almost
reaches the right boundary. The second segment is joined to the first in
the lower-right of the grid and it continues diagonally to the upper-left.
Segments have seven sample points each (nodes at endpoints are also
sampled). The segments share the second endpoint. Segment and sample
ordering is dictated by the ordering of endpoints. If the horizontal
segment is the first segment, the endpoints used to construct this profile
must be ordered: lower-left, lower-right, and then upper-left.::
X X X X X X X X X
X o X X X X X X X
X X * X X X X X X
X X X * X X X X X
X X X X * X X X X
X X X X X * X X X
X X X X X X * X X
X o . . . . . o X
X X X X X X X X X
The node IDs and distances along the profile are stored in a data structure
called ``data_structure``. It is a dictionary with keys indicating the
segment IDs that are enumerated along the profile.
By default, a unique color will be assigned to each segment. To change the
color, a user can change values stored in ``data_structure``. Additionally,
a ``cmap`` keyword argument can provide some user control over the color at
the instantiation of the component.
The data structure of the example above will look as follows:
.. code-block:: python
{0: {
'ids': [10, 11, 12, 13, 14, 15, 16],
'distances': [0, 1, 2, 3, 4, 5, 6]
'color': (0.27, 0, 0.33, 1)
},
1: {
'ids': [16, 24, 32, 40, 48, 56, 64],
'distances': [6, 7.41, 8.83, 10.24, 11.66, 13.07, 14.49]
'color': (0.13, 0.57, 0.55, 1)
}
}
Examples
--------
Create a model grid with the same dimensions as the diagram above.
>>> from landlab import RasterModelGrid
>>> from landlab.components import Profiler
>>> import numpy as np
>>> mg = RasterModelGrid((10, 10), 10)
>>> mg.at_node['topographic__elevation'] = mg.node_x * mg.node_y
Create a profile with three endpoints. This profile is laid out the same as
the diagram above.
>>> endpoints = [10, 16, 64]
>>> profiler = Profiler(mg, endpoints)
>>> profiler.run_one_step()
The keys of the data structure are the segment ids.
>>> profiler.data_structure.keys()
odict_keys([0, 1])
The data structure contains data of segment samples. Below is the first
segment.
>>> profiler.data_structure[0]['ids']
array([10, 11, 12, 13, 14, 15, 16])
>>> profiler.data_structure[0]['distances']
array([ 0., 10., 20., 30., 40., 50., 60.])
>>> np.round(profiler.data_structure[0]['color'], decimals=2)
array([ 0.27, 0. , 0.33, 1. ])
Note that the first node of the second segment is the same as the final
node of the first segment.
>>> profiler.data_structure[1]['ids']
array([16, 26, 35, 45, 54, 64])
    As an alternative to nodes, profiles can be instantiated with coordinates.
>>> profiler = Profiler(mg, [(10, 10), (70, 10), (10, 70)])
Endpoints can also be set with a combination of coordinates and nodes.
>>> profiler = Profiler(mg, [(10, 10), 16, (10, 70)])
References
----------
**Required Software Citation(s) Specific to this Component**
None Listed
**Additional References**
None Listed
"""
_name = "Profiler"
_unit_agnostic = True
def __init__(self, grid, endpoints, cmap="viridis"):
"""Instantiate Profiler.
Parameters
----------
grid : RasterModelGrid
A landlab RasterModelGrid.
endpoints : list of node id integers or coordinate tuples
The endpoints that bound segments of the profile. Endpoints can be
node ids and/or tuples of coordinates (x, y, where these
coordinates are the measurement from the grid lower-left). The list
can be a mix of node ids and coordinate tuples. The profile begins
with the first element of `endpoints` and continues in the order of
this list.
cmap : str
A valid matplotlib cmap string. Default is "viridis".
"""
super().__init__(grid)
self._cmap = plt.get_cmap(cmap)
if not isinstance(endpoints, list) or len(endpoints) < 2:
msg = (
"`endpoints` must be a list of at least 2 node IDs or a "
"list of at least two tuples where each tuple contains the "
"x, y coordinates of endpoints."
)
raise ValueError(msg)
# Check if `endpoints` are within grid bounds while setting
# `_end_nodes`.
self._end_nodes = []
for point in endpoints:
node, _ = self._get_node_and_coords(point)
self._end_nodes.append(node)
@property
def data_structure(self):
"""OrderedDict defining the profile.
The node IDs and distances along the profile are stored in
``data_structure``. It is a dictionary with keys of the segment ID.
The value of each key is itself a dictionary of the segment attributes.
First, 'ids' contains a list of the node IDs of segment samples ordered
from the start to the end of the segment. It includes the endpoints.
Second, 'distances' contains a list of along-profile distances that
mirrors the list in 'ids'. Finally, 'color' is an RGBA tuple indicating
the color for the segment.
"""
return self._data_struct
def _create_profile_structure(self):
"""Create the data structure of the profile.
The profile is processed by segment. Segments are bound by successive
endpoints. The cumulative distance along the profile is accumulated by
iteratively adding segment lengths.
"""
self._data_struct = OrderedDict()
grid = self._grid
endnodes = self._end_nodes
cum_dist = 0
for i_endpt in range(len(endnodes) - 1):
# Get the endpoints and samples of the segment.
start_node, start_xy = self._get_node_and_coords(endnodes[i_endpt])
end_node, end_xy = self._get_node_and_coords(endnodes[i_endpt + 1])
sample_nodes = self._get_sample_nodes(start_node, end_node)
# Calculate the along-profile distance of samples along the
# segment.
n_samples = len(sample_nodes)
sample_distances = np.empty(n_samples, dtype=float)
for i_sample, node in enumerate(sample_nodes):
sample_xy = grid.xy_of_node[node]
pt = self._project_point_onto_line(sample_xy, start_xy, end_xy)
d = grid.calc_distances_of_nodes_to_point(pt, node_subset=start_node)
sample_distances[i_sample] = d
# Store the segment data.
self._data_struct[i_endpt] = {
"ids": np.array(sample_nodes),
"distances": sample_distances + cum_dist,
}
cum_dist += max(sample_distances)
self._assign_colors()
self._create_flat_structures()
def _assign_colors(self, color_mapping=None):
"""Assign a unique color for each segment.
Parameters
----------
color_mapping : str
Color map name.
"""
if color_mapping is None:
segment_count = len(self._data_struct)
norm = colors.Normalize(vmin=0, vmax=segment_count)
mappable = cm.ScalarMappable(norm=norm, cmap=self._cmap)
color_mapping = {
segment_id: mappable.to_rgba(idx)
for idx, segment_id in enumerate(self._data_struct)
}
for segment_id in self._data_struct:
self._data_struct[segment_id]["color"] = color_mapping[segment_id]
def _create_flat_structures(self):
"""Create expected flattened structures for ids, distances, and colors."""
self._nodes = []
self._distance_along_profile = []
self._colors = []
for segment_id in self._data_struct:
self._nodes.append(self._data_struct[segment_id]["ids"])
self._distance_along_profile.append(
self._data_struct[segment_id]["distances"]
)
self._colors.append(self._data_struct[segment_id]["color"])
def _get_node_and_coords(self, point):
"""Get the node and coordinates for a point.
This method handles the option that endpoints can be a node or tuple.
The grid methods called here verify if the point is within the grid.
"""
if isinstance(point, (float, int, np.integer)):
return point, self._grid.xy_of_node[point]
elif isinstance(point, (tuple, list, np.ndarray)) and len(point) == 2:
return self._grid.find_nearest_node(point), point
else:
raise TypeError(
"each element of `endpoints` must be a number "
"representing a node id or a tuple of node x, y "
"coordinates"
)
def _get_sample_nodes(self, start_node, end_node):
"""Get the profile sample nodes using Bresenham's line algorithm.
Parameters
----------
start_node, end_node : integer
The node id of a profile endpoint.
Returns
-------
list of integers
The node ids of the profile samples.
Notes
-----
See: https://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm
"""
# Get node column and row numbers to act as the coordinates.
y0, x0 = np.argwhere(self._grid.nodes == start_node)[0]
y1, x1 = np.argwhere(self._grid.nodes == end_node)[0]
dx = x1 - x0
dy = y1 - y0
trace_is_steep = abs(dy) > abs(dx)
if trace_is_steep:
x0, y0 = y0, x0
x1, y1 = y1, x1
flipped_nodes = x0 > x1
if flipped_nodes:
x0, x1 = x1, x0
y0, y1 = y1, y0
dx = x1 - x0
dy = y1 - y0
error = int(dx / 2.0)
if y0 < y1:
y_step = 1
else:
y_step = -1
# Iterate within the bounding box to identify the profile sample nodes.
samples = []
y = y0
for x in range(x0, x1 + 1):
if trace_is_steep:
coord = (x, y)
else:
coord = (y, x)
samples.append(self._grid.grid_coords_to_node_id(*coord))
error -= abs(dy)
if error < 0:
y += y_step
error += dx
if flipped_nodes:
samples.reverse()
return samples
def _project_point_onto_line(self, p, ep0, ep1):
"""Get the coordinates along a line nearest to a point.
Parameters
----------
p : tuple of floats
The x, y coordinates of the point to project onto the line.
ep0, ep1 : tuple of floats
The endpoints of the line. Each endpoint is a tuple of x, y
coordinates.
Returns
-------
tuple
The x, y coordinates along a line (bounded by `ep1` and `ep2`) that
is nearest to `p`.
"""
dx, dy = ep1[0] - ep0[0], ep1[1] - ep0[1]
determinant = dx * dx + dy * dy
coeff = (dy * (p[1] - ep0[1]) + dx * (p[0] - ep0[0])) / determinant
return ep0[0] + coeff * dx, ep0[1] + coeff * dy
def plot_profiles_in_map_view(
self, field="topographic__elevation", endpoints_only=True, **kwds
):
"""Plot profile locations in map view.
This method overrides the method in ``_BaseProfiler`` to set the
default of ``endpoints_only`` to True.
Parameters
----------
field : field name or nnode array
Array of the at-node-field to plot as the 2D map values.
Default value is the at-node field 'topographic__elevation'.
endpoints_only : boolean
Boolean where False indicates every node along the profile is
plotted, or True (default) indicating only segment endpoints are
plotted.
**kwds : dictionary
Keyword arguments to pass to imshow_grid.
"""
super().plot_profiles_in_map_view(field, endpoints_only=endpoints_only, **kwds)
| landlab/landlab | landlab/components/profiler/profiler.py | Python | mit | 13,854 | 0.000217 |
import os
with open("validation_classes.csv", "r") as f:
rows = f.readlines()
rows = rows[1:-1]
rows = [x for x in rows if x != "\n"]
path = "dataset/val/"
for row in rows:
rsplit = row.split(";")
filename = rsplit[0]
c = int(rsplit[1])
new_filename = format(c,'05d') + "_" + filename
if os.path.exists(path + filename):
os.rename(path + filename, path + new_filename)
| alessiamarcolini/deepstreet | utils/format_validation_filenames_util.py | Python | mit | 408 | 0.002451 |
import math
def linear(x):
return x
def quad(x):
return x*x
def quadout(x):
    return 1 - quad(x)
def cubic(x):
return x*x*x
def cubicout(x):
return 1 - cubic(x)
def quint(x):
return x*x*x*x
def quintout(x):
return 1-quint(x)
def sine(x):
    return -math.cos(x * (math.pi * .5)) + 1
def sineout(x):
    return 1-sine(x)
def cosine(x):
    return -math.sin(x * (math.pi * .5)) + 1
def cosineout(x):
return 1-cosine(x)
ease = {
"linear":linear,
"quad":quad,
"quad-out":quadout,
"cubic":cubic,
"cubic-out":cubicout,
"quint":quint,
"quint-out":quintout,
"sine":sine,
"sine-out":sineout,
"cosine":cosine,
"cosine-out":cosineout,
}
def findDistance(x,y):
if not x or not y:
return 0
else:
return max(x,y)-min(x,y)
class single:
def __init__(self,time,item,exp,mode="linear"):
self.progress = 0
self.rate = time > 0 and 1 / time or 0
self.start = item
self.current = item
self.diff = exp-item
self.mode = mode
self.exp = exp
self.done = False
self.delay = 0
self.initt = 0
def get(self):
return self.current
def update(self,dt):
self.progress = self.progress + self.rate * dt
p = self.progress
x = p >= 1 and 1 or ease[self.mode](p)
self.current = self.start + x*self.diff
        if p >= 1:
self.done = True
class _to:
    def __init__(self,time,obj,var,mode="linear",done=None,parent=None):
        self.tweens = []
        self.var = var
        self.obj = obj
        self.done = False
        self.onComplete = done
        self.initt = 0
        self._delay = 0
        self._after = None
        self.parent = parent
        # key: attribute name, value: target value (a number or a list of numbers)
        for i,v in var.items():
            if type(v) in (int, float):
                item = single(time,getattr(obj,i),v,mode)
                self.tweens.append(item)
            elif type(v) == list:
                t = getattr(obj,i)
                no = 0
                for target in v:
                    item = single(time,t[no],target,mode)
                    self.tweens.append(item)
                    no += 1
            else:
                print("The item: " + str(v) + " for " + i + " is not a number or a list!")
    def update(self,dt):
        if self.initt > self._delay:
            no = 0
            items = []
            # NOTE: this pairing of var keys with tween indices assumes one
            # tween (a scalar target) per attribute.
            for i,v in self.var.items():
                self.tweens[no].update(dt)
                setattr(self.obj,i,self.tweens[no].get())
                if self.tweens[no].done:
                    items.append(i)
                no = no + 1
            # Drop finished tweens; iterate over a copy so removal is safe.
            for item in self.tweens[:]:
                if item.done:
                    self.tweens.remove(item)
            for item in items:
                self.var.pop(item, None)
            if len(self.tweens) == 0:
                self.done = True
                # A tween chained with after() is already queued on the parent,
                # so only fire the completion callback for the last link.
                if not self._after and self.onComplete:
                    self.onComplete()
        else:
            self.initt += dt
    def after(self,time,var,mode="linear"):
        self._after = _to(time,self.obj,var,mode,None,self.parent)
        self.parent.tweens.append(self._after)
        return self._after
    def delay(self,t):
        self._delay = t
    def stop(self):
        self.parent.tweens.remove(self)
class Tween():
def __init__(self):
self.tweens = []
pass
# VAR HAS TO BE DICT WITH STR:EXPVAL
    def to(self,time,obj,var,mode="linear",func=None):
        mode = (mode or "linear").lower()
        t = _to(time,obj,var,mode,func,self)
        self.tweens.append(t)
        return t
    def update(self,dt):
        for tween in self.tweens[:]:
            tween.update(dt)
            if tween.done:
                self.tweens.remove(tween)
pass
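if __name__ == "__main__":
    # Illustrative usage sketch (added for clarity, not part of the original
    # module). The _Dummy class, the 1-second duration and the 0.1s step are
    # arbitrary example values.
    class _Dummy(object):
        def __init__(self):
            self.x = 0
    d = _Dummy()
    tw = Tween()
    tw.to(1.0, d, {"x": 100})
    for _ in range(12):
        tw.update(0.1)  # would normally be the per-frame delta time
    print(d.x)  # d.x has been eased toward 100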
| devfirefly/PyFlax | Tween.py | Python | mit | 4,128 | 0.03125 |
# coding=utf-8
import zipfile
import re
import os
import hashlib
import json
import logging
from django.shortcuts import render
from django.db.models import Q, Count
from django.core.paginator import Paginator
from rest_framework.views import APIView
from django.conf import settings
from account.models import SUPER_ADMIN
from account.decorators import super_admin_required
from utils.shortcuts import (serializer_invalid_response, error_response,
success_response, paginate, rand_str, error_page)
from .serizalizers import (CreateProblemSerializer, EditProblemSerializer, ProblemSerializer,
ProblemTagSerializer, CreateProblemTagSerializer)
from .models import Problem, ProblemTag
from .decorators import check_user_problem_permission
logger = logging.getLogger("app_info")
def problem_page(request, problem_id):
"""
    Front-end problem detail page.
"""
try:
problem = Problem.objects.get(id=problem_id, visible=True)
except Problem.DoesNotExist:
return error_page(request, u"题目不存在")
return render(request, "oj/problem/problem.html", {"problem": problem, "samples": json.loads(problem.samples)})
class ProblemTagAdminAPIView(APIView):
"""
    Get the list of all tags.
"""
def get(self, request):
return success_response(ProblemTagSerializer(ProblemTag.objects.all(), many=True).data)
class ProblemAdminAPIView(APIView):
@super_admin_required
def post(self, request):
"""
        JSON API endpoint for publishing a problem.
---
request_serializer: CreateProblemSerializer
response_serializer: ProblemSerializer
"""
serializer = CreateProblemSerializer(data=request.data)
if serializer.is_valid():
data = serializer.data
problem = Problem.objects.create(title=data["title"],
description=data["description"],
input_description=data["input_description"],
output_description=data["output_description"],
test_case_id=data["test_case_id"],
source=data["source"],
samples=json.dumps(data["samples"]),
time_limit=data["time_limit"],
memory_limit=data["memory_limit"],
difficulty=data["difficulty"],
created_by=request.user,
hint=data["hint"],
visible=data["visible"])
for tag in data["tags"]:
try:
tag = ProblemTag.objects.get(name=tag)
except ProblemTag.DoesNotExist:
tag = ProblemTag.objects.create(name=tag)
problem.tags.add(tag)
return success_response(ProblemSerializer(problem).data)
else:
return serializer_invalid_response(serializer)
@check_user_problem_permission
def put(self, request):
"""
        JSON API endpoint for editing a problem.
---
request_serializer: EditProblemSerializer
response_serializer: ProblemSerializer
"""
serializer = EditProblemSerializer(data=request.data)
if serializer.is_valid():
data = serializer.data
problem = Problem.objects.get(id=data["id"])
problem.title = data["title"]
problem.description = data["description"]
problem.input_description = data["input_description"]
problem.output_description = data["output_description"]
problem.test_case_id = data["test_case_id"]
problem.source = data["source"]
problem.time_limit = data["time_limit"]
problem.memory_limit = data["memory_limit"]
problem.difficulty = data["difficulty"]
problem.samples = json.dumps(data["samples"])
problem.hint = data["hint"]
problem.visible = data["visible"]
            # Remove the existing tag associations
            problem.tags.remove(*problem.tags.all())
            # Re-add all the tags
for tag in data["tags"]:
try:
tag = ProblemTag.objects.get(name=tag)
except ProblemTag.DoesNotExist:
tag = ProblemTag.objects.create(name=tag)
problem.tags.add(tag)
problem.save()
return success_response(ProblemSerializer(problem).data)
else:
return serializer_invalid_response(serializer)
def get(self, request):
"""
        Paginated problem list JSON API endpoint.
---
response_serializer: ProblemSerializer
"""
problem_id = request.GET.get("problem_id", None)
if problem_id:
try:
                # Regular admins can only fetch problems they created themselves
                # Super admins can fetch all problems
problem = Problem.objects.get(id=problem_id)
if request.user.admin_type != SUPER_ADMIN:
                    problem = Problem.objects.get(id=problem_id, created_by=request.user)
return success_response(ProblemSerializer(problem).data)
except Problem.DoesNotExist:
return error_response(u"题目不存在")
        # Fetch the problem list
problems = Problem.objects.all().order_by("-create_time")
if request.user.admin_type != SUPER_ADMIN:
problems = problems.filter(created_by=request.user)
visible = request.GET.get("visible", None)
if visible:
problems = problems.filter(visible=(visible == "true"))
keyword = request.GET.get("keyword", None)
if keyword:
problems = problems.filter(Q(title__contains=keyword) |
Q(description__contains=keyword))
return paginate(request, problems, ProblemSerializer)
class TestCaseUploadAPIView(APIView):
"""
    Upload the test cases for a problem.
"""
def _is_legal_test_case_file_name(self, file_name):
        # Must start with a positive integer and end in .in or .out
regex = r"^[1-9]\d*\.(in|out)$"
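        # e.g. "1.in" and "12.out" are legal; "0.in", "01.in" and "1.txt" are not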
return re.compile(regex).match(file_name) is not None
def post(self, request):
if "file" not in request.FILES:
return error_response(u"文件上传失败")
f = request.FILES["file"]
tmp_zip = "/tmp/" + rand_str() + ".zip"
try:
with open(tmp_zip, "wb") as test_case_zip:
for chunk in f:
test_case_zip.write(chunk)
except IOError as e:
logger.error(e)
return error_response(u"上传失败")
test_case_file = zipfile.ZipFile(tmp_zip, 'r')
name_list = test_case_file.namelist()
l = []
        # If the files were zipped directly, name_list looks like ["1.in", "1.out"]
        # If there is an extra test_case folder, name_list looks like ["test_case/", "test_case/1.in", "test_case/1.out"]
        # Only the first layout is supported for now, so first check which format this is
        # First layout:
if "1.in" in name_list and "1.out" in name_list:
for file_name in name_list:
if self._is_legal_test_case_file_name(file_name):
name = file_name.split(".")
                # Found a .in file, check whether the matching .out exists
if name[1] == "in":
if (name[0] + ".out") in name_list:
l.append(file_name)
else:
return error_response(u"测试用例文件不完整,缺少" + name[0] + ".out")
else:
                    # Found a .out file, check whether the matching .in exists
if (name[0] + ".in") in name_list:
l.append(file_name)
else:
return error_response(u"测试用例文件不完整,缺少" + name[0] + ".in")
problem_test_dir = rand_str()
test_case_dir = settings.TEST_CASE_DIR + problem_test_dir + "/"
            # We now have the list of valid test case files, so extract them
os.mkdir(test_case_dir)
for name in l:
f = open(test_case_dir + name, "wb")
try:
f.write(test_case_file.read(name).replace("\r\n", "\n"))
except MemoryError:
return error_response(u"单个测试数据体积过大!")
finally:
f.close()
l.sort()
file_info = {"test_case_number": len(l) / 2, "test_cases": {}}
            # Compute the md5 of the output files
for i in range(len(l) / 2):
md5 = hashlib.md5()
striped_md5 = hashlib.md5()
f = open(test_case_dir + str(i + 1) + ".out", "r")
            # md5 of the complete file
while True:
data = f.read(2 ** 8)
if not data:
break
md5.update(data)
            # Strip trailing whitespace and newlines from the reference output
            # This requires reading the whole file at once; with chunked reads the end of file cannot be detected
f.seek(0)
striped_md5.update(f.read().rstrip())
file_info["test_cases"][str(i + 1)] = {"input_name": str(i + 1) + ".in",
"output_name": str(i + 1) + ".out",
"output_md5": md5.hexdigest(),
"striped_output_md5": striped_md5.hexdigest(),
"output_size": os.path.getsize(test_case_dir + str(i + 1) + ".out")}
            # Write the info/config file
open(test_case_dir + "info", "w").write(json.dumps(file_info))
return success_response({"test_case_id": problem_test_dir,
"file_list": {"input": l[0::2],
"output": l[1::2]}})
else:
return error_response(u"测试用例压缩文件格式错误,请保证测试用例文件在根目录下直接压缩")
def problem_list_page(request, page=1):
"""
    Front-end problem list page.
"""
    # Default case: all visible problems
problems = Problem.objects.filter(visible=True)
    # Search case: filter by keyword
keyword = request.GET.get("keyword", None)
if keyword:
problems = problems.filter(Q(title__contains=keyword) | Q(description__contains=keyword))
difficulty_order = request.GET.get("order_by", None)
if difficulty_order:
if difficulty_order[0] == "-":
problems = problems.order_by("-difficulty")
difficulty_order = "difficulty"
else:
problems = problems.order_by("difficulty")
difficulty_order = "-difficulty"
else:
difficulty_order = "difficulty"
    # Filter by tag
tag_text = request.GET.get("tag", None)
if tag_text:
try:
tag = ProblemTag.objects.get(name=tag_text)
except ProblemTag.DoesNotExist:
return error_page(request, u"标签不存在")
problems = tag.problem_set.all().filter(visible=True)
paginator = Paginator(problems, 20)
try:
current_page = paginator.page(int(page))
except Exception:
return error_page(request, u"不存在的页码")
previous_page = next_page = None
try:
previous_page = current_page.previous_page_number()
except Exception:
pass
try:
next_page = current_page.next_page_number()
except Exception:
pass
if request.user.is_authenticated():
problems_status = json.loads(request.user.problems_status)
else:
problems_status = {}
    # Tag list for the sidebar, ordered by number of associated problems, excluding tags with zero problems
tags = ProblemTag.objects.annotate(problem_number=Count("problem")).filter(problem_number__gt=0).order_by("-problem_number")
return render(request, "oj/problem/problem_list.html",
{"problems": current_page, "page": int(page),
"previous_page": previous_page, "next_page": next_page,
"keyword": keyword, "tag": tag_text,"problems_status": problems_status,
"tags": tags, "difficulty_order": difficulty_order})
| hxsf/OnlineJudge | problem/views.py | Python | mit | 12,819 | 0.001668 |
'''
Limevideo urlresolver plugin
Copyright (C) 2013 Vinnydude
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
from urlresolver import common
from lib import jsunpack
from lib import captcha_lib
class LimevideoResolver(Plugin, UrlResolver, PluginSettings):
implements = [UrlResolver, PluginSettings]
name = "limevideo"
domains = [ "limevideo.net" ]
def __init__(self):
p = self.get_setting('priority') or 100
self.priority = int(p)
self.net = Net()
def get_media_url(self, host, media_id):
url = self.get_url(host, media_id)
html = self.net.http_GET(url).content
data = {}
r = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', html)
for name, value in r:
data[name] = value
data.update({'method_free': 'Continue to Video'})
html = self.net.http_POST(url, data).content
r = re.findall(r'type="hidden" name="(.+?)" value="(.+?)">', html)
for name, value in r:
data[name] = value
data.update(captcha_lib.do_captcha(html))
html = self.net.http_POST(url, data).content
sPattern = '<script type=(?:"|\')text/javascript(?:"|\')>(eval\('
sPattern += 'function\(p,a,c,k,e,d\)(?!.+player_ads.+).+np_vid.+?)'
sPattern += '\s+?</script>'
r = re.search(sPattern, html, re.DOTALL + re.IGNORECASE)
if r:
sJavascript = r.group(1)
sUnpacked = jsunpack.unpack(sJavascript)
sPattern = '<embed id="np_vid"type="video/divx"src="(.+?)'
sPattern += '"custommode='
r = re.search(sPattern, sUnpacked)
if r:
return r.group(1)
else:
num = re.compile('false\|(.+?)\|(.+?)\|(.+?)\|(.+?)\|divx').findall(html)
for u1, u2, u3, u4 in num:
urlz = u4 + '.' + u3 + '.' + u2 + '.' + u1
pre = 'http://' + urlz + ':182/d/'
preb = re.compile('custommode\|(.+?)\|(.+?)\|182').findall(html)
for ext, link in preb:
r = pre + link + '/video.' + ext
return r
def get_url(self, host, media_id):
return 'http://www.limevideo.net/%s' % media_id
def get_host_and_id(self, url):
r = re.search('//(.+?)/([0-9a-zA-Z]+)', url)
if r:
return r.groups()
else:
return False
return('host', 'media_id')
def valid_url(self, url, host):
if self.get_setting('enabled') == 'false': return False
return (re.match('http://(www.)?limevideo.net/' +
'[0-9A-Za-z]+', url) or
'limevideo' in host)
| xmbcrios/xmbcrios.repository | script.module.urlresolver/lib/urlresolver/plugins/limevideo.py | Python | gpl-2.0 | 3,459 | 0.005204 |
import unittest
from dpark.serialize import dump_closure, load_closure
class TestSerialize(unittest.TestCase):
def testNameError(self):
def foo():
print x
dumped_func = dump_closure(foo)
func = load_closure(dumped_func)
self.assertRaises(NameError, func)
x = 10
def testNoneAsFreeVar(self):
y = None
x = 10
def foo():
return (x, y)
dumped_func = dump_closure(foo)
func = load_closure(dumped_func)
self.assertEqual(func(), (x, y))
| quxiaolong1504/dpark | tests/test_serialize.py | Python | bsd-3-clause | 590 | 0.008475 |
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.model.xs import *
from scap.model.xs.AnnotatedType import AnnotatedType
logger = logging.getLogger(__name__)
class ExtensionType(AnnotatedType):
MODEL_MAP = {
'elements': [
{'tag_name': 'group', 'list': 'tags', 'class': 'GroupType', 'min': 0},
{'tag_name': 'all', 'list': 'tags', 'class': 'AllType', 'min': 0},
{'tag_name': 'choice', 'list': 'tags', 'class': 'ChoiceElement', 'min': 0},
{'tag_name': 'sequence', 'list': 'tags', 'class': 'GroupType', 'min': 0},
{'tag_name': 'attribute', 'list': 'tags', 'class': 'AttributeType', 'min': 0, 'max': None},
{'tag_name': 'attributeGroup', 'list': 'tags', 'class': 'AttributeGroupType', 'min': 0, 'max': None},
{'tag_name': 'anyAttribute', 'list': 'tags', 'class': 'WildcardType', 'min': 0},
],
'attributes': {
'base': {'type': 'QNameType', 'required': True},
}
}
def get_defs(self, schema, top_level):
logger.debug('Base: ' + self.base)
# TODO unable to map xmlns because ET doesn't retain it
base_ns, base_name = [self.base.partition(':')[i] for i in [0,2]]
top_level.set_super_module(base_ns)
top_level.set_super_class(base_name)
return super(ExtensionType, self).get_defs(schema, top_level)
| cjaymes/pyscap | src/scap/model/xs/ExtensionType.py | Python | gpl-3.0 | 2,037 | 0.003927 |
import os
import re
import json
import importlib
# Django Libraries
from django.http import HttpResponse, HttpResponseServerError
# CloudScape Libraries
from cloudscape.common import config
from cloudscape.common import logger
from cloudscape.common.vars import T_BASE
from cloudscape.engine.api.base import APIBase
from cloudscape.common.utils import JSONTemplate
from cloudscape.engine.api.auth.key import APIKey
from cloudscape.engine.api.auth.acl import ACLGateway
from cloudscape.common.utils import valid, invalid
from cloudscape.engine.api.auth.token import APIToken
from cloudscape.engine.api.app.auth.models import DBAuthEndpoints
from cloudscape.engine.api.app.user.models import DBUserDetails
# Configuration / Logger
CONF = config.parse()
LOG = logger.create('cloudscape.engine.api.core.request', CONF.server.log)
def dispatch(request):
"""
The entry point for all API requests. Called for all endpoints from the Django
URLs file. Creates a new instance of the EndpointManager class, and returns any
HTTP response to the client that opened the API request.
:param request: The Django request object
:type request: object
:rtype: object
"""
try:
# Return the response from the endpoint handler
return EndpointManager(request).handler()
# Critical server error
except Exception as e:
LOG.exception('Internal server error: %s' % str(e))
# Return a 500 error
return HttpResponseServerError('Internal server error, please contact your administrator.')
class EndpointManager:
"""
The endpoint request manager class. Serves as the entry point for all API request,
both for authentication requests, and already authenticated requests. Constructs
the base API class, loads API utilities, and performs a number of other functions
to prepare the API for the incoming request.
The EndpointManager class is instantiated by the dispatch method, which is called
by the Django URLs module file. It is initialized with the Django request object.
"""
def __init__(self, request):
self.request_raw = request
# Request properties
self.method = None
self.request = None
self.endpoint = None
self.action = None
self.path = None
# Request endpoint handler
self.handler_obj = None
# API parameters
self.api_name = None
self.api_mod = None
self.api_class = None
self.api_user = None
self.api_group = None
# API base object
self.api_base = None
# Request error
def _req_error(self, err):
err_response = {
            'message': 'An error occurred when processing the API request',
'endpoint': self.endpoint,
'error': err
}
LOG.error('%s:%s' % (self.endpoint, err))
return HttpResponse(json.dumps(err_response), content_type='application/json', status=400)
def _authenticate(self):
"""
Authenticate the API request.
"""
# Set the API user and group
self.api_user = self.request['api_user']
self.api_group = None if not ('api_group' in self.request) else self.request['api_group']
LOG.info('Authenticating API user: %s, group=%s' % (self.api_user, repr(self.api_group)))
# Authenticate key for token requests
if self.endpoint == 'auth/get':
auth_status = APIKey().validate(self.request)
if not auth_status['valid']:
return self._req_error(auth_status['content'])
LOG.info('API key authentication successfull for user: %s' % self.api_user)
# Authenticate token for API requests
else:
if not APIToken().validate(self.request):
return self._req_error('Failed to validate API token for user \'%s\'' % self.api_user)
LOG.info('API token authentication successfull for user: %s' % self.api_user)
# Check for a user account
if DBUserDetails.objects.filter(username=self.api_user).count():
# If no API group was supplied
if not self.api_group:
return self._req_error('User accounts must supply a group UUID when making a request using the <api_group> parameter')
# Make sure the group exists and the user is a member
is_member = False
for group in DBUserDetails.objects.get(username=self.api_user).get_groups():
if group['uuid'] == self.api_group:
is_member = True
break
# If the user is not a member of the group
if not is_member:
return self._req_error('User account <%s> is not a member of group <%s>' % (self.api_user, self.api_group))
# Validate the request
def _validate(self):
# Request body / method
self.request = json.loads(self.request_raw.body)
self.method = self.request_raw.META['REQUEST_METHOD']
# Make sure a request action is set
if not 'action' in self.request:
return self._req_error('Request body requires an <action> parameter for endpoint pathing')
self.action = self.request['action']
# Get the request path
self.path = re.compile('^\/(.*$)').sub(r'\g<1>', self.request_raw.META['PATH_INFO'])
# Set the request endpoint
self.endpoint = '%s/%s' % (self.path, self.action)
# Map the path to a module, class, and API name
self.handler_obj = EndpointMapper(self.endpoint, self.method).handler()
if not self.handler_obj['valid']:
return self._req_error(self.handler_obj['content'])
# Validate the request body
request_err = JSONTemplate(self.handler_obj['content']['api_map']).validate(self.request)
if request_err:
return self._req_error(request_err)
# Set the handler objects
self.api_name = self.handler_obj['content']['api_name']
self.api_mod = self.handler_obj['content']['api_mod']
self.api_class = self.handler_obj['content']['api_class']
self.api_utils = self.handler_obj['content']['api_utils']
def handler(self):
"""
The endpoint manager request handler. Performs a number of validation steps before
passing off the request to the API utility class.
1.) Looks for the base required request parameters
2.) Maps the endpoint and request action to an API utility and validates the request body
3.) Authenticates the user and API key/token
4.) Initializes any required Socket.IO connections for web clients
5.) Launches the API utility class to process the request
6.) Returns either an HTTP response with the status of the request
"""
# Parse the request
try:
validate_err = self._validate()
if validate_err:
return validate_err
except Exception as e:
LOG.exception('Exception while validating request: %s' % str(e))
return self._req_error('Internal server error, failed to validate the request')
# Authenticate the request
try:
auth_err = self._authenticate()
if auth_err:
return auth_err
except Exception as e:
LOG.exception('Exception while authenticating the request: %s' % str(e))
return self._req_error('Internal server error, failed to authenticate the request')
# Check the request against ACLs
acl_gateway = ACLGateway(self.request, self.endpoint, self.api_user)
# If the user is not authorized for this endpoint/object combination
if not acl_gateway.authorized:
return self._req_error(acl_gateway.auth_error)
# Set up the API base
try:
# Create an instance of the APIBase and run the constructor
api_obj = APIBase(
name = self.api_name,
endpoint = self.endpoint,
utils = self.api_utils,
acl = acl_gateway
).construct(self.request_raw)
# Make sure the construct ran successfully
if not api_obj['valid']:
return self._req_error(api_obj['content'])
# Set the API base object for endpoint utilities
self.api_base = api_obj['content']
# Failed to setup the APIBase
except Exception as e:
LOG.exception('Failed to set up API base: %s' % str(e))
            return self._req_error('Internal server error, failed to set up API base')
# Load the handler module and class
handler_mod = importlib.import_module(self.api_mod)
handler_class = getattr(handler_mod, self.api_class)
handler_inst = handler_class(self.api_base)
# Launch the request handler and return the response
try:
response = handler_inst.launch()
# Critical error when running handler
except Exception as e:
LOG.exception('Exeption while running API handler: %s' % str(e))
return self._req_error('Encountered API handler error')
# Close any open SocketIO connections
self.api_base.socket.disconnect()
# Return either a valid or invalid request response
if response['valid']:
return self.api_base.log.success(response['content'], response['data'])
return self.api_base.log.error(code=response['code'], log_msg=response['content'])
class EndpointMapper:
"""
API class used to construct the endpoint map. Scans the endpoint request templates
in the API templates directory to construct a map used to load required utilities
and modules, as well as validate the request for each endpoint. Each map also contains
ACL parameters used when constructing the ACL database tables.
"""
def __init__(self, endpoint=None, method=None):
"""
Construct the EndpointMapper class.
@param endpoint: The endpoint path
@type endpoint: str
@param method: The request method
@type method: str
"""
self.endpoint = endpoint
self.method = method
self.map = {}
def _merge_auth(self,j,e):
"""
Helper method used to merge token authentication parameters into the endpoint
request map. Mainly so I don't have to redundantly include the same code in
every map. Also makes modification much easier.
"""
# Ignore the authentication endpoint, as this is used to retrieve the token
if e == 'auth/get':
return
# Required / optional connection parameters
j['root']['_required'].extend(['api_user', 'api_token', 'action'])
j['root']['_optional'].extend(['api_group'])
# Connection parameter types
t = { 'api_user': 'str', 'api_token': 'str', 'action': 'str', 'api_group': 'uuid4' }
for k,v in t.iteritems():
j['root']['_children'][k] = { '_type': v }
def _merge_socket(self,j):
"""
Merge request parameters for web socket request. Used for handling connections
being passed along by the Socket.IO API proxy.
"""
# Load the socket request validator map
sv = json.loads(open('%s/socket.json' % T_BASE, 'r').read())
# Make sure the '_children' key exists
if not '_children' in j['root']:
j['root']['_children'] = {}
# Merge the socket parameters map
j['root']['_children']['socket'] = sv
j['root']['_optional'].append('socket')
def _build_map(self):
"""
Load all endpoint definitions.
"""
for endpoint in list(DBAuthEndpoints.objects.all().values()):
# Try to load the request map
try:
endpoint_rmap = json.loads(endpoint['rmap'])
# Map base object
rmap_base = {
'root': endpoint_rmap
}
# Merge the web socket request validator
self._merge_socket(rmap_base)
# Merge the authentication request validation parameters
self._merge_auth(rmap_base, endpoint['name'])
# Load the endpoint request handler module string
self.map[endpoint['name']] = {
'module': endpoint['mod'],
'class': endpoint['cls'],
'name': endpoint['name'],
'desc': endpoint['desc'],
'method': endpoint['method'],
'utils': None if not endpoint['utils'] else json.loads(endpoint['utils']),
'json': rmap_base
}
# Error constructing request map, skip to next endpoint map
except Exception as e:
LOG.exception('Failed to load request map for endpoint <%s>: %s ' % (endpoint['name'], str(e)))
continue
# All template maps constructed
return valid(LOG.info('Constructed API template map'))
def handler(self):
"""
Main method for constructing and returning the endpoint map.
@return valid|invalid
"""
map_rsp = self._build_map()
if not map_rsp['valid']:
return map_rsp
# Request path missing
if not self.endpoint:
return invalid(LOG.error('Missing request endpoint'))
# Invalid request path
if not self.endpoint in self.map:
return invalid(LOG.error('Unsupported request endpoint: <%s>' % self.endpoint))
# Verify the request method
if self.method != self.map[self.endpoint]['method']:
return invalid(LOG.error('Unsupported request method <%s> for endpoint <%s>' % (self.method, self.endpoint)))
# Get the API module, class handler, and name
self.handler_obj = {
'api_mod': self.map[self.endpoint]['module'],
'api_class': self.map[self.endpoint]['class'],
'api_name': self.map[self.endpoint]['name'],
'api_utils': self.map[self.endpoint]['utils'],
'api_map': self.map[self.endpoint]['json']
}
LOG.info('Parsed handler object for API endpoint <%s>: %s' % (self.endpoint, self.handler_obj))
# Return the handler module path
return valid(self.handler_obj) | djtaylor/cloudscape-DEPRECATED | python/cloudscape/engine/api/core/request.py | Python | gpl-3.0 | 15,200 | 0.010724 |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import socket
from oslo_concurrency import processutils
from oslo_config import cfg
import oslo_i18n
import pbr.version
# NOTE(dstanek): i18n.enable_lazy() must be called before
# keystone.i18n._() is called to ensure it has the desired lazy lookup
# behavior. This includes cases, like keystone.exceptions, where
# keystone.i18n._() is called at import time.
oslo_i18n.enable_lazy()
from keystone.common import environment
from keystone.common import utils
from keystone import config
from keystone.i18n import _
from keystone.openstack.common import service
from keystone.openstack.common import systemd
from keystone.server import common
from keystone import service as keystone_service
CONF = cfg.CONF
class ServerWrapper(object):
"""Wraps a Server with some launching info & capabilities."""
def __init__(self, server, workers):
self.server = server
self.workers = workers
def launch_with(self, launcher):
self.server.listen()
if self.workers > 1:
# Use multi-process launcher
launcher.launch_service(self.server, self.workers)
else:
# Use single process launcher
launcher.launch_service(self.server)
def create_server(conf, name, host, port, workers):
app = keystone_service.loadapp('config:%s' % conf, name)
server = environment.Server(app, host=host, port=port,
keepalive=CONF.eventlet_server.tcp_keepalive,
keepidle=CONF.eventlet_server.tcp_keepidle)
if CONF.eventlet_server_ssl.enable:
server.set_ssl(CONF.eventlet_server_ssl.certfile,
CONF.eventlet_server_ssl.keyfile,
CONF.eventlet_server_ssl.ca_certs,
CONF.eventlet_server_ssl.cert_required)
return name, ServerWrapper(server, workers)
def serve(*servers):
logging.warning(_('Running keystone via eventlet is deprecated as of Kilo '
'in favor of running in a WSGI server (e.g. mod_wsgi). '
'Support for keystone under eventlet will be removed in '
'the "M"-Release.'))
if max([server[1].workers for server in servers]) > 1:
launcher = service.ProcessLauncher()
else:
launcher = service.ServiceLauncher()
for name, server in servers:
try:
server.launch_with(launcher)
except socket.error:
logging.exception(_('Failed to start the %(name)s server') % {
'name': name})
raise
# notify calling process we are ready to serve
systemd.notify_once()
for name, server in servers:
launcher.wait()
def _get_workers(worker_type_config_opt):
# Get the value from config, if the config value is None (not set), return
# the number of cpus with a minimum of 2.
worker_count = CONF.eventlet_server.get(worker_type_config_opt)
if not worker_count:
worker_count = max(2, processutils.get_worker_count())
return worker_count
def configure_threading():
monkeypatch_thread = not CONF.standard_threads
pydev_debug_url = utils.setup_remote_pydev_debug()
if pydev_debug_url:
# in order to work around errors caused by monkey patching we have to
# set the thread to False. An explanation is here:
# http://lists.openstack.org/pipermail/openstack-dev/2012-August/
# 000794.html
monkeypatch_thread = False
environment.use_eventlet(monkeypatch_thread)
def run(possible_topdir):
dev_conf = os.path.join(possible_topdir,
'etc',
'keystone.conf')
config_files = None
if os.path.exists(dev_conf):
config_files = [dev_conf]
common.configure(
version=pbr.version.VersionInfo('keystone').version_string(),
config_files=config_files,
pre_setup_logging_fn=configure_threading)
paste_config = config.find_paste_config()
def create_servers():
public_worker_count = _get_workers('public_workers')
servers = []
servers.append(create_server(paste_config,
'main',
CONF.eventlet_server.public_bind_host,
CONF.eventlet_server.public_port,
public_worker_count))
return servers
_unused, servers = common.setup_backends(
startup_application_fn=create_servers)
serve(*servers)
| darren-wang/ks3 | keystone/server/eventlet.py | Python | apache-2.0 | 5,193 | 0.001541 |
#! python3
'''
mcbd.py - Saves and loads pieces of text from/to the clipboard to/from a
shelf type file.
Usage: python3 mcbd.py save <keyword> - saves clipboard for keyword.
python3 mcbd.py <keyword> - loads to clipboard for keyword.
python3 mcbd.py list - loads all keywords to clipboard.
python3 mcbd.py delete <keyword> - deletes for keyword.
python3 mcbd.py delete - deletes all keywords.
'''
'''
Say you have the boring task of filling out many forms in a web page or
software with several text fields. The clipboard saves you from typing
the same text over and over again. But only one thing can be on the
clipboard at a time. If you have several different pieces of text that
you need to copy and paste, you have to keep highlighting and copying
the same few things over and over again. You can write a Python
program to keep track of multiple pieces of text.
'''
'''
Extend the multiclipboard program in this chapter so that it has a
delete <keyword> command line argument that will delete a keyword from
the shelf. Then add a delete command line argument that will delete all
keywords.
'''
import pyperclip
import shelve
import sys
import textwrap
def print_usage():
print(textwrap.dedent(
'''
Usage: python3 mcbd.py save <keyword> - saves clipboard for keyword.
python3 mcbd.py <keyword> - loads to clipboard for keyword.
python3 mcbd.py list - loads all keywords to clipboard.
python3 mcbd.py delete <keyword> - deletes for keyword.
python3 mcbd.py delete - deletes all keywords.
'''))
mcbShelf = shelve.open('mcb') # file created if not already existing
# save or delete specified keywords
if len(sys.argv) == 3:
if sys.argv[1].lower() == 'save':
mcbShelf[sys.argv[2]] = pyperclip.paste()
print('clipboard saved under keyword:', sys.argv[2])
elif sys.argv[1].lower() == 'delete':
del mcbShelf[sys.argv[2]]
print('deleted keyword:', sys.argv[2])
# list or delete all keywords or fetch one
elif len(sys.argv) == 2:
if sys.argv[1].lower() == 'list':
pyperclip.copy(str(list(mcbShelf.keys())))
print('all keywords copied to clipboard')
elif sys.argv[1].lower() == 'delete':
mcbShelf.clear()
print('all keywords deleted')
elif sys.argv[1] in mcbShelf:
pyperclip.copy(mcbShelf[sys.argv[1]])
print('copied to clipboard for keyword:', sys.argv[1])
else:
print('no such keyword:', sys.argv[1])
print_usage()
else:
print_usage()
mcbShelf.close()
| apaksoy/automatetheboringstuff | practice projects/chap 08/multiclipboard with deletion chap 8/mcbd.py | Python | mit | 2,608 | 0.003451 |
# -*- coding: utf-8 -*-
"""This file contains utility functions."""
import logging
import re
# Illegal Unicode characters for XML.
ILLEGAL_XML_RE = re.compile(
ur'[\x00-\x08\x0b-\x1f\x7f-\x84\x86-\x9f'
ur'\ud800-\udfff\ufdd0-\ufddf\ufffe-\uffff]')
def IsText(bytes_in, encoding=None):
"""Examine the bytes in and determine if they are indicative of a text.
  Parsers need a quick and at least semi-reliable method of discovering whether
  a particular byte stream is text or resembles text. This can be used in text
  parsers, for instance, to determine if a file is a text file.
  The method assumes the byte sequence is either ASCII, UTF-8, UTF-16 or the
  supplied character encoding. Otherwise it assumes the byte sequence is not
  text, but an arbitrary byte sequence.
Args:
bytes_in: The byte sequence passed to the method that needs examination.
encoding: Optional encoding to test, if not defined only ASCII, UTF-8 and
UTF-16 are tried.
Returns:
Boolean value indicating whether or not the byte sequence is a text or not.
"""
# TODO: Improve speed and accuracy of this method.
# Start with the assumption we are dealing with a text.
is_ascii = True
# Check if this is ASCII text string.
for char in bytes_in:
if not 31 < ord(char) < 128:
is_ascii = False
break
# We have an ASCII string.
if is_ascii:
return is_ascii
# Is this already a unicode text?
if isinstance(bytes_in, unicode):
return True
# Check if this is UTF-8
try:
_ = bytes_in.decode('utf-8')
return True
except UnicodeDecodeError:
pass
# TODO: UTF 16 decode is successful in too
# many edge cases where we are not really dealing with
# a text at all. Leaving this out for now, consider
# re-enabling or making a better determination.
#try:
# _ = bytes_in.decode('utf-16-le')
# return True
#except UnicodeDecodeError:
# pass
if encoding:
try:
_ = bytes_in.decode(encoding)
return True
except UnicodeDecodeError:
pass
except LookupError:
logging.error(
u'String encoding not recognized: {0:s}'.format(encoding))
return False
def GetUnicodeString(string):
"""Converts the string to Unicode if necessary."""
if not isinstance(string, unicode):
return str(string).decode('utf8', 'ignore')
return string
def GetInodeValue(inode_raw):
"""Read in a 'raw' inode value and try to convert it into an integer.
Args:
inode_raw: A string or an int inode value.
Returns:
An integer inode value.
"""
if isinstance(inode_raw, (int, long)):
return inode_raw
if isinstance(inode_raw, float):
return int(inode_raw)
try:
return int(inode_raw)
except ValueError:
# Let's do one more attempt.
inode_string, _, _ = str(inode_raw).partition('-')
try:
return int(inode_string)
except ValueError:
return -1
def RemoveIllegalXMLCharacters(string, replacement=u'\ufffd'):
"""Removes illegal Unicode characters for XML.
Args:
string: A string to replace all illegal characters for XML.
replacement: A replacement character to use in replacement of all
found illegal characters.
  Returns:
A string where all illegal Unicode characters for XML have been removed.
If the input is not a string it will be returned unchanged."""
if isinstance(string, basestring):
return ILLEGAL_XML_RE.sub(replacement, string)
return string
| ostree/plaso | plaso/lib/utils.py | Python | apache-2.0 | 3,494 | 0.013165 |
#
# Secret Labs' Regular Expression Engine
#
# various symbols used by the regular expression engine.
# run this script to update the _sre include files!
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# update when constants are added or removed
MAGIC = 20031017
# max code word in this release
MAXREPEAT = 65535
# SRE standard exception (access as sre.error)
# should this really be here?
class error(Exception):
pass
# operators
FAILURE = "failure"
SUCCESS = "success"
ANY = "any"
ANY_ALL = "any_all"
ASSERT = "assert"
ASSERT_NOT = "assert_not"
AT = "at"
BIGCHARSET = "bigcharset"
BRANCH = "branch"
CALL = "call"
CATEGORY = "category"
CHARSET = "charset"
GROUPREF = "groupref"
GROUPREF_IGNORE = "groupref_ignore"
GROUPREF_EXISTS = "groupref_exists"
IN = "in"
IN_IGNORE = "in_ignore"
INFO = "info"
JUMP = "jump"
LITERAL = "literal"
LITERAL_IGNORE = "literal_ignore"
MARK = "mark"
MAX_REPEAT = "max_repeat"
MAX_UNTIL = "max_until"
MIN_REPEAT = "min_repeat"
MIN_UNTIL = "min_until"
NEGATE = "negate"
NOT_LITERAL = "not_literal"
NOT_LITERAL_IGNORE = "not_literal_ignore"
RANGE = "range"
REPEAT = "repeat"
REPEAT_ONE = "repeat_one"
SUBPATTERN = "subpattern"
MIN_REPEAT_ONE = "min_repeat_one"
# positions
AT_BEGINNING = "at_beginning"
AT_BEGINNING_LINE = "at_beginning_line"
AT_BEGINNING_STRING = "at_beginning_string"
AT_BOUNDARY = "at_boundary"
AT_NON_BOUNDARY = "at_non_boundary"
AT_END = "at_end"
AT_END_LINE = "at_end_line"
AT_END_STRING = "at_end_string"
AT_LOC_BOUNDARY = "at_loc_boundary"
AT_LOC_NON_BOUNDARY = "at_loc_non_boundary"
AT_UNI_BOUNDARY = "at_uni_boundary"
AT_UNI_NON_BOUNDARY = "at_uni_non_boundary"
# categories
CATEGORY_DIGIT = "category_digit"
CATEGORY_NOT_DIGIT = "category_not_digit"
CATEGORY_SPACE = "category_space"
CATEGORY_NOT_SPACE = "category_not_space"
CATEGORY_WORD = "category_word"
CATEGORY_NOT_WORD = "category_not_word"
CATEGORY_LINEBREAK = "category_linebreak"
CATEGORY_NOT_LINEBREAK = "category_not_linebreak"
CATEGORY_LOC_WORD = "category_loc_word"
CATEGORY_LOC_NOT_WORD = "category_loc_not_word"
CATEGORY_UNI_DIGIT = "category_uni_digit"
CATEGORY_UNI_NOT_DIGIT = "category_uni_not_digit"
CATEGORY_UNI_SPACE = "category_uni_space"
CATEGORY_UNI_NOT_SPACE = "category_uni_not_space"
CATEGORY_UNI_WORD = "category_uni_word"
CATEGORY_UNI_NOT_WORD = "category_uni_not_word"
CATEGORY_UNI_LINEBREAK = "category_uni_linebreak"
CATEGORY_UNI_NOT_LINEBREAK = "category_uni_not_linebreak"
OPCODES = [
# failure=0 success=1 (just because it looks better that way :-)
FAILURE, SUCCESS,
ANY, ANY_ALL,
ASSERT, ASSERT_NOT,
AT,
BRANCH,
CALL,
CATEGORY,
CHARSET, BIGCHARSET,
GROUPREF, GROUPREF_EXISTS, GROUPREF_IGNORE,
IN, IN_IGNORE,
INFO,
JUMP,
LITERAL, LITERAL_IGNORE,
MARK,
MAX_UNTIL,
MIN_UNTIL,
NOT_LITERAL, NOT_LITERAL_IGNORE,
NEGATE,
RANGE,
REPEAT,
REPEAT_ONE,
SUBPATTERN,
MIN_REPEAT_ONE
]
ATCODES = [
AT_BEGINNING, AT_BEGINNING_LINE, AT_BEGINNING_STRING, AT_BOUNDARY,
AT_NON_BOUNDARY, AT_END, AT_END_LINE, AT_END_STRING,
AT_LOC_BOUNDARY, AT_LOC_NON_BOUNDARY, AT_UNI_BOUNDARY,
AT_UNI_NON_BOUNDARY
]
CHCODES = [
CATEGORY_DIGIT, CATEGORY_NOT_DIGIT, CATEGORY_SPACE,
CATEGORY_NOT_SPACE, CATEGORY_WORD, CATEGORY_NOT_WORD,
CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK, CATEGORY_LOC_WORD,
CATEGORY_LOC_NOT_WORD, CATEGORY_UNI_DIGIT, CATEGORY_UNI_NOT_DIGIT,
CATEGORY_UNI_SPACE, CATEGORY_UNI_NOT_SPACE, CATEGORY_UNI_WORD,
CATEGORY_UNI_NOT_WORD, CATEGORY_UNI_LINEBREAK,
CATEGORY_UNI_NOT_LINEBREAK
]
def makedict(list):
d = {}
i = 0
for item in list:
d[item] = i
i = i + 1
return d
OPCODES = makedict(OPCODES)
ATCODES = makedict(ATCODES)
CHCODES = makedict(CHCODES)
# replacement operations for "ignore case" mode
OP_IGNORE = {
GROUPREF: GROUPREF_IGNORE,
IN: IN_IGNORE,
LITERAL: LITERAL_IGNORE,
NOT_LITERAL: NOT_LITERAL_IGNORE
}
AT_MULTILINE = {
AT_BEGINNING: AT_BEGINNING_LINE,
AT_END: AT_END_LINE
}
AT_LOCALE = {
AT_BOUNDARY: AT_LOC_BOUNDARY,
AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY
}
AT_UNICODE = {
AT_BOUNDARY: AT_UNI_BOUNDARY,
AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY
}
CH_LOCALE = {
CATEGORY_DIGIT: CATEGORY_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE,
CATEGORY_WORD: CATEGORY_LOC_WORD,
CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK
}
CH_UNICODE = {
CATEGORY_DIGIT: CATEGORY_UNI_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_UNI_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE,
CATEGORY_WORD: CATEGORY_UNI_WORD,
CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK
}
# flags
SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking)
SRE_FLAG_IGNORECASE = 2 # case insensitive
SRE_FLAG_LOCALE = 4 # honour system locale
SRE_FLAG_MULTILINE = 8 # treat target as multiline string
SRE_FLAG_DOTALL = 16 # treat target as a single string
SRE_FLAG_UNICODE = 32 # use unicode "locale"
SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments
SRE_FLAG_DEBUG = 128 # debugging
SRE_FLAG_ASCII = 256 # use ascii "locale"
# flags for INFO primitive
SRE_INFO_PREFIX = 1 # has prefix
SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix)
SRE_INFO_CHARSET = 4 # pattern starts with character from given set
if __name__ == "__main__":
def dump(f, d, prefix):
items = d.items()
items.sort(key=lambda a: a[1])
for k, v in items:
f.write("#define %s_%s %s\n" % (prefix, k.upper(), v))
f = open("sre_constants.h", "w")
f.write("""\
/*
* Secret Labs' Regular Expression Engine
*
* regular expression matching engine
*
* NOTE: This file is generated by sre_constants.py. If you need
* to change anything in here, edit sre_constants.py and run it.
*
* Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
*
* See the _sre.c file for information on usage and redistribution.
*/
""")
f.write("#define SRE_MAGIC %d\n" % MAGIC)
dump(f, OPCODES, "SRE_OP")
dump(f, ATCODES, "SRE")
dump(f, CHCODES, "SRE")
f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE)
f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE)
f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE)
f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE)
f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL)
f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE)
f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE)
f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX)
f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL)
f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET)
f.close()
print("done")
| harmy/kbengine | kbe/res/scripts/common/Lib/sre_constants.py | Python | lgpl-3.0 | 7,444 | 0.002284 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.engine.clients import client_plugin
from heat.engine import constraints
from manilaclient import client as manila_client
from manilaclient import exceptions
MANILACLIENT_VERSION = "1"
class ManilaClientPlugin(client_plugin.ClientPlugin):
exceptions_module = exceptions
service_types = [SHARE] = ['share']
def _create(self):
endpoint_type = self._get_client_option('manila', 'endpoint_type')
endpoint = self.url_for(service_type=self.SHARE,
endpoint_type=endpoint_type)
args = {
'service_catalog_url': endpoint,
'input_auth_token': self.auth_token
}
client = manila_client.Client(MANILACLIENT_VERSION, **args)
return client
def is_not_found(self, ex):
return isinstance(ex, exceptions.NotFound)
def is_over_limit(self, ex):
return isinstance(ex, exceptions.RequestEntityTooLarge)
def is_conflict(self, ex):
return isinstance(ex, exceptions.Conflict)
@staticmethod
def _find_resource_by_id_or_name(id_or_name, resource_list,
resource_type_name):
"""The method is trying to find id or name in item_list
The method searches item with id_or_name in list and returns it.
If there is more than one value or no values then it raises an
exception
:param id_or_name: resource id or name
:param resource_list: list of resources
:param resource_type_name: name of resource type that will be used
for exceptions
:raises NotFound, NoUniqueMatch
:return: resource or generate an exception otherwise
"""
search_result_by_id = [res for res in resource_list
if res.id == id_or_name]
if search_result_by_id:
return search_result_by_id[0]
else:
# try to find resource by name
search_result_by_name = [res for res in resource_list
if res.name == id_or_name]
match_count = len(search_result_by_name)
if match_count > 1:
message = ("Ambiguous {0} name '{1}'. Found more than one "
"{0} for this name in Manila."
).format(resource_type_name, id_or_name)
raise exceptions.NoUniqueMatch(message)
elif match_count == 1:
return search_result_by_name[0]
else:
message = ("{0} '{1}' was not found in Manila. Please "
"use the identity of existing {0} in Heat "
"template.").format(resource_type_name, id_or_name)
raise exceptions.NotFound(message=message)
def get_share_type(self, share_type_identity):
return self._find_resource_by_id_or_name(
share_type_identity,
self.client().share_types.list(),
"share type"
)
def get_share_network(self, share_network_identity):
return self._find_resource_by_id_or_name(
share_network_identity,
self.client().share_networks.list(),
"share network"
)
def get_share_snapshot(self, snapshot_identity):
return self._find_resource_by_id_or_name(
snapshot_identity,
self.client().share_snapshots.list(),
"share snapshot"
)
def get_security_service(self, service_identity):
return self._find_resource_by_id_or_name(
service_identity,
self.client().security_services.list(),
'security service'
)
class ManilaShareBaseConstraint(constraints.BaseCustomConstraint):
# check that exceptions module has been loaded. Without this check
# doc tests on gates will fail
expected_exceptions = (exceptions.NotFound, exceptions.NoUniqueMatch)
def validate_with_client(self, client, resource_id):
getattr(client.client_plugin("manila"), self.resource_getter_name)(
resource_id)
class ManilaShareNetworkConstraint(ManilaShareBaseConstraint):
resource_getter_name = 'get_share_network'
class ManilaShareTypeConstraint(ManilaShareBaseConstraint):
resource_getter_name = 'get_share_type'
class ManilaShareSnapshotConstraint(ManilaShareBaseConstraint):
resource_getter_name = 'get_share_snapshot'
| miguelgrinberg/heat | heat/engine/clients/os/manila.py | Python | apache-2.0 | 5,040 | 0 |
class BaiduSearch(object):
def __init__(self):
pass
def __call__(self, client, api, **kw):
"""
        client -- browser automation client exposing a selenium driver
            (used here for navigation, element lookup and screenshots)
        api -- API handle passed in by the framework (unused in this action)
        kw -- keyword arguments; 'keyword' is the query string to search for
"""
client.driver.get("http://www.baidu.com")
input = client.e("#kw")
input.clear()
input.send_keys(kw['keyword'])
submit = client.e("#su")
submit.click()
#path = client.real_path("screen.png")
client.screenshot_as_file("screen.png")
result_list = client.es(".result tr div.f13")
for item in result_list:
print item.text
print "kw:%s" % str(kw)
| emop/webrobot | hello_baidu/libs/actions.py | Python | gpl-2.0 | 725 | 0.01931 |
import os
from flask import render_template
from markdown import markdown
__all__ = ["DirContents", "Doc"]
class DirContents(object):
def __init__(self, dir, name=''):
self.dir = dir
if name != '':
self.name = name
else:
self.name = dir
def get_contents(self):
if not os.path.isdir(self.dir):
os.mkdir(self.dir)
return os.listdir(self.dir)
def html(self):
return render_template('dir_contents.html', dir=self)
class Doc(object):
def __init__(self, text):
self.text = text
def __html__(self):
return markdown(self.text)
| aivuk/formcreator | formcreator/blocks/__init__.py | Python | bsd-2-clause | 647 | 0.003091 |
""" doc2xml.py
Converts docx files representing a proposed rule into the type of XML we'd
expect from the Federal Register.
Executing: python doc2xml.py file.docx
Writes XML to stdout
Installation:
* Install libxml2 via a package manager
* pip install -e git+https://github.com/savoirfairelinux/python-docx.git#egg=docx
Known limitations:
* Ignores images, tables, equations, similar
* Isn't aware of some bullets and other paragraph markers
* Uses bold and italics (along with string matching) to determine what
headers exist. If the docx uses custom style sheets instead, it won't
work
* Only processes the preamble data, not the CFR changes
""" # noqa
from __future__ import print_function
import re
import sys
from itertools import tee
from lxml import etree
import docx
h2_re = re.compile('[A-Z]\.')
h3_re = re.compile('\d\d?\.')
def text_subel(root, tag, text, **attrs):
"""Util method for allowing a one-liner"""
subel = etree.SubElement(root, tag, **attrs)
subel.text = text
return subel
def has_inline_label(par):
return len(par.runs) > 1 and par.runs[0].bold
def is_heading(par, level):
bold = all(run.bold for run in par.runs if run.text.strip())
italics = all(run.italic for run in par.runs if run.text.strip())
l2_marker = bool(h2_re.match(par.text.strip()))
l3_marker = bool(h3_re.match(par.text.strip()))
if level == 1:
return bold
elif level == 2:
return italics and l2_marker
elif level == 3:
return l3_marker
else:
return False
class Builder(object):
def __init__(self, paragraphs, xml_root):
self._paragraphs = iter(paragraphs) # always iterable
self.xml_root = xml_root
def takewhile(self, fn):
while fn(self.head_p):
yield next(self._paragraphs)
def dropwhile(self, fn):
while fn(self.head_p):
next(self._paragraphs)
return self
def skip_header(self):
def not_header(par):
return not (par.text.strip() and par.runs[0].bold and
not any(c.isdigit() for c in par.text))
self.dropwhile(not_header)
return self
def skip_whitespace(self):
self.dropwhile(lambda p: not p.text.strip())
return self
@property
def head_p(self): # peek; non-destructive
copy1, copy2 = tee(self._paragraphs)
self._paragraphs = copy2
return next(copy1)
def consume_text(self):
return next(self._paragraphs).text.strip()
def intro_header(self, parent, start_p):
label_to_tag = {
'AGENCY': 'AGY',
'ACTION': 'ACT',
'SUMMARY': 'SUM',
'DATES': 'DATES',
'ADDRESSES': 'ADD',
'FOR FURTHER INFORMATION CONTACT': 'FURINF',
}
label = next((l for l in label_to_tag if start_p.text.startswith(l)),
None)
if label:
sub_el = etree.SubElement(parent, label_to_tag[label])
text_subel(sub_el, 'HD', label + ':', SOURCE='HED')
else:
sub_el = etree.SubElement(parent, "UNKNOWN")
text_subel(sub_el, 'HD', start_p.runs[0].text, SOURCE='HED')
return sub_el
def intro_sections(self, preamb):
intro = self.takewhile(
lambda p: not p.text.startswith('SUPPLEMENTARY'))
current_section = None
for par in intro:
if has_inline_label(par):
current_section = self.intro_header(preamb, par)
sub_p = etree.SubElement(current_section, 'P')
text = ''.join(r.text for r in par.runs[1:])
# strip the beginning colon as it's part of the label
sub_p.text = text.lstrip(':').strip()
elif current_section is not None:
sub_p = etree.SubElement(current_section, 'P')
sub_p.text = par.text.strip()
def preamble(self):
preamb = etree.SubElement(self.xml_root, 'PREAMB')
text_subel(preamb, 'AGENCY', self.consume_text())
self.skip_whitespace()
if not self.head_p.text[:1].isdigit():
text_subel(preamb, 'SUBAGENCY', self.consume_text())
self.skip_whitespace()
for tag in ('CFR', 'DEPDOC', 'RIN', 'SUBJECT'):
text_subel(preamb, tag, self.consume_text())
self.skip_whitespace()
self.intro_sections(preamb)
return self
def suplinf(self):
suplinf = etree.SubElement(self.xml_root, 'SUPLINF')
text_subel(suplinf, 'HD', self.consume_text(), SOURCE='HED')
self.dropwhile(lambda p: not is_heading(p, 1))
non_cfr = self.takewhile(
lambda p: not p.text.startswith('List of Subjects'))
for par in non_cfr:
if not par.text.strip():
continue
elif is_heading(par, 1):
text_subel(suplinf, 'HD', par.text.strip(), SOURCE='HD1')
elif is_heading(par, 2):
text_subel(suplinf, 'HD', par.text.strip(), SOURCE='HD2')
elif is_heading(par, 3):
text_subel(suplinf, 'HD', par.text.strip(), SOURCE='HD3')
else:
text_subel(suplinf, 'P', par.text.strip())
def parse(filename):
"""Pulls out and prints some fields/paragraphs from an FR notice"""
builder = Builder(docx.Document(filename).paragraphs,
etree.Element('PRORULE'))
builder.skip_header()
builder.preamble()
builder.skip_whitespace()
builder.suplinf()
return builder.xml_root
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: python doc2xml.py file.docx") # noqa
else:
print(etree.tounicode(parse(sys.argv[1]), pretty_print=True)) # noqa
| tadhg-ohiggins/regulations-parser | doc2xml.py | Python | cc0-1.0 | 5,854 | 0.001025 |
#!/usr/bin/env python3
# -*-coding:UTF-8 -*
import os
import re
import sys
import time
import redis
import datetime
sys.path.append(os.path.join(os.environ['AIL_BIN'], 'packages'))
import Item
import Term
sys.path.append(os.path.join(os.environ['AIL_BIN'], 'lib/'))
import ConfigLoader
def rreplace(s, old, new, occurrence):
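    """Replace the last `occurrence` occurrences of `old` in `s` with `new`.
    Illustrative example: rreplace("a,b,c", ",c", "", 1) returns "a,b".
    """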
li = s.rsplit(old, occurrence)
return new.join(li)
if __name__ == '__main__':
start_deb = time.time()
config_loader = ConfigLoader.ConfigLoader()
    r_serv = config_loader.get_redis_conn("ARDB_DB")
    r_serv_term_stats = config_loader.get_redis_conn("ARDB_Trending")
r_serv_termfreq = config_loader.get_redis_conn("ARDB_TermFreq")
config_loader = None
r_serv_term_stats.flushdb()
#convert all regex:
all_regex = r_serv_termfreq.smembers('TrackedRegexSet')
for regex in all_regex:
tags = list( r_serv_termfreq.smembers('TrackedNotificationTags_{}'.format(regex)) )
mails = list( r_serv_termfreq.smembers('TrackedNotificationEmails_{}'.format(regex)) )
new_term = regex[1:-1]
res = Term.parse_json_term_to_add({"term": new_term, "type": 'regex', "tags": tags, "mails": mails, "level": 1}, '[email protected]')
if res[1] == 200:
term_uuid = res[0]['uuid']
list_items = r_serv_termfreq.smembers('regex_{}'.format(regex))
for paste_item in list_items:
item_id = Item.get_item_id(paste_item)
item_date = Item.get_item_date(item_id)
Term.add_tracked_item(term_uuid, item_id, item_date)
# Invalid Tracker => remove it
else:
print('Invalid Regex Removed: {}'.format(regex))
print(res[0])
# allow reprocess
r_serv_termfreq.srem('TrackedRegexSet', regex)
all_tokens = r_serv_termfreq.smembers('TrackedSetTermSet')
for token in all_tokens:
tags = list( r_serv_termfreq.smembers('TrackedNotificationTags_{}'.format(token)) )
mails = list( r_serv_termfreq.smembers('TrackedNotificationEmails_{}'.format(token)) )
res = Term.parse_json_term_to_add({"term": token, "type": 'word', "tags": tags, "mails": mails, "level": 1}, '[email protected]')
if res[1] == 200:
term_uuid = res[0]['uuid']
list_items = r_serv_termfreq.smembers('tracked_{}'.format(token))
for paste_item in list_items:
item_id = Item.get_item_id(paste_item)
item_date = Item.get_item_date(item_id)
Term.add_tracked_item(term_uuid, item_id, item_date)
# Invalid Tracker => remove it
else:
print('Invalid Token Removed: {}'.format(token))
print(res[0])
# allow reprocess
r_serv_termfreq.srem('TrackedSetTermSet', token)
all_set = r_serv_termfreq.smembers('TrackedSetSet')
for curr_set in all_set:
tags = list( r_serv_termfreq.smembers('TrackedNotificationTags_{}'.format(curr_set)) )
mails = list( r_serv_termfreq.smembers('TrackedNotificationEmails_{}'.format(curr_set)) )
to_remove = ',{}'.format(curr_set.split(',')[-1])
new_set = rreplace(curr_set, to_remove, '', 1)
new_set = new_set[2:]
new_set = new_set.replace(',', '')
res = Term.parse_json_term_to_add({"term": new_set, "type": 'set', "nb_words": 1, "tags": tags, "mails": mails, "level": 1}, '[email protected]')
if res[1] == 200:
term_uuid = res[0]['uuid']
list_items = r_serv_termfreq.smembers('tracked_{}'.format(curr_set))
for paste_item in list_items:
item_id = Item.get_item_id(paste_item)
item_date = Item.get_item_date(item_id)
Term.add_tracked_item(term_uuid, item_id, item_date)
# Invalid Tracker => remove it
else:
print('Invalid Set Removed: {}'.format(curr_set))
print(res[0])
# allow reprocess
r_serv_termfreq.srem('TrackedSetSet', curr_set)
r_serv_termfreq.flushdb()
#Set current ail version
r_serv.set('ail:version', 'v2.2')
    #Set current ail version update date
r_serv.hset('ail:update_date', 'v2.2', datetime.datetime.now().strftime("%Y%m%d"))
| CIRCL/AIL-framework | update/v2.2/Update.py | Python | agpl-3.0 | 4,172 | 0.006951 |
from abstractKeyboardMapping import AbstractKeyboardMapping
import evdev
class Qwerty(AbstractKeyboardMapping):
def __init__(self):
    super(Qwerty, self).__init__()
def solo(self):
return { "!" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_1],
"@" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_2],
"#" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_3],
"$" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_4],
"%" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_5],
"^" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_6],
"&" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_7],
"*" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_8],
"(" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_9],
")" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_0],
"_" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_MINUS],
"+" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_EQUAL],
"{" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_LEFTBRACE],
"}" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_RIGHTBRACE],
":" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_SEMICOLON],
"\"" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_APOSTROPHE],
"|" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_BACKSLASH],
"<" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_COMMA],
">" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_DOT],
"?" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_SLASH],
}
def multi(self):
    # no multi-key combinations appear to be needed for this layout
return {}
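# Usage sketch (hypothetical caller): the mapping gives the key chord to emit for a
# printable character, e.g. Qwerty().solo()["!"] -> [KEY_LEFTSHIFT, KEY_1] in evdev codes.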
| dictation-toolbox/aenea | server/linux_wayland/qwerty.py | Python | lgpl-3.0 | 1,666 | 0.042017 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import abc
import os
import glob
import logging
import json
import warnings
from monty.io import zopen
from pymatgen.io.vasp.inputs import Incar, Potcar, Poscar
from pymatgen.io.vasp.outputs import Vasprun, Oszicar, Dynmat
from pymatgen.io.gaussian import GaussianOutput
from pymatgen.entries.computed_entries import ComputedEntry, \
ComputedStructureEntry
from monty.json import MSONable
"""
This module define the various drones used to assimilate data.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Mar 18, 2012"
logger = logging.getLogger(__name__)
class AbstractDrone(MSONable, metaclass=abc.ABCMeta):
"""
Abstract drone class that defines the various methods that must be
    implemented by drones. Because of the quirky nature of Python's
    multiprocessing, the intermediate data representations have to be in the
form of python primitives. So all objects that drones work with must be
MSONable. All drones must also implement the standard MSONable as_dict() and
from_dict API.
"""
@abc.abstractmethod
def assimilate(self, path):
"""
Assimilate data in a directory path into a pymatgen object. Because of
        the quirky nature of Python's multiprocessing, the object must support
pymatgen's as_dict() for parallel processing.
Args:
path: directory path
Returns:
An assimilated object
"""
return
@abc.abstractmethod
def get_valid_paths(self, path):
"""
Checks if path contains valid data for assimilation, and then returns
the valid paths. The paths returned can be a list of directory or file
paths, depending on what kind of data you are assimilating. For
example, if you are assimilating VASP runs, you are only interested in
directories containing vasprun.xml files. On the other hand, if you are
interested converting all POSCARs in a directory tree to cifs for
example, you will want the file paths.
Args:
path: input path as a tuple generated from os.walk, i.e.,
(parent, subdirs, files).
Returns:
List of valid dir/file paths for assimilation
"""
return
class VaspToComputedEntryDrone(AbstractDrone):
"""
VaspToEntryDrone assimilates directories containing vasp output to
ComputedEntry/ComputedStructureEntry objects. There are some restrictions
on the valid directory structures:
1. There can be only one vasp run in each directory.
2. Directories designated "relax1", "relax2" are considered to be 2 parts
of an aflow style run, and only "relax2" is parsed.
3. The drone parses only the vasprun.xml file.
Args:
inc_structure (bool): Set to True if you want
ComputedStructureEntries to be returned instead of
ComputedEntries.
parameters (list): Input parameters to include. It has to be one of
the properties supported by the Vasprun object. See
:class:`pymatgen.io.vasp.Vasprun`. If parameters is None,
a default set of parameters that are necessary for typical
post-processing will be set.
data (list): Output data to include. Has to be one of the properties
supported by the Vasprun object.
"""
def __init__(self, inc_structure=False, parameters=None, data=None):
self._inc_structure = inc_structure
self._parameters = {"is_hubbard", "hubbards", "potcar_spec",
"potcar_symbols", "run_type"}
if parameters:
self._parameters.update(parameters)
self._data = data if data else []
def assimilate(self, path):
files = os.listdir(path)
if "relax1" in files and "relax2" in files:
filepath = glob.glob(os.path.join(path, "relax2",
"vasprun.xml*"))[0]
else:
vasprun_files = glob.glob(os.path.join(path, "vasprun.xml*"))
filepath = None
if len(vasprun_files) == 1:
filepath = vasprun_files[0]
elif len(vasprun_files) > 1:
# Since multiple files are ambiguous, we will always read
                # the one that is the last one alphabetically.
filepath = sorted(vasprun_files)[-1]
warnings.warn("%d vasprun.xml.* found. %s is being parsed." %
(len(vasprun_files), filepath))
try:
vasprun = Vasprun(filepath)
except Exception as ex:
logger.debug("error in {}: {}".format(filepath, ex))
return None
entry = vasprun.get_computed_entry(self._inc_structure,
parameters=self._parameters,
data=self._data)
# entry.parameters["history"] = _get_transformation_history(path)
return entry
def get_valid_paths(self, path):
(parent, subdirs, files) = path
if "relax1" in subdirs and "relax2" in subdirs:
return [parent]
if (not parent.endswith("/relax1")) and \
(not parent.endswith("/relax2")) and (
len(glob.glob(os.path.join(parent, "vasprun.xml*"))) > 0 or (
len(glob.glob(os.path.join(parent, "POSCAR*"))) > 0 and
len(glob.glob(os.path.join(parent, "OSZICAR*"))) > 0)
):
return [parent]
return []
def __str__(self):
return " VaspToComputedEntryDrone"
def as_dict(self):
return {"init_args": {"inc_structure": self._inc_structure,
"parameters": self._parameters,
"data": self._data},
"version": __version__,
"@module": self.__class__.__module__,
"@class": self.__class__.__name__}
@classmethod
def from_dict(cls, d):
return cls(**d["init_args"])
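# Usage sketch (path is hypothetical): a drone is typically handed to a BorgQueen for
# parallel assimilation, but it can also be used directly:
#     drone = VaspToComputedEntryDrone(inc_structure=True)
#     entry = drone.assimilate("/path/to/vasp_run")  # ComputedStructureEntry or None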
class SimpleVaspToComputedEntryDrone(VaspToComputedEntryDrone):
"""
A simpler VaspToComputedEntryDrone. Instead of parsing vasprun.xml, it
parses only the INCAR, POTCAR, OSZICAR and KPOINTS files, which are much
smaller and faster to parse. However, much fewer properties are available
compared to the standard VaspToComputedEntryDrone.
Args:
inc_structure (bool): Set to True if you want
ComputedStructureEntries to be returned instead of
ComputedEntries. Structure will be parsed from the CONTCAR.
"""
def __init__(self, inc_structure=False):
self._inc_structure = inc_structure
self._parameters = {"is_hubbard", "hubbards", "potcar_spec",
"run_type"}
def assimilate(self, path):
files = os.listdir(path)
try:
files_to_parse = {}
if "relax1" in files and "relax2" in files:
for filename in ("INCAR", "POTCAR", "POSCAR"):
search_str = os.path.join(path, "relax1", filename + "*")
files_to_parse[filename] = glob.glob(search_str)[0]
for filename in ("CONTCAR", "OSZICAR"):
search_str = os.path.join(path, "relax2", filename + "*")
files_to_parse[filename] = glob.glob(search_str)[-1]
else:
for filename in (
"INCAR", "POTCAR", "CONTCAR", "OSZICAR", "POSCAR", "DYNMAT"
):
files = sorted(glob.glob(os.path.join(path, filename + "*")))
if len(files) < 1:
continue
if len(files) == 1 or filename == "INCAR" or \
filename == "POTCAR" or filename == "DYNMAT":
files_to_parse[filename] = files[-1]\
if filename == "POTCAR" else files[0]
elif len(files) > 1:
# Since multiple files are ambiguous, we will always
# use the first one for POSCAR and the last one
# alphabetically for CONTCAR and OSZICAR.
if filename == "POSCAR":
files_to_parse[filename] = files[0]
else:
files_to_parse[filename] = files[-1]
warnings.warn(
"%d files found. %s is being parsed." %
(len(files), files_to_parse[filename]))
poscar, contcar, incar, potcar, oszicar, dynmat = [None]*6
if 'POSCAR' in files_to_parse:
poscar = Poscar.from_file(files_to_parse["POSCAR"])
if 'CONTCAR' in files_to_parse:
contcar = Poscar.from_file(files_to_parse["CONTCAR"])
if 'INCAR' in files_to_parse:
incar = Incar.from_file(files_to_parse["INCAR"])
if 'POTCAR' in files_to_parse:
potcar = Potcar.from_file(files_to_parse["POTCAR"])
if 'OSZICAR' in files_to_parse:
oszicar = Oszicar(files_to_parse["OSZICAR"])
if 'DYNMAT' in files_to_parse:
dynmat = Dynmat(files_to_parse["DYNMAT"])
param = {"hubbards":{}}
if poscar is not None and incar is not None and "LDAUU" in incar:
param["hubbards"] = dict(zip(poscar.site_symbols,
incar["LDAUU"]))
param["is_hubbard"] = (
incar.get("LDAU", True) and sum(param["hubbards"].values()) > 0
) if incar is not None else False
param["run_type"] = None
if incar is not None:
param["run_type"] = Vasprun.run_type
# param["history"] = _get_transformation_history(path)
param["potcar_spec"] = potcar.spec if potcar is not None else None
energy = oszicar.final_energy if oszicar is not None else Vasprun.final_energy
structure = contcar.structure if contcar is not None\
else poscar.structure
initial_vol = poscar.structure.volume if poscar is not None else \
None
final_vol = contcar.structure.volume if contcar is not None else \
None
delta_volume = None
if initial_vol is not None and final_vol is not None:
delta_volume = (final_vol / initial_vol - 1)
data = {"filename": path, "delta_volume": delta_volume}
if dynmat is not None:
data['phonon_frequencies'] = dynmat.get_phonon_frequencies()
if self._inc_structure:
entry = ComputedStructureEntry(
structure, energy, parameters=param, data=data
)
else:
entry = ComputedEntry(
structure.composition, energy, parameters=param, data=data
)
return entry
except Exception as ex:
logger.debug("error in {}: {}".format(path, ex))
return None
def __str__(self):
return "SimpleVaspToComputedEntryDrone"
def as_dict(self):
return {"init_args": {"inc_structure": self._inc_structure},
"version": __version__, "@module": self.__class__.__module__,
"@class": self.__class__.__name__}
@classmethod
def from_dict(cls, d):
return cls(**d["init_args"])
class GaussianToComputedEntryDrone(AbstractDrone):
"""
GaussianToEntryDrone assimilates directories containing Gaussian output to
ComputedEntry/ComputedStructureEntry objects. By default, it is assumed
that Gaussian output files have a ".log" extension.
Args:
inc_structure (bool): Set to True if you want
ComputedStructureEntries to be returned instead of
ComputedEntries.
parameters (list): Input parameters to include. It has to be one of
the properties supported by the GaussianOutput object. See
            :class:`pymatgen.io.gaussian.GaussianOutput`. The parameters
            have to be one of Python's primitive types, i.e., list, dict of
strings and integers. If parameters is None, a default set of
parameters will be set.
data (list): Output data to include. Has to be one of the properties
supported by the GaussianOutput object. The parameters have to
            be one of Python's primitive types, i.e. list, dict of strings
and integers. If data is None, a default set will be set.
file_extensions (list):
File extensions to be considered as Gaussian output files.
Defaults to just the typical "log" extension.
.. note::
Like the GaussianOutput class, this is still in early beta.
"""
def __init__(self, inc_structure=False, parameters=None, data=None,
file_extensions=(".log",)):
self._inc_structure = inc_structure
self._parameters = {"functional", "basis_set", "charge",
"spin_multiplicity", "route_parameters"}
if parameters:
self._parameters.update(parameters)
self._data = {"stationary_type", "properly_terminated"}
if data:
self._data.update(data)
self._file_extensions = file_extensions
def assimilate(self, path):
try:
gaurun = GaussianOutput(path)
except Exception as ex:
logger.debug("error in {}: {}".format(path, ex))
return None
param = {}
for p in self._parameters:
param[p] = getattr(gaurun, p)
data = {}
for d in self._data:
data[d] = getattr(gaurun, d)
if self._inc_structure:
entry = ComputedStructureEntry(gaurun.final_structure,
gaurun.final_energy,
parameters=param,
data=data)
else:
entry = ComputedEntry(gaurun.final_structure.composition,
gaurun.final_energy, parameters=param,
data=data)
return entry
def get_valid_paths(self, path):
(parent, subdirs, files) = path
return [os.path.join(parent, f) for f in files
if os.path.splitext(f)[1] in self._file_extensions]
def __str__(self):
return " GaussianToComputedEntryDrone"
def as_dict(self):
return {"init_args": {"inc_structure": self._inc_structure,
"parameters": self._parameters,
"data": self._data,
"file_extensions": self._file_extensions},
"version": __version__, "@module": self.__class__.__module__,
"@class": self.__class__.__name__}
@classmethod
def from_dict(cls, d):
return cls(**d["init_args"])
def _get_transformation_history(path):
"""
Checks for a transformations.json* file and returns the history.
"""
trans_json = glob.glob(os.path.join(path, "transformations.json*"))
if trans_json:
try:
with zopen(trans_json[0]) as f:
return json.load(f)["history"]
        except Exception:
return None
return None
| blondegeek/pymatgen | pymatgen/apps/borg/hive.py | Python | mit | 15,762 | 0.000444 |
#!/usr/bin/env python
#
# BBB-Network-Ammeter
#
# Copyright (c) 2016, Forest Crossman <[email protected]>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from datetime import datetime
from lxml import etree
from flask import Flask, Response
from Adafruit_BBIO import ADC
app = Flask(__name__)
def get_current():
voltage = get_adc_voltage()
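    # Convert the measured ADC voltage to a current. The slope and offset below
    # are presumably the linear calibration constants for this specific sensor.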
current = 109.2 * voltage + 5.3688
return current
def get_adc_voltage():
# Read a value from the ADC
value = ADC.read("P9_39") # AIN0
# Convert the number to a voltage
voltage = value * 1.8
return voltage
@app.route("/sample")
def sample():
voltage = get_adc_voltage()
return Response("{:.03f} V".format(voltage))
@app.route("/probe")
def probe():
'''Generate a response for probe requests'''
mtconnect_schema = "urn:mtconnect.org:MTConnectDevices:1.3"
schema_url = "http://www.mtconnect.org/schemas/MTConnectDevices_1.3.xsd"
xsi = "http://www.w3.org/2001/XMLSchema-instance"
MTConnectDevices = etree.Element("MTConnectDevices",
nsmap={
None: mtconnect_schema,
"xsi": xsi,
"m": mtconnect_schema,
}
)
MTConnectDevices.attrib["{{{pre}}}schemaLocation".format(pre=xsi)] = \
"{schema} {schema_url}".format(schema=mtconnect_schema, schema_url=schema_url)
creation_time = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
Header = etree.SubElement(MTConnectDevices, "Header",
creationTime=creation_time,
instanceId="0",
sender="mtcagent",
bufferSize="0",
version="0.1",
assetCount="1",
)
Devices = etree.SubElement(MTConnectDevices, "Devices")
Device = etree.SubElement(Devices, "Device",
id="dev",
iso841Class="6",
name="currentSensor",
sampleInterval="10",
uuid="0",
)
Description = etree.SubElement(Device, "Description",
manufacturer="RPI MILL",
)
DataItems_0 = etree.SubElement(Device, "DataItems")
DataItem_0 = etree.SubElement(DataItems_0, "DataItem",
category="EVENT",
id="avail",
type="MACHINE_ON",
)
Components_0 = etree.SubElement(Device, "Components")
Axes = etree.SubElement(Components_0, "Axes", id="ax", name="Axes")
Components_1 = etree.SubElement(Axes, "Components")
Linear = etree.SubElement(Components_1, "Linear", id="x1", name="X")
DataItems_1 = etree.SubElement(Linear, "DataItems")
DataItem_1 = etree.SubElement(DataItems_1, "DataItem",
category="SAMPLE",
id="current1",
name="current1",
nativeUnits="AMPERE",
subType="ACTUAL",
type="CURRENT",
units="AMPERE",
)
response = etree.tostring(MTConnectDevices,
pretty_print=True,
xml_declaration=True,
encoding='UTF-8'
)
return Response(response, mimetype='text/xml')
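# Usage sketch (host/port assumed from app.run() at the bottom of this file):
#     curl http://<beaglebone-ip>:5000/probe   -> MTConnect devices XML
#     curl http://<beaglebone-ip>:5000/current -> MTConnect streams XML with the amperage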
@app.route("/current")
def current():
mtconnect_schema = "urn:mtconnect.org:MTConnectStreams:1.3"
schema_url = "http://www.mtconnect.org/schemas/MTConnectStreams_1.3.xsd"
xsi = "http://www.w3.org/2001/XMLSchema-instance"
MTConnectStreams = etree.Element("MTConnectStreams",
nsmap={
None: mtconnect_schema,
"xsi": xsi,
"m": mtconnect_schema,
}
)
MTConnectStreams.attrib["{{{pre}}}schemaLocation".format(pre=xsi)] = \
"{schema} {schema_url}".format(schema=mtconnect_schema, schema_url=schema_url)
creation_time = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
Header = etree.SubElement(MTConnectStreams, "Header",
creationTime=creation_time,
instanceId="0",
sender="mtcagent",
bufferSize="0",
version="0.1",
assetCount="1",
)
Streams = etree.SubElement(MTConnectStreams, "Streams")
DeviceStream = etree.SubElement(Streams, "DeviceStream",
name="VMC-3Axis",
uuid="0",
)
ComponentStream = etree.SubElement(DeviceStream, "ComponentStream",
component="Rotary",
name="C",
componentId="c1",
)
Samples = etree.SubElement(ComponentStream, "Samples")
Current = etree.SubElement(Samples, "Current",
dataItemId="c2",
timestamp=datetime.utcnow().isoformat(),
name="Scurrent",
sequence="8403169415",
subType="ACTUAL",
)
Current.text = "{current:.03f}".format(current=get_current())
Events = etree.SubElement(ComponentStream, "Events")
MachineMode = etree.SubElement(Events, "MachineMode",
dataItemId="machineMode",
timestamp=datetime.utcnow().isoformat(),
name="Cmode",
sequence="18"
)
MachineMode.text = "ON"
response = etree.tostring(MTConnectStreams,
pretty_print=True,
xml_declaration=True,
encoding='UTF-8'
)
return Response(response, mimetype='text/xml')
if __name__ == "__main__":
ADC.setup()
app.run(host='0.0.0.0', debug=False)
| cyrozap/BBB-Network-Ammeter | server.py | Python | isc | 5,648 | 0.011863 |
# (c) 2019, Evgeni Golov <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
class ModuleDocFragment(object):
# Foreman documentation fragment
DOCUMENTATION = '''
requirements:
- requests
options:
server_url:
description:
- URL of the Foreman server.
- If the value is not specified in the task, the value of environment variable C(FOREMAN_SERVER_URL) will be used instead.
required: true
type: str
username:
description:
- Username accessing the Foreman server.
- If the value is not specified in the task, the value of environment variable C(FOREMAN_USERNAME) will be used instead.
required: true
type: str
password:
description:
- Password of the user accessing the Foreman server.
- If the value is not specified in the task, the value of environment variable C(FOREMAN_PASSWORD) will be used instead.
required: true
type: str
validate_certs:
description:
- Whether or not to verify the TLS certificates of the Foreman server.
- If the value is not specified in the task, the value of environment variable C(FOREMAN_VALIDATE_CERTS) will be used instead.
default: true
type: bool
'''
NESTED_PARAMETERS = '''
options:
parameters:
description:
- Entity domain specific host parameters
required: false
type: list
elements: dict
suboptions:
name:
description:
- Name of the parameter
required: true
type: str
value:
description:
- Value of the parameter
required: true
type: raw
parameter_type:
description:
- Type of the parameter
default: 'string'
choices:
- 'string'
- 'boolean'
- 'integer'
- 'real'
- 'array'
- 'hash'
- 'yaml'
- 'json'
type: str
'''
OS_FAMILY = '''
options:
os_family:
description:
- The OS family the entity shall be assigned with.
required: false
choices:
- AIX
- Altlinux
- Archlinux
- Coreos
- Debian
- Freebsd
- Gentoo
- Junos
- NXOS
- Rancheros
- Redhat
- Solaris
- Suse
- Windows
- Xenserver
type: str
'''
TAXONOMY = '''
options:
organizations:
description: List of organizations the entity should be assigned to
type: list
elements: str
locations:
description: List of locations the entity should be assigned to
type: list
elements: str
'''
ENTITY_STATE = '''
options:
state:
description:
- State of the entity
default: present
choices:
- present
- absent
type: str
'''
ENTITY_STATE_WITH_DEFAULTS = '''
options:
state:
description:
- State of the entity
- C(present_with_defaults) will ensure the entity exists, but won't update existing ones
default: present
choices:
- present
- present_with_defaults
- absent
type: str
'''
HOST_OPTIONS = '''
options:
compute_resource:
description: Compute resource name
required: false
type: str
compute_profile:
description: Compute profile name
required: false
type: str
domain:
description: Domain name
required: false
type: str
subnet:
description: IPv4 Subnet name
required: false
type: str
subnet6:
description: IPv6 Subnet name
required: false
type: str
root_pass:
description:
- Root password.
- Will result in the entity always being updated, as the current password cannot be retrieved.
type: str
required: false
realm:
description: Realm name
required: false
type: str
architecture:
description: Architecture name
required: False
type: str
medium:
aliases: [ media ]
description:
- Medium name
- Mutually exclusive with I(kickstart_repository).
required: False
type: str
pxe_loader:
description: PXE Bootloader
required: false
choices:
- PXELinux BIOS
- PXELinux UEFI
- Grub UEFI
- Grub2 BIOS
- Grub2 ELF
- Grub2 UEFI
- Grub2 UEFI SecureBoot
- Grub2 UEFI HTTP
- Grub2 UEFI HTTPS
- Grub2 UEFI HTTPS SecureBoot
- iPXE Embedded
- iPXE UEFI HTTP
- iPXE Chain BIOS
- iPXE Chain UEFI
- None
type: str
ptable:
description: Partition table name
required: False
type: str
environment:
description: Puppet environment name
required: false
type: str
puppetclasses:
description: List of puppet classes to include in this host group. Must exist for hostgroup's puppet environment.
required: false
type: list
elements: str
config_groups:
description: Config groups list
required: false
type: list
elements: str
puppet_proxy:
description: Puppet server proxy name
required: false
type: str
puppet_ca_proxy:
description: Puppet CA proxy name
required: false
type: str
openscap_proxy:
description:
- OpenSCAP proxy name.
- Only available when the OpenSCAP plugin is installed.
required: false
type: str
content_source:
description:
- Content source.
- Only available for Katello installations.
required: false
type: str
lifecycle_environment:
description:
- Lifecycle environment.
- Only available for Katello installations.
required: false
type: str
kickstart_repository:
description:
- Kickstart repository name.
- You need to provide this to use the "Synced Content" feature.
- Mutually exclusive with I(medium).
- Only available for Katello installations.
required: false
type: str
content_view:
description:
- Content view.
- Only available for Katello installations.
required: false
type: str
activation_keys:
description:
- Activation Keys used for deployment.
- Comma separated list.
- Only available for Katello installations.
required: false
type: str
'''
ORGANIZATION = '''
options:
organization:
description:
- Organization that the entity is in
required: true
type: str
'''
SCAP_DATASTREAM = '''
options:
scap_file:
description:
- File containing XML DataStream content.
- Required when creating a new DataStream.
required: false
type: path
original_filename:
description:
- Original file name of the XML file.
- If unset, the filename of I(scap_file) will be used.
required: false
type: str
'''
OPERATINGSYSTEMS = '''
options:
operatingsystems:
description:
- List of operating systems the entity should be assigned to.
- Operating systems are looked up by their title which is composed as "<name> <major>.<minor>".
- You can omit the version part as long as you only have one operating system by that name.
required: false
type: list
elements: str
'''
OPERATINGSYSTEM = '''
options:
operatingsystem:
description:
- Operating systems are looked up by their title which is composed as "<name> <major>.<minor>".
- You can omit the version part as long as you only have one operating system by that name.
type: str
required: False
'''
INFOMODULE = '''
options:
name:
description:
- Name of the resource to fetch information for.
- Mutually exclusive with I(search).
required: false
type: str
location:
description:
- Label of the Location to scope the search for.
required: false
type: str
organization:
description:
- Name of the Organization to scope the search for.
required: false
type: str
search:
description:
- Search query to use
- If None, and I(name) is not set, all resources are returned.
- Mutually exclusive with I(name).
type: str
'''
INFOMODULEWITHOUTNAME = '''
options:
location:
description:
- Label of the Location to scope the search for.
required: false
type: str
organization:
description:
- Name of the Organization to scope the search for.
required: false
type: str
search:
description:
- Search query to use
- If None, all resources are returned.
type: str
'''
KATELLOINFOMODULE = '''
options:
organization:
required: true
'''
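# Usage sketch (module name and values are hypothetical): a module documenting itself
# with these fragments accepts the shared connection options in a playbook task, e.g.
#     - theforeman.foreman.organization:
#         server_url: https://foreman.example.com
#         username: admin
#         password: changeme
#         name: MyOrg
#         state: present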
| ATIX-AG/foreman-ansible-modules | plugins/doc_fragments/foreman.py | Python | gpl-3.0 | 9,183 | 0.001198 |
from __future__ import print_function
import numpy as np
import os
import sys
import time
import tensorflow as tf
import load_datasets as ld
import datetime as dt
import ilt_two_layers as ilt
from sklearn.metrics import mean_squared_error
import tensorflow.python.client
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_boolean('train', False, ' If True, run training & save model, otherwise -- load a previously saved model and evaluate it')
# Multi-GPU settings
flags.DEFINE_integer('num_gpus',2,'Number of GPUs in the system')
flags.DEFINE_string('tower_name','ivy','Tower names')
# Split the training data into batches. Each hurricane is 193 records. Batch sizes are usually 2^k
# When batch size equals to 0, or exceeds available data, use the whole dataset
# Large batch sizes produce more accurate update gradients, but the training is slower
flags.DEFINE_integer('batch_size', 57*193, 'Batch size. Divides evenly into the dataset size of 193')
# Save models in this directory
flags.DEFINE_string('checkpoints_dir', './models/save_two_layers_32_64_sept', 'Directory to store checkpoints')
# Statistics
flags.DEFINE_string('summaries_dir','./logs','Summaries directory')
# Evaluation
# Output dataset
flags.DEFINE_string('output','./test_tracks_out/isabel_test_track_out.dat','Where to write the output data when evaluating the model')
# Input dataset
flags.DEFINE_string('input','./test_tracks/isabel_test_track.dat','Dataset for input')
def fill_feed_dict(data_set, inputs_pl, outputs_pl, train):
"""
Returns feed dictionary for TF.
data_set -- dataset
inputs_pl -- TF placeholder for inputs
outputs_pl -- TF placeholder for outputs
train -- if TRUE, then return DS in batches for training. Otherwise, return complete DS for validation/testing
"""
if train:
batch_size = FLAGS.batch_size
else:
batch_size = 0
# Read next batch of data from the dataset
inputs, outputs = data_set.next_batch(batch_size = batch_size)
# Create dictionary for return
feed_dict = {
inputs_pl: inputs,
outputs_pl: outputs
}
return feed_dict
def tower_loss(x, y_, scope):
"""
Calculate the total loss on a single tower
x, y_ -- inputs and expected outputs
scope -- unique prefix identifying the tower
Note: The graph is created on /cpu:0. The code below reuses the graph
"""
# Run inference and calculate the losses. The losses are stored in the collection
# so skip the returns
outputs = ilt.inference(x)
_ = ilt.loss(outputs, y_)
# Read the losses from the collection and sum them up
losses = tf.get_collection('losses', scope)
total_loss = tf.add_n(losses, name='total_loss')
loss_avg = tf.train.ExponentialMovingAverage(FLAGS.moving_avg_decay, name='avg')
loss_avg_op = loss_avg.apply(losses+[total_loss])
with tf.control_dependencies([loss_avg_op]):
total_loss = tf.identity(total_loss)
return total_loss
def average_gradients(tower_grads):
"""
Calculate the average gradient for each shared variable across all towers
tower_grads -- list of lists of tuples (gradient, variable)
"""
average_grads = []
# zip(*tower_grads) puts grads for each variable together
# grad_and_vars is a tuple of tuples ((grad_gpu1, var1),(grad_gpu2, var1))
for grad_and_vars in zip(*tower_grads):
grads = []
# g each individual gradient
for g, _ in grad_and_vars:
expanded_g = tf.expand_dims(g,0)
grads.append(expanded_g)
# grad average gradient across the gpu's
grad = tf.concat(0,grads)
grad = tf.reduce_mean(grad,0)
# get the variable as the second element from the first tuple
v = grad_and_vars[0][1]
# combine the gradient and append it to the average_grads
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
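# Shape sketch (two GPUs, two variables), assuming the layout produced in train():
#   tower_grads == [[(g0_gpu0, v0), (g1_gpu0, v1)],
#                   [(g0_gpu1, v0), (g1_gpu1, v1)]]
# zip(*tower_grads) then yields ((g0_gpu0, v0), (g0_gpu1, v0)), ... so each iteration
# averages one variable's gradient across all towers.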
def train():
"""
Build the graph and run training on multiple GPU's
"""
# Assign datasets
train_dataset, valid_dataset, test_dataset = ld.read_data_sets()
with tf.Graph().as_default(), tf.device('/cpu:0'):
# Prepare placeholders for inputs and expected outputs
x = tf.placeholder(tf.float32, [None, FLAGS.input_vars], name='x-input') # Note: these are normalized inputs
y_ = tf.placeholder(tf.float32, [None, FLAGS.output_vars], name = 'y-input')
# Create variables for input and output data moments and initialize them with train datasets' moments
input_means = tf.get_variable('input_means', trainable = False,
initializer = tf.convert_to_tensor(train_dataset.input_moments[0]))
input_stds = tf.get_variable('input_stds', trainable = False,
initializer = tf.convert_to_tensor(train_dataset.input_moments[1]))
global_step = tf.get_variable(
'global_step', [],
initializer=tf.constant_initializer(0), trainable=False)
learning_rate = tf.train.exponential_decay(
FLAGS.learning_rate, global_step, FLAGS.max_steps,
FLAGS.learning_rate_decay, staircase=False)
# create a standard gradient descent optimizer
optimizer = tf.train.AdamOptimizer(learning_rate)
# tower_grads -- list of gradients (list of list of tuples like (grad1, var1))
tower_grads = []
for i in xrange(FLAGS.num_gpus):
with tf.device('/gpu:%d'%i): # make sure TF runs the code on the GPU:%d tower
with tf.name_scope('%s_%d' % (FLAGS.tower_name, i)) as scope:
# Construct the entire ANN, but share the vars across the towers
loss = tower_loss(x, y_, scope)
# Make sure that the vars are reused for the next tower
tf.get_variable_scope().reuse_variables()
#summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
# calculate the gradients and add them to the list
grads = optimizer.compute_gradients(loss)
tower_grads.append(grads)
# Add this here in case we need to get outputs after training is complete
outputs = ilt.inference(x)
#summaries.append(tf.scalar_summary('MSE',loss))
# calculate average gradients & apply gradients to the model
grads, v = zip(*average_gradients(tower_grads))
grads, _ = tf.clip_by_global_norm(grads, 1.25)
apply_gradient_op = optimizer.apply_gradients(zip(grads,v), global_step = global_step)
#for grad, var in grads:
#if grad is not None:
#summaries.append(tf.histogram_summary(var.op.name+'/gradients', grad))
#summaries.append(tf.scalar_summary(var.op.name+'/sparsity',tf.nn.zero_fraction(var)))
variable_averages = tf.train.ExponentialMovingAverage(
FLAGS.moving_avg_decay, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
train_op = tf.group(apply_gradient_op, variables_averages_op)
train_op = apply_gradient_op
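        # Note: this re-assignment discards the grouped op above, so only the
        # gradient update runs and the moving-average update is effectively skipped.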
#merged = tf.merge_all_summaries()
init = tf.initialize_all_variables()
sess = tf.Session(config = tf.ConfigProto(
allow_soft_placement = True, # allows to utilize GPU's & CPU's
log_device_placement = False)) # shows GPU/CPU allocation
# Prepare folders for saving models and its stats
#date_time_stamp = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
#train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir+'/train/'+date_time_stamp) #,sess.graph)
#test_writer = tf.train.SummaryWriter(FLAGS.summaries_dir+'/validation/'+date_time_stamp)
saver = tf.train.Saver(tf.all_variables())
# Below is the code for running graph
sess.run(init)
tf.train.start_queue_runners(sess=sess)
valid_loss = 1.0
train_loss = 1.0
train_losses = 0
num_steps = 0
# Main training loop
for step in xrange(FLAGS.max_steps):
start_time = time.time()
# regular training
feed_dict = fill_feed_dict(train_dataset, x, y_, train = True)
#_, train_loss, summary, lr = sess.run([train_op, loss, merged, learning_rate], feed_dict=feed_dict)
_, train_loss, lr = sess.run([train_op, loss, learning_rate], feed_dict=feed_dict)
duration = time.time()-start_time
#train_writer.add_summary(summary,step)
train_losses += train_loss
num_steps += 1
if step%(FLAGS.max_steps//20) == 0:
# check model fit
feed_dict = fill_feed_dict(valid_dataset, x, y_, train = False)
#valid_loss, summary = sess.run([loss, merged], feed_dict = feed_dict)
valid_loss = sess.run(loss, feed_dict = feed_dict)
#test_writer.add_summary(summary,step)
print('Step %d (%.2f op/sec): Training loss: %.5f, Validation loss: %.5f' % (
step, 1.0/duration, np.float32(train_losses/num_steps).item(), np.float32(valid_loss).item()))
train_losses = 0
num_steps = 0
checkpoint_path = os.path.join(FLAGS.checkpoints_dir,'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
print("Training summary: ")
feed_dict = fill_feed_dict(test_dataset, x, y_, train = False)
test_loss = sess.run([loss], feed_dict = feed_dict)
print('Test MSE: %.5f' % (np.float32(test_loss).item()))
outs = outputs.eval(session=sess, feed_dict = feed_dict)
for out_no in range(0,FLAGS.output_vars):
print("Location %d: CC: %.4f, MSE: %.6f"%(
out_no,
np.corrcoef(outs[:,out_no], test_dataset.outputs[:,out_no])[0,1],
mean_squared_error(outs[:,out_no], test_dataset.outputs[:,out_no])))
sess.close()
def run():
"""
Finish building the graph and run it at the default device (CPU or GPU)
"""
# Assign datasets
test_ds = np.loadtxt(FLAGS.input)[:,1:7].reshape((-1, 6)).astype(np.float32)
with tf.Graph().as_default(), tf.device('/cpu:0'):
# Prepare placeholders for inputs and expected outputs
x = tf.placeholder(tf.float32, [None, FLAGS.input_vars], name='x-input')
input_means = tf.get_variable('input_means', shape=[FLAGS.input_vars], trainable = False)
input_stds = tf.get_variable('input_stds', shape=[FLAGS.input_vars], trainable = False)
# Normalize input data
        # Here, the data is not normalized, so normalize it using the saved model's moments before running
x_normalized = tf.div(tf.sub(x,input_means),input_stds)
outputs = ilt.inference(x_normalized)
init = tf.initialize_all_variables()
sess = tf.Session(config = tf.ConfigProto(
allow_soft_placement = False, # allows to utilize GPU's & CPU's
log_device_placement = False)) # shows GPU/CPU allocation
start_time = time.time()
# Below is the code for running graph
sess.run(init)
saver = tf.train.Saver()
ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoints_dir)
if ckpt != None and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print("Model %s restored"%ckpt.model_checkpoint_path)
else:
print("Could not find any checkpoints at %s"%FLAGS.checkpoints_dir)
return
tf.train.start_queue_runners(sess=sess)
out = sess.run(outputs, feed_dict = {x:test_ds})
duration = time.time()-start_time
print('Elapsed time: %.2f sec.' % (duration))
np.savetxt(FLAGS.output,out)
print('Outputs saved as %s'%FLAGS.output)
sess.close()
def main(argv):
if(FLAGS.train):
if tf.gfile.Exists(FLAGS.summaries_dir):
tf.gfile.DeleteRecursively(FLAGS.summaries_dir)
tf.gfile.MakeDirs(FLAGS.summaries_dir)
if tf.gfile.Exists(FLAGS.checkpoints_dir):
tf.gfile.DeleteRecursively(FLAGS.checkpoints_dir)
tf.gfile.MakeDirs(FLAGS.checkpoints_dir)
train()
else:
if tf.gfile.Exists(FLAGS.output+'*'):
tf.gfile.DeleteRecursively(FLAGS.output+'*')
run()
if __name__ == "__main__":
main(sys.argv)
| abezuglov/ANN | Storm-Surge/code/ilt_multi_gpu_feed.py | Python | gpl-3.0 | 12,586 | 0.014063 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 The Simbiose Ventures Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library that provides a Python client to Slicing Dice API"""
import ujson
from . import exceptions
from .api import SlicingDiceAPI
from .url_resources import URLResources
from .utils import validators
class SlicingDice(SlicingDiceAPI):
"""A python interface to Slicing Dice API
Example usage:
To create an object of the SlicingDice:
from pyslicer.api import SlicingDice
sd = SlicingDice('my-token')
To create a column:
column_json = {
'name': 'Pyslicer String Column',
'description': 'Pyslicer example description',
'type': 'string',
'cardinality': 'low'}
print sd.create_column(column_json)
To make a query:
query_json = {
'type': 'count',
'select': [
{
"pyslicer-string-column":
{
"equal": "test_value_1"
}
},
"or",
{
"pyslicer-string-column":
{
"equal": "test_value_2"
}
},
]
}
print sd.query(query_json)
To insert data:
inserting_json = {
'[email protected]': {
'pyslicer-string-column': 'test_value_1',
'pyslicer-integer-column': 42,
},
'[email protected]': {
'pyslicer-string-column': 'test_value_2',
'pyslicer-integer-column': 42,
},
}
print sd.insert(inserting_json)
"""
def __init__(
self, write_key=None, read_key=None, master_key=None,
custom_key=None, use_ssl=True, timeout=60):
"""Instantiate a new SlicingDice object.
Keyword arguments:
key(string or SlicerKey obj) -- Key to access API
        use_ssl(bool) -- Define if the request verifies the SSL certificate
            for HTTPS requests. Defaults to True. (Optional)
        timeout(int) -- Define the request timeout in seconds.
            Defaults to 60. (Optional)
"""
super(SlicingDice, self).__init__(
master_key, write_key, read_key, custom_key, use_ssl, timeout)
def _count_query_wrapper(self, url, query):
"""Validate count query and make request.
Keyword arguments:
url(string) -- Url to make request
query(dict) -- A count query
"""
sd_count_query = validators.QueryCountValidator(query)
if sd_count_query.validator():
return self._make_request(
url=url,
json_data=ujson.dumps(query),
req_type="post",
key_level=0)
def _data_extraction_wrapper(self, url, query):
"""Validate data extraction query and make request.
Keyword arguments:
url(string) -- Url to make request
query(dict) -- A data extraction query
"""
sd_extraction_result = validators.QueryDataExtractionValidator(query)
if sd_extraction_result.validator():
return self._make_request(
url=url,
json_data=ujson.dumps(query),
req_type="post",
key_level=0)
def _saved_query_wrapper(self, url, query, update=False):
"""Validate saved query and make request.
Keyword arguments:
url(string) -- Url to make request
query(dict) -- A saved query
update(bool) -- Indicates with operation is update a
saved query or not.(default false)
"""
req_type = "post"
if update:
req_type = "put"
return self._make_request(
url=url,
json_data=ujson.dumps(query),
req_type=req_type,
key_level=2)
def get_database(self):
"""Get a database associated with this client (related to keys passed
on construction)"""
url = SlicingDice.BASE_URL + URLResources.DATABASE
return self._make_request(
url=url,
req_type="get",
key_level=2
)
def create_column(self, data):
"""Create column in Slicing Dice
Keyword arguments:
data -- A dictionary or list on the Slicing Dice column
format.
"""
sd_data = validators.ColumnValidator(data)
if sd_data.validator():
url = SlicingDice.BASE_URL + URLResources.COLUMN
return self._make_request(
url=url,
req_type="post",
json_data=ujson.dumps(data),
key_level=1)
def get_columns(self):
"""Get a list of columns"""
url = SlicingDice.BASE_URL + URLResources.COLUMN
return self._make_request(
url=url,
req_type="get",
key_level=2)
def insert(self, data):
"""Insert data into Slicing Dice API
Keyword arguments:
data -- A dictionary in the Slicing Dice data format
format.
"""
sd_data = validators.InsertValidator(data)
if sd_data.validator():
url = SlicingDice.BASE_URL + URLResources.INSERT
return self._make_request(
url=url,
json_data=ujson.dumps(data),
req_type="post",
key_level=1)
def count_entity(self, query):
"""Make a count entity query
Keyword arguments:
        query -- A dictionary in the Slicing Dice query format
"""
url = SlicingDice.BASE_URL + URLResources.QUERY_COUNT_ENTITY
return self._count_query_wrapper(url, query)
def count_entity_total(self, dimensions=None):
"""Make a count entity total query
Keyword arguments:
dimensions -- A dictionary containing the dimensions in which
the total query will be performed
"""
query = {}
if dimensions is not None:
query['dimensions'] = dimensions
url = SlicingDice.BASE_URL + URLResources.QUERY_COUNT_ENTITY_TOTAL
return self._make_request(
url=url,
req_type="post",
json_data=ujson.dumps(query),
key_level=0)
def count_event(self, query):
"""Make a count event query
Keyword arguments:
data -- A dictionary query
"""
url = SlicingDice.BASE_URL + URLResources.QUERY_COUNT_EVENT
return self._count_query_wrapper(url, query)
def aggregation(self, query):
"""Make a aggregation query
Keyword arguments:
query -- An aggregation query
"""
url = SlicingDice.BASE_URL + URLResources.QUERY_AGGREGATION
if "query" not in query:
raise exceptions.InvalidQueryException(
"The aggregation query must have up the key 'query'.")
columns = query["query"]
if len(columns) > 5:
raise exceptions.MaxLimitException(
"The aggregation query must have up to 5 columns per request.")
return self._make_request(
url=url,
json_data=ujson.dumps(query),
req_type="post",
key_level=0)
def top_values(self, query):
"""Make a top values query
Keyword arguments:
query -- A dictionary query
"""
url = SlicingDice.BASE_URL + URLResources.QUERY_TOP_VALUES
sd_query_top_values = validators.QueryValidator(query)
if sd_query_top_values.validator():
return self._make_request(
url=url,
json_data=ujson.dumps(query),
req_type="post",
key_level=0)
def exists_entity(self, ids, dimension=None):
"""Make a exists entity query
Keyword arguments:
ids -- A list with entities to check if exists
dimension -- In which dimension entities check be checked
"""
url = SlicingDice.BASE_URL + URLResources.QUERY_EXISTS_ENTITY
if len(ids) > 100:
raise exceptions.MaxLimitException(
"The query exists entity must have up to 100 ids.")
query = {
'ids': ids
}
if dimension:
query['dimension'] = dimension
return self._make_request(
url=url,
json_data=ujson.dumps(query),
req_type="post",
key_level=0)
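    # Usage sketch (ids and dimension are hypothetical):
    #     sd.exists_entity(["user1@slicingdice.com", "user2@slicingdice.com"],
    #                      dimension="users")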
def get_saved_query(self, query_name):
"""Get a saved query
Keyword arguments:
query_name(string) -- The name of the saved query
"""
url = SlicingDice.BASE_URL + URLResources.QUERY_SAVED + query_name
return self._make_request(
url=url,
req_type="get",
key_level=0)
def get_saved_queries(self):
"""Get all saved queries
Keyword arguments:
query_name(string) -- The name of the saved query
"""
url = SlicingDice.BASE_URL + URLResources.QUERY_SAVED
return self._make_request(
url=url,
req_type="get",
key_level=2)
def delete_saved_query(self, query_name):
"""Delete a saved query
Keyword arguments:
query_name(string) -- The name of the saved query
"""
url = SlicingDice.BASE_URL + URLResources.QUERY_SAVED + query_name
return self._make_request(
url=url,
req_type="delete",
key_level=2
)
def create_saved_query(self, query):
"""Get a list of queries saved
Keyword arguments:
query -- A dictionary query
"""
url = SlicingDice.BASE_URL + URLResources.QUERY_SAVED
return self._saved_query_wrapper(url, query)
def update_saved_query(self, name, query):
"""Get a list of queries saved
Keyword arguments:
name -- The name of the saved query to update
query -- A dictionary query
"""
url = SlicingDice.BASE_URL + URLResources.QUERY_SAVED + name
return self._saved_query_wrapper(url, query, True)
def result(self, query):
"""Get a data extraction result
Keyword arguments:
query -- A dictionary query
"""
url = SlicingDice.BASE_URL + URLResources.QUERY_DATA_EXTRACTION_RESULT
return self._data_extraction_wrapper(url, query)
def score(self, query):
"""Get a data extraction score
Keyword arguments:
query -- A dictionary query
"""
url = SlicingDice.BASE_URL + URLResources.QUERY_DATA_EXTRACTION_SCORE
return self._data_extraction_wrapper(url, query)
def sql(self, query):
""" Make a sql query to SlicingDice
:param query: the query written in SQL format
:return: The response from the SlicingDice
"""
url = SlicingDice.BASE_URL + URLResources.QUERY_SQL
return self._make_request(
url=url,
string_data=query,
req_type="post",
key_level=0,
content_type='application/sql')
def delete(self, query):
"""Make a delete request
Keyword arguments:
query -- The query that represents the data to be deleted
"""
url = SlicingDice.BASE_URL + URLResources.DELETE
return self._make_request(
url=url,
json_data=ujson.dumps(query),
req_type="post",
key_level=2)
def update(self, query):
"""Make a update request
Keyword arguments:
query -- The query that represents the data to be updated
"""
url = SlicingDice.BASE_URL + URLResources.UPDATE
return self._make_request(
url=url,
json_data=ujson.dumps(query),
req_type="post",
key_level=2)
| SlicingDice/slicingdice-python | pyslicer/client.py | Python | mit | 12,918 | 0.000077 |
### Copyright (C) 2009 Vincent Legoll <[email protected]>
### Redistribution and use in source and binary forms, with or without
### modification, are permitted provided that the following conditions
### are met:
###
### 1. Redistributions of source code must retain the above copyright
### notice, this list of conditions and the following disclaimer.
### 2. Redistributions in binary form must reproduce the above copyright
### notice, this list of conditions and the following disclaimer in the
### documentation and/or other materials provided with the distribution.
### THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
### IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
### OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
### IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
### INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
### NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
### DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
### THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
### (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
### THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import _vc
import svn
class Vc(svn.Vc):
CMD = "cdv"
NAME = "Codeville"
VC_DIR = ".cdv"
PATCH_INDEX_RE = "^[+]{3} (.+)$"
state_map = {"M": _vc.STATE_MODIFIED,}
def get_working_directory(self, workdir):
return self.root
def _get_matches(self, directory):
entries = _vc.popen([self.CMD, "status"], cwd=directory).read()
for line in entries.split("\n")[1:-1]:
yield line[3:], line[0], ""
| oliver/meld | vc/cdv.py | Python | gpl-2.0 | 1,787 | 0.012311 |
# -*- coding: utf-8 -*-
"""Additional helper functions for the optlang solvers.
All functions integrate well with the context manager, meaning that
all operations defined here are automatically reverted when used in a
`with model:` block.
The functions defined here together with the existing model functions should
allow you to implement custom flux analysis methods with ease.
"""
from __future__ import absolute_import
import re
from functools import partial
from collections import namedtuple
from types import ModuleType
from warnings import warn
import optlang
from optlang.symbolics import Basic, Zero
from cobra.exceptions import OptimizationError, OPTLANG_TO_EXCEPTIONS_DICT
from cobra.util.context import get_context
class SolverNotFound(Exception):
"""A simple Exception when a solver can not be found."""
pass
# Define all the solvers that are found in optlang.
solvers = {match.split("_")[0]: getattr(optlang, match)
for match in dir(optlang) if "_interface" in match}
# Defines all the QP solvers implemented in optlang.
qp_solvers = ["cplex"] # QP in gurobi not implemented yet
def linear_reaction_coefficients(model, reactions=None):
"""Coefficient for the reactions in a linear objective.
Parameters
----------
model : cobra model
the model object that defined the objective
reactions : list
an optional list for the reactions to get the coefficients for. All
reactions if left missing.
Returns
-------
dict
A dictionary where the key is the reaction object and the value is
the corresponding coefficient. Empty dictionary if there are no
linear terms in the objective.
"""
linear_coefficients = {}
reactions = model.reactions if not reactions else reactions
try:
objective_expression = model.solver.objective.expression
coefficients = objective_expression.as_coefficients_dict()
except AttributeError:
return linear_coefficients
for rxn in reactions:
forward_coefficient = coefficients.get(rxn.forward_variable, 0)
reverse_coefficient = coefficients.get(rxn.reverse_variable, 0)
if forward_coefficient != 0:
if forward_coefficient == -reverse_coefficient:
linear_coefficients[rxn] = float(forward_coefficient)
return linear_coefficients
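# Usage sketch (reaction name is hypothetical): for a model maximizing biomass,
#     linear_reaction_coefficients(model)
# would return something like {<Reaction Biomass_Ecoli_core>: 1.0}.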
def _valid_atoms(model, expression):
"""Check whether a sympy expression references the correct variables.
Parameters
----------
model : cobra.Model
The model in which to check for variables.
expression : sympy.Basic
A sympy expression.
Returns
-------
boolean
True if all referenced variables are contained in model, False
otherwise.
"""
atoms = expression.atoms(optlang.interface.Variable)
return all(a.problem is model.solver for a in atoms)
def set_objective(model, value, additive=False):
"""Set the model objective.
Parameters
----------
model : cobra model
The model to set the objective for
value : model.problem.Objective,
e.g. optlang.glpk_interface.Objective, sympy.Basic or dict
If the model objective is linear, the value can be a new Objective
object or a dictionary with linear coefficients where each key is a
reaction and the element the new coefficient (float).
If the objective is not linear and `additive` is true, only values
of class Objective.
additive : bool
If true, add the terms to the current objective, otherwise start with
an empty objective.
"""
interface = model.problem
reverse_value = model.solver.objective.expression
reverse_value = interface.Objective(
reverse_value, direction=model.solver.objective.direction,
sloppy=True)
if isinstance(value, dict):
if not model.objective.is_Linear:
raise ValueError('can only update non-linear objectives '
'additively using object of class '
'model.problem.Objective, not %s' %
type(value))
if not additive:
model.solver.objective = interface.Objective(
Zero, direction=model.solver.objective.direction)
for reaction, coef in value.items():
model.solver.objective.set_linear_coefficients(
{reaction.forward_variable: coef,
reaction.reverse_variable: -coef})
elif isinstance(value, (Basic, optlang.interface.Objective)):
if isinstance(value, Basic):
value = interface.Objective(
value, direction=model.solver.objective.direction,
sloppy=False)
# Check whether expression only uses variables from current model
# clone the objective if not, faster than cloning without checking
if not _valid_atoms(model, value.expression):
value = interface.Objective.clone(value, model=model.solver)
if not additive:
model.solver.objective = value
else:
model.solver.objective += value.expression
else:
raise TypeError(
'%r is not a valid objective for %r.' % (value, model.solver))
context = get_context(model)
if context:
def reset():
model.solver.objective = reverse_value
model.solver.objective.direction = reverse_value.direction
context(reset)
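# Usage sketch (reaction names are hypothetical): replace the objective, or add a
# term inside a reversible context:
#     set_objective(model, {model.reactions.ATPM: 1.0})
#     with model:
#         set_objective(model, {model.reactions.EX_glc__D_e: 0.5}, additive=True)
#     # the additive change is reverted when the context exits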
def interface_to_str(interface):
"""Give a string representation for an optlang interface.
Parameters
----------
interface : string, ModuleType
Full name of the interface in optlang or cobra representation.
For instance 'optlang.glpk_interface' or 'optlang-glpk'.
Returns
-------
string
The name of the interface as a string
"""
if isinstance(interface, ModuleType):
interface = interface.__name__
return re.sub(r"optlang.|.interface", "", interface)
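# e.g. interface_to_str("optlang.glpk_interface") == "glpk"
#      interface_to_str("optlang-glpk") == "glpk"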
def get_solver_name(mip=False, qp=False):
"""Select a solver for a given optimization problem.
Parameters
----------
mip : bool
Does the solver require mixed integer linear programming capabilities?
qp : bool
Does the solver require quadratic programming capabilities?
Returns
-------
string
The name of feasible solver.
Raises
------
SolverNotFound
If no suitable solver could be found.
"""
if len(solvers) == 0:
raise SolverNotFound("no solvers installed")
# Those lists need to be updated as optlang implements more solvers
mip_order = ["gurobi", "cplex", "glpk"]
lp_order = ["glpk", "cplex", "gurobi"]
qp_order = ["cplex"]
if mip is False and qp is False:
for solver_name in lp_order:
if solver_name in solvers:
return solver_name
# none of them are in the list order - so return the first one
return list(solvers)[0]
elif qp: # mip does not yet matter for this determination
for solver_name in qp_order:
if solver_name in solvers:
return solver_name
raise SolverNotFound("no qp-capable solver found")
else:
for solver_name in mip_order:
if solver_name in solvers:
return solver_name
raise SolverNotFound("no mip-capable solver found")
def choose_solver(model, solver=None, qp=False):
"""Choose a solver given a solver name and model.
This will choose a solver compatible with the model and required
capabilities. Also respects model.solver where it can.
Parameters
----------
model : a cobra model
The model for which to choose the solver.
solver : str, optional
The name of the solver to be used. Optlang solvers should be prefixed
by "optlang-", for instance "optlang-glpk".
qp : boolean, optional
Whether the solver needs Quadratic Programming capabilities.
Returns
-------
legacy : boolean
Whether the returned solver is a legacy (old cobra solvers) version or
an optlang solver (legacy = False).
solver : a cobra or optlang solver interface
Returns a valid solver for the problem. May be a cobra solver or an
optlang interface.
Raises
------
SolverNotFound
If no suitable solver could be found.
"""
legacy = False
if solver is None:
solver = model.problem
elif "optlang-" in solver:
solver = interface_to_str(solver)
solver = solvers[solver]
else:
legacy = True
solver = legacy_solvers.solver_dict[solver]
# Check for QP, raise error if no QP solver found
# optlang only since old interface interprets None differently
if qp and interface_to_str(solver) not in qp_solvers:
solver = solvers[get_solver_name(qp=True)]
return legacy, solver
def add_cons_vars_to_problem(model, what, **kwargs):
"""Add variables and constraints to a Model's solver object.
Useful for variables and constraints that can not be expressed with
reactions and lower/upper bounds. Will integrate with the Model's context
manager in order to revert changes upon leaving the context.
Parameters
----------
model : a cobra model
The model to which to add the variables and constraints.
what : list or tuple of optlang variables or constraints.
The variables or constraints to add to the model. Must be of class
`model.problem.Variable` or
`model.problem.Constraint`.
**kwargs : keyword arguments
passed to solver.add()
"""
context = get_context(model)
model.solver.add(what, **kwargs)
if context:
context(partial(model.solver.remove, what))
def remove_cons_vars_from_problem(model, what):
"""Remove variables and constraints from a Model's solver object.
Useful to temporarily remove variables and constraints from a Models's
solver object.
Parameters
----------
model : a cobra model
The model from which to remove the variables and constraints.
what : list or tuple of optlang variables or constraints.
The variables or constraints to remove from the model. Must be of
class `model.problem.Variable` or
`model.problem.Constraint`.
"""
context = get_context(model)
model.solver.remove(what)
if context:
context(partial(model.solver.add, what))
def add_absolute_expression(model, expression, name="abs_var", ub=None,
difference=0, add=True):
"""Add the absolute value of an expression to the model.
Also defines a variable for the absolute value that can be used in other
objectives or constraints.
Parameters
----------
model : a cobra model
The model to which to add the absolute expression.
expression : A sympy expression
Must be a valid expression within the Model's solver object. The
absolute value is applied automatically on the expression.
name : string
The name of the newly created variable.
ub : positive float
The upper bound for the variable.
difference : positive float
The difference between the expression and the variable.
add : bool
Whether to add the variable to the model at once.
Returns
-------
namedtuple
A named tuple with variable and two constraints (upper_constraint,
lower_constraint) describing the new variable and the constraints
that assign the absolute value of the expression to it.
"""
Components = namedtuple('Components', ['variable', 'upper_constraint',
'lower_constraint'])
variable = model.problem.Variable(name, lb=0, ub=ub)
# The following constraints enforce variable > expression and
# variable > -expression
upper_constraint = model.problem.Constraint(expression - variable,
ub=difference,
name="abs_pos_" + name),
lower_constraint = model.problem.Constraint(expression + variable,
lb=difference,
name="abs_neg_" + name)
to_add = Components(variable, upper_constraint, lower_constraint)
if add:
add_cons_vars_to_problem(model, to_add)
return to_add
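# Illustrative sketch (not part of the original module): bounding the absolute difference
# between two fluxes. The reaction identifiers "R1" and "R2" are hypothetical.
def _example_bound_flux_difference(model):
    expression = (model.reactions.get_by_id("R1").flux_expression -
                  model.reactions.get_by_id("R2").flux_expression)
    components = add_absolute_expression(model, expression,
                                         name="abs_diff_R1_R2", ub=10.0)
    return components.variable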
def fix_objective_as_constraint(model, fraction=1, bound=None,
name='fixed_objective_{}'):
"""Fix current objective as an additional constraint.
When adding constraints to a model, such as done in pFBA which
minimizes total flux, these constraints can become too powerful,
    resulting in solutions that satisfy optimality but sacrifice too
much for the original objective function. To avoid that, we can fix
the current objective value as a constraint to ignore solutions that
give a lower (or higher depending on the optimization direction)
objective value than the original model.
When done with the model as a context, the modification to the
objective will be reverted when exiting that context.
Parameters
----------
model : cobra.Model
The model to operate on
fraction : float
The fraction of the optimum the objective is allowed to reach.
bound : float, None
The bound to use instead of fraction of maximum optimal value. If
not None, fraction is ignored.
name : str
Name of the objective. May contain one `{}` placeholder which is filled
with the name of the old objective.
"""
fix_objective_name = name.format(model.objective.name)
if fix_objective_name in model.constraints:
model.solver.remove(fix_objective_name)
if bound is None:
bound = model.slim_optimize(error_value=None) * fraction
if model.objective.direction == 'max':
ub, lb = None, bound
else:
ub, lb = bound, None
constraint = model.problem.Constraint(
model.objective.expression,
name=fix_objective_name, ub=ub, lb=lb)
add_cons_vars_to_problem(model, constraint, sloppy=True)
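# Illustrative sketch (not part of the original module): a pFBA-style workflow that keeps
# at least 95% of the optimal objective before switching to a secondary objective.
def _example_fix_objective(model):
    with model:
        fix_objective_as_constraint(model, fraction=0.95)
        # a new objective (e.g. minimizing total flux) could now be assigned to
        # model.objective without giving up more than 5% of the original optimum
        return model.slim_optimize()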
def check_solver_status(status, raise_error=False):
"""Perform standard checks on a solver's status."""
if status == optlang.interface.OPTIMAL:
return
elif status == optlang.interface.INFEASIBLE and not raise_error:
warn("solver status is '{}'".format(status), UserWarning)
elif status is None:
raise RuntimeError(
"model was not optimized yet or solver context switched")
else:
raise OptimizationError("solver status is '{}'".format(status))
def assert_optimal(model, message='optimization failed'):
"""Assert model solver status is optimal.
Do nothing if model solver status is optimal, otherwise throw
appropriate exception depending on the status.
Parameters
----------
model : cobra.Model
The model to check the solver status for.
message : str (optional)
        Message for the exception if the solver status was not optimal.
"""
if model.solver.status != optlang.interface.OPTIMAL:
raise OPTLANG_TO_EXCEPTIONS_DICT[model.solver.status](message)
import cobra.solvers as legacy_solvers # noqa
| zakandrewking/cobrapy | cobra/util/solver.py | Python | lgpl-2.1 | 15,435 | 0 |
#!/usr/bin/python
#-*- coding: utf-8 -*-
import cPickle as pickle, time, string
from SPARQLWrapper import SPARQLWrapper, JSON
import rdflib as r, pygraphviz as gv
import pylab as pl
# main variables:
# classes (kk), props,
# vizinhanca_ (neighborhood structure of the classes)
T=time.time()
U=r.URIRef
def fazQuery(query):
NOW=time.time()
#sparql = SPARQLWrapper("http://200.144.255.210:8082/cidadedemocratica/query")
sparql = SPARQLWrapper("http://200.144.255.210:8082/cd/query")
sparql.setQuery(PREFIX+query)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print time.time()-NOW
return results["results"]["bindings"]
g=r.Graph()
def G(S,P,O):
global g
g.add((S,P,O))
owl = r.namespace.OWL
rdf = r.namespace.RDF
rdfs = r.namespace.RDFS
ocd = r.Namespace("http://purl.org/socialparticipation/ocd/")
xsd = r.namespace.XSD
notFunctionalProperties=["tagged","contact","supporter"]
notFunctionalProperties_=[ocd+i for i in notFunctionalProperties]
####
# Outline of the steps used to build the ontology from the data
# (data-driven ontology)
# 0) Triplify the data as done in triplificaCD.py
# use minimal names for properties and classes, such as :body or :name; classes like
# commentBody or userName should be avoided
# during triplification. They can be observed and added
# while the ontology is being derived.
# DONE
# 0.5) Put the triplified data into a SPARQL endpoint to run the queries needed
# to derive the ontology.
# DONE
# 1) Retrieve all classes
# ?o where { ?s rdf:type ?o }
# with a few exceptions
PREFIX="""PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX ops: <http://purl.org/socialparticipation/ops#>
PREFIX opa: <http://purl.org/socialparticipation/opa#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX dc: <http://purl.org/dc/elements/1.1/>
PREFIX dct: <http://purl.org/dc/terms/>
PREFIX dcty: <http://purl.org/dc/dcmitype/>
PREFIX tsioc: <http://rdfs.org/sioc/types#>
PREFIX sioc: <http://rdfs.org/sioc/ns#>
PREFIX schema: <http://schema.org/>
PREFIX aa: <http://purl.org/socialparticipation/aa/>
PREFIX ocd: <http://purl.org/socialparticipation/ocd/>"""
# CHECK TTM
q="SELECT DISTINCT ?o WHERE {?s rdf:type ?o}"
NOW=time.time()
sparql = SPARQLWrapper("http://200.144.255.210:8082/cd/query")
sparql.setQuery(PREFIX+q)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print("%.2f segundos para puxar todas as classes"%
(time.time()-NOW,))
classes=[i["o"]["value"] for i in results["results"]["bindings"] if "w3.org" not in i["o"]["value"]]
# 2) Retrieve all properties
# ?p where { ?s ?p ?o. }
# with a few exceptions
q="SELECT DISTINCT ?p WHERE {?s ?p ?o}"
NOW=time.time()
sparql = SPARQLWrapper("http://200.144.255.210:8082/cd/query")
sparql.setQuery(PREFIX+q)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print("%.2f segundos para puxar todas as propriedades"%
(time.time()-NOW,))
props=[i["p"]["value"] for i in results["results"]["bindings"] if "w3.org" not in i["p"]["value"]]
props_=[i.split("/")[-1] for i in props]
# 3) Build a structure and a figure for each class:
# the class in the middle, literal data on the left, classes on the right.
# For each class and for each individual of the class,
# look at the relations the individual takes part in as
# subject and as object. Note the property and the datatype
# at the far end.
# Store the relationship structure of the class.
vizinhanca={}
vizinhanca_={}
for classe in classes:
#res=fazQuery("SELECT DISTINCT ?p (datatype(?o) as ?do) WHERE { ?i a <%s> . ?i ?p ?o }"%(classe,))
NOW=time.time()
print("\n%s antecedente, consequente: "%(classe.split("/")[-1],))
ant=fazQuery("SELECT DISTINCT ?p ?cs (datatype(?s) as ?ds) WHERE { ?i a <%s> . ?s ?p ?i . OPTIONAL { ?s a ?cs . } }"%(classe,))
ant_=[]
for aa in ant:
if "cs" in aa.keys():
tobj=aa["cs"]["value"]
ant_.append((tobj,aa["p"]["value"]))
elif (("ds" in aa.keys()) and ("w3.org" not in aa["p"]["value"])):
tobj=aa["ds"]["value"]
ant_.append((tobj,aa["p"]["value"]))
cons=fazQuery("SELECT DISTINCT ?p ?co (datatype(?o) as ?do) WHERE { ?i a <%s> . ?i ?p ?o . OPTIONAL { ?o a ?co . } }"%(classe,))
cons_=[]
for cc in cons:
if "co" in cc.keys():
tobj=cc["co"]["value"]
cons_.append((cc["p"]["value"],tobj))
elif (("do" in cc.keys()) and ("w3.org" not in cc["p"]["value"])):
tobj=cc["do"]["value"]
cons_.append((cc["p"]["value"],tobj))
elif "/mbox" in cc["p"]["value"]:
tobj="XMLSchema#anyURI"
cons_.append((cc["p"]["value"],tobj))
vizinhanca[classe]=(ant,cons)
vizinhanca_[classe]=(ant_,cons_)
f=open("dumpVV.pickle","wb")
vv=(vizinhanca,vizinhanca_)
pickle.dump(vv,f)
f.close()
fo=open("dumpVV.pickle","rb")
vv_=pickle.load(fo)
fo.close()
kk=vv_[1].keys()
for tkey in kk:
cl=tkey
cl_=cl.split("/")[-1]
print cl_
ex=vv_[1][cl]
A=gv.AGraph(directed=True)
A.graph_attr["label"]=("classe: %s, no namespace interno: http://purl.org/socialparticipation/ocd/"%(cl_,))
    for i in xrange(len(ex[0])): # antecedents
label=ex[0][i][0].split("/")[-1]
elabel=ex[0][i][1].split("/")[-1]
print label, elabel
A.add_node(label,style="filled")
A.add_edge(label,cl_)
e=A.get_edge(label,cl_)
e.attr["label"]=elabel
n=A.get_node(label)
n.attr['color']="#A2F3D1"
print("\n\n")
    for i in xrange(len(ex[1])): # consequents
label=ex[1][i][1].split("/")[-1]
elabel=ex[1][i][0].split("/")[-1]
print elabel, label
if "XMLS" in label:
label_=i
else:
label_=label
A.add_node(label_,style="filled")
A.add_edge(cl_,label_)
e=A.get_edge(cl_,label_)
e.attr["label"]=elabel
n=A.get_node(label_)
n.attr['label']=label
if "XMLS" in label:
n.attr['color']="#FFE4AA"
else:
n.attr['color']="#A2F3D1"
n=A.get_node(cl_)
n.attr['style']="filled"
n.attr['color']="#6EAA91"
nome=("imgs/classes/%s.png"%(cl_,))
    A.draw(nome,prog="dot") # draw to png using dot
print("Wrote %s"%(nome,))
# 4) Build the overall structure and the overall figure
A=gv.AGraph(directed=True)
A.graph_attr["label"]="Diagrama geral da OCD no namespace interno: http://purl.org/socialparticipation/ocd/"
ii=1
for tkey in kk:
cl_=tkey.split("/")[-1]
if cl_ not in A.nodes():
A.add_node(cl_,style="filled")
n=A.get_node(cl_)
n.attr['color']="#A2F3D1"
ex=vv_[1][tkey]
for i in xrange(len(ex[0])):
label=ex[0][i][0].split("/")[-1]
elabel=ex[0][i][1].split("/")[-1]
print elabel
if label not in A.nodes():
A.add_node(label,style="filled")
n=A.get_node(label)
n.attr['color']="#A2F3D1"
A.add_edge(label,cl_)
e=A.get_edge(label,cl_)
e.attr["label"]=elabel
print("\n\n")
for i in xrange(len(ex[1])):
label=ex[1][i][1].split("/")[-1]
elabel=ex[1][i][0].split("/")[-1]
print elabel, label
if "XMLS" in label:
label_=ii; ii+=1
color="#FFE4AA"
else:
label_=label
color="#A2F3D1"
if label_ not in A.nodes():
A.add_node(label_,style="filled")
n=A.get_node(label_)
n.attr['label']=label.split("#")[-1]
n.attr['color']=color
A.add_edge(cl_,label_)
e=A.get_edge(cl_,label_)
e.attr["label"]=elabel
e.attr["color"]=color
e.attr["penwidth"]=2
A.draw("imgs/OCD.png",prog="twopi",args="-Granksep=4")
A.draw("imgs/OCD2.png",prog="dot",args="-Granksep=.4 -Gsize='1000,1000'")
print("Wrote geral")
# 4.5) qualify literals
## ok.
# 5) Looking at the triples, spot hierarchies and namespace-specific concepts,
# such as commentBody and userName. See README.md.
G(ocd.Problem, rdfs.subClassOf,ocd.Post)
G(ocd.Proposal, rdfs.subClassOf,ocd.Post)
G(ocd.supportCount, rdfs.subPropertyOf ,ocd.counting)
G(ocd.inspirationCount, rdfs.subPropertyOf,ocd.counting)
G(ocd.commentCount, rdfs.subPropertyOf ,ocd.counting)
G(ocd.followersCount, rdfs.subPropertyOf ,ocd.counting)
# 6) Properties are qualified and the class restrictions applied.
# For each property, look at what falls in its domain and in its range.
# At the same time, write the class axioms.
# This step can also be skipped, to keep the ontology simpler and to avoid incompatibilities with updated databases and with finer details given by the domain experts.
# Below is the complete outline of the data inspection used to extract the basic
# structures, property axioms, class restrictions and visualizations.
# 6.1) basic structure and figures:
P={}
P_={}
import string
for prop in props:
    # look at all the subjects it occurs with
    # look at all the objects it occurs with
    # build the structure and plot each one
prop_=prop.split("/")[-1]
suj=fazQuery("SELECT DISTINCT ?cs WHERE { ?s <%s> ?o . ?s a ?cs . }"%(prop,))
obj=fazQuery("SELECT DISTINCT ?co (datatype(?o) as ?do) WHERE { ?s <%s> ?o . OPTIONAL { ?o a ?co . } }"%(prop,))
P[prop_]=(suj,obj)
A=gv.AGraph(directed=True)
A.graph_attr["label"]=("propriedade: %s, no namespace interno: http://purl.org/socialparticipation/ocd/"%(prop_,))
# A.add_node(1,style="filled")
# A.add_node(2,style="filled")
A.add_edge(1,2)
e=A.get_edge(1,2)
e.attr["label"]=prop_
n1=A.get_node(1)
n2=A.get_node(2)
n1.attr['style']="filled"
n2.attr['style']="filled"
n1.attr['color']="blue"
n2.attr['color']="red"
    # Prepare the subject labels
ts=[i["cs"]["value"].split("/")[-1] for i in suj]
ls=string.join(ts,"<br />")
print "ls: "+ls
#n1.attr['label']=ls
n1.attr['label']=("<%s>"%(ls,))
    # Prepare the object labels
if "mbox" in prop_:
lo="XMLSchema#anyURI"
to=[lo]
else:
to1=[i["co"]["value"].split("/")[-1] for i in obj if "co" in i.keys()]
to2=[i["do"]["value"].split("/")[-1] for i in obj if "do" in i.keys()]
to=to1+to2
lo=string.join(to,"<br />")
P_[prop_]=(ts,to)
print "lo:"+lo
n2.attr['label']=("<%s>"%(lo,))
nome=("imgs/properties/%s.png"%(prop_,))
    A.draw(nome,prog="dot") # draw to png using dot
print("Wrote %s"%(nome,))
# variables: props, classes, vv_, P_
f=open("dumpCheck.pickle","wb")
tudo=(g,props,classes,vv_,P_)
pickle.dump(tudo,f)
f.close()
# CHECKPOINT
o=open("dumpCheck.pickle","rb")
g,props,classes,vv_,P_=pickle.load(o)
o.close()
# 6.2) qualification of the properties: range, domain and property axiom
# owl:ObjectProperty, owl:DatatypeProperty or owl:AnnotationProperty
# Automatically applying the criteria for
# range, domain, functional or not
for prop in props:
if prop not in notFunctionalProperties_:
        G(U(prop),rdf.type,owl.FunctionalProperty)
ant,cons=P_[prop.split("/")[-1]]
if len(cons) and ("XMLS" in cons[0]):
G(U(prop), rdf.type, owl.DatatypeProperty)
else:
G(U(prop), rdf.type, owl.ObjectProperty)
if len(ant)>1:
B=r.BNode()
G(U(prop), rdfs.domain, B)
for ant_ in ant:
G(B, owl.unionOf, U(ocd+ant_))
elif ant:
G(U(prop), rdfs.domain, U(ocd+ant[0]))
if len(cons)>1:
B=r.BNode()
G(U(prop), rdfs.range, B)
for cons_ in cons:
G(B, owl.unionOf, U(ocd+cons_))
elif cons:
G(U(prop), rdfs.domain, U(ocd+cons[0]))
# class restrictions
C={}
Ci={}
Ru={}
Re={}
for classe in classes:
query="SELECT DISTINCT ?p WHERE {?s a <%s>. ?s ?p ?o .}"%(classe,)
props_c=fazQuery(query)
props_c_=[i["p"]["value"] for i in props_c if "22-rdf-syntax" not in i["p"]["value"]]
C[classe]=props_c_
query2="SELECT DISTINCT ?s WHERE {?s a <%s>}"%(classe,)
inds=fazQuery(query2)
inds_=[i["s"]["value"] for i in inds]
Ci[classe]=inds_
for pc in props_c_:
query3="SELECT DISTINCT ?s ?co (datatype(?o) as ?do) WHERE {?s a <%s>. ?s <%s> ?o . OPTIONAL {?o a ?co . }}"%(classe,pc)
inds2=fazQuery(query3)
inds2_=set([i["s"]["value"] for i in inds2])
objs=set([i["co"]["value"] for i in inds2 if "co" in i.keys()])
vals=set([i["do"]["value"] for i in inds2 if "do" in i.keys()])
print "%s --> %s , %s"%(classe, vals, objs)
if len(inds_)==len(inds2_):
print "%s, %s existencial"%(classe,pc)
b_=r.BNode()
G(U(classe), rdfs.subClassOf, b_)
G(b_,rdf.type,owl.Restriction)
G(b_,owl.onProperty,U(pc))
if len(vals):
ob=list(vals)[0]
else:
try:
ob=list(objs)[0]
except:
print classe, pc
ob=0
if ob:
G(b_,owl.someValuesFrom,r.URIRef(ob))
if classe in Re.keys():
Re[classe].append((pc,ob))
else:
Re[classe]=[(pc,ob)]
query4="SELECT DISTINCT ?s WHERE { ?s <%s> ?o .}"%(pc,)
inds3=fazQuery(query4)
inds3_=[i["s"]["value"] for i in inds3]
if len(inds_)==len(inds3_):
print "%s, %s universal"%(classe,pc)
b_=r.BNode()
G(U(classe), rdfs.subClassOf, b_)
G(b_,rdf.type,owl.Restriction)
G(b_,owl.onProperty,U(pc))
if len(vals):
ob=list(vals)[0]
else:
try:
ob=list(objs)[0]
except:
print classe, pc
ob=0
if ob:
G(b_,owl.allValuesFrom,r.URIRef(ob))
if classe in Ru.keys():
Ru[classe].append((pc,ob))
else:
Ru[classe]=[(pc,ob)]
f=open("dumpREST.pickle","wb")
tudo=(g,Re,Ru,C,Ci)
pickle.dump(tudo,f)
f.close()
# CHECKPOINT
fo=open("dumpREST.pickle","rb")
g,Re,Ru,C,Ci=pickle.load(fo)
fo.close()
# 6.1) Enrich the figures: classes, properties and overall diagram
kk=vv_[1].keys()
for tkey in kk:
cl=tkey
cl_=cl.split("/")[-1]
print cl_
ex=vv_[1][cl]
A=gv.AGraph(directed=True)
    for i in xrange(len(ex[0])): # antecedents
label=ex[0][i][0].split("/")[-1]
elabel=ex[0][i][1].split("/")[-1]
elabel_=ex[0][i][1]
print label, elabel
A.add_node(label,style="filled")
A.add_edge(label,cl_)
e=A.get_edge(label,cl_)
e.attr["label"]=elabel
if elabel in notFunctionalProperties:
e.attr["style"]="dashed"
if ex[0][i][0] in Re.keys():
tr=Re[ex[0][i][0]]
pp=[ii[0] for ii in tr]
oo=[ii[1] for ii in tr]
if (elabel_ in pp) and (oo[pp.index(elabel_)]==cl):
e.attr["color"]="#A0E0A0"
print "EXISTENCIAL ANTECEDENTE"
if ex[0][i][0] in Ru.keys():
tr=Ru[ex[0][i][0]]
pp=[ii[0] for ii in tr]
oo=[ii[1] for ii in tr]
if (elabel_ in pp) and (oo[pp.index(elabel_)]==cl):
e.attr["arrowhead"]="inv"
print "EXISTENCIAL ANTECEDENTE"
e.attr["penwidth"]=2.
e.attr["arrowsize"]=2.
n=A.get_node(label)
n.attr['color']="#A2F3D1"
print("\n\n")
    for i in xrange(len(ex[1])): # consequents
label=ex[1][i][1].split("/")[-1]
elabel=ex[1][i][0].split("/")[-1]
elabel_=ex[1][i][0]
print elabel, label
if "XMLS" in label:
label_=i
else:
label_=label
A.add_node(label_,style="filled")
A.add_edge(cl_,label_)
e=A.get_edge(cl_,label_)
e.attr["label"]=elabel
if elabel in notFunctionalProperties:
e.attr["style"]="dashed"
if cl in Re.keys():
tr=Re[cl]
pp=[ii[0] for ii in tr]
if elabel_ in pp:
e.attr["color"]="#A0E0A0"
print "EXISTENCIAL"
if cl in Ru.keys():
tr=Ru[cl]
pp=[ii[0] for ii in tr]
if elabel_ in pp:
e.attr["arrowhead"]="inv"
e.attr["arrowsize"]=2.
print "UNIVERSAL"
e.attr["penwidth"]=2.
e.attr["arrowsize"]=2.
n=A.get_node(label_)
n.attr['label']=label
if "XMLS" in label:
n.attr['color']="#FFE4AA"
else:
n.attr['color']="#A2F3D1"
n=A.get_node(cl_)
n.attr['style']="filled"
n.attr['color']="#6EAA91"
A.graph_attr["label"]=(r"classe: %s, no namespace interno: http://purl.org/socialparticipation/ocd/.\nAresta tracejada: propriedade nao funcional.\nAresta verde: restricao existencial.\nPonta de seta invertida: restricao universal"%(cl_,))
nome=("imgs/classes_/%s.png"%(cl_,))
    A.draw(nome,prog="dot") # draw to png using dot
print("Wrote %s"%(nome,))
# overall figure
A=gv.AGraph(directed=True)
A.graph_attr["label"]=r"Diagrama geral da OCD no namespace interno: http://purl.org/socialparticipation/ocd/\nAresta em verde indica restricao existencial,\ncom a ponta invertida indica restricao universal,\ntracejada indica propriedade nao funcional"
ii=1
for tkey in kk:
cl_=tkey.split("/")[-1]
cl=tkey
if cl_ not in A.nodes():
A.add_node(cl_,style="filled")
n=A.get_node(cl_)
n.attr['color']="#A2F3D1"
ex=vv_[1][tkey]
for i in xrange(len(ex[0])):
        label=ex[0][i][0].split("/")[-1]
        elabel=ex[0][i][1].split("/")[-1]
        elabel_=ex[0][i][1]
        print elabel
if label not in A.nodes():
A.add_node(label,style="filled")
n=A.get_node(label)
n.attr['color']="#A2F3D1"
A.add_edge(label,cl_)
e=A.get_edge(label,cl_)
e.attr["label"]=elabel
if elabel in notFunctionalProperties:
e.attr["style"]="dashed"
if ex[0][i][0] in Re.keys():
tr=Re[ex[0][i][0]]
pp=[iii[0] for iii in tr]
oo=[iii[1] for iii in tr]
if (elabel_ in pp) and (oo[pp.index(elabel_)]==cl):
e.attr["color"]="#A0E0A0"
print "EXISTENCIAL ANTECEDENTE"
if ex[0][i][0] in Ru.keys():
tr=Ru[ex[0][i][0]]
pp=[iii[0] for iii in tr]
oo=[iii[1] for iii in tr]
if (elabel_ in pp) and (oo[pp.index(elabel_)]==cl):
e.attr["arrowhead"]="inv"
print "EXISTENCIAL ANTECEDENTE"
e.attr["penwidth"]=2.
e.attr["arrowsize"]=2.
print("\n\n")
    for i in xrange(len(ex[1])): # consequents
label=ex[1][i][1].split("/")[-1]
elabel=ex[1][i][0].split("/")[-1]
elabel_=ex[1][i][0]
print elabel, label
if "XMLS" in label:
label_=ii; ii+=1
color="#FFE4AA"
else:
label_=label
color="#A2F3D1"
if label_ not in A.nodes():
A.add_node(label_,style="filled")
n=A.get_node(label_)
n.attr['label']=label.split("#")[-1]
n.attr['color']=color
A.add_edge(cl_,label_)
e=A.get_edge(cl_,label_)
e.attr["label"]=elabel
e.attr["color"]=color
e.attr["penwidth"]=2
if elabel in notFunctionalProperties:
e.attr["style"]="dashed"
if cl in Re.keys():
tr=Re[cl]
pp=[iii[0] for iii in tr]
if elabel_ in pp:
e.attr["color"]="#A0E0A0"
print "EXISTENCIAL"
if cl in Ru.keys():
tr=Ru[cl]
pp=[iii[0] for iii in tr]
if elabel_ in pp:
e.attr["arrowhead"]="inv"
e.attr["arrowsize"]=2.
print "UNIVERSAL"
#A.draw("imgs/OCD_.png",prog="twopi",args="-Granksep=14")
#A.draw("imgs/OCD_2.png",prog="dot",args="-Granksep=14 -Gsize='1000,1000'")
A.draw("imgs/OCD_.png",prog="dot")
A.draw("imgs/OCD_2.png",prog="circo")
A.draw("imgs/OCD_3.png",prog="fdp")
A.draw("imgs/OCD_4.png",prog="twopi")
print("Wrote geral _ ")
# figure for the properties
for prop in props:
    # look at all the subjects it occurs with
    # look at all the objects it occurs with
    # build the structure and plot each one
prop_=prop.split("/")[-1]
#suj=fazQuery("SELECT DISTINCT ?cs WHERE { ?s <%s> ?o . ?s a ?cs . }"%(prop,))
#obj=fazQuery("SELECT DISTINCT ?co (datatype(?o) as ?do) WHERE { ?s <%s> ?o . OPTIONAL { ?o a ?co . } }"%(prop,))
#P[prop_]=(suj,obj)
suj,obj=P_[prop_]
A=gv.AGraph(directed=True)
A.graph_attr["label"]=(r"propriedade: %s, no namespace interno: http://purl.org/socialparticipation/ocd/\nAresta em verde indica restricao existencial,\ncom a ponta invertida indica restricao universal,\ntracejada indica propriedade nao funcional"%(prop_,))
# A.add_node(1,style="filled")
# A.add_node(2,style="filled")
A.add_edge(1,2)
e=A.get_edge(1,2)
e.attr["label"]=prop_
if prop_ in notFunctionalProperties:
#e.attr["style"]="dotted"
e.attr["style"]="dashed"
for cl in Re.keys():
tr=Re[cl]
pp=[iii[0] for iii in tr]
if prop in pp:
e.attr["color"]="#A0E0A0"
print "%s, EXISTENCIAL"%(prop_,)
for cl in Ru.keys():
tr=Ru[cl]
pp=[iii[0] for iii in tr]
if prop in pp:
e.attr["arrowhead"]="inv"
e.attr["arrowsize"]=2.
print "UNIVERSAL"
e.attr["penwidth"]=4
n1=A.get_node(1)
n2=A.get_node(2)
n1.attr['style']="filled"
n2.attr['style']="filled"
n1.attr['color']="blue"
n2.attr['color']="red"
    # Prepare the subject labels
#ts=[i["cs"]["value"].split("/")[-1] for i in suj]
ts=suj
ls=string.join(ts,"<br />")
print "ls: "+ls
#n1.attr['label']=ls
n1.attr['label']=("<%s>"%(ls,))
    # Prepare the object labels
if "mbox" in prop_:
lo="XMLSchema#anyURI"
to=[lo]
else:
#to1=[i["co"]["value"].split("/")[-1] for i in obj if "co" in i.keys()]
#to2=[i["do"]["value"].split("/")[-1] for i in obj if "do" in i.keys()]
#to=to1+to2
to=obj
lo=string.join(to,"<br />")
P_[prop_]=(ts,to)
print "lo:"+lo
n2.attr['label']=("<%s>"%(lo,))
nome=("imgs/properties_/%s.png"%(prop_,))
    A.draw(nome,prog="dot") # draw to png using dot
print("Wrote %s"%(nome,))
# 7) The namespace is related to external namespaces through: super classes and properties, and equivalent classes and properties.
rdf = r.namespace.RDF
rdfs = r.namespace.RDFS
foaf = r.namespace.FOAF
owl = r.namespace.OWL
dc=r.namespace.DC
dct=r.namespace.DCTERMS
dcty=r.Namespace("http://purl.org/dc/dcmitype/")
gndo=r.Namespace("http://d-nb.info/standards/elementset/gnd#")
sc=r.Namespace("http://schema.org/")
ops = r.Namespace("http://purl.org/socialparticipation/ops#")
sioc = r.Namespace("http://rdfs.org/sioc/ns#")
xsd = r.namespace.XSD
g.namespace_manager.bind("ops", "http://purl.org/socialparticipation/ops#")
g.namespace_manager.bind("rdf", r.namespace.RDF)
g.namespace_manager.bind("rdfs", r.namespace.RDFS)
g.namespace_manager.bind("foaf", r.namespace.FOAF)
g.namespace_manager.bind("xsd", r.namespace.XSD)
g.namespace_manager.bind("owl", r.namespace.OWL)
g.namespace_manager.bind("ops", "http://purl.org/socialparticipation/ops#")
g.namespace_manager.bind("dc", "http://purl.org/dc/elements/1.1/")
g.namespace_manager.bind("dct", "http://purl.org/dc/terms/")
g.namespace_manager.bind("dcty", "http://purl.org/dc/dcmitype/")
g.namespace_manager.bind("gndo", "http://d-nb.info/standards/elementset/gnd#")
g.namespace_manager.bind("schema", "http://schema.org/")
g.namespace_manager.bind("sioc", "http://rdfs.org/sioc/ns#")
#g.add((ocd.City, rdfs.subClassOf, ))
# enrich figures
# 8) info about this ontology
ouri=ocd.ocd+".owl"
g.add((ouri,rdf.type,owl.Ontology))
g.add((ouri,dct.title,r.Literal(u"Ontologia do Cidade Democrática")))
g.add((ouri,owl.versionInfo,r.Literal(u"0.01au")))
g.add((ouri,dct.description,r.Literal(u"Ontologia do Cidade Democratica, levantada com base nos dados e para conectar com outras instâncias")))
# 8) Write OWL, TTL and PNG
f=open("OCD.owl","wb")
f.write(g.serialize())
f.close()
f=open("OCD.ttl","wb")
f.write(g.serialize(format="turtle"))
f.close()
# 9) Upload to the endpoint for further tests
# As was done for each class, and also centered on each property,
# do the same for each literal.
print "total time: ", time.time()-T
| OpenLinkedSocialData/ocd | OCD.py | Python | unlicense | 24,833 | 0.021655 |
# -*- coding: utf-8 -*-
from flask_babel._compat import text_type
class LazyString(object):
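    """String-like proxy that defers calling ``func(*args, **kwargs)`` until the
    value is actually needed (for example when it is rendered in a template), so
    that translations such as ``lazy_gettext`` can be evaluated lazily.
    """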
def __init__(self, func, *args, **kwargs):
self._func = func
self._args = args
self._kwargs = kwargs
def __getattr__(self, attr):
string = text_type(self)
if hasattr(string, attr):
return getattr(string, attr)
raise AttributeError(attr)
def __str__(self):
return text_type(self._func(*self._args, **self._kwargs))
def __len__(self):
return len(text_type(self))
def __getitem__(self, key):
return text_type(self)[key]
def __iter__(self):
return iter(text_type(self))
def __contains__(self, item):
return item in text_type(self)
def __add__(self, other):
return text_type(self) + other
def __radd__(self, other):
return other + text_type(self)
def __mul__(self, other):
return text_type(self) * other
def __rmul__(self, other):
return other * text_type(self)
def __lt__(self, other):
return text_type(self) < other
def __le__(self, other):
return text_type(self) <= other
def __eq__(self, other):
return text_type(self) == other
def __ne__(self, other):
return text_type(self) != other
def __gt__(self, other):
return text_type(self) > other
def __ge__(self, other):
return text_type(self) >= other
def __html__(self):
return text_type(self)
def __hash__(self):
return hash(text_type(self))
def __mod__(self, other):
return text_type(self) % other
def __rmod__(self, other):
        return other % text_type(self) | hachard/Cra-Magnet | flask/lib/python3.5/site-packages/flask_babel/speaklater.py | Python | gpl-3.0 | 1713 | 0.000584
# -*- coding: utf-8 -*-
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# Author: Cyril Jaquier
#
# $Revision$
__author__ = "Cyril Jaquier"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
__license__ = "GPL"
import re, time
from mytime import MyTime
import iso8601
class DateTemplate:
def __init__(self):
self.__name = ""
self.__regex = ""
self.__cRegex = None
self.__hits = 0
def setName(self, name):
self.__name = name
def getName(self):
return self.__name
def setRegex(self, regex):
self.__regex = regex.strip()
self.__cRegex = re.compile(regex)
def getRegex(self):
return self.__regex
def getHits(self):
return self.__hits
def matchDate(self, line):
dateMatch = self.__cRegex.search(line)
		if dateMatch is not None:
self.__hits += 1
return dateMatch
def getDate(self, line):
raise Exception("matchDate() is abstract")
class DateEpoch(DateTemplate):
def __init__(self):
DateTemplate.__init__(self)
		# The epoch format (seconds since 1970, optionally with microseconds) is fixed
self.setRegex("^\d{10}(\.\d{6})?")
def getDate(self, line):
date = None
dateMatch = self.matchDate(line)
if dateMatch:
# extract part of format which represents seconds since epoch
date = list(time.localtime(float(dateMatch.group())))
return date
##
# Use strptime() to parse a date. Our current locale is the 'C'
# one because we do not set the locale explicitly. This is POSIX
# standard.
class DateStrptime(DateTemplate):
TABLE = dict()
TABLE["Jan"] = []
TABLE["Feb"] = [u"Fév"]
TABLE["Mar"] = [u"Mär"]
TABLE["Apr"] = ["Avr"]
TABLE["May"] = ["Mai"]
TABLE["Jun"] = []
TABLE["Jul"] = []
TABLE["Aug"] = ["Aou"]
TABLE["Sep"] = []
TABLE["Oct"] = ["Okt"]
TABLE["Nov"] = []
TABLE["Dec"] = [u"Déc", "Dez"]
def __init__(self):
DateTemplate.__init__(self)
self.__pattern = ""
def setPattern(self, pattern):
self.__pattern = pattern.strip()
def getPattern(self):
return self.__pattern
#@staticmethod
def convertLocale(date):
for t in DateStrptime.TABLE:
for m in DateStrptime.TABLE[t]:
if date.find(m) >= 0:
return date.replace(m, t)
return date
convertLocale = staticmethod(convertLocale)
def getDate(self, line):
date = None
dateMatch = self.matchDate(line)
if dateMatch:
try:
# Try first with 'C' locale
date = list(time.strptime(dateMatch.group(), self.getPattern()))
except ValueError:
# Try to convert date string to 'C' locale
conv = self.convertLocale(dateMatch.group())
try:
date = list(time.strptime(conv, self.getPattern()))
except ValueError, e:
# Try to add the current year to the pattern. Should fix
# the "Feb 29" issue.
conv += " %s" % MyTime.gmtime()[0]
pattern = "%s %%Y" % self.getPattern()
date = list(time.strptime(conv, pattern))
if date[0] < 2000:
# There is probably no year field in the logs
date[0] = MyTime.gmtime()[0]
# Bug fix for #1241756
# If the date is greater than the current time, we suppose
# that the log is not from this year but from the year before
if time.mktime(date) > MyTime.time():
date[0] -= 1
elif date[1] == 1 and date[2] == 1:
# If it is Jan 1st, it is either really Jan 1st or there
# is neither month nor day in the log.
date[1] = MyTime.gmtime()[1]
date[2] = MyTime.gmtime()[2]
return date
class DateTai64n(DateTemplate):
def __init__(self):
DateTemplate.__init__(self)
# We already know the format for TAI64N
self.setRegex("@[0-9a-f]{24}")
def getDate(self, line):
date = None
dateMatch = self.matchDate(line)
if dateMatch:
# extract part of format which represents seconds since epoch
value = dateMatch.group()
seconds_since_epoch = value[2:17]
# convert seconds from HEX into local time stamp
date = list(time.localtime(int(seconds_since_epoch, 16)))
return date
class DateISO8601(DateTemplate):
def __init__(self):
DateTemplate.__init__(self)
date_re = "[0-9]{4}-[0-9]{1,2}-[0-9]{1,2}" \
".[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?" \
"(Z|(([-+])([0-9]{2}):([0-9]{2})))?"
self.setRegex(date_re)
def getDate(self, line):
date = None
dateMatch = self.matchDate(line)
if dateMatch:
# Parses the date.
value = dateMatch.group()
date = list(iso8601.parse_date(value).timetuple())
return date
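# Illustrative sketch (added for clarity, not part of the original file): trying each
# concrete template in turn until one of them recognizes the date in a log line.
def _exampleFindDate(line):
	templates = [DateEpoch(), DateISO8601(), DateTai64n()]
	for template in templates:
		date = template.getDate(line)
		if date is not None:
			return date
	return None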
| yarikoptic/Fail2Ban-Old-SVNGIT | server/datetemplate.py | Python | gpl-2.0 | 5,047 | 0.033902 |
##########################################################################
#
# Copyright (c) 2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import Gaffer
import GafferUI
import GafferUITest
from Gaffer.Private.IECorePreview import Message
from Gaffer.Private.IECorePreview import Messages
class MessageWidgetTest( GafferUITest.TestCase ) :
def assertCounts( self, widget, debug, info, warning, error ) :
self.assertEqual( widget.messageCount( IECore.Msg.Level.Debug ), debug )
self.assertEqual( widget.messageCount( IECore.Msg.Level.Info ), info )
self.assertEqual( widget.messageCount( IECore.Msg.Level.Warning ), warning )
self.assertEqual( widget.messageCount( IECore.Msg.Level.Error ), error )
self.assertEqual( widget.messageCount(), debug + info + warning + error )
def testMessages( self ) :
w = GafferUI.MessageWidget()
self.assertCounts( w, 0, 0, 0, 0 )
m = Messages()
for i in range( 24 ) :
m.add( Message( IECore.MessageHandler.Level( i % 4 ), "testMessages", "message %d" % i ) )
w.setMessages( m )
self.assertEqual( w.getMessages(), m )
self.assertCounts( w, 6, 6, 6, 6 )
w.clear()
self.assertNotEqual( w.getMessages(), m )
self.assertCounts( w, 0, 0, 0, 0 )
def testMessageLevel( self ) :
levels = (
IECore.MessageHandler.Level.Debug, IECore.MessageHandler.Level.Info,
IECore.MessageHandler.Level.Warning, IECore.MessageHandler.Level.Info
)
w = GafferUI.MessageWidget()
self.assertEqual( w.getMessageLevel(), IECore.MessageHandler.Level.Info )
for l in levels :
w.setMessageLevel( l )
self.assertEqual( w.getMessageLevel(), l )
for l in levels :
w = GafferUI.MessageWidget( messageLevel = l )
self.assertEqual( w.getMessageLevel(), l )
def testCounts( self ) :
def msg( level ) :
IECore.msg( level, "test", "test" )
self.waitForIdle( 10 )
w = GafferUI.MessageWidget()
self.assertCounts( w, 0, 0, 0, 0 )
with w.messageHandler() :
msg( IECore.Msg.Level.Error )
self.assertCounts( w, 0, 0, 0, 1 )
msg( IECore.Msg.Level.Warning )
self.assertCounts( w, 0, 0, 1, 1 )
msg( IECore.Msg.Level.Info )
self.assertCounts( w, 0, 1, 1, 1 )
msg( IECore.Msg.Level.Debug )
self.assertCounts( w, 1, 1, 1, 1 )
msg( IECore.Msg.Level.Error )
msg( IECore.Msg.Level.Error )
self.assertCounts( w, 1, 1, 1, 3 )
w.clear()
self.assertCounts( w, 0, 0, 0, 0 )
def testForwarding( self ) :
w = GafferUI.MessageWidget()
h = IECore.CapturingMessageHandler()
w.forwardingMessageHandler().addHandler( h )
self.assertEqual( w.messageCount( IECore.Msg.Level.Error ), 0 )
self.assertEqual( len( h.messages ), 0 )
with w.messageHandler() :
IECore.msg( IECore.Msg.Level.Error, "test", "test" )
self.waitForIdle( 10 )
self.assertEqual( w.messageCount( IECore.Msg.Level.Error ), 1 )
self.assertEqual( len( h.messages ), 1 )
w.forwardingMessageHandler().removeHandler( h )
with w.messageHandler() :
IECore.msg( IECore.Msg.Level.Error, "test", "test" )
self.waitForIdle( 10 )
self.assertEqual( w.messageCount( IECore.Msg.Level.Error ), 2 )
self.assertEqual( len( h.messages ), 1 )
if __name__ == "__main__":
unittest.main()
| lucienfostier/gaffer | python/GafferUITest/MessageWidgetTest.py | Python | bsd-3-clause | 4,866 | 0.044595 |
from __future__ import absolute_import, division, print_function
import toolz
from toolz import first
import datashape
from datashape import Record, dshape, DataShape
from datashape import coretypes as ct
from datashape.predicates import isscalar, iscollection
from .core import common_subexpression
from .expressions import Expr, Symbol
class Reduction(Expr):
""" A column-wise reduction
Blaze supports the same class of reductions as NumPy and Pandas.
sum, min, max, any, all, mean, var, std, count, nunique
Examples
--------
>>> t = Symbol('t', 'var * {name: string, amount: int, id: int}')
>>> e = t['amount'].sum()
>>> data = [['Alice', 100, 1],
... ['Bob', 200, 2],
... ['Alice', 50, 3]]
>>> from blaze.compute.python import compute
>>> compute(e, data)
350
"""
__slots__ = '_child', 'axis', 'keepdims'
_dtype = None
def __init__(self, _child, axis=None, keepdims=False):
self._child = _child
if axis is None:
axis = tuple(range(_child.ndim))
if isinstance(axis, (set, list)):
axis = tuple(axis)
if not isinstance(axis, tuple):
axis = (axis,)
axis = tuple(sorted(axis))
self.axis = axis
self.keepdims = keepdims
@property
def dshape(self):
axis = self.axis
if self.keepdims:
shape = tuple(1 if i in self.axis else d
for i, d in enumerate(self._child.shape))
else:
shape = tuple(d
for i, d in enumerate(self._child.shape)
if i not in self.axis)
return DataShape(*(shape + (self._dtype,)))
@property
def symbol(self):
return type(self).__name__
@property
def _name(self):
try:
return self._child._name + '_' + type(self).__name__
except (AttributeError, ValueError, TypeError):
return type(self).__name__
class any(Reduction):
_dtype = ct.bool_
class all(Reduction):
_dtype = ct.bool_
class sum(Reduction):
@property
def _dtype(self):
schema = self._child.schema[0]
if isinstance(schema, Record) and len(schema.types) == 1:
return first(schema.types)
else:
return schema
class max(Reduction):
@property
def _dtype(self):
schema = self._child.schema[0]
if isinstance(schema, Record) and len(schema.types) == 1:
return first(schema.types)
else:
return schema
class min(Reduction):
@property
def _dtype(self):
schema = self._child.schema[0]
if isinstance(schema, Record) and len(schema.types) == 1:
return first(schema.types)
else:
return schema
class mean(Reduction):
_dtype = ct.real
class var(Reduction):
"""Variance
Parameters
----------
child : Expr
An expression
unbiased : bool, optional
Compute an unbiased estimate of the population variance if this is
``True``. In NumPy and pandas, this parameter is called ``ddof`` (delta
degrees of freedom) and is equal to 1 for unbiased and 0 for biased.
"""
__slots__ = '_child', 'unbiased', 'axis', 'keepdims'
_dtype = ct.real
def __init__(self, child, unbiased=False, *args, **kwargs):
self.unbiased = unbiased
Reduction.__init__(self, child, *args, **kwargs)
class std(Reduction):
"""Standard Deviation
Parameters
----------
child : Expr
An expression
unbiased : bool, optional
Compute the square root of an unbiased estimate of the population
variance if this is ``True``.
.. warning::
This does *not* return an unbiased estimate of the population
standard deviation.
See Also
--------
var
"""
__slots__ = '_child', 'unbiased', 'axis', 'keepdims'
_dtype = ct.real
def __init__(self, child, unbiased=False, *args, **kwargs):
self.unbiased = unbiased
Reduction.__init__(self, child, *args, **kwargs)
class count(Reduction):
""" The number of non-null elements """
_dtype = ct.int_
class nunique(Reduction):
_dtype = ct.int_
class Summary(Expr):
""" A collection of named reductions
Examples
--------
>>> t = Symbol('t', 'var * {name: string, amount: int, id: int}')
>>> expr = summary(number=t.id.nunique(), sum=t.amount.sum())
>>> data = [['Alice', 100, 1],
... ['Bob', 200, 2],
... ['Alice', 50, 1]]
>>> from blaze.compute.python import compute
>>> compute(expr, data)
(2, 350)
"""
__slots__ = '_child', 'names', 'values', 'keepdims'
def __init__(self, _child, names, values, keepdims=False):
self._child = _child
self.names = names
self.values = values
self.keepdims = keepdims
@property
def dshape(self):
measure = Record(list(zip(self.names,
[v._dtype for v in self.values])))
if self.keepdims:
return DataShape(*((1,) * self._child.ndim + (measure,)))
else:
return DataShape(measure)
def __str__(self):
return 'summary(' + ', '.join('%s=%s' % (name, str(val))
for name, val in zip(self.fields, self.values)) + \
', keepdims=%s' % self.keepdims + ')'
def summary(keepdims=False, **kwargs):
items = sorted(kwargs.items(), key=first)
names = tuple(map(first, items))
values = tuple(map(toolz.second, items))
child = common_subexpression(*values)
if len(kwargs) == 1 and not iscollection(child.dshape):
while not iscollection(child.dshape):
children = [i for i in child._inputs if isinstance(i, Expr)]
if len(children) == 1:
child = children[0]
else:
raise ValueError()
return Summary(child, names, values, keepdims=keepdims)
summary.__doc__ = Summary.__doc__
from datashape.predicates import (iscollection, isscalar, isrecord, isboolean,
isnumeric)
from .expressions import schema_method_list, dshape_method_list
schema_method_list.extend([
(isboolean, set([any, all, sum])),
(isnumeric, set([mean, sum, mean, min, max, std, var])),
])
dshape_method_list.extend([
(iscollection, set([count, nunique, min, max])),
])
| vitan/blaze | blaze/expr/reductions.py | Python | bsd-3-clause | 6,478 | 0.001698 |
from django.utils.translation import ugettext_lazy as _
DIAL_CHOICES = (
('none', _('None')),
('din 41091.1', _('Dial with minute and hour markers (DIN 41091, Sect. 1)')),
('din 41091.3', _('Dial with hour markers (DIN 41091, Sect. 3)')),
('din 41091.4', _('Dial with hour numerals (DIN 41091, Part 4)')),
('swiss', _('Dial with minute and hour markers (Bauhaus)')),
('austria', _('Dial with minute and hour markers (Austria)')),
('points', _('Dial with hour dots')),
)
HOUR_HAND_CHOICES = (
('none', _('None')),
('din 41092.3', _('Pointed, bar-shaped hand (DIN 41092, Sect. 3)')),
('german', _('Blunt, bar-shaped hand (German Rail)')),
('siemens', _('Heart-shaped hand (Siemens)')),
('swiss', _('Blunt, javelin-shaped hand (Austria)')),
)
MINUTE_HAND_CHOICES = (
('none', _('Without minute hand')),
('din 41092.3', _('Pointed, bar-shaped hand (DIN 41092, Sect. 3)')),
('german', _('Blunt, bar-shaped hand (German Rail)')),
('siemens', _('Serpentine hand (Siemens)')),
('swiss', _('Blunt, javelin-shaped hand (Austria)')),
)
SECOND_HAND_CHOICES = (
('none', _('Without second hand')),
('din 41071.1', _('Javelin-shaped hand (DIN 41071, Sect. 1)')),
('din 41071.2', _('Perforated pointer hand (DIN 41071, Sect. 2)')),
('german', _('Modern perforated pointer hand (German Rail)')),
('swiss', _('Disc-end hand (Switzerland)')),
)
MINUTE_HAND_MOVEMENT_CHOICES = (
('stepping', _('Stepping minute hand')),
('sweeping', _('Sweeping minute hand')),
)
SECOND_HAND_MOVEMENT_CHOICES = (
('stepping', _('Stepping second hand')),
('sweeping', _('Sweeping second hand')),
('swinging', _('Oscillating second hand')),
)
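# Illustrative sketch (not part of the original file): how these tuples would typically be
# wired into a Django model; the model and field names below are assumptions.
#
# from django.db import models
#
# class ClockFace(models.Model):
#     dial = models.CharField(max_length=20, choices=DIAL_CHOICES, default='din 41091.1')
#     minute_hand = models.CharField(max_length=20, choices=MINUTE_HAND_CHOICES, default='din 41092.3')
#     second_hand_movement = models.CharField(max_length=20, choices=SECOND_HAND_MOVEMENT_CHOICES, default='stepping')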
| akoebbe/sweetiepi | sweetiepi/clocks/choices.py | Python | mit | 1,719 | 0.000582 |
from uber.common import *
def swallow_exceptions(func):
"""
Don't allow ANY Exceptions to be raised from this.
Use this ONLY where it's absolutely needed, such as dealing with locking functionality.
WARNING: DO NOT USE THIS UNLESS YOU KNOW WHAT YOU'RE DOING :)
"""
@wraps(func)
def swallow_exception(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception:
log.error("Exception raised, but we're going to ignore it and continue.", exc_info=True)
return swallow_exception
def log_pageview(func):
@wraps(func)
def with_check(*args, **kwargs):
with sa.Session() as session:
try:
attendee = session.admin_account(cherrypy.session['account_id'])
except:
pass # we don't care about unrestricted pages for this version
else:
sa.PageViewTracking.track_pageview()
return func(*args, **kwargs)
return with_check
def redirect_if_at_con_to_kiosk(func):
@wraps(func)
def with_check(*args, **kwargs):
if c.AT_THE_CON and c.KIOSK_REDIRECT_URL:
raise HTTPRedirect(c.KIOSK_REDIRECT_URL)
return func(*args, **kwargs)
return with_check
def check_if_can_reg(func):
@wraps(func)
def with_check(*args, **kwargs):
if c.BADGES_SOLD >= c.MAX_BADGE_SALES:
return render('static_views/prereg_soldout.html')
elif c.BEFORE_PREREG_OPEN:
return render('static_views/prereg_not_yet_open.html')
elif c.AFTER_PREREG_TAKEDOWN and not c.AT_THE_CON:
return render('static_views/prereg_closed.html')
return func(*args, **kwargs)
return with_check
def get_innermost(func):
return get_innermost(func.__wrapped__) if hasattr(func, '__wrapped__') else func
def site_mappable(func):
func.site_mappable = True
return func
def suffix_property(func):
func._is_suffix_property = True
return func
def _suffix_property_check(inst, name):
if not name.startswith('_'):
suffix = '_' + name.rsplit('_', 1)[-1]
prop_func = getattr(inst, suffix, None)
if getattr(prop_func, '_is_suffix_property', False):
field_name = name[:-len(suffix)]
field_val = getattr(inst, field_name)
return prop_func(field_name, field_val)
suffix_property.check = _suffix_property_check
def csrf_protected(func):
@wraps(func)
def protected(*args, csrf_token, **kwargs):
check_csrf(csrf_token)
return func(*args, **kwargs)
return protected
def ajax(func):
"""decorator for Ajax POST requests which require a CSRF token and return JSON"""
@wraps(func)
def returns_json(*args, **kwargs):
cherrypy.response.headers['Content-Type'] = 'application/json'
assert cherrypy.request.method == 'POST', 'POST required, got {}'.format(cherrypy.request.method)
check_csrf(kwargs.pop('csrf_token', None))
return json.dumps(func(*args, **kwargs), cls=serializer).encode('utf-8')
return returns_json
def ajax_gettable(func):
"""
Decorator for page handlers which return JSON. Unlike the above @ajax decorator,
this allows either GET or POST and does not check for a CSRF token, so this can
be used for pages which supply data to external APIs as well as pages used for
periodically polling the server for new data by our own Javascript code.
"""
@wraps(func)
def returns_json(*args, **kwargs):
cherrypy.response.headers['Content-Type'] = 'application/json'
return json.dumps(func(*args, **kwargs), cls=serializer).encode('utf-8')
return returns_json
def multifile_zipfile(func):
func.site_mappable = True
@wraps(func)
def zipfile_out(self, session):
zipfile_writer = BytesIO()
with zipfile.ZipFile(zipfile_writer, mode='w') as zip_file:
func(self, zip_file, session)
# must do this after creating the zip file as other decorators may have changed this
# for example, if a .zip file is created from several .csv files, they may each set content-type.
cherrypy.response.headers['Content-Type'] = 'application/zip'
cherrypy.response.headers['Content-Disposition'] = 'attachment; filename=' + func.__name__ + '.zip'
return zipfile_writer.getvalue()
return zipfile_out
def _set_csv_base_filename(base_filename):
"""
Set the correct headers when outputting CSV files to specify the filename the browser should use
"""
cherrypy.response.headers['Content-Disposition'] = 'attachment; filename=' + base_filename + '.csv'
def csv_file(func):
parameters = inspect.getargspec(func)
if len(parameters[0]) == 3:
func.site_mappable = True
@wraps(func)
def csvout(self, session, set_headers=True, **kwargs):
writer = StringIO()
func(self, csv.writer(writer), session, **kwargs)
output = writer.getvalue().encode('utf-8')
# set headers last in case there were errors, so end user still see error page
if set_headers:
cherrypy.response.headers['Content-Type'] = 'application/csv'
_set_csv_base_filename(func.__name__)
return output
return csvout
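# Illustrative sketch (not part of the original module): a site-section handler using the
# @csv_file decorator above; the query and column names are hypothetical.
#
# @csv_file
# def attendee_emails(self, out, session):
#     out.writerow(['Full Name', 'Email'])
#     for attendee in session.query(sa.Attendee).all():
#         out.writerow([attendee.full_name, attendee.email])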
def set_csv_filename(func):
"""
Use this to override CSV filenames, useful when working with aliases and redirects to make it print the correct name
"""
@wraps(func)
def change_filename(self, override_filename=None, *args, **kwargs):
out = func(self, *args, **kwargs)
_set_csv_base_filename(override_filename or func.__name__)
return out
return change_filename
def check_shutdown(func):
@wraps(func)
def with_check(self, *args, **kwargs):
if c.UBER_SHUT_DOWN or c.AT_THE_CON:
raise HTTPRedirect('index?message={}', 'The page you requested is only available pre-event.')
else:
return func(self, *args, **kwargs)
return with_check
def credit_card(func):
@wraps(func)
def charge(self, session, payment_id, stripeToken, stripeEmail='ignored', **ignored):
if ignored:
log.error('received unexpected stripe parameters: {}', ignored)
try:
return func(self, session=session, payment_id=payment_id, stripeToken=stripeToken)
except HTTPRedirect:
raise
except:
error_text = \
'Got an error while calling charge' \
'(self, payment_id={!r}, stripeToken={!r}, ignored={}):\n{}\n' \
                '\n IMPORTANT: This could have resulted in an attendee paying and not being ' \
'marked as paid in the database. Definitely double check.'\
.format(payment_id, stripeToken, ignored, traceback.format_exc())
report_critical_exception(msg=error_text, subject='ERROR: MAGFest Stripe error (Automated Message)')
return traceback.format_exc()
return charge
def cached(func):
func.cached = True
return func
def cached_page(func):
from sideboard.lib import config as sideboard_config
innermost = get_innermost(func)
func.lock = RLock()
@wraps(func)
def with_caching(*args, **kwargs):
if hasattr(innermost, 'cached'):
fpath = os.path.join(sideboard_config['root'], 'data', func.__module__ + '.' + func.__name__)
with func.lock:
if not os.path.exists(fpath) or datetime.now().timestamp() - os.stat(fpath).st_mtime > 60 * 15:
contents = func(*args, **kwargs)
with open(fpath, 'wb') as f:
# Try to write assuming content is a byte first, then try it as a string
try:
f.write(contents)
except:
f.write(bytes(contents, 'UTF-8'))
with open(fpath, 'rb') as f:
return f.read()
else:
return func(*args, **kwargs)
return with_caching
def timed(func):
@wraps(func)
def with_timing(*args, **kwargs):
before = datetime.now()
try:
return func(*args, **kwargs)
finally:
log.debug('{}.{} loaded in {} seconds'.format(func.__module__, func.__name__, (datetime.now() - before).total_seconds()))
return with_timing
def sessionized(func):
@wraps(func)
def with_session(*args, **kwargs):
innermost = get_innermost(func)
if 'session' not in inspect.getfullargspec(innermost).args:
return func(*args, **kwargs)
else:
with sa.Session() as session:
try:
retval = func(*args, session=session, **kwargs)
session.expunge_all()
return retval
except HTTPRedirect:
session.commit()
raise
return with_session
def renderable_data(data=None):
data = data or {}
data['c'] = c
data.update({m.__name__: m for m in sa.Session.all_models()})
return data
# render using the first template that actually exists in template_name_list
def render(template_name_list, data=None):
data = renderable_data(data)
try:
template = loader.select_template(listify(template_name_list))
rendered = template.render(Context(data))
except django.template.base.TemplateDoesNotExist:
raise
except Exception as e:
source_template_name = '[unknown]'
        django_template_source_info = getattr(e, 'django_template_source', None)
if django_template_source_info:
for info in django_template_source_info:
if 'LoaderOrigin' in str(type(info)):
source_template_name = info.name
break
raise Exception('error rendering template [{}]'.format(source_template_name)) from e
# disabled for performance optimzation. so sad. IT SHALL RETURN
# rendered = screw_you_nick(rendered, template) # lolz.
return rendered.encode('utf-8')
# this is a Magfest inside joke.
# Nick gets mad when people call Magfest a "convention". He always says "It's not a convention, it's a festival"
# So........ if Nick is logged in.... let's annoy him a bit :)
def screw_you_nick(rendered, template):
if not c.AT_THE_CON and sa.AdminAccount.is_nick() and 'emails' not in template and 'history' not in template and 'form' not in rendered:
return rendered.replace('festival', 'convention').replace('Fest', 'Con') # lolz.
else:
return rendered
def get_module_name(class_or_func):
return class_or_func.__module__.split('.')[-1]
def _get_template_filename(func):
return os.path.join(get_module_name(func), func.__name__ + '.html')
def prettify_breadcrumb(str):
return str.replace('_', ' ').title()
def renderable(func):
@wraps(func)
def with_rendering(*args, **kwargs):
result = func(*args, **kwargs)
try:
result['breadcrumb_page_pretty_'] = prettify_breadcrumb(func.__name__) if func.__name__ != 'index' else 'Home'
result['breadcrumb_page_'] = func.__name__ if func.__name__ != 'index' else ''
except:
pass
try:
result['breadcrumb_section_pretty_'] = prettify_breadcrumb(get_module_name(func))
result['breadcrumb_section_'] = get_module_name(func)
except:
pass
if c.UBER_SHUT_DOWN and not cherrypy.request.path_info.startswith('/schedule'):
return render('closed.html')
elif isinstance(result, dict):
return render(_get_template_filename(func), result)
else:
return result
return with_rendering
def unrestricted(func):
func.restricted = False
return func
def restricted(func):
@wraps(func)
def with_restrictions(*args, **kwargs):
if func.restricted:
if func.restricted == (c.SIGNUPS,):
if not cherrypy.session.get('staffer_id'):
raise HTTPRedirect('../signups/login?message=You+are+not+logged+in', save_location=True)
elif cherrypy.session.get('account_id') is None:
raise HTTPRedirect('../accounts/login?message=You+are+not+logged+in', save_location=True)
else:
access = sa.AdminAccount.access_set()
if not c.AT_THE_CON:
access.discard(c.REG_AT_CON)
if not set(func.restricted).intersection(access):
if len(func.restricted) == 1:
return 'You need {} access for this page'.format(dict(c.ACCESS_OPTS)[func.restricted[0]])
else:
return ('You need at least one of the following access levels to view this page: '
+ ', '.join(dict(c.ACCESS_OPTS)[r] for r in func.restricted))
return func(*args, **kwargs)
return with_restrictions
def set_renderable(func, access):
"""
Return a function that is flagged correctly and is ready to be called by cherrypy as a request
"""
    func.restricted = getattr(func, 'restricted', access)
new_func = timed(cached_page(sessionized(restricted(renderable(func)))))
new_func.exposed = True
return new_func
class all_renderable:
def __init__(self, *needs_access):
self.needs_access = needs_access
def __call__(self, klass):
for name, func in klass.__dict__.items():
if hasattr(func, '__call__'):
new_func = set_renderable(func, self.needs_access)
setattr(klass, name, new_func)
return klass
register = template.Library()
def tag(klass):
@register.tag(klass.__name__)
def tagged(parser, token):
return klass(*token.split_contents()[1:])
return klass
class Validation:
def __init__(self):
self.validations = defaultdict(OrderedDict)
def __getattr__(self, model_name):
def wrapper(func):
self.validations[model_name][func.__name__] = func
return func
return wrapper
validation, prereg_validation = Validation(), Validation()
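# Illustrative sketch (not part of the original module): registering a check in the
# Validation registry above; the attribute name picks the model class being validated,
# and the field names used here are assumptions.
#
# @validation.Attendee
# def badge_printed_name_required(attendee):
#     if attendee.badge_type == c.STAFF_BADGE and not attendee.badge_printed_name:
#         return 'Staff badges require a printed name'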
adjustment_counter = count().__next__
def presave_adjustment(func):
"""
Decorate methods on a model class with this decorator to ensure that the
method is called immediately before the model is saved so that you can
make any adjustments, e.g. setting a ribbon based on other information.
"""
func.presave_adjustment = adjustment_counter()
return func
def predelete_adjustment(func):
"""
Decorate methods on a model class with this decorator to ensure that the
method is called immediately before the model is deleted, e.g. to shift
badges around the now-open slot.
"""
func.predelete_adjustment = adjustment_counter()
return func
class cost_property(property):
"""
    Different events have extra things they charge attendees and groups money
    for. Those events can use the @Session.model_mixin decorator and then
define a @cost_property which returns the amount added. For example, we
have code in the MAGStock repo which looks vaguely like this:
@Session.model_mixin
class Attendee:
purchased_food = Column(Boolean, default=False)
@cost_property
def food_price(self):
return c.FOOD_PRICE if self.purchased_food else 0
"""
class class_property(object):
"""Read-only property for classes rather than instances."""
def __init__(self, func):
self.func = func
def __get__(self, obj, owner):
return self.func(owner)
def create_redirect(url, access=[c.PEOPLE]):
"""
Return a function which redirects to the given url when called.
"""
def redirect(self):
raise HTTPRedirect(url)
renderable_func = set_renderable(redirect, access)
return renderable_func
class alias_to_site_section(object):
"""
Inject a URL redirect from another page to the decorated function.
This is useful for downstream plugins to add or change functions in upstream plugins to modify their behavior.
Example: if you move the explode_kittens() function from the core's site_section/summary.py page to a plugin,
in that plugin you can create an alias back to the original function like this:
@alias_to_site_section('summary')
def explode_kittens(...):
...
    Please note that this doesn't preserve arguments; it just causes a redirect. It's most useful for pages without
arguments like reports and landing pages.
"""
def __init__(self, site_section_name, alias_name=None, url=None):
self.site_section_name = site_section_name
self.alias_name = alias_name
self.url = url
def __call__(self, func):
root = getattr(uber.site_sections, self.site_section_name).Root
redirect_func = create_redirect(self.url or '../' + get_module_name(func) + '/' + func.__name__)
setattr(root, self.alias_name or func.__name__, redirect_func)
return func
def attendee_id_required(func):
@wraps(func)
def check_id(*args, **params):
message = "No ID provided. Try using a different link or going back."
session = params['session']
if params.get('id'):
try:
uuid.UUID(params['id'])
except ValueError:
message = "That Attendee ID is not a valid format. Did you enter or edit it manually?"
log.error("check_id: invalid_id: {}", params['id'])
else:
if session.query(sa.Attendee).filter(sa.Attendee.id == params['id']).first():
return func(*args, **params)
message = "The Attendee ID provided was not found in our database"
log.error("check_id: missing_id: {}", params['id'])
log.error("check_id: error: {}", message)
raise HTTPRedirect('../common/invalid?message=%s' % message)
return check_id
| md1024/rams | uber/decorators.py | Python | agpl-3.0 | 18,151 | 0.003251 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import numpy as np
import scipy.signal as sp
import random
import nix
import matplotlib.pyplot as plt
COLORS_BLUE_AND_RED = (
'dodgerblue', 'red'
)
COLORS_BLUE_GRADIENT = (
"#034980", "#055DA1", "#1B70E0", "#3786ED", "#4A95F7",
"#0C3663", "#1B4775", "#205082", "#33608F", "#51779E",
"#23B0DB", "#29CDFF", "#57D8FF", "#8FE5FF"
)
class Plotter(object):
"""
Plotter class for nix data arrays.
"""
def __init__(self, width=800, height=600, dpi=90, lines=1, cols=1, facecolor="white",
defaultcolors=COLORS_BLUE_GRADIENT):
"""
:param width: Width of the image in pixels
:param height: Height of the image in pixels
:param dpi: DPI of the image (default 90)
:param lines: Number of vertical subplots
:param cols: Number of horizontal subplots
:param facecolor: The background color of the plot
        :param defaultcolors: Default colors that are assigned to lines in each subplot.
"""
self.__width = width
self.__height = height
self.__dpi = dpi
self.__lines = lines
self.__cols = cols
self.__facecolor = facecolor
self.__defaultcolors = defaultcolors
self.__subplot_data = tuple()
for i in range(self.subplot_count):
self.__subplot_data += ([], )
self.__last_figure = None
# properties
@property
def subplot_count(self):
return self.__cols * self.__lines
@property
def subplot_data(self):
return self.__subplot_data
@property
def defaultcolors(self):
return self.__defaultcolors
@property
def last_figure(self):
assert self.__last_figure is not None, "No figure available (method plot has to be called at least once)"
return self.__last_figure
# methods
def save(self, name):
"""
Saves the last figure to the specified location.
:param name: The name of the figure file
"""
self.last_figure.savefig(name)
def add(self, array, subplot=0, color=None, xlim=None, downsample=None, labels=None):
"""
Add a new data array to the plot
:param array: The data array to plot
:param subplot: The index of the subplot where the array should be added (starting with 0)
:param color: The color of the array to plot (if None the next default colors will be assigned)
:param xlim: Start and end of the x-axis limits.
        :param downsample: Decimation factor applied to the data before plotting (None disables downsampling)
:param labels: Data array with labels that should be added to each data point of the array to plot
"""
color = self.__mk_color(color, subplot)
pdata = PlottingData(array, color, subplot, xlim, downsample, labels)
self.subplot_data[subplot].append(pdata)
def plot(self, width=None, height=None, dpi=None, lines=None, cols=None, facecolor=None):
"""
Plots all data arrays added to the plotter.
:param width: Width of the image in pixels
:param height: Height of the image in pixels
:param dpi: DPI of the image (default 90)
:param lines: Number of vertical subplots
:param cols: Number of horizontal subplots
:param facecolor: The background color of the plot
"""
# defaults
width = width or self.__width
height = height or self.__height
dpi = dpi or self.__dpi
lines = lines or self.__lines
cols = cols or self.__cols
facecolor = facecolor or self.__facecolor
# plot
figure, axis_all = plot_make_figure(width, height, dpi, cols, lines, facecolor)
for subplot, pdata_list in enumerate(self.subplot_data):
axis = axis_all[subplot]
pdata_list.sort()
event_like = Plotter.__count_event_like(pdata_list)
signal_like = Plotter.__count_signal_like(pdata_list)
for i, pdata in enumerate(pdata_list):
d1type = pdata.array.dimensions[0].dimension_type
shape = pdata.array.shape
nd = len(shape)
if nd == 1:
if d1type == nix.DimensionType.Set:
second_y = signal_like > 0
hint = (i + 1.0) / (event_like + 1.0) if event_like > 0 else None
plot_array_1d_set(pdata.array, axis, color=pdata.color, xlim=pdata.xlim, labels=pdata.labels,
second_y=second_y, hint=hint)
else:
plot_array_1d(pdata.array, axis, color=pdata.color, xlim=pdata.xlim,
downsample=pdata.downsample)
elif nd == 2:
if d1type == nix.DimensionType.Set:
plot_array_2d_set(pdata.array, axis, color=pdata.color, xlim=pdata.xlim,
downsample=pdata.downsample)
else:
plot_array_2d(pdata.array, axis, color=pdata.color, xlim=pdata.xlim,
downsample=pdata.downsample)
else:
raise Exception('Unsupported data')
axis.legend()
self.__last_figure = figure
# private methods
def __mk_color(self, color, subplot):
"""
If color is None, select one from the defaults or create a random color.
"""
if color is None:
color_count = len(self.defaultcolors)
count = len(self.subplot_data[subplot])
color = self.defaultcolors[count if count < color_count else color_count - 1]
if color == "random":
color = "#%02x%02x%02x" % (random.randint(50, 255), random.randint(50, 255), random.randint(50, 255))
return color
@staticmethod
def __count_signal_like(pdata_list):
sig_types = (nix.DimensionType.Range, nix.DimensionType.Sample)
count = 0
for pdata in pdata_list:
dims = pdata.array.dimensions
nd = len(dims)
if nd == 1 and dims[0].dimension_type in sig_types:
count += 1
elif nd == 2 and dims[0].dimension_type == nix.DimensionType.Set and dims[1].dimension_type in sig_types:
count += 1
return count
@staticmethod
def __count_image_like(pdata_list):
sig_types = (nix.DimensionType.Range, nix.DimensionType.Sample)
count = 0
for pdata in pdata_list:
dims = pdata.array.dimensions
nd = len(dims)
if nd == 2 and dims[0].dimension_type in sig_types and dims[1].dimension_type in sig_types:
count += 1
return count
@staticmethod
def __count_event_like(pdata_list):
count = 0
for pdata in pdata_list:
dims = pdata.array.dimensions
nd = len(dims)
if dims[0].dimension_type == nix.DimensionType.Set:
count += 1
return count
class PlottingData(object):
def __init__(self, array, color, subplot=0, xlim=None, downsample=False, labels=None):
self.array = array
self.dimensions = array.dimensions
self.shape = array.shape
self.rank = len(array.shape)
self.color = color
self.subplot = subplot
self.xlim = xlim
self.downsample = downsample
self.labels = labels
def __cmp__(self, other):
        weights = lambda dims: [(1 if d.dimension_type == nix.DimensionType.Sample else 0) for d in dims]
        a, b = weights(self.array.dimensions), weights(other.array.dimensions)
        return (a > b) - (a < b)  # like the Python 2 cmp() builtin, but also works on Python 3
def __lt__(self, other):
return self.__cmp__(other) < 0
def plot_make_figure(width, height, dpi, cols, lines, facecolor):
axis_all = []
    figure = plt.figure(facecolor=facecolor, figsize=(width / dpi, height / dpi), dpi=dpi)
figure.subplots_adjust(wspace=0.3, hspace=0.3, left=0.1, right=0.9, bottom=0.05, top=0.95)
for subplot in range(cols * lines):
axis = figure.add_subplot(lines, cols, subplot+1)
axis.tick_params(direction='out')
axis.spines['top'].set_color('none')
axis.spines['right'].set_color('none')
axis.xaxis.set_ticks_position('bottom')
axis.yaxis.set_ticks_position('left')
axis_all.append(axis)
return figure, axis_all
def plot_array_1d(array, axis, color=None, xlim=None, downsample=None, hint=None, labels=None):
dim = array.dimensions[0]
assert dim.dimension_type in (nix.DimensionType.Sample, nix.DimensionType.Range), "Unsupported data"
y = array[:]
if dim.dimension_type == nix.DimensionType.Sample:
x_start = dim.offset or 0
x = np.arange(0, array.shape[0]) * dim.sampling_interval + x_start
else:
x = np.array(dim.ticks)
if downsample is not None:
x = sp.decimate(x, downsample)
y = sp.decimate(y, downsample)
if xlim is not None:
y = y[(x >= xlim[0]) & (x <= xlim[1])]
x = x[(x >= xlim[0]) & (x <= xlim[1])]
axis.plot(x, y, color, label=array.name)
axis.set_xlabel('%s [%s]' % (dim.label, dim.unit))
axis.set_ylabel('%s [%s]' % (array.label, array.unit))
axis.set_xlim([np.min(x), np.max(x)])
def plot_array_1d_set(array, axis, color=None, xlim=None, hint=None, labels=None, second_y=False):
dim = array.dimensions[0]
assert dim.dimension_type == nix.DimensionType.Set, "Unsupported data"
x = array[:]
z = np.ones_like(x) * 0.8 * (hint or 0.5) + 0.1
if second_y:
ax2 = axis.twinx()
ax2.set_ylim([0, 1])
ax2.scatter(x, z, 50, color, linewidths=2, label=array.name, marker="|")
ax2.set_yticks([])
if labels is not None:
for i, v in enumerate(labels[:]):
ax2.annotate(str(v), (x[i], z[i]))
else:
#x = array[xlim or Ellipsis]
axis.set_ylim([0, 1])
axis.scatter(x, z, 50, color, linewidths=2, label=array.name, marker="|")
axis.set_xlabel('%s [%s]' % (array.label, array.unit))
axis.set_ylabel(array.name)
axis.set_yticks([])
if labels is not None:
for i, v in enumerate(labels[:]):
axis.annotate(str(v), (x[i], z[i]))
def plot_array_2d(array, axis, color=None, xlim=None, downsample=None, hint=None, labels=None):
d1 = array.dimensions[0]
d2 = array.dimensions[1]
d1_type = d1.dimension_type
d2_type = d2.dimension_type
assert d1_type == nix.DimensionType.Sample, "Unsupported data"
assert d2_type == nix.DimensionType.Sample, "Unsupported data"
z = array[:]
x_start = d1.offset or 0
y_start = d2.offset or 0
x_end = x_start + array.shape[0] * d1.sampling_interval
y_end = y_start + array.shape[1] * d2.sampling_interval
    im = axis.imshow(z, origin='lower', extent=[x_start, x_end, y_start, y_end])
axis.set_xlabel('%s [%s]' % (d1.label, d1.unit))
axis.set_ylabel('%s [%s]' % (d2.label, d2.unit))
axis.set_title(array.name)
    bar = plt.colorbar(im, ax=axis)
    bar.set_label('%s [%s]' % (array.label, array.unit))
def plot_array_2d_set(array, axis, color=None, xlim=None, downsample=None, hint=None, labels=None):
d1 = array.dimensions[0]
d2 = array.dimensions[1]
d1_type = d1.dimension_type
d2_type = d2.dimension_type
assert d1_type == nix.DimensionType.Set, "Unsupported data"
assert d2_type == nix.DimensionType.Sample, "Unsupported data"
x_start = d2.offset or 0
x_one = x_start + np.arange(0, array.shape[1]) * d2.sampling_interval
x = np.tile(x_one.reshape(array.shape[1], 1), array.shape[0])
y = array[:]
axis.plot(x, y.T, color=color)
axis.set_title(array.name)
axis.set_xlabel('%s [%s]' % (d2.label, d2.unit))
axis.set_ylabel('%s [%s]' % (array.label, array.unit))
if d1.labels is not None:
axis.legend(d1.labels)
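# Minimal usage sketch (the file, block and array names are assumptions):
# f = nix.File.open('recording.nix', nix.FileMode.ReadOnly)
# arr = f.blocks[0].data_arrays[0]
# plotter = Plotter(lines=1, cols=1)
# plotter.add(arr, subplot=0)
# plotter.plot()
# plotter.save('recording.png')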
| stoewer/nix-demo | utils/plotting.py | Python | bsd-3-clause | 12,201 | 0.002951 |
from datetime import date, time, timedelta
from decimal import Decimal
import itertools
from django.utils import timezone
from six.moves import xrange
from .models import Person
def get_fixtures(n=None):
"""
    Yields `n` dictionaries of `Person` field values, cycling through six distinct fixtures.
    If `n` is not specified it defaults to 6.
"""
_now = timezone.now().replace(microsecond=0) # mysql doesn't do microseconds. # NOQA
_date = date(2015, 3, 28)
_time = time(13, 0)
fixtures = [
{
'big_age': 59999999999999999, 'comma_separated_age': '1,2,3',
'age': -99, 'positive_age': 9999, 'positive_small_age': 299,
'small_age': -299, 'certified': False, 'null_certified': None,
'name': 'Mike', 'email': '[email protected]',
'file_path': '/Users/user/fixtures.json', 'slug': 'mike',
'text': 'here is a dummy text',
'url': 'https://docs.djangoproject.com',
'height': Decimal('1.81'), 'date_time': _now,
'date': _date, 'time': _time, 'float_height': 0.3,
'remote_addr': '192.0.2.30', 'my_file': 'dummy.txt',
'image': 'kitten.jpg', 'data': {'name': 'Mike', 'age': -99},
},
{
'big_age': 245999992349999, 'comma_separated_age': '6,2,9',
'age': 25, 'positive_age': 49999, 'positive_small_age': 315,
'small_age': 5409, 'certified': False, 'null_certified': True,
'name': 'Pete', 'email': '[email protected]',
'file_path': 'users.json', 'slug': 'pete', 'text': 'dummy',
'url': 'https://google.com', 'height': Decimal('1.93'),
'date_time': _now, 'date': _date, 'time': _time,
'float_height': 0.5, 'remote_addr': '127.0.0.1',
'my_file': 'fixtures.json',
'data': [{'name': 'Pete'}, {'name': 'Mike'}],
},
{
'big_age': 9929992349999, 'comma_separated_age': '6,2,9,10,5',
'age': 29, 'positive_age': 412399, 'positive_small_age': 23315,
'small_age': -5409, 'certified': False, 'null_certified': True,
'name': 'Ash', 'email': '[email protected]',
'file_path': '/Downloads/kitten.jpg', 'slug': 'ash',
'text': 'bla bla bla', 'url': 'news.ycombinator.com',
'height': Decimal('1.78'), 'date_time': _now,
'date': _date, 'time': _time,
'float_height': 0.8, 'my_file': 'dummy.png',
'data': {'text': 'bla bla bla', 'names': ['Mike', 'Pete']},
},
{
'big_age': 9992349234, 'comma_separated_age': '12,29,10,5',
'age': -29, 'positive_age': 4199, 'positive_small_age': 115,
'small_age': 909, 'certified': True, 'null_certified': False,
'name': 'Mary', 'email': '[email protected]',
'file_path': 'dummy.png', 'slug': 'mary',
'text': 'bla bla bla bla bla', 'url': 'news.ycombinator.com',
'height': Decimal('1.65'), 'date_time': _now,
'date': _date, 'time': _time, 'float_height': 0,
'remote_addr': '2a02:42fe::4',
'data': {'names': {'name': 'Mary'}},
},
{
'big_age': 999234, 'comma_separated_age': '12,1,30,50',
'age': 1, 'positive_age': 99199, 'positive_small_age': 5,
'small_age': -909, 'certified': False, 'null_certified': False,
'name': 'Sandra', 'email': '[email protected]',
'file_path': '/home/dummy.png', 'slug': 'sandra',
'text': 'this is a dummy text', 'url': 'google.com',
'height': Decimal('1.59'), 'date_time': _now,
'date': _date, 'time': _time, 'float_height': 2 ** 2,
'image': 'dummy.jpeg', 'data': {},
},
{
'big_age': 9999999999, 'comma_separated_age': '1,100,3,5',
'age': 35, 'positive_age': 1111, 'positive_small_age': 500,
'small_age': 110, 'certified': True, 'null_certified': None,
'name': 'Crystal', 'email': '[email protected]',
'file_path': '/home/dummy.txt', 'slug': 'crystal',
'text': 'dummy text', 'url': 'docs.djangoproject.com',
'height': Decimal('1.71'), 'date_time': _now,
'date': _date, 'time': _time, 'float_height': 2 ** 10,
'image': 'dummy.png', 'data': [],
},
]
n = n or len(fixtures)
fixtures = itertools.cycle(fixtures)
for _ in xrange(n):
yield next(fixtures)
def create_fixtures(n=None):
"""
    Wrapper around Person.objects.bulk_create which creates `n` fixtures
"""
Person.objects.bulk_create(Person(**person)
for person in get_fixtures(n))
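# Example (sketch): populate the test database with ten Person rows.
# create_fixtures(10)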
| lead-ratings/django-bulk-update | tests/fixtures.py | Python | mit | 4,775 | 0.000209 |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The nealcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP 9 soft forks.
Connect to a single node.
regtest lock-in with 108/144 block signalling
activation after a further 144 blocks
mine 2 blocks and save coinbases for later use
mine 141 blocks to transition from DEFINED to STARTED
mine 100 blocks signalling readiness and 44 not in order to fail to change state this period
mine 108 blocks signalling readiness and 36 blocks not signalling readiness (STARTED->LOCKED_IN)
mine a further 143 blocks (LOCKED_IN)
test that enforcement has not triggered (which triggers ACTIVE)
test that enforcement has triggered
"""
from test_framework.blockstore import BlockStore
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript, OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP
from io import BytesIO
import time
import itertools
class BIP9SoftForksTest(ComparisonTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
def setup_network(self):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']],
binary=[self.options.testbinary])
def run_test(self):
self.test = TestManager(self, self.options.tmpdir)
self.test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
self.test.run()
def create_transaction(self, node, coinbase, to_address, amount):
from_txid = node.getblock(coinbase)['tx'][0]
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(rawtx))
tx.deserialize(f)
tx.nVersion = 2
return tx
def sign_transaction(self, node, tx):
signresult = node.signrawtransaction(bytes_to_hex_str(tx.serialize()))
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def generate_blocks(self, number, version, test_blocks = []):
for i in range(number):
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = version
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
self.height += 1
return test_blocks
def get_bip9_status(self, key):
info = self.nodes[0].getblockchaininfo()
return info['bip9_softforks'][key]
def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignature, bitno):
assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
assert_equal(self.get_bip9_status(bipName)['since'], 0)
# generate some coins for later
self.coinbase_blocks = self.nodes[0].generate(2)
self.height = 3 # height of the next block to build
self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
self.nodeaddress = self.nodes[0].getnewaddress()
self.last_block_time = int(time.time())
assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
assert_equal(self.get_bip9_status(bipName)['since'], 0)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert(bipName not in tmpl['vbavailable'])
assert_equal(tmpl['vbrequired'], 0)
assert_equal(tmpl['version'], 0x20000000)
# Test 1
# Advance from DEFINED to STARTED
test_blocks = self.generate_blocks(141, 4)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
assert_equal(self.get_bip9_status(bipName)['since'], 144)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert_equal(tmpl['vbavailable'][bipName], bitno)
assert_equal(tmpl['vbrequired'], 0)
assert(tmpl['version'] & activated_version)
# Test 2
# Fail to achieve LOCKED_IN 100 out of 144 signal bit 1
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(50, activated_version) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
        test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000001 (signalling ready)
        test_blocks = self.generate_blocks(24, 4, test_blocks) # 0x00000004 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
assert_equal(self.get_bip9_status(bipName)['since'], 144)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert_equal(tmpl['vbavailable'][bipName], bitno)
assert_equal(tmpl['vbrequired'], 0)
assert(tmpl['version'] & activated_version)
# Test 3
# 108 out of 144 signal bit 1 to achieve LOCKED_IN
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(58, activated_version) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
        test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000001 (signalling ready)
        test_blocks = self.generate_blocks(10, 4, test_blocks) # 0x00000004 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
assert_equal(self.get_bip9_status(bipName)['since'], 432)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
# Test 4
        # 143 more blocks, none of them signalling (waiting period-1)
test_blocks = self.generate_blocks(143, 4)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
assert_equal(self.get_bip9_status(bipName)['since'], 432)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
# Test 5
# Check that the new rule is enforced
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[0], self.nodeaddress, 1.0)
invalidate(spendtx)
spendtx = self.sign_transaction(self.nodes[0], spendtx)
spendtx.rehash()
invalidatePostSignature(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = activated_version
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
self.height += 1
yield TestInstance([[block, True]])
assert_equal(self.get_bip9_status(bipName)['status'], 'active')
assert_equal(self.get_bip9_status(bipName)['since'], 576)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName in tmpl['rules'])
assert(bipName not in tmpl['vbavailable'])
assert_equal(tmpl['vbrequired'], 0)
assert(not (tmpl['version'] & (1 << bitno)))
# Test 6
# Check that the new sequence lock rules are enforced
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[1], self.nodeaddress, 1.0)
invalidate(spendtx)
spendtx = self.sign_transaction(self.nodes[0], spendtx)
spendtx.rehash()
invalidatePostSignature(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = 5
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
yield TestInstance([[block, False]])
# Restart all
self.test.block_store.close()
stop_nodes(self.nodes)
shutil.rmtree(self.options.tmpdir)
self.setup_chain()
self.setup_network()
self.test.block_store = BlockStore(self.options.tmpdir)
self.test.clear_all_connections()
self.test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
def get_tests(self):
for test in itertools.chain(
self.test_BIP('csv', 0x20000001, self.sequence_lock_invalidate, self.donothing, 0),
self.test_BIP('csv', 0x20000001, self.mtp_invalidate, self.donothing, 0),
self.test_BIP('csv', 0x20000001, self.donothing, self.csv_invalidate, 0)
):
yield test
def donothing(self, tx):
return
def csv_invalidate(self, tx):
"""Modify the signature in vin 0 of the tx to fail CSV
Prepends -1 CSV DROP in the scriptSig itself.
"""
tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP] +
list(CScript(tx.vin[0].scriptSig)))
def sequence_lock_invalidate(self, tx):
"""Modify the nSequence to make it fails once sequence lock rule is
activated (high timespan).
"""
tx.vin[0].nSequence = 0x00FFFFFF
tx.nLockTime = 0
def mtp_invalidate(self, tx):
"""Modify the nLockTime to make it fails once MTP rule is activated."""
# Disable Sequence lock, Activate nLockTime
tx.vin[0].nSequence = 0x90FFFFFF
tx.nLockTime = self.last_block_time
if __name__ == '__main__':
BIP9SoftForksTest().main()
| appop/bitcoin | qa/rpc-tests/bip9-softforks.py | Python | mit | 10,528 | 0.004084 |
from keystone import utils
from keystone.common import wsgi
import keystone.config as config
from keystone.logic.types.tenant import Tenant
from . import get_marker_limit_and_url
class TenantController(wsgi.Controller):
"""Controller for Tenant related operations"""
def __init__(self, options, is_service_operation=None):
self.options = options
self.is_service_operation = is_service_operation
@utils.wrap_error
def create_tenant(self, req):
tenant = utils.get_normalized_request_content(Tenant, req)
return utils.send_result(201, req,
config.SERVICE.create_tenant(utils.get_auth_token(req), tenant))
@utils.wrap_error
def get_tenants(self, req):
tenant_name = req.GET["name"] if "name" in req.GET else None
if tenant_name:
tenant = config.SERVICE.get_tenant_by_name(
utils.get_auth_token(req),
tenant_name)
return utils.send_result(200, req, tenant)
else:
marker, limit, url = get_marker_limit_and_url(req)
tenants = config.SERVICE.get_tenants(utils.get_auth_token(req),
marker, limit, url, self.is_service_operation)
return utils.send_result(200, req, tenants)
@utils.wrap_error
def get_tenant(self, req, tenant_id):
tenant = config.SERVICE.get_tenant(utils.get_auth_token(req),
tenant_id)
return utils.send_result(200, req, tenant)
@utils.wrap_error
def update_tenant(self, req, tenant_id):
tenant = utils.get_normalized_request_content(Tenant, req)
rval = config.SERVICE.update_tenant(utils.get_auth_token(req),
tenant_id, tenant)
return utils.send_result(200, req, rval)
@utils.wrap_error
def delete_tenant(self, req, tenant_id):
rval = config.SERVICE.delete_tenant(utils.get_auth_token(req),
tenant_id)
return utils.send_result(204, req, rval)
| genius1611/Keystone | keystone/controllers/tenant.py | Python | apache-2.0 | 1,975 | 0.002532 |
"""generated file, don't modify or your data will be lost"""
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
pass
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/logilab/__init__.py | Python | agpl-3.0 | 155 | 0 |
import argparse
import sys
import math
from collections import namedtuple
from itertools import count
import gym
import numpy as np
import scipy.optimize
from gym import wrappers
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as T
from torch.autograd import Variable
from models import Policy, Value, ActorCritic
from replay_memory import Memory
from running_state import ZFilter
# from utils import *
torch.set_default_tensor_type('torch.DoubleTensor')
PI = torch.DoubleTensor([3.1415926])
parser = argparse.ArgumentParser(description='PyTorch actor-critic example')
parser.add_argument('--gamma', type=float, default=0.995, metavar='G',
help='discount factor (default: 0.995)')
parser.add_argument('--env-name', default="Reacher-v1", metavar='G',
help='name of the environment to run')
parser.add_argument('--tau', type=float, default=0.97, metavar='G',
help='gae (default: 0.97)')
# parser.add_argument('--l2_reg', type=float, default=1e-3, metavar='G',
# help='l2 regularization regression (default: 1e-3)')
# parser.add_argument('--max_kl', type=float, default=1e-2, metavar='G',
# help='max kl value (default: 1e-2)')
# parser.add_argument('--damping', type=float, default=1e-1, metavar='G',
# help='damping (default: 1e-1)')
parser.add_argument('--seed', type=int, default=543, metavar='N',
help='random seed (default: 1)')
parser.add_argument('--batch-size', type=int, default=5000, metavar='N',
help='batch size (default: 5000)')
parser.add_argument('--render', action='store_true',
help='render the environment')
parser.add_argument('--log-interval', type=int, default=1, metavar='N',
help='interval between training status logs (default: 10)')
parser.add_argument('--entropy-coeff', type=float, default=0.0, metavar='N',
help='coefficient for entropy cost')
parser.add_argument('--clip-epsilon', type=float, default=0.2, metavar='N',
help='Clipping for PPO grad')
parser.add_argument('--use-joint-pol-val', action='store_true',
help='whether to use combined policy and value nets')
args = parser.parse_args()
env = gym.make(args.env_name)
num_inputs = env.observation_space.shape[0]
num_actions = env.action_space.shape[0]
env.seed(args.seed)
torch.manual_seed(args.seed)
if args.use_joint_pol_val:
ac_net = ActorCritic(num_inputs, num_actions)
opt_ac = optim.Adam(ac_net.parameters(), lr=0.001)
else:
policy_net = Policy(num_inputs, num_actions)
value_net = Value(num_inputs)
opt_policy = optim.Adam(policy_net.parameters(), lr=0.001)
opt_value = optim.Adam(value_net.parameters(), lr=0.001)
def select_action(state):
state = torch.from_numpy(state).unsqueeze(0)
action_mean, _, action_std = policy_net(Variable(state))
action = torch.normal(action_mean, action_std)
return action
def select_action_actor_critic(state):
state = torch.from_numpy(state).unsqueeze(0)
action_mean, _, action_std, v = ac_net(Variable(state))
action = torch.normal(action_mean, action_std)
return action
def normal_log_density(x, mean, log_std, std):
var = std.pow(2)
log_density = -(x - mean).pow(2) / (2 * var) - 0.5 * torch.log(2 * Variable(PI)) - log_std
return log_density.sum(1)
def update_params_actor_critic(batch):
rewards = torch.Tensor(batch.reward)
masks = torch.Tensor(batch.mask)
actions = torch.Tensor(np.concatenate(batch.action, 0))
states = torch.Tensor(batch.state)
action_means, action_log_stds, action_stds, values = ac_net(Variable(states))
returns = torch.Tensor(actions.size(0),1)
deltas = torch.Tensor(actions.size(0),1)
advantages = torch.Tensor(actions.size(0),1)
prev_return = 0
prev_value = 0
prev_advantage = 0
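    # Generalized Advantage Estimation, computed backwards over the batch:
    #   delta_t = r_t + gamma * V(s_{t+1}) - V(s_t)
    #   A_t     = delta_t + gamma * tau * A_{t+1}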
for i in reversed(range(rewards.size(0))):
returns[i] = rewards[i] + args.gamma * prev_return * masks[i]
deltas[i] = rewards[i] + args.gamma * prev_value * masks[i] - values.data[i]
advantages[i] = deltas[i] + args.gamma * args.tau * prev_advantage * masks[i]
prev_return = returns[i, 0]
prev_value = values.data[i, 0]
prev_advantage = advantages[i, 0]
targets = Variable(returns)
# kloldnew = policy_net.kl_old_new() # oldpi.pd.kl(pi.pd)
# ent = policy_net.entropy() #pi.pd.entropy()
# meankl = torch.reduce_mean(kloldnew)
# meanent = torch.reduce_mean(ent)
# pol_entpen = (-args.entropy_coeff) * meanent
action_var = Variable(actions)
# compute probs from actions above
log_prob_cur = normal_log_density(action_var, action_means, action_log_stds, action_stds)
action_means_old, action_log_stds_old, action_stds_old, values_old = ac_net(Variable(states), old=True)
log_prob_old = normal_log_density(action_var, action_means_old, action_log_stds_old, action_stds_old)
# backup params after computing probs but before updating new params
ac_net.backup()
advantages = (advantages - advantages.mean()) / advantages.std()
advantages_var = Variable(advantages)
opt_ac.zero_grad()
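    # PPO clipped surrogate objective (negated because the optimizer minimizes):
    #   L = E[min(r_t * A_t, clip(r_t, 1 - eps, 1 + eps) * A_t)],  r_t = pi_new(a|s) / pi_old(a|s)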
ratio = torch.exp(log_prob_cur - log_prob_old) # pnew / pold
surr1 = ratio * advantages_var[:,0]
surr2 = torch.clamp(ratio, 1.0 - args.clip_epsilon, 1.0 + args.clip_epsilon) * advantages_var[:,0]
policy_surr = -torch.min(surr1, surr2).mean()
vf_loss1 = (values - targets).pow(2.)
vpredclipped = values_old + torch.clamp(values - values_old, -args.clip_epsilon, args.clip_epsilon)
vf_loss2 = (vpredclipped - targets).pow(2.)
vf_loss = 0.5 * torch.max(vf_loss1, vf_loss2).mean()
total_loss = policy_surr + vf_loss
total_loss.backward()
torch.nn.utils.clip_grad_norm(ac_net.parameters(), 40)
opt_ac.step()
def update_params(batch):
rewards = torch.Tensor(batch.reward)
masks = torch.Tensor(batch.mask)
actions = torch.Tensor(np.concatenate(batch.action, 0))
states = torch.Tensor(batch.state)
values = value_net(Variable(states))
returns = torch.Tensor(actions.size(0),1)
deltas = torch.Tensor(actions.size(0),1)
advantages = torch.Tensor(actions.size(0),1)
prev_return = 0
prev_value = 0
prev_advantage = 0
for i in reversed(range(rewards.size(0))):
returns[i] = rewards[i] + args.gamma * prev_return * masks[i]
deltas[i] = rewards[i] + args.gamma * prev_value * masks[i] - values.data[i]
advantages[i] = deltas[i] + args.gamma * args.tau * prev_advantage * masks[i]
prev_return = returns[i, 0]
prev_value = values.data[i, 0]
prev_advantage = advantages[i, 0]
targets = Variable(returns)
opt_value.zero_grad()
value_loss = (values - targets).pow(2.).mean()
value_loss.backward()
opt_value.step()
# kloldnew = policy_net.kl_old_new() # oldpi.pd.kl(pi.pd)
# ent = policy_net.entropy() #pi.pd.entropy()
# meankl = torch.reduce_mean(kloldnew)
# meanent = torch.reduce_mean(ent)
# pol_entpen = (-args.entropy_coeff) * meanent
action_var = Variable(actions)
action_means, action_log_stds, action_stds = policy_net(Variable(states))
log_prob_cur = normal_log_density(action_var, action_means, action_log_stds, action_stds)
action_means_old, action_log_stds_old, action_stds_old = policy_net(Variable(states), old=True)
log_prob_old = normal_log_density(action_var, action_means_old, action_log_stds_old, action_stds_old)
# backup params after computing probs but before updating new params
policy_net.backup()
advantages = (advantages - advantages.mean()) / advantages.std()
advantages_var = Variable(advantages)
opt_policy.zero_grad()
ratio = torch.exp(log_prob_cur - log_prob_old) # pnew / pold
surr1 = ratio * advantages_var[:,0]
surr2 = torch.clamp(ratio, 1.0 - args.clip_epsilon, 1.0 + args.clip_epsilon) * advantages_var[:,0]
policy_surr = -torch.min(surr1, surr2).mean()
policy_surr.backward()
torch.nn.utils.clip_grad_norm(policy_net.parameters(), 40)
opt_policy.step()
running_state = ZFilter((num_inputs,), clip=5)
running_reward = ZFilter((1,), demean=False, clip=10)
episode_lengths = []
for i_episode in count(1):
memory = Memory()
num_steps = 0
reward_batch = 0
num_episodes = 0
while num_steps < args.batch_size:
state = env.reset()
state = running_state(state)
reward_sum = 0
for t in range(10000): # Don't infinite loop while learning
if args.use_joint_pol_val:
action = select_action_actor_critic(state)
else:
action = select_action(state)
action = action.data[0].numpy()
next_state, reward, done, _ = env.step(action)
reward_sum += reward
next_state = running_state(next_state)
mask = 1
if done:
mask = 0
memory.push(state, np.array([action]), mask, next_state, reward)
if args.render:
env.render()
if done:
break
state = next_state
num_steps += (t-1)
num_episodes += 1
reward_batch += reward_sum
reward_batch /= num_episodes
batch = memory.sample()
if args.use_joint_pol_val:
update_params_actor_critic(batch)
else:
update_params(batch)
if i_episode % args.log_interval == 0:
print('Episode {}\tLast reward: {}\tAverage reward {:.2f}'.format(
i_episode, reward_sum, reward_batch))
| tpbarron/pytorch-ppo | main.py | Python | mit | 9,755 | 0.003383 |
import json
import pathlib
import sys
import boto3
dist_folder = pathlib.Path.cwd() / 'dist'
try:
f = next(dist_folder.glob('*.whl'))
except StopIteration:
print("No .whl files found in ./dist!")
sys.exit()
print("Uploading", f.name)
s3 = boto3.client('s3')
s3.upload_file(str(f), 'releases.wagtail.io', 'nightly/dist/' + f.name, ExtraArgs={'ACL': 'public-read'})
print("Updating latest.json")
boto3.resource('s3').Object('releases.wagtail.io', 'nightly/latest.json').put(
ACL='public-read',
Body=json.dumps({
"url": 'https://releases.wagtail.io/nightly/dist/' + f.name,
})
)
| kaedroho/wagtail | scripts/nightly/upload.py | Python | bsd-3-clause | 615 | 0.001626 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Authors: Jussi Nurminen <[email protected]>
# License: BSD Style.
import tarfile
import os.path as op
import os
from ...utils import _fetch_file, verbose, _check_option
from ..utils import _get_path, logger, _do_path_update
@verbose
def data_path(dataset='evoked', path=None, force_update=False,
update_path=True, verbose=None):
u"""Get path to local copy of the high frequency SEF dataset.
Gets a local copy of the high frequency SEF MEG dataset [1]_.
Parameters
----------
dataset : 'evoked' | 'raw'
Whether to get the main dataset (evoked, structural and the rest) or
the separate dataset containing raw MEG data only.
path : None | str
Where to look for the HF-SEF data storing location.
If None, the environment variable or config parameter
``MNE_DATASETS_HF_SEF_PATH`` is used. If it doesn't exist, the
"~/mne_data" directory is used. If the HF-SEF dataset
is not found under the given path, the data
will be automatically downloaded to the specified folder.
force_update : bool
Force update of the dataset even if a local copy exists.
update_path : bool | None
If True, set the MNE_DATASETS_HF_SEF_PATH in mne-python
config to the given path. If None, the user is prompted.
%(verbose)s
Returns
-------
path : str
Local path to the directory where the HF-SEF data is stored.
References
----------
.. [1] Nurminen, J., Paananen, H., Mäkelä, J. (2017): High frequency
somatosensory MEG dataset. https://doi.org/10.5281/zenodo.889234
"""
key = 'MNE_DATASETS_HF_SEF_PATH'
name = 'HF_SEF'
path = _get_path(path, key, name)
destdir = op.join(path, 'HF_SEF')
urls = {'evoked':
'https://zenodo.org/record/3523071/files/hf_sef_evoked.tar.gz',
'raw':
'https://zenodo.org/record/889296/files/hf_sef_raw.tar.gz'}
hashes = {'evoked': '13d34cb5db584e00868677d8fb0aab2b',
'raw': '33934351e558542bafa9b262ac071168'}
_check_option('dataset', dataset, sorted(urls.keys()))
url = urls[dataset]
hash_ = hashes[dataset]
fn = url.split('/')[-1] # pick the filename from the url
archive = op.join(destdir, fn)
# check for existence of evoked and raw sets
has = dict()
subjdir = op.join(destdir, 'subjects')
megdir_a = op.join(destdir, 'MEG', 'subject_a')
has['evoked'] = op.isdir(destdir) and op.isdir(subjdir)
has['raw'] = op.isdir(megdir_a) and any(['raw' in fn_ for fn_ in
os.listdir(megdir_a)])
if not has[dataset] or force_update:
if not op.isdir(destdir):
os.mkdir(destdir)
_fetch_file(url, archive, hash_=hash_)
with tarfile.open(archive) as tar:
logger.info('Decompressing %s' % archive)
for member in tar.getmembers():
# strip the leading dirname 'hf_sef/' from the archive paths
# this should be fixed when making next version of archives
member.name = member.name[7:]
try:
tar.extract(member, destdir)
except IOError:
# check whether file exists but could not be overwritten
fn_full = op.join(destdir, member.name)
if op.isfile(fn_full):
os.remove(fn_full)
tar.extract(member, destdir)
else: # some more sinister cause for IOError
raise
os.remove(archive)
_do_path_update(path, update_path, key, name)
return destdir
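# Example (sketch): fetch or locate the evoked dataset under the default path.
# destdir = data_path(dataset='evoked')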
| olafhauk/mne-python | mne/datasets/hf_sef/hf_sef.py | Python | bsd-3-clause | 3,751 | 0 |
#!/usr/bin/env python
"""
A simple coin flipping example. The model is written in TensorFlow.
Inspired by Stan's toy example.
Probability model
Prior: Beta
Likelihood: Bernoulli
Inference: Maximum a posteriori
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import edward as ed
import numpy as np
import tensorflow as tf
from edward.stats import bernoulli, beta
class BetaBernoulli:
"""p(x, z) = Bernoulli(x | z) * Beta(z | 1, 1)"""
def __init__(self):
self.n_vars = 1
def log_prob(self, xs, zs):
log_prior = beta.logpdf(zs, a=1.0, b=1.0)
log_lik = tf.pack([tf.reduce_sum(bernoulli.logpmf(xs['x'], z))
for z in tf.unpack(zs)])
return log_lik + log_prior
ed.set_seed(42)
model = BetaBernoulli()
data = {'x': np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])}
params = tf.sigmoid(tf.Variable(tf.random_normal([1])))
inference = ed.MAP(model, data, params=params)
inference.run(n_iter=100, n_print=10)
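# With the flat Beta(1, 1) prior the MAP estimate equals the sample mean of the data,
# so the fitted probability should converge to 2/10 = 0.2 for the observations above.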
| ruohoruotsi/Wavelet-Tree-Synth | edward-examples/beta_bernoulli_map.py | Python | gpl-2.0 | 1,013 | 0.002962 |
#!/usr/bin/env python2
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import hashlib
import sys
import os
from random import SystemRandom
import base64
import hmac
if len(sys.argv) < 2:
sys.stderr.write('Please include username as an argument.\n')
sys.exit(0)
username = sys.argv[1]
#This uses os.urandom() underneath
cryptogen = SystemRandom()
#Create 16 byte hex salt
salt_sequence = [cryptogen.randrange(256) for i in range(16)]
hexseq = list(map(hex, salt_sequence))
salt = "".join([x[2:] for x in hexseq])
#Create 32 byte b64 password
password = base64.urlsafe_b64encode(os.urandom(32))
digestmod = hashlib.sha256
if sys.version_info.major >= 3:
password = password.decode('utf-8')
digestmod = 'SHA256'
m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), digestmod)
result = m.hexdigest()
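# The printed credential has the form "rpcauth=<username>:<salt>$<hmac-sha256(salt, password)>";
# only this salted HMAC goes into the config file, the plaintext password stays with the user.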
print("String to be appended to ultimateonlinecash.conf:")
print("rpcauth="+username+":"+salt+"$"+result)
print("Your password:\n"+password)
| cryptoprojects/ultimateonlinecash | share/rpcuser/rpcuser.py | Python | mit | 1,125 | 0.005333 |
import time
from subprocess import *
PATH = "/home/richie_rich/OSProj/redis-OS-project/src/redis-cli"
p1 = Popen([PATH], shell=True, stdin=PIPE)
p1.communicate(input="FLUSHALL")
strength = 1000000
rangeVal = strength + 1
string = "set key"
string1 = ""
count = 0
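# SET commands are accumulated into a buffer and piped to redis-cli via stdin in batches
# of 1000; with strength = 1,000,000 every batch flushes exactly on the 1000-command boundary.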
for i in xrange(1,rangeVal):
count = count + 1
string1 = string1 + string + str(i) + " val" + str(i) + "\n"
if (i % 1000) == 0 :
p1 = Popen([PATH], shell=True, stdin=PIPE)
p1.communicate(input=string1)
string = "set key"
string1 = ""
print string1
print "Inserted %d items" %(count)
| richasinha/redis-OS-project | src/setRedis.py | Python | bsd-3-clause | 595 | 0.006723 |
from __future__ import division
import errno
import json
import os
import sys
import time
import traceback
from twisted.internet import defer, reactor
from twisted.python import log
from twisted.web import resource, static
import p2pool
from bitcoin import data as bitcoin_data
from . import data as p2pool_data, p2p
from util import deferral, deferred_resource, graph, math, memory, pack, variable
def _atomic_read(filename):
try:
with open(filename, 'rb') as f:
return f.read()
except IOError, e:
if e.errno != errno.ENOENT:
raise
try:
with open(filename + '.new', 'rb') as f:
return f.read()
except IOError, e:
if e.errno != errno.ENOENT:
raise
return None
def _atomic_write(filename, data):
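    # Write to a sibling ".new" file, flush and fsync it, then rename it over the
    # target so readers never observe a partially written file.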
with open(filename + '.new', 'wb') as f:
f.write(data)
f.flush()
try:
os.fsync(f.fileno())
except:
pass
try:
os.rename(filename + '.new', filename)
except: # XXX windows can't overwrite
os.remove(filename)
os.rename(filename + '.new', filename)
def get_web_root(wb, datadir_path, daemon_getinfo_var, stop_event=variable.Event()):
node = wb.node
start_time = time.time()
web_root = resource.Resource()
def get_users():
height, last = node.tracker.get_height_and_last(node.best_share_var.value)
weights, total_weight, donation_weight = node.tracker.get_cumulative_weights(node.best_share_var.value, min(height, 720), 65535*2**256)
res = {}
for script in sorted(weights, key=lambda s: weights[s]):
res[bitcoin_data.script2_to_address(script, node.net.PARENT)] = weights[script]/total_weight
return res
def get_current_scaled_txouts(scale, trunc=0):
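        # Scale every payout so the values sum to `scale`; outputs that fall below `trunc`
        # are pooled and awarded to a single script chosen with probability proportional to value.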
txouts = node.get_current_txouts()
total = sum(txouts.itervalues())
results = dict((script, value*scale//total) for script, value in txouts.iteritems())
if trunc > 0:
total_random = 0
random_set = set()
for s in sorted(results, key=results.__getitem__):
if results[s] >= trunc:
break
total_random += results[s]
random_set.add(s)
if total_random:
winner = math.weighted_choice((script, results[script]) for script in random_set)
for script in random_set:
del results[script]
results[winner] = total_random
if sum(results.itervalues()) < int(scale):
results[math.weighted_choice(results.iteritems())] += int(scale) - sum(results.itervalues())
return results
def get_patron_sendmany(total=None, trunc='0.01'):
if total is None:
return 'need total argument. go to patron_sendmany/<TOTAL>'
total = int(float(total)*1e8)
trunc = int(float(trunc)*1e8)
return json.dumps(dict(
(bitcoin_data.script2_to_address(script, node.net.PARENT), value/1e8)
for script, value in get_current_scaled_txouts(total, trunc).iteritems()
if bitcoin_data.script2_to_address(script, node.net.PARENT) is not None
))
def get_global_stats():
# averaged over last hour
if node.tracker.get_height(node.best_share_var.value) < 10:
return None
lookbehind = min(node.tracker.get_height(node.best_share_var.value), 3600//node.net.SHARE_PERIOD)
nonstale_hash_rate = p2pool_data.get_pool_attempts_per_second(node.tracker, node.best_share_var.value, lookbehind)
stale_prop = p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, lookbehind)
return dict(
pool_nonstale_hash_rate=nonstale_hash_rate,
pool_hash_rate=nonstale_hash_rate/(1 - stale_prop),
pool_stale_prop=stale_prop,
min_difficulty=bitcoin_data.target_to_difficulty(node.tracker.items[node.best_share_var.value].max_target),
)
def get_local_stats():
if node.tracker.get_height(node.best_share_var.value) < 10:
return None
lookbehind = min(node.tracker.get_height(node.best_share_var.value), 3600//node.net.SHARE_PERIOD)
global_stale_prop = p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, lookbehind)
my_unstale_count = sum(1 for share in node.tracker.get_chain(node.best_share_var.value, lookbehind) if share.hash in wb.my_share_hashes)
my_orphan_count = sum(1 for share in node.tracker.get_chain(node.best_share_var.value, lookbehind) if share.hash in wb.my_share_hashes and share.share_data['stale_info'] == 'orphan')
my_doa_count = sum(1 for share in node.tracker.get_chain(node.best_share_var.value, lookbehind) if share.hash in wb.my_share_hashes and share.share_data['stale_info'] == 'doa')
my_share_count = my_unstale_count + my_orphan_count + my_doa_count
my_stale_count = my_orphan_count + my_doa_count
my_stale_prop = my_stale_count/my_share_count if my_share_count != 0 else None
my_work = sum(bitcoin_data.target_to_average_attempts(share.target)
for share in node.tracker.get_chain(node.best_share_var.value, lookbehind - 1)
if share.hash in wb.my_share_hashes)
actual_time = (node.tracker.items[node.best_share_var.value].timestamp -
node.tracker.items[node.tracker.get_nth_parent_hash(node.best_share_var.value, lookbehind - 1)].timestamp)
share_att_s = my_work / actual_time
miner_hash_rates, miner_dead_hash_rates = wb.get_local_rates()
(stale_orphan_shares, stale_doa_shares), shares, _ = wb.get_stale_counts()
return dict(
my_hash_rates_in_last_hour=dict(
note="DEPRECATED",
nonstale=share_att_s,
rewarded=share_att_s/(1 - global_stale_prop),
actual=share_att_s/(1 - my_stale_prop) if my_stale_prop is not None else 0, # 0 because we don't have any shares anyway
),
my_share_counts_in_last_hour=dict(
shares=my_share_count,
unstale_shares=my_unstale_count,
stale_shares=my_stale_count,
orphan_stale_shares=my_orphan_count,
doa_stale_shares=my_doa_count,
),
my_stale_proportions_in_last_hour=dict(
stale=my_stale_prop,
orphan_stale=my_orphan_count/my_share_count if my_share_count != 0 else None,
dead_stale=my_doa_count/my_share_count if my_share_count != 0 else None,
),
miner_hash_rates=miner_hash_rates,
miner_dead_hash_rates=miner_dead_hash_rates,
efficiency_if_miner_perfect=(1 - stale_orphan_shares/shares)/(1 - global_stale_prop) if shares else None, # ignores dead shares because those are miner's fault and indicated by pseudoshare rejection
efficiency=(1 - (stale_orphan_shares+stale_doa_shares)/shares)/(1 - global_stale_prop) if shares else None,
peers=dict(
incoming=sum(1 for peer in node.p2p_node.peers.itervalues() if peer.incoming),
outgoing=sum(1 for peer in node.p2p_node.peers.itervalues() if not peer.incoming),
),
shares=dict(
total=shares,
orphan=stale_orphan_shares,
dead=stale_doa_shares,
),
uptime=time.time() - start_time,
attempts_to_share=bitcoin_data.target_to_average_attempts(node.tracker.items[node.best_share_var.value].max_target),
attempts_to_block=bitcoin_data.target_to_average_attempts(node.daemon_work.value['bits'].target),
block_value=node.daemon_work.value['subsidy']*1e-8,
warnings=p2pool_data.get_warnings(node.tracker, node.best_share_var.value, node.net, daemon_getinfo_var.value, node.daemon_work.value),
donation_proportion=wb.donation_percentage/100,
version=p2pool.__version__,
protocol_version=p2p.Protocol.VERSION,
fee=wb.worker_fee,
)
class WebInterface(deferred_resource.DeferredResource):
def __init__(self, func, mime_type='application/json', args=()):
deferred_resource.DeferredResource.__init__(self)
self.func, self.mime_type, self.args = func, mime_type, args
def getChild(self, child, request):
return WebInterface(self.func, self.mime_type, self.args + (child,))
@defer.inlineCallbacks
def render_GET(self, request):
request.setHeader('Content-Type', self.mime_type)
request.setHeader('Access-Control-Allow-Origin', '*')
res = yield self.func(*self.args)
defer.returnValue(json.dumps(res) if self.mime_type == 'application/json' else res)
def decent_height():
return min(node.tracker.get_height(node.best_share_var.value), 720)
web_root.putChild('rate', WebInterface(lambda: p2pool_data.get_pool_attempts_per_second(node.tracker, node.best_share_var.value, decent_height())/(1-p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, decent_height()))))
web_root.putChild('difficulty', WebInterface(lambda: bitcoin_data.target_to_difficulty(node.tracker.items[node.best_share_var.value].max_target)))
web_root.putChild('users', WebInterface(get_users))
web_root.putChild('user_stales', WebInterface(lambda: dict((bitcoin_data.pubkey_hash_to_address(ph, node.net.PARENT), prop) for ph, prop in
p2pool_data.get_user_stale_props(node.tracker, node.best_share_var.value, node.tracker.get_height(node.best_share_var.value)).iteritems())))
web_root.putChild('fee', WebInterface(lambda: wb.worker_fee))
web_root.putChild('current_payouts', WebInterface(lambda: dict((bitcoin_data.script2_to_address(script, node.net.PARENT), value/1e8) for script, value in node.get_current_txouts().iteritems())))
web_root.putChild('patron_sendmany', WebInterface(get_patron_sendmany, 'text/plain'))
web_root.putChild('global_stats', WebInterface(get_global_stats))
web_root.putChild('local_stats', WebInterface(get_local_stats))
web_root.putChild('peer_addresses', WebInterface(lambda: ' '.join('%s%s' % (peer.transport.getPeer().host, ':'+str(peer.transport.getPeer().port) if peer.transport.getPeer().port != node.net.P2P_PORT else '') for peer in node.p2p_node.peers.itervalues())))
web_root.putChild('peer_txpool_sizes', WebInterface(lambda: dict(('%s:%i' % (peer.transport.getPeer().host, peer.transport.getPeer().port), peer.remembered_txs_size) for peer in node.p2p_node.peers.itervalues())))
web_root.putChild('pings', WebInterface(defer.inlineCallbacks(lambda: defer.returnValue(
dict([(a, (yield b)) for a, b in
[(
'%s:%i' % (peer.transport.getPeer().host, peer.transport.getPeer().port),
defer.inlineCallbacks(lambda peer=peer: defer.returnValue(
min([(yield peer.do_ping().addCallback(lambda x: x/0.001).addErrback(lambda fail: None)) for i in xrange(3)])
))()
) for peer in list(node.p2p_node.peers.itervalues())]
])
))))
web_root.putChild('peer_versions', WebInterface(lambda: dict(('%s:%i' % peer.addr, peer.other_sub_version) for peer in node.p2p_node.peers.itervalues())))
web_root.putChild('prefix', WebInterface(lambda: node.net.PREFIX.encode('hex')))
web_root.putChild('symbol', WebInterface(lambda: node.net.PARENT.SYMBOL))
web_root.putChild('payout_addr', WebInterface(lambda: bitcoin_data.pubkey_hash_to_address(wb.my_pubkey_hash, node.net.PARENT)))
web_root.putChild('recent_blocks', WebInterface(lambda: [dict(
ts=s.timestamp,
hash='%064x' % s.header_hash,
number=p2pool_data.parse_bip0034(s.share_data['coinbase'])[0],
share='%064x' % s.hash,
) for s in node.tracker.get_chain(node.best_share_var.value, min(node.tracker.get_height(node.best_share_var.value), 24*60*60//node.net.SHARE_PERIOD)) if s.pow_hash <= s.header['bits'].target]))
web_root.putChild('uptime', WebInterface(lambda: time.time() - start_time))
web_root.putChild('stale_rates', WebInterface(lambda: p2pool_data.get_stale_counts(node.tracker, node.best_share_var.value, decent_height(), rates=True)))
new_root = resource.Resource()
web_root.putChild('web', new_root)
stat_log = []
if os.path.exists(os.path.join(datadir_path, 'stats')):
try:
with open(os.path.join(datadir_path, 'stats'), 'rb') as f:
stat_log = json.loads(f.read())
except:
log.err(None, 'Error loading stats:')
def update_stat_log():
while stat_log and stat_log[0]['time'] < time.time() - 24*60*60:
stat_log.pop(0)
lookbehind = 3600//node.net.SHARE_PERIOD
if node.tracker.get_height(node.best_share_var.value) < lookbehind:
return None
global_stale_prop = p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, lookbehind)
(stale_orphan_shares, stale_doa_shares), shares, _ = wb.get_stale_counts()
miner_hash_rates, miner_dead_hash_rates = wb.get_local_rates()
stat_log.append(dict(
time=time.time(),
pool_hash_rate=p2pool_data.get_pool_attempts_per_second(node.tracker, node.best_share_var.value, lookbehind)/(1-global_stale_prop),
pool_stale_prop=global_stale_prop,
local_hash_rates=miner_hash_rates,
local_dead_hash_rates=miner_dead_hash_rates,
shares=shares,
stale_shares=stale_orphan_shares + stale_doa_shares,
stale_shares_breakdown=dict(orphan=stale_orphan_shares, doa=stale_doa_shares),
current_payout=node.get_current_txouts().get(bitcoin_data.pubkey_hash_to_script2(wb.my_pubkey_hash), 0)*1e-8,
peers=dict(
incoming=sum(1 for peer in node.p2p_node.peers.itervalues() if peer.incoming),
outgoing=sum(1 for peer in node.p2p_node.peers.itervalues() if not peer.incoming),
),
attempts_to_share=bitcoin_data.target_to_average_attempts(node.tracker.items[node.best_share_var.value].max_target),
attempts_to_block=bitcoin_data.target_to_average_attempts(node.daemon_work.value['bits'].target),
block_value=node.daemon_work.value['subsidy']*1e-8,
))
with open(os.path.join(datadir_path, 'stats'), 'wb') as f:
f.write(json.dumps(stat_log))
x = deferral.RobustLoopingCall(update_stat_log)
x.start(5*60)
stop_event.watch(x.stop)
new_root.putChild('log', WebInterface(lambda: stat_log))
def get_share(share_hash_str):
if int(share_hash_str, 16) not in node.tracker.items:
return None
share = node.tracker.items[int(share_hash_str, 16)]
return dict(
parent='%064x' % share.previous_hash,
children=['%064x' % x for x in sorted(node.tracker.reverse.get(share.hash, set()), key=lambda sh: -len(node.tracker.reverse.get(sh, set())))], # sorted from most children to least children
type_name=type(share).__name__,
local=dict(
verified=share.hash in node.tracker.verified.items,
time_first_seen=start_time if share.time_seen == 0 else share.time_seen,
peer_first_received_from=share.peer_addr,
),
share_data=dict(
timestamp=share.timestamp,
target=share.target,
max_target=share.max_target,
payout_address=bitcoin_data.script2_to_address(share.new_script, node.net.PARENT),
donation=share.share_data['donation']/65535,
stale_info=share.share_data['stale_info'],
nonce=share.share_data['nonce'],
desired_version=share.share_data['desired_version'],
absheight=share.absheight,
abswork=share.abswork,
),
block=dict(
hash='%064x' % share.header_hash,
header=dict(
version=share.header['version'],
previous_block='%064x' % share.header['previous_block'],
merkle_root='%064x' % share.header['merkle_root'],
timestamp=share.header['timestamp'],
target=share.header['bits'].target,
nonce=share.header['nonce'],
),
gentx=dict(
hash='%064x' % share.gentx_hash,
coinbase=share.share_data['coinbase'].ljust(2, '\x00').encode('hex'),
value=share.share_data['subsidy']*1e-8,
last_txout_nonce='%016x' % share.contents['last_txout_nonce'],
),
other_transaction_hashes=['%064x' % x for x in share.get_other_tx_hashes(node.tracker)],
),
)
new_root.putChild('share', WebInterface(lambda share_hash_str: get_share(share_hash_str)))
new_root.putChild('heads', WebInterface(lambda: ['%064x' % x for x in node.tracker.heads]))
new_root.putChild('verified_heads', WebInterface(lambda: ['%064x' % x for x in node.tracker.verified.heads]))
new_root.putChild('tails', WebInterface(lambda: ['%064x' % x for t in node.tracker.tails for x in node.tracker.reverse.get(t, set())]))
new_root.putChild('verified_tails', WebInterface(lambda: ['%064x' % x for t in node.tracker.verified.tails for x in node.tracker.verified.reverse.get(t, set())]))
new_root.putChild('best_share_hash', WebInterface(lambda: '%064x' % node.best_share_var.value))
new_root.putChild('my_share_hashes', WebInterface(lambda: ['%064x' % my_share_hash for my_share_hash in wb.my_share_hashes]))
def get_share_data(share_hash_str):
if int(share_hash_str, 16) not in node.tracker.items:
return ''
share = node.tracker.items[int(share_hash_str, 16)]
return p2pool_data.share_type.pack(share.as_share())
new_root.putChild('share_data', WebInterface(lambda share_hash_str: get_share_data(share_hash_str), 'application/octet-stream'))
new_root.putChild('currency_info', WebInterface(lambda: dict(
symbol=node.net.PARENT.SYMBOL,
block_explorer_url_prefix=node.net.PARENT.BLOCK_EXPLORER_URL_PREFIX,
address_explorer_url_prefix=node.net.PARENT.ADDRESS_EXPLORER_URL_PREFIX,
tx_explorer_url_prefix=node.net.PARENT.TX_EXPLORER_URL_PREFIX,
)))
new_root.putChild('version', WebInterface(lambda: p2pool.__version__))
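    # Historical graph data: the datastreams defined below are restored from the
    # 'graph_db' file in the data directory and periodically written back to it.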
hd_path = os.path.join(datadir_path, 'graph_db')
hd_data = _atomic_read(hd_path)
hd_obj = {}
if hd_data is not None:
try:
hd_obj = json.loads(hd_data)
except Exception:
log.err(None, 'Error reading graph database:')
dataview_descriptions = {
'last_hour': graph.DataViewDescription(150, 60*60),
'last_day': graph.DataViewDescription(300, 60*60*24),
'last_week': graph.DataViewDescription(300, 60*60*24*7),
'last_month': graph.DataViewDescription(300, 60*60*24*30),
'last_year': graph.DataViewDescription(300, 60*60*24*365.25),
}
hd = graph.HistoryDatabase.from_obj({
'local_hash_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False),
'local_dead_hash_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False),
'local_share_hash_rates': graph.DataStreamDescription(dataview_descriptions, is_gauge=False,
multivalues=True, multivalue_undefined_means_0=True,
default_func=graph.make_multivalue_migrator(dict(good='local_share_hash_rate', dead='local_dead_share_hash_rate', orphan='local_orphan_share_hash_rate'),
post_func=lambda bins: [dict((k, (v[0] - (sum(bin.get(rem_k, (0, 0))[0] for rem_k in ['dead', 'orphan']) if k == 'good' else 0), v[1])) for k, v in bin.iteritems()) for bin in bins])),
'pool_rates': graph.DataStreamDescription(dataview_descriptions, multivalues=True,
multivalue_undefined_means_0=True),
'current_payout': graph.DataStreamDescription(dataview_descriptions),
'current_payouts': graph.DataStreamDescription(dataview_descriptions, multivalues=True),
'peers': graph.DataStreamDescription(dataview_descriptions, multivalues=True, default_func=graph.make_multivalue_migrator(dict(incoming='incoming_peers', outgoing='outgoing_peers'))),
'miner_hash_rates': graph.DataStreamDescription(dataview_descriptions, is_gauge=False, multivalues=True),
'miner_dead_hash_rates': graph.DataStreamDescription(dataview_descriptions, is_gauge=False, multivalues=True),
'desired_version_rates': graph.DataStreamDescription(dataview_descriptions, multivalues=True,
multivalue_undefined_means_0=True),
'traffic_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False, multivalues=True),
'getwork_latency': graph.DataStreamDescription(dataview_descriptions),
'memory_usage': graph.DataStreamDescription(dataview_descriptions),
}, hd_obj)
x = deferral.RobustLoopingCall(lambda: _atomic_write(hd_path, json.dumps(hd.to_obj())))
x.start(100)
stop_event.watch(x.stop)
@wb.pseudoshare_received.watch
def _(work, dead, user):
t = time.time()
hd.datastreams['local_hash_rate'].add_datum(t, work)
if dead:
hd.datastreams['local_dead_hash_rate'].add_datum(t, work)
if user is not None:
hd.datastreams['miner_hash_rates'].add_datum(t, {user: work})
if dead:
hd.datastreams['miner_dead_hash_rates'].add_datum(t, {user: work})
@wb.share_received.watch
def _(work, dead, share_hash):
t = time.time()
if not dead:
hd.datastreams['local_share_hash_rates'].add_datum(t, dict(good=work))
else:
hd.datastreams['local_share_hash_rates'].add_datum(t, dict(dead=work))
def later():
res = node.tracker.is_child_of(share_hash, node.best_share_var.value)
if res is None: res = False # share isn't connected to sharechain? assume orphaned
if res and dead: # share was DOA, but is now in sharechain
# move from dead to good
hd.datastreams['local_share_hash_rates'].add_datum(t, dict(dead=-work, good=work))
elif not res and not dead: # share wasn't DOA, and isn't in sharechain
# move from good to orphan
hd.datastreams['local_share_hash_rates'].add_datum(t, dict(good=-work, orphan=work))
reactor.callLater(200, later)
@node.p2p_node.traffic_happened.watch
def _(name, bytes):
hd.datastreams['traffic_rate'].add_datum(time.time(), {name: bytes})
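    # add_point samples pool-wide statistics (stale rates, payouts, peers,
    # desired share versions, memory usage) into the graph datastreams; it is
    # run every 5 seconds by the RobustLoopingCall below.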
def add_point():
if node.tracker.get_height(node.best_share_var.value) < 10:
return None
lookbehind = min(node.net.CHAIN_LENGTH, 60*60//node.net.SHARE_PERIOD, node.tracker.get_height(node.best_share_var.value))
t = time.time()
pool_rates = p2pool_data.get_stale_counts(node.tracker, node.best_share_var.value, lookbehind, rates=True)
pool_total = sum(pool_rates.itervalues())
hd.datastreams['pool_rates'].add_datum(t, pool_rates)
current_txouts = node.get_current_txouts()
hd.datastreams['current_payout'].add_datum(t, current_txouts.get(bitcoin_data.pubkey_hash_to_script2(wb.my_pubkey_hash), 0)*1e-8)
miner_hash_rates, miner_dead_hash_rates = wb.get_local_rates()
current_txouts_by_address = dict((bitcoin_data.script2_to_address(script, node.net.PARENT), amount) for script, amount in current_txouts.iteritems())
hd.datastreams['current_payouts'].add_datum(t, dict((user, current_txouts_by_address[user]*1e-8) for user in miner_hash_rates if user in current_txouts_by_address))
hd.datastreams['peers'].add_datum(t, dict(
incoming=sum(1 for peer in node.p2p_node.peers.itervalues() if peer.incoming),
outgoing=sum(1 for peer in node.p2p_node.peers.itervalues() if not peer.incoming),
))
vs = p2pool_data.get_desired_version_counts(node.tracker, node.best_share_var.value, lookbehind)
vs_total = sum(vs.itervalues())
hd.datastreams['desired_version_rates'].add_datum(t, dict((str(k), v/vs_total*pool_total) for k, v in vs.iteritems()))
try:
hd.datastreams['memory_usage'].add_datum(t, memory.resident())
except:
if p2pool.DEBUG:
traceback.print_exc()
x = deferral.RobustLoopingCall(add_point)
x.start(5)
stop_event.watch(x.stop)
@node.daemon_work.changed.watch
def _(new_work):
hd.datastreams['getwork_latency'].add_datum(time.time(), new_work['latency'])
new_root.putChild('graph_data', WebInterface(lambda source, view: hd.datastreams[source].dataviews[view].get_data(time.time())))
web_root.putChild('static', static.File(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), 'web-static')))
return web_root
| andreasfaerber/p2pool-feathercoin | p2pool/web.py | Python | gpl-3.0 | 25,473 | 0.00687 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
Provides classes for the (WS) SOAP I{rpc/literal} and I{rpc/encoded} bindings.
"""
from logging import getLogger
from tornado_suds import *
from tornado_suds.mx.encoded import Encoded as MxEncoded
from tornado_suds.umx.encoded import Encoded as UmxEncoded
from tornado_suds.bindings.binding import Binding, envns
from tornado_suds.sax.element import Element
log = getLogger(__name__)
encns = ('SOAP-ENC', 'http://schemas.xmlsoap.org/soap/encoding/')
class RPC(Binding):
"""
RPC/Literal binding style.
"""
def param_defs(self, method):
return self.bodypart_types(method)
def envelope(self, header, body):
env = Binding.envelope(self, header, body)
env.addPrefix(encns[0], encns[1])
env.set('%s:encodingStyle' % envns[0],
'http://schemas.xmlsoap.org/soap/encoding/')
return env
def bodycontent(self, method, args, kwargs):
n = 0
root = self.method(method)
for pd in self.param_defs(method):
if n < len(args):
value = args[n]
else:
value = kwargs.get(pd[0])
p = self.mkparam(method, pd, value)
if p is not None:
root.append(p)
n += 1
return root
def replycontent(self, method, body):
return body[0].children
def method(self, method):
"""
Get the document root. For I{rpc/(literal|encoded)}, this is the
        name of the method qualified by the schema tns.
@param method: A service method.
@type method: I{service.Method}
@return: A root element.
@rtype: L{Element}
"""
ns = method.soap.input.body.namespace
if ns[0] is None:
ns = ('ns0', ns[1])
method = Element(method.name, ns=ns)
return method
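    # Example (illustrative): for an operation named "add" whose input body
    # namespace is http://example.com/ws (an assumed URI), method() returns an
    # Element named "add" qualified by the "ns0" prefix bound to that URI.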
class Encoded(RPC):
"""
RPC/Encoded (section 5) binding style.
"""
def marshaller(self):
return MxEncoded(self.schema())
def unmarshaller(self, typed=True):
"""
Get the appropriate XML decoder.
@return: Either the (basic|typed) unmarshaller.
@rtype: L{UmxTyped}
"""
if typed:
return UmxEncoded(self.schema())
else:
return RPC.unmarshaller(self, typed)
| richshaffer/tornado-suds | tornado_suds/bindings/rpc.py | Python | lgpl-3.0 | 3,181 | 0.002829 |
#!/usr/bin/env python3
from lmap import ldap
from getpass import getpass
import threading
pw = getpass()
def bind_fnord(num):
def do_teh_action():
ld = ldap.ldap('ldap://emmi.physik-pool.tu-berlin.de/')
ld.simple_bind('uid=jaseg,ou=people,ou=pcpool,ou=physik,o=tu-berlin,c=de', pw)
print(num, len(ld.search('ou=people,ou=pcpool,ou=physik,o=tu-berlin,c=de', filter='uid=jaseg')))
return do_teh_action
for i in range(100):
t = threading.Thread(target = bind_fnord(i))
t.start()
| jaseg/python-lmap | testfnord.py | Python | bsd-2-clause | 490 | 0.028571 |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class Proj2Pipeline(object):
def process_item(self, item, spider):
return item
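# Example activation (illustrative; the priority value 300 is an arbitrary choice).
# Add this to the project's settings.py:
#
#     ITEM_PIPELINES = {
#         'proj2.pipelines.Proj2Pipeline': 300,
#     }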
| passByReference/webcrawler | proj2/proj2/pipelines.py | Python | apache-2.0 | 285 | 0 |
import os
basedir = os.path.abspath(os.path.dirname(__file__))
WTF_CSRF_ENABLED = True
SECRET_KEY = '33stanlake#'
DEBUG = True
APP_TITLE = 'Cloud of Reproducible Records API'
VERSION = '0.1-dev'
MONGODB_SETTINGS = {
'db': 'corr-production',
'host': '0.0.0.0',
'port': 27017
}
# STORMPATH_API_KEY_FILE = '~/.stormpath/apiKey.properties'
# STORMPATH_APPLICATION = 'sumatra-cloud'
# STORMPATH_REDIRECT_URL = '/dashboard'
| wd15/corr | corr-api/config.py | Python | mit | 436 | 0 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.dialogflowcx_v3beta1.types import page
from google.cloud.dialogflowcx_v3beta1.types import page as gcdc_page
from google.protobuf import empty_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-dialogflowcx",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class PagesTransport(abc.ABC):
"""Abstract transport class for Pages."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
)
DEFAULT_HOST: str = "dialogflow.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.list_pages: gapic_v1.method.wrap_method(
self.list_pages, default_timeout=None, client_info=client_info,
),
self.get_page: gapic_v1.method.wrap_method(
self.get_page, default_timeout=None, client_info=client_info,
),
self.create_page: gapic_v1.method.wrap_method(
self.create_page, default_timeout=None, client_info=client_info,
),
self.update_page: gapic_v1.method.wrap_method(
self.update_page, default_timeout=None, client_info=client_info,
),
self.delete_page: gapic_v1.method.wrap_method(
self.delete_page, default_timeout=None, client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def list_pages(
self,
) -> Callable[
[page.ListPagesRequest],
Union[page.ListPagesResponse, Awaitable[page.ListPagesResponse]],
]:
raise NotImplementedError()
@property
def get_page(
self,
) -> Callable[[page.GetPageRequest], Union[page.Page, Awaitable[page.Page]]]:
raise NotImplementedError()
@property
def create_page(
self,
) -> Callable[
[gcdc_page.CreatePageRequest], Union[gcdc_page.Page, Awaitable[gcdc_page.Page]]
]:
raise NotImplementedError()
@property
def update_page(
self,
) -> Callable[
[gcdc_page.UpdatePageRequest], Union[gcdc_page.Page, Awaitable[gcdc_page.Page]]
]:
raise NotImplementedError()
@property
def delete_page(
self,
) -> Callable[
[page.DeletePageRequest], Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]]
]:
raise NotImplementedError()
__all__ = ("PagesTransport",)
| googleapis/python-dialogflow-cx | google/cloud/dialogflowcx_v3beta1/services/pages/transports/base.py | Python | apache-2.0 | 7,215 | 0.001663 |
"""
Functions to fix fonts so they conform to the Google Fonts
specification:
https://github.com/googlefonts/gf-docs/tree/main/Spec
"""
from fontTools.misc.fixedTools import otRound
from fontTools.ttLib import TTFont, newTable, getTableModule
from fontTools.ttLib.tables import ttProgram
from fontTools.ttLib.tables._c_m_a_p import CmapSubtable
from fontTools.ttLib.tables._f_v_a_r import NamedInstance
from gftools.util.google_fonts import _KNOWN_WEIGHTS
from gftools.utils import (
download_family_from_Google_Fonts,
Google_Fonts_has_family,
font_stylename,
font_familyname,
family_bounding_box,
get_unencoded_glyphs,
normalize_unicode_marks,
partition_cmap,
typo_metrics_enabled,
validate_family,
unique_name,
)
from gftools.stat import gen_stat_tables
from os.path import basename, splitext
from copy import deepcopy
import logging
log = logging.getLogger(__name__)
__all__ = [
"remove_tables",
"add_dummy_dsig",
"fix_unhinted_font",
"fix_hinted_font",
"fix_fs_type",
"fix_weight_class",
"fix_fs_selection",
"fix_mac_style",
"fix_fvar_instances",
"update_nametable",
"fix_nametable",
"inherit_vertical_metrics",
"fix_vertical_metrics",
"fix_ascii_fontmetadata",
"drop_nonpid0_cmap",
"drop_mac_cmap",
"fix_pua",
"fix_isFixedPitch",
"drop_mac_names",
"drop_superfluous_mac_names",
"fix_font",
"fix_family",
"rename_font",
"fix_filename"
]
# The _KNOWN_WEIGHTS constant is used internally by the GF Engineering
# team so we cannot update it ourselves. TODO (Marc F) unify this one day
WEIGHT_NAMES = _KNOWN_WEIGHTS
del WEIGHT_NAMES[""]
WEIGHT_NAMES["Hairline"] = 1
WEIGHT_NAMES["ExtraBlack"] = 1000
WEIGHT_VALUES = {v: k for k, v in WEIGHT_NAMES.items()}
UNWANTED_TABLES = frozenset(
[
"FFTM",
"TTFA",
"TSI0",
"TSI1",
"TSI2",
"TSI3",
"TSI5",
"prop",
"MVAR",
"Debg",
]
)
def remove_tables(ttFont, tables=None):
"""Remove unwanted tables from a font. The unwanted tables must belong
to the UNWANTED_TABLES set.
Args:
ttFont: a TTFont instance
tables: an iterable containing tables remove
"""
tables_to_remove = UNWANTED_TABLES if not tables else frozenset(tables)
font_tables = frozenset(ttFont.keys())
tables_not_in_font = tables_to_remove - font_tables
if tables_not_in_font:
log.warning(
f"Cannot remove tables '{list(tables_not_in_font)}' since they are "
f"not in the font."
)
required_tables = tables_to_remove - UNWANTED_TABLES
if required_tables:
log.warning(
f"Cannot remove tables '{list(required_tables)}' since they are required"
)
tables_to_remove = UNWANTED_TABLES & font_tables & tables_to_remove
if not tables_to_remove:
return
log.info(f"Removing tables '{list(tables_to_remove)}' from font")
for tbl in tables_to_remove:
del ttFont[tbl]
def add_dummy_dsig(ttFont):
"""Add a dummy dsig table to a font. Older versions of MS Word
require this table.
Args:
ttFont: a TTFont instance
"""
newDSIG = newTable("DSIG")
newDSIG.ulVersion = 1
newDSIG.usFlag = 0
newDSIG.usNumSigs = 0
newDSIG.signatureRecords = []
ttFont.tables["DSIG"] = newDSIG
def fix_unhinted_font(ttFont):
"""Improve the appearance of an unhinted font on Win platforms by:
    - Add a new GASP table with a single range which is set to smooth.
- Add a new prep table which is optimized for unhinted fonts.
Args:
ttFont: a TTFont instance
"""
gasp = newTable("gasp")
# Set GASP so all sizes are smooth
gasp.gaspRange = {0xFFFF: 15}
program = ttProgram.Program()
assembly = ["PUSHW[]", "511", "SCANCTRL[]", "PUSHB[]", "4", "SCANTYPE[]"]
program.fromAssembly(assembly)
prep = newTable("prep")
prep.program = program
ttFont["gasp"] = gasp
ttFont["prep"] = prep
def fix_hinted_font(ttFont):
"""Improve the appearance of a hinted font on Win platforms by enabling
the head table's flag 3.
Args:
ttFont: a TTFont instance
"""
    if 'fpgm' not in ttFont:
return False, ["Skipping. Font is not hinted."]
old = ttFont["head"].flags
ttFont["head"].flags |= 1 << 3
return ttFont["head"].flags != old
def fix_fs_type(ttFont):
"""Set the OS/2 table's fsType flag to 0 (Installable embedding).
Args:
ttFont: a TTFont instance
"""
old = ttFont["OS/2"].fsType
ttFont["OS/2"].fsType = 0
return old != 0
def fix_weight_class(ttFont):
"""Set the OS/2 table's usWeightClass so it conforms to GF's supported
styles table:
https://github.com/googlefonts/gf-docs/tree/main/Spec#supported-styles
Args:
ttFont: a TTFont instance
"""
old_weight_class = ttFont["OS/2"].usWeightClass
if 'fvar' in ttFont:
fvar = ttFont['fvar']
default_axis_values = {a.axisTag: a.defaultValue for a in fvar.axes}
v = default_axis_values.get('wght', None)
if v is not None:
ttFont["OS/2"].usWeightClass = int(v)
return ttFont["OS/2"].usWeightClass != old_weight_class
stylename = font_stylename(ttFont)
tokens = stylename.split()
# Order WEIGHT_NAMES so longest names are first
for style in sorted(WEIGHT_NAMES, key=lambda k: len(k), reverse=True):
if style in tokens:
ttFont["OS/2"].usWeightClass = WEIGHT_NAMES[style]
return ttFont["OS/2"].usWeightClass != old_weight_class
if "Italic" in tokens:
ttFont["OS/2"].usWeightClass = 400
return ttFont["OS/2"].usWeightClass != old_weight_class
raise ValueError(
f"Cannot determine usWeightClass because font style, '{stylename}' "
f"doesn't have a weight token which is in our known "
f"weights, '{WEIGHT_NAMES.keys()}'"
)
def fix_fs_selection(ttFont):
"""Fix the OS/2 table's fsSelection so it conforms to GF's supported
styles table:
https://github.com/googlefonts/gf-docs/tree/main/Spec#supported-styles
Args:
ttFont: a TTFont instance
"""
stylename = font_stylename(ttFont)
tokens = set(stylename.split())
old_selection = fs_selection = ttFont["OS/2"].fsSelection
# turn off all bits except for bit 7 (USE_TYPO_METRICS)
fs_selection &= 1 << 7
if "Italic" in tokens:
fs_selection |= 1 << 0
if "Bold" in tokens:
fs_selection |= 1 << 5
# enable Regular bit for all other styles
if not tokens & set(["Bold", "Italic"]):
fs_selection |= 1 << 6
ttFont["OS/2"].fsSelection = fs_selection
return old_selection != fs_selection
def fix_mac_style(ttFont):
"""Fix the head table's macStyle so it conforms to GF's supported
styles table:
https://github.com/googlefonts/gf-docs/tree/main/Spec#supported-styles
Args:
ttFont: a TTFont instance
"""
stylename = font_stylename(ttFont)
tokens = set(stylename.split())
mac_style = 0
if "Italic" in tokens:
mac_style |= 1 << 1
if "Bold" in tokens:
mac_style |= 1 << 0
ttFont["head"].macStyle = mac_style
def fix_fvar_instances(ttFont):
"""Replace a variable font's fvar instances with a set of new instances
that conform to the Google Fonts instance spec:
https://github.com/googlefonts/gf-docs/tree/main/Spec#fvar-instances
Args:
ttFont: a TTFont instance
"""
if "fvar" not in ttFont:
raise ValueError("ttFont is not a variable font")
fvar = ttFont["fvar"]
default_axis_vals = {a.axisTag: a.defaultValue for a in fvar.axes}
stylename = font_stylename(ttFont)
is_italic = "Italic" in stylename
is_roman_and_italic = any(a for a in ("slnt", "ital") if a in default_axis_vals)
wght_axis = next((a for a in fvar.axes if a.axisTag == "wght"), None)
wght_min = int(wght_axis.minValue)
wght_max = int(wght_axis.maxValue)
nametable = ttFont["name"]
def gen_instances(is_italic):
results = []
for wght_val in range(wght_min, wght_max + 100, 100):
name = (
WEIGHT_VALUES[wght_val]
if not is_italic
else f"{WEIGHT_VALUES[wght_val]} Italic".strip()
)
name = name.replace("Regular Italic", "Italic")
coordinates = deepcopy(default_axis_vals)
coordinates["wght"] = wght_val
inst = NamedInstance()
inst.subfamilyNameID = nametable.addName(name)
inst.coordinates = coordinates
results.append(inst)
return results
instances = []
if is_roman_and_italic:
for bool_ in (False, True):
instances += gen_instances(is_italic=bool_)
elif is_italic:
instances += gen_instances(is_italic=True)
else:
instances += gen_instances(is_italic=False)
fvar.instances = instances
def update_nametable(ttFont, family_name=None, style_name=None):
"""Update a static font's name table. The updated name table will conform
    to the Google Fonts supported styles table:
https://github.com/googlefonts/gf-docs/tree/main/Spec#supported-styles
If a style_name includes tokens other than wght and ital, these tokens
will be appended to the family name e.g
Input:
family_name="MyFont"
style_name="SemiCondensed SemiBold"
Output:
    familyName (nameID 1) = "MyFont SemiCondensed SemiBold"
subFamilyName (nameID 2) = "Regular"
typo familyName (nameID 16) = "MyFont SemiCondensed"
typo subFamilyName (nameID 17) = "SemiBold"
Google Fonts has used this model for several years e.g
https://fonts.google.com/?query=cabin
Args:
ttFont:
family_name: New family name
style_name: New style name
"""
if "fvar" in ttFont:
raise ValueError("Cannot update the nametable for a variable font")
nametable = ttFont["name"]
# Remove nametable records which are not Win US English
# TODO this is too greedy. We should preserve multilingual
# names in the future. Please note, this has always been an issue.
platforms = set()
for rec in nametable.names:
platforms.add((rec.platformID, rec.platEncID, rec.langID))
platforms_to_remove = platforms ^ set([(3, 1, 0x409)])
if platforms_to_remove:
log.warning(
f"Removing records which are not Win US English, {list(platforms_to_remove)}"
)
for platformID, platEncID, langID in platforms_to_remove:
nametable.removeNames(
platformID=platformID, platEncID=platEncID, langID=langID
)
# Remove any name records which contain linebreaks
contains_linebreaks = []
for r in nametable.names:
for char in ("\n", "\r"):
if char in r.toUnicode():
contains_linebreaks.append(r.nameID)
for nameID in contains_linebreaks:
nametable.removeNames(nameID)
if not family_name:
family_name = font_familyname(ttFont)
if not style_name:
style_name = font_stylename(ttFont)
ribbi = ("Regular", "Bold", "Italic", "Bold Italic")
tokens = family_name.split() + style_name.split()
nameids = {
1: " ".join(t for t in tokens if t not in ribbi),
2: " ".join(t for t in tokens if t in ribbi) or "Regular",
16: " ".join(t for t in tokens if t not in list(WEIGHT_NAMES) + ['Italic']),
17: " ".join(t for t in tokens if t in list(WEIGHT_NAMES) + ['Italic']) or "Regular"
}
# Remove typo name if they match since they're redundant
if nameids[16] == nameids[1]:
del nameids[16]
if nameids[17] == nameids[2]:
del nameids[17]
family_name = nameids.get(16) or nameids.get(1)
style_name = nameids.get(17) or nameids.get(2)
# create NameIDs 3, 4, 6
nameids[4] = f"{family_name} {style_name}"
nameids[6] = f"{family_name.replace(' ', '')}-{style_name.replace(' ', '')}"
nameids[3] = unique_name(ttFont, nameids)
    # Pass through all records and replace occurrences of the old family name
# with the new family name
current_family_name = font_familyname(ttFont)
for record in nametable.names:
string = record.toUnicode()
if current_family_name in string:
nametable.setName(
string.replace(current_family_name, family_name),
record.nameID,
record.platformID,
record.platEncID,
record.langID,
)
# Remove previous typographic names
for nameID in (16, 17):
nametable.removeNames(nameID=nameID)
# Update nametable with new names
for nameID, string in nameids.items():
nametable.setName(string, nameID, 3, 1, 0x409)
def fix_nametable(ttFont):
"""Fix a static font's name table so it conforms to the Google Fonts
supported styles table:
https://github.com/googlefonts/gf-docs/tree/main/Spec#supported-styles
Args:
ttFont: a TTFont instance
"""
if "fvar" in ttFont:
from fontTools.varLib.instancer.names import updateNameTable
dflt_axes = {a.axisTag: a.defaultValue for a in ttFont['fvar'].axes}
updateNameTable(ttFont, dflt_axes)
return
family_name = font_familyname(ttFont)
style_name = font_stylename(ttFont)
update_nametable(ttFont, family_name, style_name)
def rename_font(font, new_name):
nametable = font["name"]
current_name = font_familyname(font)
if not current_name:
raise Exception(
"Name table does not contain nameID 1 or nameID 16. "
"This tool does not work on webfonts."
)
log.info("Updating font name records")
for record in nametable.names:
record_string = record.toUnicode()
no_space = current_name.replace(" ", "")
hyphenated = current_name.replace(" ", "-")
if " " not in record_string:
new_string = record_string.replace(no_space, new_name.replace(" ", ""))
else:
new_string = record_string.replace(current_name, new_name)
if new_string is not record_string:
record_info = (
record.nameID,
record.platformID,
record.platEncID,
record.langID
)
log.info(
"Updating {}: '{}' to '{}'".format(
record_info,
record_string,
new_string,
)
)
record.string = new_string
def fix_filename(ttFont):
ext = splitext(ttFont.reader.file.name)[1]
family_name = font_familyname(ttFont)
style_name = font_stylename(ttFont)
if "fvar" in ttFont:
axes = ",".join([a.axisTag for a in ttFont['fvar'].axes])
if "Italic" in style_name:
return f"{family_name}-Italic[{axes}]{ext}".replace(" ", "")
return f"{family_name}[{axes}]{ext}".replace(" ", "")
return f"{family_name}-{style_name}{ext}".replace(" ", "")
def inherit_vertical_metrics(ttFonts, family_name=None):
"""Inherit the vertical metrics from the same family which is
hosted on Google Fonts.
Args:
ttFonts: a list of TTFont instances which belong to a family
family_name: Optional string which allows users to specify a
different family to inherit from e.g "Maven Pro".
"""
family_name = font_familyname(ttFonts[0]) if not family_name else family_name
gf_fonts = list(map(TTFont, download_family_from_Google_Fonts(family_name)))
gf_fonts = {font_stylename(f): f for f in gf_fonts}
# TODO (Marc F) use Regular font instead. If VF use font which has Regular
# instance
gf_fallback = list(gf_fonts.values())[0]
fonts = {font_stylename(f): f for f in ttFonts}
for style, font in fonts.items():
if style in gf_fonts:
src_font = gf_fonts[style]
else:
src_font = gf_fallback
copy_vertical_metrics(src_font, font)
if typo_metrics_enabled(src_font):
font["OS/2"].fsSelection |= 1 << 7
def fix_vertical_metrics(ttFonts):
"""Fix a family's vertical metrics based on:
https://github.com/googlefonts/gf-docs/tree/main/VerticalMetrics
Args:
ttFonts: a list of TTFont instances which belong to a family
"""
src_font = next((f for f in ttFonts if font_stylename(f) == "Regular"), ttFonts[0])
# TODO (Marc F) CJK Fonts?
# If OS/2.fsSelection bit 7 isn't enabled, enable it and set the typo metrics
# to the previous win metrics.
if not typo_metrics_enabled(src_font):
src_font["OS/2"].fsSelection |= 1 << 7 # enable USE_TYPO_METRICS
src_font["OS/2"].sTypoAscender = src_font["OS/2"].usWinAscent
src_font["OS/2"].sTypoDescender = -src_font["OS/2"].usWinDescent
src_font["OS/2"].sTypoLineGap = 0
# Set the hhea metrics so they are the same as the typo
src_font["hhea"].ascent = src_font["OS/2"].sTypoAscender
src_font["hhea"].descent = src_font["OS/2"].sTypoDescender
src_font["hhea"].lineGap = src_font["OS/2"].sTypoLineGap
# Set the win Ascent and win Descent to match the family's bounding box
win_desc, win_asc = family_bounding_box(ttFonts)
src_font["OS/2"].usWinAscent = win_asc
src_font["OS/2"].usWinDescent = abs(win_desc)
# Set all fonts vertical metrics so they match the src_font
for ttFont in ttFonts:
ttFont["OS/2"].fsSelection |= 1 << 7
copy_vertical_metrics(src_font, ttFont)
def copy_vertical_metrics(src_font, dst_font):
for table, key in [
("OS/2", "usWinAscent"),
("OS/2", "usWinDescent"),
("OS/2", "sTypoAscender"),
("OS/2", "sTypoDescender"),
("OS/2", "sTypoLineGap"),
("hhea", "ascent"),
("hhea", "descent"),
("hhea", "lineGap"),
]:
val = getattr(src_font[table], key)
setattr(dst_font[table], key, val)
def fix_italic_angle(ttFont):
style_name = font_stylename(ttFont)
if "Italic" not in style_name and ttFont["post"].italicAngle != 0:
ttFont["post"].italicAngle = 0
# TODO (Marc F) implement for italic fonts
def fix_ascii_fontmetadata(font):
"""Fixes TTF 'name' table strings to be ascii only"""
for name in font['name'].names:
title = name.string.decode(name.getEncoding())
title = normalize_unicode_marks(title)
name.string = title.encode(name.getEncoding())
def convert_cmap_subtables_to_v4(font):
"""Converts all cmap subtables to format 4.
Returns a list of tuples (format, platformID, platEncID) of the tables
which needed conversion."""
cmap = font['cmap']
outtables = []
converted = []
for table in cmap.tables:
if table.format != 4:
converted.append((table.format, table.platformID, table.platEncID))
newtable = CmapSubtable.newSubtable(4)
newtable.platformID = table.platformID
newtable.platEncID = table.platEncID
newtable.language = table.language
newtable.cmap = table.cmap
outtables.append(newtable)
font['cmap'].tables = outtables
return converted
def drop_nonpid0_cmap(font, report=True):
keep, drop = partition_cmap(font, lambda table: table.platformID == 0, report)
return drop
def drop_mac_cmap(font, report=True):
keep, drop = partition_cmap(font, lambda table: table.platformID != 1 or table.platEncID != 0, report)
return drop
def fix_pua(font):
unencoded_glyphs = get_unencoded_glyphs(font)
if not unencoded_glyphs:
return
ucs2cmap = None
cmap = font["cmap"]
# Check if an UCS-2 cmap exists
for ucs2cmapid in ((3, 1), (0, 3), (3, 0)):
ucs2cmap = cmap.getcmap(ucs2cmapid[0], ucs2cmapid[1])
if ucs2cmap:
break
# Create UCS-4 cmap and copy the contents of UCS-2 cmap
# unless UCS 4 cmap already exists
ucs4cmap = cmap.getcmap(3, 10)
if not ucs4cmap:
cmapModule = getTableModule('cmap')
ucs4cmap = cmapModule.cmap_format_12(12)
ucs4cmap.platformID = 3
ucs4cmap.platEncID = 10
ucs4cmap.language = 0
if ucs2cmap:
ucs4cmap.cmap = deepcopy(ucs2cmap.cmap)
cmap.tables.append(ucs4cmap)
# Map all glyphs to UCS-4 cmap Supplementary PUA-A codepoints
# by 0xF0000 + glyphID
ucs4cmap = cmap.getcmap(3, 10)
for glyphID, glyph in enumerate(font.getGlyphOrder()):
if glyph in unencoded_glyphs:
ucs4cmap.cmap[0xF0000 + glyphID] = glyph
font['cmap'] = cmap
return True
def fix_isFixedPitch(ttfont):
same_width = set()
glyph_metrics = ttfont['hmtx'].metrics
messages = []
changed = False
for character in [chr(c) for c in range(65, 91)]:
same_width.add(glyph_metrics[character][0])
if len(same_width) == 1:
if ttfont['post'].isFixedPitch == 1:
messages.append("Skipping isFixedPitch is set correctly")
else:
            messages.append("Font is monospace. Updating isFixedPitch to 1")
ttfont['post'].isFixedPitch = 1
changed = True
familyType = ttfont['OS/2'].panose.bFamilyType
if familyType == 2:
expected = 9
elif familyType == 3 or familyType == 5:
expected = 3
elif familyType == 0:
            messages.append("Font is monospace but panose fields seem to be not set."
" Setting values to defaults (FamilyType = 2, Proportion = 9).")
ttfont['OS/2'].panose.bFamilyType = 2
ttfont['OS/2'].panose.bProportion = 9
changed = True
expected = None
else:
expected = None
if expected:
if ttfont['OS/2'].panose.bProportion == expected:
messages.append("Skipping OS/2.panose.bProportion is set correctly")
else:
messages.append(("Font is monospace."
" Since OS/2.panose.bFamilyType is {}"
" we're updating OS/2.panose.bProportion"
" to {}").format(familyType, expected))
ttfont['OS/2'].panose.bProportion = expected
changed = True
widths = [m[0] for m in ttfont['hmtx'].metrics.values() if m[0] > 0]
width_max = max(widths)
if ttfont['hhea'].advanceWidthMax == width_max:
messages.append("Skipping hhea.advanceWidthMax is set correctly")
else:
            messages.append("Font is monospace. Updating hhea.advanceWidthMax to %i" %
width_max)
ttfont['hhea'].advanceWidthMax = width_max
changed = True
avg_width = otRound(sum(widths) / len(widths))
if avg_width == ttfont['OS/2'].xAvgCharWidth:
messages.append("Skipping OS/2.xAvgCharWidth is set correctly")
else:
messages.append("Font is monospace. Updating OS/2.xAvgCharWidth to %i" %
avg_width)
ttfont['OS/2'].xAvgCharWidth = avg_width
changed = True
else:
if ttfont['post'].isFixedPitch != 0 or ttfont['OS/2'].panose.bProportion != 0:
changed = True
ttfont['post'].isFixedPitch = 0
ttfont['OS/2'].panose.bProportion = 0
return changed, messages
def drop_superfluous_mac_names(ttfont):
"""Drop superfluous Mac nameIDs.
The following nameIDS are kept:
1: Font Family name,
2: Font Family Subfamily name,
3: Unique font identifier,
4: Full font name,
5: Version string,
6: Postscript name,
16: Typographic family name,
17: Typographic Subfamily name
18: Compatible full (Macintosh only),
20: PostScript CID,
21: WWS Family Name,
22: WWS Subfamily Name,
25: Variations PostScript Name Prefix.
    We keep these IDs in order for certain applications to still function
such as Word 2011. IDs 1-6 are very common, > 16 are edge cases.
https://www.microsoft.com/typography/otspec/name.htm"""
keep_ids = [1, 2, 3, 4, 5, 6, 16, 17, 18, 20, 21, 22, 25]
changed = False
for n in range(255):
if n not in keep_ids:
name = ttfont['name'].getName(n, 1, 0, 0)
if name:
changed = True
ttfont['name'].names.remove(name)
return changed
def drop_mac_names(ttfont):
"""Drop all mac names"""
changed = False
for n in range(255):
name = ttfont['name'].getName(n, 1, 0, 0)
if name:
ttfont['name'].names.remove(name)
changed = True
return changed
def fix_font(font, include_source_fixes=False, new_family_name=None):
if new_family_name:
rename_font(font, new_family_name)
font["OS/2"].version = 4
if "fpgm" in font:
fix_hinted_font(font)
else:
fix_unhinted_font(font)
if "fvar" in font:
remove_tables(font, ["MVAR"])
if include_source_fixes:
log.warning(
"include-source-fixes is enabled. Please consider fixing the "
"source files instead."
)
remove_tables(font)
fix_nametable(font)
fix_fs_type(font)
fix_fs_selection(font)
fix_mac_style(font)
fix_weight_class(font)
fix_italic_angle(font)
if "fvar" in font:
fix_fvar_instances(font)
def fix_family(fonts, include_source_fixes=False, new_family_name=None):
"""Fix all fonts in a family"""
validate_family(fonts)
for font in fonts:
fix_font(
font,
include_source_fixes=include_source_fixes,
new_family_name=new_family_name
)
family_name = font_familyname(fonts[0])
if include_source_fixes:
try:
if Google_Fonts_has_family(family_name):
inherit_vertical_metrics(fonts)
else:
log.warning(
f"{family_name} is not on Google Fonts. Skipping "
"regression fixes"
)
except FileNotFoundError:
log.warning(
f"Google Fonts api key not found so we can't regression "
"fix fonts. See Repo readme to add keys."
)
fix_vertical_metrics(fonts)
if all(["fvar" in f for f in fonts]):
gen_stat_tables(fonts, ["opsz", "wdth", "wght", "ital", "slnt"])
class FontFixer():
def __init__(self, path, report=True, verbose=False, **kwargs):
self.font = TTFont(path)
self.path = path
self.font_filename = basename(path)
self.saveit = False
self.report = report
self.verbose = verbose
self.messages = []
self.args = kwargs
self.fixes = []
if "fixes" in kwargs:
self.fixes = kwargs["fixes"]
def __del__(self):
if self.report:
print("\n".join(self.messages))
if self.saveit:
if self.verbose:
print('Saving %s to %s.fix' % (self.font_filename, self.path))
self.font.save(self.path + ".fix")
elif self.verbose:
print('There were no changes needed on %s!' % self.font_filename)
def show(self):
pass
def fix(self):
for f in self.fixes:
rv = f(self.font)
if isinstance(rv, tuple) and len(rv) == 2:
changed, messages = rv
self.messages.extend(messages)
else:
changed = rv
if changed:
self.saveit = True
class GaspFixer(FontFixer):
def fix(self, value=15):
try:
table = self.font.get('gasp')
table.gaspRange[65535] = value
self.saveit = True
except:
print(('ER: {}: no table gasp... '
'Creating new table. ').format(self.path))
            table = newTable('gasp')
table.gaspRange = {65535: value}
self.font['gasp'] = table
self.saveit = True
    def show(self):
        if 'gasp' not in self.font:
            print('ER: {}: no table gasp'.format(self.path))
            return
        try:
            print(self.font['gasp'].gaspRange[65535])
        except KeyError:
            print('ER: {}: no index 65535'.format(self.path))
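# Minimal usage sketch (illustrative; the font path and the decision not to apply
# source fixes are assumptions, not part of this module):
#
#   from fontTools.ttLib import TTFont
#   font = TTFont("MyFamily-Regular.ttf")
#   fix_font(font, include_source_fixes=False)
#   font.save("MyFamily-Regular.ttf.fix")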
| googlefonts/gftools | Lib/gftools/fix.py | Python | apache-2.0 | 28,567 | 0.001505 |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class nsratecontrol(base_resource) :
""" Configuration for rate control resource. """
def __init__(self) :
self._tcpthreshold = 0
self._udpthreshold = 0
self._icmpthreshold = 0
self._tcprstthreshold = 0
@property
def tcpthreshold(self) :
ur"""Number of SYNs permitted per 10 milliseconds.
"""
try :
return self._tcpthreshold
except Exception as e:
raise e
@tcpthreshold.setter
def tcpthreshold(self, tcpthreshold) :
ur"""Number of SYNs permitted per 10 milliseconds.
"""
try :
self._tcpthreshold = tcpthreshold
except Exception as e:
raise e
@property
def udpthreshold(self) :
ur"""Number of UDP packets permitted per 10 milliseconds.
"""
try :
return self._udpthreshold
except Exception as e:
raise e
@udpthreshold.setter
def udpthreshold(self, udpthreshold) :
ur"""Number of UDP packets permitted per 10 milliseconds.
"""
try :
self._udpthreshold = udpthreshold
except Exception as e:
raise e
@property
def icmpthreshold(self) :
ur"""Number of ICMP packets permitted per 10 milliseconds.<br/>Default value: 100.
"""
try :
return self._icmpthreshold
except Exception as e:
raise e
@icmpthreshold.setter
def icmpthreshold(self, icmpthreshold) :
ur"""Number of ICMP packets permitted per 10 milliseconds.<br/>Default value: 100
"""
try :
self._icmpthreshold = icmpthreshold
except Exception as e:
raise e
@property
def tcprstthreshold(self) :
ur"""The number of TCP RST packets permitted per 10 milli second. zero means rate control is disabled and 0xffffffff means every thing is rate controlled.<br/>Default value: 100.
"""
try :
return self._tcprstthreshold
except Exception as e:
raise e
@tcprstthreshold.setter
def tcprstthreshold(self, tcprstthreshold) :
ur"""The number of TCP RST packets permitted per 10 milli second. zero means rate control is disabled and 0xffffffff means every thing is rate controlled.<br/>Default value: 100
"""
try :
self._tcprstthreshold = tcprstthreshold
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(nsratecontrol_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.nsratecontrol
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
return 0
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
ur""" Use this API to update nsratecontrol.
"""
try :
if type(resource) is not list :
updateresource = nsratecontrol()
updateresource.tcpthreshold = resource.tcpthreshold
updateresource.udpthreshold = resource.udpthreshold
updateresource.icmpthreshold = resource.icmpthreshold
updateresource.tcprstthreshold = resource.tcprstthreshold
return updateresource.update_resource(client)
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
ur""" Use this API to unset the properties of nsratecontrol resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = nsratecontrol()
return unsetresource.unset_resource(client, args)
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
ur""" Use this API to fetch all the nsratecontrol resources that are configured on netscaler.
"""
try :
if not name :
obj = nsratecontrol()
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
class nsratecontrol_response(base_response) :
def __init__(self, length=1) :
self.nsratecontrol = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.nsratecontrol = [nsratecontrol() for _ in range(length)]
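# Minimal usage sketch (illustrative; the NetScaler address, credentials and the
# nitro_service import path follow the usual Nitro SDK pattern and are assumptions):
#
#   from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
#   client = nitro_service("10.0.0.1", "http")
#   client.login("nsroot", "password")
#   resource = nsratecontrol()
#   resource.icmpthreshold = 200
#   nsratecontrol.update(client, resource)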
| benfinke/ns_python | nssrc/com/citrix/netscaler/nitro/resource/config/ns/nsratecontrol.py | Python | apache-2.0 | 5,380 | 0.036059 |
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import Gaffer
import GafferTest
class TypedObjectPlugTest( GafferTest.TestCase ) :
def testSerialisation( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["t"] = Gaffer.ObjectPlug( "hello", defaultValue = IECore.IntData( 1 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
se = s.serialise()
s2 = Gaffer.ScriptNode()
s2.execute( se )
self.failUnless( s2["n"]["t"].isInstanceOf( Gaffer.ObjectPlug.staticTypeId() ) )
def testSerialisationWithConnection( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["t"] = Gaffer.ObjectPlug( "hello", defaultValue = IECore.IntData( 0 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["n2"] = Gaffer.Node()
s["n2"]["t2"] = Gaffer.ObjectPlug( "hello", defaultValue = IECore.IntData( 0 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, direction=Gaffer.Plug.Direction.Out )
s["n"]["t"].setInput( s["n2"]["t2"] )
se = s.serialise()
s2 = Gaffer.ScriptNode()
s2.execute( se )
self.failUnless( s2["n"]["t"].getInput().isSame( s2["n2"]["t2"] ) )
def testDefaultValue( self ) :
p = Gaffer.ObjectPlug( "p", defaultValue = IECore.IntVectorData( [ 1, 2, 3 ] ) )
self.assertEqual( p.defaultValue(), IECore.IntVectorData( [ 1, 2, 3 ] ) )
def testRunTimeTyped( self ) :
self.assertEqual( IECore.RunTimeTyped.baseTypeId( Gaffer.ObjectPlug.staticTypeId() ), Gaffer.ValuePlug.staticTypeId() )
def testAcceptsNoneInput( self ) :
p = Gaffer.ObjectPlug( "hello", Gaffer.Plug.Direction.In, IECore.IntData( 10 ) )
self.failUnless( p.acceptsInput( None ) )
def testBoolVectorDataPlug( self ) :
p = Gaffer.BoolVectorDataPlug( "p", defaultValue = IECore.BoolVectorData( [ True, False ] ) )
self.assertEqual( p.defaultValue(), IECore.BoolVectorData( [ True, False ] ) )
self.assertEqual( p.getValue(), IECore.BoolVectorData( [ True, False ] ) )
p.setValue( IECore.BoolVectorData( [ False ] ) )
self.assertEqual( p.getValue(), IECore.BoolVectorData( [ False ] ) )
self.assertRaises( Exception, p.setValue, IECore.IntData( 10 ) )
def testNullDefaultValue( self ) :
self.assertRaises( ValueError, Gaffer.ObjectPlug, "hello", defaultValue = None )
def testNullValue( self ) :
p = Gaffer.ObjectPlug( "hello", Gaffer.Plug.Direction.In, IECore.IntData( 10 ) )
self.assertRaises( ValueError, p.setValue, None )
def testSerialisationWithValueAndDefaultValue( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["t"] = Gaffer.ObjectPlug( "hello", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, defaultValue = IECore.IntData( 10 ) )
s["n"]["t"].setValue( IECore.CompoundObject( { "a" : IECore.IntData( 20 ) } ) )
se = s.serialise()
s2 = Gaffer.ScriptNode()
s2.execute( se )
self.failUnless( s2["n"]["t"].isInstanceOf( Gaffer.ObjectPlug.staticTypeId() ) )
self.failUnless( s2["n"]["t"].defaultValue() == IECore.IntData( 10 ) )
self.failUnless( s2["n"]["t"].getValue() == IECore.CompoundObject( { "a" : IECore.IntData( 20 ) } ) )
@GafferTest.expectedFailure
def testSerialisationOfMeshPrimitives( self ) :
# right now we can only serialise types which define __repr__, but that
# isn't defined for all cortex types. this test should pass when we get round
# to defining it for MeshPrimitives - we should do the other primitives at the
# same time, obviously.
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["t"] = Gaffer.ObjectPlug( "hello", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, defaultValue = IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( 0 ), IECore.V2f( 10 ) ) ) )
s["n"]["t"].setValue( IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( 0 ), IECore.V2f( 1 ) ) ) )
se = s.serialise()
s2 = Gaffer.ScriptNode()
s2.execute( se )
self.assertEqual( s["n"]["t"].defaultValue(), s2["n"]["t"].defaultValue() )
self.assertEqual( s["n"]["t"].getValue(), s2["n"]["t"].getValue() )
def testConstructCantSpecifyBothInputAndValue( self ) :
out = Gaffer.ObjectPlug( "out", direction=Gaffer.Plug.Direction.Out, defaultValue=IECore.StringData( "hi" ) )
self.assertRaises( Exception, Gaffer.ObjectPlug, "in", input=out, value=IECore.IntData( 10 ) )
class TypedObjectPlugNode( Gaffer.Node ) :
def __init__( self, name="TypedObjectPlugNode" ) :
Gaffer.Node.__init__( self, name )
self.addChild(
Gaffer.ObjectPlug( "p", defaultValue = IECore.IntData( 1 ) ),
)
IECore.registerRunTimeTyped( TypedObjectPlugNode )
def testSerialisationOfStaticPlugs( self ) :
s = Gaffer.ScriptNode()
s["n"] = self.TypedObjectPlugNode()
s["n"]["p"].setValue( IECore.IntData( 10 ) )
se = s.serialise()
s2 = Gaffer.ScriptNode()
s2.execute( se )
self.assertEqual( s2["n"]["p"].getValue(), IECore.IntData( 10 ) )
def testSetToDefault( self ) :
plane = IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( 0 ), IECore.V2f( 10 ) ) )
plug = Gaffer.ObjectPlug( defaultValue = plane )
self.assertEqual( plug.getValue(), plane )
plug.setValue( IECore.SpherePrimitive() )
self.assertEqual( plug.getValue(), IECore.SpherePrimitive() )
plug.setToDefault()
self.assertEqual( plug.getValue(), plane )
def testValueType( self ) :
self.failUnless( Gaffer.ObjectPlug.ValueType is IECore.Object )
self.failUnless( Gaffer.BoolVectorDataPlug.ValueType is IECore.BoolVectorData )
self.failUnless( Gaffer.IntVectorDataPlug.ValueType is IECore.IntVectorData )
self.failUnless( Gaffer.FloatVectorDataPlug.ValueType is IECore.FloatVectorData )
self.failUnless( Gaffer.StringVectorDataPlug.ValueType is IECore.StringVectorData )
self.failUnless( Gaffer.V3fVectorDataPlug.ValueType is IECore.V3fVectorData )
self.failUnless( Gaffer.ObjectVectorPlug.ValueType is IECore.ObjectVector )
def testReadOnlySetValueRaises( self ) :
p = Gaffer.ObjectPlug( defaultValue = IECore.NullObject(), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.ReadOnly )
self.assertRaises( RuntimeError, p.setValue, IECore.IntData( 10 ) )
def testSetValueCopying( self ) :
p = Gaffer.ObjectPlug( defaultValue = IECore.IntData( 1 ) )
i = IECore.IntData( 10 )
p.setValue( i )
self.failIf( p.getValue( _copy=False ).isSame( i ) )
i = IECore.IntData( 20 )
p.setValue( i, _copy=False )
self.failUnless( p.getValue( _copy=False ).isSame( i ) )
def testCreateCounterpart( self ) :
p = Gaffer.ObjectPlug( defaultValue = IECore.IntData( 20 ) )
p2 = p.createCounterpart( "c", Gaffer.Plug.Direction.Out )
self.assertEqual( p2.getName(), "c" )
self.assertEqual( p2.direction(), Gaffer.Plug.Direction.Out )
self.assertEqual( p2.defaultValue(), p.defaultValue() )
self.assertEqual( p2.getFlags(), p.getFlags() )
def testNoChildrenAccepted( self ) :
p1 = Gaffer.ObjectPlug( defaultValue = IECore.IntData( 20 ) )
p2 = Gaffer.ObjectPlug( defaultValue = IECore.IntData( 20 ) )
self.assertFalse( p1.acceptsChild( p2 ) )
self.assertRaises( RuntimeError, p1.addChild, p2 )
if __name__ == "__main__":
unittest.main()
| goddardl/gaffer | python/GafferTest/TypedObjectPlugTest.py | Python | bsd-3-clause | 8,984 | 0.059996 |
#!/usr/bin/python
"""
Init script for Earthquake Docker Image (osrg/earthquake)
Supported Env vars:
- EQ_DOCKER_PRIVILEGED
"""
import os
import prctl
import subprocess
import sys
def log(s):
print 'INIT: %s' % s
def is_privileged_mode():
has_env = os.getenv('EQ_DOCKER_PRIVILEGED')
has_cap = prctl.cap_permitted.sys_admin
if has_env and not has_cap:
raise RuntimeError('EQ_DOCKER_PRIVILEGED is set, but SYS_ADMIN cap is missing')
return has_env
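# Example invocation (illustrative; the exact docker flags are assumptions):
#   docker run --privileged -e EQ_DOCKER_PRIVILEGED=1 -t -i osrg/earthquake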
def run_daemons(l):
for elem in l:
log('Starting daemon: %s' % elem)
rc = subprocess.call(elem)
if rc != 0:
log('Exiting with status %d..(%s)' % (rc, elem))
sys.exit(rc)
def run_command_and_exit(l):
log('Starting command: %s' % l)
rc = subprocess.call(l)
log('Exiting with status %d..(%s)' % (rc, l))
sys.exit(rc)
def get_remaining_args():
return sys.argv[1:]
if __name__ == '__main__':
daemons = [
['service', 'mongodb', 'start']
]
run_daemons(daemons)
com = ['/bin/bash', '--login', '-i']
if is_privileged_mode():
log('Running with privileged mode. Enabling DinD, OVS, and Ryu')
com = ['wrapdocker', '/init.dind-ovs-ryu.sh']
else:
log('Running without privileged mode. Please set EQ_DOCKER_PRIVILEGED if you want to use Ethernet Inspector')
log('Earthquake is installed on /earthquake. Please refer to /earthquake/README.md')
run_command_and_exit(com + get_remaining_args())
| AkihiroSuda/earthquake | docker/eq-init.py | Python | apache-2.0 | 1,499 | 0.008005 |
import os
import glob
import operator
import errno
import filecmp
import shutil
import numpy
from .smac_output_readers import *
def find_largest_file (glob_pattern):
""" Function to find the largest file matching a glob pattern.
    Old SMAC versions keep several versions of files as back-ups. This
helper can be used to find the largest file (which should contain the
final output). One could also go for the most recent file, but that
might fail when the data is copied.
:param glob_pattern: a UNIX style pattern to apply
:type glob_pattern: string
:returns: string -- largest file matching the pattern
"""
fns = glob.glob(glob_pattern)
if len(fns) == 0:
raise RuntimeError("No file matching pattern \'{}\' found!".format(glob_pattern))
f_name = ""
f_size = -1
for fn in fns:
s = os.lstat(fn).st_size
if (s > f_size):
f_size = s
f_name = fn
return(f_name)
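# Example (illustrative; the directory layout is an assumption):
#   fn = find_largest_file('smac-output/state-run1/runs_and_results-it*.csv')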
def read_sate_run_folder(directory, rar_fn = "runs_and_results-it*.csv",inst_fn = "instances.txt" , feat_fn = "instance-features.txt" , ps_fn = "paramstrings-it*.txt"):
    """ Helper function that reads all information from a state_run folder.
To get all information of a SMAC run, several different files have
to be read. This function provides a short notation for gathering
all data at once.
:param directory: the location of the state_run_folder
:type directory: str
:param rar_fn: pattern to find the runs_and_results file
:type rar_fn: str
:param inst_fn: name of the instance file
:type inst_fn: str
:param feat_fn: name of the instance feature file. If this file is not found, pysmac assumes no instance features.
:type feat_fn: str
:param ps_fn: name of the paramstrings file
:type ps_fn: str
:returns: tuple -- (configurations returned by read_paramstring_file,\n
instance names returned by read_instance_file,\n
instance features returned by read_instance_features_file,\n
actual run data returned by read_runs_and_results_file)
"""
print(("reading {}".format(directory)))
configs = read_paramstrings_file(find_largest_file(os.path.join(directory,ps_fn)))
instance_names = read_instances_file(find_largest_file(os.path.join(directory,inst_fn)))
runs_and_results = read_runs_and_results_file(find_largest_file(os.path.join(directory, rar_fn)))
full_feat_fn = glob.glob(os.path.join(directory,feat_fn))
if len(full_feat_fn) == 1:
instance_features = read_instance_features_file(full_feat_fn[0])
else:
instance_features = None
return (configs, instance_names, instance_features, runs_and_results)
def state_merge(state_run_directory_list, destination,
check_scenario_files = True, drop_duplicates = False,
instance_subset = None):
""" Function to merge multiple state_run directories into a single
run to be used in, e.g., the fANOVA.
To take advantage of the data gathered in multiple independent runs,
the state_run folders have to be merged into a single directory that
    resembles the same structure. This allows easy application of the
pyfANOVA on all run_and_results files.
:param state_run_directory_list: list of state_run folders to be merged
:type state_run_directory_list: list of str
:param destination: a directory to store the merged data. The folder is created if needed, and already existing data in that location is silently overwritten.
:type destination: str
:param check_scenario_files: whether to ensure that all scenario files in all state_run folders are identical. This helps to avoid merging runs with different settings. Note: Command-line options given to SMAC are not compared here!
:type check_scenario_files: bool
:param drop_duplicates: Defines how to handle runs with identical configurations. For deterministic algorithms the function's response should be the same, so dropping duplicates is safe. Keep in mind that every duplicate effectively puts more weight on a configuration when estimating parameter importance.
:type drop_duplicates: bool
:param instance_subset: Defines a list of instances that are used for the merge. All other instances are ignored. (Default: None, all instances are used)
:type instance_subset: list
"""
configurations = {}
instances = {}
runs_and_results = {}
    ff_header = set()
    i_confs = 1
    i_insts = 1
# make sure all pcs files are the same
pcs_files = [os.path.join(d,'param.pcs') for d in state_run_directory_list]
if not all([filecmp.cmp(fn, pcs_files[0]) for fn in pcs_files[1:]]):
raise RuntimeError("The pcs files of the different runs are not identical!")
#check the scenario files if desired
scenario_files = [os.path.join(d,'scenario.txt') for d in state_run_directory_list]
if check_scenario_files and not all([filecmp.cmp(fn, scenario_files[0]) for fn in scenario_files[1:]]):
raise RuntimeError("The scenario files of the different runs are not identical!")
for directory in state_run_directory_list:
try:
            confs, inst_names, tmp, rars = read_sate_run_folder(directory)
(header_feats, inst_feats) = tmp if tmp is not None else (None,None)
except:
print(("Something went wrong while reading {}. Skipping it.".format(directory)))
continue
# confs is a list of dicts, but dicts are not hashable, so they are
# converted into a tuple of (key, value) pairs and then sorted
confs = [tuple(sorted(d.items())) for d in confs]
# merge the configurations
for conf in confs:
if not conf in configurations:
configurations[conf] = {'index': i_confs}
i_confs += 1
# merge the instances
ignored_instance_ids = []
for i in range(len(inst_names)):
if instance_subset is not None and inst_names[i][0] not in instance_subset:
ignored_instance_ids.append(i)
continue
if not inst_names[i][0] in instances:
instances[inst_names[i][0]] = {'index': i_insts}
instances[inst_names[i][0]]['features'] = inst_feats[inst_names[i][0]] if inst_feats is not None else None
instances[inst_names[i][0]]['additional info'] = ' '.join(inst_names[i][1:]) if len(inst_names[i]) > 1 else None
i_insts += 1
else:
if (inst_feats is None):
if not (instances[inst_names[i][0]]['features'] is None):
raise ValueError("The data contains the same instance name ({}) twice, but once with and without features!".format(inst_names[i]))
elif not numpy.all(instances[inst_names[i][0]]['features'] == inst_feats[inst_names[i][0]]):
raise ValueError("The data contains the same instance name ({}) twice, but with different features!".format(inst_names[i]))
# store the feature file header:
if header_feats is not None:
ff_header.add(",".join(header_feats))
if len(ff_header) != 1:
raise RuntimeError("Feature Files not consistent across runs!\n{}".format(header_feats))
if len(rars.shape) == 1:
rars = numpy.array([rars])
for run in rars:
# get the local configuration and instance id
lcid, liid = int(run[0])-1, int(run[1])-1
if liid in ignored_instance_ids:
continue
# translate them into the global ones
gcid = configurations[confs[lcid]]['index']
giid = instances[inst_names[liid][0]]['index']
# check for duplicates and skip if necessary
if (gcid, giid) in runs_and_results:
if drop_duplicates:
#print('dropped duplicate: configuration {} on instace {}'.format(gcid, giid))
continue
else:
runs_and_results[(gcid, giid)].append(run[2:])
else:
runs_and_results[(gcid, giid)] = [run[2:]]
# create output directory
try:
os.makedirs(destination)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
# create all files, overwriting existing ones
shutil.copy(pcs_files[0], destination)
shutil.copy(scenario_files[0], destination)
with open(os.path.join(destination, 'instances.txt'),'w') as fh:
sorted_instances = []
for name in instances:
if instances[name]['additional info'] is not None:
sorted_instances.append( (instances[name]['index'], name + ' ' + instances[name]['additional info']) )
else:
sorted_instances.append( (instances[name]['index'], name) )
sorted_instances.sort()
fh.write('\n'.join(map(operator.itemgetter(1), sorted_instances)))
fh.write('\n')
with open(os.path.join(destination, 'runs_and_results-it0.csv'),'w') as fh:
cumulative_runtime = 0.0
fh.write("Run Number,Run History Configuration ID,Instance ID,"
"Response Value (y),Censored?,Cutoff Time Used,"
"Seed,Runtime,Run Length,"
"Run Result Code,Run Quality,SMAC Iteration,"
"SMAC Cumulative Runtime,Run Result,"
"Additional Algorithm Run Data,Wall Clock Time,\n")
run_i = 1
for ((conf,inst),res) in list(runs_and_results.items()):
for r in res:
fh.write('{},{},{},'.format(run_i, conf, inst))
fh.write('{},{},{},'.format(r[0], int(r[1]), r[2]))
fh.write('{},{},{},'.format(int(r[3]), r[4], r[5]))
fh.write('{},{},{},'.format(int(r[6]), r[7], 0))
cumulative_runtime += r[4]
                if r[10] == 2:
                    tmp = 'SAT'
                elif r[10] == 1:
                    tmp = 'UNSAT'
                elif r[10] == 0:
                    tmp = 'TIMEOUT'
                elif r[10] == -1:
                    tmp = 'CRASHED'
                fh.write('{},{},,{},'.format(cumulative_runtime, tmp, r[11]))
fh.write('\n')
run_i += 1
with open(os.path.join(destination, 'paramstrings-it0.txt'),'w') as fh:
sorted_confs = [(configurations[k]['index'],k) for k in list(configurations.keys())]
sorted_confs.sort()
for conf in sorted_confs:
fh.write("{}: ".format(conf[0]))
fh.write(", ".join(["{}='{}'".format(p[0],p[1]) for p in conf[1]]))
fh.write('\n')
#print(instances.values())
if header_feats is not None:
with open(os.path.join(destination, 'instance-features.txt'),'w') as fh:
fh.write("instance," + ff_header.pop())
sorted_features = [(instances[inst]['index'], inst + ',' + ",".join(list(map(str, instances[inst]['features']))) ) for inst in instances]
sorted_features.sort()
fh.write('\n'.join([ t[1] for t in sorted_features]))
return(configurations, instances, runs_and_results, sorted_instances, sorted_confs, inst_feats)
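# Illustrative sketch (directory names are hypothetical): merge the state-run
# folders of two independent SMAC runs into one directory that downstream
# tools such as the fANOVA can read as a single run.
def _example_state_merge():
    """Not called anywhere; shown only to document typical usage."""
    state_merge(['./smac-output/run-1/state-run1',
                 './smac-output/run-2/state-run2'],
                destination='./merged-state-run',
                drop_duplicates=True)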
| sfalkner/pySMAC | pysmac/utils/state_merge.py | Python | agpl-3.0 | 11,573 | 0.009937 |
#!/usr/bin/python
# -*- encoding: UTF-8 -*-
'''MATRIX Log File Maker
Version: 2
This script creates a CSV file tabulating the settings of all STM data
recorded with the Omicron MATRIX software.
List of classes: -none-
List of functions:
main
'''
# built-in modules
import sys
import traceback
import os
import os.path
import re
import random
import time
import multiprocessing as mp
from pprint import pprint
import pdb
# 3rd-party modules
#sys.path.append('C:/Users/csykes/alex/Dropbox/ampPy/spm_dev/')
import pyMTRX
from pyMTRX.experiment import Experiment
#==============================================================================
def main( cwd='./', sdir=None, r=True, processes=mp.cpu_count(),
single_sheet=False, debug=False
):
if debug: print '*** DEBUG MODE ON ***'
t = time.time()
if cwd[-1] != '/':
cwd += '/'
files = os.listdir(cwd)
print 'looking for experiment files in "{}"'.format(cwd)
# find one experiment file and then move on
experiment_files = find_files(cwd, fext='mtrx', r=r)
print 'Found the following .mtrx files'
for fp in experiment_files:
print ' ' + os.path.basename(fp)
N_opened = []
try:
processes = int(processes)
except ValueError:
processes = 1
#END try
if processes < 1 or debug: processes = 1
if processes == 1:
for fp in experiment_files:
if not isinstance(sdir, basestring): sdir = os.path.dirname(fp)
N_opened.append(
create_experiment_log(fp, sdir=sdir, debug=debug)
)
# END for
else:
# Create worker pool and start all jobs
worker_pool = mp.Pool(processes=processes, maxtasksperchild=12)
print 'running in multiprocess mode: {} processes'.format(processes)
for fp in experiment_files:
if not isinstance(sdir, basestring): sdir = os.path.dirname(fp)
N_opened.append(
worker_pool.apply_async( wrapped_create_exlog,
args=(fp,sdir,debug),
)
)
# END for
worker_pool.close()
# Wait here for all work to complete
worker_pool.join()
# END if
N = 0
if processes == 1:
for n in N_opened: N += n
else:
for n in N_opened:
try:
N += n.get()
except Exception as err:
print err
# END try
# END for
# END if
t = time.time() - t
hours = int(t/3600)
minutes = int((t-3600*hours)/60)
seconds = int(t - 3600*hours - 60*minutes)
print 'Total run time: {:02d}:{:02d}:{:02d}'.format(
hours, minutes, seconds
)
print 'Average processing speed: {:.0f} files/min'.format(N/(t/60))
# END main
#==============================================================================
def wrapped_create_exlog(*args, **kwargs):
try:
return create_experiment_log(*args, **kwargs)
except Exception as err:
print '{}: {}'.format(args[0], repr(err))
return 0
# END try
# END wrapped_create_exlog
def create_experiment_log(exp_fp, sdir='./', debug=False):
cwd, exp_fn = os.path.split(exp_fp)
cwd += '/'
print 'loading ' + exp_fn
ex = Experiment(cwd + exp_fn, debug=debug)
# collect image files
# (*image file must be in experiment file AND a file in the directory)
all_files = list( set(ex.get_data_filenames()) & set(os.listdir(cwd)) )
img_files = [fn for fn in all_files if Experiment.is_image(fn)]
sts_files = [fn for fn in all_files if Experiment.is_point_spectrum(fn)]
dname_lkup = { 0: '00 trace up', 1: '01 retrace up',
2: '10 trace down', 3: '11 retrace down'
}
IMG_entries = []
STS_entries = []
for fn in sorted(img_files, key=lambda f: os.path.getctime(cwd+f)):
if debug: print 'loading "{}"'.format(fn)
# scns = [trace_up, retrace_up, trace_down, retrace_down]
scns = flatten_tree( ex.import_scan(cwd + fn) )
for i in range(len(scns)):
scns[i].props['direction'] = dname_lkup[i]
IMG_entries.append( make_scan_entry(scns[i]) )
#for crv in scns[i].spectra:
# STS_entries.append( make_spectrum_entry(crv, debug=debug) )
# END for
# END for
for fn in sts_files:
curves = ex.import_spectra(os.path.join(cwd, fn))
for crv in curves:
STS_entries.append( make_spectrum_entry(crv, debug=debug) )
# END for
IMG_entries.sort(key=lambda tup: tup[0])
STS_entries.sort(key=lambda tup: tup[0])
N_opened = len(IMG_entries) + len(STS_entries) + 1
save_name = re.sub(r'_0001\.mtrx$', '_settings.csv', exp_fn)
f = open(os.path.join(sdir, save_name), 'w')
columns = [ 'date/time (d)',
'sample', 'data set',
'index', 'rep', 'dir', 'channel',
'x (nm)', 'y (nm)',
'scan bias (V)', 'current setpoint (pA)',
'loop gain (%)', 'T_raster (ms)',
'points', 'lines',
'line width (nm)', 'image height (nm)', '', 'angle (deg)',
'No. STS',
'exp comment', 'img comment',
'file'
]
f.write(','.join(columns))
f.write('\n')
for t, ln in IMG_entries:
f.write(ln)
f.close()
save_name = re.sub(r'_0001\.mtrx$', '_settings_STS.csv', exp_fn)
f = open(os.path.join(sdir, save_name), 'w')
columns = [ 'date/time (d)',
'sample', 'data set',
'scan index', 'rep', 'dir', 'channel',
'spec index', 'rep', 'dir', 'channel',
'start voltage (V)', 'end voltage (V)',
'scan bias (V)', 'current setpoint (pA)',
'loop gain (%)', 'T_raster (ms)',
'points',
'exp comment', 'spectrum comments',
'file'
]
f.write(','.join(columns))
f.write('\n')
for t, ln in STS_entries:
f.write(ln)
f.close()
if len(os.path.join(sdir, save_name)) > 79:
print cwd + '\n ' + save_name
else:
print cwd + ' ' + save_name
# END if
return N_opened
# END create_experiment_log
#==============================================================================
def make_scan_entry(scn):
ls = []
# time
ls.append( str(scn.props['time']/86400.0 + 25569 - 4.0/24) )
# experiment sample
ls.append( csv_safe(scn.ex.sample) )
ls.append( csv_safe(scn.ex.data_set) )
# img index (scan, repetition, direction) and channel
ls.append(
'{index:03d},{rep:04d},{direction},{channel}'.format(**scn.props)
)
# scan location
ls.append('{}'.format(scn.props['XYScanner_X_Offset'].value * 1e9))
ls.append('{}'.format(scn.props['XYScanner_Y_Offset'].value * 1e9))
# scan voltage
ls.append('{}'.format(scn.props['GapVoltageControl_Voltage'].value))
# scan current
ls.append('{:0.1f}'.format(scn.props['Regulator_Setpoint_1'].value * 1e12))
# scan loop gain
ls.append('{:0.2f}'.format(scn.props['Regulator_Loop_Gain_1_I'].value))
# scan raster time
ls.append('{:0.3f}'.format(scn.props['XYScanner_Raster_Time'].value * 1e3))
# scan size in points and lines
ls.append(str(scn.props['XYScanner_Points'].value))
ls.append(str(scn.props['XYScanner_Lines'].value))
# scan size in physical units (nm)
ls.append('{:0.2f}'.format(scn.props['XYScanner_Width'].value * 1e9))
ls.append('{:0.2f}'.format(scn.props['XYScanner_Height'].value * 1e9))
# alert flag for parameter errors
if pyMTRX.size_change(scn):
ls.append('*')
else:
ls.append('')
# END if
# scan angle
ls.append('{:0.1f}'.format(scn.props['XYScanner_Angle'].value))
# number of linked point spectra
ls.append(str(len(scn.spectra)))
# experiment data set, comment, scan comment, and file name
ls.append( csv_safe(scn.ex.comment) )
ls.append( csv_safe(scn.props['comment']) )
ls.append( '{}\n'.format(scn.props['file']) )
return (scn.props['time'], ','.join(ls))
# END make_scan_entry
#==============================================================================
def make_spectrum_entry(crv, no_warn=True, debug=False):
# Column titles
# time, scan,,,, spec index, spec channel, start voltage, end voltage,
# scan voltage (V), current setpoint (pA), loop gain (%), T_raster (ms)
# points, file, comments
ls = []
# time (write time in DAYS since 1900Jan1, this is MS Excel friendly)
ls.append( str(crv.props['time']/86400.0 + 25569 - 4.0/24) )
# experiment sample
ls.append( csv_safe(crv.ex.sample) )
ls.append( csv_safe(crv.ex.data_set) )
# parent scan index (scan, repetition, direction) and channel
if crv.is_linked:
ls.append(
'{0[0]:03d},{0[1]:04d},{1},{0[2]}'.format(
pyMTRX.file_name_values(crv.props['parent']), crv.mrk.dir
)
)
else:
ls.append(',,,')
# END try
# spec index (scan, repetition, direction) and channel
ls.append(
'{index:03d},{rep:04d},{direction},{channel}'.format(**crv.props)
)
# spec start, end
ls.append('{:0.3f},{:0.3f}'.format(crv.X[0], crv.X[-1]))
# scan bias
ls.append('{}'.format(crv.props['GapVoltageControl_Voltage'].value))
# scan current setpoint
ls.append(
'{:0.1f}'.format(crv.props['Regulator_Setpoint_1'].value * 1e12)
)
# scan loop gain
ls.append('{:0.2f}'.format(crv.props['Regulator_Loop_Gain_1_I'].value))
# spec raster time
ls.append(
'{:0.3f}'.format(crv.props['Spectroscopy_Raster_Time_1'].value * 1e3)
)
# spec number of points
ls.append(str(len(crv)))
# experiment data set and comment, sts file name
ls.append( csv_safe(crv.ex.comment) )
ls.append( csv_safe(crv.props['comment']) )
ls.append( '{}\n'.format(crv.props['file']) )
return (crv.props['time'], ','.join(ls))
# END make_spectrum_entry
#==============================================================================
def find_files(cwd='./', fext='[^.]+', r=True):
    '''Find _mtrx files (Breadth-first search)
Args:
cwd (str): current working directory
fext (str): pattern used to match the file extensions
r (bool): flag for recursive search
Returns:
(list) ['./file.ext', './file.ext', './file.ext', ...]
'''
if cwd[-1] != '/':
cwd += '/'
out_files = []
work_queue = [cwd+fn for fn in os.listdir(cwd)]
# BFS for I(t)_mtrx files
while work_queue:
fpath = work_queue.pop(0)
if os.path.isdir(fpath) and r:
work_queue.extend( [fpath+'/'+fn for fn in os.listdir(fpath)] )
elif re.search(r'\.'+fext+'$', fpath):
out_files.append(fpath)
# END if
# END while
return out_files
# END find files
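# Illustrative sketch (the directory name is hypothetical): recursively
# collect every MATRIX experiment file below a data folder.
def _example_find_files():
    '''Not called anywhere; shown only to document typical usage.'''
    return find_files('./2015-06-01_sample_A/', fext='mtrx', r=True)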
#==============================================================================
def csv_safe(s):
return '"' + re.sub(r'[\r\n]+', ' | ', s) + '"'
# END csv_safe
#==============================================================================
def make_hms(t):
hours = int(t/60**2)
minutes = int((t%60**2)/60**1)
seconds = t%60**1/60**0
return hours, minutes, seconds
# END make_hms
#==============================================================================
def flatten_tree(A):
flat = []
try:
for a in A:
flat.extend( flatten_tree(a) )
except TypeError:
return [A]
# END try
return flat
# END flatten_tree
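# Illustrative sketch: flatten_tree linearizes nested lists the same way
# main() flattens the scan tree returned by Experiment.import_scan.
def _example_flatten_tree():
    '''Not called anywhere; shown only to document typical usage.'''
    return flatten_tree([1, [2, 3], [[4]]])  # -> [1, 2, 3, 4]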
#==============================================================================
if __name__ == '__main__':
if os.name == 'nt': mp.freeze_support()
#main()
#quit()
#try:
# main()
#except Exception as err:
# exc_type, exc_value, exc_tb = sys.exc_info()
# bad_file, bad_line, func_name, text = traceback.extract_tb(exc_tb)[-1]
# print 'Error in {}'.format(bad_file)
# print '{} on {}: {}'.format(type(err).__name__, bad_line, err)
# print ''
#finally:
# raw_input("press enter to exit")
## END try
# END if
| ampron/pyMTRX | pyMTRX/scripts/notebook_sheet.py | Python | gpl-3.0 | 12,420 | 0.008132 |
#!/usr/bin/env python3
import argparse
import functools
import json
import os
import re
import signal
import socket
import ssl
import subprocess
import sys
import threading
import time
import warnings
from collections import defaultdict
from operator import itemgetter
import psutil
from mtools.util import OrderedDict
from mtools.util.cmdlinetool import BaseCmdLineTool
from mtools.util.print_table import print_table
from mtools.version import __version__
try:
import Queue
except ImportError:
import queue as Queue
try:
from pymongo import MongoClient as Connection
from pymongo import version_tuple as pymongo_version
from bson import SON
from io import BytesIO
from distutils.version import LooseVersion
from pymongo.errors import ConnectionFailure, AutoReconnect
from pymongo.errors import OperationFailure, ConfigurationError
except ImportError as e:
raise ImportError("Can't import pymongo. See "
"https://api.mongodb.com/python/current/ for "
"instructions on how to install pymongo: " + str(e))
class MongoConnection(Connection):
"""
MongoConnection class.
Wrapper around Connection (itself conditionally a MongoClient or
pymongo.Connection) to specify timeout and directConnection.
"""
def __init__(self, *args, **kwargs):
kwargs.setdefault('directConnection', True)
kwargs.setdefault('serverSelectionTimeoutMS', 1)
# Set client application name for MongoDB 3.4+ servers
kwargs['appName'] = f'''mlaunch v{__version__}'''
Connection.__init__(self, *args, **kwargs)
def wait_for_host(port, interval=1, timeout=30, to_start=True, queue=None,
ssl_pymongo_options=None, tls_pymongo_options=None):
"""
Ping server and wait for response.
Ping a mongod or mongos every `interval` seconds until it responds, or
`timeout` seconds have passed. If `to_start` is set to False, will wait for
the node to shut down instead. This function can be called as a separate
thread.
If queue is provided, it will place the results in the message queue and
return, otherwise it will just return the result directly.
"""
host = 'localhost:%i' % port
start_time = time.time()
while True:
if (time.time() - start_time) > timeout:
if queue:
queue.put_nowait((port, False))
return False
try:
# make connection and ping host
con = MongoConnection(host,
**(ssl_pymongo_options or {}),
**(tls_pymongo_options or {}))
con.admin.command('ping')
if to_start:
if queue:
queue.put_nowait((port, True))
return True
else:
time.sleep(interval)
except Exception:
if to_start:
time.sleep(interval)
else:
if queue:
queue.put_nowait((port, True))
return True
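# Illustrative sketch (port number assumed): poll a mongod on port 27017 from
# a worker thread and collect the (port, success) result through a queue, as
# described in the wait_for_host docstring above.
def _example_wait_for_host_threaded():
    """Not called anywhere; shown only to document typical usage."""
    q = Queue.Queue()
    t = threading.Thread(target=wait_for_host, args=(27017,),
                         kwargs={'timeout': 30, 'queue': q})
    t.start()
    t.join()
    return q.get_nowait()  # (27017, True) once the node answers ping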
def shutdown_host(port, username=None, password=None, authdb=None):
"""
Send the shutdown command to a mongod or mongos on given port.
This function can be called as a separate thread.
"""
host = 'localhost:%i' % port
try:
if username and password and authdb:
if authdb != "admin":
raise RuntimeError("given username/password is not for "
"admin database")
mc = MongoConnection(host, username=username, password=password)
else:
mc = MongoConnection(host)
try:
mc.admin.command('shutdown', force=True)
except AutoReconnect:
pass
except OperationFailure:
print("Error: cannot authenticate to shut down %s." % host)
return
except ConnectionFailure:
pass
else:
mc.close()
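# Illustrative sketch (port assumed; credentials are the mlaunch defaults
# defined further below): gracefully stop an auth-enabled mongod.
def _example_shutdown_host():
    """Not called anywhere; shown only to document typical usage."""
    shutdown_host(27018, username='user', password='password', authdb='admin')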
@functools.lru_cache()
def check_mongo_server_output(binary, argument):
"""Call mongo[d|s] with arguments such as --help or --version.
This is used only to check the server's output. We expect the server to
exit immediately.
"""
try:
proc = subprocess.Popen(['%s' % binary, argument],
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE, shell=False)
except OSError as exc:
print('Failed to launch %s' % binary)
raise exc
out, err = proc.communicate()
if proc.returncode:
raise OSError(out or err)
return out
class MLaunchTool(BaseCmdLineTool):
UNDOCUMENTED_MONGOD_ARGS = ['--nopreallocj', '--wiredTigerEngineConfigString']
UNSUPPORTED_MONGOS_ARGS = ['--wiredTigerCacheSizeGB', '--storageEngine']
UNSUPPORTED_CONFIG_ARGS = ['--oplogSize', '--storageEngine', '--smallfiles', '--nojournal']
def __init__(self, test=False):
BaseCmdLineTool.__init__(self)
# arguments
self.args = None
# startup parameters for each port
self.startup_info = {}
# data structures for the discovery feature
self.cluster_tree = {}
self.cluster_tags = defaultdict(list)
self.cluster_running = {}
# memoize ignored arguments passed to different binaries
self.ignored_arguments = {}
# config docs for replica sets (key is replica set name)
self.config_docs = {}
# shard connection strings
self.shard_connection_str = []
# ssl configuration to start mongod or mongos, or create a MongoClient
self.ssl_server_args = ''
self.ssl_pymongo_options = {}
# tls configuration to start mongod or mongos, or create a MongoClient
self.tls_server_args = ''
self.tls_pymongo_options = {}
# indicate if running in testing mode
self.test = test
# version of MongoDB server
self.current_version = self.getMongoDVersion()
def run(self, arguments=None):
"""
Main run method.
Called for all sub-commands and parameters. It sets up argument
parsing, then calls the sub-command method with the same name.
"""
# set up argument parsing in run, so that subsequent calls
# to run can call different sub-commands
self.argparser = argparse.ArgumentParser()
self.argparser.add_argument('--version', action='version',
version=f'''mtools version {__version__} || Python {sys.version}''')
self.argparser.add_argument('--no-progressbar', action='store_true',
default=False,
help='disables progress bar')
self.argparser.description = ('script to launch MongoDB stand-alone '
'servers, replica sets and shards.')
# make sure init is default command even when specifying
# arguments directly
if arguments and arguments.startswith('-'):
arguments = 'init ' + arguments
# default sub-command is `init` if none provided
elif (len(sys.argv) > 1 and sys.argv[1].startswith('-') and
sys.argv[1] not in ['-h', '--help', '--version']):
sys.argv = sys.argv[0:1] + ['init'] + sys.argv[1:]
# create command sub-parsers
subparsers = self.argparser.add_subparsers(dest='command')
self.argparser._action_groups[0].title = 'commands'
self.argparser._action_groups[0].description = \
('init is the default command and can be omitted. To get help on '
'individual commands, run mlaunch <command> --help. Command line '
'arguments which are not handled by mlaunch will be passed '
'through to mongod/mongos if those options are listed in the '
'--help output for the current binary. For example: '
'--storageEngine, --logappend, or --config.')
# init command
helptext = ('initialize a new MongoDB environment and start '
'stand-alone instances, replica sets, or sharded '
'clusters.')
desc = ('Initialize a new MongoDB environment and start stand-alone '
'instances, replica sets, or sharded clusters. Command line '
'arguments which are not handled by mlaunch will be passed '
'through to mongod/mongos if those options are listed in the '
'--help output for the current binary. For example: '
'--storageEngine, --logappend, or --config.')
init_parser = subparsers.add_parser('init', help=helptext,
description=desc)
# either single or replica set
me_group = init_parser.add_mutually_exclusive_group(required=True)
me_group.add_argument('--single', action='store_true',
help=('creates a single stand-alone mongod '
'instance'))
me_group.add_argument('--replicaset', action='store_true',
help=('creates replica set with several mongod '
'instances'))
# replica set arguments
init_parser.add_argument('--nodes', action='store', metavar='NUM',
type=int, default=3,
help=('adds NUM data nodes to replica set '
'(requires --replicaset, default=3)'))
init_parser.add_argument('--arbiter', action='store_true',
default=False,
help=('adds arbiter to replica set '
'(requires --replicaset)'))
init_parser.add_argument('--name', action='store', metavar='NAME',
default='replset',
help='name for replica set (default=replset)')
init_parser.add_argument('--priority', action='store_true',
default=False,
help='make lowest-port member primary')
# sharded clusters
init_parser.add_argument('--sharded', '--shards', action='store',
nargs='+', metavar='N',
help=('creates a sharded setup consisting of '
'several singles or replica sets. '
'Provide either list of shard names or '
'number of shards.'))
init_parser.add_argument('--config', action='store', default=-1,
type=int, metavar='NUM',
help=('adds NUM config servers to sharded '
'setup (requires --sharded, default=1, '
'with --csrs default=3)'))
init_parser.add_argument('--csrs', default=False, action='store_true',
help=('deploy config servers as a replica '
'set (requires MongoDB >= 3.2.0)'))
init_parser.add_argument('--mongos', action='store', default=1,
type=int, metavar='NUM',
help=('starts NUM mongos processes (requires '
'--sharded, default=1)'))
# verbose, port, binary path
init_parser.add_argument('--verbose', action='store_true',
default=False,
help='outputs more verbose information.')
init_parser.add_argument('--port', action='store', type=int,
default=27017,
help=('port for mongod, start of port range '
'in case of replica set or shards '
'(default=27017)'))
init_parser.add_argument('--binarypath', action='store', default=None,
metavar='PATH',
help=('search for mongod/s binaries in the '
'specified PATH.'))
init_parser.add_argument('--dir', action='store', default='./data',
help=('base directory to create db and log '
'paths (default=./data/)'))
init_parser.add_argument('--hostname', action='store',
default='localhost',
help=('override hostname for replica set '
'configuration'))
# authentication, users, roles
self._default_auth_roles = ['dbAdminAnyDatabase',
'readWriteAnyDatabase',
'userAdminAnyDatabase',
'clusterAdmin']
init_parser.add_argument('--auth', action='store_true', default=False,
help=('enable authentication and create a '
'key file and admin user '
'(default=user/password)'))
init_parser.add_argument('--username', action='store', type=str,
default='user',
help=('username to add (requires --auth, '
'default=user)'))
init_parser.add_argument('--password', action='store', type=str,
default='password',
help=('password for given username (requires '
'--auth, default=password)'))
init_parser.add_argument('--auth-db', action='store', type=str,
default='admin', metavar='DB',
help=('database where user will be added '
'(requires --auth, default=admin)'))
init_parser.add_argument('--auth-roles', action='store',
default=self._default_auth_roles,
metavar='ROLE', nargs='*',
                                 help=('admin user\'s privilege roles; note '
'that the clusterAdmin role is '
'required to run the stop command '
'(requires --auth, default="%s")'
% ' '.join(self._default_auth_roles)))
init_parser.add_argument('--auth-role-docs', action='store_true',
default=False,
help='auth-roles are JSON documents')
init_parser.add_argument('--no-initial-user', action='store_false',
default=True, dest='initial-user',
help=('Do not create an initial user if auth '
'is enabled'))
def is_file(arg):
if not os.path.exists(os.path.expanduser(arg)):
init_parser.error("The file [%s] does not exist" % arg)
return arg
# MongoDB 4.2 adds TLS options to replace the corresponding SSL options
# https://docs.mongodb.com/manual/release-notes/4.2/#new-tls-options
if (LooseVersion(self.current_version) >= LooseVersion("4.2.0")):
# tls
tls_args = init_parser.add_argument_group('TLS options')
tls_args.add_argument('--tlsCAFile',
help='Certificate Authority file for TLS',
type=is_file)
tls_args.add_argument('--tlsCRLFile',
help='Certificate Revocation List file for TLS',
type=is_file)
tls_args.add_argument('--tlsAllowInvalidHostnames',
action='store_true',
help=('allow client and server certificates to '
'provide non-matching hostnames'))
tls_args.add_argument('--tlsAllowInvalidCertificates',
action='store_true',
help=('allow client or server connections with '
'invalid certificates'))
tls_server_args = init_parser.add_argument_group('Server TLS options')
tls_server_args.add_argument('--tlsMode',
help='set the TLS operation mode',
choices=('disabled allowTLS preferTLS '
'requireTLS'.split()))
tls_server_args.add_argument('--tlsCertificateKeyFile',
help='PEM file for TLS', type=is_file)
tls_server_args.add_argument('--tlsCertificateKeyFilePassword',
help='PEM file password')
tls_server_args.add_argument('--tlsClusterFile',
help=('key file for internal TLS '
'authentication'), type=is_file)
tls_server_args.add_argument('--tlsClusterPassword',
help=('internal authentication key '
'file password'))
tls_server_args.add_argument('--tlsDisabledProtocols',
help=('comma separated list of TLS '
'protocols to disable '
'[TLS1_0,TLS1_1,TLS1_2]'))
tls_server_args.add_argument('--tlsAllowConnectionsWithoutCertificates',
action='store_true',
help=('allow client to connect without '
'presenting a certificate'))
tls_server_args.add_argument('--tlsFIPSMode', action='store_true',
help='activate FIPS 140-2 mode')
tls_client_args = init_parser.add_argument_group('Client TLS options')
tls_client_args.add_argument('--tlsClientCertificate',
help='client certificate file for TLS',
type=is_file)
tls_client_args.add_argument('--tlsClientCertificateKeyFile',
help='client certificate key file for TLS',
type=is_file)
tls_client_args.add_argument('--tlsClientCertificateKeyFilePassword',
help='client certificate key file password')
self.tls_args = tls_args
self.tls_client_args = tls_client_args
self.tls_server_args = tls_server_args
else:
# ssl
ssl_args = init_parser.add_argument_group('TLS/SSL options')
ssl_args.add_argument('--sslCAFile',
help='Certificate Authority file for TLS/SSL',
type=is_file)
ssl_args.add_argument('--sslCRLFile',
help='Certificate Revocation List file for TLS/SSL',
type=is_file)
ssl_args.add_argument('--sslAllowInvalidHostnames',
action='store_true',
help=('allow client and server certificates to '
'provide non-matching hostnames'))
ssl_args.add_argument('--sslAllowInvalidCertificates',
action='store_true',
help=('allow client or server connections with '
'invalid certificates'))
ssl_server_args = init_parser.add_argument_group('Server TLS/SSL options')
ssl_server_args.add_argument('--sslMode',
help='set the TLS/SSL operation mode',
choices=('disabled allowSSL preferSSL '
'requireSSL'.split()))
ssl_server_args.add_argument('--sslPEMKeyFile',
help='PEM file for TLS/SSL', type=is_file)
ssl_server_args.add_argument('--sslPEMKeyPassword',
help='PEM file password')
ssl_server_args.add_argument('--sslClusterFile',
help=('key file for internal TLS/SSL '
'authentication'), type=is_file)
ssl_server_args.add_argument('--sslClusterPassword',
help=('internal authentication key '
'file password'))
ssl_server_args.add_argument('--sslDisabledProtocols',
help=('comma separated list of TLS '
'protocols to disable '
'[TLS1_0,TLS1_1,TLS1_2]'))
ssl_server_args.add_argument('--sslAllowConnectionsWithoutCertificates',
action='store_true',
help=('allow client to connect without '
'presenting a certificate'))
ssl_server_args.add_argument('--sslFIPSMode', action='store_true',
help='activate FIPS 140-2 mode')
ssl_client_args = init_parser.add_argument_group('Client TLS/SSL options')
ssl_client_args.add_argument('--sslClientCertificate',
help='client certificate file for TLS/SSL',
type=is_file)
ssl_client_args.add_argument('--sslClientPEMKeyFile',
help='client PEM file for TLS/SSL',
type=is_file)
ssl_client_args.add_argument('--sslClientPEMKeyPassword',
help='client PEM file password')
self.ssl_args = ssl_args
self.ssl_client_args = ssl_client_args
self.ssl_server_args = ssl_server_args
# start command
start_parser = subparsers.add_parser('start',
help=('starts existing MongoDB '
'instances. Example: '
'"mlaunch start config" '
'will start all config '
'servers.'),
description=('starts existing '
'MongoDB instances. '
'Example: "mlaunch '
'start config" will '
'start all config '
'servers.'))
start_parser.add_argument('tags', metavar='TAG', action='store',
nargs='*', default=[],
help=('without tags, all non-running nodes '
'will be restarted. Provide '
'additional tags to narrow down the '
'set of nodes to start.'))
start_parser.add_argument('--verbose', action='store_true',
default=False,
help='outputs more verbose information.')
start_parser.add_argument('--dir', action='store', default='./data',
help=('base directory to start nodes '
'(default=./data/)'))
start_parser.add_argument('--binarypath', action='store',
default=None, metavar='PATH',
help=('search for mongod/s binaries in the '
'specified PATH.'))
# stop command
helptext = ('stops running MongoDB instances. Example: "mlaunch stop '
'shard 2 secondary" will stop all secondary nodes '
'of shard 2.')
desc = ('stops running MongoDB instances with the shutdown command. '
'Example: "mlaunch stop shard 2 secondary" will stop all '
'secondary nodes of shard 2.')
stop_parser = subparsers.add_parser('stop',
help=helptext,
description=desc)
helptext = ('without tags, all running nodes will be stopped. '
'Provide additional tags to narrow down the set of '
'nodes to stop.')
stop_parser.add_argument('tags', metavar='TAG', action='store',
nargs='*', default=[], help=helptext)
stop_parser.add_argument('--verbose', action='store_true',
default=False,
help='outputs more verbose information.')
stop_parser.add_argument('--dir', action='store', default='./data',
help=('base directory to stop nodes '
'(default=./data/)'))
# restart command
desc = ('stops running MongoDB instances with the shutdown command. '
'Then restarts the stopped instances.')
restart_parser = subparsers.add_parser('restart',
help=('stops, then restarts '
'MongoDB instances.'),
description=desc)
restart_parser.add_argument('tags', metavar='TAG', action='store',
nargs='*', default=[],
help=('without tags, all non-running '
'nodes will be restarted. Provide '
'additional tags to narrow down the '
'set of nodes to start.'))
restart_parser.add_argument('--verbose', action='store_true',
default=False,
help='outputs more verbose information.')
restart_parser.add_argument('--dir', action='store', default='./data',
help=('base directory to restart nodes '
'(default=./data/)'))
restart_parser.add_argument('--binarypath', action='store',
default=None, metavar='PATH',
help=('search for mongod/s binaries in '
'the specified PATH.'))
# list command
list_parser = subparsers.add_parser('list',
help=('list MongoDB instances of '
'this environment.'),
description=('list MongoDB '
'instances of this '
'environment.'))
list_parser.add_argument('--dir', action='store', default='./data',
help=('base directory to list nodes '
'(default=./data/)'))
list_parser.add_argument('--json', action='store_true', default=False,
help=('output in JSON format '))
list_parser.add_argument('--tags', action='store_true', default=False,
help=('outputs the tags for each instance. '
'Tags can be used to target instances '
'for start/stop/kill.'))
list_parser.add_argument('--startup', action='store_true',
default=False,
help=('outputs the startup command lines for '
'each instance.'))
list_parser.add_argument('--verbose', action='store_true',
default=False, help='alias for --tags.')
        # kill command
helptext = ('kills (or sends another signal to) MongoDB instances '
'of this environment.')
desc = ('kills (or sends another signal to) MongoDB instances '
'of this environment.')
kill_parser = subparsers.add_parser('kill', help=helptext,
description=desc)
kill_parser.add_argument('tags', metavar='TAG', action='store',
nargs='*', default=[],
help=('without tags, all running nodes will '
'be killed. Provide additional tags to '
'narrow down the set of nodes to '
'kill.'))
kill_parser.add_argument('--dir', action='store', default='./data',
help=('base directory to kill nodes '
'(default=./data/)'))
kill_parser.add_argument('--signal', action='store', default=15,
help=('signal to send to processes, '
'default=15 (SIGTERM)'))
kill_parser.add_argument('--verbose', action='store_true',
default=False,
help='outputs more verbose information.')
# argparser is set up, now call base class run()
BaseCmdLineTool.run(self, arguments, get_unknowns=True)
# conditions on argument combinations
if (self.args['command'] == 'init' and
'single' in self.args and self.args['single']):
if self.args['arbiter']:
self.argparser.error("can't specify --arbiter for "
"single nodes.")
# replace path with absolute path, but store relative path as well
if ('dir' in self.args and self.args['dir']):
self.relative_dir = self.args['dir']
self.dir = os.path.abspath(self.args['dir'])
self.args['dir'] = self.dir
if (self.args['command'] is None):
self.argparser.print_help()
self.argparser.exit()
else:
# branch out in sub-commands
getattr(self, self.args['command'])()
# -- below are the main commands: init, start, stop, list, kill
def init(self):
"""
Sub-command init.
Branches out to sharded, replicaset or single node methods.
"""
# check for existing environment. Only allow subsequent
# 'mlaunch init' if they are identical.
if self._load_parameters():
if self.loaded_args != self.args:
raise SystemExit('A different environment already exists '
'at %s.' % self.dir)
first_init = False
else:
first_init = True
self.ssl_pymongo_options = self._get_ssl_pymongo_options(self.args)
self.tls_pymongo_options = self._get_tls_pymongo_options(self.args)
if (self._get_ssl_server_args() and not
self.args['sslAllowConnectionsWithoutCertificates'] and not
self.args['sslClientCertificate'] and not
self.args['sslClientPEMKeyFile']):
sys.stderr.write('warning: server requires certificates but no'
' --sslClientCertificate provided\n')
if (self._get_tls_server_args() and not
self.args['tlsAllowConnectionsWithoutCertificates'] and not
self.args['tlsClientCertificate'] and not
self.args['tlsClientCertificateKeyFile']):
sys.stderr.write('warning: server requires certificates but no'
' --tlsClientCertificate provided\n')
# number of default config servers
if self.args['config'] == -1:
self.args['config'] = 1
# Exit with error if --csrs is set and MongoDB < 3.1.0
if (self.args['csrs'] and
LooseVersion(self.current_version) < LooseVersion("3.1.0") and
LooseVersion(self.current_version) != LooseVersion("0.0.0")):
errmsg = (" \n * The '--csrs' option requires MongoDB version "
"3.2.0 or greater, the current version is %s.\n"
% self.current_version)
raise SystemExit(errmsg)
# add the 'csrs' parameter as default for MongoDB >= 3.3.0
if (LooseVersion(self.current_version) >= LooseVersion("3.3.0") or
LooseVersion(self.current_version) == LooseVersion("0.0.0")):
self.args['csrs'] = True
# construct startup strings
self._construct_cmdlines()
# write out parameters
if self.args['verbose']:
print("writing .mlaunch_startup file.")
self._store_parameters()
# exit if running in testing mode
if self.test:
return
# check if authentication is enabled, make key file
if self.args['auth'] and first_init:
if not os.path.exists(self.dir):
os.makedirs(self.dir)
if '--keyFile' in self.unknown_args:
# Check if keyfile is readable
                keyfile = None
                keyfile_path = None
                try:
                    keyfile_idx = self.unknown_args.index('--keyFile') + 1
                    keyfile_path = self.unknown_args[keyfile_idx]
                    keyfile = self._read_key_file(keyfile_path)
                except Exception:
                    print(f'\n  WARNING: Specified keyFile does not appear readable: {keyfile_path}\n')
else:
keyfile = os.path.join(self.dir, "keyfile")
print(f'Generating keyfile: {keyfile}')
os.system('openssl rand -base64 753 > "%s"' % keyfile)
if os.name != 'nt':
os.system(f'chmod 600 "{keyfile}"')
# if not all ports are free, complain and suggest alternatives.
all_ports = self.get_tagged(['all'])
ports_avail = self.wait_for(all_ports, 1, 1, to_start=False)
if not all(map(itemgetter(1), ports_avail)):
dir_addon = (' --dir %s' % self.relative_dir
if self.relative_dir != './data' else '')
errmsg = ('\nThe following ports are not available: %s\n\n'
% ', '.join([str(p[0])
for p in ports_avail if not p[1]]))
errmsg += (" * If you want to restart nodes from this "
"environment, use 'mlaunch start%s' instead.\n"
% dir_addon)
errmsg += (" * If the ports are used by a different mlaunch "
"environment, stop those first with 'mlaunch stop "
"--dir <env>'.\n")
errmsg += (" * You can also specify a different port range with "
"an additional '--port <startport>'\n")
raise SystemExit(errmsg)
if self.args['sharded']:
shard_names = self._get_shard_names(self.args)
# start mongod (shard and config) nodes and wait
nodes = self.get_tagged(['mongod', 'down'])
self._start_on_ports(nodes, wait=True, override_auth=True)
# initiate replica sets if init is called for the first time
if first_init:
if self.args['csrs']:
# Initiate config servers in a replicaset
if self.args['verbose']:
print('Initiating config server replica set.')
members = sorted(self.get_tagged(["config"]))
self._initiate_replset(members[0], "configRepl")
for shard in shard_names:
# initiate replica set on first member
if self.args['verbose']:
print('Initiating shard replica set %s.' % shard)
members = sorted(self.get_tagged([shard]))
self._initiate_replset(members[0], shard)
# add mongos
mongos = sorted(self.get_tagged(['mongos', 'down']))
self._start_on_ports(mongos, wait=True, override_auth=True)
if first_init:
# add shards
mongos = sorted(self.get_tagged(['mongos']))
con = self.client('localhost:%i' % mongos[0])
shards_to_add = len(self.shard_connection_str)
nshards = con['config']['shards'].count_documents({})
if nshards < shards_to_add:
if self.args['replicaset']:
print("adding shards. can take up to 30 seconds...")
else:
print("adding shards.")
shard_conns_and_names = list(zip(self.shard_connection_str,
shard_names))
while True:
try:
nshards = con['config']['shards'].count_documents({})
except Exception:
nshards = 0
if nshards >= shards_to_add:
break
for conn_str, name in shard_conns_and_names:
try:
res = con['admin'].command(SON([('addShard',
conn_str),
('name', name)]))
except Exception as e:
if self.args['verbose']:
print('%s will retry in a moment.' % e)
continue
if res['ok']:
if self.args['verbose']:
print("shard %s added successfully" % conn_str)
shard_conns_and_names.remove((conn_str, name))
break
else:
if self.args['verbose']:
                                print('%s - will retry' % res)
time.sleep(1)
elif self.args['single']:
# just start node
nodes = self.get_tagged(['single', 'down'])
self._start_on_ports(nodes, wait=False)
elif self.args['replicaset']:
# start nodes and wait
nodes = sorted(self.get_tagged(['mongod', 'down']))
self._start_on_ports(nodes, wait=True)
# initiate replica set
if first_init:
self._initiate_replset(nodes[0], self.args['name'])
# wait for all nodes to be running
nodes = self.get_tagged(['all'])
self.wait_for(nodes)
# now that nodes are running, add admin user if authentication enabled
if self.args['auth'] and self.args['initial-user'] and first_init:
self.discover()
nodes = []
if self.args['sharded']:
nodes = self.get_tagged(['mongos', 'running'])
elif self.args['single']:
nodes = self.get_tagged(['single', 'running'])
elif self.args['replicaset']:
print("waiting for primary to add a user.")
if self._wait_for_primary():
nodes = self.get_tagged(['primary', 'running'])
else:
raise RuntimeError("failed to find a primary, so adding "
"admin user isn't possible")
if not nodes:
raise RuntimeError("can't connect to server, so adding admin "
"user isn't possible")
roles = []
found_cluster_admin = False
if self.args['auth_role_docs']:
for role_str in self.args['auth_roles']:
role_doc = json.loads(role_str)
roles.append(role_doc)
if role_doc['role'] == "clusterAdmin":
found_cluster_admin = True
else:
roles = self.args['auth_roles']
found_cluster_admin = "clusterAdmin" in roles
if not found_cluster_admin:
warnings.warn("the stop command will not work with auth "
"because the user does not have the "
"clusterAdmin role")
self._add_user(sorted(nodes)[0], name=self.args['username'],
password=self.args['password'],
database=self.args['auth_db'],
roles=roles)
if self.args['sharded']:
for shard in shard_names:
members = sorted(self.get_tagged([shard]))
if self.args['verbose']:
print("adding users to %s" % shard)
self._add_user(members[0],
name=self.args['username'],
password=self.args['password'],
database=self.args['auth_db'],
roles=roles)
if self.args['verbose']:
print("added user %s on %s database" % (self.args['username'],
self.args['auth_db']))
# in sharded env, if --mongos 0, kill the dummy mongos
if self.args['sharded'] and self.args['mongos'] == 0:
port = self.args['port']
print("shutting down temporary mongos on localhost:%s" % port)
username = self.args['username'] if self.args['auth'] else None
password = self.args['password'] if self.args['auth'] else None
authdb = self.args['auth_db'] if self.args['auth'] else None
shutdown_host(port, username, password, authdb)
# discover again, to get up-to-date info
self.discover()
# for sharded authenticated clusters, restart after first_init
# to enable auth
if self.args['sharded'] and self.args['auth'] and first_init:
if self.args['verbose']:
print("restarting cluster to enable auth...")
self.restart()
if self.args['auth'] and self.args['initial-user']:
print('Username "%s", password "%s"'
% (self.args['username'], self.args['password']))
if self.args['verbose']:
print("done.")
# Get the "mongod" version, useful for checking for support or
# non-support of features.
# Normally we expect to get back something like "db version v3.4.0",
    # but with release candidates we get back something like
    # "db version v3.4.0-rc2". This code extracts the "major.minor.revision"
    # part of the string.
def getMongoDVersion(self):
binary = "mongod"
if self.args and self.args.get('binarypath'):
binary = os.path.join(self.args['binarypath'], binary)
try:
out = check_mongo_server_output(binary, '--version')
except Exception:
return "0.0"
buf = BytesIO(out)
current_version = buf.readline().strip().decode('utf-8')
# remove prefix "db version v"
if current_version.rindex('v') > 0:
current_version = current_version.rpartition('v')[2]
# remove suffix making assumption that all release candidates
# equal revision 0
try:
if current_version.rindex('-') > 0: # release candidate?
current_version = current_version.rpartition('-')[0]
except Exception:
pass
if self.args and self.args['verbose']:
print("Detected mongod version: %s" % current_version)
return current_version
def client(self, host_and_port, **kwargs):
kwargs.update(self.ssl_pymongo_options)
kwargs.update(self.tls_pymongo_options)
return MongoConnection(host_and_port, **kwargs)
def stop(self):
"""
Sub-command stop.
Parse the list of tags and stop the matching nodes. Each tag has a set
of nodes associated with it, and only the nodes matching all tags
(intersection) will be shut down.
Currently this is an alias for kill()
"""
self.kill()
def start(self):
"""Sub-command start."""
self.discover()
# startup_info only gets loaded from protocol version 2 on,
# check if it's loaded
if not self.startup_info:
# hack to make environment startable with older protocol
# versions < 2: try to start nodes via init if all nodes are down
if len(self.get_tagged(['down'])) == len(self.get_tagged(['all'])):
self.args = self.loaded_args
print("upgrading mlaunch environment meta-data.")
return self.init()
else:
raise SystemExit("These nodes were created with an older "
"version of mlaunch (v1.1.1 or below). To "
"upgrade this environment and make use of "
"the start/stop/list commands, stop all "
"nodes manually, then run 'mlaunch start' "
"again. You only have to do this once.")
# if new unknown_args are present, compare them with loaded ones
# (here we can be certain of protocol v2+)
if (self.args['binarypath'] is not None or
(self.unknown_args and
set(self.unknown_args) != set(self.loaded_unknown_args))):
# store current args, use self.args from file (self.loaded_args)
start_args = self.args
self.args = self.loaded_args
self.args['binarypath'] = start_args['binarypath']
# construct new startup strings with updated unknown args.
# They are for this start only and will not be persisted in
# the .mlaunch_startup file
self._construct_cmdlines()
# reset to original args for this start command
self.args = start_args
matches = self._get_ports_from_args(self.args, 'down')
if len(matches) == 0:
raise SystemExit('no nodes started.')
# start config servers first
config_matches = self.get_tagged(['config']).intersection(matches)
self._start_on_ports(config_matches, wait=True)
# start shards next
mongod_matches = (self.get_tagged(['mongod']) -
self.get_tagged(['config']))
mongod_matches = mongod_matches.intersection(matches)
self._start_on_ports(mongod_matches, wait=True)
# now start mongos
mongos_matches = self.get_tagged(['mongos']).intersection(matches)
self._start_on_ports(mongos_matches)
# wait for all matched nodes to be running
self.wait_for(matches)
# refresh discover
self.discover()
def list(self):
"""
Sub-command list.
Takes no further parameters. Will discover the current configuration
and print a table of all the nodes with status and port.
"""
self.discover()
print_docs = []
# mongos
for node in sorted(self.get_tagged(['mongos'])):
doc = OrderedDict([('process', 'mongos'), ('port', node),
('status', 'running'
if self.cluster_running[node] else 'down')])
print_docs.append(doc)
if len(self.get_tagged(['mongos'])) > 0:
print_docs.append(None)
# configs
for node in sorted(self.get_tagged(['config'])):
doc = OrderedDict([('process', 'config server'),
('port', node),
('status', 'running'
if self.cluster_running[node] else 'down')])
print_docs.append(doc)
if len(self.get_tagged(['config'])) > 0:
print_docs.append(None)
# mongod
for shard in self._get_shard_names(self.loaded_args):
tags = []
replicaset = ('replicaset' in self.loaded_args and
self.loaded_args['replicaset'])
padding = ''
if shard:
print_docs.append(shard)
tags.append(shard)
padding = ' '
if replicaset:
# primary
primary = self.get_tagged(tags + ['primary', 'running'])
if len(primary) > 0:
node = list(primary)[0]
print_docs.append(OrderedDict
([('process', padding + 'primary'),
('port', node),
('status', 'running'
if self.cluster_running[node]
else 'down')]))
# secondaries
secondaries = self.get_tagged(tags + ['secondary', 'running'])
for node in sorted(secondaries):
print_docs.append(OrderedDict
([('process', padding + 'secondary'),
('port', node),
('status', 'running'
if self.cluster_running[node]
else 'down')]))
# data-bearing nodes that are down or not in the
# replica set yet
mongods = self.get_tagged(tags + ['mongod'])
arbiters = self.get_tagged(tags + ['arbiter'])
nodes = sorted(mongods - primary - secondaries - arbiters)
for node in nodes:
print_docs.append(OrderedDict
([('process', padding + 'mongod'),
('port', node),
('status', 'running'
if self.cluster_running[node]
else 'down')]))
# arbiters
for node in arbiters:
print_docs.append(OrderedDict
([('process', padding + 'arbiter'),
('port', node),
('status', 'running'
if self.cluster_running[node]
else 'down')]))
else:
nodes = self.get_tagged(tags + ['mongod'])
if len(nodes) > 0:
node = nodes.pop()
print_docs.append(OrderedDict
([('process', padding + 'single'),
('port', node),
('status', 'running'
if self.cluster_running[node]
else 'down')]))
if shard:
print_docs.append(None)
processes = self._get_processes()
startup = self.startup_info
# print tags as well
for doc in [x for x in print_docs if type(x) == OrderedDict]:
try:
doc['pid'] = processes[doc['port']].pid
except KeyError:
doc['pid'] = '-'
if self.args['verbose'] or self.args['tags']:
tags = self.get_tags_of_port(doc['port'])
doc['tags'] = ', '.join(tags)
if self.args['startup']:
try:
# first try running process (startup may be modified
# via start command)
doc['startup command'] = ' '.join(processes[doc['port']]
.cmdline())
except KeyError:
# if not running, use stored startup_info
doc['startup command'] = startup[str(doc['port'])]
if (self.args['json']):
print(json.dumps(print_docs))
else:
print()
print_docs.append(None)
print_table(print_docs)
if self.loaded_args.get('auth'):
print('\tauth: "%s:%s"' % (self.loaded_args.get('username'),
self.loaded_args.get('password')))
def kill(self):
self.discover()
# get matching tags, can only send signals to running nodes
matches = self._get_ports_from_args(self.args, 'running')
processes = self._get_processes()
# convert signal to int, default is SIGTERM for graceful shutdown
sig = self.args.get('signal') or 'SIGTERM'
if os.name == 'nt':
sig = signal.CTRL_BREAK_EVENT
if type(sig) == int:
pass
elif isinstance(sig, str):
try:
sig = int(sig)
except ValueError:
try:
sig = getattr(signal, sig)
except AttributeError:
raise SystemExit("can't parse signal '%s', use integer or "
"signal name (SIGxxx)." % sig)
for port in processes:
# only send signal to matching processes
if port in matches:
p = processes[port]
p.send_signal(sig)
if self.args['verbose']:
print(" %s on port %i, pid=%i" % (p.name, port, p.pid))
print("sent signal %s to %i process%s."
% (sig, len(matches), '' if len(matches) == 1 else 'es'))
# there is a very brief period in which nodes are not reachable
# anymore, but the port is not torn down fully yet and an immediate
# start command would fail. This very short sleep prevents that case,
        # and it is practically not noticeable by users.
time.sleep(0.1)
# refresh discover
self.discover()
def restart(self):
# get all running processes
processes = self._get_processes()
procs = [processes[k] for k in list(processes.keys())]
# stop nodes via stop command
self.stop()
# wait until all processes terminate
psutil.wait_procs(procs)
# start nodes again via start command
self.start()
# --- below are api helper methods, can be called after creating an
# MLaunchTool() object
def discover(self):
"""
Fetch state for each processes.
Build the self.cluster_tree, self.cluster_tags, self.cluster_running
data structures, needed for sub-commands start, stop, list.
"""
# need self.args['command'] so fail if it's not available
if (not self.args or 'command' not in self.args or not
self.args['command']):
return
# load .mlaunch_startup file for start, stop, list, use current
# parameters for init
if self.args['command'] == 'init':
self.loaded_args = self.args
self.loaded_unknown_args = self.unknown_args
else:
if not self._load_parameters():
startup_file = os.path.join(self.dir, ".mlaunch_startup")
raise SystemExit("Can't read %s, use 'mlaunch init ...' first."
% startup_file)
self.ssl_pymongo_options = self._get_ssl_pymongo_options(self.loaded_args)
self.tls_pymongo_options = self._get_tls_pymongo_options(self.loaded_args)
# reset cluster_* variables
self.cluster_tree = {}
self.cluster_tags = defaultdict(list)
self.cluster_running = {}
# get shard names
shard_names = self._get_shard_names(self.loaded_args)
# some shortcut variables
is_sharded = ('sharded' in self.loaded_args and
self.loaded_args['sharded'] is not None)
is_replicaset = ('replicaset' in self.loaded_args and
self.loaded_args['replicaset'])
is_single = 'single' in self.loaded_args and self.loaded_args['single']
has_arbiter = ('arbiter' in self.loaded_args and
self.loaded_args['arbiter'])
# determine number of nodes to inspect
if is_sharded:
num_config = self.loaded_args['config']
# at least one temp. mongos for adding shards, will be
# killed later on
num_mongos = max(1, self.loaded_args['mongos'])
num_shards = len(shard_names)
else:
num_shards = 1
num_config = 0
num_mongos = 0
num_nodes_per_shard = self.loaded_args['nodes'] if is_replicaset else 1
if has_arbiter:
num_nodes_per_shard += 1
num_nodes = num_shards * num_nodes_per_shard + num_config + num_mongos
current_port = self.loaded_args['port']
# tag all nodes with 'all'
self.cluster_tags['all'].extend(list(range(current_port,
current_port + num_nodes)))
# tag all nodes with their port number (as string) and whether
# they are running
for port in range(current_port, current_port + num_nodes):
self.cluster_tags[str(port)].append(port)
running = self.is_running(port)
self.cluster_running[port] = running
self.cluster_tags['running' if running else 'down'].append(port)
# find all mongos
for i in range(num_mongos):
port = i + current_port
# add mongos to cluster tree
self.cluster_tree.setdefault('mongos', []).append(port)
# add mongos to tags
self.cluster_tags['mongos'].append(port)
current_port += num_mongos
# find all mongods (sharded, replicaset or single)
if shard_names is None:
shard_names = [None]
for shard in shard_names:
port_range = list(range(current_port,
current_port + num_nodes_per_shard))
# all of these are mongod nodes
self.cluster_tags['mongod'].extend(port_range)
if shard:
# if this is a shard, store in cluster_tree and tag shard name
self.cluster_tree.setdefault('shard', []).append(port_range)
self.cluster_tags[shard].extend(port_range)
if is_replicaset:
# get replica set states
rs_name = shard if shard else self.loaded_args['name']
try:
mrsc = self.client(
','.join('localhost:%i' % i for i in port_range),
replicaSet=rs_name)
# primary, secondaries, arbiters
# @todo: this is no longer working because MongoClient
# is now non-blocking
if mrsc.primary:
self.cluster_tags['primary'].append(mrsc.primary[1])
self.cluster_tags['secondary'].extend(list(map
(itemgetter(1),
mrsc.secondaries)))
self.cluster_tags['arbiter'].extend(list(map(itemgetter(1),
mrsc.arbiters)))
# secondaries in cluster_tree (order is now important)
self.cluster_tree.setdefault('secondary', [])
for i, secondary in enumerate(sorted(map
(itemgetter(1),
mrsc.secondaries))):
if len(self.cluster_tree['secondary']) <= i:
self.cluster_tree['secondary'].append([])
self.cluster_tree['secondary'][i].append(secondary)
except (ConnectionFailure, ConfigurationError):
pass
elif is_single:
self.cluster_tags['single'].append(current_port)
# increase current_port
current_port += num_nodes_per_shard
# add config server to cluster tree
self.cluster_tree.setdefault('config', []).append(port)
# If not CSRS, set the number of config servers to be 1 or 3
# This is needed, otherwise `mlaunch init --sharded 2 --replicaset
# --config 2` on <3.3.0 will crash
if not self.args.get('csrs') and self.args['command'] == 'init':
if num_config >= 3:
num_config = 3
else:
num_config = 1
for i in range(num_config):
port = i + current_port
try:
mc = self.client('localhost:%i' % port)
mc.admin.command('ping')
running = True
except ConnectionFailure:
# node not reachable
running = False
# add config server to cluster tree
self.cluster_tree.setdefault('config', []).append(port)
# add config server to tags
self.cluster_tags['config'].append(port)
self.cluster_tags['mongod'].append(port)
current_port += num_mongos
def is_running(self, port):
"""Return True if a host on a specific port is running."""
try:
con = self.client('localhost:%s' % port)
con.admin.command('ping')
return True
except (AutoReconnect, ConnectionFailure, OperationFailure):
# Catch OperationFailure to work around SERVER-31916.
return False
def get_tagged(self, tags):
"""
Tag format.
        The format for the tags list: the tags mongos, config, shard and
        secondary may be given in the form (tag, number), e.g. ('mongos', 2),
        which references the second mongos in the list. All other tags are
        plain strings, e.g. 'primary'.
"""
        # if tags is a simple string, make it a list (note: tuples like
        # ('mongos', 2) must be in a surrounding list)
        if isinstance(tags, str):
            tags = [tags]
nodes = set(self.cluster_tags['all'])
for tag in tags:
if re.match(r"\w+ \d{1,2}", tag):
# special case for tuple tags: mongos, config, shard,
# secondary. These can contain a number
tag, number = tag.split()
try:
branch = self.cluster_tree[tag][int(number) - 1]
except (IndexError, KeyError):
continue
if hasattr(branch, '__iter__'):
subset = set(branch)
else:
subset = set([branch])
else:
# otherwise use tags dict to get the subset
subset = set(self.cluster_tags[tag])
nodes = nodes.intersection(subset)
return nodes
def get_tags_of_port(self, port):
"""
Get all tags related to a given port.
        This is the inverse of what is stored in self.cluster_tags.
"""
return(sorted([tag for tag in self.cluster_tags
if port in self.cluster_tags[tag]]))
def wait_for(self, ports, interval=1.0, timeout=30, to_start=True):
"""
Spawn threads to ping host using a list of ports.
Returns when all hosts are running (if to_start=True) / shut down (if
to_start=False).
"""
threads = []
queue = Queue.Queue()
for port in ports:
threads.append(threading.Thread(target=wait_for_host, args=(
port, interval, timeout, to_start, queue,
self.ssl_pymongo_options, self.tls_pymongo_options)))
if self.args and 'verbose' in self.args and self.args['verbose']:
print("waiting for nodes %s..."
% ('to start' if to_start else 'to shutdown'))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# get all results back and return tuple
return tuple(queue.get_nowait() for _ in ports)
# --- below here are internal helper methods, do not call externally ---
def _load_parameters(self):
"""
Load the .mlaunch_startup file that exists in each datadir.
Handles different protocol versions.
"""
datapath = self.dir
startup_file = os.path.join(datapath, '.mlaunch_startup')
if not os.path.exists(startup_file):
return False
in_dict = json.load(open(startup_file, 'rb'))
# handle legacy version without versioned protocol
if 'protocol_version' not in in_dict:
in_dict['protocol_version'] = 1
self.loaded_args = in_dict
self.startup_info = {}
# hostname was added recently
self.loaded_args['hostname'] = socket.gethostname()
elif in_dict['protocol_version'] == 2:
self.startup_info = in_dict['startup_info']
self.loaded_unknown_args = in_dict['unknown_args']
self.loaded_args = in_dict['parsed_args']
# changed 'authentication' to 'auth', if present (from old env) rename
if 'authentication' in self.loaded_args:
self.loaded_args['auth'] = self.loaded_args['authentication']
del self.loaded_args['authentication']
return True
def _store_parameters(self):
"""Store startup params and config in datadir/.mlaunch_startup."""
datapath = self.dir
out_dict = {
'protocol_version': 2,
'mtools_version': __version__,
'parsed_args': self.args,
'unknown_args': self.unknown_args,
'startup_info': self.startup_info
}
if not os.path.exists(datapath):
os.makedirs(datapath)
try:
json.dump(out_dict,
open(os.path.join(datapath,
'.mlaunch_startup'), 'w'), indent=-1)
except Exception as ex:
print("ERROR STORING Parameters:", ex)
def _create_paths(self, basedir, name=None):
"""Create datadir and subdir paths."""
if name:
datapath = os.path.join(basedir, name)
else:
datapath = basedir
dbpath = os.path.join(datapath, 'db')
if not os.path.exists(dbpath):
os.makedirs(dbpath)
if self.args['verbose']:
print('creating directory: %s' % dbpath)
return datapath
def _get_ports_from_args(self, args, extra_tag):
tags = []
if 'tags' not in args:
args['tags'] = []
for tag1, tag2 in zip(args['tags'][:-1], args['tags'][1:]):
if re.match(r'^\d{1,2}$', tag1):
print("warning: ignoring numeric value '%s'" % tag1)
continue
if re.match(r'^\d{1,2}$', tag2):
if tag1 in ['mongos', 'shard', 'secondary', 'config']:
# combine tag with number, separate by string
tags.append('%s %s' % (tag1, tag2))
continue
else:
print("warning: ignoring numeric value '%s' after '%s'"
% (tag2, tag1))
tags.append(tag1)
if len(args['tags']) > 0:
tag = args['tags'][-1]
if not re.match(r'^\d{1,2}$', tag):
tags.append(tag)
tags.append(extra_tag)
matches = self.get_tagged(tags)
return matches
def _filter_valid_arguments(self, arguments, binary="mongod",
config=False):
"""
        Return the accepted arguments, joined into a single string.
        Check which arguments in the list are accepted by the specified binary
        (mongod, mongos). If an argument does not start with '-' but its
        preceding argument was accepted, then it is accepted as well. For
        example, in ['--slowms', '1000'] both arguments are accepted for a
        mongod.
"""
# get the help list of the binary
if self.args and self.args['binarypath']:
binary = os.path.join(self.args['binarypath'], binary)
try:
out = check_mongo_server_output(binary, '--help')
except Exception:
            raise SystemExit("Fatal error trying to get output from `%s`. "
                             "Is the binary in your path?" % binary)
accepted_arguments = []
# extract all arguments starting with a '-'
        for line in out.decode('utf-8').split('\n'):
line = line.lstrip()
if line.startswith('-'):
argument = line.split()[0]
accepted_arguments.append(argument)
# add undocumented options
accepted_arguments.append('--setParameter')
if binary.endswith('mongod'):
accepted_arguments.append('--wiredTigerEngineConfigString')
# filter valid arguments
result = []
for i, arg in enumerate(arguments):
if arg.startswith('-'):
# check if the binary accepts this argument
# or special case -vvv for any number of v
argname = arg.split('=', 1)[0]
if (binary.endswith('mongod') and config and
argname in self.UNSUPPORTED_CONFIG_ARGS):
continue
elif argname in accepted_arguments or re.match(r'-v+', arg):
result.append(arg)
elif (binary.endswith('mongod') and
argname in self.UNDOCUMENTED_MONGOD_ARGS):
result.append(arg)
elif self.ignored_arguments.get(binary + argname) is None:
# warn once for each combination of binary and unknown arg
self.ignored_arguments[binary + argname] = True
if not (binary.endswith("mongos") and
arg in self.UNSUPPORTED_MONGOS_ARGS):
print("warning: ignoring unknown argument %s for %s" %
(arg, binary))
elif i > 0 and arguments[i - 1] in result:
# if it doesn't start with a '-', it could be the value of
# the last argument, e.g. `--slowms 1000`
# NB: arguments are always quoted
result.append(f'"{arg}"')
# return valid arguments as joined string
return ' '.join(result)
def _get_ssl_server_args(self):
s = ''
if not self.ssl_server_args:
return s
for parser in self.ssl_args, self.ssl_server_args:
for action in parser._group_actions:
name = action.dest
value = self.args.get(name)
if value:
if value is True:
s += ' --%s' % (name,)
else:
s += ' --%s "%s"' % (name, value)
return s
def _get_ssl_pymongo_options(self, args):
opts = {}
if not self.ssl_server_args:
return opts
for parser in [self.ssl_server_args]:
for action in parser._group_actions:
name = action.dest
value = args.get(name)
if value:
opts['ssl'] = True
opts['ssl_cert_reqs'] = ssl.CERT_NONE
for parser in self.ssl_args, self.ssl_client_args:
for action in parser._group_actions:
name = action.dest
value = args.get(name)
if value:
opts['ssl'] = True
if name == 'sslClientCertificate':
opts['ssl_certfile'] = value
elif name == 'sslClientPEMKeyFile':
opts['ssl_keyfile'] = value
elif name == 'sslClientPEMKeyPassword':
opts['ssl_pem_passphrase'] = value
elif name == 'sslAllowInvalidCertificates':
opts['ssl_cert_reqs'] = ssl.CERT_OPTIONAL
elif name == 'sslAllowInvalidHostnames':
opts['ssl_match_hostname'] = False
elif name == 'sslCAFile':
opts['ssl_ca_certs'] = value
elif name == 'sslCRLFile':
opts['ssl_crlfile'] = value
return opts
def _get_tls_server_args(self):
s = ''
if not self.tls_server_args:
return s
for parser in self.tls_args, self.tls_server_args:
for action in parser._group_actions:
name = action.dest
value = self.args.get(name)
if value:
if value is True:
s += ' --%s' % (name,)
else:
s += ' --%s "%s"' % (name, value)
return s
def _get_tls_pymongo_options(self, args):
opts = {}
if not self.tls_server_args:
return opts
for parser in [self.tls_server_args]:
for action in parser._group_actions:
name = action.dest
value = args.get(name)
if value:
opts['tls'] = True
opts['tls_cert_reqs'] = ssl.CERT_NONE
for parser in self.tls_args, self.tls_client_args:
for action in parser._group_actions:
name = action.dest
value = args.get(name)
if value:
opts['tls'] = True
# TLS parameters require PyMongo 3.9.0+
# https://api.mongodb.com/python/3.9.0/changelog.html
if name == 'tlsCertificateKeyFile':
opts['tlsCertificateKeyFile'] = value
elif name == 'tlsCertificateKeyFilePassword':
opts['tlsCertificateKeyFilePassword'] = value
elif name == 'tlsAllowInvalidCertificates':
opts['tlsAllowInvalidCertificates'] = ssl.CERT_OPTIONAL
elif name == 'tlsAllowInvalidHostnames':
opts['tlsAllowInvalidHostnames'] = False
elif name == 'tlsCAFile':
opts['tlsCAFile'] = value
elif name == 'tlsCRLFile':
opts['tlsCRLFile'] = value
return opts
def _get_shard_names(self, args):
"""
Get the shard names based on the self.args['sharded'] parameter.
If it's a number, create shard names of type shard##, where ## is a
2-digit number. Returns a list [None] if no shards are present.
"""
if 'sharded' in args and args['sharded']:
if len(args['sharded']) == 1:
try:
# --sharded was a number, name shards shard01, shard02,
# ... (only works with replica sets)
n_shards = int(args['sharded'][0])
shard_names = ['shard%.2i'
% (i + 1) for i in range(n_shards)]
except ValueError:
# --sharded was a string, use it as name for the one shard
shard_names = args['sharded']
else:
shard_names = args['sharded']
else:
shard_names = [None]
return shard_names
def _get_last_error_log(self, command_str):
logpath = re.search(r'--logpath ([^\s]+)', command_str)
loglines = ''
try:
            with open(logpath.group(1), 'r') as logfile:
for line in logfile:
if not line.startswith('----- BEGIN BACKTRACE -----'):
loglines += line
else:
break
except IOError:
pass
return loglines
def _start_on_ports(self, ports, wait=False, override_auth=False):
if override_auth and self.args['verbose']:
print("creating cluster without auth for setup, "
"will enable auth at the end...")
for port in ports:
command_str = self.startup_info[str(port)]
if override_auth:
# this is to set up sharded clusters without auth first,
# then relaunch with auth
command_str = re.sub(r'--keyFile \S+', '', command_str)
try:
if os.name == 'nt':
subprocess.check_call(command_str, shell=True)
                    # creating a subprocess on Windows doesn't wait for output,
                    # so wait a few seconds for the mongod instance to come up
time.sleep(5)
else:
subprocess.check_output([command_str], shell=True,
stderr=subprocess.STDOUT)
binary = command_str.split()[0]
if '--configsvr' in command_str:
binary = 'config server'
if self.args['verbose']:
print("launching: %s" % command_str)
else:
print("launching: %s on port %s" % (binary, port))
except subprocess.CalledProcessError as e:
print(e.output)
print(self._get_last_error_log(command_str), file=sys.stderr)
raise SystemExit("can't start process, return code %i. "
"tried to launch: %s"
% (e.returncode, command_str))
if wait:
self.wait_for(ports)
def _initiate_replset(self, port, name, maxwait=30):
"""Initiate replica set."""
if not self.args['replicaset'] and name != 'configRepl':
if self.args['verbose']:
print('Skipping replica set initialization for %s' % name)
return
con = self.client('localhost:%i' % port)
try:
rs_status = con['admin'].command({'replSetGetStatus': 1})
return rs_status
except OperationFailure:
# not initiated yet
for i in range(maxwait):
try:
con['admin'].command({'replSetInitiate':
self.config_docs[name]})
break
except OperationFailure as e:
                    print(str(e) + " - will retry")
time.sleep(1)
if self.args['verbose']:
print("initializing replica set '%s' with configuration: %s"
% (name, self.config_docs[name]))
print("replica set '%s' initialized." % name)
def _add_user(self, port, name, password, database, roles):
con = self.client('localhost:%i' % port, serverSelectionTimeoutMS=10000)
ismaster = con['admin'].command('isMaster')
set_name = ismaster.get('setName')
if set_name:
con.close()
con = self.client('localhost:%i' % port, replicaSet=set_name,
serverSelectionTimeoutMS=10000)
v = ismaster.get('maxWireVersion', 0)
if v >= 7:
# Until drivers have implemented SCRAM-SHA-256, use old mechanism.
opts = {'mechanisms': ['SCRAM-SHA-1']}
else:
opts = {}
if database == "$external":
password = None
try:
con[database].command("createUser", name, pwd=password, roles=roles,
**opts)
except OperationFailure as e:
raise e
def _get_processes(self):
all_ports = self.get_tagged(['running'])
process_dict = {}
for p in psutil.process_iter():
# deal with zombie process errors in OSX
try:
name = p.name()
except psutil.NoSuchProcess:
continue
# skip all but mongod / mongos
if os.name == 'nt':
if name not in ['mongos.exe', 'mongod.exe']:
continue
else:
if name not in ['mongos', 'mongod']:
continue
port = None
for possible_port in self.startup_info:
# compare ports based on command line argument
startup = self.startup_info[possible_port].split()
try:
p_port = p.cmdline()[p.cmdline().index('--port') + 1]
startup_port = startup[startup.index('--port') + 1]
except ValueError:
continue
if str(p_port) == str(startup_port):
port = int(possible_port)
break
# only consider processes belonging to this environment
if port in all_ports:
process_dict[port] = p
return process_dict
def _wait_for_primary(self):
hosts = ([x['host']
for x in self.config_docs[self.args['name']]['members']])
rs_name = self.config_docs[self.args['name']]['_id']
mrsc = self.client(hosts, replicaSet=rs_name,
serverSelectionTimeoutMS=30000)
if mrsc.is_primary:
# update cluster tags now that we have a primary
self.cluster_tags['primary'].append(mrsc.primary[1])
self.cluster_tags['secondary'].extend(list(map(itemgetter(1),
mrsc.secondaries)))
self.cluster_tags['arbiter'].extend(list(map(itemgetter(1),
mrsc.arbiters)))
# secondaries in cluster_tree (order is now important)
self.cluster_tree.setdefault('secondary', [])
for i, secondary in enumerate(sorted(map(itemgetter(1),
mrsc.secondaries))):
if len(self.cluster_tree['secondary']) <= i:
self.cluster_tree['secondary'].append([])
self.cluster_tree['secondary'][i].append(secondary)
return True
return False
# --- below are command line constructor methods, that build the command
# --- line strings to be called
def _construct_cmdlines(self):
"""
Top-level _construct_* method.
From here, it will branch out to the different cases:
_construct_sharded, _construct_replicaset, _construct_single. These can
themselves call each other (for example sharded needs to create the
shards with either replicaset or single node). At the lowest level, the
construct_mongod, _mongos, _config will create the actual command line
strings and store them in self.startup_info.
"""
if self.args['sharded']:
# construct startup string for sharded environments
self._construct_sharded()
elif self.args['single']:
# construct startup string for single node environment
self._construct_single(self.dir, self.args['port'])
elif self.args['replicaset']:
# construct startup strings for a non-sharded replica set
self._construct_replset(self.dir, self.args['port'],
self.args['name'],
list(range(self.args['nodes'])),
self.args['arbiter'])
# discover current setup
self.discover()
def _construct_sharded(self):
"""Construct command line strings for a sharded cluster."""
num_mongos = self.args['mongos'] if self.args['mongos'] > 0 else 1
shard_names = self._get_shard_names(self.args)
# create shards as stand-alones or replica sets
nextport = self.args['port'] + num_mongos
for shard in shard_names:
if (self.args['single'] and
LooseVersion(self.current_version) >= LooseVersion("3.6.0")):
                errmsg = " \n * In MongoDB 3.6 and above a shard must be " \
                         "made up of a replica set. Please use the " \
                         "--replicaset option when starting a sharded cluster.*"
raise SystemExit(errmsg)
elif (self.args['single'] and
LooseVersion(self.current_version) < LooseVersion("3.6.0")):
self.shard_connection_str.append(
self._construct_single(
self.dir, nextport, name=shard, extra='--shardsvr'))
nextport += 1
elif self.args['replicaset']:
self.shard_connection_str.append(
self._construct_replset(
self.dir, nextport, shard,
num_nodes=list(range(self.args['nodes'])),
arbiter=self.args['arbiter'], extra='--shardsvr'))
nextport += self.args['nodes']
if self.args['arbiter']:
nextport += 1
# start up config server(s)
config_string = []
# SCCC config servers (MongoDB <3.3.0)
if not self.args['csrs'] and self.args['config'] >= 3:
config_names = ['config1', 'config2', 'config3']
else:
config_names = ['config']
# CSRS config servers (MongoDB >=3.1.0)
if self.args['csrs']:
config_string.append(self._construct_config(self.dir, nextport,
"configRepl", True))
else:
for name in config_names:
self._construct_config(self.dir, nextport, name)
config_string.append('%s:%i' % (self.args['hostname'],
nextport))
nextport += 1
# multiple mongos use <datadir>/mongos/ as subdir for log files
if num_mongos > 1:
mongosdir = os.path.join(self.dir, 'mongos')
if not os.path.exists(mongosdir):
if self.args['verbose']:
print("creating directory: %s" % mongosdir)
os.makedirs(mongosdir)
# start up mongos, but put them to the front of the port range
nextport = self.args['port']
for i in range(num_mongos):
if num_mongos > 1:
mongos_logfile = 'mongos/mongos_%i.log' % nextport
else:
mongos_logfile = 'mongos.log'
self._construct_mongos(os.path.join(self.dir, mongos_logfile),
nextport, ','.join(config_string))
nextport += 1
def _construct_replset(self, basedir, portstart, name, num_nodes,
arbiter, extra=''):
"""
Construct command line strings for a replicaset.
Handles single set or sharded cluster.
"""
self.config_docs[name] = {'_id': name, 'members': []}
# Construct individual replica set nodes
for i in num_nodes:
datapath = self._create_paths(basedir, '%s/rs%i' % (name, i + 1))
self._construct_mongod(os.path.join(datapath, 'db'),
os.path.join(datapath, 'mongod.log'),
portstart + i, replset=name, extra=extra)
host = '%s:%i' % (self.args['hostname'], portstart + i)
member_config = {
'_id': len(self.config_docs[name]['members']),
'host': host,
}
# First node gets increased priority.
if i == 0 and self.args['priority']:
member_config['priority'] = 10
if i >= 7:
member_config['votes'] = 0
member_config['priority'] = 0
self.config_docs[name]['members'].append(member_config)
# launch arbiter if True
if arbiter:
datapath = self._create_paths(basedir, '%s/arb' % (name))
self._construct_mongod(os.path.join(datapath, 'db'),
os.path.join(datapath, 'mongod.log'),
portstart + self.args['nodes'],
replset=name)
host = '%s:%i' % (self.args['hostname'],
portstart + self.args['nodes'])
(self.config_docs[name]['members']
.append({'_id': len(self.config_docs[name]['members']),
'host': host,
'arbiterOnly': True}))
return(name + '/' +
','.join([c['host']
for c in self.config_docs[name]['members']]))
def _construct_config(self, basedir, port, name=None, isreplset=False):
"""Construct command line strings for a config server."""
if isreplset:
return self._construct_replset(basedir=basedir, portstart=port,
name=name,
num_nodes=list(range(
self.args['config'])),
arbiter=False, extra='--configsvr')
else:
datapath = self._create_paths(basedir, name)
self._construct_mongod(os.path.join(datapath, 'db'),
os.path.join(datapath, 'mongod.log'),
port, replset=None, extra='--configsvr')
def _construct_single(self, basedir, port, name=None, extra=''):
"""
Construct command line strings for a single node.
Handles shards and stand-alones.
"""
datapath = self._create_paths(basedir, name)
self._construct_mongod(os.path.join(datapath, 'db'),
os.path.join(datapath, 'mongod.log'), port,
replset=None, extra=extra)
host = '%s:%i' % (self.args['hostname'], port)
return host
def _construct_mongod(self, dbpath, logpath, port, replset=None, extra=''):
"""Construct command line strings for mongod process."""
rs_param = ''
if replset:
rs_param = '--replSet %s' % replset
auth_param = ''
if self.args['auth']:
auth_param = '--auth'
if '--keyFile' not in self.unknown_args:
key_path = os.path.abspath(os.path.join(self.dir, 'keyfile'))
auth_param = f'{auth_param} --keyFile "{key_path}"'
if self.unknown_args:
config = '--configsvr' in extra
extra = self._filter_valid_arguments(self.unknown_args, "mongod",
config=config) + ' ' + extra
# set WiredTiger cache size to 1 GB by default
if ('--wiredTigerCacheSizeGB' not in extra and
self._filter_valid_arguments(['--wiredTigerCacheSizeGB'],
'mongod')):
extra += ' --wiredTigerCacheSizeGB 1 '
# Exit with error if hostname is specified but not bind_ip options
if (self.args['hostname'] != 'localhost'
and LooseVersion(self.current_version) >= LooseVersion("3.6.0")
and (self.args['sharded'] or self.args['replicaset'])
and '--bind_ip' not in extra):
os.removedirs(dbpath)
            errmsg = " \n * If a hostname is specified, please include the "\
                     "'--bind_ip_all' or '--bind_ip' option when deploying "\
                     "replica sets or a sharded cluster with MongoDB version "\
                     "3.6.0 or greater"
raise SystemExit(errmsg)
extra += self._get_ssl_server_args()
path = self.args['binarypath'] or ''
if os.name == 'nt':
newdbpath = dbpath.replace('\\', '\\\\')
newlogpath = logpath.replace('\\', '\\\\')
command_str = ("start /b \"\" \"%s\" %s --dbpath \"%s\" "
" --logpath \"%s\" --port %i "
"%s %s" % (os.path.join(path, 'mongod.exe'),
rs_param, newdbpath, newlogpath, port,
auth_param, extra))
else:
command_str = ("\"%s\" %s --dbpath \"%s\" --logpath \"%s\" "
"--port %i --fork "
"%s %s" % (os.path.join(path, 'mongod'), rs_param,
dbpath, logpath, port, auth_param,
extra))
# store parameters in startup_info
self.startup_info[str(port)] = command_str
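    # On POSIX systems the stored startup string looks roughly like this
    # (paths and port illustrative):
    #   "mongod" --replSet shard01 --dbpath "/data/shard01/rs1/db"
    #       --logpath "/data/shard01/rs1/mongod.log" --port 27018 --fork --shardsvr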
def _construct_mongos(self, logpath, port, configdb):
"""Construct command line strings for a mongos process."""
extra = ''
auth_param = ''
if self.args['auth']:
auth_param = '--auth'
if '--keyFile' not in self.unknown_args:
key_path = os.path.abspath(os.path.join(self.dir, 'keyfile'))
auth_param = f'{auth_param} --keyFile "{key_path}"'
if self.unknown_args:
extra = self._filter_valid_arguments(self.unknown_args,
"mongos") + extra
extra += ' ' + self._get_ssl_server_args()
path = self.args['binarypath'] or ''
if os.name == 'nt':
newlogpath = logpath.replace('\\', '\\\\')
command_str = ("start /b %s --logpath \"%s\" --port %i --configdb %s "
"%s %s " % (os.path.join(path, 'mongos'),
newlogpath, port, configdb,
auth_param, extra))
else:
command_str = ("%s --logpath \"%s\" --port %i --configdb %s %s %s "
"--fork" % (os.path.join(path, 'mongos'), logpath,
port, configdb, auth_param, extra))
# store parameters in startup_info
self.startup_info[str(port)] = command_str
def _read_key_file(self, keyfile=None):
        if not keyfile:
            keyfile = os.path.join(self.dir, 'keyfile')
        # open in text mode so the joined result is a str
        with open(keyfile, 'r') as f:
            return ''.join(f.readlines())
def main():
tool = MLaunchTool()
tool.run()
if __name__ == '__main__':
sys.exit(main())
| rueckstiess/mtools | mtools/mlaunch/mlaunch.py | Python | apache-2.0 | 96,965 | 0.000423 |
import logging
from collections import UserDict
from pajbot.models.db import DBManager, Base
from sqlalchemy import Column, Integer, String
from sqlalchemy.dialects.mysql import TEXT
log = logging.getLogger('pajbot')
class Setting(Base):
__tablename__ = 'tb_settings'
id = Column(Integer, primary_key=True)
setting = Column(String(128))
value = Column(TEXT)
type = Column(String(32))
def __init__(self, setting, value, type):
self.id = None
self.setting = setting
self.value = value
self.type = type
def parse_value(self):
try:
if self.type == 'int':
return int(self.value)
elif self.type == 'string':
return self.value
elif self.type == 'list':
return self.value.split(',')
elif self.type == 'bool':
return int(self.value) == 1
else:
log.error('Invalid setting type: {0}'.format(self.type))
except Exception:
log.exception('Exception caught when loading setting')
return None
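    # Illustrative parse_value() results (settings are examples only):
    #   type='int',  value='42'  -> 42
    #   type='list', value='a,b' -> ['a', 'b']
    #   type='bool', value='1'   -> True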
class SettingManager(UserDict):
    def __init__(self, overrides=None):
UserDict.__init__(self)
self.db_session = DBManager.create_session()
self.default_settings = {
'broadcaster': 'test_broadcaster',
'ban_ascii': True,
'lines_offline': True,
'parse_pyramids': False,
'parse_emote_combo': False,
'check_links': True,
'warnings_enabled': True,
'warnings_total_chances': 2,
'warnings_redis_prefix': '',
'warnings_length': 600,
'warnings_base_timeout': 10,
}
        self.default_settings.update(overrides or {})
def commit(self):
self.db_session.commit()
def reload(self):
self.data = self.default_settings
for setting in self.db_session.query(Setting):
parsed_value = setting.parse_value()
if parsed_value is not None:
                self.data[setting.setting] = parsed_value
return self
| gigglearrows/anniesbot | pajbot/models/setting.py | Python | mit | 2,197 | 0 |
import json
import os
import pexpect
import re
import time
from behave import step
import nmci
@step(u'Autocomplete "{cmd}" in bash and execute')
def autocomplete_command(context, cmd):
bash = context.pexpect_spawn("bash")
bash.send(cmd)
bash.send('\t')
time.sleep(1)
bash.send('\r\n')
time.sleep(1)
bash.sendeof()
@step(u'Check RSS writable memory in noted value "{i2}" differs from "{i1}" less than "{dif}"')
def check_rss_rw_dif(context, i2, i1, dif):
# def sum_rss_writable_memory(context, pmap_raw):
# total = 0
# for line in pmap_raw.split("\n"):
# vals = line.split()
# if (len(vals) > 2):
# total += int(vals[2])
# return total
#
# sum2 = int(sum_rss_writable_memory(context, context.noted[i2]))
# sum1 = int(sum_rss_writable_memory(context, context.noted[i1]))
sum2 = int(context.noted[i2])
sum1 = int(context.noted[i1])
assert (sum1 + int(dif) > sum2), \
"rw RSS mem: %d + %s !> %d !" % (sum1, dif, sum2)
@step(u'Check noted value "{i2}" difference from "{i1}" is lower than "{dif}"')
def check_dif_in_values(context, i2, i1, dif):
assert (int(context.noted[i1].strip()) + int(dif)) > int(context.noted[i2].strip()), \
"Noted values: %s + %s !> %s !" % (context.noted[i1].strip(), dif, context.noted[i2].strip())
@step(u'Check noted values "{i1}" and "{i2}" are the same')
def check_same_noted_values(context, i1, i2):
assert context.noted[i1].strip() == context.noted[i2].strip(), \
"Noted values: %s != %s !" % (context.noted[i1].strip(), context.noted[i2].strip())
@step(u'Check noted values "{i1}" and "{i2}" are not the same')
def check_same_noted_values_equals(context, i1, i2):
assert context.noted[i1].strip() != context.noted[i2].strip(), \
"Noted values: %s == %s !" % (context.noted[i1].strip(), context.noted[i2].strip())
@step(u'Check noted output contains "{pattern}"')
def check_noted_output_contains(context, pattern):
assert re.search(pattern, context.noted['noted-value']) is not None, "Noted output does not contain the pattern %s" % pattern
@step(u'Check noted output does not contain "{pattern}"')
def check_noted_output_not_contains(context, pattern):
assert re.search(pattern, context.noted['noted-value']) is None, "Noted output contains the pattern %s" % pattern
@step(u'Execute "{command}"')
def execute_command(context, command):
assert context.command_code(command) == 0
@step(u'Execute "{command}" without waiting for process to finish')
def execute_command_nowait(context, command):
context.pexpect_service(command, shell=True)
@step(u'Execute "{command}" without output redirect')
def execute_command_noout(context, command):
context.run(command, stdout=None, stderr=None)
@step(u'Execute "{command}" for "{number}" times')
def execute_multiple_times(context, command, number):
orig_nm_pid = nmci.lib.nm_pid()
i = 0
while i < int(number):
context.command_code(command)
curr_nm_pid = nmci.lib.nm_pid()
assert curr_nm_pid == orig_nm_pid, 'NM crashed as original pid was %s but now is %s' %(orig_nm_pid, curr_nm_pid)
i += 1
@step(u'"{command}" fails')
def wait_for_process(context, command):
assert context.command_code(command) != 0
time.sleep(0.1)
@step(u'Restore hostname from the noted value')
def restore_hostname(context):
context.command_code('hostname %s' % context.noted['noted-value'])
time.sleep(0.5)
@step(u'Hostname is visible in log "{log}"')
@step(u'Hostname is visible in log "{log}" in "{seconds}" seconds')
def hostname_visible(context, log, seconds=1):
seconds = int(seconds)
orig_seconds = seconds
cmd = "grep $(hostname -s) '%s'" %log
while seconds > 0:
if context.command_code(cmd) == 0:
return True
seconds = seconds - 1
time.sleep(1)
raise Exception('Hostname not visible in log in %d seconds' % (orig_seconds))
@step(u'Hostname is not visible in log "{log}"')
@step(u'Hostname is not visible in log "{log}" for full "{seconds}" seconds')
def hostname_not_visible(context, log, seconds=1):
seconds = int(seconds)
orig_seconds = seconds
cmd = "grep $(hostname -s) '%s'" %log
while seconds > 0:
if context.command_code(cmd) != 0:
return True
seconds = seconds - 1
time.sleep(1)
raise Exception('Hostname visible in log after %d seconds' % (orig_seconds - seconds))
@step(u'Nameserver "{server}" is set')
@step(u'Nameserver "{server}" is set in "{seconds}" seconds')
@step(u'Domain "{server}" is set')
@step(u'Domain "{server}" is set in "{seconds}" seconds')
def get_nameserver_or_domain(context, server, seconds=1):
if context.command_code('systemctl is-active systemd-resolved.service -q') == 0:
        # We have systemd-resolved running
cmd = 'resolvectl dns; resolvectl domain'
else:
cmd = 'cat /etc/resolv.conf'
return check_pattern_command(context, cmd, server, seconds)
@step(u'Nameserver "{server}" is not set')
@step(u'Nameserver "{server}" is not set in "{seconds}" seconds')
@step(u'Domain "{server}" is not set')
@step(u'Domain "{server}" is not set in "{seconds}" seconds')
def get_nameserver_or_domain_not(context, server, seconds=1):
if context.command_code('systemctl is-active systemd-resolved.service -q') == 0:
        # We have systemd-resolved running
cmd = 'systemd-resolve --status |grep -A 100 Link'
else:
cmd = 'cat /etc/resolv.conf'
return check_pattern_command(context, cmd, server, seconds, check_type="not")
@step(u'Noted value contains "{pattern}"')
def note_print_property_b(context, pattern):
assert re.search(pattern, context.noted['noted-value']) is not None, \
"Noted value '%s' does not match the pattern '%s'!" % (context.noted['noted-value'], pattern)
@step(u'Note the output of "{command}" as value "{index}"')
def note_the_output_as(context, command, index):
if not hasattr(context, 'noted'):
context.noted = {}
# use nmci as embed might be big in general
context.noted[index] = nmci.command_output_err(command)[0].strip()
@step(u'Note the output of "{command}"')
def note_the_output_of(context, command):
if not hasattr(context, 'noted'):
context.noted = {}
# use nmci as embed might be big in general
context.noted['noted-value'] = nmci.command_output(command).strip()
def json_compare(pattern, out):
pattern_type = type(pattern)
if pattern_type is dict:
for x in pattern:
if x in out:
r = json_compare(pattern[x], out[x])
if r != 0:
return r
else:
return 1
return 0
elif pattern_type is list:
        assert False, "TODO: compare lists somehow"
else:
if out == pattern:
return 0
else:
return 1
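# Illustrative behaviour of json_compare (inputs made up): the pattern
# {"a": 1} matches the output {"a": 1, "b": 2} and returns 0, while
# {"a": 2} against the same output returns 1; list patterns are not handled yet.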
def check_pattern_command(context, command, pattern, seconds, check_type="default", exact_check=False, timeout=180, maxread=100000, interval=1, json_check=False):
seconds = int(seconds)
orig_seconds = seconds
while seconds > 0:
proc = context.pexpect_spawn(command, shell=True, timeout=timeout, maxread=maxread, codec_errors='ignore')
if exact_check:
ret = proc.expect_exact([pattern, pexpect.EOF])
elif json_check:
proc.expect([pexpect.EOF])
out = proc.before
json_out = json.loads(out)
json_pattern = json.loads(pattern)
ret = json_compare(json_pattern, json_out)
else:
ret = proc.expect([pattern, pexpect.EOF])
if check_type == "default":
if ret == 0:
return True
elif check_type == "not":
if ret != 0:
return True
elif check_type == "full":
            assert ret == 0, 'Pattern "%s" disappeared after %d seconds, output was:\n%s' % (pattern, orig_seconds-seconds, proc.before)
elif check_type == "not_full":
assert ret != 0, 'Pattern "%s" appeared after %d seconds, output was:\n%s%s' % (pattern, orig_seconds-seconds, proc.before, proc.after)
seconds = seconds - 1
time.sleep(interval)
if check_type == "default":
assert False, 'Did not see the pattern "%s" in %d seconds, output was:\n%s' % (pattern, orig_seconds, proc.before)
elif check_type == "not":
assert False, 'Did still see the pattern "%s" in %d seconds, output was:\n%s%s' % (pattern, orig_seconds, proc.before, proc.after)
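# Typical use (arguments illustrative): check_pattern_command(context,
# "nmcli device", "connected", seconds=5) re-runs the command once per second
# until the pattern appears; check_type="not" inverts the test, while "full"
# and "not_full" require the condition to hold for the whole interval.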
@step(u'Noted value is visible with command "{command}"')
@step(u'Noted value is visible with command "{command}" in "{seconds}" seconds')
def noted_visible_command(context, command, seconds=2):
check_pattern_command(context, command, context.noted['noted-value'], seconds, exact_check=True)
@step(u'Noted value is not visible with command "{command}"')
@step(u'Noted value is not visible with command "{command}" in "{seconds}" seconds')
def noted_not_visible_command(context, command, seconds=2):
return check_pattern_command(context, command, context.noted['noted-value'], seconds, check_type="not", exact_check=True)
@step(u'Noted value "{index}" is visible with command "{command}"')
@step(u'Noted value "{index}" is visible with command "{command}" in "{seconds}" seconds')
def noted_index_visible_command(context, command, index, seconds=2):
return check_pattern_command(context, command, context.noted[index], seconds, exact_check=True)
@step(u'Noted value "{index}" is not visible with command "{command}"')
@step(u'Noted value "{index}" is not visible with command "{command}" in "{seconds}" seconds')
def noted_index_not_visible_command(context, command, index, seconds=2):
return check_pattern_command(context, command, context.noted[index], seconds, check_type="not", exact_check=True)
@step(u'"{pattern}" is visible with command "{command}"')
@step(u'"{pattern}" is visible with command "{command}" in "{seconds}" seconds')
def pattern_visible_command(context, command, pattern, seconds=2):
return check_pattern_command(context, command, pattern, seconds)
@step(u'"{pattern}" is not visible with command "{command}"')
@step(u'"{pattern}" is not visible with command "{command}" in "{seconds}" seconds')
def pattern_not_visible_command(context, command, pattern, seconds=2):
return check_pattern_command(context, command, pattern, seconds, check_type="not")
@step(u'String "{string}" is visible with command "{command}"')
@step(u'String "{string}" is visible with command "{command}" in "{seconds}" seconds')
def string_visible_command(context, command, string, seconds=2):
return check_pattern_command(context, command, string, seconds, exact_check=True)
@step(u'String "{string}" is not visible with command "{command}"')
@step(u'String "{string}" is not visible with command "{command}" in "{seconds}" seconds')
def string_not_visible_command(context, command, string, seconds=2):
return check_pattern_command(context, command, string, seconds, check_type="not", exact_check=True)
@step(u'JSON "{string}" is visible with command "{command}"')
@step(u'JSON "{string}" is visible with command "{command}" in "{seconds}" seconds')
def json_visible_command(context, command, string, seconds=2):
return check_pattern_command(context, command, string, seconds, json_check=True)
@step(u'JSON "{string}" is not visible with command "{command}"')
@step(u'JSON "{string}" is not visible with command "{command}" in "{seconds}" seconds')
def json_not_visible_command(context, command, string, seconds=2):
return check_pattern_command(context, command, string, seconds, check_type="not", json_check=True)
@step(u'"{pattern}" is visible with command "{command}" for full "{seconds}" seconds')
def check_pattern_visible_with_command_fortime(context, pattern, command, seconds):
return check_pattern_command(context, command, pattern, seconds, check_type="full")
@step(u'"{pattern}" is not visible with command "{command}" for full "{seconds}" seconds')
def check_pattern_not_visible_with_command_fortime(context, pattern, command, seconds):
return check_pattern_command(context, command, pattern, seconds, check_type="not_full")
@step(u'"{pattern}" is visible with tab after "{command}"')
def check_pattern_visible_with_tab_after_command(context, pattern, command):
os.system('echo "set page-completions off" > ~/.inputrc')
exp = context.pexpect_spawn('/bin/bash')
exp.send(command)
exp.sendcontrol('i')
exp.sendcontrol('i')
exp.sendcontrol('i')
exp.sendeof()
assert exp.expect([pattern, pexpect.EOF]) == 0, 'pattern %s is not visible with "%s"' % (pattern, command)
@step(u'"{pattern}" is not visible with tab after "{command}"')
def check_pattern_not_visible_with_tab_after_command(context, pattern, command):
context.run('echo "set page-completions off" > ~/.inputrc')
exp = context.pexpect_spawn('/bin/bash')
exp.send(command)
exp.sendcontrol('i')
exp.sendcontrol('i')
exp.sendcontrol('i')
exp.sendeof()
assert exp.expect([pattern, pexpect.EOF, pexpect.TIMEOUT]) != 0, 'pattern %s is visible with "%s"' % (pattern, command)
@step(u'Run child "{command}"')
def run_child_process(context, command):
context.children = getattr(context, "children", [])
child = context.pexpect_service(command, shell=True)
context.children.append(child)
@step(u'Run child "{command}" without shell')
def run_child_process_no_shell(context, command):
context.children = getattr(context, "children", [])
child = context.pexpect_service(command)
context.children.append(child)
@step(u'Kill children')
def kill_children(context):
for child in getattr(context, "children", []):
child.kill(9)
@step(u'Start following journal')
def start_tailing_journal(context):
context.journal = context.pexpect_service('sudo journalctl --follow -o cat', timeout=180)
time.sleep(0.3)
@step(u'Look for "{content}" in journal')
def find_tailing_journal(context, content):
if context.journal.expect([content, pexpect.TIMEOUT, pexpect.EOF]) == 1:
raise Exception('Did not see the "%s" in journal output before timeout (180s)' % content)
@step(u'Wait for at least "{secs}" seconds')
def wait_for_x_seconds(context, secs):
time.sleep(int(secs))
assert True
@step(u'Look for "{content}" in tailed file')
def find_tailing(context, content):
assert context.tail.expect([content, pexpect.TIMEOUT, pexpect.EOF]) != 1, \
'Did not see the "%s" in tail output before timeout (180s)' % content
@step(u'Start tailing file "{archivo}"')
def start_tailing(context, archivo):
context.tail = context.pexpect_service('sudo tail -f %s' % archivo, timeout=180)
time.sleep(0.3)
@step('Ping "{domain}"')
@step('Ping "{domain}" "{number}" times')
def ping_domain(context, domain, number=2):
if number != 2:
rc = context.command_code("ping -q -4 -c %s %s" % (number, domain))
else:
rc = context.command_code("curl -s %s" % (domain))
assert rc == 0
@step(u'Ping "{domain}" from "{device}" device')
def ping_domain_from_device(context, domain, device):
rc = context.command_code("ping -4 -c 2 -I %s %s" % (device, domain))
assert rc == 0
@step(u'Ping6 "{domain}"')
def ping6_domain(context, domain):
rc = context.command_code("ping6 -c 2 %s" % domain)
assert rc == 0
@step(u'Unable to ping "{domain}"')
def cannot_ping_domain(context, domain):
rc = context.command_code('curl %s' % domain)
assert rc != 0
@step(u'Unable to ping "{domain}" from "{device}" device')
def cannot_ping_domain_from_device(context, domain, device):
rc = context.command_code('ping -c 2 -I %s %s ' % (device, domain))
assert rc != 0
@step(u'Unable to ping6 "{domain}"')
def cannot_ping6_domain(context, domain):
rc = context.command_code('ping6 -c 2 %s' % domain)
assert rc != 0
@step(u'Metered status is "{value}"')
def check_metered_status(context, value):
cmd = 'dbus-send --system --print-reply --dest=org.freedesktop.NetworkManager \
/org/freedesktop/NetworkManager \
org.freedesktop.DBus.Properties.Get \
string:"org.freedesktop.NetworkManager" \
string:"Metered" |grep variant| awk \'{print $3}\''
ret = context.command_output(cmd).strip()
assert ret == value, "Metered value is %s but should be %s" % (ret, value)
@step(u'Network trafic "{state}" dropped')
def network_dropped(context, state):
if state == "is":
assert context.command_code('ping -c 1 -W 1 boston.com') != 0
if state == "is not":
assert context.command_code('ping -c 1 -W 1 boston.com') == 0
@step(u'Network trafic "{state}" dropped on "{device}"')
def network_dropped_two(context, state, device):
if state == "is":
assert context.command_code('ping -c 2 -I %s -W 1 8.8.8.8' % device) != 0
if state == "is not":
assert context.command_code('ping -c 2 -I %s -W 1 8.8.8.8' % device) == 0
@step(u'Send lifetime scapy packet')
@step(u'Send lifetime scapy packet with "{hlim}"')
@step(u'Send lifetime scapy packet from "{srcaddr}"')
@step(u'Send lifetime scapy packet to dst "{prefix}"')
@step(u'Send lifetime scapy packet with lifetimes "{valid}" "{pref}"')
def send_packet(context, srcaddr=None, hlim=None, valid=3600, pref=1800, prefix="fd00:8086:1337::"):
from scapy.all import get_if_hwaddr
from scapy.all import sendp, Ether, IPv6
from scapy.all import ICMPv6ND_RA
from scapy.all import ICMPv6NDOptPrefixInfo
in_if = "test10"
out_if = "test11"
p = Ether(dst=get_if_hwaddr(out_if), src=get_if_hwaddr(in_if))
if srcaddr or hlim:
if hlim:
p /= IPv6(dst="ff02::1", hlim=int(hlim))
else:
p /= IPv6(dst="ff02::1", src=srcaddr)
else:
p /= IPv6(dst="ff02::1")
valid, pref = int(valid), int(pref)
p /= ICMPv6ND_RA()
p /= ICMPv6NDOptPrefixInfo(prefix=prefix, prefixlen=64, validlifetime=valid, preferredlifetime=pref)
sendp(p, iface=in_if)
sendp(p, iface=in_if)
time.sleep(3)
@step(u'Set logging for "{domain}" to "{level}"')
def set_logging(context, domain, level):
if level == " ":
cli = context.pexpect_spawn('nmcli g l domains %s' % (domain), timeout=60)
else:
cli = context.pexpect_spawn('nmcli g l level %s domains %s' % (level, domain), timeout=60)
r = cli.expect(['Error', 'Timeout', pexpect.TIMEOUT, pexpect.EOF])
if r != 3:
assert False, 'Something bad happened when changing log level'
@step(u'Note NM log')
def note_NM_log(context):
if not hasattr(context, 'noted'):
context.noted = {}
# do not use context, as log might be too big to embed
context.noted['noted-value'] = nmci.command_output("sudo journalctl -all -u NetworkManager --no-pager -o cat %s" % context.log_cursor)
@step(u'Check coredump is not found in "{seconds}" seconds')
def check_no_coredump(context, seconds):
for i in range(int(seconds)):
nmci.lib.check_coredump(context)
if context.crash_embeded:
assert False, "Coredump found"
time.sleep(1)
| NetworkManager/NetworkManager-ci | features/steps/commands.py | Python | gpl-3.0 | 19,422 | 0.003398 |
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.port.mac import MacPort
from webkitpy.port import port_testcase
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.mocktool import MockOptions
from webkitpy.common.system.executive_mock import MockExecutive, MockExecutive2, MockProcess, ScriptError
from webkitpy.common.system.systemhost_mock import MockSystemHost
class MacTest(port_testcase.PortTestCase):
os_name = 'mac'
os_version = 'lion'
port_name = 'mac-lion'
port_maker = MacPort
def assert_skipped_file_search_paths(self, port_name, expected_paths, use_webkit2=False):
port = self.make_port(port_name=port_name, options=MockOptions(webkit_test_runner=use_webkit2))
self.assertEqual(port._skipped_file_search_paths(), expected_paths)
def test_default_timeout_ms(self):
super(MacTest, self).test_default_timeout_ms()
self.assertEqual(self.make_port(options=MockOptions(guard_malloc=True)).default_timeout_ms(), 350000)
example_skipped_file = u"""
# <rdar://problem/5647952> fast/events/mouseout-on-window.html needs mac DRT to issue mouse out events
fast/events/mouseout-on-window.html
# <rdar://problem/5643675> window.scrollTo scrolls a window with no scrollbars
fast/events/attempt-scroll-with-no-scrollbars.html
# see bug <rdar://problem/5646437> REGRESSION (r28015): svg/batik/text/smallFonts fails
svg/batik/text/smallFonts.svg
# Java tests don't work on WK2
java/
"""
example_skipped_tests = [
"fast/events/mouseout-on-window.html",
"fast/events/attempt-scroll-with-no-scrollbars.html",
"svg/batik/text/smallFonts.svg",
"java",
]
def test_tests_from_skipped_file_contents(self):
port = self.make_port()
self.assertEqual(port._tests_from_skipped_file_contents(self.example_skipped_file), self.example_skipped_tests)
def assert_name(self, port_name, os_version_string, expected):
host = MockSystemHost(os_name='mac', os_version=os_version_string)
port = self.make_port(host=host, port_name=port_name)
self.assertEqual(expected, port.name())
def test_tests_for_other_platforms(self):
platforms = ['mac', 'chromium-linux', 'mac-snowleopard']
port = self.make_port(port_name='mac-snowleopard')
platform_dir_paths = map(port._webkit_baseline_path, platforms)
# Replace our empty mock file system with one which has our expected platform directories.
port._filesystem = MockFileSystem(dirs=platform_dir_paths)
dirs_to_skip = port._tests_for_other_platforms()
self.assertIn('platform/chromium-linux', dirs_to_skip)
self.assertNotIn('platform/mac', dirs_to_skip)
self.assertNotIn('platform/mac-snowleopard', dirs_to_skip)
def test_version(self):
port = self.make_port()
self.assertTrue(port.version())
def test_versions(self):
# Note: these tests don't need to be exhaustive as long as we get path coverage.
self.assert_name('mac', 'snowleopard', 'mac-snowleopard')
self.assert_name('mac-snowleopard', 'leopard', 'mac-snowleopard')
self.assert_name('mac-snowleopard', 'lion', 'mac-snowleopard')
self.assert_name('mac', 'lion', 'mac-lion')
self.assert_name('mac-lion', 'lion', 'mac-lion')
self.assert_name('mac', 'mountainlion', 'mac-mountainlion')
self.assert_name('mac-mountainlion', 'lion', 'mac-mountainlion')
self.assert_name('mac', 'mavericks', 'mac-mavericks')
self.assert_name('mac-mavericks', 'mountainlion', 'mac-mavericks')
self.assert_name('mac', 'future', 'mac-future')
self.assert_name('mac-future', 'future', 'mac-future')
self.assertRaises(AssertionError, self.assert_name, 'mac-tiger', 'leopard', 'mac-leopard')
def test_setup_environ_for_server(self):
port = self.make_port(options=MockOptions(leaks=True, guard_malloc=True))
env = port.setup_environ_for_server(port.driver_name())
self.assertEqual(env['MallocStackLogging'], '1')
self.assertEqual(env['DYLD_INSERT_LIBRARIES'], '/usr/lib/libgmalloc.dylib:/mock-build/libWebCoreTestShim.dylib')
def _assert_search_path(self, port_name, baseline_path, search_paths, use_webkit2=False):
port = self.make_port(port_name=port_name, options=MockOptions(webkit_test_runner=use_webkit2))
absolute_search_paths = map(port._webkit_baseline_path, search_paths)
self.assertEqual(port.baseline_path(), port._webkit_baseline_path(baseline_path))
self.assertEqual(port.baseline_search_path(), absolute_search_paths)
def test_baseline_search_path(self):
# Note that we don't need total coverage here, just path coverage, since this is all data driven.
self._assert_search_path('mac-snowleopard', 'mac-snowleopard', ['mac-snowleopard', 'mac-lion', 'mac-mountainlion', 'mac'])
self._assert_search_path('mac-lion', 'mac-lion', ['mac-lion', 'mac-mountainlion', 'mac'])
self._assert_search_path('mac-mountainlion', 'mac-mountainlion', ['mac-mountainlion', 'mac'])
self._assert_search_path('mac-mavericks', 'mac', ['mac'])
self._assert_search_path('mac-future', 'mac', ['mac'])
self._assert_search_path('mac-snowleopard', 'mac-wk2', ['mac-wk2', 'wk2', 'mac-snowleopard', 'mac-lion', 'mac-mountainlion', 'mac'], use_webkit2=True)
self._assert_search_path('mac-lion', 'mac-wk2', ['mac-wk2', 'wk2', 'mac-lion', 'mac-mountainlion', 'mac'], use_webkit2=True)
self._assert_search_path('mac-mountainlion', 'mac-wk2', ['mac-wk2', 'wk2', 'mac-mountainlion', 'mac'], use_webkit2=True)
self._assert_search_path('mac-mavericks', 'mac-wk2', ['mac-wk2', 'wk2', 'mac'], use_webkit2=True)
self._assert_search_path('mac-future', 'mac-wk2', ['mac-wk2', 'wk2', 'mac'], use_webkit2=True)
def test_show_results_html_file(self):
port = self.make_port()
# Delay setting a should_log executive to avoid logging from MacPort.__init__.
port._executive = MockExecutive(should_log=True)
expected_logs = "MOCK popen: ['Tools/Scripts/run-safari', '--release', '--no-saved-state', '-NSOpen', 'test.html'], cwd=/mock-checkout\n"
OutputCapture().assert_outputs(self, port.show_results_html_file, ["test.html"], expected_logs=expected_logs)
def test_operating_system(self):
self.assertEqual('mac', self.make_port().operating_system())
def test_default_child_processes(self):
port = self.make_port(port_name='mac-lion')
# MockPlatformInfo only has 2 mock cores. The important part is that 2 > 1.
self.assertEqual(port.default_child_processes(), 2)
bytes_for_drt = 200 * 1024 * 1024
port.host.platform.total_bytes_memory = lambda: bytes_for_drt
expected_logs = "This machine could support 2 child processes, but only has enough memory for 1.\n"
child_processes = OutputCapture().assert_outputs(self, port.default_child_processes, (), expected_logs=expected_logs)
self.assertEqual(child_processes, 1)
# Make sure that we always use one process, even if we don't have the memory for it.
port.host.platform.total_bytes_memory = lambda: bytes_for_drt - 1
expected_logs = "This machine could support 2 child processes, but only has enough memory for 1.\n"
child_processes = OutputCapture().assert_outputs(self, port.default_child_processes, (), expected_logs=expected_logs)
self.assertEqual(child_processes, 1)
# SnowLeopard has a CFNetwork bug which causes crashes if we execute more than one copy of DRT at once.
port = self.make_port(port_name='mac-snowleopard')
expected_logs = "Cannot run tests in parallel on Snow Leopard due to rdar://problem/10621525.\n"
child_processes = OutputCapture().assert_outputs(self, port.default_child_processes, (), expected_logs=expected_logs)
self.assertEqual(child_processes, 1)
def test_get_crash_log(self):
# Mac crash logs are tested elsewhere, so here we just make sure we don't crash.
def fake_time_cb():
times = [0, 20, 40]
return lambda: times.pop(0)
port = self.make_port(port_name='mac-snowleopard')
port._get_crash_log('DumpRenderTree', 1234, '', '', 0,
time_fn=fake_time_cb(), sleep_fn=lambda delay: None)
def test_helper_starts(self):
host = MockSystemHost(MockExecutive())
port = self.make_port(host)
oc = OutputCapture()
oc.capture_output()
host.executive._proc = MockProcess('ready\n')
port.start_helper()
port.stop_helper()
oc.restore_output()
# make sure trying to stop the helper twice is safe.
port.stop_helper()
def test_helper_fails_to_start(self):
host = MockSystemHost(MockExecutive())
port = self.make_port(host)
oc = OutputCapture()
oc.capture_output()
port.start_helper()
port.stop_helper()
oc.restore_output()
def test_helper_fails_to_stop(self):
host = MockSystemHost(MockExecutive())
host.executive._proc = MockProcess()
def bad_waiter():
raise IOError('failed to wait')
host.executive._proc.wait = bad_waiter
port = self.make_port(host)
oc = OutputCapture()
oc.capture_output()
port.start_helper()
port.stop_helper()
oc.restore_output()
def test_sample_process(self):
def logging_run_command(args):
print args
port = self.make_port()
port._executive = MockExecutive2(run_command_fn=logging_run_command)
expected_stdout = "['/usr/bin/sample', 42, 10, 10, '-file', '/mock-build/layout-test-results/test-42-sample.txt']\n"
OutputCapture().assert_outputs(self, port.sample_process, args=['test', 42], expected_stdout=expected_stdout)
def test_sample_process_throws_exception(self):
def throwing_run_command(args):
raise ScriptError("MOCK script error")
port = self.make_port()
port._executive = MockExecutive2(run_command_fn=throwing_run_command)
OutputCapture().assert_outputs(self, port.sample_process, args=['test', 42])
def test_32bit(self):
port = self.make_port(options=MockOptions(architecture='x86'))
def run_script(script, args=None, env=None):
self.args = args
port._run_script = run_script
self.assertEqual(port.architecture(), 'x86')
port._build_driver()
self.assertEqual(self.args, ['ARCHS=i386'])
def test_64bit(self):
# Apple Mac port is 64-bit by default
port = self.make_port()
self.assertEqual(port.architecture(), 'x86_64')
def run_script(script, args=None, env=None):
self.args = args
port._run_script = run_script
port._build_driver()
self.assertEqual(self.args, [])
| sloanyang/aquantic | Tools/Scripts/webkitpy/port/mac_unittest.py | Python | gpl-2.0 | 12,566 | 0.003342 |
#!/usr/bin/python
#
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: exos_facts
version_added: "2.7"
author:
- "Lance Richardson (@hlrichardson)"
- "Ujwal Koamrla (@ujwalkomarla)"
short_description: Collect facts from devices running Extreme EXOS
description:
- Collects a base set of device facts from a remote device that
is running EXOS. This module prepends all of the base network
fact keys with C(ansible_net_<fact>). The facts module will
always collect a base set of facts from the device and can
enable or disable collection of additional facts.
notes:
- Tested against EXOS 22.5.1.7
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, hardware, config, and interfaces. Can specify a list of
values to include a larger subset. Values can also be used
      with an initial C(!) to specify that a specific subset should
not be collected.
required: false
type: list
default: ['!config']
gather_network_resources:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all and the resources like interfaces, vlans etc.
Can specify a list of values to include a larger subset.
      Values can also be used with an initial C(!) to specify that
a specific subset should not be collected.
Valid subsets are 'all', 'lldp_global'.
type: list
version_added: "2.9"
"""
EXAMPLES = """
- name: Gather all legacy facts
exos_facts:
gather_subset: all
- name: Gather only the config and default facts
exos_facts:
gather_subset: config
- name: do not gather hardware facts
exos_facts:
gather_subset: "!hardware"
- name: Gather legacy and resource facts
exos_facts:
gather_subset: all
gather_network_resources: all
- name: Gather only the lldp global resource facts and no legacy facts
exos_facts:
gather_subset:
- '!all'
- '!min'
    gather_network_resources:
- lldp_global
- name: Gather lldp global resource and minimal legacy facts
exos_facts:
gather_subset: min
    gather_network_resources: lldp_global
"""
RETURN = """
ansible_net_gather_subset:
description: The list of fact subsets collected from the device
returned: always
type: list
ansible_net_gather_network_resources:
description: The list of fact for network resource subsets collected from the device
returned: when the resource is configured
type: list
# default
ansible_net_model:
description: The model name returned from the device
returned: always
type: str
ansible_net_serialnum:
description: The serial number of the remote device
returned: always
type: str
ansible_net_version:
description: The operating system version running on the remote device
returned: always
type: str
ansible_net_hostname:
description: The configured hostname of the device
returned: always
type: str
# hardware
ansible_net_memfree_mb:
description: The available free memory on the remote device in Mb
returned: when hardware is configured
type: int
ansible_net_memtotal_mb:
description: The total memory on the remote device in Mb
returned: when hardware is configured
type: int
# config
ansible_net_config:
description: The current active config from the device
returned: when config is configured
type: str
# interfaces
ansible_net_all_ipv4_addresses:
description: All IPv4 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_all_ipv6_addresses:
description: All Primary IPv6 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_interfaces:
description: A hash of all interfaces running on the system
returned: when interfaces is configured
type: dict
ansible_net_neighbors:
description: The list of LLDP neighbors from the remote device
returned: when interfaces is configured
type: dict
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.exos.argspec.facts.facts import FactsArgs
from ansible.module_utils.network.exos.facts.facts import Facts
def main():
"""Main entry point for AnsibleModule
"""
argument_spec = FactsArgs.argument_spec
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = ['default value for `gather_subset` '
'will be changed to `min` from `!config` v2.11 onwards']
result = Facts(module).get_facts()
ansible_facts, additional_warnings = result
warnings.extend(additional_warnings)
module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
if __name__ == '__main__':
main()
| thaim/ansible | lib/ansible/modules/network/exos/exos_facts.py | Python | mit | 5,863 | 0.000682 |
from __future__ import print_function
import sys
import numpy
from arrayaccess import gene_class_ArrayAccess
from test_arrayaccess import LIST_CDT, LIST_NUM
def main(iter_num, list_num, calloc):
clibname = 'arrayaccess.so'
ArrayAccess = gene_class_ArrayAccess(clibname, len(list_num), LIST_CDT)
ArrayAccess._calloc_ = calloc
if iter_num <= 10:
printnow = range(iter_num)
else:
printnow = numpy.linspace(
0, iter_num, num=10, endpoint=False).astype(int)
num_dict = dict(zip(ArrayAccess.num_names, list_num)) # {num_i: 6, ...}
assert ArrayAccess._calloc_ is bool(calloc)
print('[*/%d]:' % iter_num, end=' ')
sys.stdout.flush()
for i in range(iter_num):
ArrayAccess(**num_dict)
if i in printnow:
print(i, end=' ')
sys.stdout.flush()
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-n", "--nums", default='100, 10, 10, 10, 10',
help="comma separated numbers")
parser.add_option("-t", "--time", default=1000, type=int)
parser.add_option("-c", "--calloc", default=1, type=int)
(opts, args) = parser.parse_args()
if opts.nums:
list_num = eval('[%s]' % opts.nums)
if len(list_num) != len(LIST_NUM):
            raise RuntimeError('%s numbers are expected. %s given.'
                               % (len(LIST_NUM), len(list_num)))
else:
list_num = LIST_NUM
main(opts.time, list_num, bool(opts.calloc))
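# Example invocation (illustrative; assumes arrayaccess.so has already been built
# next to this script, as the test suite does): run 1000 allocate/free cycles with
# the default array sizes and watch the process RSS for growth, e.g.
#
#     python check_memory_leak.py --nums "100, 10, 10, 10, 10" --time 1000 --calloc 1
#
# Passing --calloc 0 exercises the alternative allocation path instead.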
| tkf/railgun | tests/check_memory_leak.py | Python | mit | 1,554 | 0.000644 |
from __init__ import redis_db
from werkzeug.security import generate_password_hash, check_password_hash
from os import urandom
from base64 import b64encode
class User(object):
def __init__(self):
self.username = "" # required
self.password_hash = "" # required
self.phone_number = "" # required
self.emergency_contact = "" # not required
self.secret_key = b64encode(urandom(64)).decode("utf-8")
self.contacts = set() # can be empty
def set_password(self, password):
self.password_hash = generate_password_hash(password, method="pbkdf2:sha256", salt_length=32)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def write_to_db(self):
user_dict = {"password_hash": self.password_hash, "phone_number": self.phone_number,
"secret_key": self.secret_key, "emergency_contact": self.emergency_contact}
redis_db.hmset(self.username, user_dict)
redis_db.delete(self.username + ":contacts")
if len(self.contacts):
redis_db.sadd(self.username + ":contacts", *self.contacts)
def deauthenticate(self):
self.secret_key = b64encode(urandom(64)).decode("utf-8")
@classmethod
def get_from_db(cls, username):
user_dict = redis_db.hmget(username, ["password_hash", "phone_number", "secret_key", "emergency_contact"])
fetched_user = User()
fetched_user.username = username
fetched_user.password_hash = user_dict[0]
fetched_user.phone_number = user_dict[1]
fetched_user.secret_key = user_dict[2]
fetched_user.emergency_contact = user_dict[3]
if not fetched_user.password_hash or not fetched_user.phone_number or not fetched_user.secret_key:
return None
else:
fetched_user.contacts = redis_db.smembers(fetched_user.username + ":contacts")
return fetched_user
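# Minimal usage sketch (illustrative, not part of the original module; assumes the
# imported `redis_db` connection is reachable and configured to return strings):
if __name__ == "__main__":
    user = User()
    user.username = "alice"
    user.phone_number = "+15550100"
    user.set_password("correct horse battery staple")
    user.write_to_db()
    fetched = User.get_from_db("alice")
    assert fetched is not None
    assert fetched.verify_password("correct horse battery staple")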
| BrambleLLC/HackAZ-2016 | server/webapp/models.py | Python | mit | 1,957 | 0.006643 |
"""
Copyright (C) 2018 Quinn D Granfor <[email protected]>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2, as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License version 2 for more details.
You should have received a copy of the GNU General Public License
version 2 along with this program; if not, write to the Free
Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
"""
from steam import SteamID
from bs4 import BeautifulSoup
from . import common_network
# https://developer.valvesoftware.com/wiki/Steam_Web_API
class CommonNetworkSteam:
"""
Class for interfacing with Valve Steam
"""
def __init__(self, access_token):
pass
    @staticmethod
    def com_net_steam_id_from_user(user_name):
        return SteamID.from_url('https://steamcommunity.com/id/%s' % user_name)
    @staticmethod
    def com_net_steam_game_server_data_download():
"""
Server ID SteamCMD > Steam Client > Anonymous Login > Notes
"""
steam_servers = []
data = BeautifulSoup(common_network.mk_network_fetch_from_url(
"https://developer.valvesoftware.com/wiki/Dedicated_Servers_List", None),
features="html.parser").find_all('table')[1]
rows = data.find_all('tr')
for row in rows:
cols = row.find_all('td')
cols = [ele.text.strip() for ele in cols]
steam_servers.append([ele for ele in cols if ele])
print(steam_servers, flush=True)
return steam_servers
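# Illustrative call flow (assumption, not part of the original file): both helpers
# hit the network, so this is only a sketch of how the class is meant to be used.
#
#     steam_id = CommonNetworkSteam.com_net_steam_id_from_user('some_vanity_name')
#     servers = CommonNetworkSteam.com_net_steam_game_server_data_download()
#     # each entry in `servers` holds the cell text of one row of the wiki's
#     # dedicated-server table (game name, server id, notes, ...)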
| MediaKraken/MediaKraken_Deployment | source/common/common_network_steam.py | Python | gpl-3.0 | 1,746 | 0.000573 |
from flask import Blueprint
from zou.app.utils.api import configure_api_from_blueprint
from .resources import (
AllProjectsResource,
OpenProjectsResource,
ProductionTeamResource,
ProductionTeamRemoveResource,
ProductionAssetTypeResource,
ProductionAssetTypeRemoveResource,
ProductionTaskTypeResource,
ProductionTaskTypeRemoveResource,
ProductionTaskTypesResource,
ProductionTaskStatusResource,
ProductionTaskStatusRemoveResource,
ProductionMetadataDescriptorResource,
ProductionMetadataDescriptorsResource,
ProductionMilestonesResource,
ProductionScheduleItemsResource,
ProductionTaskTypeScheduleItemsResource,
ProductionAssetTypesScheduleItemsResource,
ProductionEpisodesScheduleItemsResource,
ProductionSequencesScheduleItemsResource,
ProductionTimeSpentsResource,
)
routes = [
("/data/projects/open", OpenProjectsResource),
("/data/projects/all", AllProjectsResource),
("/data/projects/<project_id>/team", ProductionTeamResource),
(
"/data/projects/<project_id>/task-types",
ProductionTaskTypesResource,
),
(
"/data/projects/<project_id>/team/<person_id>",
ProductionTeamRemoveResource,
),
(
"/data/projects/<project_id>/settings/asset-types",
ProductionAssetTypeResource,
),
(
"/data/projects/<project_id>/settings/asset-types/<asset_type_id>",
ProductionAssetTypeRemoveResource,
),
(
"/data/projects/<project_id>/settings/task-types",
ProductionTaskTypeResource,
),
(
"/data/projects/<project_id>/settings/task-types/<task_type_id>",
ProductionTaskTypeRemoveResource,
),
(
"/data/projects/<project_id>/settings/task-status",
ProductionTaskStatusResource,
),
(
"/data/projects/<project_id>/settings/task-status/<task_status_id>",
ProductionTaskStatusRemoveResource,
),
(
"/data/projects/<project_id>/metadata-descriptors",
ProductionMetadataDescriptorsResource,
),
(
"/data/projects/<project_id>/metadata-descriptors/<descriptor_id>",
ProductionMetadataDescriptorResource,
),
("/data/projects/<project_id>/milestones", ProductionMilestonesResource),
(
"/data/projects/<project_id>/schedule-items",
ProductionScheduleItemsResource,
),
(
"/data/projects/<project_id>/schedule-items/task-types",
ProductionTaskTypeScheduleItemsResource,
),
(
"/data/projects/<project_id>/schedule-items/<task_type_id>/asset-types",
ProductionAssetTypesScheduleItemsResource,
),
(
"/data/projects/<project_id>/schedule-items/<task_type_id>/episodes",
ProductionEpisodesScheduleItemsResource,
),
(
"/data/projects/<project_id>/schedule-items/<task_type_id>/sequences",
ProductionSequencesScheduleItemsResource,
),
("/data/projects/<project_id>/time-spents", ProductionTimeSpentsResource),
]
blueprint = Blueprint("projects", "projects")
api = configure_api_from_blueprint(blueprint, routes)
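# Note (assumption, not from the original file): the blueprint built above is
# expected to be registered on the Flask application elsewhere in zou, roughly:
#
#     app.register_blueprint(blueprint)
#
# after which, for example, GET /data/projects/open is served by
# OpenProjectsResource and the other routes map one-to-one to their resources.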
| cgwire/zou | zou/app/blueprints/projects/__init__.py | Python | agpl-3.0 | 3,136 | 0.000319 |
from ...api import generate_media, prepare_media
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = 'Combines and compresses your media files and saves them in _generated_media.'
requires_model_validation = False
def handle(self, *args, **options):
prepare_media()
generate_media()
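# Typical invocation (assumes django-mediagenerator is installed and MEDIA_BUNDLES
# is configured in settings):
#
#     python manage.py generatemedia
#
# which combines/compresses the configured bundles into the _generated_media folder.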
| Crop-R/django-mediagenerator | mediagenerator/management/commands/generatemedia.py | Python | bsd-3-clause | 349 | 0.005731 |