code (stringlengths 3–1.05M) | repo_name (stringlengths 5–104) | path (stringlengths 4–251) | language (stringclasses 1) | license (stringclasses 15) | size (int64 3–1.05M)
---|---|---|---|---|---
from . import test_dumpedmocktask
from .test_cacheabletask_directory import MixInTestDataDirectory
class TestDumpedMockTaskDirectory(MixInTestDataDirectory,
test_dumpedmocktask.TestDumpedMockTask):
pass
| tkf/buildlet | buildlet/tests/test_dumpedmocktask_directory.py | Python | bsd-3-clause | 243 |
# imports
import bpy
# object data
def ObjectData(self, context, layout, datablock):
'''
Object data buttons.
'''
if datablock.type != 'EMPTY':
# template id
layout.template_ID(datablock, 'data')
else:
if datablock.empty_draw_type == 'IMAGE':
layout.template_ID(datablock, 'data', open='image.open', unlink='object.unlink_data')
# armature
if datablock.type == 'ARMATURE':
layout.label(text='Skeleton:')
layout.prop(datablock.data, 'pose_position', expand=True)
column = layout.column()
column.label(text='Layers:')
column.prop(datablock.data, 'layers', text='')
column.label(text='Protected Layers:')
column.prop(datablock.data, 'layers_protected', text='')
if context.scene.render.engine == 'BLENDER_GAME':
column = layout.column()
column.label(text='Deform:')
column.prop(datablock.data, 'deform_method', expand=True)
layout.separator()
layout.label(text='Display:')
layout = self.layout
layout.prop(datablock.data, 'draw_type', expand=True)
split = layout.split()
column = split.column()
column.prop(datablock.data, 'show_names', text='Names')
column.prop(datablock.data, 'show_axes', text='Axes')
column.prop(datablock.data, 'show_bone_custom_shapes', text='Shapes')
column = split.column()
column.prop(datablock.data, 'show_group_colors', text='Colors')
column.prop(datablock, 'show_x_ray', text='X-Ray')
column.prop(datablock.data, 'use_deform_delay', text='Delay Refresh')
if datablock.pose:
layout.separator()
layout.label(text='Inverse Kinematics:')
layout.prop(datablock.pose, 'ik_solver')
if datablock.pose.ik_param:
layout.prop(datablock.pose.ik_param, 'mode', expand=True)
if datablock.pose.ik_param.mode == 'SIMULATION':
layout.label(text='Reiteration:')
layout.prop(datablock.pose.ik_param, 'reiteration_method', expand=True)
row = layout.row()
row.active = not datablock.pose.ik_param.mode == 'SIMULATION' or datablock.pose.ik_param.reiteration_method != 'NEVER'
row.prop(datablock.pose.ik_param, 'precision')
row.prop(datablock.pose.ik_param, 'iterations')
if datablock.pose.ik_param.mode == 'SIMULATION':
layout.prop(datablock.pose.ik_param, 'use_auto_step')
row = layout.row()
if datablock.pose.ik_param.use_auto_step:
row.prop(datablock.pose.ik_param, 'step_min', text='Min')
row.prop(datablock.pose.ik_param, 'step_max', text='Max')
else:
row.prop(datablock.pose.ik_param, 'step_count')
layout.prop(datablock.pose.ik_param, 'solver')
if datablock.pose.ik_param.mode == 'SIMULATION':
layout.prop(datablock.pose.ik_param, 'feedback')
layout.prop(datablock.pose.ik_param, 'velocity_max')
if datablock.pose.ik_param.solver == 'DLS':
row = layout.row()
row.prop(datablock.pose.ik_param, 'damping_max', text='Damp', slider=True)
row.prop(datablock.pose.ik_param, 'damping_epsilon', text='Eps', slider=True)
# curve
if datablock.type in {'CURVE', 'SURFACE', 'FONT'}:
# label
layout.label(text='Shape:')
if datablock.type == 'CURVE':
row = layout.row()
row.prop(datablock.data, 'dimensions', expand=True)
split = layout.split()
column = split.column()
column.label(text='Resolution:')
sub = column.column(align=True)
sub.prop(datablock.data, 'resolution_u', text='Preview U')
sub.prop(datablock.data, 'render_resolution_u', text='Render U')
if datablock.type == 'CURVE':
column.label(text='Twisting:')
column.prop(datablock.data, 'twist_mode', text='')
column.prop(datablock.data, 'twist_smooth', text='Smooth')
elif datablock.type == 'FONT':
column.label(text='Display:')
column.prop(datablock.data, 'use_fast_edit', text='Fast Editing')
column = split.column()
if datablock.type == 'SURFACE':
sub = column.column()
sub.label(text='')
sub = column.column(align=True)
sub.prop(datablock.data, 'resolution_v', text='Preview V')
sub.prop(datablock.data, 'render_resolution_v', text='Render V')
if datablock.type in {'CURVE', 'FONT'}:
column.label(text='Fill:')
sub = column.column()
sub.active = (datablock.data.dimensions == '2D' or (datablock.data.bevel_object is None and datablock.data.dimensions == '3D'))
sub.prop(datablock.data, 'fill_mode', text='')
column.prop(datablock.data, 'use_fill_deform')
if datablock.type == 'CURVE':
column.label(text='Path / Curve-Deform:')
sub = column.column()
subsub = sub.row()
subsub.prop(datablock.data, 'use_radius')
subsub.prop(datablock.data, 'use_stretch')
sub.prop(datablock.data, 'use_deform_bounds')
layout.separator()
layout.label(text='Texture Space')
row = layout.row()
row.prop(datablock.data, 'use_auto_texspace')
row.prop(datablock.data, 'use_uv_as_generated')
row = layout.row()
row.column().prop(datablock.data, 'texspace_location', text='Location')
row.column().prop(datablock.data, 'texspace_size', text='Size')
layout.operator('curve.match_texture_space')
layout.separator()
# isn't a surface
if datablock.type != 'SURFACE':
layout.label(text='Geometry:')
split = layout.split()
column = split.column()
column.label(text='Modification:')
column.prop(datablock.data, 'offset')
column.prop(datablock.data, 'extrude')
column.label(text='Taper Object:')
column.prop(datablock.data, 'taper_object', text='')
column = split.column()
column.label(text='Bevel:')
column.prop(datablock.data, 'bevel_depth', text='Depth')
column.prop(datablock.data, 'bevel_resolution', text='Resolution')
column.label(text='Bevel Object:')
column.prop(datablock.data, 'bevel_object', text='')
if datablock.type != 'FONT':
column = layout.column(align=True)
row = column.row()
row.label(text='Bevel Factor:')
column = layout.column()
column.active = (
(datablock.data.bevel_depth > 0.0) or
(datablock.data.extrude > 0.0) or
(datablock.data.bevel_object is not None)
)
row = column.row(align=True)
row.prop(datablock.data, 'bevel_factor_mapping_start', text='')
row.prop(datablock.data, 'bevel_factor_start', text='Start')
row = column.row(align=True)
row.prop(datablock.data, 'bevel_factor_mapping_end', text='')
row.prop(datablock.data, 'bevel_factor_end', text='End')
row = layout.row()
sub = row.row()
sub.active = datablock.data.taper_object is not None
sub.prop(datablock.data, 'use_map_taper')
sub = row.row()
sub.active = datablock.data.bevel_object is not None
sub.prop(datablock.data, 'use_fill_caps')
layout.separator()
# is curve
if datablock.type == 'CURVE':
layout.prop(datablock.data, 'use_path', text='Path Animation:')
column = layout.column()
column.active = datablock.data.use_path
column.prop(datablock.data, 'path_duration', text='Frames')
column.prop(datablock.data, 'eval_time')
# these are for paths only
row = column.row()
row.prop(datablock.data, 'use_path_follow')
layout.separator()
# isn't a font and there is an active spline
if datablock.type != 'FONT' and datablock.data.splines.active:
layout.label(text='Active Spline:')
split = layout.split()
if datablock.data.splines.active.type == 'POLY':
# These settings are below but it's easier to have
# polys set aside since they use so few settings
row = layout.row()
row.label(text='Cyclic:')
row.prop(datablock.data.splines.active, 'use_cyclic_u', text='U')
layout.prop(datablock.data.splines.active, 'use_smooth')
else:
column = split.column()
column.label(text='Cyclic:')
if datablock.data.splines.active.type == 'NURBS':
column.label(text='Bezier:')
column.label(text='Endpoint:')
column.label(text='Order:')
column.label(text='Resolution:')
column = split.column()
column.prop(datablock.data.splines.active, 'use_cyclic_u', text='U')
if datablock.data.splines.active.type == 'NURBS':
sub = column.column()
# sub.active = (not datablock.data.splines.active.use_cyclic_u)
sub.prop(datablock.data.splines.active, 'use_bezier_u', text='U')
sub.prop(datablock.data.splines.active, 'use_endpoint_u', text='U')
sub = column.column()
sub.prop(datablock.data.splines.active, 'order_u', text='U')
column.prop(datablock.data.splines.active, 'resolution_u', text='U')
if datablock.type == 'SURFACE':
column = split.column()
column.prop(datablock.data.splines.active, 'use_cyclic_v', text='V')
# it's a surface, assume it's a NURBS
sub = column.column()
sub.active = (not datablock.data.splines.active.use_cyclic_v)
sub.prop(datablock.data.splines.active, 'use_bezier_v', text='V')
sub.prop(datablock.data.splines.active, 'use_endpoint_v', text='V')
sub = column.column()
sub.prop(datablock.data.splines.active, 'order_v', text='V')
sub.prop(datablock.data.splines.active, 'resolution_v', text='V')
if datablock.data.splines.active.type == 'BEZIER':
column = layout.column()
column.label(text='Interpolation:')
sub = column.column()
sub.active = (datablock.data.dimensions == '3D')
sub.prop(datablock.data.splines.active, 'tilt_interpolation', text='Tilt')
column.prop(datablock.data.splines.active, 'radius_interpolation', text='Radius')
layout.prop(datablock.data.splines.active, 'use_smooth')
layout.separator()
# is font
if datablock.type == 'FONT':
layout.label(text='Font:')
row = layout.split(percentage=0.25)
row.label(text='Regular')
row.template_ID(datablock.data, 'font', open='font.open', unlink='font.unlink')
row = layout.split(percentage=0.25)
row.label(text='Bold')
row.template_ID(datablock.data, 'font_bold', open='font.open', unlink='font.unlink')
row = layout.split(percentage=0.25)
row.label(text='Italic')
row.template_ID(datablock.data, 'font_italic', open='font.open', unlink='font.unlink')
row = layout.split(percentage=0.25)
row.label(text='Bold & Italic')
row.template_ID(datablock.data, 'font_bold_italic', open='font.open', unlink='font.unlink')
split = layout.split()
column = split.column()
column.prop(datablock.data, 'size', text='Size')
column = split.column()
column.prop(datablock.data, 'shear')
split = layout.split()
column = split.column()
column.label(text='Object Font:')
column.prop(datablock.data, 'family', text='')
column = split.column()
column.label(text='Text on Curve:')
column.prop(datablock.data, 'follow_curve', text='')
split = layout.split()
column = split.column()
sub = column.column(align=True)
sub.label(text='Underline:')
sub.prop(datablock.data, 'underline_position', text='Position')
sub.prop(datablock.data, 'underline_height', text='Thickness')
column = split.column()
column.label(text='Character:')
column.prop(datablock.data.edit_format, 'use_bold')
column.prop(datablock.data.edit_format, 'use_italic')
column.prop(datablock.data.edit_format, 'use_underline')
row = layout.row()
row.prop(datablock.data, 'small_caps_scale', text='Small Caps')
row.prop(datablock.data.edit_format, 'use_small_caps')
layout.separator()
layout.label(text='Paragraph:')
layout.label(text='Align:')
layout.prop(datablock.data, 'align', expand=True)
split = layout.split()
column = split.column(align=True)
column.label(text='Spacing:')
column.prop(datablock.data, 'space_character', text='Letter')
column.prop(datablock.data, 'space_word', text='Word')
column.prop(datablock.data, 'space_line', text='Line')
column = split.column(align=True)
column.label(text='Offset:')
column.prop(datablock.data, 'offset_x', text='X')
column.prop(datablock.data, 'offset_y', text='Y')
layout.separator()
layout.label(text='Text Boxes:')
split = layout.split()
column = split.column()
column.operator('font.textbox_add', icon='ZOOMIN')
column = split.column()
for i, tbox in enumerate(datablock.data.text_boxes):
box = layout.box()
row = box.row()
split = row.split()
column = split.column(align=True)
column.label(text='Dimensions:')
column.prop(tbox, 'width', text='Width')
column.prop(tbox, 'height', text='Height')
column = split.column(align=True)
column.label(text='Offset:')
column.prop(tbox, 'x', text='X')
column.prop(tbox, 'y', text='Y')
row.operator('font.textbox_remove', text='', icon='X', emboss=False).index = i
# camera
if datablock.type == 'CAMERA':
layout.label(text='Lens:')
cam = datablock.data
layout.prop(cam, 'type', expand=True)
split = layout.split()
column = split.column()
if cam.type == 'PERSP':
row = column.row()
if cam.lens_unit == 'MILLIMETERS':
row.prop(cam, 'lens')
elif cam.lens_unit == 'FOV':
row.prop(cam, 'angle')
row.prop(cam, 'lens_unit', text='')
elif cam.type == 'ORTHO':
column.prop(cam, 'ortho_scale')
elif cam.type == 'PANO':
engine = context.scene.render.engine
if engine == 'CYCLES':
ccam = cam.cycles
column.prop(ccam, 'panorama_type', text='Type')
if ccam.panorama_type == 'FISHEYE_EQUIDISTANT':
column.prop(ccam, 'fisheye_fov')
elif ccam.panorama_type == 'FISHEYE_EQUISOLID':
row = layout.row()
row.prop(ccam, 'fisheye_lens', text='Lens')
row.prop(ccam, 'fisheye_fov')
elif ccam.panorama_type == 'EQUIRECTANGULAR':
row = layout.row()
sub = row.column(align=True)
sub.prop(ccam, 'latitude_min')
sub.prop(ccam, 'latitude_max')
sub = row.column(align=True)
sub.prop(ccam, 'longitude_min')
sub.prop(ccam, 'longitude_max')
elif engine == 'BLENDER_RENDER':
row = column.row()
if cam.lens_unit == 'MILLIMETERS':
row.prop(cam, 'lens')
elif cam.lens_unit == 'FOV':
row.prop(cam, 'angle')
row.prop(cam, 'lens_unit', text='')
split = layout.split()
column = split.column(align=True)
column.label(text='Shift:')
column.prop(cam, 'shift_x', text='X')
column.prop(cam, 'shift_y', text='Y')
column = split.column(align=True)
column.label(text='Clipping:')
column.prop(cam, 'clip_start', text='Start')
column.prop(cam, 'clip_end', text='End')
layout.separator()
if context.scene.render.use_multiview and context.scene.render.views_format == 'STEREO_3D':
layout.label(text='Stereoscopy:')
st = datablock.data.stereo
column = layout.column()
column.row().prop(st, 'convergence_mode', expand=True)
sub = column.column()
sub.active = st.convergence_mode != 'PARALLEL'
sub.prop(st, 'convergence_distance')
column.prop(st, 'interocular_distance')
column.label(text='Pivot:')
column.row().prop(st, 'pivot', expand=True)
layout.separator()
layout.label(text='Camera:')
cam = datablock.data
row = layout.row(align=True)
row.menu('CAMERA_MT_presets', text=bpy.types.CAMERA_MT_presets.bl_label)
row.operator('camera.preset_add', text='', icon='ZOOMIN')
row.operator('camera.preset_add', text='', icon='ZOOMOUT').remove_active = True
layout.label(text='Sensor:')
split = layout.split()
column = split.column(align=True)
if cam.sensor_fit == 'AUTO':
column.prop(cam, 'sensor_width', text='Size')
else:
sub = column.column(align=True)
sub.active = cam.sensor_fit == 'HORIZONTAL'
sub.prop(cam, 'sensor_width', text='Width')
sub = column.column(align=True)
sub.active = cam.sensor_fit == 'VERTICAL'
sub.prop(cam, 'sensor_height', text='Height')
column = split.column(align=True)
column.prop(cam, 'sensor_fit', text='')
# empty
if datablock.type == 'EMPTY':
layout.label(text='Empty:')
layout.prop(datablock, 'empty_draw_type', text='Display')
if datablock.empty_draw_type == 'IMAGE':
layout.template_image(datablock, 'data', datablock.image_user, compact=True)
row = layout.row(align=True)
layout.prop(datablock, 'color', text='Transparency', index=3, slider=True)
row = layout.row(align=True)
row.prop(datablock, 'empty_image_offset', text='Offset X', index=0)
row.prop(datablock, 'empty_image_offset', text='Offset Y', index=1)
layout.prop(datablock, 'empty_draw_size', text='Size')
# lattice
if datablock.type == 'LATTICE':
layout.label(text='Lattice:')
lat = datablock.data
row = layout.row()
row.prop(lat, 'points_u')
row.prop(lat, 'interpolation_type_u', text='')
row = layout.row()
row.prop(lat, 'points_v')
row.prop(lat, 'interpolation_type_v', text='')
row = layout.row()
row.prop(lat, 'points_w')
row.prop(lat, 'interpolation_type_w', text='')
row = layout.row()
row.prop(lat, 'use_outside')
row.prop_search(lat, 'vertex_group', context.object, 'vertex_groups', text='')
# mball
if datablock.type == 'META':
layout.label(text='Metaball:')
mball = datablock.data
split = layout.split()
column = split.column()
column.label(text='Resolution:')
sub = column.column(align=True)
sub.prop(mball, 'resolution', text='View')
sub.prop(mball, 'render_resolution', text='Render')
column = split.column()
column.label(text='Settings:')
column.prop(mball, 'threshold', text='Threshold')
layout.label(text='Update:')
layout.prop(mball, 'update_method', expand=True)
layout.separator()
layout.label(text='Texture Space:')
mball = datablock.data
layout.prop(mball, 'use_auto_texspace')
row = layout.row()
row.column().prop(mball, 'texspace_location', text='Location')
row.column().prop(mball, 'texspace_size', text='Size')
if datablock.data.elements.active:
layout.separator()
layout.label(text='Active Element:')
metaelem = datablock.data.elements.active
layout.prop(metaelem, 'type')
split = layout.split()
column = split.column(align=True)
column.label(text='Settings:')
column.prop(metaelem, 'stiffness', text='Stiffness')
column.prop(metaelem, 'use_negative', text='Negative')
column.prop(metaelem, 'hide', text='Hide')
column = split.column(align=True)
if metaelem.type in {'CUBE', 'ELLIPSOID'}:
column.label(text='Size:')
column.prop(metaelem, 'size_x', text='X')
column.prop(metaelem, 'size_y', text='Y')
column.prop(metaelem, 'size_z', text='Z')
elif metaelem.type == 'TUBE':
column.label(text='Size:')
column.prop(metaelem, 'size_x', text='X')
elif metaelem.type == 'PLANE':
column.label(text='Size:')
column.prop(metaelem, 'size_x', text='X')
column.prop(metaelem, 'size_y', text='Y')
# mesh
if datablock.type == 'MESH':
layout.label(text='Normals')
mesh = datablock.data
split = layout.split()
column = split.column()
column.prop(mesh, 'use_auto_smooth')
sub = column.column()
sub.active = mesh.use_auto_smooth and not mesh.has_custom_normals
sub.prop(mesh, 'auto_smooth_angle', text='Angle')
split.prop(mesh, 'show_double_sided')
layout.separator()
layout.label(text='Texture Space:')
mesh = datablock.data
layout.prop(mesh, 'texture_mesh')
layout.separator()
layout.prop(mesh, 'use_auto_texspace')
row = layout.row()
row.column().prop(mesh, 'texspace_location', text='Location')
row.column().prop(mesh, 'texspace_size', text='Size')
layout.separator()
layout.label(text='Geometry Data:')
obj = datablock
me = datablock.data
column = layout.column()
column.operator('mesh.customdata_mask_clear', icon='X')
column.operator('mesh.customdata_skin_clear', icon='X')
if me.has_custom_normals:
column.operator('mesh.customdata_custom_splitnormals_clear', icon='X')
else:
column.operator('mesh.customdata_custom_splitnormals_add', icon='ZOOMIN')
column = layout.column()
column.enabled = (obj.mode != 'EDIT')
column.prop(me, 'use_customdata_vertex_bevel')
column.prop(me, 'use_customdata_edge_bevel')
column.prop(me, 'use_customdata_edge_crease')
# speaker
if datablock.type == 'SPEAKER':
if context.scene.render.engine in {'CYCLES', 'BLENDER_RENDER'}:
layout.label(text='Sound:')
speaker = datablock.data
split = layout.split(percentage=0.75)
split.template_ID(speaker, 'sound', open='sound.open_mono')
split.prop(speaker, 'muted')
row = layout.row()
row.prop(speaker, 'volume')
row.prop(speaker, 'pitch')
layout.separator()
layout.label(text='Distance:')
speaker = datablock.data
split = layout.split()
column = split.column()
column.label(text='Volume:')
column.prop(speaker, 'volume_min', text='Minimum')
column.prop(speaker, 'volume_max', text='Maximum')
column.prop(speaker, 'attenuation')
column = split.column()
column.label(text='Distance:')
column.prop(speaker, 'distance_max', text='Maximum')
column.prop(speaker, 'distance_reference', text='Reference')
layout.separator()
layout.label(text='Cone:')
speaker = datablock.data
split = layout.split()
column = split.column()
column.label(text='Angle:')
column.prop(speaker, 'cone_angle_outer', text='Outer')
column.prop(speaker, 'cone_angle_inner', text='Inner')
column = split.column()
column.label(text='Volume:')
column.prop(speaker, 'cone_volume_outer', text='Outer')
| proxeIO/name-panel | addon/interface/buttons/objectdata.py | Python | gpl-3.0 | 25,779 |
## features.py
##
## Copyright (C) 2003-2004 Alexey "Snake" Nezhdanov
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
# $Id: features.py,v 1.25 2009/04/07 07:11:48 snakeru Exp $
"""
This module contains various small features that are not worth splitting into separate modules.
Here is:
DISCO client and agents-to-DISCO and browse-to-DISCO emulators.
IBR and password manager.
jabber:iq:privacy methods
All these methods take a 'disp' first argument, which should be an already connected
(and in most cases already authorised) dispatcher instance.
"""
from protocol import *
REGISTER_DATA_RECEIVED='REGISTER DATA RECEIVED'
### DISCO ### http://jabber.org/protocol/disco ### JEP-0030 ####################
### Browse ### jabber:iq:browse ### JEP-0030 ###################################
### Agents ### jabber:iq:agents ### JEP-0030 ###################################
def _discover(disp,ns,jid,node=None,fb2b=0,fb2a=1):
""" Try to obtain info from the remote object.
If the remote object doesn't support disco, fall back to browse (if fb2b is true),
and if it doesn't support browse (or fb2b is not true) fall back to the agents protocol
(if fb2a is true). Returns obtained info. Used internally. """
iq=Iq(to=jid,typ='get',queryNS=ns)
if node: iq.setQuerynode(node)
rep=disp.SendAndWaitForResponse(iq)
if fb2b and not isResultNode(rep): rep=disp.SendAndWaitForResponse(Iq(to=jid,typ='get',queryNS=NS_BROWSE)) # Fallback to browse
if fb2a and not isResultNode(rep): rep=disp.SendAndWaitForResponse(Iq(to=jid,typ='get',queryNS=NS_AGENTS)) # Fallback to agents
if isResultNode(rep): return [n for n in rep.getQueryPayload() if isinstance(n, Node)]
return []
def discoverItems(disp,jid,node=None):
""" Query remote object about any items that it contains. Return items list. """
""" According to JEP-0030:
query MAY have node attribute
item: MUST HAVE jid attribute and MAY HAVE name, node, action attributes.
action attribute of item can be either of remove or update value."""
ret=[]
for i in _discover(disp,NS_DISCO_ITEMS,jid,node):
if i.getName()=='agent' and i.getTag('name'): i.setAttr('name',i.getTagData('name'))
ret.append(i.attrs)
return ret
def discoverInfo(disp,jid,node=None):
""" Query remote object about info that it publishes. Returns identities and features lists."""
""" According to JEP-0030:
query MAY have node attribute
identity: MUST HAVE category and name attributes and MAY HAVE type attribute.
feature: MUST HAVE var attribute"""
identities , features = [] , []
for i in _discover(disp,NS_DISCO_INFO,jid,node):
if i.getName()=='identity': identities.append(i.attrs)
elif i.getName()=='feature': features.append(i.getAttr('var'))
elif i.getName()=='agent':
if i.getTag('name'): i.setAttr('name',i.getTagData('name'))
if i.getTag('description'): i.setAttr('name',i.getTagData('description'))
identities.append(i.attrs)
if i.getTag('groupchat'): features.append(NS_GROUPCHAT)
if i.getTag('register'): features.append(NS_REGISTER)
if i.getTag('search'): features.append(NS_SEARCH)
return identities , features
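# Illustrative usage sketch (not part of the original module): assumes 'disp' is an
# already connected and authorised dispatcher instance and 'example.org' is a
# placeholder JID.
#   identities, features = discoverInfo(disp, 'example.org')
#   if NS_REGISTER in features:
#       items = discoverItems(disp, 'example.org')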
### Registration ### jabber:iq:register ### JEP-0077 ###########################
def getRegInfo(disp,host,info={},sync=True):
""" Gets registration form from remote host.
You can pre-fill the info dictionary.
E.g. if you are requesting info on registering user 'joey' then specify
info as {'username':'joey'}. See JEP-0077 for details.
'disp' must be connected dispatcher instance."""
iq=Iq('get',NS_REGISTER,to=host)
for i in info.keys(): iq.setTagData(i,info[i])
if sync:
resp=disp.SendAndWaitForResponse(iq)
_ReceivedRegInfo(disp.Dispatcher,resp, host)
return resp
else: disp.SendAndCallForResponse(iq,_ReceivedRegInfo, {'agent': host})
def _ReceivedRegInfo(con, resp, agent):
iq=Iq('get',NS_REGISTER,to=agent)
if not isResultNode(resp): return
df=resp.getTag('query',namespace=NS_REGISTER).getTag('x',namespace=NS_DATA)
if df:
con.Event(NS_REGISTER,REGISTER_DATA_RECEIVED,(agent, DataForm(node=df)))
return
df=DataForm(typ='form')
for i in resp.getQueryPayload():
if type(i)<>type(iq): pass
elif i.getName()=='instructions': df.addInstructions(i.getData())
else: df.setField(i.getName()).setValue(i.getData())
con.Event(NS_REGISTER,REGISTER_DATA_RECEIVED,(agent, df))
def register(disp,host,info):
""" Perform registration on remote server with provided info.
disp must be connected dispatcher instance.
Returns true or false depending on registration result.
If registration fails you can get additional info from the dispatcher's owner
attributes lastErrNode, lastErr and lastErrCode.
"""
iq=Iq('set',NS_REGISTER,to=host)
if type(info)<>type({}): info=info.asDict()
for i in info.keys(): iq.setTag('query').setTagData(i,info[i])
resp=disp.SendAndWaitForResponse(iq)
if isResultNode(resp): return 1
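# Hedged example of the registration flow described above (host and field values are
# illustrative; the actual form fields come from getRegInfo, per JEP-0077):
#   getRegInfo(disp, 'example.org', {'username': 'joey'})
#   if register(disp, 'example.org', {'username': 'joey', 'password': 'secret'}):
#       print 'registered'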
def unregister(disp,host):
""" Unregisters with host (permanently removes account).
disp must be connected and authorized dispatcher instance.
Returns true on success."""
resp=disp.SendAndWaitForResponse(Iq('set',NS_REGISTER,to=host,payload=[Node('remove')]))
if isResultNode(resp): return 1
def changePasswordTo(disp,newpassword,host=None):
""" Changes password on specified or current (if not specified) server.
disp must be connected and authorized dispatcher instance.
Returns true on success."""
if not host: host=disp._owner.Server
resp=disp.SendAndWaitForResponse(Iq('set',NS_REGISTER,to=host,payload=[Node('username',payload=[disp._owner.Server]),Node('password',payload=[newpassword])]))
if isResultNode(resp): return 1
### Privacy ### jabber:iq:privacy ### draft-ietf-xmpp-im-19 ####################
#type=[jid|group|subscription]
#action=[allow|deny]
def getPrivacyLists(disp):
""" Requests privacy lists from connected server.
Returns dictionary of existing lists on success."""
try:
dict={'lists':[]}
resp=disp.SendAndWaitForResponse(Iq('get',NS_PRIVACY))
if not isResultNode(resp): return
for list in resp.getQueryPayload():
if list.getName()=='list': dict['lists'].append(list.getAttr('name'))
else: dict[list.getName()]=list.getAttr('name')
return dict
except: pass
def getPrivacyList(disp,listname):
""" Requests specific privacy list listname. Returns list of XML nodes (rules)
taken from the server response."""
try:
resp=disp.SendAndWaitForResponse(Iq('get',NS_PRIVACY,payload=[Node('list',{'name':listname})]))
if isResultNode(resp): return resp.getQueryPayload()[0]
except: pass
def setActivePrivacyList(disp,listname=None,typ='active'):
""" Switches privacy list 'listname' to specified type.
By default the type is 'active'. Returns true on success."""
if listname: attrs={'name':listname}
else: attrs={}
resp=disp.SendAndWaitForResponse(Iq('set',NS_PRIVACY,payload=[Node(typ,attrs)]))
if isResultNode(resp): return 1
def setDefaultPrivacyList(disp,listname=None):
""" Sets the default privacy list as 'listname'. Returns true on success."""
return setActivePrivacyList(disp,listname,'default')
def setPrivacyList(disp,list):
""" Set the ruleset. 'list' should be the simpleXML node formatted
according to RFC 3921 (XMPP-IM) (I.e. Node('list',{'name':listname},payload=[...]) )
Returns true on success."""
resp=disp.SendAndWaitForResponse(Iq('set',NS_PRIVACY,payload=[list]))
if isResultNode(resp): return 1
def delPrivacyList(disp,listname):
""" Deletes privacy list 'listname'. Returns true on success."""
resp=disp.SendAndWaitForResponse(Iq('set',NS_PRIVACY,payload=[Node('list',{'name':listname})]))
if isResultNode(resp): return 1
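# Minimal privacy-list sketch (illustrative only), assuming 'disp' is connected and
# authorised and the rule node follows RFC 3921 as noted in setPrivacyList's docstring:
#   rule = Node('item', {'type': 'jid', 'value': 'spammer@example.org',
#                        'action': 'deny', 'order': '1'})
#   setPrivacyList(disp, Node('list', {'name': 'blocklist'}, payload=[rule]))
#   setActivePrivacyList(disp, 'blocklist')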
| lezizi/A-Framework | python/net-soruce/src/xmpp/features.py | Python | apache-2.0 | 8,760 |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 27 00:36:01 2016
@author: Tian
"""
import sys
import re
TYPE_STR='<type> should be one of: func, cut, dug, other'
def _procCommon(header, data, headHeader, regPat, regRep):
newHeader=headHeader+header;
newData=[]
cr=re.compile(regPat)
for line in data:
nl=cr.sub(regRep, line)
newData.append(nl)
return (newHeader, newData)
def _checkMatch(pat, line):
if not re.match(pat, line):
raise Exception('Cannot match content of the summary file. '
'Please check the prefix.')
def procFunc(header, data, tstPrefix):
# tst-0.4-diff-1-0.3.txt
# tst-(0.\d+)-diff-(\d(?:\.\d)?)-(0\.\d+)\.txt
_regPattern=tstPrefix+r'-(0.\d+)-\w+-(\d(?:\.\d)?)-(0\.\d+)\.txt'
_regReplace=r'\1\t\3\t\2'
_headHeader='theta\tminsup\talpha\t'+'num\t'
_checkMatch(_regPattern, data[0])
return _procCommon(header,data, _headHeader, _regPattern, _regReplace)
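# Worked example of the mapping above, using the hypothetical file name from the
# comment: a summary row whose first column is 'tst-0.4-diff-1-0.3.txt' becomes
# '0.4\t0.3\t1' (theta, minsup, alpha), followed by the rest of the original row.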
def procCut(header, data, tstPrefix):
# res-0.2-24-0.1-0.1.txt
_regPattern=tstPrefix+r'-(0.\d+)-(\d)(\d+)?-(0\.\d+)-(0\.\d+)\.txt'
_regReplace=r'\1\t\4\t\5\t\2\t\3'
_headHeader='theta\tf-pos\tf-neg\tn-min\tn-max\t'+'num\t'
_checkMatch(_regPattern, data[0])
return _procCommon(header,data, _headHeader, _regPattern, _regReplace)
def procDug(header, data, tstPrefix):
# res-0.03.txt
_regPattern=tstPrefix+r'-(0.\d+)\.txt'
_regReplace=r'\1'
_headHeader='thres\t'+'num\t'
_checkMatch(_regPattern, data[0])
return _procCommon(header,data, _headHeader, _regPattern, _regReplace)
def procOther(header, data, tstPrefix):
# tst-0.4-gspan-0.3.txt
# tst-0.4-apriori-0.3.txt
# tst-(0.\d+)-gspan-(\d(?:\.\d)?)\.txt
_regPattern=tstPrefix+r'-(0.\d+)-\w+-(\d(?:\.\d)?)\.txt'
_regReplace=r'\1\t\2'
_headHeader='theta\tminsup\t'+'num\t'
_checkMatch(_regPattern, data[0])
return _procCommon(header,data, _headHeader, _regPattern, _regReplace)
def output(fn, header, data):
with open(fn,'w') as f:
f.write(header)
for line in data:
f.write(line)
def main(smyType, smyFile, outputFn, tstPrefix):
if 'func'==smyType:
pFun=procFunc;
elif 'cut'==smyType:
pFun=procCut;
elif 'dug'==smyType:
pFun=procDug;
else:
#print('ERROR: '+TYPE_STR)
#exit()
print("Warning: trying general pattern.")
pFun=procOther;
with open(smyFile, 'r') as f:
# '\n' is kept at the end of each line
header=f.readline()
data=f.readlines()
f.close()
(newHeader,newData)=pFun(header,data,tstPrefix)
output(outputFn, newHeader, newData)
if __name__=='__main__':
if len(sys.argv)<4 or len(sys.argv)>5:
print('Usage: <type> <summary file> <output file> [prefix of test result files (def: tst)]')
print('\t'+TYPE_STR)
exit()
smyType=sys.argv[1]
smyFile=sys.argv[2]
outputFn=sys.argv[3]
tstPrefix='tst'
if len(sys.argv)==5:
tstPrefix=sys.argv[4]
main(smyType, smyFile, outputFn, tstPrefix)
| yxtj/GSDM | SupportScript/tabularize-summary.py | Python | apache-2.0 | 3,107 |
# coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class Apis(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def get_api_versions(self, **kwargs):
"""
get available API versions
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_api_versions(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_api_versions_with_http_info(**kwargs)
else:
(data) = self.get_api_versions_with_http_info(**kwargs)
return data
def get_api_versions_with_http_info(self, **kwargs):
"""
get available API versions
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_api_versions_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_versions" % key
)
params[key] = val
del params['kwargs']
resource_path = '/apis'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
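# Illustrative synchronous call (sketch only), assuming a reachable cluster is set up
# through the package's default Configuration/ApiClient:
#   api = Apis()
#   api.get_api_versions()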
| detiber/lib_openshift | lib_openshift/apis/apis.py | Python | apache-2.0 | 5,018 |
"Yang/Wu's OEP implementation, in PyQuante."
from math import sqrt
import settings
from PyQuante.NumWrap import zeros,matrixmultiply,transpose,dot,identity,\
array,solve
from PyQuante.Ints import getbasis, getints, getJ,get2JmK,getK
from PyQuante.LA2 import geigh,mkdens,trace2,simx
from PyQuante.hartree_fock import get_fock
from PyQuante.CGBF import three_center
from PyQuante.optimize import fminBFGS
from PyQuante.fermi_dirac import get_efermi, get_fermi_occs,mkdens_occs,\
get_entropy,mkdens_fermi
import logging
logger = logging.getLogger("pyquante")
gradcall=0
class EXXSolver:
"EXXSolver(solver)"
def __init__(self,solver):
# Solver is a pointer to a HF or a DFT calculation that has
# already converged
self.solver = solver
self.bfs = self.solver.bfs
self.nbf = len(self.bfs)
self.S = self.solver.S
self.h = self.solver.h
self.Ints = self.solver.Ints
self.molecule = self.solver.molecule
self.nel = self.molecule.get_nel()
self.nclosed, self.nopen = self.molecule.get_closedopen()
self.Enuke = self.molecule.get_enuke()
self.norb = self.nbf
self.orbs = self.solver.orbs
self.orbe = self.solver.orbe
self.Gij = []
for g in xrange(self.nbf):
gmat = zeros((self.nbf,self.nbf),'d')
self.Gij.append(gmat)
gbf = self.bfs[g]
for i in xrange(self.nbf):
ibf = self.bfs[i]
for j in xrange(i+1):
jbf = self.bfs[j]
gij = three_center(ibf,gbf,jbf)
gmat[i,j] = gij
gmat[j,i] = gij
D0 = mkdens(self.orbs,0,self.nclosed)
J0 = getJ(self.Ints,D0)
Vfa = (2.0*(self.nel-1.0)/self.nel)*J0
self.H0 = self.h + Vfa
self.b = zeros(self.nbf,'d')
return
def iterate(self,**kwargs):
self.iter = 0
self.etemp = kwargs.get("etemp",settings.DFTElectronTemperature)
logging.debug("iter Energy <b|b>")
logging.debug("---- ------ -----")
self.b = fminBFGS(self.get_energy,self.b,self.get_gradient,logger=logging)
return
def get_energy(self,b):
self.iter += 1
self.Hoep = get_Hoep(b,self.H0,self.Gij)
self.orbe,self.orbs = geigh(self.Hoep,self.S)
if self.etemp:
self.D,self.entropy = mkdens_fermi(self.nel,self.orbe,self.orbs,
self.etemp)
else:
self.D = mkdens(self.orbs,0,self.nclosed)
self.entropy=0
self.F = get_fock(self.D,self.Ints,self.h)
self.energy = trace2(self.h+self.F,self.D)+self.Enuke + self.entropy
if self.iter == 1 or self.iter % 10 == 0:
logging.debug("%4d %10.5f %10.5f" % (self.iter,self.energy,dot(b,b)))
return self.energy
def get_gradient(self,b):
energy = self.get_energy(b)
Fmo = simx(self.F,self.orbs)
bp = zeros(self.nbf,'d')
for g in xrange(self.nbf):
# Transform Gij[g] to MOs. This is done over the whole
# space rather than just the parts we need. I can speed
# this up later by only forming the i,a elements required
Gmo = simx(self.Gij[g],self.orbs)
# Now sum the appropriate terms to get the b gradient
for i in xrange(self.nclosed):
for a in xrange(self.nclosed,self.norb):
bp[g] = bp[g] + Fmo[i,a]*Gmo[i,a]/(self.orbe[i]-self.orbe[a])
#logging.debug("EXX Grad: %10.5f" % (sqrt(dot(bp,bp))))
return bp
class UEXXSolver:
"EXXSolver(solver)"
def __init__(self,solver):
# Solver is a pointer to a UHF calculation that has
# already converged
self.solver = solver
self.bfs = self.solver.bfs
self.nbf = len(self.bfs)
self.S = self.solver.S
self.h = self.solver.h
self.Ints = self.solver.Ints
self.molecule = self.solver.molecule
self.nel = self.molecule.get_nel()
self.nalpha, self.nbeta = self.molecule.get_alphabeta()
self.Enuke = self.molecule.get_enuke()
self.norb = self.nbf
self.orbsa = self.solver.orbsa
self.orbsb = self.solver.orbsb
self.orbea = self.solver.orbea
self.orbeb = self.solver.orbeb
self.Gij = []
for g in xrange(self.nbf):
gmat = zeros((self.nbf,self.nbf),'d')
self.Gij.append(gmat)
gbf = self.bfs[g]
for i in xrange(self.nbf):
ibf = self.bfs[i]
for j in xrange(i+1):
jbf = self.bfs[j]
gij = three_center(ibf,gbf,jbf)
gmat[i,j] = gij
gmat[j,i] = gij
D0 = mkdens(self.orbsa,0,self.nalpha)+mkdens(self.orbsb,0,self.nbeta)
J0 = getJ(self.Ints,D0)
Vfa = ((self.nel-1.)/self.nel)*J0
self.H0 = self.h + Vfa
self.b = zeros(2*self.nbf,'d')
return
def iterate(self,**kwargs):
self.etemp = kwargs.get("etemp",settings.DFTElectronTemperature)
self.iter = 0
logging.debug("iter Energy <b|b>")
logging.debug("---- ------ -----")
self.b = fminBFGS(self.get_energy,self.b,self.get_gradient,logger=logging)
return
def get_energy(self,b):
self.iter += 1
ba = b[:self.nbf]
bb = b[self.nbf:]
self.Hoepa = get_Hoep(ba,self.H0,self.Gij)
self.Hoepb = get_Hoep(bb,self.H0,self.Gij)
self.orbea,self.orbsa = geigh(self.Hoepa,self.S)
self.orbeb,self.orbsb = geigh(self.Hoepb,self.S)
if self.etemp:
self.Da,entropya = mkdens_fermi(2*self.nalpha,self.orbea,self.orbsa,
self.etemp)
self.Db,entropyb = mkdens_fermi(2*self.nbeta,self.orbeb,self.orbsb,
self.etemp)
self.entropy = 0.5*(entropya+entropyb)
else:
self.Da = mkdens(self.orbsa,0,self.nalpha)
self.Db = mkdens(self.orbsb,0,self.nbeta)
self.entropy=0
J = getJ(self.Ints,self.Da+self.Db)
Ka = getK(self.Ints,self.Da)
Kb = getK(self.Ints,self.Db)
self.Fa = self.h + J - Ka
self.Fb = self.h + J - Kb
self.energy = 0.5*(trace2(self.h+self.Fa,self.Da) +
trace2(self.h+self.Fb,self.Db))\
+ self.Enuke + self.entropy
if self.iter == 1 or self.iter % 10 == 0:
logging.debug("%4d %10.5f %10.5f" % (self.iter,self.energy,dot(b,b)))
return self.energy
def get_gradient(self,b):
energy = self.get_energy(b)
Fmoa = simx(self.Fa,self.orbsa)
Fmob = simx(self.Fb,self.orbsb)
bp = zeros(2*self.nbf,'d')
for g in xrange(self.nbf):
# Transform Gij[g] to MOs. This is done over the whole
# space rather than just the parts we need. I can speed
# this up later by only forming the i,a elements required
Gmo = simx(self.Gij[g],self.orbsa)
# Now sum the appropriate terms to get the b gradient
for i in xrange(self.nalpha):
for a in xrange(self.nalpha,self.norb):
bp[g] += Fmoa[i,a]*Gmo[i,a]/(self.orbea[i]-self.orbea[a])
for g in xrange(self.nbf):
# Transform Gij[g] to MOs. This is done over the whole
# space rather than just the parts we need. I can speed
# this up later by only forming the i,a elements required
Gmo = simx(self.Gij[g],self.orbsb)
# Now sum the appropriate terms to get the b gradient
for i in xrange(self.nbeta):
for a in xrange(self.nbeta,self.norb):
bp[self.nbf+g] += Fmob[i,a]*Gmo[i,a]/(self.orbeb[i]-self.orbeb[a])
#logging.debug("EXX Grad: %10.5f" % (sqrt(dot(bp,bp))))
return bp
def exx(atoms,orbs,**kwargs):
return oep_hf(atoms,orbs,**kwargs)
def oep_hf(atoms,orbs,**kwargs):
"""oep_hf - Form the optimized effective potential for HF exchange.
See notes on options and other args in oep routine.
"""
return oep(atoms,orbs,get_exx_energy,get_exx_gradient,**kwargs)
def oep(atoms,orbs,energy_func,grad_func=None,**kwargs):
"""oep - Form the optimized effective potential for a given energy expression
oep(atoms,orbs,energy_func,grad_func=None,**kwargs)
atoms A Molecule object containing a list of the atoms
orbs A matrix of guess orbitals
energy_func The function that returns the energy for the given method
grad_func The function that returns the force for the given method
Options
-------
verbose False Output terse information to stdout (default)
True Print out additional information
ETemp False Use ETemp value for finite temperature DFT (default)
float Use (float) for the electron temperature
bfs None The basis functions to use. List of CGBF's
basis_data None The basis data to use to construct bfs
integrals None The one- and two-electron integrals to use
If not None, S,h,Ints
"""
verbose = kwargs.get('verbose')
ETemp = kwargs.get('ETemp',settings.DFTElectronTemperature)
opt_method = kwargs.get('opt_method',settings.OEPOptMethod)
bfs = getbasis(atoms,**kwargs)
# The basis set for the potential can be set different from
# that used for the wave function
pbfs = kwargs.get('pbfs')
if not pbfs: pbfs = bfs
npbf = len(pbfs)
S,h,Ints = getints(bfs,atoms,**kwargs)
nel = atoms.get_nel()
nocc,nopen = atoms.get_closedopen()
Enuke = atoms.get_enuke()
# Form the OEP using Yang/Wu, PRL 89 143002 (2002)
nbf = len(bfs)
norb = nbf
bp = zeros(nbf,'d')
bvec = kwargs.get('bvec')
if bvec:
assert len(bvec) == npbf
b = array(bvec)
else:
b = zeros(npbf,'d')
# Form and store all of the three-center integrals
# we're going to need.
# These are <ibf|gbf|jbf> (where 'bf' indicates basis func,
# as opposed to MO)
# N^3 storage -- obviously you don't want to do this for
# very large systems
Gij = []
for g in xrange(npbf):
gmat = zeros((nbf,nbf),'d')
Gij.append(gmat)
gbf = pbfs[g]
for i in xrange(nbf):
ibf = bfs[i]
for j in xrange(i+1):
jbf = bfs[j]
gij = three_center(ibf,gbf,jbf)
gmat[i,j] = gij
gmat[j,i] = gij
# Compute the Fermi-Amaldi potential based on the LDA density.
# We're going to form this matrix from the Coulombic matrix that
# arises from the input orbitals. D0 and J0 refer to the density
# matrix and corresponding Coulomb matrix
D0 = mkdens(orbs,0,nocc)
J0 = getJ(Ints,D0)
Vfa = (2*(nel-1.)/nel)*J0
H0 = h + Vfa
b = fminBFGS(energy_func,b,grad_func,
(nbf,nel,nocc,ETemp,Enuke,S,h,Ints,H0,Gij),
logger=logging)
energy,orbe,orbs = energy_func(b,nbf,nel,nocc,ETemp,Enuke,
S,h,Ints,H0,Gij,return_flag=1)
return energy,orbe,orbs
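# Sketch of a typical call, mirroring test_old() below: 'mol', 'orbs_hf' and the
# integrals come from a converged RHF run.
#   E_exx, orbe_exx, orbs_exx = oep_hf(mol, orbs_hf, bfs=bfs, integrals=(S, h, Ints))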
def get_exx_energy(b,nbf,nel,nocc,ETemp,Enuke,S,h,Ints,H0,Gij,**kwargs):
"""Computes the energy for the OEP/HF functional
Options:
return_flag 0 Just return the energy
1 Return energy, orbe, orbs
2 Return energy, orbe, orbs, F
"""
return_flag = kwargs.get('return_flag')
Hoep = get_Hoep(b,H0,Gij)
orbe,orbs = geigh(Hoep,S)
if ETemp:
efermi = get_efermi(nel,orbe,ETemp)
occs = get_fermi_occs(efermi,orbe,ETemp)
D = mkdens_occs(orbs,occs)
entropy = get_entropy(occs,ETemp)
else:
D = mkdens(orbs,0,nocc)
F = get_fock(D,Ints,h)
energy = trace2(h+F,D)+Enuke
if ETemp: energy += entropy
iref = nel/2
gap = 627.51*(orbe[iref]-orbe[iref-1])
logging.debug("EXX Energy, B, Gap: %10.5f %10.5f %10.5f"
% (energy,sqrt(dot(b,b)),gap))
#logging.debug("%s" % orbe)
if return_flag == 1:
return energy,orbe,orbs
elif return_flag == 2:
return energy,orbe,orbs,F
return energy
def get_exx_gradient(b,nbf,nel,nocc,ETemp,Enuke,S,h,Ints,H0,Gij,**kwargs):
"""Computes the gradient for the OEP/HF functional.
return_flag 0 Just return gradient
1 Return energy,gradient
2 Return energy,gradient,orbe,orbs
"""
# Dump the gradient every 10 steps so we can restart...
global gradcall
gradcall += 1
#if gradcall % 5 == 0: logging.debug("B vector:\n%s" % b)
# Form the new potential and the new orbitals
energy,orbe,orbs,F = get_exx_energy(b,nbf,nel,nocc,ETemp,Enuke,
S,h,Ints,H0,Gij,return_flag=2)
Fmo = matrixmultiply(transpose(orbs),matrixmultiply(F,orbs))
norb = nbf
bp = zeros(nbf,'d') # dE/db
for g in xrange(nbf):
# Transform Gij[g] to MOs. This is done over the whole
# space rather than just the parts we need. I can speed
# this up later by only forming the i,a elements required
Gmo = matrixmultiply(transpose(orbs),matrixmultiply(Gij[g],orbs))
# Now sum the appropriate terms to get the b gradient
for i in xrange(nocc):
for a in xrange(nocc,norb):
bp[g] = bp[g] + Fmo[i,a]*Gmo[i,a]/(orbe[i]-orbe[a])
#logging.debug("EXX Grad: %10.5f" % (sqrt(dot(bp,bp))))
return_flag = kwargs.get('return_flag')
if return_flag == 1:
return energy,bp
elif return_flag == 2:
return energy,bp,orbe,orbs
return bp
def get_Hoep(b,H0,Gij):
Hoep = H0
# Add the contributions from the gaussian potential functions
# H[ij] += b[g]*<ibf|g|jbf>
for g in xrange(len(b)):
Hoep = Hoep + b[g]*Gij[g]
return Hoep
# Here's a much faster way to do this. Haven't figured out how to
# do it for more generic functions like OEP-GVB
def oep_hf_an(atoms,orbs,**kwargs):
"""oep_hf - Form the optimized effective potential for HF exchange.
Implementation of Wu and Yang's Approximate Newton Scheme
from J. Theor. Comp. Chem. 2, 627 (2003).
oep_hf(atoms,orbs,**kwargs)
atoms A Molecule object containing a list of the atoms
orbs A matrix of guess orbitals
Options
-------
bfs None The basis functions to use for the wfn
pbfs None The basis functions to use for the pot
basis_data None The basis data to use to construct bfs
integrals None The one- and two-electron integrals to use
If not None, S,h,Ints
"""
maxiter = kwargs.get('maxiter',settings.OEPIters)
tol = kwargs.get('tol',settings.OEPTolerance)
bfs = getbasis(atoms,**kwargs)
# The basis set for the potential can be set different from
# that used for the wave function
pbfs = kwargs.get('pbfs')
if not pbfs: pbfs = bfs
npbf = len(pbfs)
S,h,Ints = getints(bfs,atoms)
nel = atoms.get_nel()
nocc,nopen = atoms.get_closedopen()
Enuke = atoms.get_enuke()
# Form the OEP using Yang/Wu, PRL 89 143002 (2002)
nbf = len(bfs)
norb = nbf
bp = zeros(nbf,'d')
bvec = kwargs.get('bvec')
if bvec:
assert len(bvec) == npbf
b = array(bvec)
else:
b = zeros(npbf,'d')
# Form and store all of the three-center integrals
# we're going to need.
# These are <ibf|gbf|jbf> (where 'bf' indicates basis func,
# as opposed to MO)
# N^3 storage -- obviously you don't want to do this for
# very large systems
Gij = []
for g in xrange(npbf):
gmat = zeros((nbf,nbf),'d')
Gij.append(gmat)
gbf = pbfs[g]
for i in xrange(nbf):
ibf = bfs[i]
for j in xrange(i+1):
jbf = bfs[j]
gij = three_center(ibf,gbf,jbf)
gmat[i,j] = gij
gmat[j,i] = gij
# Compute the Fermi-Amaldi potential based on the LDA density.
# We're going to form this matrix from the Coulombic matrix that
# arises from the input orbitals. D0 and J0 refer to the density
# matrix and corresponding Coulomb matrix
D0 = mkdens(orbs,0,nocc)
J0 = getJ(Ints,D0)
Vfa = (2*(nel-1.)/nel)*J0
H0 = h + Vfa
b = zeros(nbf,'d')
eold = 0
for iter in xrange(maxiter):
Hoep = get_Hoep(b,H0,Gij)
orbe,orbs = geigh(Hoep,S)
D = mkdens(orbs,0,nocc)
Vhf = get2JmK(Ints,D)
energy = trace2(2*h+Vhf,D)+Enuke
if abs(energy-eold) < tol:
break
else:
eold = energy
logging.debug("OEP AN Opt: %d %f" % (iter,energy))
dV_ao = Vhf-Vfa
dV = matrixmultiply(transpose(orbs),matrixmultiply(dV_ao,orbs))
X = zeros((nbf,nbf),'d')
c = zeros(nbf,'d')
Gkt = zeros((nbf,nbf),'d')
for k in xrange(nbf):
# This didn't work; in fact, it made things worse:
Gk = matrixmultiply(transpose(orbs),matrixmultiply(Gij[k],orbs))
for i in xrange(nocc):
for a in xrange(nocc,norb):
c[k] += dV[i,a]*Gk[i,a]/(orbe[i]-orbe[a])
for l in xrange(nbf):
Gl = matrixmultiply(transpose(orbs),matrixmultiply(Gij[l],orbs))
for i in xrange(nocc):
for a in xrange(nocc,norb):
X[k,l] += Gk[i,a]*Gl[i,a]/(orbe[i]-orbe[a])
# This should actually be a pseudoinverse...
b = solve(X,c)
logger.info("Final OEP energy = %f" % energy)
return energy,orbe,orbs
def oep_uhf_an(atoms,orbsa,orbsb,**kwargs):
"""oep_hf - Form the optimized effective potential for HF exchange.
Implementation of Wu and Yang's Approximate Newton Scheme
from J. Theor. Comp. Chem. 2, 627 (2003).
oep_uhf(atoms,orbs,**kwargs)
atoms A Molecule object containing a list of the atoms
orbs A matrix of guess orbitals
Options
-------
bfs None The basis functions to use for the wfn
pbfs None The basis functions to use for the pot
basis_data None The basis data to use to construct bfs
integrals None The one- and two-electron integrals to use
If not None, S,h,Ints
"""
maxiter = kwargs.get('maxiter',settings.OEPIters)
tol = kwargs.get('tol',settings.OEPTolerance)
ETemp = kwargs.get('ETemp',settings.DFTElectronTemperature)
bfs = getbasis(atoms,**kwargs)
# The basis set for the potential can be set different from
# that used for the wave function
pbfs = kwargs.get('pbfs')
if not pbfs: pbfs = bfs
npbf = len(pbfs)
S,h,Ints = getints(bfs,atoms,**kwargs)
nel = atoms.get_nel()
nclosed,nopen = atoms.get_closedopen()
nalpha,nbeta = nclosed+nopen,nclosed
Enuke = atoms.get_enuke()
# Form the OEP using Yang/Wu, PRL 89 143002 (2002)
nbf = len(bfs)
norb = nbf
ba = zeros(npbf,'d')
bb = zeros(npbf,'d')
# Form and store all of the three-center integrals
# we're going to need.
# These are <ibf|gbf|jbf> (where 'bf' indicates basis func,
# as opposed to MO)
# N^3 storage -- obviously you don't want to do this for
# very large systems
Gij = []
for g in xrange(npbf):
gmat = zeros((nbf,nbf),'d')
Gij.append(gmat)
gbf = pbfs[g]
for i in xrange(nbf):
ibf = bfs[i]
for j in xrange(i+1):
jbf = bfs[j]
gij = three_center(ibf,gbf,jbf)
gmat[i,j] = gij
gmat[j,i] = gij
# Compute the Fermi-Amaldi potential based on the LDA density.
# We're going to form this matrix from the Coulombic matrix that
# arises from the input orbitals. D0 and J0 refer to the density
# matrix and corresponding Coulomb matrix
D0 = mkdens(orbsa,0,nalpha)+mkdens(orbsb,0,nbeta)
J0 = getJ(Ints,D0)
Vfa = ((nel-1.)/nel)*J0
H0 = h + Vfa
eold = 0
for iter in xrange(maxiter):
Hoepa = get_Hoep(ba,H0,Gij)
Hoepb = get_Hoep(bb,H0,Gij)
orbea,orbsa = geigh(Hoepa,S)
orbeb,orbsb = geigh(Hoepb,S)
if ETemp:
efermia = get_efermi(2*nalpha,orbea,ETemp)
occsa = get_fermi_occs(efermia,orbea,ETemp)
Da = mkdens_occs(orbsa,occsa)
efermib = get_efermi(2*nbeta,orbeb,ETemp)
occsb = get_fermi_occs(efermib,orbeb,ETemp)
Db = mkdens_occs(orbsb,occsb)
entropy = 0.5*(get_entropy(occsa,ETemp)+get_entropy(occsb,ETemp))
else:
Da = mkdens(orbsa,0,nalpha)
Db = mkdens(orbsb,0,nbeta)
J = getJ(Ints,Da) + getJ(Ints,Db)
Ka = getK(Ints,Da)
Kb = getK(Ints,Db)
energy = (trace2(2*h+J-Ka,Da)+trace2(2*h+J-Kb,Db))/2\
+Enuke
if ETemp: energy += entropy
if abs(energy-eold) < tol:
break
else:
eold = energy
logging.debug("OEP AN Opt: %d %f" % (iter,energy))
# Do alpha and beta separately
# Alphas
dV_ao = J-Ka-Vfa
dV = matrixmultiply(orbsa,matrixmultiply(dV_ao,transpose(orbsa)))
X = zeros((nbf,nbf),'d')
c = zeros(nbf,'d')
for k in xrange(nbf):
Gk = matrixmultiply(orbsa,matrixmultiply(Gij[k],
transpose(orbsa)))
for i in xrange(nalpha):
for a in xrange(nalpha,norb):
c[k] += dV[i,a]*Gk[i,a]/(orbea[i]-orbea[a])
for l in xrange(nbf):
Gl = matrixmultiply(orbsa,matrixmultiply(Gij[l],
transpose(orbsa)))
for i in xrange(nalpha):
for a in xrange(nalpha,norb):
X[k,l] += Gk[i,a]*Gl[i,a]/(orbea[i]-orbea[a])
# This should actually be a pseudoinverse...
ba = solve(X,c)
# Betas
dV_ao = J-Kb-Vfa
dV = matrixmultiply(orbsb,matrixmultiply(dV_ao,transpose(orbsb)))
X = zeros((nbf,nbf),'d')
c = zeros(nbf,'d')
for k in xrange(nbf):
Gk = matrixmultiply(orbsb,matrixmultiply(Gij[k],
transpose(orbsb)))
for i in xrange(nbeta):
for a in xrange(nbeta,norb):
c[k] += dV[i,a]*Gk[i,a]/(orbeb[i]-orbeb[a])
for l in xrange(nbf):
Gl = matrixmultiply(orbsb,matrixmultiply(Gij[l],
transpose(orbsb)))
for i in xrange(nbeta):
for a in xrange(nbeta,norb):
X[k,l] += Gk[i,a]*Gl[i,a]/(orbeb[i]-orbeb[a])
# This should actually be a pseudoinverse...
bb = solve(X,c)
logger.info("Final OEP energy = %f" % energy)
return energy,(orbea,orbeb),(orbsa,orbsb)
def test_old():
from PyQuante.Molecule import Molecule
from PyQuante.Ints import getbasis,getints
from PyQuante.hartree_fock import rhf
logging.basicConfig(level=logging.DEBUG,format="%(message)s")
#mol = Molecule('HF',[('H',(0.,0.,0.)),('F',(0.,0.,0.898369))],
# units='Angstrom')
mol = Molecule('LiH',[(1,(0,0,1.5)),(3,(0,0,-1.5))],units = 'Bohr')
bfs = getbasis(mol)
S,h,Ints = getints(bfs,mol)
print "after integrals"
E_hf,orbe_hf,orbs_hf = rhf(mol,bfs=bfs,integrals=(S,h,Ints),DoAveraging=True)
print "RHF energy = ",E_hf
E_exx,orbe_exx,orbs_exx = exx(mol,orbs_hf,bfs=bfs,integrals=(S,h,Ints))
return
def test():
from PyQuante import Molecule, HFSolver, DFTSolver, UHFSolver
logging.basicConfig(level=logging.DEBUG,format="%(message)s")
mol = Molecule("He",[(2,(0,0,0))])
solver = HFSolver(mol)
solver.iterate()
print "HF energy = ",solver.energy
dft_solver = DFTSolver(mol)
dft_solver.iterate()
print "DFT energy = ",dft_solver.energy
oep = EXXSolver(solver)
# Testing 0 temp
oep.iterate()
# Testing finite temp
oep.iterate(etemp=40000)
return
def utest():
from PyQuante import Molecule, HFSolver, DFTSolver, UHFSolver
logging.basicConfig(level=logging.DEBUG,format="%(message)s")
mol = Molecule("He",[(2,(0,0,0))])
mol = Molecule("Li",[(3,(0,0,0))],multiplicity=2)
solver = UHFSolver(mol)
solver.iterate()
print "HF energy = ",solver.energy
dft_solver = DFTSolver(mol)
dft_solver.iterate()
print "DFT energy = ",dft_solver.energy
oep = UEXXSolver(solver)
# Testing 0 temp
oep.iterate()
# Testing finite temp
oep.iterate(etemp=10000)
return
if __name__ == '__main__':
test()
utest()
| berquist/PyQuante | PyQuante/OEP.py | Python | bsd-3-clause | 25,427 |
#!/usr/bin/env python3
# Copyright 2019 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import pprint
import argparse
import lib.logger as logger
from lib.config import Config
from lib.switch import SwitchFactory
from lib.switch_exception import SwitchException
from lib.genesis import GEN_PATH
# from write_switch_memory import WriteSwitchMemory
FILE_PATH = os.path.dirname(os.path.abspath(__file__))
PP = pprint.PrettyPrinter(indent=1, width=120)
class Tree(dict):
"""Instantiates a nested dictionary which allows assignment to arbitrary
depths.
"""
def __getitem__(self, key):
if key in self:
return self.get(key)
return self.setdefault(key, Tree())
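# Example of the arbitrary-depth assignment Tree enables (keys are hypothetical):
#   t = Tree()
#   t['bond0']['compute']['sw1'] = ['1', '2']  # intermediate dicts are created on access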
def _get_port_chan_list():
"""
Args:
Returns:
Tree of switches and port channels or mlag port channels. Switches in
an MLAG are grouped in pairs.
"""
log = logger.getlogger()
ifcs = CFG.get_interfaces()
# Gather bond definitions from interfaces list
bond_ifcs = {}
for ifc in ifcs:
if 'bond_mode' in ifc:
for _ifc in ifcs:
if 'bond_master' in _ifc and _ifc['bond_master'] == ifc['iface']:
if ifc['label'] in bond_ifcs:
bond_ifcs[ifc['label']].append(_ifc['label'])
else:
bond_ifcs[ifc['label']] = [_ifc['label']]
elif 'BONDING_MASTER' in ifc:
for _ifc in ifcs:
if 'MASTER' in _ifc and _ifc['MASTER'] == ifc['DEVICE']:
if ifc['label'] in bond_ifcs:
bond_ifcs[ifc['label']].append(_ifc['label'])
else:
bond_ifcs[ifc['label']] = [_ifc['label']]
pretty_str = PP.pformat(bond_ifcs)
log.debug('bond_ifcs')
log.debug('\n' + pretty_str)
# Gather bond node template, switch and port information
bonds = Tree()
for bond in bond_ifcs:
for ntmpl_ind, ntmpl_label in enumerate(CFG.yield_ntmpl_label()):
ntmpl_ifcs = CFG.get_ntmpl_ifcs_all(ntmpl_ind)
if bond in ntmpl_ifcs:
for phyintf_idx in CFG.yield_ntmpl_phyintf_data_ind(ntmpl_ind):
phyintf = CFG.get_ntmpl_phyintf_data_ifc(
ntmpl_ind, phyintf_idx)
if phyintf in bond_ifcs[bond]:
switch = CFG.get_ntmpl_phyintf_data_switch(
ntmpl_ind, phyintf_idx)
ports = CFG.get_ntmpl_phyintf_data_ports(
ntmpl_ind, phyintf_idx)
ports = [str(ports[i]) for i in range(len(ports))]
bonds[bond][ntmpl_label][phyintf][switch] = ports
pretty_str = PP.pformat(bonds)
log.debug('Bonds:')
log.debug('\n' + pretty_str)
# For each bond, aggregate ports across node templates and group into port
# channel groups
ports_list = Tree()
for bond in bonds:
for ntmpl in bonds[bond]:
bond_ports_list = Tree()
for ifc in bonds[bond][ntmpl]:
for switch in bonds[bond][ntmpl][ifc]:
ports = bonds[bond][ntmpl][ifc][switch]
if switch not in bond_ports_list:
bond_ports_list[switch] = [ports]
else:
bond_ports_list[switch].append(ports)
for switch in bond_ports_list:
# group the ports into channel groups
if switch not in ports_list[bond][ntmpl]:
ports_list[bond][ntmpl][switch] = zip(*bond_ports_list[switch])
else:
ports_list[bond][ntmpl][switch] += zip(*bond_ports_list[switch])
pretty_str = PP.pformat(ports_list)
log.debug('ports_list:')
log.debug('\n' + pretty_str)
chan_ports = Tree()
# Aggregate port groups across switches or mlag switch pairs.
# Final data structure is a dictionary organized by bond, node template,
# switch / switch pair.
for bond in ports_list:
for ntmpl in ports_list[bond]:
for switch in ports_list[bond][ntmpl]:
peer_switch = CFG.get_sw_data_mlag_peer(switch)
mstr_switch = CFG.get_sw_data_mstr_switch([switch, peer_switch])
chan_ports[bond][ntmpl][mstr_switch][switch] = \
ports_list[bond][ntmpl][switch]
pretty_str = PP.pformat(chan_ports)
log.debug('Port channel ports:')
log.debug('\n' + pretty_str)
return chan_ports
def _get_vlan_info(ifc):
ifcs = CFG.get_interfaces()
vlan_num = None
vlan_ifc_name = ''
for _ifc in ifcs:
if _ifc['label'] == ifc:
if 'vlan_raw_device' in _ifc:
vlan_num = int(_ifc['iface'].rpartition('.')[2])
vlan_ifc_name = _ifc['vlan_raw_device']
break
elif 'VLAN' in _ifc:
vlan_num = int(_ifc['DEVICE'].rpartition('.')[2])
vlan_ifc_name = _ifc['DEVICE'].rpartition('.')[0]
break
return vlan_num, vlan_ifc_name
def _get_vlan_slaves(vlan_ifc_name):
ifcs = CFG.get_interfaces()
vlan_slaves = []
for _ifc in ifcs:
if 'bond_master' in _ifc and _ifc['bond_master'] == vlan_ifc_name:
vlan_slaves.append(_ifc['label'])
elif 'MASTER' in _ifc and _ifc['MASTER'] == vlan_ifc_name:
vlan_slaves.append(_ifc['label'])
return vlan_slaves
def _get_vlan_list():
""" Aggregate vlan data.
Args:
Returns:
Tree of switches and vlan information by port
"""
log = logger.getlogger()
vlan_list = Tree()
for ntmpl_ind in CFG.yield_ntmpl_ind():
ntmpl_ifcs = CFG.get_ntmpl_ifcs_all(ntmpl_ind)
for ifc in ntmpl_ifcs:
vlan_num, vlan_ifc_name = _get_vlan_info(ifc)
if vlan_num:
vlan_slaves = _get_vlan_slaves(vlan_ifc_name)
for phyintf_idx in CFG.yield_ntmpl_phyintf_data_ind(ntmpl_ind):
phy_ifc_lbl = CFG.get_ntmpl_phyintf_data_ifc(ntmpl_ind, phyintf_idx)
if phy_ifc_lbl in vlan_slaves:
vlan_ports = CFG.get_ntmpl_phyintf_data_ports(
ntmpl_ind, phyintf_idx)
switch = CFG.get_ntmpl_phyintf_data_switch(
ntmpl_ind, phyintf_idx)
if vlan_num in vlan_list[switch]:
vlan_list[switch][vlan_num] += vlan_ports
else:
vlan_list[switch][vlan_num] = vlan_ports
pretty_str = PP.pformat(vlan_list)
log.debug('vlan list')
log.debug('\n' + pretty_str)
# Aggregate by switch and port number
port_vlans = Tree()
for switch in vlan_list:
for vlan in vlan_list[switch]:
for port in vlan_list[switch][vlan]:
if str(port) in port_vlans[switch]:
port_vlans[switch][str(port)].append(vlan)
else:
port_vlans[switch][str(port)] = [vlan]
pretty_str = PP.pformat(port_vlans)
log.debug('port_vlans')
log.debug('\n' + pretty_str)
return port_vlans
def _get_mtu_list():
""" Aggregate mtu port data.
Returns: Dictionary of {switch : {port : mtu value, ...}}
"""
log = logger.getlogger()
mtu_list = Tree()
for ntmpl_ind in CFG.yield_ntmpl_ind():
for phyintf_idx in CFG.yield_ntmpl_phyintf_data_ind(ntmpl_ind):
mtu = ''
phy_ifc = CFG.get_ntmpl_phyintf_data_ifc(ntmpl_ind, phyintf_idx)
ifc = CFG.get_interface(phy_ifc)
if 'mtu' in ifc:
mtu = ifc['mtu']
elif 'MTU' in ifc:
mtu = ifc['MTU']
if mtu:
switch = CFG.get_ntmpl_phyintf_data_switch(ntmpl_ind, phyintf_idx)
ports = CFG.get_ntmpl_phyintf_data_ports(ntmpl_ind, phyintf_idx)
if switch in mtu_list and mtu in mtu_list[switch]:
mtu_list[switch][mtu] += ports
else:
mtu_list[switch][mtu] = ports
pretty_str = PP.pformat(mtu_list)
log.debug('mtu_list')
log.debug('\n' + pretty_str)
return mtu_list
def _get_mlag_info():
""" Get mlag switches and their config info
Returns:
dict of : mlag config info
"""
log = logger.getlogger()
mlag_list = Tree()
for sw_lbl in CFG.yield_sw_data_label():
peer_lbl = CFG.get_sw_data_mlag_peer(sw_lbl)
mstr_sw = CFG.get_sw_data_mstr_switch([sw_lbl, peer_lbl])
if peer_lbl and mstr_sw == sw_lbl and mstr_sw not in mlag_list:
mlag_list[mstr_sw][sw_lbl]
mlag_list[mstr_sw][peer_lbl]
for mstr_sw in mlag_list:
for sw in mlag_list[mstr_sw]:
sw_idx = CFG.get_sw_data_index_by_label(sw)
for link_idx, link in enumerate(CFG.yield_sw_data_links_target(sw_idx)):
if link in mlag_list[mstr_sw]:
mlag_list[mstr_sw][sw]['vlan'] = \
CFG.get_sw_data_links_vlan(sw_idx, link_idx)
if sw == mstr_sw:
mlag_list[mstr_sw][sw]['vip'] = None
else:
mlag_list[mstr_sw][sw]['vip'] = \
CFG.get_sw_data_links_vip(sw_idx, link_idx) + ' /' + \
str(CFG.get_depl_netw_mgmt_prefix()[0])
mlag_list[mstr_sw][sw]['ports'] = \
CFG.get_sw_data_links_port(sw_idx, link_idx)
mlag_list[mstr_sw][sw]['cidr'] = \
CFG.get_sw_data_links_ip(sw_idx, link_idx) + ' /' + \
str(CFG.get_sw_data_links_prefix(sw_idx, link_idx))
if len(mlag_list[mstr_sw]) == 2:
keys = sorted(mlag_list[mstr_sw].keys())
mlag_list[mstr_sw][keys[0]]['peer_ip'] = \
str(mlag_list[mstr_sw][keys[1]]['cidr']).split(' /')[0]
mlag_list[mstr_sw][keys[1]]['peer_ip'] = \
str(mlag_list[mstr_sw][keys[0]]['cidr']).split(' /')[0]
break
pretty_str = PP.pformat(mlag_list)
log.debug('mlag_list')
log.debug('\n' + pretty_str)
return mlag_list
def _is_port_in_a_port_channel(switch, port, chan_ports):
""" Returns True if port in a port channel, else returns False.
Args:
switch (str): switch label
port (int or str): port number
"""
for sw in chan_ports:
for _sw in chan_ports[sw]:
if switch == _sw:
for port_group in chan_ports[sw][_sw]:
if port in port_group:
return True
break
return False
def _get_port_vlans(switch, port, port_vlans):
if port in port_vlans[switch]:
return port_vlans[switch][port]
def _get_port_mtu(switch, port, mtu_list):
for mtu in mtu_list[switch]:
if port in mtu_list[switch][mtu]:
return mtu
def _get_channel_num(port_grp):
""" Return a channel number given a port group. The lowest value
port number in the group is returned. No checks are made to insure
that all ports are in the same chassis.
Args:
port_group: (tuple or list of str representing port numbers
of the form 'n' or 'm/n' or 'ethm/n' or similar
"""
return min([int(port_grp[i].rpartition('/')[-1])
for i in range(len(port_grp))])
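# Illustrative examples (added, not in the original source): the channel number
# is the smallest trailing port number in the group, regardless of prefix, e.g.
#   _get_channel_num(('eth1/7', 'eth2/3'))  ->  3
#   _get_channel_num(['18', '20'])          ->  18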
def configure_data_switch(config_path):
""" Configures data (access) switches. Configuration is driven by the
config.yml file.
Args:
Returns:
"""
log = logger.getlogger()
global CFG
CFG = Config(config_path)
port_vlans = _get_vlan_list()
mtu_list = _get_mtu_list()
chan_ports = _get_port_chan_list()
mlag_list = _get_mlag_info()
# Create switch class instances for each switch
sw_dict = {}
# create dictionaries to hold enumerations for each switch
port_mode = {}
allow_op = {}
for sw_ai in CFG.yield_sw_data_access_info():
label = sw_ai[0]
sw_dict[label] = SwitchFactory.factory(*sw_ai[1:])
port_mode[label], allow_op[label] = sw_dict[label].get_enums()
# Program switch vlans
for switch in port_vlans:
vlans = []
for port in port_vlans[switch]:
print('.', end="")
sys.stdout.flush()
for vlan in port_vlans[switch][port]:
if vlan not in vlans:
vlans.append(vlan)
sw_dict[switch].create_vlan(vlan)
log.debug('Creating vlan {} on switch {}'.format(vlan, switch))
try:
sw_dict[switch].set_switchport_mode(port, port_mode[switch].TRUNK)
except SwitchException as exc:
log.warning('Switch: {}. Failed setting port {} to trunk mode'.
format(switch, port))
log.warning(str(exc))
try:
sw_dict[switch].allowed_vlans_port(port, allow_op[switch].ADD,
port_vlans[switch][port])
except SwitchException as exc:
log.warning('Switch: {}. Failed adding vlans {} to port {}'.
format(switch, port_vlans[switch][port], port))
log.warning(str(exc))
log.debug('switch: {} port: {} vlans: {}'.format(
switch, port, port_vlans[switch][port]))
# Program switch mtu
for switch in mtu_list:
for mtu in mtu_list[switch]:
for port in mtu_list[switch][mtu]:
sw_dict[switch].set_mtu_for_port(port, mtu)
log.debug('port: {} set mtu: {}'.format(port, mtu))
# Configure MLAG
for mstr_sw in mlag_list:
log.debug('Configuring MLAG. mlag switch mstr: ' + mstr_sw)
for sw in mlag_list[mstr_sw]:
is_mlag = sw_dict[sw].is_mlag_configured()
log.debug('vPC/MLAG configured on switch: {}, {}'.format(sw, is_mlag))
if not is_mlag:
print('.', end="")
sys.stdout.flush()
log.debug('Configuring MLAG on switch {}'.format(sw))
sw_dict[sw].configure_mlag(
mlag_list[mstr_sw][sw]['vlan'],
min(mlag_list[mstr_sw][mstr_sw]['ports']),
mlag_list[mstr_sw][sw]['cidr'],
mlag_list[mstr_sw][sw]['peer_ip'],
mlag_list[mstr_sw][sw]['vip'],
mlag_list[mstr_sw][sw]['ports'])
else:
log.debug('MLAG already configured. Skipping'
' MLAG configuration on switch {}.'.format(sw))
for sw in mlag_list[mstr_sw]:
if sw_dict[sw].is_mlag_configured():
sw_dict[sw].enable_mlag()
# Configure port channels and MLAG port channels
for bond in chan_ports:
for ntmpl in chan_ports[bond]:
for mstr_sw in chan_ports[bond][ntmpl]:
if len(chan_ports[bond][ntmpl][mstr_sw]) == 2:
# MLAG
for sw in chan_ports[bond][ntmpl][mstr_sw]:
for idx, port_grp in enumerate(
chan_ports[bond][ntmpl][mstr_sw][sw]):
chan_num = _get_channel_num(port_grp)
log.debug('create mlag interface {} on switch {}'.
format(chan_num, sw))
sw_dict[sw].remove_mlag_interface(chan_num)
sw_dict[sw].create_mlag_interface(chan_num)
print('.', end="")
sys.stdout.flush()
# All ports in a port group should have the same vlans
# So use any one for setting the MLAG port channel vlans
vlan_port = chan_ports[bond][ntmpl][mstr_sw][sw][idx][0]
vlans = _get_port_vlans(sw, vlan_port, port_vlans)
_port_mode = port_mode[sw].TRUNK if vlans \
else port_mode[sw].ACCESS
sw_dict[sw].set_mlag_port_channel_mode(chan_num, _port_mode)
mtu = _get_port_mtu(sw, chan_num, mtu_list)
if vlans:
log.debug('Switch {}, add vlans {} to mlag port '
'channel {}.'.format(sw, vlans, chan_num))
sw_dict[sw].allowed_vlans_mlag_port_channel(
chan_num, allow_op[sw].NONE)
sw_dict[sw].allowed_vlans_mlag_port_channel(
chan_num, allow_op[sw].ADD, vlans)
if mtu:
log.debug('set_mtu_for_mlag_port_channel: {}'.
format(mtu))
sw_dict[sw].set_mtu_for_lag_port_channel(
chan_num, mtu)
log.debug('Switch {}, adding ports {} to mlag chan '
'num: {}'.format(sw, port_grp, chan_num))
try:
sw_dict[sw].bind_ports_to_mlag_interface(
port_grp, chan_num)
except SwitchException as exc:
log.warning('Failure configuring port in switch:'
' {}.\n{}'.format(sw, str(exc)))
else:
# Configure LAG
for sw in chan_ports[bond][ntmpl][mstr_sw]:
for port_grp in chan_ports[bond][ntmpl][mstr_sw][sw]:
chan_num = _get_channel_num(port_grp)
print('.', end="")
sys.stdout.flush()
log.debug('Lag channel group: {} on switch: {}'.format(
chan_num, sw))
sw_dict[sw].create_port_channel_ifc(chan_num)
vlans = _get_port_vlans(sw, port_grp[0], port_vlans)
_port_mode = port_mode[sw].TRUNK if vlans else \
port_mode[sw].ACCESS
sw_dict[sw].set_port_channel_mode(chan_num, _port_mode)
mtu = _get_port_mtu(sw, chan_num, mtu_list)
if vlans:
log.debug('switch {}, add vlans {} to lag port '
'channel {}'.format(sw, vlans, chan_num))
sw_dict[sw].allowed_vlans_port_channel(
chan_num, allow_op[sw].NONE)
sw_dict[sw].allowed_vlans_port_channel(
chan_num, allow_op[sw].ADD, vlans)
if mtu:
log.debug('set mtu for port channel: {}'.format(mtu))
sw_dict[sw].set_mtu_for_port_channel(chan_num, mtu)
log.debug('Switch: {}, adding port(s) {} to lag chan'
' num: {}'.format(sw, port_grp, chan_num))
try:
sw_dict[sw].remove_ports_from_port_channel_ifc(
port_grp)
sw_dict[sw].add_ports_to_port_channel_ifc(
port_grp, chan_num)
except SwitchException as exc:
log.warning('Failure configuring port in switch:'
'{}.\n {}'.format(sw, str(exc)))
def deconfigure_data_switch(config_path):
""" Deconfigures data (access) switches. Deconfiguration is driven by the
config.yml file. Generally deconfiguration is done in reverse order of
configuration.
Args:
Returns:
"""
log = logger.getlogger()
global CFG
CFG = Config(config_path)
port_vlans = _get_vlan_list()
mtu_list = _get_mtu_list()
chan_ports = _get_port_chan_list()
mlag_list = _get_mlag_info()
# Create switch class instances for each switch
sw_dict = {}
port_mode = {}
allow_op = {}
for sw_ai in CFG.yield_sw_data_access_info():
label = sw_ai[0]
sw_dict[label] = SwitchFactory.factory(*sw_ai[1:])
port_mode[label], allow_op[label] = sw_dict[label].get_enums()
# Deconfigure channel ports and MLAG channel ports
for bond in chan_ports:
for ntmpl in chan_ports[bond]:
for mstr_sw in chan_ports[bond][ntmpl]:
if len(chan_ports[bond][ntmpl][mstr_sw]) == 2:
# Deconfigure mlag channel ports
for sw in chan_ports[bond][ntmpl][mstr_sw]:
if sw_dict[sw].is_mlag_configured():
for idx, port_grp in enumerate(chan_ports[bond][ntmpl]
[mstr_sw][sw]):
chan_num = _get_channel_num(port_grp)
log.info('Deleting mlag interface: {} on'
' switch: {}'.format(chan_num, sw))
sw_dict[sw].remove_mlag_interface(chan_num)
else:
# deconfigure LAG channel ports
for sw in chan_ports[bond][ntmpl][mstr_sw]:
for port_grp in chan_ports[bond][ntmpl][mstr_sw][sw]:
chan_num = _get_channel_num(port_grp)
log.info('Deleting Lag interface {} on switch: {}'.format(
chan_num, sw))
sw_dict[sw].remove_port_channel_ifc(chan_num)
# Deconfigure MLAG
for mstr_sw in mlag_list:
for sw in mlag_list[mstr_sw]:
is_mlag = sw_dict[sw].is_mlag_configured()
log.info('vPC/MLAG configured on sw {}: {}'.format(sw, is_mlag))
if is_mlag:
print('\n\nDo you wish to deconfigure MLAG on switch {}?'.format(sw))
print('This will stop all MLAG communication on all switch ports')
print('OK to deconfigure MLAG?')
resp = input("Enter (Y/yes/n): ")
if resp in ['Y', 'yes']:
log.info('Deconfiguring MLAG on switch: {}'.format(sw))
sw_dict[sw].deconfigure_mlag()
else:
log.debug('\nMLAG not configured on switch: {}'.format(sw))
# Deconfigure switch vlans - first remove from ports
for switch in port_vlans:
for port in port_vlans[switch]:
log.info('switch: {}, port: {}, removing vlans: {}'.format(
switch, port, port_vlans[switch][port]))
sw_dict[switch].allowed_vlans_port(
port, allow_op[switch].REMOVE, port_vlans[switch][port])
log.info('Switch {}, setting port: {} to access mode'.format(
switch, port))
sw_dict[switch].set_switchport_mode(port, port_mode[switch].ACCESS)
# Delete the vlans
for switch in port_vlans:
vlans = []
for port in port_vlans[switch]:
for vlan in port_vlans[switch][port]:
if vlan not in vlans:
vlans.append(vlan)
sw_dict[switch].delete_vlan(vlan)
log.info('Switch: {}, deleting vlan: {}'.format(switch, vlan))
# Deconfigure switch mtu
for switch in mtu_list:
for mtu in mtu_list[switch]:
for port in mtu_list[switch][mtu]:
sw_dict[switch].set_mtu_for_port(port, 0)
log.info('switch: {}, port: {}, setting mtu: {}'.format(
switch, port, 'default mtu'))
def gather_and_display(config_path):
global CFG
CFG = Config(config_path)
port_vlans = _get_vlan_list()
mtu_list = _get_mtu_list()
chan_ports = _get_port_chan_list()
mlag_list = _get_mlag_info()
print('\n\nport_vlans:')
PP.pprint(port_vlans)
print('\nmtu_list:')
PP.pprint(mtu_list)
print('\nmlag_list:')
PP.pprint(mlag_list)
print('\nchan_ports:')
PP.pprint(chan_ports)
# if self.cfg.is_write_switch_memory():
# switch = WriteSwitchMemory(LOG, INV_FILE)
# switch.write_data_switch_memory()
if __name__ == '__main__':
""" Configures or deconfigures data switches.
Args: optional log level or optional deconfig in any order
"""
parser = argparse.ArgumentParser()
parser.add_argument('config_path', nargs='?',
help='path to config file',
default='config.yml')
parser.add_argument('--display', action='store_true',
help='display gathered switch info')
parser.add_argument('--deconfig', action='store_true',
help='deconfigure switch')
parser.add_argument('--print', '-p', dest='log_lvl_print',
help='print log level', default='info')
parser.add_argument('--file', '-f', dest='log_lvl_file',
help='file log level', default='info')
args = parser.parse_args()
logger.create(args.log_lvl_print, args.log_lvl_file)
if not os.path.isfile(args.config_path):
args.config_path = GEN_PATH + args.config_path
print('Using config path: {}'.format(args.config_path))
if not os.path.isfile(args.config_path):
sys.exit('{} does not exist'.format(args.config_path))
if args.display:
gather_and_display(args.config_path)
sys.exit()
if args.deconfig:
deconfigure_data_switch(args.config_path)
sys.exit()
configure_data_switch(args.config_path)
| open-power-ref-design-toolkit/cluster-genesis | scripts/python/configure_data_switches.py | Python | apache-2.0 | 26,960 |
# -*- coding: utf-8 -*-
from django.db import models
class QueueItemManager(models.Manager):
def append(self, video):
return self.create(related_video=video)
def get_next(self):
try:
return self.get_queryset().select_related().\
filter(error=False).order_by('uploaded')[0]
except IndexError:
return None
class VideoManager(models.Manager):
pass
| samsath/cpcc_backend | src/mediastore/mediatypes/video/managers.py | Python | gpl-3.0 | 426 |
from vigra import *
def computeFeatures():
pass
class FilterList(object):
    def __init__(self, shape=None):
        # shape made optional so this stub can be instantiated without arguments
        self.shape = shape
filterList = FilterList()
| timoMa/vigra | vigranumpy/examples/rag_features.py | Python | mit | 161 |
# coding: utf-8
"""
AuthenticationApi.py
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import, unicode_literals
from six import iteritems
from ..api_client import ApiClient
from ..configuration import Configuration
class AuthenticationApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def login_post(self, authentication_string, **kwargs):
"""
Returns a session token to be included in the rest of the requests.
Note that API key authentication is required for all subsequent requests
and user auth is required for routes in the `User` section
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.login_post(authentication_string, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param Auth authentication_string: JSON string containing your authentication details. (required)
:return: Token
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['authentication_string', 'callback']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method login_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'authentication_string' is set
if ('authentication_string' not in params) or (params['authentication_string'] is None):
raise ValueError("Missing the required parameter `authentication_string` when calling `login_post`")
resource_path = '/login'
method = 'POST'
path_params = {}
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
if 'authentication_string' in params:
body_params = params['authentication_string']
# HTTP header `Accept`
header_params['Accept'] = self.api_client. \
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client. \
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='Token',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def refresh_token_get(self, **kwargs):
"""
Refreshes your current, valid JWT token and returns a new token.
Hit this route so that you do not have to post to `/login` with your API key and credentials
once you have already been authenticated.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.refresh_token_get(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: Auth
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['callback']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method refresh_token_get" % key
)
params[key] = val
del params['kwargs']
resource_path = '/refresh_token'
method = 'GET'
path_params = {}
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client. \
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client. \
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['jwtToken']
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='Auth',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
| fernandog/Medusa | lib/tvdbapiv2/apis/authentication_api.py | Python | gpl-3.0 | 7,036 |
"""Module containing a memory memory manager which provides a sliding window on a number of memory mapped files"""
import os
import sys
import mmap
from mmap import mmap, ACCESS_READ
try:
from mmap import ALLOCATIONGRANULARITY
except ImportError:
# in python pre 2.6, the ALLOCATIONGRANULARITY does not exist as it is mainly
# useful for aligning the offset. The offset argument doesn't exist there though
from mmap import PAGESIZE as ALLOCATIONGRANULARITY
# END handle pythons missing quality assurance
__all__ = ["align_to_mmap", "is_64_bit", "buffer",
"MapWindow", "MapRegion", "MapRegionList", "ALLOCATIONGRANULARITY"]
#{ Utilities
try:
# Python 2
buffer = buffer
except NameError:
# Python 3 has no `buffer`; only `memoryview`
def buffer(obj, offset, size):
# Actually, for gitpython this is fastest ... .
return memoryview(obj)[offset:offset+size]
# doing it directly is much faster !
# return obj[offset:offset + size]
def string_types():
if sys.version_info[0] >= 3:
return str
else:
return basestring
def align_to_mmap(num, round_up):
"""
Align the given integer number to the closest page offset, which usually is 4096 bytes.
:param round_up: if True, the next higher multiple of page size is used, otherwise
the lower page_size will be used (i.e. if True, 1 becomes 4096, otherwise it becomes 0)
:return: num rounded to closest page"""
res = (num // ALLOCATIONGRANULARITY) * ALLOCATIONGRANULARITY
if round_up and (res != num):
res += ALLOCATIONGRANULARITY
# END handle size
return res
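# Illustrative values (added for clarity), assuming the common 4096-byte
# allocation granularity; the real constant is platform dependent:
#   align_to_mmap(1, round_up=False)    -> 0
#   align_to_mmap(1, round_up=True)     -> 4096
#   align_to_mmap(8192, round_up=True)  -> 8192   (already aligned, unchanged)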
def is_64_bit():
""":return: True if the system is 64 bit. Otherwise it can be assumed to be 32 bit"""
return sys.maxsize > (1 << 32) - 1
#}END utilities
#{ Utility Classes
class MapWindow(object):
"""Utility type which is used to snap windows towards each other, and to adjust their size"""
__slots__ = (
'ofs', # offset into the file in bytes
'size' # size of the window in bytes
)
def __init__(self, offset, size):
self.ofs = offset
self.size = size
def __repr__(self):
return "MapWindow(%i, %i)" % (self.ofs, self.size)
@classmethod
def from_region(cls, region):
""":return: new window from a region"""
return cls(region._b, region.size())
def ofs_end(self):
return self.ofs + self.size
def align(self):
"""Assures the previous window area is contained in the new one"""
nofs = align_to_mmap(self.ofs, 0)
self.size += self.ofs - nofs # keep size constant
self.ofs = nofs
self.size = align_to_mmap(self.size, 1)
def extend_left_to(self, window, max_size):
"""Adjust the offset to start where the given window on our left ends if possible,
but don't make yourself larger than max_size.
The resize will assure that the new window still contains the old window area"""
rofs = self.ofs - window.ofs_end()
nsize = rofs + self.size
rofs -= nsize - min(nsize, max_size)
self.ofs = self.ofs - rofs
self.size += rofs
def extend_right_to(self, window, max_size):
"""Adjust the size to make our window end where the right window begins, but don't
get larger than max_size"""
self.size = min(self.size + (window.ofs - self.ofs_end()), max_size)
class MapRegion(object):
"""Defines a mapped region of memory, aligned to pagesizes
**Note:** deallocates used region automatically on destruction"""
__slots__ = [
'_b', # beginning of mapping
'_mf', # mapped memory chunk (as returned by mmap)
'_uc', # total amount of usages
'_size', # cached size of our memory map
'__weakref__'
]
_need_compat_layer = sys.version_info[0] < 3 and sys.version_info[1] < 6
if _need_compat_layer:
__slots__.append('_mfb') # mapped memory buffer to provide offset
# END handle additional slot
#{ Configuration
# Used for testing only. If True, all data will be loaded into memory at once.
# This makes sure no file handles will remain open.
_test_read_into_memory = False
#} END configuration
def __init__(self, path_or_fd, ofs, size, flags=0):
"""Initialize a region, allocate the memory map
:param path_or_fd: path to the file to map, or the opened file descriptor
:param ofs: **aligned** offset into the file to be mapped
        :param size: if size is larger than the file on disk, the whole file will be
            allocated and the size automatically adjusted
:param flags: additional flags to be given when opening the file.
:raise Exception: if no memory can be allocated"""
self._b = ofs
self._size = 0
self._uc = 0
if isinstance(path_or_fd, int):
fd = path_or_fd
else:
fd = os.open(path_or_fd, os.O_RDONLY | getattr(os, 'O_BINARY', 0) | flags)
# END handle fd
try:
kwargs = dict(access=ACCESS_READ, offset=ofs)
corrected_size = size
sizeofs = ofs
if self._need_compat_layer:
del(kwargs['offset'])
corrected_size += ofs
sizeofs = 0
# END handle python not supporting offset ! Arg
# have to correct size, otherwise (instead of the c version) it will
# bark that the size is too large ... many extra file accesses because
            # of this ... argh !
actual_size = min(os.fstat(fd).st_size - sizeofs, corrected_size)
if self._test_read_into_memory:
self._mf = self._read_into_memory(fd, ofs, actual_size)
else:
self._mf = mmap(fd, actual_size, **kwargs)
# END handle memory mode
self._size = len(self._mf)
if self._need_compat_layer:
self._mfb = buffer(self._mf, ofs, self._size)
# END handle buffer wrapping
finally:
if isinstance(path_or_fd, string_types()):
os.close(fd)
# END only close it if we opened it
# END close file handle
# We assume the first one to use us keeps us around
self.increment_client_count()
def _read_into_memory(self, fd, offset, size):
""":return: string data as read from the given file descriptor, offset and size """
os.lseek(fd, offset, os.SEEK_SET)
mf = ''
bytes_todo = size
while bytes_todo:
chunk = 1024 * 1024
d = os.read(fd, chunk)
bytes_todo -= len(d)
mf += d
# END loop copy items
return mf
def __repr__(self):
return "MapRegion<%i, %i>" % (self._b, self.size())
#{ Interface
def buffer(self):
""":return: a buffer containing the memory"""
return self._mf
def map(self):
""":return: a memory map containing the memory"""
return self._mf
def ofs_begin(self):
""":return: absolute byte offset to the first byte of the mapping"""
return self._b
def size(self):
""":return: total size of the mapped region in bytes"""
return self._size
def ofs_end(self):
""":return: Absolute offset to one byte beyond the mapping into the file"""
return self._b + self._size
def includes_ofs(self, ofs):
""":return: True if the given offset can be read in our mapped region"""
return self._b <= ofs < self._b + self._size
def client_count(self):
""":return: number of clients currently using this region"""
return self._uc
def increment_client_count(self, ofs = 1):
"""Adjust the usage count by the given positive or negative offset.
If usage count equals 0, we will auto-release our resources
:return: True if we released resources, False otherwise. In the latter case, we can still be used"""
self._uc += ofs
assert self._uc > -1, "Increments must match decrements, usage counter negative: %i" % self._uc
if self.client_count() == 0:
self.release()
return True
else:
return False
# end handle release
def release(self):
"""Release all resources this instance might hold. Must only be called if there usage_count() is zero"""
self._mf.close()
# re-define all methods which need offset adjustments in compatibility mode
if _need_compat_layer:
def size(self):
return self._size - self._b
def ofs_end(self):
# always the size - we are as large as it gets
return self._size
def buffer(self):
return self._mfb
def includes_ofs(self, ofs):
return self._b <= ofs < self._size
# END handle compat layer
#} END interface
class MapRegionList(list):
"""List of MapRegion instances associating a path with a list of regions."""
__slots__ = (
'_path_or_fd', # path or file descriptor which is mapped by all our regions
'_file_size' # total size of the file we map
)
def __new__(cls, path):
return super(MapRegionList, cls).__new__(cls)
def __init__(self, path_or_fd):
self._path_or_fd = path_or_fd
self._file_size = None
def path_or_fd(self):
""":return: path or file descriptor we are attached to"""
return self._path_or_fd
def file_size(self):
""":return: size of file we manager"""
if self._file_size is None:
if isinstance(self._path_or_fd, string_types()):
self._file_size = os.stat(self._path_or_fd).st_size
else:
self._file_size = os.fstat(self._path_or_fd).st_size
# END handle path type
# END update file size
return self._file_size
#} END utility classes
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/smmap/util.py | Python | agpl-3.0 | 10,076 |
from math import *
octree_node_size = 112
def recurse(depth):
if depth == 0:
return 1
else:
return pow(8, depth) + recurse(depth - 1)
def octree_size(depth):
return recurse(depth) * octree_node_size
print("Size %d" % (octree_size(3)))
| galek/anki-3d-engine | docs/drafts/octree.py | Python | bsd-3-clause | 246 |
INT_MAX = 2 ** 31 - 1
class Solution:
def divide(self, dividend: int, divisor: int) -> int:
if dividend == 0 or divisor == 1:
return dividend
if divisor == -1:
return -dividend if -dividend < INT_MAX else INT_MAX
sign = 1
if (dividend < 0 and divisor > 0) or (dividend > 0 and divisor < 0):
sign = -1
dividend, divisor = abs(dividend), abs(divisor)
result = 0
step_divisor = divisor
step_result = 1
while dividend >= step_divisor + step_divisor:
step_divisor += step_divisor
step_result += step_result
while dividend >= divisor:
if dividend >= step_divisor:
dividend -= step_divisor
result += step_result
step_divisor = (step_divisor >> 1)
step_result = (step_result >> 1)
return sign * result
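if __name__ == "__main__":
    # Quick sanity checks (added for illustration; not part of the original solution).
    # Results truncate toward zero and clamp to the 32-bit signed range.
    s = Solution()
    assert s.divide(10, 3) == 3
    assert s.divide(7, -3) == -2
    assert s.divide(-2 ** 31, -1) == 2 ** 31 - 1  # clamped to INT_MAX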
| BigEgg/LeetCode | Python/LeetCode/_001_050/_029_DivideTwoIntegers.py | Python | mit | 918 |
import logging
from django.contrib import messages
from django.contrib.auth import get_user_model, authenticate, login
from django.contrib.auth.models import User
from django.urls import reverse
from django.http import HttpResponse, HttpResponseRedirect, HttpRequest
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.core.cache import cache
from django_tables2 import RequestConfig
from treatment.models import Request as RequestBO, Treatment, Researcher
from treatment.tables import RequestTable
from .forms import TreatmentCreationForm, SubjectAssignmentForm, TreatmentSelectionForm
from .backend import create_treatment, assign_patiences_to_treatment
from .tables import TreatmentsTable
# Get an instance of a logger
logger = logging.getLogger(__name__)
@login_required
def researcher_index(request):
if request.method == 'GET':
current_user = request.user
researcher_inst = cache.get("res_%s"%current_user.id)
if researcher_inst is None:
researcher_inst = Researcher.objects.get(user__id=current_user.id)
cache.set("res_%s" % current_user.id, researcher_inst)
table = TreatmentsTable(Researcher.objects.get(id=researcher_inst.id).membership_set.all())
RequestConfig(request, paginate={"per_page": 10}).configure(table)
return render(request,'researcher_index.html', {'studies_table' : table, 'user': current_user})
@login_required
def res_treatments_create(request):
if request.method == 'GET':
form = TreatmentCreationForm()
else:
form = TreatmentCreationForm(request.POST)
if form.is_valid():
#user must be authenticated at this point
current_user = request.user
output = create_treatment(form, current_user)
if output['result']:
messages.success(request, output['status'])
form = TreatmentCreationForm()
else:
messages.error(request, output['status'])
return render(request,'researcher_treatments_create.html', {'form':form})
@login_required
def res_treatments_modify(request, treatment_id):
if request.method == 'GET':
form = TreatmentSelectionForm(treatmentId=treatment_id)
return render(request,'researcher_treatments_modify.html', {'form':form, 'treatment_id':treatment_id })
@login_required
def res_treatments_select(request):
if request.method == 'GET':
form = TreatmentSelectionForm()
return render(request,'researcher_treatments_modify.html', {'form':form})
@login_required
def res_requests(request):
if request.method == 'GET':
form = SubjectAssignmentForm()
table = RequestTable(RequestBO.objects.filter(processed=False))
RequestConfig(request, paginate={"per_page": 5}).configure(table)
else:
form = SubjectAssignmentForm(request.POST)
table = RequestTable(RequestBO.objects.filter(processed=False))
RequestConfig(request, paginate={"per_page": 5}).configure(table)
if form.is_valid():
current_user = request.user
output = assign_patiences_to_treatment(form, current_user)
if output['result']:
messages.success(request, output['status'])
form = SubjectAssignmentForm()
else:
messages.error(request, output['status'])
return render(request,'researcher_requests.html', {'form':form,'request_table' : table })
@login_required
def res_subjects(request):
return render(request,'researcher_subjects.html')
@login_required
def res_invites(request):
return render(request,'researcher_invites.html')
@login_required
def res_create_survey(request, treatment_id):
return render(request,'create_survey.html')
| luisen14/treatment-tracking-project | treatment_tracker/researchers/views.py | Python | apache-2.0 | 3,823 |
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import threading
import six
from oslo_config import cfg
from oslo_db import options
from oslo_log import log as logging
from oslo_utils import excutils
from sqlalchemy.sql import select
from designate import exceptions
from designate.i18n import _LC
from designate.backend import base
from designate.backend.impl_powerdns import tables
from designate.sqlalchemy import session
LOG = logging.getLogger(__name__)
def _map_col(keys, col):
return dict([(keys[i], col[i]) for i in range(len(keys))])
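# Illustrative example (added): pairs result-proxy column keys with row values,
# e.g. _map_col(('id', 'name'), (1, 'example.org.')) -> {'id': 1, 'name': 'example.org.'}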
class PowerDNSBackend(base.Backend):
__plugin_name__ = 'powerdns'
__backend_status__ = 'integrated'
@classmethod
def get_cfg_opts(cls):
group = cfg.OptGroup('backend:powerdns')
opts = copy.deepcopy(options.database_opts)
# Strip connection options
discard_opts = ('sqlite_db', 'connection', 'slave_connection')
opts = [opt for opt in opts if opt.name not in discard_opts]
return [(group, opts,)]
def __init__(self, target):
super(PowerDNSBackend, self).__init__(target)
self.local_store = threading.local()
default_connection = 'sqlite:///%(state_path)s/powerdns.sqlite' % {
'state_path': cfg.CONF.state_path
}
self.connection = self.options.get('connection', default_connection)
@property
def session(self):
# NOTE: This uses a thread local store, allowing each greenthread to
        #       have its own session stored correctly. Without this, each
# greenthread may end up using a single global session, which
# leads to bad things happening.
if not hasattr(self.local_store, 'session'):
self.local_store.session = session.get_session(
self.name, self.connection, self.target.id)
return self.local_store.session
def _create(self, table, values):
query = table.insert()
resultproxy = self.session.execute(query, values)
# Refetch the row, for generated columns etc
query = select([table])\
.where(table.c.id == resultproxy.inserted_primary_key[0])
resultproxy = self.session.execute(query)
return _map_col(query.columns.keys(), resultproxy.fetchone())
def _get(self, table, id_, exc_notfound, id_col=None):
if id_col is None:
id_col = table.c.id
query = select([table])\
.where(id_col == id_)
resultproxy = self.session.execute(query)
results = resultproxy.fetchall()
if len(results) != 1:
raise exc_notfound()
# Map col keys to values in result
return _map_col(query.columns.keys(), results[0])
def _delete(self, table, id_, exc_notfound, id_col=None):
if id_col is None:
id_col = table.c.id
query = table.delete()\
.where(id_col == id_)
resultproxy = self.session.execute(query)
if resultproxy.rowcount != 1:
raise exc_notfound()
# Domain Methods
def create_domain(self, context, domain):
try:
self.session.begin()
def _parse_master(master):
return '%s:%d' % (master.host, master.port)
masters = six.moves.map(_parse_master, self.masters)
domain_values = {
'designate_id': domain['id'],
'name': domain['name'].rstrip('.'),
'master': ','.join(masters),
'type': 'SLAVE',
'account': context.tenant
}
self._create(tables.domains, domain_values)
except Exception:
with excutils.save_and_reraise_exception():
self.session.rollback()
else:
self.session.commit()
def delete_domain(self, context, domain):
# TODO(kiall): We should make this match create_domain with regard to
# transactions.
try:
self._get(tables.domains, domain['id'], exceptions.DomainNotFound,
id_col=tables.domains.c.designate_id)
except exceptions.DomainNotFound:
# If the Domain is already gone, that's ok. We're deleting it
# anyway, so just log and continue.
LOG.critical(_LC('Attempted to delete a domain which is '
'not present in the backend. ID: %s') %
domain['id'])
return
self._delete(tables.domains, domain['id'],
exceptions.DomainNotFound,
id_col=tables.domains.c.designate_id)
| kiall/designate-py3 | designate/backend/impl_powerdns/__init__.py | Python | apache-2.0 | 5,270 |
"""
Arithmetic helper functions.
"""
def is_number(s):
"""
    Determine if a given input can be interpreted as a number.
    This is possible for types like float, int and so on.
"""
try:
float(s)
return True
except ValueError:
return False
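# Illustrative behaviour (examples added, not in the original module):
#   is_number("3.14")  -> True
#   is_number("1e-3")  -> True
#   is_number("abc")   -> False
# Note: only ValueError is caught, so inputs like None that make float() raise
# TypeError would propagate the exception rather than return False.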
| mre/tracker | arith.py | Python | lgpl-3.0 | 264 |
# -*- coding: utf-8 -*-
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
from email.message import Message
import email.message
import tempfile
import shutil
import socket
import threading
import ssl
import select
import time
import os
import sys
import pytest
import yaml
from muttdown import main
from muttdown.main import convert_tree
from muttdown.main import process_message
from muttdown.config import Config
@pytest.fixture
def basic_config():
return Config()
@pytest.fixture
def tempdir():
    # workaround because pytest's builtin tmpdir fixture is broken on python 3.3
dirname = tempfile.mkdtemp()
try:
yield dirname
finally:
shutil.rmtree(dirname)
@pytest.fixture
def config_with_css(tempdir):
with open('%s/test.css' % tempdir, 'w') as f:
f.write('html, body, p { font-family: serif; }\n')
c = Config()
c.merge_config({'css_file': '%s/test.css' % tempdir})
return c
def test_unmodified_no_match(basic_config):
msg = Message()
msg['Subject'] = 'Test Message'
msg['From'] = '[email protected]'
msg['To'] = '[email protected]'
msg['Bcc'] = 'bananas'
msg.set_payload('This message has no sigil')
converted = process_message(msg, basic_config)
assert converted == msg
def test_simple_message(basic_config):
msg = MIMEMultipart()
msg['Subject'] = 'Test Message'
msg['From'] = '[email protected]'
msg['To'] = '[email protected]'
msg['Bcc'] = 'bananas'
msg.preamble = 'Outer preamble'
msg.attach(MIMEText("!m This is the main message body"))
attachment = MIMEText('this is an attachment', 'x-misc')
attachment.add_header('Content-Disposition', 'attachment')
msg.attach(attachment)
converted, _ = convert_tree(msg, basic_config)
assert converted['Subject'] == 'Test Message'
assert converted['From'] == '[email protected]'
assert converted['To'] == '[email protected]'
assert converted.get('Bcc', None) is None
assert isinstance(converted, MIMEMultipart)
assert converted.preamble == 'Outer preamble'
assert len(converted.get_payload()) == 2
alternatives_part = converted.get_payload()[0]
assert isinstance(alternatives_part, MIMEMultipart)
assert alternatives_part.get_content_type() == 'multipart/alternative'
assert len(alternatives_part.get_payload()) == 2
text_part = alternatives_part.get_payload()[0]
html_part = alternatives_part.get_payload()[1]
assert isinstance(text_part, MIMEText)
assert text_part.get_content_type() == 'text/plain'
assert isinstance(html_part, MIMEText)
assert html_part.get_content_type() == 'text/html'
attachment_part = converted.get_payload()[1]
assert isinstance(attachment_part, MIMEText)
assert attachment_part['Content-Disposition'] == 'attachment'
assert attachment_part.get_content_type() == 'text/x-misc'
def test_with_css(config_with_css):
msg = Message()
msg['Subject'] = 'Test Message'
msg['From'] = '[email protected]'
msg['To'] = '[email protected]'
msg['Bcc'] = 'bananas'
msg.set_payload('!m\n\nThis is a message')
converted, _ = convert_tree(msg, config_with_css)
assert isinstance(converted, MIMEMultipart)
assert len(converted.get_payload()) == 2
text_part = converted.get_payload()[0]
assert text_part.get_payload(decode=True) == b'!m\n\nThis is a message'
html_part = converted.get_payload()[1]
assert html_part.get_payload(decode=True) == b'<p style="font-family: serif">This is a message</p>'
def test_headers_when_multipart_signed(basic_config):
msg = MIMEMultipart('signed')
msg['Subject'] = 'Test Message'
msg['From'] = '[email protected]'
msg['To'] = '[email protected]'
msg['Bcc'] = 'bananas'
msg.preamble = 'Outer preamble'
msg.attach(MIMEText("!m This is the main message body"))
msg.attach(MIMEApplication('signature here', 'pgp-signature', name='signature.asc'))
converted, _ = convert_tree(msg, basic_config)
assert converted['Subject'] == 'Test Message'
assert converted['From'] == '[email protected]'
assert converted['To'] == '[email protected]'
assert isinstance(converted, MIMEMultipart)
assert converted.preamble == 'Outer preamble'
assert len(converted.get_payload()) == 2
assert converted.get_content_type() == 'multipart/alternative'
html_part = converted.get_payload()[0]
original_signed_part = converted.get_payload()[1]
assert isinstance(html_part, MIMEText)
assert html_part.get_content_type() == 'text/html'
assert isinstance(original_signed_part, MIMEMultipart)
assert original_signed_part.get_content_type() == 'multipart/signed'
assert original_signed_part['Subject'] is None
text_part = original_signed_part.get_payload()[0]
signature_part = original_signed_part.get_payload()[1]
assert text_part.get_content_type() == 'text/plain'
assert signature_part.get_content_type() == 'application/pgp-signature'
class MockSmtpServer(object):
def __init__(self):
self._s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._s.bind(('127.0.0.1', 0))
self.address = self._s.getsockname()[0:2]
self._t = None
self._started = threading.Event()
self.messages = []
self.running = False
def start(self):
self._t = threading.Thread(target=self.run)
self._t.start()
if self._started.wait(5) is not True:
raise ValueError('SMTP Server Thread failed to start!')
def run(self):
if hasattr(ssl, 'create_default_context'):
context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
else:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(certfile='tests/data/cert.pem', keyfile='tests/data/key.pem')
self._s.listen(128)
self._started.set()
self.running = True
while self.running:
r, _, x = select.select([self._s], [self._s], [self._s], 0.5)
if r:
start = time.time()
conn, addr = self._s.accept()
conn = context.wrap_socket(conn, server_side=True)
message = b''
conn.sendall(b'220 localhost SMTP Fake\r\n')
message += conn.recv(1024)
conn.sendall(b'250-localhost\r\n250 DSN\r\n')
# MAIL FROM
message += conn.recv(1024)
conn.sendall(b'250 2.1.0 Ok\r\n')
# RCPT TO
message += conn.recv(1024)
conn.sendall(b'250 2.1.0 Ok\r\n')
# DATA
message += conn.recv(6)
conn.sendall(b'354 End data with <CR><LF>.<CR><LF>\r\n')
while time.time() < start + 5:
chunk = conn.recv(4096)
if not chunk:
break
message += chunk
if b'\r\n.\r\n' in message:
break
conn.sendall(b'250 2.1.0 Ok\r\n')
message += conn.recv(1024)
conn.sendall(b'221 Bye\r\n')
conn.close()
self.messages.append((addr, message))
def stop(self):
if self._t is not None:
self.running = False
self._t.join()
@pytest.fixture
def smtp_server():
s = MockSmtpServer()
s.start()
try:
yield s
finally:
s.stop()
def test_main_smtplib(tempdir, smtp_server, mocker):
config_path = os.path.join(tempdir, 'config.yaml')
with open(config_path, 'w') as f:
yaml.dump({
'smtp_host': smtp_server.address[0],
'smtp_port': smtp_server.address[1],
'smtp_ssl': True
}, f)
msg = Message()
msg['Subject'] = 'Test Message'
msg['From'] = '[email protected]'
msg['To'] = '[email protected]'
msg['Bcc'] = 'bananas'
msg.set_payload('This message has no sigil')
mocker.patch.object(main, 'read_message', return_value=msg.as_string())
main.main(['-c', config_path, '-f', '[email protected]', '[email protected]'])
assert len(smtp_server.messages) == 1
attr, transcript = smtp_server.messages[0]
assert b'Subject: Test Message' in transcript
assert b'no sigil' in transcript
def test_main_passthru(tempdir, mocker):
output_path = os.path.join(tempdir, 'output')
sendmail_path = os.path.join(tempdir, 'sendmail')
with open(sendmail_path, 'w') as f:
f.write('#!{0}\n'.format(sys.executable))
f.write('import sys\n')
f.write('output_path = "{0}"\n'.format(output_path))
f.write('open(output_path, "w").write(sys.stdin.read())\n')
f.write('sys.exit(0)')
os.chmod(sendmail_path, 0o750)
config_path = os.path.join(tempdir, 'config.yaml')
with open(config_path, 'w') as f:
yaml.dump({
'sendmail': sendmail_path
}, f)
msg = Message()
msg['Subject'] = 'Test Message'
msg['From'] = '[email protected]'
msg['To'] = '[email protected]'
msg['Bcc'] = 'bananas'
msg.set_payload('This message has no sigil')
mocker.patch.object(main, 'read_message', return_value=msg.as_string())
main.main(['-c', config_path, '-f', '[email protected]', '-s', '[email protected]'])
with open(output_path, 'rb') as f:
transcript = f.read()
assert b'Subject: Test Message' in transcript
assert b'no sigil' in transcript
def test_raw_unicode(basic_config):
raw_message = b'Date: Fri, 1 Mar 2019 17:54:06 -0800\nFrom: Test <[email protected]>\nTo: Test <[email protected]>\nSubject: Re: Fwd: Important: 2019 =?utf-8?Q?Securit?=\n =?utf-8?B?eSBVcGRhdGUg4oCU?=\nReferences: <BypjV000000000000000000000000000000000000000000000PNNK9E00HcUNxx_7QEaZBosNNgqKSw@sfdc.net>\n <CAPe=KFgfaFd5U7KX=3ugNs5vPzHkRgAij9md8TL-WX-ypEszug@mail.gmail.com>\nMIME-Version: 1.0\nContent-Type: text/plain; charset=utf-8\nContent-Disposition: inline\nContent-Transfer-Encoding: 8bit\nUser-Agent: Mutt/1.11.3 (2019-02-01)\n\nThis is a test\n\n\nOn Fri, Mar 01, 2019 at 03:08:35PM -0800, Test Wrote:\n> :)\n> \n> \n> \xc3\x98 Text\n> \n> \xc2\xb7 text\n-- \nend\n' # noqa
if sys.version_info > (3, 0):
mail = email.message_from_string(raw_message.decode('utf-8'))
else:
mail = email.message_from_string(raw_message)
converted = process_message(mail, basic_config)
assert converted['From'] == 'Test <[email protected]>'
assert 'Ø' in converted.get_payload()
| Roguelazer/muttdown | tests/test_basic.py | Python | isc | 10,618 |
"""
Test the SLSQP optimizer driver
"""
import unittest
import numpy
# pylint: disable=F0401,E0611
from openmdao.main.api import Assembly, Component, set_as_top, Driver
from openmdao.main.datatypes.api import Float, Array, Str
from openmdao.lib.casehandlers.api import ListCaseRecorder
from openmdao.main.interfaces import IHasParameters, implements
from openmdao.main.hasparameters import HasParameters
from openmdao.util.decorators import add_delegate
from openmdao.lib.drivers.slsqpdriver import SLSQPdriver
from openmdao.util.testutil import assert_raises, assert_rel_error
class OptRosenSuzukiComponent(Component):
""" From the CONMIN User's Manual:
EXAMPLE 1 - CONSTRAINED ROSEN-SUZUKI FUNCTION. NO GRADIENT INFORMATION.
MINIMIZE OBJ = X(1)**2 - 5*X(1) + X(2)**2 - 5*X(2) +
2*X(3)**2 - 21*X(3) + X(4)**2 + 7*X(4) + 50
Subject to:
G(1) = X(1)**2 + X(1) + X(2)**2 - X(2) +
X(3)**2 + X(3) + X(4)**2 - X(4) - 8 .LE.0
G(2) = X(1)**2 - X(1) + 2*X(2)**2 + X(3)**2 +
2*X(4)**2 - X(4) - 10 .LE.0
G(3) = 2*X(1)**2 + 2*X(1) + X(2)**2 - X(2) +
X(3)**2 - X(4) - 5 .LE.0
This problem is solved beginning with an initial X-vector of
X = (1.0, 1.0, 1.0, 1.0)
The optimum design is known to be
OBJ = 6.000
and the corresponding X-vector is
X = (0.0, 1.0, 2.0, -1.0)
"""
x = Array(iotype='in', low=-10, high=99)
g = Array([1., 1., 1.], iotype='out')
result = Float(iotype='out')
obj_string = Str(iotype='out')
opt_objective = Float(iotype='out')
# pylint: disable=C0103
def __init__(self):
super(OptRosenSuzukiComponent, self).__init__()
self.x = numpy.array([1., 1., 1., 1.], dtype=float)
self.result = 0.
self.opt_objective = 6.
self.opt_design_vars = [0., 1., 2., -1.]
def execute(self):
"""calculate the new objective value"""
x = self.x
self.result = (x[0]**2 - 5.*x[0] + x[1]**2 - 5.*x[1] +
2.*x[2]**2 - 21.*x[2] + x[3]**2 + 7.*x[3] + 50)
self.obj_string = "Bad"
self.g[0] = (x[0]**2 + x[0] + x[1]**2 - x[1] +
x[2]**2 + x[2] + x[3]**2 - x[3] - 8)
self.g[1] = (x[0]**2 - x[0] + 2*x[1]**2 + x[2]**2 +
2*x[3]**2 - x[3] - 10)
self.g[2] = (2*x[0]**2 + 2*x[0] + x[1]**2 - x[1] +
x[2]**2 - x[3] - 5)
class SLSPQdriverTestCase(unittest.TestCase):
"""test SLSQP optimizer component"""
def setUp(self):
self.top = set_as_top(Assembly())
self.top.add('driver', SLSQPdriver())
self.top.add('comp', OptRosenSuzukiComponent())
self.top.driver.workflow.add('comp')
self.top.driver.iprint = 0
def tearDown(self):
self.top = None
def test_opt1(self):
# Run with scalar parameters, scalar constraints.
self.top.driver.add_objective('comp.result')
map(self.top.driver.add_parameter,
['comp.x[0]', 'comp.x[1]', 'comp.x[2]', 'comp.x[3]'])
# pylint: disable=C0301
map(self.top.driver.add_constraint, [
'comp.x[0]**2+comp.x[0]+comp.x[1]**2-comp.x[1]+comp.x[2]**2+comp.x[2]+comp.x[3]**2-comp.x[3] < 8',
'comp.x[0]**2-comp.x[0]+2*comp.x[1]**2+comp.x[2]**2+2*comp.x[3]**2-comp.x[3] < 10',
'2*comp.x[0]**2+2*comp.x[0]+comp.x[1]**2-comp.x[1]+comp.x[2]**2-comp.x[3] < 5'])
self.top.recorders = [ListCaseRecorder()]
self.top.run()
# pylint: disable=E1101
self.assertAlmostEqual(self.top.comp.opt_objective,
self.top.driver.eval_objective(), places=2)
self.assertAlmostEqual(self.top.comp.opt_design_vars[0],
self.top.comp.x[0], places=1)
self.assertAlmostEqual(self.top.comp.opt_design_vars[1],
self.top.comp.x[1], places=2)
self.assertAlmostEqual(self.top.comp.opt_design_vars[2],
self.top.comp.x[2], places=2)
self.assertAlmostEqual(self.top.comp.opt_design_vars[3],
self.top.comp.x[3], places=1)
cases = self.top.recorders[0].get_iterator()
end_case = cases[-1]
self.assertEqual(self.top.comp.x[1],
end_case.get_input('comp.x[1]'))
self.assertEqual(self.top.comp.result,
end_case.get_output('_pseudo_0.out0'))
def test_max_iter(self):
self.top.driver.add_objective('comp.result')
map(self.top.driver.add_parameter,
['comp.x[0]', 'comp.x[1]', 'comp.x[2]', 'comp.x[3]'])
self.top.driver.maxiter = 2
self.top.run()
self.assertEqual(self.top.driver.error_code, 9)
def test_array(self):
# Run with array parameter, array constraint.
self.top.driver.add_objective('comp.result')
self.top.driver.add_parameter('comp.x')
self.top.driver.add_constraint('comp.g < 0')
self.top.recorders = [ListCaseRecorder()]
self.top.run()
# pylint: disable=E1101
self.assertAlmostEqual(self.top.comp.opt_objective,
self.top.driver.eval_objective(), places=2)
self.assertAlmostEqual(self.top.comp.opt_design_vars[0],
self.top.comp.x[0], places=1)
self.assertAlmostEqual(self.top.comp.opt_design_vars[1],
self.top.comp.x[1], places=2)
self.assertAlmostEqual(self.top.comp.opt_design_vars[2],
self.top.comp.x[2], places=2)
self.assertAlmostEqual(self.top.comp.opt_design_vars[3],
self.top.comp.x[3], places=1)
cases = self.top.recorders[0].get_iterator()
end_case = cases[-1]
self.assertEqual(self.top.comp.x[1],
end_case.get_input('comp.x')[1])
self.assertEqual(self.top.comp.result,
end_case.get_output('_pseudo_0.out0'))
def test_reconfig(self):
# Test that ArrayParameters can be configured at run() time.
class MyComp(Component):
x = Array([0, 0], iotype="in", dtype=float)
y = Array([0, 0], iotype="out", dtype=float)
def execute(self):
self.y = self.x**2
class MyAssembly(Assembly):
def configure(self):
self.add('comp1', MyComp())
self.add('comp2', MyComp())
driver = self.add('driver', SLSQPdriver())
driver.add_parameter('comp1.x', low=-10, high=10)
driver.add_parameter('comp2.x', low=-10, high=10)
driver.add_objective('comp1.y[0] + comp2.y[1]')
asm = set_as_top(MyAssembly())
asm.comp1.x = [1, 2, 3]
asm.comp2.x = [2.781828, 3.14159]
asm.run()
assert_rel_error(self, asm.comp1.x, [0, 2, 3], 1e-6)
assert_rel_error(self, asm.comp1.y, [0, 4, 9], 1e-6)
assert_rel_error(self, asm.comp2.x, [2.781828, 0], 1e-6)
assert_rel_error(self, asm.comp2.y, [7.738567, 0], 1e-6)
def test_invalid_reconfig(self):
# Test invalid reconfiguration of ArrayParameter.
class MyComp(Component):
x = Array([0, 0], iotype="in", dtype=float)
y = Array([0, 0], iotype="out", dtype=float)
def execute(self):
self.y = self.x**2
class MyAssembly(Assembly):
def configure(self):
self.add('comp1', MyComp())
self.add('comp2', MyComp())
driver = self.add('driver', SLSQPdriver())
driver.add_parameter('comp1.x', low=-10, high=[10, 10])
driver.add_parameter('comp2.x', low=-10, high=10)
driver.add_objective('comp1.y[0] + comp2.y[1]')
asm = set_as_top(MyAssembly())
asm.comp1.x = [1, 2, 3]
asm.comp2.x = [2.781828, 3.14159]
assert_raises(self, 'asm.run()', globals(), locals(), RuntimeError,
"Parameter comp1.x can't be reconfigured,"
" 'high' was not specified as a scalar")
def test_initial_run(self):
# Test the fix that added a run_iteration top of the
# start_iteration method
class MyComp(Component):
x = Float(0.0, iotype='in', low=-10, high=10)
xx = Float(0.0, iotype='in', low=-10, high=10)
f_x = Float(iotype='out')
y = Float(iotype='out')
def execute(self):
if self.xx != 1.0:
self.raise_exception("Lazy", RuntimeError)
self.f_x = 2.0*self.x
self.y = self.x
@add_delegate(HasParameters)
class SpecialDriver(Driver):
implements(IHasParameters)
def execute(self):
self.set_parameters([1.0])
top = set_as_top(Assembly())
top.add('comp', MyComp())
top.add('driver', SLSQPdriver())
top.add('subdriver', SpecialDriver())
top.driver.workflow.add('subdriver')
top.subdriver.workflow.add('comp')
top.subdriver.add_parameter('comp.xx')
top.driver.add_parameter('comp.x')
top.driver.add_constraint('comp.y > 1.0')
top.driver.add_objective('comp.f_x')
top.run()
if __name__ == "__main__":
unittest.main()
| HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/openmdao.lib-0.13.0-py2.7.egg/openmdao/lib/drivers/test/test_slsqpdriver.py | Python | gpl-2.0 | 9,605 |
# -*- coding: utf-8 -*-
"""
apps.accounts.models
~~~~~~~~~~~~~~
account related models
:copyright: (c) 2012 by arruda.
"""
import datetime
from decimal import Decimal
from django.db import models
from django.db.models import Sum
from django.utils.translation import ugettext_lazy as _
class UserProfile(models.Model):
"represents a user profile, for now it has only information about the quota"
user = models.ForeignKey('auth.User')
class Meta:
app_label = 'accounts'
def __unicode__(self):
"return the user email"
return self.user.email
    def used_monthly_quota(self, month=0):
        """Return the amount of quota used in a given month (defaults to the current month).
        The ``month`` argument is an offset back from the current month: pass 1 to look
        one month behind, 2 to look two months behind, and so on.
        """
today = datetime.date.today()
total_purchased = self.user.books.filter(purchased=True,
purchase_date__year=today.year,
purchase_date__month=(today.month-month)
).aggregate(
Sum("purchase_value")
)['purchase_value__sum']
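        # aggregate() returns None when no matching purchases exist, hence the fallback to 0.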
        return total_purchased if total_purchased is not None else 0
@property
    def quota(self):
        "Return the user quota for the current month, creating it from the most recent existing quota, or with quota 0 if none exists."
today = datetime.date.today()
try:
return self.quotas.get(date__month=today.month, date__year=today.year)
except Quota.DoesNotExist:
last_month_quota = 0
try:
last_month_quota = self.quotas.all()[0].quota
            except IndexError:
pass
new_month_quota = self.quotas.create(date=today,quota=last_month_quota)
return new_month_quota
class Quota(models.Model):
"""
Represents an user's monthly quota
"""
user = models.ForeignKey(UserProfile,related_name="quotas")
date = models.DateField(_("Date"), default=datetime.date.today)
quota = models.DecimalField(_("Quota"),max_digits=10, decimal_places=2,default=Decimal("0"),null=True, blank=True)
class Meta:
app_label = 'accounts'
ordering = ['-date',]
    def __unicode__(self):
        "return the quota, user email and month/year"
return "%s - %s - %s/%s" % (self.quota, self.user.user.email, self.date.month, self.date.year)
def save(self, *args, **kwargs):
"""
        Always store the date normalized to day 1 of the month.
"""
#reset to day 1
if self.date.day != 1:
self.date = self.date - datetime.timedelta(days=self.date.day -1)
super(Quota, self).save(*args, **kwargs) | arruda/rmr | rmr/apps/accounts/models.py | Python | mit | 2,999 |
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .job import PretrainedModelJob
__all__ = ['PretrainedModelJob']
| gheinrich/DIGITS-GAN | digits/pretrained_model/__init__.py | Python | bsd-3-clause | 174 |
# coding: utf-8
# pylint: disable=missing-docstring, invalid-name
from __future__ import absolute_import
from google.appengine.api import users
import flask
import auth
import model
import util
from main import app
@app.route('/signin/google/')
def signin_google():
auth.save_request_params()
google_url = users.create_login_url(flask.url_for('google_authorized'))
return flask.redirect(google_url)
@app.route('/_s/callback/google/authorized/')
def google_authorized():
google_user = users.get_current_user()
if google_user is None:
flask.flash('You denied the request to sign in.')
return flask.redirect(flask.url_for('index'))
user_db = retrieve_user_from_google(google_user)
return auth.signin_via_social(user_db)
def retrieve_user_from_google(google_user):
auth_id = 'federated_%s' % google_user.user_id()
user_db = model.User.get_by('auth_ids', auth_id)
if user_db:
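        # Keep the stored admin flag in sync with App Engine's current-user admin status.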
if not user_db.admin and users.is_current_user_admin():
user_db.admin = True
user_db.put()
return user_db
return auth.create_or_get_user_db(
auth_id=auth_id,
name=util.create_name_from_email(google_user.email()),
username=google_user.email(),
email=google_user.email(),
verified=True,
admin=users.is_current_user_admin(),
)
| sidharta/hansel-app | main/auth/google.py | Python | mit | 1,359 |
#!/usr/bin/python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example lists account-level filter sets."""
import argparse
import os
import pprint
import sys
sys.path.insert(0, os.path.abspath('..'))
from googleapiclient.errors import HttpError
import samples_util
_OWNER_NAME_TEMPLATE = ('bidders/{bidders_resource_id}/'
'accounts/{accounts_resource_id}')
DEFAULT_ACCOUNT_RESOURCE_ID = 'ENTER_ACCOUNT_RESOURCE_ID_HERE'
DEFAULT_BIDDER_RESOURCE_ID = 'ENTER_BIDDER_RESOURCE_ID_HERE'
def main(ad_exchange_buyer, owner_name):
try:
# Construct and execute the request.
filter_sets = ad_exchange_buyer.bidders().accounts().filterSets().list(
ownerName=owner_name).execute()
print(f'Listing FilterSets for account: "{owner_name}".')
pprint.pprint(filter_sets)
except HttpError as e:
print(e)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
        description='Lists all filter sets associated with the given buyer account.'
)
# Required fields.
parser.add_argument(
'-a', '--account_resource_id', default=DEFAULT_ACCOUNT_RESOURCE_ID,
help=('The resource ID of the bidders.accounts resource for which the '
'filter sets were created. This will be used to construct the '
'ownerName used as a path parameter for filter set requests. For '
'additional information on how to configure the ownerName path '
'parameter, see: https://developers.google.com/authorized-buyers/'
'apis/reference/rest/v2beta1/bidders.accounts.filterSets/'
'list#body.PATH_PARAMETERS.owner_name'))
parser.add_argument(
'-b', '--bidder_resource_id', default=DEFAULT_BIDDER_RESOURCE_ID,
help=('The resource ID of the bidders resource for which the filter '
'sets were created. This will be used to construct the ownerName '
'used as a path parameter for filter set requests. For additional '
'information on how to configure the ownerName path parameter, '
'see: https://developers.google.com/authorized-buyers/apis/'
'reference/rest/v2beta1/bidders.accounts.filterSets/list'
'#body.PATH_PARAMETERS.owner_name'))
args = parser.parse_args()
try:
service = samples_util.GetService('v2beta1')
except IOError as ex:
print(f'Unable to create adexchangebuyer service - {ex}')
print('Did you specify the key file in samples_util.py?')
sys.exit(1)
main(service, _OWNER_NAME_TEMPLATE.format(
bidders_resource_id=args.bidder_resource_id,
accounts_resource_id=args.account_resource_id))
| googleads/googleads-adxbuyer-examples | python/samples/v2_x/list_account_level_filter_sets.py | Python | apache-2.0 | 3,206 |
## Script (Python) "getProjectTypes"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=
##title=return list of project types
ptypes = { 'Auftragsforschung':'Auftragsforschung',
'Sachverstand':'Sachverstand',
'Seminare':'Seminare',
'Grundlagenforschung':'Grundlagenforschung',
'zweckbetr. Forschung':'zweckbetr. Forschung'}
return ptypes
| syslabcom/imu | skins/imu/getProjectTypes.py | Python | gpl-2.0 | 475 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import test_crm_lead
| jeremiahyan/odoo | addons/crm_livechat/tests/__init__.py | Python | gpl-3.0 | 128 |
from chimera.core.manager import Manager
from chimera.core.chimeraobject import ChimeraObject
from chimera.core.proxy import Proxy
from chimera.core.event import event
from nose.tools import assert_raises
import time
import math
class Publisher (ChimeraObject):
def __init__ (self):
ChimeraObject.__init__(self)
self.counter = 0
def __start__ (self):
# ATTENTION: getProxy works only after __init__
self.fooDone += self.getProxy().fooDoneClbk
return True
def __stop__ (self):
self.fooDone -= self.getProxy().fooDoneClbk
def foo (self):
self.fooDone(time.time())
return 42
@event
def fooDone (self, when):
pass
def fooDoneClbk (self, when):
self.counter += 1
def getCounter (self):
return self.counter
class Subscriber (ChimeraObject):
def __init__ (self):
ChimeraObject.__init__(self)
self.counter = 0
self.results = []
def fooDoneClbk (self, when):
self.results.append((when, time.time()))
self.counter += 1
assert when, "When it happened?"
def getCounter (self):
return self.counter
def getResults (self):
return self.results
class TestEvents (object):
def setup (self):
self.manager = Manager()
def teardown (self):
self.manager.shutdown()
del self.manager
def test_publish (self):
assert self.manager.addClass (Publisher, "p") != False
assert self.manager.addClass (Subscriber, "s") != False
p = self.manager.getProxy("/Publisher/p")
assert isinstance(p, Proxy)
s = self.manager.getProxy("/Subscriber/s")
assert isinstance(s, Proxy)
p.fooDone += s.fooDoneClbk
assert p.foo() == 42
time.sleep (0.5) # delay to get messages delivered
assert s.getCounter() == 1
assert p.getCounter() == 1
assert p.foo() == 42
time.sleep (0.5) # delay to get messages delivered
assert s.getCounter() == 2
assert p.getCounter() == 2
# unsubscribe
p.fooDone -= s.fooDoneClbk
p.fooDone -= p.fooDoneClbk
assert p.foo() == 42
time.sleep (0.5) # delay to get messages delivered
assert s.getCounter() == 2
assert p.getCounter() == 2
def test_performance (self):
assert self.manager.addClass (Publisher, "p") != False
assert self.manager.addClass (Subscriber, "s") != False
p = self.manager.getProxy("/Publisher/p")
assert isinstance(p, Proxy)
s = self.manager.getProxy("/Subscriber/s")
assert isinstance(s, Proxy)
p.fooDone += s.fooDoneClbk
for check in range (1):
start = time.time()
for i in range (100):
p.foo()
end = time.time()
time.sleep (5)
results = s.getResults()
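            # Per-event delivery latency in milliseconds: t0 is the publish time, t the callback time.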
dt = [ (t - t0)*1000 for t0, t in results]
mean = sum (dt) / len(dt)
sigma = math.sqrt(sum([ (t - mean)**2 for t in dt]) / len(dt))
print "#"*25
print "# %d events (%.3f s)" % (len(dt), (end-start))
print "# %.2f events/s" % (len(dt)/(end-start))
print "# min : %-6.3f ms" % min(dt)
print "# max : %-6.3f ms" % max(dt)
print "# mean : %-6.3f ms" % mean
print "# sigma : %-6.3f ms" % sigma
print "#"*25
| wschoenell/chimera_imported_googlecode | src/chimera/core/tests/test_events.py | Python | gpl-2.0 | 3,578 |
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../tools'))
import files
import genetics
def main(argv):
spectrum = files.read_line_of_ints(argv[0])
counts = genetics.convolution_counts(spectrum)
print ' '.join(str(c) for c in genetics.convolution_list(counts))
if __name__ == "__main__":
main(sys.argv[1:])
| cowboysmall/rosalind | src/textbook/rosalind_ba4h.py | Python | mit | 362 |
"""
WSGI config for Databaes project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Databaes.settings")
application = get_wsgi_application() | stevetu717/Databaes | Databaes/wsgi.py | Python | mit | 394 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
SetVectorStyle.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import (QgsProcessingParameterFile,
QgsProcessingParameterVectorLayer,
QgsProcessingOutputVectorLayer)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
class SetVectorStyle(QgisAlgorithm):
INPUT = 'INPUT'
STYLE = 'STYLE'
OUTPUT = 'OUTPUT'
def group(self):
return self.tr('Vector general')
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterVectorLayer(self.INPUT,
self.tr('Vector layer')))
self.addParameter(QgsProcessingParameterFile(self.STYLE,
self.tr('Style file'), extension='qml'))
self.addOutput(QgsProcessingOutputVectorLayer(self.INPUT,
self.tr('Styled')))
def name(self):
return 'setstyleforvectorlayer'
def displayName(self):
return self.tr('Set style for vector layer')
def processAlgorithm(self, parameters, context, feedback):
layer = self.parameterAsVectorLayer(parameters, self.INPUT, context)
style = self.parameterAsFile(parameters, self.STYLE, context)
layer.loadNamedStyle(style)
layer.triggerRepaint()
return {self.INPUT: layer}
| t-hey/QGIS-Original | python/plugins/processing/algs/qgis/SetVectorStyle.py | Python | gpl-2.0 | 2,499 |
import pytest
from api.base.settings.defaults import API_BASE
from api_tests import utils
from osf_tests.factories import (
AuthUserFactory,
ProjectFactory,
)
@pytest.mark.django_db
class TestFileMetadataRecordsList:
@pytest.fixture()
def user(self):
return AuthUserFactory()
@pytest.fixture()
def private_file(self, user):
private_node = ProjectFactory(creator=user)
return utils.create_test_file(private_node, user, filename='private_file')
@pytest.fixture()
def public_file(self, user):
public_node = ProjectFactory(creator=user, is_public=True)
return utils.create_test_file(public_node, user, filename='public_file')
def get_url(self, file):
return '/{}files/{}/metadata_records/'.format(API_BASE, file._id)
def test_metadata_record_list(self, app, user, public_file, private_file):
# test_unauthenticated_can_view_public_file_metadata_records
res = app.get(self.get_url(public_file))
assert res.status_code == 200
assert len(res.json['data']) == public_file.records.count()
# test_authenticated_can_view_public_file_metadata_records
res = app.get(self.get_url(public_file), auth=user.auth)
assert res.status_code == 200
assert len(res.json['data']) == public_file.records.count()
# test_unauthenticated_cannot_view_private_file_metadata_records
res = app.get(self.get_url(private_file), expect_errors=True)
assert res.status_code == 401
# test_authenticated_can_view_private_file_metadata_records
res = app.get(self.get_url(private_file), auth=user.auth)
assert res.status_code == 200
assert len(res.json['data']) == private_file.records.count()
# test_unauthorized_cannot_view_private_file_metadata_records
unauth = AuthUserFactory()
res = app.get(self.get_url(private_file), auth=unauth.auth, expect_errors=True)
assert res.status_code == 403
# test_cannot_create_metadata_records
res = app.post_json_api(self.get_url(private_file), auth=user.auth, expect_errors=True)
assert res.status_code == 405
| Johnetordoff/osf.io | api_tests/files/views/test_file_metadata_records_list.py | Python | apache-2.0 | 2,185 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests fuzzers.mutator_plugin."""
import os
import shutil
import unittest
from pyfakefs import fake_filesystem_unittest
from clusterfuzz._internal.bot.fuzzers import mutator_plugin
from clusterfuzz._internal.tests.test_libs import helpers
from clusterfuzz._internal.tests.test_libs import test_utils
class FindMutatorPluginTest(fake_filesystem_unittest.TestCase):
"""Tests find_mutator_plugin."""
def setUp(self):
helpers.patch_environ(self)
test_utils.set_up_pyfakefs(self)
self.plugins_root_dir = '/plugins'
os.environ['MUTATOR_PLUGINS_DIR'] = self.plugins_root_dir
def test_find_mutator_plugin_with_usable(self):
"""Tests that the right path is returned by find_mutator_plugin when there
is a usable mutator plugin available."""
usable_plugin_path = os.path.join(
self.plugins_root_dir, 'plugins',
mutator_plugin.MUTATOR_SHARED_OBJECT_FILENAME)
self.fs.create_file(usable_plugin_path)
self.assertEqual(usable_plugin_path, mutator_plugin.find_mutator_plugin())
def test_set_mutator_plugin_without_usable(self):
"""Tests that None is returned by find_mutator_plugin when there isn't a
usable mutator plugin available."""
self.assertIsNone(mutator_plugin.find_mutator_plugin())
# pylint: disable=protected-access
class GetDirectoryFunctionsTest(unittest.TestCase):
"""Tests functions for get plugin directories."""
def setUp(self):
helpers.patch_environ(self)
self.plugins_root_dir = '/plugins'
os.environ['MUTATOR_PLUGINS_DIR'] = self.plugins_root_dir
def test_get_mutator_plugins_subdir(self):
"""Tests that _get_mutator_plugins_subdir returns the path to the correct
subdirectory."""
subdir = 'x'
self.assertEqual(
os.path.join(self.plugins_root_dir, subdir),
mutator_plugin._get_mutator_plugins_subdir(subdir))
def test_get_mutator_plugins_archives_dir(self):
"""Tests that _get_mutator_plugins_archives_dir returns the path to the
mutator plugin archives directory."""
self.assertEqual(
os.path.join(self.plugins_root_dir,
mutator_plugin.ARCHIVES_SUBDIR_NAME),
mutator_plugin._get_mutator_plugins_archives_dir())
def test_get_mutator_plugins_unpacked_dir(self):
"""Tests that _get_mutator_plugins_unpacked_dir returns the path to the
unpacked mutator plugin directory."""
self.assertEqual(
os.path.join(self.plugins_root_dir, mutator_plugin.PLUGINS_SUBDIR_NAME),
mutator_plugin._get_mutator_plugins_unpacked_dir())
# pylint: disable=protected-access
class PluginGetterTest(fake_filesystem_unittest.TestCase):
"""Tests PluginGetter."""
def setUp(self):
"""Setup for plugin getter test."""
helpers.patch_environ(self)
test_utils.set_up_pyfakefs(self)
os.environ['JOB_NAME'] = 'libfuzzer_asan_test'
self.fuzzer_binary_name = 'test_fuzzer'
self.name = 'myplugin'
self.plugins_root_dir = '/plugins'
os.environ['MUTATOR_PLUGINS_DIR'] = self.plugins_root_dir
self.fs.create_dir(self.plugins_root_dir)
self.plugin_getter = mutator_plugin.PluginGetter(self.fuzzer_binary_name)
self.plugins_archives_dir = os.path.join(self.plugins_root_dir, 'archives')
self.plugin_archive_filename = '%s-%s-%s.zip' % (
self.name, os.environ['JOB_NAME'], self.fuzzer_binary_name)
self.plugin_archive_path = os.path.join(self.plugins_archives_dir,
self.plugin_archive_filename)
self.plugins_dir = os.path.join(self.plugins_root_dir, 'plugins')
helpers.patch(self, [
'clusterfuzz._internal.google_cloud_utils.storage.copy_file_from',
'clusterfuzz._internal.bot.fuzzers.mutator_plugin._get_mutator_plugins_from_bucket',
])
def mocked_copy_file_from(gcs_url, file_path):
expected_url = '%s/%s' % (mutator_plugin._get_mutator_plugins_bucket_url(
), self.plugin_archive_filename)
self.assertEqual(expected_url, gcs_url)
self.assertEqual(file_path, self.plugin_archive_path)
return file_path
self.mock.copy_file_from.side_effect = mocked_copy_file_from
def test_create_directories(self):
"""Tests that create_directories creates the right directories."""
shutil.rmtree(self.plugins_root_dir)
self.fs.create_dir(self.plugins_root_dir)
self.plugin_getter.create_directories()
directories = [
os.path.join(self.plugins_root_dir, 'plugins'),
os.path.join(self.plugins_root_dir, 'archives')
]
self.assertTrue(all(os.path.isdir(directory) for directory in directories))
def test_recognizes_usable(self):
"""Tests that _is_plugin_usable recognizes a usable plugin archive."""
self.assertTrue(
self.plugin_getter._is_plugin_usable(self.plugin_archive_filename))
def test_recognizes_unusable(self):
"""Tests that _is_plugin_usable recognizes an unusable plugin archive."""
unusable_plugin_archive_filename = self.plugin_archive_filename.replace(
self.fuzzer_binary_name, 'other_binary')
self.assertFalse(
self.plugin_getter._is_plugin_usable(unusable_plugin_archive_filename))
def test_download_mutator_plugin_archive(self):
"""Tests that _download_mutator_plugin_archive downloads an archive to the
correct location."""
self.assertEqual(
self.plugin_archive_path,
mutator_plugin._download_mutator_plugin_archive(
self.plugin_archive_filename))
class ExtractNameFromArchiveTest(unittest.TestCase):
"""Tests for _extract_name_from_archive."""
def test_extract_name_from_archive(self):
"""Tests that _extract_name_from_archive extracts the name from the
archive."""
name = 'myplugin'
fuzzer_binary_name = 'test_fuzzer'
job_name = 'libfuzzer_asan_test'
plugin_archive_filename = '%s-%s-%s.zip' % (name, job_name,
fuzzer_binary_name)
extracted_name, job_and_fuzz_target = (
mutator_plugin._extract_name_from_archive(plugin_archive_filename))
self.assertEqual(name, extracted_name)
expected_job_and_fuzz_target = '%s-%s' % (job_name, fuzzer_binary_name)
self.assertEqual(expected_job_and_fuzz_target, job_and_fuzz_target)
| google/clusterfuzz | src/clusterfuzz/_internal/tests/core/bot/fuzzers/mutator_plugin_test.py | Python | apache-2.0 | 6,800 |
import textfsm
from utils import read_txt_file, convert_uptime
def parse_get_facts(text):
tplt = read_txt_file("textfsm_templates/fastiron_show_version.template")
t = textfsm.TextFSM(tplt)
result = t.ParseText(text)
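    # ParseText returns a list of value rows; an empty list means the template matched nothing.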
    if result:
(os_version, model, serial_no, day, hour, minute, second) = result[0]
uptime = convert_uptime(day, hour, minute, second)
return {"os_version": os_version, "model": model, "serial_no": serial_no, "uptime": uptime}
else:
raise ValueError("show_version parser failed") | gaberger/napalm-brocade-fastiron | napalm_brocade_fastiron/utils/parsers.py | Python | apache-2.0 | 561 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum
from six import with_metaclass
from azure.core import CaseInsensitiveEnumMeta
class AccessTier(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Required for storage accounts where kind = BlobStorage. The access tier used for billing.
"""
HOT = "Hot"
COOL = "Cool"
class AccountStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Gets the status indicating whether the primary location of the storage account is available or
unavailable.
"""
AVAILABLE = "available"
UNAVAILABLE = "unavailable"
class Bypass(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies whether traffic is bypassed for Logging/Metrics/AzureServices. Possible values are
any combination of Logging|Metrics|AzureServices (For example, "Logging, Metrics"), or None to
bypass none of those traffics.
"""
NONE = "None"
LOGGING = "Logging"
METRICS = "Metrics"
AZURE_SERVICES = "AzureServices"
class CorsRuleAllowedMethodsItem(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
DELETE = "DELETE"
GET = "GET"
HEAD = "HEAD"
MERGE = "MERGE"
POST = "POST"
OPTIONS = "OPTIONS"
PUT = "PUT"
class DefaultAction(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies the default action of allow or deny when no other rules match.
"""
ALLOW = "Allow"
DENY = "Deny"
class GeoReplicationStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The status of the secondary location. Possible values are: - Live: Indicates that the secondary
location is active and operational. - Bootstrap: Indicates initial synchronization from the
    primary location to the secondary location is in progress. This typically occurs when
replication is first enabled. - Unavailable: Indicates that the secondary location is
temporarily unavailable.
"""
LIVE = "Live"
BOOTSTRAP = "Bootstrap"
UNAVAILABLE = "Unavailable"
class HttpProtocol(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The protocol permitted for a request made with the account SAS.
"""
HTTPS_HTTP = "https,http"
HTTPS = "https"
class ImmutabilityPolicyState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The ImmutabilityPolicy state of a blob container, possible values include: Locked and Unlocked.
"""
LOCKED = "Locked"
UNLOCKED = "Unlocked"
class ImmutabilityPolicyUpdateType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The ImmutabilityPolicy update type of a blob container, possible values include: put, lock and
extend.
"""
PUT = "put"
LOCK = "lock"
EXTEND = "extend"
class KeyPermission(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Permissions for the key -- read-only or full permissions.
"""
READ = "Read"
FULL = "Full"
class KeySource(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The encryption keySource (provider). Possible values (case-insensitive): Microsoft.Storage,
Microsoft.Keyvault
"""
MICROSOFT_STORAGE = "Microsoft.Storage"
MICROSOFT_KEYVAULT = "Microsoft.Keyvault"
class Kind(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Indicates the type of storage account.
"""
STORAGE = "Storage"
STORAGE_V2 = "StorageV2"
BLOB_STORAGE = "BlobStorage"
FILE_STORAGE = "FileStorage"
BLOCK_BLOB_STORAGE = "BlockBlobStorage"
class LeaseContainerRequestAction(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies the lease action. Can be one of the available actions.
"""
ACQUIRE = "Acquire"
RENEW = "Renew"
CHANGE = "Change"
RELEASE = "Release"
BREAK_ENUM = "Break"
class LeaseDuration(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies whether the lease on a container is of infinite or fixed duration, only when the
container is leased.
"""
INFINITE = "Infinite"
FIXED = "Fixed"
class LeaseState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Lease state of the container.
"""
AVAILABLE = "Available"
LEASED = "Leased"
EXPIRED = "Expired"
BREAKING = "Breaking"
BROKEN = "Broken"
class LeaseStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The lease status of the container.
"""
LOCKED = "Locked"
UNLOCKED = "Unlocked"
class ManagementPolicyName(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
DEFAULT = "default"
class Permissions(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The signed permissions for the account SAS. Possible values include: Read (r), Write (w),
Delete (d), List (l), Add (a), Create (c), Update (u) and Process (p).
"""
R = "r"
D = "d"
W = "w"
L = "l"
A = "a"
C = "c"
U = "u"
P = "p"
class ProvisioningState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Gets the status of the storage account at the time the operation was called.
"""
CREATING = "Creating"
RESOLVING_DNS = "ResolvingDNS"
SUCCEEDED = "Succeeded"
class PublicAccess(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies whether data in the container may be accessed publicly and the level of access.
"""
CONTAINER = "Container"
BLOB = "Blob"
NONE = "None"
class Reason(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Gets the reason that a storage account name could not be used. The Reason element is only
returned if NameAvailable is false.
"""
ACCOUNT_NAME_INVALID = "AccountNameInvalid"
ALREADY_EXISTS = "AlreadyExists"
class ReasonCode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The reason for the restriction. As of now this can be "QuotaId" or
"NotAvailableForSubscription". Quota Id is set when the SKU has requiredQuotas parameter as the
subscription does not belong to that quota. The "NotAvailableForSubscription" is related to
capacity at DC.
"""
QUOTA_ID = "QuotaId"
NOT_AVAILABLE_FOR_SUBSCRIPTION = "NotAvailableForSubscription"
class Services(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The signed services accessible with the account SAS. Possible values include: Blob (b), Queue
(q), Table (t), File (f).
"""
B = "b"
Q = "q"
T = "t"
F = "f"
class SignedResource(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The signed services accessible with the service SAS. Possible values include: Blob (b),
Container (c), File (f), Share (s).
"""
B = "b"
C = "c"
F = "f"
S = "s"
class SignedResourceTypes(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The signed resource types that are accessible with the account SAS. Service (s): Access to
service-level APIs; Container (c): Access to container-level APIs; Object (o): Access to
object-level APIs for blobs, queue messages, table entities, and files.
"""
S = "s"
C = "c"
O = "o"
class SkuName(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Gets or sets the SKU name. Required for account creation; optional for update. Note that in
older versions, SKU name was called accountType.
"""
STANDARD_LRS = "Standard_LRS"
STANDARD_GRS = "Standard_GRS"
STANDARD_RAGRS = "Standard_RAGRS"
STANDARD_ZRS = "Standard_ZRS"
PREMIUM_LRS = "Premium_LRS"
PREMIUM_ZRS = "Premium_ZRS"
class SkuTier(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Gets the SKU tier. This is based on the SKU name.
"""
STANDARD = "Standard"
PREMIUM = "Premium"
class State(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Gets the state of virtual network rule.
"""
PROVISIONING = "provisioning"
DEPROVISIONING = "deprovisioning"
SUCCEEDED = "succeeded"
FAILED = "failed"
NETWORK_SOURCE_DELETED = "networkSourceDeleted"
class UsageUnit(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Gets the unit of measurement.
"""
COUNT = "Count"
BYTES = "Bytes"
SECONDS = "Seconds"
PERCENT = "Percent"
COUNTS_PER_SECOND = "CountsPerSecond"
BYTES_PER_SECOND = "BytesPerSecond"
| Azure/azure-sdk-for-python | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2018_07_01/models/_storage_management_client_enums.py | Python | mit | 8,706 |
#!/usr/bin/env python
# Copyright (C) 2006-2017 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
class TestCoverSongSimilarity(TestCase):
'''Unit tests for essentia CoverSongSimilarity algorithm'''
# pre-defined binary similarity matrix for the test
sim_matrix = array([[1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1]])
# expected cover similarity distance
expected_distance = 1.732
def testEmpty(self):
self.assertComputeFails(CoverSongSimilarity(), [])
def testRegressionStandard(self):
'''Test regression of CoverSongSimilarity algorithm in standard mode'''
sim = CoverSongSimilarity()
score_matrix, distance = sim.compute(self.sim_matrix)
self.assertAlmostEqualFixedPrecision(self.expected_distance, distance)
warn = "Expected shape of output score_matrix is %s, instead of %s" % (self.sim_matrix.shape, score_matrix.shape)
self.assertEqual(score_matrix.shape[0], self.sim_matrix.shape[0], warn)
self.assertEqual(score_matrix.shape[1], self.sim_matrix.shape[1], warn)
def testInvalidParam(self):
self.assertConfigureFails(CoverSongSimilarity(), { 'distanceType': 'test' })
self.assertConfigureFails(CoverSongSimilarity(), { 'alignmentType': 'test' })
def testRegressionStreaming(self):
'''Test regression of CoverSongSimilarity algorithm in streaming mode'''
from essentia.streaming import CoverSongSimilarity as CoverSongSimilarityStreaming
matrix_input = VectorInput(self.sim_matrix)
coversim_streaming = CoverSongSimilarityStreaming(pipeDistance=True)
pool = Pool()
matrix_input.data >> coversim_streaming.inputArray
coversim_streaming.scoreMatrix >> (pool, 'scoreMatrix')
coversim_streaming.distance >> (pool, 'distance')
# run the algorithm network
run(matrix_input)
self.assertAlmostEqualFixedPrecision(self.expected_distance, pool['distance'][-1])
suite = allTests(TestCoverSongSimilarity)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
| MTG/essentia | test/src/unittests/highlevel/test_coversongsimilarity.py | Python | agpl-3.0 | 3,115 |
from setuptools import setup, find_packages
#install_requires = open('requirements.txt').readlines()
setup(name='moxie-feedback',
version='0.1',
packages=find_packages(),
description='Feedback module for Moxie',
author='Mobile Oxford',
author_email='[email protected]',
url='https://github.com/ox-it/moxie-feedback',
include_package_data=True,
setup_requires=["setuptools"],
#install_requires=install_requires,
#test_suite="moxie_events.tests",
)
| ox-it/moxie-feedback | setup.py | Python | apache-2.0 | 497 |
"""Implement the rules of each Java build utility type."""
import json
import logging
import os
import subprocess
import mool.jar_merger as jm
import mool.shared_utils as su
import mool.jar_testng_runner as testng_runner
JAVA_VERSION_DEP_RULE_TYPES = [su.JAVA_BIN_TYPE, su.JAVA_LIB_TYPE,
su.JAVA_TEST_TYPE]
MAX_LOOP_COUNT = 5000
class Error(su.Error):
"""Error class for this module."""
def perform_linking(link_details):
"""Perform Java linking of multiple jars to a single jar."""
lib_details, jar_out_file, main_class = link_details
jm.do_merge(lib_details, jar_out_file, main_class)
def perform_java_linkall_currdir(params):
"""Perform Java linking of current directory to single jar."""
assert 1 == len(params)
target_file = params[0]
# Ensure current directory is not empty.
subprocess.check_call(su.get_mkdir_command(su.JAR_MANIFEST_PATH))
jar_create_command = [su.JAR_BIN, 'cf', target_file]
jar_create_command.extend(os.listdir('.'))
subprocess.check_call(jar_create_command)
def get_maven_download_paths(maven_identifiers):
"""Returns tuple for jar download url and jar download paths in cache for
main and sources jars."""
artifact_id, classifier, group_id, repo_url, version = maven_identifiers
group_id = group_id.replace('.', '/')
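  # Standard Maven repository layout:
  # <group-as-path>/<artifact>/<version>/<artifact>-<version>[-<classifier>].jar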
jar_name = '{}-{}{}.jar'.format(artifact_id, version, classifier)
src_jar_name = '{}-{}{}-sources.jar'.format(artifact_id, version, classifier)
url = '/'.join([repo_url, group_id, artifact_id, version, jar_name])
output_path = os.path.join(su.JAR_SEARCH_PATH, group_id, artifact_id,
version, jar_name)
srcs_url = '/'.join([repo_url, group_id, artifact_id, version, src_jar_name])
srcs_output_path = os.path.join(su.JAR_SEARCH_PATH, group_id, artifact_id,
version, src_jar_name)
return (url, output_path, srcs_url, srcs_output_path)
def export_mvn_deps(params):
"""Export mvn deps to file."""
all_deps = []
def _expand_deps(dep_list, dep_type):
"""Format the given dependency list into dictionary."""
for dep in dep_list:
artifact_id, classifier, group_id, repo_url, version, file_path = dep
jar_url, jar_path, srcs_url, srcs_path = (
get_maven_download_paths(dep[0:5]))
dep_elem = {'artifactId': artifact_id, 'classifier': classifier,
'groupId': group_id, 'jarBuildPath': file_path,
'jarCachePath': jar_path, 'jarUrl': jar_url,
'repoUrl': repo_url, 'scope': dep_type,
'srcsCachePath': srcs_path, 'srcsUrl': srcs_url,
'version': version}
all_deps.append(dep_elem)
out_file, maven_included_deps, maven_compile_deps = params
_expand_deps(maven_included_deps, 'compile')
_expand_deps(maven_compile_deps, 'provided')
all_deps = sorted(all_deps, key=lambda x: x['scope'])
mvn_dict = {'deps': all_deps}
su.write_file(out_file, json.dumps(mvn_dict, indent=4))
def java_testng_runner(args):
"""Run java tests using TestNg framework."""
testng_runner.do_main(args)
def get_java_compile_command(rule_details, compile_libs, dir_path, file_list,
warn_as_error):
"""Get java compile command."""
compile_params = rule_details.get(su.COMPILE_PARAMS_KEY, [])
java_version = rule_details.get(su.JAVA_VERSION_KEY, su.JAVA_DEFAULT_VERSION)
compile_command = su.get_javac_bin(java_version).split()
if warn_as_error:
compile_command.extend(['-Werror'])
else:
compile_command.extend(['-nowarn'])
class_path = []
if compile_libs:
class_path.extend([c for c in compile_libs if isinstance(c, str)])
class_path = ':'.join(class_path)
if class_path:
compile_command.extend(['-cp', class_path])
compile_command.extend(['-d', dir_path])
compile_command.extend(compile_params or [])
compile_command.extend(file_list)
return compile_command
def compare_java_versions(rule_version, dependency_version):
  """Compares two java versions. rule_version should be greater than or
  equal to dependency_version."""
return float(rule_version) >= float(dependency_version)
def handle_java_version_key(rule_details, details_map):
"""Set java compiler version and check the version dependencies on other
rules. Any dependency of this rule shouldn't have a higher java version."""
valid_rule_types = JAVA_VERSION_DEP_RULE_TYPES
if all([su.JAVA_VERSION_KEY in rule_details,
rule_details[su.TYPE_KEY] not in valid_rule_types]):
raise Error(('Java version should only be specified for [{}]'
'types!').format(', '.join(valid_rule_types)))
rule_dependencies = []
rule_dependencies.extend(rule_details[su.DEPS_KEY])
rule_dependencies.extend(rule_details[su.COMPILE_DEPS_KEY])
java_version = rule_details.get(su.JAVA_VERSION_KEY, su.JAVA_DEFAULT_VERSION)
for dependency in rule_dependencies:
dep_rule = details_map[dependency]
dep_java_ver = dep_rule.get(su.JAVA_VERSION_KEY, su.JAVA_DEFAULT_VERSION)
if all([dep_rule[su.TYPE_KEY] in valid_rule_types,
(any(dep_rule.get(su.SRCS_KEY, [])) or
su.JAVA_VERSION_KEY in dep_rule),
not compare_java_versions(java_version, dep_java_ver)]):
msg = ('Java version dependency check failed.\n'
'Build rule {}(Java {}) depends on {}(Java {})!')
msg = msg.format(rule_details[su.SYMBOL_KEY], java_version,
dep_rule[su.SYMBOL_KEY], dep_java_ver)
raise Error(msg)
def _get_maven_identifiers(maven_details):
"""Parse the maven details to obtain key identifiers."""
artifact_id = maven_details[su.MAVEN_ARTIFACT_ID_KEY]
classifier = maven_details.get(su.MAVEN_CLASSIFIER_KEY, '')
classifier = '-{}'.format(classifier) if classifier else ''
group_id = maven_details[su.MAVEN_GROUP_ID_KEY]
repo_url = maven_details[su.MAVEN_REPO_URL_KEY]
version = maven_details[su.MAVEN_VERSION_KEY]
return artifact_id, classifier, group_id, repo_url, version
def _get_recursive_compile_deps(rule_details, details_map):
"""Get all compile time deps dependencies for a rule."""
final_deps = set()
active_set = set(rule_details.get(su.DEPS_KEY))
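  # Breadth-first walk over the dependency graph; MAX_LOOP_COUNT guards against dependency cycles.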
iters = 0
while active_set:
iters += 1
assert iters <= MAX_LOOP_COUNT, "Too many recursive iterations."
temp_set = set()
for dep in active_set:
final_deps = final_deps.union(
set(details_map[dep].get(su.COMPILE_DEPS_KEY, [])))
temp_set = temp_set.union(details_map[dep][su.DEPS_KEY])
active_set = temp_set
return final_deps
class JavaCommon(object):
"""Common Java handler functions."""
@classmethod
def _get_all_pc_deps(cls, rule_details):
"""Get precompiled deps of current rule."""
pc_deps = rule_details.get(su.PC_DEPS_KEY, [])
pc_deps = [su.expand_env_vars(f) for f in list(set(pc_deps))]
if su.JAVA_TEST_TYPE == rule_details[su.TYPE_KEY]:
pc_deps.extend(su.JAVA_TEST_DEFAULT_JARS)
return pc_deps
@classmethod
def _is_test_rule(cls, rule_details):
"""Just check if the given rule is a test rule."""
return rule_details[su.TYPE_KEY] == su.JAVA_TEST_TYPE
@classmethod
def _set_maven_identifiers(cls, rule_details):
"""Set maven identifiers of rule."""
maven_identifiers = []
maven_details = rule_details.get(su.MAVEN_SPECS_KEY, {})
if maven_details:
maven_identifiers = _get_maven_identifiers(maven_details)
rule_details[su.MAVEN_IDENTIFIERS_KEY] = maven_identifiers
@classmethod
def _get_maven_dep(cls, rule_details):
"""Get maven dependency of rule."""
if not rule_details[su.MAVEN_IDENTIFIERS_KEY]:
return []
assert not rule_details[su.SRCS_KEY]
assert not rule_details[su.DEPS_KEY]
assert rule_details[su.LINK_INCLUDE_DEPS_KEY]
url, output_path, srcs_url, srcs_output_path = (
get_maven_download_paths(rule_details[su.MAVEN_IDENTIFIERS_KEY]))
if su.MAVEN_PREFER_LOCAL_REPO:
try:
new_url = url.replace(
rule_details[su.MAVEN_IDENTIFIERS_KEY][3],
'file://{}'.format(su.MAVEN_PREFER_LOCAL_REPO))
su.download_cached_item(new_url, output_path)
except IOError:
logging.warn('File not found in local repo!! Trying remote repo.')
su.download_cached_item(url, output_path)
else:
su.download_cached_item(url, output_path)
if su.is_developer_mode():
try:
su.download_cached_item(srcs_url, srcs_output_path)
except IOError:
pass
return [output_path]
@classmethod
def _get_all_deps(cls, rule_details, details_map):
"""Get all link libraries for a Java build rule."""
link_libs = []
dep_sources = []
for rule_symbol in rule_details[su.ALL_DEPS_KEY]:
if rule_symbol == rule_details[su.SYMBOL_KEY]:
continue
dep_rule_details = details_map[rule_symbol]
if dep_rule_details[su.TYPE_KEY] == su.JAVA_PROTO_LIB_TYPE:
link_libs.append(su.JAVA_PROTOBUF_JAR)
link_libs.append(dep_rule_details[su.OUT_KEY])
dep_sources.extend(dep_rule_details[su.SRCS_KEY])
link_libs.extend(cls._get_all_pc_deps(rule_details))
link_libs.extend(cls._get_maven_dep(rule_details))
assert all([l.endswith('.jar') for l in link_libs])
link_libs = sorted(list(set(link_libs)))
compile_libs = [details_map[r][su.OUT_KEY]
for r in rule_details[su.COMPILE_DEPS_KEY]]
# For java test rule, recursively add all the compile time dependencies
# as run time dependencies of rule.
if cls._is_test_rule(rule_details):
all_compile_deps = _get_recursive_compile_deps(rule_details, details_map)
compile_libs.extend(
[details_map[dep][su.OUT_KEY] for dep in all_compile_deps])
return link_libs, compile_libs, dep_sources
@classmethod
def _get_recursive_maven_deps(cls, rule_details, details_map):
"""Get all maven dependencies for a rule."""
def _accumulate(deps_key, included_deps, compile_deps):
"""Accumulate maven dependencies from a dependency key."""
for rule_symbol in rule_details[deps_key]:
if rule_symbol == rule_details[su.SYMBOL_KEY]:
continue
dep_rule_details = details_map[rule_symbol]
dep_maven_id = dep_rule_details.get(su.MAVEN_IDENTIFIERS_KEY, [])
if dep_maven_id:
included_deps.append(dep_maven_id + tuple([
dep_rule_details[su.OUT_KEY]]))
maven_deps_pair = dep_rule_details.get(su.MAVEN_DEPS_KEY, ([], []))
included_deps.extend(maven_deps_pair[0])
compile_deps.extend(maven_deps_pair[1])
maven_included_deps = []
maven_compile_deps = []
_accumulate(su.DEPS_KEY, maven_included_deps, maven_compile_deps)
_accumulate(su.COMPILE_DEPS_KEY, maven_compile_deps, maven_compile_deps)
_accumulate(su.ALL_DEPS_KEY, maven_included_deps, maven_compile_deps)
maven_included_deps = sorted(list(set(maven_included_deps)))
maven_compile_deps = sorted(list(set(maven_compile_deps)))
maven_compile_deps = [d for d in maven_compile_deps
if d not in maven_included_deps]
return (maven_included_deps, maven_compile_deps)
@classmethod
def _set_compile_command(cls, rule_details):
"""Set Java compile command."""
rule_details[su.COMPILE_COMMAND_KEY] = []
if not rule_details[su.SRCS_KEY]:
return
compile_libs = []
if rule_details[su.COMPILE_LIBS_KEY]:
compile_libs = [os.path.join(rule_details[su.WDIR_CLSDEPS_KEY], '*')]
compile_command = get_java_compile_command(
rule_details, compile_libs, rule_details[su.WDIR_TARGET_KEY],
[su.get_relative_path(rule_details[su.POSSIBLE_PREFIXES_KEY], f)
for f in rule_details[su.SRCS_KEY]],
not rule_details[su.COMPILE_IGNORE_WARNINGS_KEY])
rule_details[su.COMPILE_COMMAND_KEY].append(compile_command)
@classmethod
def _set_link_command(cls, rule_details):
"""Set Java link command."""
main_class = rule_details.get(su.MAIN_CLASS_KEY,
su.JAVA_FAKE_MAIN_CLASS)
link_commands = []
final_libs = []
# Link classes from current rule to a jar.
if rule_details[su.COMPILE_COMMAND_KEY]:
link_commands.append(
[su.CHANGE_CURR_DIR, rule_details[su.WDIR_TARGET_KEY]])
link_commands.append([su.PERFORM_JAVA_LINK_ALL_CURRDIR,
rule_details[su.TEMP_OUT_KEY]])
final_libs.append(rule_details[su.TEMP_OUT_KEY])
# Pull in the dependencies that need to be included.
if ((rule_details[su.LINK_LIBS_KEY] and
rule_details[su.LINK_INCLUDE_DEPS_KEY])):
final_libs.extend(rule_details[su.LINK_LIBS_KEY])
# There must be some dependencies or sources. Otherwise the target would be
# empty.
assert final_libs
link_commands.append(
[su.JAVA_LINK_JAR_COMMAND,
(rule_details.get(su.JAR_INCLUDE_KEY, []),
rule_details.get(su.JAR_EXCLUDE_KEY, []), final_libs),
rule_details[su.OUT_KEY], main_class])
rule_details[su.LINK_COMMANDS_KEY] = link_commands
@classmethod
  def _set_test_commands(cls, rule_details, details_map):
    """Set the TestNG test command for the rule."""
test_command = [su.JAVA_TESTNG_RUNNER, rule_details[su.OUT_KEY]]
if su.TEST_CLASS_KEY in rule_details:
test_command.append([rule_details[su.TEST_CLASS_KEY]])
else:
test_command.append(rule_details[su.TEST_CLASSES_KEY])
for test_class in test_command[-1]:
if test_class.endswith('.java'):
raise Error(('Invalid test class name %s! It shouldn\'t end with '
'.java!') % test_class)
working_dir = os.path.join(rule_details[su.WDIR_KEY], '.test.wdir')
test_command.append(working_dir)
test_command.append(rule_details.get(su.JAVA_TESTNG_GROUPS, ['unit']))
test_command.append(rule_details[su.WDIR_CLSDEPS_KEY])
test_command.append(rule_details.get(su.RUNTIME_PARAMS_KEY, []))
ext_jar_files = [details_map[rule][su.OUT_KEY] for rule in rule_details[
su.EXTRACT_RESOURCES_DEP_KEY]]
test_command.append(ext_jar_files)
rule_details[su.TEST_COMMANDS_KEY] = [test_command]
@classmethod
def set_precompile_commands(cls, rule_details):
"""Set precompile link command for dependencies."""
rule_details[su.PRECOMPILE_COMMANDS_KEY] = []
if not rule_details[su.SRCS_KEY]:
return
# File-linking all compile-time dependencies to a single directory for the
# benefit of javac compiler. The actual file names here are not important,
# so we use an increasing sequence of names. Also, note that at this level
# it is possible to have different jars in the clsdeps directory with
# different implementations of the same class. The merge-conflict however
# would be resolved at final link time. This is a performance optimization
# used for faster coding.
compile_libs = rule_details[su.COMPILE_LIBS_KEY]
for index in xrange(len(compile_libs)):
compile_lib = compile_libs[index]
dst_file = os.path.join(rule_details[su.WDIR_CLSDEPS_KEY],
'f{}.jar'.format(index))
file_link_command = su.get_copy_command(compile_lib, dst_file, True)
rule_details[su.PRECOMPILE_COMMANDS_KEY].append(file_link_command)
@classmethod
def _set_all_dep_paths(cls, rule_details, link_libs, dep_sources):
"""Set all dependency paths list for the rule."""
all_dep_paths = rule_details[su.SRCS_KEY][:]
all_dep_paths.extend(link_libs)
all_dep_paths.extend(dep_sources)
all_dep_paths.append(rule_details[su.OUT_KEY])
rule_details[su.ALL_DEP_PATHS_KEY].extend(sorted(list(set(all_dep_paths))))
@classmethod
def _normalize_fields(cls, rule_details):
"""Normalize fields in rule details."""
if su.RULE_NORMALIZED_KEY in rule_details:
return
rule_details[su.COMPILE_DEPS_KEY] = (
rule_details.get(su.COMPILE_DEPS_KEY, []))
rule_details[su.COMPILE_PARAMS_KEY] = (
rule_details.get(su.COMPILE_PARAMS_KEY, []))
if cls._is_test_rule(rule_details):
# Unit tests should be fast to build and execute, otherwise valuable
# developer time would end up being misused. If an
# all-dependency-included test jar is really needed, includeDeps should
# be set to "True" explicitly in a copy of the rule in the BLD file. This
# all inclusive rule can be packaged separately.
rule_details[su.LINK_INCLUDE_DEPS_KEY] = su.string_to_bool(
rule_details.get(su.LINK_INCLUDE_DEPS_KEY, 'False'))
else:
rule_details[su.LINK_INCLUDE_DEPS_KEY] = su.string_to_bool(
rule_details.get(su.LINK_INCLUDE_DEPS_KEY, 'True'))
# Do a sanity check. A build rule with zero source files must include
# dependencies. Otherwise, the only point served is to make BLD files more
# compact. Why not achieve build efficiency as well?
if not rule_details.get(su.SRCS_KEY, []):
assert rule_details[su.LINK_INCLUDE_DEPS_KEY]
rule_details[su.COMPILE_IGNORE_WARNINGS_KEY] = su.string_to_bool(
rule_details.get(su.COMPILE_IGNORE_WARNINGS_KEY, 'False'))
rule_details[su.RULE_NORMALIZED_KEY] = True
@classmethod
def _check_dependency_versions(cls, rule_details, details_map):
"""Run all possible version checks across the dependencies."""
handle_java_version_key(rule_details, details_map)
@classmethod
def setup(cls, rule_details, details_map):
"""Initializing build rule dictionary."""
cls._check_dependency_versions(rule_details, details_map)
out_file = '{}.jar'.format(rule_details[su.NAME_KEY])
su.init_rule_common(rule_details, out_file, [su.SRCS_KEY])
su.set_workdir_child(rule_details, su.WDIR_CLSDEPS_KEY, 'clsdeps')
su.set_workdir_child(rule_details, su.WDIR_SRC_KEY, 'code')
su.set_workdir_child(rule_details, su.WDIR_TARGET_KEY, 'target')
su.set_workdir_child(rule_details, su.TEMP_OUT_KEY, '.temp.' + out_file)
cls._normalize_fields(rule_details)
if cls._is_test_rule(rule_details):
cls._set_test_commands(rule_details, details_map)
rule_details[su.POSSIBLE_PREFIXES_KEY] = su.prefix_transform([])
cls._set_maven_identifiers(rule_details)
rule_details[su.MAVEN_DEPS_KEY] = cls._get_recursive_maven_deps(
rule_details, details_map)
rule_details[su.EXPORTED_MVN_DEPS_FILE_KEY] = os.path.join(
rule_details[su.OUTDIR_KEY], out_file + '.mvn_deps.json')
link_libs, compile_libs, dep_sources = cls._get_all_deps(
rule_details, details_map)
rule_details[su.LINK_LIBS_KEY] = link_libs
rule_details[su.COMPILE_LIBS_KEY] = link_libs[:]
rule_details[su.COMPILE_LIBS_KEY].extend(compile_libs)
cls.set_precompile_commands(rule_details)
rule_details[su.ALL_SRCS_KEY] = rule_details[su.SRCS_KEY][:]
cls._set_all_dep_paths(rule_details, link_libs, dep_sources)
cls._set_compile_command(rule_details)
cls._set_link_command(rule_details)
@classmethod
def include_deps_recursively(cls, rule_details):
"""Dependency graph pruning optimization."""
cls._normalize_fields(rule_details)
if rule_details[su.TYPE_KEY] != su.JAVA_LIB_TYPE:
return True
if rule_details[su.LINK_INCLUDE_DEPS_KEY]:
# If the jar built by a java library includes all its dependencies,
# there is no point in including these dependencies in the all_deps key.
return False
return True
@classmethod
def build_commands(cls, rule_details):
"""Generate build command line."""
logging.info('Emitting %s at %s', rule_details[su.TYPE_KEY],
su.log_normalize(rule_details[su.OUT_KEY]))
directory_list = [rule_details[su.OUTDIR_KEY],
rule_details[su.WDIR_CLSDEPS_KEY],
rule_details[su.WDIR_SRC_KEY],
rule_details[su.WDIR_TARGET_KEY]]
command_list = [su.get_mkdir_command(d) for d in directory_list]
command_list.append([su.CHANGE_CURR_DIR, rule_details[su.WDIR_SRC_KEY]])
command_list.extend(su.cp_commands_list(rule_details, su.ALL_SRCS_KEY))
command_list.extend(rule_details[su.PRECOMPILE_COMMANDS_KEY])
command_list.extend(rule_details[su.COMPILE_COMMAND_KEY])
command_list.extend(rule_details[su.LINK_COMMANDS_KEY])
command_list.append([su.EXPORT_MVN_DEPS,
rule_details[su.EXPORTED_MVN_DEPS_FILE_KEY],
rule_details[su.MAVEN_DEPS_KEY][0],
rule_details[su.MAVEN_DEPS_KEY][1]])
return command_list
class JavaLibrary(JavaCommon):
"""Handler class for Java lib build rules."""
class JavaBinary(JavaCommon):
"""Handler class for Java binary build rules."""
class JavaTest(JavaCommon):
"""Handler class for Java test build rules."""
| rocketfuel/mool | build_tool/bu.scripts/mool/java_common.py | Python | bsd-3-clause | 20,823 |
"""customized version of pdb's default debugger.
- sets up a history file
- uses ipython if available to colorize lines of code
- overrides list command to search for current block instead
of using 5 lines of context
"""
try:
import readline
except ImportError:
readline = None
import os
import os.path as osp
import sys
from pdb import Pdb
from cStringIO import StringIO
import inspect
try:
from IPython import PyColorize
except ImportError:
def colorize(source, *args):
"""fallback colorize function"""
return source
else:
    def colorize(source, start_lineno, curlineno):
        """Colorize source with IPython's PyColorize, annotating line numbers and marking the current line."""
parser = PyColorize.Parser()
output = StringIO()
parser.format(source, output)
annotated = []
for index, line in enumerate(output.getvalue().splitlines()):
lineno = index + start_lineno
if lineno == curlineno:
annotated.append('%4s\t->\t%s' % (lineno, line))
else:
annotated.append('%4s\t\t%s' % (lineno, line))
return '\n'.join(annotated)
def getsource(obj):
"""Return the text of the source code for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a single string. An
IOError is raised if the source code cannot be retrieved."""
lines, lnum = inspect.getsourcelines(obj)
return ''.join(lines), lnum
################################################################
class Debugger(Pdb):
"""custom debugger
- sets up a history file
- uses ipython if available to colorize lines of code
- overrides list command to search for current block instead
of using 5 lines of context
"""
def __init__(self, tcbk):
Pdb.__init__(self)
self.reset()
while tcbk.tb_next is not None:
tcbk = tcbk.tb_next
self._tcbk = tcbk
self._histfile = osp.join(os.environ["HOME"], ".pdbhist")
def setup_history_file(self):
"""if readline is available, read pdb history file
"""
if readline is not None:
try:
readline.read_history_file(self._histfile)
except IOError:
pass
def start(self):
"""starts the interactive mode"""
self.interaction(self._tcbk.tb_frame, self._tcbk)
def setup(self, frame, tcbk):
"""setup hook: set up history file"""
self.setup_history_file()
Pdb.setup(self, frame, tcbk)
def set_quit(self):
"""quit hook: save commands in the history file"""
if readline is not None:
readline.write_history_file(self._histfile)
Pdb.set_quit(self)
def complete_p(self, text, line, begin_idx, end_idx):
"""provide variable names completion for the ``p`` command"""
namespace = dict(self.curframe.f_globals)
namespace.update(self.curframe.f_locals)
if '.' in text:
return self.attr_matches(text, namespace)
return [varname for varname in namespace if varname.startswith(text)]
def attr_matches(self, text, namespace):
"""implementation coming from rlcompleter.Completer.attr_matches
Compute matches when text contains a dot.
Assuming the text is of the form NAME.NAME....[NAME], and is
evaluatable in self.namespace, it will be evaluated and its attributes
(as revealed by dir()) are used as possible completions. (For class
instances, class members are also considered.)
WARNING: this can still invoke arbitrary C code, if an object
with a __getattr__ hook is evaluated.
"""
import re
m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
if not m:
return
expr, attr = m.group(1, 3)
object = eval(expr, namespace)
words = dir(object)
if hasattr(object,'__class__'):
words.append('__class__')
words = words + self.get_class_members(object.__class__)
matches = []
n = len(attr)
for word in words:
if word[:n] == attr and word != "__builtins__":
matches.append("%s.%s" % (expr, word))
return matches
def get_class_members(self, klass):
"""implementation coming from rlcompleter.get_class_members"""
ret = dir(klass)
if hasattr(klass,'__bases__'):
for base in klass.__bases__:
ret = ret + self.get_class_members(base)
return ret
## specific / overidden commands
def do_list(self, arg):
"""overrides default list command to display the surrounding block
instead of 5 lines of context
"""
self.lastcmd = 'list'
if not arg:
try:
source, start_lineno = getsource(self.curframe)
print colorize(''.join(source), start_lineno,
self.curframe.f_lineno)
except KeyboardInterrupt:
pass
except IOError:
Pdb.do_list(self, arg)
else:
Pdb.do_list(self, arg)
do_l = do_list
def do_open(self, arg):
"""opens source file corresponding to the current stack level"""
filename = self.curframe.f_code.co_filename
lineno = self.curframe.f_lineno
cmd = 'emacsclient --no-wait +%s %s' % (lineno, filename)
os.system(cmd)
do_o = do_open
def pm():
"""use our custom debugger"""
dbg = Debugger(sys.last_traceback)
dbg.start()
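# Hedged usage sketch (not part of the original module): pm() expects
# sys.last_traceback to be set, which the interactive interpreter does after an
# unhandled exception; a script can arrange it explicitly, e.g.:
#
#   import sys
#   try:
#       main()                      # hypothetical entry point
#   except Exception:
#       sys.last_traceback = sys.exc_info()[2]
#       pm()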
| h2oloopan/easymerge | EasyMerge/clonedigger/logilab/common/debugger.py | Python | mit | 5,651 |
from collections import OrderedDict
import itertools
import pandas as pd
import numpy as np
import pandas.util.testing as tm
from pandas_battery.tools.attrdict import attrdict
__all__ = ['frame_targets']
N = 10000
COLS = 5
FLAT_N = N * COLS
shape = (N, COLS)
data_types = OrderedDict()
data_types['int'] = range(N)
data_types['float'] = np.random.randn(N)
data_types['bool'] = np.random.randn(N) > 0
data_types['string'] = np.array([tm.rands(1) for x in range(FLAT_N)]).reshape(shape)
data_types['long_strings'] = np.array([tm.rands(30) for x in range(FLAT_N)]).reshape(shape)
indexes = OrderedDict()
indexes[''] = None
indexes['time'] = pd.date_range(start="2000", freq="D", periods=N)
indexes['period'] = pd.period_range(start="2000", freq="D", periods=N)
column_types = OrderedDict()
column_types[''] = None
column_types['strcol'] = [tm.rands(10) for x in range(COLS)]
target_args = itertools.product(data_types, indexes, column_types)
def maker(data, index, columns):
def _maker():
arr = np.array(data)
# repeat the data for each column
if arr.ndim == 1:
arr = np.repeat(arr.ravel(), COLS).reshape(shape)
return pd.DataFrame(arr, index=index, columns=columns)
return _maker
frame_targets = attrdict()
for args in target_args:
data_type, index_type, column_type = args
obj_name = '_'.join(bit for bit in list(args) + ['frame'] if bit)
data = data_types[data_type]
index = indexes[index_type]
columns = column_types[column_type]
frame_targets[obj_name] = maker(data, index, columns)
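# Hedged usage sketch (not part of the original module; assumes attrdict behaves
# like a dict and that the generated names follow the
# '<data>[_<index>][_<column>]_frame' pattern built above):
#
#   df = frame_targets['int_frame']()                 # plain ints, default index
#   df2 = frame_targets['float_time_strcol_frame']()  # floats, DatetimeIndex, str columns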
| dalejung/pandas-battery | pandas_battery/target/frame.py | Python | mit | 1,571 |
# coding=utf-8
"""Send SMS using SMS API (currently only Free Mobile)"""
import re
import socket
import time
import urllib.error
import urllib.request
import urllib.parse
from nemubot import context
from nemubot.exception import IMException
from nemubot.hooks import hook
from nemubot.tools.xmlparser.node import ModuleState
nemubotversion = 3.4
from nemubot.module.more import Response
def load(context):
context.data.setIndex("name", "phone")
def help_full():
return "!sms /who/[,/who/[,...]] message: send a SMS to /who/."
def send_sms(frm, api_usr, api_key, content):
content = "<%s> %s" % (frm, content)
try:
req = urllib.request.Request("https://smsapi.free-mobile.fr/sendmsg?user=%s&pass=%s&msg=%s" % (api_usr, api_key, urllib.parse.quote(content)))
res = urllib.request.urlopen(req, timeout=5)
except socket.timeout:
return "timeout"
except urllib.error.HTTPError as e:
if e.code == 400:
return "paramètre manquant"
elif e.code == 402:
return "paiement requis"
elif e.code == 403 or e.code == 404:
return "clef incorrecte"
elif e.code != 200:
            return "erreur inconnue (%d)" % e.code
except:
return "unknown error"
return None
def check_sms_dests(dests, cur_epoch):
"""Raise exception if one of the dest is not known or has already receive a SMS recently
"""
for u in dests:
if u not in context.data.index:
raise IMException("Désolé, je sais pas comment envoyer de SMS à %s." % u)
elif cur_epoch - float(context.data.index[u]["lastuse"]) < 42:
raise IMException("Un peu de calme, %s a déjà reçu un SMS il n'y a pas si longtemps." % u)
return True
def send_sms_to_list(msg, frm, dests, content, cur_epoch):
fails = list()
for u in dests:
context.data.index[u]["lastuse"] = cur_epoch
test = send_sms(frm, context.data.index[u]["user"], context.data.index[u]["key"], content)
if test is not None:
fails.append( "%s: %s" % (u, test) )
if len(fails) > 0:
return Response("quelque chose ne s'est pas bien passé durant l'envoi du SMS : " + ", ".join(fails), msg.channel, msg.frm)
else:
return Response("le SMS a bien été envoyé", msg.channel, msg.frm)
@hook.command("sms")
def cmd_sms(msg):
if not len(msg.args):
raise IMException("À qui veux-tu envoyer ce SMS ?")
cur_epoch = time.mktime(time.localtime())
dests = msg.args[0].split(",")
frm = msg.frm if msg.to_response[0] == msg.frm else msg.frm + "@" + msg.to[0]
content = " ".join(msg.args[1:])
check_sms_dests(dests, cur_epoch)
return send_sms_to_list(msg, frm, dests, content, cur_epoch)
@hook.command("smscmd")
def cmd_smscmd(msg):
if not len(msg.args):
raise IMException("À qui veux-tu envoyer ce SMS ?")
cur_epoch = time.mktime(time.localtime())
dests = msg.args[0].split(",")
frm = msg.frm if msg.to_response[0] == msg.frm else msg.frm + "@" + msg.to[0]
cmd = " ".join(msg.args[1:])
content = None
for r in context.subtreat(context.subparse(msg, cmd)):
if isinstance(r, Response):
for m in r.messages:
if isinstance(m, list):
for n in m:
content = n
break
if content is not None:
break
elif isinstance(m, str):
content = m
break
elif isinstance(r, Text):
content = r.message
if content is None:
raise IMException("Aucun SMS envoyé : le résultat de la commande n'a pas retourné de contenu.")
check_sms_dests(dests, cur_epoch)
return send_sms_to_list(msg, frm, dests, content, cur_epoch)
apiuser_ask = re.compile(r"(utilisateur|user|numéro|numero|compte|abonne|abone|abonné|account)\s+(est|is)\s+(?P<user>[0-9]{7,})", re.IGNORECASE)
apikey_ask = re.compile(r"(clef|key|password|mot de passe?)\s+(?:est|is)?\s+(?P<key>[a-zA-Z0-9]{10,})", re.IGNORECASE)
@hook.ask()
def parseask(msg):
if msg.message.find("Free") >= 0 and (
msg.message.find("API") >= 0 or msg.message.find("api") >= 0) and (
msg.message.find("SMS") >= 0 or msg.message.find("sms") >= 0):
resuser = apiuser_ask.search(msg.message)
reskey = apikey_ask.search(msg.message)
if resuser is not None and reskey is not None:
apiuser = resuser.group("user")
apikey = reskey.group("key")
test = send_sms("nemubot", apiuser, apikey,
"Vous avez enregistré vos codes d'authentification dans nemubot, félicitation !")
if test is not None:
return Response("je n'ai pas pu enregistrer tes identifiants : %s" % test, msg.channel, msg.frm)
if msg.frm in context.data.index:
context.data.index[msg.frm]["user"] = apiuser
context.data.index[msg.frm]["key"] = apikey
else:
ms = ModuleState("phone")
ms.setAttribute("name", msg.frm)
ms.setAttribute("user", apiuser)
ms.setAttribute("key", apikey)
ms.setAttribute("lastuse", 0)
context.data.addChild(ms)
context.save()
return Response("ok, c'est noté. Je t'ai envoyé un SMS pour tester ;)",
msg.channel, msg.frm)
| nbr23/nemubot | modules/sms.py | Python | agpl-3.0 | 5,550 |
# -*- coding: utf-8 -*-
# Copyright 2015, 2018 Juca Crispim <[email protected]>
# This file is part of toxicbuild.
# toxicbuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# toxicbuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with toxicbuild. If not, see <http://www.gnu.org/licenses/>.
from abc import ABCMeta, abstractmethod
import asyncio
import os
from toxicbuild.core.exceptions import VCSError, ExecCmdError
from toxicbuild.core.utils import (exec_cmd, inherit_docs, string2datetime,
datetime2string, utc2localtime,
localtime2utc, LoggerMixin, match_string)
class VCS(LoggerMixin, metaclass=ABCMeta):
""" Generic inteface to a vcs (clone, fetch, get revisions etc...).
"""
vcsbin = None
def __init__(self, workdir):
""":param workdir: Directory where repository will be cloned and
all action will happen.
"""
self.workdir = workdir
async def exec_cmd(self, cmd, cwd=None):
""" Executes a shell command. If ``cwd`` is None ``self.workdir``
will be used.
:param cwd: Directory where the command will be executed.
"""
if cwd is None:
cwd = self.workdir
ret = await exec_cmd(cmd, cwd)
return ret
def workdir_exists(self):
""" Informs if the workdir for this vcs exists
"""
return os.path.exists(self.workdir)
@abstractmethod # pragma no branch
async def clone(self, url):
""" Clones a repository into ``self.workdir``
:param url: repository url
"""
@abstractmethod # pragma no branch
async def fetch(self):
""" Fetch changes from remote repository
"""
@abstractmethod # pragma no branch
async def create_local_branch(self, branch_name, base_name):
"""Creates a branch new in the local repository
:param branch_name: The name for the new branch
:param base_name: The name of the base branch."""
@abstractmethod
async def delete_local_branch(self, branch_name):
"""Deletes a local branch.
:param branch_name: The name of the branch to be deleted."""
@abstractmethod # pragma no branch
async def set_remote(self, url, remote_name):
"""Sets the remote url of the repository.
:param url: The new remote url.
:param remote_name: The name of the remote url to change."""
@abstractmethod # pragma no branch
async def get_remote(self, remote_name):
"""Returns the remote url used in the repo.
:param remote_name: The name of the remote url to change."""
@abstractmethod
async def try_set_remote(self, url, remote_name): # pragma no branch
"""Sets the remote url if the remote is not equal as url.
:param url: The new url for the remote.
:param remote_name: The name of the remote url to change."""
@abstractmethod # pragma no branch
async def add_remote(self, remote_url, remote_name):
"""Adds a new remote to the repository.
:param remote_url: The url of the remote repository.
:param remote_name: The name of the remote."""
@abstractmethod # pragma no branch
async def checkout(self, named_tree):
""" Checkout to ``named_tree``
:param named_tree: A commit, branch, tag...
"""
@abstractmethod # pragma no branch
async def pull(self, branch_name, remote_name='origin'):
""" Pull changes from ``branch_name`` on remote repo.
:param branch_name: A branch name, like 'master'.
:param remote_name: The remote repository to push from.
"""
@abstractmethod # pragma no branch
async def has_changes(self):
""" Informs if there are new revisions in the repository
"""
@abstractmethod
@asyncio.coroutine # pragma no branch
def import_external_branch(self, external_url, external_name,
external_branch, into):
"""Imports a branch from an external (not the origin one)
repository into a local branch.
:param external_url: The url of the external repository.
:param external_name: Name to idenfity the remote url.
:param external_branch: The name of the branch in the external repo.
:param into: The name of the local branch."""
    @abstractmethod # pragma no branch
async def branch_exists(self, branch_name):
"""Checks if a local branch exists.
:param branch_name: The name of the branch to check."""
@abstractmethod # pragma no branch
async def get_revisions(self, since=None, branches=None):
"""Returns the newer revisions ``since`` for ``branches`` from
the default remote repository.
:param since: dictionary in the format: {branch_name: since_date}.
``since`` is a datetime object.
:param branches: A list of branches to look for new revisions. If
``branches`` is None all remote branches will be used. You can use
wildcards in branches to filter the remote branches.
"""
@abstractmethod # pragma no branch
async def get_local_revisions(self, since=None, branches=None):
"""Returns the newer revisions ``since`` for ``branches`` in the
local repository
:param since: dictionary in the format: {branch_name: since_date}.
``since`` is a datetime object.
:param branches: A list of branches to look for new revisions. If
``branches`` is None all remote branches will be used. You can use
wildcards in branches to filter the local branches.
"""
@abstractmethod # pragma no branch
async def get_revisions_for_branch(self, branch, since=None):
""" Returns the revisions for ``branch`` since ``since``.
If ``since`` is None, all revisions will be returned.
:param branch: branch name
:param since: datetime
"""
@abstractmethod # pragma no branch
async def get_remote_branches(self):
""" Returns a list of the remote branches available.
"""
def _filter_remote_branches(self, remote_branches, branch_filters):
"""Filters the remote branches based in filters for the branches'
names."""
return [b for b in remote_branches if match_string(b, branch_filters)]
@inherit_docs
class Git(VCS):
""" An interface to git version control system
"""
vcsbin = 'git'
# this date_format is used to ask git about revisions since
# some date
date_format = '%a %b %d %H:%M:%S %Y'
_commit_separator = '<end-toxiccommit>'
async def _set_remote_origin_config(self):
# when we do a shallow clone of a repo, we need to
# set the remote origins to * otherwise we will not
# be able to fetch all remote branches.
remote = '+refs/heads/*:refs/remotes/origin/*'
cmd = '{} config remote.origin.fetch {}'.format(self.vcsbin,
remote)
await self.exec_cmd(cmd, cwd=self.workdir)
async def clone(self, url):
cmd = '%s clone --depth=2 %s %s --recursive' % (
self.vcsbin, url, self.workdir)
# we can't go to self.workdir while we do not clone the repo
await self.exec_cmd(cmd, cwd='.')
await self._set_remote_origin_config()
async def set_remote(self, url, remote_name='origin'):
cmd = '{} remote set-url {} {}'.format(self.vcsbin, remote_name, url)
await self.exec_cmd(cmd)
async def get_remote(self, remote_name='origin'):
cmd = '{} remote -v | grep -m1 {} | sed -e \'s/{}\s*//g\' '
cmd += '-e \'s/(.*)//g\''
cmd = cmd.format(self.vcsbin, remote_name, remote_name)
remote = await self.exec_cmd(cmd)
return remote
async def add_remote(self, remote_url, remote_name):
cmd = '{} remote add {} {}'.format(self.vcsbin,
remote_name, remote_url)
r = await self.exec_cmd(cmd)
return r
async def rm_remote(self, remote_name):
cmd = '{} remote rm {}'.format(self.vcsbin, remote_name)
r = await self.exec_cmd(cmd)
return r
async def try_set_remote(self, url, remote_name='origin'):
current_remote = await self.get_remote(remote_name)
if current_remote != url:
self.log('Changing remote from {} to {}'.format(
current_remote, url), level='debug')
await self.set_remote(url, remote_name)
async def fetch(self):
cmd = '%s %s' % (self.vcsbin, 'fetch')
fetched = await self.exec_cmd(cmd)
return fetched
async def create_local_branch(self, branch_name, base_name):
await self.checkout(base_name)
cmd = '{} branch {}'.format(self.vcsbin, branch_name)
r = await self.exec_cmd(cmd)
return r
async def delete_local_branch(self, branch_name):
await self.checkout('master')
cmd = '{} branch -D {}'.format(self.vcsbin, branch_name)
r = await self.exec_cmd(cmd)
return r
async def checkout(self, named_tree):
cmd = '{} checkout {}'.format(self.vcsbin, named_tree)
await self.exec_cmd(cmd)
async def pull(self, branch_name, remote_name='origin'):
cmd = '{} pull --no-edit {} {}'.format(self.vcsbin, remote_name,
branch_name)
ret = await self.exec_cmd(cmd)
return ret
async def has_changes(self):
ret = await self.fetch()
return bool(ret)
async def import_external_branch(self, external_url, external_name,
external_branch, into):
exists = await self.branch_exists(into)
if not exists:
await self.create_local_branch(into, 'master')
await self.add_remote(external_url, external_name)
await self.checkout(into)
await self.pull(external_branch, external_name)
await self.rm_remote(external_name)
async def branch_exists(self, branch_name):
cmd = '{} rev-parse --verify {}'.format(self.vcsbin, branch_name)
try:
await self.exec_cmd(cmd)
exists = True
except ExecCmdError:
exists = False
return exists
async def update_submodule(self):
cmd = '{} submodule init'.format(self.vcsbin)
await self.exec_cmd(cmd)
cmd = '{} submodule update'.format(self.vcsbin)
ret = await self.exec_cmd(cmd)
return ret
async def get_local_revisions(self, since=None, branches=None):
since = since or {}
branches = branches or []
revisions = {}
for branch in branches:
try:
await self.checkout(branch)
since_date = since.get(branch)
revs = await self.get_revisions_for_branch(branch, since_date)
if revs:
revisions[branch] = revs
except Exception as e:
msg = 'Error fetching local changes on branch {}. {}'.format(
branch, str(e))
self.log(msg, level='error')
return revisions
async def get_revisions(self, since=None, branches=None):
since = since or {}
# this must be called everytime so we sync our repo
# with the remote repo and then we can see new branches
await self.fetch()
remote_branches = await self.get_remote_branches()
if branches:
remote_branches = self._filter_remote_branches(
remote_branches, branches)
revisions = {}
for branch in remote_branches:
try:
await self.checkout(branch)
await self.pull(branch)
since_date = since.get(branch)
revs = await self.get_revisions_for_branch(branch,
since_date)
if revs:
revisions[branch] = revs
except Exception as e:
msg = 'Error fetching changes on branch {}. {}'.format(
branch, str(e))
self.log(msg, level='error')
return revisions
async def get_revisions_for_branch(self, branch, since=None):
# hash | commit date | author | title
commit_fmt = "%H | %ad | %an | %s | %+b {}".format(
self._commit_separator)
cmd = '{} log --pretty=format:"{}" '.format(
self.vcsbin, commit_fmt)
if since:
# Here we change the time to localtime since we can't get
# utc time in git commits unless we are using git 2.7+
localtime = utc2localtime(since)
date = datetime2string(localtime, self.date_format)
self.log('get revisions for branch {} since {}'.format(branch,
date),
level='debug')
cmd += '--since="%s" ' % date
cmd += '--date=local'
msg = 'Getting revisions for branch {} with command {}'.format(
branch, cmd)
self.log(msg, level='debug')
last_revs = [r for r in (await self.exec_cmd(cmd)).split(
self._commit_separator + '\n') if r]
last_revs.reverse()
self.log('Got {}'.format(last_revs), level='debug')
revisions = []
for rev in last_revs:
rev_uuid, date, author, title, body = rev.split(' | ')
date = string2datetime(date.strip(), dtformat=self.date_format)
# Here we change the date from git, that is in localtime to
# utc before saving to database.
date = localtime2utc(date)
revisions.append({'commit': rev_uuid.strip(), 'commit_date': date,
'author': author, 'title': title, 'body': body})
# The thing here is that the first revision in the list
# is the last one consumed on last time
return revisions[1:]
async def get_remote_branches(self):
await self.fetch()
await self._update_remote_prune()
cmd = '%s branch -r' % self.vcsbin
out = await self.exec_cmd(cmd)
msg = 'Remote branches: {}'.format(out)
self.log(msg, level='debug')
remote_branches = out.split('\n')
        # the first entry looks like "origin/HEAD -> origin/master"; keep only the target branch
remote_branches[0] = remote_branches[0].split('->')[1].strip()
return set([b.strip().split('/')[1] for b in remote_branches])
async def _update_remote_prune(self):
"""Updates remote branches list, prunning deleted branches."""
cmd = '{} remote update --prune'.format(self.vcsbin)
msg = 'Updating --prune remote'
self.log(msg, level='debug')
await self.exec_cmd(cmd)
VCS_TYPES = {'git': Git}
def get_vcs(vcs_type):
""" Retuns a subclass of :class:`toxicbuild.core.vcs.VCS` for ``vcs_type``
"""
vcs = VCS_TYPES.get(vcs_type)
if not vcs:
raise VCSError('VCS not found for {}'.format(vcs_type))
return vcs
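# Hedged usage sketch (not part of the original module; assumes an asyncio event
# loop and a writable working directory):
#
#   vcs = get_vcs('git')('/tmp/workdir')             # hypothetical path
#   # await vcs.clone('https://example.com/repo.git')
#   # revisions = await vcs.get_revisions()
#   get_vcs('svn')                                    # raises VCSError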
| jucacrispim/toxicbuild | toxicbuild/core/vcs.py | Python | agpl-3.0 | 15,719 |
import urllib2
from bs4 import BeautifulSoup
from lxml import html
page = urllib2.urlopen("http://www.atlasobscura.com/articles/fleeting-wonders-miles-of-seaweed-choking-caribbean-beaches").read()
soup = BeautifulSoup(page, "lxml")
taglist = [elem.get_text() for elem in soup.select('span.tags-list a')]
print taglist[1]
taglist.append("shit")
print taglist
| facemelters/data-science | Atlas/parse_atlas.py | Python | gpl-2.0 | 353 |
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, fields
class account_journal(models.Model):
_inherit = "account.journal"
allow_account_transfer = fields.Boolean(
'Allow Account Transfer?',
default=True,
        help='Set if this journal can be used on account transfers'
)
| ClearCorp/account-financial-tools | account_transfer/models/account_journal.py | Python | agpl-3.0 | 559 |
__source__ = 'https://leetcode.com/problems/verify-preorder-sequence-in-binary-search-tree/description/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/verify-preorder-sequence-in-binary-search-tree.py
# Time: O(n)
# Space: O(1)
# Stack
#
# Description: Leetcode # 255. Verify Preorder Sequence in Binary Search Tree
#
# Given an array of numbers, verify whether it is the correct preorder traversal sequence of a binary search tree.
#
# You may assume each number in the sequence is unique.
#
# Follow up:
# Could you do it using only constant space complexity?
#
# Companies
# Zenefits
# Related Topics
# Tree Stack
# Similar Questions
# Binary Tree Preorder Traversal
#
import unittest
class Solution:
# @param {integer[]} preorder
# @return {boolean}
def verifyPreorder(self, preorder):
low = float("-inf")
i = -1
for p in preorder:
            if p < low:
return False
while i >= 0 and p > preorder[i]:
low = preorder[i]
i -= 1
i += 1
preorder[i] = p
return True
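# Illustrative check (hypothetical calls, mirroring the counter-example quoted in
# the Java notes below): once the traversal moves into a right subtree, every
# later value must exceed the popped ancestor, so 6 after 8 (with lower bound 7)
# fails.
#
#   Solution().verifyPreorder([5, 2, 1, 3, 6])           # -> True
#   Solution().verifyPreorder([10, 7, 4, 8, 6, 40, 23])  # -> False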
# Time: O(n)
# Space: O(h)
# 60ms 41.14%
class Solution2:
# @param {integer[]} preorder
# @return {boolean}
def verifyPreorder(self, preorder):
low = float("-inf")
path = []
for p in preorder:
if p < low:
return False
while path and p > path[-1]:
low = path[-1]
path.pop()
path.append(p)
return True
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
#Thought:
[10,7,4,8,6,40,23] should be false
# 31ms 56.09%
class Solution {
public boolean verifyPreorder(int[] preorder) {
int low = Integer.MIN_VALUE;
Stack<Integer> path = new Stack();
for (int p : preorder) {
if (p < low)
return false;
while (!path.empty() && p > path.peek())
low = path.pop();
path.push(p);
}
return true;
}
}
# assume no duplicates (a BST does not allow them)
# we have to do it in place
# index = top of the virtual stack maintained inside preorder[] itself
# if the value we visit is smaller than the previous one,
# we are still descending into a left subtree;
# if the current value is bigger than the previous one,
# we have moved into a right subtree,
# so we pop every stack element smaller than the current value
# and use the last popped value as the new min
# (inside a right subtree we must never see a value below that bound)
# i = position that traverses through the array
# 2ms 100%
class Solution {
public boolean verifyPreorder(int[] preorder) {
int index = -1;
int min = Integer.MIN_VALUE;
for (int i = 0; i < preorder.length; i++) {
if (preorder[i] < min) {
return false;
}
while (index >= 0 && preorder[index] < preorder[i]) {
min = preorder[index--];
}
preorder[++index] = preorder[i];
}
return true;
}
}
# 428ms 14.72%
class Solution {
public boolean verifyPreorder(int[] preorder) {
return verifyPreorder(preorder, 0, preorder.length - 1);
}
private boolean verifyPreorder(int[] preorder, int start, int end) {
if (start >= end) {
return true;
}
int root = preorder[start];
int index = start + 1;
while (index <= end && preorder[index] < root) {
index++;
}
for (int i = index + 1; i<= end; i++) {
if (preorder[i] < root) {
return false;
}
}
return verifyPreorder(preorder, start + 1, index - 1) && verifyPreorder(preorder, index, end);
}
}
'''
| JulyKikuAkita/PythonPrac | cs15211/VerifyPreorderSequenceinBinarySearchTree.py | Python | apache-2.0 | 4,042 |
from hashlib import sha1
from django.core.cache import cache
from django.utils.encoding import smart_str
def cached(key=None, timeout=300):
"""
Cache the result of function call.
Args:
key: the key with which value will be saved. If key is None
then it is calculated automatically
timeout: number of seconds after which the cached value would be purged.
"""
_key = key
def func_wrapper(func):
def args_wrapper(*args, **kwargs):
            # rebind to a local name: without this, assigning to key below would
            # make it local to args_wrapper and fail (no nonlocal in Python 2)
key = _key
if key is None:
                # Not sure that this will work correctly in all cases
key = sha1(str(func.__module__) + str(func.__name__) +\
smart_str(args) +\
smart_str(frozenset(kwargs.items()))).hexdigest()
value = cache.get(key)
if value:
return value
else:
value = func(*args, **kwargs)
                cache.set(key, value, timeout)
return value
return args_wrapper
return func_wrapper
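# Hedged usage sketch (requires a configured Django cache backend; the names are
# hypothetical). Note that an explicit key makes every call share one cache
# entry, while key=None derives a key from the function and its arguments:
#
#   @cached(timeout=60)
#   def expensive_report(user_id):
#       return build_report(user_id)
#
#   @cached(key='sidebar-html', timeout=600)
#   def render_sidebar():
#       return render_to_string('sidebar.html')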
| govtrack/django-lorien-common | common/cache.py | Python | bsd-3-clause | 1,136 |
import os, sys, time
from glob import glob
import cv2
from pylab import *
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.backends.backend_pdf import PdfPages
matplotlib.rcParams['figure.facecolor'] = 'w'
from scipy.signal import argrelextrema
import scipy.stats as stats
import scipy.io as sio
from scipy import signal
from xlwt import Workbook
# specify these in mm to match your behavior chamber.
CHMAMBER_LENGTH=235
WATER_HIGHT=40
# quick plot should also show xy_within and location_one_third etc
# summary PDF: handle exception when a pickle file missing some fish in other pickle file
## these three taken from http://stackoverflow.com/a/18420730/566035
def strided_sliding_std_dev(data, radius=5):
windowed = rolling_window(data, (2*radius, 2*radius))
shape = windowed.shape
windowed = windowed.reshape(shape[0], shape[1], -1)
return windowed.std(axis=-1)
def rolling_window(a, window):
"""Takes a numpy array *a* and a sequence of (or single) *window* lengths
and returns a view of *a* that represents a moving window."""
if not hasattr(window, '__iter__'):
return rolling_window_lastaxis(a, window)
for i, win in enumerate(window):
if win > 1:
a = a.swapaxes(i, -1)
a = rolling_window_lastaxis(a, win)
a = a.swapaxes(-2, i)
return a
def rolling_window_lastaxis(a, window):
"""Directly taken from Erik Rigtorp's post to numpy-discussion.
<http://www.mail-archive.com/[email protected]/msg29450.html>"""
if window < 1:
raise ValueError, "`window` must be at least 1."
if window > a.shape[-1]:
raise ValueError, "`window` is too long."
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
## stealing ends here... //
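# Hedged illustration of the three helpers above (shapes only; values depend on
# the data): for a (100, 100) array and radius=5 the function computes the
# standard deviation of every 10x10 window, so the result has shape (91, 91).
#
#   a = np.random.randn(100, 100)
#   strided_sliding_std_dev(a, radius=5).shape   # -> (91, 91)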
def filterheadxy(headx,heady,thrs_denom=10):
b, a = signal.butter(8, 0.125)
dhy = np.abs(np.hstack((0, np.diff(heady,1))))
thrs = np.nanstd(dhy)/thrs_denom
ind2remove = dhy>thrs
headx[ind2remove] = np.nan
heady[ind2remove] = np.nan
headx = interp_nan(headx)
heady = interp_nan(heady)
headx = signal.filtfilt(b, a, headx, padlen=150)
heady = signal.filtfilt(b, a, heady, padlen=150)
return headx,heady
def smoothRad(theta, thrs=np.pi/4*3):
jumps = (np.diff(theta) > thrs).nonzero()[0]
print 'jumps.size', jumps.size
while jumps.size:
# print '%d/%d' % (jumps[0], theta.size)
theta[jumps+1] -= np.pi
jumps = (np.diff(theta) > thrs).nonzero()[0]
return theta
def datadct2array(data, key1, key2):
# put these in a MATLAB CELL
trialN = len(data[key1][key2])
matchedUSnameP = np.zeros((trialN,), dtype=np.object)
fnameP = np.zeros((trialN,), dtype=np.object)
# others to append to a list
eventsP = []
speed3DP = []
movingSTDP = []
d2inflowP = []
xP, yP, zP = [], [], []
XP, YP, ZP = [], [], []
ringpixelsP = []
peaks_withinP = []
swimdir_withinP = []
xy_withinP = []
location_one_thirdP = []
dtheta_shapeP = []
dtheta_velP = []
turns_shapeP = []
turns_velP = []
for n, dct in enumerate(data[key1][key2]):
# MATLAB CELL
matchedUSnameP[n] = dct['matchedUSname']
fnameP[n] = dct['fname']
# 2D array
eventsP.append([ele if type(ele) is not list else ele[0] for ele in dct['events']])
speed3DP.append(dct['speed3D'])
movingSTDP.append(dct['movingSTD'])
d2inflowP.append(dct['d2inflow'])
xP.append(dct['x'])
yP.append(dct['y'])
zP.append(dct['z'])
XP.append(dct['X'])
YP.append(dct['Y'])
ZP.append(dct['Z'])
ringpixelsP.append(dct['ringpixels'])
peaks_withinP.append(dct['peaks_within'])
swimdir_withinP.append(dct['swimdir_within'])
xy_withinP.append(dct['xy_within'])
location_one_thirdP.append(dct['location_one_third'])
dtheta_shapeP.append(dct['dtheta_shape'])
dtheta_velP.append(dct['dtheta_vel'])
turns_shapeP.append(dct['turns_shape'])
turns_velP.append(dct['turns_vel'])
TVroi = np.array(dct['TVroi'])
SVroi = np.array(dct['SVroi'])
return matchedUSnameP, fnameP, np.array(eventsP), np.array(speed3DP), np.array(d2inflowP), \
np.array(xP), np.array(yP), np.array(zP), np.array(XP), np.array(YP), np.array(ZP), \
np.array(ringpixelsP), np.array(peaks_withinP), np.array(swimdir_withinP), \
np.array(xy_withinP), np.array(dtheta_shapeP), np.array(dtheta_velP), \
np.array(turns_shapeP), np.array(turns_velP), TVroi, SVroi
def pickle2mat(fp, data=None):
# fp : full path to pickle file
# data : option to provide data to skip np.load(fp)
if not data:
data = np.load(fp)
for key1 in data.keys():
for key2 in data[key1].keys():
matchedUSname, fname, events, speed3D, d2inflow, x, y, z, X, Y, Z, \
ringpixels, peaks_within, swimdir_within, xy_within, dtheta_shape, dtheta_vel, \
turns_shape, turns_vel, TVroi, SVroi = datadct2array(data, key1, key2)
datadict = {
'matchedUSname' : matchedUSname,
'fname' : fname,
'events' : events,
'speed3D' : speed3D,
'd2inflow' : d2inflow,
'x' : x,
'y' : y,
'z' : z,
'X' : X,
'Y' : Y,
'Z' : Z,
'ringpixels' : ringpixels,
'peaks_within' : peaks_within,
'swimdir_within' : swimdir_within,
'xy_within' : xy_within,
'dtheta_shape' : dtheta_shape,
'dtheta_vel' : dtheta_vel,
'turns_shape' : turns_shape,
'turns_vel' : turns_vel,
'TVroi' : TVroi,
'SVroi' : SVroi,
}
outfp = '%s_%s_%s.mat' % (fp[:-7],key1,key2)
sio.savemat(outfp, datadict, oned_as='row', do_compression=True)
def interp_nan(x):
'''
    Replace NaN values by linear interpolation
http://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array
'''
ok = -np.isnan(x)
if (ok == False).all():
return x
else:
xp = ok.ravel().nonzero()[0]
fp = x[ok]
_x = np.isnan(x).ravel().nonzero()[0]
x[-ok] = np.interp(_x, xp, fp)
return x
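# Hedged illustration (modifies the input array in place, as written above):
#
#   v = np.array([1.0, np.nan, 3.0, np.nan, 5.0])
#   interp_nan(v)   # -> array([1., 2., 3., 4., 5.])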
def polytest(x,y,rx,ry,rw,rh,rang):
points=cv2.ellipse2Poly(
(rx,ry),
axes=(rw/2,rh/2),
angle=rang,
arcStart=0,
arcEnd=360,
delta=3
)
return cv2.pointPolygonTest(np.array(points), (x,y), measureDist=1)
def depthCorrection(z,x,TVx1,TVx2,SVy1,SVy2,SVy3):
z0 = z - SVy1
x0 = x - TVx1
mid = (SVy2-SVy1)/2
adj = (z0 - mid) / (SVy2-SVy1) * (SVy2-SVy3) * (1-(x0)/float(TVx2-TVx1))
return z0 + adj + SVy1 # back to abs coord
def putNp2xls(array, ws):
for r, row in enumerate(array):
for c, val in enumerate(row):
ws.write(r, c, val)
def drawLines(mi, ma, events, fps=30.0):
CS, USs, preRange = events
plot([CS-preRange, CS-preRange], [mi,ma], '--c') # 2 min prior odor
plot([CS , CS ], [mi,ma], '--g', linewidth=2) # CS onset
if USs:
if len(USs) > 3:
colors = 'r' * len(USs)
else:
colors = [_ for _ in ['r','b','c'][:len(USs)]]
for c,us in zip(colors, USs):
plot([us, us],[mi,ma], linestyle='--', color=c, linewidth=2) # US onset
plot([USs[0]+preRange/2,USs[0]+preRange/2], [mi,ma], linestyle='--', color=c, linewidth=2) # end of US window
xtck = np.arange(0, max(CS+preRange, max(USs)), 0.5*60*fps) # every 0.5 min tick
else:
xtck = np.arange(0, CS+preRange, 0.5*60*fps) # every 0.5 min tick
xticks(xtck, xtck/fps/60)
gca().xaxis.set_minor_locator(MultipleLocator(5*fps)) # 5 s minor ticks
def approachevents(x,y,z, ringpolyTVArray, ringpolySVArray, fishlength=134, thrs=None):
'''
    fishlength: some old scripts may call this with fishlength
thrs: multitrack GUI provides this by ringAppearochLevel spin control.
can be an numpy array (to track water level change etc)
'''
smoothedz = np.convolve(np.hanning(10)/np.hanning(10).sum(), z, 'same')
peaks = argrelextrema(smoothedz, np.less)[0] # less because 0 is top in image.
# now filter peaks by height.
ringLevel = ringpolySVArray[:,1]
if thrs is None:
thrs = ringLevel+fishlength/2
if type(thrs) == int: # can be numpy array or int
thrs = ringLevel.mean() + thrs
peaks = peaks[ z[peaks] < thrs ]
else: # numpy array should be ready to use
peaks = peaks[ z[peaks] < thrs[peaks] ]
# now filter out by TVringCenter
peaks_within = get_withinring(ringpolyTVArray, peaks, x, y)
return smoothedz, peaks_within
def get_withinring(ringpolyTVArray, timepoints, x, y):
rx = ringpolyTVArray[:,0].astype(np.int)
ry = ringpolyTVArray[:,1].astype(np.int)
rw = ringpolyTVArray[:,2].astype(np.int)
rh = ringpolyTVArray[:,3].astype(np.int)
rang = ringpolyTVArray[:,4].astype(np.int)
# poly test
peaks_within = []
for p in timepoints:
points=cv2.ellipse2Poly(
(rx[p],ry[p]),
axes=(rw[p]/2,rh[p]/2),
angle=rang[p],
arcStart=0,
arcEnd=360,
delta=3
)
inout = cv2.pointPolygonTest(np.array(points), (x[p],y[p]), measureDist=1)
if inout > 0:
peaks_within.append(p)
return peaks_within
def location_ring(x,y,ringpolyTVArray):
rx = ringpolyTVArray[:,0].astype(np.int)
ry = ringpolyTVArray[:,1].astype(np.int)
rw = ringpolyTVArray[:,2].astype(np.int)
rh = ringpolyTVArray[:,3].astype(np.int)
d2ringcenter = np.sqrt((x-rx)**2 + (y-ry)**2)
# filter by radius 20% buffer in case the ring moves around
indices = (d2ringcenter < 1.2*max(rw.max(), rh.max())).nonzero()[0]
xy_within = get_withinring(ringpolyTVArray, indices, x, y)
return xy_within
def swimdir_analysis(x,y,z,ringpolyTVArray,ringpolySVArray,TVx1,TVy1,TVx2,TVy2,fps=30.0):
# smoothing
# z = np.convolve(np.hanning(16)/np.hanning(16).sum(), z, 'same')
# two cameras have different zoom settings. So, distance per pixel is different. But, for
# swim direction, it does not matter how much x,y are compressed relative to z.
# ring z level from SV
rz = ringpolySVArray[:,1].astype(np.int)
# ring all other params from TV
rx = ringpolyTVArray[:,0].astype(np.int)
ry = ringpolyTVArray[:,1].astype(np.int)
rw = ringpolyTVArray[:,2].astype(np.int)
rh = ringpolyTVArray[:,3].astype(np.int)
rang = ringpolyTVArray[:,4].astype(np.int)
speed3D = np.sqrt( np.diff(x)**2 + np.diff(y)**2 + np.diff(z)**2 )
speed3D = np.hstack(([0], speed3D))
# line in 3D http://tutorial.math.lamar.edu/Classes/CalcIII/EqnsOfLines.aspx
# x-x0 y-y0 z-z0
# ---- = ---- = ----
# a b c
# solve them for z = rz. x0,y0,z0 are tvx, tvy, svy
# x = (a * (rz-z)) / c + x0
dt = 3 # define slope as diff between current and dt frame before
a = np.hstack( (np.ones(dt), x[dt:]-x[:-dt]) )
b = np.hstack( (np.ones(dt), y[dt:]-y[:-dt]) )
c = np.hstack( (np.ones(dt), z[dt:]-z[:-dt]) )
c[c==0] = np.nan # avoid zero division
water_x = (a * (rz-z) / c) + x
water_y = (b * (rz-z) / c) + y
upwards = c<-2/30.0*fps # not accurate when c is small or negative
xok = (TVx1 < water_x) & (water_x < TVx2)
yok = (TVy1 < water_y) & (water_y < TVy2)
filtered = upwards & xok & yok# & -np.isinf(water_x) & -np.isinf(water_y)
water_x[-filtered] = np.nan
water_y[-filtered] = np.nan
# figure()
# ax = subplot(111)
# ax.imshow(npData['TVbg'], cmap=cm.gray) # clip out from TVx1,TVy1
# ax.plot(x-TVx1, y-TVy1, 'c')
# ax.plot(water_x-TVx1, water_y-TVy1, 'r.')
# xlim([0, TVx2-TVx1]); ylim([TVy2-TVy1, 0])
# draw(); show()
SwimDir = []
for n in filtered.nonzero()[0]:
inout = polytest(water_x[n],water_y[n],rx[n],ry[n],rw[n],rh[n],rang[n])
SwimDir.append((n, inout, speed3D[n])) # inout>0 are inside
return SwimDir, water_x, water_y
def plot_eachTr(events, x, y, z, inflowpos, ringpixels, peaks_within, swimdir_within=None,
pp=None, _title=None, fps=30.0, inmm=False):
CS, USs, preRange = events
# preRange = 3600 2 min prior and 1 min after CS. +900 for 0.5 min
if USs:
xmin, xmax = CS-preRange-10*fps, USs[0]+preRange/2+10*fps
else:
xmin, xmax = CS-preRange-10*fps, CS+preRange/2+(23+10)*fps
fig = figure(figsize=(12,8), facecolor='w')
subplot(511) # Swimming speed
speed3D = np.sqrt( np.diff(x)**2 + np.diff(y)**2 + np.diff(z)**2 )
drawLines(np.nanmin(speed3D), np.nanmax(speed3D), events, fps) # go behind
plot(speed3D)
movingSTD = np.append( np.zeros(fps*10), strided_sliding_std_dev(speed3D, fps*10) )
plot(movingSTD, linewidth=2)
plot(np.ones_like(speed3D) * speed3D.std()*6, '-.', color='gray')
ylim([-5, speed3D[xmin:xmax].max()])
xlim([xmin,xmax]); title(_title)
if inmm:
ylabel('Speed 3D (mm),\n6SD thr');
else:
ylabel('Speed 3D, 6SD thr');
ax = subplot(512) # z level
drawLines(z.min(), z.max(), events)
plot(z, 'b')
pkx = peaks_within.nonzero()[0]
if inmm:
plot(pkx, peaks_within[pkx]*z[xmin:xmax].max()*0.97, 'mo')
if swimdir_within is not None:
___x = swimdir_within.nonzero()[0]
plot(___x, swimdir_within[___x]*z[xmin:xmax].max()*0.96, 'g+')
ylim([z[xmin:xmax].min()*0.95, z[xmin:xmax].max()])
xlim([xmin,xmax]); ylabel('Z (mm)')
else:
plot(pkx, peaks_within[pkx]*z[xmin:xmax].min()*0.97, 'mo')
if swimdir_within is not None:
___x = swimdir_within.nonzero()[0]
plot(___x, swimdir_within[___x]*z[xmin:xmax].min()*0.96, 'g+')
ylim([z[xmin:xmax].min()*0.95, z[xmin:xmax].max()])
ax.invert_yaxis(); xlim([xmin,xmax]); ylabel('z')
subplot(513) # x
drawLines(x.min(), x.max(), events)
plot(x, 'b')
plot(y, 'g')
xlim([xmin,xmax]); ylabel('x,y')
subplot(514) # Distance to the inflow tube
xin, yin, zin = inflowpos
d2inflow = np.sqrt((x-xin) ** 2 + (y-yin) ** 2 + (z-zin) ** 2 )
drawLines(d2inflow.min(), d2inflow.max(), events)
plot(d2inflow)
ylim([d2inflow[xmin:xmax].min(), d2inflow[xmin:xmax].max()])
xlim([xmin,xmax]); ylabel('distance to\ninflow tube')
subplot(515) # ringpixels: it seems i never considered TV x,y for this
rpmax, rpmin = np.nanmax(ringpixels[xmin:xmax]), np.nanmin(ringpixels[xmin:xmax])
drawLines(rpmin, rpmax, events)
plot(ringpixels)
plot(pkx, peaks_within[pkx]*rpmax*1.06, 'mo')
if swimdir_within is not None:
plot(___x, swimdir_within[___x]*rpmax*1.15, 'g+')
ylim([-100, rpmax*1.2])
xlim([xmin,xmax]); ylabel('ringpixels')
tight_layout()
if pp:
fig.savefig(pp, format='pdf')
rng = np.arange(CS-preRange, CS+preRange, dtype=np.int)
return speed3D[rng], movingSTD[rng], d2inflow[rng], ringpixels[rng]
def plot_turnrates(events, dthetasum_shape,dthetasum_vel,turns_shape,turns_vel,
pp=None, _title=None, thrs=np.pi/4*(133.33333333333334/120), fps=30.0):
CS, USs, preRange = events
# preRange = 3600 2 min prior and 1 min after CS. +900 for 0.5 min
if USs:
xmin, xmax = CS-preRange-10*fps, USs[0]+preRange/2+10*fps
else:
xmin, xmax = CS-preRange-10*fps, CS+preRange/2+(23+10)*fps
fig = figure(figsize=(12,8), facecolor='w')
subplot(211)
drawLines(dthetasum_shape.min(), dthetasum_shape.max(), events)
plot(np.ones_like(dthetasum_shape)*thrs,'gray',linestyle='--')
plot(-np.ones_like(dthetasum_shape)*thrs,'gray',linestyle='--')
plot(dthetasum_shape)
dmax = dthetasum_shape[xmin:xmax].max()
plot(turns_shape, (0.5+dmax)*np.ones_like(turns_shape), 'o')
temp = np.zeros_like(dthetasum_shape)
temp[turns_shape] = 1
shape_cumsum = np.cumsum(temp)
shape_cumsum -= shape_cumsum[xmin]
plot( shape_cumsum / shape_cumsum[xmax] * (dmax-dthetasum_shape.min()) + dthetasum_shape.min())
xlim([xmin,xmax]); ylabel('Shape based'); title('Orientation change per 4 frames: ' + _title)
ylim([dthetasum_shape[xmin:xmax].min()-1, dmax+1])
subplot(212)
drawLines(dthetasum_vel.min(), dthetasum_vel.max(), events)
plot(np.ones_like(dthetasum_vel)*thrs,'gray',linestyle='--')
plot(-np.ones_like(dthetasum_vel)*thrs,'gray',linestyle='--')
plot(dthetasum_vel)
dmax = dthetasum_vel[xmin:xmax].max()
plot(turns_vel, (0.5+dmax)*np.ones_like(turns_vel), 'o')
temp = np.zeros_like(dthetasum_vel)
temp[turns_vel] = 1
vel_cumsum = np.cumsum(temp)
vel_cumsum -= vel_cumsum[xmin]
plot( vel_cumsum / vel_cumsum[xmax] * (dmax-dthetasum_shape.min()) + dthetasum_shape.min())
ylim([dthetasum_vel[xmin:xmax].min()-1, dmax+1])
xlim([xmin,xmax]); ylabel('Velocity based')
tight_layout()
if pp:
fig.savefig(pp, format='pdf')
def trajectory(x, y, z, rng, ax, _xlim=[0,640], _ylim=[480,480+300], _zlim=[150,340],
color='b', fps=30.0, ringpolygon=None):
ax.plot(x[rng],y[rng],z[rng], color=color)
ax.view_init(azim=-75, elev=-180+15)
if ringpolygon:
rx, ry, rz = ringpolygon
ax.plot(rx, ry, rz, color='gray')
ax.set_xlim(_xlim[0],_xlim[1])
ax.set_ylim(_ylim[0],_ylim[1])
ax.set_zlim(_zlim[0],_zlim[1])
title(("(%2.1f min to %2.1f min)" % (rng[0]/fps/60.0,(rng[-1]+1)/60.0/fps)))
draw()
def plotTrajectory(x, y, z, events, _xlim=None, _ylim=None, _zlim=None, fps=30.0, pp=None, ringpolygon=None):
CS, USs, preRange = events
rng1 = np.arange(CS-preRange, CS-preRange/2, dtype=int)
rng2 = np.arange(CS-preRange/2, CS, dtype=int)
if USs:
rng3 = np.arange(CS, min(USs), dtype=int)
rng4 = np.arange(min(USs), min(USs)+preRange/2, dtype=int)
combined = np.hstack((rng1,rng2,rng3,rng4))
else:
combined = np.hstack((rng1,rng2))
if _xlim is None:
_xlim = map( int, ( x[combined].min(), x[combined].max() ) )
if _ylim is None:
_ylim = map( int, ( y[combined].min(), y[combined].max() ) )
if _zlim is None:
_zlim = map( int, ( z[combined].min(), z[combined].max() ) )
if ringpolygon:
_zlim[0] = min( _zlim[0], int(ringpolygon[2][0]) )
fig3D = plt.figure(figsize=(12,8), facecolor='w')
ax = fig3D.add_subplot(221, projection='3d'); trajectory(x,y,z,rng1,ax,_xlim,_ylim,_zlim,'c',fps,ringpolygon)
ax = fig3D.add_subplot(222, projection='3d'); trajectory(x,y,z,rng2,ax,_xlim,_ylim,_zlim,'c',fps,ringpolygon)
if USs:
ax = fig3D.add_subplot(223, projection='3d'); trajectory(x,y,z,rng3,ax,_xlim,_ylim,_zlim,'g',fps,ringpolygon)
ax = fig3D.add_subplot(224, projection='3d'); trajectory(x,y,z,rng4,ax,_xlim,_ylim,_zlim,'r',fps,ringpolygon)
tight_layout()
if pp:
fig3D.savefig(pp, format='pdf')
def add2DataAndPlot(fp, fish, data, createPDF):
if createPDF:
pp = PdfPages(fp[:-7]+'_'+fish+'.pdf')
else:
pp = None
params = np.load(fp)
fname = os.path.basename(fp).split('.')[0] + '.avi'
dirname = os.path.dirname(fp)
preRange = params[(fname, 'mog')]['preRange']
fps = params[(fname, 'mog')]['fps']
TVx1 = params[(fname, fish)]['TVx1']
TVy1 = params[(fname, fish)]['TVy1']
TVx2 = params[(fname, fish)]['TVx2']
TVy2 = params[(fname, fish)]['TVy2']
SVx1 = params[(fname, fish)]['SVx1']
SVx2 = params[(fname, fish)]['SVx2']
SVx3 = params[(fname, fish)]['SVx3']
SVy1 = params[(fname, fish)]['SVy1']
SVy2 = params[(fname, fish)]['SVy2']
SVy3 = params[(fname, fish)]['SVy3']
ringAppearochLevel = params[(fname, fish)]['ringAppearochLevel']
_npz = os.path.join(dirname, os.path.join('%s_%s.npz' % (fname[:-4], fish)))
# if os.path.exists(_npz):
npData = np.load(_npz)
tvx = npData['TVtracking'][:,0] # x with nan
tvy = npData['TVtracking'][:,1] # y
headx = npData['TVtracking'][:,3] # headx
heady = npData['TVtracking'][:,4] # heady
svy = npData['SVtracking'][:,1] # z
InflowTubeTVArray = npData['InflowTubeTVArray']
InflowTubeSVArray = npData['InflowTubeSVArray']
inflowpos = InflowTubeTVArray[:,0], InflowTubeTVArray[:,1], InflowTubeSVArray[:,1]
ringpixels = npData['ringpixel']
ringpolyTVArray = npData['ringpolyTVArray']
ringpolySVArray = npData['ringpolySVArray']
TVbg = npData['TVbg']
print os.path.basename(_npz), 'loaded.'
x,y,z = map(interp_nan, [tvx,tvy,svy])
# z level correction by depth (x)
z = depthCorrection(z,x,TVx1,TVx2,SVy1,SVy2,SVy3)
smoothedz, peaks_within = approachevents(x, y, z,
ringpolyTVArray, ringpolySVArray, thrs=ringAppearochLevel)
# convert to numpy array from list
temp = np.zeros_like(x)
temp[peaks_within] = 1
peaks_within = temp
# normalize to mm
longaxis = float(max((TVx2-TVx1), (TVy2-TVy1))) # before rotation H is applied they are orthogonal
waterlevel = float(SVy2-SVy1)
X = (x-TVx1) / longaxis * CHMAMBER_LENGTH
Y = (TVy2-y) / longaxis * CHMAMBER_LENGTH
Z = (SVy2-z) / waterlevel * WATER_HIGHT # bottom of chamber = 0, higher more positive
inflowpos_mm = ((inflowpos[0]-TVx1) / longaxis * CHMAMBER_LENGTH,
(TVy2-inflowpos[1]) / longaxis * CHMAMBER_LENGTH,
(SVy2-inflowpos[2]) / waterlevel * WATER_HIGHT )
# do the swim direction analysis here
swimdir, water_x, water_y = swimdir_analysis(x,y,z,
ringpolyTVArray,ringpolySVArray,TVx1,TVy1,TVx2,TVy2,fps)
# all of swimdir are within ROI (frame#, inout, speed) but not necessary within ring
sdir = np.array(swimdir)
withinRing = sdir[:,1]>0 # inout>0 are inside ring
temp = np.zeros_like(x)
temp[ sdir[withinRing,0].astype(int) ] = 1
swimdir_within = temp
# location_ring
xy_within = location_ring(x,y, ringpolyTVArray)
temp = np.zeros_like(x)
temp[xy_within] = 1
xy_within = temp
# location_one_third
if (TVx2-TVx1) > (TVy2-TVy1):
if np.abs(np.arange(TVx1, longaxis+TVx1, longaxis/3) + longaxis/6 - inflowpos[0].mean()).argmin() == 2:
location_one_third = x-TVx1 > longaxis/3*2
else:
location_one_third = x < longaxis/3
else:
if np.abs(np.arange(TVy1, longaxis+TVy1, longaxis/3) + longaxis/6 - inflowpos[1].mean()).argmin() == 2:
location_one_third = y-TVy1 > longaxis/3*2
else:
location_one_third = y < longaxis/3
# turn rate analysis (shape based)
heady, headx = map(interp_nan, [heady, headx])
headx, heady = filterheadxy(headx, heady)
dy = heady - y
dx = headx - x
theta_shape = np.arctan2(dy, dx)
# velocity based
cx, cy = filterheadxy(x.copy(), y.copy()) # centroid x,y
vx = np.append(0, np.diff(cx))
vy = np.append(0, np.diff(cy))
theta_vel = np.arctan2(vy, vx)
# prepare ringpolygon for trajectory plot
rx, ry, rw, rh, rang = ringpolyTVArray.mean(axis=0).astype(int) # use mm ver above
rz = ringpolySVArray.mean(axis=0)[1].astype(int)
RX = (rx-TVx1) / longaxis * CHMAMBER_LENGTH
RY = (TVy2-ry) / longaxis * CHMAMBER_LENGTH
RW = rw / longaxis * CHMAMBER_LENGTH / 2
RH = rh / longaxis * CHMAMBER_LENGTH / 2
RZ = (SVy2-rz) / waterlevel * WATER_HIGHT
points = cv2.ellipse2Poly(
(RX.astype(int),RY.astype(int)),
axes=(RW.astype(int),RH.astype(int)),
angle=rang,
arcStart=0,
arcEnd=360,
delta=3
)
ringpolygon = [points[:,0], points[:,1], np.ones(points.shape[0]) * RZ]
eventTypeKeys = params[(fname, fish)]['EventData'].keys()
CSs = [_ for _ in eventTypeKeys if _.startswith('CS')]
USs = [_ for _ in eventTypeKeys if _.startswith('US')]
# print CSs, USs
# events
for CS in CSs:
CS_Timings = params[(fname, fish)]['EventData'][CS]
CS_Timings.sort()
# initialize when needed
if CS not in data[fish].keys():
data[fish][CS] = []
# now look around for US after it within preRange
for t in CS_Timings:
tr = len(data[fish][CS])+1
rng = np.arange(t-preRange, t+preRange, dtype=np.int)
matchedUSname = None
for us in USs:
us_Timings = params[(fname, fish)]['EventData'][us]
matched = [_ for _ in us_Timings if t-preRange < _ < t+preRange]
if matched:
events = [t, matched, preRange] # ex. CS+
matchedUSname = us
break
else:
continue
_title = '(%s, %s) trial#%02d %s (%s)' % (CS, matchedUSname[0], tr, fname, fish)
print _title, events
_speed3D, _movingSTD, _d2inflow, _ringpixels = plot_eachTr(events, X, Y, Z, inflowpos_mm,
ringpixels, peaks_within, swimdir_within, pp, _title, fps, inmm=True)
# 3d trajectory
_xlim = (0, CHMAMBER_LENGTH)
_zlim = (RZ.max(),0)
plotTrajectory(X, Y, Z, events, _xlim=_xlim, _zlim=_zlim, fps=fps, pp=pp, ringpolygon=ringpolygon)
# turn rate analysis
# shape based
theta_shape[rng] = smoothRad(theta_shape[rng].copy(), thrs=np.pi/2)
dtheta_shape = np.append(0, np.diff(theta_shape)) # full length
kernel = np.ones(4)
dthetasum_shape = np.convolve(dtheta_shape, kernel, 'same')
# 4 frames = 1000/30.0*4 = 133.3 ms
thrs = (np.pi / 2) * (133.33333333333334/120) # Braubach et al 2009 90 degree in 120 ms
peaks_shape = argrelextrema(abs(dthetasum_shape), np.greater)[0]
turns_shape = peaks_shape[ (abs(dthetasum_shape[peaks_shape]) > thrs).nonzero()[0] ]
# velocity based
theta_vel[rng] = smoothRad(theta_vel[rng].copy(), thrs=np.pi/2)
dtheta_vel = np.append(0, np.diff(theta_vel))
dthetasum_vel = np.convolve(dtheta_vel, kernel, 'same')
peaks_vel = argrelextrema(abs(dthetasum_vel), np.greater)[0]
turns_vel = peaks_vel[ (abs(dthetasum_vel[peaks_vel]) > thrs).nonzero()[0] ]
plot_turnrates(events, dthetasum_shape, dthetasum_vel, turns_shape, turns_vel, pp, _title, fps=fps)
_temp = np.zeros_like(dtheta_shape)
_temp[turns_shape] = 1
turns_shape_array = _temp
_temp = np.zeros_like(dtheta_vel)
_temp[turns_vel] = 1
turns_vel_array = _temp
# plot swim direction analysis
fig = figure(figsize=(12,8), facecolor='w')
ax1 = subplot(211)
ax1.imshow(TVbg, cmap=cm.gray) # TVbg is clip out of ROI
ax1.plot(x[rng]-TVx1, y[rng]-TVy1, 'gray')
ax1.plot(water_x[t-preRange:t]-TVx1, water_y[t-preRange:t]-TVy1, 'c.')
if matched:
ax1.plot( water_x[t:matched[0]]-TVx1,
water_y[t:matched[0]]-TVy1, 'g.')
ax1.plot( water_x[matched[0]:matched[0]+preRange/4]-TVx1,
water_y[matched[0]:matched[0]+preRange/4]-TVy1, 'r.')
xlim([0, TVx2-TVx1]); ylim([TVy2-TVy1, 0])
title(_title)
ax2 = subplot(212)
ax2.plot( swimdir_within )
ax2.plot( peaks_within*1.15-0.1, 'mo' )
if matched:
xmin, xmax = t-preRange-10*fps, matched[0]+preRange/4
else:
xmin, xmax = t-preRange-10*fps, t+preRange/2+10*fps
gzcs = np.cumsum(swimdir_within)
gzcs -= gzcs[xmin]
ax2.plot( gzcs/gzcs[xmax] )
drawLines(0,1.2, events)
ylim([0,1.2])
xlim([xmin, xmax])
ylabel('|: SwimDirection\no: approach events')
data[fish][CS].append( {
'fname' : fname,
'x': x[rng], 'y': y[rng], 'z': z[rng],
'X': X[rng], 'Y': Y[rng], 'Z': Z[rng], # calibrate space (mm)
'speed3D': _speed3D, # calibrate space (mm)
'movingSTD' : _movingSTD, # calibrate space (mm)
'd2inflow': _d2inflow, # calibrate space (mm)
'ringpixels': _ringpixels,
'peaks_within': peaks_within[rng],
'xy_within': xy_within[rng],
'location_one_third' : location_one_third[rng],
'swimdir_within' : swimdir_within[rng],
'dtheta_shape': dtheta_shape[rng],
'dtheta_vel': dtheta_vel[rng],
'turns_shape': turns_shape_array[rng], # already +/- preRange
'turns_vel': turns_vel_array[rng],
'events' : events,
'matchedUSname' : matchedUSname,
'TVroi' : (TVx1,TVy1,TVx2,TVy2),
'SVroi' : (SVx1,SVy1,SVx2,SVy2),
} )
if pp:
fig.savefig(pp, format='pdf')
close('all') # release memory ASAP!
if pp:
pp.close()
def getPDFs(pickle_files, fishnames=None, createPDF=True):
# type checking args
if type(pickle_files) is str:
pickle_files = [pickle_files]
# convert to a list or set of fish names
if type(fishnames) is str:
fishnames = [fishnames]
elif not fishnames:
fishnames = set()
# re-organize trials into a dict "data"
data = {}
    # figure out trial number (sometimes many trials in one file) for each fish
# go through all pickle_files and use timestamps of file to sort events.
timestamps = []
for fp in pickle_files:
# collect ctime of pickled files
fname = os.path.basename(fp).split('.')[0] + '.avi'
timestamps.append( time.strptime(fname, "%b-%d-%Y_%H_%M_%S.avi") )
# look into the pickle and collect fish analyzed
params = np.load(fp) # loading pickled file!
if type(fishnames) is set:
for fish in [fs for fl,fs in params.keys() if fl == fname and fs != 'mog']:
fishnames.add(fish)
timestamps = sorted(range(len(timestamps)), key=timestamps.__getitem__)
# For each fish, go thru all pickled files
for fish in fishnames:
data[fish] = {}
# now go thru the sorted
for ind in timestamps:
fp = pickle_files[ind]
print 'processing #%d\n%s' % (ind, fp)
add2DataAndPlot(fp, fish, data, createPDF)
return data
def plotTrials(data, fish, CSname, key, step, offset=0, pp=None):
fig = figure(figsize=(12,8), facecolor='w')
ax1 = fig.add_subplot(121) # raw trace
ax2 = fig.add_subplot(222) # learning curve
ax3 = fig.add_subplot(224) # bar plot
preP, postP, postP2 = [], [], []
longestUS = 0
for n, measurement in enumerate(data[fish][CSname]):
tr = n+1
CS, USs, preRange = measurement['events']
subplot(ax1)
mi = -step*(tr-1)
ma = mi + step
drawLines(mi, ma, (preRange, [preRange+(USs[0]-CS)], preRange))
longestUS = max([us-CS+preRange*3/2 for us in USs]+[longestUS])
# 'measurement[key]': vector around the CS timing (+/-) preRange. i.e., preRange is the center
ax1.plot(measurement[key]-step*(tr-1)+offset)
title(CSname+': '+key) # cf. preRange = 3600 frames
pre = measurement[key][:preRange].mean()+offset # 2 min window
post = measurement[key][preRange:preRange+(USs[0]-CS)].mean()+offset # 23 s window
post2 = measurement[key][preRange+(USs[0]-CS):preRange*3/2+(USs[0]-CS)].mean()+offset # 1 min window after US
preP.append(pre)
postP.append(post)
postP2.append(post2)
ax3.plot([1, 2, 3], [pre, post, post2],'o-')
ax1.set_xlim([0,longestUS])
ax1.axis('off')
subplot(ax2)
x = range(1, tr+1)
y = np.diff((preP,postP), axis=0).ravel()
ax2.plot( x, y, 'ko-', linewidth=2 )
ax2.plot( x, np.zeros_like(x), '-.', linewidth=1, color='gray' )
# grid()
slope, intercept, rvalue, pval, stderr = stats.stats.linregress(x,y)
title('slope = zero? p-value = %f' % pval)
ax2.set_xlabel("Trial#")
ax2.set_xlim([0.5,tr+0.5])
ax2.set_ylabel('CS - pre')
subplot(ax3)
ax3.bar([0.6, 1.6, 2.6], [np.nanmean(preP), np.nanmean(postP), np.nanmean(postP2)], facecolor='none')
t, pval = stats.ttest_rel(postP, preP)
title('paired t p-value = %f' % pval)
ax3.set_xticks([1,2,3])
ax3.set_xticklabels(['pre', CSname, measurement['matchedUSname']])
ax3.set_xlim([0.5,3.5])
ax3.set_ylabel('Raw mean values')
tight_layout(2, h_pad=1, w_pad=1)
if pp:
fig.savefig(pp, format='pdf')
close('all')
return np.vstack((preP, postP, postP2))
def getSummary(data, dirname=None):
for fish in data.keys():
for CSname in data[fish].keys():
if dirname:
pp = PdfPages(os.path.join(dirname, '%s_for_%s.pdf' % (CSname,fish)))
print 'generating %s_for_%s.pdf' % (CSname,fish)
book = Workbook()
sheet1 = book.add_sheet('speed3D')
avgs = plotTrials(data, fish, CSname, 'speed3D', 30, pp=pp)
putNp2xls(avgs, sheet1)
sheet2 = book.add_sheet('d2inflow')
avgs = plotTrials(data, fish, CSname, 'd2inflow', 200, pp=pp)
putNp2xls(avgs, sheet2)
# sheet3 = book.add_sheet('smoothedz')
sheet3 = book.add_sheet('Z')
# avgs = plotTrials(data, fish, CSname, 'smoothedz', 100, pp=pp)
avgs = plotTrials(data, fish, CSname, 'Z', 30, pp=pp)
putNp2xls(avgs, sheet3)
sheet4 = book.add_sheet('ringpixels')
avgs = plotTrials(data, fish, CSname, 'ringpixels', 1200, pp=pp)
putNp2xls(avgs, sheet4)
sheet5 = book.add_sheet('peaks_within')
avgs = plotTrials(data, fish, CSname, 'peaks_within', 1.5, pp=pp)
putNp2xls(avgs, sheet5)
sheet6 = book.add_sheet('swimdir_within')
avgs = plotTrials(data, fish, CSname, 'swimdir_within', 1.5, pp=pp)
putNp2xls(avgs, sheet6)
sheet7 = book.add_sheet('xy_within')
avgs = plotTrials(data, fish, CSname, 'xy_within', 1.5, pp=pp)
putNp2xls(avgs, sheet7)
sheet8 = book.add_sheet('turns_shape')
avgs = plotTrials(data, fish, CSname, 'turns_shape', 1.5, pp=pp)
putNp2xls(avgs, sheet8)
sheet9 = book.add_sheet('turns_vel')
avgs = plotTrials(data, fish, CSname, 'turns_vel', 1.5, pp=pp)
putNp2xls(avgs, sheet9)
if dirname:
pp.close()
book.save(os.path.join(dirname, '%s_for_%s.xls' % (CSname,fish)))
close('all')
else:
show()
def add2Pickles(dirname, pickle_files):
# dirname : folder to look for pickle files
# pickle_files : output, a list to be concatenated.
pattern = os.path.join(dirname, '*.pickle')
temp = [_ for _ in glob(pattern) if not _.endswith('- Copy.pickle') and
not os.path.basename(_).startswith('Summary')]
pickle_files += temp
if __name__ == '__main__':
pickle_files = []
# small test data
# add2Pickles('R:/Data/itoiori/behav/adult whitlock/conditioning/NeuroD/Aug4/test', pickle_files)
# outputdir = 'R:/Data/itoiori/behav/adult whitlock/conditioning/NeuroD/Aug4/test'
# show me what you got
for pf in pickle_files:
print pf
fp = os.path.join(outputdir, 'Summary.pickle')
createPDF = True # useful when plotting etc code updated
if 1: # refresh analysis
data = getPDFs(pickle_files, createPDF=createPDF)
import cPickle as pickle
with open(os.path.join(outputdir, 'Summary.pickle'), 'wb') as f:
pickle.dump(data, f)
else: # or reuse previous
data = np.load(fp)
getSummary(data, outputdir)
pickle2mat(fp, data)
| i-namekawa/TopSideMonitor | plotting.py | Python | bsd-3-clause | 37,323 |
from django.db import models
class Event(models.Model):
name = models.CharField(max_length=50)
description = models.CharField(default=None, max_length=500)
date = models.DateField()
post_code = models.CharField(max_length=20)
contact_number = models.CharField(default=None, max_length=20)
| Glasgow2015/team-10 | project/events/models.py | Python | apache-2.0 | 311 |
from __future__ import absolute_import, division, print_function
import contextlib
import os
import platform
import socket
import sys
import textwrap
from tornado.testing import bind_unused_port
# Delegate the choice of unittest or unittest2 to tornado.testing.
from tornado.testing import unittest
skipIfNonUnix = unittest.skipIf(os.name != 'posix' or sys.platform == 'cygwin',
"non-unix platform")
# travis-ci.org runs our tests in an overworked virtual machine, which makes
# timing-related tests unreliable.
skipOnTravis = unittest.skipIf('TRAVIS' in os.environ,
'timing tests unreliable on travis')
skipOnAppEngine = unittest.skipIf('APPENGINE_RUNTIME' in os.environ,
'not available on Google App Engine')
# Set the environment variable NO_NETWORK=1 to disable any tests that
# depend on an external network.
skipIfNoNetwork = unittest.skipIf('NO_NETWORK' in os.environ,
'network access disabled')
skipBefore33 = unittest.skipIf(sys.version_info < (3, 3), 'PEP 380 (yield from) not available')
skipBefore35 = unittest.skipIf(sys.version_info < (3, 5), 'PEP 492 (async/await) not available')
skipNotCPython = unittest.skipIf(platform.python_implementation() != 'CPython',
'Not CPython implementation')
# Used for tests affected by
# https://bitbucket.org/pypy/pypy/issues/2616/incomplete-error-handling-in
# TODO: remove this after pypy3 5.8 is obsolete.
skipPypy3V58 = unittest.skipIf(platform.python_implementation() == 'PyPy' and
sys.version_info > (3,) and
sys.pypy_version_info < (5, 9),
'pypy3 5.8 has buggy ssl module')
def _detect_ipv6():
if not socket.has_ipv6:
# socket.has_ipv6 check reports whether ipv6 was present at compile
# time. It's usually true even when ipv6 doesn't work for other reasons.
return False
sock = None
try:
sock = socket.socket(socket.AF_INET6)
sock.bind(('::1', 0))
except socket.error:
return False
finally:
if sock is not None:
sock.close()
return True
skipIfNoIPv6 = unittest.skipIf(not _detect_ipv6(), 'ipv6 support not present')
def refusing_port():
"""Returns a local port number that will refuse all connections.
Return value is (cleanup_func, port); the cleanup function
must be called to free the port to be reused.
"""
# On travis-ci, port numbers are reassigned frequently. To avoid
# collisions with other tests, we use an open client-side socket's
# ephemeral port number to ensure that nothing can listen on that
# port.
server_socket, port = bind_unused_port()
server_socket.setblocking(1)
client_socket = socket.socket()
client_socket.connect(("127.0.0.1", port))
conn, client_addr = server_socket.accept()
conn.close()
server_socket.close()
return (client_socket.close, client_addr[1])
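# Illustrative usage sketch (not part of the original module): a test that needs a
# port guaranteed to refuse connections could do
#
#     cleanup, port = refusing_port()
#     try:
#         ...  # connecting to ("127.0.0.1", port) should fail with ECONNREFUSED
#     finally:
#         cleanup()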
def exec_test(caller_globals, caller_locals, s):
"""Execute ``s`` in a given context and return the result namespace.
Used to define functions for tests in particular python
versions that would be syntax errors in older versions.
"""
# Flatten the real global and local namespace into our fake
# globals: it's all global from the perspective of code defined
# in s.
global_namespace = dict(caller_globals, **caller_locals) # type: ignore
local_namespace = {}
exec(textwrap.dedent(s), global_namespace, local_namespace)
return local_namespace
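# Illustrative example (assumed caller, mirroring the docstring above): define a
# "yield from" based generator only when the syntax is available, then pull it out
# of the returned namespace.
#
#     namespace = exec_test(globals(), locals(), """
#         def f(gen):
#             yield from gen
#     """)
#     f = namespace['f']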
def is_coverage_running():
"""Return whether coverage is currently running.
"""
if 'coverage' not in sys.modules:
return False
tracer = sys.gettrace()
if tracer is None:
return False
try:
mod = tracer.__module__
except AttributeError:
try:
mod = tracer.__class__.__module__
except AttributeError:
return False
return mod.startswith('coverage')
def subTest(test, *args, **kwargs):
"""Compatibility shim for unittest.TestCase.subTest.
Usage: ``with tornado.test.util.subTest(self, x=x):``
"""
try:
subTest = test.subTest # py34+
except AttributeError:
subTest = contextlib.contextmanager(lambda *a, **kw: (yield))
return subTest(*args, **kwargs)
| legnaleurc/tornado | tornado/test/util.py | Python | apache-2.0 | 4,446 |
# -*- coding: utf-8 -*-
#
# Positronic Brain - Opinionated Buildbot Workflow
# Copyright (C) 2014 Develer S.r.L.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
This module contains all functions needed to deal with artifacts such as adding the build steps
necessary to host them on worker nodes and then transferring them back to the master.
"""
from os import listdir
from os.path import isdir, join
from shutil import rmtree
from buildbot.process.buildstep import BuildStep
from buildbot.process.buildstep import SUCCESS
from buildbot.process.properties import Interpolate
from buildbot.steps.master import SetProperty
from buildbot.steps.slave import MakeDirectory
from buildbot.steps.slave import RemoveDirectory
from buildbot.steps.transfer import DirectoryUpload
from positronic.brain.config import BuildmasterConfig
from positronic.brain.config import BrainConfig
class PruneOldArtifacts(BuildStep):
name = 'remove old artifacts'
def __init__(self, **kwargs):
super(PruneOldArtifacts, self).__init__(**kwargs)
def start(self):
root = BrainConfig['artifactsDir']
max_artifacts = BrainConfig['maxArtifacts']
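        # Render the Interpolate placeholder against this build step so we get the
        # concrete per-builder artifacts directory on the master.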
artifacts = Interpolate(join(root, '%(prop:buildername)s')).getRenderingFor(self).result
remove_obsolete_artifact_dirs(artifacts, max_artifacts)
self.finished(SUCCESS)
def remove_obsolete_artifact_dirs(root, max_artifacts):
"""Remove obsolete artifacts from the given root directory.
This function asserts that the root directory does not contain files and that all directory
names can be converted to integers in strictly increasing order. Each directory name should
correspond to a build number for that particular builder.
This function also removes empty artifacts directories.
"""
assert max_artifacts > 0
# This ensures we only get files or directories with names that can be converted to an integer,
# we also first sort it in increasing order. Strictly increasing order should be guaranteed by
# fs semantics: you can't have two directories with the same name!
dirs = map(str, sorted(map(int, listdir(root))))
paths = [join(root, d) for d in dirs]
# We only want directories.
for p in paths:
assert isdir(p)
paths_to_remove = paths[:-max_artifacts]
for p in paths:
if not listdir(p):
rmtree(p)
elif p in paths_to_remove:
rmtree(p)
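# For example (hypothetical layout): with builds 12, 13 and 14 under
# /artifacts/mybuilder, remove_obsolete_artifact_dirs('/artifacts/mybuilder', 2)
# keeps the two newest build directories (13 and 14), removes 12, and also removes
# any build directory that is already empty.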
def add_artifact_pre_build_steps(job):
artifacts_dir = Interpolate(join('%(prop:builddir)s', 'artifacts'))
job.add_step(SetProperty(
property="artifactsdir",
value=artifacts_dir,
hideStepIf=True))
job.add_step(RemoveDirectory(
dir=Interpolate('%(prop:artifactsdir)s'),
hideStepIf=True))
job.add_step(MakeDirectory(
dir=Interpolate('%(prop:artifactsdir)s'),
hideStepIf=True))
def add_artifact_post_build_steps(job):
job.add_step(DirectoryUpload(
name='collect artifacts',
slavesrc=Interpolate('%(prop:artifactsdir)s'),
masterdest=Interpolate(
join(BrainConfig['artifactsDir'], '%(prop:buildername)s', '%(prop:buildnumber)s')),
url=Interpolate(BuildmasterConfig[
'buildbotURL'] + 'artifacts/%(prop:buildername)s/%(prop:buildnumber)s/')))
job.add_step(PruneOldArtifacts())
| develersrl/positronic-brain | positronic/brain/artifact.py | Python | gpl-2.0 | 3,920 |
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "rospy;tf2".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "tf2_py"
PROJECT_SPACE_DIR = "/home/pi/Documents/desenvolvimentoRos/install"
PROJECT_VERSION = "0.5.13"
| UnbDroid/robomagellan | Codigos/Raspberry/desenvolvimentoRos/build/tf2_py/catkin_generated/pkg.installspace.context.pc.py | Python | gpl-3.0 | 395 |
import os
from datetime import datetime
from PyQt5.QtWidgets import QWizard, QFileDialog
from ..guiresources.export_wizard_auto import Ui_ExportWizard
from ..utils import injector, system_util
from ..utils.exporters import json_exporter, csv_exporter
class ExportWizard(QWizard, Ui_ExportWizard):
def __init__(self, export_list, export_model, suggested_name=None, parent=None):
QWizard.__init__(self, parent=parent)
self.setupUi(self)
self.settings_manager = injector.get_settings_manager()
self.export_list = export_list
self.export_model = export_model
if suggested_name is None:
name = f"{datetime.now().strftime('%m-%d-%Y--%H-%M-%S')} Export"
else:
name = suggested_name
self.export_path_line_edit.setText(system_util.join_path(self.settings_manager.export_file_path, name))
self.path_dialog_button.clicked.connect(self.select_export_path)
self.json_export_map = {
'RedditObjectList': json_exporter.export_reddit_object_list_to_json,
'RedditObject': json_exporter.export_reddit_objects_to_json,
'Post': json_exporter.export_posts_to_json,
'Comment': json_exporter.export_comments_to_json,
'Content': json_exporter.export_content_to_json,
}
self.csv_export_radio.toggled.connect(self.toggle_nested_page)
@property
def extension(self):
if self.csv_export_radio.isChecked():
return 'csv'
else:
return 'json'
def toggle_nested_page(self):
"""
Toggles the nested page settings page on or off depending on the type of export to be performed. CSV export
files cannot be nested.
"""
if self.csv_export_radio.isChecked():
self.removePage(self.nextId())
else:
self.addPage(self.page_two)
self.removePage(self.nextId())
self.addPage(self.page_three)
def select_export_path(self):
file_path, _ = QFileDialog.getSaveFileName(self, 'Export Path', self.export_path_line_edit.text(),
self.extension)
if file_path is not None and file_path != '':
self.export_path_line_edit.setText(file_path)
def accept(self):
if self.export():
super().accept()
def export(self):
if os.path.isdir(os.path.dirname(self.export_path_line_edit.text())):
if self.json_export_radio.isChecked():
self.export_json()
else:
self.export_csv()
return True
else:
self.export_path_line_edit.setStyleSheet('border: 1px solid red;')
return False
def export_json(self):
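        # Look up the exporter that matches the model's class name (see json_export_map
        # built in __init__) and hand it the export list plus the chosen nesting option.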
export_method = self.json_export_map[self.export_model.__name__]
export_method(self.export_list, f'{self.export_path_line_edit.text()}.json',
nested=self.export_complete_nested_radio.isChecked())
def export_csv(self):
csv_exporter.export_csv(self.export_list, self.export_model, f'{self.export_path_line_edit.text()}.csv')
| MalloyDelacroix/DownloaderForReddit | DownloaderForReddit/gui/export_wizard.py | Python | gpl-3.0 | 3,166 |
# this file contains a set of classes that are 'pollable' objects.
"""
This file contains a set of classes called 'pollables'. A pollable is an object with a start(), cancel()
and poll() method. It encapsulates some set of asynchronous functionality that is started and monitored
for completion. The poll() method returns either true or false depending on whether or not the object
has completed its objective.
Pollables are combined later into bootlevels. Each bootlevel consists of N > 1 pollables. When all
pollables are complete, that bootlevel is complete and the next bootlevel can be run.
The SVCContainer class in the services.py file is another type of pollable. It is customized to use three
internal pollables of specific purpose.
"""
from boto.exception import EC2ResponseError
import logging
import select
import subprocess
import time
from threading import Thread
import datetime
from cloudinitd.exceptions import TimeoutException, IaaSException, APIUsageException, ProcessException, MultilevelException, PollableException
import cloudinitd
import traceback
import os
from cloudinitd.cb_iaas import *
import socket
class Pollable(object):
def __init__(self, timeout=0, done_cb=None):
self._timeout = timeout
self._exception = None
self._done_cb = done_cb
self._end_time = None
self._start_time = None
def get_exception(self):
return self._exception
def start(self):
self._start_time = datetime.datetime.now()
def _execute_done_cb(self):
self._end_time = datetime.datetime.now()
if not self._done_cb:
return
self._done_cb(self)
def pre_start(self):
pass
def poll(self):
if self._timeout == 0:
return False
now = datetime.datetime.now()
diff = now - self._start_time
if diff.seconds > self._timeout:
self._exception = TimeoutException("pollable %s timedout at %d seconds" % (str(self), self._timeout))
raise self._exception
return False
def get_runtime(self):
if not self._end_time:
return None
return self._end_time - self._start_time
class NullPollable(Pollable):
def __init__(self, log=logging):
Pollable.__init__(self)
def start(self):
Pollable.start(self)
def poll(self):
return True
class HostnameCheckThread(Thread):
def __init__(self, host_poller):
Thread.__init__(self)
self.host_poller = host_poller
def run(self):
self.host_poller._thread_poll()
class InstanceTerminatePollable(Pollable):
def __init__(self, instance, log=logging, timeout=600, done_cb=None):
Pollable.__init__(self, timeout, done_cb=done_cb)
self._instance = instance
self._log = log
self._started = False
self._done = False
self.exception = None
self._thread = None
def start(self):
Pollable.start(self)
self._started = True
self._instance.terminate()
def poll(self):
if not self._started:
raise APIUsageException("You must first start the pollable object")
self._execute_done_cb()
return True
def cancel(self):
pass
class PortPollable(Pollable):
def __init__(self, host, port, retry_count=256, log=logging, timeout=600, done_cb=None):
Pollable.__init__(self, timeout, done_cb=done_cb)
self._log = log
self._started = False
self._done = False
self.exception = None
self._thread = None
self._host = host
self._port = port
self._poll_error_count = 0
self._retry_count = retry_count
        self._time_delay = datetime.timedelta(seconds=3)
self._last_run = None
def start(self):
Pollable.start(self)
self._started = True
def poll(self):
if not self._started:
raise APIUsageException("You must first start the pollable object")
if 'CLOUDINITD_TESTENV' in os.environ:
return True
Pollable.poll(self)
now = datetime.datetime.now()
if self._last_run:
if now - self._last_run < self._time_delay:
return False
self._last_run = now
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(10.0)
cloudinitd.log(self._log, logging.DEBUG, "Attempting to connect to %s:%d" % (self._host, self._port))
s.connect((self._host, self._port))
self._execute_done_cb()
return True
except Exception, ex:
self._poll_error_count = self._poll_error_count + 1
if self._poll_error_count > self._retry_count:
cloudinitd.log(self._log, logging.ERROR, "safety error count exceeded" + str(ex), tb=traceback)
raise
cloudinitd.log(self._log, logging.INFO, "Retry %d for %s:%d" % (self._poll_error_count, self._host, self._port))
return False
def cancel(self):
pass
class InstanceHostnamePollable(Pollable):
"""
    Asynchronously poll an IaaS service via boto. Once the VM has an associated hostname, the Pollable object is considered
ready.
"""
def __init__(self, svc=None, log=logging, timeout=600, done_cb=None, instance=None):
Pollable.__init__(self, timeout, done_cb=done_cb)
self._svc = svc
self._instance = instance
self._poll_error_count = 0
self._max_id_error_count = 1
self._log = log
self._done = False
self.exception = None
self._thread = None
self._ok_states = ["networking", "pending", "scheduling", "spawning", "launching"]
def pre_start(self):
if not self._instance:
iaas_con = iaas_get_con(self._svc)
self._instance = iaas_con.run_instance()
# it might be awkward to call back into service here, not sure how i feel
if self._svc:
# this is for historical records in the database
self._svc.new_iaas_instance(self._instance)
def start(self):
self.pre_start()
Pollable.start(self)
self._update()
self._thread = HostnameCheckThread(self)
self._thread.start()
def poll(self):
if self.exception:
raise self.exception
if self._done:
return True
# check time out here
Pollable.poll(self)
        # cache the state at this time on the local call stack, should be thread safe
state = self._instance.get_state()
cloudinitd.log(self._log, logging.DEBUG, "Current iaas state in poll for %s is %s" % (self.get_instance_id(), state))
if state == "running":
self._done = True
self._thread.join()
self._execute_done_cb()
return True
if state not in self._ok_states:
msg = "The current state is %s. Never reached state running" % (state)
cloudinitd.log(self._log, logging.DEBUG, msg, tb=traceback)
self.exception = IaaSException(msg)
raise self.exception
return False
def cancel(self):
self._done = True
if self._instance:
self._instance.cancel()
if self._thread:
self._thread.join(3.0)
def get_instance_id(self):
return self._instance.get_id()
def get_instance(self):
return self._instance
def get_hostname(self):
return self._instance.get_hostname()
def get_status(self):
return self._instance.get_state()
def _update(self):
try:
self._instance.update()
except EC2ResponseError, ecex:
# We allow this error to occur once. It takes ec2 some time
# to be sure of the instance id
if self._poll_error_count > self._max_id_error_count:
# if we poll too quick sometimes aws cannot find the id
cloudinitd.log(self._log, logging.ERROR, "safety error count exceeded" + str(ecex), tb=traceback)
raise
self._poll_error_count = self._poll_error_count + 1
def _thread_poll(self, poll_period=1.0):
done = False
while not self._done and not done:
try:
if self._instance.get_state() not in self._ok_states:
cloudinitd.log(self._log, logging.DEBUG, "%s polling thread done" % (self.get_instance_id()))
done = True
# because update is called in start we will sleep first
else:
time.sleep(poll_period)
self._update()
cloudinitd.log(self._log, logging.DEBUG, "Current iaas state in thread for %s is %s" % (self.get_instance_id(), self._instance.get_state()))
except Exception, ex:
cloudinitd.log(self._log, logging.ERROR, str(ex), tb=traceback)
self.exception = IaaSException(ex)
done = True
class PopenExecutablePollable(Pollable):
"""
    This object will asynchronously fork/exec a program and collect all of its stderr/stdout. The program is allowed
    to fail by returning a non-zero exit code up to allowed_errors times.
"""
def __init__(self, cmd, allowed_errors=64, log=logging, timeout=600, callback=None, done_cb=None):
Pollable.__init__(self, timeout, done_cb=done_cb)
self._cmd = cmd
self._stderr_str = ""
self._stdout_str = ""
self._stdout_eof = False
self._stderr_eof = False
self._error_count = 0
self._allowed_errors = allowed_errors
self._log = log
self._started = False
self._p = None
self._exception = None
self._done = False
self._callback = callback
self._time_delay = datetime.timedelta(seconds=10)
self._last_run = None
def get_stderr(self):
"""Get and reset the current stderr buffer from any (and all) execed programs. Good for logging"""
s = self._stderr_str
return s
def get_stdout(self):
"""Get and reset the current stdout buffer from any (and all) execed programs. Good for logging"""
s = self._stdout_str
return s
def get_output(self):
return self.get_stderr() + os.linesep + self.get_stdout()
def start(self):
Pollable.start(self)
self._run()
self._started = True
def poll(self):
if self._exception:
raise self._exception
if not self._started:
raise APIUsageException("You must call start before calling poll.")
if self._done:
return True
# check timeout here
try:
Pollable.poll(self)
return self._poll()
except TimeoutException, toex:
self._exception = toex
cloudinitd.log(self._log, logging.ERROR, str(toex), tb=traceback)
raise
except Exception, ex:
cloudinitd.log(self._log, logging.ERROR, str(ex), tb=traceback)
self._exception = ProcessException(self, ex, self._stdout_str, self._stderr_str)
raise self._exception
def cancel(self):
if self._done or not self._started:
return
# kill it and set the error count to past the max so that it is not retried
self._p.terminate()
self._error_count = self._allowed_errors
def _execute_cb(self, action, msg):
if not self._callback:
return
self._callback(self, action, msg)
def _poll(self):
"""pool to see of the process has completed. If incomplete None is returned. Otherwise the latest return code is sent"""
if self._last_run:
now = datetime.datetime.now()
if now - self._last_run < self._time_delay:
return False
self._last_run = None
self._execute_cb(cloudinitd.callback_action_transition, "retrying the command")
self._run()
rc = self._poll_process()
if rc is None:
return False
self._log.info("process return code %d" % (rc))
if rc != 0:
self._error_count = self._error_count + 1
if self._error_count >= self._allowed_errors:
ex = Exception("Process exceeded the allowed number of failures %d with %d: %s" % (self._allowed_errors, self._error_count, self._cmd))
raise ProcessException(ex, self._stdout_str, self._stderr_str, rc)
self._last_run = datetime.datetime.now()
return False
self._done = True
self._execute_cb(cloudinitd.callback_action_complete, "Pollable complete")
self._execute_done_cb()
self._p = None
return True
def _poll_process(self, poll_period=0.1):
eof = self._read_output(self._p, poll_period)
if not eof:
return None
self._p.poll()
return self._p.returncode
def _read_output(self, p, poll_period):
selectors = []
if not self._stdout_eof:
selectors.append(p.stdout)
if not self._stderr_eof:
selectors.append(p.stderr)
(rlist,wlist,elist) = select.select(selectors, [], [], poll_period)
if not rlist:
p.poll()
if p.returncode:
self._stdout_eof = True
self._stderr_eof = True
for f in rlist:
line = f.readline()
if f == p.stdout:
# we assume there will be a full line or eof
# not the fastest str concat, but this is small
self._stdout_str = self._stdout_str + line
if not line:
self._stdout_eof = True
else:
cloudinitd.log(self._log, logging.INFO, "stdout: %s" %(line))
else:
self._stderr_str = self._stderr_str + line
if not line:
self._stderr_eof = True
else:
cloudinitd.log(self._log, logging.INFO, "stderr: %s" %(line))
return self._stderr_eof and self._stdout_eof
def _run(self):
cloudinitd.log(self._log, logging.DEBUG, "running the command %s" % (str(self._cmd)))
self._p = subprocess.Popen(self._cmd, shell=True, stdin=open(os.devnull), stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
def get_command(self):
return self._cmd
class MultiLevelPollable(Pollable):
"""
This pollable object monitors a set of pollable levels. Each level is a list of pollable objects. When all
pollables in a list are complete, the next level is polled. When all levels are completed this pollable is
considered complete
"""
def __init__(self, log=logging, timeout=0, callback=None, continue_on_error=False):
Pollable.__init__(self, timeout)
self.levels = []
self.level_times = []
self.level_ndx = -1
self._log = log
self._timeout = timeout
self.exception = None
self._done = False
self._level_error_ex = []
self._all_level_error_exs = []
self._exception_occurred = False
self._continue_on_error = continue_on_error
self._level_error_polls = []
self._callback = callback
self._reversed = False
self.last_exception = None
self._canceled = False
        self._current_level_start_time = None
def get_level(self):
return self.level_ndx + 1
def _get_callback_level(self):
if self._reversed:
ndx = len(self.levels) - self.level_ndx - 1
else:
ndx = self.level_ndx
return ndx
def pre_start(self):
for l in self.levels:
for p in l:
p.pre_start()
def start(self):
Pollable.start(self)
if self.level_ndx >= 0:
return
self._current_level_start_time = datetime.datetime.now()
self.level_ndx = 0
if len(self.levels) == 0:
return
self._start()
def _start(self):
self._execute_cb(cloudinitd.callback_action_started, self._get_callback_level())
for p in self.levels[self.level_ndx]:
try:
p.start()
except Exception, ex:
self._exception_occurred = True
self.last_exception = PollableException(p, ex)
self._level_error_ex.append(self.last_exception)
self._level_error_polls.append(p)
cloudinitd.log(self._log, logging.ERROR, "Multilevel poll error on start %s" % (str(ex)), traceback)
if not self._continue_on_error:
self._execute_cb(cloudinitd.callback_action_error, self._get_callback_level())
self.exception = ex
raise
def poll(self):
if self.exception and not self._continue_on_error:
raise self.exception
if self.level_ndx < 0:
raise APIUsageException("You must call start before calling poll.")
if self.level_ndx == len(self.levels):
return True
Pollable.poll(self)
# allow everything in the level to complete before raising the exception
level = self.levels[self.level_ndx]
done = True
for p in level:
if p not in self._level_error_polls:
try:
rc = p.poll()
if not rc:
done = False
except Exception, ex:
self._exception_occurred = True
self.last_exception = PollableException(p, ex)
self._level_error_ex.append(self.last_exception)
self._level_error_polls.append(p)
cloudinitd.log(self._log, logging.ERROR, "Multilevel poll error %s" % (str(ex)), traceback)
if done:
# see if the level had an error
cb_action = cloudinitd.callback_action_complete
if len(self._level_error_polls) > 0:
cb_action = cloudinitd.callback_action_error
exception = MultilevelException(self._level_error_ex, self._level_error_polls, self.level_ndx)
self.last_exception = exception
if not self._continue_on_error:
self._execute_cb(cloudinitd.callback_action_error, self._get_callback_level())
self.exception = exception
raise exception
self._all_level_error_exs.append(self._level_error_ex)
self._level_error_polls = []
self._level_error_ex = []
_current_level_end_time = datetime.datetime.now()
self.level_times.append(_current_level_end_time - self._current_level_start_time)
self._current_level_start_time = datetime.datetime.now()
self._execute_cb(cb_action, self._get_callback_level())
self.level_ndx = self.level_ndx + 1
if self.level_ndx == len(self.levels):
self._done = True
return True
self._execute_cb(cloudinitd.callback_action_started, self._get_callback_level())
self._start()
return False
def _execute_cb(self, action, lvl):
if not self._callback:
return
self._callback(self, action, lvl+1)
def get_level_times(self):
return self.level_times
#def cancel(self):
# table this for now
# """
# Simply call cancel on all the objects this one owns
# """
# for level in self.levels:
# for p in level:
# p.cancel()
def add_level(self, pollable_list):
if self.level_ndx >= 0:
raise APIUsageException("You cannot add a level after starting the poller")
self.levels.append(pollable_list)
def reverse_order(self):
self.levels.reverse()
self._reversed = not self._reversed
def cancel(self):
if self._canceled:
return
for i in range(0, self.level_ndx):
lvl = self.levels[i]
for p in lvl:
p.cancel()
self._canceled = True
class ValidationPollable(Pollable):
def __init__(self, svc, timeout=600, done_cb=None):
Pollable.__init__(self, timeout, done_cb=done_cb)
self._svc = svc
def start(self):
Pollable.start(self)
| buzztroll/cloudinit.d | cloudinitd/pollables.py | Python | apache-2.0 | 20,714 |
import MySQLdb
import time
import cgi
from youtube_upload import *
from config import *
from text2html import *
# creating youtube object
youtube = Youtube(DEVELOPER_KEY)
debug("Login to Youtube API: email='%s', password='%s'" %
(EMAIL, "*" * len(PASSWORD)))
try:
youtube.login(EMAIL, PASSWORD)
except gdata.service.BadAuthentication:
raise BadAuthentication("Authentication failed")
db = MySQLdb.connect(host = DB_HOST, user = DB_USER, passwd = DB_PASS, \
db = DB_NAME, charset='utf8', use_unicode=True)
cur = db.cursor()
cur.execute("SELECT ctr.id, ctr.tutorial_detail_id, ctr.common_content_id, \
ctr.language_id, ctr.outline, ctr.video, ctr.video_id, \
ctr.playlist_item_id, ctd.foss_id, ctd.tutorial, ctd.level_id, ctd.order, \
ctc.keyword, clg.name FROM creation_tutorialresource ctr INNER JOIN \
creation_tutorialdetail ctd ON (ctr.tutorial_detail_id = ctd.id) \
INNER JOIN creation_fosscategory cfc ON (ctd.foss_id = cfc.id) INNER JOIN \
creation_tutorialcommoncontent ctc ON (ctr.common_content_id = ctc.id) \
INNER JOIN creation_language clg ON (ctr.language_id = clg.id) WHERE \
((ctr.status = 1 OR ctr.status = 2) AND ctr.video_id IS NOT NULL) ORDER BY \
cfc.foss, ctd.level_id, ctd.order ASC")
rows = cur.fetchall()
error_log_file_head = open(LOG_ROOT + 'video-meta-error-log.txt',"w")
for row in rows:
entry = youtube.get_feed_from_video_id(row[6])
entry.media.description = gdata.media.Description(description_type = \
'html', text = convert_to_html(row[4]))
entry.media.keywords = gdata.media.Keywords(row[12])
try:
youtube.service.UpdateVideoEntry(entry)
print entry.media.title, '-- Success'
except Exception, e:
print convert_to_html(row[4])
print entry.media.title, '-- Failed'
error_string = row[9] + ' - ' + row[13] + ' -- Failed'
error_log_file_head.write(error_string + '\n')
print e
time.sleep(1)
error_log_file_head.close()
| kirti3192/spoken-website | cron/old/test-outl.py | Python | gpl-3.0 | 1,995 |
#!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
# ---------------------------------------------------------------------------
#
# Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
# Copyright (C) 2003 Mt. Hood Playing Card Co.
# Copyright (C) 2005-2009 Skomoroh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------
import os
from pysollib.mfxutil import KwStruct
from pysollib.mygettext import _
from pysollib.pysolaudio import pysolsoundserver
from pysollib.settings import TITLE
from pysollib.ui.tktile.tkconst import EVENT_HANDLED
from six.moves import tkinter
from six.moves import tkinter_ttk as ttk
from .tkwidget import MfxDialog, MfxMessageDialog
from .tkwidget import PysolScale
class SoundOptionsDialog(MfxDialog):
def __init__(self, parent, title, app, **kw):
self.app = app
kw = self.initKw(kw)
MfxDialog.__init__(self, parent, title, kw.resizable, kw.default)
top_frame, bottom_frame = self.createFrames(kw)
self.createBitmaps(top_frame, kw)
#
self.saved_opt = app.opt.copy()
self.sound = tkinter.BooleanVar()
self.sound.set(app.opt.sound != 0)
self.sound_mode = tkinter.BooleanVar()
self.sound_mode.set(app.opt.sound_mode != 0)
self.sample_volume = tkinter.IntVar()
self.sample_volume.set(app.opt.sound_sample_volume)
self.music = tkinter.BooleanVar()
self.music.set(app.opt.music != 0)
self.music_volume = tkinter.IntVar()
self.music_volume.set(app.opt.sound_music_volume)
self.samples = [
('areyousure', _('Are You Sure'), tkinter.BooleanVar()),
('deal', _('Deal'), tkinter.BooleanVar()),
('dealwaste', _('Deal waste'), tkinter.BooleanVar()),
('turnwaste', _('Turn waste'), tkinter.BooleanVar()),
('startdrag', _('Start drag'), tkinter.BooleanVar()),
('drop', _('Drop'), tkinter.BooleanVar()),
('droppair', _('Drop pair'), tkinter.BooleanVar()),
('autodrop', _('Auto drop'), tkinter.BooleanVar()),
('flip', _('Flip'), tkinter.BooleanVar()),
('autoflip', _('Auto flip'), tkinter.BooleanVar()),
('move', _('Move'), tkinter.BooleanVar()),
('nomove', _('No move'), tkinter.BooleanVar()),
('undo', _('Undo'), tkinter.BooleanVar()),
('redo', _('Redo'), tkinter.BooleanVar()),
('autopilotlost', _('Autopilot lost'), tkinter.BooleanVar()),
('autopilotwon', _('Autopilot won'), tkinter.BooleanVar()),
('gamefinished', _('Game finished'), tkinter.BooleanVar()),
('gamelost', _('Game lost'), tkinter.BooleanVar()),
('gamewon', _('Game won'), tkinter.BooleanVar()),
('gameperfect', _('Perfect game'), tkinter.BooleanVar()),
('extra', _('Other'), tkinter.BooleanVar()),
]
#
frame = ttk.Frame(top_frame)
frame.pack(expand=True, fill='both', padx=5, pady=5)
frame.columnconfigure(1, weight=1)
#
row = 0
w = ttk.Checkbutton(frame, variable=self.sound,
text=_("Sound enabled"))
w.grid(row=row, column=0, columnspan=2, sticky='ew')
#
if os.name == "nt" and pysolsoundserver:
row += 1
w = ttk.Checkbutton(frame, variable=self.sound_mode,
text=_("Use DirectX for sound playing"),
command=self.mOptSoundDirectX)
w.grid(row=row, column=0, columnspan=2, sticky='ew')
#
if app.audio.CAN_PLAY_MUSIC: # and app.startup_opt.sound_mode > 0:
row += 1
ttk.Label(frame, text=_('Sample volume:'), anchor='w'
).grid(row=row, column=0, sticky='ew')
w = PysolScale(frame, from_=0, to=128, resolution=1,
orient='horizontal', takefocus=0,
length="3i", # label=_('Sample volume'),
variable=self.sample_volume)
w.grid(row=row, column=1, sticky='w', padx=5)
row += 1
w = ttk.Checkbutton(frame, variable=self.music,
text=_("Music enabled"))
w.grid(row=row, column=0, columnspan=2, sticky='ew')
row += 1
ttk.Label(frame, text=_('Music volume:'), anchor='w'
).grid(row=row, column=0, sticky='ew')
w = PysolScale(frame, from_=0, to=128, resolution=1,
orient='horizontal', takefocus=0,
length="3i", # label=_('Music volume'),
variable=self.music_volume)
w.grid(row=row, column=1, sticky='w', padx=5)
else:
# remove "Apply" button
kw.strings[1] = None
#
frame = ttk.LabelFrame(top_frame, text=_('Enable samples'))
frame.pack(expand=True, fill='both', padx=5, pady=5)
frame.columnconfigure(0, weight=1)
frame.columnconfigure(1, weight=1)
#
row = 0
col = 0
for n, t, v in self.samples:
v.set(app.opt.sound_samples[n])
w = ttk.Checkbutton(frame, text=t, variable=v)
w.grid(row=row, column=col, sticky='ew', padx=3, pady=1)
if col == 1:
col = 0
row += 1
else:
col = 1
#
top_frame.columnconfigure(1, weight=1)
#
focus = self.createButtons(bottom_frame, kw)
self.mainloop(focus, kw.timeout)
def initKw(self, kw):
strings = [_("&OK"), _("&Apply"), _("&Cancel"), ]
kw = KwStruct(kw,
strings=strings,
default=0,
)
return MfxDialog.initKw(self, kw)
def mDone(self, button):
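        # Button indices follow the strings set up in initKw: 0 = OK, 1 = Apply, 2 = Cancel.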
if button == 0 or button == 1:
self.app.opt.sound = self.sound.get()
self.app.opt.sound_mode = int(self.sound_mode.get())
self.app.opt.sound_sample_volume = self.sample_volume.get()
self.app.opt.sound_music_volume = self.music_volume.get()
self.app.opt.music = self.music.get()
for n, t, v in self.samples:
self.app.opt.sound_samples[n] = v.get()
elif button == 2:
self.app.opt = self.saved_opt
if self.app.audio:
self.app.audio.updateSettings()
if button == 1:
self.app.audio.playSample("drop", priority=1000)
if button == 1:
return EVENT_HANDLED
return MfxDialog.mDone(self, button)
def mCancel(self, *event):
return self.mDone(2)
def wmDeleteWindow(self, *event):
return self.mDone(0)
def mOptSoundDirectX(self, *event):
# print self.sound_mode.get()
MfxMessageDialog(
self.top, title=_("Sound preferences info"),
text=_("""\
Changing DirectX settings will take effect
the next time you restart """)+TITLE,
bitmap="warning",
default=0, strings=(_("&OK"),))
| shlomif/PySolFC | pysollib/tile/soundoptionsdialog.py | Python | gpl-3.0 | 8,082 |
import unittest
from lyric_engine.modules.petitlyrics import PetitLyrics as Lyric
class PetitLyricsTest(unittest.TestCase):
def test_url_01(self):
url = 'https://petitlyrics.com/lyrics/914421'
obj = Lyric(url)
obj.parse()
self.assertEqual(obj.title, '猫背')
self.assertEqual(obj.artist, '坂本 真綾')
self.assertEqual(obj.lyricist, '岩里祐穂')
self.assertEqual(obj.composer, '菅野よう子')
self.assertEqual(len(obj.lyric), 410)
self.assertEqual(obj.lyric[:6], '背の高い君は')
def test_url_02(self):
url = 'http://petitlyrics.com/lyrics/936622'
obj = Lyric(url)
obj.parse()
self.assertEqual(obj.title, 'RPG')
self.assertEqual(obj.artist, 'SEKAI NO OWARI')
self.assertEqual(obj.lyricist, 'Saori/Fukase')
self.assertEqual(obj.composer, 'Fukase')
self.assertEqual(len(obj.lyric), 612)
self.assertEqual(obj.lyric[:17], '空は青く澄み渡り 海を目指して歩く')
if __name__ == '__main__':
unittest.main()
| franklai/lyric-get | lyric_engine/tests/test_petitlyrics.py | Python | mit | 1,090 |
# Copyright (c) 2013 - 2019 Adam Caudill and Contributors.
# This file is part of YAWAST which is released under the MIT license.
# See the LICENSE file or go to https://yawast.org/license/ for full license details.
from dns import resolver, exception, reversename
from yawast.shared import output
def get_ips(domain: str):
ips = []
try:
answers_v4 = resolver.query(domain, "A")
for data in answers_v4:
ips.append(str(data))
except (resolver.NoAnswer, resolver.NXDOMAIN, exception.Timeout):
pass
except (resolver.NoNameservers, resolver.NotAbsolute, resolver.NoRootSOA):
output.debug_exception()
try:
answers_v6 = resolver.query(domain, "AAAA")
for data in answers_v6:
ips.append(str(data))
except (resolver.NoAnswer, resolver.NXDOMAIN, exception.Timeout):
pass
except (resolver.NoNameservers, resolver.NotAbsolute, resolver.NoRootSOA):
output.debug_exception()
return ips
def get_text(domain):
records = []
try:
answers = resolver.query(domain, "TXT")
for data in answers:
records.append(str(data))
except (resolver.NoAnswer, resolver.NXDOMAIN, exception.Timeout):
pass
except (resolver.NoNameservers, resolver.NotAbsolute, resolver.NoRootSOA):
output.debug_exception()
return records
def get_mx(domain):
records = []
try:
answers = resolver.query(domain, "MX")
for data in answers:
records.append([str(data.exchange), str(data.preference)])
except (resolver.NoAnswer, resolver.NXDOMAIN, exception.Timeout):
pass
except (resolver.NoNameservers, resolver.NotAbsolute, resolver.NoRootSOA):
output.debug_exception()
return records
def get_ns(domain):
records = []
try:
answers = resolver.query(domain, "NS")
for data in answers:
records.append(str(data))
except (resolver.NoAnswer, resolver.NXDOMAIN, exception.Timeout):
pass
except (resolver.NoNameservers, resolver.NotAbsolute, resolver.NoRootSOA):
output.debug_exception()
return records
def get_host(ip):
name = "N/A"
try:
rev_name = reversename.from_address(str(ip))
name = str(resolver.query(rev_name, "PTR", lifetime=3)[0])[:-1]
except (resolver.NoAnswer, resolver.NXDOMAIN, exception.Timeout):
pass
except (resolver.NoNameservers, resolver.NotAbsolute, resolver.NoRootSOA):
output.debug_exception()
return name
| adamcaudill/yawast | yawast/scanner/plugins/dns/basic.py | Python | mit | 2,561 |
n = int(input().strip())
arr = [int(arr_temp) for arr_temp in input().strip().split(' ')]
print(' '.join(map(str, reversed(arr))))
| rootulp/hackerrank | python/arrays-ds.py | Python | mit | 131 |
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo.serialization import jsonutils
import six
from nova import objects
from nova.objects import base
from nova.objects import fields
class PciDevicePool(base.NovaObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'product_id': fields.StringField(),
'vendor_id': fields.StringField(),
'tags': fields.DictOfNullableStringsField(),
'count': fields.IntegerField(),
}
# NOTE(pmurray): before this object existed the pci device pool data was
# stored as a dict. For backward compatibility we need to be able to read
# it in from a dict
@classmethod
def from_dict(cls, value):
pool_dict = copy.copy(value)
pool = cls()
pool.vendor_id = pool_dict.pop("vendor_id")
pool.product_id = pool_dict.pop("product_id")
pool.count = pool_dict.pop("count")
pool.tags = {}
pool.tags.update(pool_dict)
return pool
# NOTE(sbauza): Before using objects, pci stats was a list of
# dictionaries not having tags. For compatibility with other modules, let's
# create a reversible method
def to_dict(self):
pci_pool = base.obj_to_primitive(self)
tags = pci_pool.pop('tags', None)
for k, v in six.iteritems(tags):
pci_pool[k] = v
return pci_pool
class PciDevicePoolList(base.ObjectListBase, base.NovaObject):
    # Version 1.0: Initial version
# PciDevicePool <= 1.0
VERSION = '1.0'
fields = {
'objects': fields.ListOfObjectsField('PciDevicePool'),
}
child_versions = {
'1.0': '1.0',
}
def from_pci_stats(pci_stats):
"""Create and return a PciDevicePoolList from the data stored in the db,
which can be either the serialized object, or, prior to the creation of the
device pool objects, a simple dict or a list of such dicts.
"""
pools = None
if isinstance(pci_stats, six.string_types):
try:
pci_stats = jsonutils.loads(pci_stats)
except (ValueError, TypeError):
pci_stats = None
if pci_stats:
# Check for object-ness, or old-style storage format.
if 'nova_object.namespace' in pci_stats:
pools = objects.PciDevicePoolList.obj_from_primitive(pci_stats)
else:
# This can be either a dict or a list of dicts
if isinstance(pci_stats, list):
pool_list = [objects.PciDevicePool.from_dict(stat)
for stat in pci_stats]
else:
pool_list = [objects.PciDevicePool.from_dict(pci_stats)]
pools = objects.PciDevicePoolList(objects=pool_list)
return pools
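# Illustrative example (hypothetical values): the legacy serialized-dict form is also
# accepted, e.g.
#   from_pci_stats('[{"vendor_id": "8086", "product_id": "1520", "count": 2}]')
# yields a PciDevicePoolList containing a single PciDevicePool with count == 2.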
| mgagne/nova | nova/objects/pci_device_pool.py | Python | apache-2.0 | 3,410 |
# -*- coding: utf-8 -*-
import django
# the location of patterns, url and include changed in Django 1.4 onwards
try:
from django.conf.urls import patterns, url, include
except ImportError:
from django.conf.urls.defaults import patterns, url, include
# in Django>=1.5 CustomUser models can be specified
if django.VERSION >= (1, 5):
from django.contrib.auth import get_user_model
from django.conf import settings
AUTH_USER_MODEL = settings.AUTH_USER_MODEL
else:
from django.contrib.auth.models import User
get_user_model = lambda: User
AUTH_USER_MODEL = "auth.User"
try:
from django.utils.crypto import get_random_string
except ImportError:
import random
# fallback for older versions of django (<=1.3). You shouldn't use them
get_random_string = lambda length: ''.join([random.choice('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789') for i in range(length)])
try:
from django.utils.timezone import now
except ImportError:
import datetime
try:
# this is fallback for old versions of django
import pytz
from functools import partial
now = partial(datetime.datetime.now, tz=pytz.UTC)
except ImportError:
# if there is no pytz and this is old version of django, probably
# no one cares for timezones
now = datetime.datetime.now
if django.VERSION >= (1, 4):
from django.http import HttpResponse
class UnsafeRedirect(HttpResponse):
def __init__(self, url, *args, **kwargs):
super(UnsafeRedirect, self).__init__(*args, status=302, **kwargs)
self["Location"] = url
else:
    from django.http import HttpResponseRedirect as UnsafeRedirect
| daafgo/Server_LRS | oauth_provider/compat.py | Python | apache-2.0 | 1,738 |
# -*- coding: utf-8 -*-
"""
Acceptance tests for CMS Video Editor.
"""
from nose.plugins.attrib import attr
from .test_studio_video_module import CMSVideoBaseTest
@attr('shard_2')
class VideoEditorTest(CMSVideoBaseTest):
"""
CMS Video Editor Test Class
"""
def setUp(self):
super(VideoEditorTest, self).setUp()
def _create_video_component(self, subtitles=False):
"""
Create a video component and navigate to unit page
Arguments:
subtitles (bool): Upload subtitles or not
"""
if subtitles:
self.assets.append('subs_3_yD_cEKoCk.srt.sjson')
self.navigate_to_course_unit()
def test_default_settings(self):
"""
Scenario: User can view Video metadata
Given I have created a Video component
And I edit the component
Then I see the correct video settings and default values
"""
self._create_video_component()
self.edit_component()
self.assertTrue(self.video.verify_settings())
def test_modify_video_display_name(self):
"""
Scenario: User can modify Video display name
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
Then I can modify video display name
And my video display name change is persisted on save
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.set_field_value('Component Display Name', 'Transformers')
self.save_unit_settings()
self.edit_component()
self.open_advanced_tab()
self.assertTrue(self.video.verify_field_value('Component Display Name', 'Transformers'))
def test_hidden_captions(self):
"""
Scenario: Captions are hidden when "transcript display" is false
Given I have created a Video component with subtitles
And I have set "transcript display" to False
Then when I view the video it does not show the captions
"""
self._create_video_component(subtitles=True)
# Prevent cookies from overriding course settings
self.browser.delete_cookie('hide_captions')
self.edit_component()
self.open_advanced_tab()
self.video.set_field_value('Show Transcript', 'False', 'select')
self.save_unit_settings()
self.assertFalse(self.video.is_captions_visible())
def test_shown_captions(self):
"""
Scenario: Captions are shown when "transcript display" is true
Given I have created a Video component with subtitles
And I have set "transcript display" to True
Then when I view the video it does show the captions
"""
self._create_video_component(subtitles=True)
# Prevent cookies from overriding course settings
self.browser.delete_cookie('hide_captions')
self.edit_component()
self.open_advanced_tab()
self.video.set_field_value('Show Transcript', 'True', 'select')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
def test_translations_uploading(self):
"""
Scenario: Translations uploading works correctly
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript file "chinese_transcripts.srt" for "zh" language code
And I save changes
Then when I view the video it does show the captions
And I see "好 各位同学" text in the captions
And I edit the component
And I open tab "Advanced"
And I see translations for "zh"
And I upload transcript file "uk_transcripts.srt" for "uk" language code
And I save changes
Then when I view the video it does show the captions
And I see "好 各位同学" text in the captions
And video language menu has "uk, zh" translations
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('chinese_transcripts.srt', 'zh')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
self.edit_component()
self.open_advanced_tab()
self.assertEqual(self.video.translations(), ['zh'])
self.video.upload_translation('uk_transcripts.srt', 'uk')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
self.assertIn(unicode_text, self.video.captions_text)
self.assertEqual(self.video.caption_languages.keys(), ['zh', 'uk'])
def test_upload_large_transcript(self):
"""
Scenario: User can upload transcript file with > 1mb size
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript file "1mb_transcripts.srt" for "uk" language code
And I save changes
Then when I view the video it does show the captions
And I see "Привіт, edX вітає вас." text in the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('1mb_transcripts.srt', 'uk')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "Привіт, edX вітає вас.".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_lines())
def test_translations_download_works_w_saving(self):
"""
Scenario: Translations downloading works correctly w/ preliminary saving
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript files:
|lang_code|filename |
|uk |uk_transcripts.srt |
|zh |chinese_transcripts.srt|
And I save changes
And I edit the component
And I open tab "Advanced"
And I see translations for "uk, zh"
And video language menu has "uk, zh" translations
Then I can download transcript for "zh" language code, that contains text "好 各位同学"
And I can download transcript for "uk" language code, that contains text "Привіт, edX вітає вас."
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('uk_transcripts.srt', 'uk')
self.video.upload_translation('chinese_transcripts.srt', 'zh')
self.save_unit_settings()
self.edit_component()
self.open_advanced_tab()
self.assertEqual(self.video.translations(), ['zh', 'uk'])
self.assertEqual(self.video.caption_languages.keys(), ['zh', 'uk'])
zh_unicode_text = "好 各位同学".decode('utf-8')
self.assertTrue(self.video.download_translation('zh', zh_unicode_text))
uk_unicode_text = "Привіт, edX вітає вас.".decode('utf-8')
self.assertTrue(self.video.download_translation('uk', uk_unicode_text))
def test_translations_download_works_wo_saving(self):
"""
Scenario: Translations downloading works correctly w/o preliminary saving
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript files:
|lang_code|filename |
|uk |uk_transcripts.srt |
|zh |chinese_transcripts.srt|
Then I can download transcript for "zh" language code, that contains text "好 各位同学"
And I can download transcript for "uk" language code, that contains text "Привіт, edX вітає вас."
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('uk_transcripts.srt', 'uk')
self.video.upload_translation('chinese_transcripts.srt', 'zh')
zh_unicode_text = "好 各位同学".decode('utf-8')
self.assertTrue(self.video.download_translation('zh', zh_unicode_text))
uk_unicode_text = "Привіт, edX вітає вас.".decode('utf-8')
self.assertTrue(self.video.download_translation('uk', uk_unicode_text))
def test_translations_remove_works_w_saving(self):
"""
Scenario: Translations removing works correctly w/ preliminary saving
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript files:
|lang_code|filename |
|uk |uk_transcripts.srt |
|zh |chinese_transcripts.srt|
And I save changes
Then when I view the video it does show the captions
And I see "Привіт, edX вітає вас." text in the captions
And video language menu has "uk, zh" translations
And I edit the component
And I open tab "Advanced"
And I see translations for "uk, zh"
Then I remove translation for "uk" language code
And I save changes
Then when I view the video it does show the captions
And I see "好 各位同学" text in the captions
And I edit the component
And I open tab "Advanced"
And I see translations for "zh"
Then I remove translation for "zh" language code
And I save changes
Then when I view the video it does not show the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('uk_transcripts.srt', 'uk')
self.video.upload_translation('chinese_transcripts.srt', 'zh')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "Привіт, edX вітає вас.".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
self.assertEqual(self.video.caption_languages.keys(), ['zh', 'uk'])
self.edit_component()
self.open_advanced_tab()
self.assertEqual(self.video.translations(), ['zh', 'uk'])
self.video.remove_translation('uk')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
self.edit_component()
self.open_advanced_tab()
self.assertEqual(self.video.translations(), ['zh'])
self.video.remove_translation('zh')
self.save_unit_settings()
self.assertFalse(self.video.is_captions_visible())
def test_translations_remove_works_wo_saving(self):
"""
Scenario: Translations removing works correctly w/o preliminary saving
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript file "uk_transcripts.srt" for "uk" language code
And I see translations for "uk"
Then I remove translation for "uk" language code
And I save changes
Then when I view the video it does not show the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('uk_transcripts.srt', 'uk')
self.assertEqual(self.video.translations(), ['uk'])
self.video.remove_translation('uk')
self.save_unit_settings()
self.assertFalse(self.video.is_captions_visible())
def test_translations_clearing_works_w_saving(self):
"""
Scenario: Translations clearing works correctly w/ preliminary saving
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript files:
|lang_code|filename |
|uk |uk_transcripts.srt |
|zh |chinese_transcripts.srt|
And I save changes
Then when I view the video it does show the captions
And I see "Привіт, edX вітає вас." text in the captions
And video language menu has "uk, zh" translations
And I edit the component
And I open tab "Advanced"
And I see translations for "uk, zh"
And I click button "Clear"
And I save changes
Then when I view the video it does not show the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('uk_transcripts.srt', 'uk')
self.video.upload_translation('chinese_transcripts.srt', 'zh')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "Привіт, edX вітає вас.".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
self.assertEqual(self.video.caption_languages.keys(), ['zh', 'uk'])
self.edit_component()
self.open_advanced_tab()
self.assertEqual(self.video.translations(), ['zh', 'uk'])
self.video.click_button('translations_clear')
self.save_unit_settings()
self.assertFalse(self.video.is_captions_visible())
def test_translations_clearing_works_wo_saving(self):
"""
Scenario: Translations clearing works correctly w/o preliminary saving
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript files:
|lang_code|filename |
|uk |uk_transcripts.srt |
|zh |chinese_transcripts.srt|
And I click button "Clear"
And I save changes
Then when I view the video it does not show the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('uk_transcripts.srt', 'uk')
self.video.upload_translation('chinese_transcripts.srt', 'zh')
self.video.click_button('translations_clear')
self.save_unit_settings()
self.assertFalse(self.video.is_captions_visible())
def test_cannot_upload_sjson_translation(self):
"""
Scenario: User cannot upload translations in sjson format
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I click button "Add"
And I choose "uk" language code
And I try to upload transcript file "subs_3_yD_cEKoCk.srt.sjson"
Then I see validation error "Only SRT files can be uploaded. Please select a file ending in .srt to upload."
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.click_button('translation_add')
self.video.select_translation_language('uk')
self.video.upload_asset('subs_3_yD_cEKoCk.srt.sjson', asset_type='transcript')
error_msg = 'Only SRT files can be uploaded. Please select a file ending in .srt to upload.'
self.assertEqual(self.video.upload_status_message, error_msg)
def test_replace_translation_w_save(self):
"""
Scenario: User can easy replace the translation by another one w/ preliminary saving
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript file "chinese_transcripts.srt" for "zh" language code
And I save changes
Then when I view the video it does show the captions
And I see "好 各位同学" text in the captions
And I edit the component
And I open tab "Advanced"
And I see translations for "zh"
And I replace transcript file for "zh" language code by "uk_transcripts.srt"
And I save changes
Then when I view the video it does show the captions
And I see "Привіт, edX вітає вас." text in the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('chinese_transcripts.srt', 'zh')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
self.edit_component()
self.open_advanced_tab()
self.assertEqual(self.video.translations(), ['zh'])
self.video.replace_translation('zh', 'uk', 'uk_transcripts.srt')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "Привіт, edX вітає вас.".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
def test_replace_translation_wo_save(self):
"""
Scenario: User can easy replace the translation by another one w/o preliminary saving
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript file "chinese_transcripts.srt" for "zh" language code
And I see translations for "zh"
And I replace transcript file for "zh" language code by "uk_transcripts.srt"
And I save changes
Then when I view the video it does show the captions
And I see "Привіт, edX вітає вас." text in the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('chinese_transcripts.srt', 'zh')
self.assertEqual(self.video.translations(), ['zh'])
self.video.replace_translation('zh', 'uk', 'uk_transcripts.srt')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "Привіт, edX вітає вас.".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
def test_translation_upload_remove_upload(self):
"""
Scenario: Upload "zh" file "A" -> Remove "zh" -> Upload "zh" file "B"
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript file "chinese_transcripts.srt" for "zh" language code
And I see translations for "zh"
Then I remove translation for "zh" language code
And I upload transcript file "uk_transcripts.srt" for "zh" language code
And I save changes
Then when I view the video it does show the captions
And I see "Привіт, edX вітає вас." text in the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('chinese_transcripts.srt', 'zh')
self.assertEqual(self.video.translations(), ['zh'])
self.video.remove_translation('zh')
self.video.upload_translation('uk_transcripts.srt', 'zh')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "Привіт, edX вітає вас.".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
def test_select_language_twice(self):
"""
Scenario: User cannot select the same language twice
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I click button "Add"
And I choose "zh" language code
And I click button "Add"
Then I cannot choose "zh" language code
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.click_button('translation_add')
self.video.select_translation_language('zh')
self.video.click_button('translation_add')
self.video.select_translation_language('zh')
self.assertEqual(self.video.translations(), [u'zh', u''])
def test_table_of_contents(self):
"""
        Scenario: User can see table of contents at the first position
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript files:
|lang_code|filename |
|uk |uk_transcripts.srt |
|table |chinese_transcripts.srt|
And I save changes
Then when I view the video it does show the captions
And I see "好 各位同学" text in the captions
And video language menu has "table, uk" translations
And I see video language with code "table" at position "0"
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('uk_transcripts.srt', 'uk')
self.video.upload_translation('chinese_transcripts.srt', 'table')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
self.assertEqual(self.video.caption_languages.keys(), [u'table', u'uk'])
self.assertEqual(self.video.caption_languages.keys()[0], 'table')
def test_upload_transcript_with_BOM(self):
"""
        Scenario: User can upload transcript file with BOM (Byte Order Mark) in it.
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript file "chinese_transcripts_with_BOM.srt" for "zh" language code
And I save changes
Then when I view the video it does show the captions
And I see "莎拉·佩林 (Sarah Palin)" text in the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('chinese_transcripts_with_BOM.srt', 'zh')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "莎拉·佩林 (Sarah Palin)".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_lines())
| eestay/edx-platform | common/test/acceptance/tests/video/test_studio_video_editor.py | Python | agpl-3.0 | 22,804 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Johann Prieur <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from papyon.service.AddressBook.scenario.base import BaseScenario
__all__ = ['GroupDeleteScenario']
class GroupDeleteScenario(BaseScenario):
def __init__(self, ab, callback, errback, group_guid=''):
"""Deletes a group from the address book.
@param ab: the address book service
@param callback: tuple(callable, *args)
@param errback: tuple(callable, *args)
@param group_guid: the guid of the group to delete"""
BaseScenario.__init__(self, 'GroupSave', callback, errback)
self.__ab = ab
self.group_guid = group_guid
def execute(self):
self.__ab.GroupDelete(self._callback, self._errback,
self._scenario, self.group_guid)
| billiob/papyon | papyon/service/AddressBook/scenario/groups/group_delete.py | Python | gpl-2.0 | 1,542 |
from cyder.cydns.views import CydnsDeleteView
from cyder.cydns.views import CydnsDetailView
from cyder.cydns.views import CydnsCreateView
from cyder.cydns.views import CydnsUpdateView
from cyder.cydns.views import CydnsListView
from cyder.cydns.cname.models import CNAME
from cyder.cydns.cname.forms import CNAMEForm
class CNAMEView(object):
model = CNAME
form_class = CNAMEForm
queryset = CNAME.objects.all().order_by('fqdn')
class CNAMEDeleteView(CNAMEView, CydnsDeleteView):
""" """
class CNAMEDetailView(CNAMEView, CydnsDetailView):
""" """
template_name = "cname/cname_detail.html"
class CNAMECreateView(CNAMEView, CydnsCreateView):
""" """
class CNAMEUpdateView(CNAMEView, CydnsUpdateView):
""" """
class CNAMEListView(CNAMEView, CydnsListView):
""" """
| ngokevin/cyder | cyder/cydns/cname/views.py | Python | bsd-3-clause | 808 |
# -*- coding: utf-8 -*-
import os
from datetime import datetime
from flask import (Blueprint, request, current_app, session, url_for, redirect,
render_template, g, flash, abort)
from flask_babel import gettext
from sqlalchemy.sql.expression import false
import store
from db import db
from models import Source, SourceStar, Submission, Reply
from journalist_app.forms import ReplyForm
from journalist_app.utils import (validate_user, bulk_delete, download,
confirm_bulk_delete, get_source)
def make_blueprint(config):
view = Blueprint('main', __name__)
@view.route('/login', methods=('GET', 'POST'))
def login():
if request.method == 'POST':
user = validate_user(request.form['username'],
request.form['password'],
request.form['token'])
if user:
current_app.logger.info("'{}' logged in with the token {}"
.format(request.form['username'],
request.form['token']))
# Update access metadata
user.last_access = datetime.utcnow()
db.session.add(user)
db.session.commit()
session['uid'] = user.id
return redirect(url_for('main.index'))
return render_template("login.html")
@view.route('/logout')
def logout():
session.pop('uid', None)
session.pop('expires', None)
return redirect(url_for('main.index'))
@view.route('/org-logo')
def select_logo():
if os.path.exists(os.path.join(current_app.static_folder, 'i',
'custom_logo.png')):
return redirect(url_for('static', filename='i/custom_logo.png'))
else:
return redirect(url_for('static', filename='i/logo.png'))
@view.route('/')
def index():
unstarred = []
starred = []
# Long SQLAlchemy statements look best when formatted according to
# the Pocoo style guide, IMHO:
# http://www.pocoo.org/internal/styleguide/
sources = Source.query.filter_by(pending=False) \
.filter(Source.last_updated.isnot(None)) \
.order_by(Source.last_updated.desc()) \
.all()
for source in sources:
star = SourceStar.query.filter_by(source_id=source.id).first()
if star and star.starred:
starred.append(source)
else:
unstarred.append(source)
source.num_unread = len(
Submission.query.filter_by(source_id=source.id,
downloaded=False).all())
return render_template('index.html',
unstarred=unstarred,
starred=starred)
@view.route('/reply', methods=('POST',))
def reply():
"""Attempt to send a Reply from a Journalist to a Source. Empty
messages are rejected, and an informative error message is flashed
on the client. In the case of unexpected errors involving database
transactions (potentially caused by racing request threads that
        modify the same database object) logging is done in such a way
so as not to write potentially sensitive information to disk, and a
generic error message is flashed on the client.
Returns:
flask.Response: The user is redirected to the same Source
collection view, regardless if the Reply is created
successfully.
"""
form = ReplyForm()
if not form.validate_on_submit():
for error in form.message.errors:
flash(error, "error")
return redirect(url_for('col.col', filesystem_id=g.filesystem_id))
g.source.interaction_count += 1
filename = "{0}-{1}-reply.gpg".format(g.source.interaction_count,
g.source.journalist_filename)
current_app.crypto_util.encrypt(
form.message.data,
[current_app.crypto_util.getkey(g.filesystem_id),
config.JOURNALIST_KEY],
output=current_app.storage.path(g.filesystem_id, filename),
)
reply = Reply(g.user, g.source, filename)
try:
db.session.add(reply)
db.session.commit()
store.async_add_checksum_for_file(reply)
except Exception as exc:
flash(gettext(
"An unexpected error occurred! Please "
"inform your admin."), "error")
# We take a cautious approach to logging here because we're dealing
# with responses to sources. It's possible the exception message
# could contain information we don't want to write to disk.
current_app.logger.error(
"Reply from '{}' (ID {}) failed: {}!".format(g.user.username,
g.user.id,
exc.__class__))
else:
flash(gettext("Thanks. Your reply has been stored."),
"notification")
finally:
return redirect(url_for('col.col', filesystem_id=g.filesystem_id))
@view.route('/flag', methods=('POST',))
def flag():
g.source.flagged = True
db.session.commit()
return render_template('flag.html', filesystem_id=g.filesystem_id,
codename=g.source.journalist_designation)
@view.route('/bulk', methods=('POST',))
def bulk():
action = request.form['action']
doc_names_selected = request.form.getlist('doc_names_selected')
selected_docs = [doc for doc in g.source.collection
if doc.filename in doc_names_selected]
if selected_docs == []:
if action == 'download':
flash(gettext("No collections selected for download."),
"error")
elif action in ('delete', 'confirm_delete'):
flash(gettext("No collections selected for deletion."),
"error")
return redirect(url_for('col.col', filesystem_id=g.filesystem_id))
if action == 'download':
source = get_source(g.filesystem_id)
return download(source.journalist_filename, selected_docs)
elif action == 'delete':
return bulk_delete(g.filesystem_id, selected_docs)
elif action == 'confirm_delete':
return confirm_bulk_delete(g.filesystem_id, selected_docs)
else:
abort(400)
@view.route('/regenerate-code', methods=('POST',))
def regenerate_code():
original_journalist_designation = g.source.journalist_designation
g.source.journalist_designation = current_app.crypto_util.display_id()
for item in g.source.collection:
item.filename = current_app.storage.rename_submission(
g.filesystem_id,
item.filename,
g.source.journalist_filename)
db.session.commit()
flash(gettext(
"The source '{original_name}' has been renamed to '{new_name}'")
.format(original_name=original_journalist_designation,
new_name=g.source.journalist_designation),
"notification")
return redirect(url_for('col.col', filesystem_id=g.filesystem_id))
@view.route('/download_unread/<filesystem_id>')
def download_unread_filesystem_id(filesystem_id):
id = Source.query.filter(Source.filesystem_id == filesystem_id) \
.one().id
submissions = Submission.query.filter(
Submission.source_id == id,
Submission.downloaded == false()).all()
if submissions == []:
flash(gettext("No unread submissions for this source."))
return redirect(url_for('col.col', filesystem_id=filesystem_id))
source = get_source(filesystem_id)
return download(source.journalist_filename, submissions)
return view
| ehartsuyker/securedrop | securedrop/journalist_app/main.py | Python | agpl-3.0 | 8,317 |
# Assignment:
#############
#
# The program reads a positive real number (call it x) and then reads another
# positive real number (call it y).
# The program computes the logarithm of x to the base y using the interval
# bisection (halving) method.
# Find the result to 8 decimal places (stop the computation once
# |x1 - x2| < 0.000000001).
# Using a library function (e.g. math.log) is not allowed. A program calling
# such functions will not be graded.
#
# The following hints help with choosing the initial bounds:
# - if y > 1 and x > 1, the left bound can be 0
# - if y > e and x > 1, we know y^k > k+1, so the right bound can be x
# - if e > y > 1 and x > 1, we know y^k > k*ln(y)+1 (the derivative at 0 is
#   ln(y)), so the right bound can be x/ln(y), and for e > y > 1 we can use
#   ln(y) > (y-1)/(e-1)
# - if x < 1, we know 1/x > 1 and 1/(y^k) = y^(-k), so we can use the negated
#   bounds found for the value 1/x, i.e. for the cases above the right bound
#   equals 0 and the left bound equals -1/x, resp. (-1/x) * ((e-1)/(y-1))
# - if y < 1, then (1/y)^k = y^(-k). So we find the bounds for the base 1/y
#   (as described above) and multiply those bounds by -1.
###############################################################################
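# A quick worked illustration of the hints above: for x = 8 and y = 2 they give
# a starting bracket of roughly [0, x*(e-1)/(y-1)] = [0, ~13.7]; repeatedly
# halving the interval and keeping the half that still brackets the answer
# shrinks it below 1e-9 and converges on log_2(8) = 3.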
x = float(input()) # the number whose logarithm we want
y = float(input()) # the base
# Function computing the logarithm by interval halving
def log(z, base, precision = 10 ** -9):
lower_limit = 0.0
upper_limit = z
e = 2.7182818284
    # Set up the bracketing interval.
    # The values below are the result of several hours of trial and error.
if base < 0 or base == 1:
        raise ValueError('The logarithm is not defined for a negative base or a base equal to 1')
if (base < e) and (base > 1.0) and (z > 1.0):
upper_limit = (z - 1) / ((base -1)/ e )
elif z < 1.0:
upper_limit = 0.0
lower_limit = (-1/z) * ((e-1)/(base-1))
if base < 1.0:
upper_limit = 0
lower_limit = (1/z)
if z > 1.0 and (base < 1.0):
upper_limit = -(1/base) * z
if (base < 1.0) and (z < 1.0):
upper_limit = 150
lower_limit = -150
logarithm = 0.0
    # The bisection loop itself
while abs(lower_limit - upper_limit) > precision:
if (base < 1.0) and (z < 1.0):
logarithm = abs( (lower_limit + upper_limit)/2 )
else:
logarithm = (lower_limit + upper_limit) / 2
if base ** logarithm < z:
lower_limit = logarithm
else:
upper_limit = logarithm
return logarithm
# Print the logarithm in the required format
print("{0:.9f}".format(round( log(x,y), 9)))
| malja/cvut-python | cviceni02/log.py | Python | mit | 2,700 |
from model.group import Group
import random
import string
import os.path
import jsonpickle
import getopt
import sys
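# A typical invocation (the values here are just an example; see the defaults
# below):
#   python generator/group.py -n 10 -f data/groups.json
# writes one empty group plus 10 randomly generated groups to the JSON file.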
try:
opts, args = getopt.getopt(sys.argv[1:], "n:f:", ['number of groups', 'file'])
except getopt.GetoptError as err:
    print(err)  # getopt has no usage() helper; report the parsing error instead
sys.exit(2)
n = 5
f = "data/groups.json"
for o, a in opts:
if o == '-n':
n = int(a)
elif o == '-f':
f = a
def random_string(prefix, maxlen):
symbol = string.digits + string.ascii_letters + ' '
return prefix + "".join([random.choice(symbol) for i in range(random.randrange(maxlen))])
testdata = [Group(name="", header="", footer="")] + \
[
Group(name=random_string('name', 10), header=random_string('header', 20), footer=random_string('footer', 20))
for i in range(n)
]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, 'w') as out:
jsonpickle.set_encoder_options("json", indent=2)
out.write(jsonpickle.encode(testdata))
| Exesium/python_training | generator/group.py | Python | gpl-3.0 | 984 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Document.public'
db.add_column('mediaman_document', 'public', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
def backwards(self, orm):
# Deleting field 'Document.public'
db.delete_column('mediaman_document', 'public')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 5, 23, 11, 14, 50, 225387)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 5, 23, 11, 14, 50, 225311)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cat.accessstatus': {
'Meta': {'object_name': 'AccessStatus'},
'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'cat.acquisitionmethod': {
'Meta': {'object_name': 'AcquisitionMethod'},
'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'method': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'preposition': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'cat.artefacttype': {
'Meta': {'ordering': "['name']", 'object_name': 'ArtefactType'},
'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '150'}),
'see_also': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'})
},
'cat.category': {
'Meta': {'ordering': "['parent__name', 'name']", 'unique_together': "(('slug', 'parent'), ('name', 'parent'))", 'object_name': 'Category'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cat.Category']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'suggested_artefact_types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'categories'", 'null': 'True', 'to': "orm['cat.ArtefactType']"})
},
'cat.culturalbloc': {
'Meta': {'ordering': "['name']", 'object_name': 'CulturalBloc'},
'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30', 'db_index': 'True'})
},
'cat.functionalcategory': {
'Meta': {'ordering': "['name']", 'object_name': 'FunctionalCategory'},
'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cat.loanstatus': {
'Meta': {'object_name': 'LoanStatus'},
'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'cat.museumobject': {
'Meta': {'ordering': "['registration_number']", 'object_name': 'MuseumObject'},
'access_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cat.AccessStatus']", 'null': 'True'}),
'acquisition_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'acquisition_method': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cat.AcquisitionMethod']", 'null': 'True'}),
'artefact_illustrated': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'artefact_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cat.ArtefactType']"}),
'assoc_cultural_group': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cat.Category']", 'symmetrical': 'False', 'blank': 'True'}),
'category_illustrated': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'circumference': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'collector': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'collected_objects'", 'null': 'True', 'to': "orm['parties.Person']"}),
'collector_2': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'collected_objects_2'", 'null': 'True', 'to': "orm['parties.Person']"}),
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'cultural_bloc': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cat.CulturalBloc']", 'null': 'True', 'blank': 'True'}),
'depth': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'donor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'donated_objects'", 'null': 'True', 'to': "orm['parties.Person']"}),
'donor_2': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'donated_objects_2'", 'null': 'True', 'to': "orm['parties.Person']"}),
'exhibition_history': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'functional_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cat.FunctionalCategory']", 'null': 'True', 'blank': 'True'}),
'height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'how_collector_obtained': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'collector_obtained'", 'null': 'True', 'to': "orm['cat.Obtained']"}),
'how_donor_obtained': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'donor_obtained'", 'null': 'True', 'to': "orm['cat.Obtained']"}),
'how_source_obtained': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'source_obtained'", 'null': 'True', 'to': "orm['cat.Obtained']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indigenous_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'is_public_comment': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'length': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'loan_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cat.LoanStatus']", 'null': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'maker': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['parties.Maker']", 'null': 'True', 'blank': 'True'}),
'manufacture_technique': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'old_registration_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'other_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'photographer': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'place': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['location.Place']", 'null': 'True'}),
'private_comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'raw_material': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'record_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cat.RecordStatus']", 'null': 'True', 'blank': 'True'}),
'recorded_use': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'reg_counter': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'reg_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'registered_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['parties.MuseumStaff']", 'null': 'True'}),
'registration_date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'registration_number': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'db_index': 'True'}),
'related_documents': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['mediaman.Document']", 'null': 'True', 'blank': 'True'}),
'significance': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'site_name_number': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'storage_bay': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}),
'storage_section': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}),
'storage_shelf_box_drawer': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}),
'storage_unit': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}),
'when_collector_obtained': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'when_donor_obtained': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'cat.obtained': {
'Meta': {'object_name': 'Obtained'},
'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'how': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'cat.recordstatus': {
'Meta': {'object_name': 'RecordStatus'},
'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'location.place': {
'Meta': {'ordering': "['id']", 'object_name': 'Place'},
'australian_state': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gn_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'gn_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_corrected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'})
},
'mediaman.artefactrepresentation': {
'Meta': {'ordering': "['position']", 'object_name': 'ArtefactRepresentation'},
'artefact': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cat.MuseumObject']"}),
'filesize': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'md5sum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'original_filedate': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'original_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'upload_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'uploaded_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"})
},
'mediaman.document': {
'Meta': {'object_name': 'Document'},
'document': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'document_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'filesize': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'md5sum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'original_filedate': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'original_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'upload_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'uploaded_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"})
},
'parties.maker': {
'Meta': {'ordering': "['name']", 'object_name': 'Maker'},
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'parties.museumstaff': {
'Meta': {'ordering': "['name']", 'object_name': 'MuseumStaff'},
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'parties.person': {
'Meta': {'ordering': "['name']", 'object_name': 'Person'},
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '150'}),
'related_documents': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'related_people'", 'blank': 'True', 'to': "orm['mediaman.Document']"})
}
}
complete_apps = ['mediaman']
| uq-eresearch/uqam | mediaman/migrations/0006_auto__add_field_document_public.py | Python | bsd-3-clause | 20,584 |
#!/usr/bin/env python
#
# Author: Qiming Sun <[email protected]>
#
import numpy
from pyscf import gto, scf
'''
Concatenate two molecule environments
We need integrals between different bra and ket spaces, e.g. to prepare an
initial guess from a different geometry or basis, or to compute
transition properties between two states. To access these integrals, we
need to concatenate the environments of the two Mole objects.
This method can also be used to generate the 3-center integrals for RI integrals.
'''
mol1 = gto.M(
verbose = 0,
atom = 'C 0 0 0; O 0 0 1.5',
basis = 'ccpvdz'
)
mol2 = gto.M(
verbose = 0,
atom = 'H 0 1 0; H 1 0 0',
basis = 'ccpvdz'
)
atm3, bas3, env3 = gto.conc_env(mol1._atm, mol1._bas, mol1._env,
mol2._atm, mol2._bas, mol2._env)
nao1 = mol1.nao_nr()
nao2 = mol2.nao_nr()
s12 = numpy.empty((nao1,nao2))
pi = 0
for i in range(mol1.nbas):
pj = 0
for j in range(mol1.nbas, mol1.nbas+mol2.nbas):
shls = (i, j)
buf = gto.moleintor.getints_by_shell('cint1e_ovlp_sph',
shls, atm3, bas3, env3)
di, dj = buf.shape
s12[pi:pi+di,pj:pj+dj] = buf
pj += dj
pi += di
print('<mol1|mol2> overlap shape %s' % str(s12.shape))
#
# 3-center and 2-center 2e integrals for density fitting
#
mol = mol1
auxmol = gto.M(
verbose = 0,
atom = 'C 0 0 0; O 0 0 1.5',
basis = 'weigend'
)
nao = mol.nao_nr()
naoaux = auxmol.nao_nr()
atm, bas, env = \
gto.mole.conc_env(mol._atm, mol._bas, mol._env,
auxmol._atm, auxmol._bas, auxmol._env)
eri3c = numpy.empty((nao,nao,naoaux))
pi = 0
for i in range(mol.nbas):
pj = 0
for j in range(mol.nbas):
pk = 0
for k in range(mol.nbas, mol.nbas+auxmol.nbas):
shls = (i, j, k)
buf = gto.moleintor.getints_by_shell('cint3c2e_sph',
shls, atm, bas, env)
di, dj, dk = buf.shape
eri3c[pi:pi+di,pj:pj+dj,pk:pk+dk] = buf
pk += dk
pj += dj
pi += di
eri2c = numpy.empty((naoaux,naoaux))
pi = 0
for i in range(mol.nbas, mol.nbas+auxmol.nbas):
pj = 0
for j in range(mol.nbas, mol.nbas+auxmol.nbas):
shls = (i, j)
buf = gto.moleintor.getints_by_shell('cint2c2e_sph',
shls, atm, bas, env)
di, dj = buf.shape
eri2c[pi:pi+di,pj:pj+dj] = buf
pj += dj
pi += di
#
# Density fitting Hartree-Fock
#
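# What get_vhf below computes (a short sketch of the standard DF contractions,
# with eri3c holding (ij|P) and eri2c holding the metric (P|Q)):
#   rho_P = sum_ij (ij|P) D_ij          c = (P|Q)^-1 rho
#   J_ij  = sum_P c_P (ij|P)
#   K_ij  = sum_{P,k} [ (P|Q)^-1 sum_l (il|Q) D_lk ]_{P,i,k} (kj|P)
# and the closed-shell effective potential returned is J - K/2.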
def get_vhf(mol, dm, *args, **kwargs):
naux = eri2c.shape[0]
rho = numpy.einsum('ijp,ij->p', eri3c, dm)
rho = numpy.linalg.solve(eri2c, rho)
jmat = numpy.einsum('p,ijp->ij', rho, eri3c)
kpj = numpy.einsum('ijp,jk->ikp', eri3c, dm)
pik = numpy.linalg.solve(eri2c, kpj.reshape(-1,naux).T).reshape(-1,nao,nao)
kmat = numpy.einsum('pik,kjp->ij', pik, eri3c)
return jmat - kmat * .5
mf = scf.RHF(mol)
mf.verbose = 0
mf.get_veff = get_vhf
print('E(DF-HF) = %.12f, ref = %.12f' % (mf.kernel(), scf.density_fit(mf).kernel()))
| gkc1000/pyscf | examples/gto/21-concatenate_molecules.py | Python | apache-2.0 | 3,094 |
"""A chart parser and some grammars. (Chapter 22)"""
# (Written for the second edition of AIMA; expect some discrepancies
# from the third edition until this gets reviewed.)
from utils import *
#______________________________________________________________________________
# Grammars and Lexicons
def Rules(**rules):
"""Create a dictionary mapping symbols to alternative sequences.
>>> Rules(A = "B C | D E")
{'A': [['B', 'C'], ['D', 'E']]}
"""
for (lhs, rhs) in rules.items():
rules[lhs] = [alt.strip().split() for alt in rhs.split('|')]
return rules
def Lexicon(**rules):
"""Create a dictionary mapping symbols to alternative words.
>>> Lexicon(Art = "the | a | an")
{'Art': ['the', 'a', 'an']}
"""
for (lhs, rhs) in rules.items():
rules[lhs] = [word.strip() for word in rhs.split('|')]
return rules
class Grammar:
def __init__(self, name, rules, lexicon):
"A grammar has a set of rules and a lexicon."
update(self, name=name, rules=rules, lexicon=lexicon)
self.categories = DefaultDict([])
for lhs in lexicon:
for word in lexicon[lhs]:
self.categories[word].append(lhs)
def rewrites_for(self, cat):
"Return a sequence of possible rhs's that cat can be rewritten as."
return self.rules.get(cat, ())
def isa(self, word, cat):
"Return True iff word is of category cat"
return cat in self.categories[word]
def __repr__(self):
return '<Grammar %s>' % self.name
E0 = Grammar('E0',
Rules( # Grammar for E_0 [Fig. 22.4]
S = 'NP VP | S Conjunction S',
NP = 'Pronoun | Name | Noun | Article Noun | Digit Digit | NP PP | NP RelClause',
VP = 'Verb | VP NP | VP Adjective | VP PP | VP Adverb',
PP = 'Preposition NP',
RelClause = 'That VP'),
Lexicon( # Lexicon for E_0 [Fig. 22.3]
Noun = "stench | breeze | glitter | nothing | wumpus | pit | pits | gold | east",
Verb = "is | see | smell | shoot | fell | stinks | go | grab | carry | kill | turn | feel",
Adjective = "right | left | east | south | back | smelly",
Adverb = "here | there | nearby | ahead | right | left | east | south | back",
Pronoun = "me | you | I | it",
Name = "John | Mary | Boston | Aristotle",
Article = "the | a | an",
Preposition = "to | in | on | near",
Conjunction = "and | or | but",
Digit = "0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9",
That = "that"
))
E_ = Grammar('E_', # Trivial Grammar and lexicon for testing
Rules(
S = 'NP VP',
NP = 'Art N | Pronoun',
VP = 'V NP'),
Lexicon(
Art = 'the | a',
N = 'man | woman | table | shoelace | saw',
Pronoun = 'I | you | it',
V = 'saw | liked | feel'
))
E_NP_ = Grammar('E_NP_', # another trivial grammar for testing
Rules(NP = 'Adj NP | N'),
Lexicon(Adj = 'happy | handsome | hairy',
N = 'man'))
def generate_random(grammar=E_, s='S'):
"""Replace each token in s by a random entry in grammar (recursively).
This is useful for testing a grammar, e.g. generate_random(E_)"""
import random
def rewrite(tokens, into):
for token in tokens:
if token in grammar.rules:
rewrite(random.choice(grammar.rules[token]), into)
elif token in grammar.lexicon:
into.append(random.choice(grammar.lexicon[token]))
else:
into.append(token)
return into
return ' '.join(rewrite(s.split(), []))
#______________________________________________________________________________
# Chart Parsing
class Chart:
"""Class for parsing sentences using a chart data structure. [Fig 22.7]
>>> chart = Chart(E0);
>>> len(chart.parses('the stench is in 2 2'))
1
"""
def __init__(self, grammar, trace=False):
"""A datastructure for parsing a string; and methods to do the parse.
self.chart[i] holds the edges that end just before the i'th word.
Edges are 5-element lists of [start, end, lhs, [found], [expects]]."""
update(self, grammar=grammar, trace=trace)
def parses(self, words, S='S'):
"""Return a list of parses; words can be a list or string.
>>> chart = Chart(E_NP_)
>>> chart.parses('happy man', 'NP')
[[0, 2, 'NP', [('Adj', 'happy'), [1, 2, 'NP', [('N', 'man')], []]], []]]
"""
if isinstance(words, str):
words = words.split()
self.parse(words, S)
# Return all the parses that span the whole input
# 'span the whole input' => begin at 0, end at len(words)
return [[i, j, S, found, []]
for (i, j, lhs, found, expects) in self.chart[len(words)]
# assert j == len(words)
if i == 0 and lhs == S and expects == []]
def parse(self, words, S='S'):
"""Parse a list of words; according to the grammar.
Leave results in the chart."""
self.chart = [[] for i in range(len(words)+1)]
self.add_edge([0, 0, 'S_', [], [S]])
for i in range(len(words)):
self.scanner(i, words[i])
return self.chart
def add_edge(self, edge):
"Add edge to chart, and see if it extends or predicts another edge."
start, end, lhs, found, expects = edge
if edge not in self.chart[end]:
self.chart[end].append(edge)
if self.trace:
print '%10s: added %s' % (caller(2), edge)
if not expects:
self.extender(edge)
else:
self.predictor(edge)
def scanner(self, j, word):
"For each edge expecting a word of this category here, extend the edge."
for (i, j, A, alpha, Bb) in self.chart[j]:
if Bb and self.grammar.isa(word, Bb[0]):
self.add_edge([i, j+1, A, alpha + [(Bb[0], word)], Bb[1:]])
def predictor(self, (i, j, A, alpha, Bb)):
"Add to chart any rules for B that could help extend this edge."
B = Bb[0]
if B in self.grammar.rules:
for rhs in self.grammar.rewrites_for(B):
self.add_edge([j, j, B, [], rhs])
def extender(self, edge):
"See what edges can be extended by this edge."
(j, k, B, _, _) = edge
for (i, j, A, alpha, B1b) in self.chart[j]:
if B1b and B == B1b[0]:
self.add_edge([i, k, A, alpha + [edge], B1b[1:]])
#### TODO:
#### 1. Parsing with augmentations -- requires unification, etc.
#### 2. Sequitor
__doc__ += """
>>> chart = Chart(E0)
>>> chart.parses('the wumpus that is smelly is near 2 2')
[[0, 9, 'S', [[0, 5, 'NP', [[0, 2, 'NP', [('Article', 'the'), ('Noun', 'wumpus')], []], [2, 5, 'RelClause', [('That', 'that'), [3, 5, 'VP', [[3, 4, 'VP', [('Verb', 'is')], []], ('Adjective', 'smelly')], []]], []]], []], [5, 9, 'VP', [[5, 6, 'VP', [('Verb', 'is')], []], [6, 9, 'PP', [('Preposition', 'near'), [7, 9, 'NP', [('Digit', '2'), ('Digit', '2')], []]], []]], []]], []]]
### There is a built-in trace facility (compare [Fig. 22.9])
>>> Chart(E_, trace=True).parses('I feel it')
parse: added [0, 0, 'S_', [], ['S']]
predictor: added [0, 0, 'S', [], ['NP', 'VP']]
predictor: added [0, 0, 'NP', [], ['Art', 'N']]
predictor: added [0, 0, 'NP', [], ['Pronoun']]
scanner: added [0, 1, 'NP', [('Pronoun', 'I')], []]
extender: added [0, 1, 'S', [[0, 1, 'NP', [('Pronoun', 'I')], []]], ['VP']]
predictor: added [1, 1, 'VP', [], ['V', 'NP']]
scanner: added [1, 2, 'VP', [('V', 'feel')], ['NP']]
predictor: added [2, 2, 'NP', [], ['Art', 'N']]
predictor: added [2, 2, 'NP', [], ['Pronoun']]
scanner: added [2, 3, 'NP', [('Pronoun', 'it')], []]
extender: added [1, 3, 'VP', [('V', 'feel'), [2, 3, 'NP', [('Pronoun', 'it')], []]], []]
extender: added [0, 3, 'S', [[0, 1, 'NP', [('Pronoun', 'I')], []], [1, 3, 'VP', [('V', 'feel'), [2, 3, 'NP', [('Pronoun', 'it')], []]], []]], []]
extender: added [0, 3, 'S_', [[0, 3, 'S', [[0, 1, 'NP', [('Pronoun', 'I')], []], [1, 3, 'VP', [('V', 'feel'), [2, 3, 'NP', [('Pronoun', 'it')], []]], []]], []]], []]
[[0, 3, 'S', [[0, 1, 'NP', [('Pronoun', 'I')], []], [1, 3, 'VP', [('V', 'feel'), [2, 3, 'NP', [('Pronoun', 'it')], []]], []]], []]]
"""
| ttalviste/aima | aima/nlp.py | Python | mit | 8,297 |
#!/usr/bin/env python3
import unittest
import itertools
import string
import random
import strjump
class TestCase(unittest.TestCase):
def test_compiler1(self):
string = [
'1:',
strjump.Reference(1),
',2:',
strjump.Reference(2),
'|',
strjump.Identifier(1, 'f'),
'irst|',
strjump.Identifier(2, 's'),
'econd|'
]
#print("String to process:", '"' + strjump.tools.repr_string(string) + '"')
result = strjump.process(string)
#print("Result:", '"%s"' % result)
self.assertEqual('1:9,2:15|first|second|', result)
def test_compiler2(self):
string = [
'1:',
strjump.Identifier(1, 'f'),
'irst,2:',
strjump.Reference(2),
',|1:',
strjump.Reference(1),
',|',
strjump.Identifier(2, 's'),
'econd|'
]
#print("String to process:", '"' + strjump.tools.repr_string(string) + '"')
result = strjump.process(string)
#print("Result:", result)
self.assertEqual('1:first,2:19,|1:2,|second|', result)
#def test_compiler3(self):
# for _ in range(1000):
# n = 150
# base = string.ascii_lowercase + string.digits
# base = ''.join(base[random.randint(0, 35)] for i in range(n))
# i0 = random.randint(5, n / 2) - 1
# i1 = i0 + random.randint(5, n / 2) - 1
# if i1 == 10 ** (len(str(i1)) - 1):
# i1 -= 1
# ref = str(i1)
# lref = len(ref)
# lst = [base[:i0], strjump.Reference(1), base[i0 + lref:i1],
# strjump.Identifier(1, base[i1]), base[i1 + 1:]]
# base = base[:i0] + ref + base[i0 + lref:]
# #print("String to process:", '"' + strjump.tools.repr_string(lst) + '"')
# rst = strjump.process(lst)
# self.assertEqual(base, rst)
#
#def test_compiler4(self):
# for _ in range(1000):
# n = 150
# base = string.ascii_lowercase + string.digits
# base = ''.join(base[random.randint(0, 35)] for i in range(n))
# i1 = random.randint(5, n / 2) - 1
# if i1 == 10 ** (len(str(i1)) - 1):
# i1 -= 1
# i0 = i1 + random.randint(5, n / 2) - 1
# ref = str(i1)
# lref = len(ref)
# lst = [base[:i1], strjump.Identifier(1, base[i1]), base[i1 + 1:i0],
# strjump.Reference(1), base[i0 + lref:]]
# base = base[:i0] + ref + base[i0 + lref:]
# #print("String to process:", '"' + strjump.tools.repr_string(lst) + '"')
# rst = strjump.process(lst)
# self.assertEqual(base, rst)
def test_compiler5(self):
lst = ["3:",
strjump.Reference('3-1277'),
",|",
strjump.Identifier('3-1277', '-'),
"a:",
strjump.Reference('3-1280'),
",-b:",
strjump.Reference('3-1279'),
",-n:",
strjump.Reference('3-1278'),
",-t:",
strjump.Reference('3-1281'),
",;;;;|",
strjump.Identifier('3-1278', '-'),
"m:",
strjump.Reference('3-1284'),
",;;;;|",
strjump.Identifier('3-1279',';'),
";;;|",
strjump.Identifier('3-1280',';'),
";;",
strjump.Reference('3!1'),
";|",
strjump.Identifier('3-1281', '-'),
"f:",
strjump.Reference('3-1283'),
",-g:",
strjump.Reference('3-1282'),
",;;;;|",
strjump.Identifier('3-1282', ';'),
";;",
strjump.Reference('3!4'),
";|",
strjump.Identifier('3-1283', ';'),
";;",
strjump.Reference('3!5'),
";|",
strjump.Identifier('3-1284', '-'),
"j:",
strjump.Reference('3-1285'),
",;;",
strjump.Reference('3-1286'),
";;|",
strjump.Identifier('3-1285', ';'),
";;",
strjump.Reference('3!3'),
";|",
strjump.Identifier('3-1286', ';'),
";;",
strjump.Reference('3!2'),
";|",
strjump.Identifier('3!0', '-'),
"b,-c,d,|",
strjump.Identifier('3!1', '-'),
"a,-c,d,|",
strjump.Identifier('3!2', '-'),
"n,-m,i,|",
strjump.Identifier('3!3', '-'),
"n,-m,-j,|",
strjump.Identifier('3!4', '-'),
"t,-g,i,|",
strjump.Identifier('3!5', '-'),
"t,-f,i,|"]
#print("String to process:", '"' + strjump.tools.repr_string(lst) + '"')
result = strjump.process(lst)
#print("Result:", result)
ref = "3:5,|-a:50,-b:45,-n:34,-t:58,;;;;|-m:91,;;;;|;;;;|;;;131;|-f:83,-g:75,;;;;|;;;159;|;;;168;|-j:106,;;114;;|;;;149;|;;;140;|-b,-c,d,|-a,-c,d,|-n,-m,i,|-n,-m,-j,|-t,-g,i,|-t,-f,i,|"
self.assertEqual(result, ref)
| ffunenga/strjump | tests/test_compiler.py | Python | mit | 4,964 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test conversion of graphs involving INT32 tensors and operations."""
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
class ExcludeUnsupportedInt32Test(trt_test.TfTrtIntegrationTestBase):
"""Test exclusion of ops which are not supported in INT32 mode by TF-TRT"""
def _ConstOp(self, shape, dtype):
return constant_op.constant(np.random.randn(*shape), dtype=dtype)
def GraphFn(self, x):
dtype = x.dtype
b = self._ConstOp((4, 10), dtype)
x = math_ops.matmul(x, b)
b = self._ConstOp((10,), dtype)
x = nn.bias_add(x, b)
return array_ops.identity(x, name='output_0')
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.int32, [[100, 4]], [[100, 10]])
def setUp(self):
super(trt_test.TfTrtIntegrationTestBase, self).setUp() # pylint: disable=bad-super-call
# Disable layout optimizer, since it will convert BiasAdd with NHWC
    # format to NCHW format under four-dimensional input.
self.DisableNonTrtOptimizers()
def GetMaxBatchSize(self, run_params):
"""Returns the max_batch_size that the converter should use for tests."""
if run_params.dynamic_engine:
return None
return 100
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return []
class CalibrationInt32Support(trt_test.TfTrtIntegrationTestBase):
"""Test execution of calibration with int32 input"""
def GraphFn(self, inp):
# Can use any op that is converted to TRT with int32 inputs
inp_transposed = array_ops.transpose(inp, [0, 3, 2, 1], name='transpose_0')
return array_ops.identity(inp_transposed, name='output_0')
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.int32, [[3, 4, 5, 6]],
[[3, 6, 5, 4]])
def ShouldRunTest(self, run_params):
# Although test passes with all configurations but only
# execute INT8 with use_calibration=True because
# that is the purpose of the test.
return trt_test.IsQuantizationWithCalibration(
run_params), 'test calibration and INT8'
def ExpectedEnginesToBuild(self, run_params):
return ['TRTEngineOp_0']
if __name__ == '__main__':
test.main()
| tensorflow/tensorflow | tensorflow/python/compiler/tensorrt/test/int32_test.py | Python | apache-2.0 | 3,216 |
# PAY.JP Python bindings
# Configuration variables
api_key = None
api_base = 'https://api.pay.jp'
api_version = None
# TODO include Card?
__all__ = ['Account', 'Card', 'Charge', 'Customer', 'Event', 'Plan', 'Subscription', 'Token', 'Transfer']
# Resource
from payjp.resource import ( # noqa
Account, Charge, Customer, Event, Plan, Subscription, Token, Transfer)
| moriyoshi/payjp-python | payjp/__init__.py | Python | mit | 372 |
#coding=utf-8
from datetime import datetime
import random
import numpy as np
import matplotlib.pyplot as plt
INF = 1000
BOUNDVAL = 1000
PATHLINE = []
def inf_set(cost_matrix,n):
for i in xrange(n):
for j in xrange(n):
if cost_matrix[i,j] > 900:
cost_matrix[i,j] = INF
def initial_cost_matrix(n):
random_val = [random.randint(0,100) for i in range(n**2)]
cost_matrix = np.array(random_val).reshape(n,n)
inf_set(cost_matrix,n)
return cost_matrix
def checkout_matrix(c_matrix,n):
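    # Row/column reduction used to build the branch-and-bound lower bound:
    # work on a copy of the cost matrix, subtract each row's minimum and then
    # each column's minimum (skipping rows/columns whose minimum is 0 or INF),
    # and return the reduced matrix together with the total amount subtracted.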
cost_matrix = np.array([0]*n**2).reshape(n,n)
for i in xrange(n):
for j in xrange(n):
cost_matrix[i,j] = c_matrix[i,j]
#print "==> before the checkout <=="
#print cost_matrix
tmp_val = 0
for idx in xrange(n):
min_num = min(cost_matrix[idx,:])
if min_num != 0 and min_num != INF:
cost_matrix[idx,:] -= np.array([min_num]*n)
inf_set(cost_matrix,n)
tmp_val += min_num
for idx in xrange(n):
min_num = min(cost_matrix[:,idx])
if min_num != 0 and min_num != INF:
cost_matrix[:,idx] -= np.array([min_num]*n)
inf_set(cost_matrix,n)
tmp_val += min_num
#print "==> after the checkout <=="
#print cost_matrix
return cost_matrix,tmp_val
def check_path(pathline,i,j):
    # Reject the edge (i, j) if adding it to the partial path would close a
    # cycle before every city has been visited.
path_dict = {}
for idx in pathline:
path_dict[idx[1]] = idx[2]
path_dict[i] = j
for idx in path_dict:
Head = idx
tmp = path_dict[Head]
while tmp in path_dict:
tmp = path_dict[tmp]
if tmp == Head:
return False
return True
# For every zero entry (i, j), compute the penalty of skipping that edge
# (minimum of the rest of row i plus minimum of the rest of column j) and
# return the zero with the largest penalty as (penalty, i, j); edges that
# would close a premature cycle are rejected via check_path.
def get_the_edge(cost_matrix,n,path_line):
tmp_edge = tuple([-1,-1,-1])
for i_row in xrange(n):
for j_col in xrange(n):
if cost_matrix[i_row,j_col] == 0:
tmp_row = [cost_matrix[i_row,idx] for idx in xrange(n) if idx != j_col]
tmp_col = [cost_matrix[idx,j_col] for idx in xrange(n) if idx != i_row]
tmp_val = min(tmp_row) + min(tmp_col)
if tmp_val > tmp_edge[0] and tmp_val < INF and check_path(path_line,i_row,j_col):
tmp_edge = tuple([tmp_val,i_row,j_col])
if tmp_val > INF and check_path(path_line,i_row,j_col):
tmp_edge = tuple([cost_matrix[i_row,j_col],i_row,j_col])
return tmp_edge
def adjust_matrix(c_matrix,cut_edge,is_in,no_edge,n):
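    # Build the cost matrix for one branch on a copy of the parent matrix:
    # including edge (i, j) (is_in=True) forbids the reverse edge (j, i) and
    # blanks row i and column j with INF so no other edge leaves i or enters j
    # (for the very first committed edge column i is blanked as well);
    # excluding the edge (is_in=False) only sets entry (i, j) to INF.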
cost_matrix = np.array([0]*n**2).reshape(n,n)
for i in xrange(n):
for j in xrange(n):
cost_matrix[i,j] = c_matrix[i,j]
i = cut_edge[1]
j = cut_edge[2]
if is_in:
cost_matrix[j,i] = INF
cost_matrix[i,:] = np.array([INF]*n)
cost_matrix[:,j] = np.array([INF]*n)
if no_edge == 0:
cost_matrix[:,i] = np.array([INF]*n)
else:
cost_matrix[i,j] = INF
return cost_matrix
def update_path(tmp_path,base_val):
global BOUNDVAL,PATHLINE
PATHLINE = [x for x in tmp_path]
BOUNDVAL = base_val
tmp = [idx[1:] for idx in PATHLINE]
print "Now edge is : %s , The Bound is : %s"%(tmp,BOUNDVAL)
def get_final_path(tmp_path,cost_matrix):
path_dict = {}
point_line = []
#path_val = cost_matrix[tmp_path[0][1]][tmp_path[0][2]]
for idx in tmp_path:
path_dict[idx[1]] = idx[2]
end_pos = tmp_path[0][1]
point_line.append(end_pos)
str_pos = path_dict[end_pos]
while path_dict[str_pos] in path_dict:
#path_val += cost_matrix[str_pos][path_dict[str_pos]]
point_line.append(str_pos)
str_pos = path_dict[str_pos]
point_line.append(str_pos)
#path_val += cost_matrix[str_pos][path_dict[str_pos]]
str_pos = path_dict[str_pos]
#str_pos = path_dict[str_pos]
point_line.append(str_pos)
point_line.append(end_pos)
#path_val += cost_matrix[str_pos][end_pos]
str_list = ' --> '.join(str(x) for x in point_line)
print "The Final line is : %s"%str_list
def build_node(b_v,cost_matrix,path_line,n_e,n):
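    # One branch-and-bound step: reduce the matrix to tighten the lower bound,
    # pick the zero entry with the largest penalty via get_the_edge, then
    # recurse on the "include edge" subproblem (adjusted matrix, edge appended
    # to the path) and on the "exclude edge" subproblem (penalty added to the
    # bound).  Branches whose bound exceeds the best complete tour found so far
    # (BOUNDVAL) are pruned; a completed tour updates BOUNDVAL and PATHLINE.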
global BOUNDVAL,PATHLINE
no_edge = n_e
base_val = b_v
tmp_out_path = [x for x in path_line]
tmp_in_path = [x for x in path_line]
if no_edge == 0:
if cost_matrix.min() == INF:
return -1
cost_matrix,tmp_val = checkout_matrix(cost_matrix,n)
base_val += tmp_val
cut_edge = get_the_edge(cost_matrix,n,path_line)
if cut_edge[0] == -1 or cut_edge[1] == cut_edge[2]:
return -1
tmp_in_path.append(cut_edge)
in_cmatrix = adjust_matrix(cost_matrix,cut_edge,True,no_edge,n)
build_node(base_val,in_cmatrix,tmp_in_path,no_edge+1,n)
#get the right i,j add value.
out_cmatrix = adjust_matrix(cost_matrix,cut_edge,False,no_edge,n)
build_node(base_val+cut_edge[0],out_cmatrix,tmp_out_path,no_edge,n)
elif no_edge < n-1:
cost_matrix,tmp_val = checkout_matrix(cost_matrix,n)
base_val += tmp_val
if base_val > BOUNDVAL:
return -1
if cost_matrix.min() == INF:
return -1
else:
cut_edge = get_the_edge(cost_matrix,n,path_line)
if cut_edge[0] == -1 or cut_edge[1] == cut_edge[2]:
return -1
tmp_in_path.append(cut_edge)
in_cmatrix = adjust_matrix(cost_matrix,cut_edge,True,no_edge,n)
build_node(base_val,in_cmatrix,tmp_in_path,no_edge+1,n)
out_cmatrix = adjust_matrix(cost_matrix,cut_edge,False,no_edge,n)
build_node(base_val+cut_edge[0],out_cmatrix,tmp_out_path,no_edge,n)
else:
if base_val < BOUNDVAL:
print "Once Update the BOUNDVAL and PATHLINE"
update_path(tmp_in_path,base_val)
return 1
else:
return -1
if __name__ == "__main__":
time_list = []
for n in range(5,20):
path_line = []
base_val = 0
no_edge = 0
cost_matrix = initial_cost_matrix(n)
print "==> Initial the cost matrix with the N : %s <=="%n
start = datetime.now()
build_node(base_val,cost_matrix,path_line,no_edge,n)
get_final_path(PATHLINE,cost_matrix)
PATHLINE = ''
BOUNDVAL = ''
times = datetime.now()-start
seconds = times.microseconds + times.seconds
time_list.append(tuple([n,seconds]))
plt.figure(1)
for idx in time_list:
plt.plot(idx[0],idx[1],'ro')
plt.show()
| CharLLCH/work-for-py | alg-train/hamilton-train3/hamilton.py | Python | gpl-2.0 | 6,538 |
# Stairbuilder - Stringer generation
#
# Generates stringer mesh for stair generation.
# Stair Type (typ):
# - id1 = Freestanding staircase
# - id2 = Housed-open staircase
# - id3 = Box staircase
# - id4 = Circular staircase
# Stringer Type (typ_s):
# - sId1 = Classic
# - sId2 = I-Beam
# - sId3 = C-Beam
#
# Paul "BrikBot" Marshall
# Created: September 19, 2011
# Last Modified: January 29, 2011
# Homepage (blog): http://post.darkarsenic.com/
# //blog.darkarsenic.com/
#
# Coded in IDLE, tested in Blender 2.61.
# Search for "@todo" to quickly find sections that need work.
#
# ##### BEGIN GPL LICENSE BLOCK #####
#
# Stairbuilder is for quick stair generation.
# Copyright (C) 2011 Paul Marshall
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ##### END GPL LICENSE BLOCK #####
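# A minimal usage sketch for the Stringer class below, assuming a General
# helper instance `G` (providing Make_mesh(), slope, angle, faces) from the
# rest of the Stairbuilder add-on; the measurements are illustrative only:
#
#   Stringer(G, typ="id1", typ_s="sId1",
#            rise=0.20, run=0.30, w=15, h=0.25,
#            nT=10, hT=0.04, wT=1.2, tT=0.02, tO=0.0,
#            tw=25, tf=0.05, tp=50, g=False,
#            nS=2, dis=True)
#
# The constructor calls Create() immediately, which dispatches on the stair
# type (typ) and stringer type (typ_s) IDs listed in the header above.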
from math import atan, cos, radians, tan
from mathutils import Matrix, Vector
from mathutils.geometry import (intersect_line_plane,
intersect_line_line)
class Stringer:
def __init__(self,G,typ,typ_s,rise,run,w,h,nT,hT,wT,tT,tO,tw,tf,tp,g,
nS=1,dis=False,notMulti=True,deg=4):
self.G = G #General
self.typ = typ # Stair type
self.typ_s = typ_s # Stringer type
self.rise = rise #Stair rise
self.run = run #Stair run. Degrees if self.typ == "id4"
if notMulti:
self.w = w / 100 #stringer width
else:
self.w = (wT * (w / 100)) / nS
self.h = h #stringer height
self.nT = nT #number of treads
self.hT = hT #tread height
self.wT = wT #tread width
self.tT = tT #tread toe
self.tO = tO #Tread overhang. Inner radius if self.typ == "id4"
self.tw = self.w * (tw / 100) #stringer web thickness
self.tf = tf #stringer flange thickness
self.tp = 1 - (tp / 100) #stringer flange taper
self.g = g #does stringer intersect the ground?
self.nS = nS #number of stringers
self.dis = dis #Use distributed stringers
        self.deg = deg #number of sections per "slice". Only applies if self.typ == "id4"
# Default stringer object (classic / sId1):
self.faces1=[[0,1,3,2],[1,5,3],[3,5,4],[6,7,9,8],[7,11,9],[9,11,10],
[0,2,8,6],[0,1,7,6],[1,5,11,7],[2,3,9,8],[3,4,10,9],[4,5,11,10]]
# Box stair type stringer:
self.faces2=[[0,1,7,6],[1,3,9,7],[3,4,10,9],[4,10,11,5],[5,11,8,2],
[2,8,6,0],[0,1,2],[1,2,5,3],[3,4,5],[6,7,8],[7,8,11,9],[9,10,11]]
# I-beam stringer (id2 / sId2 / Taper < 100%):
self.faces3a=[[0,1,17,16],[1,2,18,17],[2,3,19,18],[3,4,20,19],[4,5,21,20],[5,6,22,21],
[6,7,23,22],[7,8,24,23],[8,9,25,24],[9,10,26,25],[10,11,27,26],
[11,12,28,27],[12,13,29,28],[13,14,30,29],[14,15,31,30],[15,0,16,31],
[0,1,2,15],[2,11,14,15],[11,12,13,14],[2,3,10,11],[3,4,5,6],[3,6,7,10],
[7,8,9,10],[16,17,18,31],[18,27,30,31],[27,28,29,30],[18,19,26,27],
[19,20,21,22],[19,22,23,26],[23,24,25,26]]
# I-beam stringer (id2 / sId2 / Taper = 100%):
self.faces3b=[[0,1,9,8],[1,2,10,9],[2,3,11,10],[3,4,12,11],[4,5,13,12],[5,6,14,13],
[6,7,15,14],[7,0,8,15],[0,1,6,7],[1,2,5,6],[2,3,4,5],[8,9,14,15],
[9,10,13,14],[10,11,12,13]]
# I-beam stringer (id3 / sId2 / Taper < 100%):
self.faces3c=[[0,1,2,7],[2,3,6,7],[3,4,5,6],[1,2,23,16],[2,3,22,23],
[3,4,21,22],[16,17,18,23],[18,19,22,23],[19,20,21,22],
[17,8,15,18],[18,15,14,19],[19,14,13,20],[8,9,10,15],
[10,11,14,15],[11,12,13,14],[9,10,53,52],[10,11,54,53],
[11,12,55,54],[52,53,61,60],[53,54,62,61],[54,55,63,62],
[60,61,34,33],[61,62,35,34],[62,63,36,35],[32,33,34,39],
[34,35,38,39],[35,36,37,38],[41,32,39,42],[42,39,38,43],
[43,38,37,44],[40,41,42,47],[42,43,46,47],[43,44,45,46],
[25,26,47,40],[26,27,46,47],[27,28,45,46],[24,25,26,31],
[26,27,30,31],[27,28,29,30],[24,31,57,56],[31,30,58,57],
[30,29,59,58],[48,49,57,56],[49,50,58,57],[50,51,59,58],
[0,7,49,48],[7,6,50,49],[6,5,51,50],[0,1,16,48],[16,40,56,48],
[24,25,40,56],[16,17,41,40],[8,9,52,17],[17,52,60,41],
[32,33,60,41],[12,13,20,55],[20,44,63,55],[37,44,63,36],
[20,21,45,44],[28,29,51,21],[21,51,59,45],[28,45,59,29],
[4,5,51,21]]
# C-beam stringer (id3 / sId3 / Taper < 100%):
self.faces4c=[[0,1,2,7],[2,3,6,7],[3,4,5,6],[1,2,23,16],[2,3,22,23],[3,4,21,22],
[16,17,18,23],[18,19,22,23],[19,20,21,22],[17,8,15,18],[18,15,14,19],
[19,14,13,20],[8,9,10,15],[10,11,14,15],[11,12,13,14],[0,24,25,7],
[7,25,26,6],[6,26,27,5],[9,31,30,10],[10,30,29,11],[11,29,28,12],
[24,25,30,31],[25,26,29,30],[26,27,28,29],[0,1,16,24],[16,24,31,17],
[8,9,31,17],[4,5,27,21],[20,21,27,28],[12,13,20,28]]
self.Create()
def Create(self):
if self.typ == "id1":
if self.typ_s == "sId1":
if self.dis or self.nS == 1:
offset = (self.wT / (self.nS + 1)) - (self.w / 2)
else:
offset = 0
for i in range(self.nS):
for j in range(self.nT):
coords = []
coords.append(Vector([0, offset, -self.rise]))
coords.append(Vector([self.run, offset, -self.rise]))
coords.append(Vector([0, offset, -self.hT]))
coords.append(Vector([self.run, offset, -self.hT]))
coords.append(Vector([self.run, offset, 0]))
coords.append(Vector([self.run * 2, offset, 0]))
for k in range(6):
coords.append(coords[k]+Vector([0, self.w, 0]))
for k in coords:
k += j*Vector([self.run, 0, self.rise])
self.G.Make_mesh(coords,self.faces1,'stringer')
if self.dis or self.nS == 1:
offset += self.wT / (self.nS + 1)
else:
offset += (self.wT - self.w) / (self.nS - 1)
elif self.typ_s == "sId2":
self.I_beam()
elif self.typ == "id2":
if self.typ_s == "sId1":
coords = []
coords.append(Vector([-self.tT, -self.w, -self.rise]))
coords.append(Vector([self.hT / self.G.slope, -self.w, -self.rise]))
coords.append(Vector([-self.tT, -self.w, 0]))
coords.append(Vector([self.nT * self.run, -self.w,
((self.nT - 1) * self.rise) - self.hT]))
coords.append(Vector([self.nT * self.run, -self.w, self.nT * self.rise]))
coords.append(Vector([(self.nT * self.run) - self.tT, -self.w,
self.nT * self.rise]))
for i in range(6):
coords.append(coords[i] + Vector([0, self.w, 0]))
self.G.Make_mesh(coords, self.faces2, 'stringer')
for i in coords:
i += Vector([0, self.w + self.wT, 0])
self.G.Make_mesh(coords, self.faces2, 'stringer')
elif self.typ_s == "sId2":
self.housed_I_beam()
elif self.typ_s == "sId3":
self.housed_C_beam()
elif self.typ == "id3":
h = (self.rise - self.hT) - self.rise #height of top section
for i in range(self.nT):
coords = []
coords.append(Vector([i * self.run,0,-self.rise]))
coords.append(Vector([(i + 1) * self.run,0,-self.rise]))
coords.append(Vector([i * self.run,0,h + (i * self.rise)]))
coords.append(Vector([(i + 1) * self.run,0,h + (i * self.rise)]))
for j in range(4):
coords.append(coords[j] + Vector([0,self.wT,0]))
self.G.Make_mesh(coords, self.G.faces, 'stringer')
elif self.typ == "id4":
offset = (self.wT / (self.nS + 1)) - (self.w / 2)
for s in range(self.nS):
base = self.tO + (offset * (s + 1))
start = [Vector([0, -base, -self.hT]),
Vector([0, -base, -self.hT - self.rise]),
Vector([0, -base - self.w, -self.hT]),
Vector([0, -base - self.w, -self.hT - self.rise])]
self.d = radians(self.run) / self.nT
for i in range(self.nT):
coords = []
# Base faces. Should be able to append more sections:
tId4_faces = [[0, 1, 3, 2]]
t_inner = Matrix.Rotation(self.d * i, 3, 'Z')
coords.append((t_inner * start[0]) + Vector([0, 0, self.rise * i]))
coords.append((t_inner * start[1]) + Vector([0, 0, self.rise * i]))
t_outer = Matrix.Rotation(self.d * i, 3, 'Z')
coords.append((t_outer * start[2]) + Vector([0, 0, self.rise * i]))
coords.append((t_outer * start[3]) + Vector([0, 0, self.rise * i]))
k = 0
for j in range(self.deg):
k = (j * 4) + 4
tId4_faces.append([k, k - 4, k - 3, k + 1])
tId4_faces.append([k - 2, k - 1, k + 3, k + 2])
tId4_faces.append([k + 1, k - 3, k - 1, k + 3])
tId4_faces.append([k, k - 4, k - 2, k + 2])
rot = Matrix.Rotation(((self.d * (j + 1)) / self.deg) + (self.d * i), 3, 'Z')
for v in start:
coords.append((rot * v) + Vector([0, 0, self.rise * i]))
for j in range(self.deg):
k = ((j + self.deg) * 4) + 4
tId4_faces.append([k, k - 4, k - 3, k + 1])
tId4_faces.append([k - 2, k - 1, k + 3, k + 2])
tId4_faces.append([k + 1, k - 3, k - 1, k + 3])
tId4_faces.append([k, k - 4, k - 2, k + 2])
rot = Matrix.Rotation(((self.d * ((j + self.deg) + 1)) / self.deg) + (self.d * i), 3, 'Z')
for v in range(4):
if v in [1, 3]:
incline = (self.rise * i) + (self.rise / self.deg) * (j + 1)
coords.append((rot * start[v]) + Vector([0, 0, incline]))
else:
coords.append((rot * start[v]) + Vector([0, 0, self.rise * i]))
self.G.Make_mesh(coords, tId4_faces, 'treads')
return {'FINISHED'}
def I_beam(self):
mid = self.w / 2
web = self.tw / 2
# Bottom of the stringer:
baseZ = -self.rise - self.hT - self.h
        # Top of the stringer:
topZ = -self.rise - self.hT
# Vertical taper amount:
taper = self.tf * self.tp
if self.dis or self.nS == 1:
offset = (self.wT / (self.nS + 1)) - mid
else:
offset = 0
# taper < 100%:
if self.tp > 0:
for i in range(self.nS):
coords = []
coords.append(Vector([0, offset, baseZ]))
coords.append(Vector([0, offset, baseZ + taper]))
coords.append(Vector([0, offset + (mid - web), baseZ + self.tf]))
coords.append(Vector([0, offset + (mid - web), topZ - self.tf]))
coords.append(Vector([0, offset, topZ - taper]))
coords.append(Vector([0, offset, topZ]))
coords.append(Vector([0, offset + (mid - web), topZ]))
coords.append(Vector([0, offset + (mid + web), topZ]))
coords.append(Vector([0, offset + self.w, topZ]))
coords.append(Vector([0, offset + self.w, topZ - taper]))
coords.append(Vector([0, offset + (mid + web), topZ - self.tf]))
coords.append(Vector([0, offset + (mid + web), baseZ + self.tf]))
coords.append(Vector([0, offset + self.w, baseZ + taper]))
coords.append(Vector([0, offset + self.w, baseZ]))
coords.append(Vector([0, offset + (mid + web), baseZ]))
coords.append(Vector([0, offset + (mid - web), baseZ]))
for j in range(16):
coords.append(coords[j]+Vector([self.run * self.nT, 0, self.rise * self.nT]))
                # If the bottom meets the ground:
                # the bottom should lie flat on the xy plane, but shifted down.
                # Either project onto the plane along a vector (hard) or use the
                # built-in intersect helpers in mathutils.geometry (easy). Using intersect:
if self.g:
for j in range(16):
coords[j] = intersect_line_plane(coords[j], coords[j + 16],
Vector([0, 0, topZ]),
Vector([0, 0, 1]))
self.G.Make_mesh(coords, self.faces3a, 'stringer')
if self.dis or self.nS == 1:
offset += self.wT / (self.nS + 1)
else:
offset += (self.wT - self.w) / (self.nS - 1)
# taper = 100%:
else:
for i in range(self.nS):
coords = []
coords.append(Vector([0, offset, baseZ]))
coords.append(Vector([0, offset + (mid - web), baseZ + self.tf]))
coords.append(Vector([0, offset + (mid - web), topZ - self.tf]))
coords.append(Vector([0, offset, topZ]))
coords.append(Vector([0, offset + self.w, topZ]))
coords.append(Vector([0, offset + (mid + web), topZ - self.tf]))
coords.append(Vector([0, offset + (mid + web), baseZ + self.tf]))
coords.append(Vector([0, offset + self.w, baseZ]))
for j in range(8):
coords.append(coords[j]+Vector([self.run * self.nT, 0, self.rise * self.nT]))
self.G.Make_mesh(coords, self.faces3b, 'stringer')
offset += self.wT / (self.nS + 1)
return {'FINISHED'}
def housed_I_beam(self):
webOrth = Vector([self.rise, 0, -self.run]).normalized()
webHeight = Vector([self.run + self.tT, 0, -self.hT]).project(webOrth).length
vDelta_1 = self.tf * tan(self.G.angle)
vDelta_2 = (self.rise * (self.nT - 1)) - (webHeight + self.tf)
flange_y = (self.w - self.tw) / 2
front = -self.tT - self.tf
outer = -self.tO - self.tw - flange_y
coords = []
if self.tp > 0:
# Upper-Outer flange:
coords.append(Vector([front, outer, -self.rise]))
coords.append(Vector([-self.tT, outer, -self.rise]))
coords.append(Vector([-self.tT, outer, 0]))
coords.append(Vector([(self.run * (self.nT - 1)) - self.tT, outer,
self.rise * (self.nT - 1)]))
coords.append(Vector([self.run * self.nT, outer,
self.rise * (self.nT - 1)]))
coords.append(Vector([self.run * self.nT, outer,
(self.rise * (self.nT - 1)) + self.tf]))
coords.append(Vector([(self.run * (self.nT - 1)) - self.tT, outer,
(self.rise * (self.nT - 1)) + self.tf]))
coords.append(Vector([front, outer, self.tf - vDelta_1]))
# Lower-Outer flange:
coords.append(coords[0] + Vector([self.tf + webHeight, 0, 0]))
coords.append(coords[1] + Vector([self.tf + webHeight, 0, 0]))
coords.append(intersect_line_line(coords[9],
coords[9] - Vector([0, 0, 1]),
Vector([self.run, 0, -self.hT - self.tf]),
Vector([self.run * 2, 0, self.rise - self.hT - self.tf]))[0])
coords.append(Vector([(self.run * self.nT) - ((webHeight - self.hT) / tan(self.G.angle)),
outer, vDelta_2]))
coords.append(coords[4] - Vector([0, 0, self.tf + webHeight]))
coords.append(coords[5] - Vector([0, 0, self.tf + webHeight]))
coords.append(coords[11] + Vector([0, 0, self.tf]))
coords.append(intersect_line_line(coords[8],
coords[8] - Vector([0, 0, 1]),
Vector([self.run, 0, -self.hT]),
Vector([self.run * 2, 0, self.rise - self.hT]))[0])
# Outer web:
coords.append(coords[1] + Vector([0, flange_y, 0]))
coords.append(coords[8] + Vector([0, flange_y, 0]))
coords.append(coords[15] + Vector([0, flange_y, 0]))
coords.append(coords[14] + Vector([0, flange_y, 0]))
coords.append(coords[13] + Vector([0, flange_y, 0]))
coords.append(coords[4] + Vector([0, flange_y, 0]))
coords.append(coords[3] + Vector([0, flange_y, 0]))
coords.append(coords[2] + Vector([0, flange_y, 0]))
# Upper-Inner flange and lower-inner flange:
for i in range(16):
coords.append(coords[i] + Vector([0, self.w, 0]))
# Inner web:
for i in range(8):
coords.append(coords[i + 16] + Vector([0, self.tw, 0]))
            # Mid nodes so the faces will be quads:
for i in [0,7,6,5,9,10,11,12]:
coords.append(coords[i] + Vector([0, flange_y, 0]))
for i in range(8):
coords.append(coords[i + 48] + Vector([0, self.tw, 0]))
self.G.Make_mesh(coords, self.faces3c, 'stringer')
for i in coords:
i += Vector([0, self.wT + self.tw, 0])
self.G.Make_mesh(coords, self.faces3c, 'stringer')
# @TODO Taper = 100%
return {'FINISHED'}
def C_Beam(self):
mid = self.w / 2
web = self.tw / 2
# Bottom of the stringer:
baseZ = -self.rise - self.hT - self.h
        # Top of the stringer:
topZ = -self.rise - self.hT
# Vertical taper amount:
taper = self.tf * self.tp
if self.dis or self.nS == 1:
offset = (self.wT / (self.nS + 1)) - mid
else:
offset = 0
# taper < 100%:
if self.tp > 0:
for i in range(self.nS):
coords = []
coords.append(Vector([0, offset, baseZ]))
coords.append(Vector([0, offset, baseZ + taper]))
coords.append(Vector([0, offset + (mid - web), baseZ + self.tf]))
coords.append(Vector([0, offset + (mid - web), topZ - self.tf]))
coords.append(Vector([0, offset, topZ - taper]))
coords.append(Vector([0, offset, topZ]))
coords.append(Vector([0, offset + (mid - web), topZ]))
coords.append(Vector([0, offset + (mid + web), topZ]))
coords.append(Vector([0, offset + self.w, topZ]))
coords.append(Vector([0, offset + self.w, topZ - taper]))
coords.append(Vector([0, offset + (mid + web), topZ - self.tf]))
coords.append(Vector([0, offset + (mid + web), baseZ + self.tf]))
coords.append(Vector([0, offset + self.w, baseZ + taper]))
coords.append(Vector([0, offset + self.w, baseZ]))
coords.append(Vector([0, offset + (mid + web), baseZ]))
coords.append(Vector([0, offset + (mid - web), baseZ]))
for j in range(16):
coords.append(coords[j]+Vector([self.run * self.nT, 0, self.rise * self.nT]))
                # If the bottom meets the ground:
                # the bottom should lie flat on the xy plane, but shifted down.
                # Either project onto the plane along a vector (hard) or use the
                # built-in intersect helpers in mathutils.geometry (easy). Using intersect:
if self.g:
for j in range(16):
coords[j] = intersect_line_plane(coords[j], coords[j + 16],
Vector([0, 0, topZ]),
Vector([0, 0, 1]))
self.G.Make_mesh(coords, self.faces3a, 'stringer')
if self.dis or self.nS == 1:
offset += self.wT / (self.nS + 1)
else:
offset += (self.wT - self.w) / (self.nS - 1)
# taper = 100%:
else:
for i in range(self.nS):
coords = []
coords.append(Vector([0, offset, baseZ]))
coords.append(Vector([0, offset + (mid - web), baseZ + self.tf]))
coords.append(Vector([0, offset + (mid - web), topZ - self.tf]))
coords.append(Vector([0, offset, topZ]))
coords.append(Vector([0, offset + self.w, topZ]))
coords.append(Vector([0, offset + (mid + web), topZ - self.tf]))
coords.append(Vector([0, offset + (mid + web), baseZ + self.tf]))
coords.append(Vector([0, offset + self.w, baseZ]))
for j in range(8):
coords.append(coords[j]+Vector([self.run * self.nT, 0, self.rise * self.nT]))
self.G.Make_mesh(coords, self.faces3b, 'stringer')
offset += self.wT / (self.nS + 1)
return {'FINISHED'}
def housed_C_beam(self):
webOrth = Vector([self.rise, 0, -self.run]).normalized()
webHeight = Vector([self.run + self.tT, 0, -self.hT]).project(webOrth).length
vDelta_1 = self.tf * tan(self.G.angle)
vDelta_2 = (self.rise * (self.nT - 1)) - (webHeight + self.tf)
flange_y = (self.w - self.tw) / 2
front = -self.tT - self.tf
outer = -self.tO - self.tw - flange_y
coords = []
if self.tp > 0:
# Upper-Outer flange:
coords.append(Vector([front, outer, -self.rise]))
coords.append(Vector([-self.tT, outer, -self.rise]))
coords.append(Vector([-self.tT, outer, 0]))
coords.append(Vector([(self.run * (self.nT - 1)) - self.tT, outer,
self.rise * (self.nT - 1)]))
coords.append(Vector([self.run * self.nT, outer,
self.rise * (self.nT - 1)]))
coords.append(Vector([self.run * self.nT, outer,
(self.rise * (self.nT - 1)) + self.tf]))
coords.append(Vector([(self.run * (self.nT - 1)) - self.tT, outer,
(self.rise * (self.nT - 1)) + self.tf]))
coords.append(Vector([front, outer, self.tf - vDelta_1]))
# Lower-Outer flange:
coords.append(coords[0] + Vector([self.tf + webHeight, 0, 0]))
coords.append(coords[1] + Vector([self.tf + webHeight, 0, 0]))
coords.append(intersect_line_line(coords[9],
coords[9] - Vector([0, 0, 1]),
Vector([self.run, 0, -self.hT - self.tf]),
Vector([self.run * 2, 0, self.rise - self.hT - self.tf]))[0])
coords.append(Vector([(self.run * self.nT) - ((webHeight - self.hT) / tan(self.G.angle)),
outer, vDelta_2]))
coords.append(coords[4] - Vector([0, 0, self.tf + webHeight]))
coords.append(coords[5] - Vector([0, 0, self.tf + webHeight]))
coords.append(coords[11] + Vector([0, 0, self.tf]))
coords.append(intersect_line_line(coords[8],
coords[8] - Vector([0, 0, 1]),
Vector([self.run, 0, -self.hT]),
Vector([self.run * 2, 0, self.rise - self.hT]))[0])
# Outer web:
coords.append(coords[1] + Vector([0, flange_y, 0]))
coords.append(coords[8] + Vector([0, flange_y, 0]))
coords.append(coords[15] + Vector([0, flange_y, 0]))
coords.append(coords[14] + Vector([0, flange_y, 0]))
coords.append(coords[13] + Vector([0, flange_y, 0]))
coords.append(coords[4] + Vector([0, flange_y, 0]))
coords.append(coords[3] + Vector([0, flange_y, 0]))
coords.append(coords[2] + Vector([0, flange_y, 0]))
# Outer corner nodes:
for i in [0, 7, 6, 5, 12, 11, 10, 9]:
coords.append(coords[i] + Vector([0, flange_y + self.tw, 0]))
self.G.Make_mesh(coords, self.faces4c, 'stringer')
for i in range(16):
coords[i] += Vector([0, -outer * 2, 0])
for i in range(8):
coords[i + 16] += Vector([0, (-outer - flange_y) * 2, 0])
for i in coords:
i += Vector([0, (self.tO * 2) + self.wT, 0])
self.G.Make_mesh(coords, self.faces4c, 'stringer')
return {'FINISHED'}
| Passtechsoft/TPEAlpGen | blender/release/scripts/addons_contrib/add_mesh_building_objects/stringer.py | Python | gpl-3.0 | 26,985 |
# -*- coding:utf-8 -*-
"""
/***************************************************************************
qgsplugininstallerinstallingdialog.py
Plugin Installer module
-------------------
Date : June 2013
Copyright : (C) 2013 by Borys Jurgiel
Email : info at borysjurgiel dot pl
This module is based on former plugin_installer plugin:
Copyright (C) 2007-2008 Matthew Perry
Copyright (C) 2008-2013 Borys Jurgiel
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import str
from qgis.PyQt.QtCore import QDir, QUrl, QFile, QCoreApplication
from qgis.PyQt.QtWidgets import QDialog
from qgis.PyQt.QtNetwork import QNetworkRequest, QNetworkReply
import qgis
from qgis.core import QgsNetworkAccessManager, QgsApplication
from .ui_qgsplugininstallerinstallingbase import Ui_QgsPluginInstallerInstallingDialogBase
from .installer_data import removeDir, repositories
from .unzip import unzip
class QgsPluginInstallerInstallingDialog(QDialog, Ui_QgsPluginInstallerInstallingDialogBase):
# ----------------------------------------- #
def __init__(self, parent, plugin):
QDialog.__init__(self, parent)
self.setupUi(self)
self.plugin = plugin
self.mResult = ""
self.progressBar.setRange(0, 0)
self.progressBar.setFormat("%p%")
self.labelName.setText(plugin["name"])
self.buttonBox.clicked.connect(self.abort)
self.url = QUrl(plugin["download_url"])
self.redirectionCounter = 0
fileName = plugin["filename"]
tmpDir = QDir.tempPath()
tmpPath = QDir.cleanPath(tmpDir + "/" + fileName)
self.file = QFile(tmpPath)
self.requestDownloading()
def requestDownloading(self):
self.request = QNetworkRequest(self.url)
authcfg = repositories.all()[self.plugin["zip_repository"]]["authcfg"]
if authcfg and isinstance(authcfg, str):
if not QgsApplication.authManager().updateNetworkRequest(
self.request, authcfg.strip()):
self.mResult = self.tr(
"Update of network request with authentication "
"credentials FAILED for configuration '{0}'").format(authcfg)
self.request = None
if self.request is not None:
self.reply = QgsNetworkAccessManager.instance().get(self.request)
self.reply.downloadProgress.connect(self.readProgress)
self.reply.finished.connect(self.requestFinished)
self.stateChanged(4)
def exec_(self):
if self.request is None:
return QDialog.Rejected
QDialog.exec_(self)
# ----------------------------------------- #
def result(self):
return self.mResult
# ----------------------------------------- #
def stateChanged(self, state):
messages = [self.tr("Installing..."), self.tr("Resolving host name..."), self.tr("Connecting..."), self.tr("Host connected. Sending request..."), self.tr("Downloading data..."), self.tr("Idle"), self.tr("Closing connection..."), self.tr("Error")]
self.labelState.setText(messages[state])
# ----------------------------------------- #
def readProgress(self, done, total):
if total > 0:
self.progressBar.setMaximum(total)
self.progressBar.setValue(done)
# ----------------------------------------- #
def requestFinished(self):
reply = self.sender()
self.buttonBox.setEnabled(False)
if reply.error() != QNetworkReply.NoError:
self.mResult = reply.errorString()
if reply.error() == QNetworkReply.OperationCanceledError:
self.mResult += "<br/><br/>" + QCoreApplication.translate("QgsPluginInstaller", "If you haven't canceled the download manually, it might be caused by a timeout. In this case consider increasing the connection timeout value in QGIS options.")
self.reject()
reply.deleteLater()
return
elif reply.attribute(QNetworkRequest.HttpStatusCodeAttribute) == 301:
redirectionUrl = reply.attribute(QNetworkRequest.RedirectionTargetAttribute)
self.redirectionCounter += 1
if self.redirectionCounter > 4:
self.mResult = QCoreApplication.translate("QgsPluginInstaller", "Too many redirections")
self.reject()
reply.deleteLater()
return
else:
if redirectionUrl.isRelative():
redirectionUrl = reply.url().resolved(redirectionUrl)
# Fire a new request and exit immediately in order to quietly destroy the old one
self.url = redirectionUrl
self.requestDownloading()
reply.deleteLater()
return
self.file.open(QFile.WriteOnly)
self.file.write(reply.readAll())
self.file.close()
self.stateChanged(0)
reply.deleteLater()
pluginDir = qgis.utils.home_plugin_path
tmpPath = self.file.fileName()
# make sure that the parent directory exists
if not QDir(pluginDir).exists():
QDir().mkpath(pluginDir)
# if the target directory already exists as a link, remove the link without resolving:
QFile(pluginDir + str(QDir.separator()) + self.plugin["id"]).remove()
try:
            unzip(str(tmpPath), str(pluginDir))  # test extract; if it fails, an exception is raised and nothing is removed
            # remove old plugin files if they exist
            removeDir(QDir.cleanPath(pluginDir + "/" + self.plugin["id"]))
unzip(str(tmpPath), str(pluginDir)) # final extract.
except:
self.mResult = self.tr("Failed to unzip the plugin package. Probably it's broken or missing from the repository. You may also want to make sure that you have write permission to the plugin directory:") + "\n" + pluginDir
self.reject()
return
try:
# cleaning: removing the temporary zip file
QFile(tmpPath).remove()
except:
pass
self.close()
# ----------------------------------------- #
def abort(self):
if self.reply.isRunning():
self.reply.finished.disconnect()
self.reply.abort()
del self.reply
self.mResult = self.tr("Aborted by user")
self.reject()
| medspx/QGIS | python/pyplugin_installer/qgsplugininstallerinstallingdialog.py | Python | gpl-2.0 | 7,267 |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os.path
from textwrap import dedent
import pytest
from pants.backend.go import target_type_rules
from pants.backend.go.target_types import GoModTarget, GoPackageTarget
from pants.backend.go.util_rules import (
assembly,
build_pkg,
first_party_pkg,
go_mod,
link,
sdk,
third_party_pkg,
)
from pants.backend.go.util_rules.embedcfg import EmbedConfig
from pants.backend.go.util_rules.first_party_pkg import (
FallibleFirstPartyPkgAnalysis,
FallibleFirstPartyPkgDigest,
FirstPartyPkgAnalysisRequest,
FirstPartyPkgDigestRequest,
FirstPartyPkgImportPath,
FirstPartyPkgImportPathRequest,
)
from pants.core.target_types import ResourcesGeneratorTarget
from pants.engine.addresses import Address
from pants.engine.fs import PathGlobs, Snapshot
from pants.engine.rules import QueryRule
from pants.testutil.rule_runner import RuleRunner, engine_error
@pytest.fixture
def rule_runner() -> RuleRunner:
rule_runner = RuleRunner(
rules=[
*go_mod.rules(),
*first_party_pkg.rules(),
*sdk.rules(),
*third_party_pkg.rules(),
*target_type_rules.rules(),
*build_pkg.rules(),
*link.rules(),
*assembly.rules(),
QueryRule(FallibleFirstPartyPkgAnalysis, [FirstPartyPkgAnalysisRequest]),
QueryRule(FallibleFirstPartyPkgDigest, [FirstPartyPkgDigestRequest]),
QueryRule(FirstPartyPkgImportPath, [FirstPartyPkgImportPathRequest]),
],
target_types=[GoModTarget, GoPackageTarget, ResourcesGeneratorTarget],
)
rule_runner.set_options([], env_inherit={"PATH"})
return rule_runner
@pytest.mark.parametrize("mod_dir", ("", "src/go/"))
def test_import_path(rule_runner: RuleRunner, mod_dir: str) -> None:
rule_runner.write_files(
{
f"{mod_dir}BUILD": "go_mod(name='mod')\ngo_package(name='pkg')",
f"{mod_dir}go.mod": "module go.example.com/foo",
f"{mod_dir}f.go": "",
f"{mod_dir}dir/f.go": "",
f"{mod_dir}dir/BUILD": "go_package()",
}
)
info = rule_runner.request(
FirstPartyPkgImportPath,
[FirstPartyPkgImportPathRequest(Address(mod_dir, target_name="pkg"))],
)
assert info.import_path == "go.example.com/foo"
assert info.dir_path_rel_to_gomod == ""
info = rule_runner.request(
FirstPartyPkgImportPath,
[FirstPartyPkgImportPathRequest(Address(os.path.join(mod_dir, "dir")))],
)
assert info.import_path == "go.example.com/foo/dir"
assert info.dir_path_rel_to_gomod == "dir"
def test_package_analysis(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"foo/BUILD": "go_mod()\n",
"foo/go.mod": dedent(
"""\
module go.example.com/foo
go 1.16
require github.com/google/uuid v1.3.0
require (
rsc.io/quote v1.5.2
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c // indirect
rsc.io/sampler v1.3.0 // indirect
)
"""
),
"foo/go.sum": dedent(
"""\
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c h1:qgOY6WgZOaTkIIMiVjBQcw93ERBE4m30iBm00nkL0i8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
rsc.io/quote v1.5.2 h1:w5fcysjrx7yqtD/aO+QwRjYZOKnaM9Uh2b40tElTs3Y=
rsc.io/quote v1.5.2/go.mod h1:LzX7hefJvL54yjefDEDHNONDjII0t9xZLPXsUe+TKr0=
rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
"""
),
"foo/pkg/foo.go": dedent(
"""\
package pkg
import "github.com/google/uuid"
import "rsc.io/quote"
func Grok() string {
return "Hello World"
}
"""
),
"foo/pkg/BUILD": "go_package()",
"foo/cmd/main.go": dedent(
"""\
package main
import (
"fmt"
"go.example.com/foo/pkg"
)
func main() {
fmt.Printf("%s\n", pkg.Grok())
}
"""
),
"foo/cmd/bar_test.go": dedent(
"""\
package main
import "testing"
func TestBar(t *testing.T) {}
"""
),
"foo/cmd/BUILD": "go_package()",
}
)
def assert_analysis(
dir_path: str,
*,
imports: list[str],
test_imports: list[str],
xtest_imports: list[str],
go_files: list[str],
test_files: list[str],
xtest_files: list[str],
) -> None:
addr = Address(os.path.join("foo", dir_path))
maybe_analysis = rule_runner.request(
FallibleFirstPartyPkgAnalysis, [FirstPartyPkgAnalysisRequest(addr)]
)
assert maybe_analysis.analysis is not None
analysis = maybe_analysis.analysis
assert analysis.imports == tuple(imports)
assert analysis.test_imports == tuple(test_imports)
assert analysis.xtest_imports == tuple(xtest_imports)
assert analysis.go_files == tuple(go_files)
assert analysis.test_files == tuple(test_files)
assert analysis.xtest_files == tuple(xtest_files)
assert not analysis.s_files
assert analysis.minimum_go_version == "1.16"
assert analysis.embed_patterns == ()
assert analysis.test_embed_patterns == ()
assert analysis.xtest_embed_patterns == ()
maybe_digest = rule_runner.request(
FallibleFirstPartyPkgDigest, [FirstPartyPkgDigestRequest(addr)]
)
assert maybe_digest.pkg_digest is not None
pkg_digest = maybe_digest.pkg_digest
actual_snapshot = rule_runner.request(Snapshot, [pkg_digest.digest])
expected_snapshot = rule_runner.request(Snapshot, [PathGlobs([f"foo/{dir_path}/*.go"])])
assert actual_snapshot == expected_snapshot
assert pkg_digest.embed_config is None
assert pkg_digest.xtest_embed_config is None
assert pkg_digest.xtest_embed_config is None
assert_analysis(
"pkg",
imports=["github.com/google/uuid", "rsc.io/quote"],
test_imports=[],
xtest_imports=[],
go_files=["foo.go"],
test_files=[],
xtest_files=[],
)
assert_analysis(
"cmd",
imports=["fmt", "go.example.com/foo/pkg"],
test_imports=["testing"],
xtest_imports=[],
go_files=["main.go"],
test_files=["bar_test.go"],
xtest_files=[],
)
def test_invalid_package(rule_runner) -> None:
rule_runner.write_files(
{
"BUILD": "go_mod(name='mod')\ngo_package(name='pkg')",
"go.mod": dedent(
"""\
module go.example.com/foo
go 1.17
"""
),
"bad.go": "invalid!!!",
}
)
maybe_analysis = rule_runner.request(
FallibleFirstPartyPkgAnalysis,
[FirstPartyPkgAnalysisRequest(Address("", target_name="pkg"))],
)
assert maybe_analysis.analysis is None
assert maybe_analysis.exit_code == 1
assert "bad.go:1:1: expected 'package', found invalid\n" in maybe_analysis.stderr
@pytest.mark.xfail(reason="cgo is ignored")
def test_cgo_not_supported(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"BUILD": "go_mod(name='mod')\ngo_package(name='pkg')",
"go.mod": dedent(
"""\
module go.example.com/foo
go 1.17
"""
),
"hello.go": dedent(
"""\
package main
// int fortytwo()
// {
// return 42;
// }
import "C"
import "fmt"
func main() {
f := C.intFunc(C.fortytwo)
fmt.Println(C.intFunc(C.fortytwo))
}
"""
),
}
)
with engine_error(NotImplementedError):
rule_runner.request(
FallibleFirstPartyPkgAnalysis,
[FirstPartyPkgAnalysisRequest(Address("", target_name="pkg"))],
)
def test_embeds_supported(rule_runner: RuleRunner) -> None:
go_sources = {
"foo.go": dedent(
"""\
package foo
import _ "embed"
//go:embed grok.txt
var message
"""
),
"foo_test.go": dedent(
"""\
package foo
import _ "embed"
//go:embed test_grok.txt
var testMessage
"""
),
"bar_test.go": dedent(
"""\
package foo_test
import _ "embed"
//go:embed xtest_grok.txt
var testMessage
"""
),
}
resources = {
"grok.txt": "This will be embedded in a Go binary.",
"test_grok.txt": "This will be embedded in a Go binary.",
"xtest_grok.txt": "This will be embedded in a Go binary.",
}
rule_runner.write_files(
{
"BUILD": dedent(
"""
go_mod(name='mod')
go_package(name='pkg', dependencies=[":resources"])
resources(
name="resources",
sources=["*.txt"],
)
"""
),
"go.mod": dedent(
"""\
module go.example.com/foo
go 1.17
"""
),
**resources, # type: ignore[arg-type]
**go_sources, # type: ignore[arg-type]
}
)
maybe_analysis = rule_runner.request(
FallibleFirstPartyPkgAnalysis,
[FirstPartyPkgAnalysisRequest(Address("", target_name="pkg"))],
)
assert maybe_analysis.analysis is not None
analysis = maybe_analysis.analysis
assert analysis.embed_patterns == ("grok.txt",)
assert analysis.test_embed_patterns == ("test_grok.txt",)
assert analysis.xtest_embed_patterns == ("xtest_grok.txt",)
maybe_digest = rule_runner.request(
FallibleFirstPartyPkgDigest,
[FirstPartyPkgDigestRequest(Address("", target_name="pkg"))],
)
assert maybe_digest.pkg_digest is not None
pkg_digest = maybe_digest.pkg_digest
actual_snapshot = rule_runner.request(Snapshot, [pkg_digest.digest])
expected_snapshot = rule_runner.make_snapshot(
{
**go_sources,
**{os.path.join("__resources__", f): content for f, content in resources.items()},
}
)
assert actual_snapshot == expected_snapshot
assert pkg_digest.embed_config == EmbedConfig(
{"grok.txt": ["grok.txt"]}, {"grok.txt": "__resources__/grok.txt"}
)
assert pkg_digest.test_embed_config == EmbedConfig(
{"grok.txt": ["grok.txt"], "test_grok.txt": ["test_grok.txt"]},
{"grok.txt": "__resources__/grok.txt", "test_grok.txt": "__resources__/test_grok.txt"},
)
assert pkg_digest.xtest_embed_config == EmbedConfig(
{"xtest_grok.txt": ["xtest_grok.txt"]}, {"xtest_grok.txt": "__resources__/xtest_grok.txt"}
)
def test_missing_embeds(rule_runner: RuleRunner) -> None:
"""Failing to set up embeds should not crash Pants."""
rule_runner.write_files(
{
"BUILD": dedent(
"""
go_mod(name='mod')
go_package(name='pkg')
"""
),
"go.mod": dedent(
"""\
module go.example.com/foo
go 1.17
"""
),
"foo.go": dedent(
"""\
package foo
import _ "embed"
//go:embed fake.txt
var message
"""
),
}
)
maybe_digest = rule_runner.request(
FallibleFirstPartyPkgDigest,
[FirstPartyPkgDigestRequest(Address("", target_name="pkg"))],
)
assert maybe_digest.pkg_digest is None
assert maybe_digest.exit_code == 1
assert maybe_digest.stderr is not None
assert "Failed to find embedded resources: could not embed fake.txt" in maybe_digest.stderr
| pantsbuild/pants | src/python/pants/backend/go/util_rules/first_party_pkg_test.py | Python | apache-2.0 | 13,175 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# TODO:
# Ability to set CPU/Memory reservations
try:
import json
except ImportError:
import simplejson as json
HAS_PYSPHERE = False
try:
from pysphere import VIServer, VIProperty, MORTypes
from pysphere.resources import VimService_services as VI
from pysphere.vi_task import VITask
from pysphere import VIException, VIApiException, FaultTypes
HAS_PYSPHERE = True
except ImportError:
pass
DOCUMENTATION = '''
---
module: vsphere_guest
short_description: Create/delete/manage a guest VM through VMware vSphere.
description:
- Create/delete/reconfigure a guest VM through VMware vSphere. This module has a dependency on pysphere >= 1.7
version_added: "1.6"
options:
vcenter_hostname:
description:
- The hostname of the vcenter server the module will connect to, to create the guest.
required: true
default: null
aliases: []
guest:
description:
- The virtual server name you wish to manage.
required: true
username:
description:
- Username to connect to vcenter as.
required: true
default: null
password:
description:
- Password of the user to connect to vcenter as.
required: true
default: null
resource_pool:
description:
- The name of the resource_pool to create the VM in.
required: false
default: None
cluster:
description:
- The name of the cluster to create the VM in. By default this is derived from the host you tell the module to build the guest on.
required: false
default: None
esxi:
description:
- Dictionary which includes datacenter and hostname on which the VM should be created. For standalone ESXi hosts, ha-datacenter should be used as the datacenter name
required: false
default: null
state:
description:
- Indicate desired state of the vm.
default: present
choices: ['present', 'powered_on', 'absent', 'powered_off', 'restarted', 'reconfigured']
vm_disk:
description:
- A key, value list of disks and their sizes and which datastore to keep it in.
required: false
default: null
vm_hardware:
description:
- A key, value list of VM config settings. Must include ['memory_mb', 'num_cpus', 'osid', 'scsi'].
required: false
default: null
vm_nic:
description:
- A key, value list of nics, their types and what network to put them on.
required: false
default: null
vm_extra_config:
description:
- A key, value pair of any extra values you want set or changed in the vmx file of the VM. Useful to set advanced options on the VM.
required: false
default: null
vm_hw_version:
description:
- Desired hardware version identifier (for example, "vmx-08" for vms that needs to be managed with vSphere Client). Note that changing hardware version of existing vm is not supported.
required: false
default: null
version_added: "1.7"
vmware_guest_facts:
description:
- Gather facts from vCenter on a particular VM
required: false
default: null
force:
description:
- Boolean. Allows you to run commands which may alter the running state of a guest. Also used to reconfigure and destroy.
default: "no"
choices: [ "yes", "no" ]
notes:
- This module should run from a system that can access vSphere directly.
Either by using local_action, or using delegate_to.
author: Richard Hoop <[email protected]>
requirements: [ pysphere ]
'''
EXAMPLES = '''
# Create a new VM on an ESX server
# Returns changed = False when the VM already exists
# Returns changed = True and adds ansible_facts from the new VM
# State will set the power status of a guest upon creation. Use powered_on to create and boot.
# Options ['state', 'vm_extra_config', 'vm_disk', 'vm_nic', 'vm_hardware', 'esxi'] are required together
- vsphere_guest:
vcenter_hostname: vcenter.mydomain.local
username: myuser
password: mypass
guest: newvm001
state: powered_on
vm_extra_config:
vcpu.hotadd: yes
mem.hotadd: yes
notes: This is a test VM
vm_disk:
disk1:
size_gb: 10
type: thin
datastore: storage001
vm_nic:
nic1:
type: vmxnet3
network: VM Network
network_type: standard
vm_hardware:
memory_mb: 2048
num_cpus: 2
osid: centos64Guest
scsi: paravirtual
esxi:
datacenter: MyDatacenter
hostname: esx001.mydomain.local
# Reconfigure the CPU and Memory on the newly created VM
# Will return the changes made
- vsphere_guest:
vcenter_hostname: vcenter.mydomain.local
username: myuser
password: mypass
guest: newvm001
state: reconfigured
vm_extra_config:
vcpu.hotadd: yes
mem.hotadd: yes
notes: This is a test VM
vm_disk:
disk1:
size_gb: 10
type: thin
datastore: storage001
vm_nic:
nic1:
type: vmxnet3
network: VM Network
network_type: standard
vm_hardware:
memory_mb: 4096
num_cpus: 4
osid: centos64Guest
scsi: paravirtual
esxi:
datacenter: MyDatacenter
hostname: esx001.mydomain.local
# Task to gather facts from a vSphere cluster only if the system is a VMWare guest
- vsphere_guest:
vcenter_hostname: vcenter.mydomain.local
username: myuser
password: mypass
guest: newvm001
vmware_guest_facts: yes
# Typical output of a vsphere_facts run on a guest
- hw_eth0:
- addresstype: "assigned"
label: "Network adapter 1"
macaddress: "00:22:33:33:44:55"
macaddress_dash: "00-22-33-33-44-55"
summary: "VM Network"
hw_guest_full_name: "newvm001"
hw_guest_id: "rhel6_64Guest"
hw_memtotal_mb: 2048
hw_name: "centos64Guest"
hw_processor_count: 2
hw_product_uuid: "ef50bac8-2845-40ff-81d9-675315501dac"
# Remove a vm from vSphere
# The VM must be powered_off or you need to use force to force a shutdown
- vsphere_guest:
vcenter_hostname: vcenter.mydomain.local
username: myuser
password: mypass
guest: newvm001
state: absent
force: yes
'''
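# A minimal sketch of the delegation pattern mentioned in the notes above:
# the task runs on a host that can reach vCenter (here the control machine);
# hostnames and credentials are placeholders, not defaults.
#
# - vsphere_guest:
#     vcenter_hostname: vcenter.mydomain.local
#     username: myuser
#     password: mypass
#     guest: newvm001
#     vmware_guest_facts: yes
#   delegate_to: localhost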
def add_scsi_controller(module, s, config, devices, type="paravirtual", bus_num=0, disk_ctrl_key=1):
# add a scsi controller
scsi_ctrl_spec = config.new_deviceChange()
scsi_ctrl_spec.set_element_operation('add')
if type == "lsi":
# For RHEL5
scsi_ctrl = VI.ns0.VirtualLsiLogicController_Def("scsi_ctrl").pyclass()
elif type == "paravirtual":
# For RHEL6
scsi_ctrl = VI.ns0.ParaVirtualSCSIController_Def("scsi_ctrl").pyclass()
elif type == "lsi_sas":
scsi_ctrl = VI.ns0.VirtualLsiLogicSASController_Def(
"scsi_ctrl").pyclass()
elif type == "bus_logic":
scsi_ctrl = VI.ns0.VirtualBusLogicController_Def("scsi_ctrl").pyclass()
else:
s.disconnect()
module.fail_json(
msg="Error adding scsi controller to vm spec. No scsi controller"
" type of: %s" % (type))
scsi_ctrl.set_element_busNumber(int(bus_num))
scsi_ctrl.set_element_key(int(disk_ctrl_key))
scsi_ctrl.set_element_sharedBus("noSharing")
scsi_ctrl_spec.set_element_device(scsi_ctrl)
# Add the scsi controller to the VM spec.
devices.append(scsi_ctrl_spec)
return disk_ctrl_key
def add_disk(module, s, config_target, config, devices, datastore, type="thin", size=200000, disk_ctrl_key=1, disk_number=0, key=0):
# add a vmdk disk
# Verify the datastore exists
datastore_name, ds = find_datastore(module, s, datastore, config_target)
# create a new disk - file based - for the vm
disk_spec = config.new_deviceChange()
disk_spec.set_element_fileOperation("create")
disk_spec.set_element_operation("add")
disk_ctlr = VI.ns0.VirtualDisk_Def("disk_ctlr").pyclass()
disk_backing = VI.ns0.VirtualDiskFlatVer2BackingInfo_Def(
"disk_backing").pyclass()
disk_backing.set_element_fileName(datastore_name)
disk_backing.set_element_diskMode("persistent")
if type != "thick":
disk_backing.set_element_thinProvisioned(1)
disk_ctlr.set_element_key(key)
disk_ctlr.set_element_controllerKey(int(disk_ctrl_key))
disk_ctlr.set_element_unitNumber(int(disk_number))
disk_ctlr.set_element_backing(disk_backing)
disk_ctlr.set_element_capacityInKB(int(size))
disk_spec.set_element_device(disk_ctlr)
devices.append(disk_spec)
def add_cdrom(module, s, config_target, config, devices, default_devs, type="client", vm_cd_iso_path=None):
# Add a cd-rom
# Make sure the datastore exists.
if vm_cd_iso_path:
iso_location = vm_cd_iso_path.split('/', 1)
datastore, ds = find_datastore(
module, s, iso_location[0], config_target)
iso_path = iso_location[1]
# find ide controller
ide_ctlr = None
for dev in default_devs:
if dev.typecode.type[1] == "VirtualIDEController":
ide_ctlr = dev
# add a cdrom based on a physical device
if ide_ctlr:
cd_spec = config.new_deviceChange()
cd_spec.set_element_operation('add')
cd_ctrl = VI.ns0.VirtualCdrom_Def("cd_ctrl").pyclass()
if type == "iso":
iso = VI.ns0.VirtualCdromIsoBackingInfo_Def("iso").pyclass()
ds_ref = iso.new_datastore(ds)
ds_ref.set_attribute_type(ds.get_attribute_type())
iso.set_element_datastore(ds_ref)
iso.set_element_fileName("%s %s" % (datastore, iso_path))
cd_ctrl.set_element_backing(iso)
cd_ctrl.set_element_key(20)
cd_ctrl.set_element_controllerKey(ide_ctlr.get_element_key())
cd_ctrl.set_element_unitNumber(0)
cd_spec.set_element_device(cd_ctrl)
elif type == "client":
client = VI.ns0.VirtualCdromRemoteAtapiBackingInfo_Def(
"client").pyclass()
client.set_element_deviceName("")
cd_ctrl.set_element_backing(client)
cd_ctrl.set_element_key(20)
cd_ctrl.set_element_controllerKey(ide_ctlr.get_element_key())
cd_ctrl.set_element_unitNumber(0)
cd_spec.set_element_device(cd_ctrl)
else:
s.disconnect()
module.fail_json(
msg="Error adding cdrom of type %s to vm spec. "
" cdrom type can either be iso or client" % (type))
devices.append(cd_spec)
def add_nic(module, s, nfmor, config, devices, nic_type="vmxnet3", network_name="VM Network", network_type="standard"):
# add a NIC
    # Different network card types are: "VirtualE1000",
    # "VirtualE1000e", "VirtualPCNet32", "VirtualVmxnet", "VirtualVmxnet2",
    # "VirtualVmxnet3"
nic_spec = config.new_deviceChange()
nic_spec.set_element_operation("add")
if nic_type == "e1000":
nic_ctlr = VI.ns0.VirtualE1000_Def("nic_ctlr").pyclass()
elif nic_type == "e1000e":
nic_ctlr = VI.ns0.VirtualE1000e_Def("nic_ctlr").pyclass()
elif nic_type == "pcnet32":
nic_ctlr = VI.ns0.VirtualPCNet32_Def("nic_ctlr").pyclass()
elif nic_type == "vmxnet":
nic_ctlr = VI.ns0.VirtualVmxnet_Def("nic_ctlr").pyclass()
elif nic_type == "vmxnet2":
nic_ctlr = VI.ns0.VirtualVmxnet2_Def("nic_ctlr").pyclass()
elif nic_type == "vmxnet3":
nic_ctlr = VI.ns0.VirtualVmxnet3_Def("nic_ctlr").pyclass()
else:
s.disconnect()
module.fail_json(
msg="Error adding nic to vm spec. No nic type of: %s" %
(nic_type))
if network_type == "standard":
nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def(
"nic_backing").pyclass()
nic_backing.set_element_deviceName(network_name)
elif network_type == "dvs":
# Get the portgroup key
portgroupKey = find_portgroup_key(module, s, nfmor, network_name)
# Get the dvswitch uuid
dvswitch_uuid = find_dvswitch_uuid(module, s, nfmor, portgroupKey)
nic_backing_port = VI.ns0.DistributedVirtualSwitchPortConnection_Def(
"nic_backing_port").pyclass()
nic_backing_port.set_element_switchUuid(dvswitch_uuid)
nic_backing_port.set_element_portgroupKey(portgroupKey)
nic_backing = VI.ns0.VirtualEthernetCardDistributedVirtualPortBackingInfo_Def(
"nic_backing").pyclass()
nic_backing.set_element_port(nic_backing_port)
else:
s.disconnect()
module.fail_json(
msg="Error adding nic backing to vm spec. No network type of:"
" %s" % (network_type))
nic_ctlr.set_element_addressType("generated")
nic_ctlr.set_element_backing(nic_backing)
nic_ctlr.set_element_key(4)
nic_spec.set_element_device(nic_ctlr)
devices.append(nic_spec)
def find_datastore(module, s, datastore, config_target):
# Verify the datastore exists and put it in brackets if it does.
ds = None
for d in config_target.Datastore:
if (d.Datastore.Accessible and
(datastore and d.Datastore.Name == datastore)
or (not datastore)):
ds = d.Datastore.Datastore
datastore = d.Datastore.Name
break
if not ds:
s.disconnect()
module.fail_json(msg="Datastore: %s does not appear to exist" %
(datastore))
datastore_name = "[%s]" % datastore
return datastore_name, ds
def find_portgroup_key(module, s, nfmor, network_name):
# Find a portgroups key given the portgroup name.
# Grab all the distributed virtual portgroup's names and key's.
dvpg_mors = s._retrieve_properties_traversal(
property_names=['name', 'key'],
from_node=nfmor, obj_type='DistributedVirtualPortgroup')
# Get the correct portgroup managed object.
dvpg_mor = None
for dvpg in dvpg_mors:
if dvpg_mor:
break
for p in dvpg.PropSet:
if p.Name == "name" and p.Val == network_name:
dvpg_mor = dvpg
if dvpg_mor:
break
# If dvpg_mor is empty we didn't find the named portgroup.
if dvpg_mor is None:
s.disconnect()
module.fail_json(
msg="Could not find the distributed virtual portgroup named"
" %s" % network_name)
# Get the portgroup key
portgroupKey = None
for p in dvpg_mor.PropSet:
if p.Name == "key":
portgroupKey = p.Val
return portgroupKey
def find_dvswitch_uuid(module, s, nfmor, portgroupKey):
# Find a dvswitch's uuid given a portgroup key.
# Function searches all dvswitches in the datacenter to find the switch
# that has the portgroup key.
# Grab the dvswitch uuid and portgroup properties
dvswitch_mors = s._retrieve_properties_traversal(
property_names=['uuid', 'portgroup'],
from_node=nfmor, obj_type='DistributedVirtualSwitch')
dvswitch_mor = None
# Get the dvswitches managed object
for dvswitch in dvswitch_mors:
if dvswitch_mor:
break
for p in dvswitch.PropSet:
if p.Name == "portgroup":
pg_mors = p.Val.ManagedObjectReference
for pg_mor in pg_mors:
if dvswitch_mor:
break
key_mor = s._get_object_properties(
pg_mor, property_names=['key'])
for key in key_mor.PropSet:
if key.Val == portgroupKey:
dvswitch_mor = dvswitch
# Get the switches uuid
dvswitch_uuid = None
for p in dvswitch_mor.PropSet:
if p.Name == "uuid":
dvswitch_uuid = p.Val
return dvswitch_uuid
def spec_singleton(spec, request, vm):
if not spec:
_this = request.new__this(vm._mor)
_this.set_attribute_type(vm._mor.get_attribute_type())
request.set_element__this(_this)
spec = request.new_spec()
return spec
def vmdisk_id(vm, current_datastore_name):
id_list = []
for vm_disk in vm._disks:
if current_datastore_name in vm_disk['descriptor']:
id_list.append(vm_disk['device']['key'])
return id_list
def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name, guest, vm_extra_config, vm_hardware, vm_disk, vm_nic, state, force):
spec = None
changed = False
changes = {}
request = VI.ReconfigVM_TaskRequestMsg()
shutdown = False
memoryHotAddEnabled = bool(vm.properties.config.memoryHotAddEnabled)
cpuHotAddEnabled = bool(vm.properties.config.cpuHotAddEnabled)
cpuHotRemoveEnabled = bool(vm.properties.config.cpuHotRemoveEnabled)
# Change Memory
if vm_hardware['memory_mb']:
if int(vm_hardware['memory_mb']) != vm.properties.config.hardware.memoryMB:
spec = spec_singleton(spec, request, vm)
if vm.is_powered_on():
if force:
# No hot add but force
if not memoryHotAddEnabled:
shutdown = True
elif int(vm_hardware['memory_mb']) < vm.properties.config.hardware.memoryMB:
shutdown = True
else:
# Fail on no hot add and no force
if not memoryHotAddEnabled:
module.fail_json(
msg="memoryHotAdd is not enabled. force is "
"required for shutdown")
# Fail on no force and memory shrink
elif int(vm_hardware['memory_mb']) < vm.properties.config.hardware.memoryMB:
module.fail_json(
msg="Cannot lower memory on a live VM. force is "
"required for shutdown")
# set the new RAM size
spec.set_element_memoryMB(int(vm_hardware['memory_mb']))
changes['memory'] = vm_hardware['memory_mb']
    # ====( Config CPU )====#
if vm_hardware['num_cpus']:
if int(vm_hardware['num_cpus']) != vm.properties.config.hardware.numCPU:
spec = spec_singleton(spec, request, vm)
if vm.is_powered_on():
if force:
# No hot add but force
if not cpuHotAddEnabled:
shutdown = True
elif int(vm_hardware['num_cpus']) < vm.properties.config.hardware.numCPU:
if not cpuHotRemoveEnabled:
shutdown = True
else:
# Fail on no hot add and no force
if not cpuHotAddEnabled:
module.fail_json(
msg="cpuHotAdd is not enabled. force is "
"required for shutdown")
# Fail on no force and cpu shrink without hot remove
elif int(vm_hardware['num_cpus']) < vm.properties.config.hardware.numCPU:
if not cpuHotRemoveEnabled:
module.fail_json(
msg="Cannot lower CPU on a live VM without "
"cpuHotRemove. force is required for shutdown")
spec.set_element_numCPUs(int(vm_hardware['num_cpus']))
changes['cpu'] = vm_hardware['num_cpus']
if len(changes):
if shutdown and vm.is_powered_on():
try:
vm.power_off(sync_run=True)
vm.get_status()
except Exception, e:
module.fail_json(
msg='Failed to shutdown vm %s: %s' % (guest, e)
)
request.set_element_spec(spec)
ret = vsphere_client._proxy.ReconfigVM_Task(request)._returnval
# Wait for the task to finish
task = VITask(ret, vsphere_client)
status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
if status == task.STATE_SUCCESS:
changed = True
elif status == task.STATE_ERROR:
module.fail_json(
msg="Error reconfiguring vm: %s" % task.get_error_message())
if vm.is_powered_off():
try:
vm.power_on(sync_run=True)
except Exception, e:
module.fail_json(
msg='Failed to power on vm %s : %s' % (guest, e)
)
vsphere_client.disconnect()
if changed:
module.exit_json(changed=True, changes=changes)
module.exit_json(changed=False)
def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, vm_extra_config, vm_hardware, vm_disk, vm_nic, vm_hw_version, state):
datacenter = esxi['datacenter']
esxi_hostname = esxi['hostname']
# Datacenter managed object reference
dclist = [k for k,
v in vsphere_client.get_datacenters().items() if v == datacenter]
if dclist:
dcmor=dclist[0]
else:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find datacenter named: %s" % datacenter)
dcprops = VIProperty(vsphere_client, dcmor)
# hostFolder managed reference
hfmor = dcprops.hostFolder._obj
# virtualmachineFolder managed object reference
vmfmor = dcprops.vmFolder._obj
# networkFolder managed object reference
nfmor = dcprops.networkFolder._obj
# Grab the computerResource name and host properties
crmors = vsphere_client._retrieve_properties_traversal(
property_names=['name', 'host'],
from_node=hfmor,
obj_type='ComputeResource')
# Grab the host managed object reference of the esxi_hostname
try:
hostmor = [k for k,
v in vsphere_client.get_hosts().items() if v == esxi_hostname][0]
except IndexError, e:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find esx host named: %s" % esxi_hostname)
# Grab the computerResource managed object reference of the host we are
# creating the VM on.
crmor = None
for cr in crmors:
if crmor:
break
for p in cr.PropSet:
if p.Name == "host":
for h in p.Val.get_element_ManagedObjectReference():
if h == hostmor:
crmor = cr.Obj
break
if crmor:
break
crprops = VIProperty(vsphere_client, crmor)
# Get resource pool managed reference
# Requires that a cluster name be specified.
if resource_pool:
try:
cluster = [k for k,
v in vsphere_client.get_clusters().items() if v == cluster_name][0]
except IndexError, e:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find Cluster named: %s" %
cluster_name)
try:
rpmor = [k for k, v in vsphere_client.get_resource_pools(
from_mor=cluster).items()
if v == resource_pool][0]
except IndexError, e:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find Resource Pool named: %s" %
resource_pool)
else:
rpmor = crprops.resourcePool._obj
# CREATE VM CONFIGURATION
# get config target
request = VI.QueryConfigTargetRequestMsg()
_this = request.new__this(crprops.environmentBrowser._obj)
_this.set_attribute_type(
crprops.environmentBrowser._obj.get_attribute_type())
request.set_element__this(_this)
h = request.new_host(hostmor)
h.set_attribute_type(hostmor.get_attribute_type())
request.set_element_host(h)
config_target = vsphere_client._proxy.QueryConfigTarget(request)._returnval
# get default devices
request = VI.QueryConfigOptionRequestMsg()
_this = request.new__this(crprops.environmentBrowser._obj)
_this.set_attribute_type(
crprops.environmentBrowser._obj.get_attribute_type())
request.set_element__this(_this)
h = request.new_host(hostmor)
h.set_attribute_type(hostmor.get_attribute_type())
request.set_element_host(h)
config_option = vsphere_client._proxy.QueryConfigOption(request)._returnval
default_devs = config_option.DefaultDevice
# add parameters to the create vm task
create_vm_request = VI.CreateVM_TaskRequestMsg()
config = create_vm_request.new_config()
if vm_hw_version:
config.set_element_version(vm_hw_version)
vmfiles = config.new_files()
datastore_name, ds = find_datastore(
module, vsphere_client, vm_disk['disk1']['datastore'], config_target)
vmfiles.set_element_vmPathName(datastore_name)
config.set_element_files(vmfiles)
config.set_element_name(guest)
if 'notes' in vm_extra_config:
config.set_element_annotation(vm_extra_config['notes'])
config.set_element_memoryMB(int(vm_hardware['memory_mb']))
config.set_element_numCPUs(int(vm_hardware['num_cpus']))
config.set_element_guestId(vm_hardware['osid'])
devices = []
# Attach all the hardware we want to the VM spec.
# Add a scsi controller to the VM spec.
disk_ctrl_key = add_scsi_controller(
module, vsphere_client, config, devices, vm_hardware['scsi'])
if vm_disk:
disk_num = 0
disk_key = 0
for disk in sorted(vm_disk.iterkeys()):
try:
datastore = vm_disk[disk]['datastore']
except KeyError:
vsphere_client.disconnect()
module.fail_json(
msg="Error on %s definition. datastore needs to be"
" specified." % disk)
try:
disksize = int(vm_disk[disk]['size_gb'])
                # Convert the disk size to kilobytes
disksize = disksize * 1024 * 1024
except (KeyError, ValueError):
vsphere_client.disconnect()
module.fail_json(msg="Error on %s definition. size needs to be specified as an integer." % disk)
try:
disktype = vm_disk[disk]['type']
except KeyError:
vsphere_client.disconnect()
module.fail_json(
msg="Error on %s definition. type needs to be"
" specified." % disk)
# Add the disk to the VM spec.
add_disk(
module, vsphere_client, config_target, config,
devices, datastore, disktype, disksize, disk_ctrl_key,
disk_num, disk_key)
disk_num = disk_num + 1
disk_key = disk_key + 1
if 'vm_cdrom' in vm_hardware:
cdrom_iso_path = None
cdrom_type = None
try:
cdrom_type = vm_hardware['vm_cdrom']['type']
except KeyError:
vsphere_client.disconnect()
module.fail_json(
msg="Error on %s definition. cdrom type needs to be"
" specified." % vm_hardware['vm_cdrom'])
if cdrom_type == 'iso':
try:
cdrom_iso_path = vm_hardware['vm_cdrom']['iso_path']
except KeyError:
vsphere_client.disconnect()
module.fail_json(
msg="Error on %s definition. cdrom iso_path needs"
" to be specified." % vm_hardware['vm_cdrom'])
# Add a CD-ROM device to the VM.
add_cdrom(module, vsphere_client, config_target, config, devices,
default_devs, cdrom_type, cdrom_iso_path)
if vm_nic:
for nic in sorted(vm_nic.iterkeys()):
try:
nictype = vm_nic[nic]['type']
except KeyError:
vsphere_client.disconnect()
module.fail_json(
msg="Error on %s definition. type needs to be "
" specified." % nic)
try:
network = vm_nic[nic]['network']
except KeyError:
vsphere_client.disconnect()
module.fail_json(
msg="Error on %s definition. network needs to be "
" specified." % nic)
try:
network_type = vm_nic[nic]['network_type']
except KeyError:
vsphere_client.disconnect()
module.fail_json(
msg="Error on %s definition. network_type needs to be "
" specified." % nic)
# Add the nic to the VM spec.
add_nic(module, vsphere_client, nfmor, config, devices,
nictype, network, network_type)
config.set_element_deviceChange(devices)
create_vm_request.set_element_config(config)
folder_mor = create_vm_request.new__this(vmfmor)
folder_mor.set_attribute_type(vmfmor.get_attribute_type())
create_vm_request.set_element__this(folder_mor)
rp_mor = create_vm_request.new_pool(rpmor)
rp_mor.set_attribute_type(rpmor.get_attribute_type())
create_vm_request.set_element_pool(rp_mor)
host_mor = create_vm_request.new_host(hostmor)
host_mor.set_attribute_type(hostmor.get_attribute_type())
create_vm_request.set_element_host(host_mor)
# CREATE THE VM
taskmor = vsphere_client._proxy.CreateVM_Task(create_vm_request)._returnval
task = VITask(taskmor, vsphere_client)
task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
if task.get_state() == task.STATE_ERROR:
vsphere_client.disconnect()
module.fail_json(msg="Error creating vm: %s" %
task.get_error_message())
else:
# We always need to get the vm because we are going to gather facts
vm = vsphere_client.get_vm_by_name(guest)
    # VM was created. If any extra config options were specified, set
    # them here, disconnect from vcenter, then exit.
if vm_extra_config:
vm.set_extra_config(vm_extra_config)
# Power on the VM if it was requested
power_state(vm, state, True)
vsphere_client.disconnect()
module.exit_json(
ansible_facts=gather_facts(vm),
changed=True,
changes="Created VM %s" % guest)
def delete_vm(vsphere_client, module, guest, vm, force):
try:
if vm.is_powered_on():
if force:
try:
vm.power_off(sync_run=True)
vm.get_status()
except Exception, e:
module.fail_json(
msg='Failed to shutdown vm %s: %s' % (guest, e))
else:
module.fail_json(
                    msg='You must either shut the vm down first or '
                    'use force.')
# Invoke Destroy_Task
request = VI.Destroy_TaskRequestMsg()
_this = request.new__this(vm._mor)
_this.set_attribute_type(vm._mor.get_attribute_type())
request.set_element__this(_this)
ret = vsphere_client._proxy.Destroy_Task(request)._returnval
task = VITask(ret, vsphere_client)
# Wait for the task to finish
status = task.wait_for_state(
[task.STATE_SUCCESS, task.STATE_ERROR])
if status == task.STATE_ERROR:
vsphere_client.disconnect()
module.fail_json(msg="Error removing vm: %s %s" %
task.get_error_message())
module.exit_json(changed=True, changes="VM %s deleted" % guest)
except Exception, e:
module.fail_json(
msg='Failed to delete vm %s : %s' % (guest, e))
def power_state(vm, state, force):
"""
Correctly set the power status for a VM determined by the current and
requested states. force is forceful
"""
power_status = vm.get_status()
check_status = ' '.join(state.split("_")).upper()
# Need Force
if not force and power_status in [
'SUSPENDED', 'POWERING ON',
'RESETTING', 'BLOCKED ON MSG'
]:
return "VM is in %s power state. Force is required!" % power_status
# State is already true
if power_status == check_status:
return False
else:
try:
if state == 'powered_off':
vm.power_off(sync_run=True)
elif state == 'powered_on':
vm.power_on(sync_run=True)
elif state == 'restarted':
if power_status in ('POWERED ON', 'POWERING ON', 'RESETTING'):
vm.reset(sync_run=False)
else:
return "Cannot restart VM in the current state %s" \
% power_status
return True
except Exception, e:
return e
return False
def gather_facts(vm):
"""
Gather facts for VM directly from vsphere.
"""
vm.get_properties()
facts = {
'module_hw': True,
'hw_name': vm.properties.name,
'hw_guest_full_name': vm.properties.config.guestFullName,
'hw_guest_id': vm.properties.config.guestId,
'hw_product_uuid': vm.properties.config.uuid,
'hw_processor_count': vm.properties.config.hardware.numCPU,
'hw_memtotal_mb': vm.properties.config.hardware.memoryMB,
}
ifidx = 0
for entry in vm.properties.config.hardware.device:
if not hasattr(entry, 'macAddress'):
continue
factname = 'hw_eth' + str(ifidx)
facts[factname] = {
'addresstype': entry.addressType,
'label': entry.deviceInfo.label,
'macaddress': entry.macAddress,
'macaddress_dash': entry.macAddress.replace(':', '-'),
'summary': entry.deviceInfo.summary,
}
ifidx += 1
return facts
class DefaultVMConfig(object):
"""
Shallow and deep dict comparison for interfaces
"""
def __init__(self, check_dict, interface_dict):
self.check_dict, self.interface_dict = check_dict, interface_dict
self.set_current, self.set_past = set(
check_dict.keys()), set(interface_dict.keys())
self.intersect = self.set_current.intersection(self.set_past)
self.recursive_missing = None
def shallow_diff(self):
return self.set_past - self.intersect
def recursive_diff(self):
if not self.recursive_missing:
self.recursive_missing = []
for key, value in self.interface_dict.items():
if isinstance(value, dict):
for k, v in value.items():
if k in self.check_dict[key]:
if not isinstance(self.check_dict[key][k], v):
try:
if v == int:
self.check_dict[key][k] = int(self.check_dict[key][k])
elif v == basestring:
self.check_dict[key][k] = str(self.check_dict[key][k])
else:
raise ValueError
except ValueError:
self.recursive_missing.append((k, v))
else:
self.recursive_missing.append((k, v))
return self.recursive_missing
def config_check(name, passed, default, module):
"""
Checks that the dict passed for VM configuration matches the required
interface declared at the top of __main__
"""
diff = DefaultVMConfig(passed, default)
if len(diff.shallow_diff()):
module.fail_json(
msg="Missing required key/pair [%s]. %s must contain %s" %
(', '.join(diff.shallow_diff()), name, default))
if diff.recursive_diff():
module.fail_json(
msg="Config mismatch for %s on %s" %
(name, diff.recursive_diff()))
return True
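# Illustrative example of the dict shapes config_check() expects when creating
# a VM. All values below are made up; the authoritative interface is defined
# by the proto_* dicts in main().
#
#   vm_hardware = {'memory_mb': 2048, 'num_cpus': 2,
#                  'scsi': 'paravirtual', 'osid': 'centos64Guest'}
#   vm_disk = {'disk1': {'datastore': 'datastore1', 'size_gb': 20, 'type': 'thin'}}
#   vm_nic = {'nic1': {'type': 'vmxnet3', 'network': 'VM Network',
#                      'network_type': 'standard'}}
#   esxi = {'datacenter': 'DC1', 'hostname': 'esxi01.example.com'}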
def main():
vm = None
proto_vm_hardware = {
'memory_mb': int,
'num_cpus': int,
'scsi': basestring,
'osid': basestring
}
proto_vm_disk = {
'disk1': {
'datastore': basestring,
'size_gb': int,
'type': basestring
}
}
proto_vm_nic = {
'nic1': {
'type': basestring,
'network': basestring,
'network_type': basestring
}
}
proto_esxi = {
'datacenter': basestring,
'hostname': basestring
}
module = AnsibleModule(
argument_spec=dict(
vcenter_hostname=dict(required=True, type='str'),
username=dict(required=True, type='str'),
password=dict(required=True, type='str'),
state=dict(
required=False,
choices=[
'powered_on',
'powered_off',
'present',
'absent',
'restarted',
'reconfigured'
],
default='present'),
vmware_guest_facts=dict(required=False, choices=BOOLEANS),
guest=dict(required=True, type='str'),
vm_disk=dict(required=False, type='dict', default={}),
vm_nic=dict(required=False, type='dict', default={}),
vm_hardware=dict(required=False, type='dict', default={}),
vm_extra_config=dict(required=False, type='dict', default={}),
vm_hw_version=dict(required=False, default=None, type='str'),
resource_pool=dict(required=False, default=None, type='str'),
cluster=dict(required=False, default=None, type='str'),
force=dict(required=False, choices=BOOLEANS, default=False),
esxi=dict(required=False, type='dict', default={}),
),
supports_check_mode=False,
mutually_exclusive=[['state', 'vmware_guest_facts']],
required_together=[
['state', 'force'],
[
'state',
'vm_disk',
'vm_nic',
'vm_hardware',
'esxi'
],
['resource_pool', 'cluster']
],
)
if not HAS_PYSPHERE:
module.fail_json(msg='pysphere module required')
vcenter_hostname = module.params['vcenter_hostname']
username = module.params['username']
password = module.params['password']
vmware_guest_facts = module.params['vmware_guest_facts']
state = module.params['state']
guest = module.params['guest']
force = module.params['force']
vm_disk = module.params['vm_disk']
vm_nic = module.params['vm_nic']
vm_hardware = module.params['vm_hardware']
vm_extra_config = module.params['vm_extra_config']
vm_hw_version = module.params['vm_hw_version']
esxi = module.params['esxi']
resource_pool = module.params['resource_pool']
cluster = module.params['cluster']
# CONNECT TO THE SERVER
viserver = VIServer()
try:
viserver.connect(vcenter_hostname, username, password)
except VIApiException, err:
module.fail_json(msg="Cannot connect to %s: %s" %
(vcenter_hostname, err))
# Check if the VM exists before continuing
try:
vm = viserver.get_vm_by_name(guest)
except Exception:
pass
if vm:
# Run for facts only
if vmware_guest_facts:
try:
module.exit_json(ansible_facts=gather_facts(vm))
except Exception, e:
module.fail_json(
msg="Fact gather failed with exception %s" % e)
# Power Changes
elif state in ['powered_on', 'powered_off', 'restarted']:
state_result = power_state(vm, state, force)
# Failure
if isinstance(state_result, basestring):
module.fail_json(msg=state_result)
else:
module.exit_json(changed=state_result)
        # Just check that it is there
elif state == 'present':
module.exit_json(changed=False)
        # Reconfigure the VM (reconfigure_vm fails if required params are missing)
elif state == 'reconfigured':
reconfigure_vm(
vsphere_client=viserver,
vm=vm,
module=module,
esxi=esxi,
resource_pool=resource_pool,
cluster_name=cluster,
guest=guest,
vm_extra_config=vm_extra_config,
vm_hardware=vm_hardware,
vm_disk=vm_disk,
vm_nic=vm_nic,
state=state,
force=force
)
elif state == 'absent':
delete_vm(
vsphere_client=viserver,
module=module,
guest=guest,
vm=vm,
force=force)
# VM doesn't exist
else:
# Fail for fact gather task
if vmware_guest_facts:
module.fail_json(
msg="No such VM %s. Fact gathering requires an existing vm"
% guest)
if state in ['restarted', 'reconfigured']:
module.fail_json(
msg="No such VM %s. States ["
"restarted, reconfigured] required an existing VM" % guest)
elif state == 'absent':
module.exit_json(changed=False, msg="vm %s not present" % guest)
# Create the VM
elif state in ['present', 'powered_off', 'powered_on']:
# Check the guest_config
config_check("vm_disk", vm_disk, proto_vm_disk, module)
config_check("vm_nic", vm_nic, proto_vm_nic, module)
config_check("vm_hardware", vm_hardware, proto_vm_hardware, module)
config_check("esxi", esxi, proto_esxi, module)
create_vm(
vsphere_client=viserver,
module=module,
esxi=esxi,
resource_pool=resource_pool,
cluster_name=cluster,
guest=guest,
vm_extra_config=vm_extra_config,
vm_hardware=vm_hardware,
vm_disk=vm_disk,
vm_nic=vm_nic,
vm_hw_version=vm_hw_version,
state=state
)
viserver.disconnect()
module.exit_json(
changed=False,
vcenter=vcenter_hostname)
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
| axilleas/ansible-modules-core | cloud/vmware/vsphere_guest.py | Python | gpl-3.0 | 42,629 |
import hashlib
import logging
import os
import tempfile
import pickle
import random
import shutil
import string
import sys
class BlockMD5(object):
def __init__(self):
return None
    def compare_blocks(self, offset, name1, name2):
'''compare two files byte-by-byte'''
info = os.stat(name1)
fd1 = os.open(name1, os.O_RDONLY)
fd2 = os.open(name2, os.O_RDONLY)
os.lseek(fd1, offset, os.SEEK_SET)
os.lseek(fd2, offset, os.SEEK_SET)
buf1 = os.read(fd1, info.st_blksize)
buf2 = os.read(fd2, info.st_blksize)
os.close(fd1)
os.close(fd2)
for i in range(0, info.st_blksize):
if buf1[i] != buf2[i]:
print("Mismatch at byte_num '{0}': {1}, {2}".format(
i, buf1[i], buf2[i]))
return
def create_map(self, name):
'''Create a per block md5sum of a file
and return a dict of block->md5hashes'''
info = os.stat(name)
left = info.st_size
fd = os.open(name, os.O_RDONLY)
offset = 0
mapd = {}
while left > 0:
buf = os.read(fd, info.st_blksize)
left -= len(buf)
h5 = hashlib.md5(buf)
mapd[offset] = h5.hexdigest()
offset += len(buf)
os.close(fd)
return mapd
def validate_map(self, name, mapd):
'''Compares the block md5sums to each block of the file'''
failed = []
info = os.stat(name)
fd = os.open(name, os.O_RDONLY)
# O_DIRECT didn't work on my test system, but worked on a GPFS filesystem
#fd = os.open(name, os.O_RDONLY+os.O_DIRECT)
left = info.st_size
offset = 0
while left > 0:
buf = os.read(fd, info.st_blksize)
left -= len(buf)
h5 = hashlib.md5(buf)
digest = h5.hexdigest()
if digest != mapd[offset]:
failed.append((offset, digest, mapd[offset]))
offset += len(buf)
os.close(fd)
if len(failed) > 0:
return False, failed
else:
return True
class FileMD5(object):
def __init__(self, loglvl='info'):
if loglvl == 'verbose':
logging.basicConfig(format='%(message)s', level=logging.DEBUG)
else:
logging.basicConfig(format='%(message)s', level=logging.INFO)
return None
def create_md5(self, name):
with open(name, 'rb') as f:
md5sum = hashlib.md5(f.read()).hexdigest()
return md5sum
def validate_md5(self, name, md5sum):
logging.debug("DEBUG: FileMD5().validate_md5({0}, {1})".format(name, md5sum))
with open(name, 'rb') as f:
current_md5 = hashlib.md5(f.read()).hexdigest()
if current_md5 != md5sum:
return False, (current_md5, md5sum)
else:
return True
class FileTree(object):
def __init__(self):
'''Set defaults'''
self.aligned = True
self.dirs_per_level = 1
self.files_per_dir = 1
self.fixed_size = False
self.loglvl = 'info'
self.max_size = 8192
self.num_levels = 1
self.stats = False
self.suffix = ''
self.topdir = None
return None
def set_config(self, kwargs):
'''Set class configuration'''
for k, v in kwargs.items():
setattr(self, k, v)
# get the blocksize
vfsstats = os.statvfs(self.topdir)
self.bufsize = vfsstats.f_bsize
# set logging
if self.loglvl == 'verbose' or self.loglvl == 'debug':
logging.basicConfig(format='%(message)s', level=logging.DEBUG)
else:
logging.basicConfig(format='%(message)s', level=logging.INFO)
return
def _free_space(self, num_bytes):
'''Checks to see if there is enough space on the filesystem'''
vfsstats = os.statvfs(os.path.dirname(os.path.abspath(self.topdir)))
        # free bytes = blocks available to unprivileged users * fragment size
        bytes_free = vfsstats.f_bavail * vfsstats.f_frsize
logging.debug("DEBUG: Bytes_to_write: {0}, Bytes_Free: {1}".format(
num_bytes, bytes_free))
if num_bytes > bytes_free:
return False
return True
def _path_exists(self, filepath):
'''Checks to see if the path exists'''
if not os.path.isdir(os.path.dirname(filepath)):
return False
return True
def _sub_tree(self, path, levels, dirs_per_level):
'''should be called recursively to generate names of levels of dirs'''
for n in range(dirs_per_level):
dirname = "L{0}D{1}".format(levels, n)
newdir = os.path.join(path, dirname)
self.dirs.append(newdir)
for nl in range(levels):
self._sub_tree(newdir, nl, dirs_per_level)
def gen_dir_array(self, topdir, levels, dirs_per_level):
'''Generate the directory hierarchy array all at once
I won't lie, I'm basically recreating (poor attempt anyway)
fdtree in Python:
https://computing.llnl.gov/?set=code&page=sio_downloads
'''
# make an array of directory paths
self.dirs = []
# this will start recursively calling itself until
# you've reached the end (num_levels)
self._sub_tree(topdir, levels, dirs_per_level)
return
def queue_walk_tree(self, path, tasks=2):
'''import modules we wouldn't have normally used'''
#import multiprocessing
return
def random_name(self, size=10, chars=string.ascii_lowercase + string.digits):
'''return a random name'''
rname = ''.join(random.choice(chars) for x in range(size))
rname += self.suffix
return rname
def serial_create_dir_tree(self):
'''Create a directory tree'''
for d in self.dirs:
if not os.path.exists(d):
os.makedirs(d)
return
def serial_delete_dirs(self):
'''Delete the FileTree root dir'''
for d in self.dirs:
if os.path.exists(d):
shutil.rmtree(d)
return
def serial_populate_dir_tree(self):
'''Write data files in serial to the directory tree'''
for d in self.dirs:
for f in range(self.files_per_dir):
name = self.random_name()
filename = os.path.join(d, name)
result, err = self.write_file(filename)
if not result:
print(err)
break
return
def walk_tree_generator(self, path):
'''
Returns a generator that can be used to walk a directory
tree
You can then make a list of all files via:
files = []
for dir in walk:
for f in dir[2]:
files.append("{0}/{1}".format(dir[0], f))
Then use that for whatever...
'''
walk = os.walk(path)
return walk
def write_file(self, filename):
'''Create a number of random files in a directory tree of varying size'''
# the number of bytes written is a multiple of the fs blocksize
if self.fixed_size:
num_bytes = self.max_size
elif self.aligned and not self.fixed_size:
num_bytes = random.randrange(self.bufsize,
stop=self.max_size, step=self.bufsize)
# pick a random bytesize between 0 and max_size
else:
num_bytes = random.randrange(1, self.max_size)
# check to see if enough space is available
if not self._free_space(num_bytes):
return False, "Not enough space to write data."
# check to see if path exists
if not self._path_exists(filename):
return False, "Directory does not exist."
# figure out how many chunks we need to write
bytes_left = num_bytes
# write out the random data
logging.debug("DEBUG: {0}.{1}(): Writing file: {2}".format(
self.__class__.__name__, self.write_file.__name__, filename))
with open(filename, 'wb') as f:
try:
while bytes_left > 0:
if bytes_left < self.bufsize:
f.write(os.urandom(bytes_left))
bytes_left -= self.bufsize
else:
f.write(os.urandom(self.bufsize))
bytes_left -= self.bufsize
except IOError as ioe:
print("IOError: {0}".format(ioe))
print("We bail on IO Errors...")
sys.exit(1)
return True, "Success"
# for when you don't want to use the FileTree class,
# and simply want to create a random file
def create_random_file(name, numbytes):
'''writes out a file full of random data'''
path = os.path.dirname(os.path.abspath(name))
vfsstats = os.statvfs(path)
    # don't write the file if there isn't enough free space on the filesystem
    if numbytes > (vfsstats.f_bavail * vfsstats.f_frsize):
print("Not enough space to write data.")
return
bufsize = vfsstats.f_bsize
if numbytes % bufsize != 0:
print("Number of bytes must be a multiple of blocksize ({0})".format(
bufsize))
return
bytes_left = numbytes
with open(name, 'wb') as f:
while bytes_left > 0:
f.write(os.urandom(bufsize))
bytes_left -= bufsize
return
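# Minimal usage sketch (not part of the original module). The file name, size
# and location below are arbitrary example values; a real caller would pick
# its own.
if __name__ == '__main__':
    demo_dir = tempfile.gettempdir()
    demo_file = os.path.join(demo_dir, 'iointegrity_demo.bin')
    blocksize = os.statvfs(demo_dir).f_bsize
    # write 4 blocks of random data, then checksum and verify it two ways
    create_random_file(demo_file, blocksize * 4)
    fmd5 = FileMD5()
    whole_file_sum = fmd5.create_md5(demo_file)
    print(fmd5.validate_md5(demo_file, whole_file_sum))   # expect True
    bmd5 = BlockMD5()
    per_block_map = bmd5.create_map(demo_file)
    print(bmd5.validate_map(demo_file, per_block_map))    # expect True
    os.remove(demo_file)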
| stevec7/iointegrity | iointegrity/iotools.py | Python | bsd-3-clause | 9,607 |
from django.urls import path
from . import views
urlpatterns = [
path('openid/login/', views.login, name="openid_login"),
path('openid/callback/', views.callback, name='openid_callback'),
]
| bittner/django-allauth | allauth/socialaccount/providers/openid/urls.py | Python | mit | 201 |
def http_datetime( dt=None ):
if not dt:
import datetime
dt = datetime.datetime.utcnow()
else:
try:
dt = dt - dt.utcoffset()
except:
pass # no timezone offset, just assume already in UTC
s = dt.strftime('%a, %d %b %Y %H:%M:%S GMT')
return s
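# Example sketch: round-tripping an HTTP date with the two helpers below
# (values are illustrative).
#
#   stamp = http_datetime()   # e.g. 'Tue, 15 Nov 1994 08:12:31 GMT'
#   dt = parse_http_datetime('Tue, 15 Nov 1994 08:12:31 GMT')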
def parse_http_datetime( datestring, utc_tzinfo=None, strict=False ):
import re, datetime
m = re.match(r'(?P<DOW>[a-z]+), (?P<D>\d+) (?P<MON>[a-z]+) (?P<Y>\d+) (?P<H>\d+):(?P<M>\d+):(?P<S>\d+(\.\d+)?) (?P<TZ>\w+)$',
datestring, re.IGNORECASE)
if not m and not strict:
m = re.match(r'(?P<DOW>[a-z]+) (?P<MON>[a-z]+) (?P<D>\d+) (?P<H>\d+):(?P<M>\d+):(?P<S>\d+) (?P<Y>\d+)$',
datestring, re.IGNORECASE)
if not m:
m = re.match(r'(?P<DOW>[a-z]+), (?P<D>\d+)-(?P<MON>[a-z]+)-(?P<Y>\d+) (?P<H>\d+):(?P<M>\d+):(?P<S>\d+(\.\d+)?) (?P<TZ>\w+)$',
datestring, re.IGNORECASE)
if not m:
raise ValueError('HTTP date is not correctly formatted')
try:
tz = m.group('TZ').upper()
except:
tz = 'GMT'
if tz not in ('GMT','UTC','0000','00:00'):
raise ValueError('HTTP date is not in GMT timezone')
monname = m.group('MON').upper()
mdict = {'JAN':1, 'FEB':2, 'MAR':3, 'APR':4, 'MAY':5, 'JUN':6,
'JUL':7, 'AUG':8, 'SEP':9, 'OCT':10, 'NOV':11, 'DEC':12}
month = mdict.get(monname)
if not month:
raise ValueError('HTTP date has an unrecognizable month')
y = int(m.group('Y'))
if y < 100:
century = datetime.datetime.utcnow().year / 100
if y < 50:
y = century * 100 + y
else:
y = (century - 1) * 100 + y
d = int(m.group('D'))
hour = int(m.group('H'))
minute = int(m.group('M'))
try:
second = int(m.group('S'))
except:
second = float(m.group('S'))
dt = datetime.datetime( y, month, d, hour, minute, second, tzinfo=utc_tzinfo )
    return dt
| Dorwido/wowapi | wowapi/utilities.py | Python | mit | 2,021 |
# coding: utf-8
# Constants
USER = 'user'
XIMPIA = 'ximpia'
TWITTER = 'twitter'
FACEBOOK = 'facebook'
LINKEDIN = 'linkedin'
GOOGLE = 'google'
EMAIL = 'email'
SMS = 'sms'
READ = 'read'
UPDATE = 'update'
NET = 'net'
DELETE = 'delete'
WF = 'wf'
ZONE = 'zone'
GROUP = 'group'
CONDITION = 'condition'
CONDITIONS = 'conditions'
MENU_NAME = 'menuName'
VIEW = 'view'
SEP = 'sep'
# Params
PARAM_COMMTYPE = 'COMMTYPE'
PARAM_ICON = 'ICON'
PARAM_CHANNEL = 'CHANNEL'
PARAM_META_TYPE = 'META_TYPE'
PARAM_SETTINGS = 'SETTINGS'
PARAM_META = 'META'
PARAM_META_WF = 'META_WF'
PARAM_META_APP = 'META_APP'
PARAM_META_SERVICE = 'META_SERVICE'
PARAM_META_VIEW = 'META_VIEW'
PARAM_MEDIA_TYPE = 'MEDIA_TYPE'
CONDITION_ACTION_RENDER = 'render'
| Ximpia/ximpia | ximpia/xpcore/constants.py | Python | apache-2.0 | 760 |
"""
Electron transport chain (Oxidative phosphorylation) reaction rates
and mitochondrial ion fluxes
"""
import common_const as cc
from config import USE_NUMPY_FUNCS, USE_NUMBA
if USE_NUMPY_FUNCS:
from numpy import sqrt, exp
DEL_PSI_B = +50.0  # Mitochondrial boundary potential (mV)
OFFSET_POTENTIAL = +91.0  # (mV) for mitochondrial uniporter
K_RES_F = 5.77E13
FADH2 = 1.24 # FADH2 concentration (mM)
FAD = 0.01 # FAD concentration (mM)
DELTA_PH = -0.6 # pH gradient of the mitochondria
G_H = 1E-8
# Pre-calculated values
E3BFRT = exp(3 * cc.F_OVER_RT * DEL_PSI_B)
E6BFRT = exp(6 * cc.F_OVER_RT * DEL_PSI_B)
EAFFRT = K_RES_F * sqrt(FADH2 / FAD) # FAD factor
MUH_OFFSET = -2.303 * cc.RT_OVER_F * DELTA_PH # Proton driving force by pH difference
def _get_denom(r1, r2, r3, e1, e2, e3):
"""Common pattern of the denominators of OXPHOS rates"""
return (1 + r1 * e1) * e2 + (r2 + r3 * e1) * e3
def _get_vo2_vhe_vhf(muh, nadh):
"""Computes and returns vO2, vHe, vHeF"""
RA, RB, RC1, RC2 = 6.39E-13, 1.76E-16, 2.66E-22, 8.63E-30
R1, R2, R3 = 2.08E-18, 1.73E-9, 1.06E-26
RHO_RES, K_RES, RHO_RES_F = 1E-4, 1.35E18, 3.75E-4
G = 0.85
eg6hfrt = exp(G * 6 * cc.F_OVER_RT * muh)
nad = cc.MITO_NAD_TOTAL - nadh
eafrt = K_RES * sqrt(nadh / nad) # NADH factor
common_vo2_vhe = RHO_RES / _get_denom(R1, R2, R3, eafrt, E6BFRT, eg6hfrt)
numerator_vo2_vhe = RA * (EAFFRT - eg6hfrt)
v_o2 = (0.5 * common_vo2_vhe * (numerator_vo2_vhe + eafrt * (RC1 * E6BFRT + RC2 * eg6hfrt)))
v_he = 6 * common_vo2_vhe * (numerator_vo2_vhe - RB * eg6hfrt)
v_hef = (4 * RHO_RES_F / _get_denom(R1, R2, R3, EAFFRT, E6BFRT, eg6hfrt) *
(RA * EAFFRT - (RA + RB) * eg6hfrt))
return v_o2, v_he, v_hef
def _get_vatpase_vhu(muh, adp_mt):
PA, PB, PC1, PC2 = 1.66E-8, 3.37E-10, 9.65E-17, 4.59E-17
P1, P2, P3 = 1.35E-8, 7.74E-7, 6.65E-15
RHO_F1, K_F1 = 0.05, 1.71E6
PI = 2.0 # Inorganic phosphate (mM)
atp_mt = cc.MITO_ATP_TOTAL - adp_mt
eaf1frt = (K_F1 / PI) * (atp_mt / adp_mt)
e3mufrt = exp(3 * cc.F_OVER_RT * muh)
common = -RHO_F1 / _get_denom(P1, P2, P3, eaf1frt, E3BFRT, e3mufrt)
v_atpase = common * ((100 * PA + PC1 * E3BFRT) * eaf1frt - (PA + PC2 * eaf1frt) * e3mufrt)
v_hu = 3 * common * (100 * PA * (1 + eaf1frt) - (PA + PB) * e3mufrt)
return v_atpase, v_hu
def _get_v_ant(atp_in, adp_mt, dpsi):
"""
Computes and updates the Reaction rate of ANT
(Adenine nucleotide translocator) (mM/ms)
Used by dATP/dt, dADPmito/dt, and dDeltaPsi/dt
"""
VANT_MAX = 5E-3 # Maximum rate of ANT (mM/ms)
H_ANT = 0.5
adp_in = cc.CYTO_ATP_TOTAL - atp_in
atp_mt = cc.MITO_ATP_TOTAL - adp_mt
cyt_ratio = atp_in / adp_in
mit_ratio = adp_mt / atp_mt
vfrt = cc.F_OVER_RT * dpsi
numerator = 1 - (0.25 * 0.45 * cyt_ratio) / (0.17 * 0.025 * mit_ratio)
denom1 = 1 + 0.25 / 0.225 * cyt_ratio * exp(-H_ANT * vfrt)
denom2 = 1 + 0.45 / 0.025 * mit_ratio
return VANT_MAX * 0.75 * numerator / (exp(vfrt) * denom1 * denom2)
def oxphos_rates(nadh, atp_in, adp_mt, dpsi):
"""
Get reaction rates of oxidative phosphorylation
"""
muh = MUH_OFFSET + dpsi
v_hleak = G_H * muh
v_o2, v_he, v_hf = _get_vo2_vhe_vhf(muh, nadh)
v_atpase, v_hu = _get_vatpase_vhu(muh, adp_mt)
v_ant = _get_v_ant(atp_in, adp_mt, dpsi)
d_dpsi = cc.C_MITO_INV * (v_he + v_hf - v_hu - v_ant - v_hleak)
return v_o2, v_atpase, v_ant, d_dpsi
def get_d_dpsi(d_dpsi, v_imac, v_naca, v_uni):
return d_dpsi - cc.C_MITO_INV * (v_imac + v_naca + 2 * v_uni)
def get_d_adp_mt(v_ant, v_atpase, v_sl):
return v_ant - v_atpase - v_sl
if USE_NUMBA:
from numba import vectorize
_get_denom = vectorize(_get_denom)
_get_vo2_vhe_vhf = vectorize(_get_vo2_vhe_vhf)
_get_vatpase_vhu = vectorize(_get_vatpase_vhu)
_get_v_ant = vectorize(_get_v_ant)
get_oxphos_rates = vectorize(oxphos_rates)
get_d_dpsi = vectorize(get_d_dpsi)
get_d_adp_mt = vectorize(get_d_adp_mt)
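# Illustrative sketch only (not from the original model): evaluate the OXPHOS
# rates at a single made-up state point to show the call signature. Realistic
# state values depend on common_const and the rest of the model.
if __name__ == '__main__':
    example_nadh = 0.5      # mitochondrial NADH (mM), assumed
    example_atp_in = 6.5    # cytosolic ATP (mM), assumed
    example_adp_mt = 0.05   # mitochondrial ADP (mM), assumed
    example_dpsi = 150.0    # membrane potential (mV), assumed
    v_o2, v_atpase, v_ant, d_dpsi = oxphos_rates(
        example_nadh, example_atp_in, example_adp_mt, example_dpsi)
    print(v_o2, v_atpase, v_ant, d_dpsi)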
| SosirisTseng/hearts-of-silicon | hos/ode/mito/oxphos_cortassa2006.py | Python | mit | 4,028 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import os.path as op
import math
import sys
import logging
from itertools import groupby, islice, cycle, izip
from Bio import SeqIO
from jcvi.apps.base import OptionParser, ActionDispatcher, sh, debug, need_update, \
mkdir, popen
debug()
FastaExt = ("fasta", "fa", "fna", "cds", "pep", "faa", "fsa", "seq", "nt", "aa")
FastqExt = ("fastq", "fq")
class BaseFile (object):
def __init__(self, filename):
self.filename = filename
if filename:
logging.debug("Load file `{0}`".format(filename))
class LineFile (BaseFile, list):
"""
Generic file parser for line-based files
"""
def __init__(self, filename, comment=None, load=False):
super(LineFile, self).__init__(filename)
if load:
fp = must_open(filename)
self.lines = [l.strip() for l in fp if l[0]!=comment]
logging.debug("Load {0} lines from `{1}`.".\
format(len(self.lines), filename))
class DictFile (BaseFile, dict):
"""
Generic file parser for multi-column files, keyed by a particular index.
"""
def __init__(self, filename, keypos=0, valuepos=1, delimiter=None,
strict=True, cast=None):
super(DictFile, self).__init__(filename)
fp = must_open(filename)
ncols = max(keypos, valuepos) + 1
thiscols = 0
for lineno, row in enumerate(fp):
row = row.rstrip()
atoms = row.split(delimiter)
thiscols = len(atoms)
if thiscols < ncols:
action = "Aborted" if strict else "Skipped"
msg = "Must contain >= {0} columns. {1}.\n".format(ncols, action)
msg += " --> Line {0}: {1}".format(lineno + 1, row)
logging.error(msg)
if strict:
sys.exit(1)
else:
continue
key = atoms[keypos]
value = atoms[valuepos] if (valuepos is not None) else atoms
if cast:
value = cast(value)
self[key] = value
assert thiscols, "File empty"
self.ncols = thiscols
logging.debug("Imported {0} records from `{1}`.".\
format(len(self), filename))
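# Example sketch (file name is illustrative): build a {column-0: column-1}
# lookup from a two-column tab-delimited file.
#
#   mapping = DictFile("aliases.tsv", keypos=0, valuepos=1)
#   mapping["geneA"]        # -> the second column value for geneA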
class SetFile (BaseFile, set):
def __init__(self, filename, column=-1, delimiter=None):
super(SetFile, self).__init__(filename)
fp = open(filename)
for row in fp:
if not row.strip():
continue
keys = [x.strip() for x in row.split(delimiter)]
if column >= 0:
keys = [keys[column]]
self.update(keys)
class FileShredder (object):
"""
Same as rm -f *
"""
def __init__(self, filelist, verbose=True):
filelist = [x for x in filelist if x and op.exists(x)]
cmd = "rm -rf {0}".format(" ".join(filelist))
sh(cmd, log=verbose)
class FileMerger (object):
"""
Same as cat * > filename
"""
def __init__(self, filelist, outfile):
self.filelist = filelist
self.outfile = outfile
self.ingz = filelist[0].endswith(".gz")
self.outgz = outfile.endswith(".gz")
def merge(self, checkexists=False):
outfile = self.outfile
if checkexists and not need_update(self.filelist, outfile):
logging.debug("File `{0}` exists. Merge skipped.".format(outfile))
return
files = " ".join(self.filelist)
ingz, outgz = self.ingz, self.outgz
if ingz and outgz: # can merge gz files directly
cmd = "cat {0} > {1}".format(files, outfile)
sh(cmd)
else:
cmd = "zcat" if self.ingz else "cat"
cmd += " " + files
sh(cmd, outfile=outfile)
return outfile
class FileSplitter (object):
def __init__(self, filename, outputdir=None, format="fasta", mode="cycle"):
self.filename = filename
self.outputdir = outputdir
self.mode = mode
self.format = format
logging.debug("format is %s" % format)
guessedformat = self._guess_format(filename)
if format != guessedformat:
logging.warn("warn: format guessed from suffix - {0}"\
.format(guessedformat))
if format in ("fasta", "fastq"):
self.klass = "seqio"
else:
self.klass = "txt"
mkdir(outputdir)
def _open(self, filename):
if self.klass == "seqio":
handle = SeqIO.parse(open(filename), self.format)
else:
handle = open(filename)
return handle
@property
def num_records(self):
handle = self._open(self.filename)
return sum(1 for x in handle)
def _guess_format(self, filename):
root, ext = op.splitext(filename)
ext = ext.strip(".")
if ext in FastaExt:
format = "fasta"
elif ext in FastqExt:
format = "fastq"
else:
format = "txt"
return format
def _batch_iterator(self, N=1):
"""Returns N lists of records.
This can be used on any iterator, for example to batch up
SeqRecord objects from Bio.SeqIO.parse(...), or to batch
Alignment objects from Bio.AlignIO.parse(...), or simply
lines from a file handle.
This is a generator function, and it returns lists of the
entries from the supplied iterator. Each list will have
batch_size entries, although the final list may be shorter.
"""
        batch_size = int(math.ceil(self.num_records / float(N)))
handle = self._open(self.filename)
while True:
batch = list(islice(handle, batch_size))
if not batch:
break
yield batch
@classmethod
def get_names(cls, filename, N):
root, ext = op.splitext(op.basename(filename))
names = []
pad0 = len(str(int(N - 1)))
for i in xrange(N):
name = "{0}_{1:0{2}d}{3}".format(root, i, pad0, ext)
names.append(name)
return names
def write(self, fw, batch):
if self.klass == "seqio":
count = SeqIO.write(batch, fw, self.format)
else:
for line in batch:
fw.write(line)
count = len(batch)
return count
def split(self, N, force=False):
"""
        There are three modes of splitting the records:
        - batch: split the records sequentially into N chunks
        - cycle: place each record into the split files in round-robin order
        - optimal: balance the split files so they have roughly equal total length
        Use `cycle` or `optimal` if record lengths are unevenly distributed
"""
mode = self.mode
assert mode in ("batch", "cycle", "optimal")
logging.debug("set split mode=%s" % mode)
self.names = self.__class__.get_names(self.filename, N)
if self.outputdir:
self.names = [op.join(self.outputdir, x) for x in self.names]
if not need_update(self.filename, self.names) and not force:
logging.error("file %s already existed, skip file splitting" % \
self.names[0])
return
filehandles = [open(x, "w") for x in self.names]
if mode == "batch":
for batch, fw in zip(self._batch_iterator(N), filehandles):
count = self.write(fw, batch)
logging.debug("write %d records to %s" % (count, fw.name))
elif mode == "cycle":
handle = self._open(self.filename)
for record, fw in izip(handle, cycle(filehandles)):
count = self.write(fw, [record])
elif mode == "optimal":
"""
This mode is based on Longest Processing Time (LPT) algorithm:
A simple, often-used algorithm is the LPT algorithm (Longest
Processing Time) which sorts the jobs by its processing time and
then assigns them to the machine with the earliest end time so far.
This algorithm achieves an upper bound of 4/3 - 1/(3m) OPT.
Citation: <http://en.wikipedia.org/wiki/Multiprocessor_scheduling>
"""
endtime = [0] * N
handle = self._open(self.filename)
for record in handle:
mt, mi = min((x, i) for (i, x) in enumerate(endtime))
fw = filehandles[mi]
count = self.write(fw, [record])
endtime[mi] += len(record)
for fw in filehandles:
fw.close()
def longest_unique_prefix(query, targets, remove_self=True):
"""
Find the longest unique prefix for filename, when compared against a list of
filenames. Useful to simplify file names in a pool of files. See usage in
formats.fasta.pool().
"""
query = op.basename(query)
targets = [op.basename(x) for x in targets]
prefix_lengths = [len(op.commonprefix([query, name])) for name in targets]
if remove_self and len(query) in prefix_lengths:
prefix_lengths.remove(len(query))
longest_length = max(prefix_lengths)
return query[:longest_length + 1]
def check_exists(filename, oappend=False):
"""
Avoid overwriting some files accidentally.
"""
if op.exists(filename):
if oappend:
return oappend
logging.error("`{0}` found, overwrite (Y/N)?".format(filename))
overwrite = (raw_input() == 'Y')
else:
overwrite = True
return overwrite
def must_open(filename, mode="r", checkexists=False, skipcheck=False, \
oappend=False):
"""
Accepts filename and returns filehandle.
Checks on multiple files, stdin/stdout/stderr, .gz or .bz2 file.
"""
if isinstance(filename, list):
assert "r" in mode
if filename[0].endswith(".gz") or filename[0].endswith(".bz2"):
filename = " ".join(filename) # allow opening multiple gz/bz2 files
else:
import fileinput
return fileinput.input(filename)
if filename in ("-", "stdin"):
assert "r" in mode
fp = sys.stdin
elif filename == "stdout":
assert "w" in mode
fp = sys.stdout
elif filename == "stderr":
assert "w" in mode
fp = sys.stderr
elif filename == "tmp" and mode == "w":
from tempfile import NamedTemporaryFile
fp = NamedTemporaryFile(delete=False)
elif filename.endswith(".gz"):
if 'r' in mode:
cmd = "zcat {0}".format(filename)
fp = popen(cmd, debug=False)
elif 'w' in mode:
import gzip
fp = gzip.open(filename, mode)
elif filename.endswith(".bz2"):
if 'r' in mode:
cmd = "bzcat {0}".format(filename)
fp = popen(cmd, debug=False)
elif 'w' in mode:
import bz2
fp = bz2.BZ2File(filename, mode)
else:
if checkexists:
assert mode == "w"
overwrite = (not op.exists(filename)) if skipcheck \
else check_exists(filename, oappend)
if overwrite:
if oappend:
fp = open(filename, "a")
else:
fp = open(filename, "w")
else:
logging.debug("File `{0}` already exists. Skipped."\
.format(filename))
return None
else:
fp = open(filename, mode)
return fp
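# Example sketch (file names illustrative): must_open() transparently handles
# plain, gzipped/bzipped and "-"/stdin inputs.
#
#   fp = must_open("reads.fastq.gz")                  # read through zcat
#   fw = must_open("out.txt", "w", checkexists=True)  # prompts before overwrite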
bash_shebang = "#!/bin/bash"
python_shebang = """#!/usr/bin/env python
# -*- coding: UTF-8 -*-"""
def write_file(filename, contents, meta=None, skipcheck=False, append=False, tee=False):
if not meta:
suffix = filename.rsplit(".", 1)[-1]
if suffix == "sh":
meta = "run script"
elif suffix == "py":
meta = "python script"
else:
meta = "file"
meta_choices = ("file", "run script", "python script")
assert meta in meta_choices, "meta must be one of {0}".\
format("|".join(meta_choices))
contents = contents.strip()
shebang = "\n"
if "script" in meta:
if not append:
if meta == "run script":
shebang = bash_shebang
elif meta == "python script":
shebang = python_shebang
contents = "\n\n".join((shebang, contents))
fw = must_open(filename, "w", checkexists=True, skipcheck=skipcheck, oappend=append)
if fw:
print >> fw, contents
fw.close()
if tee:
print >> sys.stderr, contents
fileop = "appended" if append else "written"
message = "{0} {1} to `{2}`.".format(meta, fileop, filename)
logging.debug(message.capitalize())
if meta == "run script" and not append:
sh("chmod u+x {0}".format(filename))
def read_until(handle, start):
# read each line until a certain start, then puts the start tag back
while 1:
pos = handle.tell()
line = handle.readline()
if not line:
break
if line.startswith(start):
handle.seek(pos)
return
#raise EOFError, "%s tag cannot be found"
def read_block(handle, signal):
"""
Useful for reading block-like file formats, for example FASTA or OBO file,
such file usually startswith some signal, and in-between the signals are a
record
"""
signal_len = len(signal)
it = (x[1] for x in groupby(handle,
key=lambda row: row.strip()[:signal_len] == signal))
found_signal = False
for header in it:
header = header.next().strip()
if header[:signal_len] != signal:
continue
found_signal = True
seq = list(s.strip() for s in it.next())
yield header, seq
if not found_signal:
handle.seek(0)
seq = list(s.strip() for s in handle)
yield None, seq
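# Example sketch: read_block() yields (signal line, record lines) pairs, e.g.
# for a FASTA-style file (file name below is illustrative):
#
#   fp = must_open("seqs.fasta")
#   for header, lines in read_block(fp, ">"):
#       print header, len(lines)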
def is_number(s, cast=float):
"""
Check if a string is a number. Use cast=int to check if s is an integer.
"""
try:
cast(s) # for int, long and float
except ValueError:
return False
return True
def get_number(s, cast=int):
"""
Try to get a number out of a string, and cast it.
"""
import string
d = "".join(x for x in str(s) if x in string.digits)
return cast(d)
def flexible_cast(s):
if is_number(s, cast=int):
return int(s)
elif is_number(s, cast=float):
return float(s)
return s
def main():
actions = (
('pairwise', 'convert a list of IDs into all pairs'),
('split', 'split large file into N chunks'),
('reorder', 'reorder columns in tab-delimited files'),
('flatten', 'convert a list of IDs into one per line'),
('group', 'group elements in a table based on key (groupby) column'),
('setop', 'set operations on files'),
('join', 'join tabular-like files based on common column'),
('subset', 'subset tabular-like files based on common column'),
('truncate', 'remove lines from end of file'),
('append', 'append a column with fixed value'),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def pairwise(args):
"""
%prog pairwise ids
Convert a list of IDs into all pairs.
"""
from itertools import combinations
p = OptionParser(pairwise.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
idsfile, = args
ids = SetFile(idsfile)
ids = sorted(ids)
fw = open(idsfile + ".pairs", "w")
for a, b in combinations(ids, 2):
print >> fw, "\t".join((a, b))
fw.close()
def append(args):
"""
%prog append csvfile [tag]
Append a column with fixed value. If tag is missing then just append the
filename.
"""
p = OptionParser(append.__doc__)
p.set_sep()
p.set_outfile()
opts, args = p.parse_args(args)
nargs = len(args)
if nargs not in (1, 2):
sys.exit(not p.print_help())
csvfile = args[0]
tag = args[1] if nargs == 2 else csvfile
fp = must_open(csvfile)
fw = must_open(opts.outfile, "w")
for row in fp:
row = row.rstrip("\r\n")
row = opts.sep.join((row, tag))
print >> fw, row
def truncate(args):
"""
%prog truncate linecount filename
Remove linecount lines from the end of the file in-place. Borrowed from:
<http://superuser.com/questions/127786/how-to-remove-the-last-2-lines-of-a-very-large-file>
"""
p = OptionParser(truncate.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
number, filename = args
number = int(number)
count = 0
f = open(filename, "r+b")
f.seek(0, os.SEEK_END)
while f.tell() > 0:
f.seek(-1, os.SEEK_CUR)
char = f.read(1)
if char == '\n':
count += 1
if count == number + 1:
f.truncate()
print >> sys.stderr, "Removed {0} lines from end of file".format(number)
return number
f.seek(-1, os.SEEK_CUR)
if count < number + 1:
print >> sys.stderr, "No change: requested removal would leave empty file"
return -1
def flatten(args):
"""
%prog flatten filename > ids
    Convert a list of IDs (say, multiple IDs per line) into one ID per line.
For example, convert this, to this:
A,B,C | A
1 | B
a,4 | C
| 1
| a
| 4
If multi-column file with multiple elements per column, zip then flatten like so:
A,B,C 2,10,gg | A,2
1,3 4 | B,10
| C,gg
| 1,4
| 3,na
"""
from itertools import izip_longest
p = OptionParser(flatten.__doc__)
p.set_sep(sep=",")
p.add_option("--zipflatten", default=None, dest="zipsep",
help="Specify if columns of the file should be zipped before" +
" flattening. If so, specify delimiter separating column elements" +
" [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
tabfile, = args
zipsep = opts.zipsep
fp = must_open(tabfile)
for row in fp:
if zipsep:
row = row.rstrip()
atoms = row.split(opts.sep)
frows = []
for atom in atoms:
frows.append(atom.split(zipsep))
print "\n".join([zipsep.join(x) for x in list(izip_longest(*frows, fillvalue="na"))])
else:
print row.strip().replace(opts.sep, "\n")
def group(args):
"""
%prog group tabfile > tabfile.grouped
Given a tab-delimited file, either group all elements within the file or
group the elements in the value column(s) based on the key (groupby) column
For example, convert this | into this
---------------------------------------
a 2 3 4 | a,2,3,4,5,6
a 5 6 | b,7,8
b 7 8 | c,9,10,11
c 9 |
c 10 11 |
If grouping by a particular column,
convert this | into this:
---------------------------------------------
a 2 3 4 | a 2,5 3,6 4
a 5 6 | b 7 8
b 7 8 | c 9,10 11
c 9 |
c 10 11 |
By default, it uniqifies all the grouped elements
"""
from jcvi.utils.cbook import AutoVivification
from jcvi.utils.grouper import Grouper
p = OptionParser(group.__doc__)
p.set_sep()
p.add_option("--groupby", default=None, type='int',
help="Default column to groupby [default: %default]")
p.add_option("--groupsep", default=',',
help="Separator to join the grouped elements [default: `%default`]")
p.add_option("--nouniq", default=False, action="store_true",
help="Do not uniqify the grouped elements [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
tabfile, = args
sep = opts.sep
groupby = opts.groupby
groupsep = opts.groupsep
cols = []
grouper = AutoVivification() if groupby is not None else Grouper()
fp = must_open(tabfile)
for row in fp:
row = row.rstrip()
atoms = row.split(sep)
if groupby is not None:
if len(cols) < len(atoms):
cols = [x for x in xrange(len(atoms))]
if groupby not in cols:
logging.error("groupby col index `{0}` is out of range".format(groupby))
sys.exit()
key = atoms[groupby]
for col in cols:
if col == groupby:
continue
if not grouper[key][col]:
grouper[key][col] = [] if opts.nouniq else set()
if col < len(atoms):
if groupsep in atoms[col]:
for atom in atoms[col].split(groupsep):
if opts.nouniq:
grouper[key][col].append(atom)
else:
grouper[key][col].add(atom)
else:
if opts.nouniq:
grouper[key][col].append(atoms[col])
else:
grouper[key][col].add(atoms[col])
else:
grouper.join(*atoms)
for key in grouper:
if groupby is not None:
line = []
for col in cols:
if col == groupby:
line.append(key)
elif col in grouper[key].keys():
line.append(groupsep.join(grouper[key][col]))
else:
line.append("na")
print sep.join(line)
else:
print groupsep.join(key)
def reorder(args):
"""
%prog reorder tabfile 1,2,4,3 > newtabfile
Reorder columns in tab-delimited files. The above syntax will print out a
new file with col-1,2,4,3 from the old file.
"""
import csv
p = OptionParser(reorder.__doc__)
p.set_sep()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
tabfile, order = args
sep = opts.sep
order = [int(x) - 1 for x in order.split(",")]
reader = csv.reader(must_open(tabfile), delimiter=sep)
writer = csv.writer(sys.stdout, delimiter=sep)
for row in reader:
newrow = [row[x] for x in order]
writer.writerow(newrow)
def split(args):
"""
%prog split file outdir N
Split file into N records. This allows splitting FASTA/FASTQ/TXT file
properly at boundary of records. Split is useful for parallelization
on input chunks.
    Option --mode controls how records are broken into chunks.
    1. batch - batch records sequentially, 1-100 in file 1, 101-200 in file 2, etc.
    2. cycle - distribute records in Round Robin fashion
    3. optimal - try to make split files of roughly similar sizes, using LPT
algorithm. This is the default.
"""
p = OptionParser(split.__doc__)
mode_choices = ("batch", "cycle", "optimal")
p.add_option("--all", default=False, action="store_true",
help="split all records [default: %default]")
p.add_option("--mode", default="optimal", choices=mode_choices,
help="Mode when splitting records [default: %default]")
p.add_option("--format", default="fasta", choices=("fasta", "fastq", "txt"),
help="input file format [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
mode = opts.mode
filename, outdir, N = args
fs = FileSplitter(filename, outputdir=outdir, format=opts.format, mode=mode)
if opts.all:
logging.debug("option -all override N")
N = fs.num_records
else:
N = int(N)
assert N > 0, "N must be > 0"
logging.debug("split file into %d chunks" % N)
fs.split(N)
return fs
def join(args):
"""
%prog join file1.txt(pivotfile) file2.txt ..
Join tabular-like files based on common column.
--column specifies the column index to pivot on.
Use comma to separate multiple values if the pivot column is different
in each file. Maintain the order in the first file.
--sep specifies the column separators, default to tab.
Use comma to separate multiple values if the column separator is different
in each file.
"""
from jcvi.utils.iter import flatten
p = OptionParser(join.__doc__)
p.add_option("--column", default="0",
help="0-based column id, multiple values allowed [default: %default]")
p.set_sep(multiple=True)
p.add_option("--noheader", default=False, action="store_true",
help="Do not print header [default: %default]")
p.add_option("--na", default="na",
help="Value for unjoined data [default: %default]")
p.add_option("--keysep", default=",",
help="specify separator joining multiple elements in the key column"
+ " of the pivot file [default: %default]")
p.set_outfile()
opts, args = p.parse_args(args)
nargs = len(args)
keysep = opts.keysep
if len(args) < 2:
sys.exit(not p.print_help())
na = opts.na
c = opts.column
if "," in c:
cc = [int(x) for x in c.split(",")]
else:
cc = [int(c)] * nargs
assert len(cc) == nargs, "Column index number != File number"
s = opts.sep
if "," in s:
ss = [x for x in s.split(",")]
else:
ss = [s] * nargs
assert len(ss) == nargs, "column separator number != File number"
# Maintain the first file line order, and combine other files into it
pivotfile = args[0]
files = [DictFile(f, keypos=c, valuepos=None, delimiter=s) \
for f, c, s in zip(args, cc, ss)]
otherfiles = files[1:]
header = "\t".join(flatten([op.basename(x.filename)] * x.ncols \
for x in files))
fp = must_open(pivotfile)
fw = must_open(opts.outfile, "w")
if not opts.noheader:
print >> fw, header
for row in fp:
row = row.rstrip()
atoms = row.split(ss[0])
newrow = atoms
key = atoms[cc[0]]
keys = key.split(keysep) if keysep in key else [key]
for d in otherfiles:
drows = list()
for key in keys:
drows.append(d.get(key, [na] * d.ncols))
drow = [keysep.join(x) for x in list(zip(*drows))]
newrow += drow
print >> fw, "\t".join(newrow)
def subset(args):
"""
%prog subset file1.txt(pivotfile) file2.txt ..
subset tabular-like file1 based on common column with file 2.
Normally file1 should have unique row entries.
If more than one file2 are provided, they must have same column separators.
Multiple file2's will be concatenated in the output.
--column specifies the column index (0-based) to pivot on.
Use comma to separate multiple values if the pivot column is different
in each file. Maintain the order in the first file.
--sep specifies the column separators, default to tab.
Use comma to separate multiple values if the column separator is different
in each file.
"""
p = OptionParser(subset.__doc__)
p.add_option("--column", default="0",
help="0-based column id, multiple values allowed [default: %default]")
p.set_sep(multiple=True)
p.add_option("--pivot", default=1, type="int",
help="1 for using order in file1, 2 for using order in \
file2 [default: %default]")
p.set_outfile()
opts, args = p.parse_args(args)
nargs = len(args)
if len(args) < 2:
sys.exit(not p.print_help())
c = opts.column
if "," in c:
cc = [int(x) for x in c.split(",")]
assert len(set(cc[1:])) == 1, \
"Multiple file2's must have same column index."
cc = cc[0:2]
else:
cc = [int(c)] * 2
s = opts.sep
if "," in s:
ss = [x for x in s.split(",")]
        assert len(set(ss[1:])) == 1, \
"Multiple file2's must have same column separator."
ss = ss[0:2]
else:
ss = [s] * 2
if nargs > 2:
file2 = FileMerger(args[1:], outfile="concatenatedFile2").merge()
else:
file2 = args[1]
newargs = [args[0], file2]
files = [DictFile(f, keypos=c, valuepos=None, delimiter=s) \
for f, c, s in zip(newargs, cc, ss)]
pivot = 0 if opts.pivot==1 else 1
fp = open(newargs[pivot])
fw = must_open(opts.outfile, "w")
for row in fp:
row = row.rstrip()
atoms = row.split(ss[pivot])
key = atoms[cc[pivot]]
d = files[1-pivot]
if key in d:
print >> fw, ss[0].join(files[0][key])
if nargs > 2:
FileShredder([file2])
def setop(args):
"""
%prog setop "fileA & fileB" > newfile
Perform set operations, except on files. The files (fileA and fileB) contain
list of ids. The operator is one of the four:
|: union (elements found in either file)
&: intersection (elements found in both)
-: difference (elements in fileA but not in fileB)
    ^: symmetric difference (elements found in either set but not both)
Please quote the argument to avoid shell interpreting | and &.
"""
p = OptionParser(setop.__doc__)
p.add_option("--column", default=0, type="int",
help="The column to extract, 0-based, -1 to disable [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
statement, = args
fa, op, fb = statement.split()
assert op in ('|', '&', '-', '^')
column = opts.column
fa = SetFile(fa, column=column)
fb = SetFile(fb, column=column)
if op == '|':
t = fa | fb
elif op == '&':
t = fa & fb
elif op == '-':
t = fa - fb
elif op == '^':
t = fa ^ fb
for x in sorted(t):
print x
if __name__ == '__main__':
main()
| sgordon007/jcvi_062915 | formats/base.py | Python | bsd-2-clause | 30,705 |
# Copyright 2011-2015 Therp BV <https://therp.nl>
# Copyright 2016 Opener B.V. <https://opener.am>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
from odoo.modules.registry import Registry
from odoo.osv.expression import AND
from ..blacklist import (
BLACKLIST_MODULES,
BLACKLIST_MODULES_ENDS_WITH,
BLACKLIST_MODULES_STARTS_WITH,
)
class UpgradeInstallWizard(models.TransientModel):
_name = "upgrade.install.wizard"
_description = "Upgrade Install Wizard"
state = fields.Selection(
[("draft", "Draft"), ("done", "Done")], readonly=True, default="draft"
)
module_ids = fields.Many2many(
comodel_name="ir.module.module",
domain=lambda x: x._module_ids_domain(),
)
module_qty = fields.Integer(
string="Modules Quantity", compute="_compute_module_qty"
)
@api.model
def _module_ids_domain(self, extra_domain=None):
domain = [
"&",
("state", "not in", ["uninstallable", "unknown"]),
("name", "not in", BLACKLIST_MODULES),
]
if extra_domain:
domain = AND([domain, extra_domain])
modules = self.env["ir.module.module"].search(domain)
for start_pattern in BLACKLIST_MODULES_STARTS_WITH:
modules = modules.filtered(lambda x: not x.name.startswith(start_pattern))
for end_pattern in BLACKLIST_MODULES_ENDS_WITH:
modules = modules.filtered(lambda x: not x.name.endswith(end_pattern))
return [("id", "in", modules.ids)]
@api.depends("module_ids")
def _compute_module_qty(self):
for wizard in self:
wizard.module_qty = len(wizard.module_ids)
def select_odoo_modules(self, extra_domain=None):
self.ensure_one()
modules = self.env["ir.module.module"].search(
self._module_ids_domain(extra_domain=extra_domain)
)
modules = modules.filtered(lambda x: x.is_odoo_module)
self.module_ids = modules
return self.return_same_form_view()
def select_oca_modules(self, extra_domain=None):
self.ensure_one()
modules = self.env["ir.module.module"].search(
self._module_ids_domain(extra_domain=extra_domain)
)
modules = modules.filtered(lambda x: x.is_oca_module)
self.module_ids = modules
return self.return_same_form_view()
def select_other_modules(self, extra_domain=None):
self.ensure_one()
modules = self.env["ir.module.module"].search(
self._module_ids_domain(extra_domain=extra_domain)
)
modules = modules.filtered(lambda x: not (x.is_oca_module or x.is_odoo_module))
self.module_ids = modules
return self.return_same_form_view()
def select_installable_modules(self, extra_domain=None):
self.ensure_one()
self.module_ids = self.env["ir.module.module"].search(
self._module_ids_domain(extra_domain=extra_domain)
)
return self.return_same_form_view()
def unselect_modules(self):
self.ensure_one()
self.module_ids = False
return self.return_same_form_view()
def install_modules(self):
"""Set all selected modules and actually install them."""
self.ensure_one()
self.module_ids.write({"state": "to install"})
self.env.cr.commit() # pylint: disable=invalid-commit
Registry.new(self.env.cr.dbname, update_module=True)
self.write({"state": "done"})
return self.return_same_form_view()
def return_same_form_view(self):
return {
"type": "ir.actions.act_window",
"res_model": "upgrade.install.wizard",
"view_mode": "form",
"res_id": self.id,
"views": [(False, "form")],
"target": "new",
}
| OCA/server-tools | upgrade_analysis/wizards/upgrade_install_wizard.py | Python | agpl-3.0 | 3,888 |
__author__ = 'joel'
from torrent import Torrent
import requests
from requests.auth import HTTPBasicAuth
class ruTorrentCommand(object):
__path = None
__post_data = None
__ru_torrent_instance = None
def __init__(self, path, post_data):
self.__path = path
self.__post_data = post_data
@property
def path(self):
return self.__path
@property
def url(self):
return "{base_url}/{path}".format(
base_url=self.ru_torrent_instance.url.rstrip('/'),
path=self.path.lstrip('/')
)
@property
def post_data(self):
return self.__post_data
def set_ru_torrent_instance(self, ru_torrent_instance):
self.__ru_torrent_instance=ru_torrent_instance
@property
def ru_torrent_instance(self):
return self.__ru_torrent_instance
def get_basic_auth(self):
if not self.ru_torrent_instance.basic_auth:
return None
return HTTPBasicAuth(
username=self.ru_torrent_instance.basic_auth.get('username'),
password=self.ru_torrent_instance.basic_auth.get('password'),
)
def perform_post(self):
response = requests.post(
self.url,
data=self.post_data,
headers=self.ru_torrent_instance.headers,
verify=self.ru_torrent_instance.verify_ssl_certificate,
auth=self.get_basic_auth()
)
return response
def get_json_response(self):
"""
s = '{"t":{"E4060C0EEDDE9EF0E0B0E8D8DDB62B8F62336F44":["1","0","1","1","Christopher.Walken.MOViE.PACK.1080p.BluRay.x264-SCC","296644621293","35363","35363","296644621293","0","0","0","0","8388608","","1","79","1","0","0","2","1382152880","10034921606","0","35363","\/home\/joel/projects/rfinder/seedbox\/torrents\/data\/Christopher.Walken.MOViE.PACK.1080p.BluRay.x264-SCC","1381269264","1","1","","","2582225178624","1","1","","","","","52#","49#","","",""],"2E84CC87B6EA1FF49FF0199B1E55EB3F342218A8":["1","0","1","0","Homeland.S03E03.720p.HDTV.x264-IMMERSE","1476192006","2816","2816","1476192006","0","0","0","0","524288","","0","8","0","0","0","2","1382171315","221954789","0","2816","\/home\/joel/projects/rfinder/seedbox\/torrents\/data\/Homeland.S03E03.720p.HDTV.x264-IMMERSE","1381740472","1","0","","","2582225178624","1","1","","","","","215#","11#","","1382165569\n","1382165495\n"]},"cid":464607101}'
s = s.replace('\n','')
return loads(s)['t']
"""
r = self.perform_post()
return r.json()['t']
class ruTorrentGetTorrentListCommand(ruTorrentCommand):
def get_torrents(self):
try:
r = self.get_json_response()
except Exception:
return
for key, torrent_data in r.items():
yield Torrent(key, torrent_data)
return
def __iter__(self):
for t in self.get_torrents():
yield t
return
class ruTorrentCommands(object):
__ru_torrent_instance = None
def __init__(self, ru_torrent_instance):
self.__ru_torrent_instance = ru_torrent_instance
@property
def ru_torrent_instance(self):
return self.__ru_torrent_instance
def get_torrent_list(self):
c = ruTorrentGetTorrentListCommand(
path='plugins/httprpc/action.php',
post_data='mode=list&cid=229885160&cmd=d.get_throttle_name%3D&cmd=d.get_custom%3Dchk-state&cmd=d.get_custom%3Dchk-time&cmd=d.get_custom%3Dsch_ignore&cmd=cat%3D%22%24t.multicall%3Dd.get_hash%3D%2Ct.get_scrape_complete%3D%2Ccat%3D%7B%23%7D%22&cmd=cat%3D%22%24t.multicall%3Dd.get_hash%3D%2Ct.get_scrape_incomplete%3D%2Ccat%3D%7B%23%7D%22&cmd=cat%3D%24d.views%3D&cmd=d.get_custom%3Dseedingtime&cmd=d.get_custom%3Daddtime'
)
c.set_ru_torrent_instance(
self.ru_torrent_instance
)
return c
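# Hypothetical usage (the ruTorrent instance object is defined elsewhere in
# this project; it must expose url, headers, verify_ssl_certificate and
# basic_auth as the commands above expect):
#
#   commands = ruTorrentCommands(my_rutorrent_instance)
#   for torrent in commands.get_torrent_list():
#       print(torrent)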
| joelbitar/rfinder | rutorrent/commands.py | Python | lgpl-3.0 | 3,875 |
from django.contrib.auth.models import User
from django.db import models
class Article(models.Model):
title = models.CharField(max_length=30)
headline = models.CharField(max_length=100)
body = models.TextField()
author = models.ForeignKey(User)
create_date = models.DateField(auto_now_add=True)
def __str__(self):
        return self.title
| azul-cloud/django-rest-server | blog/models.py | Python | mit | 366 |
import unittest
from models.words import Word
class SaddamTest(unittest.TestCase):
def test_get_word(self):
word_class = Word()
word = word_class.generar_palabra()
        self.assertEqual('MORIN', word)
| dimh/saddam | test/words_test.py | Python | gpl-3.0 | 224 |
from functools import reduce
from .homogeneous import Translation, UniformScale, Rotation, Affine, Homogeneous
def transform_about_centre(obj, transform):
r"""
Return a Transform that implements transforming an object about
its centre. The given object must be transformable and must implement
a method to provide the object centre. More precisely, the object will be
    translated to the origin (according to its centre), transformed, and then
    translated back to its previous position.
Parameters
----------
obj : :map:`Transformable`
A transformable object that has the ``centre`` method.
transform : :map:`ComposableTransform`
A composable transform.
Returns
-------
transform : :map:`Homogeneous`
        A homogeneous transform that implements the transformation about the centre.
"""
to_origin = Translation(-obj.centre(), skip_checks=True)
back_to_centre = Translation(obj.centre(), skip_checks=True)
# Fast path - compose in-place in order to ensure only a single matrix
# is returned
if isinstance(transform, Homogeneous):
# Translate to origin, transform, then translate back
return to_origin.compose_before(transform).compose_before(back_to_centre)
else: # Fallback to transform chain
return reduce(
lambda a, b: a.compose_before(b), [to_origin, transform, back_to_centre]
)
def scale_about_centre(obj, scale):
r"""
Return a Homogeneous Transform that implements scaling an object about
its centre. The given object must be transformable and must implement
a method to provide the object centre.
Parameters
----------
obj : :map:`Transformable`
A transformable object that has the ``centre`` method.
scale : `float` or ``(n_dims,)`` `ndarray`
The scale factor as defined in the :map:`Scale` documentation.
Returns
-------
transform : :map:`Homogeneous`
A homogeneous transform that implements the scaling.
"""
s = UniformScale(scale, obj.n_dims, skip_checks=True)
return transform_about_centre(obj, s)
def rotate_ccw_about_centre(obj, theta, degrees=True):
r"""
Return a Homogeneous Transform that implements rotating an object
counter-clockwise about its centre. The given object must be transformable
and must implement a method to provide the object centre.
Parameters
----------
obj : :map:`Transformable`
A transformable object that has the ``centre`` method.
theta : `float`
        The angle of counter-clockwise rotation about the centre.
degrees : `bool`, optional
If ``True`` theta is interpreted as degrees. If ``False``, theta is
interpreted as radians.
Returns
-------
transform : :map:`Homogeneous`
A homogeneous transform that implements the rotation.
"""
if obj.n_dims != 2:
raise ValueError("CCW rotation is currently only supported for " "2D objects")
r = Rotation.init_from_2d_ccw_angle(theta, degrees=degrees)
return transform_about_centre(obj, r)
def shear_about_centre(obj, phi, psi, degrees=True):
r"""
Return an affine transform that implements shearing (distorting) an
object about its centre. The given object must be transformable and must
implement a method to provide the object centre.
Parameters
----------
obj : :map:`Transformable`
A transformable object that has the ``centre`` method.
phi : `float`
The angle of shearing in the X direction.
psi : `float`
The angle of shearing in the Y direction.
degrees : `bool`, optional
If ``True``, then phi and psi are interpreted as degrees. If ``False``
they are interpreted as radians.
Returns
-------
transform : :map:`Affine`
An affine transform that implements the shearing.
Raises
------
ValueError
Shearing can only be applied on 2D objects
"""
if obj.n_dims != 2:
raise ValueError("Shearing is currently only supported for 2D objects")
s = Affine.init_from_2d_shear(phi, psi, degrees=degrees)
return transform_about_centre(obj, s)
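# --- Usage sketch (not part of the original module) -------------------------
# A minimal example composing the helpers above on a 2D menpo PointCloud
# (assumed importable from menpo.shape; any object exposing `centre()` and
# `n_dims` works the same way).
if __name__ == '__main__':
    import numpy as np
    from menpo.shape import PointCloud

    square = PointCloud(np.array([[0.0, 0.0], [2.0, 0.0],
                                  [2.0, 2.0], [0.0, 2.0]]))
    # Double the size, then rotate 90 degrees CCW, both about the centre.
    t = scale_about_centre(square, 2.0).compose_before(
        rotate_ccw_about_centre(square, 90))
    print(t.apply(square).points)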
| patricksnape/menpo | menpo/transform/compositions.py | Python | bsd-3-clause | 4,180 |
#!/usr/bin/env python
import boto.ec2.autoscale
import boto.ec2.cloudwatch
def monitor(app):
return True
def init(app):
regions = ['us-east-1']
cloudwatch = {}
for r in regions:
cloudwatch[r] = boto.ec2.cloudwatch.connect_to_region(r)
| yadudoc/cloud_kotta | theWhip/whip.py | Python | apache-2.0 | 268 |
input = """
a | b.
a? %, not b ?
"""
output = """
a | b.
a? %, not b ?
"""
| veltri/DLV2 | tests/parser/query.22.test.py | Python | apache-2.0 | 83 |
#!/usr/bin/env python
# Remove .egg-info directory if it exists, to avoid dependency problems with
# partially-installed packages (20160119/dphiffer)
import os, sys
from shutil import rmtree
cwd = os.path.dirname(os.path.realpath(sys.argv[0]))
egg_info = cwd + "/mapzen.whosonfirst.pip.utils.egg-info"
if os.path.exists(egg_info):
rmtree(egg_info)
from setuptools import setup, find_packages
packages = find_packages()
desc = open("README.md").read(),
version = open("VERSION").read()
setup(
name='mapzen.whosonfirst.pip.utils',
namespace_packages=['mapzen', 'mapzen.whosonfirst'],
version=version,
description='Python utility methods for making Who\'s On First documents play nicely with the go-whosonfirst-pip server',
author='Mapzen',
url='https://github.com/mapzen/py-mapzen-whosonfirst-pip-utils',
install_requires=[
'mapzen.whosonfirst.pip>=0.04',
'mapzen.whosonfirst.placetypes>=0.11',
'shapely',
],
dependency_links=[
'https://github.com/whosonfirst/py-mapzen-whosonfirst-pip/tarball/master#egg=mapzen.whosonfirst.pip-0.04',
'https://github.com/whosonfirst/py-mapzen-whosonfirst-placetypes/tarball/master#egg=mapzen.whosonfirst.placetypes-0.11',
],
packages=packages,
scripts=[
],
download_url='https://github.com/mapzen/py-mapzen-whosonfirst-pip-utils/releases/tag/' + version,
license='BSD')
| whosonfirst/py-mapzen-whosonfirst-pip-utils | setup.py | Python | bsd-3-clause | 1,676 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from corehq.sql_db.operations import HqRunPython
def copy_emails_to_email_list(apps, schema_editor):
BillingContactInfo = apps.get_model('accounting', 'BillingContactInfo')
for contact_info in BillingContactInfo.objects.all():
if contact_info.emails:
contact_info.email_list = contact_info.emails.split(',')
else:
contact_info.email_list = []
contact_info.save()
def copy_email_list_to_emails(apps, schema_editor):
BillingContactInfo = apps.get_model('accounting', 'BillingContactInfo')
for contact_info in BillingContactInfo.objects.all():
if contact_info.email_list:
contact_info.emails = ','.join(contact_info.email_list)
contact_info.save()
class Migration(migrations.Migration):
dependencies = [
('accounting', '0014_billingcontactinfo_email_list'),
]
operations = [
HqRunPython(copy_emails_to_email_list, reverse_code=copy_email_list_to_emails),
]
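# --- Illustrative sketch (not part of the original migration) ---------------
# The forward/reverse functions above are a plain comma-join/split round trip;
# shown here on bare strings (hypothetical addresses) rather than
# BillingContactInfo rows.
def _email_round_trip_example():
    emails = '[email protected],[email protected]'
    email_list = emails.split(',') if emails else []
    assert email_list == ['[email protected]', '[email protected]']
    assert ','.join(email_list) == emails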
| qedsoftware/commcare-hq | corehq/apps/accounting/migrations/0015_datamigration_email_list.py | Python | bsd-3-clause | 1,091 |
# Copyright 2016 Susan Bennett, David Mitchell, Jim Nicholls
#
# This file is part of AutoHolds.
#
# AutoHolds is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# AutoHolds is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with AutoHolds. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
class Log(models.Model):
log_created_at = models.DateTimeField(auto_now_add=True, editable=False)
log_updated_at = models.DateTimeField(auto_now=True, editable=False)
log_notes = models.TextField(editable=False)
def append_log_note(self, log_note):
if self.log_notes:
self.log_notes = self.log_notes + '\n' + log_note
else:
self.log_notes = log_note
class Meta:
abstract = True
class RunLog(Log):
started_at = models.DateTimeField(editable=False)
ended_at = models.DateTimeField(null=True, editable=False)
successful = models.BooleanField(default=False, editable=False)
num_bibs_found = models.IntegerField(default=0, editable=False)
first_bib_record_number = models.IntegerField(null=True, editable=False)
first_bib_created_at = models.DateTimeField(null=True, editable=False)
last_bib_record_number = models.IntegerField(null=True, editable=False)
last_bib_created_at = models.DateTimeField(null=True, editable=False)
class Meta(Log.Meta):
pass
class BibLog(Log):
run_log = models.ForeignKey(RunLog, models.CASCADE, editable=False, related_name='bibs_found')
bib_record_number = models.IntegerField(editable=False)
bib_created_at = models.DateTimeField(editable=False)
author = models.CharField(blank=True, max_length=255, editable=False)
format = models.CharField(blank=True, max_length=1, editable=False)
language = models.CharField(blank=True, max_length=3, editable=False)
num_registrations_found = models.IntegerField(default=0, editable=False)
class Meta(Log.Meta):
pass
class HoldLog(Log):
bib_log = models.ForeignKey(BibLog, models.CASCADE, editable=False, related_name='holds_placed')
patron_record_number = models.IntegerField(editable=False)
pickup_location = models.CharField(blank=True, max_length=5, editable=False)
successful = models.BooleanField(default=False, editable=False)
class Meta(Log.Meta):
pass
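# --- Usage sketch (not part of the original module) -------------------------
# How append_log_note accumulates notes on an unsaved instance (assumes this
# runs inside a configured Django project, as the models above require).
def _append_log_note_example():
    run = RunLog()
    run.append_log_note('run started')
    run.append_log_note('found 3 new bibs')
    assert run.log_notes == 'run started\nfound 3 new bibs'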
| SydneyUniLibrary/auto-holds | staff/models.py | Python | gpl-3.0 | 2,771 |
from distutils.core import setup
setup(
name='metwit-weather',
version='0.1.0',
packages=[
'metwit',
],
description='Metwit weather API client library',
author='Davide Rizzo',
author_email='[email protected]',
url='http://github.com/metwit/metwit-python',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Internet',
]
)
| metwit/metwit-python | setup.py | Python | bsd-3-clause | 598 |
"""Test IAM Policy templates are valid JSON."""
import json
import jinja2
import pytest
from foremast.iam.construct_policy import render_policy_template
from foremast.utils.templates import LOCAL_TEMPLATES
def iam_templates():
"""Generate list of IAM templates."""
jinjaenv = jinja2.Environment(loader=jinja2.FileSystemLoader([str(LOCAL_TEMPLATES)]))
iam_template_names = jinjaenv.list_templates(filter_func=lambda x: all([
x.startswith('infrastructure/iam/'),
'trust' not in x,
'wrapper' not in x, ]))
for iam_template_name in iam_template_names:
yield iam_template_name
@pytest.mark.parametrize(argnames='template_name', argvalues=iam_templates())
def test_all_iam_templates(template_name):
"""Verify all IAM templates render as proper JSON."""
*_, service_json = template_name.split('/')
service, *_ = service_json.split('.')
items = ['resource1', 'resource2']
if service == 'rds-db':
items = {
'resource1': 'user1',
'resource2': 'user2',
}
try:
rendered = render_policy_template(
account_number='',
app='coreforrest',
env='dev',
group='forrest',
items=items,
pipeline_settings={
'lambda': {
'vpc_enabled': False,
},
},
region='us-east-1',
service=service)
except json.decoder.JSONDecodeError:
pytest.fail('Bad template: {0}'.format(template_name), pytrace=False)
assert isinstance(rendered, list)
| gogoair/foremast | tests/iam/test_iam_valid_json.py | Python | apache-2.0 | 1,609 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import test_child_tasks
from . import test_sale_project
| jeremiahyan/odoo | addons/sale_project/tests/__init__.py | Python | gpl-3.0 | 163 |
# -*- coding: utf-8 -*-
"""
===============================================================================
Crystal structure classes (:mod:`sknano.core.crystallography._xtal_structures`)
===============================================================================
.. currentmodule:: sknano.core.crystallography._xtal_structures
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
__docformat__ = 'restructuredtext en'
from functools import total_ordering
import numpy as np
from sknano.core import BaseClass
from sknano.core.atoms import BasisAtom, BasisAtoms, StructureAtoms
from sknano.core.refdata import lattice_parameters as lattparams
from ._extras import pymatgen_structure, supercell_lattice_points
from ._xtal_cells import CrystalCell, UnitCell, SuperCell
from ._xtal_lattices import Crystal2DLattice, Crystal3DLattice
__all__ = ['BaseStructureMixin', 'BaseStructure', 'StructureData',
'CrystalStructureBase', 'Crystal2DStructure',
'CrystalStructure', 'Crystal3DStructure',
'CaesiumChlorideStructure', 'CsClStructure',
'DiamondStructure',
'RocksaltStructure', 'RockSaltStructure', 'NaClStructure',
'SphaleriteStructure', 'ZincblendeStructure', 'ZincBlendeStructure',
'BCCStructure', 'FCCStructure', 'Iron', 'Copper', 'Gold',
'CubicClosePackedStructure', 'CCPStructure',
'HexagonalClosePackedStructure', 'HCPStructure',
'HexagonalStructure', 'AlphaQuartz', 'MoS2']
class BaseStructureMixin:
"""Mixin class for crystal structures."""
# def __deepcopy__(self, memo):
# from copy import deepcopy
# cp = self.__class__()
# memo[id(self)] = cp
# for attr in dir(self):
# if not attr.startswith('_'):
# setattr(cp, attr, deepcopy(getattr(self, attr), memo))
# return cp
def __getattr__(self, name):
try:
return getattr(self.atoms, name)
except AttributeError:
try:
return getattr(self.crystal_cell, name)
except AttributeError:
return super().__getattr__(name)
@property
def atoms(self):
"""Structure :class:`~sknano.core.atoms.StructureAtoms`."""
return self._atoms
@property
def crystal_cell(self):
"""Structure :class:`~sknano.core.crystallography.CrystalCell`."""
return self._crystal_cell
@crystal_cell.setter
def crystal_cell(self, value):
self._crystal_cell = value
@property
def basis(self):
"""Structure :class:`~sknano.core.atoms.BasisAtoms`."""
return self.crystal_cell.basis
@basis.setter
def basis(self, value):
self.crystal_cell.basis = value
@property
def lattice(self):
"""Structure :class:`~sknano.core.crystallography.Crystal3DLattice`."""
return self.crystal_cell.lattice
@lattice.setter
def lattice(self, value):
self.crystal_cell.lattice = value
self.atoms.lattice = self.crystal_cell.lattice
@property
def scaling_matrix(self):
""":attr:`CrystalCell.scaling_matrix`."""
return self.crystal_cell.scaling_matrix
@scaling_matrix.setter
def scaling_matrix(self, value):
self.crystal_cell.scaling_matrix = value
@property
def unit_cell(self):
"""Structure :class:`~sknano.core.crystallography.UnitCell`."""
return self.crystal_cell.unit_cell
@unit_cell.setter
def unit_cell(self, value):
self.crystal_cell.unit_cell = value
@property
def structure(self):
"""Pointer to self."""
return self
@property
def structure_data(self):
"""Alias for :attr:`BaseStructureMixin.structure`."""
return self
def clear(self):
"""Clear list of :attr:`BaseStructureMixin.atoms`."""
self.atoms.clear()
def make_supercell(self, scaling_matrix, wrap_coords=False):
"""Make supercell."""
return SuperCell(self.unit_cell, scaling_matrix,
wrap_coords=wrap_coords)
def rotate(self, **kwargs):
"""Rotate crystal cell lattice, basis, and unit cell."""
self.crystal_cell.rotate(**kwargs)
self.atoms.rotate(**kwargs)
def translate(self, t, fix_anchor_points=True):
"""Translate crystal cell basis."""
self.crystal_cell.translate(t, fix_anchor_points=fix_anchor_points)
self.atoms.translate(t, fix_anchor_points=fix_anchor_points)
def transform_lattice(self, scaling_matrix, wrap_coords=False, pbc=None):
if self.lattice is None:
return
self.lattice = self.lattice.__class__(
cell_matrix=np.asmatrix(scaling_matrix) * self.lattice.matrix)
if wrap_coords:
self.crystal_cell.basis.wrap_coords(pbc=pbc)
self.atoms.wrap_coords(pbc=pbc)
# tvecs = \
# np.asarray(
# np.asmatrix(supercell_lattice_points(scaling_matrix)) *
# self.lattice.matrix)
# if self.crystal_cell.basis is not None:
# basis = self.crystal_cell.basis[:]
# self.crystal_cell.basis = BasisAtoms()
# for atom in basis:
# xs, ys, zs = \
# self.lattice.cartesian_to_fractional(atom.r)
# if wrap_coords:
# xs, ys, zs = \
# self.lattice.wrap_fractional_coordinate([xs, ys, zs])
# self.crystal_cell.basis.append(
# BasisAtom(atom.element, lattice=self.lattice,
# xs=xs, ys=ys, zs=zs))
def read_data(self, *args, **kwargs):
from sknano.io import DATAReader
return DATAReader(*args, **kwargs)
def read_dump(self, *args, **kwargs):
from sknano.io import DUMPReader
return DUMPReader(*args, **kwargs)
def read_xyz(self, *args, **kwargs):
from sknano.io import XYZReader
return XYZReader.read(*args, **kwargs)
def write_data(self, **kwargs):
from sknano.io import DATAWriter
DATAWriter.write(**kwargs)
def write_dump(self, **kwargs):
from sknano.io import DUMPWriter
DUMPWriter.write(**kwargs)
def write_xyz(self, **kwargs):
from sknano.io import XYZWriter
XYZWriter.write(**kwargs)
class BaseStructure(BaseStructureMixin):
"""Base structure class for structure data."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._atoms = StructureAtoms()
self._crystal_cell = CrystalCell()
StructureData = BaseStructure
@total_ordering
class CrystalStructureBase(BaseStructure, BaseClass):
"""Base class for abstract representions of crystal structures.
Parameters
----------
lattice : :class:`~sknano.core.crystallography.LatticeBase` sub-class
basis : {:class:`~python:list`, :class:`~sknano.core.atoms.BasisAtoms`}
coords : {:class:`~python:list`}, optional
cartesian : {:class:`~python:bool`}, optional
scaling_matrix : {:class:`~python:int`, :class:`~python:list`}, optional
"""
def __init__(self, lattice=None, basis=None, coords=None,
cartesian=False, scaling_matrix=None, **kwargs):
super().__init__(**kwargs)
self.unit_cell = UnitCell(lattice=lattice, basis=basis,
coords=coords, cartesian=cartesian)
self.scaling_matrix = scaling_matrix
self.fmtstr = self.unit_cell.fmtstr + \
", scaling_matrix={scaling_matrix!r}"
def __dir__(self):
return dir(self.crystal_cell)
def __eq__(self, other):
if isinstance(other, CrystalStructureBase):
return self is other or self.crystal_cell == other.crystal_cell
def __lt__(self, other):
if isinstance(other, CrystalStructureBase):
return self.crystal_cell < other.crystal_cell
def todict(self):
attrdict = self.unit_cell.todict()
attrdict.update(dict(scaling_matrix=self.scaling_matrix.tolist()))
return attrdict
class Crystal2DStructure(CrystalStructureBase):
"""Base class for 2D crystal structures.
.. warning:: The implementation of this class is not complete.
Parameters
----------
lattice : :class:`~sknano.core.crystallography.LatticeBase` sub-class
basis : {:class:`~python:list`, :class:`~sknano.core.atoms.BasisAtoms`}
coords : {:class:`~python:list`}, optional
cartesian : {:class:`~python:bool`}, optional
scaling_matrix : {:class:`~python:int`, :class:`~python:list`}, optional
structure : `Crystal3DStructure`, optional
"""
def __init__(self, lattice=None, basis=None, coords=None, cartesian=False,
scaling_matrix=None, structure=None, **kwargs):
if structure is not None:
lattice = structure.lattice
basis = structure.basis
if not isinstance(lattice, Crystal2DLattice):
lattice = Crystal2DLattice(cell_matrix=lattice)
super().__init__(lattice=lattice, basis=basis, coords=coords,
cartesian=cartesian, scaling_matrix=scaling_matrix,
**kwargs)
@classmethod
def from_pymatgen_structure(cls, structure):
atoms = BasisAtoms()
for site in structure.sites:
atoms.append(BasisAtom(element=site.specie.symbol,
x=site.x, y=site.y, z=site.z))
return cls(lattice=Crystal2DLattice(
cell_matrix=structure.lattice.matrix), basis=atoms)
@classmethod
def from_spacegroup(cls, sg, lattice=None, basis=None, coords=None,
**kwargs):
"""Return a `Crystal2DStructure` from a spacegroup number/symbol.
Parameters
----------
sg : :class:`~python:int` or :class:`~python:str`
lattice : :class:`Crystal2DLattice`
basis : :class:`~python:list` of :class:`~python:str`\ s
coords : :class:`~python:list` of :class:`~python:float`\ s
Returns
-------
:class:`Crystal2DStructure`
Notes
-----
        Under the hood this method first generates a pymatgen
:class:`~pymatgen:Structure`
See Also
--------
pymatgen_structure
"""
if not isinstance(basis, list):
basis = [basis]
if len(basis) != len(coords):
if isinstance(coords, list) and len(coords) != 0 and \
isinstance(coords[0], (int, float)):
coords = [coords]
cls.from_spacegroup(sg, lattice, basis, coords)
structure = \
pymatgen_structure(sg, lattice.cell_matrix, basis, coords,
classmethod='from_spacegroup')
return cls.from_pymatgen_structure(structure)
class Crystal3DStructure(CrystalStructureBase):
"""Base class for 3D crystal structures.
Parameters
----------
lattice : :class:`~sknano.core.crystallography.LatticeBase` sub-class
basis : {:class:`~python:list`, :class:`~sknano.core.atoms.BasisAtoms`}
coords : {:class:`~python:list`}, optional
cartesian : {:class:`~python:bool`}, optional
scaling_matrix : {:class:`~python:int`, :class:`~python:list`}, optional
structure : `Crystal3DStructure`, optional
"""
def __init__(self, lattice=None, basis=None, coords=None, cartesian=False,
scaling_matrix=None, structure=None, **kwargs):
if structure is not None:
lattice = structure.lattice
basis = structure.basis
if not isinstance(lattice, Crystal3DLattice):
lattice = Crystal3DLattice(cell_matrix=lattice)
super().__init__(lattice=lattice, basis=basis, coords=coords,
cartesian=cartesian, scaling_matrix=scaling_matrix,
**kwargs)
@classmethod
def from_pymatgen_structure(cls, structure):
"""Return a `Crystal3DStructure` from a \
:class:`pymatgen:pymatgen.core.Structure`.
Parameters
----------
structure : :class:`pymatgen:pymatgen.core.Structure`
Returns
-------
:class:`Crystal3DStructure`
"""
atoms = BasisAtoms()
for site in structure.sites:
atoms.append(BasisAtom(site.specie.symbol,
x=site.x, y=site.y, z=site.z))
return cls(lattice=Crystal3DLattice(
cell_matrix=structure.lattice.matrix), basis=atoms)
@classmethod
def from_spacegroup(cls, sg, lattice=None, basis=None, coords=None,
**kwargs):
"""Return a `Crystal3DStructure` from a spacegroup number/symbol.
Parameters
----------
sg : :class:`~python:int` or :class:`~python:str`
lattice : :class:`Crystal3DLattice`
basis : :class:`~python:list` of :class:`~python:str`\ s
coords : :class:`~python:list` of :class:`~python:float`\ s
Returns
-------
:class:`Crystal3DStructure`
Notes
-----
Under the hood this method generates a
:class:`~pymatgen:pymatgen.core.Structure`
See Also
--------
pymatgen_structure
"""
if not isinstance(basis, list):
basis = [basis]
if len(basis) != len(coords):
if isinstance(coords, list) and len(coords) != 0 and \
isinstance(coords[0], (int, float)):
coords = [coords]
cls.from_spacegroup(sg, lattice=lattice, basis=basis,
coords=coords, **kwargs)
structure = \
pymatgen_structure(sg, lattice.cell_matrix, basis, coords,
classmethod='from_spacegroup')
return cls.from_pymatgen_structure(structure, **kwargs)
CrystalStructure = Crystal3DStructure
class MoS2(Crystal3DStructure):
"""Molybdenum disulphide structure class."""
def __init__(self, a=lattparams['molybdenum_disulphide']['a'],
c=lattparams['molybdenum_disulphide']['c'],
basis=['Mo', 'S'], **kwargs):
molybdenum_disulphide = \
pymatgen_structure(194,
Crystal3DLattice.hexagonal(a, c).cell_matrix,
basis, [[1/3, 2/3, 1/4], [1/3, 2/3, 0.621]],
classmethod='from_spacegroup')
molybdenum_disulphide = \
Crystal3DStructure.from_pymatgen_structure(molybdenum_disulphide)
super().__init__(structure=molybdenum_disulphide, **kwargs)
class CaesiumChlorideStructure(Crystal3DStructure):
"""Abstract representation of caesium chloride structure."""
def __init__(self, a=lattparams['caesium_chloride'], basis=['Cs', 'Cl'],
**kwargs):
caesium_chloride = \
pymatgen_structure(221, Crystal3DLattice.cubic(a).cell_matrix,
basis, [[0, 0, 0], [0.5, 0.5, 0.5]],
classmethod='from_spacegroup')
caesium_chloride = \
Crystal3DStructure.from_pymatgen_structure(caesium_chloride)
super().__init__(structure=caesium_chloride, **kwargs)
CsClStructure = CaesiumChlorideStructure
class DiamondStructure(Crystal3DStructure):
"""Abstract representation of diamond structure."""
def __init__(self, a=lattparams['diamond'], basis=['C'], **kwargs):
diamond = \
pymatgen_structure(227, Crystal3DLattice.cubic(a).cell_matrix,
basis, [[0, 0, 0]],
classmethod='from_spacegroup')
diamond = \
Crystal3DStructure.from_pymatgen_structure(diamond)
super().__init__(structure=diamond, **kwargs)
class RocksaltStructure(Crystal3DStructure):
"""Abstract representation of caesium chloride structure."""
def __init__(self, a=lattparams['rock_salt'], basis=['Na', 'Cl'],
**kwargs):
rock_salt = \
pymatgen_structure(225, Crystal3DLattice.cubic(a).cell_matrix,
basis, [[0, 0, 0], [0.5, 0.5, 0.5]],
classmethod='from_spacegroup')
rock_salt = \
Crystal3DStructure.from_pymatgen_structure(rock_salt)
super().__init__(structure=rock_salt, **kwargs)
NaClStructure = RockSaltStructure = RocksaltStructure
class ZincblendeStructure(Crystal3DStructure):
"""Abstract representation of caesium chloride structure."""
def __init__(self, a=lattparams['zincblende'], basis=['Zn', 'Fe'],
**kwargs):
zincblende = \
pymatgen_structure(216, Crystal3DLattice.cubic(a).cell_matrix,
basis, [[0, 0, 0], [0.25, 0.25, 0.25]],
classmethod='from_spacegroup')
zincblende = \
Crystal3DStructure.from_pymatgen_structure(zincblende)
super().__init__(structure=zincblende, **kwargs)
SphaleriteStructure = ZincBlendeStructure = ZincblendeStructure
class CubicStructure(Crystal3DStructure):
"""Base class for a cubic `Crystal3DStructure`.
Parameters
----------
centering : :class:`~python:str`
lattice : :class:`Crystal3DLattice`, optional
a : float, optional
basis : :class:`~python:list`, optional
coords : :class:`~python:list`, optional
scaling_matrix : :class:`~python:int` or :class:`~python:list`, optional
structure : :class:`Crystal3DStructure`, optional
"""
def __init__(self, *args, a=None, centering=None, lattice=None,
basis=None, coords=None, scaling_matrix=None, structure=None,
**kwargs):
if len(args) == 1 and basis is None:
basis = args[0]
self.centering = centering
if lattice is None and structure is None:
lattice = \
Crystal3DLattice.cubic(
CubicStructure.get_lattice_parameter(
a=a, basis=basis, centering=centering))
super().__init__(lattice=lattice, basis=basis, coords=coords,
scaling_matrix=scaling_matrix, structure=structure,
**kwargs)
@classmethod
def get_lattice_parameter(cls, a=None, basis=None, centering=None):
if a is not None:
return a
if basis is None or centering is None:
raise ValueError('\nBoth the `basis` and `centering` kwargs '
'are required')
elif isinstance(basis, (tuple, list)):
if len(basis) != 1:
raise ValueError('Expected a single element basis')
else:
basis = basis[0]
if not isinstance(basis, str):
raise ValueError('Expected `str` object for basis')
if basis not in lattparams['cubic'][centering]:
raise ValueError('Specify lattice constant `a` for '
'given basis {}'.format(basis))
return lattparams['cubic'][centering][basis]
@classmethod
def from_spacegroup(cls, *args, lattice=None, basis=None, coords=None,
a=None, centering=None, **kwargs):
if len(args) == 2 and basis is None:
sg, basis = args
if len(args) == 1:
sg = args[0]
if lattice is None:
lattice = \
Crystal3DLattice.cubic(
CubicStructure.get_lattice_parameter(a=a, basis=basis,
centering=centering))
if coords is None:
coords = [[0, 0, 0]]
return super().from_spacegroup(sg, lattice=lattice, basis=basis,
coords=coords)
class BCCStructure(CubicStructure):
"""BCC structure class."""
def __init__(self, *args, **kwargs):
kwargs['centering'] = 'BCC'
structure = CubicStructure.from_spacegroup(229, *args, **kwargs)
super().__init__(*args, structure=structure, **kwargs)
class FCCStructure(CubicStructure):
"""FCC structure class."""
def __init__(self, *args, **kwargs):
kwargs['centering'] = 'FCC'
structure = CubicStructure.from_spacegroup(225, *args, **kwargs)
super().__init__(*args, structure=structure, **kwargs)
class Iron(BCCStructure):
"""Iron structure."""
def __init__(self, **kwargs):
kwargs['basis'] = 'Fe'
super().__init__(**kwargs)
class Gold(FCCStructure):
"""Gold structure."""
def __init__(self, **kwargs):
kwargs['basis'] = 'Au'
super().__init__(**kwargs)
class Copper(FCCStructure):
"""Copper structure."""
def __init__(self, **kwargs):
kwargs['basis'] = 'Cu'
super().__init__(**kwargs)
class HexagonalClosePackedStructure(Crystal3DStructure):
"""Abstract representation of hexagonal close-packed structure."""
pass
HCPStructure = HexagonalClosePackedStructure
class CubicClosePackedStructure(Crystal3DStructure):
"""Abstract representation of cubic close-packed structure."""
pass
CCPStructure = CubicClosePackedStructure
class HexagonalStructure(Crystal3DStructure):
@classmethod
def from_spacegroup(cls, sg, a, c, basis, coords):
lattice = Crystal3DLattice.hexagonal(a, c)
return super().from_spacegroup(sg, lattice=lattice, basis=basis,
coords=coords)
class AlphaQuartz(HexagonalStructure):
"""Alpha quartz structure class."""
def __init__(self, a=lattparams['alpha_quartz']['a'],
c=lattparams['alpha_quartz']['c'], **kwargs):
lattice = Crystal3DLattice.hexagonal(a, c)
basis = BasisAtoms(3 * ["Si"] + 6 * ["O"])
coords = [[0.4697, 0.0000, 0.0000],
[0.0000, 0.4697, 0.6667],
[0.5305, 0.5303, 0.3333],
[0.4133, 0.2672, 0.1188],
[0.2672, 0.4133, 0.5479],
[0.7328, 0.1461, 0.7855],
[0.5867, 0.8539, 0.2145],
[0.8539, 0.5867, 0.4521],
[0.1461, 0.7328, 0.8812]]
alpha_quartz = pymatgen_structure(lattice.cell_matrix,
basis.symbols, coords)
alpha_quartz = \
Crystal3DStructure.from_pymatgen_structure(alpha_quartz)
# alpha_quartz = \
# HexagonalStructure.from_spacegroup(154, a, c, ["Si", "O"],
# [[0.4697, 0.0000, 0.0000],
# [0.4135, 0.2669, 0.1191]],
# scaling_matrix=scaling_matrix)
super().__init__(structure=alpha_quartz, **kwargs)
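# --- Usage sketch (not part of the original module) -------------------------
# Building one of the predefined structures above (requires pymatgen and the
# bundled lattice-parameter reference data). `basis.symbols` is assumed
# available on BasisAtoms, as used elsewhere in this module.
if __name__ == '__main__':
    gold = Gold()
    print(gold.lattice)
    print(gold.basis.symbols)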
| scikit-nano/scikit-nano | sknano/core/crystallography/_xtal_structures.py | Python | bsd-2-clause | 23,024 |
'''
Test Exception handling for Create VM
@author: Quarkonics
'''
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.primarystorage_operations as ps_ops
import zstackwoodpecker.operations.image_operations as img_ops
import zstackwoodpecker.operations.tag_operations as tag_ops
import zstackwoodpecker.zstack_test.zstack_test_image as zstack_image_header
import zstackwoodpecker.zstack_test.zstack_test_snapshot as zstack_snapshot_header
import zstackwoodpecker.zstack_test.zstack_test_vm as zstack_vm_header
import apibinding.api_actions as api_actions
import apibinding.inventory as inventory
import zstackwoodpecker.operations.deploy_operations as deploy_operations
import zstackwoodpecker.operations.net_operations as net_ops
import uuid
import os
import time
import MySQLdb
CHECK_BITS = "/nfsprimarystorage/checkbits"
DOWNLOAD_IMAGE = "/nfsprimarystorage/imagestore/download"
NFS_SFTP_CREATE_VOLUME_FROM_TEMPLATE = "/nfsprimarystorage/sftp/createvolumefromtemplate"
FLAT_DHCP_PREPARE = "/flatnetworkprovider/dhcp/prepare"
FLAT_DHCP_APPLY = "/flatnetworkprovider/dhcp/apply"
VM_START = "/vm/start"
FLAT_DHCP_RELEASE = "/flatnetworkprovider/dhcp/release"
NFS_DELETE = "/nfsprimarystorage/delete"
_config_ = {
'timeout' : 720,
'noparallel' : True,
}
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
agent_url = None
agent_url2 = None
vm = None
case_flavor = dict(normal= dict(agent_url=None),
check_bits= dict(agent_url=CHECK_BITS),
download_image= dict(agent_url=DOWNLOAD_IMAGE),
create_volume= dict(agent_url=NFS_SFTP_CREATE_VOLUME_FROM_TEMPLATE),
dhcp_prepare= dict(agent_url=FLAT_DHCP_PREPARE),
dhcp_apply= dict(agent_url=FLAT_DHCP_APPLY),
vm_start= dict(agent_url=VM_START),
dhcp_release= dict(agent_url=FLAT_DHCP_RELEASE),
nfs_delete= dict(agent_url=NFS_DELETE),
)
db_tables_white_list = ['VmInstanceSequenceNumberVO', 'TaskProgressVO', 'RootVolumeUsageVO', 'ImageCacheVO', 'VolumeEO', 'SecurityGroupFailureHostVO']
def test():
global agent_url
global agent_url2
global vm
flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
agent_url = flavor['agent_url']
script = '''
{ entity ->
throw new Exception("shuang")
}
'''
deploy_operations.remove_all_simulator_agent_script()
l3net_uuid = test_lib.lib_get_l3_by_name(os.environ.get('l3VlanNetworkName3')).uuid
is_flat = test_lib.lib_get_flat_dhcp_by_l3_uuid(l3net_uuid)
if is_flat:
try:
dhcp_ip = net_ops.get_l3network_dhcp_ip(l3net_uuid)
except:
dhcp_ip = None
else:
dhcp_ip = None
imagestore = test_lib.lib_get_image_store_backup_storage()
if imagestore == None:
test_util.test_skip('Required imagestore to test')
cond = res_ops.gen_query_conditions('type', '=', 'NFS')
local_pss = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)
if len(local_pss) == 0:
test_util.test_skip('Required nfs ps to test')
ps_uuid = local_pss[0].uuid
bs_uuid = imagestore.uuid
image_option = test_util.ImageOption()
image_option.set_name('fake_image')
image_option.set_description('fake image')
image_option.set_format('raw')
image_option.set_mediaType('RootVolumeTemplate')
image_option.set_backup_storage_uuid_list([bs_uuid])
image_option.url = "http://fake/fake.raw"
image = img_ops.add_image(image_option)
image_uuid = image.uuid
vm = test_stub.create_vm(image_uuid=image_uuid, ps_uuid=ps_uuid)
vm.destroy()
vm.expunge()
if agent_url != None:
deploy_operations.deploy_simulator_agent_script(agent_url, script)
if agent_url == FLAT_DHCP_RELEASE or agent_url == NFS_DELETE:
agent_url2 = NFS_SFTP_CREATE_VOLUME_FROM_TEMPLATE
deploy_operations.deploy_simulator_agent_script(agent_url2, script)
saved_db_stats = test_stub.get_db_stats(dhcp_ip)
create_vm_failure = False
try:
vm = test_stub.create_vm(image_uuid=image_uuid, ps_uuid=ps_uuid)
except:
create_vm_failure = True
if agent_url != None and not create_vm_failure:
test_util.test_fail("Expect failure during creating VM while it passed. Test Exception handling for Create VM FAIL")
if agent_url != None:
if is_flat:
try:
dhcp_ip = net_ops.get_l3network_dhcp_ip(l3net_uuid)
except:
dhcp_ip = None
else:
dhcp_ip = None
saved_db_stats2 = test_stub.get_db_stats(dhcp_ip)
test_stub.compare_db_stats(saved_db_stats, saved_db_stats2, db_tables_white_list)
test_util.test_pass("Test Exception handling for Create VM PASS")
def env_recover():
global vm
if vm != None:
vm.destroy()
vm.expunge()
deploy_operations.remove_all_simulator_agent_script()
| zstackio/zstack-woodpecker | integrationtest/vm/simulator/negative/test_imagestore_nfs_create_vm.py | Python | apache-2.0 | 5,242 |
#! /usr/bin/python2.7
# -*- coding: utf-8 -*-
import os
import re
import sys
import math
import gevent
from gevent import monkey; monkey.patch_all()
import requests
import utils
from settings import BASEURL, DIR, PAGE_SIZE
def convert(assembly_id):
baseurl = '%sAGE_FROM=%d&AGE_TO=%d' % (BASEURL['list'], assembly_id, assembly_id)
directory = '%s/%s' % (DIR['list'], assembly_id)
return baseurl, directory
def get_npages(assembly_id):
url, directory = convert(assembly_id)
utils.check_dir(directory)
fn = '%s/tmp.html' % directory
utils.get_webpage(url, fn)
page = utils.read_webpage(fn)
m = re.search(u'총(.+)건', page.xpath('//span[@class="text3"]/text()')[0])
nbills = int(m.group(1))
npages = int(math.ceil(nbills/float(PAGE_SIZE)))
print 'Total %d bills, %d pages to %s' % (nbills, npages, directory)
return npages
def get_html(assembly_id, npages):
def get_page(baseurl, page, directory, npages):
try:
url = baseurl + '&PAGE=%d&PAGE_SIZE=%d' % (page, PAGE_SIZE)
pn = npages - page + 1
fn = '%s/%d.html' % (directory, pn)
is_first = True
while is_first or 'TEXTAREA ID="MSG" STYLE="display:none"' in doc:
doc = utils.get_webpage_text(url)
is_first = False
with open(fn, 'w') as f:
f.write(doc)
sys.stdout.write('%s\t' % pn)
sys.stdout.flush()
except (requests.exceptions.RequestException, IOError) as e:
            print '\nFailed to get %s due to %s' % (fn, repr(e))
baseurl, directory = convert(assembly_id)
utils.check_dir(directory)
#
print 'Downloading:'
jobs = [gevent.spawn(get_page, baseurl, page, directory, npages)\
for page in range(1, npages+1)]
gevent.joinall(jobs)
return npages
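# --- Usage sketch (not part of the original module) -------------------------
# Minimal driver for the helpers above (assumes settings.BASEURL/DIR point at
# valid endpoints/paths and that the assembly id exists).
if __name__ == '__main__':
    assembly_id = 19
    get_html(assembly_id, get_npages(assembly_id))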
| lexifdev/crawlers | bills/meta/html.py | Python | agpl-3.0 | 1,875 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class product(osv.osv):
_inherit = 'product.product'
_columns = {
'event_ok': fields.boolean('Event Subscription', help='Determine if a product needs to create automatically an event registration at the confirmation of a sales order line.'),
'event_type_id': fields.many2one('event.type', 'Type of Event', help='Select event types so when we use this product in sales order lines, it will filter events of this type only.'),
}
def onchange_event_ok(self, cr, uid, ids, event_ok, context=None):
return {'value': {'type': event_ok and 'service' or False}}
product()
class sale_order_line(osv.osv):
_inherit = 'sale.order.line'
_columns = {
'event_id': fields.many2one('event.event', 'Event', help="Choose an event and it will automatically create a registration for this event."),
#those 2 fields are used for dynamic domains and filled by onchange
'event_type_id': fields.related('product_id','event_type_id', type='many2one', relation="event.type", string="Event Type"),
'event_ok': fields.related('product_id', 'event_ok', string='event_ok', type='boolean'),
}
def product_id_change(self, cr, uid, ids,
pricelist,
product, qty=0,
uom=False,
qty_uos=0,
uos=False,
name='',
partner_id=False,
lang=False,
update_tax=True,
date_order=False,
packaging=False,
fiscal_position=False,
flag=False, context=None):
"""
        Check whether the selected product is an event product and, if so, propagate its event type to the order line.
"""
res = super(sale_order_line,self).product_id_change(cr, uid, ids, pricelist, product, qty=qty, uom=uom, qty_uos=qty_uos, uos=uos, name=name, partner_id=partner_id, lang=lang, update_tax=update_tax, date_order=date_order, packaging=packaging, fiscal_position=fiscal_position, flag=flag, context=context)
if product:
product_res = self.pool.get('product.product').browse(cr, uid, product, context=context)
if product_res.event_ok:
res['value'].update(event_type_id=product_res.event_type_id.id,
event_ok=product_res.event_ok)
else:
res['value'].update(event_type_id=False,
event_ok=False)
return res
def button_confirm(self, cr, uid, ids, context=None):
'''
        Create an event registration for each confirmed sales order line linked to an event.
'''
registration_obj = self.pool.get('event.registration')
sale_obj = self.pool.get('sale.order')
for order_line in self.browse(cr, uid, ids, context=context):
if order_line.event_id.id:
dic = {
'name': order_line.order_id.partner_invoice_id.name,
'partner_id': order_line.order_id.partner_id.id,
'nb_register': int(order_line.product_uom_qty),
'email': order_line.order_id.partner_id.email,
'phone': order_line.order_id.partner_id.phone,
'origin': order_line.order_id.name,
'event_id': order_line.event_id.id,
}
registration_id = registration_obj.create(cr, uid, dic, context=context)
message = _("The registration %s has been created from the Sales Order %s.") % (registration_id, order_line.order_id.name)
registration_obj.message_post(cr, uid, [registration_id], body=message, context=context)
return super(sale_order_line, self).button_confirm(cr, uid, ids, context=context)
| jaggu303619/asylum | openerp/addons/event_sale/event_sale.py | Python | agpl-3.0 | 4,880 |
# Internal Modules
import blockbuster.config_services
import blockbuster.bb_dbconnector_factory as bb_dbinterface
# Process a 'Current Status' command
def current_status(request):
# Create an analytics record for the request
bb_dbinterface.DBConnectorInterfaceFactory()\
.create().add_analytics_record("Count", "Command-STATUS", blockbuster.config_services
.identify_service(request.servicenumber)[0])
# Get the list of people who are blocking you in from the database
active_blocks_as_blockee = bb_dbinterface.DBConnectorInterfaceFactory()\
.create().get_list_of_blocks_for_blockee(request.requestormobile)
    # Work through the returned list and put them in a list of one dictionary object per block
blocks_as_blockee = []
for b in active_blocks_as_blockee:
c = {
"blocker": b[0],
"blockee": b[1],
"blocked_reg": b[2]
}
blocks_as_blockee.append(c)
# Get the list of people you are blocking in from the database
active_blocks_as_blocker = bb_dbinterface.DBConnectorInterfaceFactory()\
.create().get_list_of_blocks_for_blocker(request.requestormobile)
# Work through the returned list and put them in a list of one dictionary object per block
blocks_as_blocker = []
for b in active_blocks_as_blocker:
c = {
"blocker": b[0],
"blockee": b[1],
"blocked_reg": b[2]
}
blocks_as_blocker.append(c)
# Add both lists to a single status dictionary object
status_dict = {
'blockedBy': blocks_as_blockee,
'blocking': blocks_as_blocker
}
return status_dict
# Process a 'Block' command
def block(request):
pass
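# --- Usage sketch (not part of the original module) -------------------------
# current_status() only reads `servicenumber` and `requestormobile` from the
# request object, so a hypothetical stand-in is enough to show the call shape
# (a configured BlockBuster database is still required at runtime).
def _current_status_example():
    from collections import namedtuple
    Request = namedtuple('Request', ['servicenumber', 'requestormobile'])
    status = current_status(Request(servicenumber='07700900001',
                                    requestormobile='07700900002'))
    return status['blocking'], status['blockedBy']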
| mattstibbs/blockbuster-server | blockbuster/bb_command_processor.py | Python | mit | 1,771 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import numpy as np
__author__ = 'Yuji Ikeda'
class FCReorderer(object):
@staticmethod
def reorder_fc(fc, indices):
fc_reordered = np.full_like(fc, np.nan) # Initialized by np.nan to detect possible errors
for i1, j1 in enumerate(indices):
for i2, j2 in enumerate(indices):
fc_reordered[j1, j2] = fc[i1, i2]
return fc_reordered
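# --- Usage sketch (not part of the original module) -------------------------
# Reordering a toy 2-atom force-constant matrix: `indices[i] = j` sends the
# entries for original atom i to position j in the reordered array.
if __name__ == '__main__':
    fc = np.arange(4.0).reshape(2, 2)          # fc[i, j] for atoms i, j
    swapped = FCReorderer.reorder_fc(fc, [1, 0])
    print(swapped)                              # rows and columns exchanged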
| yuzie007/ph_analysis | ph_analysis/fc/fc_reorderer.py | Python | mit | 546 |
"""
Runs OCR on a given file.
"""
from os import system, listdir
from PIL import Image
from pytesseract import image_to_string
import editdistance
from constants import DATA_DIR
def classify(image, people_class, max_classify_distance=1, min_nonclassify_distance=3):
"""
Runs an OCR classifier on a given image file, drawing from a dictionary
"""
read = image_to_string(Image.open(image)).lower()
result = None
for person in people_class:
dist = editdistance.eval(person, read)
if dist <= max_classify_distance:
if result is not None:
return None
result = people_class[person]
elif max_classify_distance < dist <= min_nonclassify_distance:
return None
return result
def setup_ocr(raw_data, progress):
"""
    Extracts the name regions of each PDF into PNG images for OCR.
"""
system("unzip {} -d {}/extract".format(raw_data, DATA_DIR))
base = DATA_DIR + "/extract/"
mainfolder = base + listdir(base)[0]
files = sorted(listdir(mainfolder))
p_bar = progress(len(files))
for index, path in enumerate(files):
p_bar.update(index)
fullpath = mainfolder + "/" + path
system("mkdir {}/ocr".format(DATA_DIR))
basic_format = r"pdftoppm -png -f 3 -l 3 -x 170 -y %s -W 900 -H 100 {} > {}/ocr/%s{}.png" \
.format(fullpath, DATA_DIR, index)
system(basic_format % (1030, "left"))
system(basic_format % (1115, "right"))
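# --- Illustrative sketch (not part of the original module) ------------------
# classify() accepts a read string only when exactly one dictionary name is
# within max_classify_distance, and rejects anything that lands in the
# ambiguous band up to min_nonclassify_distance. Hypothetical names below.
def _distance_threshold_example():
    # distance 1 <= max_classify_distance -> classified; 2..3 -> rejected
    assert editdistance.eval('john smith', 'john smlth') == 1
    assert editdistance.eval('john smith', 'jqhn smlth') == 2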
| kavigupta/61a-analysis | src/ocr.py | Python | gpl-3.0 | 1,475 |