Dataset schema:
- commit: string, length 40
- subject: string, length 1 to 3.25k
- old_file: string, length 4 to 311
- new_file: string, length 4 to 311
- old_contents: string, length 0 to 26.3k
- lang: string, 3 classes
- proba: float64, range 0 to 1
- diff: string, length 0 to 7.82k
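The diff fields in the rows below keep the viewer's percent-encoding (%0A for newlines, %22 for double quotes, %5B/%5D for brackets). As a minimal sketch of how one might restore a readable hunk, assuming only Python's standard library (the sample string is copied verbatim from the "Bump version" row below):

import urllib.parse

# Percent-encoded hunk copied from the "Bump version" row of this dump.
raw = "@@ -12,11 +12,11 @@ = '0.4. -0 +1 '%0A"

# unquote() maps %0A back to a newline and leaves plain characters untouched.
print(urllib.parse.unquote(raw))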
9bbad92a8aec5e52781fab42d913156c8d62a451
Fix output
implementation/Python/lbm_simple/lbmFlowAroundCylinder.py
implementation/Python/lbm_simple/lbmFlowAroundCylinder.py
#!/usr/bin/env python3
# Copyright (C) 2015 Universite de Geneve, Switzerland
# E-mail contact: [email protected]
#
# 2D flow around a cylinder
#
# Update by Adrien Python ([email protected]):
# Slightly modified from original to handle different ordering for v/t variables

from numpy import *
import matplotlib.pyplot as plt
from matplotlib import cm
import sys
import datetime

###### Outputs definitions #####################################################
out = sys.argv[1]
outInterval = int(sys.argv[3])
outDir = sys.argv[4]
outPre = sys.argv[5]

###### Flow definition #########################################################
maxIter = int(sys.argv[2])       # Total number of time iterations.
Re = 220.0                       # Reynolds number.
nx, ny = 420, 180                # Numer of lattice nodes.
ly = ny-1                        # Height of the domain in lattice units.
cx, cy, r = nx//4, ny//2, ny//9  # Coordinates of the cylinder.
uLB = 0.04                       # Velocity in lattice units.
nulb = uLB*r/Re;                 # Viscoscity in lattice units.
omega = 1 / (3*nulb+0.5);        # Relaxation parameter.

###### Lattice Constants #######################################################
v = array([[ 1,  1], [ 1,  0], [ 1, -1], [ 0,  1], [ 0,  0],
           [ 0, -1], [-1,  1], [-1,  0], [-1, -1]])
t = array([ 1/36, 1/9, 1/36, 1/9, 4/9, 1/9, 1/36, 1/9, 1/36])

###### Automaticaly defined Constants ##########################################

def initcol(v0):
    return [f for f in range(9) if v[f][0] == v0]

col1 = initcol(1)
col2 = initcol(0)
col3 = initcol(-1)

def initopp():
    opp = [None] * 9
    for f in range(9):
        for g in range(9):
            if v[f][0] == -v[g][0] and v[f][1] == -v[g][1]:
                opp[f] = g
                break
    return opp

opp = initopp();

###### Function Definitions ####################################################

def macroscopic(fin):
    rho = sum(fin, axis=0)
    u = zeros((2, nx, ny))
    for i in range(9):
        u[0,:,:] += v[i,0] * fin[i,:,:]
        u[1,:,:] += v[i,1] * fin[i,:,:]
    u /= rho
    return rho, u

def equilibrium(rho, u):  # Equilibrium distribution function.
    usqr = 3/2 * (u[0]**2 + u[1]**2)
    feq = zeros((9,nx,ny))
    for i in range(9):
        cu = 3 * (v[i,0]*u[0,:,:] + v[i,1]*u[1,:,:])
        feq[i,:,:] = rho*t[i] * (1 + cu + 0.5*cu**2 - usqr)
    return feq

###### Setup: cylindrical obstacle and velocity inlet with perturbation ########

# Creation of a mask with 1/0 values, defining the shape of the obstacle.
def obstacle_fun(x, y):
    return (x-cx)**2+(y-cy)**2<0 #(x-cx)**2+(y-cy)**2<r**2

obstacle = fromfunction(obstacle_fun, (nx,ny))

# Initial velocity profile: almost zero, with a slight perturbation to trigger
# the instability.
def inivel(d, x, y):
    return 0*y #(1-d) * uLB * (1 + 1e-4*sin(y/ly*2*pi))

vel = fromfunction(inivel, (2,nx,ny))

# Initialization of the populations at equilibrium with the given velocity.
rho = ones((nx,ny))
rho[nx//2,ny//2] = 2
fin = equilibrium(rho, vel)

seconds = 0
start = datetime.datetime.now()

###### Main time loop ##########################################################
for iter in range(1, maxIter+1):
    # Right wall: outflow condition.
    # fin[col3,-1,:] = fin[col3,-2,:]

    # Compute macroscopic variables, density and velocity.
    rho, u = macroscopic(fin)

    # Left wall: inflow condition.
    # u[:,0,:] = vel[:,0,:]
    # rho[0,:] = 1/(1-u[0,0,:]) * ( sum(fin[col2,0,:], axis=0) +
    #                               2*sum(fin[col3,0,:], axis=0) )

    # Compute equilibrium.
    feq = equilibrium(rho, u)
    # for i in col1:
    #     fin[i,0,:] = feq[i,0,:] + fin[opp[i],0,:] - feq[opp[i],0,:]

    # Collision step.
    fout = fin - omega * (fin - feq)

    # Bounce-back condition for obstacle.
    for i in range(9):
        fout[i, obstacle] = fin[opp[i], obstacle]

    # Streaming step.
    for i in range(9):
        fin[i,:,:] = roll(
            roll(fout[i,:,:], v[i,0], axis=0),
            v[i,1], axis=1
        )

    if (outInterval==0 and iter == maxIter) or (outInterval>0 and iter % outInterval == 0):
        seconds += (datetime.datetime.now() - start).total_seconds()
        # Visualization of the velocity.
        if out == 'IMG':
            plt.clf()
            plt.imshow(sqrt(u[0]**2+u[1]**2).transpose(), cmap=cm.Reds)
            plt.savefig("{0}/{1}{2}.png".format(outDir, outPre, iter))
        if out == 'OUT':
            file = open("{0}/{1}{2}.out".format(outDir, outPre, iter), 'w')
            for x in range(nx):
                for y in range(ny):
                    for f in range(9):
                        file.write("[{0}, {1}, {2}] {3:64.60f}\n".format(x,y,f, fin[f,x,y]))
        start = datetime.datetime.now()

print ("average lups:", nx*ny*maxIter/max(1,seconds))
Python
0.999999
@@ -4683,26 +4683,10 @@ te(%22 -%5B %7B0 -%7D, %7B1%7D, %7B2%7D%5D %7B3 :64. @@ -4704,15 +4704,8 @@ mat( -x,y,f, fin%5B
f10dcd822f72e86d0eb0071acf7d38e81cfe32da
Add a blank line
examples/mnist/mnist.py
examples/mnist/mnist.py
import logging

import qnd
import tensorflow as tf


logging.getLogger().setLevel(logging.INFO)

qnd.add_flag("use_eval_input_fn", action="store_true")
qnd.add_flag("use_model_fn_ops", action="store_true")


def read_file(filename_queue):
    _, serialized = tf.TFRecordReader().read(filename_queue)

    scalar_feature = lambda dtype: tf.FixedLenFeature([], dtype)

    features = tf.parse_single_example(serialized, {
        "image_raw": scalar_feature(tf.string),
        "label": scalar_feature(tf.int64),
    })

    image = tf.decode_raw(features["image_raw"], tf.uint8)
    image.set_shape([28**2])

    return tf.to_float(image) / 255 - 0.5, features["label"]


def minimize(loss):
    return tf.contrib.layers.optimize_loss(
        loss,
        tf.contrib.framework.get_global_step(),
        0.01,
        "Adam")


def mnist_model(image, number):
    h = tf.contrib.layers.fully_connected(image, 200)
    h = tf.contrib.layers.fully_connected(h, 10, activation_fn=None)

    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(h, number))
    predictions = tf.argmax(h, axis=1)
    train_op = minimize(loss)
    eval_metrics = {
        "accuracy": tf.reduce_mean(tf.to_float(tf.equal(predictions, number)))
    }

    if qnd.FLAGS.use_model_fn_ops:
        return tf.contrib.learn.estimators.model_fn.ModelFnOps(
            predictions=predictions,
            loss=loss,
            train_op=train_op,
            eval_metrics=eval_metrics)

    return predictions, loss, train_op, eval_metrics

run = qnd.def_run()


def main():
    run(mnist_model, read_file,
        read_file if qnd.FLAGS.use_eval_input_fn else None)


if __name__ == "__main__":
    main()
Python
1
@@ -1531,16 +1531,17 @@ etrics%0A%0A +%0A run = qn
35d6d780bddf72ab5ff216a7603bd89f980c8deb
Bump version
libgrabsite/__init__.py
libgrabsite/__init__.py
__version__ = '0.4.0'
Python
0
@@ -12,11 +12,11 @@ = '0.4. -0 +1 '%0A
e511da2cb7b73891f26b93e684d9fba80042f3cd
fix syntax
bin/redis_app_update.py
bin/redis_app_update.py
import json
import sys
import redis

r_server = redis.StrictRedis('127.0.0.1', db=2)
app_key = "apps"
app_info = json.loads(r_server.get(app_key))
app_name = sys.argv[1]
app_port = sys.argv[2]
app_namespace = sys.argv[3]
app_service_name = sys.argv[4]
check = False

for app in app_info:
    if app["name"] == app_name:
        check = True
        break

if not check:
    element = '[{"name":"%s", "namespace":"%s", "service_name":"%s" "port":"%s"}]' % (app_name, app_namespace, app_service_name app_port)
    el = json.loads(element)
    app_info.extend(el)
    app_data = json.dumps(app_info)
    r_server.set(app_key, app_data)
    r_server.set("need_CSR", "1")
    r_server.bgsave()
else:
    r_server.set("need_CSR", "0")
    r_server.bgsave()
Python
0.000023
@@ -491,16 +491,17 @@ ice_name +, app_po
dcd657cb66f6d1f4df78c23bffeae01bf8a88ec3
Revise edge case & memo return positions
alg_longest_common_subsequence.py
alg_longest_common_subsequence.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function


def longest_common_subsequence_recur(s1, s2, n1, n2):
    """Longest common subsequence by recursion.

    Time complexity: O(2^n).
    Space complexity: O(n1*n2).
    """
    if n1 < 0 or n2 < 0:
        # Base case.
        lcs = 0
    elif s1[n1] == s2[n2]:
        lcs = 1 + longest_common_subsequence_recur(
            s1, s2, n1 - 1, n2 - 1)
    elif s1[n1] != s2[n2]:
        # Just for clarity.
        lcs1 = longest_common_subsequence_recur(s1, s2, n1 - 1, n2)
        lcs2 = longest_common_subsequence_recur(s1, s2, n1, n2 - 1)
        lcs = max(lcs1, lcs2)
    return lcs


def _lcs_memo(s1, s2, n1, n2, M):
    if M[n1][n2]:
        return M[n1][n2]

    if n1 < 0 or n2 < 0:
        lcs = 0
    elif s1[n1] == s2[n2]:
        lcs = 1 + _lcs_memo(s1, s2, n1 - 1, n2 - 1, M)
    elif s1[n1] != s2[n2]:
        lcs1 = _lcs_memo(s1, s2, n1 - 1, n2 - 1, M)
        lcs2 = _lcs_memo(s1, s2, n1, n2 - 1, M)
        lcs = max(lcs1, lcs2)

    M[n1][n2] = lcs
    return lcs


def longest_common_subsequence_memo(s1, s2, n1, n2):
    """Longest common subsequence by memoization.

    Time complexity: O(n1*n2).
    Space complexity: O(n1*n2).
    """
    M = [[0] * (n2 + 1) for _ in range(n1 + 1)]
    return _lcs_memo(s1, s2, n1, n2, M)


def longest_common_subsequence_dp(s1, s2, n1, n2):
    """Longest common subsequence by dynamic programming.

    Time complexity: O(n1*n2).
    Space complexity: O(n1*n2).
    """
    M = [[0] * (n2 + 1) for _ in range(n1 + 1)]

    for r in range(1, n1 + 1):
        for c in range(1, n2 + 1):
            if s1[r] == s2[c]:
                M[r][c] = 1 + M[r - 1][c - 1]
            elif s1[r] != s2[c]:
                lcs1 = M[r - 1][c]
                lcs2 = M[r][c - 1]
                M[r][c] = max(lcs1, lcs2)

    return M[-1][-1]


def main():
    import time

    s1 = 'BATD'
    s2 = 'ABACD'
    # LCD: 3 (BAD).
    n1 = len(s1) - 1
    n2 = len(s2) - 1

    start_time = time.time()
    print('LCD by recursion: {}'.format(
        longest_common_subsequence_recur(s1, s2, n1, n2)))
    print('Time: {}'.format(time.time() - start_time))

    start_time = time.time()
    print('LCD by memo: {}'.format(
        longest_common_subsequence_memo(s1, s2, n1, n2)))
    print('Time: {}'.format(time.time() - start_time))

    start_time = time.time()
    print('LCD by DP: {}'.format(
        longest_common_subsequence_memo(s1, s2, n1, n2)))
    print('Time: {}'.format(time.time() - start_time))


if __name__ == '__main__':
    main()
Python
0.000002
@@ -319,38 +319,42 @@ se.%0A -lcs = +return 0%0A -el +%0A if s1%5Bn1%5D == @@ -722,25 +722,32 @@ %0A if -M%5Bn1%5D%5Bn2%5D +n1 %3C 0 or n2 %3C 0 :%0A @@ -759,66 +759,63 @@ urn -M%5Bn1%5D%5Bn2%5D%0A%0A if n1 %3C 0 or n2 %3C 0:%0A lcs = 0 +0%0A %0A if M%5Bn1%5D%5Bn2%5D:%0A return M%5Bn1%5D%5Bn2%5D%0A %0A -el if s
a1b6205497ac079da24201478d76f3435857dd7b
Add a default height for the traits view
examples/world_map.py
examples/world_map.py
#!/usr/bin/env python
"""
Displays a world map with locations plotted on top. Locations are expected to
be tuples of latitude, longitude where West and South are expressed as
negative values.
 - Mousewheel up and down zooms the plot in and out.
 - Pressing "z" brings up the Zoom Box, and you can click-drag a rectangular
   region to zoom. If you use a sequence of zoom boxes, pressing alt-left-arrow
   and alt-right-arrow moves you forwards and backwards through the "zoom
   history".
"""

# Standard library imports
import os.path
import urllib

# Major library imports
import numpy

# ETS imports
from enthought.chaco.api import Plot, ArrayPlotData, ImageData
from enthought.chaco.tools.api import ZoomTool
from enthought.enable.component_editor import ComponentEditor
from enthought.traits.api import HasTraits, Instance, Str
from enthought.traits.ui.api import Item, View


class WorldMapPlot(HasTraits):

    ### Public Traits ##########################################################

    # The plot which will be displayed
    plot = Instance(Plot)

    # The URL which points to the world map image to be downloaded
    image_url = Str("http://veimages.gsfc.nasa.gov/2433/land_shallow_topo_2048.jpg")

    ### Private Traits #########################################################

    # The path to where the image exists on the filesystem
    image_path = Str()

    # The view
    traits_view = View(Item('plot', editor=ComponentEditor(),
                            width=800, show_label=False),
                       resizable=True)

    #---------------------------------------------------------------------------
    # Public interface
    #---------------------------------------------------------------------------

    def __init__(self, **kw):
        super(WorldMapPlot, self).__init__(**kw)

        self._download_map_image()
        image = ImageData.fromfile(self.image_path)

        # For now, the locations are hardcoded, though this can be changed
        # eassily to take command line args, read from a file, or by other
        # means
        austin_loc = (30.16, -97.44)

        locations_x = numpy.array([austin_loc[1]])
        locations_y = numpy.array([austin_loc[0]])

        # transform each of the locations to the image data space, including
        # moving the origin from bottom left to top left
        locations_x = (locations_x + 180) * image.data.shape[1]/360
        locations_y = (locations_y*-1 + 90) * image.data.shape[0]/180

        # Create the plott data, adding the image and the locations
        plot_data = ArrayPlotData()
        plot_data.set_data("imagedata", image._data)
        plot_data.set_data("locations_x", locations_x)
        plot_data.set_data("locations_y", locations_y)

        # Create the plot with the origin as top left, which matches
        # how the image data is aligned
        self.plot = Plot(plot_data, default_origin="top left")
        self.plot.img_plot('imagedata')

        # Plot the locations as a scatter plot to be overlayed on top
        # of the map
        loc_plot = self.plot.plot(('locations_x', 'locations_y'),
                                  type='scatter', size=3, color='yellow',
                                  marker='dot')[0]

        loc_plot.x_mapper.range.high = image.data.shape[1]
        loc_plot.x_mapper.range.low = 0
        loc_plot.y_mapper.range.high = image.data.shape[0]
        loc_plot.y_mapper.range.low = -0

        # set up any tools, in this case just the zoom tool
        zoom = ZoomTool(component=self.plot, tool_mode="box", always_on=False)
        self.plot.overlays.append(zoom)

    #---------------------------------------------------------------------------
    # Protected interface
    #---------------------------------------------------------------------------

    def _download_map_image(self):
        """ Downloads a map from the image_url attribute. This is done
            primarily to keep the redistributable Chaco package as small
            as possible
        """
        example_dir = os.path.dirname(__file__)

        self.image_path = os.path.join(example_dir, 'data',
                                       os.path.split(self.image_url)[1])

        if not os.path.exists(self.image_path):
            print "Downloading map image"
            urllib.urlretrieve(self.image_url, self.image_path)

#===============================================================================
# demo object that is used by the demo.py application.
#===============================================================================
demo = WorldMapPlot()

if __name__ == "__main__":
    demo.configure_traits()
Python
0
@@ -1489,16 +1489,28 @@ dth=800, + height=400, show_la
525a14100be26a89185421657989616c3711eedc
make username/passvoid better findeable ;-)
examples/debiangraph.py
examples/debiangraph.py
#/usr/bin/env python3
"""
example to build a file with all packages known to your system
for a debian jessie:

for i in $(ls /var/lib/apt/lists/*debian_dists_jessie_* |\
   grep -v i386 |grep -v Release); do
    cat $i >> /tmp/allpackages; echo >> /tmp/allpackages;
done

All debian based distros have a set of files in /var/lib/apt/lists.
In doubt create a filter for your distro

Install needed Python modules:
pip3 install pyArango
pip3 install deb_pkg_tools
"""
import deb_pkg_tools
from deb_pkg_tools.control import deb822_from_string
from deb_pkg_tools.control import parse_control_fields

from pyArango.connection import *
from pyArango.database import *
from pyArango.collection import *
from pyArango.document import *
from pyArango.query import *
from pyArango.graph import *
from pyArango.theExceptions import *

# Configure your ArangoDB server connection here
conn = Connection(arangoURL="http://localhost:8529", username="USERNAME", password="SECRET")

db = None
edgeCols = {}
packagesCol = {}

# we create our own database so we don't interfere with userdata:
if not conn.hasDatabase("testdb"):
    db = conn.createDatabase("testdb")
else:
    db = conn["testdb"]

if not db.hasCollection('packages'):
    packagesCol = db.createCollection('Collection', name='packages')
else:
    packagesCol = db.collections['packages']

def getEdgeCol(name):
    if not name in edgeCols:
        if not db.hasCollection(name):
            edgeCols[name] = db.createCollection(name=name, className='Edges')
        else:
            edgeCols[name] = db.collections[name]
    return edgeCols[name]

def saveGraphDefinition():
    graph_collection = db.collections["_graphs"]
    graph_defintion = {
        "_key": "debian_dependency_graph",
        "edgeDefinitions": [],
        "orphanCollections": [],
    }
    for collection in edgeCols.keys():
        graph_defintion["edgeDefinitions"].append(
            {"collection": collection, "from": ["packages",], "to": ["packages",]})
    graph_collection.createDocument(graph_defintion).save()

def VersionedDependencyToDict(oneDep, hasAlternatives):
    return {
        'name': oneDep.name,
        'version': oneDep.version,
        'operator': oneDep.operator,
        'hasAlternatives': hasAlternatives
    }

def DependencyToDict(oneDep, hasAlternatives):
    return {
        'name': oneDep.name,
        'hasAlternatives': hasAlternatives
    }

def DependencySetToDict(dep, hasAlternatives):
    depset = []
    for oneDep in dep.relationships:
        if isinstance(oneDep, deb_pkg_tools.deps.VersionedRelationship):
            depset.append(VersionedDependencyToDict(oneDep, hasAlternatives))
        elif isinstance(oneDep, deb_pkg_tools.deps.AlternativeRelationship):
            depset.append(DependencySetToDict(oneDep, True))
        elif isinstance(oneDep, deb_pkg_tools.deps.Relationship):
            depset.append(DependencyToDict(oneDep, hasAlternatives))
        else:
            print("Unknown relationshitp: " + repr(oneDep))
    return depset

def PackageToDict(pkg):
    # packages aren't serializable by default, translate it:
    ret = {}
    for attribute in pkg.keys():
        if isinstance(pkg[attribute], deb_pkg_tools.deps.RelationshipSet):
            # relation ship field to become an array of relations:
            ret[attribute] = DependencySetToDict(pkg[attribute], False)
        else:
            # regular string field:
            ret[attribute] = pkg[attribute]
    ret["_key"] = ret["Package"]
    return ret

def saveDependencyToEdgeCol(edgeCol, dep, pname, hasAlternatives):
    for oneDep in dep.relationships:
        if isinstance(oneDep, deb_pkg_tools.deps.VersionedRelationship):
            # version dependend relations:
            d = VersionedDependencyToDict(oneDep, hasAlternatives)
            d['_from'] = 'packages/' + pname
            d['_to'] = 'packages/' + oneDep.name
            relation = edgeCol.createDocument(d).save()
        elif isinstance(oneDep, deb_pkg_tools.deps.AlternativeRelationship):
            # A set of alternative relations; recurse:
            saveDependencyToEdgeCol(edgeCol, oneDep, pname, True)
        elif isinstance(oneDep, deb_pkg_tools.deps.Relationship):
            # simple relations only to package names without versions:
            d = DependencyToDict(oneDep, hasAlternatives)
            d['_from'] = 'packages/' + pname
            d['_to'] = 'packages/' + oneDep.name
            relation = edgeCol.createDocument(d).save()
        else:
            print("Unknown relationshitp: " + repr(oneDep))

#
# Main import routine
#
onePackage = ''
for line in open('/tmp/allpackages', encoding='utf-8'):
    # Package blocks are separated by new lines.
    if len(line) == 1 and len(onePackage) > 4:
        pkg = deb822_from_string(onePackage)
        pname = pkg['Package']
        pkg1 = parse_control_fields(pkg)
        p = PackageToDict(pkg1)
        try:
            packagesCol.createDocument(p).save()
            for key in pkg1.keys():
                # filter for fields with relations:
                if isinstance(pkg1[key], deb_pkg_tools.deps.RelationshipSet):
                    # save one relation set to field:
                    saveDependencyToEdgeCol(getEdgeCol(key), pkg1[key], pname, False)
            onePackage = ''
        except CreationError:
            pass
    else:
        onePackage += line

saveGraphDefinition()
Python
0.000004
@@ -946,16 +946,12 @@ me=%22 -USERNAME +root %22, p @@ -963,14 +963,8 @@ rd=%22 -SECRET %22)%0A%0A
47dff0bd0c7d4be641268845d70d08228a621108
Make players moving more smoothly, add ability for recursion
Lesson5/MazeGame/Maze.py
Lesson5/MazeGame/Maze.py
import time

class Maze(object):
    def __init__(self, map, mx, my):
        self.map = []
        self.width = 0
        self.mx = mx
        self.my = my
        for y in range(0, len(map)):
            s = map[y]
            self.width = max(self.width, len(s))
            ls = list(s)
            if len(ls)>0:
                self.map.append(ls)
        self.height = len(self.map)

        self.players = {}
        for y in range(0, self.height):
            cs = self.map[y]
            for x in range(0, self.width):
                s = cs[x]
                if 'A'<=s and s<='Z':
                    self.players[s] = [x, y]

        print self.players

    def show(self):
        brickImg = loadImage("Brick32.png");
        okaymanImg = loadImage("Okay32.png");
        trollmanImg = loadImage("Troll32.png");
        chestImg = loadImage("Chest32.gif");
        imgs = {"A":okaymanImg, "B":trollmanImg, "#":brickImg, "*":chestImg}
        for y in range(0, self.height):
            cs = self.map[y]
            for x in range(0, self.width):
                s = cs[x]
                if s == " ":
                    noStroke()
                    fill(0, 0, 0)
                    rectMode(CORNERS)
                    rect(x*self.mx, y*self.my, x*self.mx+self.mx, y*self.my+self.my)
                img = imgs.get(s, None)
                if img:
                    image(img, x*self.mx, y*self.my, self.mx, self.my)

    def goLeft(self, player):
        return self.go(player, 0)

    def goRight(self, player):
        return self.go(player, 1)

    def goUp(self, player):
        return self.go(player, 2)

    def goDown(self, player):
        return self.go(player, 3)

    def go(self, player, d):
        px, py = self.players[player]
        x, y = px, py
        if d == 0:
            x = x - 1
        if d == 1:
            x = x + 1
        if d == 2:
            y = y - 1
        if d == 3:
            y = y + 1

        s = self.map[y][x]
        if s == " ":
            self.map[py][px] = " "
            self.map[y][x] = player

            self.players[player] = [x, y]
            # if player == "A":
            #     if s == "*":
            #         textSize(50)
            #         text("OkayMan WIN", 200, 300)
            #         time.sleep(10)
            #
            # if player == "B":
            #     if s == "A":
            #         textSize(50)
            #         text("TrollMan WIN", 200, 300)
            #         time.sleep(10)
            return True
        return False
Python
0.000001
@@ -641,336 +641,244 @@ -%0A print self.players %0A %0A def show(self):%0A brickImg = loadImage(%22Brick32.png%22);%0A okaymanImg = loadImage(%22Okay32.png%22);%0A trollmanImg = loadImage(%22Troll32.png%22);%0A chestImg = loadImage(%22Chest32.gif%22);%0A imgs = %7B%22A%22:okaymanImg, %22B%22:trollmanImg, %22#%22:brickImg, %22*%22:chestImg%7D +self.imgDict = %7B%22#%22:loadImage(%22Brick32.png%22), %22*%22:loadImage(%22Chest32.gif%22)%7D%0A self.plImgDict = %7B%22A%22:loadImage(%22Okay32.png%22), %22B%22:loadImage(%22Troll32.png%22)%7D%0A %0A print self.players %0A %0A def show(self): %0A @@ -1034,37 +1034,37 @@ if s -== %22 +!= %22# %22:%0A @@ -1264,18 +1264,272 @@ g = -imgs.get(s +self.imgDict.get(s, None)%0A if img:%0A image(img, x*self.mx, y*self.my, self.mx, self.my)%0A for playerName, coord in self.players.iteritems():%0A x, y = coord%0A img = self.plImgDict.get(playerName , No @@ -1999,21 +1999,21 @@ +d x, +d y = -px, py +0, 0 %0A @@ -2044,16 +2044,14 @@ +d x = -x - +- 1%0A @@ -2083,15 +2083,12 @@ +d x = - x + 1%0A @@ -2121,16 +2121,14 @@ +d y = -y - +- 1%0A @@ -2160,15 +2160,12 @@ +d y = - y + 1%0A%0A @@ -2189,14 +2189,35 @@ map%5B -y%5D%5Bx%5D%0A +int(py + dy)%5D%5Bint(px + dx)%5D %0A @@ -2250,121 +2250,296 @@ -self.map%5Bpy%5D%5Bpx%5D = %22 %22%0A self.map%5By%5D%5Bx%5D = player%0A %0A self.players%5Bplayer%5D = %5Bx, y%5D +for i in range(100):%0A self.players%5Bplayer%5D = %5Bpx + (dx / 100.0) * i, py + (dy / 100.0) * i%5D%0A time.sleep(0.001)%0A self.players%5Bplayer%5D = %5Bpx + dx, py + dy%5D%0A%0A self.map%5Bpy%5D%5Bpx%5D = %22 %22%0A self.map%5Bint(py + dy)%5D%5Bint(px + dx)%5D = player %0A
0d68fbaef300c53db407f6296c00e493e4b040bf
use xdg-email by default, fallback to xdg-open + mailto uri
plyer/platforms/linux/email.py
plyer/platforms/linux/email.py
import subprocess
from urllib import quote

from plyer.facades import Email


class LinuxEmail(Email):

    def _send(self, **kwargs):
        recipient = kwargs.get('recipient')
        subject = kwargs.get('subject')
        text = kwargs.get('text')
        create_chooser = kwargs.get('create_chooser')

        uri = "mailto:"
        if recipient:
            uri += str(recipient)
        if subject:
            uri += "?" if not "?" in uri else "&"
            uri += "subject="
            uri += quote(str(subject))
        if text:
            uri += "?" if not "?" in uri else "&"
            uri += "body="
            uri += quote(str(text))

        subprocess.Popen(["xdg-open", uri])


def instance():
    return LinuxEmail()
Python
0
@@ -322,16 +322,45 @@ ailto:%22%0A + args = %5B%22xdg-email%22%5D%0A @@ -407,16 +407,53 @@ ipient)%0A + args += %5Bstr(recipient)%5D%0A @@ -583,16 +583,64 @@ bject))%0A + args += %5B%22--subject%22, str(subject)%5D%0A @@ -765,52 +765,318 @@ t))%0A -%0A subprocess.Popen(%5B%22xdg-open%22, uri%5D) + args += %5B%22--body%22, str(text)%5D%0A%0A try:%0A subprocess.Popen(args)%0A except OSError:%0A try:%0A subprocess.Popen(%5B%22xdg-open%22, uri%5D)%0A except OSError:%0A print %22Warning: unable to start an email client. Make sure xdg-open is installed.%22 %0A%0A%0Ad
8178b8083e29b2f9930e8e72e9f0d80beb847ba4
Implement file upload on server side
indico/modules/attachments/controllers.py
indico/modules/attachments/controllers.py
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.

from __future__ import unicode_literals

from flask import flash
from indico.core.db import db
from indico.modules.attachments.views import WPEventAttachments
from indico.modules.attachments.forms import AddAttachmentsForm, AddLinkForm, CreateFolderForm
from indico.modules.attachments.models.folders import AttachmentFolder
from indico.util.i18n import _
from indico.web.flask.util import url_for, redirect_or_jsonify
from MaKaC.webinterface.rh.conferenceModif import RHConferenceModifBase


def _random_date():
    from datetime import datetime
    import random
    year = random.randint(1950, 2000)
    month = random.randint(1, 12)
    day = random.randint(1, 28)
    return datetime(year, month, day)


class RHEventAttachments(RHConferenceModifBase):
    """Shows the attachments of an event"""

    def _process(self):
        root_folders = [
            {'title': 'images', 'content-type': 'text/directory', 'modified_dt': _random_date(), 'content': [
                {'title': 'img001.jpg', 'content-type': 'image/jpeg', 'modified_dt': _random_date()},
                {'title': 'img002.jpg', 'content-type': 'image/jpeg', 'modified_dt': _random_date()},
                {'title': 'img003.jpg', 'content-type': 'image/jpeg', 'modified_dt': _random_date()},
                {'title': 'here_is_an_image_in_a_nested_folder_with_an_extremely_long_name_and_a_weird_extension.jpag',
                 'content-type': 'audio/vorbis', 'modified_dt': _random_date()}
            ]},
            {'title': 'some folder', 'content-type': 'text/directory', 'modified_dt': _random_date(), 'content': []},
            {'title': 'Poster', 'content-type': 'text/directory', 'modified_dt': _random_date(), 'content': [
                {'title': 'poster_final.pdf', 'content-type': 'application/pdf', 'modified_dt': _random_date()}
            ]},
        ]
        root_files = [
            {'title': 'data1.ods', 'content-type': 'application/vnd.oasis.opendocument.spreadsheet',
             'modified_dt': _random_date()},
            {'title': 'raw_data1.ods', 'content-type': 'application/vnd.oasis.opendocument.spreadsheet',
             'modified_dt': _random_date()},
            {'title': 'final_report.pdf', 'content-type': 'application/pdf', 'modified_dt': _random_date()},
            {'title': 'Some link', 'content-type': 'text/vnd.indico.link', 'modified_dt': _random_date()},
            {'title': 'unknown_type.ogg', 'content-type': 'audio/vorbis', 'modified_dt': _random_date()},
            {'title': 'this_is_a_completely_random_file_whose_name_is_long_extremely_extremely_extremely_long.ogg',
             'content-type': 'audio/vorbis', 'modified_dt': _random_date()}
        ]
        attachments = sorted(root_folders + root_files,
                             key=lambda a: (a['content-type'] != 'text/directory', a['title'].lower()))
        return WPEventAttachments.render_template('attachments.html', self._conf, event=self._conf,
                                                  attachments=attachments)


class RHEventAttachmentsUpload(RHConferenceModifBase):
    """Upload files"""

    def _process(self):
        form = AddAttachmentsForm(linked_object=self._conf)
        if form.validate_on_submit():
            # TODO: Handle files
            return
        return WPEventAttachments.render_template('upload.html', self._conf, event=self._conf, form=form)


class RHEventAttachmentsAddLink(RHConferenceModifBase):
    """Attach link"""

    def _process(self):
        form = AddLinkForm(linked_object=self._conf)
        if form.validate_on_submit():
            # TODO
            return
        return WPEventAttachments.render_template('add_link.html', self._conf, event=self._conf, form=form)


class RHEventAttachmentsCreateFolder(RHConferenceModifBase):
    """Create a new empty folder"""

    def _process(self):
        form = CreateFolderForm()
        if form.validate_on_submit():
            folder = AttachmentFolder(linked_object=self._conf)
            form.populate_obj(folder, skip={'acl'})
            if folder.is_protected:
                folder.acl = form.acl.data
            db.session.add(folder)
            flash(_("Folder \"{name}\" created").format(name=folder.title), 'success')
            return redirect_or_jsonify(url_for('.index', self._conf), flash=False)
        return WPEventAttachments.render_template('create_folder.html', self._conf, event=self._conf, form=form)
Python
0.000001
@@ -785,16 +785,34 @@ rt flash +, request, session %0Afrom in @@ -1064,16 +1064,117 @@ tFolder%0A +from indico.modules.attachments.models.attachments import Attachment, AttachmentFile, AttachmentType%0A from ind @@ -4088,47 +4088,471 @@ -# TODO: Handle files%0A return +for f in request.files.itervalues():%0A folder = form.folder.data or AttachmentFolder.get_or_create_default(linked_object=self._conf)%0A attachment = Attachment(folder=folder, user=session.user, title=f.filename,%0A type=AttachmentType.file)%0A attachment.file = AttachmentFile(user=session.user, filename=f.filename, content_type=f.mimetype)%0A attachment.file.save(f.file) %0A
447a7c56401dce19f5bdd412e5f66ba40180b665
refactor take_catty_corner to use parse_analysis
OnStage/computer_dais.py
OnStage/computer_dais.py
from player_chair import Player


class Computer(Player):
    name = 'computer'

    def choose(self, board):
        options = self.get_legal_moves(board)
        win_chance = self.take_win_chances(options, board)
        center = self.take_the_center(options)
        catty_corner = self.take_catty_corner(options, board)
        if win_chance:
            return win_chance
        elif center:
            return center
        elif catty_corner:
            return catty_corner
        return self.make_default_choice(options)

    def take_win_chances(self, options, board):
        analysis = self.scan_board(board)
        for condition in analysis:
            if condition == self.marker_code * 2:
                code = analysis.index(condition)
                return self.parse_analysis(options, code)
        return False

    def take_the_center(self, options):
        if 4 in options:
            return 4
        return False

    def take_catty_corner(self, options, board):
        if board[4] == self.marker_code:
            analysis = self.scan_board(board)
            if analysis[6] == 11:
                if 0 in options:
                    return 0
                return 8
            elif analysis[7] == 11:
                if 2 in options:
                    return 2
                return 6
        return False

    def make_default_choice(self, options):
        priorities = [0,2,6,8]
        for priority in priorities:
            if priority in options:
                return priority
        return options[0]

    def parse_analysis(self, options, code):
        if code == 0:
            if 0 in options:
                return 0
            elif 1 in options:
                return 1
            return 2
        elif code == 1:
            if 3 in options:
                return 3
            elif 4 in options:
                return 4
            return 5
        elif code == 2:
            if 6 in options:
                return 6
            elif 7 in options:
                return 7
            return 8
        elif code == 3:
            if 0 in options:
                return 0
            elif 3 in options:
                return 3
            return 6
        elif code == 4:
            if 1 in options:
                return 1
            elif 4 in options:
                return 4
            return 7
        elif code == 5:
            if 2 in options:
                return 2
            elif 5 in options:
                return 5
            return 8
        elif code == 6:
            if 0 in options:
                return 0
            elif 4 in options:
                return 4
            return 8
        elif code == 7:
            if 2 in options:
                return 2
            elif 4 in options:
                return 4
            return 6
Python
0.000018
@@ -1129,199 +1129,137 @@ -if 0 in options:%0A return 0%0A return 8%0A elif analysis%5B7%5D == 11:%0A if 2 in options:%0A return 2%0A return 6 +return self.parse_analysis(options, 6)%0A elif analysis%5B7%5D == 11:%0A return self.parse_analysis(options, 7) %0A
f321366fdd486dd0cd49c15a5040b54e33e4aa8f
Update comment and docstring
src/permission/handlers.py
src/permission/handlers.py
# coding=utf-8
"""
"""
__author__ = 'Alisue <[email protected]>'
from permission.utils.permissions import get_app_perms
from permission.utils.permissions import get_model_perms
import collections


class PermissionHandler(object):
    """
    Abstract permission handler class
    """
    _includes = None
    _excludes = None

    @property
    def includes(self):
        return self._includes

    @includes.setter
    def includes(self, value):
        # clear cache
        if hasattr(self, '_perms_cache'):
            del self._perms_cache
        self._includes = value

    @property
    def excludes(self):
        return self._excludes

    @excludes.setter
    def excludes(self, value):
        # clear cache
        if hasattr(self, '_perms_cache'):
            del self._perms_cache
        self._excludes = value

    def __init__(self, model_or_app_label):
        """
        Constructor

        Parameters
        ----------
        model_or_app_label : django model class or string
            A django model class or application label string.
            Use django model class for model level permission and application
            label for application level permission.
        """
        if isinstance(model_or_app_label, str):
            self.app_label = model_or_app_label
            self.model = None
            if self.includes is None:
                self.includes = self._get_app_perms
        else:
            self.app_label = model_or_app_label._meta.app_label
            self.model = model_or_app_label
            self.model._permission_handler = self
            if self.includes is None:
                self.includes = self._get_model_perms

    def _get_app_perms(self, *args):
        """
        Get permissions related to the application specified in constructor

        Returns
        -------
        set
            A set instance of `app_label.codename` formatted permission
            strings
        """
        if not hasattr(self, '_app_perms_cache'):
            self._app_perms_cache = get_app_perms(self.app_label)
        return self._app_perms_cache

    def _get_model_perms(self, *args):
        """
        Get permissions related to the model specified in constructor

        Returns
        -------
        set
            A set instance of `app_label.codename` formatted permission
            strings
        """
        if not hasattr(self, '_model_perms_cache'):
            if self.model is None:
                self._model_perms_cache = set()
            else:
                self._model_perms_cache = get_model_perms(self.model)
        return self._model_perms_cache

    def get_permissions(self, user_obj, perm, obj=None):
        """
        Get permissions which this handler can treat.
        Specified with :attr:`includes` and :attr:`excludes` of this instance.

        Parameters
        ----------
        user_obj : django user model instance
            A django user model instance which be checked
        perm : string
            `app_label.codename` formatted permission string
        obj : None or django model instance
            None or django model instance for object permission

        Returns
        -------
        set
            A set instance of `app_label.codename` formatted permission
            strings
        """
        if not hasattr(self, '_perms_cache'):
            if self.includes and isinstance(self.includes, collections.Callable):
                includes = self.includes(self)
            else:
                includes = self.includes or []
            if self.excludes and isinstance(self.excludes, collections.Callable):
                excludes = self.excludes(self)
            else:
                excludes = self.excludes or []
            includes = set(includes)
            excludes = set(excludes)
            includes = includes.difference(excludes)
            self._perms_cache = includes
        return self._perms_cache

    def has_perm(self, user_obj, perm, obj=None):
        """
        Check if user have permission (of object)

        Parameters
        ----------
        user_obj : django user model instance
            A django user model instance which be checked
        perm : string
            `app_label.codename` formatted permission string
        obj : None or django model instance
            None or django model instance for object permission

        Returns
        -------
        boolean
            Wheter the specified user have specified permission (of specified
            object).

        .. note::
            Sub class must override this method.
        """
        raise NotImplementedError(
            "'%s' does not override `has_perm(user_obj, perm, obj=None)` "
            "method. Sub class must override this method." % self.__class__)


class LogicalPermissionHandler(PermissionHandler):
    """
    Permission handler class which use permission logics to determine the
    permission
    """

    def __init__(self, model):
        """
        Constructor

        Parameters
        ----------
        model : django model class
            A django model class.

        .. note::
            LogicalPermissionHandler cannot treat application level permission
        """
        # logical permission handler cannot treat application level permission
        if isinstance(model, str):
            raise AttributeError(
                "'%s' cannot treat application level permission."
                % self.__class__)
        super(LogicalPermissionHandler, self).__init__(model)

    def has_perm(self, user_obj, perm, obj=None):
        """
        Check if user have permission (of object) based on
        specified models's ``_permission_logics`` attribute.

        Parameters
        ----------
        user_obj : django user model instance
            A django user model instance which be checked
        perm : string
            `app_label.codename` formatted permission string
        obj : None or django model instance
            None or django model instance for object permission

        Returns
        -------
        boolean
            Wheter the specified user have specified permission (of specified
            object).
        """
        if perm not in self.get_permissions(user_obj, perm, obj=obj):
            return False
        for permission_logic in getattr(self.model, '_permission_logics', []):
            if permission_logic.has_perm(user_obj, perm, obj):
                return True
        return False
Python
0
@@ -65,16 +65,53 @@ e.net%3E'%0A +from permission.conf import settings%0A from per @@ -214,16 +214,76 @@ l_perms%0A +from permission.utils.permissions import perm_to_permission%0A import c @@ -5836,24 +5836,237 @@ attribute.%0A%0A + It will raise %60%60ObjectDoesNotExist%60%60 exception when the specified%0A string permission does not exist and%0A %60%60PERMISSION_CHECK_PERMISSION_PRESENCE%60%60 is %60%60True%60%60 in %60%60settings%60%60%0A module.%0A%0A Para @@ -6064,32 +6064,32 @@ Parameters%0A - -------- @@ -6534,27 +6534,516 @@ bject).%0A - %22%22%22 +%0A Raises%0A ------%0A django.core.exceptions.ObjectDoesNotExist%0A If the specified string permission does not exist and%0A %60%60PERMISSION_CHECK_PERMISSION_PRESENCE%60%60 is %60%60True%60%60 in %60%60settings%60%60%0A module.%0A %22%22%22%0A if settings.PERMISSION_CHECK_PERMISSION_PRESENCE:%0A # get permission instance from string permission (perm)%0A # it raise ObjectDoesNotExists when the permission is not exists%0A perm_to_permission(perm)%0A %0A
8fb201b866c0eabc99c370cf3ccc993d2de06264
Update version 0.10.1
src/iteration_utilities/__init__.py
src/iteration_utilities/__init__.py
# Licensed under Apache License Version 2.0 - see LICENSE

"""Utilities based on Pythons iterators and generators."""

from ._iteration_utilities import *
from ._convenience import *
from ._recipes import *
from ._additional_recipes import *
from ._classes import *

__version__ = '0.10.0'
Python
0.000001
@@ -281,11 +281,11 @@ = '0.10. -0 +1 '%0A
ffa551d8e4519005791f42bb2862f0411c54ced3
Update projectfiles_unchanged script
projectfiles_unchanged.py
projectfiles_unchanged.py
# version: 2
import os
import glob
import hashlib
import sys

matches = []

exlude_dirs = set(['.git', 'docs'])

def get_subdirs(path):
    return set([name for name in os.listdir(path)
                if os.path.isdir(os.path.join(path, name))])

def find_in(path):
    # print(path)
    out = []
    out += glob.glob(path + "/*.pro")
    out += glob.glob(path + "/CMakeLists.txt")
    out += glob.glob(path + "/Info.plist")
    subs = get_subdirs(path) - exlude_dirs
    for s in subs:
        out += find_in(os.path.join(path, s))
    out.sort()
    return out

pros = find_in(".")
# print(pros)

hasher = hashlib.md5()
for pro in pros:
    with open(pro) as f:
        s = f.read()
        hasher.update(s.encode('utf8'))

current = hasher.hexdigest()

if os.path.isfile("projectfiles.md5.tmp"):
    with open("projectfiles.md5.tmp") as f:
        old = f.read()
else:
    old = ""

if current.strip() == old.strip():
    sys.exit(0)
else:
    with open("projectfiles.md5.tmp", "w") as f:
        print(current, file=f)
    sys.exit(1)
Python
0.000001
@@ -1,9 +1,169 @@ # +!/usr/bin/env python3%0A#%0A# This script is used on Linux, OS X and Windows.%0A# Python 3 required.%0A# Returns 0 if project files are unchanged and 1 else.%0A#%0A# Script version @@ -168,9 +168,9 @@ on: -2 +3 %0Aimp @@ -229,16 +229,50 @@ s = %5B%5D%0A%0A +tmp_file = %22projectfiles.md5.tmp%22%0A exlude_d @@ -946,69 +946,41 @@ ile( -%22projectfiles.md5.tmp%22):%0A with open(%22projectfiles.md5.tmp%22 +tmp_file):%0A with open(tmp_file ) as @@ -1089,38 +1089,24 @@ th open( -%22projectfiles.md5.tmp%22 +tmp_file , %22w%22) a
4e62d7d9514449be5afc5a27b15726a254077e89
Remove dangling argument
MachineSettingsAction.py
MachineSettingsAction.py
from cura.MachineAction import MachineAction
import cura.Settings.CuraContainerRegistry

from UM.i18n import i18nCatalog
from UM.Settings.DefinitionContainer import DefinitionContainer
from UM.Application import Application

from PyQt5.QtCore import pyqtSlot, QObject

catalog = i18nCatalog("cura")

class MachineSettingsAction(MachineAction, QObject, ):
    def __init__(self, parent = None):
        MachineAction.__init__(self, "MachineSettingsAction", catalog.i18nc("@action", "Machine Settings"))
        self._qml_url = "MachineSettingsAction.qml"

        cura.Settings.CuraContainerRegistry.getInstance().containerAdded.connect(self._onContainerAdded)

    def _execute(self):
        pass

    def _reset(self):
        pass

    def _onContainerAdded(self, container):
        # Add this action as a supported action to all machine definitions
        if isinstance(container, DefinitionContainer) and container.getMetaDataEntry("type") == "machine":
            Application.getInstance().getMachineActionManager().addSupportedAction(container.getId(), self.getKey())

    @pyqtSlot()
    def forceUpdate(self):
        # Force rebuilding the build volume by reloading the global container stack.
        # This is a bit of a hack, but it seems quick enough.
        Application.getInstance().globalContainerStackChanged.emit()
Python
0.000021
@@ -345,18 +345,16 @@ QObject -, ):%0A d
2e071c0e37fac657955de70fb7193b3e46ba2aef
Update subscribe_speakers_to_talks.py
p3/management/commands/subscribe_speakers_to_talks.py
p3/management/commands/subscribe_speakers_to_talks.py
# -*- coding: UTF-8 -*-
from django.core.management.base import BaseCommand, CommandError
from conference import models as cmodels
from hcomments import models as hmodels

class Command(BaseCommand):
    def handle(self, *args, **options):
        try:
            conf = args[0]
        except IndexError:
            raise CommandError('conference missing')

        qs = cmodels.TalkSpeaker.objects\
            .filter(talk__conference=conf)\
            .select_related('talk', 'speaker__user')

        for row in qs:
            u = row.speaker.user
            t = row.talk
            print '%s %s -> %s' % (u.first_name, u.last_name, t.title)
            hmodels.ThreadSubscription.objects.subscribe(t, u)
Python
0
@@ -83,16 +83,63 @@ ndError%0A +from django.contrib.auth import get_user_model%0A from con @@ -212,16 +212,76 @@ models%0A%0A +info = get_user_model().objects.get(email='[email protected]')%0A%0A class Co @@ -814,8 +814,74 @@ e(t, u)%0A + hmodels.ThreadSubscription.objects.subscribe(t, info)%0A
025da327fde5b20a4f8bf8edfea6a322722a54bb
Check nodes changes before updating
polyaxon/runner/nodes/tasks.py
polyaxon/runner/nodes/tasks.py
import logging

import requests
import uuid

from django.conf import settings
from django.db.models import Count, Sum

import auditor

from clusters.models import Cluster
from event_manager.events.cluster import (
    CLUSTER_NODE_CREATED,
    CLUSTER_NODE_DELETED,
    CLUSTER_NODE_UPDATED,
    CLUSTER_RESOURCES_UPDATED,
    CLUSTER_UPDATED
)
from polyaxon.celery_api import app as celery_app
from polyaxon.config_settings import RunnerCeleryTasks
from polyaxon_k8s.manager import K8SManager
from runner.nodes.models import ClusterNode

logger = logging.getLogger('polyaxon.tasks.clusters')


def get_cluster_resources():
    return Cluster.objects.annotate(
        n_nodes=Count('nodes'),
        n_cpus=Sum('nodes__cpu'),
        memory=Sum('nodes__memory'),
        n_gpus=Sum('nodes__n_gpus')).first()


@celery_app.task(name=RunnerCeleryTasks.CLUSTERS_UPDATE_SYSTEM_INFO,
                 time_limit=150,
                 ignore_result=True)
def update_system_info():
    k8s_manager = K8SManager(in_cluster=True)
    version_api = k8s_manager.get_version()
    cluster = Cluster.load()
    if cluster.version_api != version_api:
        cluster.version_api = version_api
        cluster.save()
        auditor.record(event_type=CLUSTER_UPDATED,
                       instance=cluster,
                       is_upgrade=settings.CHART_IS_UPGRADE)


@celery_app.task(name=RunnerCeleryTasks.CLUSTERS_UPDATE_SYSTEM_NODES,
                 time_limit=150,
                 ignore_result=True)
def update_system_nodes():
    k8s_manager = K8SManager(in_cluster=True)
    nodes = k8s_manager.list_nodes()
    cluster = Cluster.load()

    nodes_to_update = {}
    nodes_to_create = {node.metadata.name: node for node in nodes}
    deprecated_nodes = []
    for node in cluster.nodes.all():
        if node.name in nodes_to_create:
            nodes_to_update[node.name] = (node, nodes_to_create.pop(node.name))
        else:
            deprecated_nodes.append(node)

    cluster_updated = False

    for node in deprecated_nodes:
        node.is_current = False
        node.save()
        cluster_updated = True
        auditor.record(event_type=CLUSTER_NODE_DELETED, instance=node)

    for node in nodes_to_create.values():
        node_dict = ClusterNode.from_node_item(node)
        node_dict['cluster'] = cluster
        instance = ClusterNode.objects.create(**node_dict)
        cluster_updated = True
        auditor.record(event_type=CLUSTER_NODE_CREATED, instance=instance)

    for current_node, new_node in nodes_to_update.values():
        node_dict = ClusterNode.from_node_item(new_node)
        for k, v in node_dict.items():
            setattr(current_node, k, v)
        current_node.save()
        cluster_updated = True
        auditor.record(event_type=CLUSTER_NODE_UPDATED, instance=current_node)

    if cluster_updated:
        cluster = get_cluster_resources()
        auditor.record(event_type=CLUSTER_RESOURCES_UPDATED,
                       instance=cluster,
                       n_nodes=cluster.n_nodes,
                       memory=cluster.memory / (1000 ** 3),
                       n_cpus=cluster.n_cpus,
                       n_gpus=cluster.n_gpus)


@celery_app.task(name=RunnerCeleryTasks.CLUSTERS_NODES_NOTIFICATION_ALIVE,
                 time_limits=60,
                 ignore_result=True)
def cluster_nodes_analytics():
    cluster = get_cluster_resources()
    notification = uuid.uuid4()
    notification_url = settings.POLYAXON_NOTIFICATION_CLUSTER_NODES_URL.format(
        url=settings.CLUSTER_NOTIFICATION_URL,
        cluster_uuid=cluster.uuid.hex,
        n_nodes=cluster.n_nodes,
        n_cpus=cluster.n_cpus,
        memory=cluster.memory / (1000 ** 3),
        n_gpus=cluster.n_gpus,
        notification=notification,
        version=settings.CHART_VERSION)
    try:
        requests.get(notification_url)
    except requests.RequestException:
        pass
Python
0
@@ -2592,24 +2592,53 @@ m(new_node)%0A + node_updated = False%0A for @@ -2680,35 +2680,146 @@ -setattr(current_node, k, v) +if v != getattr(current_node, k):%0A setattr(current_node, k, v)%0A node_updated = True%0A if node_updated: %0A
50266b417208f562e7fe430d5ab2906b56c323ca
remove alpha bleeding debug
PyTexturePacker/Utils.py
PyTexturePacker/Utils.py
# -*- coding: utf-8 -*-
"""----------------------------------------------------------------------------
Author:
    Huang Quanyong (wo1fSea)
    [email protected]
Date:
    2016/10/19
Description:
    Utils.py
----------------------------------------------------------------------------"""

SUPPORTED_IMAGE_FORMAT = [".png", ".jpg", ".bmp"]


def load_images_from_paths(image_path_list):
    from .ImageRect import ImageRect
    image_rect_list = []
    for file_path in image_path_list:
        image_rect = ImageRect(file_path)
        image_rect_list.append(image_rect)

    return image_rect_list


def load_images_from_dir(dir_path):
    import os
    image_rect_path = []
    for root, dirs, files in os.walk(dir_path):
        for f in files:
            file_path = os.path.join(root, f)
            _, ext = os.path.splitext(f)
            if ext.lower() in SUPPORTED_IMAGE_FORMAT:
                image_rect_path.append(file_path)

    return load_images_from_paths(image_rect_path)


def save_plist(data_dict, file_name):
    import plistlib
    if hasattr(plistlib, "dump"):
        with open(file_name, 'wb') as fp:
            plistlib.dump(data_dict, fp)
    else:
        plistlib.writePlist(data_dict, file_name)


def save_image(image, file_name):
    image.save(file_name)


def alpha_bleeding(image, bleeding_pixel = 32):
    offsets = ((-1, -1), (0, -1), (1, -1),
               (-1, 0), (1, 0),
               (-1, 1), (0, 1), (1,1))

    image = image.copy()
    width, height = image.size
    if image.mode != "RGBA":
        image = image.convert("RGBA")
    pa = image.load()

    bleeding = set()
    borders = []

    def _tell_border(x, y):
        if pa[x, y][3] == 0:
            return False
        for offset in offsets:
            ox = x + offset[0]
            oy = y + offset[1]
            if ox >= 0 and ox < width and oy >= 0 and oy < height \
                    and pa[ox, oy][3] == 0:
                return True
        return False

    def _bleeding(x ,y):
        borders = []
        pixel = pa[x, y]
        for offset in offsets:
            ox = x + offset[0]
            oy = y + offset[1]
            if ox >= 0 and ox < width and oy >= 0 and oy < height \
                    and pa[ox, oy][3] == 0 and (ox, oy) not in bleeding:
                pa[ox, oy] = (pixel[0], pixel[1], pixel[2], 255)
                bleeding.add(pa)
                if _tell_border(ox, oy):
                    borders.append((ox, oy))
        return borders

    for x in range(width):
        for y in range(height):
            if _tell_border(x, y):
                borders.append((x, y))

    for i in range(bleeding_pixel):
        pending = []
        for border in borders:
            pending.extend(_bleeding(*border))
        borders = pending

    return image


def alpha_remove(image):
    image = image.copy()
    width, height = image.size
    if image.mode != "RGBA":
        image = image.convert("RGBA")
    pa = image.load()
    for x in range(width):
        for y in range(height):
            pixel = pa[x,y]
            pa[x, y] = (pixel[0], pixel[1], pixel[2], 255)
    return image
Python
0
@@ -2340,27 +2340,25 @@ , pixel%5B2%5D, -255 +0 )%0A
ea245f90cf0fb18cc0894d4b959ce7c3a75cf0c5
align librispeech
SCT/benchmark_aligner.py
SCT/benchmark_aligner.py
import sys
import shutil, os
sys.path.insert(0, os.path.expanduser('~/Montreal-Forced-Aligner'))
import time
import logging
import platform
import csv
import statistics
from datetime import datetime
from aligner.command_line.train_and_align import align_corpus, align_corpus_no_dict

#corpus_dir = '/media/share/datasets/aligner_benchmarks/LibriSpeech/standard'
corpus_dir = '/media/share/datasets/aligner_benchmarks/sorted_quebec_french'
#dict_path = os.path.expanduser('~/Montreal-Forced-Aligner/librispeech-lexicon.txt')
dict_path = os.path.expanduser('~/Montreal-Forced-Aligner/dist/montreal-forced-aligner/prosodylab.dictionaries/fr.dict')
#output_directory = '/data/michaela/aligned_librispeech'
output_directory = '/data/michaela/aligned_quebec_french'
output_model_path = os.path.expanduser('~/Documents/quebec_french_models.zip')
num_jobs = 2

def benchmark_align_corpus(corpus_dir, dict_path, output_directory, speaker_characters, fast, output_model_path, num_jobs, verbose):
    beg = time.time()
    align_corpus(corpus_dir, dict_path, output_directory, speaker_characters, fast, output_model_path, num_jobs, verbose)
    end = time.time()
    return [(end - beg)]

def benchmark_align_corpus_no_dict(corpus_dir, output_directory, speaker_characters, fast, output_model_path, num_jobs, verbose):
    beg = time.time()
    align_corpus_no_dict(corpus_dir, output_directory, speaker_characters, fast, output_model_path, num_jobs, verbose)
    end = time.time()
    return [(end - beg)]

if dict_path == None:
    nodict = benchmark_align_corpus_no_dict(corpus_dir, output_directory, 0, False, output_model_path, num_jobs, False)
else:
    yesdict = benchmark_align_corpus(corpus_dir, dict_path, output_directory, 0, False, output_model_path, num_jobs, True)

def WriteDictToCSV(csv_file,csv_columns,dict_data):
    with open(csv_file, 'w') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
        writer.writeheader()
        for data in dict_data:
            writer.writerow(data)
    return

csv_columns = ['Computer','Date','Corpus', 'Type of benchmark', 'Total time', 'Num_jobs']

if dict_path == None:
    dict_data = [
        {'Computer': platform.node(), 'Date': str(datetime.now()), 'Corpus': corpus_dir, 'Type of benchmark': 'train and align', 'Total time': nodict[0], 'Num_jobs': num_jobs}
    ]
else:
    dict_data = [
        {'Computer': platform.node(), 'Date': str(datetime.now()), 'Corpus': corpus_dir, 'Type of benchmark': 'train and align', 'Total time': yesdict[0], 'Num_jobs': num_jobs}
    ]

now = datetime.now()
date = str(now.year)+str(now.month)+str(now.day)

if not os.path.exists('aligner_benchmark'+date+'.csv'):
    open('aligner_benchmark'+date+'.csv', 'a')
    with open('aligner_benchmark'+date+'.csv', 'a') as csv_file:
        writer = csv.DictWriter(csv_file, fieldnames=csv_columns)
        writer.writeheader()

csv_file = 'aligner_benchmark'+date+'.csv'

with open('aligner_benchmark'+date+'.csv', 'a') as csv_file:
    writer = csv.DictWriter(csv_file, fieldnames=csv_columns)
    writer.writerow(dict_data[0])
Python
0.998401
@@ -279,17 +279,16 @@ o_dict%0A%0A -# corpus_d @@ -356,16 +356,17 @@ andard'%0A +# corpus_d @@ -434,17 +434,16 @@ french'%0A -# dict_pat @@ -518,16 +518,17 @@ n.txt')%0A +# dict_pat @@ -640,17 +640,16 @@ .dict')%0A -# output_d @@ -692,24 +692,25 @@ ibrispeech'%0A +# output_direc @@ -807,27 +807,25 @@ cuments/ -quebec_fren +librispee ch_model @@ -843,16 +843,17 @@ _jobs = +1 2%0A%0Adef b @@ -1673,16 +1673,23 @@ um_jobs, + False, False)%0A @@ -1815,16 +1815,23 @@ bs, True +, False )%0A%0Adef W
b679e3b479f4889d045f56530448161523cb6290
fix syntax issue
_doc/examples/plot_rest_api_search_images.py
_doc/examples/plot_rest_api_search_images.py
""" Search engines for images through a REST API ============================================ This example starts a :epkg:`waitress` server, creates a :epkg:`WSGI` application based on :epkg:`falcon` and queries the REST API. This application takes an image and searches for similar images based on features produced by a deep learning model. """ #################### # Settings. host = '127.0.0.1' port = 8083 ######################## # Creates the search engine and starts a server in a different process. # See :func:`search_images_dogcat <ensae_projects.restapi.search_images_dogcat.search_images_dogcat>`. def process_server(host, port): import logging logger = logging.getLogger('search_images_dogcat') logger.setLevel(logging.INFO) hdlr = logging.FileHandler('search_images_dogcat.log') formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s') hdlr.setFormatter(formatter) logger.addHandler(hdlr) # If not specified, the application looks for zip file: # http://www.xavierdupre.fr/enseignement/complements/dog-cat-pixabay.zip url = None from ensae_projects.restapi import search_images_dogcat app = search_images_dogcat(url_images=url) from waitress import serve serve(app, host=host, port=port) ########################## # Saves this code into a file and we start it # from a different process. import os import ensae_projects header = """ import sys sys.path.append(r'{0}') """.format(os.path.join(os.path.dirname(ensae_projects.__file__), '..')) import inspect code = "".join(inspect.getsourcelines(process_server)[0]) code = header + code + "\nprocess_server('{0}', {1})\n".format(host, port) dest = os.path.abspath('temp_scripts') if not os.path.exists(dest): os.mkdir(dest) code_file = os.path.join(dest, "_start_server.py") print("Write file '{0}'.".format(code_file)) with open(code_file, "w") as f: f.write(code) import sys from subprocess import Popen if sys.platform.startswith('win'): cmd = '{0} -u "{1}"'.format(sys.executable, code_file) print("Running '{0}'."format(cmd)) proc = Popen(cmd) else: cmd = [sys.executable, '-u', code_file] print("Running '{0}'."format(cmd)) proc = Popen(cmd) print('Start server, process id', proc.pid) ########################## # Let's wait. from time import sleep sleep(15) #################### # Let's load an image. from lightmlrestapi.args import image2base64 import ensae_projects.datainc.search_images as si imgfile = os.path.join(os.path.dirname(si.__file__), "cat-1192026__480.jpg") from PIL import Image img = Image.open(imgfile) import numpy from matplotlib.pyplot import imshow imshow(numpy.asarray(img)) ############################ # Let's query the server. import requests import ujson b64 = image2base64(imgfile)[1] features = ujson.dumps({'X': b64}) r = requests.post('http://127.0.0.1:%d' % port, data=features) js = r.json() if 'description' in js: # This is an error. print(js['description']) res = None else: print(js) res = [] for ans in js['Y']: print("Number of neighbors:", len(ans)) for n in ans: print("score, id, name", n) res.append((n[0], n[2]['name'])) ####################### # Let's display the images. txts = list(map(lambda x: str(x[0]), res)) imgs = list(map(lambda x: os.path.join('images', x[1]), res)) from mlinsights.plotting import plot_gallery_images plot_gallery_images(imgs, txts) import matplotlib.pyplot as plt # plt.show() #################### # Let's stop the server. from pyquickhelper.loghelper import reap_children reap_children(subset={proc.pid}, fLOG=print)
Python
0.000001
@@ -2064,34 +2064,34 @@ t(%22Running '%7B0%7D' -. %22 +. format(cmd))%0A @@ -2183,18 +2183,18 @@ ng '%7B0%7D' -. %22 +. format(c
843121f8c0e4c0f2a533540633c62060bb676ea1
remove unnecessary dependency
_unittests/ut_xmlhelper/test_xml_iterator.py
_unittests/ut_xmlhelper/test_xml_iterator.py
#-*- coding: utf-8 -*-
"""
@brief      test log(time=20s)
"""

import sys
import os
import unittest

try:
    import src
except ImportError:
    path = os.path.normpath(
        os.path.abspath(
            os.path.join(
                os.path.split(__file__)[0],
                "..",
                "..")))
    if path not in sys.path:
        sys.path.append(path)
    import src

try:
    import pyquickhelper as skip_
except ImportError:
    path = os.path.normpath(
        os.path.abspath(
            os.path.join(
                os.path.split(__file__)[0],
                "..",
                "..",
                "..",
                "pyquickhelper",
                "src")))
    if path not in sys.path:
        sys.path.append(path)
    import pyquickhelper as skip_

try:
    import pyensae as skip__
except ImportError:
    path = os.path.normpath(
        os.path.abspath(
            os.path.join(
                os.path.split(__file__)[0],
                "..",
                "..",
                "..",
                "pyensae",
                "src")))
    if path not in sys.path:
        sys.path.append(path)
    import pyensae as skip__

try:
    import pymyinstall as skip___
except ImportError:
    path = os.path.normpath(
        os.path.abspath(
            os.path.join(
                os.path.split(__file__)[0],
                "..",
                "..",
                "..",
                "pymyinstall",
                "src")))
    if path not in sys.path:
        sys.path.append(path)
    import pymyinstall as skip___

from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder, ExtTestCase
from src.pyrsslocal.xmlhelper import xml_filter_iterator


class TestXmlIterator(ExtTestCase):

    def test_enumerate_xml_row(self):
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")
        temp = get_temp_folder(__file__, "temp_enumerate_wolf_xml_row")
        data = os.path.join(temp, "..", "data", "sample.wolf.xml")
        rows = xml_filter_iterator(data, fLOG=fLOG, xmlformat=False, log=True)
        n = 0
        node = None
        for i, row in enumerate(rows):
            if node is None:
                node = row
            #fLOG(type(row), row)
            s = str(row)
            self.assertTrue(s is not None)
            for obj in row.iterfields():
                s = str(obj)
                self.assertTrue(s is not None)
            if i % 2 == 0:
                row._convert_into_list()
            xout = row.get_xml_output()
            self.assertTrue(xout is not None)
            row.find_node_value("SYNET")
            n += 1
        self.assertGreater(n, 0)
        # node += node


if __name__ == "__main__":
    unittest.main()
Python
0.000413
@@ -1170,407 +1170,8 @@ __%0A%0A -try:%0A import pymyinstall as skip___%0Aexcept ImportError:%0A path = os.path.normpath(%0A os.path.abspath(%0A os.path.join(%0A os.path.split(__file__)%5B0%5D,%0A %22..%22,%0A %22..%22,%0A %22..%22,%0A %22pymyinstall%22,%0A %22src%22)))%0A if path not in sys.path:%0A sys.path.append(path)%0A import pymyinstall as skip___%0A%0A %0Afro
35c5fd2606682827e43f707400d726f84f41104a
Fix for issue 11.
abusehelper/core/opts.py
abusehelper/core/opts.py
import os import sys import inspect from optparse import OptionParser from ConfigParser import SafeConfigParser def long_name(key): return key.replace("_", "-").lower() def action_and_type(default): if isinstance(default, bool): if default: return "store_false", None return "store_true", None elif isinstance(default, int): return "store", "int" elif isinstance(default, float): return "store", "float" return "store", "string" def optparse(func, argv=list(sys.argv[1:])): args, varargs, varkw, defaults = inspect.getargspec(func) assert varargs is None, "variable argument definitions are not supported" assert varkw is None, "variable keyword definitions are not supported" if not defaults: positionals = args defaults = dict() else: positionals = args[:-len(defaults)] defaults = dict(zip(args[-len(defaults):], defaults)) parser = OptionParser() usage = list() usage.append("Usage: %prog [options]") for name in positionals: usage.append(name) parser.set_usage(" ".join(usage)) # Add the INI config file parsing options. The INI section name is # magically determined from the given function's module file name # (e.g. filename '../lib/testmodule.py' -> section 'testmodule'). parser.add_option("--ini-file", dest="ini_file", default=None, help="INI file used for configuration", metavar="ini_file") _, module_file = os.path.split(inspect.getmodule(func).__file__) section_name, _ = os.path.splitext(module_file) parser.add_option("--ini-section", dest="ini_section", default=section_name, help=("if an INI configuration file is specified, "+ "use this section (default: %default)"), metavar="ini_section") long_names = dict() for key in args: long = getattr(func, key + "_long", long_name(key)) long_names[key] = long short = getattr(func, key + "_short", None) names = list() if short is not None: names.append("-" + short) names.append("--" + long) kwargs = dict() kwargs["dest"] = key kwargs["help"] = getattr(func, key + "_help", None) kwargs["metavar"] = getattr(func, key + "_metavar", key) if key in defaults: default = defaults[key] action, type = action_and_type(default) kwargs["default"] = default kwargs["action"] = getattr(func, key + "_action", action) kwargs["type"] = getattr(func, key + "_type", type) option = parser.add_option(*names, **kwargs) options, params = parser.parse_args(list(argv)) # Open and parse the INI configuration file, if given. if options.ini_file is not None: config = SafeConfigParser() config.read([options.ini_file]) section_name = options.ini_section if config.has_section(section_name): section = dict(config.items(section_name)) else: section = config.defaults() argv = list(argv) for key in args: if key in section: argv.insert(0, "--%s=%s" % (long_names[key], section[key])) options, params = parser.parse_args(argv) arglist = list() for key in args: positional = key in positionals if not positional or getattr(options, key) is not None: arglist.append(getattr(options, key)) elif positional and params: arglist.append(params.pop(0)) else: parser.error("missing value for argument %s" % key) if params: parser.error("too many positional arguments") return func(*arglist)
Python
0
@@ -2014,24 +2014,45 @@ es = dict()%0A + actions = dict()%0A for key @@ -2806,16 +2806,60 @@ %22, type) +%0A actions%5Bkey%5D = kwargs%5B%22action%22%5D %0A%0A @@ -3147,21 +3147,16 @@ section -_name = optio @@ -3178,18 +3178,77 @@ -if +argv = list(argv)%0A for key in args:%0A if not config. @@ -3251,19 +3251,18 @@ fig.has_ -sec +op tion(sec @@ -3265,21 +3265,21 @@ (section -_name +, key ):%0A @@ -3289,64 +3289,217 @@ -section = dict(config.items(section_name))%0A else: + continue%0A%0A action = actions.get(key, None)%0A if action == %22store_true%22:%0A if config.getboolean(section, key):%0A argv.insert(0, %22--%25s%22 %25 long_names%5Bkey%5D) %0A @@ -3511,118 +3511,222 @@ -s e +lif a ction = - config.defaults()%0A%0A argv = list(argv)%0A for key in args:%0A if key in section: += %22store_false%22:%0A if not config.getboolean(section, key):%0A argv.insert(0, %22--%25s%22 %25 long_names%5Bkey%5D)%0A else:%0A value = config.get(section, key) %0A @@ -3787,20 +3787,13 @@ y%5D, -section%5Bkey%5D +value ))%0A
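A runnable sketch of the INI handling this fix for issue 11 introduces, written with Python 3's configparser and hypothetical section/option names: boolean store_true/store_false options must be read with getboolean() and injected as bare flags, while ordinary options become --key=value pairs.

import configparser

config = configparser.ConfigParser()
config.read_string("[bot]\nverbose = yes\nname = demo\n")

actions = {"verbose": "store_true", "name": "store"}  # per-option parse actions
argv = []
for key, action in actions.items():
    if not config.has_option("bot", key):
        continue
    if action == "store_true":
        if config.getboolean("bot", key):  # truthy INI value -> bare flag
            argv.insert(0, "--%s" % key)
    else:
        argv.insert(0, "--%s=%s" % (key, config.get("bot", key)))
print(argv)  # ['--name=demo', '--verbose']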
9f4982e351e36d7f3d248e0d2fbee24617cd4509
Fix install so organizations have vote.prohibit permissions set
adhocracy/lib/install.py
adhocracy/lib/install.py
import logging import adhocracy.model as model log = logging.getLogger(__name__) def mk_group(name, code): group = model.Group.by_code(unicode(code)) if not group: log.debug("Creating group: %s" % name) group = model.Group(unicode(name), unicode(code)) model.meta.Session.add(group) else: group.group_name = unicode(name) return group def mk_perm(name, *groups): perm = model.Permission.find(name) if perm is None: log.debug("Creating permission: %s" % name) perm = model.Permission(name) model.meta.Session.add(perm) perm.groups = list(groups) return perm def setup_entities(): #model.meta.Session.begin() model.meta.Session.commit() admins = mk_group("Administrator", model.Group.CODE_ADMIN) organization = mk_group("Organization", model.Group.CODE_ORGANIZATION) supervisor = mk_group("Supervisor", model.Group.CODE_SUPERVISOR) voter = mk_group("Voter", model.Group.CODE_VOTER) observer = mk_group("Observer", model.Group.CODE_OBSERVER) advisor = mk_group("Advisor", model.Group.CODE_ADVISOR) default = mk_group("Default", model.Group.CODE_DEFAULT) anonymous = mk_group("Anonymous", model.Group.CODE_ANONYMOUS) model.meta.Session.commit() # ADD EACH NEW PERMISSION HERE mk_perm("vote.cast", voter) mk_perm("vote.prohibit", organization) mk_perm("instance.index", anonymous) mk_perm("instance.show", anonymous) mk_perm("instance.create", admins) mk_perm("instance.admin", supervisor) mk_perm("instance.join", default) mk_perm("instance.leave", default) mk_perm("instance.news", anonymous) mk_perm("instance.delete", admins) mk_perm("comment.view", anonymous) mk_perm("comment.show", anonymous) mk_perm("comment.create", advisor) mk_perm("comment.edit", advisor) mk_perm("comment.delete", supervisor) mk_perm("proposal.create", advisor) mk_perm("proposal.edit", advisor) mk_perm("proposal.delete", supervisor) mk_perm("proposal.view", anonymous) mk_perm("proposal.show", anonymous) mk_perm("poll.show", anonymous) mk_perm("poll.create", supervisor) mk_perm("poll.delete", supervisor) mk_perm("user.manage", admins) mk_perm("user.edit", default) mk_perm("user.view", anonymous) mk_perm("user.show", anonymous) mk_perm("user.message", advisor) mk_perm("delegation.view", anonymous) mk_perm("delegation.show", anonymous) mk_perm("delegation.create", voter) mk_perm("delegation.delete", voter) mk_perm("watch.show", anonymous) mk_perm("watch.create", advisor) mk_perm("watch.delete", advisor) mk_perm("tag.show", anonymous) mk_perm("tag.view", anonymous) mk_perm("tag.create", advisor) mk_perm("tag.delete", advisor) mk_perm("page.show", anonymous) mk_perm("page.view", anonymous) mk_perm("page.create", advisor) mk_perm("page.edit", advisor) mk_perm("page.delete", supervisor) mk_perm("milestone.show", anonymous) mk_perm("milestone.create", supervisor) mk_perm("milestone.edit", supervisor) mk_perm("milestone.delete", supervisor) mk_perm("global.admin", admins) mk_perm("global.member", admins) mk_perm("global.organization", organization) model.meta.Session.commit() # END PERMISSIONS LIST advisor.permissions = advisor.permissions + anonymous.permissions observer.permissions = observer.permissions + advisor.permissions voter.permissions = voter.permissions + observer.permissions supervisor.permissions = supervisor.permissions + voter.permissions admins.permissions = admins.permissions + supervisor.permissions organization.permissions = observer.permissions admin = model.User.find(u"admin") if not admin: admin = model.User.create(u"admin", u"[email protected]", password=u"password", global_admin=True) model.meta.Session.commit() from pylons 
import config if config.get('adhocracy.instance'): model.Instance.create(config.get('adhocracy.instance'), u"Adhocracy", admin) elif not model.Instance.find(u"test"): model.Instance.create(u"test", u"Test Instance", admin) model.meta.Session.commit()
Python
0
@@ -3712,32 +3712,58 @@ on.permissions = + organization.permission + observer.permis
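A tiny sketch of the permission-inheritance idiom the diff above corrects (permission names taken from the file): a group's list should be its own grants plus the inherited list, so assigning the inherited list alone silently drops grants such as vote.prohibit.

observer_perms = ["comment.view", "proposal.view"]
organization_perms = ["vote.prohibit"]        # granted earlier via mk_perm
# buggy: organization_perms = observer_perms  # would lose vote.prohibit
organization_perms = organization_perms + observer_perms
print(organization_perms)  # ['vote.prohibit', 'comment.view', 'proposal.view']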
801ead866ea94d9b144758198f121b0a10caeb82
Remove deprecated kwarg - breaking change from "master"
admin/preprints/views.py
admin/preprints/views.py
from __future__ import unicode_literals from django.views.generic import ListView, UpdateView, DeleteView from django.core.urlresolvers import reverse_lazy from django.contrib.auth.mixins import PermissionRequiredMixin from django.shortcuts import redirect from django.core.exceptions import PermissionDenied from osf.models import SpamStatus from osf.models.preprint_service import PreprintService from osf.models.admin_log_entry import update_admin_log, REINDEX_SHARE, CONFIRM_SPAM, CONFIRM_HAM from website.preprints.tasks import update_preprint_share from website.project.views.register import osf_admin_change_status_identifier from framework.exceptions import PermissionsError from admin.base.views import GuidFormView, GuidView from admin.nodes.templatetags.node_extras import reverse_preprint from admin.preprints.serializers import serialize_preprint from admin.preprints.forms import ChangeProviderForm class PreprintFormView(PermissionRequiredMixin, GuidFormView): """ Allow authorized admin user to input specific preprint guid. Basic form. No admin models. """ template_name = 'preprints/search.html' object_type = 'preprint' permission_required = 'osf.view_preprintservice' raise_exception = True @property def success_url(self): return reverse_preprint(self.guid) class PreprintView(PermissionRequiredMixin, UpdateView, GuidView): """ Allow authorized admin user to view preprints View of OSF database. No admin models. """ template_name = 'preprints/preprint.html' context_object_name = 'preprintservice' permission_required = 'osf.view_preprintservice' raise_exception = True form_class = ChangeProviderForm def get_success_url(self): return reverse_lazy('preprints:preprint', kwargs={'guid': self.kwargs.get('guid')}) def post(self, request, *args, **kwargs): if not request.user.has_perm('osf.change_preprintservice'): raise PermissionsError("This user does not have permission to update this preprint's provider.") return super(PreprintView, self).post(request, *args, **kwargs) def get_object(self, queryset=None): return PreprintService.load(self.kwargs.get('guid')) def get_context_data(self, **kwargs): preprint = PreprintService.load(self.kwargs.get('guid')) # TODO - we shouldn't need this serialized_preprint value -- https://openscience.atlassian.net/browse/OSF-7743 kwargs['serialized_preprint'] = serialize_preprint(preprint) kwargs['change_provider_form'] = ChangeProviderForm(instance=preprint) kwargs.update({'SPAM_STATUS': SpamStatus}) # Pass spam status in to check against return super(PreprintView, self).get_context_data(**kwargs) class PreprintSpamList(PermissionRequiredMixin, ListView): SPAM_STATE = SpamStatus.UNKNOWN paginate_by = 25 paginate_orphans = 1 ordering = ('created') context_object_name = 'preprintservice' permission_required = ('osf.view_spam', 'osf.view_preprintservice') raise_exception = True def get_queryset(self): return PreprintService.objects.filter(spam_status=self.SPAM_STATE).order_by(self.ordering) def get_context_data(self, **kwargs): query_set = kwargs.pop('object_list', self.object_list) page_size = self.get_paginate_by(query_set) paginator, page, query_set, is_paginated = self.paginate_queryset( query_set, page_size) return { 'preprints': map(serialize_preprint, query_set), 'page': page, } class PreprintFlaggedSpamList(PreprintSpamList, DeleteView): SPAM_STATE = SpamStatus.FLAGGED template_name = 'preprints/flagged_spam_list.html' def delete(self, request, *args, **kwargs): if not request.user.has_perm('auth.mark_spam'): raise PermissionDenied('You do not have permission 
to update a preprint flagged as spam.') preprint_ids = [ pid for pid in request.POST.keys() if pid != 'csrfmiddlewaretoken' ] for pid in preprint_ids: preprint = PreprintService.load(pid) osf_admin_change_status_identifier(preprint, 'unavailable | spam') preprint.confirm_spam(save=True) update_admin_log( user_id=self.request.user.id, object_id=pid, object_repr='PreprintService', message='Confirmed SPAM: {}'.format(pid), action_flag=CONFIRM_SPAM ) return redirect('preprints:flagged-spam') class PreprintKnownSpamList(PreprintSpamList): SPAM_STATE = SpamStatus.SPAM template_name = 'preprints/known_spam_list.html' class PreprintKnownHamList(PreprintSpamList): SPAM_STATE = SpamStatus.HAM template_name = 'preprints/known_spam_list.html' class PreprintDeleteBase(DeleteView): template_name = None context_object_name = 'preprintservice' object = None def get_context_data(self, **kwargs): context = {} context.setdefault('guid', kwargs.get('object')._id) return super(PreprintDeleteBase, self).get_context_data(**context) def get_object(self, queryset=None): return PreprintService.load(self.kwargs.get('guid')) class PreprintConfirmSpamView(PermissionRequiredMixin, PreprintDeleteBase): template_name = 'preprints/confirm_spam.html' permission_required = 'osf.mark_spam' raise_exception = True def delete(self, request, *args, **kwargs): preprint = self.get_object() preprint.confirm_spam(save=True) osf_admin_change_status_identifier(preprint, 'unavailable | spam') update_admin_log( user_id=self.request.user.id, object_id=preprint._id, object_repr='PreprintService', message='Confirmed SPAM: {}'.format(preprint._id), action_flag=CONFIRM_SPAM, ) return redirect(reverse_preprint(self.kwargs.get('guid'))) class PreprintConfirmHamView(PermissionRequiredMixin, PreprintDeleteBase): template_name = 'preprints/confirm_ham.html' permission_required = 'osf.mark_spam' raise_exception = True def delete(self, request, *args, **kwargs): preprint = self.get_object() preprint.confirm_ham(save=True) osf_admin_change_status_identifier(preprint, 'public') update_admin_log( user_id=self.request.user.id, object_id=preprint._id, object_repr='PreprintService', message='Confirmed HAM: {}'.format(preprint._id), action_flag=CONFIRM_HAM ) return redirect(reverse_preprint(self.kwargs.get('guid'))) class PreprintReindexShare(PermissionRequiredMixin, PreprintDeleteBase): template_name = 'preprints/reindex_preprint_share.html' permission_required = 'osf.view_preprintservice' raise_exception = True def delete(self, request, *args, **kwargs): preprint = self.get_object() update_preprint_share(preprint) update_admin_log( user_id=self.request.user.id, object_id=preprint._id, object_repr='Preprint', message='Preprint Reindexed (SHARE): {}'.format(preprint._id), action_flag=REINDEX_SHARE ) return redirect(reverse_preprint(self.kwargs.get('guid')))
Python
0
@@ -4209,38 +4209,16 @@ preprint -, 'unavailable %7C spam' )%0A @@ -5647,30 +5647,8 @@ rint -, 'unavailable %7C spam' )%0A @@ -6345,18 +6345,8 @@ rint -, 'public' )%0A
78dd9bb220a8e1a03b51b801e023e4401a351892
Support animated pngs
gif_split/views.py
gif_split/views.py
import os
import posixpath
from cStringIO import StringIO
import logging

import requests
from PIL import Image, ImageSequence
from paste.httpheaders import CONTENT_DISPOSITION
from pyramid.response import FileIter, FileResponse
from pyramid.view import view_config
from pyramid_duh import argify

LOG = logging.getLogger(__name__)


@view_config(
    route_name='root',
    renderer='index.jinja2')
@argify
def index_view(request, url=None):
    """ Root view '/' """
    if url is not None:
        filename = posixpath.basename(url).replace('.gif', '')
        filename = filename + "_sprite.gif"
        stream = download_gif(url)
        sprite = convert_gif(stream)
        data = StringIO()
        sprite.save(data, format='GIF')
        data.seek(0)
        disp = CONTENT_DISPOSITION.tuples(filename=filename)
        request.response.headers.update(disp)
        request.response.app_iter = FileIter(data)
        return request.response
    else:
        return {}


def download_gif(url):
    return StringIO(requests.get(url).content)


def convert_gif(stream):
    image = Image.open(stream)
    frames = ImageSequence.Iterator(image)
    frame_width, frame_height = 0, 0
    frame_width, frame_height = frames[0].size
    width = frame_width*len(list(frames))
    height = frame_height
    out = Image.new('RGBA', (width, height))
    stream.seek(0)
    image = Image.open(stream)
    for i, frame in enumerate(ImageSequence.Iterator(image)):
        out.paste(frame, (frame_width*i, 0))
    return out
Python
0
@@ -477,20 +477,8 @@ url - is not None :%0A @@ -491,16 +491,21 @@ filename +, ext = posix @@ -513,85 +513,273 @@ ath. -basename(url).replace('.gif', '')%0A filename = filename + %22_sprite.gif%22 +splitext(posixpath.basename(url))%0A ext = ext.lower()%0A filename = filename + %22_sprite%22 + ext%0A if ext == '.gif':%0A img_format = 'GIF'%0A elif ext == '.png':%0A img_format = 'PNG'%0A else:%0A img_format = None %0A @@ -906,21 +906,26 @@ format= -'GIF' +img_format )%0A
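A runnable sketch, with a placeholder URL, of the extension dispatch this commit adds so sprites are saved in the matching PIL format instead of always GIF.

import posixpath

def sprite_name_and_format(url):
    filename, ext = posixpath.splitext(posixpath.basename(url))
    ext = ext.lower()
    # map the extension to a PIL format name; None lets PIL guess later
    img_format = {'.gif': 'GIF', '.png': 'PNG'}.get(ext)
    return filename + "_sprite" + ext, img_format

print(sprite_name_and_format("http://example.com/dance.PNG"))
# -> ('dance_sprite.png', 'PNG')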
eafe5c77a05d0f6d24bfb61c2be3a7d46c411b25
add new projections.
examples/plot_tissot.py
examples/plot_tissot.py
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.basemap import __version__ as basemap_version

# Tissot's Indicatrix (http://en.wikipedia.org/wiki/Tissot's_Indicatrix).
# These diagrams illustrate the distortion inherent in all map projections.
# In conformal projections, where angles are conserved around every location,
# the Tissot's indicatrix are all circles, with varying sizes. In equal-area
# projections, where area proportions between objects are conserved, the
# Tissot's indicatrix have all unit area, although their shapes and
# orientations vary with location.

# requires Basemap version 0.99.1
if basemap_version < '0.99.1':
    raise SystemExit("this example requires Basemap version 0.99.1 or higher")

# create Basemap instances with several different projections
m1 = Basemap(llcrnrlon=-180,llcrnrlat=-80,urcrnrlon=180,urcrnrlat=80,
             projection='cyl')
m2 = Basemap(lon_0=-60,lat_0=45,projection='ortho')
m3 = Basemap(llcrnrlon=-180,llcrnrlat=-70,urcrnrlon=180,urcrnrlat=70,
             projection='merc',lat_ts=20)
m4 = Basemap(lon_0=270,lat_0=90,boundinglat=10,projection='npstere')
m5 = Basemap(lon_0=270,lat_0=90,boundinglat=10,projection='nplaea')
m6 = Basemap(lon_0=0,projection='moll')
m7 = Basemap(lon_0=0,projection='robin')
m8 = Basemap(lon_0=0,projection='hammer')
m9 = Basemap(lon_0=0,projection='mbtfpq')
m10 = Basemap(lon_0=270,lat_0=90,boundinglat=10,projection='npaeqd')

for m in [m1,m2,m3,m4,m5,m6,m7,m8,m9,m10]:
    # make a new figure.
    fig = plt.figure()
    # draw "circles" at specified longitudes and latitudes.
    for parallel in range(-60,61,30):
        for meridian in range(-165,166,30):
            poly = m.tissot(meridian,parallel,6,100,facecolor='green',zorder=10,alpha=0.5)
    # draw meridians and parallels.
    if m.projection != 'ortho':
        labels = [1,0,0,0]
    else:
        labels = [0,0,0,0]
    m.drawparallels(np.arange(-60,61,30),labels=labels)
    if m.projection not in ['moll','hammer','mbtfpq','ortho']:
        labels = [0,0,0,1]
    else:
        labels = [0,0,0,0]
    m.drawmeridians(np.arange(-180,180,60),labels=labels)
    # draw coastlines, fill continents, plot title.
    m.drawcoastlines()
    m.drawmapboundary(fill_color='aqua')
    m.fillcontinents(color='coral',lake_color='aqua')
    title = 'Tissot Diagram: projection = %s' % m.projection
    print title
    plt.title(title)

plt.show()
Python
0
@@ -951,16 +951,119 @@ l')%0Am2 = + Basemap(llcrnrlon=-180,llcrnrlat=-80,urcrnrlon=180,urcrnrlat=80,%0A projection='mill')%0Am3 = Basemap @@ -1099,25 +1099,25 @@ n='ortho')%0Am -3 +4 = Basemap(l @@ -1141,17 +1141,17 @@ rnrlat=- -7 +8 0,urcrnr @@ -1168,17 +1168,17 @@ crnrlat= -7 +8 0,%0A @@ -1215,17 +1215,17 @@ ts=20)%0Am -4 +5 = Basem @@ -1250,34 +1250,33 @@ =90,boundinglat= -10 +5 ,projection='nps @@ -1283,17 +1283,17 @@ tere')%0Am -5 +6 = Basem @@ -1318,34 +1318,33 @@ =90,boundinglat= -10 +5 ,projection='npl @@ -1346,25 +1346,25 @@ ='nplaea')%0Am -6 +7 = Basemap(l @@ -1386,25 +1386,25 @@ on='moll')%0Am -7 +8 = Basemap(l @@ -1427,25 +1427,25 @@ n='robin')%0Am -8 +9 = Basemap(l @@ -1469,25 +1469,26 @@ ='hammer')%0Am -9 +10 = Basemap(l @@ -1513,25 +1513,107 @@ 'mbtfpq')%0Am1 -0 +1 = Basemap(lon_0=0,projection='eck4')%0Am12 = Basemap(lon_0=0,projection='kav7')%0Am13 = Basemap(l @@ -1642,18 +1642,17 @@ dinglat= -10 +5 ,project @@ -1666,16 +1666,17 @@ aeqd')%0A%0A +# for m in @@ -1707,16 +1707,43 @@ 8,m9,m10 +,m11,m12,m13%5D:%0Afor m in %5Bm6 %5D:%0A # @@ -1872,23 +1872,23 @@ range(- -60,61,3 +70,71,2 0):%0A @@ -1919,16 +1919,16 @@ e(-1 -65,166,3 +50,151,6 0):%0A @@ -2062,282 +2062,62 @@ -if m.projection != 'ortho':%0A labels = %5B1,0,0,0%5D%0A else:%0A labels = %5B0,0,0,0%5D%0A m.drawparallels(np.arange(-60,61,30),labels=labels)%0A if m.projection not in %5B'moll','hammer','mbtfpq','ortho'%5D:%0A labels = %5B0,0,0,1%5D%0A else:%0A labels = %5B0 +m.drawparallels(np.arange(-60,61,30),labels=%5B1 ,0,0,0%5D +) %0A @@ -2163,22 +2163,25 @@ ,labels= -labels +%5B0,0,0,1%5D )%0A #
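A minimal sketch of what this commit adds, decoded from the hunks above: extra Basemap instances for the newly covered projections (Miller, Eckert IV, Kavrayskiy VII); requires the basemap package.

from mpl_toolkits.basemap import Basemap

m_mill = Basemap(llcrnrlon=-180, llcrnrlat=-80, urcrnrlon=180, urcrnrlat=80,
                 projection='mill')
m_eck4 = Basemap(lon_0=0, projection='eck4')
m_kav7 = Basemap(lon_0=0, projection='kav7')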
44db9815aeaad8b23eecebd3e761466ffdf854f4
Normalize target dir, ensure we have target path.
scaffolder/vcs.py
scaffolder/vcs.py
 
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import traceback
import subprocess
import os
import sys
import shutil

def ensure_path(path):
    pass


class VCS():

    def __init__(self, url=''):
        """
        https://github.com/CG-TTDET/Platform.git
        [email protected]:goliatone/minions.git
        https://[email protected]/goliatone/tty2gif
        ssh://[email protected]/goliatone/personaldetection
        @param url:
        @return:
        """
        self.url = url

    def get_handler(self, url):
        #TODO: Make this for realz
        if '.git' in url:
            return 'git'
        elif 'hg@' or 'bitbucket.org' in url:
            return 'hg'
        else:
            raise Exception

    def get_repo_name(self, url, target_dir):
        tail = url.rpartition('/')[2]
        tail = tail.replace('.git', '')
        return os.path.normpath(os.path.join(target_dir, tail))

    def notify_existing_repo(self, repo_path):
        if not os.path.isdir(repo_path):
            return
        question = "Repo '{0}' exists, want to delete and clone?".format(repo_path)
        if self.prompt_question(question):
            print "Removing '{0}'...".format(repo_path)
            shutil.rmtree(repo_path)
        else:
            print "You don't want to overwrite. Bye!"
            sys.exit(0)

    def prompt_question(self, question, default=True):
        valid = {'yes':True, 'y':True, 'no':False, 'n':False}
        prompt = '[y/n]'
        if default == True:
            prompt = '[Y/n]'
        elif default == False:
            prompt = '[y/N]'
        while True:
            sys.stdout.write("{0} {1} ".format(question, prompt))
            choice = raw_input().lower()
            if default is not None and choice == '':
                return default
            elif choice in valid:
                return valid[choice]
            else:
                sys.stdout.write("Please respond with 'yes' or 'no'"\
                                 "(or 'y' or 'n')")

    def clone(self, url=None, checkout_branch=None, target_dir='.'):
        if url:
            self.url = url
        url = self.url

        #let's check target dir:
        target_dir = os.path.expanduser(target_dir)
        ensure_path(target_dir)

        #did we get a git or hg repo?
        vcs = self.get_handler(url)
        print vcs

        repo_path = self.get_repo_name(url, target_dir)
        print repo_path

        if os.path.isdir(repo_path):
            self.notify_existing_repo(repo_path)

        try:
            subprocess.check_call([vcs, 'clone', url], cwd=target_dir)
        except Exception, e:
            print e
            exit()

        if checkout_branch:
            subprocess.check_call([vcs, 'checkout', checkout_branch], cwd=target_dir)

        return repo_path
Python
0
@@ -115,39 +115,215 @@ il%0A%0A +%0A def -ensure_path(path):%0A pass +normalize_path(file_path, mkdir=False):%0A file_path = os.path.realpath(os.path.expanduser(file_path))%0A if mkdir and not os.path.isdir(file_path):%0A os.makedirs(file_path)%0A return file_path %0A%0A%0Ac @@ -1405,32 +1405,64 @@ tree(repo_path)%0A + os.mkdir(repo_path)%0A else:%0A @@ -2417,52 +2417,16 @@ r = -os.path.expanduser(target_dir)%0A ensur +normaliz e_pa @@ -2430,32 +2430,44 @@ _path(target_dir +, mkdir=True )%0A%0A #did @@ -2656,32 +2656,32 @@ dir(repo_path):%0A - self @@ -2705,32 +2705,79 @@ repo(repo_path)%0A + else:%0A os.mkdir(repo_path)%0A%0A try:%0A
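The helper this diff introduces, decoded from the hunk into runnable form: expand ~, resolve the path, and optionally create the directory before cloning into it.

import os

def normalize_path(file_path, mkdir=False):
    # expand ~ shortcuts and resolve symlinks / relative segments
    file_path = os.path.realpath(os.path.expanduser(file_path))
    if mkdir and not os.path.isdir(file_path):
        os.makedirs(file_path)
    return file_path

print(normalize_path("~/repos"))  # absolute, user-expanded path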
d00d809735210f53c3da71195107f1991814eb52
fix minor bug most likely due to merge error
labonneboite/common/models/user_favorite_offices.py
labonneboite/common/models/user_favorite_offices.py
# coding: utf8 import datetime from sqlalchemy import Column, ForeignKey, UniqueConstraint from sqlalchemy import desc from sqlalchemy import Integer, String, DateTime from sqlalchemy.orm import relationship from labonneboite.common.database import Base from labonneboite.common.database import db_session from labonneboite.common.models.base import CRUDMixin from labonneboite.common import util from labonneboite.conf import get_current_env, ENV_LBBDEV class UserFavoriteOffice(CRUDMixin, Base): """ Stores the favorites offices of a user. Important: This model has a relation to the `etablissements` model via the `office_siret` field. But the `etablissements` table is dropped and recreated during the offices import process (remember that `etablissements` is currently excluded from the migration system). Some entries in `etablissements` may disappear during this process. Therefore the `office_siret` foreign key integrity may be broken. So the foreign key integrity must be enforced by the script of the data deployment process. """ __tablename__ = 'user_favorite_offices' __table_args__ = ( UniqueConstraint('user_id', 'office_siret', name='_user_fav_office'), ) id = Column(Integer, primary_key=True) # Set `ondelete` to `CASCADE`: when a `user` is deleted, all his `favorites` are deleted too. user_id = Column(Integer, ForeignKey('users.id', ondelete='CASCADE'), nullable=False) # Set `ondelete` to `CASCADE`: when an `office` is deleted, all related `favorites` are deleted too. office_siret = Column(String(191), ForeignKey('etablissements.siret', ondelete='CASCADE'), nullable=True) date_created = Column(DateTime, default=datetime.datetime.utcnow, nullable=False) user = relationship('User') if get_current_env() == ENV_LBBDEV: # disable relationship which mysteriously breaks on lbbdev only, not needed there anyway. pass else: office = relationship('Office', lazy='joined') __mapper_args__ = { 'order_by': desc(date_created), # Default order_by for all queries. } @classmethod def user_favs_as_sirets(cls, user): """ Returns the favorites offices of a user as a list of sirets. Useful to check if an office is already in the favorites of a user. """ if user.is_anonymous: return [] sirets = [fav.office_siret for fav in db_session.query(cls).filter_by(user_id=user.id)] return sirets
Python
0.000001
@@ -360,45 +360,8 @@ xin%0A -from labonneboite.common import util%0A from
02d6e904fe02a4c53b1878a3f6c44c074de47d79
Add __str__ to Decorator
api/python/schwa/dr/decoration.py
api/python/schwa/dr/decoration.py
""" Utilities for managing document decoration by marking the document with the set of decorations that have been applied to it. """ from functools import wraps, partial def decorator(key=None): """ Wraps a docrep decorator, ensuring it is only executed once per document. Duplication is checked using the given key or the function object. """ def dec(fn): @wraps(fn) def wrapper(doc): try: if key in doc._decorated_by: return except AttributeError: doc._decorated_by = set() doc._decorated_by.add(key) fn(doc) return wrapper if callable(key): return dec(key) return dec class Decorator(object): """ An abstract document decorator, which wraps its decorate method to ensure it is only executed once per document. """ def __init__(self, key): # NOTE: wrapping __call__ like this didn't seem to work self.decorate = decorator(key)(self.decorate) @classmethod def _build_key(cls, *args): return '{}-{}'.format(cls.__name__, '-'.join(repr(arg) for arg in args)) def __call__(self, doc): self.decorate(doc) def decorate(self, doc): raise NotImplementedError() def requires_decoration(*decorators, **kwargs): """ Marks the document decoration dependencies for a function, where the document is found in the doc_arg positional argument (default 0) or doc_kwarg keyword argument (default 'doc'). """ doc_arg = kwargs.pop('doc_arg', 0) doc_kwarg = kwargs.pop('doc_kwarg', 'doc') if kwargs: raise ValueError("Got unexpected keyword arguments: {}".format(kwargs.keys())) def dec(fn): @wraps(fn) def wrapper(*args, **kwargs): try: doc = args[doc_arg] except IndexError: doc = kwargs[doc_kwarg] for decorate in decorators: decorate(doc) return fn(*args, **kwargs) return wrapper return dec method_requires_decoration = partial(requires_decoration, doc_arg=1)
Python
0.999998
@@ -940,16 +940,36 @@ ecorate) +%0A self._key = key %0A%0A @cla @@ -1196,16 +1196,59 @@ rror()%0A%0A + def __str__(self):%0A return self._key%0A%0A %0Adef req
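The addition this diff encodes, decoded into a runnable fragment with a hypothetical key: the decorator stores its key so str() on an instance reports which decoration it applies.

class Decorator(object):
    def __init__(self, key):
        self._key = key  # kept so __str__ can report it

    def __str__(self):
        return self._key

print(Decorator("sentence-splitter"))  # sentence-splitter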
53829f5a65727ba5ba0b69785bd74bf77d3e2ecf
Remove bogus shebang line.
cli/hack.py
cli/hack.py
# Copyright 2011 Digg, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #!/usr/bin/env python import argparse import logging import os import os.path from digg.dev.hackbuilder.util import get_root_of_repo_directory_tree import digg.dev.hackbuilder.cli.commands.build import digg.dev.hackbuilder.plugins def main(): logging.basicConfig(level=logging.DEBUG) parser = get_parser() args = parser.parse_args() plugins = get_plugin_modules(args.plugins) digg.dev.hackbuilder.plugins.initialize_plugins(plugins) args.func(args) def get_parser(): parser = argparse.ArgumentParser(description='Hack build tool.') parser.add_argument('--plugins', action='append', default=['debian', 'python'], help='List of plugins to load') subparsers = parser.add_subparsers(title='Subcommands') parser_help = subparsers.add_parser('help', help='Subcommand help') parser_help.add_argument( 'subcommand_name', help='Name of command to get help for', nargs='?') parser_build = digg.dev.hackbuilder.cli.commands.build.get_build_argparser( subparsers) parser_clean = subparsers.add_parser('clean', help='Clean up the mess.') parser_clean.set_defaults(func=do_clean) subcommand_parsers = { 'help': parser_help, 'build': parser_build, 'clean': parser_clean, } parser_help.set_defaults(func=get_help_parser_handler(parser, subcommand_parsers)) return parser def get_help_parser_handler(main_parser, subcommand_parsers): def do_help(args): try: subcommand_parser = subcommand_parsers[args.subcommand_name] subcommand_parser.print_help() except KeyError: main_parser.print_help() return do_help def do_clean(args): repo_root = os.path.abspath(get_root_of_repo_directory_tree()) logging.info('Repository root: %s', repo_root) normalizer = digg.dev.hackbuilder.target.Normalizer(repo_root) build = digg.dev.hackbuilder.build.Build(None, normalizer) build.remove_directories() def get_plugin_modules(requested_plugins): plugins = set() for requested_plugin in requested_plugins: plugin_name = 'digg.dev.hackbuilder.plugins.' + requested_plugin logging.info('Loading plugin module: %s', plugin_name) module = __import__(plugin_name, fromlist=['buildfile_locals'], level=0) plugins.add(module) return plugins if __name__ == '__main__': main()
Python
0
@@ -582,31 +582,8 @@ e.%0A%0A -#!/usr/bin/env python%0A%0A impo
543562d588f4aea74eacb80b404e0994e97cc7ff
Add method to set affine=False in all BN layers for PSPnet
ptsemseg/models/pspnet.py
ptsemseg/models/pspnet.py
import numpy as np import torch.nn as nn from ptsemseg import caffe_pb2 from ptsemseg.models.utils import * class pspnet(nn.Module): def __init__(self, n_classes=21, block_config=[3, 4, 23, 3]): super(pspnet, self).__init__() self.block_config = block_config self.n_classes = n_classes # Encoder self.convbnrelu1_1 = conv2DBatchNormRelu(in_channels=3, k_size=3, n_filters=64, padding=1, stride=2, bias=False) self.convbnrelu1_2 = conv2DBatchNormRelu(in_channels=64, k_size=3, n_filters=64, padding=1, stride=1, bias=False) self.convbnrelu1_3 = conv2DBatchNormRelu(in_channels=64, k_size=3, n_filters=128, padding=1, stride=1, bias=False) # Vanilla Residual Blocks self.res_block2 = residualBlockPSP(self.block_config[0], 128, 64, 256, 1, 1) self.res_block3 = residualBlockPSP(self.block_config[1], 256, 128, 512, 2, 1) # Dilated Residual Blocks self.res_block4 = residualBlockPSP(self.block_config[2], 512, 256, 1024, 1, 2) self.res_block5 = residualBlockPSP(self.block_config[3], 1024, 512, 2048, 1, 4) self.pyramid_pooling = pyramidPooling(2048, [6, 3, 2, 1]) self.cbr_final = conv2DBatchNormRelu(4096, 512, 3, 1, 1, False) self.classification = nn.Conv2d(512, n_classes, 1, 1, 0) def forward(self, x): inp_shape = x.shape[2:] # H, W -> H/2, W/2 x = self.convbnrelu1_3(self.convbnrelu1_2(self.convbnrelu1_1(x))) # H/2, W/2 -> H/4, W/4 x = F.max_pool2d(x, 3, 2, 1) # H/4, W/4 -> H/8, W/8 x = self.res_block5(self.res_block4(self.res_block3(self.res_block2(x)))) x = self.pyramid_pooling(x) x = F.dropout2d(self.cbr_final(x), p=0.1, inplace=True) x = self.classification(x) x = F.upsample(x, size=inp_shape, mode='bilinear') return x def load_pretrained_model(self, model_path): """ Done: Load weights from caffemodel w/o caffe dependency TODO: Plug them in corresponding modules """ # Only care about layer_types that have trainable parameters ltypes = ['BNData', 'ConvolutionData', 'HoleConvolutionData'] def _get_layer_params(layer, ltype): if ltype == 'BNData': n_channels = layer.blobs[0].shape.dim[1] mean = np.array([w for w in layer.blobs[0].data]).reshape(n_channels) var = np.array([w for w in layer.blobs[1].data]).reshape(n_channels) scale_factor = np.array([w for w in layer.blobs[2].data]).reshape(n_channels) mean, var = mean / scale_factor, var / scale_factor return [mean, var, scale_factor] elif ltype in ['ConvolutionData', 'HoleConvolutionData']: is_bias = layer.convolution_param.bias_term shape = [int(d) for d in layer.blobs[0].shape.dim] weights = np.array([w for w in layer.blobs[0].data]).reshape(shape) bias = [] if is_bias: bias = np.array([w for w in layer.blobs[1].data]).reshape(shape[0]) return [weights, bias] elif ltype == 'InnerProduct': raise Exception("Fully connected layers {}, not supported".format(ltype)) else: raise Exception("Unkown layer type {}".format(ltype)) net = caffe_pb2.NetParameter() with open(model_path, 'rb') as model_file: net.MergeFromString(model_file.read()) # dict formatted as -> key:<layer_name> :: value:<layer_type> layer_types = {} # dict formatted as -> key:<layer_name> :: value:[<list_of_params>] layer_params = {} for l in net.layer: lname = l.name ltype = l.type if ltype in ltypes: print("Processing layer {}".format(lname)) layer_types[lname] = ltype layer_params[lname] = _get_layer_params(l, ltype) #TODO: Plug weights from dictionary into right places if __name__ == '__main__': psp = pspnet() psp.load_pretrained_model(model_path='/home/meetshah1995/models/pspnet101_cityscapes.caffemodel') import pdb;pdb.set_trace()
Python
0.000012
@@ -4217,60 +4217,486 @@ # -TODO: Plug weights from dictionary into right places + Set affine=False for all batchnorm modules%0A def _no_affine_bn(module=None):%0A if isinstance(module, nn.BatchNorm2d):%0A module.affine = False%0A%0A if len(%5Bm for m in module.children()%5D) %3E 0:%0A for child in module.children():%0A _no_affine_bn(child)%0A%0A _no_affine_bn(self)%0A%0A #TODO: Plug weights from dictionary into right places%0A # My eyes and my heart both hurt when writing this%0A %0A%0Aif
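A self-contained sketch (requires PyTorch) of the recursive traversal this commit adds: visit every child module and switch affine off on each BatchNorm2d, matching the caffe BN parameters (mean, variance, scale factor only) loaded above.

import torch.nn as nn

def _no_affine_bn(module):
    if isinstance(module, nn.BatchNorm2d):
        module.affine = False
    for child in module.children():  # recurse through the whole module tree
        _no_affine_bn(child)

net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
_no_affine_bn(net)
print(net[1].affine)  # False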
c97992688641bcbf2075c9da532522f289e02c71
Add response code reason logging
scrap/__main__.py
scrap/__main__.py
import sys, asyncio, aiohttp, async_timeout, collections, csv from lxml import html, etree from tld import get_tld from urllib.parse import urlparse USER_AGENT = 'Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1' ''' analizar doctype html5 soltar tabs ''' columns =['url', 'origin', 'doctype', 'tableCount', 'error'] Row = collections.namedtuple('Row', columns) headRow = Row(*columns) def readFileSet(filename): with open(filename, 'r') as f: return set([x.strip() for x in f.readlines()]) blacklist = readFileSet('./data/blacklist.txt') class Scrapper: def __init__(self, loop, timeout=10): self.loop = loop self.timeout = timeout self.headers = { 'User-Agent': USER_AGENT } async def get(self, url): with async_timeout.timeout(self.timeout): async with aiohttp.ClientSession(loop=self.loop, headers=self.headers) as session: try: async with session.get(url) as response: ## check status code try: text = await response.text() except Exception as e: print(url, 'has an unicode error') return e try: return html.fromstring(text) except Exception as e: print(url, 'has a XML/HTML parsing error') return e except Exception as e: print(url, 'has a HTTP/SSL errors') return e async def google(scrapper, keywords, pages=50): url = '/search?filter=0&query='+keywords for n in range(pages): print('GOOGLE SEARCH FOR', keywords, 'PAGE', n) html = await scrapper.get('https://www.google.com'+url) if isinstance(html, Exception): print('Error loading google page', url) continue; organicLinks = html.xpath('//h3[@class="r"]//a/@href') for link in organicLinks: yield link, 'organic' # next page url = html.xpath('//a[@id="pnnext"]/@href') if not url: break url = url[0] async def bing(scrapper, keywords, pages=50): url = '/search?q='+keywords for n in range(pages): html = await scrapper.get('https://www.bing.com'+url) if isinstance(html, Exception): print('Error loading google page', url) continue; organicLinks = html.xpath('//h3[@class="r"]//a/@href') for link in organicLinks: yield link, 'organic' # next page url = html.xpath('//a[@id="pnnext"]/@href') if not url: break url = url[0] async def searchLoop(loop, searchEngine, keywords): scrapper = Scrapper(loop) pages = set() async for link, origin in searchEngine(scrapper, keywords): urlparts = urlparse(link) link = '{url.scheme}://{url.netloc}'.format(url=urlparts) tld = get_tld(link) if tld in pages or tld in blacklist: continue pages.add(tld) print('Scanning', tld) #print('scanning', link) page = await scrapper.get(link) if isinstance(page, Exception): yield Row(url=link, origin=origin, doctype=None, tableCount=None, error=str(page)) else: # cuenta el numero de tablas doctType=page.getroottree().docinfo.doctype; tableCount = len(page.xpath('//table')) yield Row(url=link, origin=origin, doctype=doctType, tableCount=tableCount, error=None) async def search(loop, keywords): outputFilename = './data/' + keywords + '.csv' with open(outputFilename, 'w', newline='') as csvFile: csvWriter = csv.writer(csvFile) csvWriter.writerow(headRow) async for row in searchLoop(loop, google, keywords): csvWriter.writerow(row) loop = asyncio.get_event_loop() loop.run_until_complete(search(loop, '+'.join(sys.argv[1:]))) loop.close()
Python
0.000001
@@ -1011,16 +1011,146 @@ us code%0A +%09%09%09%09%09%09if response.status != 200:%0A%09%09%09%09%09%09%09print(url, 'response', response.status, ':', response.reason)%0A%09%09%09%09%09%09%09return %0A%09%09%09%09%09%09else:%0A%09 %09%09%09%09%09%09tr @@ -1148,24 +1148,25 @@ %09%09%09%09%09%09%09try:%0A +%09 %09%09%09%09%09%09%09text @@ -1187,32 +1187,33 @@ se.text()%0A%09%09%09%09%09%09 +%09 except Exception @@ -1211,32 +1211,33 @@ Exception as e:%0A +%09 %09%09%09%09%09%09%09print(url @@ -1261,32 +1261,33 @@ error')%0A%09%09%09%09%09%09%09 +%09 return e%0A%09%09%09%09%09%09t @@ -1275,24 +1275,25 @@ %09%09%09return e%0A +%09 %09%09%09%09%09%09try:%0A%09 @@ -1294,24 +1294,25 @@ try:%0A%09%09%09%09%09%09%09 +%09 return html. @@ -1328,16 +1328,17 @@ g(text)%0A +%09 %09%09%09%09%09%09ex @@ -1357,32 +1357,33 @@ on as e:%0A%09%09%09%09%09%09%09 +%09 print(url, 'has @@ -1405,24 +1405,25 @@ ing error')%0A +%09 %09%09%09%09%09%09%09retur @@ -1756,32 +1756,48 @@ html, Exception) + or html is None :%0A%09%09%09print('Erro @@ -1824,33 +1824,29 @@ e', url)%0A%09%09%09 -continue; +break %0A%0A%09%09organicL
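A minimal sketch of the guard this commit adds, using aiohttp as the scraper above does and a placeholder caller: log the status code with its reason phrase and bail out before trying to parse a failed response.

import aiohttp

async def fetch_html(url):
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            if response.status != 200:
                print(url, 'response', response.status, ':', response.reason)
                return None
            return await response.text()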
d7e6db61a0100e69b9a18c17a906e094e91ce7b3
fix wrong keyword param (passws) to MySQLdb.connect
database.py
database.py
""" Database Manager. """ import MySQLdb import MySQLdb.cursors class DatabaseManager(object): def __init__(self, host, user, passwd, database, charset='utf8', large_scale=False): """Be careful using large_scale=True, SSDictCursor seems not reliable.""" self.conn = MySQLdb.connect(host=host, user=user, passws=passwd, db=database, charset=charset) self.large_scale = large_scale def close(self): self.conn.close() # put here for better understandability cursor_types = { True: { True: MySQLdb.cursors.SSDictCursor, False: MySQLdb.cursors.SSCursor, }, False: { True: MySQLdb.cursors.DictCursor, False: MySQLdb.cursors.Cursor, }, } def __get_cursor_type(self, use_dict): return self.cursor_types[self.large_scale][use_dict] def __query(self, sql, values=(), use_dict=True): """Execute any SQL. You can use %s placeholder in sql and fill with values. return cursor""" cursor = self.conn.cursor(self.__get_cursor_type(use_dict)) cursor.execute(sql, values) return cursor def query(self, sql, values=()): """Execute any SQL and return affected rows.""" cursor = self.__query(sql, values) return cursor.rowcount def insert(self, sql, values=()): """Insert a row and return insert id.""" cursor = self.__query(sql, values) return cursor.lastrowid def get_rows(self, sql, values=()): """[Generator]Get rows of SELECT query.""" cursor = self.__query(sql, values) for i in xrange(cursor.rowcount): yield cursor.fetchone() def get_value(self, sql, idx=0): """Get value of the first row. This is handy if you want to retrive COUNT(*).""" cursor = self.__query(sql, use_dict=False) row = cursor.fetchone() return row[idx]
Python
0.000026
@@ -324,17 +324,17 @@ r, passw -s +d =passwd,
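A minimal sketch of the one-keyword fix above, with placeholder credentials: MySQLdb.connect takes passwd, so the misspelled passws keyword broke the connect call.

import MySQLdb

conn = MySQLdb.connect(host="localhost", user="app",
                       passwd="secret",  # was misspelled as passws
                       db="appdb", charset="utf8")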
4baba777801765b0ce9025c9ef170d3465d874fc
Add state class measurement to SwitchBot signal strength sensors (#79886)
homeassistant/components/switchbot/sensor.py
homeassistant/components/switchbot/sensor.py
"""Support for SwitchBot sensors.""" from __future__ import annotations from homeassistant.components.sensor import ( SensorDeviceClass, SensorEntity, SensorEntityDescription, SensorStateClass, ) from homeassistant.config_entries import ConfigEntry from homeassistant.const import ( PERCENTAGE, SIGNAL_STRENGTH_DECIBELS_MILLIWATT, TEMP_CELSIUS, ) from homeassistant.core import HomeAssistant from homeassistant.helpers.entity import EntityCategory from homeassistant.helpers.entity_platform import AddEntitiesCallback from .const import DOMAIN from .coordinator import SwitchbotDataUpdateCoordinator from .entity import SwitchbotEntity PARALLEL_UPDATES = 0 SENSOR_TYPES: dict[str, SensorEntityDescription] = { "rssi": SensorEntityDescription( key="rssi", native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS_MILLIWATT, device_class=SensorDeviceClass.SIGNAL_STRENGTH, entity_registry_enabled_default=False, entity_category=EntityCategory.DIAGNOSTIC, ), "wifi_rssi": SensorEntityDescription( key="wifi_rssi", native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS_MILLIWATT, device_class=SensorDeviceClass.SIGNAL_STRENGTH, entity_registry_enabled_default=False, entity_category=EntityCategory.DIAGNOSTIC, ), "battery": SensorEntityDescription( key="battery", native_unit_of_measurement=PERCENTAGE, device_class=SensorDeviceClass.BATTERY, state_class=SensorStateClass.MEASUREMENT, entity_category=EntityCategory.DIAGNOSTIC, ), "lightLevel": SensorEntityDescription( key="lightLevel", native_unit_of_measurement="Level", state_class=SensorStateClass.MEASUREMENT, device_class=SensorDeviceClass.ILLUMINANCE, ), "humidity": SensorEntityDescription( key="humidity", native_unit_of_measurement=PERCENTAGE, state_class=SensorStateClass.MEASUREMENT, device_class=SensorDeviceClass.HUMIDITY, ), "temperature": SensorEntityDescription( key="temperature", native_unit_of_measurement=TEMP_CELSIUS, state_class=SensorStateClass.MEASUREMENT, device_class=SensorDeviceClass.TEMPERATURE, ), } async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback ) -> None: """Set up Switchbot sensor based on a config entry.""" coordinator: SwitchbotDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id] entities = [ SwitchBotSensor( coordinator, sensor, ) for sensor in coordinator.data["data"] if sensor in SENSOR_TYPES ] entities.append(SwitchbotRSSISensor(coordinator, "rssi")) async_add_entities(entities) class SwitchBotSensor(SwitchbotEntity, SensorEntity): """Representation of a Switchbot sensor.""" def __init__( self, coordinator: SwitchbotDataUpdateCoordinator, sensor: str, ) -> None: """Initialize the Switchbot sensor.""" super().__init__(coordinator) self._sensor = sensor self._attr_unique_id = f"{coordinator.base_unique_id}-{sensor}" name = coordinator.device_name self._attr_name = f"{name} {sensor.replace('_', ' ').title()}" self.entity_description = SENSOR_TYPES[sensor] @property def native_value(self) -> str | int: """Return the state of the sensor.""" return self.data["data"][self._sensor] class SwitchbotRSSISensor(SwitchBotSensor): """Representation of a Switchbot RSSI sensor.""" @property def native_value(self) -> str | int: """Return the state of the sensor.""" return self.coordinator.ble_device.rssi
Python
0
@@ -911,32 +911,82 @@ IGNAL_STRENGTH,%0A + state_class=SensorStateClass.MEASUREMENT,%0A entity_r @@ -1260,32 +1260,82 @@ IGNAL_STRENGTH,%0A + state_class=SensorStateClass.MEASUREMENT,%0A entity_r
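The two hunks above decoded into one entity description: the signal-strength sensors gain state_class=SensorStateClass.MEASUREMENT so Home Assistant records long-term statistics for them; every import already appears in the file itself.

from homeassistant.components.sensor import (
    SensorDeviceClass, SensorEntityDescription, SensorStateClass,
)
from homeassistant.const import SIGNAL_STRENGTH_DECIBELS_MILLIWATT
from homeassistant.helpers.entity import EntityCategory

rssi = SensorEntityDescription(
    key="rssi",
    native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
    device_class=SensorDeviceClass.SIGNAL_STRENGTH,
    state_class=SensorStateClass.MEASUREMENT,  # the line this commit adds
    entity_registry_enabled_default=False,
    entity_category=EntityCategory.DIAGNOSTIC,
)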
63d7b33ece78303ff66b159c5db7be373e69dfea
Fix comment
hoomd/md/pytest/test_lj_equation_of_state.py
hoomd/md/pytest/test_lj_equation_of_state.py
import hoomd import pytest import hoomd.conftest import numpy import math # Selected state points in the high density fluid. Both of these state points # are in single phase regions of the phase diagram and suitable for NVT/NPT and # MC cross validation. # T_star, rho_star, mean_U_ref, sigma_U_ref, mean_P_ref, sigma_P_ref, # log_period, equilibration_steps, run_steps statepoints = [ (1.4, 0.9, -4.6622, 0.0006089, 6.6462, 0.00328, 64, 2**10, 2**16), ] @pytest.mark.validate @pytest.mark.parametrize( 'T_star, rho_star, mean_U_ref, sigma_U_ref, mean_P_ref, sigma_P_ref,' 'log_period, equilibration_steps, run_steps', statepoints) @pytest.mark.parametrize('method_name', ['Langevin', 'NVT', 'NPT']) def test_lj_equation_of_state( T_star, rho_star, mean_U_ref, sigma_U_ref, mean_P_ref, sigma_P_ref, log_period, equilibration_steps, run_steps, method_name, fcc_snapshot_factory, simulation_factory, device, ): # construct the system at the given density n = 6 if device.communicator.num_ranks > 1: # MPI tests need a box large enough to decompose n = 8 N = n**3 * 4 V = N / rho_star L = V**(1 / 3) r_cut = 2.5 a = L / n snap = fcc_snapshot_factory(n=n, a=a) sim = simulation_factory(snap) sim.seed = 10 # set the simulation parameters integrator = hoomd.md.Integrator(dt=0.005) lj = hoomd.md.pair.LJ(nlist=hoomd.md.nlist.Cell(), r_cut=r_cut, mode='shift') lj.params.default = {'sigma': 1, 'epsilon': 1} integrator.forces.append(lj) if method_name == 'NVT': method = hoomd.md.methods.NVT(filter=hoomd.filter.All(), kT=T_star, tau=0.1) elif method_name == 'Langevin': method = hoomd.md.methods.Langevin(filter=hoomd.filter.All(), kT=T_star) elif method_name == 'NPT': method = hoomd.md.methods.NPT(filter=hoomd.filter.All(), kT=T_star, tau=0.1, S=mean_P_ref, tauS=0.5, couple='xyz') integrator.methods.append(method) sim.operations.integrator = integrator # equilibrate the simulation sim.run(0) sim.state.thermalize_particle_momenta(filter=hoomd.filter.All(), kT=T_star) if method_name == 'NVT': method.thermalize_thermostat_dof() elif method_name == 'NPT': method.thermalize_thermostat_and_barostat_dof() sim.run(equilibration_steps) # log energy and pressure thermo = hoomd.md.compute.ThermodynamicQuantities(filter=hoomd.filter.All()) sim.operations.computes.append(thermo) energy_log = hoomd.conftest.ListWriter(thermo, 'potential_energy') pressure_log = hoomd.conftest.ListWriter(thermo, 'pressure') volume_log = hoomd.conftest.ListWriter(thermo, 'volume') log_trigger = hoomd.trigger.Periodic(log_period) sim.operations.writers.append( hoomd.write.CustomWriter(action=energy_log, trigger=log_trigger)) sim.operations.writers.append( hoomd.write.CustomWriter(action=pressure_log, trigger=log_trigger)) sim.operations.writers.append( hoomd.write.CustomWriter(action=volume_log, trigger=log_trigger)) sim.always_compute_pressure = True sim.run(run_steps) # compute the average and error energy = hoomd.conftest.BlockAverage(numpy.array(energy_log.data) / N) pressure = hoomd.conftest.BlockAverage(pressure_log.data) rho = hoomd.conftest.BlockAverage(N / numpy.array(volume_log.data)) # Useful information to know when the test fails print('U_ref = ', mean_U_ref, '+/-', sigma_U_ref) print('U = ', energy.mean, '+/-', energy.standard_deviation, '(', energy.relative_error * 100, '%)') print('P_ref = ', mean_P_ref, '+/-', sigma_P_ref) print('P = ', pressure.mean, '+/-', pressure.standard_deviation, pressure.standard_deviation, '(', pressure.relative_error * 100, '%)') print('rho = ', rho.mean, '+/-', rho.standard_deviation) print(f'Statepoint entry: 
{T_star:0.4}, {rho_star:0.4}, ' f'{energy.mean:0.5}, {energy.standard_deviation:0.4}, ' f'{pressure.mean:0.5}, {pressure.standard_deviation:0.4}') energy.assert_close(mean_U_ref, sigma_U_ref) # use larger tolerances for pressure and density as these have larger # fluctuations if method_name == 'NVT' or method_name == 'Langevin': pressure.assert_close(mean_P_ref, sigma_P_ref) if method_name == 'NPT': rho.assert_close(rho_star, 0)
Python
0
@@ -90,17 +90,19 @@ te point -s +(s) in the @@ -125,17 +125,9 @@ id. -Both of t +T hese @@ -142,18 +142,20 @@ oint -s%0A# +(s) are in +%0A# sin @@ -225,13 +225,13 @@ and -%0A# MC +%0A# cro
73ff711881475b8e18b7a569e9b6fc3e024b0119
Update design-twitter.py
Python/design-twitter.py
Python/design-twitter.py
# Time: O(klogu), k is most recently number of tweets, # u is the number of the user's following. # Space: O(t + f), t is the total number of tweets, # f is the total number of followings. # Design a simplified version of Twitter where users can post tweets, # follow/unfollow another user and is able to see the 10 most recent # tweets in the user's news feed. Your design should support the following methods: # # postTweet(userId, tweetId): Compose a new tweet. # getNewsFeed(userId): Retrieve the 10 most recent tweet ids in the user's # news feed. Each item in the news feed must be posted by users who the user followed # or by the user herself. Tweets must be ordered from most recent to least recent. # follow(followerId, followeeId): Follower follows a followee. # unfollow(followerId, followeeId): Follower unfollows a followee. # Example: # # Twitter twitter = new Twitter(); # # // User 1 posts a new tweet (id = 5). # twitter.postTweet(1, 5); # # // User 1's news feed should return a list with 1 tweet id -> [5]. # twitter.getNewsFeed(1); # # // User 1 follows user 2. # twitter.follow(1, 2); # # // User 2 posts a new tweet (id = 6). # twitter.postTweet(2, 6); # # // User 1's news feed should return a list with 2 tweet ids -> [6, 5]. # // Tweet id 6 should precede tweet id 5 because it is posted after tweet id 5. # twitter.getNewsFeed(1); # # // User 1 unfollows user 2. # twitter.unfollow(1, 2); # # // User 1's news feed should return a list with 1 tweet id -> [5], # // since user 1 is no longer following user 2. # twitter.getNewsFeed(1); class Twitter(object): def __init__(self): """ Initialize your data structure here. """ self.__number_of_most_recent_tweets = 10 self.__followings = collections.defaultdict(set) self.__messages = collections.defaultdict(list) self.__time = 0 def postTweet(self, userId, tweetId): """ Compose a new tweet. :type userId: int :type tweetId: int :rtype: void """ self.__time += 1 self.__messages[userId].append((self.__time, tweetId)) def getNewsFeed(self, userId): """ Retrieve the 10 most recent tweet ids in the user's news feed. Each item in the news feed must be posted by users who the user followed or by the user herself. Tweets must be ordered from most recent to least recent. :type userId: int :rtype: List[int] """ max_heap = [] if self.__messages[userId]: heapq.heappush(max_heap, (-self.__messages[userId][-1][0], userId, 0, len(self.__messages[userId]))) for uid in self.__followings[userId]: if self.__messages[uid]: heapq.heappush(max_heap, (-self.__messages[uid][-1][0], uid, 0, len(self.__messages[uid]))) result = [] while max_heap and len(result) < self.__number_of_most_recent_tweets: t, uid, curr, end = heapq.heappop(max_heap) nxt = curr + 1; if nxt != end: heapq.heappush(max_heap, (-self.__messages[uid][-(nxt+1)][0], uid, nxt, len(self.__messages[uid]))) result.append(self.__messages[uid][-(curr+1)][1]); return result def follow(self, followerId, followeeId): """ Follower follows a followee. If the operation is invalid, it should be a no-op. :type followerId: int :type followeeId: int :rtype: void """ if followerId != followeeId: self.__followings[followerId].add(followeeId) def unfollow(self, followerId, followeeId): """ Follower unfollows a followee. If the operation is invalid, it should be a no-op. 
:type followerId: int :type followeeId: int :rtype: void """ self.__followings[followerId].discard(followeeId) # Your Twitter object will be instantiated and called as such: # obj = Twitter() # obj.postTweet(userId,tweetId) # param_2 = obj.getNewsFeed(userId) # obj.follow(followerId,followeeId) # obj.unfollow(followerId,followeeId)
Python
0
@@ -2632,38 +2632,8 @@ d, 0 -, len(self.__messages%5BuserId%5D) ))%0A @@ -2796,35 +2796,8 @@ d, 0 -, len(self.__messages%5Buid%5D) ))%0A%0A @@ -2922,13 +2922,8 @@ curr -, end = h @@ -2995,19 +2995,41 @@ nxt != +l en -d +(self.__messages%5Buid%5D) :%0A @@ -3112,35 +3112,8 @@ nxt -, len(self.__messages%5Buid%5D) ))%0A
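A standalone sketch of the max-heap feed merge getNewsFeed implements, in the simplified shape the diff leaves behind (the list length is computed when needed rather than carried in every heap tuple); user ids and tweet ids are toy data.

import heapq

feeds = {1: [(1, 't1'), (4, 't4')], 2: [(2, 't2'), (3, 't3')]}  # (time, tweet)
heap = [(-msgs[-1][0], uid, 0) for uid, msgs in feeds.items() if msgs]
heapq.heapify(heap)
result = []
while heap and len(result) < 10:
    t, uid, curr = heapq.heappop(heap)  # globally newest remaining tweet
    result.append(feeds[uid][-(curr + 1)][1])
    if curr + 1 != len(feeds[uid]):     # length looked up on demand
        heapq.heappush(heap, (-feeds[uid][-(curr + 2)][0], uid, curr + 1))
print(result)  # ['t4', 't3', 't2', 't1']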
2e164c5fe2e3a208dbdcbc51f287a9e5b7cc34a8
Add package_data entry in setup.py
setup.py
setup.py
from setuptools import setup
from klink import __version__

setup(
    name='klink',
    version=__version__,
    url='https://github.com/pmorissette/klink',
    description='Klink is a simple and clean theme for creating Sphinx docs, inspired by jrnl',
    license='MIT',
    author='Philippe Morissette',
    author_email='[email protected]',
    packages=['klink']
)
Python
0.000001
@@ -372,11 +372,156 @@ 'klink'%5D +,%0A package_data = %7B'klink': %5B%0A 'theme.conf',%0A 'layout.html',%0A 'static/css/klink.css',%0A 'static/fonts/*.*',%0A %5D%7D, %0A)%0A
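The package_data block this diff adds, decoded into a runnable setup() call: non-Python theme files must be listed explicitly (glob patterns allowed) or setuptools leaves them out of the built distribution.

from setuptools import setup

setup(
    name='klink',
    packages=['klink'],
    package_data={'klink': [
        'theme.conf',
        'layout.html',
        'static/css/klink.css',
        'static/fonts/*.*',  # glob pattern covers every font file
    ]},
)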
82e735dabd789139ded53bcb4b1dd5af1065d7ef
fix a module name in setup.py
setup.py
setup.py
# -*- coding: utf-8 -*- import sys import os from distutils.core import setup from setuptools import find_packages ######### # settings ######### project_var_name = "pyrsslocal" sversion = "0.8" versionPython = "%s.%s" % (sys.version_info.major, sys.version_info.minor) path = "Lib/site-packages/" + project_var_name readme = 'README.rst' KEYWORDS = project_var_name + ', RSS, viewer, blog, post' DESCRIPTION = """Local RSS reader/viewer""" CLASSIFIERS = [ 'Programming Language :: Python :: 3', 'Intended Audience :: Developers', 'Topic :: Scientific/Engineering', 'Topic :: Education', 'License :: OSI Approved :: MIT License', 'Development Status :: 5 - Production/Stable' ] ####### # data ####### packages = find_packages('src', exclude='src') package_dir = {k: "src/" + k.replace(".", "/") for k in packages} package_data = {project_var_name + ".rss": ["*.html", "*.css", "*.js", "*.png"], project_var_name + ".javascript": ["*.html", "*.css", "*.js"], } ############ # functions ############ def is_local(): file = os.path.abspath(__file__).replace("\\", "/").lower() if "/temp/" in file and "pip-" in file: return False if \ "bdist_msi" in sys.argv or \ "build27" in sys.argv or \ "build_script" in sys.argv or \ "build_sphinx" in sys.argv or \ "bdist_wheel" in sys.argv or \ "bdist_wininst" in sys.argv or \ "clean_pyd" in sys.argv or \ "clean_space" in sys.argv or \ "copy27" in sys.argv or \ "copy_dist" in sys.argv or \ "local_pypi" in sys.argv or \ "notebook" in sys.argv or \ "publish" in sys.argv or \ "publish_doc" in sys.argv or \ "register" in sys.argv or \ "unittests" in sys.argv or \ "unittests_LONG" in sys.argv or \ "unittests_SKIP" in sys.argv or \ "run27" in sys.argv or \ "sdist" in sys.argv or \ "setupdep" in sys.argv or \ "test_local_pypi" in sys.argv or \ "upload_docs" in sys.argv or \ "setup_hook" in sys.argv or \ "write_version" in sys.argv: try: import_pyquickhelper() except ImportError: return False return True else: return False def import_pyquickhelper(): try: import pyquickhelper except ImportError: sys.path.append( os.path.normpath( os.path.abspath( os.path.join( os.path.dirname(__file__), "..", "pyquickhelper", "src")))) try: import pyquickhelper except ImportError as e: message = "module pyquickhelper is needed to build the documentation ({0}), not found in path {1}".format( sys.executable, sys.path[ -1]) raise ImportError(message) from e return pyquickhelper def verbose(): print("---------------------------------") print("package_dir =", package_dir) print("packages =", packages) print("package_data=", package_data) print("current =", os.path.abspath(os.getcwd())) print("---------------------------------") ########## # version ########## if is_local(): def write_version(): pyquickhelper = import_pyquickhelper() from pyquickhelper import write_version_for_setup return write_version_for_setup(__file__) write_version() if os.path.exists("version.txt"): with open("version.txt", "r") as f: lines = f.readlines() subversion = "." 
+ lines[0].strip("\r\n ") else: raise FileNotFoundError("version.txt") else: # when the module is installed, no commit number is displayed subversion = "" ############## # common part ############## if os.path.exists(readme): with open(readme, "r", encoding='utf-8-sig') as f: long_description = f.read() else: long_description = "" if "--verbose" in sys.argv: verbose() if is_local(): pyquickhelper = import_pyquickhelper() r = pyquickhelper.process_standard_options_for_setup( sys.argv, __file__, project_var_name, unittest_modules=["pyquickhelper", "pyense"], requirements=["pyquickhelper", "pyensae"]) else: r = False if len(sys.argv) == 1 and "--help" in sys.argv: pyquickhelper = import_pyquickhelper() pyquickhelper.process_standard_options_for_setup_help() if not r: setup( name=project_var_name, version='%s%s' % (sversion, subversion), author='Xavier Dupré', author_email='xavier.dupre AT gmail.com', url="http://www.xavierdupre.fr/app/pyrsslocal/helpsphinx/index.html", download_url="https://github.com/sdpython/pyrsslocal/", description=DESCRIPTION, long_description=long_description, keywords=KEYWORDS, classifiers=CLASSIFIERS, packages=packages, package_dir=package_dir, package_data=package_data, install_requires=["pyquickhelper", "pyensae", "feedparser"], )
Python
0.00002
@@ -4289,16 +4289,17 @@ , %22pyens +a e%22%5D,%0A
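Decoded, the hunk inserts a single character: the misspelled test-module name "pyense" becomes "pyensae", matching the requirements list in the same call. The affected call (surrounding code unchanged) becomes roughly:

r = pyquickhelper.process_standard_options_for_setup(
    sys.argv, __file__, project_var_name,
    unittest_modules=["pyquickhelper", "pyensae"],  # was "pyense"
    requirements=["pyquickhelper", "pyensae"])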
64c50a273c3e113affdb700f137bda78fd1a684d
update examples/progressbar.py
examples/progressbar.py
examples/progressbar.py
#!/usr/bin/env python # Tai Sakuma <[email protected]> from AlphaTwirl.ProgressBar import ProgressBar, MPProgressMonitor, ProgressReport from AlphaTwirl.EventReader import MPEventLoopRunner import time, random ##____________________________________________________________________________|| class EventLoop(object): def __init__(self, name): self.name = name self.readers = [ ] def __call__(self, progressReporter = None): n = random.randint(5, 50) for i in xrange(n): time.sleep(0.1) report = ProgressReport(name = self.name, done = i + 1, total = n) progressReporter.report(report) return self.readers ##____________________________________________________________________________|| progressBar = ProgressBar() progressMonitor = MPProgressMonitor(presentation = progressBar) runner = MPEventLoopRunner(progressMonitor = progressMonitor) runner.begin() runner.run(EventLoop("loop1")) runner.run(EventLoop("loop2")) runner.run(EventLoop("loop3")) runner.run(EventLoop("loop4")) runner.run(EventLoop("loop5")) runner.run(EventLoop("loop6")) runner.run(EventLoop("loop7")) runner.run(EventLoop("loop8")) runner.end() ##____________________________________________________________________________||
Python
0
@@ -93,16 +93,30 @@ ressBar, + ProgressBar2, MPProgr @@ -142,16 +142,16 @@ sReport%0A - from Alp @@ -487,16 +487,57 @@ (5, 50)%0A + time.sleep(random.randint(0, 3))%0A @@ -1012,17 +1012,16 @@ op(%22loop -1 %22))%0Arunn @@ -1042,13 +1042,20 @@ op(%22 +another loop -2 %22))%0A @@ -1080,13 +1080,17 @@ op(%22 +more loop -3 %22))%0A @@ -1119,9 +1119,18 @@ loop -4 + loop loop %22))%0A @@ -1156,12 +1156,8 @@ p(%22l -oop5 %22))%0A
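Decoded, the hunks import ProgressBar2 alongside ProgressBar, add a random delay before each loop starts reporting, and rename the example loops ("loop1" becomes "loop", "loop2" "another loop", "loop3" "more loop", and so on). The functional part of the change, roughly (the file is Python 2, hence xrange):

from AlphaTwirl.ProgressBar import ProgressBar, ProgressBar2, MPProgressMonitor, ProgressReport

class EventLoop(object):
    # __init__ unchanged
    def __call__(self, progressReporter = None):
        n = random.randint(5, 50)
        time.sleep(random.randint(0, 3))  # new: stagger when each loop begins
        for i in xrange(n):
            time.sleep(0.1)
            report = ProgressReport(name = self.name, done = i + 1, total = n)
            progressReporter.report(report)
        return self.readers

The staggered start makes the multiprocess progress-bar demo more realistic, since the loops no longer all begin reporting in lockstep.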
f8bd4073beb50f9fb750170e79804d13ea50db0b
update example
examples/raster_mesh.py
examples/raster_mesh.py
from bluesky.examples import Mover, SynGauss, Syn2DGauss import bluesky.simple_scans as bss import bluesky.spec_api as bsa import bluesky.callbacks from bluesky.standard_config import gs import bluesky.qt_kicker # motors theta = Mover('theta', ['theta']) gamma = Mover('gamma', ['gamma']) # synthetic detectors coupled to one motor theta_det = SynGauss('theta_det', theta, 'theta', center=0, Imax=1, sigma=1) gamma_det = SynGauss('gamma_det', gamma, 'gamma', center=0, Imax=1, sigma=1) # synthetic detector coupled to two detectors tgd = Syn2DGauss('theta_gamma_det', theta, 'theta', gamma, 'gamma', center=(0, 0), Imax=1) # set up the default detectors gs.DETS = [theta_det, gamma_det, tgd] ysteps = 25 xsteps = 20 # hook up the live raster callback #cb = bluesky.callbacks.LiveRaster((ysteps + 1, xsteps + 1), # 'theta_gamma_det', clim=[0, 1]) mesha = bss.OuterProductAbsScanPlan() # run a mesh scan gs.MASTER_DET_FIELD = 'theta_gamma_det' bsa.mesh(theta, -2.5, 2.5, ysteps, gamma, -2, 2, xsteps, False)
Python
0
@@ -69,25 +69,18 @@ sky. -simple_sc +pl ans as b ss%0Ai @@ -75,18 +75,17 @@ ans as b -ss +p %0Aimport @@ -196,16 +196,54 @@ t_kicker +%0Abluesky.qt_kicker.install_qt_kicker() %0A%0A# moto @@ -797,17 +797,16 @@ allback%0A -# cb = blu @@ -842,20 +842,16 @@ teps - + 1 , xsteps + 1 @@ -850,16 +850,11 @@ teps - + 1 ),%0A -# @@ -919,164 +919,289 @@ 1%5D)%0A -mesha = bss.OuterProductAbsScanPlan()%0A# run a mesh scan%0Ags.MASTER_DET_FIELD = 'theta_gamma_det'%0Absa.mesh(theta, -2.5, 2.5, ysteps, gamma, -2, 2, xsteps, False +lt = bluesky.callbacks.LiveTable(%5Btheta, gamma, tgd%5D)%0Ags.MASTER_DET_FIELD = 'theta_gamma_det'%0Amesha = bp.OuterProductAbsScanPlan(gs.DETS,%0A theta, -2.5, 2.5, ysteps,%0A gamma, -2, 2, xsteps, True)%0Ags.RE(mesha, %5Bcb, lt%5D )%0A
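Decoded, the commit ports the example from bluesky.simple_scans to bluesky.plans, installs the Qt event-loop kicker explicitly, enables the previously commented-out LiveRaster callback (dropping the "+ 1" padding on its shape), and runs the plan through the RunEngine. The tail of the updated example, roughly:

import bluesky.plans as bp                      # was: bluesky.simple_scans as bss
import bluesky.qt_kicker
bluesky.qt_kicker.install_qt_kicker()

# hook up the live raster callback (now enabled)
cb = bluesky.callbacks.LiveRaster((ysteps, xsteps),
                                  'theta_gamma_det', clim=[0, 1])
lt = bluesky.callbacks.LiveTable([theta, gamma, tgd])
gs.MASTER_DET_FIELD = 'theta_gamma_det'
mesha = bp.OuterProductAbsScanPlan(gs.DETS,
                                   theta, -2.5, 2.5, ysteps,
                                   gamma, -2, 2, xsteps, True)
gs.RE(mesha, [cb, lt])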
2fe555e71d0b428a85c63c39dcfeecb30420f9b1
Handle SIGTERM by raising SystemExit
src/main/python/afp_alppaca/main.py
src/main/python/afp_alppaca/main.py
from __future__ import print_function, absolute_import, unicode_literals, division import argparse import signal import sys import threading from afp_alppaca.assume_role import AssumedRoleCredentialsProvider from afp_alppaca.ims_interface import IMSCredentialsProvider from afp_alppaca.scheduler import Scheduler from afp_alppaca.webapp import WebApp from afp_alppaca.util import setup_logging, load_config from afp_alppaca.compat import OrderedDict from succubus import Daemon class AlppacaDaemon(Daemon): def run(self): self.logger.warn("Alppaca starting.") try: # Handle SIGTERM the same way SIGINT is handled, i.e. throw a # KeyboardInterrupt exception. This makes the "finally:" work. sigint_handler = signal.getsignal(signal.SIGINT) signal.signal(signal.SIGTERM, sigint_handler) # Credentials is a shared object that connects the scheduler and the # bottle_app. The scheduler writes into it and the bottle_app reads # from it. self.credentials = OrderedDict() self.launch_scheduler() self.run_webapp() except Exception: self.logger.exception("Error in Alppaca") finally: self.logger.warn("Alppaca shutting down.") def parse_arguments(self): parser = argparse.ArgumentParser() parser.add_argument( '-c', '--config', help="Alppaca YAML config directory", type=str, default='/etc/alppaca') return parser.parse_args() def load_configuration(self): args = self.parse_arguments() self.config = load_config(args.config) self.setup_logging() def setup_logging(self): try: self.logger = setup_logging(self.config) except Exception: print("Could not setup logging with config '{0}'".format(self.config), file=sys.stderr) raise else: self.logger.debug("Alppaca logging was set up") def run_webapp(self): bind_ip = self.config.get('bind_ip', '127.0.0.1') bind_port = self.config.get('bind_port', '25772') self.logger.debug("Starting webserver on %s:%s", bind_ip, bind_port) webapp = WebApp(self.credentials) webapp.run(host=bind_ip, port=bind_port, quiet=True) def get_credentials_provider(self): # initialize the credentials provider ims_host_port = '%s:%s' % (self.config['ims_host'], self.config['ims_port']) ims_protocol = self.config.get('ims_protocol', 'https') self.logger.info("Will get credentials from '%s' using '%s'", ims_host_port, ims_protocol) credentials_provider = IMSCredentialsProvider(ims_host_port, ims_protocol=ims_protocol) role_to_assume = self.config.get('assume_role') if role_to_assume: self.logger.info("Option assume_role set to '%s'", role_to_assume) credentials_provider = AssumedRoleCredentialsProvider( credentials_provider, role_to_assume, self.config.get('aws_proxy_host'), self.config.get('aws_proxy_port'), self.config.get('aws_region') ) return credentials_provider def launch_scheduler(self): credentials_provider = self.get_credentials_provider() scheduler = Scheduler(self.credentials, credentials_provider) scheduler_thread = threading.Thread(target=scheduler.refresh_credentials) scheduler_thread.daemon = True scheduler_thread.start()
Python
0
@@ -475,16 +475,91 @@ aemon%0A%0A%0A +def sigterm_handler(*args):%0A raise SystemExit(%22SIGTERM was received%22)%0A%0A%0A class Al @@ -690,106 +690,37 @@ ERM -the same way SIGINT is handled, i.e. throw a%0A # KeyboardInterrupt exception. This +by raising SystemExit to make -s the @@ -741,69 +741,8 @@ rk.%0A - sigint_handler = signal.getsignal(signal.SIGINT)%0A @@ -782,19 +782,20 @@ ERM, sig -int +term _handler
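Decoded, the commit replaces the trick of reusing Python's SIGINT handler with a dedicated module-level handler that raises SystemExit, which still unwinds through the try/finally in run(). Roughly:

def sigterm_handler(*args):
    raise SystemExit("SIGTERM was received")


class AlppacaDaemon(Daemon):
    def run(self):
        self.logger.warn("Alppaca starting.")
        try:
            # Handle SIGTERM by raising SystemExit to make the "finally:" work.
            signal.signal(signal.SIGTERM, sigterm_handler)
            # ... rest of run() unchanged ...
        except Exception:
            self.logger.exception("Error in Alppaca")
        finally:
            self.logger.warn("Alppaca shutting down.")

SystemExit, like KeyboardInterrupt, derives from BaseException, so it bypasses the `except Exception` block and reaches the finally clause directly; the dedicated handler mainly makes that intent explicit.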
9f8a9f1ac7d283decc57e7779efeb8cb351cb646
handle case of no records to save
put_base_records_in_elastic.py
put_base_records_in_elastic.py
import boto import os from time import sleep from time import time from util import elapsed import zlib import re import json import argparse from elasticsearch import Elasticsearch, RequestsHttpConnection, serializer, compat, exceptions # from https://github.com/elastic/elasticsearch-py/issues/374 # to work around unicode problem class JSONSerializerPython2(serializer.JSONSerializer): """Override elasticsearch library serializer to ensure it encodes utf characters during json dump. See original at: https://github.com/elastic/elasticsearch-py/blob/master/elasticsearch/serializer.py#L42 A description of how ensure_ascii encodes unicode characters to ensure they can be sent across the wire as ascii can be found here: https://docs.python.org/2/library/json.html#basic-usage """ def dumps(self, data): # don't serialize strings if isinstance(data, compat.string_types): return data try: return json.dumps(data, default=self.default, ensure_ascii=True) except (ValueError, TypeError) as e: raise exceptions.SerializationError(data, e) class MissingTagException(Exception): pass def tag_match(tagname, str, return_list=False): regex_str = "<{}>(.+?)</{}>".format(tagname, tagname) matches = re.findall(regex_str, str) if return_list: return matches # will be empty list if we found naught else: try: return matches[0] except IndexError: # no matches. return None def is_complete(record): required_keys = [ "id", "title", "urls" ] for k in required_keys: if not record[k]: # empty list is falsey print u"Record is missing required key '{}'!".format(k) print record return False if record["oa"] == 0: print u"record {} is closed access. skipping.".format(record["id"]) return False return True def main(first=None, last=None): print "running main()" # set up elasticsearch INDEX_NAME = "base" TYPE_NAME = "record" es = Elasticsearch(os.getenv("ELASTICSEARCH_URL"), serializer=JSONSerializerPython2(), retry_on_timeout=True, max_retries=100) # if es.indices.exists(INDEX_NAME): # print("deleting '%s' index..." % (INDEX_NAME)) # res = es.indices.delete(index = INDEX_NAME) # print(" response: '%s'" % (res)) # # print u"creating index" # res = es.indices.create(index=INDEX_NAME) # set up aws s3 connection conn = boto.connect_s3( os.getenv("AWS_ACCESS_KEY_ID"), os.getenv("AWS_SECRET_ACCESS_KEY") ) my_bucket = conn.get_bucket('base-initial') i = 0 for key in my_bucket.list(): if not key.name.startswith("base_dc_dump") or not key.name.endswith(".gz"): continue key_filename = key.name.split("/")[1] if first and key_filename < first: continue if last and key_filename > last: continue # if i >= 2: # break print "getting this key...", key.name print "done." # that second arg is important. 
see http://stackoverflow.com/a/18319515 res = zlib.decompress(key.get_contents_as_string(), 16+zlib.MAX_WBITS) xml_records = re.findall("<record>.+?</record>", res, re.DOTALL) records_to_save = [] for xml_record in xml_records: record = {} record["id"] = tag_match("identifier", xml_record) record["title"] = tag_match("dc:title", xml_record) record["license"] = tag_match("base_dc:rights", xml_record) try: record["oa"] = int(tag_match("base_dc:oa", xml_record)) except TypeError: record["oa"] = 0 record["urls"] = tag_match("dc:identifier", xml_record, return_list=True) record["authors"] = tag_match("dc:creator", xml_record, return_list=True) record["relations"] = tag_match("dc:relation", xml_record, return_list=True) record["sources"] = tag_match("base_dc:collname", xml_record, return_list=True) if is_complete(record): op_dict = { "index": { "_index": INDEX_NAME, "_type": TYPE_NAME, "_id": record["id"] } } records_to_save.append(op_dict) records_to_save.append(record) i += 1 # save it! print u"saving a chunk of {} records.".format(len(records_to_save)) start_time = time() res = es.bulk(index=INDEX_NAME, body=records_to_save, refresh=False, request_timeout=60) print u"done sending them to elastic in {}s".format(elapsed(start_time, 4)) if __name__ == "__main__": parser = argparse.ArgumentParser(description="Run stuff.") # just for updating lots parser.add_argument('--first', nargs="?", type=str, help="start filename") parser.add_argument('--last', nargs="?", type=str, help="end filename") parsed = parser.parse_args() main(parsed.first, parsed.last)
Python
0
@@ -1783,24 +1783,26 @@ %0A + # print recor @@ -4764,16 +4764,48 @@ time()%0A + if records_to_save:%0A
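Decoded, the commit guards the bulk call so a chunk with zero valid records is skipped (the Elasticsearch bulk endpoint rejects an empty body); the other hunk just comments out the `print record` debug line in is_complete(). The guarded save, roughly (Python 2, as in the file):

# save it!
print u"saving a chunk of {} records.".format(len(records_to_save))
start_time = time()
if records_to_save:  # new: skip the request when nothing passed is_complete()
    res = es.bulk(index=INDEX_NAME, body=records_to_save,
                  refresh=False, request_timeout=60)
print u"done sending them to elastic in {}s".format(elapsed(start_time, 4))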
4fb812463bd9af16fbf70442d1626e7f275a8f0f
add BADCOLUMN to fiberbitmasking
py/desispec/fiberbitmasking.py
py/desispec/fiberbitmasking.py
""" desispec.fiberbitmasking ============== Functions to properly take FIBERSTATUS into account in the variances for data reduction """ from __future__ import absolute_import, division import numpy as np from astropy.table import Table from desiutil.log import get_logger from desispec.maskbits import fibermask as fmsk from desispec.maskbits import specmask def get_fiberbitmasked_frame(frame,bitmask=None,ivar_framemask=True): """ Wrapper script of get_fiberbitmasked_frame_arrays that will return a modified version of the frame instead of just the flux and ivar NOTE: The input "frame" variable itself is modified and returned, not a copy. """ ivar,mask = get_fiberbitmasked_frame_arrays(frame,bitmask,ivar_framemask,return_mask=True) frame.mask = mask frame.ivar = ivar return frame def get_fiberbitmasked_frame_arrays(frame,bitmask=None,ivar_framemask=True,return_mask=False): """ Function that takes a frame object and a bitmask and returns ivar (and optionally mask) array(s) that have fibers with offending bits in fibermap['FIBERSTATUS'] set to 0 in ivar and optionally flips a bit in mask. input: frame: frame object bitmask: int32 or list/array of int32's derived from desispec.maskbits.fibermask OR string indicating a keyword for get_fiberbitmask_comparison_value() ivar_framemask: bool (default=True), tells code whether to multiply the output variance by (frame.mask==0) return_mask: bool, (default=False). Returns the frame.mask with the logic of FIBERSTATUS applied. output: ivar: frame.ivar where the fibers with FIBERSTATUS & bitmask > 0 set to zero ivar mask: (optional) frame.mask logically OR'ed with BADFIBER bit in cases with a bad FIBERSTATUS example bitmask list: bitmask = [fmsk.BROKENFIBER,fmsk.UNASSIGNED,fmsk.BADFIBER,\ fmsk.BADTRACE,fmsk.MANYBADCOL, fmsk.MANYREJECTED] bitmask = get_fiberbitmask_comparison_value(kind='fluxcalib') bitmask = 'fluxcalib' bitmask = 4128780 """ ivar = frame.ivar.copy() mask = frame.mask.copy() if ivar_framemask and frame.mask is not None: ivar *= (frame.mask==0) fmap = Table(frame.fibermap) if frame.fibermap is None: log = get_logger() log.warning("No fibermap was given, so no FIBERSTATUS check applied.") if bitmask is None or frame.fibermap is None: if return_mask: return ivar, mask else: return ivar if type(bitmask) in [int,np.int32]: bad = bitmask elif type(bitmask) == str: if bitmask.isnumeric(): bad = np.int32(bitmask) else: bad = get_fiberbitmask_comparison_value(kind=bitmask) else: bad = bitmask[0] for bit in bitmask[1:]: bad |= bit # find if any fibers have an intersection with the bad bits badfibers = fmap['FIBER'][ (fmap['FIBERSTATUS'] & bad) > 0 ].data badfibers = badfibers % 500 # For the bad fibers, loop through and nullify them for fiber in badfibers: mask[fiber] |= specmask.BADFIBER if ivar_framemask : ivar[fiber] = 0. 
if return_mask: return ivar,mask else: return ivar def get_fiberbitmask_comparison_value(kind='fluxcalib'): """ Takes a string argument and returns a 32-bit integer representing the logical OR of all relevant fibermask bits for that given reduction step input: kind: str : string designating which combination of bits to use based on the operation possible values are: "all", "sky" (or "skysub"), "flat", "flux" (or "fluxcalib"), "star" (or "stdstars") """ if kind.lower() == 'all': return get_all_fiberbitmask_val() elif kind.lower()[:3] == 'sky': return get_skysub_fiberbitmask_val() elif kind.lower() == 'flat': return get_flat_fiberbitmask_val() elif 'star' in kind.lower(): return get_stdstars_fiberbitmask_val() elif 'flux' in kind.lower(): return get_fluxcalib_fiberbitmask_val() else: log = get_logger() log.warning("Keyword {} given to get_fiberbitmask_comparison_value() is invalid.".format(kind)+\ " Using 'fluxcalib' fiberbitmask.") return get_fluxcalib_fiberbitmask_val() def get_skysub_fiberbitmask_val(): return get_all_fiberbitmask_val() def get_flat_fiberbitmask_val(): return (fmsk.BROKENFIBER | fmsk.BADFIBER | fmsk.BADTRACE | fmsk.BADARC | \ fmsk.MANYBADCOL | fmsk.MANYREJECTED ) def get_fluxcalib_fiberbitmask_val(): return get_all_fiberbitmask_val() def get_stdstars_fiberbitmask_val(): return get_all_fiberbitmask_val() | fmsk.POORPOSITION def get_all_nonamp_fiberbitmask_val(): """Return a mask for all fatally bad FIBERSTATUS bits except BADAMPB/R/Z Note: does not include STUCKPOSITIONER or RESTRICTED, which could still be on a valid sky location, or even a target for RESTRICTED. Also does not include POORPOSITION which is bad for stdstars but not necessarily fatal for otherwise processing a normal fiber. """ return (fmsk.UNASSIGNED | fmsk.BROKENFIBER | fmsk.MISSINGPOSITION | fmsk.BADPOSITION | \ fmsk.BADFIBER | fmsk.BADTRACE | fmsk.BADARC | fmsk.BADFLAT | \ fmsk.MANYBADCOL | fmsk.MANYREJECTED ) def get_justamps_fiberbitmask(): return ( fmsk.BADAMPB | fmsk.BADAMPR | fmsk.BADAMPZ ) def get_all_fiberbitmask_with_amp(band): nonamp_mask = get_all_nonamp_fiberbitmask_val() if band.lower()[0] == 'b': amp_mask = fmsk.BADAMPB elif band.lower()[0] == 'r': amp_mask = fmsk.BADAMPR elif band.lower()[0] == 'z': amp_mask = fmsk.BADAMPZ else: log = get_logger() log.error("Didn't recognize band={}".format(band)) amp_mask = np.int32(0) return ( nonamp_mask | amp_mask ) def get_all_fiberbitmask_val(): return ( get_all_nonamp_fiberbitmask_val() | get_justamps_fiberbitmask() )
Python
0.000008
@@ -4690,32 +4690,49 @@ %7C %5C%0A + fmsk.BADCOLUMN %7C fmsk.MANYBADCOL @@ -5401,16 +5401,30 @@ SITION %7C + %5C%0A fmsk.BA @@ -5426,32 +5426,49 @@ sk.BADPOSITION %7C + fmsk.BADCOLUMN %7C %5C%0A f
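Decoded, the commit ORs fmsk.BADCOLUMN into two of the composite masks, so fibers crossing a bad CCD column are now excluded both from flats and from the catch-all non-amp mask. Roughly (continuation-line placement is approximate):

def get_flat_fiberbitmask_val():
    return (fmsk.BROKENFIBER | fmsk.BADFIBER | fmsk.BADTRACE | fmsk.BADARC | \
            fmsk.BADCOLUMN | fmsk.MANYBADCOL | fmsk.MANYREJECTED )


def get_all_nonamp_fiberbitmask_val():
    # docstring unchanged
    return (fmsk.UNASSIGNED | fmsk.BROKENFIBER | fmsk.MISSINGPOSITION | \
            fmsk.BADPOSITION | fmsk.BADCOLUMN | \
            fmsk.BADFIBER | fmsk.BADTRACE | fmsk.BADARC | fmsk.BADFLAT | \
            fmsk.MANYBADCOL | fmsk.MANYREJECTED )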
5da88c648e338d21b782a8a36a69e873da6c04ae
use --http-socket rather than --http for uwsgi
gnocchi/cli/api.py
gnocchi/cli/api.py
# Copyright (c) 2013 Mirantis Inc. # Copyright (c) 2015-2017 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from distutils import spawn import math import os import sys import daiquiri from oslo_config import cfg from oslo_policy import opts as policy_opts from gnocchi import opts from gnocchi import service from gnocchi import utils LOG = daiquiri.getLogger(__name__) def prepare_service(conf=None): if conf is None: conf = cfg.ConfigOpts() opts.set_defaults() policy_opts.set_defaults(conf) conf = service.prepare_service(conf=conf) cfg_path = conf.oslo_policy.policy_file if not os.path.isabs(cfg_path): cfg_path = conf.find_file(cfg_path) if cfg_path is None or not os.path.exists(cfg_path): cfg_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'rest', 'policy.json')) conf.set_default('policy_file', cfg_path, group='oslo_policy') return conf def api(): # Compat with previous pbr script try: double_dash = sys.argv.index("--") except ValueError: double_dash = None else: sys.argv.pop(double_dash) conf = cfg.ConfigOpts() for opt in opts.API_OPTS: # NOTE(jd) Register the API options without a default, so they are only # used to override the one in the config file c = copy.copy(opt) c.default = None conf.register_cli_opt(c) conf = prepare_service(conf) if double_dash is not None: # NOTE(jd) Wait to this stage to log so we're sure the logging system # is in place LOG.warning( "No need to pass `--' in gnocchi-api command line anymore, " "please remove") uwsgi = spawn.find_executable("uwsgi") if not uwsgi: LOG.error("Unable to find `uwsgi'.\n" "Be sure it is installed and in $PATH.") return 1 workers = utils.get_default_workers() args = [ "--if-not-plugin", "python", "--plugin", "python", "--endif", "--if-not-plugin", "http", "--plugin", "http", "--endif", "--http", "%s:%d" % (conf.host or conf.api.host, conf.port or conf.api.port), "--master", "--enable-threads", "--die-on-term", # NOTE(jd) See https://github.com/gnocchixyz/gnocchi/issues/156 "--add-header", "Connection: close", "--processes", str(math.floor(workers * 1.5)), "--threads", str(workers), "--lazy-apps", "--chdir", "/", "--wsgi", "gnocchi.rest.wsgi", "--pyargv", " ".join(sys.argv[1:]), ] virtual_env = os.getenv("VIRTUAL_ENV") if virtual_env is not None: args.extend(["-H", os.getenv("VIRTUAL_ENV", ".")]) return os.execl(uwsgi, uwsgi, *args)
Python
0
@@ -2575,78 +2575,19 @@ %22-- -if-not-plugin%22, %22http%22, %22--plugin%22, %22http%22, %22--endif%22,%0A %22--http +http-socket %22, %22 @@ -2624,16 +2624,23 @@ i.host,%0A +
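Decoded, the commit drops the conditional loading of uwsgi's separate HTTP router plugin and binds the socket with --http-socket instead, so the workers speak HTTP directly. The argument list becomes roughly:

args = [
    "--if-not-plugin", "python", "--plugin", "python", "--endif",
    "--http-socket", "%s:%d" % (conf.host or conf.api.host,
                                conf.port or conf.api.port),
    "--master",
    "--enable-threads",
    # ... remaining options unchanged ...
]

With --http, uwsgi spawns an extra router process that proxies to the workers; --http-socket avoids both that extra process and the need for the http plugin to be available, which is simpler for a single-service daemon like this.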
f8bb295bf1d10410d36a8a8880ff96303bbda451
Update announcements.py
announcements.py
announcements.py
import sys import icalendar import requests import pytz from datetime import datetime, timedelta from libs import post_text from icalendar import Calendar from database import find_bot_nname import re r = requests.get(sys.argv[2]) icsData = r.text cal = Calendar.from_ical(icsData) for evt in cal.subcomponents: print(evt.items()) print(evt.subcomponents start = evt.decoded('DTSTART') now = datetime.now(tz=pytz.utc) time_left = start - now if timedelta(minutes=0) < time_left < timedelta(minutes=10): raw_text = str(evt.decoded('SUMMARY')) search = re.search(r"([^ ]+)\s(.+)", raw_text) (nname, message) = search.groups('1') nname = nname[2:] message = message[:-1] print(nname) print(message) bot_id = find_bot_nname(nname) if not bot_id: bot_id = sys.argv[1] post_text("I was supposed to post '" + message + "' to " + nname, bot_id) else: bot_id = bot_id[0][0] post_text(message, bot_id)
Python
0
@@ -358,16 +358,17 @@ mponents +) %0A sta
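Decoded, the fix is one character: the debug line `print(evt.subcomponents` was missing its closing parenthesis, which made the whole script a SyntaxError. After the patch the loop head reads:

for evt in cal.subcomponents:
    print(evt.items())
    print(evt.subcomponents)  # closing parenthesis restored
    start = evt.decoded('DTSTART')
    # ... rest of the loop unchanged ...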
d35c6124c60ce0ef7edd7291db5787f18b9e4374
Fix hostname logging for connection created log in pool_logger
pycassa/logging/pool_logger.py
pycassa/logging/pool_logger.py
import pycassa_logger import logging class PoolLogger(object): def __init__(self): self.root_logger = pycassa_logger.PycassaLogger() self.logger = self.root_logger.add_child_logger('pool', self.name_changed) def name_changed(self, new_logger): self.logger = new_logger def connection_created(self, dic): level = pycassa_logger.levels[dic.get('level', 'info')] conn = dic.get('connection') if level <= logging.INFO: self.logger.log(level, "Connection %s (%s) opened for %s (id = %s)", id(conn), conn.server[0], dic.get('pool_type'), dic.get('pool_id')) else: self.logger.log(level, "Error opening connection (%s) for %s (id = %s): %s", conn.server, dic.get('pool_type'), dic.get('pool_id'), dic.get('error')) def connection_checked_out(self, dic): level = pycassa_logger.levels[dic.get('level', 'info')] conn = dic.get('connection') self.logger.log(level, "Connection %s (%s) was checked out from %s (id = %s)", id(conn), conn.server, dic.get('pool_type'), dic.get('pool_id')) def connection_checked_in(self, dic): level = pycassa_logger.levels[dic.get('level', 'info')] conn = dic.get('connection') self.logger.log(level, "Connection %s (%s) was checked in to %s (id = %s)", id(conn), conn.server, dic.get('pool_type'), dic.get('pool_id')) def connection_disposed(self, dic): level = pycassa_logger.levels[dic.get('level', 'info')] conn = dic.get('connection') if level <= logging.INFO: self.logger.log(level, "Connection %s (%s) was closed; pool %s (id = %s), reason: %s", id(conn), conn.server, dic.get('pool_type'), dic.get('pool_id'), dic.get('message')) else: error = dic.get('error') self.logger.log(level, "Error closing connection %s (%s) in %s (id = %s), " "reason: %s, error: %s %s", id(conn), conn.server, dic.get('pool_type'), dic.get('pool_id'), dic.get('message'), error.__class__, error) def connection_recycled(self, dic): level = pycassa_logger.levels[dic.get('level', 'info')] old_conn = dic.get('old_conn') new_conn = dic.get('new_conn') self.logger.log(level, "Connection %s (%s) is being recycled in %s (id = %s) " "after %d operations; it is replaced by connection %s (%s)", id(old_conn), old_conn.server, dic.get('pool_type'), dic.get('pool_id'), old_conn.operation_count, id(new_conn)) def connection_failed(self, dic): level = pycassa_logger.levels[dic.get('level', 'info')] conn = dic.get('connection') self.logger.log(level, "Connection %s (%s) in %s (id = %s) failed: %s", id(conn), dic.get('server'), dic.get('pool_type'), dic.get('pool_id'), str(dic.get('error'))) def obtained_server_list(self, dic): level = pycassa_logger.levels[dic.get('level', 'info')] self.logger.log(level, "Server list obtained for %s (id = %s): [%s]", dic.get('pool_type'), dic.get('pool_id'), ", ".join(dic.get('server_list'))) def pool_recreated(self, dic): level = pycassa_logger.levels[dic.get('level', 'info')] self.logger.log(level, "%s (id = %s) was recreated", dic.get('pool_type'), dic.get('pool_id')) def pool_disposed(self, dic): level = pycassa_logger.levels[dic.get('level', 'info')] self.logger.log(level, "%s (id = %s) was disposed", dic.get('pool_type'), dic.get('pool_id')) def pool_at_max(self, dic): level = pycassa_logger.levels[dic.get('level', 'info')] self.logger.log(level, "%s (id = %s) had a checkout request but was already " "at its max size (%s)", dic.get('pool_type'), dic.get('pool_id'), dic.get('pool_max'))
Python
0.000001
@@ -618,11 +618,8 @@ rver -%5B0%5D , di
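Decoded, the hunk deletes a stray `[0]` so the connection-opened log line prints the whole server value instead of indexing into it, matching every other log call in the class. Roughly:

if level <= logging.INFO:
    self.logger.log(level,
                    "Connection %s (%s) opened for %s (id = %s)",
                    id(conn), conn.server,  # was conn.server[0]
                    dic.get('pool_type'), dic.get('pool_id'))

Presumably conn.server is a "host:port" string here, so the old code silently logged just its first character rather than raising.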
37bd5193dd25c3cd64d956d967454f3a781a979c
Update chainer/training/extensions/log_report.py
chainer/training/extensions/log_report.py
chainer/training/extensions/log_report.py
import json import os import shutil import warnings import six from chainer import reporter from chainer import serializer as serializer_module from chainer.training import extension from chainer.training import trigger as trigger_module from chainer import utils from chainer.utils import argument class LogReport(extension.Extension): """Trainer extension to output the accumulated results to a log file. This extension accumulates the observations of the trainer to :class:`~chainer.DictSummary` at a regular interval specified by a supplied trigger, and writes them into a log file in JSON format. There are two triggers to handle this extension. One is the trigger to invoke this extension, which is used to handle the timing of accumulating the results. It is set to ``1, 'iteration'`` by default. The other is the trigger to determine when to emit the result. When this trigger returns True, this extension appends the summary of accumulated values to the list of past summaries, and writes the list to the log file. Then, this extension makes a new fresh summary object which is used until the next time that the trigger fires. It also adds some entries to each result dictionary. - ``'epoch'`` and ``'iteration'`` are the epoch and iteration counts at the output, respectively. - ``'elapsed_time'`` is the elapsed time in seconds since the training begins. The value is taken from :attr:`Trainer.elapsed_time`. Args: keys (iterable of strs): Keys of values to accumulate. If this is None, all the values are accumulated and output to the log file. trigger: Trigger that decides when to aggregate the result and output the values. This is distinct from the trigger of this extension itself. If it is a tuple in the form ``<int>, 'epoch'`` or ``<int>, 'iteration'``, it is passed to :class:`IntervalTrigger`. postprocess: Callback to postprocess the result dictionaries. Each result dictionary is passed to this callback on the output. This callback can modify the result dictionaries, which are used to output to the log file. filename (str): Name of the log file under the output directory. It can be a format string: the last result dictionary is passed for the formatting. For example, users can use '{iteration}' to separate the log files for different iterations. If the log name is None, it does not output the log to any file. Although it is recommended to use `filename`, you can specify the name of the log file with the `log_name` argument for back compatibility. However, `filename` will be used if both `filename` and `log_name` are specified. 
""" def __init__(self, keys=None, trigger=(1, 'epoch'), postprocess=None, filename=None, **kwargs): self._keys = keys self._trigger = trigger_module.get_trigger(trigger) self._postprocess = postprocess self._log = [] log_name, = argument.parse_kwargs( kwargs, ('log_name', 'log'), ) if filename is None: filename = log_name self._log_name = filename self._init_summary() def __call__(self, trainer): # accumulate the observations keys = self._keys observation = trainer.observation summary = self._summary if keys is None: summary.add(observation) else: summary.add({k: observation[k] for k in keys if k in observation}) if self._trigger(trainer): # output the result stats = self._summary.compute_mean() stats_cpu = {} for name, value in six.iteritems(stats): stats_cpu[name] = float(value) # copy to CPU updater = trainer.updater stats_cpu['epoch'] = updater.epoch stats_cpu['iteration'] = updater.iteration stats_cpu['elapsed_time'] = trainer.elapsed_time if self._postprocess is not None: self._postprocess(stats_cpu) self._log.append(stats_cpu) # write to the log file if self._log_name is not None: log_name = self._log_name.format(**stats_cpu) with utils.tempdir(prefix=log_name, dir=trainer.out) as tempd: path = os.path.join(tempd, 'log.json') with open(path, 'w') as f: json.dump(self._log, f, indent=4) new_path = os.path.join(trainer.out, log_name) shutil.move(path, new_path) # reset the summary for the next output self._init_summary() @property def log(self): """The current list of observation dictionaries.""" return self._log def serialize(self, serializer): if hasattr(self._trigger, 'serialize'): self._trigger.serialize(serializer['_trigger']) try: self._summary.serialize(serializer['_summary']) except KeyError: warnings.warn('The statistics are not saved.') # Note that this serialization may lose some information of small # numerical differences. if isinstance(serializer, serializer_module.Serializer): log = json.dumps(self._log) serializer('_log', log) else: log = serializer('_log', '') self._log = json.loads(log) def _init_summary(self): self._summary = reporter.DictSummary()
Python
0
@@ -2651,16 +2651,21 @@ you can +also specify%0A
d7a4948b8ee015ad918dac473114b728c65418f8
add total number of assignments to progress API (AA-816)
lms/djangoapps/course_home_api/progress/v1/serializers.py
lms/djangoapps/course_home_api/progress/v1/serializers.py
""" Progress Tab Serializers """ from rest_framework import serializers from rest_framework.reverse import reverse from lms.djangoapps.course_home_api.mixins import VerifiedModeSerializerMixin class CourseGradeSerializer(serializers.Serializer): """ Serializer for course grade """ letter_grade = serializers.CharField() percent = serializers.FloatField() is_passing = serializers.BooleanField(source='passed') class SubsectionScoresSerializer(serializers.Serializer): """ Serializer for subsections in section_scores """ assignment_type = serializers.CharField(source='format') display_name = serializers.CharField() has_graded_assignment = serializers.BooleanField(source='graded') num_points_earned = serializers.IntegerField(source='graded_total.earned') num_points_possible = serializers.IntegerField(source='graded_total.possible') percent_graded = serializers.FloatField() show_correctness = serializers.CharField() show_grades = serializers.SerializerMethodField() url = serializers.SerializerMethodField() def get_url(self, subsection): relative_path = reverse('jump_to', args=[self.context['course_key'], subsection.location]) request = self.context['request'] return request.build_absolute_uri(relative_path) def get_show_grades(self, subsection): return subsection.show_grades(self.context['staff_access']) class SectionScoresSerializer(serializers.Serializer): """ Serializer for sections in section_scores """ display_name = serializers.CharField() subsections = SubsectionScoresSerializer(source='sections', many=True) class GradingPolicySerializer(serializers.Serializer): """ Serializer for grading policy """ assignment_policies = serializers.SerializerMethodField() grade_range = serializers.DictField(source='GRADE_CUTOFFS') def get_assignment_policies(self, grading_policy): return [{ 'num_droppable': assignment_policy['drop_count'], 'short_label': assignment_policy.get('short_label', ''), 'type': assignment_policy['type'], 'weight': assignment_policy['weight'], } for assignment_policy in grading_policy['GRADER']] class CertificateDataSerializer(serializers.Serializer): """ Serializer for certificate data """ cert_status = serializers.CharField() cert_web_view_url = serializers.CharField() download_url = serializers.CharField() class VerificationDataSerializer(serializers.Serializer): """ Serializer for verification data object """ link = serializers.URLField() status = serializers.CharField() status_date = serializers.DateTimeField() class ProgressTabSerializer(VerifiedModeSerializerMixin): """ Serializer for progress tab """ certificate_data = CertificateDataSerializer() completion_summary = serializers.DictField() course_grade = CourseGradeSerializer() end = serializers.DateTimeField() user_has_passing_grade = serializers.BooleanField() has_scheduled_content = serializers.BooleanField() section_scores = SectionScoresSerializer(many=True) enrollment_mode = serializers.CharField() grading_policy = GradingPolicySerializer() studio_url = serializers.CharField() verification_data = VerificationDataSerializer()
Python
0
@@ -2032,24 +2032,81 @@ op_count'%5D,%0A + 'num_total': assignment_policy%5B'min_count'%5D,%0A
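Decoded, the commit adds the total assignment count to each policy dict, sourced from the grader's min_count. Roughly:

def get_assignment_policies(self, grading_policy):
    return [{
        'num_droppable': assignment_policy['drop_count'],
        'num_total': assignment_policy['min_count'],  # new: total number of assignments
        'short_label': assignment_policy.get('short_label', ''),
        'type': assignment_policy['type'],
        'weight': assignment_policy['weight'],
    } for assignment_policy in grading_policy['GRADER']]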
c5e13436d7d453bd851e39591f82e2ef0d740d92
Fix typo
pyfarm/scheduler/celery_app.py
pyfarm/scheduler/celery_app.py
# No shebang line, this module is meant to be imported # # Copyright 2014 Ambient Entertainment GmbH & Co. KG # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from datetime import timedelta from pyfarm.core.config import read_env_int from celery import Celery celery_app = Celery('pyfarm.tasks', broker='redis://', include=['pyfarm.scheduler.tasks']) celery_app.conf.CELERYBEAT_SCHEDULE = { "periodically_poll_agents": { "task": "pyfarm.scheduler.tasks.poll_agents", "schedule": timedelta( seconds=read_env_int("AGENTS_POLL_INTERVALE", 30))}, "periodical_scheduler": { "task": "pyfarm.scheduler.tasks.assign_tasks", "schedule": timedelta(seconds=read_env_int("SCHEDULER_INTERVAL", 30))}} if __name__ == '__main__': celery_app.start()
Python
0.999999
@@ -1097,17 +1097,16 @@ INTERVAL -E %22, 30))%7D
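Decoded, the hunk removes one letter: the environment-variable name loses its trailing E, so the intended AGENTS_POLL_INTERVAL setting is actually read. The beat schedule becomes roughly:

celery_app.conf.CELERYBEAT_SCHEDULE = {
    "periodically_poll_agents": {
        "task": "pyfarm.scheduler.tasks.poll_agents",
        "schedule": timedelta(
            seconds=read_env_int("AGENTS_POLL_INTERVAL", 30))},  # was AGENTS_POLL_INTERVALE
    "periodical_scheduler": {
        "task": "pyfarm.scheduler.tasks.assign_tasks",
        "schedule": timedelta(seconds=read_env_int("SCHEDULER_INTERVAL", 30))}}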
f755f9857020cfceaeb3cf9607e96cef66ccb048
update dev version after 0.21.1 tag [skip ci]
py/desitarget/_version.py
py/desitarget/_version.py
__version__ = '0.21.1'
Python
0
@@ -14,10 +14,18 @@ '0.21.1 +.dev2037 '%0A
47527996fe967d8ef713ff8814f71d49ab539fd8
update version
grizli/version.py
grizli/version.py
# git describe --tags __version__ = "0.6.0-86-g140db75"
Python
0
@@ -40,17 +40,18 @@ 6.0- -86-g140db75 +109-g647e4b4 %22%0A
b493082352de19ed8d3d52c8eda838064957bbc2
bump version to 1.2-BETA2
libnamebench/version.py
libnamebench/version.py
VERSION = '1.2-BETA1'
Python
0
@@ -12,12 +12,12 @@ 1.2-BETA -1 +2 '%0A%0A
19cfe70c69b026429454fb8361ec3e8d6f1a0505
add show/hide requested signals
pyqode/core/widgets/preview.py
pyqode/core/widgets/preview.py
""" This module contains a widget that can show the html preview of an editor. """ from weakref import proxy from pyqode.qt import QtCore, QtWebWidgets from pyqode.core.api import DelayJobRunner class HtmlPreviewWidget(QtWebWidgets.QWebView): def __init__(self, parent=None): super(HtmlPreviewWidget, self).__init__(parent) self._editor = None self._timer = DelayJobRunner(delay=1000) try: # prevent opening internal links when using QtWebKit self.page().setLinkDelegationPolicy( QtWebWidgets.QWebPage.DelegateAllLinks) except (TypeError, AttributeError): # no needed with QtWebEngine, internal links are properly handled # by the default implementation pass def set_editor(self, editor): try: self.setHtml(editor.to_html()) except (TypeError, AttributeError): self.setHtml('<center>No preview available...</center>') self._editor = None else: if self._editor is not None and editor != self._editor: try: self._editor.textChanged.disconnect(self._on_text_changed) except TypeError: pass editor.textChanged.connect(self._on_text_changed) self._editor = proxy(editor) def _on_text_changed(self, *_): self._timer.request_job(self._update_preview) def _update_preview(self): try: pos = self.page().mainFrame().scrollBarValue(QtCore.Qt.Vertical) self.setHtml(self._editor.to_html()) self.page().mainFrame().setScrollBarValue(QtCore.Qt.Vertical, pos) except AttributeError: # Not possible with QtWebEngine??? # self._scroll_pos = self.page().mainFrame().scrollBarValue( # QtCore.Qt.Vertical) self.setHtml(self._editor.to_html())
Python
0
@@ -238,16 +238,91 @@ bView):%0A + hide_requested = QtCore.Signal()%0A show_requested = QtCore.Signal()%0A%0A def @@ -880,24 +880,147 @@ f, editor):%0A + url = QtCore.QUrl('')%0A if editor is not None:%0A url = QtCore.QUrl.fromLocalFile(editor.file.path)%0A try: @@ -1053,32 +1053,37 @@ editor.to_html() +, url )%0A except @@ -1179,16 +1179,21 @@ center%3E' +, url )%0A @@ -1210,32 +1210,71 @@ ._editor = None%0A + self.hide_requested.emit()%0A else:%0A @@ -1600,16 +1600,55 @@ (editor) +%0A self.show_requested.emit() %0A%0A de @@ -1763,16 +1763,151 @@ (self):%0A + url = QtCore.QUrl('')%0A if self._editor is not None:%0A url = QtCore.QUrl.fromLocalFile(self._editor.file.path)%0A @@ -2035,16 +2035,21 @@ o_html() +, url )%0A @@ -2357,10 +2357,15 @@ o_html() +, url )%0A
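Decoded, the commit adds two Qt signals the surrounding UI can connect to and passes the editor file's path as the base URL to setHtml, so relative links and images in the preview resolve. The key parts, roughly:

class HtmlPreviewWidget(QtWebWidgets.QWebView):
    hide_requested = QtCore.Signal()
    show_requested = QtCore.Signal()

    def set_editor(self, editor):
        url = QtCore.QUrl('')
        if editor is not None:
            url = QtCore.QUrl.fromLocalFile(editor.file.path)
        try:
            self.setHtml(editor.to_html(), url)
        except (TypeError, AttributeError):
            self.setHtml('<center>No preview available...</center>', url)
            self._editor = None
            self.hide_requested.emit()  # no preview -> let the host window hide us
        else:
            # ... (re)connect editor.textChanged as before ...
            self._editor = proxy(editor)
            self.show_requested.emit()

_update_preview() gets the same QUrl.fromLocalFile treatment when it re-renders from self._editor.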
d428bb582c6fe71e39bdedfbed1b355421f48139
Fix that
src/mysql_proto/com/stmt/prepare.py
src/mysql_proto/com/stmt/prepare.py
#!/usr/bin/env python # coding=utf-8 from packet import Packet from proto import Proto from flags import Flags class Prepare(Packet): query = "" def getPayload(self): payload = bytearray() payload.extend(Proto.build_byte(Flags.COM_STMT_PREPARE)) payload.extend(Proto.build_eop_str(self.query)) return payload @staticmethod def loadFromPacket(packet): obj = Statistics() proto = Proto(packet, 3) obj.sequenceId = proto.get_fixed_int(1) proto.get_filler(1) obj.query = proto.get_eop_str() return obj if __name__ == "__main__": import doctest doctest.testmod()
Python
0.999999
@@ -440,18 +440,15 @@ j = -Statistics +Prepare ()%0A
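Decoded, the commit fixes a copy/paste slip: the factory for COM_STMT_PREPARE packets was instantiating Statistics instead of Prepare. Roughly:

@staticmethod
def loadFromPacket(packet):
    obj = Prepare()  # was: Statistics()
    proto = Proto(packet, 3)
    obj.sequenceId = proto.get_fixed_int(1)
    proto.get_filler(1)
    obj.query = proto.get_eop_str()
    return obj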
76611b7e6e97089b93626b472f91c04f16644034
Fix up some comments
channels/management/commands/runserver.py
channels/management/commands/runserver.py
import threading from django.core.management.commands.runserver import \ Command as RunserverCommand from channels import DEFAULT_CHANNEL_LAYER, channel_layers from channels.handler import ViewConsumer from channels.log import setup_logger from channels.worker import Worker class Command(RunserverCommand): def handle(self, *args, **options): self.verbosity = options.get("verbosity", 1) self.logger = setup_logger('django.channels', self.verbosity) super(Command, self).handle(*args, **options) def inner_run(self, *args, **options): # Check a handler is registered for http reqs; if not, add default one self.channel_layer = channel_layers[DEFAULT_CHANNEL_LAYER] if not self.channel_layer.registry.consumer_for_channel("http.request"): self.channel_layer.registry.add_consumer(ViewConsumer(), ["http.request"]) # Report starting up # Launch worker as subthread (including autoreload logic) worker = WorkerThread(self.channel_layer, self.logger) worker.daemon = True worker.start() # Launch server in main thread (Twisted doesn't like being in a # subthread, and it doesn't need to autoreload as there's no user code) self.logger.info("Daphne running, listening on %s:%s", self.addr, self.port) from daphne.server import Server Server( channel_layer=self.channel_layer, host=self.addr, port=int(self.port), signal_handlers=False, ).run() class WorkerThread(threading.Thread): """ Class that runs a worker """ def __init__(self, channel_layer, logger): super(WorkerThread, self).__init__() self.channel_layer = channel_layer self.logger = logger def run(self): self.logger.info("Worker thread running") worker = Worker(channel_layer=self.channel_layer) worker.run()
Python
0.000153
@@ -899,92 +899,34 @@ # -Report starting up%0A # Launch worker as subthread (including autoreload logic) +Launch worker as subthread %0A @@ -1068,132 +1068,112 @@ in +' main +' thread - (Twisted doesn't like being in a%0A # subthread, and it doesn't need to autoreload as there's no user code) +. Signals are disabled as it's still%0A # actually a subthread under the autoreloader. %0A
e451ea4d698450813bd11fed6b501b839cd477a6
Reformat runworker a bit
channels/management/commands/runworker.py
channels/management/commands/runworker.py
from __future__ import unicode_literals from django.core.management import BaseCommand, CommandError from channels import DEFAULT_CHANNEL_LAYER, channel_layers from channels.log import setup_logger from channels.worker import Worker class Command(BaseCommand): leave_locale_alone = True def add_arguments(self, parser): super(Command, self).add_arguments(parser) parser.add_argument('--layer', action='store', dest='layer', default=DEFAULT_CHANNEL_LAYER, help='Channel layer alias to use, if not the default.') parser.add_argument('--only-channels', action='append', dest='only_channels', help='Limits this worker to only listening on the provided channels (supports globbing).') parser.add_argument('--exclude-channels', action='append', dest='exclude_channels', help='Prevents this worker from listening on the provided channels (supports globbing).') def handle(self, *args, **options): # Get the backend to use self.verbosity = options.get("verbosity", 1) self.logger = setup_logger('django.channels', self.verbosity) self.channel_layer = channel_layers[options.get("layer", DEFAULT_CHANNEL_LAYER)] # Check that handler isn't inmemory if self.channel_layer.local_only(): raise CommandError( "You cannot span multiple processes with the in-memory layer. " + "Change your settings to use a cross-process channel layer." ) # Check a handler is registered for http reqs self.channel_layer.router.check_default() # Launch a worker self.logger.info("Running worker against channel layer %s", self.channel_layer) # Optionally provide an output callback callback = None if self.verbosity > 1: callback = self.consumer_called # Run the worker try: Worker( channel_layer=self.channel_layer, callback=callback, only_channels=options.get("only_channels", None), exclude_channels=options.get("exclude_channels", None), ).run() except KeyboardInterrupt: pass def consumer_called(self, channel, message): self.logger.debug("%s", channel)
Python
0
@@ -406,16 +406,29 @@ rgument( +%0A '--layer @@ -557,16 +557,26 @@ efault.' +,%0A )%0A @@ -597,16 +597,29 @@ rgument( +%0A '--only- @@ -769,16 +769,26 @@ bbing).' +,%0A )%0A @@ -809,16 +809,29 @@ rgument( +%0A '--exclu @@ -986,16 +986,26 @@ bbing).' +,%0A )%0A%0A d
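Decoded, the commit only reflows the three parser.add_argument calls onto multiple lines with trailing commas; behavior is unchanged. Each call ends up shaped roughly like:

parser.add_argument(
    '--layer', action='store', dest='layer', default=DEFAULT_CHANNEL_LAYER,
    help='Channel layer alias to use, if not the default.',
)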
33fb480ba089cae579ed1b9a061c8f5f2fd72a61
make regex raw strings
src/nibetaseries/interfaces/bids.py
src/nibetaseries/interfaces/bids.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os import re from shutil import copy from nipype.interfaces.base import ( traits, isdefined, TraitedSpec, BaseInterfaceInputSpec, File, SimpleInterface ) BIDS_NAME = re.compile( '^(.*\/)?(?P<subject_id>sub-[a-zA-Z0-9]+)(_(?P<session_id>ses-[a-zA-Z0-9]+))?' '(_(?P<task_id>task-[a-zA-Z0-9]+))?(_(?P<acq_id>acq-[a-zA-Z0-9]+))?' '(_(?P<rec_id>rec-[a-zA-Z0-9]+))?(_(?P<run_id>run-[a-zA-Z0-9]+))?' '(_(?P<space_id>space-[a-zA-Z0-9]+))?(_(?P<variant_id>variant-[a-zA-Z0-9]+))?') BETASERIES_NAME = re.compile( '^(.*\/)?betaseries(_(?P<trialtype_id>trialtype-[a-zA-Z0-9]+))?' ) class DerivativesDataSinkInputSpec(BaseInterfaceInputSpec): base_directory = traits.Directory( desc='Path to the base directory for storing data.') betaseries_file = File(exists=True, mandatory=True, desc='the betaseries file') in_file = File(exists=True, mandatory=True) source_file = File(exists=False, mandatory=True, desc='the input func file') suffix = traits.Str('', mandatory=True, desc='suffix appended to source_file') extra_values = traits.List(traits.Str) class DerivativesDataSinkOutputSpec(TraitedSpec): out_file = File(exists=True, desc='written file path') class DerivativesDataSink(SimpleInterface): input_spec = DerivativesDataSinkInputSpec output_spec = DerivativesDataSinkOutputSpec out_path_base = "nibetaseries" _always_run = True def __init__(self, out_path_base=None, **inputs): super(DerivativesDataSink, self).__init__(**inputs) self._results['out_file'] = [] if out_path_base: self.out_path_base = out_path_base def _run_interface(self, runtime): src_fname, _ = _splitext(self.inputs.source_file) betaseries_fname, _ = _splitext(self.inputs.betaseries_file) _, ext = _splitext(self.inputs.in_file) bids_dict = BIDS_NAME.search(src_fname).groupdict() bids_dict = {key: value for key, value in bids_dict.items() if value is not None} betaseries_dict = BETASERIES_NAME.search(betaseries_fname).groupdict() # TODO: this quick and dirty modality detection needs to be implemented # correctly mod = 'func' if 'anat' in os.path.dirname(self.inputs.source_file): mod = 'anat' elif 'dwi' in os.path.dirname(self.inputs.source_file): mod = 'dwi' elif 'fmap' in os.path.dirname(self.inputs.source_file): mod = 'fmap' base_directory = runtime.cwd if isdefined(self.inputs.base_directory): base_directory = os.path.abspath(self.inputs.base_directory) out_path = '{}/{subject_id}'.format(self.out_path_base, **bids_dict) if bids_dict.get('session_id', None) is not None: out_path += '/{session_id}'.format(**bids_dict) out_path += '/{}'.format(mod) out_path = os.path.join(base_directory, out_path) os.makedirs(out_path, exist_ok=True) base_fname = os.path.join(out_path, src_fname) formatstr = '{bname}_{trialtype}_{suffix}{ext}' out_file = formatstr.format( bname=base_fname, trialtype=betaseries_dict['trialtype_id'], suffix=self.inputs.suffix, ext=ext) # copy the file to the output directory copy(self.inputs.in_file, out_file) self._results['out_file'] = out_file return runtime def _splitext(fname): fname, ext = os.path.splitext(os.path.basename(fname)) if ext == '.gz': fname, ext2 = os.path.splitext(fname) ext = ext2 + ext return fname, ext
Python
0.00384
@@ -343,32 +343,33 @@ re.compile(%0A +r '%5E(.*%5C/)?(?P%3Csub @@ -431,24 +431,25 @@ 9%5D+))?'%0A +r '(_(?P%3Ctask_ @@ -505,24 +505,25 @@ 9%5D+))?'%0A +r '(_(?P%3Crec_i @@ -581,16 +581,17 @@ )?'%0A +r '(_(?P%3Cs @@ -693,20 +693,21 @@ ompile(%0A - +r '%5E(.*%5C/)
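Decoded, every string literal in the two re.compile calls gains an r prefix. The patterns contain backslashes (e.g. \/), and in a non-raw string Python treats \/ as an invalid escape sequence, a DeprecationWarning on newer interpreters. The result, roughly:

BIDS_NAME = re.compile(
    r'^(.*\/)?(?P<subject_id>sub-[a-zA-Z0-9]+)(_(?P<session_id>ses-[a-zA-Z0-9]+))?'
    r'(_(?P<task_id>task-[a-zA-Z0-9]+))?(_(?P<acq_id>acq-[a-zA-Z0-9]+))?'
    r'(_(?P<rec_id>rec-[a-zA-Z0-9]+))?(_(?P<run_id>run-[a-zA-Z0-9]+))?'
    r'(_(?P<space_id>space-[a-zA-Z0-9]+))?(_(?P<variant_id>variant-[a-zA-Z0-9]+))?')

BETASERIES_NAME = re.compile(
    r'^(.*\/)?betaseries(_(?P<trialtype_id>trialtype-[a-zA-Z0-9]+))?'
)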
f980f4b557df7cb4984cb428dd4bebcfe7ca7bc6
use urgent when you get mail
py3status/modules/imap.py
py3status/modules/imap.py
# -*- coding: utf-8 -*- """ Display number of unread messages from IMAP account. Configuration parameters: cache_timeout: refresh interval for this module (default 60) criterion: status of emails to check for (default 'UNSEEN') format: display format for this module (default 'Mail: {unseen}') hide_if_zero: hide this module when no new mail (default False) mailbox: name of the mailbox to check (default 'INBOX') password: login password (default None) port: number to use (default '993') security: login authentication method: 'ssl' or 'starttls' (startssl needs python 3.2 or later) (default 'ssl') server: server to connect (default None) user: login user (default None) Format placeholders: {unseen} number of unread emails Color options: color_new_mail: use color when new mail arrives, default to color_good @author obb """ import imaplib from ssl import create_default_context STRING_UNAVAILABLE = 'N/A' class Py3status: """ """ # available configuration parameters cache_timeout = 60 criterion = 'UNSEEN' format = 'Mail: {unseen}' hide_if_zero = False mailbox = 'INBOX' password = None port = '993' security = 'ssl' server = None user = None class Meta: deprecated = { 'rename': [ { 'param': 'new_mail_color', 'new': 'color_new_mail', 'msg': 'obsolete parameter use `color_new_mail`', }, { 'param': 'imap_server', 'new': 'server', 'msg': 'obsolete parameter use `server`', }, ], } def post_config_hook(self): if self.security not in ["ssl", "starttls"]: raise ValueError("Unknown security protocol") def check_mail(self): mail_count = self._get_mail_count() response = {'cached_until': self.py3.time_in(self.cache_timeout)} if mail_count is None: response['color'] = self.py3.COLOR_BAD, response['full_text'] = self.py3.safe_format( self.format, {'unseen': STRING_UNAVAILABLE}) elif mail_count > 0: response['color'] = self.py3.COLOR_NEW_MAIL or self.py3.COLOR_GOOD if mail_count == 0 and self.hide_if_zero: response['full_text'] = '' else: response['full_text'] = self.py3.safe_format(self.format, {'unseen': mail_count}) return response def _connection_ssl(self): connection = imaplib.IMAP4_SSL(self.server, self.port) return connection def _connection_starttls(self): connection = imaplib.IMAP4(self.server, self.port) connection.starttls(create_default_context()) return connection def _get_mail_count(self): try: mail_count = 0 directories = self.mailbox.split(',') if self.security == "ssl": connection = self._connection_ssl() elif self.security == "starttls": connection = self._connection_starttls() connection.login(self.user, self.password) for directory in directories: connection.select(directory) unseen_response = connection.search(None, self.criterion) mails = unseen_response[1][0].split() mail_count += len(mails) connection.close() return mail_count except: return None if __name__ == "__main__": """ Run module in test mode. """ from py3status.module_test import module_test module_test(Py3status)
Python
0
@@ -97,24 +97,93 @@ parameters:%0A + allow_urgent: display urgency on unread messages (default False)%0A cache_ti @@ -1110,16 +1110,41 @@ ameters%0A + allow_urgent = False%0A cach @@ -2420,16 +2420,92 @@ LOR_GOOD +%0A if self.allow_urgent:%0A response%5B'urgent'%5D = True %0A%0A
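Decoded, the commit adds an allow_urgent option (documented, defaulting to False) and sets the urgent flag on the response whenever there is unread mail. Roughly:

class Py3status:
    allow_urgent = False  # new configuration parameter
    cache_timeout = 60
    # ... other parameters unchanged ...

    def check_mail(self):
        # ...
        elif mail_count > 0:
            response['color'] = self.py3.COLOR_NEW_MAIL or self.py3.COLOR_GOOD
            if self.allow_urgent:
                response['urgent'] = True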
596f9752a7956c259217b0528bed924812d0631f
Add admin filter to filter attendees with children.
pyconde/accounts/admin.py
pyconde/accounts/admin.py
from django.contrib import admin from . import models admin.site.register(models.Profile, list_display=['user'])
Python
0
@@ -30,91 +30,813 @@ min%0A -%0Afrom . import models%0A%0A%0Aadmin.site.register(models.Profile,%0A list_display=%5B'user'%5D +from django.contrib.admin import SimpleListFilter%0A%0Afrom . import models%0A%0A%0Aclass WithChildrenFilter(SimpleListFilter):%0A title = 'Anzahl Kinder'%0A parameter_name = 'children'%0A%0A def lookups(self, request, model_admin):%0A return (('y', 'mit Kindern'),%0A ('n', 'ohne Kinder'))%0A%0A def queryset(self, request, queryset):%0A if self.value() == 'y':%0A queryset = queryset.filter(num_accompanying_children__gt=0)%0A elif self.value() == 'n':%0A queryset = queryset.filter(num_accompanying_children=0)%0A return queryset%0A%0A%0Aclass ProfileAdmin(admin.ModelAdmin):%0A list_display = ('pk', 'user', 'num_accompanying_children')%0A list_display_links = ('pk', 'user')%0A list_filter = (WithChildrenFilter,)%0A%0A%0Aadmin.site.register(models.Profile, ProfileAdmin )%0A
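This is one of the few hunks that carries an essentially complete new file body. Decoded, the admin gains a SimpleListFilter on the number of accompanying children plus a proper ModelAdmin; the post-commit module should read roughly:

from django.contrib import admin
from django.contrib.admin import SimpleListFilter

from . import models


class WithChildrenFilter(SimpleListFilter):
    title = 'Anzahl Kinder'          # German UI strings, verbatim from the diff
    parameter_name = 'children'

    def lookups(self, request, model_admin):
        return (('y', 'mit Kindern'),
                ('n', 'ohne Kinder'))

    def queryset(self, request, queryset):
        if self.value() == 'y':
            queryset = queryset.filter(num_accompanying_children__gt=0)
        elif self.value() == 'n':
            queryset = queryset.filter(num_accompanying_children=0)
        return queryset


class ProfileAdmin(admin.ModelAdmin):
    list_display = ('pk', 'user', 'num_accompanying_children')
    list_display_links = ('pk', 'user')
    list_filter = (WithChildrenFilter,)


admin.site.register(models.Profile, ProfileAdmin)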
1b5b43542fe3ba8f85076c6b6cb1e98a4614a0c6
reformat JobGroup to match other tables
pyfarm/models/jobgroup.py
pyfarm/models/jobgroup.py
# No shebang line, this module is meant to be imported # # Copyright 2015 Ambient Entertainment GmbH & Co. KG # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Job Group Model =============== Model for job groups """ from pyfarm.master.application import db from pyfarm.models.core.cfg import ( TABLE_JOB_GROUP, TABLE_JOB_TYPE, TABLE_USER, MAX_JOBGROUP_NAME_LENGTH) from pyfarm.models.core.mixins import UtilityMixins from pyfarm.models.core.types import id_column, IDTypeWork class JobGroup(db.Model, UtilityMixins): """ Used to group jobs together for better presentation in the UI """ __tablename__ = TABLE_JOB_GROUP id = id_column(IDTypeWork) title = db.Column(db.String(MAX_JOBGROUP_NAME_LENGTH), nullable=False) main_jobtype_id = db.Column(IDTypeWork, db.ForeignKey("%s.id" % TABLE_JOB_TYPE), nullable=False, doc="ID of the jobtype of the main job in this " "group. Purely for display and " "filtering.") user_id = db.Column(db.Integer, db.ForeignKey("%s.id" % TABLE_USER), doc="The id of the user who owns these jobs") main_jobtype = db.relationship("JobType", backref=db.backref("jobgroups", lazy="dynamic"), doc="The jobtype of the main job in this " "group") user = db.relationship("User", backref=db.backref("jobgroups", lazy="dynamic"), doc="The user who owns these jobs")
Python
0.000001
@@ -103,18 +103,49 @@ Co. KG%0A +# Copyright 2015 Oliver Palmer%0A #%0A - # Licens @@ -742,16 +742,17 @@ ups%0A%22%22%22%0A +%0A from pyf @@ -1008,16 +1008,17 @@ peWork%0A%0A +%0A class Jo @@ -1200,16 +1200,17 @@ peWork)%0A +%0A titl @@ -1223,16 +1223,25 @@ .Column( +%0A db.Strin @@ -1268,16 +1268,24 @@ LENGTH), +%0A nullabl @@ -1291,17 +1291,72 @@ le=False -) +,%0A doc=%22The title of the job group's name%22%0A )%0A %0A mai @@ -1376,24 +1376,33 @@ = db.Column( +%0A IDTypeWork,%0A @@ -1405,32 +1405,8 @@ rk,%0A - @@ -1462,72 +1462,24 @@ - nullable=False,%0A +nullable=False,%0A @@ -1535,32 +1535,8 @@ s %22%0A - @@ -1578,47 +1578,8 @@ and -%22%0A %22 filt @@ -1587,16 +1587,17 @@ ring.%22)%0A +%0A user @@ -1612,16 +1612,25 @@ .Column( +%0A db.Integ @@ -1632,16 +1632,24 @@ Integer, +%0A db.Fore @@ -1678,32 +1678,16 @@ _USER),%0A - @@ -1730,17 +1730,55 @@ se jobs%22 -) +%0A )%0A%0A #%0A # Relationships%0A # %0A mai @@ -1805,16 +1805,25 @@ ionship( +%0A %22JobType @@ -1829,35 +1829,8 @@ e%22,%0A - @@ -1868,206 +1868,83 @@ ps%22, -%0A lazy=%22dynamic%22),%0A doc=%22The jobtype of the main job in this %22%0A %22 + lazy=%22dynamic%22),%0A doc=%22The jobtype of the main job in this group%22)%0A @@ -1939,16 +1939,17 @@ group%22)%0A +%0A user @@ -1971,35 +1971,25 @@ hip( -%22User%22,%0A +%0A %22User%22,%0A @@ -2027,91 +2027,26 @@ ps%22, -%0A lazy=%22dynamic%22),%0A + lazy=%22dynamic%22),%0A @@ -2083,10 +2083,15 @@ se jobs%22 +%0A )%0A
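Decoded, the commit is purely cosmetic: each db.Column call is reflowed to one keyword per line, the title column gains a doc string, a second copyright line is added, and the relationship definitions get a "# Relationships" banner comment. A representative before/after for one column:

# before
title = db.Column(db.String(MAX_JOBGROUP_NAME_LENGTH), nullable=False)

# after
title = db.Column(
    db.String(MAX_JOBGROUP_NAME_LENGTH),
    nullable=False,
    doc="The title of the job group's name")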
9801345043972715e3d6cdfcce09d61f3b6c7d46
input 1: commenting out keys
Roles/avl_input1/main.py
Roles/avl_input1/main.py
""" inputs: 4 cap sensors on I2C 3 rotary encoders on SPI output topics: pitch_key_event - integer from 0 to 47 voice_key_1_position - float from 0.0 to 1.0 voice_key_2_position - float from 0.0 to 1.0 voice_key_3_position - float from 0.0 to 1.0 """ import Adafruit_MPR121.MPR121 as MPR121 import importlib import json import os import Queue import random import settings import sys import threading import time from thirtybirds_2_0.Network.manager import init as network_init from thirtybirds_2_0.Network.email_simple import init as email_init from thirtybirds_2_0.Adaptors.Sensors import AMT203 BASE_PATH = os.path.dirname(os.path.realpath(__file__)) UPPER_PATH = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0] DEVICES_PATH = "%s/Hosts/" % (BASE_PATH ) THIRTYBIRDS_PATH = "%s/thirtybirds" % (UPPER_PATH ) sys.path.append(BASE_PATH) sys.path.append(UPPER_PATH) class Main(threading.Thread): def __init__(self, hostname): threading.Thread.__init__(self) self.hostname = hostname self.queue = Queue.Queue() def add_to_queue(self, topic, msg): self.queue.put([topic, msg]) def run(self): while True: topic_msg = self.queue.get(True) network.send(topic_msg[0], topic_msg[1]) class MPR121Array(threading.Thread): def __init__(self, i2c_address): threading.Thread.__init__(self) position_raw = 0 self.i2c_address = i2c_address self.capsensors = [] self.last_touched = [0,0,0,0] for sensor_id in range(4): self.capsensors.append(MPR121.MPR121()) if not self.capsensors[sensor_id].begin(self.i2c_address[sensor_id]): print('Error initializing MPR121 @{}'.format(self.i2c_address[sensor_id])) print repr(self.capsensors[sensor_id]) print "class CapSensorArray instantiated with values", self.i2c_address def run(self): print "class CapSensorArray thread started" for sensor_id in range(4): self.last_touched[sensor_id] = self.capsensors[sensor_id].touched() global_position = 1 while True: for sensor_id in range(4): current_touched = self.capsensors[sensor_id].touched() for i in range(12): pin_bit = 1 << i if current_touched & pin_bit and not self.last_touched[sensor_id] & pin_bit: print('{0} touched!'.format(i)) global_position = i + (12 * sensor_id) if not current_touched & pin_bit and self.last_touched[sensor_id] & pin_bit: print('{0} released!'.format(i)) self.last_touched[sensor_id] = current_touched if global_position > 1: time.sleep(0.01) main.add_to_queue("pitch_key_event", global_position) class Key(threading.Thread): def __init__(self, name, bus, deviceId): threading.Thread.__init__(self) self.name = name self.bus = bus self.deviceId = deviceId print "creating amt203 object" self.encoder = AMT203.AMT203(bus, deviceId) print "setting zero ", self.bus, self.deviceId self.encoder.set_zero() print "after zero ", self.bus, self.deviceId print "class Key instantiated with values", name, bus, deviceId self.encoder_min = 0 self.encoder_max = 100 def run(self): print "class Key thread started" while True: pos = self.encoder.get_position() mapped_pos = self.map_key(self.name, pos) main.add_to_queue(self.name, mapped_pos) time.sleep(0.01) def map_key(self, name, value): value = encoder_max if value > encoder_max else value value = encoder_min if value < encoder_min else value mapped_value = (((value - encoder_min))/(encoder_max - encoder_min)) return mapped_value def network_status_handler(msg): print "network_status_handler", msg def network_message_handler(msg): print "network_message_handler", msg topic = msg[0] #host, sensor, data = yaml.safe_load(msg[1]) if topic == "__heartbeat__": print "heartbeat received", 
msg network = None # makin' it global def init(HOSTNAME): global network network = network_init( hostname=HOSTNAME, role="client", discovery_multicastGroup=settings.discovery_multicastGroup, discovery_multicastPort=settings.discovery_multicastPort, discovery_responsePort=settings.discovery_responsePort, pubsub_pubPort=settings.pubsub_pubPort, message_callback=network_message_handler, status_callback=network_status_handler ) network.subscribe_to_topic("system") # subscribe to all system messages #network.subscribe_to_topic("sensor_data") main = Main(HOSTNAME) main.start() mpr121array = MPR121Array([0x5a, 0x5b, 0x5c, 0x5d]) mpr121array.start() key_0 = Key("voice_key_1_position",0,0) key_1 = Key("voice_key_2_position",0,1) key_2 = Key("voice_key_3_position",1,1) key_0.start() time.sleep(5) key_1.start() time.sleep(5) key_2.start()
Python
0.999997
@@ -5065,24 +5065,26 @@ .start()%0A + # key_0 = Key @@ -5111,24 +5111,26 @@ on%22,0,0)%0A + # key_1 = Key @@ -5157,24 +5157,26 @@ on%22,0,1)%0A + # key_2 = Key @@ -5203,24 +5203,26 @@ on%22,1,1)%0A + # key_0.start @@ -5219,32 +5219,34 @@ ey_0.start()%0A + # time.sleep(5)%0A @@ -5243,24 +5243,26 @@ sleep(5)%0A + # key_1.start @@ -5267,16 +5267,18 @@ rt()%0A + # time.sl @@ -5287,16 +5287,18 @@ p(5)%0A + # key_2.s
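Decoded for readability (%0A is a newline in this dataset's diff encoding), the patch above simply prefixes eight lines at the end of the file with "# ", disabling the three rotary-encoder Key threads while leaving the MPR121 cap-sensor array running. A sketch of the after-patch state, with indentation assumed since the dump stores diffs at the character level:

    # key_0 = Key("voice_key_1_position",0,0)
    # key_1 = Key("voice_key_2_position",0,1)
    # key_2 = Key("voice_key_3_position",1,1)
    # key_0.start()
    # time.sleep(5)
    # key_1.start()
    # time.sleep(5)
    # key_2.start()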
d63049e8c3901386570ac4324786701d67dbf0dc
Add a redirectLegacyRequest response
app/soc/views/helper/responses.py
app/soc/views/helper/responses.py
#!/usr/bin/python2.5 # # Copyright 2008 the Melange authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helpers used to render response. """ __authors__ = [ '"Todd Larsen" <[email protected]>', '"Pawel Solyga" <[email protected]>', '"Sverre Rabbelier" <[email protected]>', ] from google.appengine.api import users from django import http from django.template import loader from soc.logic import accounts from soc.logic import system from soc.logic.helper import timeline from soc.logic.models import site from soc.logic.models.user import logic as user_logic from soc.modules import callback from soc.views import helper from soc.views.helper import redirects from soc.views.helper import templates def respond(request, template, context=None, response_args=None, response_headers=None): """Helper to render a response, passing standard stuff to the response. Args: request: the Django HTTP request object template: the template (or search list of templates) to render context: the context supplied to the template (implements dict) response_args: keyword arguments passed to http.HttpResponse() (response_args['content'] is created with render_to_string(template, dictionary=context) if it is not present) response_headers: optional dict containing HTTP response header names and corresponding values to set in the HttpResponse object before it is returned; default is None Returns: django.shortcuts.render_to_response(template, context) results Raises: Any exceptions that django.template.loader.render_to_string() or django.http.HttpResponse() might raise. """ if not context: from soc.views.helper import params context = getUniversalContext(request) useJavaScript(context, params.DEF_JS_USES_LIST) if response_args is None: response_args = {} if 'content' not in response_args: content = loader.render_to_string(template, dictionary=context) response_args['content'] = content.strip('\n') http_response = http.HttpResponse(**response_args) if response_headers: for key, value in response_headers.iteritems(): http_response[key] = value return http_response def getUniversalContext(request): """Constructs a template context dict will many common variables defined. 
Args: request: the Django HTTP request object Returns: a new context dict containing: { 'request': the Django HTTP request object passed in by the caller 'account': the logged-in Google Account if there is one 'user': the User entity corresponding to the Google Account in context['account'] 'is_admin': True if users.is_current_user_admin() is True 'is_debug': True if system.isDebug() is True 'sign_in': a Google Account login URL 'sign_out': a Google Account logout URL 'sidebar_menu_html': an HTML string that renders the sidebar menu } """ core = callback.getCore() context = core.getRequestValue('context', {}) if context: return context account = accounts.getCurrentAccount() user = None is_admin = False context['request'] = request if account: user = user_logic.getForAccount(account) is_admin = user_logic.isDeveloper(account=account, user=user) context['account'] = account context['user'] = user context['is_admin'] = is_admin context['is_local'] = system.isLocal() context['is_debug'] = system.isDebug() context['sign_in'] = users.create_login_url(request.path) context['sign_out'] = users.create_logout_url(request.path) context['sidebar_menu_items'] = core.getSidebar(account, user) context['gae_version'] = system.getAppVersion() context['soc_release'] = system.getMelangeVersion() settings = site.logic.getSingleton() context['ga_tracking_num'] = settings.ga_tracking_num context['gmaps_api_key'] = settings.gmaps_api_key context['site_name'] = settings.site_name context['site_notice'] = settings.site_notice context['tos_link'] = redirects.getToSRedirect(settings) context['in_maintenance'] = timeline.isActivePeriod(settings, 'maintenance') core.setRequestValue('context', context) return context def useJavaScript(context, uses): """Updates the context for JavaScript usage. """ for use in uses: context['uses_%s' % use] = True def redirectToChangedSuffix( request, old_suffix, new_suffix=None, params=None): """Changes suffix of URL path and returns an HTTP redirect response. Args: request: the Django HTTP request object; redirect path is derived from request.path old_suffix, new_suffix, params: see helper.requests.replaceSuffix() Returns: a Django HTTP redirect response pointing to the altered path. """ path = helper.requests.replaceSuffix(request.path, old_suffix, new_suffix, params=params) return http.HttpResponseRedirect(path) def errorResponse(error, request, template=None, context=None): """Creates an HTTP response from the soc.views.out_of_band.Error exception. Args: error: a out_of_band.Error object request: a Django HTTP request template: the "sibling" template (or a search list of such templates) from which to construct the actual template name (or names) context: optional context dict supplied to the template, which is modified (so supply a copy if such modification is not acceptable) """ if not context: context = error.context if not context: from soc.views.helper import params context = getUniversalContext(request) useJavaScript(context, params.DEF_JS_USES_LIST) if not template: template = [] # make a list of possible "sibling" templates, then append a default sibling_templates = templates.makeSiblingTemplatesList(template, error.TEMPLATE_NAME, default_template=error.DEF_TEMPLATE) context['status'] = error.response_args.get('status') if not context.get('message'): # supplied context did not explicitly override the message context['message'] = error.message_fmt % context return respond(request, sibling_templates, context=context, response_args=error.response_args)
Python
0
@@ -6721,20 +6721,266 @@ rror.response_args)%0A +%0A%0Adef redirectLegacyRequest(request, *args, **kwargs):%0A %22%22%22Redirects a request to a legacy url to the new '/gsoc' suffix.%0A%0A Args:%0A request: a Django HTTP request%0A %22%22%22%0A%0A url = '/gsoc' + request.path%0A%0A return http.HttpResponseRedirect(url)%0A
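Decoded, this diff appends exactly one helper to responses.py; the body below is reconstructed verbatim from the patch (%0A decoded to newlines), so only its placement after errorResponse is inferred:

    def redirectLegacyRequest(request, *args, **kwargs):
      """Redirects a request to a legacy url to the new '/gsoc' suffix.

      Args:
        request: a Django HTTP request
      """

      url = '/gsoc' + request.path

      return http.HttpResponseRedirect(url)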
853878cbf218728608a783260ae74c408ef4b8a2
fix the wrong format
python/paddle/fluid/average.py
python/paddle/fluid/average.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import numpy as np
import warnings
"""
    Class of all kinds of Average.

    All Averages are accomplished via Python totally.
    They do not change Paddle's Program, nor do anything to
    modify NN model's configuration. They are completely
    wrappers of Python functions.
"""

__all__ = ["WeightedAverage"]


def _is_number_(var):
    return isinstance(var, int) or isinstance(var, float) or (isinstance(
        var, np.ndarray) and var.shape == (1, ))


def _is_number_or_matrix_(var):
    return _is_number_(var) or isinstance(var, np.ndarray)


class WeightedAverage(object):
    """
    Calculate weighted average.

    The average calculating is accomplished via Python totally.
    They do not change Paddle's Program, nor do anything to
    modify NN model's configuration. They are completely
    wrappers of Python functions.

    Examples:
        .. code-block:: python
            avg = fluid.average.WeightedAverage()
            avg.add(value=2.0, weight=1)
            avg.add(value=4.0, weight=2)
            avg.eval()

            # The result is 3.333333333.
            # For (2.0 * 1 + 4.0 * 2) / (1 + 2) = 3.333333333
    """

    def __init__(self):
        warnings.warn(
            "The %s is deprecated, please use fluid.metrics.Accuracy instead."
            % (self.__class__.__name__), Warning)
        self.reset()

    def reset(self):
        self.numerator = None
        self.denominator = None

    def add(self, value, weight):
        if not _is_number_or_matrix_(value):
            raise ValueError(
                "The 'value' must be a number(int, float) or a numpy ndarray.")
        if not _is_number_(weight):
            raise ValueError("The 'weight' must be a number(int, float).")

        if self.numerator is None or self.denominator is None:
            self.numerator = value * weight
            self.denominator = weight
        else:
            self.numerator += value * weight
            self.denominator += weight

    def eval(self):
        if self.numerator is None or self.denominator is None:
            raise ValueError(
                "There is no data to be averaged in WeightedAverage.")
        return self.numerator / self.denominator
Python
0.999964
@@ -1539,16 +1539,17 @@ python%0A +%0A
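The one-byte patch inserts a newline right after the ".. code-block:: python" directive in the WeightedAverage docstring (the contents above still show the pre-patch state). reStructuredText requires a blank line between a directive and its indented body, so without it Sphinx fails to render the example as code. After the fix the docstring reads:

    Examples:
        .. code-block:: python

            avg = fluid.average.WeightedAverage()
            avg.add(value=2.0, weight=1)
            avg.add(value=4.0, weight=2)
            avg.eval()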
662aaa79305cbbbceeba8d46f9a7e543621f45a3
Add harvest edit view
Seeder/harvests/views.py
Seeder/harvests/views.py
import time
import models
import forms
import datetime
from django.http.response import Http404, HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
from django.views.generic.base import TemplateView
from django.views.generic import DetailView, FormView
from urljects import U, URLView, pk

from core import generic_views
from comments.views import CommentViewGeneric


def timestamp_to_datetime(ms_string):
    """
    :param ms_string: string representing milliseconds since the famous day
    :return: datetime or None
    """
    try:
        return datetime.datetime.fromtimestamp(
            float(ms_string) / 1000
        )
    except ValueError:
        return None


def timestamp(dtm_object):
    """
    :param dtm_object: datetime
    :return: int with epoch timestamp in milliseconds
    """
    return time.mktime(dtm_object.timetuple()) * 1000


class HarvestView(generic_views.LoginMixin):
    view_name = 'harvests'
    model = models.Harvest
    title = _('Harvests')


class CalendarView(HarvestView, URLView, TemplateView):
    template_name = 'calendar.html'

    url = U
    url_name = 'calendar'

    def get_context_data(self, **kwargs):
        context = super(CalendarView, self).get_context_data(**kwargs)
        context['harvest_form'] = forms.HarvestCreateForm()
        return context


class CalendarJsonView(generic_views.JSONView, URLView):
    url = U / 'json'
    url_name = 'json_calendar'

    def get_data(self, context):
        date_from = timestamp_to_datetime(self.request.GET.get('from', ''))
        date_to = timestamp_to_datetime(self.request.GET.get('to', ''))

        if not (date_from and date_to):
            raise Http404('Invalid format')

        harvests = models.Harvest.objects.filter(
            scheduled_on__gte=date_from,
            scheduled_on__lte=date_to
        )

        return {
            "success": 1,
            "result": [
                {
                    "id": harvest.id,
                    "title": harvest.repr(),
                    "url": harvest.get_absolute_url(),
                    "class": harvest.get_calendar_style(),
                    "start": timestamp(harvest.scheduled_on),
                    "end": timestamp(harvest.scheduled_on) + 3600 * 1000
                } for harvest in harvests
            ]
        }


class AddView(HarvestView, FormView, URLView):
    url = U / 'add'
    url_name = 'add'
    form_class = forms.HarvestCreateForm
    template_name = 'add_form.html'

    def form_valid(self, form):
        harvest = form.save(commit=False)
        harvest.status = models.Harvest.STATE_INITIAL
        harvest.save()
        return HttpResponseRedirect(harvest.get_absolute_url())


class Detail(HarvestView, DetailView, CommentViewGeneric, URLView):
    template_name = 'harvest.html'

    url = U / pk / 'detail'
    url_name = 'detail'
Python
0
@@ -20,16 +20,30 @@ models%0A +import source%0A import f @@ -402,16 +402,56 @@ Generic%0A +from core.generic_views import EditView%0A %0A%0Adef ti @@ -2712,16 +2712,52 @@ .save()%0A + harvest.pair_custom_seeds()%0A @@ -2961,16 +2961,149 @@ name = 'detail'%0A +%0A%0Aclass Edit(HarvestView, EditView, URLView):%0A url = U / pk / 'edit'%0A url_name = 'edit'%0A form_class = forms.HarvestEditForm%0A
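Decoded, the patch does three things: it adds "import source" and "from core.generic_views import EditView" to the imports, calls harvest.pair_custom_seeds() right after harvest.save() in AddView.form_valid, and appends the edit view named in the commit subject. The added class, reconstructed from the diff:

    class Edit(HarvestView, EditView, URLView):
        url = U / pk / 'edit'
        url_name = 'edit'
        form_class = forms.HarvestEditForm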
dca5301379ea16fa1adbbace699e623b0abd1c58
Add util method to get subsystem counts with postselection
qiskit/aqua/utils/subsystem.py
qiskit/aqua/utils/subsystem.py
# -*- coding: utf-8 -*-

# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.

""" sub system """

from collections import defaultdict

import numpy as np
from scipy.linalg import sqrtm

from qiskit.tools.qi.qi import partial_trace


def get_subsystem_density_matrix(statevector, trace_systems):
    """
    Compute the reduced density matrix of a quantum subsystem.

    Args:
        statevector (list|array): The state vector of the complete system
        trace_systems (list|range): The indices of the qubits to be traced out.

    Returns:
        numpy.ndarray: The reduced density matrix for the desired subsystem
    """
    rho = np.outer(statevector, np.conj(statevector))
    rho_sub = partial_trace(rho, trace_systems)
    return rho_sub


def get_subsystem_fidelity(statevector, trace_systems, subsystem_state):
    """
    Compute the fidelity of the quantum subsystem.

    Args:
        statevector (list|array): The state vector of the complete system
        trace_systems (list|range): The indices of the qubits to be traced.
            to trace qubits 0 and 4 trace_systems = [0,4]
        subsystem_state (list|array): The ground-truth state vector of the subsystem

    Returns:
        numpy.ndarray: The subsystem fidelity
    """
    rho = np.outer(np.conj(statevector), statevector)
    rho_sub = partial_trace(rho, trace_systems)
    rho_sub_in = np.outer(np.conj(subsystem_state), subsystem_state)
    fidelity = np.trace(
        sqrtm(
            np.dot(
                np.dot(sqrtm(rho_sub), rho_sub_in),
                sqrtm(rho_sub)
            )
        )
    ) ** 2
    return fidelity


def get_subsystems_counts(complete_system_counts):
    """
    Extract all subsystems' counts from the single complete system count dictionary.

    If multiple classical registers are used to measure various parts of a quantum system,
    Each of the measurement dictionary's keys would contain spaces as delimiters to separate
    the various parts being measured. For example, you might have three keys
    '11 010', '01 011' and '11 011', among many other, in the count dictionary of the 5-qubit
    complete system, and would like to get the two subsystems' counts
    (one 2-qubit, and the other 3-qubit) in order to get the counts for the 2-qubit
    partial measurement '11' or the 3-qubit partial measurement '011'.

    Args:
        complete_system_counts (dict): The measurement count dictionary of a complete system
            that contains multiple classical registers for measurements s.t. the dictionary's
            keys have space delimiters.

    Returns:
        list: A list of measurement count dictionaries corresponding to
            each of the subsystems measured.
    """
    mixed_measurements = list(complete_system_counts)
    subsystems_counts = [defaultdict(int) for _ in mixed_measurements[0].split()]
    for mixed_measurement in mixed_measurements:
        count = complete_system_counts[mixed_measurement]
        for k, d_l in zip(mixed_measurement.split(), subsystems_counts):
            d_l[k] += count
    return [dict(d) for d in subsystems_counts]
Python
0
@@ -3549,8 +3549,1342 @@ counts%5D%0A +%0Adef get_subsystems_counts_postselected(complete_system_counts, index, postselect_value):%0A %22%22%22%0A Extract all subsystems' counts from the single complete system count dictionary subject to a%0A specific postselection.%0A%0A Args:%0A complete_system_counts (dict): The measurement count dictionary of a complete system%0A that contains multiple classical registers for measurements s.t. the dictionary's%0A keys have space delimiters.%0A index (int): The index of the subsystem to apply the post-selection to.%0A postselect_value (str): The postselection value to apply to the subsystem at index.%0A%0A Returns:%0A list: A list of measurement count dictionaries corresponding to%0A each of the subsystems measured subject to a specific postselection.%0A %22%22%22%0A mixed_measurements = list(complete_system_counts)%0A subsystems_counts = %5Bdefaultdict(int) for _ in mixed_measurements%5B0%5D.split()%5D%0A%0A for mixed_measurement in mixed_measurements:%0A count = complete_system_counts%5Bmixed_measurement%5D%0A subsystem_measurements = mixed_measurement.split()%0A for k, d_l in zip(subsystem_measurements, subsystems_counts):%0A if (subsystem_measurements%5Bindex%5D == postselect_value):%0A d_l%5Bk%5D += count%0A%0A return %5Bdict(d) for d in subsystems_counts%5D%0A
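Decoded, the diff appends the function below (its long docstring is shortened here). It mirrors get_subsystems_counts but only accumulates a measurement when the subsystem at position index equals postselect_value:

    def get_subsystems_counts_postselected(complete_system_counts, index, postselect_value):
        """Like get_subsystems_counts, subject to a postselection on one subsystem."""
        mixed_measurements = list(complete_system_counts)
        subsystems_counts = [defaultdict(int) for _ in mixed_measurements[0].split()]

        for mixed_measurement in mixed_measurements:
            count = complete_system_counts[mixed_measurement]
            subsystem_measurements = mixed_measurement.split()
            for k, d_l in zip(subsystem_measurements, subsystems_counts):
                if (subsystem_measurements[index] == postselect_value):
                    d_l[k] += count

        return [dict(d) for d in subsystems_counts]

For example, with counts {'11 010': 5, '01 010': 3}, index=0 and postselect_value='11', only the first key contributes, giving [{'11': 5}, {'010': 5}].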
f4106e3025c5dbb3136db94081b9998a052c8e70
Bump version to 2.0.0-alpha2
pyqode/python/__init__.py
pyqode/python/__init__.py
# -*- coding: utf-8 -*-
"""
pyqode.python is an extension of pyqode.core that brings support for the
python programming language. It does so by providing a set of additional
modes and panels for the frontend and by supplying dedicated workers for
the backend.

"""
__version__ = "2.0.0-alpha1"
Python
0.000001
@@ -288,7 +288,7 @@ lpha -1 +2 %22%0A
2aa98c286e5abb7da424e5f3b305df765b5f7a4e
Remove dependency on webkit_strings from webkit_unit_tests.
Source/web/web_tests.gyp
Source/web/web_tests.gyp
# # Copyright (C) 2011 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # { 'includes': [ '../../../../build/ui_resources_location.gypi', '../bindings/bindings.gypi', '../build/features.gypi', '../build/scripts/scripts.gypi', '../core/core.gypi', '../modules/modules.gypi', '../platform/blink_platform.gypi', '../web/web.gypi', '../wtf/wtf.gypi', ], 'targets': [ { 'target_name': 'webkit_unit_tests_resources', 'type': 'none', 'dependencies': [ '<(DEPTH)/net/net.gyp:net_resources', '<(DEPTH)/ui/resources/ui_resources.gyp:ui_resources', '<(DEPTH)/webkit/webkit_resources.gyp:webkit_resources', '<(DEPTH)/webkit/webkit_resources.gyp:webkit_strings', ], 'actions': [{ 'action_name': 'repack_webkit_unit_tests_resources', 'variables': { 'repack_path': '<(DEPTH)/tools/grit/grit/format/repack.py', 'pak_inputs': [ '<(SHARED_INTERMEDIATE_DIR)/net/net_resources.pak', '<(SHARED_INTERMEDIATE_DIR)/ui/<(ui_resources_gen_subdir)/ui_resources_100_percent.pak', '<(SHARED_INTERMEDIATE_DIR)/webkit/blink_resources.pak', '<(SHARED_INTERMEDIATE_DIR)/webkit/webkit_strings_en-US.pak', '<(SHARED_INTERMEDIATE_DIR)/webkit/webkit_resources_100_percent.pak', ]}, 'inputs': [ '<(repack_path)', '<@(pak_inputs)', ], 'outputs': [ '<(PRODUCT_DIR)/webkit_unit_tests_resources.pak', ], 'action': ['python', '<(repack_path)', '<@(_outputs)', '<@(pak_inputs)'], }], 'conditions': [ ['OS=="mac"', { 'all_dependent_settings': { 'mac_bundle_resources': [ '<(PRODUCT_DIR)/webkit_unit_tests_resources.pak', ], }, }], ] }, { 'target_name': 'webkit_unit_tests', 'type': 'executable', 'variables': { 'enable_wexit_time_destructors': 1, }, 'dependencies': [ '../config.gyp:unittest_config', '../../public/blink.gyp:blink', '../wtf/wtf_tests.gyp:wtf_unittest_helpers', 'web.gyp:blink_web_test_support', '<(DEPTH)/base/base.gyp:base', '<(DEPTH)/base/base.gyp:base_i18n', '<(DEPTH)/base/base.gyp:test_support_base', '<(DEPTH)/testing/gmock.gyp:gmock', '<(DEPTH)/testing/gtest.gyp:gtest', '<(DEPTH)/third_party/libwebp/libwebp.gyp:libwebp', '<(DEPTH)/third_party/zlib/zlib.gyp:zlib', '<(DEPTH)/url/url.gyp:url_lib', 
'<(DEPTH)/v8/tools/gyp/v8.gyp:v8', '<(DEPTH)/content/content_shell_and_tests.gyp:test_support_content', 'webkit_unit_tests_resources', ], 'sources': [ '../web/tests/RunAllTests.cpp', ], 'include_dirs': [ '../../public/web', '../web', 'src', ], 'conditions': [ ['component=="shared_library"', { 'defines': [ 'BLINK_DLL_UNITTEST', ], }, { 'dependencies': [ '../core/core.gyp:webcore', ], 'defines': [ 'BLINK_IMPLEMENTATION=1', 'INSIDE_BLINK', ], 'sources': [ '<@(bindings_unittest_files)', '<@(core_unittest_files)', '<@(modules_unittest_files)', '<@(platform_web_unittest_files)', '<@(web_unittest_files)', ], }], ['OS=="win" and component!="shared_library"', { 'configurations': { 'Debug_Base': { 'msvs_settings': { 'VCLinkerTool': { 'LinkIncremental': '<(msvs_large_module_debug_link_mode)', }, }, }, }, 'conditions': [ ['win_use_allocator_shim==1', { 'dependencies': [ '<(DEPTH)/base/allocator/allocator.gyp:allocator', ], }], ], }], ['OS=="android"', { 'type': 'shared_library', 'dependencies': [ '<(DEPTH)/testing/android/native_test.gyp:native_test_native_code', '<(DEPTH)/tools/android/forwarder2/forwarder.gyp:forwarder2', ], }], ['OS=="mac"', { 'include_dirs': [ '../../public/web/mac', ], }], [ 'os_posix==1 and OS!="mac" and OS!="android" and OS!="ios" and use_allocator!="none"', { 'dependencies': [ '<(DEPTH)/base/allocator/allocator.gyp:allocator', ], }], ], } ], # targets 'conditions': [ ['gcc_version>=46', { 'target_defaults': { # Disable warnings about c++0x compatibility, as some names (such # as nullptr) conflict with upcoming c++0x types. 'cflags_cc': ['-Wno-c++0x-compat'], }, }], ['OS=="android" and android_webview_build==0 and gtest_target_type == "shared_library"', { # Wrap libwebkit_unit_tests.so into an android apk for execution. 'targets': [{ 'target_name': 'webkit_unit_tests_apk', 'type': 'none', 'dependencies': [ '<(DEPTH)/base/base.gyp:base_java', '<(DEPTH)/net/net.gyp:net_java', 'webkit_unit_tests', ], 'variables': { 'test_suite_name': 'webkit_unit_tests', 'input_shlib_path': '<(SHARED_LIB_DIR)/<(SHARED_LIB_PREFIX)webkit_unit_tests<(SHARED_LIB_SUFFIX)', }, 'includes': [ '../../../../build/apk_test.gypi' ], }], }], ], }
Python
0.999999
@@ -2240,79 +2240,8 @@ s',%0A - '%3C(DEPTH)/webkit/webkit_resources.gyp:webkit_strings',%0A @@ -2767,94 +2767,8 @@ k',%0A - '%3C(SHARED_INTERMEDIATE_DIR)/webkit/webkit_strings_en-US.pak',%0A
6990f6c57737a01debc398b8de42f375e84672e1
Use with statement around open() calls
pysmi/reader/localfile.py
pysmi/reader/localfile.py
# # This file is part of pysmi software. # # Copyright (c) 2015-2016, Ilya Etingof <[email protected]> # License: http://pysmi.sf.net/license.html # import os import sys import time from pysmi.reader.base import AbstractReader from pysmi.mibinfo import MibInfo from pysmi.compat import decode from pysmi import debug from pysmi import error class FileReader(AbstractReader): """Fetch ASN.1 MIB text by name from local file. *FileReader* class instance tries to locate ASN.1 MIB files by name, fetch and return their contents to caller. """ useIndexFile = True # optional .index file mapping MIB to file name indexFile = '.index' def __init__(self, path, recursive=True, ignoreErrors=True): """Create an instance of *FileReader* serving a directory. Args: path (str): directory to search MIB files Keyword Args: recursive (bool): whether to include subdirectories ignoreErrors (bool): ignore filesystem access errors """ self._path = os.path.normpath(path) self._recursive = recursive self._ignoreErrors = ignoreErrors self._indexLoaded = False self._mibIndex = None def __str__(self): return '%s{"%s"}' % (self.__class__.__name__, self._path) def getSubdirs(self, path, recursive=True, ignoreErrors=True): if not recursive: return [path] dirs = [path] try: subdirs = os.listdir(path) except OSError: if ignoreErrors: return dirs else: raise error.PySmiError('directory %s access error: %s' % (path, sys.exc_info()[1])) for d in subdirs: d = os.path.join(decode(path), decode(d)) if os.path.isdir(d): dirs.extend(self.getSubdirs(d, recursive)) return dirs @staticmethod def loadIndex(indexFile): mibIndex = {} if os.path.exists(indexFile): try: mibIndex = dict( [x.split()[:2] for x in open(indexFile).readlines()] ) debug.logger & debug.flagReader and debug.logger( 'loaded MIB index map from %s file, %s entries' % (indexFile, len(mibIndex))) except IOError: pass return mibIndex def getMibVariants(self, mibname): if self.useIndexFile: if not self._indexLoaded: self._mibIndex = self.loadIndex( os.path.join(self._path, self.indexFile) ) self._indexLoaded = True if mibname in self._mibIndex: debug.logger & debug.flagReader and debug.logger( 'found %s in MIB index: %s' % (mibname, self._mibIndex[mibname])) return [(mibname, self._mibIndex[mibname])] return super(FileReader, self).getMibVariants(mibname) def getData(self, mibname): debug.logger & debug.flagReader and debug.logger( '%slooking for MIB %s' % (self._recursive and 'recursively ' or '', mibname)) for path in self.getSubdirs(self._path, self._recursive, self._ignoreErrors): for mibalias, mibfile in self.getMibVariants(mibname): f = os.path.join(decode(path), decode(mibfile)) debug.logger & debug.flagReader and debug.logger('trying MIB %s' % f) if os.path.exists(f) and os.path.isfile(f): try: mtime = os.stat(f)[8] debug.logger & debug.flagReader and debug.logger( 'source MIB %s mtime is %s, fetching data...' % ( f, time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(mtime)))) return MibInfo(path='file://%s' % f, file=mibfile, name=mibalias, mtime=mtime), decode( open(f, mode='rb').read(self.maxMibSize)) except (OSError, IOError): debug.logger & debug.flagReader and debug.logger( 'source file %s open failure: %s' % (f, sys.exc_info()[1])) if not self._ignoreErrors: raise error.PySmiError('file %s access error: %s' % (f, sys.exc_info()[1])) raise error.PySmiReaderFileNotModifiedError('source MIB %s is older than needed' % f, reader=self) raise error.PySmiReaderFileNotFoundError('source MIB %s not found' % mibname, reader=self)
Python
0
@@ -2005,32 +2005,79 @@ try:%0A + with open(indexFile) as f:%0A @@ -2093,16 +2093,20 @@ = dict(%0A + @@ -2141,31 +2141,17 @@ or x in -open(indexFile) +f .readlin @@ -2156,16 +2156,20 @@ ines()%5D%0A + @@ -3911,24 +3911,83 @@ e(mtime))))%0A + with open(f, mode='rb') as fp:%0A @@ -4114,34 +4114,22 @@ -open(f, mode='rb') + fp .read(se
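Decoded, the patch rewrites the two bare open() calls as context managers so file handles are closed promptly even on error. The reconstructed loadIndex body after the patch (relative indentation inferred):

    try:
        with open(indexFile) as f:
            mibIndex = dict(
                [x.split()[:2] for x in f.readlines()]
            )

and in getData the payload read moves inside a with-block, so the return becomes:

    with open(f, mode='rb') as fp:
        return MibInfo(path='file://%s' % f, file=mibfile,
                       name=mibalias, mtime=mtime), decode(
                           fp.read(self.maxMibSize))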
d96aac74b32a166ec724234540dc93a8ea526a3f
fix test error in windows
pythainlp/tag/__init__.py
pythainlp/tag/__init__.py
# -*- coding: utf-8 -*-
# TODO: adjust the API to match nltk
from __future__ import absolute_import,division,print_function,unicode_literals
import sys

def pos_tag(text,engine='old'):
    """
    POS tagger system
    pos_tag(text,engine='old')
    Supported engines:
    * old - a UnigramTagger
    * artagger - an RDR POS Tagger
    """
    if engine=='old':
        from .old import tag
    elif engine=='artagger':
        if sys.version_info < (3,4):
            sys.exit('Sorry, Python < 3.4 is not supported')
        def tag(text1):
            try:
                from artagger import Tagger
            except ImportError:
                import pip
                pip.main(['install','https://github.com/franziz/artagger/archive/master.zip'])
                try:
                    from artagger import Tagger
                except ImportError:
                    print("Error ! using 'pip install https://github.com/franziz/artagger/archive/master.zip'")
                    sys.exit(0)
            tagger = Tagger()
            words = tagger.tag(' '.join(text1))
            totag=[]
            for word in words:
                totag.append((word.word, word.tag))
            return totag
    return tag(text)
Python
0.000001
@@ -608,39 +608,45 @@ ps://github.com/ -franziz +wannaphongcom /artagger/archiv @@ -797,15 +797,21 @@ com/ -franziz +wannaphongcom /art
6d6ba9e84c0b53cc05cec36047c8e701493d826e
Update rules
pythainlp/tokenize/tcc.py
pythainlp/tokenize/tcc.py
# -*- coding: utf-8 -*-
"""
The implementation of tokenizer according to Thai Character Clusters (TCCs)
rules proposed by `Theeramunkong et al. 2000. \
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.59.2548>`_

Credits:
    * TCC: Jakkrit TeCho
    * Grammar: Wittawat Jitkrittum (`link to the source file \
      <https://github.com/wittawatj/jtcc/blob/master/TCC.g>`_)
    * Python code: Korakot Chaovavanich
"""
import re
from typing import List, Set

_RE_TCC = (
    """\
เc็ck
เcctาะk
เccีtยะk
เccีtย(?=[เ-ไก-ฮ]|$)k
เcc็ck
เcิc์ck
เcิtck
เcีtยะ?k
เcืtอะ?k
เc[ิีุู]tย(?=[เ-ไก-ฮ]|$)
เctา?ะ?k
cัtวะk
c[ัื]tc[ุิะ]?k
c[ิุู]์
c[ะ-ู]tk
c็
ck
ct[ะาำ]?(์?)
แc็c
แcc์
แctะ
แcc็c
แccc์
โctะ
[เ-ไ]ct
ก็
อึ
หึ
""".replace(
        "c", "[ก-ฮ]"
    )
    .replace("t", "[่-๋]?")
    .replace("k","((cc|c)?[ะ]?[์])?")
    .split()
)

_PAT_TCC = re.compile("|".join(_RE_TCC))


def tcc(text: str) -> str:
    """
    TCC generator, generates Thai Character Clusters

    :param str text: text to be tokenized to character clusters
    :return: subwords (character clusters)
    :rtype: Iterator[str]
    """
    if not text or not isinstance(text, str):
        return ""

    len_text = len(text)
    p = 0
    while p < len_text:
        m = _PAT_TCC.match(text[p:])
        if m:
            n = m.span()[1]
        else:
            n = 1
        yield text[p : p + n]
        p += n


def tcc_pos(text: str) -> Set[int]:
    """
    TCC positions

    :param str text: text to be tokenized to character clusters
    :return: list of the end position of subwords
    :rtype: set[int]
    """
    if not text or not isinstance(text, str):
        return set()

    p_set = set()
    p = 0
    for w in tcc(text):
        p += len(w)
        p_set.add(p)

    return p_set


def segment(text: str) -> List[str]:
    """
    Subword segmentation

    :param str text: text to be tokenized to character clusters
    :return: list of subwords (character clusters), tokenized from the text
    :rtype: list[str]
    """

    return list(tcc(text))
Python
0.000001
@@ -646,19 +646,16 @@ %E0%B8%B9%5Dtk%0Ac%E0%B9%87%0A -ck%0A ct%5B%E0%B8%B0%E0%B8%B2%E0%B8%B3%5D? @@ -659,16 +659,19 @@ %E0%B8%B3%5D?(%E0%B9%8C?)%0A +ck%0A %E0%B9%81c%E0%B9%87c%0A%E0%B9%81cc @@ -802,17 +802,15 @@ %22,%22( -( cc -%7Cc)?%5B%E0%B8%B0 +?%5Bd%7C%E0%B8%B4 %5D?%5B%E0%B9%8C @@ -811,24 +811,67 @@ %7C%E0%B8%B4%5D?%5B%E0%B9%8C%5D)?%22)%0A + .replace(%22d%22,%22%E0%B8%B8%22) # DSara: lower vowel%0A .split()
0df8cbad8b67163c3e4a0274b6bb22b8927a36ff
add example API map route
python/marvin/api/maps.py
python/marvin/api/maps.py
#!/usr/bin/env python # encoding: utf-8 # # maps.py # # Created by José Sánchez-Gallego on 25 Jun 2016. from __future__ import division from __future__ import print_function from __future__ import absolute_import import flask.ext.classy import json import brain.utils.general import marvin.api.base import marvin.core.exceptions import marvin.tools.maps import marvin.utils.general def _getMaps(name, **kwargs): """Returns a Maps object after parsing the name.""" results = {} print(kwargs) # Makes sure we don't use the wrong mode. kwargs.pop('mode', None) # Parses name into either mangaid or plateifu try: idtype = marvin.utils.general.parseIdentifier(name) except Exception as ee: results['error'] = 'Failed to parse input name {0}: {1}'.format(name, str(ee)) return None, results plateifu = None mangaid = None try: if idtype == 'plateifu': plateifu = name elif idtype == 'mangaid': mangaid = name else: raise marvin.core.exceptions.MarvinError( 'invalid plateifu or mangaid: {0}'.format(idtype)) maps = marvin.tools.maps.Maps(mangaid=mangaid, plateifu=plateifu, mode='local', **kwargs) results['status'] = 1 except Exception as ee: maps = None results['error'] = 'Failed to retrieve maps {0}: {1}'.format(name, str(ee)) return maps, results class MapsView(marvin.api.base.BaseView): """Class describing API calls related to MaNGA Maps.""" route_base = '/maps/' def index(self): self.results['data'] = 'this is a maps!' return json.dumps(self.results) @flask.ext.classy.route('/<name>/<bintype>/<niter>/', methods=['GET', 'POST'], endpoint='getMaps') def get(self, name, bintype, niter): """Returns the parameters needed to initialise a Maps remotely. To initialise a Maps we need to return: - mangaid - plateifu - Header with WCS information - Maps shape - bintype - template_kin """ kwargs = {'bintype': bintype, 'niter': niter} maps, results = _getMaps(name, **kwargs) self.update_results(results) if maps is None: return json.dumps(self.results) wcs_header = maps.data.cube.wcs.makeHeader().tostring() shape = maps.shape bintype = maps.bintype template_kin = maps.template_kin # Redefines plateifu and mangaid from the Maps mangaid = maps.mangaid plateifu = maps.plateifu self.results['data'] = {name: {'mangaid': mangaid, 'plateifu': plateifu, 'wcs': wcs_header, 'shape': shape, 'bintype': bintype, 'template_kin': template_kin}} return json.dumps(self.results) @flask.ext.classy.route('/<name>/dap_props/<path:path>', methods=['GET', 'POST'], endpoint='getdap_props') @brain.utils.general.parseRoutePath def getDAP_props(self, **kwargs): """Returns a dictionary of DAP parameters for a Maps spaxel. Parameters: name (str): The ``plateifu`` or ``mangaid`` of the object. x,y (int): The x/y coordinates of the spaxel (origin is ``lower``). kwargs (dict): Any other parameter to pass for the ``Maps`` initialisation. """ name = kwargs.pop('name') xx = int(kwargs.pop('x')) yy = int(kwargs.pop('y')) # Initialises the Maps object maps, results = _getMaps(name, **kwargs) self.update_results(results) if maps is None: return json.dumps(self.results) dict_of_props = marvin.utils.general.dap.maps_db2dict_of_props( maps.data, xx, yy) self.results['data'] = dict_of_props return json.dumps(self.results) @flask.ext.classy.route('/<name>/map/<path:path>', methods=['GET', 'POST'], endpoint='getmap') @brain.utils.general.parseRoutePath def getMap(self, **kwargs): """Returns data, ivar, mask, and unit for a given map. Parameters: name (str): The ``plateifu`` or ``mangaid`` of the object. category (str): The category of the map to be extractred. 
E.g., `'EMLINE_GFLUX'`. channel (str or None): If the ``category`` contains multiple channels, the channel to use, e.g., ``Ha-6564'. Otherwise, ``None``. """ name = kwargs.pop('name') category = kwargs.pop('category') channel = kwargs.pop('channel') # Initialises the Maps object maps, results = _getMaps(name, **kwargs) self.update_results(results) if maps is None: return json.dumps(self.results) try: mmap = maps.getMap(category=category, channel=channel) self.results['data'] = {} self.results['data']['value'] = mmap.value.tolist() self.results['data']['ivar'] = mmap.ivar.tolist() self.results['data']['mask'] = mmap.ivar.tolist() self.results['data']['unit'] = mmap.unit except Exception as ee: self.results['error'] = 'Failed to parse input name {0}: {1}'.format(name, str(ee)) return json.dumps(self.results)
Python
0
@@ -4809,16 +4809,130 @@ %60None%60%60. +%0A %0A e.g., https://api.sdss.org/marvin2/api/maps/8485-1901/map/category=EMLINE_GFLUX/channel=Ha-6564/ %0A%0A
8dc08d3733461ebe0ea770d0af07fdd4cfa00b64
Use mujoco function instead.
python/mujoco/__init__.py
python/mujoco/__init__.py
# Copyright 2022 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Python bindings for MuJoCo.""" import ctypes import os import platform import subprocess HEADERS_DIR = os.path.join(os.path.dirname(__file__), 'include') _MUJOCO_GL_ENABLE = ('enable', 'enabled', 'on', 'true', '1' , '') _MUJOCO_GL_DISABLE = ('disable', 'disabled', 'off', 'false', '0') _MUJOCO_GL = os.environ.get('MUJOCO_GL', '').lower().strip() _MUJOCO_GL_IS_VALID = True _SYSTEM = platform.system() if _SYSTEM == 'Linux': libglew_name = None if _MUJOCO_GL in _MUJOCO_GL_ENABLE + ('glfw', 'glx'): libglew_name = 'libglew.so' elif _MUJOCO_GL == 'egl': libglew_name = 'libglewegl.so' elif _MUJOCO_GL == 'osmesa': libglew_name = 'libglewosmesa.so' elif _MUJOCO_GL not in _MUJOCO_GL_DISABLE: _MUJOCO_GL_IS_VALID = False if libglew_name is not None: ctypes.CDLL(os.path.join(os.path.dirname(__file__), libglew_name), ctypes.RTLD_GLOBAL) ctypes.CDLL( os.path.join(os.path.dirname(__file__), 'libmujoco.so.2.1.3'), ctypes.RTLD_GLOBAL) else: ctypes.CDLL( os.path.join(os.path.dirname(__file__), 'libmujoco_nogl.so.2.1.3'), ctypes.RTLD_GLOBAL) elif _SYSTEM == 'Windows': if _MUJOCO_GL in _MUJOCO_GL_ENABLE + ('glfw', 'wgl'): ctypes.WinDLL(os.path.join(os.path.dirname(__file__), 'mujoco.dll')) elif _MUJOCO_GL in _MUJOCO_GL_DISABLE: ctypes.WinDLL(os.path.join(os.path.dirname(__file__), 'mujoco_nogl.dll')) else: _MUJOCO_GL_IS_VALID = False if not _MUJOCO_GL_IS_VALID: raise RuntimeError( f'invalid value for environment variable MUJOCO_GL: {_MUJOCO_GL}') from mujoco._callbacks import * from mujoco._constants import * from mujoco._enums import * from mujoco._errors import * from mujoco._functions import * from mujoco._structs import * # pylint: disable=g-import-not-at-top if _MUJOCO_GL not in _MUJOCO_GL_DISABLE: from mujoco._render import * if _SYSTEM != 'Linux': from mujoco.glfw import GLContext else: _dl_handle = ctypes.CDLL(None) if hasattr(_dl_handle, 'OSMesaCreateContextExt'): from mujoco.osmesa import GLContext elif hasattr(_dl_handle, 'eglCreateContext'): from mujoco.egl import GLContext else: from mujoco.glfw import GLContext def _get_version() -> str: with open(os.path.join(HEADERS_DIR, 'mujoco.h'), 'r') as f: for line in f: if line.startswith('#define mjVERSION_HEADER'): version = line.split()[2] break return '.'.join([d for d in str(version)]) __version__ = _get_version()
Python
0
@@ -2882,289 +2882,37 @@ t%0A%0A%0A -def _get_version() -%3E str:%0A with open(os.path.join(HEADERS_DIR, 'mujoco.h'), 'r') as f:%0A for line in f:%0A if line.startswith('#define mjVERSION_HEADER'):%0A version = line.split()%5B2%5D%0A break%0A return '.'.join(%5Bd for d in str(version)%5D)%0A%0A__version__ = _get_version +__version__ = mj_versionString ()%0A
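Decoded, the patch deletes the _get_version() helper that parsed include/mujoco.h for mjVERSION_HEADER and replaces the module's last two lines with a single call. mj_versionString presumably reaches this scope through the wildcard import from mujoco._functions; the diff itself only shows the assignment:

    __version__ = mj_versionString()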
518ef4c0b3d523225582f7defeff2b8c3bf237d8
Increase timeout waiting for HTTP server (#8286)
python/ray/serve/utils.py
python/ray/serve/utils.py
import asyncio from functools import wraps import inspect import json import logging import random import string import time import io import os import ray import requests from pygments import formatters, highlight, lexers from ray.serve.context import FakeFlaskRequest, TaskContext from ray.serve.http_util import build_flask_request import numpy as np try: import pydantic except ImportError: pydantic = None ACTOR_FAILURE_RETRY_TIMEOUT_S = 60 def parse_request_item(request_item): if request_item.request_context == TaskContext.Web: is_web_context = True asgi_scope, body_bytes = request_item.request_args flask_request = build_flask_request(asgi_scope, io.BytesIO(body_bytes)) args = (flask_request, ) kwargs = {} else: is_web_context = False args = (FakeFlaskRequest(), ) kwargs = request_item.request_kwargs return args, kwargs, is_web_context def _get_logger(): logger = logging.getLogger("ray.serve") # TODO(simon): Make logging level configurable. if os.environ.get("SERVE_LOG_DEBUG"): logger.setLevel(logging.DEBUG) else: logger.setLevel(logging.INFO) return logger logger = _get_logger() class ServeEncoder(json.JSONEncoder): """Ray.Serve's utility JSON encoder. Adds support for: - bytes - Pydantic types - Exceptions - numpy.ndarray """ def default(self, o): # pylint: disable=E0202 if isinstance(o, bytes): return o.decode("utf-8") if pydantic is not None and isinstance(o, pydantic.BaseModel): return o.dict() if isinstance(o, Exception): return str(o) if isinstance(o, np.ndarray): return o.tolist() return super().default(o) def pformat_color_json(d): """Use pygments to pretty format and colroize dictionary""" formatted_json = json.dumps(d, sort_keys=True, indent=4) colorful_json = highlight(formatted_json, lexers.JsonLexer(), formatters.TerminalFormatter()) return colorful_json def block_until_http_ready(http_endpoint, num_retries=5, backoff_time_s=1): http_is_ready = False retries = num_retries while not http_is_ready: try: resp = requests.get(http_endpoint) assert resp.status_code == 200 http_is_ready = True except Exception: pass # Exponential backoff time.sleep(backoff_time_s) backoff_time_s *= 2 retries -= 1 if retries == 0: raise Exception( "HTTP proxy not ready after {} retries.".format(num_retries)) def get_random_letters(length=6): return "".join(random.choices(string.ascii_letters, k=length)) def async_retryable(cls): """Make all actor method invocations on the class retryable. Note: This will retry actor_handle.method_name.remote(), but it must be invoked in an async context. Usage: @ray.remote(max_reconstructions=10000) @async_retryable class A: pass """ for name, method in inspect.getmembers(cls, predicate=inspect.isfunction): def decorate_with_retry(f): @wraps(f) async def retry_method(*args, **kwargs): start = time.time() while time.time() - start < ACTOR_FAILURE_RETRY_TIMEOUT_S: try: return await f(*args, **kwargs) except ray.exceptions.RayActorError: logger.warning( "Actor method '{}' failed, retrying after 100ms.". 
format(name)) await asyncio.sleep(0.1) raise RuntimeError("Timed out after {}s waiting for actor " "method '{}' to succeed.".format( ACTOR_FAILURE_RETRY_TIMEOUT_S, name)) return retry_method method.__ray_invocation_decorator__ = decorate_with_retry return cls def retry_actor_failures(f, *args, **kwargs): start = time.time() while time.time() - start < ACTOR_FAILURE_RETRY_TIMEOUT_S: try: return ray.get(f.remote(*args, **kwargs)) except ray.exceptions.RayActorError: logger.warning( "Actor method '{}' failed, retrying after 100ms".format( f._method_name)) time.sleep(0.1) raise RuntimeError("Timed out after {}s waiting for actor " "method '{}' to succeed.".format( ACTOR_FAILURE_RETRY_TIMEOUT_S, f._method_name)) async def retry_actor_failures_async(f, *args, **kwargs): start = time.time() while time.time() - start < ACTOR_FAILURE_RETRY_TIMEOUT_S: try: return await f.remote(*args, **kwargs) except ray.exceptions.RayActorError: logger.warning( "Actor method '{}' failed, retrying after 100ms".format( f._method_name)) await asyncio.sleep(0.1) raise RuntimeError("Timed out after {}s waiting for actor " "method '{}' to succeed.".format( ACTOR_FAILURE_RETRY_TIMEOUT_S, f._method_name))
Python
0
@@ -2171,9 +2171,9 @@ ies= -5 +6 , ba
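The patch flips a single digit in block_until_http_ready, num_retries=5 to 6. Because backoff_time_s starts at 1 and doubles after every attempt, the worst-case wait before "HTTP proxy not ready" is raised grows from 1+2+4+8+16 = 31 s to 1+2+4+8+16+32 = 63 s. The patched signature:

    def block_until_http_ready(http_endpoint, num_retries=6, backoff_time_s=1):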
adb0c2bd97c6c4ca7272d764b669cef90f81a5bb
Allow non-dev logins to dev builds
handlers/login.py
handlers/login.py
from rorn.Box import LoginBox, ErrorBox, WarningBox, SuccessBox
from rorn.Session import delay

from User import User
from Button import Button
from LoadValues import isDevMode
from Event import Event
from utils import *

@get('login')
def login(handler, request):
	handler.title('Login')

	if handler.session['user']:
		print WarningBox('Logged In', 'You are already logged in as %s' % handler.session['user'])
	else:
		print LoginBox()

@post('login')
def loginPost(handler, request, p_username, p_password):
	handler.title('Login')
	user = User.load(username = p_username, password = User.crypt(p_username, p_password))
	if user:
		if not user.hasPrivilege('User'):
			Event.login(handler, user, False, "Account disabled")
			delay(handler, ErrorBox("Login Failed", "Your account has been disabled"))
			redirect('/')
		elif isDevMode() and not user.hasPrivilege('Dev'):
			Event.login(handler, user, False, "Non-dev login blocked")
			delay(handler, ErrorBox("Login Failed", "This is a development build"))
			redirect('/')

		if user.resetkey:
			user.resetkey = None
			user.save()

		handler.session['user'] = user
		Event.login(handler, user, True)
		delay(handler, SuccessBox("Login Complete", "Logged in as %s" % user, close = True))
		redirect('/')
	else:
		Event.login(handler, None, False, "Failed login for %s" % p_username)
		delay(handler, ErrorBox("Login Failed", "Invalid username/password combination"))
		redirect('/')

@get('logout')
def logout(handler, request):
	print "<form method=\"post\" action=\"/logout\">"
	print Button('Logout', type = 'submit').negative()
	print "</form>"

@post('logout')
def logoutPost(handler, request):
	if handler.session['user']:
		del handler.session['user']
		if 'impersonator' in handler.session:
			del handler.session['impersonator']
		redirect('/')
	else:
		print ErrorBox("Logout Failed", "You are not logged in")
Python
0
@@ -816,215 +816,8 @@ '/') -%0A%09%09elif isDevMode() and not user.hasPrivilege('Dev'):%0A%09%09%09Event.login(handler, user, False, %22Non-dev login blocked%22)%0A%09%09%09delay(handler, ErrorBox(%22Login Failed%22, %22This is a development build%22))%0A%09%09%09redirect('/') %0A%0A%09%09
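Decoded (%09 is a tab), the patch deletes the elif branch below from loginPost, so development builds now accept logins from users without the 'Dev' privilege; only the 'User' privilege check and the reset-key handling remain:

	elif isDevMode() and not user.hasPrivilege('Dev'):
		Event.login(handler, user, False, "Non-dev login blocked")
		delay(handler, ErrorBox("Login Failed", "This is a development build"))
		redirect('/')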
64713296cf4f4f3772a1ac23248d4fb930ee23ff
Bump to 0.3
python_gdrive/__init__.py
python_gdrive/__init__.py
from client import GoogleDrive

__version__ = '0.3-dev'
Python
0.000198
@@ -47,10 +47,6 @@ '0.3 --dev '%0A
6b2f3b76b14214dc1ff6bfc3a0a679b775a5c12d
Add json response option
quotes_page/core/views.py
quotes_page/core/views.py
import random import time from django.http import HttpResponse from django.shortcuts import render_to_response from django.template import RequestContext from django.core.cache import cache from django.utils import simplejson from django.views.decorators.cache import cache_page import _qi as qi from core.models import Speaker, Episode, Quote def main(request): return _quote(request, None) @cache_page(60*60*24) def quote(request, quote_id): return _quote(request, quote_id) def _quote(request, quote_id): to_search = request.GET.get('search') or request.POST.get('search') response = 'raw' if request.GET.get('response') == 'raw' else 'html' if quote_id: try: quote = Quote.objects.get(pk=quote_id) except: quote = None elif to_search: search_list = to_search.split(" ") quotes = Quote.objects.all() for term in search_list: if term.startswith("-who:"): quotes = quotes.exclude(speaker__full_name__icontains=term[5:]) elif term.startswith("who:"): quotes = quotes.filter(speaker__full_name__icontains=term[4:]) elif term.startswith("-"): quotes = quotes.exclude(text__icontains=term[1:]) else: quotes = quotes.filter(text__icontains=term) quote = quotes[random.randint(0, len(quotes)-1)] if len(quotes) else None else: num_quotes = cache.get('num_quotes') if not num_quotes: num_quotes = Quote.objects.all().count() cache.set('num_quotes', num_quotes, 60*60*24*7) quote = Quote.objects.all()[random.randint(0, num_quotes-1)] subs = { 'quote': quote, 'to_search': to_search if to_search else '', 'context_before': quote.get_previous(3) if quote else [], 'context_after': quote.get_next(3) if quote else [], 'forfeit': quote.speaker.name.startswith("Forfeit") if quote else False } if response == 'raw': subs['raw'] = True return render_to_response('quote.html', subs, context_instance=RequestContext(request)) else: return render_to_response('main.html', subs, context_instance=RequestContext(request)) def init(request): reset = request.GET.get("reset") == "true" episodes = qi.load(debug=True) added = [] for episode_name, episode_dict in episodes.items(): episode, created = Episode.objects.get_or_create(name=episode_name) # If reset is true, delete existing quotes for existing episode, otherwise ignore existing episodes. if created: episode.description = episode_dict['description'] episode.save() elif reset: episode.quote_set.all().delete() else: print "ignoring episode %s" % episode continue print episode if not 'transcript' in episode_dict: continue speaker_names = episode.speaker_names() number_lines = len(episode_dict['transcript']) previous_quote = None for line in range(0, number_lines): print line speaker_name, text = episode_dict['transcript'][line].split(":", 1) speaker = Speaker.objects.get_or_create(name=speaker_name, full_name=episode.full_speaker_name(speaker_name, speaker_names) or "")[0] quote = Quote(episode=episode, speaker=speaker, text=text) if previous_quote: quote.previous = previous_quote quote.save() previous_quote.next = quote previous_quote.save() else: quote.save() previous_quote = quote added.append(episode) return HttpResponse("ok, added episodes: %s" % added) def stats(request): subs = { 'episodes': Episode.objects.all(), 'speaker': Speaker.objects.all() } if 'episode' in request.GET: subs['episode'] = Episode.objects.get(pk=request.GET.get('episode')) return render_to_response('stats.html', subs, context_instance=RequestContext(request))
Python
0.00002
@@ -219,16 +219,24 @@ mplejson + as json %0Afrom dj @@ -612,17 +612,8 @@ se = - 'raw' if req @@ -639,30 +639,17 @@ nse' -) == 'raw' else +, 'html' +) %0A%0A @@ -2127,16 +2127,477 @@ quest))%0A + elif response == 'json':%0A data = json.dumps(%7B%0A 'quote_text': quote.text.strip(),%0A 'speaker': quote.speaker.name,%0A 'speaker_full': quote.speaker.full_name,%0A 'next': 'http://qiquotes.com/%25s' %25 quote.next.pk,%0A 'previous': 'http://qiquotes.com/%25s' %25 quote.previous.pk,%0A 'link': 'http://qiquotes.com/%25s' %25 quote.pk%0A %7D)%0A return HttpResponse(data, mimetype='application/json')%0A else
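Decoded, the patch imports simplejson as json, simplifies the response parsing to response = request.GET.get('response', 'html'), and adds a JSON branch after the existing 'raw' one:

    elif response == 'json':
        data = json.dumps({
            'quote_text': quote.text.strip(),
            'speaker': quote.speaker.name,
            'speaker_full': quote.speaker.full_name,
            'next': 'http://qiquotes.com/%s' % quote.next.pk,
            'previous': 'http://qiquotes.com/%s' % quote.previous.pk,
            'link': 'http://qiquotes.com/%s' % quote.pk
        })
        return HttpResponse(data, mimetype='application/json')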
dcd84fec03daee62f05a70b93753d88cb356f196
add skipping of empty lines
catdumps.py
catdumps.py
""" Concatenates dumps from a LAMMPS script. All dumps in the given LAMMPS script will be concatenated into single files separately, which are to be written in the current working directory. """ import argparse import re import os.path import glob def main(): """Drive the script.""" parser = argparse.ArgumentParser(description=globals()['__doc__']) parser.add_argument( 'input', type=argparse.FileType(mode='r'), metavar='INPUT', help='The LAMMPS input file whose dumps are to be concatenated.' ) args = parser.parse_args() dump_cater = DumpCater(args.input) args.input.close() dump_cater.cat_dumps() return 0 class DumpCater(object): """Concatenator of LAMMPS dump files.""" __slots__ = [ 'base_path', 'vars', 'dumps' ] def __init__(self, input_fp): """Initialize the concatenator from the input file object.""" self.base_path = os.path.dirname(input_fp.name) self.vars = {} self.dumps = [] for line in input_fp: fields = line.split() cmd = fields[0] if cmd == 'variable': self.vars[fields[1]] = fields[-1] elif cmd == 'dump': self.dumps.append( self.subst_vars(fields[-1]) ) else: pass # Skip all other lines. return def subst_vars(self, inp_str): """Substitute all variable references in the given string.""" var_ref = re.compile(r'\$\{(?P<name>\w*)\}') # The string is going to be substituted for variable reference # repeatedly. curr = inp_str while True: match = var_ref.search(curr) if not match: break else: var_name = match.group('name') try: curr = curr.replace( ''.join(['${', var_name, '}']), self.vars[var_name] ) except KeyError: print('Undefined variable {} in script!'.format(var_name)) continue return curr def cat_dumps(self): """Concatenates all the dumps in the input script.""" for dump in self.dumps: # Get all the file names and sort according to step number. file_names = sorted(glob.glob( os.path.join(self.base_path, dump) ), key=self.form_step_getter(dump)) with open(dump.replace('*', ''), 'w') as out_fp: for name in file_names: with open(name, 'r') as inp_fp: out_fp.write(inp_fp.read()) continue continue return @staticmethod def form_step_getter(dump): """Form the function to get the step number from a file name.""" patt = re.compile( dump.replace('.', r'\.').replace('*', r'(?P<step>\d+)') ) def get_step(name): """Get the step number from the file name.""" match = patt.search(os.path.basename(name)) return int(match.group('step')) return get_step if __name__ == '__main__': main()
Python
0.000035
@@ -1088,16 +1088,74 @@ split()%0A + if len(fields) == 0:%0A continue%0A
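Decoded, the patch inserts a guard right after line.split() in DumpCater.__init__. Previously a blank line in the LAMMPS script made fields an empty list, so fields[0] raised IndexError; the guard skips such lines. The patched loop:

        for line in input_fp:
            fields = line.split()
            if len(fields) == 0:
                continue
            cmd = fields[0]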
712f300bb3b735ad38c95334581523859825bf73
increment the right names
redbot/webui/ratelimit.py
redbot/webui/ratelimit.py
#!/usr/bin/env python """ Rate Limiting for RED, the Resource Expert Droid. """ from collections import defaultdict from configparser import SectionProxy from typing import Dict, Set, Union, Callable, TYPE_CHECKING from urllib.parse import urlsplit import thor.loop if TYPE_CHECKING: from redbot.webui import RedWebUi # pylint: disable=cyclic-import,unused-import class RateLimiter: limits = {} # type: Dict[str, int] counts = {} # type: Dict[str, Dict[str, int]] periods = {} # type: Dict[str, float] watching = set() # type: Set[str] running = False def __init__(self) -> None: self.loop = thor.loop def process(self, webui: "RedWebUi", error_response: Callable) -> None: """Enforce limits on webui.""" if not self.running: self.setup(webui.config) # enforce client limits client_id = webui.get_client_id() if client_id: try: self.increment("client_id", client_id) except RateLimitViolation: error_response( b"429", b"Too Many Requests", "Your client is over limit. Please try later.", "client over limit: %s" % client_id, ) raise ValueError # enforce origin limits origin = url_to_origin(webui.test_uri) if origin: try: self.increment("origin", origin) except RateLimitViolation: error_response( b"429", b"Too Many Requests", "Origin is over limit. Please try later.", "origin over limit: %s" % origin, ) raise ValueError def process_slack(self, webui: "RedWebUi") -> None: """Enforce limits on Slack.""" if not self.running: self.setup(webui.config) # enforce user limits user_id = webui.body_args.get("user_id", [""])[0].strip() if user_id: try: self.increment("slack_user_id", user_id) except RateLimitViolation: user_name = webui.body_args.get("user_name", ["unknown"])[0].strip() webui.error_log(f"slack user over limit: {user_name} ({user_id})") raise ValueError( "_You've hit the per-user request limit. Please try later._" ) else: webui.error_log("Can't find slack user id.") # enforce team limits team_id = webui.body_args.get("team_id", [""])[0].strip() if team_id: try: self.increment("slack_team_id", user_id) except RateLimitViolation: team_name = webui.body_args.get("team_name", ["unknown"])[0].strip() webui.error_log(f"slack team over limit: {team_name} ({team_id})") raise ValueError( "_You've hit the per-team request limit. Please try later._" ) else: webui.error_log("Can't find slack team id.") def setup(self, config: SectionProxy) -> None: """Set up the counters for config.""" client_limit = config.getint("limit_client_tests", fallback=0) if client_limit: client_period = config.getfloat("limit_client_period", fallback=1) * 3600 self._setup("client_id", client_limit, client_period) origin_limit = config.getint("limit_origin_tests", fallback=0) if origin_limit: origin_period = config.getfloat("limit_origin_period", fallback=1) * 3600 self._setup("origin", origin_limit, origin_period) slack_user_limit = config.getint("limit_slack_user_tests", fallback=0) if slack_user_limit: slack_user_period = ( config.getfloat("limit_slack_user_period", fallback=1) * 3600 ) self._setup("slack_user", slack_user_limit, slack_user_period) slack_team_limit = config.getint("limit_slack_team_tests", fallback=0) if slack_team_limit: slack_team_period = ( config.getfloat("limit_slack_team_period", fallback=1) * 3600 ) self._setup("slack_team", slack_team_limit, slack_team_period) self.running = True def _setup(self, metric_name: str, limit: int, period: float) -> None: """ Set up a metric with a limit and a period (expressed in hours). Can be called multiple times. 
""" if not metric_name in self.watching: self.limits[metric_name] = limit self.counts[metric_name] = defaultdict(int) self.periods[metric_name] = period self.loop.schedule(period, self.clear, metric_name) self.watching.add(metric_name) def increment(self, metric_name: str, discriminator: str) -> None: """ Increment a metric for a discriminator. If the metric isn't set up, it will be ignored. Raises RateLimitViolation if this discriminator is over the limit. """ if not metric_name in self.watching: return self.counts[metric_name][discriminator] += 1 if self.counts[metric_name][discriminator] > self.limits[metric_name]: raise RateLimitViolation def clear(self, metric_name: str) -> None: """ Clear a metric's counters. """ self.counts[metric_name] = defaultdict(int) self.loop.schedule(self.periods[metric_name], self.clear, metric_name) ratelimiter = RateLimiter() class RateLimitViolation(Exception): pass def url_to_origin(url: str) -> Union[str, None]: "Convert an URL to an RFC6454 Origin." default_port = {"http": 80, "https": 443} try: p_url = urlsplit(url) origin = "%s://%s:%s" % ( p_url.scheme.lower(), p_url.hostname.lower(), p_url.port or default_port.get(p_url.scheme, 0), ) except (AttributeError, ValueError): origin = None return origin
Python
0.999999
@@ -2123,19 +2123,16 @@ ack_user -_id %22, user_ @@ -2722,19 +2722,16 @@ ack_team -_id %22, user_
3475aee89ef5b22a92a674400ea37430f8255924
handle Appengine Datastore Key Type
huTools/hujson.py
huTools/hujson.py
#!/usr/bin/env python
# encoding: utf-8
"""
hujson.py - extended json - tries to be compatible with simplejson

hujson can encode additional types like decimal and datetime into valid json.
All the heavy lifting is done by John Millikin's `jsonlib`, see
https://launchpad.net/jsonlib

Created by Maximillian Dornseif on 2010-09-10.
Copyright (c) 2010 HUDORA. All rights reserved.
"""

from _jsonlib import UnknownSerializerError
import _jsonlib
import datetime


def _unknown_handler(value):
    if isinstance(value, datetime.date):
        return str(value)
    elif isinstance(value, datetime.datetime):
        return value.isoformat() + 'Z'
    elif hasattr(value, 'properties'):
        return dict([(key, getattr(value, key)) for key in value.properties().keys()])
    elif 'google.appengine.api.users.User' in str(type(value)):
        return "%s/%s" % (value.user_id(), value.email())
    raise UnknownSerializerError("%s(%s)" % (type(value), value))


def dumps(val):
    return _jsonlib.write(val, on_unknown=_unknown_handler, indent=' ')


def loads(data):
    return _jsonlib.read(data)
Python
0
@@ -887,16 +887,115 @@ mail())%0A + elif 'google.appengine.api.datastore_types.Key' in str(type(value)):%0A return str(value)%0A rais
530844a16a573ab49850a22631f97d8ad89465c9
Clean Up NLU state
sara_flexbe_states/src/sara_flexbe_states/sara_nlu_spr.py
sara_flexbe_states/src/sara_flexbe_states/sara_nlu_spr.py
#!/usr/bin/env python
# encoding=utf8
from __future__ import print_function
from flexbe_core import EventState, Logger
import rospy
import re
from wm_nlu.srv import AnswerQuestion
from std_msgs.msg import String


class SaraNLUspr(EventState):
    '''
    Use wm_nlu to parse a sentence and return the detected actions in a standard format (ActionForm)

    ># sentence     string      sentence to parse

    #> ActionForms  string[]    list of ActionForms

    <= understood        Finished job.
    <= not_understood    Finished job but no commands detected.
    <= fail              service unavailable.
    '''

    def __init__(self):
        # See example_state.py for basic explanations.
        super(SaraNLUspr, self).__init__(outcomes=['understood', 'not_understood', 'fail'], input_keys=['sentence'],
                                         output_keys=['answer'])
        self.RecurentSubject = None
        self.Person = None
        self.serviceName = "/answer_question"
        Logger.loginfo("waiting forservice: " + self.serviceName)
        rospy.wait_for_service(self.serviceName)

    def execute(self, userdata):

        # Call the NLU service
        serv = rospy.ServiceProxy(self.serviceName, AnswerQuestion)
        Resp = serv(String(userdata.sentence))

        # Checking the validity of the responce
        if Resp.str.data is "":
            userdata.answer = Resp.str
            return "fail"

        userdata.answer = Resp.str
        return "understood"

    def on_enter(self, userdata):
        Logger.loginfo('Enter SaraNLU')
Python
0
@@ -129,18 +129,8 @@ spy%0A -import re%0A from @@ -289,59 +289,15 @@ the -detected actions in a standard format (ActionForm)%0A +answer. %0A @@ -309,24 +309,28 @@ entence + string @@ -351,27 +351,27 @@ rse%0A -%0A #%3E -ActionForms +answer @@ -381,35 +381,20 @@ ring -%5B%5D -list of ActionForms +answer %0A%0A @@ -405,32 +405,34 @@ understood + Finished job.%0A @@ -453,18 +453,16 @@ stood - Finished @@ -734,16 +734,19 @@ ence'%5D,%0A + @@ -799,24 +799,25 @@ swer'%5D)%0A +%0A self.Rec @@ -812,76 +812,8 @@ -self.RecurentSubject = None%0A self.Person = None%0A self. serv @@ -890,21 +890,16 @@ ce: %22 + -self. serviceN @@ -940,177 +940,188 @@ e(se -lf.serviceName)%0A%0A def execute(self, userdata):%0A%0A # Call the NLU service%0A serv = rospy.ServiceProxy(self.serviceName, AnswerQuestion)%0A Resp = serv +rviceName)%0A%0A self.service = rospy.ServiceProxy(serviceName, AnswerQuestion)%0A%0A def execute(self, userdata):%0A%0A # Call the NLU service%0A response = self.service (Str @@ -1190,17 +1190,17 @@ e respon -c +s e%0A @@ -1204,20 +1204,24 @@ if -R +r esp +onse .str.dat @@ -1255,32 +1255,41 @@ ta.answer = -Resp.str +response.str.data %0A @@ -1334,16 +1334,25 @@ r = -Resp.str +response.str.data %0A @@ -1380,79 +1380,4 @@ od%22%0A -%0A def on_enter(self, userdata):%0A Logger.loginfo('Enter SaraNLU')%0A
142d3ebf66e31aad2363fc0c421dc573dc9b1157
Simplify current_service() function
ci/utils.py
ci/utils.py
# -*- coding: utf-8 -*-

"""This module defines functions generally useful in scikit-ci."""

import os

try:
    from .constants import SERVICES, SERVICES_ENV_VAR
except (SystemError, ValueError):
    from constants import SERVICES, SERVICES_ENV_VAR


def current_service():
    for service in SERVICES.keys():
        if os.environ.get(
                SERVICES_ENV_VAR[service], 'false').lower() == 'true':
            return service
    raise LookupError(
        "unknown service: None of the environment variables {} are set "
        "to 'true' or 'True'".format(", ".join(SERVICES_ENV_VAR.values()))
    )


def current_operating_system(service):
    return os.environ[SERVICES[service]] if SERVICES[service] else None
Python
0.000006
@@ -283,16 +283,25 @@ service +, env_var in SERV @@ -308,12 +308,21 @@ ICES -.key +_ENV_VAR.item s(): @@ -352,50 +352,15 @@ get( -%0A SERVICES_ENV_VAR%5Bservice%5D +env_var , 'f
9a7c0a07d14b81b134963a9459326ffdb53cf28d
Disable fe build
ci/zoeci.py
ci/zoeci.py
#!/usr/bin/env python3

# Copyright (c) 2016, Quang-Nhat Hoang-Xuan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""ZOE CI entry point."""

import yaml
import sys

from typing import Iterable, Callable, Dict, Any, Union

import docker
from docker import Client

from utils.DockerContainerParameter import DockerContainerParameter
from deploy.frontenddeploy import ZoeFrontendDeploy
from deploy.backenddeploy import ZoeBackendDeploy


class ZoeDeploy():
    def __init__(self, dockerUrl, dockerComposePath, image):
        self.currentImage = image
        self.typeDeploy = 1 if 'prod' in dockerComposePath else 0
        self.backend = ZoeBackendDeploy(dockerUrl, dockerComposePath)
        self.frontend = ZoeFrontendDeploy(dockerUrl, 'apache2')

    def deploy(self):
        """ Deploy zoe backend and frontend """
        try:
            retBE = self.backend.deploy(self.currentImage)
            print('Deployed BE with latest image...')
            if self.typeDeploy == 1 and retBE == 0:
                print('Redeploy BE with previous image')
                self.backend.deploy(self.backend.previousImage)

            retFE = 1
            if self.typeDeploy == 1:
                retFE = self.frontend.deploy()
                print('Deployed FE with latest codes...')
                if retFE == 0 or retBE == 0:
                    retFE = self.frontend.fallback()
        except Exception as ex:
            print(ex)
            retBE = 0
        return (retBE and retFE)


class ZoeImage():
    def __init__(self, dockerUrl, tag):
        self.cli = Client(base_url=dockerUrl)
        self.tag = tag

    def build(self):
        """ Build docker image """
        ret = 1
        for line in self.cli.build(path='.', tag=self.tag, rm=True):
            print(line)
            if 'error' in str(line):
                ret = 0
        return ret

    def push(self):
        """ Push docker image """
        ret = 1
        for line in self.cli.push(self.tag, stream=True):
            print(line)
            if 'error' in str(line):
                ret = 0
        return ret

if __name__ == '__main__':
    if len(sys.argv) < 4:
        sys.exit(1)
    else:
        if sys.argv[1] == '0':
            deployer = ZoeDeploy(sys.argv[2], sys.argv[3], sys.argv[4])
            ret = deployer.deploy()
            if ret == 0:
                sys.exit(1)
        elif sys.argv[1] == '1':
            imghandler = ZoeImage(sys.argv[2], sys.argv[3])
            ret = imghandler.build()
            if ret == 0:
                sys.exit(1)
        elif sys.argv[1] == '2':
            imghandler = ZoeImage(sys.argv[2], sys.argv[3])
            ret = imghandler.push()
            if ret == 0:
                sys.exit(1)
Python
0.000001
@@ -1682,32 +1682,33 @@ +# retFE = self.fro
84556b2480f7d49843a6e42edbc9415980837308
Remove an unused variable
eval/python/evaluate.py
eval/python/evaluate.py
import argparse
import numpy as np

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--vocab_file', default='vocab.txt', type=str)
    parser.add_argument('--vectors_file', default='vectors.txt', type=str)
    args = parser.parse_args()

    with open(args.vocab_file, 'r') as f:
        words = [x.rstrip().split(' ')[0] for x in f.readlines()]
    with open(args.vectors_file, 'r') as f:
        vectors = {}
        for line in f:
            vals = line.rstrip().split(' ')
            vectors[vals[0]] = [float(x) for x in vals[1:]]

    vocab_size = len(words)
    vocab = {w: idx for idx, w in enumerate(words)}
    ivocab = {idx: w for idx, w in enumerate(words)}

    vector_dim = len(vectors[ivocab[0]])
    W = np.zeros((vocab_size, vector_dim))
    for word, v in vectors.items():
        if word == '<unk>':
            continue
        W[vocab[word], :] = v

    # normalize each word vector to unit length
    W_norm = np.zeros(W.shape)
    d = (np.sum(W ** 2, 1) ** (0.5))
    W_norm = (W.T / d).T
    evaluate_vectors(W_norm, vocab, ivocab)

def evaluate_vectors(W, vocab, ivocab):
    """Evaluate the trained word vectors on a variety of tasks"""

    filenames = [
        'capital-common-countries.txt', 'capital-world.txt', 'currency.txt',
        'city-in-state.txt', 'family.txt', 'gram1-adjective-to-adverb.txt',
        'gram2-opposite.txt', 'gram3-comparative.txt', 'gram4-superlative.txt',
        'gram5-present-participle.txt', 'gram6-nationality-adjective.txt',
        'gram7-past-tense.txt', 'gram8-plural.txt', 'gram9-plural-verbs.txt',
        ]
    prefix = './eval/question-data/'

    # to avoid memory overflow, could be increased/decreased
    # depending on system and vocab size
    split_size = 100

    correct_sem = 0; # count correct semantic questions
    correct_syn = 0; # count correct syntactic questions
    correct_tot = 0 # count correct questions
    count_sem = 0; # count all semantic questions
    count_syn = 0; # count all syntactic questions
    count_tot = 0 # count all questions
    full_count = 0 # count all questions, including those with unknown words

    for i in range(len(filenames)):
        with open('%s/%s' % (prefix, filenames[i]), 'r') as f:
            full_data = [line.rstrip().split(' ') for line in f]
            full_count += len(full_data)
            data = [x for x in full_data if all(word in vocab for word in x)]

        if len(data) == 0:
            print("ERROR: no lines of vocab kept for %s !"
                  % filenames[i])
            print("Example missing line:", full_data[0])
            continue

        indices = np.array([[vocab[word] for word in row] for row in data])
        ind1, ind2, ind3, ind4 = indices.T

        predictions = np.zeros((len(indices),))
        num_iter = int(np.ceil(len(indices) / float(split_size)))
        for j in range(num_iter):
            subset = np.arange(j*split_size, min((j + 1)*split_size, len(ind1)))

            pred_vec = (W[ind2[subset], :] - W[ind1[subset], :]
                + W[ind3[subset], :])
            #cosine similarity if input W has been normalized
            dist = np.dot(W, pred_vec.T)

            for k in range(len(subset)):
                dist[ind1[subset[k]], k] = -np.Inf
                dist[ind2[subset[k]], k] = -np.Inf
                dist[ind3[subset[k]], k] = -np.Inf

            # predicted word index
            predictions[subset] = np.argmax(dist, 0).flatten()

        val = (ind4 == predictions) # correct predictions
        count_tot = count_tot + len(ind1)
        correct_tot = correct_tot + sum(val)
        if i < 5:
            count_sem = count_sem + len(ind1)
            correct_sem = correct_sem + sum(val)
        else:
            count_syn = count_syn + len(ind1)
            correct_syn = correct_syn + sum(val)

        print("%s:" % filenames[i])
        print('ACCURACY TOP1: %.2f%% (%d/%d)' %
              (np.mean(val) * 100, np.sum(val), len(val)))

    print('Questions seen/total: %.2f%% (%d/%d)' %
          (100 * count_tot / float(full_count), count_tot, full_count))
    print('Semantic accuracy: %.2f%% (%i/%i)' %
          (100 * correct_sem / float(count_sem), correct_sem, count_sem))
    print('Syntactic accuracy: %.2f%% (%i/%i)' %
          (100 * correct_syn / float(count_syn), correct_syn, count_syn))
    print('Total accuracy: %.2f%% (%i/%i)' %
          (100 * correct_tot / float(count_tot), correct_tot, count_tot))


if __name__ == "__main__":
    main()
Python
0.000071
@@ -1068,24 +1068,16 @@ m, vocab -, ivocab )%0A%0Adef e @@ -1104,16 +1104,8 @@ ocab -, ivocab ):%0A
037527c028e2b6326c930f4390cb754f110d6450
Fix bug in test helper function clear_clipboard().
clipocr1.py
clipocr1.py
""" clipocr1.py Reads an image from the system clipboard, and prints text recognized in the image, by using tesseract OCR. The one perhaps unusual trick to be found herein is resizing the image to larger sizes, which sometimes makes a screen capture easier for OCR code to process. SIDE EFFECT: Creates image files and text file in current working dir. REQUIREMENTS: Written and tested 2014 March, 2014 April on an Ubuntu 12.04 system (64-bit Intel) Relies on system having these python packages installed (it's ok to install them as Ubuntu/Debian packages): - wx for portable clipboard access. - PIL [can we make do with Pillow?] for rescaling the image NOTE: We might be able to get away with rewriting to use the right version(s) of wx for this instead? Relies on system having this software installed, e.g. as an Ubuntu/Debian package: - tesseract the OCR software. Conveniently, these packages are all open source. COPYRIGHT: Copyright (c) 2014 Chris Niswander. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ import os import Image # PIL import wx # just to access the system clipboard. def get_file_text(fname): """Reads the text out of the text file having pathname /fname/.""" with open(fname) as fin: return fin.read() def read_test1(fname): """Demonstrates OCRing the text from an image file named /fname/, and printing it to stdout. Makes multiple OCR attempts, based on resizing the image to different size image files, and prints multiple OCR attempts' text. """ def params_textname(params): """Given /params/, a resize method specification from resize_methods, constructs a text string that can be used in a filename for a resized/rescaled image. """ params = params[0][0], params[0][1], params[1] return '_'.join([str(x).strip() for x in params]) # do ocr on original, non-rescaled image. print 'ORIGINAL IMAGE:' print do_ocr_to_imagefile(fname) im1 = Image.open(fname) # List of image resizing methods to try. # Each method consists of: # [Rescale factor tuple, image rescaling method]. 
# A rescale factor tuple is (width-rescale-factor, height-rescale-factor) # Image rescaling method is given as eval()-able text because: # - convenient for naming image files produced using that method. resize_methods = [ [(2, 2), 'Image.BICUBIC'], [(2, 2), 'Image.BILINEAR'], [(3, 2), 'Image.BICUBIC'], [(3, 2), 'Image.BILINEAR'], [(3, 3), 'Image.BICUBIC'], [(3, 3), 'Image.BILINEAR'], ] for resize_method in resize_methods: rescale = resize_method[0] im_resized = im1.resize( (im1.size[0] * rescale[0], im1.size[1] * rescale[1]), (eval (resize_method[1]) )) resized_path = fname + '__' + params_textname(resize_method) + '.png' print resized_path im_resized.save(resized_path) print do_ocr_to_imagefile(resized_path) def do_ocr_to_imagefile(fname): """Runs tesseract command line utility on image file /fname/ and returns the perceived text. SIDE EFFECTS: Creates file 3.txt in current working directory. """ os.system('tesseract ' + fname + ' 3' ) # ^ OCR text from the file named /resized_path/, save the text to 3.txt. return get_file_text('3.txt') def save_clipboard(fname): """Saves an image from the system clipboard to the filename /fname/.""" app = wx.App() if not wx.TheClipboard: del app raise Exception("can't get clipboard") wx.TheClipboard.Open() data = wx.BitmapDataObject() clipboard_getdata_status = wx.TheClipboard.GetData(data) wx.TheClipboard.Close() if not clipboard_getdata_status: del app raise Exception("couldn't find image data in clipboard") image = data.GetBitmap().ConvertToImage() image.SaveFile(fname, 1) # 1 --> save as Windows bitmap. del app def clippy(): """Demonstrates OCRing the text from an image in the system clipboard, and printing it to stdout. Makes multiple OCR attempts, based on resizing the image to different sizes, and prints multiple OCR attempts' text. """ clippy_fname = 'image_from_clipboard' save_clipboard(clippy_fname) read_test1(clippy_fname) clippy() #--------------------------------------------------------------------------- # Test code not normally called, but tester might run it from e.g. IDE. def clear_clipboard(): """Clear the clipboard, which can be useful for error testing.""" app = wx.App() if not wx.TheClipboard: del app raise Exception("can't get clipboard") wx.TheClipboard.Open() wx.TheClipboard.Clear() wx.TheClipboard.Close() if not clipboard_getdata_status: del app raise Exception("couldn't find image data in clipboard") del app
Python
0
@@ -6193,116 +6193,8 @@ e()%0A - if not clipboard_getdata_status:%0A del app%0A raise Exception(%22couldn't find image data in clipboard%22)%0A de
89766874e7ef17bdce4cfa7cae9898336928c19e
Remove satellites from JSON
modules/gy-gps6mv1/core/get.py
modules/gy-gps6mv1/core/get.py
#! /usr/bin/python
# Written by Dan Mandle http://dan.mandle.me September 2012
# Modified by Broda Noel @brodanoel (in all social networks)
# License: GPL 2.0

from gps import *
from time import *
import time
import threading
import sys

gpsd = None #seting the global variable

class GpsPoller(threading.Thread):
  def __init__(self):
    threading.Thread.__init__(self)
    global gpsd #bring it in scope
    gpsd = gps(mode=WATCH_ENABLE) #starting the stream of info
    self.current_value = None
    self.running = True #setting the thread running to true

  def run(self):
    global gpsd
    while gpsp.running:
      gpsd.next() #this will continue to loop and grab EACH set of gpsd info to clear the buffer

if __name__ == '__main__':
  gpsp = GpsPoller() # create the thread
  try:
    gpsp.start() # start it up
    attempts = 0
    gotData = False
    while gotData == False and attempts < 3:
      #It may take a second or two to get good data
      if gpsd.fix.latitude != 0 or gpsd.fix.longitude != 0:
        gotData = True
        attempts += 1
        print '{'
        print 'latitude:', gpsd.fix.latitude, ','
        print 'longitude:', gpsd.fix.longitude, ','
        print 'time:', gpsd.fix.time, ','
        print 'utcTime:', gpsd.utc, ','
        print 'altitude:', gpsd.fix.altitude, ','
        print 'eps:', gpsd.fix.eps, ','
        print 'epx:', gpsd.fix.epx, ','
        print 'epv:', gpsd.fix.epv, ','
        print 'ept:', gpsd.fix.ept, ','
        print 'speed:', gpsd.fix.speed, ','
        print 'climb:', gpsd.fix.climb, ','
        print 'track:', gpsd.fix.track, ','
        print 'mode:', gpsd.fix.mode, ','
        print 'satellites:', gpsd.satellites
        print '}'
        sys.exit()
      else:
        time.sleep(1) #set to whatever

  except (KeyboardInterrupt, SystemExit): #when you press ctrl+c
    gpsp.running = False
    gpsp.join() # wait for the thread to finish what it's doing
Python
0.000003
@@ -1643,32 +1643,33 @@ de, ','%0A +# print 'satellite
3685715cd260f4f5ca392caddf7fb0c01af9ebcc
Add in comments for orgs and places too, remove limit
mzalendo/comments2/feeds.py
mzalendo/comments2/feeds.py
from disqus.wxr_feed import ContribCommentsWxrFeed
# from comments2.models import Comment
from core.models import Person
 

# http://help.disqus.com/customer/portal/articles/472150-custom-xml-import-format

class CommentWxrFeed(ContribCommentsWxrFeed):
    link = "/"

    def items(self):
        return Person.objects.all()[:5] # remove [:5] before generating full dump

    def item_pubdate(self, item):
        return item.created

    def item_description(self, item):
        return str(item)

    def item_guid(self, item):
        # set to none so that the output dsq:thread_identifier is empty
        return None

    def item_comments(self, item):
        return item.comments.all()

    def comment_user_name(self, comment):
        return str(comment.user)

    def comment_user_email(self, comment):
        return comment.user.email or str(comment.id) + '@bogus-email-address.com'

    def comment_user_url(self, comment):
        return None

    def comment_is_approved(self, comment):
        return 1
Python
0
@@ -113,17 +113,38 @@ t Person +, Place, Organisation %0A - %0A%0A# http @@ -315,81 +315,174 @@ -return Person.objects.all()%5B:5%5D # remove %5B:5%5D before generating full dump +list = %5B%5D%0A list.extend( Person.objects.all() )%0A list.extend( Organisation.objects.all() )%0A list.extend( Place.objects.all() )%0A return list %0A%0A
35c1b48864858ee4aa38ce39d69f52a7fbf73fc5
Allow specifying path to read dumps
classify.py
classify.py
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.base import TransformerMixin
from sklearn.dummy import DummyRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn import cross_validation
import numpy as np
import sys
import itertools
import linecache
import json
import os
import pickle
from glob import glob
from utils import Utilities

# Make it possible to use classifiers and regressors that want dense matrices
# as input with our TF.IDF vecotrizer transformer in the pipeline.
# Source:
# http://zacstewart.com/2014/08/05/pipelines-of-featureunions-of-pipelines.html
class DenseTransformer(TransformerMixin):
    def transform(self, X, y=None, **fit_params):
        return X.todense()

    def fit_transform(self, X, y=None, **fit_params):
        self.fit(X, y, **fit_params)
        return self.transform(X)

    def fit(self, X, y=None, **fit_params):
        return self

class Classifier(object):
    def __init__(self, group, model_file=""):
        self.dataset_name = "commit_comments-dump.2015-01-29"
        self.group = group
        self.display = (self.group == "id")
        self.model_file = model_file
        self.train_ids = set()

    def create_model(self, train=True, class_name=DummyRegressor, parameters={}, dense=False):
        trained = False
        if self.model_file != "" and os.path.isfile(self.model_file):
            with open(self.model_file, 'rb') as f:
                objects = pickle.load(f)
                models = objects[0:-1]
                models[0][1].tokenizer = Utilities.split
                self.train_ids = objects[-1][1]
                trained = True
        else:
            models = []
            models.append(('tfidf', TfidfVectorizer(input='content', tokenizer=Utilities.split)))
            if dense:
                models.append(('to_dense', DenseTransformer()))
            models.append(('clf', class_name(**parameters)))

        self.regressor = Pipeline(models)
        if not trained and train:
            self.train()
            if self.model_file != "":
                with open(self.model_file, 'wb') as f:
                    models[0][1].tokenizer = None
                    models.append(('train_ids', self.train_ids))
                    pickle.dump(models, f)
                    print("Wrote trained model to output file {}".format(self.model_file))

    def get_train_data(self):
        # Collect the training data
        train_data = []
        train_labels = []
        with open(self.dataset_name + ".labeled.json", 'r') as f:
            i = 0
            for data in Utilities.read_json(f, ['id','label'], self.group):
                i = i + 1
                score = Utilities.label_to_score(data["label"])
                if score is None: # unknown
                    continue

                line = linecache.getline(self.dataset_name + '.json', i)
                json_object = json.loads(line)
                if json_object['id'] != data['id']:
                    raise(ValueError('ID in label dataset does not match with dataset on line {}: {} vs {}'.format(i, data['id'], json_object['id'])))

                message = json_object['body'].replace('\r\n', '\n')
                self.train_ids.add(data['id'])
                train_data.append(message)
                train_labels.append(score)

        return (train_data, train_labels)

    def train(self):
        (train_data, train_labels) = self.get_train_data()

        # Train the regressor
        self.regressor.fit(train_data, train_labels)

    def cross_validate(self, folds=5):
        (train_data, train_labels) = self.get_train_data()

        # Crossvalidate the regressor on the labeled data
        return cross_validation.cross_val_score(self.regressor, train_data, train_labels, cv=folds)

    def output_cross_validate(self, folds=5):
        print('Performing cross-validation on {} folds'.format(folds))
        results = self.cross_validate(folds)
        print('Folds: {}'.format(results))
        print('Average: {}'.format(results.mean()))
        print('Standard deviation: {}'.format(results.std()))
        return results

    def split(self, data):
        if self.group != "score":
            self.test_group.append(data['group'])

        return data['message']

    def filter(self, data):
        return data['id'] not in self.train_ids

    def predict(self, file):
        self.test_group = []
        self.test_data = itertools.imap(self.split, itertools.ifilter(self.filter, Utilities.read_json(file, 'id', self.group)))
        if self.display:
            self.test_data = list(self.test_data)

        return self.regressor.predict(self.test_data)

    def output(self, predictions):
        for i in xrange(len(predictions)):
            group = self.test_group[i] if self.group != "score" else ""
            prediction = predictions[i]
            message = ""
            if self.display:
                message = "\t" + Utilities.get_colored_text(prediction, self.test_data[i]).replace('\n', ' ')

            g = "{}\t".format(group) if group != "" else ""
            print("{}{:.2f}{}".format(g, prediction, message))

def main(argv):
    group = argv[0] if len(argv) > 0 else "id"
    model_file = argv[1] if len(argv) > 1 else ""
    cv_folds = 0
    if model_file.isdigit():
        cv_folds = int(model_file) if model_file != '0' else 5
        model_file = ""

    algorithm_class = RandomForestRegressor
    algorithm_parameters = {
        'n_estimators': 100,
        'n_jobs': 2,
        'min_samples_split': 10
    }

    classifier = Classifier(group, model_file)
    classifier.create_model(train=not cv_folds, class_name=algorithm_class, parameters=algorithm_parameters)

    if cv_folds > 0:
        classifier.output_cross_validate(cv_folds)
    else:
        if sys.stdin.isatty():
            glob_pattern = 'commit_comments-dump.[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9].json'
            files = glob('[0-9]*/' + glob_pattern) + glob(glob_pattern)
            if not files:
                print("No commit comments JSON files found, cannot classify.")
            else:
                for name in files:
                    with open(name, 'rb') as file:
                        classifier.output(classifier.predict(file))
        else:
            classifier.output(classifier.predict(sys.stdin))

if __name__ == "__main__":
    main(sys.argv[1:])
Python
0
@@ -5331,24 +5331,68 @@ %3E 1 else %22%22%0A + path = argv%5B2%5D if len(argv) %3E 2 else %22%22%0A cv_folds @@ -5939,24 +5939,106 @@ n.isatty():%0A + if path != %22%22 and path%5B-1%5D != %22/%22:%0A path = path + %22/%22%0A%0A @@ -6148,16 +6148,23 @@ = glob( +path + '%5B0-9%5D*/ @@ -6188,16 +6188,23 @@ + glob( +path + glob_pat
f375bef2a4f98abdc1ded20474d6b2538d3ed8c3
Fix typo
airbag_program/runner.py
airbag_program/runner.py
from subprocess import Popen, PIPE, DEVNULL, TimeoutExpired
from airbag.status import ExitStatus
from os import environ


class ProgramTest(object):
    """docstring for Test"""
    def __init__(
        self,
        program,
        name='',
        arguments=[],
        expected=None,
        stdin=None,
        timeout=15,
        emptyenv=False,
        env=None,
        reference=None
    ):
        super(ProgramsTest, self).__init__()
        if program == '':
            raise ValueError('Missing program path')
        self.program = program
        self.name = name
        self.arguments = arguments
        self.expected = expected
        self.assertions = True
        self.input = stdin
        self.timeout = timeout
        self.reference = reference
        if emptyenv is True:
            self.env = env if env is not None else None
        else:
            self.env = environ.copy()
            if env is not None:
                for (k, v) in env:
                    self.env[k] = v

    def run(self):
        self.arguments.insert(0, self.program)
        stdout = DEVNULL
        if 'output' in self.expected.keys() and len(self.expected['output']):
            stdout = PIPE
        stderr = DEVNULL
        if 'errors' in self.expected.keys() and len(self.expected['errors']):
            stderr = PIPE
        stdin = None
        if self.input is not None:
            stdin = PIPE
            if self.input.startswith('file:'):
                self.input = open(self.input[5:], 'rb').read()
            else:
                self.input = bytes(self.input, 'utf-8')

        try:
            p = Popen(
                self.arguments,
                env=self.env,
                stdin=stdin,
                stdout=stdout,
                stderr=stderr
            )
        except FileNotFoundError:
            self.output('Couldn\'t find program {0}'.format(self.program))
            return ExitStatus.noexec
        except PermissionError:
            self.output('Couldn\'t execute program {0}'.format(self.program))
            return ExitStatus.noexec

        try:
            outs, errs = p.communicate(self.input, timeout=self.timeout)
        except TimeoutExpired:
            p.kill()
            if 'timeout' not in self.expected.keys():
                if self.expected['timeout'] is not True:
                    self.output('Exceeding {0}s timeout'.format(self.timeout))
                    return ExitStatus.timeout
            self.OK()
            return ExitStatus.ok

        if self.reference is not None and len(self.expected) is 0:
            pass

        if p.returncode < 0:
            self.output('Killed by signal {0}'.format(p.returncode * -1))
            return ExitStatus.killed

        if 'output' in self.expected.keys():
            if self.expected['output'].startswith('file:'):
                expected = open(self.expected['output'][5:], 'r').read()
            else:
                expected = self.expected['output']
            if outs is not None and outs.decode('utf-8') != expected:
                self.KO()
                print('\tStandard output differ')
                print('\tExpected:\n{0}'.format(expected))
                print('\tOutput:\n{0}'.format(outs.decode("utf-8")))

        if 'errors' in self.expected.keys():
            if self.expected['errors'].startswith('file:'):
                expected = open(self.expected['errors'][5:], 'r').read()
            else:
                expected = self.expected['errors']
            if errs.decode('utf-8') != expected:
                self.KO()
                print('\tStandard error differ')
                print('\tExpected:\n{0}'.format(expected))
                print('\tOutput:\n{0}'.format(errs.decode('utf-8')))

        if 'returncode' in self.expected.keys():
            if self.expected['returncode'] != p.returncode:
                self.KO()
                print('\tReturn codes differ')
                print('\tExpected: {0}'.format(self.expected['returncode']))
                print('\tReturned: {0}'.format(p.returncode))

        if self.assertions is True:
            self.OK()
            return ExitStatus.ok
        else:
            self.KO()
            return ExitStatus.finished

    def OK(self):
        self.output('OK')

    def KO(self):
        if self.assertions is True:
            self.assertions = False
        self.output('KO')

    def output(self, message):
        print('[{0}]{1}: {2}'.format(self.program, self.name, message))


def get_type():
    return 'program'
Python
0.999999
@@ -415,17 +415,16 @@ (Program -s Test, se
fe998a48be769f6a957611584145706b71385cc9
Fix airflow jobs check cmd for TriggererJob (#19185)
airflow/jobs/__init__.py
airflow/jobs/__init__.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
import airflow.jobs.backfill_job
import airflow.jobs.base_job
import airflow.jobs.local_task_job
import airflow.jobs.scheduler_job  # noqa
Python
0
@@ -912,16 +912,50 @@ uler_job +%0Aimport airflow.jobs.triggerer_job # noqa
d266de64cbcc7ed8672e9bb61cdb966870fccfdc
Use random.choice() & reduce len() duplication
alg_percentile_select.py
alg_percentile_select.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import random


def percentile_select(ls, k):
	"""Kth percentile selection algorithm.

	Just select the kth element, without caring about
	the relative ordering of the rest of them.
	The algorithm performs in place without allocating
	new memory for the three sublists using three pointers.

	Time complexity: O(n).
	"""
	v = random.sample(ls, 1)[0]
	idx_eq_v = [i for i, a in enumerate(ls) if a == v]
	idx_le_v = [i for i, a in enumerate(ls) if a < v]
	idx_ge_v = [i for i, a in enumerate(ls) if a > v]

	if k <= len(idx_le_v):
		le_v_ls = [ls[idx] for idx in idx_le_v]
		return percentile_select(le_v_ls, k)
	elif len(idx_le_v) < k <= len(idx_le_v) + len(idx_eq_v):
		return v
	elif k > len(idx_le_v) + len(idx_eq_v):
		ge_v_ls = [ls[idx] for idx in idx_ge_v]
		return percentile_select(ge_v_ls, k - len(idx_le_v) - len(idx_eq_v))


def main():
	n = 100
	ls = range(n)
	random.shuffle(ls)
	print('List: {}'.format(ls))
	print('Get median by selection:')
	print(percentile_select(ls, n // 2))
	print('Get min by selection:')
	print(percentile_select(ls, 1))
	print('Get max by selection:')
	print(percentile_select(ls, n))


if __name__ == '__main__':
	main()
Python
0.000677
@@ -456,24 +456,18 @@ dom. -sample(ls, 1)%5B0%5D +choice(ls) %0A%09id @@ -621,16 +621,36 @@ v%5D%0A -%0A%09if k %3C +%09n_le = len(idx_le_v)%0A%09n_eq = le @@ -655,21 +655,36 @@ len(idx_ -l e +q _v) +%0A%0A%09if k %3C= n_le :%0A%09%09le_v @@ -768,110 +768,65 @@ lif -len(idx_le_v) %3C k %3C= len(idx_le_v) + len(idx_eq_v):%0A%09%09return v%0A%09elif k %3E len(idx_le_v) + len(idx_eq_v) +n_le %3C k %3C= n_le + n_eq:%0A%09%09return v%0A%09elif k %3E n_le + n_eq :%0A%09%09 @@ -909,37 +909,19 @@ k - -len(idx_le_v) - len(idx_eq_v) +n_le - n_eq )%0A%0A%0A @@ -980,37 +980,8 @@ ls)%0A -%09print('List: %7B%7D'.format(ls)) %0A%09pr
d1c16f90ca86bc1bd11a81f021d8317a82902a69
print annotation
ui/app/models.py
ui/app/models.py
from . import db

class Spans(db.Model):
    __tablename__ = 'zipkin_spans'
    span_id = db.Column(db.Integer)
    parent_id = db.Column(db.Integer)
    trace_id = db.Column(db.Integer)
    span_name = db.Column(db.String(255))
    debug = db.Column(db.Integer)
    duration = db.Column(db.Integer)
    created_ts = db.Column(db.Integer)

    def __repr__(self):
        return '<Span %r>' % self.span_name


class Annotations(db.Model):
    __tablename__ = 'zipkin_annotations'
    span_id = db.Column(db.Integer)
    trace_id = db.Column(db.Integer)
    span_name = db.Column(db.String(255))
    service_name = db.Column(db.String(255))
    value = db.Column(db.Text)
    ipv4 = db.Column(db.Integer)
    port = db.Column(db.Integer)
    a_timestamp = db.Column(db.Integer)
    duration = db.Column(db.Integer)
Python
0.000009
@@ -786,28 +786,129 @@ ion = db.Column(db.Integer)%0A +%0A def __repr__(self):%0A return '%3CAnnotation %25r - %25r%3E' %25 (self.span_name, self.service_name)%0A
b38555ff465f59333f32c2bb556f6b7a236e288b
disable traceview for now
seabus/web/web.py
seabus/web/web.py
from flask import Flask
import oboe
from oboeware import OboeMiddleware
from seabus.web.blueprint import blueprint
from seabus.common.database import db
from seabus.web.socketio import socketio

def create_app(config=None):
    app = Flask(__name__)

    if config is not None:
        app.config.from_object('seabus.web.config.{}'.format(config))
    else:
        app.config.from_object('seabus.web.config.Dev')

    socketio.init_app(app)
    app.register_blueprint(blueprint)
    db.init_app(app)
    #TODO: tv_app = OboeMiddleware(app)
    return app
Python
0
@@ -501,15 +501,8 @@ %0A - #TODO: tv_
57b5aa4acb3878916368eeb24d62f1508c9a9a43
Remove useless staticmethods.
linguist/models/base.py
linguist/models/base.py
# -*- coding: utf-8 -*-
from django.db import models, IntegrityError
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible

from .. import settings


class TranslationManager(models.Manager):

    def get_object_translations(self, obj, language=None):
        """
        Shorcut method to retrieve translations for a given object.
        """
        lookup = {
            'identifier': obj.linguist_identifier,
            'object_id': obj.pk,
        }
        if language is not None:
            lookup['language'] = language
        return self.get_queryset().filter(**lookup)

    def delete_object_translations(self, obj, language=None):
        """
        Shortcut method to delete translations for a given object.
        """
        self.get_object_translations(obj, language).delete()

    def get_languages(self):
        """
        Returns all available languages.
        """
        return (self.get_queryset()
                    .values_list('language', flat=True)
                    .distinct()
                    .order_by('language'))

    @staticmethod
    def _sanitize_cached_translations(instances):
        """
        Sanitizes cache by assigning instance pk in object_id field.
        """
        for instance in instances:
            new_objects_keys = []
            for key in instance._linguist.translations:
                object_id = key.split('_')[1]  # identifier_objectid_language_fieldname
                if object_id == 'new-%s' % id(instance):
                    new_objects_keys.append(key)
            for key in new_objects_keys:
                parts = key.split('_')
                parts[1] = '%s' % instance.pk
                new_key = '_'.join(parts)
                cached_obj = instance._linguist.translations.get(key)
                cached_obj.object_id = '%s' % instance.pk
                instance._linguist.translations[new_key] = cached_obj
                del instance._linguist.translations[key]

            keys_to_remove = []
            for key, cached_obj in instance._linguist.translations.iteritems():
                if not cached_obj.field_value:
                    keys_to_remove.append(key)
            for key in keys_to_remove:
                del instance._linguist.translations[key]

        return instances

    @staticmethod
    def _filter_translations_to_save(instances):
        """
        Takes a list of model instances and returns a tuple
        ``(to_create, to_update)``.
        """
        to_create, to_update = [], []
        for instance in instances:
            for key, cached_obj in instance._linguist.translations.iteritems():
                if cached_obj.is_new:
                    to_create.append((key, cached_obj))
                else:
                    to_update.append((key, cached_obj))
        return (to_create, to_update)

    def _prepare_translations_to_save(self, to_create, to_update):
        """
        Prepare objects for bulk create and update.
        """
        create, update = [], []
        if to_create:
            for key, cached_obj in to_create:
                create.append(self.model(**cached_obj.attrs))
        if to_update:
            for key, cached_obj in to_update:
                update.append((cached_obj.lookup, cached_obj.attrs))
        return create, update

    def save_translations(self, instances):
        """
        Saves cached translations (cached in model instances as dictionaries).
        """
        if not isinstance(instances, (list, tuple)):
            instances = [instances]

        instances = self._sanitize_cached_translations(instances)
        to_create, to_update = self._filter_translations_to_save(instances)
        create_objects, update_objects = self._prepare_translations_to_save(to_create, to_update)

        created = True

        if create_objects:
            try:
                self.bulk_create(create_objects)
            except IntegrityError:
                created = False

        if update_objects:
            for key, cached_obj in to_update:
                self.filter(**cached_obj.lookup).update(**cached_obj.attrs)

        if created:
            for key, cached_obj in to_create:
                cached_obj.is_new = False


@python_2_unicode_compatible
class Translation(models.Model):
    """
    A Translation.
    """
    identifier = models.CharField(
        max_length=100,
        verbose_name=_('identifier'),
        help_text=_('The registered model identifier.'))

    object_id = models.IntegerField(
        verbose_name=_('The object ID'),
        null=True,
        help_text=_('The object ID of this translation'))

    language = models.CharField(
        max_length=10,
        verbose_name=_('language'),
        choices=settings.SUPPORTED_LANGUAGES,
        default=settings.DEFAULT_LANGUAGE,
        help_text=_('The language for this translation'))

    field_name = models.CharField(
        max_length=100,
        verbose_name=_('field name'),
        help_text=_('The model field name for this translation.'))

    field_value = models.TextField(
        verbose_name=_('field value'),
        null=True,
        help_text=_('The translated content for the field.'))

    objects = TranslationManager()

    class Meta:
        abstract = True
        app_label = 'linguist'
        verbose_name = _('translation')
        verbose_name_plural = _('translations')
        unique_together = (('identifier', 'object_id', 'language', 'field_name'),)

    def __str__(self):
        return '%s:%s:%s:%s' % (
            self.identifier,
            self.object_id,
            self.field_name,
            self.language)
Python
0.000002
@@ -1121,34 +1121,16 @@ age'))%0A%0A - @staticmethod%0A def @@ -1151,32 +1151,38 @@ ed_translations( +self, instances):%0A @@ -2377,26 +2377,8 @@ es%0A%0A - @staticmethod%0A @@ -2402,32 +2402,38 @@ lations_to_save( +self, instances):%0A
87fecc3c1ed8d25ce20159ff000e3c4228333642
Use whichever autocomplete we currently have, since it's better
taggit/forms.py
taggit/forms.py
import os
from cStringIO import StringIO as SIO

from django import forms
from django.forms.util import flatatt
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _

from taggit.utils import parse_tags, edit_string_for_tags, clean_tag_string


class TagWidget(forms.TextInput):
    tag_suggest = None

    def __init__(self, generate_tags=None):
        super(TagWidget, self).__init__()
        self.generate_tags = generate_tags
        if generate_tags is not None:
            from taggit.settings import TAGGIT_TAG_GENERATE_FUNC
            if TAGGIT_TAG_GENERATE_FUNC is None:
                raise LookupError('TAGGIT_TAG_GENERATE_FUNC is not defined!')

    def get_media(self):
        """
        A method used to dynamically generate the media property, since we
        may not have the urls ready at the time of import, and then the
        reverse() call would fail.
        """
        from django.forms.widgets import Media as _Media
        from django.core.urlresolvers import NoReverseMatch, reverse
        media = super(TagWidget, self).media
        try:
            media_url = reverse('taggit-static', kwargs={'path': ''})
        except NoReverseMatch:
            # Nothing to add
            pass
        else:
            media.add_js([os.path.join(media_url, 'js', 'tagit.js'),
                          os.path.join(media_url, 'js', 'jquery-ui.1.8.20.min.js'),
                          os.path.join(media_url, 'js', 'taggit.js')
                          ])
            media.add_css({'all': (
                os.path.join(media_url, 'css', 'tagit-dark-grey.css'),
            )})
        return media
    media = property(get_media)

    def render_values(self, tags, attrs):
        builder = []
        builder.append(u'<ul%s>' % flatatt(attrs))
        for t in tags:
            t_name = t.tag.name
            t_attr = {'tagValue':t_name, 'value': t_name}
            builder.append(u'<li%s>%s</li>' %(flatatt(t_attr),t_name))
        builder.append(u'</ul>')
        return ''.join(builder)

    def default_render(self, name, value, attrs):
        # Get the initial rendered box
        raw_value = value
        if value is not None and not isinstance(value, basestring):
            value = edit_string_for_tags([o.tag for o in value.select_related("tag")])
        return super(TagWidget, self).render(name, value, attrs)

    def render(self, name, value, attrs=None):
        if attrs is None:
            attrs = {}
        # Get the original input field, which we will hide on render
        attrs.update({'class': 'taggit-tags'})
        rendered = self.default_render(name, value, attrs)
        if self.generate_tags is not None:
            attrs.update({'data-tag-content-field': self.generate_tags});
        # We need to get rid of the id if it's in attrs
        if 'id' in attrs:
            attrs['data-field-id' ] = attrs.pop('id')
        tag_list = self.render_values(value or [], attrs)
        if self.generate_tags is not None:
            attrs = flatatt({'data-field': name,
                             'class':'taggit-tag-suggest',
                             'type': 'button'})
            tag_list += u"<button%s>Generate</button>" % attrs
        return mark_safe(rendered + tag_list)

    def _has_changed(self, initial, data):
        """
        Whether the input value has changed. Used for recording in
        django_admin_log.

        Because initial is passed as a queryset, and data is a string, we
        need to turn the former into a string and run the latter through a
        function which cleans it up and sorts the tags in it.
        """
        if initial is None:
            initial = ""
        elif hasattr(initial, 'select_related'):
            initial_vals = [o.tag for o in initial.select_related("tag")]
            initial = edit_string_for_tags(initial_vals)
        else:
            try:
                if len(initial) == 0:
                    initial = ""
                else:
                    initial = edit_string_for_tags(initial)
            except TypeError, ValueError:
                initial = ""
        data = clean_tag_string(data)
        return super(TagWidget, self)._has_changed(initial, data)


class TagField(forms.CharField):
    widget = TagWidget

    def clean(self, value):
        value = super(TagField, self).clean(value)
        value = ','.join(SIO(value))
        try:
            return parse_tags(value)
        except ValueError:
            raise forms.ValidationError(_("Please provide a comma-separated list of tags."))
Python
0.000001
@@ -1350,92 +1350,8 @@ '),%0A - os.path.join(media_url, 'js', 'jquery-ui.1.8.20.min.js'),%0A
1fb6dbd0fa674119d360a42cebdd77a421eae9f1
Add log message if core threshold is used in fallback policy
rasa/core/policies/fallback.py
rasa/core/policies/fallback.py
import json
import logging
import os

from typing import Any, List, Text

from rasa.core.actions.action import ACTION_LISTEN_NAME
from rasa.core import utils
from rasa.core.domain import Domain
from rasa.core.policies.policy import Policy
from rasa.core.trackers import DialogueStateTracker

logger = logging.getLogger(__name__)


class FallbackPolicy(Policy):
    """Policy which predicts fallback actions.

    A fallback can be triggered by a low confidence score on a
    NLU prediction or by a low confidence score on an action
    prediction.
    """

    @staticmethod
    def _standard_featurizer():
        return None

    def __init__(self,
                 priority: int = 3,
                 nlu_threshold: float = 0.3,
                 core_threshold: float = 0.3,
                 fallback_action_name: Text = "action_default_fallback"
                 ) -> None:
        """Create a new Fallback policy.

        Args:
            core_threshold: if NLU confidence threshold is met,
                predict fallback action with confidence `core_threshold`.
                If this is the highest confidence in the ensemble,
                the fallback action will be executed.
            nlu_threshold: minimum threshold for NLU confidence.
                If intent prediction confidence is lower than this,
                predict fallback action with confidence 1.0.
            fallback_action_name: name of the action to execute as a fallback
        """
        super(FallbackPolicy, self).__init__(priority=priority)

        self.nlu_threshold = nlu_threshold
        self.core_threshold = core_threshold
        self.fallback_action_name = fallback_action_name

    def train(self,
              training_trackers: List[DialogueStateTracker],
              domain: Domain,
              **kwargs: Any
              ) -> None:
        """Does nothing. This policy is deterministic."""

        pass

    def should_nlu_fallback(self,
                            nlu_confidence: float,
                            last_action_name: Text
                            ) -> bool:
        """Checks if fallback action should be predicted.

        Checks for:
        - predicted NLU confidence is lower than ``nlu_threshold``
        - last action is action listen
        """
        return (nlu_confidence < self.nlu_threshold and
                last_action_name == ACTION_LISTEN_NAME)

    def fallback_scores(self, domain, fallback_score=1.0):
        """Prediction scores used if a fallback is necessary."""

        result = [0.0] * domain.num_actions
        idx = domain.index_for_action(self.fallback_action_name)
        result[idx] = fallback_score

        return result

    def predict_action_probabilities(self,
                                     tracker: DialogueStateTracker,
                                     domain: Domain) -> List[float]:
        """Predicts a fallback action.

        The fallback action is predicted if the NLU confidence is low
        or no other policy has a high-confidence prediction.
        """

        nlu_data = tracker.latest_message.parse_data

        # if NLU interpreter does not provide confidence score,
        # it is set to 1.0 here in order
        # to not override standard behaviour
        nlu_confidence = nlu_data.get("intent", {}).get("confidence", 1.0)

        if tracker.latest_action_name == self.fallback_action_name:
            result = [0.0] * domain.num_actions
            idx = domain.index_for_action(ACTION_LISTEN_NAME)
            result[idx] = 1.0

        elif self.should_nlu_fallback(nlu_confidence,
                                      tracker.latest_action_name):
            logger.debug("NLU confidence {} is lower "
                         "than NLU threshold {}. "
                         "".format(nlu_confidence, self.nlu_threshold))
            result = self.fallback_scores(domain)

        else:
            # NLU confidence threshold is met, so
            # predict fallback action with confidence `core_threshold`
            # if this is the highest confidence in the ensemble,
            # the fallback action will be executed.
            result = self.fallback_scores(domain, self.core_threshold)

        return result

    def persist(self, path: Text) -> None:
        """Persists the policy to storage."""

        config_file = os.path.join(path, 'fallback_policy.json')
        meta = {
            "priority": self.priority,
            "nlu_threshold": self.nlu_threshold,
            "core_threshold": self.core_threshold,
            "fallback_action_name": self.fallback_action_name
        }
        utils.create_dir_for_file(config_file)
        utils.dump_obj_as_json_to_file(config_file, meta)

    @classmethod
    def load(cls, path: Text) -> 'FallbackPolicy':
        meta = {}
        if os.path.exists(path):
            meta_path = os.path.join(path, "fallback_policy.json")
            if os.path.isfile(meta_path):
                meta = json.loads(utils.read_file(meta_path))

        return cls(**meta)
Python
0.000001
@@ -4142,32 +4142,208 @@ ll be executed.%0A + logger.debug(%22Predict fallback action with confidence %22%0A %22'core_threshold' (%7B%7D). %22%0A %22%22.format(self.core_threshold))%0A resu
b5241e62cb7cc09b5d469f1cf3908fa1d7cedc21
Tweak the settings.
gobble/settings.py
gobble/settings.py
"""User configurable settings""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from future import standard_library standard_library.install_aliases() from os import getenv from logging import DEBUG, INFO from os.path import expanduser, join, abspath _home = abspath(join(expanduser('~'))) class Production(object): CONSOLE_LOG_LEVEL = INFO FILE_LOG_LEVEL = DEBUG FILE_LOG_FORMAT = '[%(asctime)s] [%(module)s] [%(levelname)s] %(message)s' CONSOLE_LOG_FORMAT = '[%(name)s] [%(module)s] [%(levelname)s] %(message)s' OS_URL = 'http://next.openspending.org' DATAPACKAGE_DETECTION_THRESHOLD = 1 VALIDATION_FEEDBACK_OPTIONS = ['message'] DATAFILE_HASHING_BLOCK_SIZE = 65536 CONFIG_DIR = join(_home, '.gobble') CONFIG_FILE = join(_home, '.gobble', 'settings.json') TOKEN_FILE = join(_home, '.gobble', 'token.json') LOG_FILE = join(_home, '.gobble', 'user.log') MOCK_REQUESTS = False LOCALHOST = ('127.0.0.1', 8001) class Development(Production): CONSOLE_LOG_LEVEL = DEBUG FILE_LOG_LEVEL = None LOG_FILE = None OS_URL = 'http://dev.openspending.org' CONFIG_DIR = join(_home, '.gobble.dev') CONFIG_FILE = join(_home, '.gobble.dev', 'config.json') TOKEN_FILE = join(_home, '.gobble.dev', 'token.json') MOCK_REQUESTS = bool(getenv('GOBBLE_MOCK_REQUESTS', False)) class Testing(Production): MOCK_REQUESTS = True
Python
0
@@ -418,16 +418,66 @@ bject):%0A + JSON_INDENT = 4%0A EXPANDED_LOG_STYLE = True%0A CONS @@ -492,20 +492,21 @@ LEVEL = -INFO +DEBUG %0A FIL @@ -636,37 +636,24 @@ '%5B%25(name)s%5D - %5B%25(module)s%5D %5B%25(levelnam @@ -1482,16 +1482,313 @@ False))%0A + CONSOLE_LOG_FORMAT = ('%5B%25(name)s%5D '%0A '%5B%25(asctime)s%5D '%0A '%5B%25(module)s%5D '%0A '%5B%25(funcName)s%5D '%0A '%5B%25(lineno)d%5D '%0A '%5B%25(levelname)s%5D '%0A '%25(message)s')%0A %0A%0Aclass
2050017ced613f5c0282dcfaf07494b8dbcc8e41
Update ipc_lista2.05.py
lista2/ipc_lista2.05.py
lista2/ipc_lista2.05.py
#ipc_lista2.05
#Professor: Jucimar Junior
#Any Mendes Carvalho - 1615310044
#
#
#
#
#Faça um programa para a leitura de duas notas parciais de um aluno. O programa deve calcular a média alcançada por aluno e apresentar:
#--A mensagem "Aprovado", se a média alcançada for maior ou igual a sete;
#--A mensagem "Reprovado", se a média for menor que sete;
#--A mensagem "Aprovado com Distincao", se a média for igual a dez.

n1 = int(input("Insira a primeira nota: "))
n2 = int(input("Insira a segunda nota: "))
media = (n1+n2)
Python
0
@@ -517,9 +517,11 @@ (n1+n2) +/2 %0A
e1fb17476770620546d0bd244b35591b99ba6ea7
Revert 7392f01f for pkg_resources/extern. 3.3 is the right signal there.
pkg_resources/extern/__init__.py
pkg_resources/extern/__init__.py
import sys


class VendorImporter:
    """
    A PEP 302 meta path importer for finding optionally-vendored
    or otherwise naturally-installed packages from root_name.
    """

    def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
        self.root_name = root_name
        self.vendored_names = set(vendored_names)
        self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')

    @property
    def search_path(self):
        """
        Search first the vendor package then as a natural package.
        """
        yield self.vendor_pkg + '.'
        yield ''

    def find_module(self, fullname, path=None):
        """
        Return self when fullname starts with root_name and the
        target module is one vendored through this importer.
        """
        root, base, target = fullname.partition(self.root_name + '.')
        if root:
            return
        if not any(map(target.startswith, self.vendored_names)):
            return
        return self

    def load_module(self, fullname):
        """
        Iterate over the search path to locate and load fullname.
        """
        root, base, target = fullname.partition(self.root_name + '.')
        for prefix in self.search_path:
            try:
                extant = prefix + target
                __import__(extant)
                mod = sys.modules[extant]
                sys.modules[fullname] = mod
                # mysterious hack:
                # Remove the reference to the extant package/module
                # on later Python versions to cause relative imports
                # in the vendor package to resolve the same modules
                # as those going through this importer.
                if sys.version_info.major >= 3:
                    del sys.modules[extant]
                return mod
            except ImportError:
                pass
        else:
            raise ImportError(
                "The '{target}' package is required; "
                "normally this is bundled with this package so if you get "
                "this warning, consult the packager of your "
                "distribution.".format(**locals())
            )

    def install(self):
        """
        Install this importer into sys.meta_path if not already present.
        """
        if self not in sys.meta_path:
            sys.meta_path.append(self)


names = 'packaging', 'pyparsing', 'six', 'appdirs'
VendorImporter(__name__, names).install()
Python
0
@@ -1753,19 +1753,17 @@ info -.major %3E= 3 + %3E (3, 3) :%0A
f3da1fab9af2279182a09922aae00fcee73a92ee
Fix imports for Django >= 1.6
goog/middleware.py
goog/middleware.py
from django.conf import settings
from django.conf.urls.defaults import patterns, include

import goog.urls
from goog import utils


class GoogDevelopmentMiddleware(object):

    def devmode_enabled(self, request):
        """Returns True iff the devmode is enabled."""
        return utils.is_devmode()

    def process_request(self, request):
        # This urlconf patching is inspired by debug_toolbar.
        # https://github.com/robhudson/django-debug-toolbar
        if self.devmode_enabled(request):
            original_urlconf = getattr(request, 'urlconf',
                                       settings.ROOT_URLCONF)
            if original_urlconf != 'goog.urls':
                goog.urls.urlpatterns += patterns(
                    '',
                    ('', include(original_urlconf)),
                )
                request.urlconf = 'goog.urls'
Python
0
@@ -26,16 +26,25 @@ ettings%0A +try:%0A from dja @@ -90,16 +90,104 @@ include +%0Aexcept ImportError: # Django %3E= 1.6%0A from django.conf.urls import patterns, include %0A%0Aimport
473d4c16d7865e608bc3732ba82e448e245d8339
Version Bump
littlepython/version.py
littlepython/version.py
version = '0.4.8'
Python
0.000001
@@ -12,7 +12,7 @@ 0.4. -8 +9 '%0A