file_name (large_string, lengths 4 to 140) | prefix (large_string, lengths 0 to 12.1k) | suffix (large_string, lengths 0 to 12k) | middle (large_string, lengths 0 to 7.51k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
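Each row below is one fill-in-the-middle (FIM) example: a source file split into a `prefix`, the held-out `middle`, and a `suffix`, with `fim_type` recording how the split point was chosen (the four classes appearing in the data are `identifier_body`, `identifier_name`, `conditional_block`, and `random_line_split`). As a rough sketch of how such a row could be consumed — the sentinel strings and the abridged `row` literal are illustrative assumptions, not values defined by this dataset:

```python
# Illustrative sketch only: reassembling a row with the columns above and
# turning it into a FIM prompt/target pair. The sentinel strings are
# placeholders; real FIM setups define their own special tokens.

def reconstruct(row: dict) -> str:
    # The original file is simply the concatenation prefix + middle + suffix.
    return row["prefix"] + row["middle"] + row["suffix"]

def to_fim_pair(row: dict,
                pre: str = "<fim_prefix>",
                suf: str = "<fim_suffix>",
                mid: str = "<fim_middle>"):
    # The model is shown the prefix and suffix and must predict the middle.
    prompt = pre + row["prefix"] + suf + row["suffix"] + mid
    return prompt, row["middle"]

# Hypothetical, heavily abridged version of the first row (config.py, identifier_body):
row = {
    "file_name": "config.py",
    "prefix": "class DevelopmentConfig(Config):\n    ",
    "middle": "DEVELOPMENT = True\n    DEBUG = True",
    "suffix": "\n\nclass TestingConfig(Config):\n    TESTING = True",
    "fim_type": "identifier_body",
}
prompt, target = to_fim_pair(row)
assert reconstruct(row).startswith("class DevelopmentConfig")
```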
config.py | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
DEBUG = False
TESTING = False
CSRF_ENABLED = True
SECRET_KEY = os.getenv('APP_SECRET_KEY', '')
# db config
DB_PORT = os.getenv('DB_PORT', '')
DB_HOST = os.getenv('DB_HOST', '')
DB_ROLE = os.getenv('DB_ROLE', '')
# TODO: abstract auth stuff to kubernetes manifests
DB_PASSWORD = os.getenv('DB_PASSWORD', '')
DB_NAME = os.getenv('DB_NAME', '')
SQLALCHEMY_DATABASE_URI = 'postgresql://{}:{}@{}:{}/{}'.format(
DB_ROLE, DB_PASSWORD, DB_HOST, str(DB_PORT), DB_NAME)
class ProductionConfig(Config):
DEBUG = False
class StagingConfig(Config):
DEVELOPMENT = True
DEBUG = True
class DevelopmentConfig(Config):
|
class TestingConfig(Config):
TESTING = True
| DEVELOPMENT = True
DEBUG = True | identifier_body |
project_compassion.py | ##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <[email protected]>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo import models
from ..tools.wp_sync import WPSync
class CompassionProject(models.Model):
| _inherit = "compassion.project"
def suspend_funds(self):
""" Remove children from the website when FCP Suspension occurs. """
children = self.env["compassion.child"].search(
[("project_id", "in", self.ids), ("state", "=", "I")]
)
if children:
wp_config = self.env["wordpress.configuration"].get_config()
wp = WPSync(wp_config)
wp.remove_children(children)
return super(CompassionProject, self).suspend_funds() | identifier_body |
|
project_compassion.py | ##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <[email protected]>
#
# The licence is in the file __manifest__.py
#
############################################################################## |
class CompassionProject(models.Model):
_inherit = "compassion.project"
def suspend_funds(self):
""" Remove children from the website when FCP Suspension occurs. """
children = self.env["compassion.child"].search(
[("project_id", "in", self.ids), ("state", "=", "I")]
)
if children:
wp_config = self.env["wordpress.configuration"].get_config()
wp = WPSync(wp_config)
wp.remove_children(children)
return super(CompassionProject, self).suspend_funds() | from odoo import models
from ..tools.wp_sync import WPSync
| random_line_split |
project_compassion.py | ##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <[email protected]>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo import models
from ..tools.wp_sync import WPSync
class CompassionProject(models.Model):
_inherit = "compassion.project"
def suspend_funds(self):
""" Remove children from the website when FCP Suspension occurs. """
children = self.env["compassion.child"].search(
[("project_id", "in", self.ids), ("state", "=", "I")]
)
if children:
|
return super(CompassionProject, self).suspend_funds()
| wp_config = self.env["wordpress.configuration"].get_config()
wp = WPSync(wp_config)
wp.remove_children(children) | conditional_block |
project_compassion.py | ##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <[email protected]>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo import models
from ..tools.wp_sync import WPSync
class CompassionProject(models.Model):
_inherit = "compassion.project"
def | (self):
""" Remove children from the website when FCP Suspension occurs. """
children = self.env["compassion.child"].search(
[("project_id", "in", self.ids), ("state", "=", "I")]
)
if children:
wp_config = self.env["wordpress.configuration"].get_config()
wp = WPSync(wp_config)
wp.remove_children(children)
return super(CompassionProject, self).suspend_funds()
| suspend_funds | identifier_name |
example.ts | // βͺ Initialization
let canvas = document.getElementById('game') as HTMLCanvasElement;
var gl = canvas.getContext('webgl');
if (!gl) {
throw new Error('Could not create WebGL Context!');
}
// π² Create NDC Space Quad (attribute vec2 position)
let ndcQuad = [ 1.0, -1.0, -1.0, -1.0, 1.0, 1.0, -1.0, 1.0 ];
let indices = [ 0, 1, 2, 1, 2, 3 ];
// Create Buffers
let dataBuffer = gl.createBuffer();
let indexBuffer = gl.createBuffer();
// Bind Data/Indices to Buffers
gl.bindBuffer(gl.ARRAY_BUFFER, dataBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(ndcQuad), gl.STATIC_DRAW);
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, indexBuffer);
gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, new Uint16Array(indices), gl.STATIC_DRAW);
function createProgram(vsSource: string, fsSource: string) {
let vs = gl.createShader(gl.VERTEX_SHADER);
gl.shaderSource(vs, vsSource);
gl.compileShader(vs);
if (!gl.getShaderParameter(vs, gl.COMPILE_STATUS)) {
console.error('An error occurred compiling the shader: ' + gl.getShaderInfoLog(vs));
}
let fs = gl.createShader(gl.FRAGMENT_SHADER);
gl.shaderSource(fs, fsSource);
gl.compileShader(fs);
if (!gl.getShaderParameter(fs, gl.COMPILE_STATUS)) {
console.error('An error occurred compiling the shader: ' + gl.getShaderInfoLog(fs));
}
let program = gl.createProgram();
gl.attachShader(program, vs);
gl.attachShader(program, fs);
gl.linkProgram(program);
if (!gl.getProgramParameter(program, gl.LINK_STATUS)) {
console.error(gl.getProgramInfoLog(program));
}
return { vs, fs, program };
}
let vs = `
attribute vec2 aPosition;
varying vec2 vFragCoord;
void main()
{
vFragCoord = (0.5 * aPosition) + vec2(0.5, 0.5);
vFragCoord.y = 1.0 - vFragCoord.y;
gl_Position = vec4(aPosition, 0.0, 1.0);
}
`;
let fs = `
precision mediump float;
varying vec2 vFragCoord;
uniform sampler2D tBottomLayer;
uniform sampler2D tTopLayer;
// π
Color Dodge
vec4 colorDodge(vec4 col, vec4 blend)
{
return vec4(mix(col.rgb / clamp(1.0 - blend.rgb, 0.00001, 1.0), col.rgb, blend.a), col.a);
}
void main()
{
vec2 uv = vFragCoord;
vec4 outColor = vec4(0.0, 0.0, 0.0, 0.0);
vec4 bottomColor = texture2D(tBottomLayer, uv);
vec4 topColor = texture2D(tTopLayer, uv);
outColor = colorDodge(bottomColor, topColor);
gl_FragColor = outColor;
}
`;
let { program } = createProgram(vs, fs);
// πΌοΈ Load Textures
function loadTexture(url: string) {
let tex = | ayer = loadTexture('https://alain.xyz/blog/image-editor-effects/assets/cover.jpg');
let topLayer = loadTexture('https://alain.xyz/blog/unreal-engine-architecture/assets/cover.png');
// π Draw
function draw() {
// Bind Shaders
gl.useProgram(program);
// Bind Vertex Layout
let loc = gl.getAttribLocation(program, 'aPosition');
gl.vertexAttribPointer(loc, 2, gl.FLOAT, false, 4 * 2, 0);
gl.enableVertexAttribArray(loc);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
// Bind Uniforms
var shaderTexNumber = 0;
let bottomLayerLoc = gl.getUniformLocation(program, 'tBottomLayer');
gl.uniform1i(bottomLayerLoc, shaderTexNumber);
gl.activeTexture(gl.TEXTURE0 + shaderTexNumber);
gl.bindTexture(gl.TEXTURE_2D, bottomLayer);
shaderTexNumber++;
let topLayerLoc = gl.getUniformLocation(program, 'tTopLayer');
gl.uniform1i(topLayerLoc, shaderTexNumber);
gl.activeTexture(gl.TEXTURE0 + shaderTexNumber);
gl.bindTexture(gl.TEXTURE_2D, topLayer);
gl.drawElements(gl.TRIANGLES, 6, gl.UNSIGNED_SHORT, 0);
}
let resizeHandler = () => {
canvas.width = innerWidth;
canvas.height = innerHeight;
gl.viewport(0, 0, innerWidth, innerHeight);
draw();
};
window.addEventListener('resize', resizeHandler);
resizeHandler();
function update()
{
draw();
requestAnimationFrame(update)
}
requestAnimationFrame(update); | gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, tex);
const pixel = new Uint8Array([ 0, 0, 0, 255 ]);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, 1, 1, 0, gl.RGBA, gl.UNSIGNED_BYTE, pixel);
let img = new Image();
img.src = url;
img.onload = () => {
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, img);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.generateMipmap(gl.TEXTURE_2D);
};
return tex;
}
let bottomL | identifier_body |
example.ts | // βͺ Initialization
let canvas = document.getElementById('game') as HTMLCanvasElement;
var gl = canvas.getContext('webgl');
if (!gl) {
throw new Error('Could not create WebGL Context!');
}
// π² Create NDC Space Quad (attribute vec2 position)
let ndcQuad = [ 1.0, -1.0, -1.0, -1.0, 1.0, 1.0, -1.0, 1.0 ];
let indices = [ 0, 1, 2, 1, 2, 3 ];
// Create Buffers
let dataBuffer = gl.createBuffer();
let indexBuffer = gl.createBuffer();
// Bind Data/Indices to Buffers
gl.bindBuffer(gl.ARRAY_BUFFER, dataBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(ndcQuad), gl.STATIC_DRAW);
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, indexBuffer);
gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, new Uint16Array(indices), gl.STATIC_DRAW);
function createProgram(vsSource: string, fsSource: string) {
let vs = gl.createShader(gl.VERTEX_SHADER);
gl.shaderSource(vs, vsSource);
gl.compileShader(vs);
if (!gl.getShaderParameter(vs, gl.COMPILE_STATUS)) {
c | t fs = gl.createShader(gl.FRAGMENT_SHADER);
gl.shaderSource(fs, fsSource);
gl.compileShader(fs);
if (!gl.getShaderParameter(fs, gl.COMPILE_STATUS)) {
console.error('An error occurred compiling the shader: ' + gl.getShaderInfoLog(fs));
}
let program = gl.createProgram();
gl.attachShader(program, vs);
gl.attachShader(program, fs);
gl.linkProgram(program);
if (!gl.getProgramParameter(program, gl.LINK_STATUS)) {
console.error(gl.getProgramInfoLog(program));
}
return { vs, fs, program };
}
let vs = `
attribute vec2 aPosition;
varying vec2 vFragCoord;
void main()
{
vFragCoord = (0.5 * aPosition) + vec2(0.5, 0.5);
vFragCoord.y = 1.0 - vFragCoord.y;
gl_Position = vec4(aPosition, 0.0, 1.0);
}
`;
let fs = `
precision mediump float;
varying vec2 vFragCoord;
uniform sampler2D tBottomLayer;
uniform sampler2D tTopLayer;
// π
Color Dodge
vec4 colorDodge(vec4 col, vec4 blend)
{
return vec4(mix(col.rgb / clamp(1.0 - blend.rgb, 0.00001, 1.0), col.rgb, blend.a), col.a);
}
void main()
{
vec2 uv = vFragCoord;
vec4 outColor = vec4(0.0, 0.0, 0.0, 0.0);
vec4 bottomColor = texture2D(tBottomLayer, uv);
vec4 topColor = texture2D(tTopLayer, uv);
outColor = colorDodge(bottomColor, topColor);
gl_FragColor = outColor;
}
`;
let { program } = createProgram(vs, fs);
// πΌοΈ Load Textures
function loadTexture(url: string) {
let tex = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, tex);
const pixel = new Uint8Array([ 0, 0, 0, 255 ]);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, 1, 1, 0, gl.RGBA, gl.UNSIGNED_BYTE, pixel);
let img = new Image();
img.src = url;
img.onload = () => {
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, img);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.generateMipmap(gl.TEXTURE_2D);
};
return tex;
}
let bottomLayer = loadTexture('https://alain.xyz/blog/image-editor-effects/assets/cover.jpg');
let topLayer = loadTexture('https://alain.xyz/blog/unreal-engine-architecture/assets/cover.png');
// π Draw
function draw() {
// Bind Shaders
gl.useProgram(program);
// Bind Vertex Layout
let loc = gl.getAttribLocation(program, 'aPosition');
gl.vertexAttribPointer(loc, 2, gl.FLOAT, false, 4 * 2, 0);
gl.enableVertexAttribArray(loc);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
// Bind Uniforms
var shaderTexNumber = 0;
let bottomLayerLoc = gl.getUniformLocation(program, 'tBottomLayer');
gl.uniform1i(bottomLayerLoc, shaderTexNumber);
gl.activeTexture(gl.TEXTURE0 + shaderTexNumber);
gl.bindTexture(gl.TEXTURE_2D, bottomLayer);
shaderTexNumber++;
let topLayerLoc = gl.getUniformLocation(program, 'tTopLayer');
gl.uniform1i(topLayerLoc, shaderTexNumber);
gl.activeTexture(gl.TEXTURE0 + shaderTexNumber);
gl.bindTexture(gl.TEXTURE_2D, topLayer);
gl.drawElements(gl.TRIANGLES, 6, gl.UNSIGNED_SHORT, 0);
}
let resizeHandler = () => {
canvas.width = innerWidth;
canvas.height = innerHeight;
gl.viewport(0, 0, innerWidth, innerHeight);
draw();
};
window.addEventListener('resize', resizeHandler);
resizeHandler();
function update()
{
draw();
requestAnimationFrame(update)
}
requestAnimationFrame(update); | onsole.error('An error occurred compiling the shader: ' + gl.getShaderInfoLog(vs));
}
le | conditional_block |
example.ts | // βͺ Initialization
let canvas = document.getElementById('game') as HTMLCanvasElement;
var gl = canvas.getContext('webgl');
if (!gl) {
throw new Error('Could not create WebGL Context!');
}
// π² Create NDC Space Quad (attribute vec2 position)
let ndcQuad = [ 1.0, -1.0, -1.0, -1.0, 1.0, 1.0, -1.0, 1.0 ];
let indices = [ 0, 1, 2, 1, 2, 3 ];
// Create Buffers
let dataBuffer = gl.createBuffer();
let indexBuffer = gl.createBuffer();
// Bind Data/Indices to Buffers
gl.bindBuffer(gl.ARRAY_BUFFER, dataBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(ndcQuad), gl.STATIC_DRAW);
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, indexBuffer);
gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, new Uint16Array(indices), gl.STATIC_DRAW);
function createProgram(vsSource: string, fsSource: string) {
let vs = gl.createShader(gl.VERTEX_SHADER);
gl.shaderSource(vs, vsSource);
gl.compileShader(vs);
if (!gl.getShaderParameter(vs, gl.COMPILE_STATUS)) {
console.error('An error occurred compiling the shader: ' + gl.getShaderInfoLog(vs));
}
let fs = gl.createShader(gl.FRAGMENT_SHADER);
gl.shaderSource(fs, fsSource);
gl.compileShader(fs);
if (!gl.getShaderParameter(fs, gl.COMPILE_STATUS)) {
console.error('An error occurred compiling the shader: ' + gl.getShaderInfoLog(fs));
}
let program = gl.createProgram();
gl.attachShader(program, vs);
gl.attachShader(program, fs);
gl.linkProgram(program);
if (!gl.getProgramParameter(program, gl.LINK_STATUS)) {
console.error(gl.getProgramInfoLog(program));
}
return { vs, fs, program };
}
let vs = `
attribute vec2 aPosition;
varying vec2 vFragCoord;
void main()
{
vFragCoord = (0.5 * aPosition) + vec2(0.5, 0.5);
vFragCoord.y = 1.0 - vFragCoord.y;
gl_Position = vec4(aPosition, 0.0, 1.0);
}
`;
let fs = `
precision mediump float;
varying vec2 vFragCoord;
uniform sampler2D tBottomLayer;
uniform sampler2D tTopLayer;
// π
Color Dodge
vec4 colorDodge(vec4 col, vec4 blend)
{
return vec4(mix(col.rgb / clamp(1.0 - blend.rgb, 0.00001, 1.0), col.rgb, blend.a), col.a);
}
void main()
{
vec2 uv = vFragCoord;
vec4 outColor = vec4(0.0, 0.0, 0.0, 0.0);
vec4 bottomColor = texture2D(tBottomLayer, uv);
vec4 topColor = texture2D(tTopLayer, uv);
outColor = colorDodge(bottomColor, topColor);
gl_FragColor = outColor;
}
`;
let { program } = createProgram(vs, fs);
// πΌοΈ Load Textures
function loadTexture(url: string) {
let tex = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, tex);
const pixel = new Uint8Array([ 0, 0, 0, 255 ]);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, 1, 1, 0, gl.RGBA, gl.UNSIGNED_BYTE, pixel);
let img = new Image();
img.src = url;
img.onload = () => {
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, img);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.generateMipmap(gl.TEXTURE_2D);
};
return tex;
}
let bottomLayer = loadTexture('https://alain.xyz/blog/image-editor-effects/assets/cover.jpg');
let topLayer = loadTexture('https://alain.xyz/blog/unreal-engine-architecture/assets/cover.png');
// π Draw
function draw() {
// Bin | aders
gl.useProgram(program);
// Bind Vertex Layout
let loc = gl.getAttribLocation(program, 'aPosition');
gl.vertexAttribPointer(loc, 2, gl.FLOAT, false, 4 * 2, 0);
gl.enableVertexAttribArray(loc);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
// Bind Uniforms
var shaderTexNumber = 0;
let bottomLayerLoc = gl.getUniformLocation(program, 'tBottomLayer');
gl.uniform1i(bottomLayerLoc, shaderTexNumber);
gl.activeTexture(gl.TEXTURE0 + shaderTexNumber);
gl.bindTexture(gl.TEXTURE_2D, bottomLayer);
shaderTexNumber++;
let topLayerLoc = gl.getUniformLocation(program, 'tTopLayer');
gl.uniform1i(topLayerLoc, shaderTexNumber);
gl.activeTexture(gl.TEXTURE0 + shaderTexNumber);
gl.bindTexture(gl.TEXTURE_2D, topLayer);
gl.drawElements(gl.TRIANGLES, 6, gl.UNSIGNED_SHORT, 0);
}
let resizeHandler = () => {
canvas.width = innerWidth;
canvas.height = innerHeight;
gl.viewport(0, 0, innerWidth, innerHeight);
draw();
};
window.addEventListener('resize', resizeHandler);
resizeHandler();
function update()
{
draw();
requestAnimationFrame(update)
}
requestAnimationFrame(update); | d Sh | identifier_name |
example.ts | // βͺ Initialization
let canvas = document.getElementById('game') as HTMLCanvasElement;
var gl = canvas.getContext('webgl');
if (!gl) {
throw new Error('Could not create WebGL Context!');
}
// π² Create NDC Space Quad (attribute vec2 position)
let ndcQuad = [ 1.0, -1.0, -1.0, -1.0, 1.0, 1.0, -1.0, 1.0 ];
let indices = [ 0, 1, 2, 1, 2, 3 ];
// Create Buffers
let dataBuffer = gl.createBuffer();
let indexBuffer = gl.createBuffer();
// Bind Data/Indices to Buffers
gl.bindBuffer(gl.ARRAY_BUFFER, dataBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(ndcQuad), gl.STATIC_DRAW);
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, indexBuffer);
gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, new Uint16Array(indices), gl.STATIC_DRAW);
function createProgram(vsSource: string, fsSource: string) {
let vs = gl.createShader(gl.VERTEX_SHADER);
gl.shaderSource(vs, vsSource);
gl.compileShader(vs);
if (!gl.getShaderParameter(vs, gl.COMPILE_STATUS)) {
console.error('An error occurred compiling the shader: ' + gl.getShaderInfoLog(vs));
}
let fs = gl.createShader(gl.FRAGMENT_SHADER);
gl.shaderSource(fs, fsSource);
gl.compileShader(fs);
if (!gl.getShaderParameter(fs, gl.COMPILE_STATUS)) {
console.error('An error occurred compiling the shader: ' + gl.getShaderInfoLog(fs));
}
let program = gl.createProgram();
gl.attachShader(program, vs);
gl.attachShader(program, fs);
gl.linkProgram(program);
if (!gl.getProgramParameter(program, gl.LINK_STATUS)) {
console.error(gl.getProgramInfoLog(program));
}
return { vs, fs, program };
}
let vs = `
attribute vec2 aPosition;
varying vec2 vFragCoord;
void main()
{
vFragCoord = (0.5 * aPosition) + vec2(0.5, 0.5);
vFragCoord.y = 1.0 - vFragCoord.y;
gl_Position = vec4(aPosition, 0.0, 1.0);
}
`;
let fs = `
precision mediump float;
varying vec2 vFragCoord;
uniform sampler2D tBottomLayer; | vec4 colorDodge(vec4 col, vec4 blend)
{
return vec4(mix(col.rgb / clamp(1.0 - blend.rgb, 0.00001, 1.0), col.rgb, blend.a), col.a);
}
void main()
{
vec2 uv = vFragCoord;
vec4 outColor = vec4(0.0, 0.0, 0.0, 0.0);
vec4 bottomColor = texture2D(tBottomLayer, uv);
vec4 topColor = texture2D(tTopLayer, uv);
outColor = colorDodge(bottomColor, topColor);
gl_FragColor = outColor;
}
`;
let { program } = createProgram(vs, fs);
// πΌοΈ Load Textures
function loadTexture(url: string) {
let tex = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, tex);
const pixel = new Uint8Array([ 0, 0, 0, 255 ]);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, 1, 1, 0, gl.RGBA, gl.UNSIGNED_BYTE, pixel);
let img = new Image();
img.src = url;
img.onload = () => {
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, img);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.generateMipmap(gl.TEXTURE_2D);
};
return tex;
}
let bottomLayer = loadTexture('https://alain.xyz/blog/image-editor-effects/assets/cover.jpg');
let topLayer = loadTexture('https://alain.xyz/blog/unreal-engine-architecture/assets/cover.png');
// π Draw
function draw() {
// Bind Shaders
gl.useProgram(program);
// Bind Vertex Layout
let loc = gl.getAttribLocation(program, 'aPosition');
gl.vertexAttribPointer(loc, 2, gl.FLOAT, false, 4 * 2, 0);
gl.enableVertexAttribArray(loc);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
// Bind Uniforms
var shaderTexNumber = 0;
let bottomLayerLoc = gl.getUniformLocation(program, 'tBottomLayer');
gl.uniform1i(bottomLayerLoc, shaderTexNumber);
gl.activeTexture(gl.TEXTURE0 + shaderTexNumber);
gl.bindTexture(gl.TEXTURE_2D, bottomLayer);
shaderTexNumber++;
let topLayerLoc = gl.getUniformLocation(program, 'tTopLayer');
gl.uniform1i(topLayerLoc, shaderTexNumber);
gl.activeTexture(gl.TEXTURE0 + shaderTexNumber);
gl.bindTexture(gl.TEXTURE_2D, topLayer);
gl.drawElements(gl.TRIANGLES, 6, gl.UNSIGNED_SHORT, 0);
}
let resizeHandler = () => {
canvas.width = innerWidth;
canvas.height = innerHeight;
gl.viewport(0, 0, innerWidth, innerHeight);
draw();
};
window.addEventListener('resize', resizeHandler);
resizeHandler();
function update()
{
draw();
requestAnimationFrame(update)
}
requestAnimationFrame(update); | uniform sampler2D tTopLayer;
// π
Color Dodge | random_line_split |
jsonSchema.js | define(['../Property', '../Model', 'dojo/_base/declare', 'json-schema/lib/validate'],
function (Property, Model, declare, jsonSchemaValidator) {
// module:
// dstore/extensions/JsonSchema
// summary:
// This module generates a dstore schema from a JSON Schema to enabled validation of objects
// and property changes with JSON Schema
return function (jsonSchema) {
// create the schema that can be used by dstore/Model
var modelSchema = {};
var properties = jsonSchema.properties || jsonSchema;
// the validation function, this can be used for all the properties
function checkForErrors() {
var value = this.valueOf();
var key = this.name;
// get the current value and test it against the property's definition
var validation = jsonSchemaValidator.validate(value, properties[key]);
// set any errors
var errors = validation.errors;
if (errors) {
// assign the property names to the errors
for (var i = 0; i < errors.length; i++) {
errors[i].property = key;
}
}
return errors;
}
// iterate through the schema properties, creating property validators
for (var i in properties) {
var jsDefinition = properties[i];
var definition = modelSchema[i] = new Property({ | checkForErrors: checkForErrors
});
if (typeof jsDefinition.type === 'string') {
// copy the type so it can be used for coercion
definition.type = jsDefinition.type;
}
if (typeof jsDefinition['default'] === 'string') {
// and copy the default
definition['default'] = jsDefinition['default'];
}
}
return declare(Model, {
schema: modelSchema
});
};
}); | random_line_split |
|
jsonSchema.js | define(['../Property', '../Model', 'dojo/_base/declare', 'json-schema/lib/validate'],
function (Property, Model, declare, jsonSchemaValidator) {
// module:
// dstore/extensions/JsonSchema
// summary:
// This module generates a dstore schema from a JSON Schema to enabled validation of objects
// and property changes with JSON Schema
return function (jsonSchema) {
// create the schema that can be used by dstore/Model
var modelSchema = {};
var properties = jsonSchema.properties || jsonSchema;
// the validation function, this can be used for all the properties
function | () {
var value = this.valueOf();
var key = this.name;
// get the current value and test it against the property's definition
var validation = jsonSchemaValidator.validate(value, properties[key]);
// set any errors
var errors = validation.errors;
if (errors) {
// assign the property names to the errors
for (var i = 0; i < errors.length; i++) {
errors[i].property = key;
}
}
return errors;
}
// iterate through the schema properties, creating property validators
for (var i in properties) {
var jsDefinition = properties[i];
var definition = modelSchema[i] = new Property({
checkForErrors: checkForErrors
});
if (typeof jsDefinition.type === 'string') {
// copy the type so it can be used for coercion
definition.type = jsDefinition.type;
}
if (typeof jsDefinition['default'] === 'string') {
// and copy the default
definition['default'] = jsDefinition['default'];
}
}
return declare(Model, {
schema: modelSchema
});
};
}); | checkForErrors | identifier_name |
jsonSchema.js | define(['../Property', '../Model', 'dojo/_base/declare', 'json-schema/lib/validate'],
function (Property, Model, declare, jsonSchemaValidator) {
// module:
// dstore/extensions/JsonSchema
// summary:
// This module generates a dstore schema from a JSON Schema to enabled validation of objects
// and property changes with JSON Schema
return function (jsonSchema) {
// create the schema that can be used by dstore/Model
var modelSchema = {};
var properties = jsonSchema.properties || jsonSchema;
// the validation function, this can be used for all the properties
function checkForErrors() {
var value = this.valueOf();
var key = this.name;
// get the current value and test it against the property's definition
var validation = jsonSchemaValidator.validate(value, properties[key]);
// set any errors
var errors = validation.errors;
if (errors) {
// assign the property names to the errors
for (var i = 0; i < errors.length; i++) |
}
return errors;
}
// iterate through the schema properties, creating property validators
for (var i in properties) {
var jsDefinition = properties[i];
var definition = modelSchema[i] = new Property({
checkForErrors: checkForErrors
});
if (typeof jsDefinition.type === 'string') {
// copy the type so it can be used for coercion
definition.type = jsDefinition.type;
}
if (typeof jsDefinition['default'] === 'string') {
// and copy the default
definition['default'] = jsDefinition['default'];
}
}
return declare(Model, {
schema: modelSchema
});
};
}); | {
errors[i].property = key;
} | conditional_block |
jsonSchema.js | define(['../Property', '../Model', 'dojo/_base/declare', 'json-schema/lib/validate'],
function (Property, Model, declare, jsonSchemaValidator) {
// module:
// dstore/extensions/JsonSchema
// summary:
// This module generates a dstore schema from a JSON Schema to enabled validation of objects
// and property changes with JSON Schema
return function (jsonSchema) {
// create the schema that can be used by dstore/Model
var modelSchema = {};
var properties = jsonSchema.properties || jsonSchema;
// the validation function, this can be used for all the properties
function checkForErrors() |
// iterate through the schema properties, creating property validators
for (var i in properties) {
var jsDefinition = properties[i];
var definition = modelSchema[i] = new Property({
checkForErrors: checkForErrors
});
if (typeof jsDefinition.type === 'string') {
// copy the type so it can be used for coercion
definition.type = jsDefinition.type;
}
if (typeof jsDefinition['default'] === 'string') {
// and copy the default
definition['default'] = jsDefinition['default'];
}
}
return declare(Model, {
schema: modelSchema
});
};
}); | {
var value = this.valueOf();
var key = this.name;
// get the current value and test it against the property's definition
var validation = jsonSchemaValidator.validate(value, properties[key]);
// set any errors
var errors = validation.errors;
if (errors) {
// assign the property names to the errors
for (var i = 0; i < errors.length; i++) {
errors[i].property = key;
}
}
return errors;
} | identifier_body |
setup.py | #!/usr/bin/env python
"""
Hiveary
https://hiveary.com
Licensed under Simplified BSD License (see LICENSE)
(C) Hiveary, Inc. 2013-2014 all rights reserved
"""
import platform
import sys
from hiveary import __version__ as version
current_platform = platform.system()
FROZEN_NAME = 'hiveary-agent'
AUTHOR = "Hiveary"
AUTHOR_EMAIL = "[email protected]"
DESCRIPTION = "Hiveary Monitoring Agent"
LICENSE = "Simplified BSD"
URL = "http://hiveary.com"
# OS-specific setup
if 'bdist_esky' in sys.argv and current_platform == 'Windows':
# Use esky/cxfreeze to build the agent and py2exe to build the service
from esky.bdist_esky import Executable
from glob import glob
import os
import py2exe # noqa
import setuptools
import shutil
modules = [
'kombu.transport.pyamqp',
'kombu.transport.base',
'kombu.transport.amqplib',
]
sys.path.append('C:\\Program Files (x86)\\Microsoft Visual Studio 9.0\\VC\\redist\\x86\\Microsoft.VC90.CRT')
# Add in Visual Studio C++ compiler library
data_files = [
('Microsoft.VC90.CRT', glob(r'C:\Program Files (x86)\Microsoft Visual Studio 9.0\VC\redist\x86\Microsoft.VC90.CRT\*.*')),
r'hiveary\ca-bundle.pem',
('monitors', glob(r'monitors\*.py'))
]
script = Executable('hiveary-agent', gui_only=False)
options = {
'bdist_esky': {
'freezer_module': 'cxfreeze',
'includes': modules,
}
}
# Build the agent
setuptools.setup(name=FROZEN_NAME,
version=version,
scripts=[script],
options=options,
data_files=data_files,
)
sys.argv.remove('bdist_esky')
sys.argv.append('py2exe')
# used for the versioninfo resource
class Target(object):
def __init__(self, **kw):
self.__dict__.update(kw)
self.version = version
self.company_name = 'Hiveary'
self.name = "HivearyService"
script = Target(
description='Hiveary Agent Service Launcher',
modules=["HivearyService"],
cmdline_style='pywin32')
data_files = []
# Build the service
setuptools.setup(name='HivearyService',
version=version,
options={'py2exe': {}},
service=[script]
)
# python27.dll will be available at the root once the esky zip is extracted,
# so we can remove it now
os.remove(r'dist\python27.dll')
shutil.rmtree('build')
else:
| ('/etc/hiveary', ['hiveary.conf.example', 'README.md']),
('/etc/hiveary/init', ['initd/hiveary-agent']),
('/etc/hiveary/systemd', ['arch/hiveary-agent.service']),
('/usr/lib/hiveary', ['monitors/resources.py']),
]
setup(name=FROZEN_NAME,
version=version,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
include_package_data=True,
data_files=data_files,
install_requires=install_requires,
packages=find_packages(),
scripts=['hiveary-agent']
)
| try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
# Include all files from the package.
install_requires = [
'amqplib>=1.0.2',
'kombu>=3.0.8',
'netifaces-merged>=0.9.0',
'oauth2>=1.5.211',
'psutil>=1.1.0',
'simplejson>=3.0.5',
'Twisted>=13.2.0',
'impala>=0.1.1',
]
data_files = [ | conditional_block |
setup.py | #!/usr/bin/env python
"""
Hiveary
https://hiveary.com
Licensed under Simplified BSD License (see LICENSE)
(C) Hiveary, Inc. 2013-2014 all rights reserved
"""
import platform
import sys
from hiveary import __version__ as version
current_platform = platform.system()
FROZEN_NAME = 'hiveary-agent'
AUTHOR = "Hiveary"
AUTHOR_EMAIL = "[email protected]"
DESCRIPTION = "Hiveary Monitoring Agent"
LICENSE = "Simplified BSD"
URL = "http://hiveary.com"
# OS-specific setup
if 'bdist_esky' in sys.argv and current_platform == 'Windows':
# Use esky/cxfreeze to build the agent and py2exe to build the service
from esky.bdist_esky import Executable
from glob import glob
import os
import py2exe # noqa
import setuptools
import shutil
modules = [
'kombu.transport.pyamqp',
'kombu.transport.base',
'kombu.transport.amqplib',
]
sys.path.append('C:\\Program Files (x86)\\Microsoft Visual Studio 9.0\\VC\\redist\\x86\\Microsoft.VC90.CRT')
# Add in Visual Studio C++ compiler library
data_files = [
('Microsoft.VC90.CRT', glob(r'C:\Program Files (x86)\Microsoft Visual Studio 9.0\VC\redist\x86\Microsoft.VC90.CRT\*.*')),
r'hiveary\ca-bundle.pem',
('monitors', glob(r'monitors\*.py'))
]
script = Executable('hiveary-agent', gui_only=False)
options = {
'bdist_esky': {
'freezer_module': 'cxfreeze',
'includes': modules,
}
}
# Build the agent
setuptools.setup(name=FROZEN_NAME,
version=version,
scripts=[script],
options=options,
data_files=data_files,
)
sys.argv.remove('bdist_esky')
sys.argv.append('py2exe')
# used for the versioninfo resource
class Target(object):
def __init__(self, **kw):
|
script = Target(
description='Hiveary Agent Service Launcher',
modules=["HivearyService"],
cmdline_style='pywin32')
data_files = []
# Build the service
setuptools.setup(name='HivearyService',
version=version,
options={'py2exe': {}},
service=[script]
)
# python27.dll will be available at the root once the esky zip is extracted,
# so we can remove it now
os.remove(r'dist\python27.dll')
shutil.rmtree('build')
else:
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
# Include all files from the package.
install_requires = [
'amqplib>=1.0.2',
'kombu>=3.0.8',
'netifaces-merged>=0.9.0',
'oauth2>=1.5.211',
'psutil>=1.1.0',
'simplejson>=3.0.5',
'Twisted>=13.2.0',
'impala>=0.1.1',
]
data_files = [
('/etc/hiveary', ['hiveary.conf.example', 'README.md']),
('/etc/hiveary/init', ['initd/hiveary-agent']),
('/etc/hiveary/systemd', ['arch/hiveary-agent.service']),
('/usr/lib/hiveary', ['monitors/resources.py']),
]
setup(name=FROZEN_NAME,
version=version,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
include_package_data=True,
data_files=data_files,
install_requires=install_requires,
packages=find_packages(),
scripts=['hiveary-agent']
)
| self.__dict__.update(kw)
self.version = version
self.company_name = 'Hiveary'
self.name = "HivearyService" | identifier_body |
setup.py | #!/usr/bin/env python
"""
Hiveary
https://hiveary.com
Licensed under Simplified BSD License (see LICENSE)
(C) Hiveary, Inc. 2013-2014 all rights reserved
"""
import platform
import sys
from hiveary import __version__ as version
current_platform = platform.system()
FROZEN_NAME = 'hiveary-agent'
AUTHOR = "Hiveary"
AUTHOR_EMAIL = "[email protected]"
DESCRIPTION = "Hiveary Monitoring Agent"
LICENSE = "Simplified BSD"
URL = "http://hiveary.com"
# OS-specific setup
if 'bdist_esky' in sys.argv and current_platform == 'Windows':
# Use esky/cxfreeze to build the agent and py2exe to build the service
from esky.bdist_esky import Executable
from glob import glob
import os
import py2exe # noqa
import setuptools
import shutil
modules = [
'kombu.transport.pyamqp',
'kombu.transport.base',
'kombu.transport.amqplib',
]
| ('Microsoft.VC90.CRT', glob(r'C:\Program Files (x86)\Microsoft Visual Studio 9.0\VC\redist\x86\Microsoft.VC90.CRT\*.*')),
r'hiveary\ca-bundle.pem',
('monitors', glob(r'monitors\*.py'))
]
script = Executable('hiveary-agent', gui_only=False)
options = {
'bdist_esky': {
'freezer_module': 'cxfreeze',
'includes': modules,
}
}
# Build the agent
setuptools.setup(name=FROZEN_NAME,
version=version,
scripts=[script],
options=options,
data_files=data_files,
)
sys.argv.remove('bdist_esky')
sys.argv.append('py2exe')
# used for the versioninfo resource
class Target(object):
def __init__(self, **kw):
self.__dict__.update(kw)
self.version = version
self.company_name = 'Hiveary'
self.name = "HivearyService"
script = Target(
description='Hiveary Agent Service Launcher',
modules=["HivearyService"],
cmdline_style='pywin32')
data_files = []
# Build the service
setuptools.setup(name='HivearyService',
version=version,
options={'py2exe': {}},
service=[script]
)
# python27.dll will be available at the root once the esky zip is extracted,
# so we can remove it now
os.remove(r'dist\python27.dll')
shutil.rmtree('build')
else:
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
# Include all files from the package.
install_requires = [
'amqplib>=1.0.2',
'kombu>=3.0.8',
'netifaces-merged>=0.9.0',
'oauth2>=1.5.211',
'psutil>=1.1.0',
'simplejson>=3.0.5',
'Twisted>=13.2.0',
'impala>=0.1.1',
]
data_files = [
('/etc/hiveary', ['hiveary.conf.example', 'README.md']),
('/etc/hiveary/init', ['initd/hiveary-agent']),
('/etc/hiveary/systemd', ['arch/hiveary-agent.service']),
('/usr/lib/hiveary', ['monitors/resources.py']),
]
setup(name=FROZEN_NAME,
version=version,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
include_package_data=True,
data_files=data_files,
install_requires=install_requires,
packages=find_packages(),
scripts=['hiveary-agent']
) | sys.path.append('C:\\Program Files (x86)\\Microsoft Visual Studio 9.0\\VC\\redist\\x86\\Microsoft.VC90.CRT')
# Add in Visual Studio C++ compiler library
data_files = [ | random_line_split |
setup.py | #!/usr/bin/env python
"""
Hiveary
https://hiveary.com
Licensed under Simplified BSD License (see LICENSE)
(C) Hiveary, Inc. 2013-2014 all rights reserved
"""
import platform
import sys
from hiveary import __version__ as version
current_platform = platform.system()
FROZEN_NAME = 'hiveary-agent'
AUTHOR = "Hiveary"
AUTHOR_EMAIL = "[email protected]"
DESCRIPTION = "Hiveary Monitoring Agent"
LICENSE = "Simplified BSD"
URL = "http://hiveary.com"
# OS-specific setup
if 'bdist_esky' in sys.argv and current_platform == 'Windows':
# Use esky/cxfreeze to build the agent and py2exe to build the service
from esky.bdist_esky import Executable
from glob import glob
import os
import py2exe # noqa
import setuptools
import shutil
modules = [
'kombu.transport.pyamqp',
'kombu.transport.base',
'kombu.transport.amqplib',
]
sys.path.append('C:\\Program Files (x86)\\Microsoft Visual Studio 9.0\\VC\\redist\\x86\\Microsoft.VC90.CRT')
# Add in Visual Studio C++ compiler library
data_files = [
('Microsoft.VC90.CRT', glob(r'C:\Program Files (x86)\Microsoft Visual Studio 9.0\VC\redist\x86\Microsoft.VC90.CRT\*.*')),
r'hiveary\ca-bundle.pem',
('monitors', glob(r'monitors\*.py'))
]
script = Executable('hiveary-agent', gui_only=False)
options = {
'bdist_esky': {
'freezer_module': 'cxfreeze',
'includes': modules,
}
}
# Build the agent
setuptools.setup(name=FROZEN_NAME,
version=version,
scripts=[script],
options=options,
data_files=data_files,
)
sys.argv.remove('bdist_esky')
sys.argv.append('py2exe')
# used for the versioninfo resource
class Target(object):
def | (self, **kw):
self.__dict__.update(kw)
self.version = version
self.company_name = 'Hiveary'
self.name = "HivearyService"
script = Target(
description='Hiveary Agent Service Launcher',
modules=["HivearyService"],
cmdline_style='pywin32')
data_files = []
# Build the service
setuptools.setup(name='HivearyService',
version=version,
options={'py2exe': {}},
service=[script]
)
# python27.dll will be available at the root once the esky zip is extracted,
# so we can remove it now
os.remove(r'dist\python27.dll')
shutil.rmtree('build')
else:
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
# Include all files from the package.
install_requires = [
'amqplib>=1.0.2',
'kombu>=3.0.8',
'netifaces-merged>=0.9.0',
'oauth2>=1.5.211',
'psutil>=1.1.0',
'simplejson>=3.0.5',
'Twisted>=13.2.0',
'impala>=0.1.1',
]
data_files = [
('/etc/hiveary', ['hiveary.conf.example', 'README.md']),
('/etc/hiveary/init', ['initd/hiveary-agent']),
('/etc/hiveary/systemd', ['arch/hiveary-agent.service']),
('/usr/lib/hiveary', ['monitors/resources.py']),
]
setup(name=FROZEN_NAME,
version=version,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
include_package_data=True,
data_files=data_files,
install_requires=install_requires,
packages=find_packages(),
scripts=['hiveary-agent']
)
| __init__ | identifier_name |
lib.rs | //! # Elektra
//! Safe bindings for [libelektra](https://www.libelektra.org).
//!
//! See the [project's readme](https://master.libelektra.org/src/bindings/rust) for an introduction and examples.
//!
//! The crate consists of three major parts.
//!
//! - The [keys](key/index.html) that encapsulate name, value and metainfo
//! - A [`KeySet`](keyset/index.html) holds a set of `StringKey`s, since these are the most common type of key
//! - [`KDB`](kdb/index.html) allows access to the persistent key database by reading or writing `KeySet`s
//!
//! Refer to the documentation of the modules to learn more about each.
extern crate bitflags;
extern crate elektra_sys;
pub mod key;
pub mod keybuilder;
/// Trait to read values from a key.
pub mod readable;
/// A wrapper Trait to make keys readonly.
pub mod readonly; | pub mod kdb;
pub use self::key::{BinaryKey, StringKey, MetaIter, NameIter, KeyNameInvalidError, KeyNameReadOnlyError, KeyNotFoundError, CopyOption};
pub use self::keybuilder::KeyBuilder;
pub use self::readable::ReadableKey;
pub use self::readonly::ReadOnly;
pub use self::writeable::WriteableKey;
pub use self::keyset::{KeySet, ReadOnlyStringKeyIter, StringKeyIter, Cursor, LookupOption};
pub use self::kdb::{KDB, KDBError}; | /// Trait to write values to a key.
pub mod writeable;
pub mod keyset; | random_line_split |
readers.py | import tensorflow as tf
import tensorflow.contrib.slim as slim
class BaseReader(object):
def read(self):
raise NotImplementedError()
class ImageReader(BaseReader):
def __init__(self):
self.width = None
self.height = None
def get_image_size(self):
return self.width, self.height
def set_image_size(self, width, height):
self.width = width |
def read(self, filename, num_classes, batch_size=256, feature_map=None):
assert(self.width is not None and self.height is not None)
assert(self.width > 0 and self.height > 0)
reader = tf.TFRecordReader()
tf.add_to_collection(filename, batch_size) # is this really needed?
key, value = reader.read_up_to(filename, batch_size)
if feature_map is None:
feature_map = {
'label': tf.FixedLenFeature([], tf.int64),
'image_raw': tf.FixedLenFeature([self.width * self.height], tf.int64),
}
features = tf.parse_example(value, features=feature_map)
images = tf.cast(features["image_raw"], tf.float32) * (1. / 255)
if feature_map.get('label') is not None:
labels = tf.cast(features['label'], tf.int32)
one_hot = tf.map_fn(lambda x: tf.cast(slim.one_hot_encoding(x, num_classes), tf.int32), labels)
one_hot = tf.reshape(one_hot, [-1, num_classes])
return one_hot, images
empty_labels = tf.reduce_sum(tf.zeros_like(images), axis=1)
return empty_labels, images | self.height = height | random_line_split |
readers.py | import tensorflow as tf
import tensorflow.contrib.slim as slim
class BaseReader(object):
def read(self):
raise NotImplementedError()
class ImageReader(BaseReader):
def __init__(self):
self.width = None
self.height = None
def get_image_size(self):
|
def set_image_size(self, width, height):
self.width = width
self.height = height
def read(self, filename, num_classes, batch_size=256, feature_map=None):
assert(self.width is not None and self.height is not None)
assert(self.width > 0 and self.height > 0)
reader = tf.TFRecordReader()
tf.add_to_collection(filename, batch_size) # is this really needed?
key, value = reader.read_up_to(filename, batch_size)
if feature_map is None:
feature_map = {
'label': tf.FixedLenFeature([], tf.int64),
'image_raw': tf.FixedLenFeature([self.width * self.height], tf.int64),
}
features = tf.parse_example(value, features=feature_map)
images = tf.cast(features["image_raw"], tf.float32) * (1. / 255)
if feature_map.get('label') is not None:
labels = tf.cast(features['label'], tf.int32)
one_hot = tf.map_fn(lambda x: tf.cast(slim.one_hot_encoding(x, num_classes), tf.int32), labels)
one_hot = tf.reshape(one_hot, [-1, num_classes])
return one_hot, images
empty_labels = tf.reduce_sum(tf.zeros_like(images), axis=1)
return empty_labels, images
| return self.width, self.height | identifier_body |
readers.py | import tensorflow as tf
import tensorflow.contrib.slim as slim
class BaseReader(object):
def read(self):
raise NotImplementedError()
class ImageReader(BaseReader):
def __init__(self):
self.width = None
self.height = None
def get_image_size(self):
return self.width, self.height
def set_image_size(self, width, height):
self.width = width
self.height = height
def read(self, filename, num_classes, batch_size=256, feature_map=None):
assert(self.width is not None and self.height is not None)
assert(self.width > 0 and self.height > 0)
reader = tf.TFRecordReader()
tf.add_to_collection(filename, batch_size) # is this really needed?
key, value = reader.read_up_to(filename, batch_size)
if feature_map is None:
feature_map = {
'label': tf.FixedLenFeature([], tf.int64),
'image_raw': tf.FixedLenFeature([self.width * self.height], tf.int64),
}
features = tf.parse_example(value, features=feature_map)
images = tf.cast(features["image_raw"], tf.float32) * (1. / 255)
if feature_map.get('label') is not None:
|
empty_labels = tf.reduce_sum(tf.zeros_like(images), axis=1)
return empty_labels, images
| labels = tf.cast(features['label'], tf.int32)
one_hot = tf.map_fn(lambda x: tf.cast(slim.one_hot_encoding(x, num_classes), tf.int32), labels)
one_hot = tf.reshape(one_hot, [-1, num_classes])
return one_hot, images | conditional_block |
readers.py | import tensorflow as tf
import tensorflow.contrib.slim as slim
class BaseReader(object):
def read(self):
raise NotImplementedError()
class | (BaseReader):
def __init__(self):
self.width = None
self.height = None
def get_image_size(self):
return self.width, self.height
def set_image_size(self, width, height):
self.width = width
self.height = height
def read(self, filename, num_classes, batch_size=256, feature_map=None):
assert(self.width is not None and self.height is not None)
assert(self.width > 0 and self.height > 0)
reader = tf.TFRecordReader()
tf.add_to_collection(filename, batch_size) # is this really needed?
key, value = reader.read_up_to(filename, batch_size)
if feature_map is None:
feature_map = {
'label': tf.FixedLenFeature([], tf.int64),
'image_raw': tf.FixedLenFeature([self.width * self.height], tf.int64),
}
features = tf.parse_example(value, features=feature_map)
images = tf.cast(features["image_raw"], tf.float32) * (1. / 255)
if feature_map.get('label') is not None:
labels = tf.cast(features['label'], tf.int32)
one_hot = tf.map_fn(lambda x: tf.cast(slim.one_hot_encoding(x, num_classes), tf.int32), labels)
one_hot = tf.reshape(one_hot, [-1, num_classes])
return one_hot, images
empty_labels = tf.reduce_sum(tf.zeros_like(images), axis=1)
return empty_labels, images
| ImageReader | identifier_name |
ConfirmXferPacket.ts | import { Collection } from '../../utilities'
import Packet from './Packet'
import * as Types from '../types'
/**
* ConfirmXferPacket Packet
*/
class ConfirmXferPacket extends Packet { | * method of Packet, plus the buffer helper of the network namespace for
* generating a lookup codes.
*
* @type {number}
*/
public static id: number = 19
/**
* Packet frequency. This value determines whether the message ID is 8, 16, or
* 32 bits. There can be unique 254 messages IDs in the "High" or "Medium"
* frequencies and 32,000 in "Low". A message with a "Fixed" frequency also
* defines its own ID and is considered to be a signal.
*
* @type {number}
*/
public static frequency: number = 2
/**
* If this value is true, the client cannot send this packet as circuits only
* accept trusted packets from internal connections (to utility servers etc).
*
* @type {boolean}
*/
public static trusted: boolean = false
/**
* States if this packet should use or be using zerocoding, to attempt to
* compress the sequences of zeros in the message in order to reduce network
* load.
*
* @type {boolean}
*/
public static compression: boolean = false
/**
* Determines the blocks that are are contained in the message and it's
* required parameters.
*
* @see {@link http://wiki.secondlife.com/wiki/Message_Layout}
* @type {Collection}
*/
public static format: Collection<string, any> = new Collection([
// tslint:disable-next-line:max-line-length
['xfer', { quantity: 1, parameters: new Collection<string, any>([['id', Types.U64], ['packet', Types.U32]]) }]
])
/**
* ConfirmXferPacket constructor, can be passed either a fully
* initialized Packet Buffer or an object containing this Objects required
* parameters from {@link ConfirmXferPacket.format}. Note that
* "agentData" blocks may be excluded if {@link build} is able to fetch the
* requirements itself.
*
* @param {object|Buffer} [data] Packet block data to be seralized, may be optional
* @param {U64} [data.xfer.id] ID
* @param {U32} [data.xfer.packet] Packet
*/
constructor(data = {}) {
super(data)
}
}
export default ConfirmXferPacket | /**
* Packet ID, this value is only unique per-frequency range, see key get | random_line_split |
ConfirmXferPacket.ts | import { Collection } from '../../utilities'
import Packet from './Packet'
import * as Types from '../types'
/**
* ConfirmXferPacket Packet
*/
class ConfirmXferPacket extends Packet {
/**
* Packet ID, this value is only unique per-frequency range, see key get
* method of Packet, plus the buffer helper of the network namespace for
* generating a lookup codes.
*
* @type {number}
*/
public static id: number = 19
/**
* Packet frequency. This value determines whether the message ID is 8, 16, or
* 32 bits. There can be unique 254 messages IDs in the "High" or "Medium"
* frequencies and 32,000 in "Low". A message with a "Fixed" frequency also
* defines its own ID and is considered to be a signal.
*
* @type {number}
*/
public static frequency: number = 2
/**
* If this value is true, the client cannot send this packet as circuits only
* accept trusted packets from internal connections (to utility servers etc).
*
* @type {boolean}
*/
public static trusted: boolean = false
/**
* States if this packet should use or be using zerocoding, to attempt to
* compress the sequences of zeros in the message in order to reduce network
* load.
*
* @type {boolean}
*/
public static compression: boolean = false
/**
* Determines the blocks that are are contained in the message and it's
* required parameters.
*
* @see {@link http://wiki.secondlife.com/wiki/Message_Layout}
* @type {Collection}
*/
public static format: Collection<string, any> = new Collection([
// tslint:disable-next-line:max-line-length
['xfer', { quantity: 1, parameters: new Collection<string, any>([['id', Types.U64], ['packet', Types.U32]]) }]
])
/**
* ConfirmXferPacket constructor, can be passed either a fully
* initialized Packet Buffer or an object containing this Objects required
* parameters from {@link ConfirmXferPacket.format}. Note that
* "agentData" blocks may be excluded if {@link build} is able to fetch the
* requirements itself.
*
* @param {object|Buffer} [data] Packet block data to be seralized, may be optional
* @param {U64} [data.xfer.id] ID
* @param {U32} [data.xfer.packet] Packet
*/
| (data = {}) {
super(data)
}
}
export default ConfirmXferPacket
| constructor | identifier_name |
create.js | import { Duration, isDuration } from './constructor';
import toInt from '../utils/to-int';
import hasOwnProp from '../utils/has-own-prop';
import { DATE, HOUR, MINUTE, SECOND, MILLISECOND } from '../units/constants';
import { cloneWithOffset } from '../units/offset';
import { createLocal } from '../create/local';
// ASP.NET json date format regex
var aspNetRegex = /^(\-)?(?:(\d*)[. ])?(\d+)\:(\d+)(?:\:(\d+)\.?(\d{3})?\d*)?$/;
// from http://docs.closure-library.googlecode.com/git/closure_goog_date_date.js.source.html
// somewhat more in line with 4.4.3.2 2004 spec, but allows decimal anywhere
// and further modified to allow for strings containing both week and day
var isoRegex = /^(-)?P(?:(-?[0-9,.]*)Y)?(?:(-?[0-9,.]*)M)?(?:(-?[0-9,.]*)W)?(?:(-?[0-9,.]*)D)?(?:T(?:(-?[0-9,.]*)H)?(?:(-?[0-9,.]*)M)?(?:(-?[0-9,.]*)S)?)?$/;
export function createDuration (input, key) {
var duration = input,
// matching against regexp is expensive, do it on demand
match = null,
sign,
ret,
diffRes;
if (isDuration(input)) {
duration = {
ms : input._milliseconds,
d : input._days,
M : input._months
};
} else if (typeof input === 'number') {
duration = {};
if (key) {
duration[key] = input;
} else {
duration.milliseconds = input;
}
} else if (!!(match = aspNetRegex.exec(input))) {
sign = (match[1] === '-') ? -1 : 1;
duration = {
y : 0,
d : toInt(match[DATE]) * sign,
h : toInt(match[HOUR]) * sign,
m : toInt(match[MINUTE]) * sign,
s : toInt(match[SECOND]) * sign,
ms : toInt(match[MILLISECOND]) * sign
};
} else if (!!(match = isoRegex.exec(input))) {
sign = (match[1] === '-') ? -1 : 1;
duration = {
y : parseIso(match[2], sign),
M : parseIso(match[3], sign),
w : parseIso(match[4], sign),
d : parseIso(match[5], sign),
h : parseIso(match[6], sign),
m : parseIso(match[7], sign),
s : parseIso(match[8], sign)
};
} else if (duration == null) {// checks for null or undefined
duration = {};
} else if (typeof duration === 'object' && ('from' in duration || 'to' in duration)) {
diffRes = momentsDifference(createLocal(duration.from), createLocal(duration.to));
duration = {};
duration.ms = diffRes.milliseconds;
duration.M = diffRes.months;
}
ret = new Duration(duration);
if (isDuration(input) && hasOwnProp(input, '_locale')) {
ret._locale = input._locale;
}
return ret;
}
createDuration.fn = Duration.prototype;
function parseIso (inp, sign) |
function positiveMomentsDifference(base, other) {
var res = {milliseconds: 0, months: 0};
res.months = other.month() - base.month() +
(other.year() - base.year()) * 12;
if (base.clone().add(res.months, 'M').isAfter(other)) {
--res.months;
}
res.milliseconds = +other - +(base.clone().add(res.months, 'M'));
return res;
}
function momentsDifference(base, other) {
var res;
if (!(base.isValid() && other.isValid())) {
return {milliseconds: 0, months: 0};
}
other = cloneWithOffset(other, base);
if (base.isBefore(other)) {
res = positiveMomentsDifference(base, other);
} else {
res = positiveMomentsDifference(other, base);
res.milliseconds = -res.milliseconds;
res.months = -res.months;
}
return res;
}
| {
// We'd normally use ~~inp for this, but unfortunately it also
// converts floats to ints.
// inp may be undefined, so careful calling replace on it.
var res = inp && parseFloat(inp.replace(',', '.'));
// apply sign while we're at it
return (isNaN(res) ? 0 : res) * sign;
} | identifier_body |
create.js | import { Duration, isDuration } from './constructor';
import toInt from '../utils/to-int';
import hasOwnProp from '../utils/has-own-prop';
import { DATE, HOUR, MINUTE, SECOND, MILLISECOND } from '../units/constants';
import { cloneWithOffset } from '../units/offset';
import { createLocal } from '../create/local';
// ASP.NET json date format regex
var aspNetRegex = /^(\-)?(?:(\d*)[. ])?(\d+)\:(\d+)(?:\:(\d+)\.?(\d{3})?\d*)?$/;
// from http://docs.closure-library.googlecode.com/git/closure_goog_date_date.js.source.html
// somewhat more in line with 4.4.3.2 2004 spec, but allows decimal anywhere
// and further modified to allow for strings containing both week and day
var isoRegex = /^(-)?P(?:(-?[0-9,.]*)Y)?(?:(-?[0-9,.]*)M)?(?:(-?[0-9,.]*)W)?(?:(-?[0-9,.]*)D)?(?:T(?:(-?[0-9,.]*)H)?(?:(-?[0-9,.]*)M)?(?:(-?[0-9,.]*)S)?)?$/;
export function createDuration (input, key) {
var duration = input,
// matching against regexp is expensive, do it on demand
match = null,
sign,
ret,
diffRes;
if (isDuration(input)) {
duration = {
ms : input._milliseconds,
d : input._days,
M : input._months
};
} else if (typeof input === 'number') {
duration = {};
if (key) {
duration[key] = input;
} else {
duration.milliseconds = input;
}
} else if (!!(match = aspNetRegex.exec(input))) {
sign = (match[1] === '-') ? -1 : 1;
duration = {
y : 0,
d : toInt(match[DATE]) * sign,
h : toInt(match[HOUR]) * sign,
m : toInt(match[MINUTE]) * sign,
s : toInt(match[SECOND]) * sign,
ms : toInt(match[MILLISECOND]) * sign
};
} else if (!!(match = isoRegex.exec(input))) {
sign = (match[1] === '-') ? -1 : 1;
duration = {
y : parseIso(match[2], sign),
M : parseIso(match[3], sign),
w : parseIso(match[4], sign),
d : parseIso(match[5], sign),
h : parseIso(match[6], sign),
m : parseIso(match[7], sign),
s : parseIso(match[8], sign)
};
} else if (duration == null) {// checks for null or undefined
duration = {};
} else if (typeof duration === 'object' && ('from' in duration || 'to' in duration)) {
diffRes = momentsDifference(createLocal(duration.from), createLocal(duration.to));
duration = {};
duration.ms = diffRes.milliseconds;
duration.M = diffRes.months;
}
ret = new Duration(duration);
if (isDuration(input) && hasOwnProp(input, '_locale')) {
ret._locale = input._locale;
}
return ret;
}
createDuration.fn = Duration.prototype;
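// A minimal usage sketch (hypothetical calls inferred from the branches above, not from the
// original source):
//   createDuration(5000)               // plain number -> milliseconds
//   createDuration(2, 'h')             // number plus key -> 2 hours
//   createDuration('7.23:59:59.999')   // ASP.NET-style string, parsed by aspNetRegex
//   createDuration('P1Y2M10D')         // ISO 8601 duration, parsed by isoRegex
//   createDuration({from: a, to: b})   // difference of two moments via momentsDifference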
function parseIso (inp, sign) {
// We'd normally use ~~inp for this, but unfortunately it also
// converts floats to ints.
// inp may be undefined, so careful calling replace on it.
var res = inp && parseFloat(inp.replace(',', '.'));
// apply sign while we're at it
return (isNaN(res) ? 0 : res) * sign;
}
function positiveMomentsDifference(base, other) {
var res = {milliseconds: 0, months: 0};
res.months = other.month() - base.month() +
(other.year() - base.year()) * 12;
if (base.clone().add(res.months, 'M').isAfter(other)) {
--res.months;
}
res.milliseconds = +other - +(base.clone().add(res.months, 'M'));
return res;
}
function | (base, other) {
var res;
if (!(base.isValid() && other.isValid())) {
return {milliseconds: 0, months: 0};
}
other = cloneWithOffset(other, base);
if (base.isBefore(other)) {
res = positiveMomentsDifference(base, other);
} else {
res = positiveMomentsDifference(other, base);
res.milliseconds = -res.milliseconds;
res.months = -res.months;
}
return res;
}
| momentsDifference | identifier_name |
create.js | import { Duration, isDuration } from './constructor';
import toInt from '../utils/to-int';
import hasOwnProp from '../utils/has-own-prop';
import { DATE, HOUR, MINUTE, SECOND, MILLISECOND } from '../units/constants';
import { cloneWithOffset } from '../units/offset';
import { createLocal } from '../create/local';
// ASP.NET json date format regex
var aspNetRegex = /^(\-)?(?:(\d*)[. ])?(\d+)\:(\d+)(?:\:(\d+)\.?(\d{3})?\d*)?$/;
// from http://docs.closure-library.googlecode.com/git/closure_goog_date_date.js.source.html
// somewhat more in line with the ISO 8601:2004 spec (section 4.4.3.2), but allows decimal anywhere
// and further modified to allow for strings containing both week and day
var isoRegex = /^(-)?P(?:(-?[0-9,.]*)Y)?(?:(-?[0-9,.]*)M)?(?:(-?[0-9,.]*)W)?(?:(-?[0-9,.]*)D)?(?:T(?:(-?[0-9,.]*)H)?(?:(-?[0-9,.]*)M)?(?:(-?[0-9,.]*)S)?)?$/;
export function createDuration (input, key) {
var duration = input,
// matching against regexp is expensive, do it on demand
match = null,
sign,
ret,
diffRes;
if (isDuration(input)) {
duration = {
ms : input._milliseconds,
d : input._days,
M : input._months
};
} else if (typeof input === 'number') | else if (!!(match = aspNetRegex.exec(input))) {
sign = (match[1] === '-') ? -1 : 1;
duration = {
y : 0,
d : toInt(match[DATE]) * sign,
h : toInt(match[HOUR]) * sign,
m : toInt(match[MINUTE]) * sign,
s : toInt(match[SECOND]) * sign,
ms : toInt(match[MILLISECOND]) * sign
};
} else if (!!(match = isoRegex.exec(input))) {
sign = (match[1] === '-') ? -1 : 1;
duration = {
y : parseIso(match[2], sign),
M : parseIso(match[3], sign),
w : parseIso(match[4], sign),
d : parseIso(match[5], sign),
h : parseIso(match[6], sign),
m : parseIso(match[7], sign),
s : parseIso(match[8], sign)
};
} else if (duration == null) {// checks for null or undefined
duration = {};
} else if (typeof duration === 'object' && ('from' in duration || 'to' in duration)) {
diffRes = momentsDifference(createLocal(duration.from), createLocal(duration.to));
duration = {};
duration.ms = diffRes.milliseconds;
duration.M = diffRes.months;
}
ret = new Duration(duration);
if (isDuration(input) && hasOwnProp(input, '_locale')) {
ret._locale = input._locale;
}
return ret;
}
createDuration.fn = Duration.prototype;
function parseIso (inp, sign) {
// We'd normally use ~~inp for this, but unfortunately it also
// converts floats to ints.
// inp may be undefined, so careful calling replace on it.
var res = inp && parseFloat(inp.replace(',', '.'));
// apply sign while we're at it
return (isNaN(res) ? 0 : res) * sign;
}
function positiveMomentsDifference(base, other) {
var res = {milliseconds: 0, months: 0};
res.months = other.month() - base.month() +
(other.year() - base.year()) * 12;
if (base.clone().add(res.months, 'M').isAfter(other)) {
--res.months;
}
res.milliseconds = +other - +(base.clone().add(res.months, 'M'));
return res;
}
function momentsDifference(base, other) {
var res;
if (!(base.isValid() && other.isValid())) {
return {milliseconds: 0, months: 0};
}
other = cloneWithOffset(other, base);
if (base.isBefore(other)) {
res = positiveMomentsDifference(base, other);
} else {
res = positiveMomentsDifference(other, base);
res.milliseconds = -res.milliseconds;
res.months = -res.months;
}
return res;
}
| {
duration = {};
if (key) {
duration[key] = input;
} else {
duration.milliseconds = input;
}
} | conditional_block |
create.js | import { Duration, isDuration } from './constructor';
import toInt from '../utils/to-int';
import hasOwnProp from '../utils/has-own-prop';
import { DATE, HOUR, MINUTE, SECOND, MILLISECOND } from '../units/constants';
import { cloneWithOffset } from '../units/offset';
import { createLocal } from '../create/local';
// ASP.NET json date format regex
var aspNetRegex = /^(\-)?(?:(\d*)[. ])?(\d+)\:(\d+)(?:\:(\d+)\.?(\d{3})?\d*)?$/;
// from http://docs.closure-library.googlecode.com/git/closure_goog_date_date.js.source.html
// somewhat more in line with the ISO 8601:2004 spec (section 4.4.3.2), but allows decimal anywhere
// and further modified to allow for strings containing both week and day
var isoRegex = /^(-)?P(?:(-?[0-9,.]*)Y)?(?:(-?[0-9,.]*)M)?(?:(-?[0-9,.]*)W)?(?:(-?[0-9,.]*)D)?(?:T(?:(-?[0-9,.]*)H)?(?:(-?[0-9,.]*)M)?(?:(-?[0-9,.]*)S)?)?$/;
export function createDuration (input, key) {
var duration = input,
// matching against regexp is expensive, do it on demand
match = null,
sign,
ret,
diffRes;
if (isDuration(input)) {
duration = {
ms : input._milliseconds,
d : input._days,
M : input._months
};
} else if (typeof input === 'number') {
duration = {};
if (key) {
duration[key] = input;
} else {
duration.milliseconds = input;
}
} else if (!!(match = aspNetRegex.exec(input))) {
sign = (match[1] === '-') ? -1 : 1;
duration = {
y : 0,
d : toInt(match[DATE]) * sign,
h : toInt(match[HOUR]) * sign,
m : toInt(match[MINUTE]) * sign,
s : toInt(match[SECOND]) * sign,
ms : toInt(match[MILLISECOND]) * sign
};
} else if (!!(match = isoRegex.exec(input))) {
sign = (match[1] === '-') ? -1 : 1;
duration = {
y : parseIso(match[2], sign),
M : parseIso(match[3], sign),
w : parseIso(match[4], sign),
d : parseIso(match[5], sign),
h : parseIso(match[6], sign),
m : parseIso(match[7], sign),
s : parseIso(match[8], sign)
};
} else if (duration == null) {// checks for null or undefined
duration = {};
} else if (typeof duration === 'object' && ('from' in duration || 'to' in duration)) {
diffRes = momentsDifference(createLocal(duration.from), createLocal(duration.to));
duration = {};
duration.ms = diffRes.milliseconds;
duration.M = diffRes.months;
}
ret = new Duration(duration);
if (isDuration(input) && hasOwnProp(input, '_locale')) {
ret._locale = input._locale;
}
return ret;
}
createDuration.fn = Duration.prototype;
function parseIso (inp, sign) {
// We'd normally use ~~inp for this, but unfortunately it also
// converts floats to ints.
// inp may be undefined, so careful calling replace on it.
var res = inp && parseFloat(inp.replace(',', '.'));
// apply sign while we're at it
return (isNaN(res) ? 0 : res) * sign;
}
function positiveMomentsDifference(base, other) {
var res = {milliseconds: 0, months: 0};
res.months = other.month() - base.month() +
(other.year() - base.year()) * 12;
if (base.clone().add(res.months, 'M').isAfter(other)) {
--res.months;
}
res.milliseconds = +other - +(base.clone().add(res.months, 'M'));
return res;
}
function momentsDifference(base, other) {
var res;
if (!(base.isValid() && other.isValid())) { | }
other = cloneWithOffset(other, base);
if (base.isBefore(other)) {
res = positiveMomentsDifference(base, other);
} else {
res = positiveMomentsDifference(other, base);
res.milliseconds = -res.milliseconds;
res.months = -res.months;
}
return res;
} | return {milliseconds: 0, months: 0}; | random_line_split |
publisher.py | from __future__ import absolute_import
from werkzeug.exceptions import ServiceUnavailable, NotFound
from r5d4.flask_redis import get_conf_db
def publish_transaction(channel, tr_type, payload):
conf_db = get_conf_db()
if tr_type not in ["insert", "delete"]:
|
subscribed = conf_db.scard("Subscriptions:%s:ActiveAnalytics" % channel)
if subscribed == 0:
raise NotFound(("Channel not found",
"Channel '%(channel)s' is not found or has 0 "
"subscriptions" % locals()))
listened = conf_db.publish(
channel,
'{'
' "tr_type" : "' + tr_type + '", '
' "payload" : ' + payload +
'}'
)
if listened != subscribed:
raise ServiceUnavailable((
"Subscription-Listened mismatch",
"Listened count = %d doesn't match Subscribed count = %d" % (
listened,
subscribed
)
))
| raise ValueError("Unknown transaction type", tr_type) | conditional_block |
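# Illustrative usage sketch (channel name and payload are hypothetical, not from this module):
#   publish_transaction("sales", "insert", '{"order_id": 7, "amount": 42.0}')
# The call raises ValueError for a tr_type other than "insert"/"delete", NotFound when the
# channel has no active analytics subscriptions, and ServiceUnavailable when the number of
# listeners that received the message differs from the subscription count.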
publisher.py | from __future__ import absolute_import
from werkzeug.exceptions import ServiceUnavailable, NotFound
from r5d4.flask_redis import get_conf_db
def publish_transaction(channel, tr_type, payload):
conf_db = get_conf_db()
if tr_type not in ["insert", "delete"]:
raise ValueError("Unknown transaction type", tr_type)
subscribed = conf_db.scard("Subscriptions:%s:ActiveAnalytics" % channel)
if subscribed == 0:
raise NotFound(("Channel not found",
"Channel '%(channel)s' is not found or has 0 "
"subscriptions" % locals()))
listened = conf_db.publish(
channel,
'{'
' "tr_type" : "' + tr_type + '", '
' "payload" : ' + payload +
'}'
) | "Subscription-Listened mismatch",
"Listened count = %d doesn't match Subscribed count = %d" % (
listened,
subscribed
)
)) | if listened != subscribed:
raise ServiceUnavailable(( | random_line_split |
publisher.py | from __future__ import absolute_import
from werkzeug.exceptions import ServiceUnavailable, NotFound
from r5d4.flask_redis import get_conf_db
def publish_transaction(channel, tr_type, payload):
| subscribed
)
))
| conf_db = get_conf_db()
if tr_type not in ["insert", "delete"]:
raise ValueError("Unknown transaction type", tr_type)
subscribed = conf_db.scard("Subscriptions:%s:ActiveAnalytics" % channel)
if subscribed == 0:
raise NotFound(("Channel not found",
"Channel '%(channel)s' is not found or has 0 "
"subscriptions" % locals()))
listened = conf_db.publish(
channel,
'{'
' "tr_type" : "' + tr_type + '", '
' "payload" : ' + payload +
'}'
)
if listened != subscribed:
raise ServiceUnavailable((
"Subscription-Listened mismatch",
"Listened count = %d doesn't match Subscribed count = %d" % (
listened, | identifier_body |
publisher.py | from __future__ import absolute_import
from werkzeug.exceptions import ServiceUnavailable, NotFound
from r5d4.flask_redis import get_conf_db
def | (channel, tr_type, payload):
conf_db = get_conf_db()
if tr_type not in ["insert", "delete"]:
raise ValueError("Unknown transaction type", tr_type)
subscribed = conf_db.scard("Subscriptions:%s:ActiveAnalytics" % channel)
if subscribed == 0:
raise NotFound(("Channel not found",
"Channel '%(channel)s' is not found or has 0 "
"subscriptions" % locals()))
listened = conf_db.publish(
channel,
'{'
' "tr_type" : "' + tr_type + '", '
' "payload" : ' + payload +
'}'
)
if listened != subscribed:
raise ServiceUnavailable((
"Subscription-Listened mismatch",
"Listened count = %d doesn't match Subscribed count = %d" % (
listened,
subscribed
)
))
| publish_transaction | identifier_name |
FeatureAKGApp.tsx | import { Col, Row } from "@artsy/palette"
import { FeatureAKGApp_viewer } from "v2/__generated__/FeatureAKGApp_viewer.graphql"
import { Footer } from "v2/Components/Footer"
import * as React from "react";
import { Title } from "react-head"
import { createFragmentContainer, graphql } from "react-relay"
import { FeatureFragmentContainer as Feature } from "./Components/Feature"
interface FeatureAKGAppProps {
viewer: FeatureAKGApp_viewer
}
export const FeatureAKGApp: React.FC<FeatureAKGAppProps> = props => {
return (
<>
<Title>Art Keeps Going</Title>
<Feature viewer={props.viewer} />
<Row>
<Col>
<Footer />
</Col>
</Row>
</>
)
}
export const FeatureAKGAppFragmentContainer = createFragmentContainer(
FeatureAKGApp,
{
viewer: graphql`
fragment FeatureAKGApp_viewer on Viewer
@argumentDefinitions(
articleIDs: { type: "[String]!" }
selectedWorksSetID: { type: "String!" }
collectionRailItemIDs: { type: "[String!]" }
auctionRailItemIDs: { type: "[String!]" }
fairRailItemIDs: { type: "[String!]" }
hasCollectionRailItems: { type: "Boolean!" }
hasAuctionRailItems: { type: "Boolean!" }
hasFairRailItems: { type: "Boolean!" } | ...Feature_viewer
@arguments(
articleIDs: $articleIDs
selectedWorksSetID: $selectedWorksSetID
collectionRailItemIDs: $collectionRailItemIDs
auctionRailItemIDs: $auctionRailItemIDs
fairRailItemIDs: $fairRailItemIDs
hasCollectionRailItems: $hasCollectionRailItems
hasAuctionRailItems: $hasAuctionRailItems
hasFairRailItems: $hasFairRailItems
)
}
`,
}
) | ) { | random_line_split |
FeedbackWindow.js | Ext.ns('CMS');
/**
* A window with a form that users can use to send feedback
*/
CMS.FeedbackWindow = Ext.extend(Ext.Window, {
modal: true,
width: 500,
height: 400,
resizable: false,
layout: 'fit',
closable: false,
padding: 10,
initComponent: function () {
this.items = {
xtype: 'form',
ref: 'formpanel',
bodyStyle: 'background: transparent',
monitorValid: true,
border: false,
defaults: {
anchor: '-' + Ext.getPreciseScrollBarWidth()
},
labelWidth: 130,
items: [{
xtype: 'textfield',
emptyText: CMS.i18n('Betreff'),
name: 'subject',
hideLabel: true,
allowBlank: true
}, {
xtype: 'textarea',
emptyText: CMS.i18n('Mitteilung') + '*',
name: 'body',
height: 250,
hideLabel: true,
allowBlank: false
}, {
xtype: 'textfield',
name: 'email',
fieldLabel: CMS.i18n('E-Mail für Rückfragen'),
vtype: 'email',
value: CMS.app.userInfo.get('email'),
allowBlank: true
}],
buttons: [{
xtype: 'label',
text: CMS.i18n('{marker} = Pflichtfeld').replace('{marker}', '*')
}, '->', {
text: CMS.i18n('Abschicken'),
iconCls: 'send',
formBind: true,
handler: this.sendHandler,
scope: this
}, {
text: CMS.i18n('Schließen'),
iconCls: 'cancel',
handler: this.closeHandler,
scope: this
}],
buttonAlign: 'left'
};
Ext.Window.prototype.initComponent.apply(this, arguments);
},
/**
* @private
* Handler for send button
*/
sendHandler: function () {
CMS.app.trafficManager.sendRequest({
action: 'sendFeedback',
data: this.gatherData(),
success: function () {
CMS.Message.toast(CMS.i18n('Vielen Dank'), CMS.i18n('Mitteilung wurde gesendet'));
},
failureTitle: CMS.i18n('Konnte Feedback nicht versenden')
});
this.destroy();
},
/**
* @private
* Handler for close button
*/
closeHandler: function () {
if (!this.formpanel.getForm().isValid()) {
this.destroy();
} else {
Ext.MessageBox.confirm(CMS.i18n('Mitteilung senden?'), CMS.i18n('Die Mitteilung wurde noch nicht versandt. Soll sie jetzt versendet werden?'), function (btnId) {
if (btnId == 'yes') {
this.sendHandler();
} else {
| }, this);
}
},
/**
* @private
*/
gatherData: function () {
var result = this.formpanel.getForm().getFieldValues();
result.errors = CMS.app.ErrorManager.getErrorHistory();
result.userAgent = navigator.userAgent;
result.platform = navigator.platform;
return result;
}
});
| this.destroy();
}
| conditional_block |
FeedbackWindow.js | Ext.ns('CMS');
/**
* A window with a form that users can use to send feedback
*/
CMS.FeedbackWindow = Ext.extend(Ext.Window, {
modal: true,
width: 500,
height: 400,
resizable: false,
layout: 'fit',
closable: false,
padding: 10,
initComponent: function () {
this.items = {
xtype: 'form',
ref: 'formpanel',
bodyStyle: 'background: transparent',
monitorValid: true,
border: false,
defaults: {
anchor: '-' + Ext.getPreciseScrollBarWidth()
},
labelWidth: 130,
items: [{
xtype: 'textfield',
emptyText: CMS.i18n('Betreff'),
name: 'subject',
hideLabel: true,
allowBlank: true
}, {
xtype: 'textarea',
emptyText: CMS.i18n('Mitteilung') + '*',
name: 'body',
height: 250,
hideLabel: true,
allowBlank: false
}, {
xtype: 'textfield',
name: 'email',
fieldLabel: CMS.i18n('E-Mail für Rückfragen'),
vtype: 'email',
value: CMS.app.userInfo.get('email'),
allowBlank: true
}],
buttons: [{
xtype: 'label',
text: CMS.i18n('{marker} = Pflichtfeld').replace('{marker}', '*')
}, '->', {
text: CMS.i18n('Abschicken'),
iconCls: 'send',
formBind: true,
handler: this.sendHandler,
scope: this
}, {
text: CMS.i18n('Schließen'),
iconCls: 'cancel',
handler: this.closeHandler,
scope: this
}],
buttonAlign: 'left'
};
Ext.Window.prototype.initComponent.apply(this, arguments);
},
/**
* @private
* Handler for send button
*/
sendHandler: function () {
CMS.app.trafficManager.sendRequest({
action: 'sendFeedback',
data: this.gatherData(),
success: function () {
CMS.Message.toast(CMS.i18n('Vielen Dank'), CMS.i18n('Mitteilung wurde gesendet'));
}, | },
/**
* @private
* Handler for close button
*/
closeHandler: function () {
if (!this.formpanel.getForm().isValid()) {
this.destroy();
} else {
Ext.MessageBox.confirm(CMS.i18n('Mitteilung senden?'), CMS.i18n('Die Mitteilung wurde noch nicht versandt. Soll sie jetzt versendet werden?'), function (btnId) {
if (btnId == 'yes') {
this.sendHandler();
} else {
this.destroy();
}
}, this);
}
},
/**
* @private
*/
gatherData: function () {
var result = this.formpanel.getForm().getFieldValues();
result.errors = CMS.app.ErrorManager.getErrorHistory();
result.userAgent = navigator.userAgent;
result.platform = navigator.platform;
return result;
}
}); | failureTitle: CMS.i18n('Konnte Feedback nicht versenden')
});
this.destroy(); | random_line_split |
common.py | #
# common.py
#
# Copyright (C) 2009 Justin Noah <[email protected]>
#
# Basic plugin template created by:
# Copyright (C) 2008 Martijn Voncken <[email protected]>
# Copyright (C) 2007-2009 Andrew Resch <[email protected]>
# Copyright (C) 2009 Damien Churchill <[email protected]>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
def | (filename):
import pkg_resources, os
return pkg_resources.resource_filename("autobot", os.path.join("data", filename))
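# Illustrative call (the file name is hypothetical, not from this module): get_resource("config.glade")
# resolves to the on-disk path of autobot/data/config.glade via pkg_resources, wherever the plugin
# package is installed.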
| get_resource | identifier_name |
common.py | #
# common.py
#
# Copyright (C) 2009 Justin Noah <[email protected]>
# | # Basic plugin template created by:
# Copyright (C) 2008 Martijn Voncken <[email protected]>
# Copyright (C) 2007-2009 Andrew Resch <[email protected]>
# Copyright (C) 2009 Damien Churchill <[email protected]>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
def get_resource(filename):
import pkg_resources, os
return pkg_resources.resource_filename("autobot", os.path.join("data", filename)) | random_line_split |
|
common.py | #
# common.py
#
# Copyright (C) 2009 Justin Noah <[email protected]>
#
# Basic plugin template created by:
# Copyright (C) 2008 Martijn Voncken <[email protected]>
# Copyright (C) 2007-2009 Andrew Resch <[email protected]>
# Copyright (C) 2009 Damien Churchill <[email protected]>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
def get_resource(filename):
| import pkg_resources, os
return pkg_resources.resource_filename("autobot", os.path.join("data", filename)) | identifier_body |
|
lookups.py | from django.db.models import Q
from django.utils.html import escape
from django.contrib.auth.models import User
from ajax_select import LookupChannel
class BuyerLookup(LookupChannel):
"""
This class suggests user names (via AJAX autocomplete) while filling in the client name for a purchase order
"""
model = User
def get_query(self, q, request):
user = User.objects.all()
for value in q.split():
user = user.filter(Q(username__icontains=value)| \
Q(first_name__icontains=value) \
| Q(last_name__icontains=value) \
|Q(customer__address__street_address__icontains=value)\
|Q(customer__address__district__icontains=value)\
|Q(customer__address__province__icontains=value)
|Q(customer__title__icontains=value)
|Q(customer__company__icontains=value))
return user[0:15]
def get_result(self, obj):
return unicode(obj.username)
def format_match(self, obj):
return self.format_item_display(obj)
def | (self, obj):
result = User.objects.values('first_name','last_name',
'customer__title','customer__address__street_address',
'customer__address__district','customer__company').filter(id = obj.id)[0]
return "<b>Name or Title:</b> %s <br> <b>Company:</b> %s <br> <b>Address:</b> %s <br> %s \
<hr>" %((result['first_name'] + ' ' + result['last_name'] + ' ' + \
result['customer__title']), \
(result['customer__company']),\
(result['customer__address__street_address']), \
(result['customer__address__district']))
| format_item_display | identifier_name |
lookups.py | from django.db.models import Q
from django.utils.html import escape
from django.contrib.auth.models import User
from ajax_select import LookupChannel
class BuyerLookup(LookupChannel):
"""
This class suggests user names (via AJAX autocomplete) while filling in the client name for a purchase order
"""
model = User
def get_query(self, q, request):
user = User.objects.all()
for value in q.split():
|
return user[0:15]
def get_result(self, obj):
return unicode(obj.username)
def format_match(self, obj):
return self.format_item_display(obj)
def format_item_display(self, obj):
result = User.objects.values('first_name','last_name',
'customer__title','customer__address__street_address',
'customer__address__district','customer__company').filter(id = obj.id)[0]
return "<b>Name or Title:</b> %s <br> <b>Company:</b> %s <br> <b>Address:</b> %s <br> %s \
<hr>" %((result['first_name'] + ' ' + result['last_name'] + ' ' + \
result['customer__title']), \
(result['customer__company']),\
(result['customer__address__street_address']), \
(result['customer__address__district']))
| user = user.filter(Q(username__icontains=value)| \
Q(first_name__icontains=value) \
| Q(last_name__icontains=value) \
|Q(customer__address__street_address__icontains=value)\
|Q(customer__address__district__icontains=value)\
|Q(customer__address__province__icontains=value)
|Q(customer__title__icontains=value)
|Q(customer__company__icontains=value)) | conditional_block |
lookups.py | from django.db.models import Q
from django.utils.html import escape
from django.contrib.auth.models import User
from ajax_select import LookupChannel
class BuyerLookup(LookupChannel):
"""
This class suggests user names (via AJAX autocomplete) while filling in the client name for a purchase order
"""
model = User
def get_query(self, q, request):
user = User.objects.all()
for value in q.split():
user = user.filter(Q(username__icontains=value)| \
Q(first_name__icontains=value) \
| Q(last_name__icontains=value) \
|Q(customer__address__street_address__icontains=value)\
|Q(customer__address__district__icontains=value)\
|Q(customer__address__province__icontains=value)
|Q(customer__title__icontains=value)
|Q(customer__company__icontains=value))
return user[0:15] | def get_result(self, obj):
return unicode(obj.username)
def format_match(self, obj):
return self.format_item_display(obj)
def format_item_display(self, obj):
result = User.objects.values('first_name','last_name',
'customer__title','customer__address__street_address',
'customer__address__district','customer__company').filter(id = obj.id)[0]
return "<b>Name or Title:</b> %s <br> <b>Company:</b> %s <br> <b>Address:</b> %s <br> %s \
<hr>" %((result['first_name'] + ' ' + result['last_name'] + ' ' + \
result['customer__title']), \
(result['customer__company']),\
(result['customer__address__street_address']), \
(result['customer__address__district'])) | random_line_split |
|
lookups.py | from django.db.models import Q
from django.utils.html import escape
from django.contrib.auth.models import User
from ajax_select import LookupChannel
class BuyerLookup(LookupChannel):
"""
This class suggests user names (via AJAX autocomplete) while filling in the client name for a purchase order
"""
model = User
def get_query(self, q, request):
user = User.objects.all()
for value in q.split():
user = user.filter(Q(username__icontains=value)| \
Q(first_name__icontains=value) \
| Q(last_name__icontains=value) \
|Q(customer__address__street_address__icontains=value)\
|Q(customer__address__district__icontains=value)\
|Q(customer__address__province__icontains=value)
|Q(customer__title__icontains=value)
|Q(customer__company__icontains=value))
return user[0:15]
def get_result(self, obj):
return unicode(obj.username)
def format_match(self, obj):
return self.format_item_display(obj)
def format_item_display(self, obj):
| result = User.objects.values('first_name','last_name',
'customer__title','customer__address__street_address',
'customer__address__district','customer__company').filter(id = obj.id)[0]
return "<b>Name or Title:</b> %s <br> <b>Company:</b> %s <br> <b>Address:</b> %s <br> %s \
<hr>" %((result['first_name'] + ' ' + result['last_name'] + ' ' + \
result['customer__title']), \
(result['customer__company']),\
(result['customer__address__street_address']), \
(result['customer__address__district'])) | identifier_body |
|
lda_preprocessing.py | from optparse import OptionParser
import re
import os
import sys
import numpy as np
from ..util import dirs
from ..util import file_handling as fh
from ..preprocessing import data_splitting as ds
from ..feature_extractors.vocabulary_with_counts import VocabWithCounts
def main():
usage = "%prog project"
parser = OptionParser(usage=usage)
parser.add_option('-v', dest='vocab_size', default=10000,
help='Vocabulary size (most frequent words): default=%default')
parser.add_option('--seed', dest='seed', default=42,
help='Random seed: default=%default')
#parser.add_option('--boolarg', action="store_true", dest="boolarg", default=False,
# help='Keyword argument: default=%default')
(options, args) = parser.parse_args()
project_name = args[0]
dirs.make_base_dir(project_name) | pronouns = {"i", 'you', 'he', 'his', 'she', 'her', 'hers', 'it', 'its', 'we', 'you', 'your', 'they', 'them', 'their'}
determiners = {'a', 'an', 'the', 'this', 'that', 'these', 'those'}
prepositions = {'at', 'by', 'for', 'from', 'in', 'into', 'of', 'on', 'than', 'to', 'with'}
transitional = {'and', 'also', 'as', 'but', 'if', 'or', 'then'}
common_verbs = {'are', 'be', 'been', 'had', 'has', 'have', 'is', 'said', 'was', 'were'}
stopwords = suffixes.union(pronouns).union(determiners).union(prepositions).union(transitional).union(common_verbs)
print "Removing %d stopwords:" % len(stopwords)
for s in stopwords:
print s
# set random seed
np.random.seed(int(options.seed))
# read in data
dirs.make_base_dir(project_name)
sentences = fh.read_json(dirs.get_processed_text_file())
all_documents = sentences.keys()
documents = list(set(all_documents))
# create a vocabulary and fill it with the tokenized documents
tokenized, vocab = tokenize(sentences, documents, stopwords=stopwords)
print "Most common words in corpus:"
most_common = vocab.most_common(50)
most_common.sort()
for v in most_common:
print v
# set vocabulary size and prune tokens
print "Pruning vocabulary"
vocab.prune(n_words=vocab_size)
n_words = 0
for k in documents:
tokens = [t for t in tokenized[k] if t in vocab.token2index]
n_words += len(tokens)
tokenized[k] = tokens
n_documents = len(documents)
n_vocab = len(vocab)
print n_documents, "documents"
print n_vocab, "word types"
print n_words, "word tokens"
# create the count matrices
vocab_assignments = np.zeros(n_words, dtype=int) # vocab index of the ith word
#topic_assignments = np.zeros(n_words, dtype=int) # topic of the ith word
doc_assignments = np.zeros(n_words, dtype=int) # document of the ith word
count = 0
for d_i, d in enumerate(documents):
tokens = tokenized[d]
for t in tokens:
v_index = vocab.get_index(t)
assert v_index >= 0
#w_topic = np.random.randint(n_topics)
vocab_assignments[count] = v_index
#topic_assignments[count] = w_topic
doc_assignments[count] = d_i
#topic_counts[w_topic] += 1
#vocab_topics[v_index, w_topic] += 1
#doc_topics[d_i, w_topic] += 1
count += 1
assert count == n_words
output_filename = os.path.join(dirs.lda_dir, 'word_num.json')
fh.write_to_json(list(vocab_assignments), output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'word_doc.json')
fh.write_to_json(list(doc_assignments), output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'vocab.json')
fh.write_to_json(vocab.index2token, output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'documents.json')
fh.write_to_json(documents, output_filename, sort_keys=False)
# just exit after writing data
def tokenize(sentences, documents_to_tokenize, stopwords=set()):
print "Tokenizing"
vocab = VocabWithCounts('', add_oov=False)
tokenized = {}
for k in documents_to_tokenize:
text = sentences[k].lower()
text = re.sub('\d', '#', text)
tokens = text.split()
tokens = [t for t in tokens if re.search('[a-zA-Z]', t)]
tokens = [t for t in tokens if t not in stopwords]
vocab.add_tokens(tokens)
tokenized[k] = tokens
return tokenized, vocab
if __name__ == '__main__':
main() |
vocab_size = int(options.vocab_size)
suffixes = {"'s", "n't"} | random_line_split |
lda_preprocessing.py | from optparse import OptionParser
import re
import os
import sys
import numpy as np
from ..util import dirs
from ..util import file_handling as fh
from ..preprocessing import data_splitting as ds
from ..feature_extractors.vocabulary_with_counts import VocabWithCounts
def main():
| transitional = {'and', 'also', 'as', 'but', 'if', 'or', 'then'}
common_verbs = {'are', 'be', 'been', 'had', 'has', 'have', 'is', 'said', 'was', 'were'}
stopwords = suffixes.union(pronouns).union(determiners).union(prepositions).union(transitional).union(common_verbs)
print "Removing %d stopwords:" % len(stopwords)
for s in stopwords:
print s
# set random seed
np.random.seed(int(options.seed))
# read in data
dirs.make_base_dir(project_name)
sentences = fh.read_json(dirs.get_processed_text_file())
all_documents = sentences.keys()
documents = list(set(all_documents))
# create a vocabulary and fill it with the tokenized documents
tokenized, vocab = tokenize(sentences, documents, stopwords=stopwords)
print "Most common words in corpus:"
most_common = vocab.most_common(50)
most_common.sort()
for v in most_common:
print v
# set vocabulary size and prune tokens
print "Pruning vocabulary"
vocab.prune(n_words=vocab_size)
n_words = 0
for k in documents:
tokens = [t for t in tokenized[k] if t in vocab.token2index]
n_words += len(tokens)
tokenized[k] = tokens
n_documents = len(documents)
n_vocab = len(vocab)
print n_documents, "documents"
print n_vocab, "word types"
print n_words, "word tokens"
# create the count matrices
vocab_assignments = np.zeros(n_words, dtype=int) # vocab index of the ith word
#topic_assignments = np.zeros(n_words, dtype=int) # topic of the ith word
doc_assignments = np.zeros(n_words, dtype=int) # document of the ith word
count = 0
for d_i, d in enumerate(documents):
tokens = tokenized[d]
for t in tokens:
v_index = vocab.get_index(t)
assert v_index >= 0
#w_topic = np.random.randint(n_topics)
vocab_assignments[count] = v_index
#topic_assignments[count] = w_topic
doc_assignments[count] = d_i
#topic_counts[w_topic] += 1
#vocab_topics[v_index, w_topic] += 1
#doc_topics[d_i, w_topic] += 1
count += 1
assert count == n_words
output_filename = os.path.join(dirs.lda_dir, 'word_num.json')
fh.write_to_json(list(vocab_assignments), output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'word_doc.json')
fh.write_to_json(list(doc_assignments), output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'vocab.json')
fh.write_to_json(vocab.index2token, output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'documents.json')
fh.write_to_json(documents, output_filename, sort_keys=False)
# just exit after writing data
def tokenize(sentences, documents_to_tokenize, stopwords=set()):
print "Tokenizing"
vocab = VocabWithCounts('', add_oov=False)
tokenized = {}
for k in documents_to_tokenize:
text = sentences[k].lower()
text = re.sub('\d', '#', text)
tokens = text.split()
tokens = [t for t in tokens if re.search('[a-zA-Z]', t)]
tokens = [t for t in tokens if t not in stopwords]
vocab.add_tokens(tokens)
tokenized[k] = tokens
return tokenized, vocab
if __name__ == '__main__':
main()
| usage = "%prog project"
parser = OptionParser(usage=usage)
parser.add_option('-v', dest='vocab_size', default=10000,
help='Vocabulary size (most frequent words): default=%default')
parser.add_option('--seed', dest='seed', default=42,
help='Random seed: default=%default')
#parser.add_option('--boolarg', action="store_true", dest="boolarg", default=False,
# help='Keyword argument: default=%default')
(options, args) = parser.parse_args()
project_name = args[0]
dirs.make_base_dir(project_name)
vocab_size = int(options.vocab_size)
suffixes = {"'s", "n't"}
pronouns = {"i", 'you', 'he', 'his', 'she', 'her', 'hers', 'it', 'its', 'we', 'you', 'your', 'they', 'them', 'their'}
determiners = {'a', 'an', 'the', 'this', 'that', 'these', 'those'}
prepositions = {'at', 'by', 'for', 'from', 'in', 'into', 'of', 'on', 'than', 'to', 'with'} | identifier_body |
lda_preprocessing.py | from optparse import OptionParser
import re
import os
import sys
import numpy as np
from ..util import dirs
from ..util import file_handling as fh
from ..preprocessing import data_splitting as ds
from ..feature_extractors.vocabulary_with_counts import VocabWithCounts
def main():
usage = "%prog project"
parser = OptionParser(usage=usage)
parser.add_option('-v', dest='vocab_size', default=10000,
help='Vocabulary size (most frequent words): default=%default')
parser.add_option('--seed', dest='seed', default=42,
help='Random seed: default=%default')
#parser.add_option('--boolarg', action="store_true", dest="boolarg", default=False,
# help='Keyword argument: default=%default')
(options, args) = parser.parse_args()
project_name = args[0]
dirs.make_base_dir(project_name)
vocab_size = int(options.vocab_size)
suffixes = {"'s", "n't"}
pronouns = {"i", 'you', 'he', 'his', 'she', 'her', 'hers', 'it', 'its', 'we', 'you', 'your', 'they', 'them', 'their'}
determiners = {'a', 'an', 'the', 'this', 'that', 'these', 'those'}
prepositions = {'at', 'by', 'for', 'from', 'in', 'into', 'of', 'on', 'than', 'to', 'with'}
transitional = {'and', 'also', 'as', 'but', 'if', 'or', 'then'}
common_verbs = {'are', 'be', 'been', 'had', 'has', 'have', 'is', 'said', 'was', 'were'}
stopwords = suffixes.union(pronouns).union(determiners).union(prepositions).union(transitional).union(common_verbs)
print "Removing %d stopwords:" % len(stopwords)
for s in stopwords:
print s
# set random seed
np.random.seed(int(options.seed))
# read in data
dirs.make_base_dir(project_name)
sentences = fh.read_json(dirs.get_processed_text_file())
all_documents = sentences.keys()
documents = list(set(all_documents))
# create a vocabulary and fill it with the tokenized documents
tokenized, vocab = tokenize(sentences, documents, stopwords=stopwords)
print "Most common words in corpus:"
most_common = vocab.most_common(50)
most_common.sort()
for v in most_common:
print v
# set vocabulary size and prune tokens
print "Pruning vocabulary"
vocab.prune(n_words=vocab_size)
n_words = 0
for k in documents:
tokens = [t for t in tokenized[k] if t in vocab.token2index]
n_words += len(tokens)
tokenized[k] = tokens
n_documents = len(documents)
n_vocab = len(vocab)
print n_documents, "documents"
print n_vocab, "word types"
print n_words, "word tokens"
# create the count matrices
vocab_assignments = np.zeros(n_words, dtype=int) # vocab index of the ith word
#topic_assignments = np.zeros(n_words, dtype=int) # topic of the ith word
doc_assignments = np.zeros(n_words, dtype=int) # document of the ith word
count = 0
for d_i, d in enumerate(documents):
tokens = tokenized[d]
for t in tokens:
|
assert count == n_words
output_filename = os.path.join(dirs.lda_dir, 'word_num.json')
fh.write_to_json(list(vocab_assignments), output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'word_doc.json')
fh.write_to_json(list(doc_assignments), output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'vocab.json')
fh.write_to_json(vocab.index2token, output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'documents.json')
fh.write_to_json(documents, output_filename, sort_keys=False)
# just exit after writing data
def tokenize(sentences, documents_to_tokenize, stopwords=set()):
print "Tokenizing"
vocab = VocabWithCounts('', add_oov=False)
tokenized = {}
for k in documents_to_tokenize:
text = sentences[k].lower()
text = re.sub('\d', '#', text)
tokens = text.split()
tokens = [t for t in tokens if re.search('[a-zA-Z]', t)]
tokens = [t for t in tokens if t not in stopwords]
vocab.add_tokens(tokens)
tokenized[k] = tokens
return tokenized, vocab
if __name__ == '__main__':
main()
| v_index = vocab.get_index(t)
assert v_index >= 0
#w_topic = np.random.randint(n_topics)
vocab_assignments[count] = v_index
#topic_assignments[count] = w_topic
doc_assignments[count] = d_i
#topic_counts[w_topic] += 1
#vocab_topics[v_index, w_topic] += 1
#doc_topics[d_i, w_topic] += 1
count += 1 | conditional_block |
lda_preprocessing.py | from optparse import OptionParser
import re
import os
import sys
import numpy as np
from ..util import dirs
from ..util import file_handling as fh
from ..preprocessing import data_splitting as ds
from ..feature_extractors.vocabulary_with_counts import VocabWithCounts
def | ():
usage = "%prog project"
parser = OptionParser(usage=usage)
parser.add_option('-v', dest='vocab_size', default=10000,
help='Vocabulary size (most frequent words): default=%default')
parser.add_option('--seed', dest='seed', default=42,
help='Random seed: default=%default')
#parser.add_option('--boolarg', action="store_true", dest="boolarg", default=False,
# help='Keyword argument: default=%default')
(options, args) = parser.parse_args()
project_name = args[0]
dirs.make_base_dir(project_name)
vocab_size = int(options.vocab_size)
suffixes = {"'s", "n't"}
pronouns = {"i", 'you', 'he', 'his', 'she', 'her', 'hers', 'it', 'its', 'we', 'you', 'your', 'they', 'them', 'their'}
determiners = {'a', 'an', 'the', 'this', 'that', 'these', 'those'}
prepositions = {'at', 'by', 'for', 'from', 'in', 'into', 'of', 'on', 'than', 'to', 'with'}
transitional = {'and', 'also', 'as', 'but', 'if', 'or', 'then'}
common_verbs = {'are', 'be', 'been', 'had', 'has', 'have', 'is', 'said', 'was', 'were'}
stopwords = suffixes.union(pronouns).union(determiners).union(prepositions).union(transitional).union(common_verbs)
print "Removing %d stopwords:" % len(stopwords)
for s in stopwords:
print s
# set random seed
np.random.seed(int(options.seed))
# read in data
dirs.make_base_dir(project_name)
sentences = fh.read_json(dirs.get_processed_text_file())
all_documents = sentences.keys()
documents = list(set(all_documents))
# create a vocabulary and fill it with the tokenized documents
tokenized, vocab = tokenize(sentences, documents, stopwords=stopwords)
print "Most common words in corpus:"
most_common = vocab.most_common(50)
most_common.sort()
for v in most_common:
print v
# set vocabulary size and prune tokens
print "Pruning vocabulary"
vocab.prune(n_words=vocab_size)
n_words = 0
for k in documents:
tokens = [t for t in tokenized[k] if t in vocab.token2index]
n_words += len(tokens)
tokenized[k] = tokens
n_documents = len(documents)
n_vocab = len(vocab)
print n_documents, "documents"
print n_vocab, "word types"
print n_words, "word tokens"
# create the count matrices
vocab_assignments = np.zeros(n_words, dtype=int) # vocab index of the ith word
#topic_assignments = np.zeros(n_words, dtype=int) # topic of the ith word
doc_assignments = np.zeros(n_words, dtype=int) # document of the ith word
count = 0
for d_i, d in enumerate(documents):
tokens = tokenized[d]
for t in tokens:
v_index = vocab.get_index(t)
assert v_index >= 0
#w_topic = np.random.randint(n_topics)
vocab_assignments[count] = v_index
#topic_assignments[count] = w_topic
doc_assignments[count] = d_i
#topic_counts[w_topic] += 1
#vocab_topics[v_index, w_topic] += 1
#doc_topics[d_i, w_topic] += 1
count += 1
assert count == n_words
output_filename = os.path.join(dirs.lda_dir, 'word_num.json')
fh.write_to_json(list(vocab_assignments), output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'word_doc.json')
fh.write_to_json(list(doc_assignments), output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'vocab.json')
fh.write_to_json(vocab.index2token, output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'documents.json')
fh.write_to_json(documents, output_filename, sort_keys=False)
# just exit after writing data
def tokenize(sentences, documents_to_tokenize, stopwords=set()):
print "Tokenizing"
vocab = VocabWithCounts('', add_oov=False)
tokenized = {}
for k in documents_to_tokenize:
text = sentences[k].lower()
text = re.sub('\d', '#', text)
tokens = text.split()
tokens = [t for t in tokens if re.search('[a-zA-Z]', t)]
tokens = [t for t in tokens if t not in stopwords]
vocab.add_tokens(tokens)
tokenized[k] = tokens
return tokenized, vocab
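# Illustrative behaviour (hypothetical input, not part of the original file):
#   tokenized, vocab = tokenize({'d1': 'The cat sat on 3 mats!'}, ['d1'], stopwords={'the'})
#   tokenized['d1']  ->  ['cat', 'sat', 'on', 'mats!']   # '3' becomes '#' and is dropped, 'the' is removed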
if __name__ == '__main__':
main()
| main | identifier_name |
trackModel.js | var WO = WO || {};
WO.Track = Backbone.Model.extend({
urlRoot: '/api/tracks',
idAttribute: '_id',
defaults: {
notes: "",
title: 'Acoustic Piano',
isMuted: false,
solo: false,
octave: 4,
volume: 0.75,
instrument: "",
type: 'MIDI'
},
initialize : function(){
this.set('notes', []);
this.set('instrument', WO.InstrumentFactory( "Acoustic Piano", this.cid));
WO.instrumentKeyHandler.create(this.get('instrument'));
this.on('changeInstrument', function(instrumentName){this.changeInstrument(instrumentName);}, this);
},
genObjectId: (function() {
function s4() {
return Math.floor((1 + Math.random()) * 0x10000)
.toString(16)
.substring(1);
}
return function() {
return s4() + s4() + s4();
};
})(),
changeInstrument: function(instrumentName) {
var instType = {
'Acoustic Piano': 'MIDI',
'Audio File': 'Audio',
'Microphone': 'Microphone',
'Acoustic Guitar Steel': 'MIDI',
'Alto Sax': 'MIDI',
'Church Organ': 'MIDI',
'Distortion Guitar': 'MIDI',
'Electric Piano 1': 'MIDI',
'Flute': 'MIDI',
'Muted Trumpet': 'MIDI',
'Oboe': 'MIDI',
'Overdriven Guitar': 'MIDI',
'Pad 3 Polysynth': 'MIDI',
'Synth': 'MIDI',
'Synth Bass 1': 'MIDI',
'Synth Strings 2': 'MIDI',
'Viola': 'MIDI',
'Violin': 'MIDI',
'Xylophone': 'MIDI'
};
var previousInstrumentType = this.get('type');
WO.appView.unbindKeys();
this.set('type', instType[instrumentName]);
this.set('title', instrumentName);
if (this.get('type') === 'MIDI') {
this.set('instrument', WO.InstrumentFactory(instrumentName, this));
WO.instrumentKeyHandler.create(this.get('instrument'));
if (previousInstrumentType !== 'MIDI') {
$('.active-track .track-notes').html('');
this.set('mRender', new WO.MidiRender(this.cid + ' .track-notes'));
}
} else {
this.set('notes', []);
$('.active-track .track-notes').html('');
this.set('instrument', WO.InstrumentFactory(instrumentName, this)); | }
},
saveTrack: function(){
var instrument = this.get('instrument');
var mRender = this.get('mRender');
this.set('instrument', '');
this.set('mRender', '');
var that = this;
var newlySaveTrack = $.when(that.save()).done(function(){
that.set('instrument', instrument);
that.set('mRender', mRender);
return that;
});
return newlySaveTrack;
}
});
// see what type of instrument the current selection is
// midi -> mic => remove svg , add mic
// midi -> audio => remove svg , add audio
// midi -> midi => null
// mic -> audio => remove mic , add audio
// mic -> midi => remove mic, add svg
// audio -> mic => remove audio, add mic
// audio -> midi => remove audio, add svg
// keep notes only for midi change to hear different instruments. | random_line_split |
|
trackModel.js | var WO = WO || {};
WO.Track = Backbone.Model.extend({
urlRoot: '/api/tracks',
idAttribute: '_id',
defaults: {
notes: "",
title: 'Acoustic Piano',
isMuted: false,
solo: false,
octave: 4,
volume: 0.75,
instrument: "",
type: 'MIDI'
},
initialize : function(){
this.set('notes', []);
this.set('instrument', WO.InstrumentFactory( "Acoustic Piano", this.cid));
WO.instrumentKeyHandler.create(this.get('instrument'));
this.on('changeInstrument', function(instrumentName){this.changeInstrument(instrumentName);}, this);
},
genObjectId: (function() {
function | () {
return Math.floor((1 + Math.random()) * 0x10000)
.toString(16)
.substring(1);
}
return function() {
return s4() + s4() + s4();
};
})(),
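// Illustrative output (assumed, not from the original source): each s4() call yields four hex
// characters (e.g. "a3f9"), so genObjectId() returns a 12-character hex id such as
// "a3f94b01c77e" for use as a client-side pseudo object id.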
changeInstrument: function(instrumentName) {
var instType = {
'Acoustic Piano': 'MIDI',
'Audio File': 'Audio',
'Microphone': 'Microphone',
'Acoustic Guitar Steel': 'MIDI',
'Alto Sax': 'MIDI',
'Church Organ': 'MIDI',
'Distortion Guitar': 'MIDI',
'Electric Piano 1': 'MIDI',
'Flute': 'MIDI',
'Muted Trumpet': 'MIDI',
'Oboe': 'MIDI',
'Overdriven Guitar': 'MIDI',
'Pad 3 Polysynth': 'MIDI',
'Synth': 'MIDI',
'Synth Bass 1': 'MIDI',
'Synth Strings 2': 'MIDI',
'Viola': 'MIDI',
'Violin': 'MIDI',
'Xylophone': 'MIDI'
};
var previousInstrumentType = this.get('type');
WO.appView.unbindKeys();
this.set('type', instType[instrumentName]);
this.set('title', instrumentName);
if (this.get('type') === 'MIDI') {
this.set('instrument', WO.InstrumentFactory(instrumentName, this));
WO.instrumentKeyHandler.create(this.get('instrument'));
if (previousInstrumentType !== 'MIDI') {
$('.active-track .track-notes').html('');
this.set('mRender', new WO.MidiRender(this.cid + ' .track-notes'));
}
} else {
this.set('notes', []);
$('.active-track .track-notes').html('');
this.set('instrument', WO.InstrumentFactory(instrumentName, this));
}
},
saveTrack: function(){
var instrument = this.get('instrument');
var mRender = this.get('mRender');
this.set('instrument', '');
this.set('mRender', '');
var that = this;
var newlySaveTrack = $.when(that.save()).done(function(){
that.set('instrument', instrument);
that.set('mRender', mRender);
return that;
});
return newlySaveTrack;
}
});
// see what type of instrument the current selection is
// midi -> mic => remove svg , add mic
// midi -> audio => remove svg , add audio
// midi -> midi => null
// mic -> audio => remove mic , add audio
// mic -> midi => remove mic, add svg
// audio -> mic => remove audio, add mic
// audio -> midi => remove audio, add svg
// keep notes only for midi change to hear different instruments.
| s4 | identifier_name |
trackModel.js | var WO = WO || {};
WO.Track = Backbone.Model.extend({
urlRoot: '/api/tracks',
idAttribute: '_id',
defaults: {
notes: "",
title: 'Acoustic Piano',
isMuted: false,
solo: false,
octave: 4,
volume: 0.75,
instrument: "",
type: 'MIDI'
},
initialize : function(){
this.set('notes', []);
this.set('instrument', WO.InstrumentFactory( "Acoustic Piano", this.cid));
WO.instrumentKeyHandler.create(this.get('instrument'));
this.on('changeInstrument', function(instrumentName){this.changeInstrument(instrumentName);}, this);
},
genObjectId: (function() {
function s4() {
return Math.floor((1 + Math.random()) * 0x10000)
.toString(16)
.substring(1);
}
return function() {
return s4() + s4() + s4();
};
})(),
changeInstrument: function(instrumentName) {
var instType = {
'Acoustic Piano': 'MIDI',
'Audio File': 'Audio',
'Microphone': 'Microphone',
'Acoustic Guitar Steel': 'MIDI',
'Alto Sax': 'MIDI',
'Church Organ': 'MIDI',
'Distortion Guitar': 'MIDI',
'Electric Piano 1': 'MIDI',
'Flute': 'MIDI',
'Muted Trumpet': 'MIDI',
'Oboe': 'MIDI',
'Overdriven Guitar': 'MIDI',
'Pad 3 Polysynth': 'MIDI',
'Synth': 'MIDI',
'Synth Bass 1': 'MIDI',
'Synth Strings 2': 'MIDI',
'Viola': 'MIDI',
'Violin': 'MIDI',
'Xylophone': 'MIDI'
};
var previousInstrumentType = this.get('type');
WO.appView.unbindKeys();
this.set('type', instType[instrumentName]);
this.set('title', instrumentName);
if (this.get('type') === 'MIDI') {
this.set('instrument', WO.InstrumentFactory(instrumentName, this));
WO.instrumentKeyHandler.create(this.get('instrument'));
if (previousInstrumentType !== 'MIDI') |
} else {
this.set('notes', []);
$('.active-track .track-notes').html('');
this.set('instrument', WO.InstrumentFactory(instrumentName, this));
}
},
saveTrack: function(){
var instrument = this.get('instrument');
var mRender = this.get('mRender');
this.set('instrument', '');
this.set('mRender', '');
var that = this;
var newlySaveTrack = $.when(that.save()).done(function(){
that.set('instrument', instrument);
that.set('mRender', mRender);
return that;
});
return newlySaveTrack;
}
});
// see what type of instrument the current selection is
// midi -> mic => remove svg , add mic
// midi -> audio => remove svg , add audio
// midi -> midi => null
// mic -> audio => remove mic , add audio
// mic -> midi => remove mic, add svg
// audio -> mic => remove audio, add mic
// audio -> midi => remove audio, add svg
// keep notes only for midi change to hear different instruments.
| {
$('.active-track .track-notes').html('');
this.set('mRender', new WO.MidiRender(this.cid + ' .track-notes'));
} | conditional_block |
trackModel.js | var WO = WO || {};
WO.Track = Backbone.Model.extend({
urlRoot: '/api/tracks',
idAttribute: '_id',
defaults: {
notes: "",
title: 'Acoustic Piano',
isMuted: false,
solo: false,
octave: 4,
volume: 0.75,
instrument: "",
type: 'MIDI'
},
initialize : function(){
this.set('notes', []);
this.set('instrument', WO.InstrumentFactory( "Acoustic Piano", this.cid));
WO.instrumentKeyHandler.create(this.get('instrument'));
this.on('changeInstrument', function(instrumentName){this.changeInstrument(instrumentName);}, this);
},
genObjectId: (function() {
function s4() |
return function() {
return s4() + s4() + s4();
};
})(),
changeInstrument: function(instrumentName) {
var instType = {
'Acoustic Piano': 'MIDI',
'Audio File': 'Audio',
'Microphone': 'Microphone',
'Acoustic Guitar Steel': 'MIDI',
'Alto Sax': 'MIDI',
'Church Organ': 'MIDI',
'Distortion Guitar': 'MIDI',
'Electric Piano 1': 'MIDI',
'Flute': 'MIDI',
'Muted Trumpet': 'MIDI',
'Oboe': 'MIDI',
'Overdriven Guitar': 'MIDI',
'Pad 3 Polysynth': 'MIDI',
'Synth': 'MIDI',
'Synth Bass 1': 'MIDI',
'Synth Strings 2': 'MIDI',
'Viola': 'MIDI',
'Violin': 'MIDI',
'Xylophone': 'MIDI'
};
var previousInstrumentType = this.get('type');
WO.appView.unbindKeys();
this.set('type', instType[instrumentName]);
this.set('title', instrumentName);
if (this.get('type') === 'MIDI') {
this.set('instrument', WO.InstrumentFactory(instrumentName, this));
WO.instrumentKeyHandler.create(this.get('instrument'));
if (previousInstrumentType !== 'MIDI') {
$('.active-track .track-notes').html('');
this.set('mRender', new WO.MidiRender(this.cid + ' .track-notes'));
}
} else {
this.set('notes', []);
$('.active-track .track-notes').html('');
this.set('instrument', WO.InstrumentFactory(instrumentName, this));
}
},
saveTrack: function(){
var instrument = this.get('instrument');
var mRender = this.get('mRender');
this.set('instrument', '');
this.set('mRender', '');
var that = this;
var newlySaveTrack = $.when(that.save()).done(function(){
that.set('instrument', instrument);
that.set('mRender', mRender);
return that;
});
return newlySaveTrack;
}
});
// see what type of instrument the current selection is
// midi -> mic => remove svg , add mic
// midi -> audio => remove svg , add audio
// midi -> midi => null
// mic -> audio => remove mic , add audio
// mic -> midi => remove mic, add svg
// audio -> mic => remove audio, add mic
// audio -> midi => remove audio, add svg
// keep notes only for midi change to hear different instruments.
| {
return Math.floor((1 + Math.random()) * 0x10000)
.toString(16)
.substring(1);
} | identifier_body |
test_serialization.py | import os
import shutil
import tempfile
import numpy as np
import pytest
import torch
from spotlight.cross_validation import random_train_test_split
from spotlight.datasets import movielens
from spotlight.evaluation import mrr_score, sequence_mrr_score
from spotlight.evaluation import rmse_score
from spotlight.factorization.explicit import ExplicitFactorizationModel
from spotlight.factorization.implicit import ImplicitFactorizationModel
from spotlight.sequence.implicit import ImplicitSequenceModel
from spotlight.sequence.representations import CNNNet
RANDOM_STATE = np.random.RandomState(42)
CUDA = bool(os.environ.get('SPOTLIGHT_CUDA', False))
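# Illustrative invocation (assumed, not part of the original file): the GPU code paths run when
# the environment variable is set to any non-empty string, e.g. `SPOTLIGHT_CUDA=1 pytest`.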
def _reload(model):
dirname = tempfile.mkdtemp()
try:
fname = os.path.join(dirname, "model.pkl")
torch.save(model, fname)
model = torch.load(fname)
finally:
shutil.rmtree(dirname)
return model
@pytest.fixture(scope="module")
def data():
|
def test_explicit_serialization(data):
train, test = data
model = ExplicitFactorizationModel(loss='regression',
n_iter=3,
batch_size=1024,
learning_rate=1e-3,
l2=1e-5,
use_cuda=CUDA)
model.fit(train)
rmse_original = rmse_score(model, test)
rmse_recovered = rmse_score(_reload(model), test)
assert rmse_original == rmse_recovered
def test_implicit_serialization(data):
train, test = data
model = ImplicitFactorizationModel(loss='bpr',
n_iter=3,
batch_size=1024,
learning_rate=1e-2,
l2=1e-6,
use_cuda=CUDA)
model.fit(train)
mrr_original = mrr_score(model, test, train=train).mean()
mrr_recovered = mrr_score(_reload(model), test, train=train).mean()
assert mrr_original == mrr_recovered
def test_implicit_sequence_serialization(data):
train, test = data
train = train.to_sequence(max_sequence_length=128)
test = test.to_sequence(max_sequence_length=128)
model = ImplicitSequenceModel(loss='bpr',
representation=CNNNet(train.num_items,
embedding_dim=32,
kernel_width=3,
dilation=(1, ),
num_layers=1),
batch_size=128,
learning_rate=1e-1,
l2=0.0,
n_iter=5,
random_state=RANDOM_STATE,
use_cuda=CUDA)
model.fit(train)
mrr_original = sequence_mrr_score(model, test).mean()
mrr_recovered = sequence_mrr_score(_reload(model), test).mean()
assert mrr_original == mrr_recovered
| interactions = movielens.get_movielens_dataset('100K')
train, test = random_train_test_split(interactions,
random_state=RANDOM_STATE)
return train, test | identifier_body |
test_serialization.py | import os
import shutil
import tempfile
import numpy as np
import pytest
import torch
from spotlight.cross_validation import random_train_test_split
from spotlight.datasets import movielens
from spotlight.evaluation import mrr_score, sequence_mrr_score
from spotlight.evaluation import rmse_score
from spotlight.factorization.explicit import ExplicitFactorizationModel
from spotlight.factorization.implicit import ImplicitFactorizationModel
from spotlight.sequence.implicit import ImplicitSequenceModel
from spotlight.sequence.representations import CNNNet
RANDOM_STATE = np.random.RandomState(42)
CUDA = bool(os.environ.get('SPOTLIGHT_CUDA', False))
def _reload(model): |
try:
fname = os.path.join(dirname, "model.pkl")
torch.save(model, fname)
model = torch.load(fname)
finally:
shutil.rmtree(dirname)
return model
@pytest.fixture(scope="module")
def data():
interactions = movielens.get_movielens_dataset('100K')
train, test = random_train_test_split(interactions,
random_state=RANDOM_STATE)
return train, test
def test_explicit_serialization(data):
train, test = data
model = ExplicitFactorizationModel(loss='regression',
n_iter=3,
batch_size=1024,
learning_rate=1e-3,
l2=1e-5,
use_cuda=CUDA)
model.fit(train)
rmse_original = rmse_score(model, test)
rmse_recovered = rmse_score(_reload(model), test)
assert rmse_original == rmse_recovered
def test_implicit_serialization(data):
train, test = data
model = ImplicitFactorizationModel(loss='bpr',
n_iter=3,
batch_size=1024,
learning_rate=1e-2,
l2=1e-6,
use_cuda=CUDA)
model.fit(train)
mrr_original = mrr_score(model, test, train=train).mean()
mrr_recovered = mrr_score(_reload(model), test, train=train).mean()
assert mrr_original == mrr_recovered
def test_implicit_sequence_serialization(data):
train, test = data
train = train.to_sequence(max_sequence_length=128)
test = test.to_sequence(max_sequence_length=128)
model = ImplicitSequenceModel(loss='bpr',
representation=CNNNet(train.num_items,
embedding_dim=32,
kernel_width=3,
dilation=(1, ),
num_layers=1),
batch_size=128,
learning_rate=1e-1,
l2=0.0,
n_iter=5,
random_state=RANDOM_STATE,
use_cuda=CUDA)
model.fit(train)
mrr_original = sequence_mrr_score(model, test).mean()
mrr_recovered = sequence_mrr_score(_reload(model), test).mean()
assert mrr_original == mrr_recovered | dirname = tempfile.mkdtemp() | random_line_split |
test_serialization.py | import os
import shutil
import tempfile
import numpy as np
import pytest
import torch
from spotlight.cross_validation import random_train_test_split
from spotlight.datasets import movielens
from spotlight.evaluation import mrr_score, sequence_mrr_score
from spotlight.evaluation import rmse_score
from spotlight.factorization.explicit import ExplicitFactorizationModel
from spotlight.factorization.implicit import ImplicitFactorizationModel
from spotlight.sequence.implicit import ImplicitSequenceModel
from spotlight.sequence.representations import CNNNet
RANDOM_STATE = np.random.RandomState(42)
CUDA = bool(os.environ.get('SPOTLIGHT_CUDA', False))
def | (model):
dirname = tempfile.mkdtemp()
try:
fname = os.path.join(dirname, "model.pkl")
torch.save(model, fname)
model = torch.load(fname)
finally:
shutil.rmtree(dirname)
return model
@pytest.fixture(scope="module")
def data():
interactions = movielens.get_movielens_dataset('100K')
train, test = random_train_test_split(interactions,
random_state=RANDOM_STATE)
return train, test
def test_explicit_serialization(data):
train, test = data
model = ExplicitFactorizationModel(loss='regression',
n_iter=3,
batch_size=1024,
learning_rate=1e-3,
l2=1e-5,
use_cuda=CUDA)
model.fit(train)
rmse_original = rmse_score(model, test)
rmse_recovered = rmse_score(_reload(model), test)
assert rmse_original == rmse_recovered
def test_implicit_serialization(data):
train, test = data
model = ImplicitFactorizationModel(loss='bpr',
n_iter=3,
batch_size=1024,
learning_rate=1e-2,
l2=1e-6,
use_cuda=CUDA)
model.fit(train)
mrr_original = mrr_score(model, test, train=train).mean()
mrr_recovered = mrr_score(_reload(model), test, train=train).mean()
assert mrr_original == mrr_recovered
def test_implicit_sequence_serialization(data):
train, test = data
train = train.to_sequence(max_sequence_length=128)
test = test.to_sequence(max_sequence_length=128)
model = ImplicitSequenceModel(loss='bpr',
representation=CNNNet(train.num_items,
embedding_dim=32,
kernel_width=3,
dilation=(1, ),
num_layers=1),
batch_size=128,
learning_rate=1e-1,
l2=0.0,
n_iter=5,
random_state=RANDOM_STATE,
use_cuda=CUDA)
model.fit(train)
mrr_original = sequence_mrr_score(model, test).mean()
mrr_recovered = sequence_mrr_score(_reload(model), test).mean()
assert mrr_original == mrr_recovered
| _reload | identifier_name |
runtime_display_panel.py | '''
Created on Dec 23, 2013
@author: Chris
'''
import sys
import wx
from gooey.gui.lang import i18n
from gooey.gui.message_event import EVT_MSG
class | (object):
def __init__(self):
# self.queue = queue
self.stdout = sys.stdout
# Overrides stdout's write method
def write(self, text):
raise NotImplementedError
class RuntimeDisplay(wx.Panel):
def __init__(self, parent, build_spec, **kwargs):
wx.Panel.__init__(self, parent, **kwargs)
self.build_spec = build_spec
self._init_properties()
self._init_components()
self._do_layout()
# self._HookStdout()
def _init_properties(self):
self.SetBackgroundColour('#F0F0F0')
def _init_components(self):
self.text = wx.StaticText(self, label=i18n._("status"))
self.cmd_textbox = wx.TextCtrl(
self, -1, "",
style=wx.TE_MULTILINE | wx.TE_READONLY | wx.TE_RICH)
if self.build_spec.get('monospace_display'):
pointsize = self.cmd_textbox.GetFont().GetPointSize()
font = wx.Font(pointsize, wx.FONTFAMILY_MODERN,
wx.FONTWEIGHT_NORMAL, wx.FONTWEIGHT_BOLD, False)
self.cmd_textbox.SetFont(font)
def _do_layout(self):
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.AddSpacer(10)
sizer.Add(self.text, 0, wx.LEFT, 30)
sizer.AddSpacer(10)
sizer.Add(self.cmd_textbox, 1, wx.LEFT | wx.RIGHT | wx.BOTTOM | wx.EXPAND, 30)
sizer.AddSpacer(20)
self.SetSizer(sizer)
self.Bind(EVT_MSG, self.OnMsg)
def _HookStdout(self):
_stdout = sys.stdout
_stdout_write = _stdout.write
sys.stdout = MessagePump()
sys.stdout.write = self.WriteToDisplayBox
def AppendText(self, txt):
self.cmd_textbox.AppendText(txt)
def WriteToDisplayBox(self, txt):
if txt is not '':
self.AppendText(txt)
def OnMsg(self, evt):
pass
| MessagePump | identifier_name |
runtime_display_panel.py | '''
Created on Dec 23, 2013
@author: Chris
'''
import sys
import wx
from gooey.gui.lang import i18n
from gooey.gui.message_event import EVT_MSG
class MessagePump(object):
def __init__(self):
# self.queue = queue
self.stdout = sys.stdout
# Overrides stdout's write method
def write(self, text):
raise NotImplementedError
class RuntimeDisplay(wx.Panel):
def __init__(self, parent, build_spec, **kwargs):
wx.Panel.__init__(self, parent, **kwargs)
self.build_spec = build_spec
self._init_properties()
self._init_components()
self._do_layout()
# self._HookStdout() |
def _init_properties(self):
self.SetBackgroundColour('#F0F0F0')
def _init_components(self):
self.text = wx.StaticText(self, label=i18n._("status"))
self.cmd_textbox = wx.TextCtrl(
self, -1, "",
style=wx.TE_MULTILINE | wx.TE_READONLY | wx.TE_RICH)
if self.build_spec.get('monospace_display'):
pointsize = self.cmd_textbox.GetFont().GetPointSize()
font = wx.Font(pointsize, wx.FONTFAMILY_MODERN,
wx.FONTWEIGHT_NORMAL, wx.FONTWEIGHT_BOLD, False)
self.cmd_textbox.SetFont(font)
def _do_layout(self):
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.AddSpacer(10)
sizer.Add(self.text, 0, wx.LEFT, 30)
sizer.AddSpacer(10)
sizer.Add(self.cmd_textbox, 1, wx.LEFT | wx.RIGHT | wx.BOTTOM | wx.EXPAND, 30)
sizer.AddSpacer(20)
self.SetSizer(sizer)
self.Bind(EVT_MSG, self.OnMsg)
def _HookStdout(self):
_stdout = sys.stdout
_stdout_write = _stdout.write
sys.stdout = MessagePump()
sys.stdout.write = self.WriteToDisplayBox
def AppendText(self, txt):
self.cmd_textbox.AppendText(txt)
def WriteToDisplayBox(self, txt):
if txt is not '':
self.AppendText(txt)
def OnMsg(self, evt):
pass | random_line_split |
|
runtime_display_panel.py | '''
Created on Dec 23, 2013
@author: Chris
'''
import sys
import wx
from gooey.gui.lang import i18n
from gooey.gui.message_event import EVT_MSG
class MessagePump(object):
def __init__(self):
# self.queue = queue
self.stdout = sys.stdout
# Overrides stdout's write method
def write(self, text):
raise NotImplementedError
class RuntimeDisplay(wx.Panel):
def __init__(self, parent, build_spec, **kwargs):
wx.Panel.__init__(self, parent, **kwargs)
self.build_spec = build_spec
self._init_properties()
self._init_components()
self._do_layout()
# self._HookStdout()
def _init_properties(self):
self.SetBackgroundColour('#F0F0F0')
def _init_components(self):
|
def _do_layout(self):
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.AddSpacer(10)
sizer.Add(self.text, 0, wx.LEFT, 30)
sizer.AddSpacer(10)
sizer.Add(self.cmd_textbox, 1, wx.LEFT | wx.RIGHT | wx.BOTTOM | wx.EXPAND, 30)
sizer.AddSpacer(20)
self.SetSizer(sizer)
self.Bind(EVT_MSG, self.OnMsg)
def _HookStdout(self):
_stdout = sys.stdout
_stdout_write = _stdout.write
sys.stdout = MessagePump()
sys.stdout.write = self.WriteToDisplayBox
def AppendText(self, txt):
self.cmd_textbox.AppendText(txt)
def WriteToDisplayBox(self, txt):
if txt is not '':
self.AppendText(txt)
def OnMsg(self, evt):
pass
| self.text = wx.StaticText(self, label=i18n._("status"))
self.cmd_textbox = wx.TextCtrl(
self, -1, "",
style=wx.TE_MULTILINE | wx.TE_READONLY | wx.TE_RICH)
if self.build_spec.get('monospace_display'):
pointsize = self.cmd_textbox.GetFont().GetPointSize()
font = wx.Font(pointsize, wx.FONTFAMILY_MODERN,
wx.FONTWEIGHT_NORMAL, wx.FONTWEIGHT_BOLD, False)
self.cmd_textbox.SetFont(font) | identifier_body |
runtime_display_panel.py | '''
Created on Dec 23, 2013
@author: Chris
'''
import sys
import wx
from gooey.gui.lang import i18n
from gooey.gui.message_event import EVT_MSG
class MessagePump(object):
def __init__(self):
# self.queue = queue
self.stdout = sys.stdout
# Overrides stdout's write method
def write(self, text):
raise NotImplementedError
class RuntimeDisplay(wx.Panel):
def __init__(self, parent, build_spec, **kwargs):
wx.Panel.__init__(self, parent, **kwargs)
self.build_spec = build_spec
self._init_properties()
self._init_components()
self._do_layout()
# self._HookStdout()
def _init_properties(self):
self.SetBackgroundColour('#F0F0F0')
def _init_components(self):
self.text = wx.StaticText(self, label=i18n._("status"))
self.cmd_textbox = wx.TextCtrl(
self, -1, "",
style=wx.TE_MULTILINE | wx.TE_READONLY | wx.TE_RICH)
if self.build_spec.get('monospace_display'):
|
def _do_layout(self):
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.AddSpacer(10)
sizer.Add(self.text, 0, wx.LEFT, 30)
sizer.AddSpacer(10)
sizer.Add(self.cmd_textbox, 1, wx.LEFT | wx.RIGHT | wx.BOTTOM | wx.EXPAND, 30)
sizer.AddSpacer(20)
self.SetSizer(sizer)
self.Bind(EVT_MSG, self.OnMsg)
def _HookStdout(self):
_stdout = sys.stdout
_stdout_write = _stdout.write
sys.stdout = MessagePump()
sys.stdout.write = self.WriteToDisplayBox
def AppendText(self, txt):
self.cmd_textbox.AppendText(txt)
def WriteToDisplayBox(self, txt):
if txt is not '':
self.AppendText(txt)
def OnMsg(self, evt):
pass
| pointsize = self.cmd_textbox.GetFont().GetPointSize()
font = wx.Font(pointsize, wx.FONTFAMILY_MODERN,
wx.FONTWEIGHT_NORMAL, wx.FONTWEIGHT_BOLD, False)
self.cmd_textbox.SetFont(font) | conditional_block |
lib.rs | //! # Iron CMS
//! CMS based on Iron Framework for **Rust**.
#[macro_use] extern crate iron;
#[macro_use] extern crate router;
#[macro_use] extern crate maplit;
#[macro_use] extern crate diesel;
extern crate handlebars_iron as hbs;
extern crate handlebars;
extern crate rustc_serialize;
extern crate staticfile;
extern crate mount;
extern crate time;
extern crate params;
extern crate iron_diesel_middleware;
extern crate r2d2;
extern crate r2d2_diesel;
extern crate regex;
/// Base middleware for CMS
pub mod middleware;
mod admin;
mod frontend;
use router::Router;
use staticfile::Static;
#[cfg(feature = "cache")]
use staticfile::Cache;
use mount::Mount;
use std::path::Path;
#[cfg(feature = "cache")]
use time::Duration;
/// Routes aggregator.
/// It accumulates all possible routes for the CMS.
/// ## How to use
/// ```
/// extern crate iron;
/// extern crate iron_cms;
/// use iron::{Iron, Chain};
/// fn main() {
/// // Add routers
/// let mut chain = Chain::new(iron_cms::routes());
/// // Add Template renderer and views path
/// let paths = vec!["./views/"];
/// chain.link_after(iron_cms::middleware::template_render(paths));
/// // Add error-404 handler
/// chain.link_after(iron_cms::middleware::Error404);
/// // Start the application and other actions
/// // Iron::new(chain).http("localhost:3000").unwrap();
/// }
/// ```
pub fn routes() -> Mount | {
// Init router
let mut routes = Router::new();
// Add routes
frontend::add_routes(&mut routes);
admin::add_routes(&mut routes);
// Add static router
let mut mount = Mount::new();
mount
.mount("/", routes)
.mount("/assets/", Static::new(Path::new("static")));
// .cache(Duration::days(30)));
mount
} | identifier_body |
|
lib.rs | //! # Iron CMS
//! CMS based on Iron Framework for **Rust**.
#[macro_use] extern crate iron;
#[macro_use] extern crate router;
#[macro_use] extern crate maplit;
#[macro_use] extern crate diesel;
extern crate handlebars_iron as hbs;
extern crate handlebars;
extern crate rustc_serialize;
extern crate staticfile;
extern crate mount;
extern crate time;
extern crate params;
extern crate iron_diesel_middleware;
extern crate r2d2;
extern crate r2d2_diesel;
extern crate regex;
/// Base middleware for CMS
pub mod middleware;
mod admin;
mod frontend;
use router::Router;
use staticfile::Static;
#[cfg(feature = "cache")]
use staticfile::Cache;
use mount::Mount;
use std::path::Path;
#[cfg(feature = "cache")]
use time::Duration;
/// Routes aggregator.
/// It accumulates all possible routes for the CMS.
/// ## How to use
/// ```
/// extern crate iron;
/// extern crate iron_cms;
/// use iron::{Iron, Chain};
/// fn main() {
/// // Add routers
/// let mut chain = Chain::new(iron_cms::routes());
/// // Add Template renderer and views path
/// let paths = vec!["./views/"]; | /// // Start applocation and other actions
/// // Iron::new(chain).http("localhost:3000").unwrap();
/// }
/// ```
pub fn routes() -> Mount {
// Init router
let mut routes = Router::new();
// Add routes
frontend::add_routes(&mut routes);
admin::add_routes(&mut routes);
// Add static router
let mut mount = Mount::new();
mount
.mount("/", routes)
.mount("/assets/", Static::new(Path::new("static")));
// .cache(Duration::days(30)));
mount
} | /// chain.link_after(iron_cms::middleware::template_render(paths));
/// // Add error-404 handler
/// chain.link_after(iron_cms::middleware::Error404); | random_line_split |
lib.rs | //! # Iron CMS
//! CMS based on Iron Framework for **Rust**.
#[macro_use] extern crate iron;
#[macro_use] extern crate router;
#[macro_use] extern crate maplit;
#[macro_use] extern crate diesel;
extern crate handlebars_iron as hbs;
extern crate handlebars;
extern crate rustc_serialize;
extern crate staticfile;
extern crate mount;
extern crate time;
extern crate params;
extern crate iron_diesel_middleware;
extern crate r2d2;
extern crate r2d2_diesel;
extern crate regex;
/// Base middleware for CMS
pub mod middleware;
mod admin;
mod frontend;
use router::Router;
use staticfile::Static;
#[cfg(feature = "cache")]
use staticfile::Cache;
use mount::Mount;
use std::path::Path;
#[cfg(feature = "cache")]
use time::Duration;
/// Routes aggregator.
/// It accumulates all possible routes for the CMS.
/// ## How to use
/// ```
/// extern crate iron;
/// extern crate iron_cms;
/// use iron::{Iron, Chain};
/// fn main() {
/// // Add routers
/// let mut chain = Chain::new(iron_cms::routes());
/// // Add Template renderer and views path
/// let paths = vec!["./views/"];
/// chain.link_after(iron_cms::middleware::template_render(paths));
/// // Add error-404 handler
/// chain.link_after(iron_cms::middleware::Error404);
/// // Start the application and other actions
/// // Iron::new(chain).http("localhost:3000").unwrap();
/// }
/// ```
pub fn | () -> Mount {
// Init router
let mut routes = Router::new();
// Add routes
frontend::add_routes(&mut routes);
admin::add_routes(&mut routes);
// Add static router
let mut mount = Mount::new();
mount
.mount("/", routes)
.mount("/assets/", Static::new(Path::new("static")));
// .cache(Duration::days(30)));
mount
}
| routes | identifier_name |
SpellInfo.js | import SPELLS from 'common/SPELLS';
/*
* Fields:
* int: spell scales with Intellect
* crit: spell scales with (is able to or procced from) Critical Strike
* hasteHpm: spell does more healing due to Haste, e.g. HoTs that gain more ticks
* hasteHpct: spell can be cast more frequently due to Haste, basically any spell except for non haste scaling CDs
* mastery: spell is boosted by Mastery
* masteryStack: spell's HoT counts as a Mastery Stack
* vers: spell scales with Versatility
* multiplier: spell scales with whatever procs it, should be ignored for purpose of weights and for 'total healing' number
* ignored: spell should be ignored for purpose of stat weights | export default {
[SPELLS.OCEANS_EMBRACE.id]: { // Sea Star of the Depthmother
int: false,
crit: true,
hasteHpct: true, // until LoD's CD is below 8 sec, this speeds up the deck cycle time
mastery: false,
vers: true,
},
[SPELLS.GUIDING_HAND.id]: { // The Deceiver's Grand Design
int: false,
crit: true,
hasteHpct: false, // static CD
hasteHpm: true,
mastery: false,
vers: true,
},
[SPELLS.HIGHFATHERS_TIMEKEEPING_HEAL.id]: { // Highfather's Machination
int: false,
crit: true,
hasteHpct: true,
hasteHpm: false,
mastery: false,
vers: true,
},
[SPELLS.LEECH.id]: { // procs a percent of all your healing, so we ignore for weights and total healing
multiplier: true,
},
[SPELLS.VELENS_FUTURE_SIGHT_HEAL.id]: { // while active procs from any healing, so we ignore for weights and total healing
multiplier: true,
},
[SPELLS.LIGHTS_EMBRACE_HEALING.id]: {
int: false,
crit: true,
hasteHpct: false,
mastery: false,
vers: true,
},
[SPELLS.INFUSION_OF_LIGHT_HEALING.id]: {
int: false,
crit: true,
hasteHpct: false,
mastery: false,
vers: true,
},
[SPELLS.CHAOTIC_DARKNESS_HEALING.id]: {
int: false,
crit: true,
hasteHpct: false,
mastery: false,
vers: true,
},
[SPELLS.EONARS_COMPASSION_HEAL.id]: {
int: false,
crit: true,
hasteHpct: false,
mastery: false,
vers: true,
},
[SPELLS.XAVARICS_MAGNUM_OPUS.id]: { // Prydaz
int: false,
crit: false,
hasteHpct: false,
mastery: false,
vers: true,
},
[SPELLS.HEALTHSTONE.id]: {
int: false,
crit: false,
hasteHpct: false,
mastery: false,
vers: false, // not 100% sure
},
[SPELLS.MARK_OF_THE_ANCIENT_PRIESTESS.id]: {
int: false,
crit: true,
hasteHpct: false,
mastery: false,
vers: true,
},
//TODO: Add Shadowbind, it scales from the crit on the damage part
}; | */
// This only works with actual healing events; casts are not recognized. | random_line_split |
harfbuzz.rs | one glyph, and updates the y-position of the pen.
pub fn get_entry_for_glyph(&self, i: int, y_pos: &mut Au) -> ShapedGlyphEntry {
assert!(i < self.count);
unsafe {
let glyph_info_i = self.glyph_infos.offset(i);
let pos_info_i = self.pos_infos.offset(i);
let x_offset = Shaper::fixed_to_float((*pos_info_i).x_offset);
let y_offset = Shaper::fixed_to_float((*pos_info_i).y_offset);
let x_advance = Shaper::fixed_to_float((*pos_info_i).x_advance);
let y_advance = Shaper::fixed_to_float((*pos_info_i).y_advance);
let x_offset = Au::from_frac_px(x_offset);
let y_offset = Au::from_frac_px(y_offset);
let x_advance = Au::from_frac_px(x_advance);
let y_advance = Au::from_frac_px(y_advance);
let offset = if x_offset == Au(0) && y_offset == Au(0) && y_advance == Au(0) {
None
} else {
// adjust the pen..
if y_advance > Au(0) {
*y_pos = *y_pos - y_advance;
}
Some(Point2D(x_offset, *y_pos - y_offset))
};
ShapedGlyphEntry {
codepoint: (*glyph_info_i).codepoint as GlyphId,
advance: x_advance,
offset: offset,
}
}
}
}
pub struct Shaper {
hb_face: *mut hb_face_t,
hb_font: *mut hb_font_t,
hb_funcs: *mut hb_font_funcs_t,
}
#[unsafe_destructor]
impl Drop for Shaper {
fn drop(&mut self) {
unsafe {
assert!(self.hb_face.is_not_null());
hb_face_destroy(self.hb_face);
assert!(self.hb_font.is_not_null());
hb_font_destroy(self.hb_font);
assert!(self.hb_funcs.is_not_null());
hb_font_funcs_destroy(self.hb_funcs);
}
}
}
impl Shaper {
pub fn | (font: &mut Font) -> Shaper {
unsafe {
// Indirection for Rust Issue #6248, dynamic freeze scope artificially extended
let font_ptr = font as *mut Font;
let hb_face: *mut hb_face_t = hb_face_create_for_tables(get_font_table_func,
font_ptr as *mut c_void,
None);
let hb_font: *mut hb_font_t = hb_font_create(hb_face);
// Set points-per-em. if zero, performs no hinting in that direction.
let pt_size = font.actual_pt_size;
hb_font_set_ppem(hb_font, pt_size as c_uint, pt_size as c_uint);
// Set scaling. Note that this takes 16.16 fixed point.
hb_font_set_scale(hb_font,
Shaper::float_to_fixed(pt_size) as c_int,
Shaper::float_to_fixed(pt_size) as c_int);
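// (16.16 fixed point stores a float scaled by 2^16 in an integer, so e.g. a
// 12.0pt size is passed to HarfBuzz as 12 * 65536 = 786432; see
// float_to_fixed / fixed_to_float below.)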
// configure static function callbacks.
// NB. This funcs structure could be reused globally, as it never changes.
let hb_funcs: *mut hb_font_funcs_t = hb_font_funcs_create();
hb_font_funcs_set_glyph_func(hb_funcs, glyph_func, ptr::null_mut(), None);
hb_font_funcs_set_glyph_h_advance_func(hb_funcs, glyph_h_advance_func, ptr::null_mut(), None);
hb_font_funcs_set_glyph_h_kerning_func(hb_funcs, glyph_h_kerning_func, ptr::null_mut(), ptr::null_mut());
hb_font_set_funcs(hb_font, hb_funcs, font_ptr as *mut c_void, None);
Shaper {
hb_face: hb_face,
hb_font: hb_font,
hb_funcs: hb_funcs,
}
}
}
fn float_to_fixed(f: f64) -> i32 {
float_to_fixed(16, f)
}
fn fixed_to_float(i: hb_position_t) -> f64 {
fixed_to_float(16, i)
}
}
impl ShaperMethods for Shaper {
/// Calculate the layout metrics associated with the given text when rendered in a specific
/// font.
fn shape_text(&self, text: &str, glyphs: &mut GlyphStore) {
unsafe {
let hb_buffer: *mut hb_buffer_t = hb_buffer_create();
hb_buffer_set_direction(hb_buffer, HB_DIRECTION_LTR);
hb_buffer_add_utf8(hb_buffer,
text.as_ptr() as *const c_char,
text.len() as c_int,
0,
text.len() as c_int);
hb_shape(self.hb_font, hb_buffer, ptr::null_mut(), 0);
self.save_glyph_results(text, glyphs, hb_buffer);
hb_buffer_destroy(hb_buffer);
}
}
}
impl Shaper {
fn save_glyph_results(&self, text: &str, glyphs: &mut GlyphStore, buffer: *mut hb_buffer_t) {
let glyph_data = ShapedGlyphData::new(buffer);
let glyph_count = glyph_data.len();
let byte_max = text.len() as int;
let char_max = text.char_len() as int;
// GlyphStore records are indexed by character, not byte offset.
// so, we must be careful to increment this when saving glyph entries.
let mut char_idx = CharIndex(0);
assert!(glyph_count <= char_max);
debug!("Shaped text[char count={}], got back {} glyph info records.",
char_max,
glyph_count);
if char_max != glyph_count {
debug!("NOTE: Since these are not equal, we probably have been given some complex \
glyphs.");
}
// make map of what chars have glyphs
let mut byte_to_glyph: Vec<i32>;
// fast path: all chars are single-byte.
if byte_max == char_max {
byte_to_glyph = Vec::from_elem(byte_max as uint, NO_GLYPH);
} else {
byte_to_glyph = Vec::from_elem(byte_max as uint, CONTINUATION_BYTE);
for (i, _) in text.char_indices() {
*byte_to_glyph.get_mut(i) = NO_GLYPH;
}
}
debug!("(glyph idx) -> (text byte offset)");
for i in range(0, glyph_data.len()) {
// loc refers to a *byte* offset within the utf8 string.
let loc = glyph_data.byte_offset_of_glyph(i);
if loc < byte_max {
assert!(byte_to_glyph[loc as uint] != CONTINUATION_BYTE);
*byte_to_glyph.get_mut(loc as uint) = i as i32;
} else {
debug!("ERROR: tried to set out of range byte_to_glyph: idx={}, glyph idx={}",
loc,
i);
}
debug!("{} -> {}", i, loc);
}
debug!("text: {:s}", text);
debug!("(char idx): char->(glyph index):");
for (i, ch) in text.char_indices() {
debug!("{}: {} --> {:d}", i, ch, *byte_to_glyph.get(i) as int);
}
// some helpers
let mut glyph_span: Range<int> = Range::empty();
// this span contains first byte of first char, to last byte of last char in range.
// so, end() points to first byte of last+1 char, if it's less than byte_max.
let mut char_byte_span: Range<int> = Range::empty();
let mut y_pos = Au(0);
// main loop over each glyph. each iteration usually processes 1 glyph and 1+ chars.
// in cases with complex glyph-character associations, 2+ glyphs and 1+ chars can be
// processed.
while glyph_span.begin() < glyph_count {
// start by looking at just one glyph.
glyph_span.extend_by(1);
debug!("Processing glyph at idx={}", glyph_span.begin());
let char_byte_start = glyph_data.byte_offset_of_glyph(glyph_span.begin());
char_byte_span.reset(char_byte_start, 0);
// find a range of chars corresponding to this glyph, plus
// any trailing chars that do not have associated glyphs.
while char_byte_span.end() < byte_max {
let range = text.char_range_at(char_byte_span.end() as uint);
drop(range.ch);
char_byte_span.extend_to(range.next as int);
debug!("Processing char byte span: off={}, len={} for glyph idx={}",
char_byte_span.begin(), char_byte_span.length(), glyph_span.begin());
while char_byte_span.end() != byte_max &&
byte_to_glyph[char_byte_span.end() as uint] == NO_GLYPH {
debug!("Extending char byte span to include byte offset={} with no associated \
glyph", char_byte_span.end());
let range = text.char_range_at(char_byte_span.end() as uint);
drop(range.ch);
char_byte_span.extend_to(range.next as int);
}
// extend glyph range to max glyph index covered by char_span,
// in cases where one char made several glyphs and left some unassociated chars.
let mut max_glyph_idx = glyph_span.end();
for i in char_byte_span.each_index() {
if byte_to_glyph[i as uint] > NO_GLYPH {
max_glyph_idx = cmp::max(byte_to_glyph[i as uint] as int + 1, max_glyph_idx);
}
| new | identifier_name |
harfbuzz.rs | one glyph, and updates the y-position of the pen.
pub fn get_entry_for_glyph(&self, i: int, y_pos: &mut Au) -> ShapedGlyphEntry {
assert!(i < self.count);
unsafe {
let glyph_info_i = self.glyph_infos.offset(i);
let pos_info_i = self.pos_infos.offset(i);
let x_offset = Shaper::fixed_to_float((*pos_info_i).x_offset);
let y_offset = Shaper::fixed_to_float((*pos_info_i).y_offset);
let x_advance = Shaper::fixed_to_float((*pos_info_i).x_advance);
let y_advance = Shaper::fixed_to_float((*pos_info_i).y_advance);
let x_offset = Au::from_frac_px(x_offset);
let y_offset = Au::from_frac_px(y_offset);
let x_advance = Au::from_frac_px(x_advance);
let y_advance = Au::from_frac_px(y_advance);
let offset = if x_offset == Au(0) && y_offset == Au(0) && y_advance == Au(0) {
None
} else {
// adjust the pen..
if y_advance > Au(0) {
*y_pos = *y_pos - y_advance;
}
Some(Point2D(x_offset, *y_pos - y_offset))
};
ShapedGlyphEntry {
codepoint: (*glyph_info_i).codepoint as GlyphId,
advance: x_advance,
offset: offset,
}
}
}
}
pub struct Shaper {
hb_face: *mut hb_face_t,
hb_font: *mut hb_font_t,
hb_funcs: *mut hb_font_funcs_t,
}
#[unsafe_destructor]
impl Drop for Shaper {
fn drop(&mut self) {
unsafe {
assert!(self.hb_face.is_not_null());
hb_face_destroy(self.hb_face);
assert!(self.hb_font.is_not_null());
hb_font_destroy(self.hb_font);
assert!(self.hb_funcs.is_not_null());
hb_font_funcs_destroy(self.hb_funcs);
}
}
}
impl Shaper {
pub fn new(font: &mut Font) -> Shaper {
unsafe {
// Indirection for Rust Issue #6248, dynamic freeze scope artificially extended
let font_ptr = font as *mut Font;
let hb_face: *mut hb_face_t = hb_face_create_for_tables(get_font_table_func,
font_ptr as *mut c_void,
None);
let hb_font: *mut hb_font_t = hb_font_create(hb_face);
// Set points-per-em. if zero, performs no hinting in that direction.
let pt_size = font.actual_pt_size;
hb_font_set_ppem(hb_font, pt_size as c_uint, pt_size as c_uint);
// Set scaling. Note that this takes 16.16 fixed point.
hb_font_set_scale(hb_font,
Shaper::float_to_fixed(pt_size) as c_int,
Shaper::float_to_fixed(pt_size) as c_int);
// configure static function callbacks.
// NB. This funcs structure could be reused globally, as it never changes.
let hb_funcs: *mut hb_font_funcs_t = hb_font_funcs_create();
hb_font_funcs_set_glyph_func(hb_funcs, glyph_func, ptr::null_mut(), None);
hb_font_funcs_set_glyph_h_advance_func(hb_funcs, glyph_h_advance_func, ptr::null_mut(), None);
hb_font_funcs_set_glyph_h_kerning_func(hb_funcs, glyph_h_kerning_func, ptr::null_mut(), ptr::null_mut());
hb_font_set_funcs(hb_font, hb_funcs, font_ptr as *mut c_void, None);
Shaper {
hb_face: hb_face,
hb_font: hb_font,
hb_funcs: hb_funcs,
}
}
}
fn float_to_fixed(f: f64) -> i32 {
float_to_fixed(16, f)
}
fn fixed_to_float(i: hb_position_t) -> f64 {
fixed_to_float(16, i)
}
}
impl ShaperMethods for Shaper {
/// Calculate the layout metrics associated with the given text when rendered in a specific
/// font.
fn shape_text(&self, text: &str, glyphs: &mut GlyphStore) {
unsafe {
let hb_buffer: *mut hb_buffer_t = hb_buffer_create();
hb_buffer_set_direction(hb_buffer, HB_DIRECTION_LTR);
hb_buffer_add_utf8(hb_buffer,
text.as_ptr() as *const c_char,
text.len() as c_int,
0,
text.len() as c_int);
hb_shape(self.hb_font, hb_buffer, ptr::null_mut(), 0);
self.save_glyph_results(text, glyphs, hb_buffer);
hb_buffer_destroy(hb_buffer);
}
}
}
impl Shaper {
fn save_glyph_results(&self, text: &str, glyphs: &mut GlyphStore, buffer: *mut hb_buffer_t) {
let glyph_data = ShapedGlyphData::new(buffer);
let glyph_count = glyph_data.len();
let byte_max = text.len() as int;
let char_max = text.char_len() as int;
// GlyphStore records are indexed by character, not byte offset.
// so, we must be careful to increment this when saving glyph entries.
let mut char_idx = CharIndex(0);
assert!(glyph_count <= char_max);
debug!("Shaped text[char count={}], got back {} glyph info records.",
char_max,
glyph_count);
if char_max != glyph_count {
debug!("NOTE: Since these are not equal, we probably have been given some complex \
glyphs.");
}
// make map of what chars have glyphs
let mut byte_to_glyph: Vec<i32>;
// fast path: all chars are single-byte.
if byte_max == char_max {
byte_to_glyph = Vec::from_elem(byte_max as uint, NO_GLYPH);
} else {
byte_to_glyph = Vec::from_elem(byte_max as uint, CONTINUATION_BYTE);
for (i, _) in text.char_indices() {
*byte_to_glyph.get_mut(i) = NO_GLYPH;
}
}
debug!("(glyph idx) -> (text byte offset)");
for i in range(0, glyph_data.len()) {
// loc refers to a *byte* offset within the utf8 string.
let loc = glyph_data.byte_offset_of_glyph(i);
if loc < byte_max {
assert!(byte_to_glyph[loc as uint] != CONTINUATION_BYTE);
*byte_to_glyph.get_mut(loc as uint) = i as i32;
} else {
debug!("ERROR: tried to set out of range byte_to_glyph: idx={}, glyph idx={}",
loc,
i);
}
debug!("{} -> {}", i, loc);
}
debug!("text: {:s}", text);
debug!("(char idx): char->(glyph index):");
for (i, ch) in text.char_indices() {
debug!("{}: {} --> {:d}", i, ch, *byte_to_glyph.get(i) as int);
}
// some helpers
let mut glyph_span: Range<int> = Range::empty();
// this span contains first byte of first char, to last byte of last char in range.
// so, end() points to first byte of last+1 char, if it's less than byte_max.
let mut char_byte_span: Range<int> = Range::empty(); | // processed.
while glyph_span.begin() < glyph_count {
// start by looking at just one glyph.
glyph_span.extend_by(1);
debug!("Processing glyph at idx={}", glyph_span.begin());
let char_byte_start = glyph_data.byte_offset_of_glyph(glyph_span.begin());
char_byte_span.reset(char_byte_start, 0);
// find a range of chars corresponding to this glyph, plus
// any trailing chars that do not have associated glyphs.
while char_byte_span.end() < byte_max {
let range = text.char_range_at(char_byte_span.end() as uint);
drop(range.ch);
char_byte_span.extend_to(range.next as int);
debug!("Processing char byte span: off={}, len={} for glyph idx={}",
char_byte_span.begin(), char_byte_span.length(), glyph_span.begin());
while char_byte_span.end() != byte_max &&
byte_to_glyph[char_byte_span.end() as uint] == NO_GLYPH {
debug!("Extending char byte span to include byte offset={} with no associated \
glyph", char_byte_span.end());
let range = text.char_range_at(char_byte_span.end() as uint);
drop(range.ch);
char_byte_span.extend_to(range.next as int);
}
// extend glyph range to max glyph index covered by char_span,
// in cases where one char made several glyphs and left some unassociated chars.
let mut max_glyph_idx = glyph_span.end();
for i in char_byte_span.each_index() {
if byte_to_glyph[i as uint] > NO_GLYPH {
max_glyph_idx = cmp::max(byte_to_glyph[i as uint] as int + 1, max_glyph_idx);
}
}
| let mut y_pos = Au(0);
// main loop over each glyph. each iteration usually processes 1 glyph and 1+ chars.
// in cases with complex glyph-character assocations, 2+ glyphs and 1+ chars can be | random_line_split |
harfbuzz.rs | TR);
hb_buffer_add_utf8(hb_buffer,
text.as_ptr() as *const c_char,
text.len() as c_int,
0,
text.len() as c_int);
hb_shape(self.hb_font, hb_buffer, ptr::null_mut(), 0);
self.save_glyph_results(text, glyphs, hb_buffer);
hb_buffer_destroy(hb_buffer);
}
}
}
impl Shaper {
fn save_glyph_results(&self, text: &str, glyphs: &mut GlyphStore, buffer: *mut hb_buffer_t) {
let glyph_data = ShapedGlyphData::new(buffer);
let glyph_count = glyph_data.len();
let byte_max = text.len() as int;
let char_max = text.char_len() as int;
// GlyphStore records are indexed by character, not byte offset.
// so, we must be careful to increment this when saving glyph entries.
let mut char_idx = CharIndex(0);
assert!(glyph_count <= char_max);
debug!("Shaped text[char count={}], got back {} glyph info records.",
char_max,
glyph_count);
if char_max != glyph_count {
debug!("NOTE: Since these are not equal, we probably have been given some complex \
glyphs.");
}
// make map of what chars have glyphs
let mut byte_to_glyph: Vec<i32>;
// fast path: all chars are single-byte.
if byte_max == char_max {
byte_to_glyph = Vec::from_elem(byte_max as uint, NO_GLYPH);
} else {
byte_to_glyph = Vec::from_elem(byte_max as uint, CONTINUATION_BYTE);
for (i, _) in text.char_indices() {
*byte_to_glyph.get_mut(i) = NO_GLYPH;
}
}
debug!("(glyph idx) -> (text byte offset)");
for i in range(0, glyph_data.len()) {
// loc refers to a *byte* offset within the utf8 string.
let loc = glyph_data.byte_offset_of_glyph(i);
if loc < byte_max {
assert!(byte_to_glyph[loc as uint] != CONTINUATION_BYTE);
*byte_to_glyph.get_mut(loc as uint) = i as i32;
} else {
debug!("ERROR: tried to set out of range byte_to_glyph: idx={}, glyph idx={}",
loc,
i);
}
debug!("{} -> {}", i, loc);
}
debug!("text: {:s}", text);
debug!("(char idx): char->(glyph index):");
for (i, ch) in text.char_indices() {
debug!("{}: {} --> {:d}", i, ch, *byte_to_glyph.get(i) as int);
}
// some helpers
let mut glyph_span: Range<int> = Range::empty();
// this span contains first byte of first char, to last byte of last char in range.
// so, end() points to first byte of last+1 char, if it's less than byte_max.
let mut char_byte_span: Range<int> = Range::empty();
let mut y_pos = Au(0);
// main loop over each glyph. each iteration usually processes 1 glyph and 1+ chars.
// in cases with complex glyph-character assocations, 2+ glyphs and 1+ chars can be
// processed.
while glyph_span.begin() < glyph_count {
// start by looking at just one glyph.
glyph_span.extend_by(1);
debug!("Processing glyph at idx={}", glyph_span.begin());
let char_byte_start = glyph_data.byte_offset_of_glyph(glyph_span.begin());
char_byte_span.reset(char_byte_start, 0);
// find a range of chars corresponding to this glyph, plus
// any trailing chars that do not have associated glyphs.
while char_byte_span.end() < byte_max {
let range = text.char_range_at(char_byte_span.end() as uint);
drop(range.ch);
char_byte_span.extend_to(range.next as int);
debug!("Processing char byte span: off={}, len={} for glyph idx={}",
char_byte_span.begin(), char_byte_span.length(), glyph_span.begin());
while char_byte_span.end() != byte_max &&
byte_to_glyph[char_byte_span.end() as uint] == NO_GLYPH {
debug!("Extending char byte span to include byte offset={} with no associated \
glyph", char_byte_span.end());
let range = text.char_range_at(char_byte_span.end() as uint);
drop(range.ch);
char_byte_span.extend_to(range.next as int);
}
// extend glyph range to max glyph index covered by char_span,
// in cases where one char made several glyphs and left some unassociated chars.
let mut max_glyph_idx = glyph_span.end();
for i in char_byte_span.each_index() {
if byte_to_glyph[i as uint] > NO_GLYPH {
max_glyph_idx = cmp::max(byte_to_glyph[i as uint] as int + 1, max_glyph_idx);
}
}
if max_glyph_idx > glyph_span.end() {
glyph_span.extend_to(max_glyph_idx);
debug!("Extended glyph span (off={}, len={}) to cover char byte span's max \
glyph index",
glyph_span.begin(), glyph_span.length());
}
// if there's just one glyph, then we don't need further checks.
if glyph_span.length() == 1 { break; }
// if no glyphs were found yet, extend the char byte range more.
if glyph_span.length() == 0 { continue; }
debug!("Complex (multi-glyph to multi-char) association found. This case \
probably doesn't work.");
let mut all_glyphs_are_within_cluster: bool = true;
for j in glyph_span.each_index() {
let loc = glyph_data.byte_offset_of_glyph(j);
if !char_byte_span.contains(loc) {
all_glyphs_are_within_cluster = false;
break
}
}
debug!("All glyphs within char_byte_span cluster?: {}",
all_glyphs_are_within_cluster);
// found a valid range; stop extending char_span.
if all_glyphs_are_within_cluster {
break
}
}
// character/glyph clump must contain characters.
assert!(char_byte_span.length() > 0);
// character/glyph clump must contain glyphs.
assert!(glyph_span.length() > 0);
// now char_span is a ligature clump, formed by the glyphs in glyph_span.
// we need to find the chars that correspond to actual glyphs (char_extended_span),
//and set glyph info for those and empty infos for the chars that are continuations.
// a simple example:
// chars: 'f' 't' 't'
// glyphs: 'ftt' '' ''
// cgmap: t f f
// gspan: [-]
// cspan: [-]
// covsp: [---------------]
let mut covered_byte_span = char_byte_span.clone();
// extend, clipping at end of text range.
while covered_byte_span.end() < byte_max
&& byte_to_glyph[covered_byte_span.end() as uint] == NO_GLYPH {
let range = text.char_range_at(covered_byte_span.end() as uint);
drop(range.ch);
covered_byte_span.extend_to(range.next as int);
}
if covered_byte_span.begin() >= byte_max {
// oops, out of range. clip and forget this clump.
let end = glyph_span.end(); // FIXME: borrow checker workaround
glyph_span.reset(end, 0);
let end = char_byte_span.end(); // FIXME: borrow checker workaround
char_byte_span.reset(end, 0);
}
// clamp to end of text. (I don't think this will be necessary, but..)
let end = covered_byte_span.end(); // FIXME: borrow checker workaround
covered_byte_span.extend_to(cmp::min(end, byte_max));
// fast path: 1-to-1 mapping of single char and single glyph.
if glyph_span.length() == 1 {
// TODO(Issue #214): cluster ranges need to be computed before
// shaping, and then consulted here.
// for now, just pretend that every character is a cluster start.
// (i.e., pretend there are no combining character sequences).
// 1-to-1 mapping of character to glyph also treated as ligature start.
let shape = glyph_data.get_entry_for_glyph(glyph_span.begin(), &mut y_pos);
let data = GlyphData::new(shape.codepoint,
shape.advance,
shape.offset,
false,
true,
true);
glyphs.add_glyph_for_char_index(char_idx, &data);
} else | {
// collect all glyphs to be assigned to the first character.
let mut datas = vec!();
for glyph_i in glyph_span.each_index() {
let shape = glyph_data.get_entry_for_glyph(glyph_i, &mut y_pos);
datas.push(GlyphData::new(shape.codepoint,
shape.advance,
shape.offset,
false, // not missing
true, // treat as cluster start
glyph_i > glyph_span.begin()));
// all but first are ligature continuations
}
// now add the detailed glyph entry.
glyphs.add_glyphs_for_char_index(char_idx, datas.as_slice());
// set the other chars, who have no glyphs
let mut i = covered_byte_span.begin(); | conditional_block |
|
setup.py | ################################
# These variables are overwritten by Zenoss when the ZenPack is exported
# or saved. Do not modify them directly here.
# NB: PACKAGES is deprecated
NAME = 'ZenPacks.community.DistributedCollectors'
VERSION = '1.7'
AUTHOR = 'Egor Puzanov'
LICENSE = ''
NAMESPACE_PACKAGES = ['ZenPacks', 'ZenPacks.community']
PACKAGES = ['ZenPacks', 'ZenPacks.community', 'ZenPacks.community.DistributedCollectors']
INSTALL_REQUIRES = []
COMPAT_ZENOSS_VERS = '>=2.5'
PREV_ZENPACK_NAME = ''
# STOP_REPLACEMENTS
################################
# Zenoss will not overwrite any changes you make below here.
from setuptools import setup, find_packages
setup( | version = VERSION,
author = AUTHOR,
license = LICENSE,
# This is the version spec which indicates what versions of Zenoss
# this ZenPack is compatible with
compatZenossVers = COMPAT_ZENOSS_VERS,
# previousZenPackName is a facility for telling Zenoss that the name
# of this ZenPack has changed. If no ZenPack with the current name is
# installed, then a ZenPack with this previous name, if installed, will be upgraded.
prevZenPackName = PREV_ZENPACK_NAME,
# Indicate to setuptools which namespace packages the zenpack
# participates in
namespace_packages = NAMESPACE_PACKAGES,
# Tell setuptools what packages this zenpack provides.
packages = find_packages(),
# Tell setuptools to figure out for itself which files to include
# in the binary egg when it is built.
include_package_data = True,
# The MANIFEST.in file is the recommended way of including additional files
# in your ZenPack. package_data is another.
#package_data = {}
# Indicate dependencies on other python modules or ZenPacks. This line
# is modified by zenoss when the ZenPack edit page is submitted. Zenoss
# tries to put add/delete the names it manages at the beginning of this
# list, so any manual additions should be added to the end. Things will
# go poorly if this line is broken into multiple lines or modified too
# dramatically.
install_requires = INSTALL_REQUIRES,
# Every ZenPack egg must define exactly one zenoss.zenpacks entry point
# of this form.
entry_points = {
'zenoss.zenpacks': '%s = %s' % (NAME, NAME),
},
# All ZenPack eggs must be installed in unzipped form.
zip_safe = False,
) | # This ZenPack metadata should usually be edited with the Zenoss
# ZenPack edit page. Whenever the edit page is submitted it will
# overwrite the values below (the ones it knows about) with new values.
name = NAME, | random_line_split |
dispatcher.rs | /*
Copyright 2017 Jinjing Wang
This file is part of mtcp.
mtcp is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
mtcp is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with mtcp. If not, see <http://www.gnu.org/licenses/>.
*/
use std::sync::mpsc;
use constant::*;
use structure::socket::udp::*;
use structure::socket::tcp::*;
use structure::socket::packet::*;
pub fn dispatch
(
tun_in_receiver: TunReceiver,
udp_sender: Option<mpsc::Sender<UDP>>,
tcp_sender: Option<mpsc::Sender<TCP>>,
)
| }
}
}
_ => {}
}
}
}
// fn skip_tun_incoming(connection: Connection) -> bool {
// let tun_ip: IpAddr = IpAddr::from_str("10.0.0.1").unwrap();
// let source_ip = connection.source.ip();
// let destination_ip = connection.destination.ip();
// debug!("comparing {:#?} -> {:#?}, {:#?}", source_ip, destination_ip, tun_ip);
// (source_ip == tun_ip) || (destination_ip == tun_ip);
// false
// }
| {
while let Ok(Some(received)) = tun_in_receiver.recv() {
match parse_packet(&received) {
Some(Packet::UDP(udp)) => {
// debug!("Dispatch UDP: {:#?}", udp.connection);
match udp_sender {
None => {}
Some(ref sender) => {
let _ = sender.send(udp);
}
}
}
Some(Packet::TCP(tcp)) => {
// debug!("Dispatch TCP: {:#?}", tcp.connection);
match tcp_sender {
None => {}
Some(ref sender) => {
let _ = sender.send(tcp); | identifier_body |
dispatcher.rs | /*
Copyright 2017 Jinjing Wang
This file is part of mtcp.
mtcp is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
mtcp is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with mtcp. If not, see <http://www.gnu.org/licenses/>.
*/
use std::sync::mpsc;
use constant::*;
use structure::socket::udp::*;
use structure::socket::tcp::*;
use structure::socket::packet::*;
pub fn |
(
tun_in_receiver: TunReceiver,
udp_sender: Option<mpsc::Sender<UDP>>,
tcp_sender: Option<mpsc::Sender<TCP>>,
)
{
while let Ok(Some(received)) = tun_in_receiver.recv() {
match parse_packet(&received) {
Some(Packet::UDP(udp)) => {
// debug!("Dispatch UDP: {:#?}", udp.connection);
match udp_sender {
None => {}
Some(ref sender) => {
let _ = sender.send(udp);
}
}
}
Some(Packet::TCP(tcp)) => {
// debug!("Dispatch TCP: {:#?}", tcp.connection);
match tcp_sender {
None => {}
Some(ref sender) => {
let _ = sender.send(tcp);
}
}
}
_ => {}
}
}
}
// fn skip_tun_incoming(connection: Connection) -> bool {
// let tun_ip: IpAddr = IpAddr::from_str("10.0.0.1").unwrap();
// let source_ip = connection.source.ip();
// let destination_ip = connection.destination.ip();
// debug!("comparing {:#?} -> {:#?}, {:#?}", source_ip, destination_ip, tun_ip);
// (source_ip == tun_ip) || (destination_ip == tun_ip);
// false
// }
| dispatch | identifier_name |
dispatcher.rs | /*
Copyright 2017 Jinjing Wang
This file is part of mtcp.
mtcp is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
mtcp is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with mtcp. If not, see <http://www.gnu.org/licenses/>.
*/
use std::sync::mpsc;
use constant::*;
use structure::socket::udp::*;
use structure::socket::tcp::*;
use structure::socket::packet::*;
pub fn dispatch
(
tun_in_receiver: TunReceiver,
udp_sender: Option<mpsc::Sender<UDP>>,
tcp_sender: Option<mpsc::Sender<TCP>>,
)
{
while let Ok(Some(received)) = tun_in_receiver.recv() {
match parse_packet(&received) {
Some(Packet::UDP(udp)) => {
// debug!("Dispatch UDP: {:#?}", udp.connection);
match udp_sender {
None => {}
Some(ref sender) => {
let _ = sender.send(udp);
} | }
}
Some(Packet::TCP(tcp)) => {
// debug!("Dispatch TCP: {:#?}", tcp.connection);
match tcp_sender {
None => {}
Some(ref sender) => {
let _ = sender.send(tcp);
}
}
}
_ => {}
}
}
}
// fn skip_tun_incoming(connection: Connection) -> bool {
// let tun_ip: IpAddr = IpAddr::from_str("10.0.0.1").unwrap();
// let source_ip = connection.source.ip();
// let destination_ip = connection.destination.ip();
// debug!("comparing {:#?} -> {:#?}, {:#?}", source_ip, destination_ip, tun_ip);
// (source_ip == tun_ip) || (destination_ip == tun_ip);
// false
// } | random_line_split |
|
shave.js | (function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() :
typeof define === 'function' && define.amd ? define(factory) :
(global.shave = factory());
}(this, (function () { 'use strict';
function shave(target, maxHeight, opts) | if (span) {
// Remove the ellipsis to recapture the original text
el.removeChild(el.querySelector('.js-shave-char'));
el.textContent = el.textContent; // nuke span, recombine text
}
// If already short enough, we're done
if (el.offsetHeight < maxHeight) continue;
var fullText = el.textContent;
var words = spaces ? fullText.split(' ') : fullText;
// If 0 or 1 words, we're done
if (words.length < 2) continue;
// Binary search for number of words which can fit in allotted height
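// (The upper midpoint `min + max + 1 >> 1` rounds up, so each iteration either
// raises `min` or lowers `max`; the search therefore terminates and ends with
// the largest word count that still fits.)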
var max = words.length - 1;
var min = 0;
var pivot = void 0;
while (min < max) {
pivot = min + max + 1 >> 1;
el.textContent = spaces ? words.slice(0, pivot).join(' ') : words.slice(0, pivot);
el.insertAdjacentHTML('beforeend', charHtml);
if (el.offsetHeight > maxHeight) max = spaces ? pivot - 1 : pivot - 2;else min = pivot;
}
el.textContent = spaces ? words.slice(0, max).join(' ') : words.slice(0, max);
el.insertAdjacentHTML('beforeend', charHtml);
var diff = spaces ? words.slice(max + 1).join(' ') : words.slice(max);
el.insertAdjacentHTML('beforeend', '<span class="' + classname + '" style="display:none;">' + diff + '</span>');
}
}
var plugin = window.$ || window.jQuery || window.Zepto;
if (plugin) {
plugin.fn.extend({
shave: function shaveFunc(maxHeight, opts) {
return shave(this, maxHeight, opts);
}
});
}
return shave;
}))); | {
if (!maxHeight) throw Error('maxHeight is required');
var els = typeof target === 'string' ? document.querySelectorAll(target) : target;
if (!('length' in els)) els = [els];
var defaults = {
character: 'β¦',
classname: 'js-shave',
spaces: true
};
var character = opts && opts.character || defaults.character;
var classname = opts && opts.classname || defaults.classname;
var spaces = opts && opts.spaces === false ? false : defaults.spaces;
var charHtml = '<span class="js-shave-char">' + character + '</span>';
for (var i = 0; i < els.length; i++) {
var el = els[i];
var span = el.querySelector('.' + classname);
// If element text has already been shaved | identifier_body |
shave.js | (function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() :
typeof define === 'function' && define.amd ? define(factory) :
(global.shave = factory());
}(this, (function () { 'use strict';
function | (target, maxHeight, opts) {
if (!maxHeight) throw Error('maxHeight is required');
var els = typeof target === 'string' ? document.querySelectorAll(target) : target;
if (!('length' in els)) els = [els];
var defaults = {
character: 'β¦',
classname: 'js-shave',
spaces: true
};
var character = opts && opts.character || defaults.character;
var classname = opts && opts.classname || defaults.classname;
var spaces = opts && opts.spaces === false ? false : defaults.spaces;
var charHtml = '<span class="js-shave-char">' + character + '</span>';
for (var i = 0; i < els.length; i++) {
var el = els[i];
var span = el.querySelector('.' + classname);
// If element text has already been shaved
if (span) {
// Remove the ellipsis to recapture the original text
el.removeChild(el.querySelector('.js-shave-char'));
el.textContent = el.textContent; // nuke span, recombine text
}
// If already short enough, we're done
if (el.offsetHeight < maxHeight) continue;
var fullText = el.textContent;
var words = spaces ? fullText.split(' ') : fullText;
// If 0 or 1 words, we're done
if (words.length < 2) continue;
// Binary search for number of words which can fit in allotted height
var max = words.length - 1;
var min = 0;
var pivot = void 0;
while (min < max) {
pivot = min + max + 1 >> 1;
el.textContent = spaces ? words.slice(0, pivot).join(' ') : words.slice(0, pivot);
el.insertAdjacentHTML('beforeend', charHtml);
if (el.offsetHeight > maxHeight) max = spaces ? pivot - 1 : pivot - 2;else min = pivot;
}
el.textContent = spaces ? words.slice(0, max).join(' ') : words.slice(0, max);
el.insertAdjacentHTML('beforeend', charHtml);
var diff = spaces ? words.slice(max + 1).join(' ') : words.slice(max);
el.insertAdjacentHTML('beforeend', '<span class="' + classname + '" style="display:none;">' + diff + '</span>');
}
}
var plugin = window.$ || window.jQuery || window.Zepto;
if (plugin) {
plugin.fn.extend({
shave: function shaveFunc(maxHeight, opts) {
return shave(this, maxHeight, opts);
}
});
}
return shave;
}))); | shave | identifier_name |
shave.js | (function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() :
typeof define === 'function' && define.amd ? define(factory) :
(global.shave = factory());
}(this, (function () { 'use strict';
function shave(target, maxHeight, opts) {
if (!maxHeight) throw Error('maxHeight is required');
var els = typeof target === 'string' ? document.querySelectorAll(target) : target;
if (!('length' in els)) els = [els];
var defaults = {
character: 'β¦',
classname: 'js-shave',
spaces: true
}; |
for (var i = 0; i < els.length; i++) {
var el = els[i];
var span = el.querySelector('.' + classname);
// If element text has already been shaved
if (span) {
// Remove the ellipsis to recapture the original text
el.removeChild(el.querySelector('.js-shave-char'));
el.textContent = el.textContent; // nuke span, recombine text
}
// If already short enough, we're done
if (el.offsetHeight < maxHeight) continue;
var fullText = el.textContent;
var words = spaces ? fullText.split(' ') : fullText;
// If 0 or 1 words, we're done
if (words.length < 2) continue;
// Binary search for number of words which can fit in allotted height
var max = words.length - 1;
var min = 0;
var pivot = void 0;
while (min < max) {
pivot = min + max + 1 >> 1;
el.textContent = spaces ? words.slice(0, pivot).join(' ') : words.slice(0, pivot);
el.insertAdjacentHTML('beforeend', charHtml);
if (el.offsetHeight > maxHeight) max = spaces ? pivot - 1 : pivot - 2;else min = pivot;
}
el.textContent = spaces ? words.slice(0, max).join(' ') : words.slice(0, max);
el.insertAdjacentHTML('beforeend', charHtml);
var diff = spaces ? words.slice(max + 1).join(' ') : words.slice(max);
el.insertAdjacentHTML('beforeend', '<span class="' + classname + '" style="display:none;">' + diff + '</span>');
}
}
var plugin = window.$ || window.jQuery || window.Zepto;
if (plugin) {
plugin.fn.extend({
shave: function shaveFunc(maxHeight, opts) {
return shave(this, maxHeight, opts);
}
});
}
return shave;
}))); | var character = opts && opts.character || defaults.character;
var classname = opts && opts.classname || defaults.classname;
var spaces = opts && opts.spaces === false ? false : defaults.spaces;
var charHtml = '<span class="js-shave-char">' + character + '</span>'; | random_line_split |
banner.ts | import { readFile, writeFile } from "fs-extra";
import log from "loglevel";
import { BuiltInParserName } from "prettier";
import { CopyConfig, COPY_BANNER } from "../../constants";
import { format } from "../format";
/**
* Copies a file with a banner showing it should not be manually updated.
*/
export async function copyFileWithBanner(
src: string,
dest: string,
parser?: BuiltInParserName
): Promise<void> {
const contents = await readFile(src, "utf8");
return writeFile(dest, format(`${COPY_BANNER}${contents}`, parser)).catch(
(e) => {
log.error(e);
process.exit(1);
}
);
}
export async function | (
files: readonly CopyConfig[]
): Promise<void> {
await Promise.all(
files.map(({ src, dest }) =>
copyFileWithBanner(src, dest).catch((e) => {
log.error(e);
process.exit(1);
})
)
);
}
| copyFilesWithBanner | identifier_name |
banner.ts | import { readFile, writeFile } from "fs-extra";
import log from "loglevel";
import { BuiltInParserName } from "prettier";
import { CopyConfig, COPY_BANNER } from "../../constants";
import { format } from "../format";
/**
* Copies a file with a banner showing it should not be manually updated.
*/
export async function copyFileWithBanner(
src: string,
dest: string,
parser?: BuiltInParserName | (e) => {
log.error(e);
process.exit(1);
}
);
}
export async function copyFilesWithBanner(
files: readonly CopyConfig[]
): Promise<void> {
await Promise.all(
files.map(({ src, dest }) =>
copyFileWithBanner(src, dest).catch((e) => {
log.error(e);
process.exit(1);
})
)
);
} | ): Promise<void> {
const contents = await readFile(src, "utf8");
return writeFile(dest, format(`${COPY_BANNER}${contents}`, parser)).catch( | random_line_split |
banner.ts | import { readFile, writeFile } from "fs-extra";
import log from "loglevel";
import { BuiltInParserName } from "prettier";
import { CopyConfig, COPY_BANNER } from "../../constants";
import { format } from "../format";
/**
* Copies a file with a banner showing it should not be manually updated.
*/
export async function copyFileWithBanner(
src: string,
dest: string,
parser?: BuiltInParserName
): Promise<void> {
const contents = await readFile(src, "utf8");
return writeFile(dest, format(`${COPY_BANNER}${contents}`, parser)).catch(
(e) => {
log.error(e);
process.exit(1);
}
);
}
export async function copyFilesWithBanner(
files: readonly CopyConfig[]
): Promise<void> | {
await Promise.all(
files.map(({ src, dest }) =>
copyFileWithBanner(src, dest).catch((e) => {
log.error(e);
process.exit(1);
})
)
);
} | identifier_body |
|
networks.py | )
for r in self._start_resolutions
])
def scale_factor(self, block_id):
"""Returns the scale factor for network block `block_id`."""
if block_id < 1 or block_id > self._num_resolutions:
raise ValueError('`block_id` must be in [1, {}]'.format(
self._num_resolutions))
return self._scale_base**(self._num_resolutions - block_id)
| return 'progressive_gan_block_{}'.format(block_id)
def min_total_num_images(stable_stage_num_images, transition_stage_num_images,
num_blocks):
"""Returns the minimum total number of images.
Computes the minimum total number of images required to reach the desired
`resolution`.
Args:
stable_stage_num_images: Number of images in the stable stage.
transition_stage_num_images: Number of images in the transition stage.
num_blocks: Number of network blocks.
Returns:
An integer of the minimum total number of images.
"""
return (num_blocks * stable_stage_num_images +
(num_blocks - 1) * transition_stage_num_images)
def compute_progress(current_image_id, stable_stage_num_images,
transition_stage_num_images, num_blocks):
"""Computes the training progress.
The training alternates between stable phase and transition phase.
The `progress` indicates the training progress, i.e. the training is at
- a stable phase p if progress = p
- a transition stage between p and p + 1 if progress = p + fraction
where p = 0,1,2.,...
Note the max value of progress is `num_blocks` - 1.
In terms of LOD (of the original implementation):
progress = `num_blocks` - 1 - LOD
Args:
current_image_id: An scalar integer `Tensor` of the current image id, count
from 0.
stable_stage_num_images: An integer representing the number of images in
each stable stage.
transition_stage_num_images: An integer representing the number of images in
each transition stage.
num_blocks: Number of network blocks.
Returns:
A scalar float `Tensor` of the training progress.
"""
# Note when current_image_id >= min_total_num_images - 1 (which means we
# are already at the highest resolution), we want to keep progress constant.
# Therefore, cap current_image_id here.
capped_current_image_id = tf.minimum(
current_image_id,
min_total_num_images(stable_stage_num_images, transition_stage_num_images,
num_blocks) - 1)
stage_num_images = stable_stage_num_images + transition_stage_num_images
progress_integer = tf.math.floordiv(capped_current_image_id, stage_num_images)
progress_fraction = tf.maximum(
0.0,
tf.cast(
tf.math.mod(capped_current_image_id, stage_num_images) -
stable_stage_num_images,
dtype=tf.float32) /
tf.cast(transition_stage_num_images, dtype=tf.float32))
return tf.cast(progress_integer, dtype=tf.float32) + progress_fraction
def _generator_alpha(block_id, progress):
"""Returns the block output parameter for the generator network.
The generator has N blocks with `block_id` = 1,2,...,N. Each block
block_id outputs a fake data output(block_id). The generator output is a
linear combination of all block outputs, i.e.
SUM_block_id(output(block_id) * alpha(block_id, progress)) where
alpha(block_id, progress) = _generator_alpha(block_id, progress). Note it
garantees that SUM_block_id(alpha(block_id, progress)) = 1 for any progress.
With a fixed block_id, the plot of alpha(block_id, progress) against progress
is a 'triangle' with its peak at (block_id - 1, 1).
Args:
block_id: An integer of generator block id.
progress: A scalar float `Tensor` of training progress.
Returns:
A scalar float `Tensor` of block output parameter.
"""
return tf.maximum(0.0,
tf.minimum(progress - (block_id - 2), block_id - progress))
def _discriminator_alpha(block_id, progress):
"""Returns the block input parameter for discriminator network.
The discriminator has N blocks with `block_id` = 1,2,...,N. Each block
block_id accepts an
- input(block_id) transformed from the real data and
- the output of block block_id + 1, i.e. output(block_id + 1)
The final input is a linear combination of them,
i.e. alpha * input(block_id) + (1 - alpha) * output(block_id + 1)
where alpha = _discriminator_alpha(block_id, progress).
With a fixed block_id, alpha(block_id, progress) stays to be 1
when progress <= block_id - 1, then linear decays to 0 when
block_id - 1 < progress <= block_id, and finally stays at 0
when progress > block_id.
Args:
block_id: An integer of generator block id.
progress: A scalar float `Tensor` of training progress.
Returns:
A scalar float `Tensor` of block input parameter.
"""
return tf.clip_by_value(block_id - progress, 0.0, 1.0)
def blend_images(x, progress, resolution_schedule, num_blocks):
"""Blends images of different resolutions according to `progress`.
When training `progress` is at a stable stage for resolution r, returns
image `x` downscaled to resolution r and then upscaled to `final_resolutions`,
call it x'(r).
Otherwise when training `progress` is at a transition stage from resolution
r to 2r, returns a linear combination of x'(r) and x'(2r).
Args:
x: An image `Tensor` of NHWC format with resolution `final_resolutions`.
progress: A scalar float `Tensor` of training progress.
resolution_schedule: An object of `ResolutionSchedule`.
num_blocks: An integer of number of blocks.
Returns:
An image `Tensor` which is a blend of images of different resolutions.
"""
x_blend = []
for block_id in range(1, num_blocks + 1):
alpha = _generator_alpha(block_id, progress)
scale = resolution_schedule.scale_factor(block_id)
x_blend.append(alpha * layers.upscale(layers.downscale(x, scale), scale))
return tf.add_n(x_blend)
def num_filters(block_id, fmap_base=4096, fmap_decay=1.0, fmap_max=256):
"""Computes number of filters of block `block_id`."""
return int(min(fmap_base / math.pow(2.0, block_id * fmap_decay), fmap_max))
def generator(z,
progress,
num_filters_fn,
resolution_schedule,
num_blocks=None,
kernel_size=3,
colors=3,
to_rgb_activation=None,
scope='progressive_gan_generator',
reuse=None):
"""Generator network for the progressive GAN model.
Args:
z: A `Tensor` of latent vector. The first dimension must be batch size.
progress: A scalar float `Tensor` of training progress.
num_filters_fn: A function that maps `block_id` to # of filters for the
block.
resolution_schedule: An object of `ResolutionSchedule`.
num_blocks: An integer of number of blocks. None means maximum number of
blocks, i.e. `resolution.schedule.num_resolutions`. Defaults to None.
kernel_size: An integer of convolution kernel size.
colors: Number of output color channels. Defaults to 3.
to_rgb_activation: Activation function applied when output rgb.
scope: A string or variable scope.
reuse: Whether to reuse `scope`. Defaults to None which means to inherit the
reuse option of the parent scope.
Returns:
A `Tensor` of model output and a dictionary of model end points.
"""
if num_blocks is None:
num_blocks = resolution_schedule.num_resolutions
start_h, start_w = resolution_schedule.start_resolutions
final_h, final_w = resolution_schedule.final_resolutions
def _conv2d(scope, x, kernel_size, filters, padding='SAME'):
return layers.custom_conv2d(
x=x,
filters=filters,
kernel_size=kernel_size,
padding=padding,
activation=lambda x: layers.pixel_norm(tf.nn.leaky_relu(x)),
he_initializer_slope=0.0,
scope=scope)
def _to_rgb(x):
return layers.custom_conv2d(
x=x,
filters=colors,
kernel_size=1,
padding='SAME',
activation=to_rgb_activation,
scope='to_rgb')
end_points = {}
with tf.variable_scope(scope, reuse=reuse):
with tf.name_scope('input'):
x = tf.layers.flatten(z)
end_points['latent_vector'] = x
with tf.variable_scope(block_name(1)):
x = tf.expand_dims(tf.expand_dims(x, 1), 1)
x = layers.pixel_norm(x)
# Pad the 1 x 1 image to 2 * (start_h - 1) x 2 * (start_w - 1)
# with zeros | def block_name(block_id):
"""Returns the scope name for the network block `block_id`.""" | random_line_split |
networks.py | )
for r in self._start_resolutions
])
def scale_factor(self, block_id):
"""Returns the scale factor for network block `block_id`."""
if block_id < 1 or block_id > self._num_resolutions:
|
return self._scale_base**(self._num_resolutions - block_id)
def block_name(block_id):
"""Returns the scope name for the network block `block_id`."""
return 'progressive_gan_block_{}'.format(block_id)
def min_total_num_images(stable_stage_num_images, transition_stage_num_images,
num_blocks):
"""Returns the minimum total number of images.
Computes the minimum total number of images required to reach the desired
`resolution`.
Args:
stable_stage_num_images: Number of images in the stable stage.
transition_stage_num_images: Number of images in the transition stage.
num_blocks: Number of network blocks.
Returns:
An integer of the minimum total number of images.
"""
return (num_blocks * stable_stage_num_images +
(num_blocks - 1) * transition_stage_num_images)
def compute_progress(current_image_id, stable_stage_num_images,
transition_stage_num_images, num_blocks):
"""Computes the training progress.
The training alternates between stable phase and transition phase.
The `progress` indicates the training progress, i.e. the training is at
- a stable phase p if progress = p
- a transition stage between p and p + 1 if progress = p + fraction
where p = 0,1,2.,...
Note the max value of progress is `num_blocks` - 1.
In terms of LOD (of the original implementation):
progress = `num_blocks` - 1 - LOD
Args:
current_image_id: An scalar integer `Tensor` of the current image id, count
from 0.
stable_stage_num_images: An integer representing the number of images in
each stable stage.
transition_stage_num_images: An integer representing the number of images in
each transition stage.
num_blocks: Number of network blocks.
Returns:
A scalar float `Tensor` of the training progress.
"""
# Note when current_image_id >= min_total_num_images - 1 (which means we
# are already at the highest resolution), we want to keep progress constant.
# Therefore, cap current_image_id here.
capped_current_image_id = tf.minimum(
current_image_id,
min_total_num_images(stable_stage_num_images, transition_stage_num_images,
num_blocks) - 1)
stage_num_images = stable_stage_num_images + transition_stage_num_images
progress_integer = tf.math.floordiv(capped_current_image_id, stage_num_images)
progress_fraction = tf.maximum(
0.0,
tf.cast(
tf.math.mod(capped_current_image_id, stage_num_images) -
stable_stage_num_images,
dtype=tf.float32) /
tf.cast(transition_stage_num_images, dtype=tf.float32))
return tf.cast(progress_integer, dtype=tf.float32) + progress_fraction
def _generator_alpha(block_id, progress):
"""Returns the block output parameter for the generator network.
The generator has N blocks with `block_id` = 1,2,...,N. Each block
block_id outputs a fake data output(block_id). The generator output is a
linear combination of all block outputs, i.e.
SUM_block_id(output(block_id) * alpha(block_id, progress)) where
alpha(block_id, progress) = _generator_alpha(block_id, progress). Note it
garantees that SUM_block_id(alpha(block_id, progress)) = 1 for any progress.
With a fixed block_id, the plot of alpha(block_id, progress) against progress
is a 'triangle' with its peak at (block_id - 1, 1).
Args:
block_id: An integer of generator block id.
progress: A scalar float `Tensor` of training progress.
Returns:
A scalar float `Tensor` of block output parameter.
"""
return tf.maximum(0.0,
tf.minimum(progress - (block_id - 2), block_id - progress))
def _discriminator_alpha(block_id, progress):
"""Returns the block input parameter for discriminator network.
The discriminator has N blocks with `block_id` = 1,2,...,N. Each block
block_id accepts an
- input(block_id) transformed from the real data and
- the output of block block_id + 1, i.e. output(block_id + 1)
The final input is a linear combination of them,
i.e. alpha * input(block_id) + (1 - alpha) * output(block_id + 1)
where alpha = _discriminator_alpha(block_id, progress).
With a fixed block_id, alpha(block_id, progress) stays to be 1
when progress <= block_id - 1, then linear decays to 0 when
block_id - 1 < progress <= block_id, and finally stays at 0
when progress > block_id.
Args:
block_id: An integer of generator block id.
progress: A scalar float `Tensor` of training progress.
Returns:
A scalar float `Tensor` of block input parameter.
"""
return tf.clip_by_value(block_id - progress, 0.0, 1.0)
def blend_images(x, progress, resolution_schedule, num_blocks):
"""Blends images of different resolutions according to `progress`.
When training `progress` is at a stable stage for resolution r, returns
image `x` downscaled to resolution r and then upscaled to `final_resolutions`,
call it x'(r).
Otherwise when training `progress` is at a transition stage from resolution
r to 2r, returns a linear combination of x'(r) and x'(2r).
Args:
x: An image `Tensor` of NHWC format with resolution `final_resolutions`.
progress: A scalar float `Tensor` of training progress.
resolution_schedule: An object of `ResolutionSchedule`.
num_blocks: An integer of number of blocks.
Returns:
An image `Tensor` which is a blend of images of different resolutions.
"""
x_blend = []
for block_id in range(1, num_blocks + 1):
alpha = _generator_alpha(block_id, progress)
scale = resolution_schedule.scale_factor(block_id)
x_blend.append(alpha * layers.upscale(layers.downscale(x, scale), scale))
return tf.add_n(x_blend)
def num_filters(block_id, fmap_base=4096, fmap_decay=1.0, fmap_max=256):
"""Computes number of filters of block `block_id`."""
return int(min(fmap_base / math.pow(2.0, block_id * fmap_decay), fmap_max))
def generator(z,
progress,
num_filters_fn,
resolution_schedule,
num_blocks=None,
kernel_size=3,
colors=3,
to_rgb_activation=None,
scope='progressive_gan_generator',
reuse=None):
"""Generator network for the progressive GAN model.
Args:
z: A `Tensor` of latent vector. The first dimension must be batch size.
progress: A scalar float `Tensor` of training progress.
num_filters_fn: A function that maps `block_id` to # of filters for the
block.
resolution_schedule: An object of `ResolutionSchedule`.
num_blocks: An integer of number of blocks. None means maximum number of
blocks, i.e. `resolution.schedule.num_resolutions`. Defaults to None.
kernel_size: An integer of convolution kernel size.
colors: Number of output color channels. Defaults to 3.
to_rgb_activation: Activation function applied when output rgb.
scope: A string or variable scope.
reuse: Whether to reuse `scope`. Defaults to None which means to inherit the
reuse option of the parent scope.
Returns:
A `Tensor` of model output and a dictionary of model end points.
"""
if num_blocks is None:
num_blocks = resolution_schedule.num_resolutions
start_h, start_w = resolution_schedule.start_resolutions
final_h, final_w = resolution_schedule.final_resolutions
def _conv2d(scope, x, kernel_size, filters, padding='SAME'):
return layers.custom_conv2d(
x=x,
filters=filters,
kernel_size=kernel_size,
padding=padding,
activation=lambda x: layers.pixel_norm(tf.nn.leaky_relu(x)),
he_initializer_slope=0.0,
scope=scope)
def _to_rgb(x):
return layers.custom_conv2d(
x=x,
filters=colors,
kernel_size=1,
padding='SAME',
activation=to_rgb_activation,
scope='to_rgb')
end_points = {}
with tf.variable_scope(scope, reuse=reuse):
with tf.name_scope('input'):
x = tf.layers.flatten(z)
end_points['latent_vector'] = x
with tf.variable_scope(block_name(1)):
x = tf.expand_dims(tf.expand_dims(x, 1), 1)
x = layers.pixel_norm(x)
# Pad the 1 x 1 image to 2 * (start_h - 1) x 2 * (start_w - 1)
# with | raise ValueError('`block_id` must be in [1, {}]'.format(
self._num_resolutions)) | conditional_block |
networks.py | 2,...,N. Each block
block_id outputs a fake data output(block_id). The generator output is a
linear combination of all block outputs, i.e.
SUM_block_id(output(block_id) * alpha(block_id, progress)) where
alpha(block_id, progress) = _generator_alpha(block_id, progress). Note it
garantees that SUM_block_id(alpha(block_id, progress)) = 1 for any progress.
With a fixed block_id, the plot of alpha(block_id, progress) against progress
is a 'triangle' with its peak at (block_id - 1, 1).
Args:
block_id: An integer of generator block id.
progress: A scalar float `Tensor` of training progress.
Returns:
A scalar float `Tensor` of block output parameter.
"""
return tf.maximum(0.0,
tf.minimum(progress - (block_id - 2), block_id - progress))
def _discriminator_alpha(block_id, progress):
"""Returns the block input parameter for discriminator network.
The discriminator has N blocks with `block_id` = 1,2,...,N. Each block
block_id accepts an
- input(block_id) transformed from the real data and
- the output of block block_id + 1, i.e. output(block_id + 1)
The final input is a linear combination of them,
i.e. alpha * input(block_id) + (1 - alpha) * output(block_id + 1)
where alpha = _discriminator_alpha(block_id, progress).
With a fixed block_id, alpha(block_id, progress) stays to be 1
when progress <= block_id - 1, then linear decays to 0 when
block_id - 1 < progress <= block_id, and finally stays at 0
when progress > block_id.
Args:
block_id: An integer of generator block id.
progress: A scalar float `Tensor` of training progress.
Returns:
A scalar float `Tensor` of block input parameter.
"""
return tf.clip_by_value(block_id - progress, 0.0, 1.0)
def blend_images(x, progress, resolution_schedule, num_blocks):
"""Blends images of different resolutions according to `progress`.
When training `progress` is at a stable stage for resolution r, returns
image `x` downscaled to resolution r and then upscaled to `final_resolutions`,
call it x'(r).
Otherwise when training `progress` is at a transition stage from resolution
r to 2r, returns a linear combination of x'(r) and x'(2r).
Args:
x: An image `Tensor` of NHWC format with resolution `final_resolutions`.
progress: A scalar float `Tensor` of training progress.
resolution_schedule: An object of `ResolutionSchedule`.
num_blocks: An integer of number of blocks.
Returns:
An image `Tensor` which is a blend of images of different resolutions.
"""
x_blend = []
for block_id in range(1, num_blocks + 1):
alpha = _generator_alpha(block_id, progress)
scale = resolution_schedule.scale_factor(block_id)
x_blend.append(alpha * layers.upscale(layers.downscale(x, scale), scale))
return tf.add_n(x_blend)
def num_filters(block_id, fmap_base=4096, fmap_decay=1.0, fmap_max=256):
"""Computes number of filters of block `block_id`."""
return int(min(fmap_base / math.pow(2.0, block_id * fmap_decay), fmap_max))
def generator(z,
progress,
num_filters_fn,
resolution_schedule,
num_blocks=None,
kernel_size=3,
colors=3,
to_rgb_activation=None,
scope='progressive_gan_generator',
reuse=None):
"""Generator network for the progressive GAN model.
Args:
z: A `Tensor` of latent vector. The first dimension must be batch size.
progress: A scalar float `Tensor` of training progress.
num_filters_fn: A function that maps `block_id` to # of filters for the
block.
resolution_schedule: An object of `ResolutionSchedule`.
num_blocks: An integer of number of blocks. None means maximum number of
blocks, i.e. `resolution.schedule.num_resolutions`. Defaults to None.
kernel_size: An integer of convolution kernel size.
colors: Number of output color channels. Defaults to 3.
to_rgb_activation: Activation function applied when output rgb.
scope: A string or variable scope.
reuse: Whether to reuse `scope`. Defaults to None which means to inherit the
reuse option of the parent scope.
Returns:
A `Tensor` of model output and a dictionary of model end points.
"""
if num_blocks is None:
num_blocks = resolution_schedule.num_resolutions
start_h, start_w = resolution_schedule.start_resolutions
final_h, final_w = resolution_schedule.final_resolutions
def _conv2d(scope, x, kernel_size, filters, padding='SAME'):
return layers.custom_conv2d(
x=x,
filters=filters,
kernel_size=kernel_size,
padding=padding,
activation=lambda x: layers.pixel_norm(tf.nn.leaky_relu(x)),
he_initializer_slope=0.0,
scope=scope)
def _to_rgb(x):
return layers.custom_conv2d(
x=x,
filters=colors,
kernel_size=1,
padding='SAME',
activation=to_rgb_activation,
scope='to_rgb')
end_points = {}
with tf.variable_scope(scope, reuse=reuse):
with tf.name_scope('input'):
x = tf.layers.flatten(z)
end_points['latent_vector'] = x
with tf.variable_scope(block_name(1)):
x = tf.expand_dims(tf.expand_dims(x, 1), 1)
x = layers.pixel_norm(x)
# Pad the 1 x 1 image to 2 * (start_h - 1) x 2 * (start_w - 1)
# with zeros for the next conv.
x = tf.pad(
tensor=x,
paddings=[[0] * 2, [start_h - 1] * 2, [start_w - 1] * 2, [0] * 2])
# The output is start_h x start_w x num_filters_fn(1).
x = _conv2d('conv0', x, (start_h, start_w), num_filters_fn(1), 'VALID')
x = _conv2d('conv1', x, kernel_size, num_filters_fn(1))
lods = [x]
for block_id in range(2, num_blocks + 1):
with tf.variable_scope(block_name(block_id)):
x = layers.upscale(x, resolution_schedule.scale_base)
x = _conv2d('conv0', x, kernel_size, num_filters_fn(block_id))
x = _conv2d('conv1', x, kernel_size, num_filters_fn(block_id))
lods.append(x)
outputs = []
for block_id in range(1, num_blocks + 1):
with tf.variable_scope(block_name(block_id)):
lod = _to_rgb(lods[block_id - 1])
scale = resolution_schedule.scale_factor(block_id)
lod = layers.upscale(lod, scale)
end_points['upscaled_rgb_{}'.format(block_id)] = lod
# alpha_i is used to replace lod_select. Note sum(alpha_i) is
# garanteed to be 1.
alpha = _generator_alpha(block_id, progress)
end_points['alpha_{}'.format(block_id)] = alpha
outputs.append(lod * alpha)
predictions = tf.add_n(outputs)
batch_size = tf.compat.dimension_value(z.shape[0])
predictions.set_shape([batch_size, final_h, final_w, colors])
end_points['predictions'] = predictions
return predictions, end_points
def discriminator(x,
progress,
num_filters_fn,
resolution_schedule,
num_blocks=None,
kernel_size=3,
scope='progressive_gan_discriminator',
reuse=None):
"""Discriminator network for the progressive GAN model.
Args:
x: A `Tensor`of NHWC format representing images of size `resolution`.
progress: A scalar float `Tensor` of training progress.
num_filters_fn: A function that maps `block_id` to # of filters for the
block.
resolution_schedule: An object of `ResolutionSchedule`.
num_blocks: An integer of number of blocks. None means maximum number of
blocks, i.e. `resolution.schedule.num_resolutions`. Defaults to None.
kernel_size: An integer of convolution kernel size.
scope: A string or variable scope.
reuse: Whether to reuse `scope`. Defaults to None which means to inherit the
reuse option of the parent scope.
Returns:
A `Tensor` of model output and a dictionary of model end points.
"""
if num_blocks is None:
num_blocks = resolution_schedule.num_resolutions
def _conv2d(scope, x, kernel_size, filters, padding='SAME'):
return layers.custom_conv2d(
x=x,
filters=filters,
kernel_size=kernel_size,
padding=padding,
activation=tf.nn.leaky_relu,
he_initializer_slope=0.0,
scope=scope)
def | _from_rgb | identifier_name |
|
networks.py |
@property
def final_resolutions(self):
"""Returns the final resolutions."""
return tuple([
r * self._scale_base**(self._num_resolutions - 1)
for r in self._start_resolutions
])
def scale_factor(self, block_id):
"""Returns the scale factor for network block `block_id`."""
if block_id < 1 or block_id > self._num_resolutions:
raise ValueError('`block_id` must be in [1, {}]'.format(
self._num_resolutions))
return self._scale_base**(self._num_resolutions - block_id)
def block_name(block_id):
"""Returns the scope name for the network block `block_id`."""
return 'progressive_gan_block_{}'.format(block_id)
def min_total_num_images(stable_stage_num_images, transition_stage_num_images,
num_blocks):
"""Returns the minimum total number of images.
Computes the minimum total number of images required to reach the desired
`resolution`.
Args:
stable_stage_num_images: Number of images in the stable stage.
transition_stage_num_images: Number of images in the transition stage.
num_blocks: Number of network blocks.
Returns:
An integer of the minimum total number of images.
"""
return (num_blocks * stable_stage_num_images +
(num_blocks - 1) * transition_stage_num_images)
def compute_progress(current_image_id, stable_stage_num_images,
transition_stage_num_images, num_blocks):
"""Computes the training progress.
The training alternates between stable phase and transition phase.
The `progress` indicates the training progress, i.e. the training is at
- a stable phase p if progress = p
- a transition stage between p and p + 1 if progress = p + fraction
where p = 0,1,2.,...
Note the max value of progress is `num_blocks` - 1.
In terms of LOD (of the original implementation):
progress = `num_blocks` - 1 - LOD
Args:
current_image_id: An scalar integer `Tensor` of the current image id, count
from 0.
stable_stage_num_images: An integer representing the number of images in
each stable stage.
transition_stage_num_images: An integer representing the number of images in
each transition stage.
num_blocks: Number of network blocks.
Returns:
A scalar float `Tensor` of the training progress.
"""
# Note when current_image_id >= min_total_num_images - 1 (which means we
# are already at the highest resolution), we want to keep progress constant.
# Therefore, cap current_image_id here.
capped_current_image_id = tf.minimum(
current_image_id,
min_total_num_images(stable_stage_num_images, transition_stage_num_images,
num_blocks) - 1)
stage_num_images = stable_stage_num_images + transition_stage_num_images
progress_integer = tf.math.floordiv(capped_current_image_id, stage_num_images)
progress_fraction = tf.maximum(
0.0,
tf.cast(
tf.math.mod(capped_current_image_id, stage_num_images) -
stable_stage_num_images,
dtype=tf.float32) /
tf.cast(transition_stage_num_images, dtype=tf.float32))
return tf.cast(progress_integer, dtype=tf.float32) + progress_fraction
def _generator_alpha(block_id, progress):
"""Returns the block output parameter for the generator network.
The generator has N blocks with `block_id` = 1,2,...,N. Each block
block_id outputs a fake data output(block_id). The generator output is a
linear combination of all block outputs, i.e.
SUM_block_id(output(block_id) * alpha(block_id, progress)) where
alpha(block_id, progress) = _generator_alpha(block_id, progress). Note it
garantees that SUM_block_id(alpha(block_id, progress)) = 1 for any progress.
With a fixed block_id, the plot of alpha(block_id, progress) against progress
is a 'triangle' with its peak at (block_id - 1, 1).
Args:
block_id: An integer of generator block id.
progress: A scalar float `Tensor` of training progress.
Returns:
A scalar float `Tensor` of block output parameter.
"""
return tf.maximum(0.0,
tf.minimum(progress - (block_id - 2), block_id - progress))
def _discriminator_alpha(block_id, progress):
"""Returns the block input parameter for discriminator network.
The discriminator has N blocks with `block_id` = 1,2,...,N. Each block
block_id accepts an
- input(block_id) transformed from the real data and
- the output of block block_id + 1, i.e. output(block_id + 1)
The final input is a linear combination of them,
i.e. alpha * input(block_id) + (1 - alpha) * output(block_id + 1)
where alpha = _discriminator_alpha(block_id, progress).
With a fixed block_id, alpha(block_id, progress) stays to be 1
when progress <= block_id - 1, then linear decays to 0 when
block_id - 1 < progress <= block_id, and finally stays at 0
when progress > block_id.
Args:
block_id: An integer of generator block id.
progress: A scalar float `Tensor` of training progress.
Returns:
A scalar float `Tensor` of block input parameter.
"""
return tf.clip_by_value(block_id - progress, 0.0, 1.0)
def blend_images(x, progress, resolution_schedule, num_blocks):
"""Blends images of different resolutions according to `progress`.
When training `progress` is at a stable stage for resolution r, returns
image `x` downscaled to resolution r and then upscaled to `final_resolutions`,
call it x'(r).
Otherwise when training `progress` is at a transition stage from resolution
r to 2r, returns a linear combination of x'(r) and x'(2r).
Args:
x: An image `Tensor` of NHWC format with resolution `final_resolutions`.
progress: A scalar float `Tensor` of training progress.
resolution_schedule: An object of `ResolutionSchedule`.
num_blocks: An integer of number of blocks.
Returns:
An image `Tensor` which is a blend of images of different resolutions.
"""
x_blend = []
for block_id in range(1, num_blocks + 1):
alpha = _generator_alpha(block_id, progress)
scale = resolution_schedule.scale_factor(block_id)
x_blend.append(alpha * layers.upscale(layers.downscale(x, scale), scale))
return tf.add_n(x_blend)
def num_filters(block_id, fmap_base=4096, fmap_decay=1.0, fmap_max=256):
"""Computes number of filters of block `block_id`."""
return int(min(fmap_base / math.pow(2.0, block_id * fmap_decay), fmap_max))
def generator(z,
progress,
num_filters_fn,
resolution_schedule,
num_blocks=None,
kernel_size=3,
colors=3,
to_rgb_activation=None,
scope='progressive_gan_generator',
reuse=None):
"""Generator network for the progressive GAN model.
Args:
z: A `Tensor` of latent vector. The first dimension must be batch size.
progress: A scalar float `Tensor` of training progress.
num_filters_fn: A function that maps `block_id` to # of filters for the
block.
resolution_schedule: An object of `ResolutionSchedule`.
num_blocks: An integer of number of blocks. None means maximum number of
blocks, i.e. `resolution.schedule.num_resolutions`. Defaults to None.
kernel_size: An integer of convolution kernel size.
colors: Number of output color channels. Defaults to 3.
to_rgb_activation: Activation function applied when output rgb.
scope: A string or variable scope.
reuse: Whether to reuse `scope`. Defaults to None which means to inherit the
reuse option of the parent scope.
Returns:
A `Tensor` of model output and a dictionary of model end points.
"""
if num_blocks is None:
num_blocks = resolution_schedule.num_resolutions
start_h, start_w = resolution_schedule.start_resolutions
final_h, final_w = resolution_schedule.final_resolutions
def _conv2d(scope, x, kernel_size, filters, padding='SAME'):
return layers.custom_conv2d(
x=x,
filters=filters,
kernel_size=kernel_size,
padding=padding,
activation=lambda x: layers.pixel_norm(tf.nn.leaky_relu(x)),
he_initializer_slope=0.0,
scope=scope)
def _to_rgb(x):
return layers.custom_conv2d(
x=x,
filters=colors,
kernel_size=1,
padding='SAME',
activation=to_rgb_activation,
scope='to_rgb')
end_points = {}
with tf.variable_scope(scope, reuse=reuse):
with tf.name_scope('input'):
x = tf.layers.flatten(z)
end_points['latent_vector'] = x
with tf.variable_scope(block_name(1)):
x = tf.expand_dims(tf.expand_dims(x, 1), 1 | return self._num_resolutions | identifier_body |
|
geopolygon.js | /**
* Pimcore
*
| * http://www.pimcore.org/license
*
* @copyright Copyright (c) 2009-2010 elements.at New Media Solutions GmbH (http://www.elements.at)
* @license http://www.pimcore.org/license New BSD License
*/
pimcore.registerNS('pimcore.object.classes.data.geopolygon');
pimcore.object.classes.data.geopolygon = Class.create(pimcore.object.classes.data.geo.abstract, {
type: 'geopolygon',
initialize: function (treeNode, initData) {
this.type = 'geopolygon';
this.initData(initData);
// overwrite default settings
this.availableSettingsFields = ['name','title','noteditable','invisible','style'];
this.treeNode = treeNode;
},
getTypeName: function () {
return t('geopolygon');
},
getGroup: function () {
return 'geo';
},
getIconClass: function () {
return 'pimcore_icon_geopolygon';
}
}); | * LICENSE
*
* This source file is subject to the new BSD license that is bundled
* with this package in the file LICENSE.txt.
* It is also available through the world-wide-web at this URL:
| random_line_split |
test-stream-writable-samecb-singletick.js | 'use strict';
const common = require('../common');
const { Console } = require('console');
const { Writable } = require('stream');
const async_hooks = require('async_hooks');
// Make sure that repeated calls to console.log(), and by extension
// stream.write() for the underlying stream, allocate exactly 1 tick object.
// At the time of writing, that is enough to ensure a flat memory profile
// from repeated console.log() calls, rather than having callbacks pile up
// over time, assuming that data can be written synchronously.
// Refs: https://github.com/nodejs/node/issues/18013
// Refs: https://github.com/nodejs/node/issues/18367
const checkTickCreated = common.mustCall();
async_hooks.createHook({
init(id, type, triggerId, resoure) {
if (type === 'TickObject') checkTickCreated();
}
}).enable();
const console = new Console(new Writable({
write: common.mustCall((chunk, encoding, cb) => {
cb();
}, 100) | }));
for (let i = 0; i < 100; i++)
console.log(i); | random_line_split |
|
test-stream-writable-samecb-singletick.js | 'use strict';
const common = require('../common');
const { Console } = require('console');
const { Writable } = require('stream');
const async_hooks = require('async_hooks');
// Make sure that repeated calls to console.log(), and by extension
// stream.write() for the underlying stream, allocate exactly 1 tick object.
// At the time of writing, that is enough to ensure a flat memory profile
// from repeated console.log() calls, rather than having callbacks pile up
// over time, assuming that data can be written synchronously.
// Refs: https://github.com/nodejs/node/issues/18013
// Refs: https://github.com/nodejs/node/issues/18367
const checkTickCreated = common.mustCall();
async_hooks.createHook({
init(id, type, triggerId, resoure) |
}).enable();
const console = new Console(new Writable({
write: common.mustCall((chunk, encoding, cb) => {
cb();
}, 100)
}));
for (let i = 0; i < 100; i++)
console.log(i);
| {
if (type === 'TickObject') checkTickCreated();
} | identifier_body |
test-stream-writable-samecb-singletick.js | 'use strict';
const common = require('../common');
const { Console } = require('console');
const { Writable } = require('stream');
const async_hooks = require('async_hooks');
// Make sure that repeated calls to console.log(), and by extension
// stream.write() for the underlying stream, allocate exactly 1 tick object.
// At the time of writing, that is enough to ensure a flat memory profile
// from repeated console.log() calls, rather than having callbacks pile up
// over time, assuming that data can be written synchronously.
// Refs: https://github.com/nodejs/node/issues/18013
// Refs: https://github.com/nodejs/node/issues/18367
const checkTickCreated = common.mustCall();
async_hooks.createHook({
| (id, type, triggerId, resoure) {
if (type === 'TickObject') checkTickCreated();
}
}).enable();
const console = new Console(new Writable({
write: common.mustCall((chunk, encoding, cb) => {
cb();
}, 100)
}));
for (let i = 0; i < 100; i++)
console.log(i);
| init | identifier_name |
__init__.py | import logging
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from corehq.apps.adm.dispatcher import ADMSectionDispatcher
from corehq.apps.adm.models import REPORT_SECTION_OPTIONS, ADMReport
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn, DTSortType
from corehq.apps.reports.generic import GenericReportView, GenericTabularReport
from corehq.apps.reports.standard import DatespanMixin, ProjectReportParametersMixin
from dimagi.utils.decorators.memoized import memoized
from django.utils.translation import ugettext as _, ugettext_noop
class ADMSectionView(GenericReportView):
section_name = ugettext_noop("Active Data Management")
base_template = "reports/base_template.html"
dispatcher = ADMSectionDispatcher
hide_filters = True
emailable = True
# adm-specific stuff
adm_slug = None
def __init__(self, request, base_context=None, domain=None, **kwargs):
self.adm_sections = dict(REPORT_SECTION_OPTIONS)
if self.adm_slug not in self.adm_sections:
raise ValueError("The adm_slug provided, %s, is not in the list of valid ADM report section slugs: %s." %
(self.adm_slug, ", ".join([key for key, val in self.adm_sections.items()]))
)
self.subreport_slug = kwargs.get("subreport_slug")
super(ADMSectionView, self).__init__(request, base_context, domain=domain, **kwargs)
self.context['report'].update(sub_slug=self.subreport_slug)
if self.subreport_data:
self.name = mark_safe("""%s <small>%s</small>""" %\
(self.subreport_data.get('value', {}).get('name'),
self.adm_sections.get(self.adm_slug, _("ADM Report"))))
@property
def subreport_data(self):
raise NotImplementedError
@property
def default_report_url(self):
return reverse('default_adm_report', args=[self.request.project])
@classmethod
def get_url(cls, domain=None, render_as=None, **kwargs):
subreport = kwargs.get('subreport')
url = super(ADMSectionView, cls).get_url(domain=domain, render_as=render_as, **kwargs)
return "%s%s" % (url, "%s/" % subreport if subreport else "")
class DefaultReportADMSectionView(GenericTabularReport, ADMSectionView, ProjectReportParametersMixin, DatespanMixin):
section_name = ugettext_noop("Active Data Management")
base_template = "reports/base_template.html"
dispatcher = ADMSectionDispatcher
fix_left_col = True
fields = ['corehq.apps.reports.filters.users.UserTypeFilter',
'corehq.apps.reports.filters.select.GroupFilter',
'corehq.apps.reports.filters.dates.DatespanFilter']
hide_filters = False
# adm-specific stuff
adm_slug = None
@property
@memoized
def subreport_data(self):
default_subreport = ADMReport.get_default(self.subreport_slug, domain=self.domain,
section=self.adm_slug, wrap=False)
if default_subreport is None:
return dict()
return default_subreport
@property
@memoized
def adm_report(self):
if self.subreport_data:
try:
adm_report = ADMReport.get_correct_wrap(self.subreport_data.get('key')[-1])
adm_report.set_domain_specific_values(self.domain)
return adm_report
except Exception as e:
logging.error("Could not fetch ADM Report: %s" % e)
return None
@property
@memoized
def adm_columns(self):
if self.adm_report:
column_config = self.report_column_config
if not isinstance(column_config, dict):
ValueError('report_column_config should return a dict')
for col in self.adm_report.columns:
|
return self.adm_report.columns
return []
@property
def headers(self):
if self.subreport_slug is None:
raise ValueError("Cannot render this report. A subreport_slug is required.")
header = DataTablesHeader(DataTablesColumn(_("FLW Name")))
for col in self.adm_report.columns:
sort_type = DTSortType.NUMERIC if hasattr(col, 'returns_numerical') and col.returns_numerical else None
help_text = _(col.description) if col.description else None
header.add_column(DataTablesColumn(_(col.name), sort_type=sort_type, help_text=help_text))
header.custom_sort = self.adm_report.default_sort_params
return header
@property
def rows(self):
rows = []
for user in self.users:
row = [self.table_cell(user.raw_username,
user.username_in_report)]
for col in self.adm_columns:
val = col.raw_value(**user._asdict())
row.append(self.table_cell(col.clean_value(val),
col.html_value(val)))
rows.append(row)
self.statistics_rows = [["Total"], ["Average"]]
for ind, col in enumerate(self.adm_columns):
column_data = [row[1+ind] for row in rows]
self.statistics_rows[0].append(col.calculate_totals(column_data))
self.statistics_rows[1].append(col.calculate_averages(column_data))
return rows
@property
def report_column_config(self):
"""
Should return a dict of values important for rendering the ADMColumns in this report.
"""
return dict(
domain=self.domain,
datespan=self.datespan
)
@classmethod
def override_navigation_list(cls, context):
current_slug = context.get('report', {}).get('sub_slug')
domain = context.get('domain')
subreport_context = []
subreports = ADMReport.get_default_subreports(domain, cls.adm_slug)
if not subreports:
subreport_context.append({
'url': '#',
'warning_label': 'No ADM Reports Configured',
})
return subreport_context
for report in subreports:
key = report.get("key", [])
entry = report.get("value", {})
report_slug = key[-2]
if cls.show_subreport_in_navigation(report_slug):
subreport_context.append({
'is_active': current_slug == report_slug,
'url': cls.get_url(domain=domain, subreport=report_slug),
'description': entry.get('description', ''),
'title': entry.get('name', 'Untitled Report'),
})
return subreport_context
@classmethod
def show_subreport_in_navigation(cls, subreport_slug):
return True
| col.set_report_values(**column_config) | conditional_block |
__init__.py | import logging
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from corehq.apps.adm.dispatcher import ADMSectionDispatcher
from corehq.apps.adm.models import REPORT_SECTION_OPTIONS, ADMReport
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn, DTSortType
from corehq.apps.reports.generic import GenericReportView, GenericTabularReport
from corehq.apps.reports.standard import DatespanMixin, ProjectReportParametersMixin
from dimagi.utils.decorators.memoized import memoized
from django.utils.translation import ugettext as _, ugettext_noop
class ADMSectionView(GenericReportView):
section_name = ugettext_noop("Active Data Management")
base_template = "reports/base_template.html"
dispatcher = ADMSectionDispatcher
hide_filters = True
emailable = True
# adm-specific stuff
adm_slug = None
def __init__(self, request, base_context=None, domain=None, **kwargs):
self.adm_sections = dict(REPORT_SECTION_OPTIONS)
if self.adm_slug not in self.adm_sections:
raise ValueError("The adm_slug provided, %s, is not in the list of valid ADM report section slugs: %s." %
(self.adm_slug, ", ".join([key for key, val in self.adm_sections.items()]))
)
self.subreport_slug = kwargs.get("subreport_slug")
super(ADMSectionView, self).__init__(request, base_context, domain=domain, **kwargs)
self.context['report'].update(sub_slug=self.subreport_slug)
if self.subreport_data:
self.name = mark_safe("""%s <small>%s</small>""" %\
(self.subreport_data.get('value', {}).get('name'),
self.adm_sections.get(self.adm_slug, _("ADM Report"))))
@property
def subreport_data(self):
raise NotImplementedError
@property
def default_report_url(self):
return reverse('default_adm_report', args=[self.request.project])
@classmethod
def get_url(cls, domain=None, render_as=None, **kwargs):
subreport = kwargs.get('subreport')
url = super(ADMSectionView, cls).get_url(domain=domain, render_as=render_as, **kwargs)
return "%s%s" % (url, "%s/" % subreport if subreport else "")
class DefaultReportADMSectionView(GenericTabularReport, ADMSectionView, ProjectReportParametersMixin, DatespanMixin):
section_name = ugettext_noop("Active Data Management")
base_template = "reports/base_template.html"
dispatcher = ADMSectionDispatcher
fix_left_col = True
fields = ['corehq.apps.reports.filters.users.UserTypeFilter',
'corehq.apps.reports.filters.select.GroupFilter',
'corehq.apps.reports.filters.dates.DatespanFilter']
hide_filters = False
# adm-specific stuff
adm_slug = None
@property
@memoized
def subreport_data(self):
default_subreport = ADMReport.get_default(self.subreport_slug, domain=self.domain,
section=self.adm_slug, wrap=False)
if default_subreport is None:
return dict()
return default_subreport
@property
@memoized
def adm_report(self):
if self.subreport_data:
try:
adm_report = ADMReport.get_correct_wrap(self.subreport_data.get('key')[-1])
adm_report.set_domain_specific_values(self.domain)
return adm_report
except Exception as e:
logging.error("Could not fetch ADM Report: %s" % e)
return None
@property
@memoized
def adm_columns(self):
if self.adm_report:
column_config = self.report_column_config
if not isinstance(column_config, dict):
ValueError('report_column_config should return a dict')
for col in self.adm_report.columns:
col.set_report_values(**column_config)
return self.adm_report.columns
return []
@property
def headers(self):
if self.subreport_slug is None:
raise ValueError("Cannot render this report. A subreport_slug is required.")
header = DataTablesHeader(DataTablesColumn(_("FLW Name")))
for col in self.adm_report.columns:
sort_type = DTSortType.NUMERIC if hasattr(col, 'returns_numerical') and col.returns_numerical else None
help_text = _(col.description) if col.description else None
header.add_column(DataTablesColumn(_(col.name), sort_type=sort_type, help_text=help_text))
header.custom_sort = self.adm_report.default_sort_params
return header
@property
def rows(self):
rows = []
for user in self.users:
row = [self.table_cell(user.raw_username,
user.username_in_report)]
for col in self.adm_columns:
val = col.raw_value(**user._asdict())
row.append(self.table_cell(col.clean_value(val),
col.html_value(val)))
rows.append(row)
self.statistics_rows = [["Total"], ["Average"]]
for ind, col in enumerate(self.adm_columns):
column_data = [row[1+ind] for row in rows]
self.statistics_rows[0].append(col.calculate_totals(column_data))
self.statistics_rows[1].append(col.calculate_averages(column_data))
return rows
@property
def report_column_config(self):
"""
Should return a dict of values important for rendering the ADMColumns in this report.
"""
return dict(
domain=self.domain,
datespan=self.datespan
)
@classmethod
def override_navigation_list(cls, context):
current_slug = context.get('report', {}).get('sub_slug')
domain = context.get('domain')
subreport_context = []
subreports = ADMReport.get_default_subreports(domain, cls.adm_slug)
if not subreports:
subreport_context.append({
'url': '#',
'warning_label': 'No ADM Reports Configured',
})
return subreport_context
for report in subreports:
key = report.get("key", [])
entry = report.get("value", {})
report_slug = key[-2]
if cls.show_subreport_in_navigation(report_slug):
subreport_context.append({
'is_active': current_slug == report_slug,
'url': cls.get_url(domain=domain, subreport=report_slug),
'description': entry.get('description', ''),
'title': entry.get('name', 'Untitled Report'),
})
return subreport_context
@classmethod
def show_subreport_in_navigation(cls, subreport_slug): | return True | random_line_split |
|
__init__.py | import logging
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from corehq.apps.adm.dispatcher import ADMSectionDispatcher
from corehq.apps.adm.models import REPORT_SECTION_OPTIONS, ADMReport
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn, DTSortType
from corehq.apps.reports.generic import GenericReportView, GenericTabularReport
from corehq.apps.reports.standard import DatespanMixin, ProjectReportParametersMixin
from dimagi.utils.decorators.memoized import memoized
from django.utils.translation import ugettext as _, ugettext_noop
class ADMSectionView(GenericReportView):
| if self.subreport_data:
self.name = mark_safe("""%s <small>%s</small>""" %\
(self.subreport_data.get('value', {}).get('name'),
self.adm_sections.get(self.adm_slug, _("ADM Report"))))
@property
def subreport_data(self):
raise NotImplementedError
@property
def default_report_url(self):
return reverse('default_adm_report', args=[self.request.project])
@classmethod
def get_url(cls, domain=None, render_as=None, **kwargs):
subreport = kwargs.get('subreport')
url = super(ADMSectionView, cls).get_url(domain=domain, render_as=render_as, **kwargs)
return "%s%s" % (url, "%s/" % subreport if subreport else "")
class DefaultReportADMSectionView(GenericTabularReport, ADMSectionView, ProjectReportParametersMixin, DatespanMixin):
section_name = ugettext_noop("Active Data Management")
base_template = "reports/base_template.html"
dispatcher = ADMSectionDispatcher
fix_left_col = True
fields = ['corehq.apps.reports.filters.users.UserTypeFilter',
'corehq.apps.reports.filters.select.GroupFilter',
'corehq.apps.reports.filters.dates.DatespanFilter']
hide_filters = False
# adm-specific stuff
adm_slug = None
@property
@memoized
def subreport_data(self):
default_subreport = ADMReport.get_default(self.subreport_slug, domain=self.domain,
section=self.adm_slug, wrap=False)
if default_subreport is None:
return dict()
return default_subreport
@property
@memoized
def adm_report(self):
if self.subreport_data:
try:
adm_report = ADMReport.get_correct_wrap(self.subreport_data.get('key')[-1])
adm_report.set_domain_specific_values(self.domain)
return adm_report
except Exception as e:
logging.error("Could not fetch ADM Report: %s" % e)
return None
@property
@memoized
def adm_columns(self):
if self.adm_report:
column_config = self.report_column_config
if not isinstance(column_config, dict):
ValueError('report_column_config should return a dict')
for col in self.adm_report.columns:
col.set_report_values(**column_config)
return self.adm_report.columns
return []
@property
def headers(self):
if self.subreport_slug is None:
raise ValueError("Cannot render this report. A subreport_slug is required.")
header = DataTablesHeader(DataTablesColumn(_("FLW Name")))
for col in self.adm_report.columns:
sort_type = DTSortType.NUMERIC if hasattr(col, 'returns_numerical') and col.returns_numerical else None
help_text = _(col.description) if col.description else None
header.add_column(DataTablesColumn(_(col.name), sort_type=sort_type, help_text=help_text))
header.custom_sort = self.adm_report.default_sort_params
return header
@property
def rows(self):
rows = []
for user in self.users:
row = [self.table_cell(user.raw_username,
user.username_in_report)]
for col in self.adm_columns:
val = col.raw_value(**user._asdict())
row.append(self.table_cell(col.clean_value(val),
col.html_value(val)))
rows.append(row)
self.statistics_rows = [["Total"], ["Average"]]
for ind, col in enumerate(self.adm_columns):
column_data = [row[1+ind] for row in rows]
self.statistics_rows[0].append(col.calculate_totals(column_data))
self.statistics_rows[1].append(col.calculate_averages(column_data))
return rows
@property
def report_column_config(self):
"""
Should return a dict of values important for rendering the ADMColumns in this report.
"""
return dict(
domain=self.domain,
datespan=self.datespan
)
@classmethod
def override_navigation_list(cls, context):
current_slug = context.get('report', {}).get('sub_slug')
domain = context.get('domain')
subreport_context = []
subreports = ADMReport.get_default_subreports(domain, cls.adm_slug)
if not subreports:
subreport_context.append({
'url': '#',
'warning_label': 'No ADM Reports Configured',
})
return subreport_context
for report in subreports:
key = report.get("key", [])
entry = report.get("value", {})
report_slug = key[-2]
if cls.show_subreport_in_navigation(report_slug):
subreport_context.append({
'is_active': current_slug == report_slug,
'url': cls.get_url(domain=domain, subreport=report_slug),
'description': entry.get('description', ''),
'title': entry.get('name', 'Untitled Report'),
})
return subreport_context
@classmethod
def show_subreport_in_navigation(cls, subreport_slug):
return True
| section_name = ugettext_noop("Active Data Management")
base_template = "reports/base_template.html"
dispatcher = ADMSectionDispatcher
hide_filters = True
emailable = True
# adm-specific stuff
adm_slug = None
def __init__(self, request, base_context=None, domain=None, **kwargs):
self.adm_sections = dict(REPORT_SECTION_OPTIONS)
if self.adm_slug not in self.adm_sections:
raise ValueError("The adm_slug provided, %s, is not in the list of valid ADM report section slugs: %s." %
(self.adm_slug, ", ".join([key for key, val in self.adm_sections.items()]))
)
self.subreport_slug = kwargs.get("subreport_slug")
super(ADMSectionView, self).__init__(request, base_context, domain=domain, **kwargs)
self.context['report'].update(sub_slug=self.subreport_slug) | identifier_body |
__init__.py | import logging
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from corehq.apps.adm.dispatcher import ADMSectionDispatcher
from corehq.apps.adm.models import REPORT_SECTION_OPTIONS, ADMReport
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn, DTSortType
from corehq.apps.reports.generic import GenericReportView, GenericTabularReport
from corehq.apps.reports.standard import DatespanMixin, ProjectReportParametersMixin
from dimagi.utils.decorators.memoized import memoized
from django.utils.translation import ugettext as _, ugettext_noop
class ADMSectionView(GenericReportView):
section_name = ugettext_noop("Active Data Management")
base_template = "reports/base_template.html"
dispatcher = ADMSectionDispatcher
hide_filters = True
emailable = True
# adm-specific stuff
adm_slug = None
def __init__(self, request, base_context=None, domain=None, **kwargs):
self.adm_sections = dict(REPORT_SECTION_OPTIONS)
if self.adm_slug not in self.adm_sections:
raise ValueError("The adm_slug provided, %s, is not in the list of valid ADM report section slugs: %s." %
(self.adm_slug, ", ".join([key for key, val in self.adm_sections.items()]))
)
self.subreport_slug = kwargs.get("subreport_slug")
super(ADMSectionView, self).__init__(request, base_context, domain=domain, **kwargs)
self.context['report'].update(sub_slug=self.subreport_slug)
if self.subreport_data:
self.name = mark_safe("""%s <small>%s</small>""" %\
(self.subreport_data.get('value', {}).get('name'),
self.adm_sections.get(self.adm_slug, _("ADM Report"))))
@property
def subreport_data(self):
raise NotImplementedError
@property
def default_report_url(self):
return reverse('default_adm_report', args=[self.request.project])
@classmethod
def | (cls, domain=None, render_as=None, **kwargs):
subreport = kwargs.get('subreport')
url = super(ADMSectionView, cls).get_url(domain=domain, render_as=render_as, **kwargs)
return "%s%s" % (url, "%s/" % subreport if subreport else "")
class DefaultReportADMSectionView(GenericTabularReport, ADMSectionView, ProjectReportParametersMixin, DatespanMixin):
section_name = ugettext_noop("Active Data Management")
base_template = "reports/base_template.html"
dispatcher = ADMSectionDispatcher
fix_left_col = True
fields = ['corehq.apps.reports.filters.users.UserTypeFilter',
'corehq.apps.reports.filters.select.GroupFilter',
'corehq.apps.reports.filters.dates.DatespanFilter']
hide_filters = False
# adm-specific stuff
adm_slug = None
@property
@memoized
def subreport_data(self):
default_subreport = ADMReport.get_default(self.subreport_slug, domain=self.domain,
section=self.adm_slug, wrap=False)
if default_subreport is None:
return dict()
return default_subreport
@property
@memoized
def adm_report(self):
if self.subreport_data:
try:
adm_report = ADMReport.get_correct_wrap(self.subreport_data.get('key')[-1])
adm_report.set_domain_specific_values(self.domain)
return adm_report
except Exception as e:
logging.error("Could not fetch ADM Report: %s" % e)
return None
@property
@memoized
def adm_columns(self):
if self.adm_report:
column_config = self.report_column_config
if not isinstance(column_config, dict):
ValueError('report_column_config should return a dict')
for col in self.adm_report.columns:
col.set_report_values(**column_config)
return self.adm_report.columns
return []
@property
def headers(self):
if self.subreport_slug is None:
raise ValueError("Cannot render this report. A subreport_slug is required.")
header = DataTablesHeader(DataTablesColumn(_("FLW Name")))
for col in self.adm_report.columns:
sort_type = DTSortType.NUMERIC if hasattr(col, 'returns_numerical') and col.returns_numerical else None
help_text = _(col.description) if col.description else None
header.add_column(DataTablesColumn(_(col.name), sort_type=sort_type, help_text=help_text))
header.custom_sort = self.adm_report.default_sort_params
return header
@property
def rows(self):
rows = []
for user in self.users:
row = [self.table_cell(user.raw_username,
user.username_in_report)]
for col in self.adm_columns:
val = col.raw_value(**user._asdict())
row.append(self.table_cell(col.clean_value(val),
col.html_value(val)))
rows.append(row)
self.statistics_rows = [["Total"], ["Average"]]
for ind, col in enumerate(self.adm_columns):
column_data = [row[1+ind] for row in rows]
self.statistics_rows[0].append(col.calculate_totals(column_data))
self.statistics_rows[1].append(col.calculate_averages(column_data))
return rows
@property
def report_column_config(self):
"""
Should return a dict of values important for rendering the ADMColumns in this report.
"""
return dict(
domain=self.domain,
datespan=self.datespan
)
@classmethod
def override_navigation_list(cls, context):
current_slug = context.get('report', {}).get('sub_slug')
domain = context.get('domain')
subreport_context = []
subreports = ADMReport.get_default_subreports(domain, cls.adm_slug)
if not subreports:
subreport_context.append({
'url': '#',
'warning_label': 'No ADM Reports Configured',
})
return subreport_context
for report in subreports:
key = report.get("key", [])
entry = report.get("value", {})
report_slug = key[-2]
if cls.show_subreport_in_navigation(report_slug):
subreport_context.append({
'is_active': current_slug == report_slug,
'url': cls.get_url(domain=domain, subreport=report_slug),
'description': entry.get('description', ''),
'title': entry.get('name', 'Untitled Report'),
})
return subreport_context
@classmethod
def show_subreport_in_navigation(cls, subreport_slug):
return True
| get_url | identifier_name |
call-cache.ts | import { Call } from '@grpc/grpc-js';
// A call can also emit 'metadata' and 'status' events
let _calls: Record<string, Call> = {};
const activeCount = () => Object.keys(_calls).length;
const get = (requestId: string): Call | undefined => {
const call: Call = _calls[requestId];
if (!call) {
console.log(`[gRPC] client call for req=${requestId} not found`);
}
return call;
};
const set = (requestId: string, call: Call): void => {
_calls[requestId] = call;
};
const _tryCloseChannel = (requestId: string) => {
// @ts-expect-error -- TSCONVERSION channel not found in call
const channel = get(requestId)?.call?.call.channel; | channel.close();
} else {
console.log(`[gRPC] failed to close channel for req=${requestId} because it was not found`);
}
};
const clear = (requestId: string): void => {
_tryCloseChannel(requestId);
delete _calls[requestId];
};
const reset = (): void => {
_calls = {};
};
const callCache = {
activeCount,
get,
set,
clear,
reset,
};
export default callCache; |
if (channel) { | random_line_split |
call-cache.ts | import { Call } from '@grpc/grpc-js';
// A call can also emit 'metadata' and 'status' events
let _calls: Record<string, Call> = {};
const activeCount = () => Object.keys(_calls).length;
const get = (requestId: string): Call | undefined => {
const call: Call = _calls[requestId];
if (!call) |
return call;
};
const set = (requestId: string, call: Call): void => {
_calls[requestId] = call;
};
const _tryCloseChannel = (requestId: string) => {
// @ts-expect-error -- TSCONVERSION channel not found in call
const channel = get(requestId)?.call?.call.channel;
if (channel) {
channel.close();
} else {
console.log(`[gRPC] failed to close channel for req=${requestId} because it was not found`);
}
};
const clear = (requestId: string): void => {
_tryCloseChannel(requestId);
delete _calls[requestId];
};
const reset = (): void => {
_calls = {};
};
const callCache = {
activeCount,
get,
set,
clear,
reset,
};
export default callCache;
| {
console.log(`[gRPC] client call for req=${requestId} not found`);
} | conditional_block |
replace_wrapped.js | // Expects to be preceded by JavaScript that creates a variable called selectRange that
// defines the segment to be wrapped and replaced.
// create custom range object for wrapSelection
var replaceRange = $.fn.range;
replaceRange.ClearVariables();
replaceRange.startContainer = selectRange.startContainer;
replaceRange.startOffset = selectRange.startOffset;
replaceRange.endContainer = selectRange.endContainer;
replaceRange.endOffset = selectRange.endOffset;
replaceRange.collapsed = selectRange.collapsed;
// Wrap the text to be replaced in a set of custom spans.
// This is done so we can operate on this text even if it extends over different
// inline tags.
var selectMarker = 'SigilReplace_' + new Date().getTime();
$().wrapSelection({fitToWord: false, selectClass: selectMarker, wrapRange: replaceRange});
// First, store the old contents so they can be undone, and then | if(n==0){
$(this).data('undo', $(this).html());
$(this).html("$ESCAPED_TEXT_HERE");
}
else {
$(this).data('undo', $(this).html());
// Assign an id so that this element isn't automatically deleted.
$(this).attr("id",selectMarker+n);
$(this).html('');
}
});
// We need to normalize the text nodes since they're screwed up now
selectRange.startContainer.parentNode.normalize();
// Set the cursor to point to the end of the replaced text.
selectRange.collapse( false );
var selection = window.getSelection();
selection.removeAllRanges();
selection.addRange(selectRange);
//Scroll to the cursor
var from_top = window.innerHeight / 2;
$.scrollTo( selectRange.startContainer, 0, {offset: {top:-from_top, left:0 } } );
// Return the unique class name used to identify these elements so the change can be undone.
selectMarker.valueOf(); | // insert the new text in the first element of the wrapped range and clear the rest.
$('.'+selectMarker).each(function(n) { | random_line_split |
replace_wrapped.js | // Expects to be preceded by JavaScript that creates a variable called selectRange that
// defines the segment to be wrapped and replaced.
// create custom range object for wrapSelection
var replaceRange = $.fn.range;
replaceRange.ClearVariables();
replaceRange.startContainer = selectRange.startContainer;
replaceRange.startOffset = selectRange.startOffset;
replaceRange.endContainer = selectRange.endContainer;
replaceRange.endOffset = selectRange.endOffset;
replaceRange.collapsed = selectRange.collapsed;
// Wrap the text to be replaced in a set of custom spans.
// This is done so we can operate on this text even if it extends over different
// inline tags.
var selectMarker = 'SigilReplace_' + new Date().getTime();
$().wrapSelection({fitToWord: false, selectClass: selectMarker, wrapRange: replaceRange});
// First, store the old contents so they can be undone, and then
// insert the new text in the first element of the wrapped range and clear the rest.
$('.'+selectMarker).each(function(n) {
if(n==0){
$(this).data('undo', $(this).html());
$(this).html("$ESCAPED_TEXT_HERE");
}
else |
});
// We need to normalize the text nodes since they're screwed up now
selectRange.startContainer.parentNode.normalize();
// Set the cursor to point to the end of the replaced text.
selectRange.collapse( false );
var selection = window.getSelection();
selection.removeAllRanges();
selection.addRange(selectRange);
//Scroll to the cursor
var from_top = window.innerHeight / 2;
$.scrollTo( selectRange.startContainer, 0, {offset: {top:-from_top, left:0 } } );
// Return the unique class name used to identify these elements so the change can be undone.
selectMarker.valueOf();
| {
$(this).data('undo', $(this).html());
// Assign an id so that this element isn't automatically deleted.
$(this).attr("id",selectMarker+n);
$(this).html('');
} | conditional_block |
index.d.ts | // Type definitions for babel-core 6.25
// Project: https://github.com/babel/babel/tree/master/packages/babel-core
// Definitions by: Troy Gerwien <https://github.com/yortus>
// Marvin Hagemeister <https://github.com/marvinhagemeister>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
// TypeScript Version: 2.8
import * as t from 'babel-types';
export { t as types };
export type Node = t.Node;
export import template = require('babel-template');
export const version: string;
import traverse, { Visitor, NodePath } from "babel-traverse";
export { traverse, Visitor };
import { BabylonOptions } from "babylon";
export { BabylonOptions };
import { GeneratorOptions } from "babel-generator";
export { GeneratorOptions };
// A babel plugin is a simple function which must return an object matching
// the following interface. Babel will throw if it finds unknown properties.
// The list of allowed plugin keys is here:
// https://github.com/babel/babel/blob/4e50b2d9d9c376cee7a2cbf56553fe5b982ea53c/packages/babel-core/src/config/option-manager.js#L71
export interface PluginObj<S = {}> {
name?: string;
manipulateOptions?(opts: any, parserOpts: any): void;
pre?(this: S, state: any): void;
visitor: Visitor<S>;
post?(this: S, state: any): void;
inherits?: any;
}
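// Example (editor's sketch, not part of the published typings): a plugin module whose
// default export returns an object matching PluginObj. The plugin name and the
// Identifier visitor below are hypothetical.
//
// import { PluginObj } from 'babel-core';
//
// export default function examplePlugin(): PluginObj {
//   return {
//     name: 'example-prefix-identifiers',
//     visitor: {
//       Identifier(path) {
//         // Illustrative only: prefix every identifier with an underscore.
//         path.node.name = '_' + path.node.name;
//       },
//     },
//   };
// }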
/** Transforms the passed in `code`. Returning an object with the generated code, source map, and AST. */
export function transform(code: string, opts?: TransformOptions): BabelFileResult;
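// Example (editor's sketch): one possible call shape for transform(), using options from
// TransformOptions below. The preset name 'env' is illustrative and assumes the matching
// preset package is installed.
//
// import { transform } from 'babel-core';
//
// const out = transform('const square = n => n * n;', {
//   presets: ['env'],
//   sourceMaps: 'inline',
// });
// console.log(out.code);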
/** Asynchronously transforms the entire contents of a file. */
export function transformFile(filename: string, opts: TransformOptions, callback: (err: any, result: BabelFileResult) => void): void;
/** Synchronous version of `babel.transformFile`. Returns the transformed contents of the `filename`. */
export function transformFileSync(filename: string, opts?: TransformOptions): BabelFileResult;
export function transformFromAst(ast: Node, code?: string, opts?: TransformOptions): BabelFileResult;
export interface TransformOptions {
/** Include the AST in the returned object. Default: `true`. */
ast?: boolean;
/** Attach a comment after all non-user injected code. */
auxiliaryCommentAfter?: string;
/** Attach a comment before all non-user injected code. */
auxiliaryCommentBefore?: string;
/** Specify whether or not to use `.babelrc` and `.babelignore` files. Default: `true`. */
babelrc?: boolean;
/** Enable code generation. Default: `true`. */
code?: boolean;
/** write comments to generated output. Default: `true`. */
comments?: boolean;
/**
* Do not include superfluous whitespace characters and line terminators. When set to `"auto"`, `compact` is set to
* `true` on input sizes of >100KB.
*/
compact?: boolean | "auto";
/**
* This is an object of keys that represent different environments. For example, you may have:
* `{ env: { production: { / * specific options * / } } }`
* which will use those options when the enviroment variable `BABEL_ENV` is set to `"production"`.
* If `BABEL_ENV` isn't set then `NODE_ENV` will be used, if it's not set then it defaults to `"development"`.
*/
env?: object;
/** A path to a .babelrc file to extend. */
extends?: string;
/** Filename to use when reading from stdin - this will be used in source-maps, errors etc. Default: "unknown". */ | filename?: string;
/** Filename relative to `sourceRoot`. */
filenameRelative?: string;
/** An object containing the options to be passed down to the babel code generator, babel-generator. Default: `{}` */
generatorOpts?: GeneratorOptions;
/**
* Specify a custom callback to generate a module id with. Called as `getModuleId(moduleName)`.
* If falsy value is returned then the generated module id is used.
*/
getModuleId?(moduleName: string): string;
/** Enable/disable ANSI syntax highlighting of code frames. Default: `true`. */
highlightCode?: boolean;
/** list of glob paths to **not** compile. Opposite to the `only` option. */
ignore?: string[];
/** A source map object that the output source map will be based on. */
inputSourceMap?: object;
/** Should the output be minified. Default: `false` */
minified?: boolean;
/** Specify a custom name for module ids. */
moduleId?: string;
/**
* If truthy, insert an explicit id for modules. By default, all modules are anonymous.
* (Not available for `common` modules).
*/
moduleIds?: boolean;
/** Optional prefix for the AMD module formatter that will be prepended to the filename on module definitions. */
moduleRoot?: string;
/**
* A glob, regex, or mixed array of both, matching paths to only compile. Can also be an array of arrays containing
* paths to explicitly match. When attempting to compile a non-matching file it's returned verbatim.
*/
only?: string | RegExp | Array<string | RegExp>;
/** Babylon parser options. */
parserOpts?: BabylonOptions;
/** List of plugins to load and use. */
plugins?: any[];
/** List of presets (a set of plugins) to load and use. */
presets?: any[];
/** Retain line numbers - will result in really ugly code. Default: `false` */
retainLines?: boolean;
/** Resolve a module source, i.e. `import "SOURCE";`, to a custom value. */
resolveModuleSource?(source: string, filename: string): string;
/**
* An optional callback that controls whether a comment should be output or not. Called as
* `shouldPrintComment(commentContents)`. **NOTE**: This overrides the `comments` option when used.
*/
shouldPrintComment?(comment: string): boolean;
/** Set `sources[0]` on returned source map. */
sourceFileName?: string;
/**
* If truthy, adds a `map` property to returned output. If set to `"inline"`, a comment with a `sourceMappingURL`
* directive is added to the bottom of the returned code. If set to `"both"` then a map property is returned as well
* as a source map comment appended.
*/
sourceMaps?: boolean | "inline" | "both";
/** Set `file` on returned source map. */
sourceMapTarget?: string;
/** The root from which all sources are relative. */
sourceRoot?: string;
/** Indicate the mode the code should be parsed in. Can be either βscriptβ or βmoduleβ. Default: "module" */
sourceType?: "script" | "module";
/**
* An optional callback that can be used to wrap visitor methods.
* NOTE: This is useful for things like introspection, and not really needed for implementing anything.
*/
wrapPluginVisitorMethod?(pluginAlias: string, visitorType: 'enter' | 'exit', callback: (path: NodePath, state: any) => void): (path: NodePath, state: any) => void ;
}
export interface BabelFileModulesMetadata {
imports: object[];
exports: {
exported: object[],
specifiers: object[]
};
}
export interface BabelFileMetadata {
usedHelpers: string[];
marked: Array<{
type: string;
message: string;
loc: object;
}>;
modules: BabelFileModulesMetadata;
}
export interface BabelFileResult {
ast?: Node;
code?: string;
ignored?: boolean;
map?: object;
metadata?: BabelFileMetadata;
}
export as namespace babel; | random_line_split |
|
labelrenderer.ts | import { CSS2DRenderer } from "./css2drenderer";
import { CSS2DObject } from "./css2dobject";
export class LabelRenderer extends CSS2DRenderer {
renderer;
private _root: THREE.Group;
constructor(public element: HTMLElement) |
create() {
let bounds = this.element.getBoundingClientRect();
this.setSize(bounds.width, bounds.height);
this.domElement.style.position = "absolute";
this.domElement.style.top = "0";
this.domElement.style.pointerEvents = "none";
this.element.appendChild(this.domElement);
window.addEventListener("resize", () => {
let bounds = this.element.getBoundingClientRect();
this.setSize(bounds.width, bounds.height);
}, false );
}
add(text: string, position: THREE.Vector3): void {
let target = document.createElement("div");
target.className = "label-renderer";
target.style.color = "rgb(255,255,255)";
target.textContent = text;
let label = new CSS2DObject(target);
label.position.copy(position);
this._root.add(label);
}
get labels() {
return this._root;
}
} | {
super();
this._root = new THREE.Group();
} | identifier_body |
labelrenderer.ts | import { CSS2DRenderer } from "./css2drenderer";
import { CSS2DObject } from "./css2dobject";
export class LabelRenderer extends CSS2DRenderer {
renderer;
private _root: THREE.Group;
| (public element: HTMLElement) {
super();
this._root = new THREE.Group();
}
create() {
let bounds = this.element.getBoundingClientRect();
this.setSize(bounds.width, bounds.height);
this.domElement.style.position = "absolute";
this.domElement.style.top = "0";
this.domElement.style.pointerEvents = "none";
this.element.appendChild(this.domElement);
window.addEventListener("resize", () => {
let bounds = this.element.getBoundingClientRect();
this.setSize(bounds.width, bounds.height);
}, false );
}
add(text: string, position: THREE.Vector3): void {
let target = document.createElement("div");
target.className = "label-renderer";
target.style.color = "rgb(255,255,255)";
target.textContent = text;
let label = new CSS2DObject(target);
label.position.copy(position);
this._root.add(label);
}
get labels() {
return this._root;
}
} | constructor | identifier_name |
labelrenderer.ts | import { CSS2DRenderer } from "./css2drenderer";
import { CSS2DObject } from "./css2dobject";
| constructor(public element: HTMLElement) {
super();
this._root = new THREE.Group();
}
create() {
let bounds = this.element.getBoundingClientRect();
this.setSize(bounds.width, bounds.height);
this.domElement.style.position = "absolute";
this.domElement.style.top = "0";
this.domElement.style.pointerEvents = "none";
this.element.appendChild(this.domElement);
window.addEventListener("resize", () => {
let bounds = this.element.getBoundingClientRect();
this.setSize(bounds.width, bounds.height);
}, false );
}
add(text: string, position: THREE.Vector3): void {
let target = document.createElement("div");
target.className = "label-renderer";
target.style.color = "rgb(255,255,255)";
target.textContent = text;
let label = new CSS2DObject(target);
label.position.copy(position);
this._root.add(label);
}
get labels() {
return this._root;
}
} | export class LabelRenderer extends CSS2DRenderer {
renderer;
private _root: THREE.Group;
| random_line_split |
_guajacum.py | # -*- Mode:Python -*-
##########################################################################
# #
# Guacamole Tree printer #
# #
# Copyright 2014 Janek Bevendorff #
# VR Systems Group Bauhaus University Weimar #
# #
# AVANGO is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Lesser General Public License as #
# published by the Free Software Foundation, version 3. #
# #
# AVANGO is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with AVANGO. If not, see <http://www.gnu.org/licenses/>. #
# #
# USAGE: #
# For integrating the tree printer into your project, just import #
# this module. It will automatically monkey patch itself #
# into your scenegraph and tree node objects. Each of those objects #
# will be extended by a print_tree() and print_fields() method. #
# For a list of possible parameters read the pyDoc block of #
# GuajacumTreePrinter.printTree(). #
# #
##########################################################################
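# Usage sketch (editor's illustration; the scene-graph setup below assumes the usual
# avango.gua.nodes API and is not part of this module):
#
#   import avango.gua
#   import _guajacum          # side-effect import, patches SceneGraph and Node
#
#   graph = avango.gua.nodes.SceneGraph(Name="scenegraph")
#   graph.print_tree(max_depth=2, print_full_path=True)
#   graph.Root.value.print_fields()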
import re
import sys
import avango.gua
class GuajacumTreePrinter():
"""
Recursively print the scene graph or subtrees of certain nodes.
This class will be hooked into avango.gua._gua.SceneGraph and
avango.gua._gua.Node to provide a printTree() method for
SceneGraph and Node objects.
"""
def __init__(self, graph):
self._root = graph
def printTree(self, args):
| if i not in self._treeOpts:
print(self._colorize('error', "Invalid argument '" + i + "'"),
file=sys.stderr)
return
joined_args = dict(list(self._treeOpts.items()) + list(args.items()))
_root = self._root
if hasattr(self._root, 'Root'):
_root = self._root.Root.value
elif hasattr(self._root, 'Children'):
_root = self._root
else:
raise Exception(
"Invalid tree structure, missing attributes 'Root' or 'Children'")
self.__printRecursively(_root, 0, joined_args)
def __printRecursively(self, node, cur_depth, args,
cur_path=[],
is_grouped=False):
# return if current node name matches user-specified exclude pattern
if None != args['exclude_pattern'] and re.search(
args['exclude_pattern'], node.Name.value):
return
# push current basename to path stack
cur_path.append(node.Name.value)
obj_name = str(node)
# remove memory address from string representation if not needed
if not args['print_memory_addr']:
obj_name = re.sub(' object at 0x[0-9a-zA-Z]+>$', '>', obj_name)
print(self._indent(
cur_depth, 'Name: %s%s Obj: %s%s%s' %
(self._colorize('important', '"' + node.Name.value + '"'),
self._colorize('bold', ' (Group)')
if is_grouped else '', self._colorize('important', obj_name, ),
' Path: "' + '/'.join(cur_path).replace('//', '/', 1) + '"'
if args['print_full_path'] else '', ' Depth: ' + str(cur_depth)
if args['print_depth'] else '')))
if (args['print_field_values'] or args['print_field_names']
) and node.get_num_fields():
print(self._indent(cur_depth + 1, self._colorize('bold',
'Fields:')))
num_fields = node.get_num_fields()
for i in range(num_fields):
if args['print_field_values']:
print(self._indent(cur_depth + 2, '%s: %s = %s' %
(node.get_field_name(i),
node.get_field(i).__class__.__name__,
str(node.get_field(i).value))))
else:
print(self._indent(cur_depth + 2, '%s: %s' %
(node.get_field_name(i),
node.get_field(i).__class__.__name__)))
# if it's a leaf or max_depth is reached, pop current level from path stack and abort recursion
if 0 == len(node.Children.value) or cur_depth == args['max_depth']:
if len(node.Children.value):
print(self._indent(cur_depth + 1, self._colorize(
'bold', 'Node has children...')))
cur_path.pop()
return
counter = 0
used_name_count = 0
for i in node.Children.value:
# group by names if option 'group_by_name' is set
name_matches = False
if None != args['group_by_name'] and re.search(
args['group_by_name'], i.Name.value):
name_matches = True
used_name_count += 1
if 1 != used_name_count:
continue
# cut off sub trees if shorten_sub_trees is set
if -1 < args['shorten_sub_trees'
] and counter >= args['shorten_sub_trees']:
print(self._indent(cur_depth, \
self._colorize('bold', 'Shortened sub tree (' + str(len(node.Children.value) - counter) + ' more...)')))
break
self.__printRecursively(i, cur_depth + 1, args, cur_path,
used_name_count and name_matches)
counter += 1
if 1 < used_name_count:
print(self._indent(cur_depth, self._colorize(
'bold', 'Grouped children: ' + str(used_name_count))))
# go up the tree stack
cur_path.pop()
def _indent(self, depth, text):
"""
Indent a line to a certain depth.
"""
if 0 >= depth:
return text
return '| ' * (depth - 1) + '|___ ' + text
def _colorize(self, color, text):
"""
Wrap text in ANSI escape codes (terminal color codes).
Possible values for color: important, error, bold
"""
color_codes = {
'important': '\033[1;32m',
'error': '\033[1;93m',
'bold': '\033[1m',
'none': '\033[0m',
}
if color not in color_codes or 'none' == color:
return text
return color_codes[color] + text + color_codes['none']
# possible tree formatting user options
_treeOpts = {
'max_depth': -1,
'exclude_pattern': None,
'print_full_path': False,
'print_depth': False,
'shorten_sub_trees': -1,
'group_by_name': None,
'print_memory_addr': False,
'print_field_names': False,
'print_field_values': False,
}
def _printTree(self, **args):
e314 = GuajacumTreePrinter(self)
e314.printTree(args)
def _printFields(self):
e314 = GuajacumTreePrinter(self)
args = {'print_field_values': True, 'max_depth': 0}
e314.printTree(args)
# now put some antioxidant on our guacamole
avango.gua._gua.SceneGraph.print_tree = _printTree
avango.gua._gua.Node.print_tree = _printTree
avango.gua._gua.SceneGraph.print_fields = _printFields
avango.gua._gua.Node.print_fields = _printFields
| """
Print Avango scene graph recursively.
@param args: dict of arguments for the tree generation. Possible keys are:
- int max_depth: reduce maximum tree depth (-1 means full tree traversal)
- str exclude_pattern: regular expression to exclude certain nodes by name
- bool print_full_path: print full path for each node (default: False)
- bool print_depth: print depth in tree for each node (default: False)
- int shorten_sub_trees: shorten subtrees with more than n child nodes
(-1 means full tree traversal)
- str group_by_name: regular expression for grouping child nodes together
- bool print_memory_addr: show the memory address for each node (default: False)
- bool print_field_names: show field names for each node
- bool print_field_values: show values of fields for each node (implies print_field_names)
@type args: dict
@throws Exception: Invalid tree structure
"""
# check given arguments
for i in list(args.keys()): | identifier_body |
_guajacum.py | # -*- Mode:Python -*-
##########################################################################
# #
# Guacamole Tree printer #
# #
# Copyright 2014 Janek Bevendorff #
# VR Systems Group Bauhaus University Weimar #
# #
# AVANGO is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Lesser General Public License as #
# published by the Free Software Foundation, version 3. #
# #
# AVANGO is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with AVANGO. If not, see <http://www.gnu.org/licenses/>. #
# #
# USAGE: #
# For integrating the tree printer into your project, just import #
# this module. It will automatically monkey patch itself #
# into your scenegraph and tree node objects. Each of those objects #
# will be extended by a print_tree() and print_fields() method. #
# For a list of possible parameters read the pyDoc block of #
# GuajacumTreePrinter.printTree(). #
# #
##########################################################################
import re
import sys
import avango.gua
class GuajacumTreePrinter():
"""
Recursively print the scene graph or subtrees of certain nodes.
This class will be hooked into avango.gua._gua.SceneGraph and
avango.gua._gua.Node to provide a printTree() method for
SceneGraph and Node objects.
"""
def __init__(self, graph):
self._root = graph
def printTree(self, args):
"""
Print Avango scene graph recursively.
@param args: dict of arguments for the tree generation. Possible keys are:
- int max_depth: reduce maximum tree depth (-1 means full tree traversal)
- str exclude_pattern: regular expression to exclude certain nodes by name
- bool print_full_path: print full path for each node (default: False)
- bool print_depth: print depth in tree for each node (default: False)
- int shorten_sub_trees: shorten subtrees with more than n child nodes
(-1 means full tree traversal)
- str group_by_name: regular expression for grouping child nodes together
- bool print_memory_addr: show the memory address for each node (default: False)
- bool print_field_names: show field names for each node
- bool print_field_values: show values of fields for each node (implies print_field_names)
@type args: dict
@throws Exception: Invalid tree structure
"""
# check given arguments
for i in list(args.keys()):
if i not in self._treeOpts:
print(self._colorize('error', "Invalid argument '" + i + "'"),
file=sys.stderr)
return
joined_args = dict(list(self._treeOpts.items()) + list(args.items()))
_root = self._root
if hasattr(self._root, 'Root'):
_root = self._root.Root.value
elif hasattr(self._root, 'Children'):
|
else:
raise Exception(
"Invalid tree structure, missing attributes 'Root' or 'Children'")
self.__printRecursively(_root, 0, joined_args)
def __printRecursively(self, node, cur_depth, args,
cur_path=[],
is_grouped=False):
# return if current node name matches user-specified exclude pattern
if None != args['exclude_pattern'] and re.search(
args['exclude_pattern'], node.Name.value):
return
# push current basename to path stack
cur_path.append(node.Name.value)
obj_name = str(node)
# remove memory address from string representation if not needed
if not args['print_memory_addr']:
obj_name = re.sub(' object at 0x[0-9a-zA-Z]+>$', '>', obj_name)
print(self._indent(
cur_depth, 'Name: %s%s Obj: %s%s%s' %
(self._colorize('important', '"' + node.Name.value + '"'),
self._colorize('bold', ' (Group)')
if is_grouped else '', self._colorize('important', obj_name, ),
' Path: "' + '/'.join(cur_path).replace('//', '/', 1) + '"'
if args['print_full_path'] else '', ' Depth: ' + str(cur_depth)
if args['print_depth'] else '')))
if (args['print_field_values'] or args['print_field_names']
) and node.get_num_fields():
print(self._indent(cur_depth + 1, self._colorize('bold',
'Fields:')))
num_fields = node.get_num_fields()
for i in range(num_fields):
if args['print_field_values']:
print(self._indent(cur_depth + 2, '%s: %s = %s' %
(node.get_field_name(i),
node.get_field(i).__class__.__name__,
str(node.get_field(i).value))))
else:
print(self._indent(cur_depth + 2, '%s: %s' %
(node.get_field_name(i),
node.get_field(i).__class__.__name__)))
# if it's a leaf or max_depth is reached, pop current level from path stack and abort recursion
if 0 == len(node.Children.value) or cur_depth == args['max_depth']:
if len(node.Children.value):
print(self._indent(cur_depth + 1, self._colorize(
'bold', 'Node has children...')))
cur_path.pop()
return
counter = 0
used_name_count = 0
for i in node.Children.value:
# group by names if option 'group_by_name' is set
name_matches = False
if None != args['group_by_name'] and re.search(
args['group_by_name'], i.Name.value):
name_matches = True
used_name_count += 1
if 1 != used_name_count:
continue
# cut off sub trees if shorten_sub_trees is set
if -1 < args['shorten_sub_trees'
] and counter >= args['shorten_sub_trees']:
print(self._indent(cur_depth, \
self._colorize('bold', 'Shortened sub tree (' + str(len(node.Children.value) - counter) + ' more...)')))
break
self.__printRecursively(i, cur_depth + 1, args, cur_path,
used_name_count and name_matches)
counter += 1
if 1 < used_name_count:
print(self._indent(cur_depth, self._colorize(
'bold', 'Grouped children: ' + str(used_name_count))))
# go up the tree stack
cur_path.pop()
def _indent(self, depth, text):
"""
Indent a line to a certain depth.
"""
if 0 >= depth:
return text
return '| ' * (depth - 1) + '|___ ' + text
def _colorize(self, color, text):
"""
Wrap text in ANSI escape codes (terminal color codes).
Possible values for color: important, error, bold
"""
color_codes = {
'important': '\033[1;32m',
'error': '\033[1;93m',
'bold': '\033[1m',
'none': '\033[0m',
}
if color not in color_codes or 'none' == color:
return text
return color_codes[color] + text + color_codes['none']
# possible tree formatting user options
_treeOpts = {
'max_depth': -1,
'exclude_pattern': None,
'print_full_path': False,
'print_depth': False,
'shorten_sub_trees': -1,
'group_by_name': None,
'print_memory_addr': False,
'print_field_names': False,
'print_field_values': False,
}
def _printTree(self, **args):
e314 = GuajacumTreePrinter(self)
e314.printTree(args)
def _printFields(self):
e314 = GuajacumTreePrinter(self)
args = {'print_field_values': True, 'max_depth': 0}
e314.printTree(args)
# now put some antioxidant on our guacamole
avango.gua._gua.SceneGraph.print_tree = _printTree
avango.gua._gua.Node.print_tree = _printTree
avango.gua._gua.SceneGraph.print_fields = _printFields
avango.gua._gua.Node.print_fields = _printFields
| _root = self._root | conditional_block |
_guajacum.py | # -*- Mode:Python -*-
##########################################################################
# #
# Guacamole Tree printer #
# #
# Copyright 2014 Janek Bevendorff #
# VR Systems Group Bauhaus University Weimar #
# #
# AVANGO is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Lesser General Public License as #
# published by the Free Software Foundation, version 3. #
# #
# AVANGO is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with AVANGO. If not, see <http://www.gnu.org/licenses/>. #
# #
# USAGE: #
# For integrating the tree printer into your project, just import #
# this module. It will automatically monkey patch itself #
# into your scenegraph and tree node objects. Each of those objects #
# will be extended by a print_tree() and print_fields() method. #
# For a list of possible parameters read the pyDoc block of #
# GuajacumTreePrinter.printTree(). #
# #
##########################################################################
import re
import sys
import avango.gua
class GuajacumTreePrinter():
"""
Recursively print the scene graph or subtrees of certain nodes.
This class will be hooked into avango.gua._gua.SceneGraph and
avango.gua._gua.Node to provide a printTree() method for
SceneGraph and Node objects.
"""
def __init__(self, graph):
self._root = graph
def printTree(self, args):
"""
Print Avango scene graph recursively.
@param args: dict of arguments for the tree generation. Possible keys are:
- int max_depth: reduce maximum tree depth (-1 means full tree traversal)
- str exclude_pattern: regular expression to exclude certain nodes by name
- bool print_full_path: print full path for each node (default: False)
- bool print_depth: print depth in tree for each node (default: False)
- int shorten_sub_trees: shorten subtrees with more than n child nodes
(-1 means full tree traversal)
- str group_by_name: regular expression for grouping child nodes together
- bool print_memory_addr: show the memory address for each node (default: False) | """
# check given arguments
for i in list(args.keys()):
if i not in self._treeOpts:
print(self._colorize('error', "Invalid argument '" + i + "'"),
file=sys.stderr)
return
joined_args = dict(list(self._treeOpts.items()) + list(args.items()))
_root = self._root
if hasattr(self._root, 'Root'):
_root = self._root.Root.value
elif hasattr(self._root, 'Children'):
_root = self._root
else:
raise Exception(
"Invalid tree structure, missing attributes 'Root' or 'Children'")
self.__printRecursively(_root, 0, joined_args)
def __printRecursively(self, node, cur_depth, args,
cur_path=[],
is_grouped=False):
# return if current node name matches user-specified exclude pattern
if None != args['exclude_pattern'] and re.search(
args['exclude_pattern'], node.Name.value):
return
# push current basename to path stack
cur_path.append(node.Name.value)
obj_name = str(node)
# remove memory address from string representation if not needed
if not args['print_memory_addr']:
obj_name = re.sub(' object at 0x[0-9a-zA-Z]+>$', '>', obj_name)
print(self._indent(
cur_depth, 'Name: %s%s Obj: %s%s%s' %
(self._colorize('important', '"' + node.Name.value + '"'),
self._colorize('bold', ' (Group)')
if is_grouped else '', self._colorize('important', obj_name, ),
' Path: "' + '/'.join(cur_path).replace('//', '/', 1) + '"'
if args['print_full_path'] else '', ' Depth: ' + str(cur_depth)
if args['print_depth'] else '')))
if (args['print_field_values'] or args['print_field_names']
) and node.get_num_fields():
print(self._indent(cur_depth + 1, self._colorize('bold',
'Fields:')))
num_fields = node.get_num_fields()
for i in range(num_fields):
if args['print_field_values']:
print(self._indent(cur_depth + 2, '%s: %s = %s' %
(node.get_field_name(i),
node.get_field(i).__class__.__name__,
str(node.get_field(i).value))))
else:
print(self._indent(cur_depth + 2, '%s: %s' %
(node.get_field_name(i),
node.get_field(i).__class__.__name__)))
# if it's a leaf or max_depth is reached, pop current level from path stack and abort recursion
if 0 == len(node.Children.value) or cur_depth == args['max_depth']:
if len(node.Children.value):
print(self._indent(cur_depth + 1, self._colorize(
'bold', 'Node has children...')))
cur_path.pop()
return
counter = 0
used_name_count = 0
for i in node.Children.value:
# group by names if option 'group_by_name' is set
name_matches = False
if None != args['group_by_name'] and re.search(
args['group_by_name'], i.Name.value):
name_matches = True
used_name_count += 1
if 1 != used_name_count:
continue
# cut off sub trees if shorten_sub_trees is set
if -1 < args['shorten_sub_trees'
] and counter >= args['shorten_sub_trees']:
print(self._indent(cur_depth, \
self._colorize('bold', 'Shortened sub tree (' + str(len(node.Children.value) - counter) + ' more...)')))
break
self.__printRecursively(i, cur_depth + 1, args, cur_path,
used_name_count and name_matches)
counter += 1
if 1 < used_name_count:
print(self._indent(cur_depth, self._colorize(
'bold', 'Grouped children: ' + str(used_name_count))))
# go up the tree stack
cur_path.pop()
def _indent(self, depth, text):
"""
Indent a line to a certain depth.
"""
if 0 >= depth:
return text
return '| ' * (depth - 1) + '|___ ' + text
def _colorize(self, color, text):
"""
Wrap text in ANSI escape codes (terminal color codes).
Possible values for color: important, error, bold
"""
color_codes = {
'important': '\033[1;32m',
'error': '\033[1;93m',
'bold': '\033[1m',
'none': '\033[0m',
}
if color not in color_codes or 'none' == color:
return text
return color_codes[color] + text + color_codes['none']
# possible tree formatting user options
_treeOpts = {
'max_depth': -1,
'exclude_pattern': None,
'print_full_path': False,
'print_depth': False,
'shorten_sub_trees': -1,
'group_by_name': None,
'print_memory_addr': False,
'print_field_names': False,
'print_field_values': False,
}
def _printTree(self, **args):
e314 = GuajacumTreePrinter(self)
e314.printTree(args)
def _printFields(self):
e314 = GuajacumTreePrinter(self)
args = {'print_field_values': True, 'max_depth': 0}
e314.printTree(args)
# now put some antioxidant on our guacamole
avango.gua._gua.SceneGraph.print_tree = _printTree
avango.gua._gua.Node.print_tree = _printTree
avango.gua._gua.SceneGraph.print_fields = _printFields
avango.gua._gua.Node.print_fields = _printFields | - bool print_field_names: show field names for each node
- bool print_field_values: show values of fields for each node (implies print_field_names)
@type args: dict
@throws Exception: Invalid tree structure | random_line_split |
_guajacum.py | # -*- Mode:Python -*-
##########################################################################
# #
# Guacamole Tree printer #
# #
# Copyright 2014 Janek Bevendorff #
# VR Systems Group Bauhaus University Weimar #
# #
# AVANGO is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Lesser General Public License as #
# published by the Free Software Foundation, version 3. #
# #
# AVANGO is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with AVANGO. If not, see <http://www.gnu.org/licenses/>. #
# #
# USAGE: #
# For integrating the tree printer into your project, just import #
# this module. It will automatically monkey patch itself #
# into your scenegraph and tree node objects. Each of those objects #
# will be extended by a print_tree() and print_fields() method. #
# For a list of possible parameters read the pyDoc block of #
# GuajacumTreePrinter.printTree(). #
# #
##########################################################################
import re
import sys
import avango.gua
class GuajacumTreePrinter():
"""
Recursively print the scene graph or subtrees of certain nodes.
This class will be hooked into avango.gua._gua.SceneGraph and
avango.gua._gua.Node to provide a printTree() method for
SceneGraph and Node objects.
"""
def | (self, graph):
self._root = graph
def printTree(self, args):
"""
Print Avango scene graph recursively.
@param args: dict of arguments for the tree generation. Possible keys are:
- int max_depth: reduce maximum tree depth (-1 means full tree traversal)
- str exclude_pattern: regular expression to exclude certain nodes by name
- bool print_full_path: print full path for each node (default: False)
- bool print_depth: print depth in tree for each node (default: False)
- int shorten_sub_trees: shorten subtrees with more than n child nodes
(-1 means full tree traversal)
- str group_by_name: regular expression for grouping child nodes together
- bool print_memory_addr: show the memory address for each node (default: False)
- bool print_field_names: show field names for each node
- bool print_field_values: show values of fields for each node (implies print_field_names)
@type args: dict
@throws Exception: Invalid tree structure
"""
# check given arguments
for i in list(args.keys()):
if i not in self._treeOpts:
print(self._colorize('error', "Invalid argument '" + i + "'"),
file=sys.stderr)
return
joined_args = dict(list(self._treeOpts.items()) + list(args.items()))
_root = self._root
if hasattr(self._root, 'Root'):
_root = self._root.Root.value
elif hasattr(self._root, 'Children'):
_root = self._root
else:
raise Exception(
"Invalid tree structure, missing attributes 'Root' or 'Children'")
self.__printRecursively(_root, 0, joined_args)
def __printRecursively(self, node, cur_depth, args,
cur_path=[],
is_grouped=False):
# return if current node name matches user-specified exclude pattern
if None != args['exclude_pattern'] and re.search(
args['exclude_pattern'], node.Name.value):
return
# push current basename to path stack
cur_path.append(node.Name.value)
obj_name = str(node)
# remove memory address from string representation if not needed
if not args['print_memory_addr']:
obj_name = re.sub(' object at 0x[0-9a-zA-Z]+>$', '>', obj_name)
print(self._indent(
cur_depth, 'Name: %s%s Obj: %s%s%s' %
(self._colorize('important', '"' + node.Name.value + '"'),
self._colorize('bold', ' (Group)')
if is_grouped else '', self._colorize('important', obj_name, ),
' Path: "' + '/'.join(cur_path).replace('//', '/', 1) + '"'
if args['print_full_path'] else '', ' Depth: ' + str(cur_depth)
if args['print_depth'] else '')))
if (args['print_field_values'] or args['print_field_names']
) and node.get_num_fields():
print(self._indent(cur_depth + 1, self._colorize('bold',
'Fields:')))
num_fields = node.get_num_fields()
for i in range(num_fields):
if args['print_field_values']:
print(self._indent(cur_depth + 2, '%s: %s = %s' %
(node.get_field_name(i),
node.get_field(i).__class__.__name__,
str(node.get_field(i).value))))
else:
print(self._indent(cur_depth + 2, '%s: %s' %
(node.get_field_name(i),
node.get_field(i).__class__.__name__)))
# if it's a leaf or max_depth is reached, pop current level from path stack and abort recursion
if 0 == len(node.Children.value) or cur_depth == args['max_depth']:
if len(node.Children.value):
print(self._indent(cur_depth + 1, self._colorize(
'bold', 'Node has children...')))
cur_path.pop()
return
counter = 0
used_name_count = 0
for i in node.Children.value:
# group by names if option 'group_by_name' is set
name_matches = False
if None != args['group_by_name'] and re.search(
args['group_by_name'], i.Name.value):
name_matches = True
used_name_count += 1
if 1 != used_name_count:
continue
# cut off sub trees if shorten_sub_trees is set
if -1 < args['shorten_sub_trees'
] and counter >= args['shorten_sub_trees']:
print(self._indent(cur_depth, \
self._colorize('bold', 'Shortened sub tree (' + str(len(node.Children.value) - counter) + ' more...)')))
break
self.__printRecursively(i, cur_depth + 1, args, cur_path,
used_name_count and name_matches)
counter += 1
if 1 < used_name_count:
print(self._indent(cur_depth, self._colorize(
'bold', 'Grouped children: ' + str(used_name_count))))
# go up the tree stack
cur_path.pop()
def _indent(self, depth, text):
"""
Indent a line to a certain depth.
"""
if 0 >= depth:
return text
return '| ' * (depth - 1) + '|___ ' + text
def _colorize(self, color, text):
"""
Wrap text in ANSI escape codes (terminal color codes).
Possible values for color: important, error, bold
"""
color_codes = {
'important': '\033[1;32m',
'error': '\033[1;93m',
'bold': '\033[1m',
'none': '\033[0m',
}
if color not in color_codes or 'none' == color:
return text
return color_codes[color] + text + color_codes['none']
# possible tree formatting user options
_treeOpts = {
'max_depth': -1,
'exclude_pattern': None,
'print_full_path': False,
'print_depth': False,
'shorten_sub_trees': -1,
'group_by_name': None,
'print_memory_addr': False,
'print_field_names': False,
'print_field_values': False,
}
def _printTree(self, **args):
e314 = GuajacumTreePrinter(self)
e314.printTree(args)
def _printFields(self):
e314 = GuajacumTreePrinter(self)
args = {'print_field_values': True, 'max_depth': 0}
e314.printTree(args)
# now put some antioxidant on our guacamole
avango.gua._gua.SceneGraph.print_tree = _printTree
avango.gua._gua.Node.print_tree = _printTree
avango.gua._gua.SceneGraph.print_fields = _printFields
avango.gua._gua.Node.print_fields = _printFields
| __init__ | identifier_name |
etherpad.js | (function( $ ){
$.fn.pad = function( options ) {
var settings = {
'host' : 'http://172.19.220.122:9001',
'baseUrl' : '/p/',
'showControls' : true,
'showChat' : false,
'showLineNumbers' : false,
'userName' : 'unnamed',
'useMonospaceFont' : false,
'noColors' : false,
'userColor' : false,
'hideQRCode' : false,
'alwaysShowChat' : false,
'width' : 100,
'height' : 500,
'border' : 0,
'borderStyle' : 'solid',
'toggleTextOn' : 'Disable Rich-text',
'toggleTextOff' : 'Enable Rich-text'
};
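// Usage sketch (editor's illustration; the selector, pad id and target id below are
// hypothetical). Options not passed fall back to the defaults above:
// $('#notes').pad({ padId: 'project-notes', height: 400, showChat: true });
// ...and later, to copy the pad's exported HTML back into the element with id "notes-target":
// $('#notes').pad({ getContents: 'notes-target' });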
var $self = this;
if (!$self.length) return;
if (!$self.attr('id')) throw new Error('No "id" attribute');
var useValue = $self[0].tagName.toLowerCase() == 'textarea';
var selfId = $self.attr('id');
var epframeId = 'epframe'+ selfId;
// This writes a new frame if required
if ( !options.getContents ) {
if ( options ) {
$.extend( settings, options );
}
var iFrameLink = '<iframe id="'+epframeId;
iFrameLink = iFrameLink +'" name="'+epframeId;
iFrameLink = iFrameLink +'" src="'+settings.host+settings.baseUrl+settings.padId;
iFrameLink = iFrameLink + '?showControls='+settings.showControls;
iFrameLink = iFrameLink + '&showChat='+settings.showChat;
iFrameLink = iFrameLink + '&showLineNumbers='+settings.showLineNumbers;
iFrameLink = iFrameLink + '&useMonospaceFont='+settings.useMonospaceFont;
iFrameLink = iFrameLink + '&userName=' + settings.userName;
iFrameLink = iFrameLink + '&noColors=' + settings.noColors;
iFrameLink = iFrameLink + '&userColor=' + settings.userColor;
iFrameLink = iFrameLink + '&hideQRCode=' + settings.hideQRCode;
iFrameLink = iFrameLink + '&alwaysShowChat=' + settings.alwaysShowChat;
iFrameLink = iFrameLink +'" style="border:'+settings.border;
iFrameLink = iFrameLink +'; border-style:'+settings.borderStyle;
iFrameLink = iFrameLink +';" width="'+ '100%';//settings.width;
iFrameLink = iFrameLink +'" height="'+ settings.height;
iFrameLink = iFrameLink +'"></iframe>';
var $iFrameLink = $(iFrameLink);
if (useValue) {
var $toggleLink = $('<a href="#'+ selfId +'">'+ settings.toggleTextOn +'</a>').click(function(){
var $this = $(this); | $self
.hide()
.after($toggleLink)
.after($iFrameLink)
;
}
else {
$self.html(iFrameLink);
}
}
// This reads the etherpad contents if required
else {
var frameUrl = $('#'+ epframeId).attr('src').split('?')[0];
var contentsUrl = frameUrl + "/export/html";
var target = $('#'+ options.getContents);
// perform an ajax call on contentsUrl and write it to the parent
$.get(contentsUrl, function(data) {
if (target.is(':input')) {
target.val(data).show();
}
else {
target.html(data);
}
$('#'+ epframeId).remove();
});
}
return $self;
};
})( jQuery ); | $this.toggleClass('active');
if ($this.hasClass('active')) $this.text(settings.toggleTextOff);
$self.pad({getContents: true});
return false;
}); | random_line_split |
cascaded-animation.d.ts | /**
* DO NOT EDIT
*
* This file was automatically generated by | *
* To modify these typings, edit the source file(s):
* animations/cascaded-animation.html
*/
/// <reference path="../../polymer/types/polymer.d.ts" />
/// <reference path="../neon-animation-behavior.d.ts" />
/**
* `<cascaded-animation>` applies an animation on an array of elements with a delay between each.
* The delay defaults to 50ms.
*
* Configuration:
* ```
* {
* name: 'cascaded-animation',
* animation: <animation-name>,
* nodes: <array-of-nodes>,
* nodeDelay: <node-delay-in-ms>,
* timing: <animation-timing>
* }
* ```
*/
interface CascadedAnimationElement extends Polymer.Element, Polymer.NeonAnimationBehavior {
complete(): void;
configure(config: {animation: string, nodes: Element[], nodeDelay?: number, timing?: object|null}): any;
}
interface HTMLElementTagNameMap {
"cascaded-animation": CascadedAnimationElement;
} | * https://github.com/Polymer/gen-typescript-declarations | random_line_split |