file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–12.1k) | suffix (large_string, lengths 0–12k) | middle (large_string, lengths 0–7.51k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
report.service.ts | import { HttpClient } from '@angular/common/http';
import { Injectable } from '@angular/core';
import { Observable } from 'rxjs/Observable';
import { GlobalService } from '../service/global.service';
@Injectable()
export class ReportService {
constructor (private http: HttpClient, private global: GlobalService) {
}
public postReport (data: FormData): Observable<any> {
return this.http.post(this.global.url + `/report`, data).map((res: any) => {
if ( res.status === 'success' ) {
return res.result;
} else {
alert('[ERROR]: ' + res.result);
}
});
}
public | (): Observable<any> {
return this.http.get(this.global.url + `/report`).map((res: any) => {
if ( res.status === 'success' ) {
return res.result;
} else {
alert('[ERROR]: ' + res.result);
}
})
}
public getReportById (id: number): Observable<any> {
return this.http.get(this.global.url + `/report/${id}`).map((res: any) => {
if ( res.status === 'success' ) {
return res.result;
} else {
alert('[ERROR]: ' + res.result);
}
})
}
}
| getReportList | identifier_name |
report.service.ts | import { HttpClient } from '@angular/common/http';
import { Injectable } from '@angular/core';
import { Observable } from 'rxjs/Observable';
import { GlobalService } from '../service/global.service';
@Injectable()
export class ReportService {
constructor (private http: HttpClient, private global: GlobalService) |
public postReport (data: FormData): Observable<any> {
return this.http.post(this.global.url + `/report`, data).map((res: any) => {
if ( res.status === 'success' ) {
return res.result;
} else {
alert('[ERROR]: ' + res.result);
}
});
}
public getReportList (): Observable<any> {
return this.http.get(this.global.url + `/report`).map((res: any) => {
if ( res.status === 'success' ) {
return res.result;
} else {
alert('[ERROR]: ' + res.result);
}
})
}
public getReportById (id: number): Observable<any> {
return this.http.get(this.global.url + `/report/${id}`).map((res: any) => {
if ( res.status === 'success' ) {
return res.result;
} else {
alert('[ERROR]: ' + res.result);
}
})
}
}
| {
} | identifier_body |
report.service.ts | import { HttpClient } from '@angular/common/http';
import { Injectable } from '@angular/core';
import { Observable } from 'rxjs/Observable';
import { GlobalService } from '../service/global.service';
@Injectable()
export class ReportService {
constructor (private http: HttpClient, private global: GlobalService) {
}
public postReport (data: FormData): Observable<any> {
return this.http.post(this.global.url + `/report`, data).map((res: any) => {
if ( res.status === 'success' ) {
return res.result;
} else {
alert('[ERROR]: ' + res.result);
}
});
}
public getReportList (): Observable<any> {
return this.http.get(this.global.url + `/report`).map((res: any) => {
if ( res.status === 'success' ) {
return res.result;
} else {
alert('[ERROR]: ' + res.result);
}
})
}
public getReportById (id: number): Observable<any> {
return this.http.get(this.global.url + `/report/${id}`).map((res: any) => {
if ( res.status === 'success' ) {
return res.result;
} else {
alert('[ERROR]: ' + res.result);
}
}) | }
} | random_line_split |
|
report.service.ts | import { HttpClient } from '@angular/common/http';
import { Injectable } from '@angular/core';
import { Observable } from 'rxjs/Observable';
import { GlobalService } from '../service/global.service';
@Injectable()
export class ReportService {
constructor (private http: HttpClient, private global: GlobalService) {
}
public postReport (data: FormData): Observable<any> {
return this.http.post(this.global.url + `/report`, data).map((res: any) => {
if ( res.status === 'success' ) | else {
alert('[ERROR]: ' + res.result);
}
});
}
public getReportList (): Observable<any> {
return this.http.get(this.global.url + `/report`).map((res: any) => {
if ( res.status === 'success' ) {
return res.result;
} else {
alert('[ERROR]: ' + res.result);
}
})
}
public getReportById (id: number): Observable<any> {
return this.http.get(this.global.url + `/report/${id}`).map((res: any) => {
if ( res.status === 'success' ) {
return res.result;
} else {
alert('[ERROR]: ' + res.result);
}
})
}
}
| {
return res.result;
} | conditional_block |
gotoolchain.py | #
# SPDX-License-Identifier: MIT
#
import glob
import os
import shutil
import tempfile
from oeqa.selftest.case import OESelftestTestCase
from oeqa.utils.commands import runCmd, bitbake, get_bb_vars
class oeGoToolchainSelfTest(OESelftestTestCase):
"""
Test cases for OE's Go toolchain
"""
@staticmethod
def get_sdk_environment(tmpdir_SDKQA):
pattern = os.path.join(tmpdir_SDKQA, "environment-setup-*")
# FIXME: this is a very naive implementation
return glob.glob(pattern)[0]
@staticmethod
def | ():
bb_vars = get_bb_vars(['SDK_DEPLOY', 'TOOLCHAIN_OUTPUTNAME'],
"meta-go-toolchain")
sdk_deploy = bb_vars['SDK_DEPLOY']
toolchain_name = bb_vars['TOOLCHAIN_OUTPUTNAME']
return os.path.join(sdk_deploy, toolchain_name + ".sh")
@classmethod
def setUpClass(cls):
super(oeGoToolchainSelfTest, cls).setUpClass()
cls.tmpdir_SDKQA = tempfile.mkdtemp(prefix='SDKQA')
cls.go_path = os.path.join(cls.tmpdir_SDKQA, "go")
# Build the SDK and locate it in DEPLOYDIR
bitbake("meta-go-toolchain")
cls.sdk_path = oeGoToolchainSelfTest.get_sdk_toolchain()
# Install the SDK into the tmpdir
runCmd("sh %s -y -d \"%s\"" % (cls.sdk_path, cls.tmpdir_SDKQA))
cls.env_SDK = oeGoToolchainSelfTest.get_sdk_environment(cls.tmpdir_SDKQA)
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdir_SDKQA, ignore_errors=True)
super(oeGoToolchainSelfTest, cls).tearDownClass()
def run_sdk_go_command(self, gocmd):
cmd = "cd %s; " % self.tmpdir_SDKQA
cmd = cmd + ". %s; " % self.env_SDK
cmd = cmd + "export GOPATH=%s; " % self.go_path
cmd = cmd + "${CROSS_COMPILE}go %s" % gocmd
return runCmd(cmd).status
def test_go_dep_build(self):
proj = "github.com/golang"
name = "dep"
ver = "v0.3.1"
archive = ".tar.gz"
url = "https://%s/%s/archive/%s%s" % (proj, name, ver, archive)
runCmd("cd %s; wget %s" % (self.tmpdir_SDKQA, url))
runCmd("cd %s; tar -xf %s" % (self.tmpdir_SDKQA, ver+archive))
runCmd("mkdir -p %s/src/%s" % (self.go_path, proj))
runCmd("mv %s/dep-0.3.1 %s/src/%s/%s"
% (self.tmpdir_SDKQA, self.go_path, proj, name))
retv = self.run_sdk_go_command('build %s/%s/cmd/dep'
% (proj, name))
self.assertEqual(retv, 0,
msg="Running go build failed for %s" % name)
| get_sdk_toolchain | identifier_name |
gotoolchain.py | #
# SPDX-License-Identifier: MIT
#
import glob
import os
import shutil
import tempfile
from oeqa.selftest.case import OESelftestTestCase
from oeqa.utils.commands import runCmd, bitbake, get_bb_vars
class oeGoToolchainSelfTest(OESelftestTestCase):
"""
Test cases for OE's Go toolchain
"""
@staticmethod
def get_sdk_environment(tmpdir_SDKQA):
pattern = os.path.join(tmpdir_SDKQA, "environment-setup-*")
# FIXME: this is a very naive implementation
return glob.glob(pattern)[0]
@staticmethod
def get_sdk_toolchain():
bb_vars = get_bb_vars(['SDK_DEPLOY', 'TOOLCHAIN_OUTPUTNAME'],
"meta-go-toolchain")
sdk_deploy = bb_vars['SDK_DEPLOY']
toolchain_name = bb_vars['TOOLCHAIN_OUTPUTNAME']
return os.path.join(sdk_deploy, toolchain_name + ".sh")
@classmethod
def setUpClass(cls):
|
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdir_SDKQA, ignore_errors=True)
super(oeGoToolchainSelfTest, cls).tearDownClass()
def run_sdk_go_command(self, gocmd):
cmd = "cd %s; " % self.tmpdir_SDKQA
cmd = cmd + ". %s; " % self.env_SDK
cmd = cmd + "export GOPATH=%s; " % self.go_path
cmd = cmd + "${CROSS_COMPILE}go %s" % gocmd
return runCmd(cmd).status
def test_go_dep_build(self):
proj = "github.com/golang"
name = "dep"
ver = "v0.3.1"
archive = ".tar.gz"
url = "https://%s/%s/archive/%s%s" % (proj, name, ver, archive)
runCmd("cd %s; wget %s" % (self.tmpdir_SDKQA, url))
runCmd("cd %s; tar -xf %s" % (self.tmpdir_SDKQA, ver+archive))
runCmd("mkdir -p %s/src/%s" % (self.go_path, proj))
runCmd("mv %s/dep-0.3.1 %s/src/%s/%s"
% (self.tmpdir_SDKQA, self.go_path, proj, name))
retv = self.run_sdk_go_command('build %s/%s/cmd/dep'
% (proj, name))
self.assertEqual(retv, 0,
msg="Running go build failed for %s" % name)
| super(oeGoToolchainSelfTest, cls).setUpClass()
cls.tmpdir_SDKQA = tempfile.mkdtemp(prefix='SDKQA')
cls.go_path = os.path.join(cls.tmpdir_SDKQA, "go")
# Build the SDK and locate it in DEPLOYDIR
bitbake("meta-go-toolchain")
cls.sdk_path = oeGoToolchainSelfTest.get_sdk_toolchain()
# Install the SDK into the tmpdir
runCmd("sh %s -y -d \"%s\"" % (cls.sdk_path, cls.tmpdir_SDKQA))
cls.env_SDK = oeGoToolchainSelfTest.get_sdk_environment(cls.tmpdir_SDKQA) | identifier_body |
gotoolchain.py | #
# SPDX-License-Identifier: MIT
#
import glob
import os
import shutil
import tempfile
from oeqa.selftest.case import OESelftestTestCase
from oeqa.utils.commands import runCmd, bitbake, get_bb_vars
class oeGoToolchainSelfTest(OESelftestTestCase):
"""
Test cases for OE's Go toolchain
"""
@staticmethod
def get_sdk_environment(tmpdir_SDKQA):
pattern = os.path.join(tmpdir_SDKQA, "environment-setup-*")
# FIXME: this is a very naive implementation | bb_vars = get_bb_vars(['SDK_DEPLOY', 'TOOLCHAIN_OUTPUTNAME'],
"meta-go-toolchain")
sdk_deploy = bb_vars['SDK_DEPLOY']
toolchain_name = bb_vars['TOOLCHAIN_OUTPUTNAME']
return os.path.join(sdk_deploy, toolchain_name + ".sh")
@classmethod
def setUpClass(cls):
super(oeGoToolchainSelfTest, cls).setUpClass()
cls.tmpdir_SDKQA = tempfile.mkdtemp(prefix='SDKQA')
cls.go_path = os.path.join(cls.tmpdir_SDKQA, "go")
# Build the SDK and locate it in DEPLOYDIR
bitbake("meta-go-toolchain")
cls.sdk_path = oeGoToolchainSelfTest.get_sdk_toolchain()
# Install the SDK into the tmpdir
runCmd("sh %s -y -d \"%s\"" % (cls.sdk_path, cls.tmpdir_SDKQA))
cls.env_SDK = oeGoToolchainSelfTest.get_sdk_environment(cls.tmpdir_SDKQA)
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdir_SDKQA, ignore_errors=True)
super(oeGoToolchainSelfTest, cls).tearDownClass()
def run_sdk_go_command(self, gocmd):
cmd = "cd %s; " % self.tmpdir_SDKQA
cmd = cmd + ". %s; " % self.env_SDK
cmd = cmd + "export GOPATH=%s; " % self.go_path
cmd = cmd + "${CROSS_COMPILE}go %s" % gocmd
return runCmd(cmd).status
def test_go_dep_build(self):
proj = "github.com/golang"
name = "dep"
ver = "v0.3.1"
archive = ".tar.gz"
url = "https://%s/%s/archive/%s%s" % (proj, name, ver, archive)
runCmd("cd %s; wget %s" % (self.tmpdir_SDKQA, url))
runCmd("cd %s; tar -xf %s" % (self.tmpdir_SDKQA, ver+archive))
runCmd("mkdir -p %s/src/%s" % (self.go_path, proj))
runCmd("mv %s/dep-0.3.1 %s/src/%s/%s"
% (self.tmpdir_SDKQA, self.go_path, proj, name))
retv = self.run_sdk_go_command('build %s/%s/cmd/dep'
% (proj, name))
self.assertEqual(retv, 0,
msg="Running go build failed for %s" % name) | return glob.glob(pattern)[0]
@staticmethod
def get_sdk_toolchain(): | random_line_split |
mouse-timer.js | function MouseTimer(){
timers = {};
listenersWait = {};
function init(){
setMouseMoveHandler();
}
function setMouseMoveHandler(){
$(document).mousemove(function(event) {
for (var time in timers){
var timer = timers[time];
clearTimeout(timer);
delete timers[time];
addTimer(time);
}
});
}
function addTimer(time){
if (!timers[time]) {
timers[time] = setTimeout(function(){
for (var i in listenersWait[time]){
var handler = listenersWait[time][i];
handler();
}
}, time);
}
}
function mousewait(time, handler){
if (!listenersWait[time]){
listenersWait[time] = [];
}
listenersWait[time].push(handler);
addTimer(time);
}
this.on = function(event, time, handler){
if (event.toLowerCase() == "mousewait"){
mousewait(time, handler);
}
};
this.off = function(event, time, handler){
if (event.toLowerCase() == "mousewait") |
};
init();
}
var MouseTimer = new MouseTimer();
| {
if (!listenersWait[time]) return;
var pos = listenersWait[time].indexOf(handler);
if (pos >= 0){
listenersWait[time].splice(pos, 1);
}
} | conditional_block |
mouse-timer.js | function MouseTimer(){
timers = {};
listenersWait = {};
function init(){
setMouseMoveHandler();
}
function setMouseMoveHandler(){
$(document).mousemove(function(event) {
for (var time in timers){
var timer = timers[time];
clearTimeout(timer);
delete timers[time];
addTimer(time);
}
});
}
function addTimer(time){ | for (var i in listenersWait[time]){
var handler = listenersWait[time][i];
handler();
}
}, time);
}
}
function mousewait(time, handler){
if (!listenersWait[time]){
listenersWait[time] = [];
}
listenersWait[time].push(handler);
addTimer(time);
}
this.on = function(event, time, handler){
if (event.toLowerCase() == "mousewait"){
mousewait(time, handler);
}
};
this.off = function(event, time, handler){
if (event.toLowerCase() == "mousewait"){
if (!listenersWait[time]) return;
var pos = listenersWait[time].indexOf(handler);
if (pos >= 0){
listenersWait[time].splice(pos, 1);
}
}
};
init();
}
var MouseTimer = new MouseTimer(); | if (!timers[time]) {
timers[time] = setTimeout(function(){ | random_line_split |
mouse-timer.js | function MouseTimer(){
timers = {};
listenersWait = {};
function init(){
setMouseMoveHandler();
}
function setMouseMoveHandler(){
$(document).mousemove(function(event) {
for (var time in timers){
var timer = timers[time];
clearTimeout(timer);
delete timers[time];
addTimer(time);
}
});
}
function addTimer(time){
if (!timers[time]) {
timers[time] = setTimeout(function(){
for (var i in listenersWait[time]){
var handler = listenersWait[time][i];
handler();
}
}, time);
}
}
function | (time, handler){
if (!listenersWait[time]){
listenersWait[time] = [];
}
listenersWait[time].push(handler);
addTimer(time);
}
this.on = function(event, time, handler){
if (event.toLowerCase() == "mousewait"){
mousewait(time, handler);
}
};
this.off = function(event, time, handler){
if (event.toLowerCase() == "mousewait"){
if (!listenersWait[time]) return;
var pos = listenersWait[time].indexOf(handler);
if (pos >= 0){
listenersWait[time].splice(pos, 1);
}
}
};
init();
}
var MouseTimer = new MouseTimer();
| mousewait | identifier_name |
mouse-timer.js | function MouseTimer(){
timers = {};
listenersWait = {};
function init(){
setMouseMoveHandler();
}
function setMouseMoveHandler() |
function addTimer(time){
if (!timers[time]) {
timers[time] = setTimeout(function(){
for (var i in listenersWait[time]){
var handler = listenersWait[time][i];
handler();
}
}, time);
}
}
function mousewait(time, handler){
if (!listenersWait[time]){
listenersWait[time] = [];
}
listenersWait[time].push(handler);
addTimer(time);
}
this.on = function(event, time, handler){
if (event.toLowerCase() == "mousewait"){
mousewait(time, handler);
}
};
this.off = function(event, time, handler){
if (event.toLowerCase() == "mousewait"){
if (!listenersWait[time]) return;
var pos = listenersWait[time].indexOf(handler);
if (pos >= 0){
listenersWait[time].splice(pos, 1);
}
}
};
init();
}
var MouseTimer = new MouseTimer();
| {
$(document).mousemove(function(event) {
for (var time in timers){
var timer = timers[time];
clearTimeout(timer);
delete timers[time];
addTimer(time);
}
});
} | identifier_body |
basewidget.py | from pygame.sprite import DirtySprite
from pygame import draw
class BaseWidget(DirtySprite):
"""clase base para todos los widgets"""
focusable = True
# si no es focusable, no se le llaman focusin y focusout
# (por ejemplo, un contenedor, una etiqueta de texto)
hasFocus = False
# indica si el widget está en foco o no.
enabled = True
# un widget con enabled==False no recibe ningun evento
nombre = ''
# identifica al widget en el renderer
hasMouseOver = False
# indica si el widget tuvo el mouse encima o no, por el onMouseOut
opciones = None
# las opciones con las que se inicializo
setFocus_onIn = False
# if True: Renderer.setFocus se dispara onMouseIn también.
KeyCombination = ''
layer = 0
rect = None
x, y = 0, 0
def __init__(self, parent=None, **opciones):
if parent is not None:
self.parent = parent
self.layer = self.parent.layer + 1
self.opciones = opciones
super().__init__()
def on_focus_in(self):
self.hasFocus = True
def on_focus_out(self):
self.hasFocus = False
def on_mouse_down(self, mousedata):
pass
def on_mouse_up(self, mousedata):
pass
def on_mouse_over(self):
pass
def on_mouse_in(self):
self.hasMouseOver = True
def on_mouse_out(self):
self.hasMouseOver = False
def on_key_down(self, keydata):
pass
def on_key_up(self, keydata):
pass
def on_destruction(self):
# esta funcion se llama cuando el widget es quitado del renderer.
pa |
@staticmethod
def _biselar(imagen, color_luz, color_sombra):
w, h = imagen.get_size()
draw.line(imagen, color_sombra, (0, h - 2), (w - 1, h - 2), 2)
draw.line(imagen, color_sombra, (w - 2, h - 2), (w - 2, 0), 2)
draw.lines(imagen, color_luz, 0, [(w - 2, 0), (0, 0), (0, h - 4)], 2)
return imagen
def reubicar_en_ventana(self, dx=0, dy=0):
self.rect.move_ip(dx, dy)
self.x += dx
self.y += dy
self.dirty = 1
def __repr__(self):
return self.nombre
def is_visible(self):
return self._visible
| ss
| identifier_body |
basewidget.py | from pygame.sprite import DirtySprite
from pygame import draw
class BaseWidget(DirtySprite):
"""clase base para todos los widgets"""
focusable = True
# si no es focusable, no se le llaman focusin y focusout
# (por ejemplo, un contenedor, una etiqueta de texto)
hasFocus = False
# indica si el widget está en foco o no.
enabled = True
# un widget con enabled==False no recibe ningun evento
nombre = ''
# identifica al widget en el renderer
hasMouseOver = False
# indica si el widget tuvo el mouse encima o no, por el onMouseOut
opciones = None
# las opciones con las que se inicializo
setFocus_onIn = False
# if True: Renderer.setFocus se dispara onMouseIn también.
KeyCombination = ''
layer = 0
rect = None
x, y = 0, 0
def __init__(self, parent=None, **opciones):
if parent is not None:
se | self.opciones = opciones
super().__init__()
def on_focus_in(self):
self.hasFocus = True
def on_focus_out(self):
self.hasFocus = False
def on_mouse_down(self, mousedata):
pass
def on_mouse_up(self, mousedata):
pass
def on_mouse_over(self):
pass
def on_mouse_in(self):
self.hasMouseOver = True
def on_mouse_out(self):
self.hasMouseOver = False
def on_key_down(self, keydata):
pass
def on_key_up(self, keydata):
pass
def on_destruction(self):
# esta funcion se llama cuando el widget es quitado del renderer.
pass
@staticmethod
def _biselar(imagen, color_luz, color_sombra):
w, h = imagen.get_size()
draw.line(imagen, color_sombra, (0, h - 2), (w - 1, h - 2), 2)
draw.line(imagen, color_sombra, (w - 2, h - 2), (w - 2, 0), 2)
draw.lines(imagen, color_luz, 0, [(w - 2, 0), (0, 0), (0, h - 4)], 2)
return imagen
def reubicar_en_ventana(self, dx=0, dy=0):
self.rect.move_ip(dx, dy)
self.x += dx
self.y += dy
self.dirty = 1
def __repr__(self):
return self.nombre
def is_visible(self):
return self._visible
| lf.parent = parent
self.layer = self.parent.layer + 1
| conditional_block |
basewidget.py | from pygame.sprite import DirtySprite
from pygame import draw
class BaseWidget(DirtySprite):
"""clase base para todos los widgets"""
focusable = True
# si no es focusable, no se le llaman focusin y focusout
# (por ejemplo, un contenedor, una etiqueta de texto)
hasFocus = False
# indica si el widget está en foco o no.
enabled = True
# un widget con enabled==False no recibe ningun evento
nombre = ''
# identifica al widget en el renderer
hasMouseOver = False
# indica si el widget tuvo el mouse encima o no, por el onMouseOut
opciones = None
# las opciones con las que se inicializo
setFocus_onIn = False
# if True: Renderer.setFocus se dispara onMouseIn también.
KeyCombination = ''
layer = 0
rect = None
x, y = 0, 0
def __init__(self, parent=None, **opciones):
if parent is not None:
self.parent = parent
self.layer = self.parent.layer + 1
self.opciones = opciones
super().__init__()
def on_focus_in(self):
self.hasFocus = True
def on_focus_out(self):
self.hasFocus = False
def on_mouse_down(self, mousedata):
pass
def on_mouse_up(self, mousedata):
pass
def on_mouse_over(self):
pass
def on_mouse_in(self):
self.hasMouseOver = True
def on_mouse_out(self):
self.hasMouseOver = False
def on_key_down(self, keydata):
pass
def on_key_up(self, keydata):
pass
def on_destruction(self):
# esta funcion se llama cuando el widget es quitado del renderer.
pass
| @staticmethod
def _biselar(imagen, color_luz, color_sombra):
w, h = imagen.get_size()
draw.line(imagen, color_sombra, (0, h - 2), (w - 1, h - 2), 2)
draw.line(imagen, color_sombra, (w - 2, h - 2), (w - 2, 0), 2)
draw.lines(imagen, color_luz, 0, [(w - 2, 0), (0, 0), (0, h - 4)], 2)
return imagen
def reubicar_en_ventana(self, dx=0, dy=0):
self.rect.move_ip(dx, dy)
self.x += dx
self.y += dy
self.dirty = 1
def __repr__(self):
return self.nombre
def is_visible(self):
return self._visible | random_line_split |
|
basewidget.py | from pygame.sprite import DirtySprite
from pygame import draw
class BaseWidget(DirtySprite):
"""clase base para todos los widgets"""
focusable = True
# si no es focusable, no se le llaman focusin y focusout
# (por ejemplo, un contenedor, una etiqueta de texto)
hasFocus = False
# indica si el widget está en foco o no.
enabled = True
# un widget con enabled==False no recibe ningun evento
nombre = ''
# identifica al widget en el renderer
hasMouseOver = False
# indica si el widget tuvo el mouse encima o no, por el onMouseOut
opciones = None
# las opciones con las que se inicializo
setFocus_onIn = False
# if True: Renderer.setFocus se dispara onMouseIn también.
KeyCombination = ''
layer = 0
rect = None
x, y = 0, 0
def __init__(self, parent=None, **opciones):
if parent is not None:
self.parent = parent
self.layer = self.parent.layer + 1
self.opciones = opciones
super().__init__()
def on_focus_in(self):
self.hasFocus = True
def on_focus_out(self):
self.hasFocus = False
def on_mouse_down(self, mousedata):
pass
def on_mouse_up(self, mousedata):
pass
def on_mouse_over(self):
pass
def on_mouse_in(self):
self.hasMouseOver = True
def on | elf):
self.hasMouseOver = False
def on_key_down(self, keydata):
pass
def on_key_up(self, keydata):
pass
def on_destruction(self):
# esta funcion se llama cuando el widget es quitado del renderer.
pass
@staticmethod
def _biselar(imagen, color_luz, color_sombra):
w, h = imagen.get_size()
draw.line(imagen, color_sombra, (0, h - 2), (w - 1, h - 2), 2)
draw.line(imagen, color_sombra, (w - 2, h - 2), (w - 2, 0), 2)
draw.lines(imagen, color_luz, 0, [(w - 2, 0), (0, 0), (0, h - 4)], 2)
return imagen
def reubicar_en_ventana(self, dx=0, dy=0):
self.rect.move_ip(dx, dy)
self.x += dx
self.y += dy
self.dirty = 1
def __repr__(self):
return self.nombre
def is_visible(self):
return self._visible
| _mouse_out(s | identifier_name |
tyencode.rs | short_names_cache.get()
.find(&t)
.map(|result| *result);
}
let result_str = match result_str_opt {
Some(s) => s,
None => {
let wr = &mut MemWriter::new();
enc_sty(wr, cx, &ty::get(t).sty);
let s = str::from_utf8(wr.get_ref()).to_managed();
let mut short_names_cache = cx.tcx
.short_names_cache
.borrow_mut();
short_names_cache.get().insert(t, s);
s
}
};
w.write(result_str.as_bytes());
}
ac_use_abbrevs(abbrevs) => {
{
let mut abbrevs = abbrevs.borrow_mut();
match abbrevs.get().find(&t) {
Some(a) => { w.write(a.s.as_bytes()); return; }
None => {}
}
}
let pos = w.tell();
enc_sty(w, cx, &ty::get(t).sty);
let end = w.tell();
let len = end - pos;
fn estimate_sz(u: u64) -> u64 {
let mut n = u;
let mut len = 0;
while n != 0 { len += 1; n = n >> 4; }
return len;
}
let abbrev_len = 3 + estimate_sz(pos) + estimate_sz(len);
if abbrev_len < len {
// I.e. it's actually an abbreviation.
let s = format!("\\#{:x}:{:x}\\#", pos, len).to_managed();
let a = ty_abbrev { pos: pos as uint,
len: len as uint,
s: s };
{
let mut abbrevs = abbrevs.borrow_mut();
abbrevs.get().insert(t, a);
}
}
return;
}
}
}
fn enc_mutability(w: &mut MemWriter, mt: ast::Mutability) {
match mt {
MutImmutable => (),
MutMutable => mywrite!(w, "m"),
}
}
fn enc_mt(w: &mut MemWriter, cx: @ctxt, mt: ty::mt) {
enc_mutability(w, mt.mutbl);
enc_ty(w, cx, mt.ty);
}
fn enc_opt<T>(w: &mut MemWriter, t: Option<T>, enc_f: |&mut MemWriter, T|) {
match t {
None => mywrite!(w, "n"),
Some(v) => {
mywrite!(w, "s");
enc_f(w, v);
}
}
}
pub fn enc_substs(w: &mut MemWriter, cx: @ctxt, substs: &ty::substs) {
enc_region_substs(w, cx, &substs.regions);
enc_opt(w, substs.self_ty, |w, t| enc_ty(w, cx, t));
mywrite!(w, "[");
for t in substs.tps.iter() { enc_ty(w, cx, *t); }
mywrite!(w, "]");
}
fn enc_region_substs(w: &mut MemWriter, cx: @ctxt, substs: &ty::RegionSubsts) {
match *substs {
ty::ErasedRegions => {
mywrite!(w, "e");
}
ty::NonerasedRegions(ref regions) => {
mywrite!(w, "n");
for &r in regions.iter() {
enc_region(w, cx, r);
}
mywrite!(w, ".");
}
}
}
fn enc_region(w: &mut MemWriter, cx: @ctxt, r: ty::Region) {
match r {
ty::ReLateBound(id, br) => {
mywrite!(w, "b[{}|", id);
enc_bound_region(w, cx, br);
mywrite!(w, "]");
}
ty::ReEarlyBound(node_id, index, ident) => {
mywrite!(w, "B[{}|{}|{}]",
node_id,
index,
cx.tcx.sess.str_of(ident));
}
ty::ReFree(ref fr) => {
mywrite!(w, "f[{}|", fr.scope_id);
enc_bound_region(w, cx, fr.bound_region);
mywrite!(w, "]");
}
ty::ReScope(nid) => {
mywrite!(w, "s{}|", nid);
}
ty::ReStatic => {
mywrite!(w, "t");
}
ty::ReEmpty => {
mywrite!(w, "e");
}
ty::ReInfer(_) => {
// these should not crop up after typeck
cx.diag.handler().bug("Cannot encode region variables");
}
}
}
fn enc_bound_region(w: &mut MemWriter, cx: @ctxt, br: ty::BoundRegion) {
match br {
ty::BrAnon(idx) => {
mywrite!(w, "a{}|", idx);
}
ty::BrNamed(d, s) => {
mywrite!(w, "[{}|{}]",
(cx.ds)(d),
cx.tcx.sess.str_of(s));
}
ty::BrFresh(id) => {
mywrite!(w, "f{}|", id);
}
}
}
pub fn enc_vstore(w: &mut MemWriter, cx: @ctxt, v: ty::vstore) {
mywrite!(w, "/");
match v {
ty::vstore_fixed(u) => mywrite!(w, "{}|", u),
ty::vstore_uniq => mywrite!(w, "~"),
ty::vstore_box => mywrite!(w, "@"),
ty::vstore_slice(r) => {
mywrite!(w, "&");
enc_region(w, cx, r);
}
}
}
pub fn enc_trait_ref(w: &mut MemWriter, cx: @ctxt, s: &ty::TraitRef) {
mywrite!(w, "{}|", (cx.ds)(s.def_id));
enc_substs(w, cx, &s.substs);
}
pub fn enc_trait_store(w: &mut MemWriter, cx: @ctxt, s: ty::TraitStore) {
match s {
ty::UniqTraitStore => mywrite!(w, "~"),
ty::BoxTraitStore => mywrite!(w, "@"),
ty::RegionTraitStore(re) => {
mywrite!(w, "&");
enc_region(w, cx, re);
}
}
}
fn enc_sty(w: &mut MemWriter, cx: @ctxt, st: &ty::sty) {
match *st {
ty::ty_nil => mywrite!(w, "n"),
ty::ty_bot => mywrite!(w, "z"),
ty::ty_bool => mywrite!(w, "b"),
ty::ty_char => mywrite!(w, "c"),
ty::ty_int(t) => {
match t {
TyI => mywrite!(w, "i"),
TyI8 => mywrite!(w, "MB"),
TyI16 => mywrite!(w, "MW"),
TyI32 => mywrite!(w, "ML"),
TyI64 => mywrite!(w, "MD")
}
}
ty::ty_uint(t) => {
match t {
TyU => mywrite!(w, "u"),
TyU8 => mywrite!(w, "Mb"),
TyU16 => mywrite!(w, "Mw"),
TyU32 => mywrite!(w, "Ml"),
TyU64 => mywrite!(w, "Md")
}
}
ty::ty_float(t) => {
match t {
TyF32 => mywrite!(w, "Mf"),
TyF64 => mywrite!(w, "MF"),
}
}
ty::ty_enum(def, ref substs) => {
mywrite!(w, "t[{}|", (cx.ds)(def));
enc_substs(w, cx, substs);
mywrite!(w, "]");
}
ty::ty_trait(def, ref substs, store, mt, bounds) => {
mywrite!(w, "x[{}|", (cx.ds)(def));
enc_substs(w, cx, substs);
enc_trait_store(w, cx, store);
enc_mutability(w, mt);
let bounds = ty::ParamBounds {builtin_bounds: bounds,
trait_bounds: ~[]};
enc_bounds(w, cx, &bounds);
mywrite!(w, "]");
}
ty::ty_tup(ref ts) => {
mywrite!(w, "T[");
for t in ts.iter() { enc_ty(w, cx, *t); }
mywrite!(w, "]");
}
ty::ty_box(typ) => { mywrite!(w, "@"); enc_ty(w, cx, typ); }
ty::ty_uniq(typ) => { mywrite!(w, "~"); enc_ty(w, cx, typ); }
ty::ty_ptr(mt) => { mywrite!(w, "*"); enc_mt(w, cx, mt); }
ty::ty_rptr(r, mt) => {
mywrite!(w, "&");
enc_region(w, cx, r); | enc_mt(w, cx, mt);
} | random_line_split |
|
tyencode.rs | (w: &mut MemWriter, fmt: &fmt::Arguments) {
fmt::write(&mut *w as &mut io::Writer, fmt);
}
pub fn enc_ty(w: &mut MemWriter, cx: @ctxt, t: ty::t) {
match cx.abbrevs {
ac_no_abbrevs => {
let result_str_opt;
{
let short_names_cache = cx.tcx.short_names_cache.borrow();
result_str_opt = short_names_cache.get()
.find(&t)
.map(|result| *result);
}
let result_str = match result_str_opt {
Some(s) => s,
None => {
let wr = &mut MemWriter::new();
enc_sty(wr, cx, &ty::get(t).sty);
let s = str::from_utf8(wr.get_ref()).to_managed();
let mut short_names_cache = cx.tcx
.short_names_cache
.borrow_mut();
short_names_cache.get().insert(t, s);
s
}
};
w.write(result_str.as_bytes());
}
ac_use_abbrevs(abbrevs) => {
{
let mut abbrevs = abbrevs.borrow_mut();
match abbrevs.get().find(&t) {
Some(a) => { w.write(a.s.as_bytes()); return; }
None => {}
}
}
let pos = w.tell();
enc_sty(w, cx, &ty::get(t).sty);
let end = w.tell();
let len = end - pos;
fn estimate_sz(u: u64) -> u64 {
let mut n = u;
let mut len = 0;
while n != 0 { len += 1; n = n >> 4; }
return len;
}
let abbrev_len = 3 + estimate_sz(pos) + estimate_sz(len);
if abbrev_len < len {
// I.e. it's actually an abbreviation.
let s = format!("\\#{:x}:{:x}\\#", pos, len).to_managed();
let a = ty_abbrev { pos: pos as uint,
len: len as uint,
s: s };
{
let mut abbrevs = abbrevs.borrow_mut();
abbrevs.get().insert(t, a);
}
}
return;
}
}
}
fn enc_mutability(w: &mut MemWriter, mt: ast::Mutability) {
match mt {
MutImmutable => (),
MutMutable => mywrite!(w, "m"),
}
}
fn enc_mt(w: &mut MemWriter, cx: @ctxt, mt: ty::mt) {
enc_mutability(w, mt.mutbl);
enc_ty(w, cx, mt.ty);
}
fn enc_opt<T>(w: &mut MemWriter, t: Option<T>, enc_f: |&mut MemWriter, T|) {
match t {
None => mywrite!(w, "n"),
Some(v) => {
mywrite!(w, "s");
enc_f(w, v);
}
}
}
pub fn enc_substs(w: &mut MemWriter, cx: @ctxt, substs: &ty::substs) {
enc_region_substs(w, cx, &substs.regions);
enc_opt(w, substs.self_ty, |w, t| enc_ty(w, cx, t));
mywrite!(w, "[");
for t in substs.tps.iter() { enc_ty(w, cx, *t); }
mywrite!(w, "]");
}
fn enc_region_substs(w: &mut MemWriter, cx: @ctxt, substs: &ty::RegionSubsts) {
match *substs {
ty::ErasedRegions => {
mywrite!(w, "e");
}
ty::NonerasedRegions(ref regions) => {
mywrite!(w, "n");
for &r in regions.iter() {
enc_region(w, cx, r);
}
mywrite!(w, ".");
}
}
}
fn enc_region(w: &mut MemWriter, cx: @ctxt, r: ty::Region) {
match r {
ty::ReLateBound(id, br) => {
mywrite!(w, "b[{}|", id);
enc_bound_region(w, cx, br);
mywrite!(w, "]");
}
ty::ReEarlyBound(node_id, index, ident) => {
mywrite!(w, "B[{}|{}|{}]",
node_id,
index,
cx.tcx.sess.str_of(ident));
}
ty::ReFree(ref fr) => {
mywrite!(w, "f[{}|", fr.scope_id);
enc_bound_region(w, cx, fr.bound_region);
mywrite!(w, "]");
}
ty::ReScope(nid) => {
mywrite!(w, "s{}|", nid);
}
ty::ReStatic => {
mywrite!(w, "t");
}
ty::ReEmpty => {
mywrite!(w, "e");
}
ty::ReInfer(_) => {
// these should not crop up after typeck
cx.diag.handler().bug("Cannot encode region variables");
}
}
}
fn enc_bound_region(w: &mut MemWriter, cx: @ctxt, br: ty::BoundRegion) {
match br {
ty::BrAnon(idx) => {
mywrite!(w, "a{}|", idx);
}
ty::BrNamed(d, s) => {
mywrite!(w, "[{}|{}]",
(cx.ds)(d),
cx.tcx.sess.str_of(s));
}
ty::BrFresh(id) => {
mywrite!(w, "f{}|", id);
}
}
}
pub fn enc_vstore(w: &mut MemWriter, cx: @ctxt, v: ty::vstore) {
mywrite!(w, "/");
match v {
ty::vstore_fixed(u) => mywrite!(w, "{}|", u),
ty::vstore_uniq => mywrite!(w, "~"),
ty::vstore_box => mywrite!(w, "@"),
ty::vstore_slice(r) => {
mywrite!(w, "&");
enc_region(w, cx, r);
}
}
}
pub fn enc_trait_ref(w: &mut MemWriter, cx: @ctxt, s: &ty::TraitRef) {
mywrite!(w, "{}|", (cx.ds)(s.def_id));
enc_substs(w, cx, &s.substs);
}
pub fn enc_trait_store(w: &mut MemWriter, cx: @ctxt, s: ty::TraitStore) {
match s {
ty::UniqTraitStore => mywrite!(w, "~"),
ty::BoxTraitStore => mywrite!(w, "@"),
ty::RegionTraitStore(re) => {
mywrite!(w, "&");
enc_region(w, cx, re);
}
}
}
fn enc_sty(w: &mut MemWriter, cx: @ctxt, st: &ty::sty) {
match *st {
ty::ty_nil => mywrite!(w, "n"),
ty::ty_bot => mywrite!(w, "z"),
ty::ty_bool => mywrite!(w, "b"),
ty::ty_char => mywrite!(w, "c"),
ty::ty_int(t) => {
match t {
TyI => mywrite!(w, "i"),
TyI8 => mywrite!(w, "MB"),
TyI16 => mywrite!(w, "MW"),
TyI32 => mywrite!(w, "ML"),
TyI64 => mywrite!(w, "MD")
}
}
ty::ty_uint(t) => {
match t {
TyU => mywrite!(w, "u"),
TyU8 => mywrite!(w, "Mb"),
TyU16 => mywrite!(w, "Mw"),
TyU32 => mywrite!(w, "Ml"),
TyU64 => mywrite!(w, "Md")
}
}
ty::ty_float(t) => {
match t {
TyF32 => mywrite!(w, "Mf"),
TyF64 => mywrite!(w, "MF"),
}
}
ty::ty_enum(def, ref substs) => {
mywrite!(w, "t[{}|", (cx.ds)(def));
enc_substs(w, cx, substs);
mywrite!(w, "]");
}
ty::ty_trait(def, ref substs, store, mt, bounds) => {
mywrite!(w, "x[{}|", (cx.ds)(def));
enc_substs(w, cx, substs);
enc_trait_store(w, cx, store);
enc_mutability(w, mt);
let bounds = ty::ParamBounds {builtin_bounds: bounds,
trait_bounds: ~[]};
enc_bounds(w, cx, &bounds);
mywrite!(w, "]");
}
ty::ty_tup(ref ts) => {
mywrite!(w, "T[");
for t in ts.iter() { enc_ty(w, cx, *t); }
mywrite!(w, "]");
}
ty::ty_box(typ) => { mywrite!(w, "@"); | mywrite | identifier_name |
|
tyencode.rs | short_names_cache = cx.tcx.short_names_cache.borrow();
result_str_opt = short_names_cache.get()
.find(&t)
.map(|result| *result);
}
let result_str = match result_str_opt {
Some(s) => s,
None => {
let wr = &mut MemWriter::new();
enc_sty(wr, cx, &ty::get(t).sty);
let s = str::from_utf8(wr.get_ref()).to_managed();
let mut short_names_cache = cx.tcx
.short_names_cache
.borrow_mut();
short_names_cache.get().insert(t, s);
s
}
};
w.write(result_str.as_bytes());
}
ac_use_abbrevs(abbrevs) => {
{
let mut abbrevs = abbrevs.borrow_mut();
match abbrevs.get().find(&t) {
Some(a) => { w.write(a.s.as_bytes()); return; }
None => {}
}
}
let pos = w.tell();
enc_sty(w, cx, &ty::get(t).sty);
let end = w.tell();
let len = end - pos;
fn estimate_sz(u: u64) -> u64 {
let mut n = u;
let mut len = 0;
while n != 0 { len += 1; n = n >> 4; }
return len;
}
let abbrev_len = 3 + estimate_sz(pos) + estimate_sz(len);
if abbrev_len < len {
// I.e. it's actually an abbreviation.
let s = format!("\\#{:x}:{:x}\\#", pos, len).to_managed();
let a = ty_abbrev { pos: pos as uint,
len: len as uint,
s: s };
{
let mut abbrevs = abbrevs.borrow_mut();
abbrevs.get().insert(t, a);
}
}
return;
}
}
}
fn enc_mutability(w: &mut MemWriter, mt: ast::Mutability) {
match mt {
MutImmutable => (),
MutMutable => mywrite!(w, "m"),
}
}
fn enc_mt(w: &mut MemWriter, cx: @ctxt, mt: ty::mt) {
enc_mutability(w, mt.mutbl);
enc_ty(w, cx, mt.ty);
}
fn enc_opt<T>(w: &mut MemWriter, t: Option<T>, enc_f: |&mut MemWriter, T|) {
match t {
None => mywrite!(w, "n"),
Some(v) => {
mywrite!(w, "s");
enc_f(w, v);
}
}
}
pub fn enc_substs(w: &mut MemWriter, cx: @ctxt, substs: &ty::substs) {
enc_region_substs(w, cx, &substs.regions);
enc_opt(w, substs.self_ty, |w, t| enc_ty(w, cx, t));
mywrite!(w, "[");
for t in substs.tps.iter() { enc_ty(w, cx, *t); }
mywrite!(w, "]");
}
fn enc_region_substs(w: &mut MemWriter, cx: @ctxt, substs: &ty::RegionSubsts) {
match *substs {
ty::ErasedRegions => {
mywrite!(w, "e");
}
ty::NonerasedRegions(ref regions) => {
mywrite!(w, "n");
for &r in regions.iter() {
enc_region(w, cx, r);
}
mywrite!(w, ".");
}
}
}
fn enc_region(w: &mut MemWriter, cx: @ctxt, r: ty::Region) {
match r {
ty::ReLateBound(id, br) => {
mywrite!(w, "b[{}|", id);
enc_bound_region(w, cx, br);
mywrite!(w, "]");
}
ty::ReEarlyBound(node_id, index, ident) => {
mywrite!(w, "B[{}|{}|{}]",
node_id,
index,
cx.tcx.sess.str_of(ident));
}
ty::ReFree(ref fr) => {
mywrite!(w, "f[{}|", fr.scope_id);
enc_bound_region(w, cx, fr.bound_region);
mywrite!(w, "]");
}
ty::ReScope(nid) => {
mywrite!(w, "s{}|", nid);
}
ty::ReStatic => {
mywrite!(w, "t");
}
ty::ReEmpty => {
mywrite!(w, "e");
}
ty::ReInfer(_) => {
// these should not crop up after typeck
cx.diag.handler().bug("Cannot encode region variables");
}
}
}
fn enc_bound_region(w: &mut MemWriter, cx: @ctxt, br: ty::BoundRegion) {
match br {
ty::BrAnon(idx) => {
mywrite!(w, "a{}|", idx);
}
ty::BrNamed(d, s) => |
ty::BrFresh(id) => {
mywrite!(w, "f{}|", id);
}
}
}
pub fn enc_vstore(w: &mut MemWriter, cx: @ctxt, v: ty::vstore) {
mywrite!(w, "/");
match v {
ty::vstore_fixed(u) => mywrite!(w, "{}|", u),
ty::vstore_uniq => mywrite!(w, "~"),
ty::vstore_box => mywrite!(w, "@"),
ty::vstore_slice(r) => {
mywrite!(w, "&");
enc_region(w, cx, r);
}
}
}
pub fn enc_trait_ref(w: &mut MemWriter, cx: @ctxt, s: &ty::TraitRef) {
mywrite!(w, "{}|", (cx.ds)(s.def_id));
enc_substs(w, cx, &s.substs);
}
pub fn enc_trait_store(w: &mut MemWriter, cx: @ctxt, s: ty::TraitStore) {
match s {
ty::UniqTraitStore => mywrite!(w, "~"),
ty::BoxTraitStore => mywrite!(w, "@"),
ty::RegionTraitStore(re) => {
mywrite!(w, "&");
enc_region(w, cx, re);
}
}
}
fn enc_sty(w: &mut MemWriter, cx: @ctxt, st: &ty::sty) {
match *st {
ty::ty_nil => mywrite!(w, "n"),
ty::ty_bot => mywrite!(w, "z"),
ty::ty_bool => mywrite!(w, "b"),
ty::ty_char => mywrite!(w, "c"),
ty::ty_int(t) => {
match t {
TyI => mywrite!(w, "i"),
TyI8 => mywrite!(w, "MB"),
TyI16 => mywrite!(w, "MW"),
TyI32 => mywrite!(w, "ML"),
TyI64 => mywrite!(w, "MD")
}
}
ty::ty_uint(t) => {
match t {
TyU => mywrite!(w, "u"),
TyU8 => mywrite!(w, "Mb"),
TyU16 => mywrite!(w, "Mw"),
TyU32 => mywrite!(w, "Ml"),
TyU64 => mywrite!(w, "Md")
}
}
ty::ty_float(t) => {
match t {
TyF32 => mywrite!(w, "Mf"),
TyF64 => mywrite!(w, "MF"),
}
}
ty::ty_enum(def, ref substs) => {
mywrite!(w, "t[{}|", (cx.ds)(def));
enc_substs(w, cx, substs);
mywrite!(w, "]");
}
ty::ty_trait(def, ref substs, store, mt, bounds) => {
mywrite!(w, "x[{}|", (cx.ds)(def));
enc_substs(w, cx, substs);
enc_trait_store(w, cx, store);
enc_mutability(w, mt);
let bounds = ty::ParamBounds {builtin_bounds: bounds,
trait_bounds: ~[]};
enc_bounds(w, cx, &bounds);
mywrite!(w, "]");
}
ty::ty_tup(ref ts) => {
mywrite!(w, "T[");
for t in ts.iter() { enc_ty(w, cx, *t); }
mywrite!(w, "]");
}
ty::ty_box(typ) => { mywrite!(w, "@"); enc_ty(w, cx, typ); }
ty::ty_uniq(typ) => { mywrite!(w, "~"); enc_ty(w, cx, typ); }
ty::ty_ptr(mt) => { mywrite!(w, "*"); enc_mt(w, cx, mt); }
ty::ty_rptr(r, mt) => {
mywrite!(w, "&");
| {
mywrite!(w, "[{}|{}]",
(cx.ds)(d),
cx.tcx.sess.str_of(s));
} | conditional_block |
tyencode.rs | short_names_cache = cx.tcx.short_names_cache.borrow();
result_str_opt = short_names_cache.get()
.find(&t)
.map(|result| *result);
}
let result_str = match result_str_opt {
Some(s) => s,
None => {
let wr = &mut MemWriter::new();
enc_sty(wr, cx, &ty::get(t).sty);
let s = str::from_utf8(wr.get_ref()).to_managed();
let mut short_names_cache = cx.tcx
.short_names_cache
.borrow_mut();
short_names_cache.get().insert(t, s);
s
}
};
w.write(result_str.as_bytes());
}
ac_use_abbrevs(abbrevs) => {
{
let mut abbrevs = abbrevs.borrow_mut();
match abbrevs.get().find(&t) {
Some(a) => { w.write(a.s.as_bytes()); return; }
None => {}
}
}
let pos = w.tell();
enc_sty(w, cx, &ty::get(t).sty);
let end = w.tell();
let len = end - pos;
fn estimate_sz(u: u64) -> u64 {
let mut n = u;
let mut len = 0;
while n != 0 { len += 1; n = n >> 4; }
return len;
}
let abbrev_len = 3 + estimate_sz(pos) + estimate_sz(len);
if abbrev_len < len {
// I.e. it's actually an abbreviation.
let s = format!("\\#{:x}:{:x}\\#", pos, len).to_managed();
let a = ty_abbrev { pos: pos as uint,
len: len as uint,
s: s };
{
let mut abbrevs = abbrevs.borrow_mut();
abbrevs.get().insert(t, a);
}
}
return;
}
}
}
fn enc_mutability(w: &mut MemWriter, mt: ast::Mutability) {
match mt {
MutImmutable => (),
MutMutable => mywrite!(w, "m"),
}
}
fn enc_mt(w: &mut MemWriter, cx: @ctxt, mt: ty::mt) {
enc_mutability(w, mt.mutbl);
enc_ty(w, cx, mt.ty);
}
fn enc_opt<T>(w: &mut MemWriter, t: Option<T>, enc_f: |&mut MemWriter, T|) {
match t {
None => mywrite!(w, "n"),
Some(v) => {
mywrite!(w, "s");
enc_f(w, v);
}
}
}
pub fn enc_substs(w: &mut MemWriter, cx: @ctxt, substs: &ty::substs) |
fn enc_region_substs(w: &mut MemWriter, cx: @ctxt, substs: &ty::RegionSubsts) {
match *substs {
ty::ErasedRegions => {
mywrite!(w, "e");
}
ty::NonerasedRegions(ref regions) => {
mywrite!(w, "n");
for &r in regions.iter() {
enc_region(w, cx, r);
}
mywrite!(w, ".");
}
}
}
fn enc_region(w: &mut MemWriter, cx: @ctxt, r: ty::Region) {
match r {
ty::ReLateBound(id, br) => {
mywrite!(w, "b[{}|", id);
enc_bound_region(w, cx, br);
mywrite!(w, "]");
}
ty::ReEarlyBound(node_id, index, ident) => {
mywrite!(w, "B[{}|{}|{}]",
node_id,
index,
cx.tcx.sess.str_of(ident));
}
ty::ReFree(ref fr) => {
mywrite!(w, "f[{}|", fr.scope_id);
enc_bound_region(w, cx, fr.bound_region);
mywrite!(w, "]");
}
ty::ReScope(nid) => {
mywrite!(w, "s{}|", nid);
}
ty::ReStatic => {
mywrite!(w, "t");
}
ty::ReEmpty => {
mywrite!(w, "e");
}
ty::ReInfer(_) => {
// these should not crop up after typeck
cx.diag.handler().bug("Cannot encode region variables");
}
}
}
fn enc_bound_region(w: &mut MemWriter, cx: @ctxt, br: ty::BoundRegion) {
match br {
ty::BrAnon(idx) => {
mywrite!(w, "a{}|", idx);
}
ty::BrNamed(d, s) => {
mywrite!(w, "[{}|{}]",
(cx.ds)(d),
cx.tcx.sess.str_of(s));
}
ty::BrFresh(id) => {
mywrite!(w, "f{}|", id);
}
}
}
pub fn enc_vstore(w: &mut MemWriter, cx: @ctxt, v: ty::vstore) {
mywrite!(w, "/");
match v {
ty::vstore_fixed(u) => mywrite!(w, "{}|", u),
ty::vstore_uniq => mywrite!(w, "~"),
ty::vstore_box => mywrite!(w, "@"),
ty::vstore_slice(r) => {
mywrite!(w, "&");
enc_region(w, cx, r);
}
}
}
pub fn enc_trait_ref(w: &mut MemWriter, cx: @ctxt, s: &ty::TraitRef) {
mywrite!(w, "{}|", (cx.ds)(s.def_id));
enc_substs(w, cx, &s.substs);
}
pub fn enc_trait_store(w: &mut MemWriter, cx: @ctxt, s: ty::TraitStore) {
match s {
ty::UniqTraitStore => mywrite!(w, "~"),
ty::BoxTraitStore => mywrite!(w, "@"),
ty::RegionTraitStore(re) => {
mywrite!(w, "&");
enc_region(w, cx, re);
}
}
}
fn enc_sty(w: &mut MemWriter, cx: @ctxt, st: &ty::sty) {
match *st {
ty::ty_nil => mywrite!(w, "n"),
ty::ty_bot => mywrite!(w, "z"),
ty::ty_bool => mywrite!(w, "b"),
ty::ty_char => mywrite!(w, "c"),
ty::ty_int(t) => {
match t {
TyI => mywrite!(w, "i"),
TyI8 => mywrite!(w, "MB"),
TyI16 => mywrite!(w, "MW"),
TyI32 => mywrite!(w, "ML"),
TyI64 => mywrite!(w, "MD")
}
}
ty::ty_uint(t) => {
match t {
TyU => mywrite!(w, "u"),
TyU8 => mywrite!(w, "Mb"),
TyU16 => mywrite!(w, "Mw"),
TyU32 => mywrite!(w, "Ml"),
TyU64 => mywrite!(w, "Md")
}
}
ty::ty_float(t) => {
match t {
TyF32 => mywrite!(w, "Mf"),
TyF64 => mywrite!(w, "MF"),
}
}
ty::ty_enum(def, ref substs) => {
mywrite!(w, "t[{}|", (cx.ds)(def));
enc_substs(w, cx, substs);
mywrite!(w, "]");
}
ty::ty_trait(def, ref substs, store, mt, bounds) => {
mywrite!(w, "x[{}|", (cx.ds)(def));
enc_substs(w, cx, substs);
enc_trait_store(w, cx, store);
enc_mutability(w, mt);
let bounds = ty::ParamBounds {builtin_bounds: bounds,
trait_bounds: ~[]};
enc_bounds(w, cx, &bounds);
mywrite!(w, "]");
}
ty::ty_tup(ref ts) => {
mywrite!(w, "T[");
for t in ts.iter() { enc_ty(w, cx, *t); }
mywrite!(w, "]");
}
ty::ty_box(typ) => { mywrite!(w, "@"); enc_ty(w, cx, typ); }
ty::ty_uniq(typ) => { mywrite!(w, "~"); enc_ty(w, cx, typ); }
ty::ty_ptr(mt) => { mywrite!(w, "*"); enc_mt(w, cx, mt); }
ty::ty_rptr(r, mt) => {
mywrite!(w, "&");
| {
enc_region_substs(w, cx, &substs.regions);
enc_opt(w, substs.self_ty, |w, t| enc_ty(w, cx, t));
mywrite!(w, "[");
for t in substs.tps.iter() { enc_ty(w, cx, *t); }
mywrite!(w, "]");
} | identifier_body |
note-service.ts | import {Injectable} from "@angular/core";
import {Http} from "@angular/http";
import {Observable} from "rxjs/Observable";
import {BaseService} from "./base-service";
import {Note} from "../classes/note";
import {Status} from "../classes/status";
@Injectable()
export class NoteService extends BaseService {
constructor(protected http: Http) {
super(http);
}
private noteUrl = "api/note/";
getAllNotes() : Observable<Note[]> {
return(this.http.get(this.noteUrl)
.map(this.extractData)
.catch(this.handleError));
} | .map(this.extractData)
.catch(this.handleError));
}
getNotesByNoteApplicationId(noteApplicationId: number) : Observable<Note[]> {
return(this.http.get(this.noteUrl + "?noteApplicationId=" + noteApplicationId)
.map(this.extractData)
.catch(this.handleError));
}
getNotesByNoteProspectId(noteProspectId: number) : Observable<Note[]> {
return(this.http.get(this.noteUrl + "?noteProspectId=" + noteProspectId)
.map(this.extractData)
.catch(this.handleError));
}
getNotesByNoteNoteTypeId(noteNoteTypeId: number) : Observable<Note[]> {
return(this.http.get(this.noteUrl + noteNoteTypeId)
.map(this.extractData)
.catch(this.handleError));
}
getNotesByBridgeStaffId(noteBridgeStaffId: string) : Observable<Note[]> {
return(this.http.get(this.noteUrl + noteBridgeStaffId)
.map(this.extractData)
.catch(this.handleError));
}
getNotesByNoteDateRange(startDate: string, endDate: string) : Observable<Note[]> {
return (this.http.get(this.noteUrl + startDate + endDate)
.map(this.extractData)
.catch(this.handleError));
}
createNote(note: Note) : Observable<Status> {
return(this.http.post(this.noteUrl, note)
.map(this.extractMessage)
.catch(this.handleError));
}
} |
getNoteByNoteId(noteId: number) : Observable<Note> {
return(this.http.get(this.noteUrl + noteId) | random_line_split |
note-service.ts | import {Injectable} from "@angular/core";
import {Http} from "@angular/http";
import {Observable} from "rxjs/Observable";
import {BaseService} from "./base-service";
import {Note} from "../classes/note";
import {Status} from "../classes/status";
@Injectable()
export class NoteService extends BaseService {
constructor(protected http: Http) {
super(http);
}
private noteUrl = "api/note/";
getAllNotes() : Observable<Note[]> {
return(this.http.get(this.noteUrl)
.map(this.extractData)
.catch(this.handleError));
}
| (noteId: number) : Observable<Note> {
return(this.http.get(this.noteUrl + noteId)
.map(this.extractData)
.catch(this.handleError));
}
getNotesByNoteApplicationId(noteApplicationId: number) : Observable<Note[]> {
return(this.http.get(this.noteUrl + "?noteApplicationId=" + noteApplicationId)
.map(this.extractData)
.catch(this.handleError));
}
getNotesByNoteProspectId(noteProspectId: number) : Observable<Note[]> {
return(this.http.get(this.noteUrl + "?noteProspectId=" + noteProspectId)
.map(this.extractData)
.catch(this.handleError));
}
getNotesByNoteNoteTypeId(noteNoteTypeId: number) : Observable<Note[]> {
return(this.http.get(this.noteUrl + noteNoteTypeId)
.map(this.extractData)
.catch(this.handleError));
}
getNotesByBridgeStaffId(noteBridgeStaffId: string) : Observable<Note[]> {
return(this.http.get(this.noteUrl + noteBridgeStaffId)
.map(this.extractData)
.catch(this.handleError));
}
getNotesByNoteDateRange(startDate: string, endDate: string) : Observable<Note[]> {
return (this.http.get(this.noteUrl + startDate + endDate)
.map(this.extractData)
.catch(this.handleError));
}
createNote(note: Note) : Observable<Status> {
return(this.http.post(this.noteUrl, note)
.map(this.extractMessage)
.catch(this.handleError));
}
} | getNoteByNoteId | identifier_name |
note-service.ts | import {Injectable} from "@angular/core";
import {Http} from "@angular/http";
import {Observable} from "rxjs/Observable";
import {BaseService} from "./base-service";
import {Note} from "../classes/note";
import {Status} from "../classes/status";
@Injectable()
export class NoteService extends BaseService {
constructor(protected http: Http) {
super(http);
}
private noteUrl = "api/note/";
getAllNotes() : Observable<Note[]> |
getNoteByNoteId(noteId: number) : Observable<Note> {
return(this.http.get(this.noteUrl + noteId)
.map(this.extractData)
.catch(this.handleError));
}
getNotesByNoteApplicationId(noteApplicationId: number) : Observable<Note[]> {
return(this.http.get(this.noteUrl + "?noteApplicationId=" + noteApplicationId)
.map(this.extractData)
.catch(this.handleError));
}
getNotesByNoteProspectId(noteProspectId: number) : Observable<Note[]> {
return(this.http.get(this.noteUrl + "?noteProspectId=" + noteProspectId)
.map(this.extractData)
.catch(this.handleError));
}
getNotesByNoteNoteTypeId(noteNoteTypeId: number) : Observable<Note[]> {
return(this.http.get(this.noteUrl + noteNoteTypeId)
.map(this.extractData)
.catch(this.handleError));
}
getNotesByBridgeStaffId(noteBridgeStaffId: string) : Observable<Note[]> {
return(this.http.get(this.noteUrl + noteBridgeStaffId)
.map(this.extractData)
.catch(this.handleError));
}
getNotesByNoteDateRange(startDate: string, endDate: string) : Observable<Note[]> {
return (this.http.get(this.noteUrl + startDate + endDate)
.map(this.extractData)
.catch(this.handleError));
}
createNote(note: Note) : Observable<Status> {
return(this.http.post(this.noteUrl, note)
.map(this.extractMessage)
.catch(this.handleError));
}
} | {
return(this.http.get(this.noteUrl)
.map(this.extractData)
.catch(this.handleError));
} | identifier_body |
id.rs | // Copyright 2018 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
use safe_nd::{AppFullId, ClientFullId, PublicId, PublicKey, Signature};
use std::sync::Arc;
/// An enum representing the Full Id variants for a Client or App.
#[derive(Clone)]
pub enum SafeKey {
/// Represents an application authorised by a client.
App(Arc<AppFullId>),
/// Represents a network client.
Client(Arc<ClientFullId>),
}
impl SafeKey {
/// Creates a client full ID.
pub fn | (full_id: ClientFullId) -> Self {
Self::Client(Arc::new(full_id))
}
/// Creates an app full ID.
pub fn app(full_id: AppFullId) -> Self {
Self::App(Arc::new(full_id))
}
/// Signs a given message using the App / Client full id as required.
pub fn sign(&self, msg: &[u8]) -> Signature {
match self {
Self::App(app_full_id) => app_full_id.sign(msg),
Self::Client(client_full_id) => client_full_id.sign(msg),
}
}
/// Returns a corresponding public ID.
pub fn public_id(&self) -> PublicId {
match self {
Self::App(app_full_id) => PublicId::App(app_full_id.public_id().clone()),
Self::Client(client_full_id) => PublicId::Client(client_full_id.public_id().clone()),
}
}
/// Returns a corresponding public key.
pub fn public_key(&self) -> PublicKey {
match self {
Self::App(app_full_id) => *app_full_id.public_id().public_key(),
Self::Client(client_full_id) => *client_full_id.public_id().public_key(),
}
}
}
| client | identifier_name |
id.rs | // Copyright 2018 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
use safe_nd::{AppFullId, ClientFullId, PublicId, PublicKey, Signature};
use std::sync::Arc;
/// An enum representing the Full Id variants for a Client or App.
#[derive(Clone)]
pub enum SafeKey {
/// Represents an application authorised by a client.
App(Arc<AppFullId>),
/// Represents a network client.
Client(Arc<ClientFullId>),
}
impl SafeKey {
/// Creates a client full ID.
pub fn client(full_id: ClientFullId) -> Self {
Self::Client(Arc::new(full_id))
}
/// Creates an app full ID.
pub fn app(full_id: AppFullId) -> Self {
Self::App(Arc::new(full_id))
}
/// Signs a given message using the App / Client full id as required.
pub fn sign(&self, msg: &[u8]) -> Signature { | }
}
/// Returns a corresponding public ID.
pub fn public_id(&self) -> PublicId {
match self {
Self::App(app_full_id) => PublicId::App(app_full_id.public_id().clone()),
Self::Client(client_full_id) => PublicId::Client(client_full_id.public_id().clone()),
}
}
/// Returns a corresponding public key.
pub fn public_key(&self) -> PublicKey {
match self {
Self::App(app_full_id) => *app_full_id.public_id().public_key(),
Self::Client(client_full_id) => *client_full_id.public_id().public_key(),
}
}
} | match self {
Self::App(app_full_id) => app_full_id.sign(msg),
Self::Client(client_full_id) => client_full_id.sign(msg), | random_line_split |
id.rs | // Copyright 2018 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
use safe_nd::{AppFullId, ClientFullId, PublicId, PublicKey, Signature};
use std::sync::Arc;
/// An enum representing the Full Id variants for a Client or App.
#[derive(Clone)]
pub enum SafeKey {
/// Represents an application authorised by a client.
App(Arc<AppFullId>),
/// Represents a network client.
Client(Arc<ClientFullId>),
}
impl SafeKey {
/// Creates a client full ID.
pub fn client(full_id: ClientFullId) -> Self |
/// Creates an app full ID.
pub fn app(full_id: AppFullId) -> Self {
Self::App(Arc::new(full_id))
}
/// Signs a given message using the App / Client full id as required.
pub fn sign(&self, msg: &[u8]) -> Signature {
match self {
Self::App(app_full_id) => app_full_id.sign(msg),
Self::Client(client_full_id) => client_full_id.sign(msg),
}
}
/// Returns a corresponding public ID.
pub fn public_id(&self) -> PublicId {
match self {
Self::App(app_full_id) => PublicId::App(app_full_id.public_id().clone()),
Self::Client(client_full_id) => PublicId::Client(client_full_id.public_id().clone()),
}
}
/// Returns a corresponding public key.
pub fn public_key(&self) -> PublicKey {
match self {
Self::App(app_full_id) => *app_full_id.public_id().public_key(),
Self::Client(client_full_id) => *client_full_id.public_id().public_key(),
}
}
}
| {
Self::Client(Arc::new(full_id))
} | identifier_body |
pkgid.rs | use cargo::ops;
use cargo::util::{CliResult, CliError, Config};
use cargo::util::important_paths::{find_root_manifest_for_cwd};
#[derive(RustcDecodable)]
struct Options {
flag_verbose: bool,
flag_quiet: bool,
flag_manifest_path: Option<String>,
arg_spec: Option<String>,
}
pub const USAGE: &'static str = "
Print a fully qualified package specification
Usage:
cargo pkgid [options] [<spec>]
Options:
-h, --help Print this message
--manifest-path PATH Path to the manifest to the package to clean
-v, --verbose Use verbose output
-q, --quiet No output printed to stdout
Given a <spec> argument, print out the fully qualified package id specifier.
This command will generate an error if <spec> is ambiguous as to which package
it refers to in the dependency graph. If no <spec> is given, then the pkgid for
the local package is printed.
This command requires that a lockfile is available and dependencies have been
fetched.
Example Package IDs
pkgid | name | version | url
|-----------------------------|--------|-----------|---------------------|
foo | foo | * | *
foo:1.2.3 | foo | 1.2.3 | *
crates.io/foo | foo | * | *://crates.io/foo
crates.io/foo#1.2.3 | foo | 1.2.3 | *://crates.io/foo
crates.io/bar#foo:1.2.3 | foo | 1.2.3 | *://crates.io/bar
http://crates.io/foo#1.2.3 | foo | 1.2.3 | http://crates.io/foo
";
pub fn | (options: Options,
config: &Config) -> CliResult<Option<()>> {
try!(config.shell().set_verbosity(options.flag_verbose, options.flag_quiet));
let root = try!(find_root_manifest_for_cwd(options.flag_manifest_path.clone()));
let spec = options.arg_spec.as_ref().map(|s| &s[..]);
let spec = try!(ops::pkgid(&root, spec, config).map_err(|err| {
CliError::from_boxed(err, 101)
}));
println!("{}", spec);
Ok(None)
}
| execute | identifier_name |
pkgid.rs | use cargo::ops;
use cargo::util::{CliResult, CliError, Config};
use cargo::util::important_paths::{find_root_manifest_for_cwd};
#[derive(RustcDecodable)]
struct Options {
flag_verbose: bool,
flag_quiet: bool,
flag_manifest_path: Option<String>,
arg_spec: Option<String>,
}
pub const USAGE: &'static str = "
Print a fully qualified package specification
Usage:
cargo pkgid [options] [<spec>]
Options:
-h, --help Print this message
--manifest-path PATH Path to the manifest to the package to clean
-v, --verbose Use verbose output
-q, --quiet No output printed to stdout
Given a <spec> argument, print out the fully qualified package id specifier.
This command will generate an error if <spec> is ambiguous as to which package
it refers to in the dependency graph. If no <spec> is given, then the pkgid for
the local package is printed.
This command requires that a lockfile is available and dependencies have been
fetched.
Example Package IDs
pkgid | name | version | url
|-----------------------------|--------|-----------|---------------------|
foo | foo | * | * | crates.io/foo | foo | * | *://crates.io/foo
crates.io/foo#1.2.3 | foo | 1.2.3 | *://crates.io/foo
crates.io/bar#foo:1.2.3 | foo | 1.2.3 | *://crates.io/bar
http://crates.io/foo#1.2.3 | foo | 1.2.3 | http://crates.io/foo
";
pub fn execute(options: Options,
config: &Config) -> CliResult<Option<()>> {
try!(config.shell().set_verbosity(options.flag_verbose, options.flag_quiet));
let root = try!(find_root_manifest_for_cwd(options.flag_manifest_path.clone()));
let spec = options.arg_spec.as_ref().map(|s| &s[..]);
let spec = try!(ops::pkgid(&root, spec, config).map_err(|err| {
CliError::from_boxed(err, 101)
}));
println!("{}", spec);
Ok(None)
} | foo:1.2.3 | foo | 1.2.3 | * | random_line_split |
question_6.rs | pub fn compress(string: &str) -> String {
let mut character_count = 0;
let mut previous_char = string.chars().nth(0).unwrap(); // Starts at first char
let mut new_string_parts: Vec<String> = vec![];
for c in string.chars() {
if previous_char == c {
character_count = character_count + 1;
} else {
new_string_parts.push(previous_char.to_string());
new_string_parts.push(character_count.to_string());
character_count = 1;
}
previous_char = c;
}
new_string_parts.push(previous_char.to_string());
new_string_parts.push(character_count.to_string());
let new_string = new_string_parts.join("");
if string.len() <= new_string.len() {
return string.to_string();
} else |
}
#[test]
fn example_compress() {
assert_eq!(compress("aabcccccaaa"), "a2b1c5a3");
}
#[test]
fn compress_should_return_original_string_when_not_smaller() {
assert_eq!(compress("aa"), "aa");
}
#[test]
fn compress_should_return_original_string_when_not_smaller_with_larger_example() {
assert_eq!(compress("aabbccddeeffgg"), "aabbccddeeffgg");
}
#[test]
fn compress_should_return_original_string_when_compression_generates_larger_string() {
// if compress() had its way "abcdee" would be "a1b1c1d1e2"
assert_eq!(compress("abcdee"), "abcdee");
}
| {
return new_string_parts.join("");
} | conditional_block |
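compress() above is a simple run-length encoder that only keeps the encoded form when it is strictly shorter than the input. The same idea as a short Python sketch (an illustration of the algorithm, not a translation used anywhere in the original sources):
from itertools import groupby
def compress(s):
    # Run-length encode, e.g. "aabcccccaaa" -> "a2b1c5a3".
    encoded = "".join("%s%d" % (ch, len(list(run))) for ch, run in groupby(s))
    # Fall back to the original when the encoding is not strictly shorter.
    return s if len(s) <= len(encoded) else encoded
assert compress("aabcccccaaa") == "a2b1c5a3"
assert compress("aa") == "aa"
assert compress("abcdee") == "abcdee"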
question_6.rs | pub fn compress(string: &str) -> String {
let mut character_count = 0;
let mut previous_char = string.chars().nth(0).unwrap(); // Starts at first char
let mut new_string_parts: Vec<String> = vec![];
for c in string.chars() {
if previous_char == c {
character_count = character_count + 1;
} else {
new_string_parts.push(previous_char.to_string());
new_string_parts.push(character_count.to_string());
character_count = 1;
}
previous_char = c;
}
new_string_parts.push(previous_char.to_string());
new_string_parts.push(character_count.to_string());
let new_string = new_string_parts.join("");
if string.len() <= new_string.len() {
return string.to_string();
} else {
return new_string_parts.join("");
}
}
#[test]
fn example_compress() {
assert_eq!(compress("aabcccccaaa"), "a2b1c5a3");
}
#[test]
fn | () {
assert_eq!(compress("aa"), "aa");
}
#[test]
fn compress_should_return_original_string_when_not_smaller_with_larger_example() {
assert_eq!(compress("aabbccddeeffgg"), "aabbccddeeffgg");
}
#[test]
fn compress_should_return_original_string_when_compression_generates_larger_string() {
// if compress() had its way "abcdee" would be "a1b1c1d1e2"
assert_eq!(compress("abcdee"), "abcdee");
}
| compress_should_return_original_string_when_not_smaller | identifier_name |
question_6.rs | pub fn compress(string: &str) -> String {
let mut character_count = 0;
let mut previous_char = string.chars().nth(0).unwrap(); // Starts at first char
let mut new_string_parts: Vec<String> = vec![];
for c in string.chars() {
if previous_char == c {
character_count = character_count + 1;
} else {
new_string_parts.push(previous_char.to_string());
new_string_parts.push(character_count.to_string());
character_count = 1; | previous_char = c;
}
new_string_parts.push(previous_char.to_string());
new_string_parts.push(character_count.to_string());
let new_string = new_string_parts.join("");
if string.len() <= new_string.len() {
return string.to_string();
} else {
return new_string_parts.join("");
}
}
#[test]
fn example_compress() {
assert_eq!(compress("aabcccccaaa"), "a2b1c5a3");
}
#[test]
fn compress_should_return_original_string_when_not_smaller() {
assert_eq!(compress("aa"), "aa");
}
#[test]
fn compress_should_return_original_string_when_not_smaller_with_larger_example() {
assert_eq!(compress("aabbccddeeffgg"), "aabbccddeeffgg");
}
#[test]
fn compress_should_return_original_string_when_compression_generates_larger_string() {
// if compress() had its way "abcdee" would be "a1b1c1d1e2"
assert_eq!(compress("abcdee"), "abcdee");
} | } | random_line_split |
question_6.rs | pub fn compress(string: &str) -> String {
let mut character_count = 0;
let mut previous_char = string.chars().nth(0).unwrap(); // Starts at first char
let mut new_string_parts: Vec<String> = vec![];
for c in string.chars() {
if previous_char == c {
character_count = character_count + 1;
} else {
new_string_parts.push(previous_char.to_string());
new_string_parts.push(character_count.to_string());
character_count = 1;
}
previous_char = c;
}
new_string_parts.push(previous_char.to_string());
new_string_parts.push(character_count.to_string());
let new_string = new_string_parts.join("");
if string.len() <= new_string.len() {
return string.to_string();
} else {
return new_string_parts.join("");
}
}
#[test]
fn example_compress() |
#[test]
fn compress_should_return_original_string_when_not_smaller() {
assert_eq!(compress("aa"), "aa");
}
#[test]
fn compress_should_return_original_string_when_not_smaller_with_larger_example() {
assert_eq!(compress("aabbccddeeffgg"), "aabbccddeeffgg");
}
#[test]
fn compress_should_return_original_string_when_compression_generates_larger_string() {
// if compress() had its way "abcdee" would be "a1b1c1d1e2"
assert_eq!(compress("abcdee"), "abcdee");
}
| {
assert_eq!(compress("aabcccccaaa"), "a2b1c5a3");
} | identifier_body |
app.component.ts | import { Component } from '@angular/core';
import { Product } from './product.model';
/**
* @InventoryApp: the top-level component for our application
*/
@Component({
selector: 'inventory-app-root',
templateUrl: './app.component.html'
})
export class AppComponent {
products: Product[];
| 'MYSHOES',
'Black Running Shoes',
'/assets/images/products/black-shoes.jpg',
['Men', 'Shoes', 'Running Shoes'],
109.99),
new Product(
'NEATOJACKET',
'Blue Jacket',
'/assets/images/products/blue-jacket.jpg',
['Women', 'Apparel', 'Jackets & Vests'],
238.99),
new Product(
'NICEHAT',
'A Nice Black Hat',
'/assets/images/products/black-hat.jpg',
['Men', 'Accessories', 'Hats'],
29.99)
];
}
productWasSelected(product: Product): void {
console.log('Product clicked: ', product);
}
} | constructor() {
this.products = [
new Product( | random_line_split |
app.component.ts | import { Component } from '@angular/core';
import { Product } from './product.model';
/**
* @InventoryApp: the top-level component for our application
*/
@Component({
selector: 'inventory-app-root',
templateUrl: './app.component.html'
})
export class AppComponent {
products: Product[];
constructor() {
this.products = [
new Product(
'MYSHOES',
'Black Running Shoes',
'/assets/images/products/black-shoes.jpg',
['Men', 'Shoes', 'Running Shoes'],
109.99),
new Product(
'NEATOJACKET',
'Blue Jacket',
'/assets/images/products/blue-jacket.jpg',
['Women', 'Apparel', 'Jackets & Vests'],
238.99),
new Product(
'NICEHAT',
'A Nice Black Hat',
'/assets/images/products/black-hat.jpg',
['Men', 'Accessories', 'Hats'],
29.99)
];
}
productWasSelected(product: Product): void |
}
| {
console.log('Product clicked: ', product);
} | identifier_body |
app.component.ts | import { Component } from '@angular/core';
import { Product } from './product.model';
/**
* @InventoryApp: the top-level component for our application
*/
@Component({
selector: 'inventory-app-root',
templateUrl: './app.component.html'
})
export class AppComponent {
products: Product[];
constructor() {
this.products = [
new Product(
'MYSHOES',
'Black Running Shoes',
'/assets/images/products/black-shoes.jpg',
['Men', 'Shoes', 'Running Shoes'],
109.99),
new Product(
'NEATOJACKET',
'Blue Jacket',
'/assets/images/products/blue-jacket.jpg',
['Women', 'Apparel', 'Jackets & Vests'],
238.99),
new Product(
'NICEHAT',
'A Nice Black Hat',
'/assets/images/products/black-hat.jpg',
['Men', 'Accessories', 'Hats'],
29.99)
];
}
| (product: Product): void {
console.log('Product clicked: ', product);
}
}
| productWasSelected | identifier_name |
html.py | d+);)')
word_split_re = re.compile(r'(\s+)')
simple_url_re = re.compile(r'^https?://\[?\w', re.IGNORECASE)
simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)$', re.IGNORECASE)
simple_email_re = re.compile(r'^\S+@\S+\.\S+$')
link_target_attribute_re = re.compile(r'(<a [^>]*?)target=[^\s>]+')
html_gunk_re = re.compile(r'(?:<br clear="all">|<i><\/i>|<b><\/b>|<em><\/em>|<strong><\/strong>|<\/?smallcaps>|<\/?uppercase>)', re.IGNORECASE)
hard_coded_bullets_re = re.compile(r'((?:<p>(?:%s).*?[a-zA-Z].*?</p>\s*)+)' % '|'.join(re.escape(x) for x in DOTS), re.DOTALL)
trailing_empty_content_re = re.compile(r'(?:<p>(?: |\s|<br \/>)*?</p>\s*)+\Z')
def escape(text):
"""
Returns the given text with ampersands, quotes and angle brackets encoded for use in HTML.
"""
return mark_safe(force_text(text).replace('&', '&').replace('<', '<').replace('>', '>').replace('"', '"').replace("'", '''))
escape = allow_lazy(escape, six.text_type)
_js_escapes = {
ord('\\'): '\\u005C',
ord('\''): '\\u0027',
ord('"'): '\\u0022',
ord('>'): '\\u003E',
ord('<'): '\\u003C',
ord('&'): '\\u0026',
ord('='): '\\u003D',
ord('-'): '\\u002D',
ord(';'): '\\u003B',
ord('\u2028'): '\\u2028',
ord('\u2029'): '\\u2029'
}
# Escape every ASCII character with a value less than 32.
_js_escapes.update((ord('%c' % z), '\\u%04X' % z) for z in range(32))
def escapejs(value):
"""Hex encodes characters for use in JavaScript strings."""
return mark_safe(force_text(value).translate(_js_escapes))
escapejs = allow_lazy(escapejs, six.text_type)
def conditional_escape(text):
"""
Similar to escape(), except that it doesn't operate on pre-escaped strings.
"""
if hasattr(text, '__html__'):
return text.__html__()
else:
return escape(text)
def format_html(format_string, *args, **kwargs):
"""
Similar to str.format, but passes all arguments through conditional_escape,
and calls 'mark_safe' on the result. This function should be used instead
of str.format or % interpolation to build up small HTML fragments.
"""
args_safe = map(conditional_escape, args)
kwargs_safe = dict((k, conditional_escape(v)) for (k, v) in six.iteritems(kwargs))
return mark_safe(format_string.format(*args_safe, **kwargs_safe))
def format_html_join(sep, format_string, args_generator):
"""
A wrapper of format_html, for the common case of a group of arguments that
need to be formatted using the same format string, and then joined using
'sep'. 'sep' is also passed through conditional_escape.
'args_generator' should be an iterator that returns the sequence of 'args'
that will be passed to format_html.
Example:
format_html_join('\n', "<li>{0} {1}</li>", ((u.first_name, u.last_name)
for u in users))
"""
return mark_safe(conditional_escape(sep).join(
format_html(format_string, *tuple(args))
for args in args_generator))
def linebreaks(value, autoescape=False):
"""Converts newlines into <p> and <br />s."""
value = normalize_newlines(value)
paras = re.split('\n{2,}', value)
if autoescape:
paras = ['<p>%s</p>' % escape(p).replace('\n', '<br />') for p in paras]
else:
paras = ['<p>%s</p>' % p.replace('\n', '<br />') for p in paras]
return '\n\n'.join(paras)
linebreaks = allow_lazy(linebreaks, six.text_type)
class MLStripper(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def handle_entityref(self, name):
self.fed.append('&%s;' % name)
def handle_charref(self, name):
self.fed.append('&#%s;' % name)
def get_data(self):
return ''.join(self.fed)
def strip_tags(value):
"""Returns the given HTML with all tags stripped."""
s = MLStripper()
try:
s.feed(value)
s.close()
except HTMLParseError:
return value
else:
return s.get_data()
strip_tags = allow_lazy(strip_tags)
def remove_tags(html, tags):
"""Returns the given HTML with given tags removed."""
tags = [re.escape(tag) for tag in tags.split()]
tags_re = '(%s)' % '|'.join(tags)
starttag_re = re.compile(r'<%s(/?>|(\s+[^>]*>))' % tags_re, re.U)
endtag_re = re.compile('</%s>' % tags_re)
html = starttag_re.sub('', html)
html = endtag_re.sub('', html)
return html
remove_tags = allow_lazy(remove_tags, six.text_type)
def strip_spaces_between_tags(value):
"""Returns the given HTML with spaces between tags removed."""
return re.sub(r'>\s+<', '><', force_text(value))
strip_spaces_between_tags = allow_lazy(strip_spaces_between_tags, six.text_type)
def strip_entities(value):
"""Returns the given HTML with all entities (&something;) stripped."""
return re.sub(r'&(?:\w+|#\d+);', '', force_text(value))
strip_entities = allow_lazy(strip_entities, six.text_type)
def smart_urlquote(url):
"Quotes a URL if it isn't already quoted."
# Handle IDN before quoting.
try:
scheme, netloc, path, query, fragment = urlsplit(url)
try:
netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE
except UnicodeError: # invalid domain part
pass
else:
url = urlunsplit((scheme, netloc, path, query, fragment))
except ValueError:
# invalid IPv6 URL (normally square brackets in hostname part).
pass
url = unquote(force_str(url))
# See http://bugs.python.org/issue2637
url = quote(url, safe=b'!*\'();:@&=+$,/?#[]~')
return force_text(url)
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
"""
Converts any URLs in text into clickable links.
Works on http://, https://, www. links, and also on links ending in one of
the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
Links can have trailing punctuation (periods, commas, close-parens) and
leading punctuation (opening parens) and it'll still do the right thing.
If trim_url_limit is not None, the URLs in the link text longer than this
limit will be truncated to trim_url_limit-3 characters and appended with
an ellipsis.
If nofollow is True, the links will get a rel="nofollow" attribute.
If autoescape is True, the link text and URLs will be autoescaped.
"""
def trim_url(x, limit=trim_url_limit):
if limit is None or len(x) <= limit:
return x
return '%s...' % x[:max(0, limit - 3)]
safe_input = isinstance(text, SafeData)
words = word_split_re.split(force_text(text))
for i, word in enumerate(words):
| if '.' in word or '@' in word or ':' in word:
# Deal with punctuation.
lead, middle, trail = '', word, ''
for punctuation in TRAILING_PUNCTUATION:
if middle.endswith(punctuation):
middle = middle[:-len(punctuation)]
trail = punctuation + trail
for opening, closing in WRAPPING_PUNCTUATION:
if middle.startswith(opening):
middle = middle[len(opening):]
lead = lead + opening
# Keep parentheses at the end only if they're balanced.
if (middle.endswith(closing)
and middle.count(closing) == middle.count(opening) + 1):
middle = middle[:-len(closing)]
trail = closing + trail
# Make URL we want to point to.
url = None
nofollow_attr = ' rel="nofollow"' if nofollow else '' | conditional_block |
|
html.py | # List of possible strings used for bullets in bulleted lists.
DOTS = ['·', '*', '\u2022', '•', '•', '•']
unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
word_split_re = re.compile(r'(\s+)')
simple_url_re = re.compile(r'^https?://\[?\w', re.IGNORECASE)
simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)$', re.IGNORECASE)
simple_email_re = re.compile(r'^\S+@\S+\.\S+$')
link_target_attribute_re = re.compile(r'(<a [^>]*?)target=[^\s>]+')
html_gunk_re = re.compile(r'(?:<br clear="all">|<i><\/i>|<b><\/b>|<em><\/em>|<strong><\/strong>|<\/?smallcaps>|<\/?uppercase>)', re.IGNORECASE)
hard_coded_bullets_re = re.compile(r'((?:<p>(?:%s).*?[a-zA-Z].*?</p>\s*)+)' % '|'.join(re.escape(x) for x in DOTS), re.DOTALL)
trailing_empty_content_re = re.compile(r'(?:<p>(?: |\s|<br \/>)*?</p>\s*)+\Z')
def escape(text):
"""
Returns the given text with ampersands, quotes and angle brackets encoded for use in HTML.
"""
return mark_safe(force_text(text).replace('&', '&').replace('<', '<').replace('>', '>').replace('"', '"').replace("'", '''))
escape = allow_lazy(escape, six.text_type)
_js_escapes = {
ord('\\'): '\\u005C',
ord('\''): '\\u0027',
ord('"'): '\\u0022',
ord('>'): '\\u003E',
ord('<'): '\\u003C',
ord('&'): '\\u0026',
ord('='): '\\u003D',
ord('-'): '\\u002D',
ord(';'): '\\u003B',
ord('\u2028'): '\\u2028',
ord('\u2029'): '\\u2029'
}
# Escape every ASCII character with a value less than 32.
_js_escapes.update((ord('%c' % z), '\\u%04X' % z) for z in range(32))
def escapejs(value):
"""Hex encodes characters for use in JavaScript strings."""
return mark_safe(force_text(value).translate(_js_escapes))
escapejs = allow_lazy(escapejs, six.text_type)
def conditional_escape(text):
"""
Similar to escape(), except that it doesn't operate on pre-escaped strings.
""" | if hasattr(text, '__html__'):
return text.__html__()
else:
return escape(text)
def format_html(format_string, *args, **kwargs):
"""
Similar to str.format, but passes all arguments through conditional_escape,
and calls 'mark_safe' on the result. This function should be used instead
of str.format or % interpolation to build up small HTML fragments.
"""
args_safe = map(conditional_escape, args)
kwargs_safe = dict((k, conditional_escape(v)) for (k, v) in six.iteritems(kwargs))
return mark_safe(format_string.format(*args_safe, **kwargs_safe))
def format_html_join(sep, format_string, args_generator):
"""
A wrapper of format_html, for the common case of a group of arguments that
need to be formatted using the same format string, and then joined using
'sep'. 'sep' is also passed through conditional_escape.
'args_generator' should be an iterator that returns the sequence of 'args'
that will be passed to format_html.
Example:
format_html_join('\n', "<li>{0} {1}</li>", ((u.first_name, u.last_name)
for u in users))
"""
return mark_safe(conditional_escape(sep).join(
format_html(format_string, *tuple(args))
for args in args_generator))
def linebreaks(value, autoescape=False):
"""Converts newlines into <p> and <br />s."""
value = normalize_newlines(value)
paras = re.split('\n{2,}', value)
if autoescape:
paras = ['<p>%s</p>' % escape(p).replace('\n', '<br />') for p in paras]
else:
paras = ['<p>%s</p>' % p.replace('\n', '<br />') for p in paras]
return '\n\n'.join(paras)
linebreaks = allow_lazy(linebreaks, six.text_type)
class MLStripper(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def handle_entityref(self, name):
self.fed.append('&%s;' % name)
def handle_charref(self, name):
self.fed.append('&#%s;' % name)
def get_data(self):
return ''.join(self.fed)
def strip_tags(value):
"""Returns the given HTML with all tags stripped."""
s = MLStripper()
try:
s.feed(value)
s.close()
except HTMLParseError:
return value
else:
return s.get_data()
strip_tags = allow_lazy(strip_tags)
def remove_tags(html, tags):
"""Returns the given HTML with given tags removed."""
tags = [re.escape(tag) for tag in tags.split()]
tags_re = '(%s)' % '|'.join(tags)
starttag_re = re.compile(r'<%s(/?>|(\s+[^>]*>))' % tags_re, re.U)
endtag_re = re.compile('</%s>' % tags_re)
html = starttag_re.sub('', html)
html = endtag_re.sub('', html)
return html
remove_tags = allow_lazy(remove_tags, six.text_type)
def strip_spaces_between_tags(value):
"""Returns the given HTML with spaces between tags removed."""
return re.sub(r'>\s+<', '><', force_text(value))
strip_spaces_between_tags = allow_lazy(strip_spaces_between_tags, six.text_type)
def strip_entities(value):
"""Returns the given HTML with all entities (&something;) stripped."""
return re.sub(r'&(?:\w+|#\d+);', '', force_text(value))
strip_entities = allow_lazy(strip_entities, six.text_type)
def smart_urlquote(url):
"Quotes a URL if it isn't already quoted."
# Handle IDN before quoting.
try:
scheme, netloc, path, query, fragment = urlsplit(url)
try:
netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE
except UnicodeError: # invalid domain part
pass
else:
url = urlunsplit((scheme, netloc, path, query, fragment))
except ValueError:
# invalid IPv6 URL (normally square brackets in hostname part).
pass
url = unquote(force_str(url))
# See http://bugs.python.org/issue2637
url = quote(url, safe=b'!*\'();:@&=+$,/?#[]~')
return force_text(url)
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
"""
Converts any URLs in text into clickable links.
Works on http://, https://, www. links, and also on links ending in one of
the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
Links can have trailing punctuation (periods, commas, close-parens) and
leading punctuation (opening parens) and it'll still do the right thing.
If trim_url_limit is not None, the URLs in the link text longer than this
limit will be truncated to trim_url_limit-3 characters and appended with
an ellipsis.
If nofollow is True, the links will get a rel="nofollow" attribute.
If autoescape is True, the link text and URLs will be autoescaped.
"""
def trim_url(x, limit=trim_url_limit):
if limit is None or len(x) <= limit:
return x
return '%s...' % x[:max(0, limit - 3)]
safe_input = isinstance(text, SafeData)
words = word_split_re.split(force_text(text))
for i, word in enumerate(words):
if '.' in word or '@' in word or ':' in word:
# Deal with punctuation.
lead, middle, trail = '', word, ''
for punctuation in TRAILING_PUNCTUATION:
if middle.endswith(punctuation):
middle = middle[:-len(punctuation)]
trail = punctuation + trail
for opening, closing in WRAPPING_PUNCTUATION:
if middle.startswith(opening):
middle = middle[len(opening):]
lead = lead + opening
# Keep parentheses at the end only if they're balanced.
if (middle.endswith(closing)
| random_line_split |
|
html.py | # List of possible strings used for bullets in bulleted lists.
DOTS = ['·', '*', '\u2022', '•', '•', '•']
unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
word_split_re = re.compile(r'(\s+)')
simple_url_re = re.compile(r'^https?://\[?\w', re.IGNORECASE)
simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)$', re.IGNORECASE)
simple_email_re = re.compile(r'^\S+@\S+\.\S+$')
link_target_attribute_re = re.compile(r'(<a [^>]*?)target=[^\s>]+')
html_gunk_re = re.compile(r'(?:<br clear="all">|<i><\/i>|<b><\/b>|<em><\/em>|<strong><\/strong>|<\/?smallcaps>|<\/?uppercase>)', re.IGNORECASE)
hard_coded_bullets_re = re.compile(r'((?:<p>(?:%s).*?[a-zA-Z].*?</p>\s*)+)' % '|'.join(re.escape(x) for x in DOTS), re.DOTALL)
trailing_empty_content_re = re.compile(r'(?:<p>(?: |\s|<br \/>)*?</p>\s*)+\Z')
def escape(text):
"""
Returns the given text with ampersands, quotes and angle brackets encoded for use in HTML.
"""
return mark_safe(force_text(text).replace('&', '&').replace('<', '<').replace('>', '>').replace('"', '"').replace("'", '''))
escape = allow_lazy(escape, six.text_type)
_js_escapes = {
ord('\\'): '\\u005C',
ord('\''): '\\u0027',
ord('"'): '\\u0022',
ord('>'): '\\u003E',
ord('<'): '\\u003C',
ord('&'): '\\u0026',
ord('='): '\\u003D',
ord('-'): '\\u002D',
ord(';'): '\\u003B',
ord('\u2028'): '\\u2028',
ord('\u2029'): '\\u2029'
}
# Escape every ASCII character with a value less than 32.
_js_escapes.update((ord('%c' % z), '\\u%04X' % z) for z in range(32))
def escapejs(value):
"""Hex encodes characters for use in JavaScript strings."""
return mark_safe(force_text(value).translate(_js_escapes))
escapejs = allow_lazy(escapejs, six.text_type)
def conditional_escape(text):
"""
Similar to escape(), except that it doesn't operate on pre-escaped strings.
"""
if hasattr(text, '__html__'):
return text.__html__()
else:
return escape(text)
def format_html(format_string, *args, **kwargs):
"""
Similar to str.format, but passes all arguments through conditional_escape,
and calls 'mark_safe' on the result. This function should be used instead
of str.format or % interpolation to build up small HTML fragments.
"""
args_safe = map(conditional_escape, args)
kwargs_safe = dict((k, conditional_escape(v)) for (k, v) in six.iteritems(kwargs))
return mark_safe(format_string.format(*args_safe, **kwargs_safe))
def format_html_join(sep, format_string, args_generator):
"""
A wrapper of format_html, for the common case of a group of arguments that
need to be formatted using the same format string, and then joined using
'sep'. 'sep' is also passed through conditional_escape.
'args_generator' should be an iterator that returns the sequence of 'args'
that will be passed to format_html.
Example:
format_html_join('\n', "<li>{0} {1}</li>", ((u.first_name, u.last_name)
for u in users))
"""
return mark_safe(conditional_escape(sep).join(
format_html(format_string, *tuple(args))
for args in args_generator))
def linebreaks(value, autoescape=False):
"""Converts newlines into <p> and <br />s."""
value = normalize_newlines(value)
paras = re.split('\n{2,}', value)
if autoescape:
paras = ['<p>%s</p>' % escape(p).replace('\n', '<br />') for p in paras]
else:
paras = ['<p>%s</p>' % p.replace('\n', '<br />') for p in paras]
return '\n\n'.join(paras)
linebreaks = allow_lazy(linebreaks, six.text_type)
class MLStripper(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def handle_entityref(self, name):
self.fed.append('&%s;' % name)
def handle_charref(self, name):
self.fed.append('&#%s;' % name)
def get_data(self):
return ''.join(self.fed)
def strip_tags(value):
"""Returns the given HTML with all tags stripped."""
s = MLStripper()
try:
s.feed(value)
s.close()
except HTMLParseError:
return value
else:
return s.get_data()
strip_tags = allow_lazy(strip_tags)
def remove_tags(html, tags):
"""Returns the given HTML with given tags removed."""
tags = [re.escape(tag) for tag in tags.split()]
tags_re = '(%s)' % '|'.join(tags)
starttag_re = re.compile(r'<%s(/?>|(\s+[^>]*>))' % tags_re, re.U)
endtag_re = re.compile('</%s>' % tags_re)
html = starttag_re.sub('', html)
html = endtag_re.sub('', html)
return html
remove_tags = allow_lazy(remove_tags, six.text_type)
def strip_spaces_between_tags(value):
"""Returns the given HTML with spaces between tags removed."""
return re.sub(r'>\s+<', '><', force_text(value))
strip_spaces_between_tags = allow_lazy(strip_spaces_between_tags, six.text_type)
def strip_entities(value):
"""Returns the given HTML with all entities (&something;) stripped."""
return re.sub(r'&(?:\w+|#\d+);', '', force_text(value))
strip_entities = allow_lazy(strip_entities, six.text_type)
def smart_urlquote(url):
"Quotes a URL if it isn't already quoted."
# Handle IDN before quoting.
try:
scheme, netloc, path, query, fragment = urlsplit(url)
try:
netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE
except UnicodeError: # invalid domain part
pass
else:
url = urlunsplit((scheme, netloc, path, query, fragment))
except ValueError:
# invalid IPv6 URL (normally square brackets in hostname part).
pass
url = unquote(force_str(url))
# See http://bugs.python.org/issue2637
url = quote(url, safe=b'!*\'();:@&=+$,/?#[]~')
return force_text(url)
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
| safe_input = isinstance(text, SafeData)
words = word_split_re.split(force_text(text))
for i, word in enumerate(words):
if '.' in word or '@' in word or ':' in word:
# Deal with punctuation.
lead, middle, trail = '', word, ''
for punctuation in TRAILING_PUNCTUATION:
if middle.endswith(punctuation):
middle = middle[:-len(punctuation)]
trail = punctuation + trail
for opening, closing in WRAPPING_PUNCTUATION:
if middle.startswith(opening):
middle = middle[len(opening):]
lead = lead + opening
# Keep parentheses at the end only if they're balanced.
if (middle.endswith(closing)
| """
Converts any URLs in text into clickable links.
Works on http://, https://, www. links, and also on links ending in one of
the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
Links can have trailing punctuation (periods, commas, close-parens) and
leading punctuation (opening parens) and it'll still do the right thing.
If trim_url_limit is not None, the URLs in the link text longer than this
limit will be truncated to trim_url_limit-3 characters and appended with
an ellipsis.
If nofollow is True, the links will get a rel="nofollow" attribute.
If autoescape is True, the link text and URLs will be autoescaped.
"""
def trim_url(x, limit=trim_url_limit):
if limit is None or len(x) <= limit:
return x
return '%s...' % x[:max(0, limit - 3)] | identifier_body |
html.py | # List of possible strings used for bullets in bulleted lists.
DOTS = ['·', '*', '\u2022', '•', '•', '•']
unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
word_split_re = re.compile(r'(\s+)')
simple_url_re = re.compile(r'^https?://\[?\w', re.IGNORECASE)
simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)$', re.IGNORECASE)
simple_email_re = re.compile(r'^\S+@\S+\.\S+$')
link_target_attribute_re = re.compile(r'(<a [^>]*?)target=[^\s>]+')
html_gunk_re = re.compile(r'(?:<br clear="all">|<i><\/i>|<b><\/b>|<em><\/em>|<strong><\/strong>|<\/?smallcaps>|<\/?uppercase>)', re.IGNORECASE)
hard_coded_bullets_re = re.compile(r'((?:<p>(?:%s).*?[a-zA-Z].*?</p>\s*)+)' % '|'.join(re.escape(x) for x in DOTS), re.DOTALL)
trailing_empty_content_re = re.compile(r'(?:<p>(?: |\s|<br \/>)*?</p>\s*)+\Z')
def escape(text):
"""
Returns the given text with ampersands, quotes and angle brackets encoded for use in HTML.
"""
return mark_safe(force_text(text).replace('&', '&').replace('<', '<').replace('>', '>').replace('"', '"').replace("'", '''))
escape = allow_lazy(escape, six.text_type)
_js_escapes = {
ord('\\'): '\\u005C',
ord('\''): '\\u0027',
ord('"'): '\\u0022',
ord('>'): '\\u003E',
ord('<'): '\\u003C',
ord('&'): '\\u0026',
ord('='): '\\u003D',
ord('-'): '\\u002D',
ord(';'): '\\u003B',
ord('\u2028'): '\\u2028',
ord('\u2029'): '\\u2029'
}
# Escape every ASCII character with a value less than 32.
_js_escapes.update((ord('%c' % z), '\\u%04X' % z) for z in range(32))
def escapejs(value):
"""Hex encodes characters for use in JavaScript strings."""
return mark_safe(force_text(value).translate(_js_escapes))
escapejs = allow_lazy(escapejs, six.text_type)
def conditional_escape(text):
"""
Similar to escape(), except that it doesn't operate on pre-escaped strings.
"""
if hasattr(text, '__html__'):
return text.__html__()
else:
return escape(text)
def | (format_string, *args, **kwargs):
"""
Similar to str.format, but passes all arguments through conditional_escape,
and calls 'mark_safe' on the result. This function should be used instead
of str.format or % interpolation to build up small HTML fragments.
"""
args_safe = map(conditional_escape, args)
kwargs_safe = dict((k, conditional_escape(v)) for (k, v) in six.iteritems(kwargs))
return mark_safe(format_string.format(*args_safe, **kwargs_safe))
def format_html_join(sep, format_string, args_generator):
"""
A wrapper of format_html, for the common case of a group of arguments that
need to be formatted using the same format string, and then joined using
'sep'. 'sep' is also passed through conditional_escape.
'args_generator' should be an iterator that returns the sequence of 'args'
that will be passed to format_html.
Example:
format_html_join('\n', "<li>{0} {1}</li>", ((u.first_name, u.last_name)
for u in users))
"""
return mark_safe(conditional_escape(sep).join(
format_html(format_string, *tuple(args))
for args in args_generator))
def linebreaks(value, autoescape=False):
"""Converts newlines into <p> and <br />s."""
value = normalize_newlines(value)
paras = re.split('\n{2,}', value)
if autoescape:
paras = ['<p>%s</p>' % escape(p).replace('\n', '<br />') for p in paras]
else:
paras = ['<p>%s</p>' % p.replace('\n', '<br />') for p in paras]
return '\n\n'.join(paras)
linebreaks = allow_lazy(linebreaks, six.text_type)
class MLStripper(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def handle_entityref(self, name):
self.fed.append('&%s;' % name)
def handle_charref(self, name):
self.fed.append('&#%s;' % name)
def get_data(self):
return ''.join(self.fed)
def strip_tags(value):
"""Returns the given HTML with all tags stripped."""
s = MLStripper()
try:
s.feed(value)
s.close()
except HTMLParseError:
return value
else:
return s.get_data()
strip_tags = allow_lazy(strip_tags)
def remove_tags(html, tags):
"""Returns the given HTML with given tags removed."""
tags = [re.escape(tag) for tag in tags.split()]
tags_re = '(%s)' % '|'.join(tags)
starttag_re = re.compile(r'<%s(/?>|(\s+[^>]*>))' % tags_re, re.U)
endtag_re = re.compile('</%s>' % tags_re)
html = starttag_re.sub('', html)
html = endtag_re.sub('', html)
return html
remove_tags = allow_lazy(remove_tags, six.text_type)
def strip_spaces_between_tags(value):
"""Returns the given HTML with spaces between tags removed."""
return re.sub(r'>\s+<', '><', force_text(value))
strip_spaces_between_tags = allow_lazy(strip_spaces_between_tags, six.text_type)
def strip_entities(value):
"""Returns the given HTML with all entities (&something;) stripped."""
return re.sub(r'&(?:\w+|#\d+);', '', force_text(value))
strip_entities = allow_lazy(strip_entities, six.text_type)
def smart_urlquote(url):
"Quotes a URL if it isn't already quoted."
# Handle IDN before quoting.
try:
scheme, netloc, path, query, fragment = urlsplit(url)
try:
netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE
except UnicodeError: # invalid domain part
pass
else:
url = urlunsplit((scheme, netloc, path, query, fragment))
except ValueError:
# invalid IPv6 URL (normally square brackets in hostname part).
pass
url = unquote(force_str(url))
# See http://bugs.python.org/issue2637
url = quote(url, safe=b'!*\'();:@&=+$,/?#[]~')
return force_text(url)
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
"""
Converts any URLs in text into clickable links.
Works on http://, https://, www. links, and also on links ending in one of
the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
Links can have trailing punctuation (periods, commas, close-parens) and
leading punctuation (opening parens) and it'll still do the right thing.
If trim_url_limit is not None, the URLs in the link text longer than this
limit will be truncated to trim_url_limit-3 characters and appended with
an ellipsis.
If nofollow is True, the links will get a rel="nofollow" attribute.
If autoescape is True, the link text and URLs will be autoescaped.
"""
def trim_url(x, limit=trim_url_limit):
if limit is None or len(x) <= limit:
return x
return '%s...' % x[:max(0, limit - 3)]
safe_input = isinstance(text, SafeData)
words = word_split_re.split(force_text(text))
for i, word in enumerate(words):
if '.' in word or '@' in word or ':' in word:
# Deal with punctuation.
lead, middle, trail = '', word, ''
for punctuation in TRAILING_PUNCTUATION:
if middle.endswith(punctuation):
middle = middle[:-len(punctuation)]
trail = punctuation + trail
for opening, closing in WRAPPING_PUNCTUATION:
if middle.startswith(opening):
middle = middle[len(opening):]
lead = lead + opening
# Keep parentheses at the end only if they're balanced.
if (middle.endswith(closing | format_html | identifier_name |
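Taken together, the helpers above (escape, format_html, strip_tags and friends) cover most day-to-day HTML handling. A brief usage sketch, assuming the module is importable as django.utils.html as in stock Django of that era:
from django.utils.html import escape, format_html, strip_tags
escape('<b>"quoted"</b>')          # '&lt;b&gt;&quot;quoted&quot;&lt;/b&gt;'
format_html('<a href="{0}">{1}</a>', 'http://example.com/?q=a&b', '<name>')
                                   # both arguments are escaped before formatting
strip_tags('<p>hello <b>world</b></p>')   # 'hello world'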
nthday.js |
/*
Parameters:
index: n’th occurrence of the specified day
1 - first
2 - second
3 - third
4 - fourth
5 - fifth
6 - last
day: daynumber – javascript way where sunday is 0 and saturday is 6
month: which is 1-12 [optional – defaults to current]
year: Full year – four digits [optional – defaults to current]
var myDay = getNthDayOfMonth(1, 0, 9, '');
console.log("first sunday in September "+myDay);
var myDay = getNthDayOfMonth(2, 0, 9, '');
console.log("second sunday in September "+myDay);
var myDay = getNthDayOfMonth(3, 0, 10, '');
console.log("third sunday in October "+myDay);
var myDay = getNthDayOfMonth(6, 0, 11, '');
console.log("last sunday in November "+myDay);
var myDay = getNthDayOfMonth(6, 0, 12, '');
console.log("last sunday in December "+myDay);
var myDay = getNthDayOfMonth(4, 0, 1, '');
console.log("4th sunday in January?? "+myDay);
var myDay = getNthDayOfMonth(6, 0, 1, '');
console.log("last sunday in January 17 "+myDay);
var myDay = getNthDayOfMonth(6, 0, 1, '2018');
console.log("last sunday in January "+myDay);
*/
function getNthDayOfMonth(index, day, month, year) {
// Create date object
var date = new Date();
// Set to first day of month
date.setDate(1);
// If supplied – set the month
if (month !== '' && month !== undefined) {
// Set month
month -=1;
date.setMonth(month);
} else {
month = | lied – set the year
if (year !== '' && year !== undefined) {
// Set year
date.setFullYear(year);
} else {
year = date.getFullYear();
}
// Find daynumber
firstDay = date.getDay();
// Find the first occurrence of the requested weekday.
while (date.getDay() != day) {
date.setDate(date.getDate() + 1);
}
switch (index) {
case 2:
date.setDate(date.getDate() + 7);
break;
case 3:
date.setDate(date.getDate() + 14);
break;
case 4:
date.setDate(date.getDate() + 21);
break;
case 5:
date.setDate(date.getDate() + 28);
if (date.getMonth() !== month) {
date = null;
}
break;
case 6:
// 6 denotes the last {day} of the month (last sunday)
month += 1;
var firstDayOfNextMonth = new Date((month == 12 ? 1 : month + 1) + '/01/' + (month == 12 ? year + 1 : year));
//Getting the Last Day of the given month and year
var lastDayofThemonth = new Date(firstDayOfNextMonth-1);
var lastSundayOfTheMonth= new Date();
for(i=0; i<=7; i++)
{
var tempDate = new Date(month + '/' + (lastDayofThemonth.getDate() - i) + '/' + year);
if(tempDate.getDay() ==0)
{
date = tempDate;
break;
}
}
break;
}
return date;
}
| date.getMonth();
}
// If supp | conditional_block |
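The comment block above documents getNthDayOfMonth(index, day, month, year), with day given the JavaScript way (Sunday = 0) and index 6 meaning the last occurrence. A compact Python sketch of the same calculation (illustrative only; Python's calendar module counts Monday as 0, hence the conversion):
import calendar
from datetime import date
def nth_weekday(index, weekday, year, month):
    # weekday uses the JavaScript convention: 0 = Sunday ... 6 = Saturday.
    py_weekday = (weekday - 1) % 7
    days = [week[py_weekday] for week in calendar.monthcalendar(year, month)
            if week[py_weekday] != 0]
    if index == 6:                 # 6 denotes the last occurrence in the month
        return date(year, month, days[-1])
    return date(year, month, days[index - 1]) if index <= len(days) else None
assert nth_weekday(6, 0, 2018, 1) == date(2018, 1, 28)   # last Sunday of January 2018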
nthday.js |
/*
Parameters:
index: n’th occurrence of the specified day
1 - first
2 - second
3 - third
4 - fourth
5 - fifth
6 - last
day: daynumber – javascript way where sunday is 0 and saturday is 6
month: which is 1-12 [optional – defaults to current]
year: Full year – four digits [optional – defaults to current]
var myDay = getNthDayOfMonth(1, 0, 9, '');
console.log("first sunday in September "+myDay);
var myDay = getNthDayOfMonth(2, 0, 9, '');
console.log("second sunday in September "+myDay);
var myDay = getNthDayOfMonth(3, 0, 10, '');
console.log("third sunday in October "+myDay);
var myDay = getNthDayOfMonth(6, 0, 11, '');
console.log("last sunday in November "+myDay);
var myDay = getNthDayOfMonth(6, 0, 12, '');
console.log("last sunday in December "+myDay);
var myDay = getNthDayOfMonth(4, 0, 1, '');
console.log("4th sunday in January?? "+myDay);
var myDay = getNthDayOfMonth(6, 0, 1, '');
console.log("last sunday in January 17 "+myDay);
var myDay = getNthDayOfMonth(6, 0, 1, '2018');
console.log("last sunday in January "+myDay);
*/
function getNthDayO | y, month, year) {
// Create date object
var date = new Date();
// Set to first day of month
date.setDate(1);
// If supplied – set the month
if (month !== '' && month !== undefined) {
// Set month
month -=1;
date.setMonth(month);
} else {
month = date.getMonth();
}
// If supplied – set the year
if (year !== '' && year !== undefined) {
// Set year
date.setFullYear(year);
} else {
year = date.getFullYear();
}
// Find daynumber
firstDay = date.getDay();
// Find the first occurrence of the requested weekday.
while (date.getDay() != day) {
date.setDate(date.getDate() + 1);
}
switch (index) {
case 2:
date.setDate(date.getDate() + 7);
break;
case 3:
date.setDate(date.getDate() + 14);
break;
case 4:
date.setDate(date.getDate() + 21);
break;
case 5:
date.setDate(date.getDate() + 28);
if (date.getMonth() !== month) {
date = null;
}
break;
case 6:
// 6 denotes the last {day} of the month (last sunday)
month += 1;
var firstDayOfNextMonth = new Date((month == 12 ? 1 : month + 1) + '/01/' + (month == 12 ? year + 1 : year));
//Getting the Last Day of the given month and year
var lastDayofThemonth = new Date(firstDayOfNextMonth-1);
var lastSundayOfTheMonth= new Date();
for(i=0; i<=7; i++)
{
var tempDate = new Date(month + '/' + (lastDayofThemonth.getDate() - i) + '/' + year);
if(tempDate.getDay() ==0)
{
date = tempDate;
break;
}
}
break;
}
return date;
}
| fMonth(index, da | identifier_name |
nthday.js | /*
Parameters:
index: n’th occurrence of the specified day | 4 - fourth
5 - fifth
6 - last
day: daynumber – javascript way where sunday is 0 and saturday is 6
month: which is 1-12 [optional – defaults to current]
year: Full year – four digits [optional – defaults to current]
var myDay = getNthDayOfMonth(1, 0, 9, '');
console.log("first sunday in September "+myDay);
var myDay = getNthDayOfMonth(2, 0, 9, '');
console.log("second sunday in September "+myDay);
var myDay = getNthDayOfMonth(3, 0, 10, '');
console.log("third sunday in October "+myDay);
var myDay = getNthDayOfMonth(6, 0, 11, '');
console.log("last sunday in November "+myDay);
var myDay = getNthDayOfMonth(6, 0, 12, '');
console.log("last sunday in December "+myDay);
var myDay = getNthDayOfMonth(4, 0, 1, '');
console.log("4th sunday in January?? "+myDay);
var myDay = getNthDayOfMonth(6, 0, 1, '');
console.log("last sunday in January 17 "+myDay);
var myDay = getNthDayOfMonth(6, 0, 1, '2018');
console.log("last sunday in January "+myDay);
*/
function getNthDayOfMonth(index, day, month, year) {
// Create date object
var date = new Date();
// Set to first day of month
date.setDate(1);
// If supplied – set the month
if (month !== '' && month !== undefined) {
// Set month
month -=1;
date.setMonth(month);
} else {
month = date.getMonth();
}
// If supplied – set the year
if (year !== '' && year !== undefined) {
// Set year
date.setFullYear(year);
} else {
year = date.getFullYear();
}
// Find daynumber
firstDay = date.getDay();
// Find the first occurrence of the requested weekday.
while (date.getDay() != day) {
date.setDate(date.getDate() + 1);
}
switch (index) {
case 2:
date.setDate(date.getDate() + 7);
break;
case 3:
date.setDate(date.getDate() + 14);
break;
case 4:
date.setDate(date.getDate() + 21);
break;
case 5:
date.setDate(date.getDate() + 28);
if (date.getMonth() !== month) {
date = null;
}
break;
case 6:
// 6 denotes the last {day} of the month (last sunday)
month += 1;
var firstDayOfNextMonth = new Date((month == 12 ? 1 : month + 1) + '/01/' + (month == 12 ? year + 1 : year));
//Getting the Last Day of the given month and year
var lastDayofThemonth = new Date(firstDayOfNextMonth-1);
var lastSundayOfTheMonth= new Date();
for(i=0; i<=7; i++)
{
var tempDate = new Date(month + '/' + (lastDayofThemonth.getDate() - i) + '/' + year);
if(tempDate.getDay() ==0)
{
date = tempDate;
break;
}
}
break;
}
return date;
} | 1 - first
2 - second
3 - third | random_line_split |
nthday.js |
/*
Parameters:
index: n’th occurrence of the specified day
1 - first
2 - second
3 - third
4 - fourth
5 - fifth
6 - last
day: daynumber – javascript way where sunday is 0 and saturday is 6
month: which is 1-12 [optional – defaults to current]
year: Full year – four digits [optional – defaults to current]
var myDay = getNthDayOfMonth(1, 0, 9, '');
console.log("first sunday in September "+myDay);
var myDay = getNthDayOfMonth(2, 0, 9, '');
console.log("second sunday in September "+myDay);
var myDay = getNthDayOfMonth(3, 0, 10, '');
console.log("third sunday in October "+myDay);
var myDay = getNthDayOfMonth(6, 0, 11, '');
console.log("last sunday in November "+myDay);
var myDay = getNthDayOfMonth(6, 0, 12, '');
console.log("last sunday in December "+myDay);
var myDay = getNthDayOfMonth(4, 0, 1, '');
console.log("4th sunday in January?? "+myDay);
var myDay = getNthDayOfMonth(6, 0, 1, '');
console.log("last sunday in January 17 "+myDay);
var myDay = getNthDayOfMonth(6, 0, 1, '2018');
console.log("last sunday in January "+myDay);
*/
function getNthDayOfMonth(index, day, month, year) {
// Crea | firstDay = date.getDay();
// Find the first occurrence of the requested weekday.
while (date.getDay() != day) {
date.setDate(date.getDate() + 1);
}
switch (index) {
case 2:
date.setDate(date.getDate() + 7);
break;
case 3:
date.setDate(date.getDate() + 14);
break;
case 4:
date.setDate(date.getDate() + 21);
break;
case 5:
date.setDate(date.getDate() + 28);
if (date.getMonth() !== month) {
date = null;
}
break;
case 6:
// 6 denotes the last {day} of the month (last sunday)
month += 1;
var firstDayOfNextMonth = new Date((month == 12 ? 1 : month + 1) + '/01/' + (month == 12 ? year + 1 : year));
//Getting the Last Day of the given month and year
var lastDayofThemonth = new Date(firstDayOfNextMonth-1);
var lastSundayOfTheMonth= new Date();
for(i=0; i<=7; i++)
{
var tempDate = new Date(month + '/' + (lastDayofThemonth.getDate() - i) + '/' + year);
if(tempDate.getDay() ==0)
{
date = tempDate;
break;
}
}
break;
}
return date;
}
| te date object
var date = new Date();
// Set to first day of month
date.setDate(1);
// If supplied – set the month
if (month !== '' && month !== undefined) {
// Set month
month -=1;
date.setMonth(month);
} else {
month = date.getMonth();
}
// If supplied – set the year
if (year !== '' && year !== undefined) {
// Set year
date.setFullYear(year);
} else {
year = date.getFullYear();
}
// Find daynumber | identifier_body |
settings.py | """
Settings for REST framework are all namespaced in the REST_FRAMEWORK setting.
For example your project's `settings.py` file might look like this:
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.YAMLRenderer', | 'rest_framework.parsers.YAMLParser',
)
}
This module provides the `api_setting` object, that is used to access
REST framework settings, checking for user settings first, then falling
back to the defaults.
"""
from __future__ import unicode_literals
from django.conf import settings
from django.utils import importlib
from rest_framework import ISO_8601
from rest_framework.compat import six
USER_SETTINGS = getattr(settings, 'REST_FRAMEWORK', None)
DEFAULTS = {
# Base API policies
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
'rest_framework.parsers.FormParser',
'rest_framework.parsers.MultiPartParser'
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication'
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.AllowAny',
),
'DEFAULT_THROTTLE_CLASSES': (
),
'DEFAULT_CONTENT_NEGOTIATION_CLASS':
'rest_framework.negotiation.DefaultContentNegotiation',
# Generic view behavior
'DEFAULT_MODEL_SERIALIZER_CLASS':
'rest_framework.serializers.ModelSerializer',
'DEFAULT_PAGINATION_SERIALIZER_CLASS':
'rest_framework.pagination.PaginationSerializer',
'DEFAULT_FILTER_BACKENDS': (),
# Throttling
'DEFAULT_THROTTLE_RATES': {
'user': None,
'anon': None,
},
# Pagination
'PAGINATE_BY': None,
'PAGINATE_BY_PARAM': None,
'MAX_PAGINATE_BY': None,
# Authentication
'UNAUTHENTICATED_USER': 'django.contrib.auth.models.AnonymousUser',
'UNAUTHENTICATED_TOKEN': None,
# View configuration
'VIEW_NAME_FUNCTION': 'rest_framework.views.get_view_name',
'VIEW_DESCRIPTION_FUNCTION': 'rest_framework.views.get_view_description',
# Exception handling
'EXCEPTION_HANDLER': 'rest_framework.views.exception_handler',
# Testing
'TEST_REQUEST_RENDERER_CLASSES': (
'rest_framework.renderers.MultiPartRenderer',
'rest_framework.renderers.JSONRenderer'
),
'TEST_REQUEST_DEFAULT_FORMAT': 'multipart',
# Browser enhancements
'FORM_METHOD_OVERRIDE': '_method',
'FORM_CONTENT_OVERRIDE': '_content',
'FORM_CONTENTTYPE_OVERRIDE': '_content_type',
'URL_ACCEPT_OVERRIDE': 'accept',
'URL_FORMAT_OVERRIDE': 'format',
'FORMAT_SUFFIX_KWARG': 'format',
# Input and output formats
'DATE_INPUT_FORMATS': (
ISO_8601,
),
'DATE_FORMAT': None,
'DATETIME_INPUT_FORMATS': (
ISO_8601,
),
'DATETIME_FORMAT': None,
'TIME_INPUT_FORMATS': (
ISO_8601,
),
'TIME_FORMAT': None,
# Pending deprecation
'FILTER_BACKEND': None,
}
# List of settings that may be in string import notation.
IMPORT_STRINGS = (
'DEFAULT_RENDERER_CLASSES',
'DEFAULT_PARSER_CLASSES',
'DEFAULT_AUTHENTICATION_CLASSES',
'DEFAULT_PERMISSION_CLASSES',
'DEFAULT_THROTTLE_CLASSES',
'DEFAULT_CONTENT_NEGOTIATION_CLASS',
'DEFAULT_MODEL_SERIALIZER_CLASS',
'DEFAULT_PAGINATION_SERIALIZER_CLASS',
'DEFAULT_FILTER_BACKENDS',
'EXCEPTION_HANDLER',
'FILTER_BACKEND',
'TEST_REQUEST_RENDERER_CLASSES',
'UNAUTHENTICATED_USER',
'UNAUTHENTICATED_TOKEN',
'VIEW_NAME_FUNCTION',
'VIEW_DESCRIPTION_FUNCTION'
)
def perform_import(val, setting_name):
"""
If the given setting is a string import notation,
then perform the necessary import or imports.
"""
if isinstance(val, six.string_types):
return import_from_string(val, setting_name)
elif isinstance(val, (list, tuple)):
return [import_from_string(item, setting_name) for item in val]
return val
def import_from_string(val, setting_name):
"""
Attempt to import a class from a string representation.
"""
try:
# Nod to tastypie's use of importlib.
parts = val.split('.')
module_path, class_name = '.'.join(parts[:-1]), parts[-1]
module = importlib.import_module(module_path)
return getattr(module, class_name)
except ImportError as e:
msg = "Could not import '%s' for API setting '%s'. %s: %s." % (val, setting_name, e.__class__.__name__, e)
raise ImportError(msg)
class APISettings(object):
"""
A settings object, that allows API settings to be accessed as properties.
For example:
from rest_framework.settings import api_settings
print api_settings.DEFAULT_RENDERER_CLASSES
Any setting with string import paths will be automatically resolved
and return the class, rather than the string literal.
"""
def __init__(self, user_settings=None, defaults=None, import_strings=None):
self.user_settings = user_settings or {}
self.defaults = defaults or {}
self.import_strings = import_strings or ()
def __getattr__(self, attr):
if attr not in self.defaults.keys():
raise AttributeError("Invalid API setting: '%s'" % attr)
try:
# Check if present in user settings
val = self.user_settings[attr]
except KeyError:
# Fall back to defaults
val = self.defaults[attr]
# Coerce import strings into classes
if val and attr in self.import_strings:
val = perform_import(val, attr)
self.validate_setting(attr, val)
# Cache the result
setattr(self, attr, val)
return val
def validate_setting(self, attr, val):
if attr == 'FILTER_BACKEND' and val is not None:
# Make sure we can initialize the class
val()
api_settings = APISettings(USER_SETTINGS, DEFAULTS, IMPORT_STRINGS) | )
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser', | random_line_split |
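The APISettings docstring above describes the pattern: look the name up in user settings, fall back to a default, and resolve dotted import strings to real objects on first access. A stripped-down Python sketch of that idea (hypothetical names, not the DRF implementation):
import importlib
import json
class LazySettings(object):
    def __init__(self, user_settings, defaults, import_strings=()):
        self.user_settings = user_settings or {}
        self.defaults = defaults
        self.import_strings = set(import_strings)
    def __getattr__(self, attr):
        if attr not in self.defaults:
            raise AttributeError("Invalid setting: %r" % attr)
        val = self.user_settings.get(attr, self.defaults[attr])
        if attr in self.import_strings and isinstance(val, str):
            module_path, _, name = val.rpartition('.')
            val = getattr(importlib.import_module(module_path), name)
        setattr(self, attr, val)   # cache so the lookup runs only once per name
        return val
conf = LazySettings({'PAGE_SIZE': 50},
                    {'PAGE_SIZE': 10, 'RENDERER': 'json.dumps'},
                    import_strings=['RENDERER'])
assert conf.PAGE_SIZE == 50
assert conf.RENDERER is json.dumps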
settings.py | """
Settings for REST framework are all namespaced in the REST_FRAMEWORK setting.
For example your project's `settings.py` file might look like this:
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.YAMLRenderer',
)
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
'rest_framework.parsers.YAMLParser',
)
}
This module provides the `api_setting` object, that is used to access
REST framework settings, checking for user settings first, then falling
back to the defaults.
"""
from __future__ import unicode_literals
from django.conf import settings
from django.utils import importlib
from rest_framework import ISO_8601
from rest_framework.compat import six
USER_SETTINGS = getattr(settings, 'REST_FRAMEWORK', None)
DEFAULTS = {
# Base API policies
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
'rest_framework.parsers.FormParser',
'rest_framework.parsers.MultiPartParser'
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication'
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.AllowAny',
),
'DEFAULT_THROTTLE_CLASSES': (
),
'DEFAULT_CONTENT_NEGOTIATION_CLASS':
'rest_framework.negotiation.DefaultContentNegotiation',
# Generic view behavior
'DEFAULT_MODEL_SERIALIZER_CLASS':
'rest_framework.serializers.ModelSerializer',
'DEFAULT_PAGINATION_SERIALIZER_CLASS':
'rest_framework.pagination.PaginationSerializer',
'DEFAULT_FILTER_BACKENDS': (),
# Throttling
'DEFAULT_THROTTLE_RATES': {
'user': None,
'anon': None,
},
# Pagination
'PAGINATE_BY': None,
'PAGINATE_BY_PARAM': None,
'MAX_PAGINATE_BY': None,
# Authentication
'UNAUTHENTICATED_USER': 'django.contrib.auth.models.AnonymousUser',
'UNAUTHENTICATED_TOKEN': None,
# View configuration
'VIEW_NAME_FUNCTION': 'rest_framework.views.get_view_name',
'VIEW_DESCRIPTION_FUNCTION': 'rest_framework.views.get_view_description',
# Exception handling
'EXCEPTION_HANDLER': 'rest_framework.views.exception_handler',
# Testing
'TEST_REQUEST_RENDERER_CLASSES': (
'rest_framework.renderers.MultiPartRenderer',
'rest_framework.renderers.JSONRenderer'
),
'TEST_REQUEST_DEFAULT_FORMAT': 'multipart',
# Browser enhancements
'FORM_METHOD_OVERRIDE': '_method',
'FORM_CONTENT_OVERRIDE': '_content',
'FORM_CONTENTTYPE_OVERRIDE': '_content_type',
'URL_ACCEPT_OVERRIDE': 'accept',
'URL_FORMAT_OVERRIDE': 'format',
'FORMAT_SUFFIX_KWARG': 'format',
# Input and output formats
'DATE_INPUT_FORMATS': (
ISO_8601,
),
'DATE_FORMAT': None,
'DATETIME_INPUT_FORMATS': (
ISO_8601,
),
'DATETIME_FORMAT': None,
'TIME_INPUT_FORMATS': (
ISO_8601,
),
'TIME_FORMAT': None,
# Pending deprecation
'FILTER_BACKEND': None,
}
# List of settings that may be in string import notation.
IMPORT_STRINGS = (
'DEFAULT_RENDERER_CLASSES',
'DEFAULT_PARSER_CLASSES',
'DEFAULT_AUTHENTICATION_CLASSES',
'DEFAULT_PERMISSION_CLASSES',
'DEFAULT_THROTTLE_CLASSES',
'DEFAULT_CONTENT_NEGOTIATION_CLASS',
'DEFAULT_MODEL_SERIALIZER_CLASS',
'DEFAULT_PAGINATION_SERIALIZER_CLASS',
'DEFAULT_FILTER_BACKENDS',
'EXCEPTION_HANDLER',
'FILTER_BACKEND',
'TEST_REQUEST_RENDERER_CLASSES',
'UNAUTHENTICATED_USER',
'UNAUTHENTICATED_TOKEN',
'VIEW_NAME_FUNCTION',
'VIEW_DESCRIPTION_FUNCTION'
)
def perform_import(val, setting_name):
"""
If the given setting is a string import notation,
then perform the necessary import or imports.
"""
if isinstance(val, six.string_types):
return import_from_string(val, setting_name)
elif isinstance(val, (list, tuple)):
return [import_from_string(item, setting_name) for item in val]
return val
def import_from_string(val, setting_name):
"""
Attempt to import a class from a string representation.
"""
try:
# Nod to tastypie's use of importlib.
parts = val.split('.')
module_path, class_name = '.'.join(parts[:-1]), parts[-1]
module = importlib.import_module(module_path)
return getattr(module, class_name)
except ImportError as e:
msg = "Could not import '%s' for API setting '%s'. %s: %s." % (val, setting_name, e.__class__.__name__, e)
raise ImportError(msg)
class APISettings(object):
"""
A settings object, that allows API settings to be accessed as properties.
For example:
from rest_framework.settings import api_settings
print api_settings.DEFAULT_RENDERER_CLASSES
Any setting with string import paths will be automatically resolved
and return the class, rather than the string literal.
"""
def __init__(self, user_settings=None, defaults=None, import_strings=None):
self.user_settings = user_settings or {}
self.defaults = defaults or {}
self.import_strings = import_strings or ()
def __getattr__(self, attr):
if attr not in self.defaults.keys():
raise AttributeError("Invalid API setting: '%s'" % attr)
try:
# Check if present in user settings
val = self.user_settings[attr]
except KeyError:
# Fall back to defaults
val = self.defaults[attr]
# Coerce import strings into classes
if val and attr in self.import_strings:
val = perform_import(val, attr)
self.validate_setting(attr, val)
# Cache the result
setattr(self, attr, val)
return val
def validate_setting(self, attr, val):
if attr == 'FILTER_BACKEND' and val is not None:
# Make sure we can initialize the class
|
api_settings = APISettings(USER_SETTINGS, DEFAULTS, IMPORT_STRINGS)
| val() | conditional_block |
settings.py | """
Settings for REST framework are all namespaced in the REST_FRAMEWORK setting.
For example your project's `settings.py` file might look like this:
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.YAMLRenderer',
)
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
'rest_framework.parsers.YAMLParser',
)
}
This module provides the `api_setting` object, that is used to access
REST framework settings, checking for user settings first, then falling
back to the defaults.
"""
from __future__ import unicode_literals
from django.conf import settings
from django.utils import importlib
from rest_framework import ISO_8601
from rest_framework.compat import six
USER_SETTINGS = getattr(settings, 'REST_FRAMEWORK', None)
DEFAULTS = {
# Base API policies
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
'rest_framework.parsers.FormParser',
'rest_framework.parsers.MultiPartParser'
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication'
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.AllowAny',
),
'DEFAULT_THROTTLE_CLASSES': (
),
'DEFAULT_CONTENT_NEGOTIATION_CLASS':
'rest_framework.negotiation.DefaultContentNegotiation',
# Generic view behavior
'DEFAULT_MODEL_SERIALIZER_CLASS':
'rest_framework.serializers.ModelSerializer',
'DEFAULT_PAGINATION_SERIALIZER_CLASS':
'rest_framework.pagination.PaginationSerializer',
'DEFAULT_FILTER_BACKENDS': (),
# Throttling
'DEFAULT_THROTTLE_RATES': {
'user': None,
'anon': None,
},
# Pagination
'PAGINATE_BY': None,
'PAGINATE_BY_PARAM': None,
'MAX_PAGINATE_BY': None,
# Authentication
'UNAUTHENTICATED_USER': 'django.contrib.auth.models.AnonymousUser',
'UNAUTHENTICATED_TOKEN': None,
# View configuration
'VIEW_NAME_FUNCTION': 'rest_framework.views.get_view_name',
'VIEW_DESCRIPTION_FUNCTION': 'rest_framework.views.get_view_description',
# Exception handling
'EXCEPTION_HANDLER': 'rest_framework.views.exception_handler',
# Testing
'TEST_REQUEST_RENDERER_CLASSES': (
'rest_framework.renderers.MultiPartRenderer',
'rest_framework.renderers.JSONRenderer'
),
'TEST_REQUEST_DEFAULT_FORMAT': 'multipart',
# Browser enhancements
'FORM_METHOD_OVERRIDE': '_method',
'FORM_CONTENT_OVERRIDE': '_content',
'FORM_CONTENTTYPE_OVERRIDE': '_content_type',
'URL_ACCEPT_OVERRIDE': 'accept',
'URL_FORMAT_OVERRIDE': 'format',
'FORMAT_SUFFIX_KWARG': 'format',
# Input and output formats
'DATE_INPUT_FORMATS': (
ISO_8601,
),
'DATE_FORMAT': None,
'DATETIME_INPUT_FORMATS': (
ISO_8601,
),
'DATETIME_FORMAT': None,
'TIME_INPUT_FORMATS': (
ISO_8601,
),
'TIME_FORMAT': None,
# Pending deprecation
'FILTER_BACKEND': None,
}
# List of settings that may be in string import notation.
IMPORT_STRINGS = (
'DEFAULT_RENDERER_CLASSES',
'DEFAULT_PARSER_CLASSES',
'DEFAULT_AUTHENTICATION_CLASSES',
'DEFAULT_PERMISSION_CLASSES',
'DEFAULT_THROTTLE_CLASSES',
'DEFAULT_CONTENT_NEGOTIATION_CLASS',
'DEFAULT_MODEL_SERIALIZER_CLASS',
'DEFAULT_PAGINATION_SERIALIZER_CLASS',
'DEFAULT_FILTER_BACKENDS',
'EXCEPTION_HANDLER',
'FILTER_BACKEND',
'TEST_REQUEST_RENDERER_CLASSES',
'UNAUTHENTICATED_USER',
'UNAUTHENTICATED_TOKEN',
'VIEW_NAME_FUNCTION',
'VIEW_DESCRIPTION_FUNCTION'
)
def perform_import(val, setting_name):
"""
If the given setting is a string import notation,
then perform the necessary import or imports.
"""
if isinstance(val, six.string_types):
return import_from_string(val, setting_name)
elif isinstance(val, (list, tuple)):
return [import_from_string(item, setting_name) for item in val]
return val
def import_from_string(val, setting_name):
"""
Attempt to import a class from a string representation.
"""
try:
# Nod to tastypie's use of importlib.
parts = val.split('.')
module_path, class_name = '.'.join(parts[:-1]), parts[-1]
module = importlib.import_module(module_path)
return getattr(module, class_name)
except ImportError as e:
msg = "Could not import '%s' for API setting '%s'. %s: %s." % (val, setting_name, e.__class__.__name__, e)
raise ImportError(msg)
class APISettings(object):
"""
A settings object that allows API settings to be accessed as properties.
For example:
from rest_framework.settings import api_settings
print api_settings.DEFAULT_RENDERER_CLASSES
Any setting with string import paths will be automatically resolved
and return the class, rather than the string literal.
"""
def __init__(self, user_settings=None, defaults=None, import_strings=None):
self.user_settings = user_settings or {}
self.defaults = defaults or {}
self.import_strings = import_strings or ()
def __getattr__(self, attr):
if attr not in self.defaults.keys():
raise AttributeError("Invalid API setting: '%s'" % attr)
try:
# Check if present in user settings
val = self.user_settings[attr]
except KeyError:
# Fall back to defaults
val = self.defaults[attr]
# Coerce import strings into classes
if val and attr in self.import_strings:
val = perform_import(val, attr)
self.validate_setting(attr, val)
# Cache the result
setattr(self, attr, val)
return val
def validate_setting(self, attr, val):
|
api_settings = APISettings(USER_SETTINGS, DEFAULTS, IMPORT_STRINGS)
| if attr == 'FILTER_BACKEND' and val is not None:
# Make sure we can initialize the class
val() | identifier_body |
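For orientation, here is a minimal consumption sketch of the settings object defined above; it uses only names visible in the snippet, and the REST_FRAMEWORK override shown in the comment is a hypothetical example value rather than part of the original file.
# Sketch: a project's Django settings module might declare
#     REST_FRAMEWORK = {'DEFAULT_RENDERER_CLASSES': ('rest_framework.renderers.JSONRenderer',)}
# and application code then reads the resolved values through api_settings:
from rest_framework.settings import api_settings
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES  # resolved classes, not dotted strings
page_size = api_settings.PAGINATE_BY                      # plain values pass through unchanged
# Unknown names raise AttributeError("Invalid API setting: ..."); resolved values are cached on the instance.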
settings.py | """
Settings for REST framework are all namespaced in the REST_FRAMEWORK setting.
For example your project's `settings.py` file might look like this:
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.YAMLRenderer',
)
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
'rest_framework.parsers.YAMLParser',
)
}
This module provides the `api_settings` object, which is used to access
REST framework settings, checking for user settings first, then falling
back to the defaults.
"""
from __future__ import unicode_literals
from django.conf import settings
from django.utils import importlib
from rest_framework import ISO_8601
from rest_framework.compat import six
USER_SETTINGS = getattr(settings, 'REST_FRAMEWORK', None)
DEFAULTS = {
# Base API policies
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
'rest_framework.parsers.FormParser',
'rest_framework.parsers.MultiPartParser'
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication'
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.AllowAny',
),
'DEFAULT_THROTTLE_CLASSES': (
),
'DEFAULT_CONTENT_NEGOTIATION_CLASS':
'rest_framework.negotiation.DefaultContentNegotiation',
# Generic view behavior
'DEFAULT_MODEL_SERIALIZER_CLASS':
'rest_framework.serializers.ModelSerializer',
'DEFAULT_PAGINATION_SERIALIZER_CLASS':
'rest_framework.pagination.PaginationSerializer',
'DEFAULT_FILTER_BACKENDS': (),
# Throttling
'DEFAULT_THROTTLE_RATES': {
'user': None,
'anon': None,
},
# Pagination
'PAGINATE_BY': None,
'PAGINATE_BY_PARAM': None,
'MAX_PAGINATE_BY': None,
# Authentication
'UNAUTHENTICATED_USER': 'django.contrib.auth.models.AnonymousUser',
'UNAUTHENTICATED_TOKEN': None,
# View configuration
'VIEW_NAME_FUNCTION': 'rest_framework.views.get_view_name',
'VIEW_DESCRIPTION_FUNCTION': 'rest_framework.views.get_view_description',
# Exception handling
'EXCEPTION_HANDLER': 'rest_framework.views.exception_handler',
# Testing
'TEST_REQUEST_RENDERER_CLASSES': (
'rest_framework.renderers.MultiPartRenderer',
'rest_framework.renderers.JSONRenderer'
),
'TEST_REQUEST_DEFAULT_FORMAT': 'multipart',
# Browser enhancements
'FORM_METHOD_OVERRIDE': '_method',
'FORM_CONTENT_OVERRIDE': '_content',
'FORM_CONTENTTYPE_OVERRIDE': '_content_type',
'URL_ACCEPT_OVERRIDE': 'accept',
'URL_FORMAT_OVERRIDE': 'format',
'FORMAT_SUFFIX_KWARG': 'format',
# Input and output formats
'DATE_INPUT_FORMATS': (
ISO_8601,
),
'DATE_FORMAT': None,
'DATETIME_INPUT_FORMATS': (
ISO_8601,
),
'DATETIME_FORMAT': None,
'TIME_INPUT_FORMATS': (
ISO_8601,
),
'TIME_FORMAT': None,
# Pending deprecation
'FILTER_BACKEND': None,
}
# List of settings that may be in string import notation.
IMPORT_STRINGS = (
'DEFAULT_RENDERER_CLASSES',
'DEFAULT_PARSER_CLASSES',
'DEFAULT_AUTHENTICATION_CLASSES',
'DEFAULT_PERMISSION_CLASSES',
'DEFAULT_THROTTLE_CLASSES',
'DEFAULT_CONTENT_NEGOTIATION_CLASS',
'DEFAULT_MODEL_SERIALIZER_CLASS',
'DEFAULT_PAGINATION_SERIALIZER_CLASS',
'DEFAULT_FILTER_BACKENDS',
'EXCEPTION_HANDLER',
'FILTER_BACKEND',
'TEST_REQUEST_RENDERER_CLASSES',
'UNAUTHENTICATED_USER',
'UNAUTHENTICATED_TOKEN',
'VIEW_NAME_FUNCTION',
'VIEW_DESCRIPTION_FUNCTION'
)
def perform_import(val, setting_name):
"""
If the given setting is a string import notation,
then perform the necessary import or imports.
"""
if isinstance(val, six.string_types):
return import_from_string(val, setting_name)
elif isinstance(val, (list, tuple)):
return [import_from_string(item, setting_name) for item in val]
return val
def | (val, setting_name):
"""
Attempt to import a class from a string representation.
"""
try:
# Nod to tastypie's use of importlib.
parts = val.split('.')
module_path, class_name = '.'.join(parts[:-1]), parts[-1]
module = importlib.import_module(module_path)
return getattr(module, class_name)
except ImportError as e:
msg = "Could not import '%s' for API setting '%s'. %s: %s." % (val, setting_name, e.__class__.__name__, e)
raise ImportError(msg)
class APISettings(object):
"""
A settings object that allows API settings to be accessed as properties.
For example:
from rest_framework.settings import api_settings
print api_settings.DEFAULT_RENDERER_CLASSES
Any setting with string import paths will be automatically resolved
and return the class, rather than the string literal.
"""
def __init__(self, user_settings=None, defaults=None, import_strings=None):
self.user_settings = user_settings or {}
self.defaults = defaults or {}
self.import_strings = import_strings or ()
def __getattr__(self, attr):
if attr not in self.defaults.keys():
raise AttributeError("Invalid API setting: '%s'" % attr)
try:
# Check if present in user settings
val = self.user_settings[attr]
except KeyError:
# Fall back to defaults
val = self.defaults[attr]
# Coerce import strings into classes
if val and attr in self.import_strings:
val = perform_import(val, attr)
self.validate_setting(attr, val)
# Cache the result
setattr(self, attr, val)
return val
def validate_setting(self, attr, val):
if attr == 'FILTER_BACKEND' and val is not None:
# Make sure we can initialize the class
val()
api_settings = APISettings(USER_SETTINGS, DEFAULTS, IMPORT_STRINGS)
| import_from_string | identifier_name |
type_name.rs | #![feature(core, core_intrinsics)]
extern crate core;
#[cfg(test)]
mod tests {
use core::intrinsics::type_name;
// pub fn type_name<T>() -> &'static str;
macro_rules! type_name_test {
($T:ty, $message:expr) => ({
let message: &'static str = unsafe { type_name::<$T>() };
assert_eq!(message, $message);
})
}
#[test]
fn | () {
type_name_test!( u8, "u8" );
type_name_test!( u16, "u16" );
type_name_test!( u32, "u32" );
type_name_test!( u64, "u64" );
type_name_test!( i8, "i8" );
type_name_test!( i16, "i16" );
type_name_test!( i32, "i32" );
type_name_test!( i64, "i64" );
type_name_test!( f32, "f32" );
type_name_test!( f64, "f64" );
type_name_test!( [u8; 0], "[u8; 0]" );
type_name_test!( [u8; 68], "[u8; 68]" );
type_name_test!( [u32; 0], "[u32; 0]" );
type_name_test!( [u32; 68], "[u32; 68]" );
type_name_test!( (u8,), "(u8,)" );
type_name_test!( (u8, u16), "(u8, u16)" );
type_name_test!( (u8, u16, u32), "(u8, u16, u32)" );
type_name_test!( (u8, u16, u32, u64), "(u8, u16, u32, u64)" );
}
}
| type_name_test1 | identifier_name |
type_name.rs | #![feature(core, core_intrinsics)]
extern crate core;
#[cfg(test)]
mod tests {
use core::intrinsics::type_name;
// pub fn type_name<T>() -> &'static str;
macro_rules! type_name_test {
($T:ty, $message:expr) => ({
let message: &'static str = unsafe { type_name::<$T>() };
assert_eq!(message, $message);
})
}
#[test]
fn type_name_test1() | type_name_test!( (u8, u16), "(u8, u16)" );
type_name_test!( (u8, u16, u32), "(u8, u16, u32)" );
type_name_test!( (u8, u16, u32, u64), "(u8, u16, u32, u64)" );
}
}
| {
type_name_test!( u8, "u8" );
type_name_test!( u16, "u16" );
type_name_test!( u32, "u32" );
type_name_test!( u64, "u64" );
type_name_test!( i8, "i8" );
type_name_test!( i16, "i16" );
type_name_test!( i32, "i32" );
type_name_test!( i64, "i64" );
type_name_test!( f32, "f32" );
type_name_test!( f64, "f64" );
type_name_test!( [u8; 0], "[u8; 0]" );
type_name_test!( [u8; 68], "[u8; 68]" );
type_name_test!( [u32; 0], "[u32; 0]" );
type_name_test!( [u32; 68], "[u32; 68]" );
type_name_test!( (u8,), "(u8,)" ); | identifier_body |
type_name.rs | #![feature(core, core_intrinsics)]
extern crate core;
#[cfg(test)]
mod tests {
use core::intrinsics::type_name;
// pub fn type_name<T>() -> &'static str;
macro_rules! type_name_test {
($T:ty, $message:expr) => ({
let message: &'static str = unsafe { type_name::<$T>() };
assert_eq!(message, $message);
})
}
#[test]
fn type_name_test1() { | type_name_test!( i8, "i8" );
type_name_test!( i16, "i16" );
type_name_test!( i32, "i32" );
type_name_test!( i64, "i64" );
type_name_test!( f32, "f32" );
type_name_test!( f64, "f64" );
type_name_test!( [u8; 0], "[u8; 0]" );
type_name_test!( [u8; 68], "[u8; 68]" );
type_name_test!( [u32; 0], "[u32; 0]" );
type_name_test!( [u32; 68], "[u32; 68]" );
type_name_test!( (u8,), "(u8,)" );
type_name_test!( (u8, u16), "(u8, u16)" );
type_name_test!( (u8, u16, u32), "(u8, u16, u32)" );
type_name_test!( (u8, u16, u32, u64), "(u8, u16, u32, u64)" );
}
} | type_name_test!( u8, "u8" );
type_name_test!( u16, "u16" );
type_name_test!( u32, "u32" );
type_name_test!( u64, "u64" ); | random_line_split |
pike.py | ###############################################################################
# Name: pike.py #
# Purpose: Define highlighting/syntax for Pike programming language #
# Author: Cody Precord <[email protected]> #
# Copyright: (c) 2007 Cody Precord <[email protected]> #
# License: wxWindows License #
###############################################################################
"""
FILE: pike.py
@summary: Defines syntax and highlighting settings for the Pike programming
language. Pike is very similar in form to C/CPP so the Cpp lexer is
used to provide the highlighting settings.
"""
__author__ = "Cody Precord <[email protected]>"
__svnid__ = "$Id: pike.py 55174 2008-08-22 15:12:27Z CJP $"
__revision__ = "$Revision: 55174 $"
#-----------------------------------------------------------------------------#
# Local Imports
import synglob
import cpp
#-----------------------------------------------------------------------------#
#---- Keyword Definitions ----#
PIKE_KW = (0, "goto break return continue case default if else switch while "
"foreach do gauge destruct lambda inherit import typeof catch "
"for inline nomask")
PIKE_TYPE = (1, "private protected public static "
"int string void float mapping array multiset mixed program "
"object function")
PIKE_DOC = tuple(cpp.DOC_KEYWORDS)
#---- End Keyword Definitions ----#
#---- Syntax Style Specs ----#
SYNTAX_ITEMS = list(cpp.SYNTAX_ITEMS)
#---- Extra Properties ----#
# Fetched from cpp module on request
#-----------------------------------------------------------------------------#
#---- Required Module Functions ----# | @keyword lang_id: used to select specific subset of keywords
"""
if lang_id == synglob.ID_LANG_PIKE:
return [PIKE_KW, PIKE_TYPE, PIKE_DOC]
else:
return list()
def SyntaxSpec(lang_id=0):
"""Syntax Specifications
@keyword lang_id: used for selecting a specific subset of syntax specs
"""
if lang_id == synglob.ID_LANG_PIKE:
return SYNTAX_ITEMS
else:
return list()
def Properties(lang_id=0):
"""Returns a list of Extra Properties to set
@keyword lang_id: used to select a specific set of properties
"""
if lang_id == synglob.ID_LANG_PIKE:
return cpp.Properties(synglob.ID_LANG_CPP)
else:
return list()
def CommentPattern(lang_id=0):
"""Returns a list of characters used to comment a block of code
@keyword lang_id: used to select a specific subset of comment pattern(s)
"""
if lang_id == synglob.ID_LANG_PIKE:
return cpp.CommentPattern(synglob.ID_LANG_CPP)
else:
return list()
#---- End Required Module Functions ----#
AutoIndenter = cpp.AutoIndenter
#---- Syntax Modules Internal Functions ----#
def KeywordString():
"""Returns the specified Keyword String
@note: not used by most modules
"""
return None
#---- End Syntax Modules Internal Functions ----# | def Keywords(lang_id=0):
"""Returns Specified Keywords List | random_line_split |
pike.py | ###############################################################################
# Name: pike.py #
# Purpose: Define highlighting/syntax for Pike programming language #
# Author: Cody Precord <[email protected]> #
# Copyright: (c) 2007 Cody Precord <[email protected]> #
# License: wxWindows License #
###############################################################################
"""
FILE: pike.py
@summary: Defines syntax and highlighting settings for the Pike programming
language. Pike is very similar in form to C/CPP so the Cpp lexer is
used to provide the highlighting settings.
"""
__author__ = "Cody Precord <[email protected]>"
__svnid__ = "$Id: pike.py 55174 2008-08-22 15:12:27Z CJP $"
__revision__ = "$Revision: 55174 $"
#-----------------------------------------------------------------------------#
# Local Imports
import synglob
import cpp
#-----------------------------------------------------------------------------#
#---- Keyword Definitions ----#
PIKE_KW = (0, "goto break return continue case default if else switch while "
"foreach do gauge destruct lambda inherit import typeof catch "
"for inline nomask")
PIKE_TYPE = (1, "private protected public static "
"int string void float mapping array multiset mixed program "
"object function")
PIKE_DOC = tuple(cpp.DOC_KEYWORDS)
#---- End Keyword Definitions ----#
#---- Syntax Style Specs ----#
SYNTAX_ITEMS = list(cpp.SYNTAX_ITEMS)
#---- Extra Properties ----#
# Fetched from cpp module on request
#-----------------------------------------------------------------------------#
#---- Required Module Functions ----#
def Keywords(lang_id=0):
"""Returns Specified Keywords List
@keyword lang_id: used to select specific subset of keywords
"""
if lang_id == synglob.ID_LANG_PIKE:
return [PIKE_KW, PIKE_TYPE, PIKE_DOC]
else:
return list()
def SyntaxSpec(lang_id=0):
"""Syntax Specifications
@keyword lang_id: used for selecting a specific subset of syntax specs
"""
if lang_id == synglob.ID_LANG_PIKE:
return SYNTAX_ITEMS
else:
return list()
def Properties(lang_id=0):
"""Returns a list of Extra Properties to set
@keyword lang_id: used to select a specific set of properties
"""
if lang_id == synglob.ID_LANG_PIKE:
|
else:
return list()
def CommentPattern(lang_id=0):
"""Returns a list of characters used to comment a block of code
@keyword lang_id: used to select a specific subset of comment pattern(s)
"""
if lang_id == synglob.ID_LANG_PIKE:
return cpp.CommentPattern(synglob.ID_LANG_CPP)
else:
return list()
#---- End Required Module Functions ----#
AutoIndenter = cpp.AutoIndenter
#---- Syntax Modules Internal Functions ----#
def KeywordString():
"""Returns the specified Keyword String
@note: not used by most modules
"""
return None
#---- End Syntax Modules Internal Functions ----#
| return cpp.Properties(synglob.ID_LANG_CPP) | conditional_block |
pike.py | ###############################################################################
# Name: pike.py #
# Purpose: Define highlighting/syntax for Pike programming language #
# Author: Cody Precord <[email protected]> #
# Copyright: (c) 2007 Cody Precord <[email protected]> #
# License: wxWindows License #
###############################################################################
"""
FILE: pike.py
@summary: Defines syntax and highlighting settings for the Pike programming
language. Pike is very similar in form to C/CPP so the Cpp lexer is
used to provide the highlighting settings.
"""
__author__ = "Cody Precord <[email protected]>"
__svnid__ = "$Id: pike.py 55174 2008-08-22 15:12:27Z CJP $"
__revision__ = "$Revision: 55174 $"
#-----------------------------------------------------------------------------#
# Local Imports
import synglob
import cpp
#-----------------------------------------------------------------------------#
#---- Keyword Definitions ----#
PIKE_KW = (0, "goto break return continue case default if else switch while "
"foreach do gauge destruct lambda inherit import typeof catch "
"for inline nomask")
PIKE_TYPE = (1, "private protected public static "
"int string void float mapping array multiset mixed program "
"object function")
PIKE_DOC = tuple(cpp.DOC_KEYWORDS)
#---- End Keyword Definitions ----#
#---- Syntax Style Specs ----#
SYNTAX_ITEMS = list(cpp.SYNTAX_ITEMS)
#---- Extra Properties ----#
# Fetched from cpp module on request
#-----------------------------------------------------------------------------#
#---- Required Module Functions ----#
def Keywords(lang_id=0):
"""Returns Specified Keywords List
@keyword lang_id: used to select specific subset of keywords
"""
if lang_id == synglob.ID_LANG_PIKE:
return [PIKE_KW, PIKE_TYPE, PIKE_DOC]
else:
return list()
def SyntaxSpec(lang_id=0):
|
def Properties(lang_id=0):
"""Returns a list of Extra Properties to set
@keyword lang_id: used to select a specific set of properties
"""
if lang_id == synglob.ID_LANG_PIKE:
return cpp.Properties(synglob.ID_LANG_CPP)
else:
return list()
def CommentPattern(lang_id=0):
"""Returns a list of characters used to comment a block of code
@keyword lang_id: used to select a specific subset of comment pattern(s)
"""
if lang_id == synglob.ID_LANG_PIKE:
return cpp.CommentPattern(synglob.ID_LANG_CPP)
else:
return list()
#---- End Required Module Functions ----#
AutoIndenter = cpp.AutoIndenter
#---- Syntax Modules Internal Functions ----#
def KeywordString():
"""Returns the specified Keyword String
@note: not used by most modules
"""
return None
#---- End Syntax Modules Internal Functions ----#
| """Syntax Specifications
@keyword lang_id: used for selecting a specific subset of syntax specs
"""
if lang_id == synglob.ID_LANG_PIKE:
return SYNTAX_ITEMS
else:
return list() | identifier_body |
pike.py | ###############################################################################
# Name: pike.py #
# Purpose: Define highlighting/syntax for Pike programming language #
# Author: Cody Precord <[email protected]> #
# Copyright: (c) 2007 Cody Precord <[email protected]> #
# License: wxWindows License #
###############################################################################
"""
FILE: pike.py
@summary: Defines syntax and highlighting settings for the Pike programming
language. Pike is very similar in form to C/CPP so the Cpp lexer is
used to provide the highlighting settings.
"""
__author__ = "Cody Precord <[email protected]>"
__svnid__ = "$Id: pike.py 55174 2008-08-22 15:12:27Z CJP $"
__revision__ = "$Revision: 55174 $"
#-----------------------------------------------------------------------------#
# Local Imports
import synglob
import cpp
#-----------------------------------------------------------------------------#
#---- Keyword Definitions ----#
PIKE_KW = (0, "goto break return continue case default if else switch while "
"foreach do gauge destruct lambda inherit import typeof catch "
"for inline nomask")
PIKE_TYPE = (1, "private protected public static "
"int string void float mapping array multiset mixed program "
"object function")
PIKE_DOC = tuple(cpp.DOC_KEYWORDS)
#---- End Keyword Definitions ----#
#---- Syntax Style Specs ----#
SYNTAX_ITEMS = list(cpp.SYNTAX_ITEMS)
#---- Extra Properties ----#
# Fetched from cpp module on request
#-----------------------------------------------------------------------------#
#---- Required Module Functions ----#
def | (lang_id=0):
"""Returns Specified Keywords List
@keyword lang_id: used to select specific subset of keywords
"""
if lang_id == synglob.ID_LANG_PIKE:
return [PIKE_KW, PIKE_TYPE, PIKE_DOC]
else:
return list()
def SyntaxSpec(lang_id=0):
"""Syntax Specifications
@keyword lang_id: used for selecting a specific subset of syntax specs
"""
if lang_id == synglob.ID_LANG_PIKE:
return SYNTAX_ITEMS
else:
return list()
def Properties(lang_id=0):
"""Returns a list of Extra Properties to set
@keyword lang_id: used to select a specific set of properties
"""
if lang_id == synglob.ID_LANG_PIKE:
return cpp.Properties(synglob.ID_LANG_CPP)
else:
return list()
def CommentPattern(lang_id=0):
"""Returns a list of characters used to comment a block of code
@keyword lang_id: used to select a specific subset of comment pattern(s)
"""
if lang_id == synglob.ID_LANG_PIKE:
return cpp.CommentPattern(synglob.ID_LANG_CPP)
else:
return list()
#---- End Required Module Functions ----#
AutoIndenter = cpp.AutoIndenter
#---- Syntax Modules Internal Functions ----#
def KeywordString():
"""Returns the specified Keyword String
@note: not used by most modules
"""
return None
#---- End Syntax Modules Internal Functions ----#
| Keywords | identifier_name |
image_interpolation_params.py | # Copyright 2018 The Lucid Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import tensorflow as tf
from lucid.optvis.param import lowres_tensor
def | (n_objectives=6, n_interp_steps=5, width=128,
channels=3):
"""A paramaterization for interpolating between each pair of N objectives.
Sometimes you want to interpolate between optimizing a bunch of objectives,
in a paramaterization that encourages images to align.
Args:
n_objectives: number of objectives you want interpolate between
n_interp_steps: number of interpolation steps
width: width of intepolated images
channel
Returns:
A [n_objectives, n_objectives, n_interp_steps, width, width, channel]
shaped tensor, t, where the final [width, width, channel] should be
seen as images, such that the following properties hold:
t[a, b] = t[b, a, ::-1]
t[a, i, 0] = t[a, j, 0] for all i, j
t[a, a, i] = t[a, a, j] for all i, j
t[a, b, i] = t[b, a, -i] for all i
"""
N, M, W, Ch = n_objectives, n_interp_steps, width, channels
const_term = sum([lowres_tensor([W, W, Ch], [W//k, W//k, Ch])
for k in [1, 2, 4, 8]])
const_term = tf.reshape(const_term, [1, 1, 1, W, W, Ch])
example_interps = [
sum([lowres_tensor([M, W, W, Ch], [2, W//k, W//k, Ch])
for k in [1, 2, 4, 8]])
for _ in range(N)]
example_basis = []
for n in range(N):
col = []
for m in range(N):
interp = example_interps[n] + example_interps[m][::-1]
col.append(interp)
example_basis.append(col)
interp_basis = []
for n in range(N):
col = [interp_basis[m][N-n][::-1] for m in range(n)]
col.append(tf.zeros([M, W, W, 3]))
for m in range(n+1, N):
interp = sum([lowres_tensor([M, W, W, Ch], [M, W//k, W//k, Ch])
for k in [1, 2]])
col.append(interp)
interp_basis.append(col)
basis = []
for n in range(N):
col_ex = tf.stack(example_basis[n])
col_in = tf.stack(interp_basis[n])
basis.append(col_ex + col_in)
basis = tf.stack(basis)
return basis + const_term
| multi_interpolation_basis | identifier_name |
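The docstring above is dense, so a small usage sketch may help; it relies only on the function defined in the snippet plus TensorFlow 1.x session semantics (an assumption based on the imports shown), builds a small basis, and evaluates it to a NumPy array.
# Sketch: build a small interpolation basis and inspect it.
basis = multi_interpolation_basis(n_objectives=3, n_interp_steps=4, width=32, channels=3)
print(basis.shape)  # (3, 3, 4, 32, 32, 3) == (N, N, M, W, W, Ch)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # lowres_tensor creates TF variables under the hood
    frames = sess.run(basis)  # frames[a, b] is the image sequence interpolating objective a -> b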
image_interpolation_params.py | # Copyright 2018 The Lucid Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import tensorflow as tf
from lucid.optvis.param import lowres_tensor
def multi_interpolation_basis(n_objectives=6, n_interp_steps=5, width=128,
channels=3):
"""A paramaterization for interpolating between each pair of N objectives.
Sometimes you want to interpolate between optimizing a bunch of objectives,
in a paramaterization that encourages images to align.
Args:
n_objectives: number of objectives you want interpolate between
n_interp_steps: number of interpolation steps
width: width of intepolated images
channel
Returns:
A [n_objectives, n_objectives, n_interp_steps, width, width, channel]
shaped tensor, t, where the final [width, width, channel] should be
seen as images, such that the following properties hold:
t[a, b] = t[b, a, ::-1]
t[a, i, 0] = t[a, j, 0] for all i, j
t[a, a, i] = t[a, a, j] for all i, j
t[a, b, i] = t[b, a, -i] for all i
"""
N, M, W, Ch = n_objectives, n_interp_steps, width, channels
const_term = sum([lowres_tensor([W, W, Ch], [W//k, W//k, Ch])
for k in [1, 2, 4, 8]])
const_term = tf.reshape(const_term, [1, 1, 1, W, W, Ch])
example_interps = [
sum([lowres_tensor([M, W, W, Ch], [2, W//k, W//k, Ch])
for k in [1, 2, 4, 8]])
for _ in range(N)]
example_basis = []
for n in range(N):
|
interp_basis = []
for n in range(N):
col = [interp_basis[m][N-n][::-1] for m in range(n)]
col.append(tf.zeros([M, W, W, 3]))
for m in range(n+1, N):
interp = sum([lowres_tensor([M, W, W, Ch], [M, W//k, W//k, Ch])
for k in [1, 2]])
col.append(interp)
interp_basis.append(col)
basis = []
for n in range(N):
col_ex = tf.stack(example_basis[n])
col_in = tf.stack(interp_basis[n])
basis.append(col_ex + col_in)
basis = tf.stack(basis)
return basis + const_term
| col = []
for m in range(N):
interp = example_interps[n] + example_interps[m][::-1]
col.append(interp)
example_basis.append(col) | conditional_block |
image_interpolation_params.py | # Copyright 2018 The Lucid Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import tensorflow as tf
from lucid.optvis.param import lowres_tensor
def multi_interpolation_basis(n_objectives=6, n_interp_steps=5, width=128,
channels=3):
"""A paramaterization for interpolating between each pair of N objectives.
Sometimes you want to interpolate between optimizing a bunch of objectives,
in a paramaterization that encourages images to align.
Args:
n_objectives: number of objectives you want interpolate between
n_interp_steps: number of interpolation steps
width: width of intepolated images
channel
Returns:
A [n_objectives, n_objectives, n_interp_steps, width, width, channel]
shaped tensor, t, where the final [width, width, channel] should be
seen as images, such that the following properties hold:
t[a, b] = t[b, a, ::-1]
t[a, i, 0] = t[a, j, 0] for all i, j
t[a, a, i] = t[a, a, j] for all i, j
t[a, b, i] = t[b, a, -i] for all i
"""
N, M, W, Ch = n_objectives, n_interp_steps, width, channels
const_term = sum([lowres_tensor([W, W, Ch], [W//k, W//k, Ch])
for k in [1, 2, 4, 8]])
const_term = tf.reshape(const_term, [1, 1, 1, W, W, Ch])
example_interps = [
sum([lowres_tensor([M, W, W, Ch], [2, W//k, W//k, Ch])
for k in [1, 2, 4, 8]])
for _ in range(N)]
example_basis = []
for n in range(N):
col = []
for m in range(N):
interp = example_interps[n] + example_interps[m][::-1]
col.append(interp)
example_basis.append(col)
interp_basis = []
for n in range(N):
col = [interp_basis[m][N-n][::-1] for m in range(n)]
col.append(tf.zeros([M, W, W, 3]))
for m in range(n+1, N):
interp = sum([lowres_tensor([M, W, W, Ch], [M, W//k, W//k, Ch])
for k in [1, 2]])
col.append(interp)
interp_basis.append(col)
basis = []
for n in range(N):
col_ex = tf.stack(example_basis[n])
col_in = tf.stack(interp_basis[n])
basis.append(col_ex + col_in)
basis = tf.stack(basis)
return basis + const_term | # http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, | random_line_split |
image_interpolation_params.py | # Copyright 2018 The Lucid Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import tensorflow as tf
from lucid.optvis.param import lowres_tensor
def multi_interpolation_basis(n_objectives=6, n_interp_steps=5, width=128,
channels=3):
|
"""
N, M, W, Ch = n_objectives, n_interp_steps, width, channels
const_term = sum([lowres_tensor([W, W, Ch], [W//k, W//k, Ch])
for k in [1, 2, 4, 8]])
const_term = tf.reshape(const_term, [1, 1, 1, W, W, Ch])
example_interps = [
sum([lowres_tensor([M, W, W, Ch], [2, W//k, W//k, Ch])
for k in [1, 2, 4, 8]])
for _ in range(N)]
example_basis = []
for n in range(N):
col = []
for m in range(N):
interp = example_interps[n] + example_interps[m][::-1]
col.append(interp)
example_basis.append(col)
interp_basis = []
for n in range(N):
col = [interp_basis[m][N-n][::-1] for m in range(n)]
col.append(tf.zeros([M, W, W, 3]))
for m in range(n+1, N):
interp = sum([lowres_tensor([M, W, W, Ch], [M, W//k, W//k, Ch])
for k in [1, 2]])
col.append(interp)
interp_basis.append(col)
basis = []
for n in range(N):
col_ex = tf.stack(example_basis[n])
col_in = tf.stack(interp_basis[n])
basis.append(col_ex + col_in)
basis = tf.stack(basis)
return basis + const_term
| """A paramaterization for interpolating between each pair of N objectives.
Sometimes you want to interpolate between optimizing a bunch of objectives,
in a paramaterization that encourages images to align.
Args:
n_objectives: number of objectives you want interpolate between
n_interp_steps: number of interpolation steps
width: width of intepolated images
channel
Returns:
A [n_objectives, n_objectives, n_interp_steps, width, width, channel]
shaped tensor, t, where the final [width, width, channel] should be
seen as images, such that the following properties hold:
t[a, b] = t[b, a, ::-1]
t[a, i, 0] = t[a, j, 0] for all i, j
t[a, a, i] = t[a, a, j] for all i, j
t[a, b, i] = t[b, a, -i] for all i | identifier_body |
main.rs | // Copyright 2013 The GLFW-RS Developers. For a full listing of the authors,
// refer to the AUTHORS file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern mod glfw;
use std::libc;
use std::unstable::finally::Finally;
#[start]
fn start(argc: int, argv: **u8, crate_map: *u8) -> int {
// GLFW must run on the main platform thread
std::rt::start_on_main_thread(argc, argv, crate_map, main)
}
fn main() {
glfw::set_error_callback(error_callback);
if glfw::init().is_err() {
fail!(~"Failed to initialize GLFW");
} else {
(||{
let window = glfw::Window::create(300, 300, "Hello this is window", glfw::Windowed).unwrap();
window.set_key_callback(key_callback);
window.make_context_current();
while !window.should_close() {
window.poll_events();
glfw::poll_events();
}
// Use `finally` to ensure that `glfw::terminate` is called even if a failure occurs
}).finally(glfw::terminate);
}
}
fn key_callback(window: &glfw::Window, key: libc::c_int, _: libc::c_int, action: libc::c_int, _: glfw::KeyMods) {
if action == glfw::PRESS && key == glfw::KEY_ESCAPE {
window.set_should_close(true);
}
}
fn | (_: libc::c_int, description: ~str) {
println(fmt!("GLFW Error: %s", description));
}
| error_callback | identifier_name |
main.rs | // Copyright 2013 The GLFW-RS Developers. For a full listing of the authors,
// refer to the AUTHORS file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern mod glfw;
use std::libc;
use std::unstable::finally::Finally;
#[start]
fn start(argc: int, argv: **u8, crate_map: *u8) -> int {
// GLFW must run on the main platform thread
std::rt::start_on_main_thread(argc, argv, crate_map, main)
}
fn main() |
fn key_callback(window: &glfw::Window, key: libc::c_int, _: libc::c_int, action: libc::c_int, _: glfw::KeyMods) {
if action == glfw::PRESS && key == glfw::KEY_ESCAPE {
window.set_should_close(true);
}
}
fn error_callback(_: libc::c_int, description: ~str) {
println(fmt!("GLFW Error: %s", description));
}
| {
glfw::set_error_callback(error_callback);
if glfw::init().is_err() {
fail!(~"Failed to initialize GLFW");
} else {
(||{
let window = glfw::Window::create(300, 300, "Hello this is window", glfw::Windowed).unwrap();
window.set_key_callback(key_callback);
window.make_context_current();
while !window.should_close() {
window.poll_events();
glfw::poll_events();
}
// Use `finally` to ensure that `glfw::terminate` is called even if a failure occurs
}).finally(glfw::terminate);
}
} | identifier_body |
main.rs | // Copyright 2013 The GLFW-RS Developers. For a full listing of the authors,
// refer to the AUTHORS file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern mod glfw;
use std::libc;
use std::unstable::finally::Finally;
#[start]
fn start(argc: int, argv: **u8, crate_map: *u8) -> int {
// GLFW must run on the main platform thread
std::rt::start_on_main_thread(argc, argv, crate_map, main)
}
fn main() {
glfw::set_error_callback(error_callback);
if glfw::init().is_err() {
fail!(~"Failed to initialize GLFW"); | } else {
(||{
let window = glfw::Window::create(300, 300, "Hello this is window", glfw::Windowed).unwrap();
window.set_key_callback(key_callback);
window.make_context_current();
while !window.should_close() {
window.poll_events();
glfw::poll_events();
}
// Use `finally` to ensure that `glfw::terminate` is called even if a failure occurs
}).finally(glfw::terminate);
}
}
fn key_callback(window: &glfw::Window, key: libc::c_int, _: libc::c_int, action: libc::c_int, _: glfw::KeyMods) {
if action == glfw::PRESS && key == glfw::KEY_ESCAPE {
window.set_should_close(true);
}
}
fn error_callback(_: libc::c_int, description: ~str) {
println(fmt!("GLFW Error: %s", description));
} | random_line_split |
|
main.rs | // Copyright 2013 The GLFW-RS Developers. For a full listing of the authors,
// refer to the AUTHORS file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern mod glfw;
use std::libc;
use std::unstable::finally::Finally;
#[start]
fn start(argc: int, argv: **u8, crate_map: *u8) -> int {
// GLFW must run on the main platform thread
std::rt::start_on_main_thread(argc, argv, crate_map, main)
}
fn main() {
glfw::set_error_callback(error_callback);
if glfw::init().is_err() | else {
(||{
let window = glfw::Window::create(300, 300, "Hello this is window", glfw::Windowed).unwrap();
window.set_key_callback(key_callback);
window.make_context_current();
while !window.should_close() {
window.poll_events();
glfw::poll_events();
}
// Use `finally` to ensure that `glfw::terminate` is called even if a failure occurs
}).finally(glfw::terminate);
}
}
fn key_callback(window: &glfw::Window, key: libc::c_int, _: libc::c_int, action: libc::c_int, _: glfw::KeyMods) {
if action == glfw::PRESS && key == glfw::KEY_ESCAPE {
window.set_should_close(true);
}
}
fn error_callback(_: libc::c_int, description: ~str) {
println(fmt!("GLFW Error: %s", description));
}
| {
fail!(~"Failed to initialize GLFW");
} | conditional_block |
annulus_distribution.rs | //! Implementation of a uniform distribution of points on a two-dimensional
//! annulus.
use rand::distributions::Distribution;
use rand::Rng;
use std::f64::consts::PI;
pub use Point;
/// The uniform distribution of 2D points on an annulus `{x: r_1 <= |x| <= r_2}`.
pub struct AnnulusDist {
r1_sq: f64,
r2_sq: f64,
}
impl AnnulusDist {
/// Construct a new `AnnulusDist` with the given inner and outer radius
/// `r1`, `r2`. Panics if not `0 < r1 < r2`.
pub fn new(r1: f64, r2: f64) -> AnnulusDist {
assert!(0. < r1, "AnnulusDist::new called with `r1 <= 0`");
assert!(r1 < r2, "AnnulusDist::new called with `r2 <= r1`");
AnnulusDist { | }
impl Distribution<Point> for AnnulusDist {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Point {
// For points to be uniformly distributed in the annulus, the area of the disk with radius
// equal to the distance of the point from the origin is distributed uniformly between r₁²
// and r₂².
let r = (self.r1_sq + rng.gen::<f64>() * (self.r2_sq - self.r1_sq)).sqrt();
// The angle is uniform between 0 and 2π.
let (y, x) = (2. * PI * rng.gen::<f64>()).sin_cos();
Point(r * x, r * y)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn all_in_annulus() {
let r1 = 12.;
let r2 = 58.;
assert!(
AnnulusDist::new(r1, r2)
.sample_iter(&mut ::rand::thread_rng())
.take(1000)
.all(|p| {
let d = p.dist(Point(0., 0.));
r1 <= d && d <= r2
})
);
}
} | r1_sq: r1 * r1,
r2_sq: r2 * r2,
}
} | random_line_split |
annulus_distribution.rs | //! Implementation of a uniform distribution of points on a two-dimensional
//! annulus.
use rand::distributions::Distribution;
use rand::Rng;
use std::f64::consts::PI;
pub use Point;
/// The uniform distribution of 2D points on an annulus `{x: r_1 <= |x| <= r_2}`.
pub struct | {
r1_sq: f64,
r2_sq: f64,
}
impl AnnulusDist {
/// Construct a new `AnnulusDist` with the given inner and outer radius
/// `r1`, `r2`. Panics if not `0 < r1 < r2`.
pub fn new(r1: f64, r2: f64) -> AnnulusDist {
assert!(0. < r1, "AnnulusDist::new called with `r1 <= 0`");
assert!(r1 < r2, "AnnulusDist::new called with `r2 <= r1`");
AnnulusDist {
r1_sq: r1 * r1,
r2_sq: r2 * r2,
}
}
}
impl Distribution<Point> for AnnulusDist {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Point {
// For points to be uniformly distributed in the annulus, the area of the disk with radius
// equal to the distance of the point from the origin is distributed uniformly between r₁²
// and r₂².
let r = (self.r1_sq + rng.gen::<f64>() * (self.r2_sq - self.r1_sq)).sqrt();
// The angle is uniform between 0 and 2π.
let (y, x) = (2. * PI * rng.gen::<f64>()).sin_cos();
Point(r * x, r * y)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn all_in_annulus() {
let r1 = 12.;
let r2 = 58.;
assert!(
AnnulusDist::new(r1, r2)
.sample_iter(&mut ::rand::thread_rng())
.take(1000)
.all(|p| {
let d = p.dist(Point(0., 0.));
r1 <= d && d <= r2
})
);
}
}
| AnnulusDist | identifier_name |
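The comment in sample() states the area fact without the derivation; the inverse-CDF argument behind the square root is short. For a uniform point on the annulus, the radial CDF is the area ratio
F(r) = Pr[R <= r] = (π·(r² - r₁²)) / (π·(r₂² - r₁²)) = (r² - r₁²) / (r₂² - r₁²), for r₁ <= r <= r₂.
Setting u = F(r) with u ~ Uniform(0, 1) and solving for r gives r = sqrt(r₁² + u·(r₂² - r₁²)), which is exactly the expression computed in sample(); the angle is drawn independently and uniformly from [0, 2π).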
annulus_distribution.rs | //! Implementation of a uniform distribution of points on a two-dimensional
//! annulus.
use rand::distributions::Distribution;
use rand::Rng;
use std::f64::consts::PI;
pub use Point;
/// The uniform distribution of 2D points on an annulus `{x: r_1 <= |x| <= r_2}`.
pub struct AnnulusDist {
r1_sq: f64,
r2_sq: f64,
}
impl AnnulusDist {
/// Construct a new `AnnulusDist` with the given inner and outer radius
/// `r1`, `r2`. Panics if not `0 < r1 < r2`.
pub fn new(r1: f64, r2: f64) -> AnnulusDist |
}
impl Distribution<Point> for AnnulusDist {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Point {
// For points to be uniformly distributed in the annulus, the area of the disk with radius
// equal to the distance of the point from the origin is distributed uniformly between r₁²
// and r₂².
let r = (self.r1_sq + rng.gen::<f64>() * (self.r2_sq - self.r1_sq)).sqrt();
// The angle is uniform between 0 and 2π.
let (y, x) = (2. * PI * rng.gen::<f64>()).sin_cos();
Point(r * x, r * y)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn all_in_annulus() {
let r1 = 12.;
let r2 = 58.;
assert!(
AnnulusDist::new(r1, r2)
.sample_iter(&mut ::rand::thread_rng())
.take(1000)
.all(|p| {
let d = p.dist(Point(0., 0.));
r1 <= d && d <= r2
})
);
}
}
| {
assert!(0. < r1, "AnnulusDist::new called with `r1 <= 0`");
assert!(r1 < r2, "AnnulusDist::new called with `r2 <= r1`");
AnnulusDist {
r1_sq: r1 * r1,
r2_sq: r2 * r2,
}
} | identifier_body |
typeid-intrinsic.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:typeid-intrinsic.rs
// aux-build:typeid-intrinsic2.rs
extern crate "typeid-intrinsic" as other1;
extern crate "typeid-intrinsic2" as other2;
use std::hash;
use std::intrinsics;
use std::intrinsics::TypeId;
struct A;
struct Test;
pub fn main() {
unsafe {
assert_eq!(intrinsics::type_id::<other1::A>(), other1::id_A());
assert_eq!(intrinsics::type_id::<other1::B>(), other1::id_B());
assert_eq!(intrinsics::type_id::<other1::C>(), other1::id_C());
assert_eq!(intrinsics::type_id::<other1::D>(), other1::id_D());
assert_eq!(intrinsics::type_id::<other1::E>(), other1::id_E());
assert_eq!(intrinsics::type_id::<other1::F>(), other1::id_F());
assert_eq!(intrinsics::type_id::<other1::G>(), other1::id_G());
assert_eq!(intrinsics::type_id::<other1::H>(), other1::id_H());
assert_eq!(intrinsics::type_id::<other2::A>(), other2::id_A());
assert_eq!(intrinsics::type_id::<other2::B>(), other2::id_B());
assert_eq!(intrinsics::type_id::<other2::C>(), other2::id_C());
assert_eq!(intrinsics::type_id::<other2::D>(), other2::id_D());
assert_eq!(intrinsics::type_id::<other2::E>(), other2::id_E());
assert_eq!(intrinsics::type_id::<other2::F>(), other2::id_F());
assert_eq!(intrinsics::type_id::<other2::G>(), other2::id_G());
assert_eq!(intrinsics::type_id::<other2::H>(), other2::id_H());
assert_eq!(other1::id_F(), other2::id_F());
assert_eq!(other1::id_G(), other2::id_G());
assert_eq!(other1::id_H(), other2::id_H());
assert_eq!(intrinsics::type_id::<int>(), other2::foo::<int>());
assert_eq!(intrinsics::type_id::<int>(), other1::foo::<int>());
assert_eq!(other2::foo::<int>(), other1::foo::<int>());
assert_eq!(intrinsics::type_id::<A>(), other2::foo::<A>());
assert_eq!(intrinsics::type_id::<A>(), other1::foo::<A>());
assert_eq!(other2::foo::<A>(), other1::foo::<A>());
}
// sanity test of TypeId
let (a, b, c) = (TypeId::of::<uint>(), TypeId::of::<&'static str>(),
TypeId::of::<Test>());
let (d, e, f) = (TypeId::of::<uint>(), TypeId::of::<&'static str>(),
TypeId::of::<Test>());
assert!(a != b);
assert!(a != c);
assert!(b != c);
assert_eq!(a, d);
assert_eq!(b, e);
assert_eq!(c, f); | } |
// check it has a hash
let (a, b) = (TypeId::of::<uint>(), TypeId::of::<uint>());
assert_eq!(hash::hash(&a), hash::hash(&b)); | random_line_split |
typeid-intrinsic.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:typeid-intrinsic.rs
// aux-build:typeid-intrinsic2.rs
extern crate "typeid-intrinsic" as other1;
extern crate "typeid-intrinsic2" as other2;
use std::hash;
use std::intrinsics;
use std::intrinsics::TypeId;
struct A;
struct Test;
pub fn | () {
unsafe {
assert_eq!(intrinsics::type_id::<other1::A>(), other1::id_A());
assert_eq!(intrinsics::type_id::<other1::B>(), other1::id_B());
assert_eq!(intrinsics::type_id::<other1::C>(), other1::id_C());
assert_eq!(intrinsics::type_id::<other1::D>(), other1::id_D());
assert_eq!(intrinsics::type_id::<other1::E>(), other1::id_E());
assert_eq!(intrinsics::type_id::<other1::F>(), other1::id_F());
assert_eq!(intrinsics::type_id::<other1::G>(), other1::id_G());
assert_eq!(intrinsics::type_id::<other1::H>(), other1::id_H());
assert_eq!(intrinsics::type_id::<other2::A>(), other2::id_A());
assert_eq!(intrinsics::type_id::<other2::B>(), other2::id_B());
assert_eq!(intrinsics::type_id::<other2::C>(), other2::id_C());
assert_eq!(intrinsics::type_id::<other2::D>(), other2::id_D());
assert_eq!(intrinsics::type_id::<other2::E>(), other2::id_E());
assert_eq!(intrinsics::type_id::<other2::F>(), other2::id_F());
assert_eq!(intrinsics::type_id::<other2::G>(), other2::id_G());
assert_eq!(intrinsics::type_id::<other2::H>(), other2::id_H());
assert_eq!(other1::id_F(), other2::id_F());
assert_eq!(other1::id_G(), other2::id_G());
assert_eq!(other1::id_H(), other2::id_H());
assert_eq!(intrinsics::type_id::<int>(), other2::foo::<int>());
assert_eq!(intrinsics::type_id::<int>(), other1::foo::<int>());
assert_eq!(other2::foo::<int>(), other1::foo::<int>());
assert_eq!(intrinsics::type_id::<A>(), other2::foo::<A>());
assert_eq!(intrinsics::type_id::<A>(), other1::foo::<A>());
assert_eq!(other2::foo::<A>(), other1::foo::<A>());
}
// sanity test of TypeId
let (a, b, c) = (TypeId::of::<uint>(), TypeId::of::<&'static str>(),
TypeId::of::<Test>());
let (d, e, f) = (TypeId::of::<uint>(), TypeId::of::<&'static str>(),
TypeId::of::<Test>());
assert!(a != b);
assert!(a != c);
assert!(b != c);
assert_eq!(a, d);
assert_eq!(b, e);
assert_eq!(c, f);
// check it has a hash
let (a, b) = (TypeId::of::<uint>(), TypeId::of::<uint>());
assert_eq!(hash::hash(&a), hash::hash(&b));
}
| main | identifier_name |
typeid-intrinsic.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:typeid-intrinsic.rs
// aux-build:typeid-intrinsic2.rs
extern crate "typeid-intrinsic" as other1;
extern crate "typeid-intrinsic2" as other2;
use std::hash;
use std::intrinsics;
use std::intrinsics::TypeId;
struct A;
struct Test;
pub fn main() | assert_eq!(other1::id_F(), other2::id_F());
assert_eq!(other1::id_G(), other2::id_G());
assert_eq!(other1::id_H(), other2::id_H());
assert_eq!(intrinsics::type_id::<int>(), other2::foo::<int>());
assert_eq!(intrinsics::type_id::<int>(), other1::foo::<int>());
assert_eq!(other2::foo::<int>(), other1::foo::<int>());
assert_eq!(intrinsics::type_id::<A>(), other2::foo::<A>());
assert_eq!(intrinsics::type_id::<A>(), other1::foo::<A>());
assert_eq!(other2::foo::<A>(), other1::foo::<A>());
}
// sanity test of TypeId
let (a, b, c) = (TypeId::of::<uint>(), TypeId::of::<&'static str>(),
TypeId::of::<Test>());
let (d, e, f) = (TypeId::of::<uint>(), TypeId::of::<&'static str>(),
TypeId::of::<Test>());
assert!(a != b);
assert!(a != c);
assert!(b != c);
assert_eq!(a, d);
assert_eq!(b, e);
assert_eq!(c, f);
// check it has a hash
let (a, b) = (TypeId::of::<uint>(), TypeId::of::<uint>());
assert_eq!(hash::hash(&a), hash::hash(&b));
}
| {
unsafe {
assert_eq!(intrinsics::type_id::<other1::A>(), other1::id_A());
assert_eq!(intrinsics::type_id::<other1::B>(), other1::id_B());
assert_eq!(intrinsics::type_id::<other1::C>(), other1::id_C());
assert_eq!(intrinsics::type_id::<other1::D>(), other1::id_D());
assert_eq!(intrinsics::type_id::<other1::E>(), other1::id_E());
assert_eq!(intrinsics::type_id::<other1::F>(), other1::id_F());
assert_eq!(intrinsics::type_id::<other1::G>(), other1::id_G());
assert_eq!(intrinsics::type_id::<other1::H>(), other1::id_H());
assert_eq!(intrinsics::type_id::<other2::A>(), other2::id_A());
assert_eq!(intrinsics::type_id::<other2::B>(), other2::id_B());
assert_eq!(intrinsics::type_id::<other2::C>(), other2::id_C());
assert_eq!(intrinsics::type_id::<other2::D>(), other2::id_D());
assert_eq!(intrinsics::type_id::<other2::E>(), other2::id_E());
assert_eq!(intrinsics::type_id::<other2::F>(), other2::id_F());
assert_eq!(intrinsics::type_id::<other2::G>(), other2::id_G());
assert_eq!(intrinsics::type_id::<other2::H>(), other2::id_H());
| identifier_body |
post_render.js | var util = require('hexo-util');
var code = [
'if tired && night:',
' sleep()'
].join('\n');
var content = [
'# Title',
'``` python',
code,
'```',
'some content',
'',
'## Another title',
'{% blockquote %}',
'quote content',
'{% endblockquote %}',
'',
'{% quote Hello World %}',
'quote content',
'{% endquote %}'
].join('\n');
exports.content = content;
exports.expected = [
'<h1 id="Title"><a href="#Title" class="headerlink" title="Title"></a>Title</h1>',
util.highlight(code, {lang: 'python'}),
'\n<p>some content</p>\n',
'<h2 id="Another-title"><a href="#Another-title" class="headerlink" title="Another title"></a>Another title</h2>',
'<blockquote>',
'<p>quote content</p>\n',
'</blockquote>\n', | ].join(''); | '<blockquote><p>quote content</p>\n',
'<footer><strong>Hello World</strong></footer></blockquote>' | random_line_split |
officemru.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the Microsoft Office MRUs Windows Registry plugin."""
import unittest
from plaso.formatters import officemru # pylint: disable=unused-import
from plaso.formatters import winreg # pylint: disable=unused-import
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers.winreg_plugins import officemru
from tests import test_lib as shared_test_lib
from tests.parsers.winreg_plugins import test_lib
__author__ = 'David Nides ([email protected])'
class OfficeMRUPluginTest(test_lib.RegistryPluginTestCase):
"""Tests for the Microsoft Office MRUs Windows Registry plugin."""
@shared_test_lib.skipUnlessHasTestFile([u'NTUSER-WIN7.DAT'])
def testProcess(self):
"""Tests the Process function."""
test_file_entry = self._GetTestFileEntry([u'NTUSER-WIN7.DAT'])
key_path = (
u'HKEY_CURRENT_USER\\Software\\Microsoft\\Office\\14.0\\Word\\'
u'File MRU')
win_registry = self._GetWinRegistryFromFileEntry(test_file_entry)
registry_key = win_registry.GetKeyByPath(key_path)
plugin_object = officemru.OfficeMRUPlugin()
storage_writer = self._ParseKeyWithPlugin(
registry_key, plugin_object, file_entry=test_file_entry)
self.assertEqual(len(storage_writer.events), 6)
event_object = storage_writer.events[5]
self.assertEqual(event_object.pathspec, test_file_entry.path_spec)
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
self.assertEqual(event_object.parser, plugin_object.plugin_name)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2012-03-13 18:27:15.089802')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.WRITTEN_TIME)
regvalue_identifier = u'Item 1'
expected_value_string = (
u'[F00000000][T01CD0146EA1EADB0][O00000000]*'
u'C:\\Users\\nfury\\Documents\\StarFury\\StarFury\\'
u'SA-23E Mitchell-Hyundyne Starfury.docx')
self._TestRegvalue(event_object, regvalue_identifier, expected_value_string)
expected_message = (
u'[{0:s}] '
u'{1:s}: {2:s} '
u'Item 2: [F00000000][T01CD00921FC127F0][O00000000]*'
u'C:\\Users\\nfury\\Documents\\StarFury\\StarFury\\Earthforce SA-26 '
u'Thunderbolt Star Fury.docx '
u'Item 3: [F00000000][T01CD009208780140][O00000000]*'
u'C:\\Users\\nfury\\Documents\\StarFury\\StarFury\\StarFury.docx '
u'Item 4: [F00000000][T01CCFE0B22DA9EF0][O00000000]*'
u'C:\\Users\\nfury\\Documents\\VIBRANIUM.docx '
u'Item 5: [F00000000][T01CCFCBA595DFC30][O00000000]*'
u'C:\\Users\\nfury\\Documents\\ADAMANTIUM-Background.docx').format(
key_path, regvalue_identifier, expected_value_string)
expected_short_message = u'{0:s}...'.format(expected_message[0:77])
self._TestGetMessageStrings(
event_object, expected_message, expected_short_message)
# Test OfficeMRUWindowsRegistryEvent.
event_object = storage_writer.events[0]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2012-03-13 18:27:15.083')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.WRITTEN_TIME)
self.assertEqual(event_object.value_string, expected_value_string)
expected_message = u'[{0:s}] Value: {1:s}'.format(
key_path, expected_value_string)
expected_short_message = u'{0:s}...'.format(expected_value_string[0:77])
self._TestGetMessageStrings(
event_object, expected_message, expected_short_message) | if __name__ == '__main__':
unittest.main() | random_line_split |
|
officemru.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the Microsoft Office MRUs Windows Registry plugin."""
import unittest
from plaso.formatters import officemru # pylint: disable=unused-import
from plaso.formatters import winreg # pylint: disable=unused-import
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers.winreg_plugins import officemru
from tests import test_lib as shared_test_lib
from tests.parsers.winreg_plugins import test_lib
__author__ = 'David Nides ([email protected])'
class OfficeMRUPluginTest(test_lib.RegistryPluginTestCase):
"""Tests for the Microsoft Office MRUs Windows Registry plugin."""
@shared_test_lib.skipUnlessHasTestFile([u'NTUSER-WIN7.DAT'])
def testProcess(self):
"""Tests the Process function."""
test_file_entry = self._GetTestFileEntry([u'NTUSER-WIN7.DAT'])
key_path = (
u'HKEY_CURRENT_USER\\Software\\Microsoft\\Office\\14.0\\Word\\'
u'File MRU')
win_registry = self._GetWinRegistryFromFileEntry(test_file_entry)
registry_key = win_registry.GetKeyByPath(key_path)
plugin_object = officemru.OfficeMRUPlugin()
storage_writer = self._ParseKeyWithPlugin(
registry_key, plugin_object, file_entry=test_file_entry)
self.assertEqual(len(storage_writer.events), 6)
event_object = storage_writer.events[5]
self.assertEqual(event_object.pathspec, test_file_entry.path_spec)
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
self.assertEqual(event_object.parser, plugin_object.plugin_name)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2012-03-13 18:27:15.089802')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.WRITTEN_TIME)
regvalue_identifier = u'Item 1'
expected_value_string = (
u'[F00000000][T01CD0146EA1EADB0][O00000000]*'
u'C:\\Users\\nfury\\Documents\\StarFury\\StarFury\\'
u'SA-23E Mitchell-Hyundyne Starfury.docx')
self._TestRegvalue(event_object, regvalue_identifier, expected_value_string)
expected_message = (
u'[{0:s}] '
u'{1:s}: {2:s} '
u'Item 2: [F00000000][T01CD00921FC127F0][O00000000]*'
u'C:\\Users\\nfury\\Documents\\StarFury\\StarFury\\Earthforce SA-26 '
u'Thunderbolt Star Fury.docx '
u'Item 3: [F00000000][T01CD009208780140][O00000000]*'
u'C:\\Users\\nfury\\Documents\\StarFury\\StarFury\\StarFury.docx '
u'Item 4: [F00000000][T01CCFE0B22DA9EF0][O00000000]*'
u'C:\\Users\\nfury\\Documents\\VIBRANIUM.docx '
u'Item 5: [F00000000][T01CCFCBA595DFC30][O00000000]*'
u'C:\\Users\\nfury\\Documents\\ADAMANTIUM-Background.docx').format(
key_path, regvalue_identifier, expected_value_string)
expected_short_message = u'{0:s}...'.format(expected_message[0:77])
self._TestGetMessageStrings(
event_object, expected_message, expected_short_message)
# Test OfficeMRUWindowsRegistryEvent.
event_object = storage_writer.events[0]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2012-03-13 18:27:15.083')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.WRITTEN_TIME)
self.assertEqual(event_object.value_string, expected_value_string)
expected_message = u'[{0:s}] Value: {1:s}'.format(
key_path, expected_value_string)
expected_short_message = u'{0:s}...'.format(expected_value_string[0:77])
self._TestGetMessageStrings(
event_object, expected_message, expected_short_message)
if __name__ == '__main__':
| unittest.main() | conditional_block |
|
officemru.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the Microsoft Office MRUs Windows Registry plugin."""
import unittest
from plaso.formatters import officemru # pylint: disable=unused-import
from plaso.formatters import winreg # pylint: disable=unused-import
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers.winreg_plugins import officemru
from tests import test_lib as shared_test_lib
from tests.parsers.winreg_plugins import test_lib
__author__ = 'David Nides ([email protected])'
class | (test_lib.RegistryPluginTestCase):
"""Tests for the Microsoft Office MRUs Windows Registry plugin."""
@shared_test_lib.skipUnlessHasTestFile([u'NTUSER-WIN7.DAT'])
def testProcess(self):
"""Tests the Process function."""
test_file_entry = self._GetTestFileEntry([u'NTUSER-WIN7.DAT'])
key_path = (
u'HKEY_CURRENT_USER\\Software\\Microsoft\\Office\\14.0\\Word\\'
u'File MRU')
win_registry = self._GetWinRegistryFromFileEntry(test_file_entry)
registry_key = win_registry.GetKeyByPath(key_path)
plugin_object = officemru.OfficeMRUPlugin()
storage_writer = self._ParseKeyWithPlugin(
registry_key, plugin_object, file_entry=test_file_entry)
self.assertEqual(len(storage_writer.events), 6)
event_object = storage_writer.events[5]
self.assertEqual(event_object.pathspec, test_file_entry.path_spec)
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
self.assertEqual(event_object.parser, plugin_object.plugin_name)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2012-03-13 18:27:15.089802')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.WRITTEN_TIME)
regvalue_identifier = u'Item 1'
expected_value_string = (
u'[F00000000][T01CD0146EA1EADB0][O00000000]*'
u'C:\\Users\\nfury\\Documents\\StarFury\\StarFury\\'
u'SA-23E Mitchell-Hyundyne Starfury.docx')
self._TestRegvalue(event_object, regvalue_identifier, expected_value_string)
expected_message = (
u'[{0:s}] '
u'{1:s}: {2:s} '
u'Item 2: [F00000000][T01CD00921FC127F0][O00000000]*'
u'C:\\Users\\nfury\\Documents\\StarFury\\StarFury\\Earthforce SA-26 '
u'Thunderbolt Star Fury.docx '
u'Item 3: [F00000000][T01CD009208780140][O00000000]*'
u'C:\\Users\\nfury\\Documents\\StarFury\\StarFury\\StarFury.docx '
u'Item 4: [F00000000][T01CCFE0B22DA9EF0][O00000000]*'
u'C:\\Users\\nfury\\Documents\\VIBRANIUM.docx '
u'Item 5: [F00000000][T01CCFCBA595DFC30][O00000000]*'
u'C:\\Users\\nfury\\Documents\\ADAMANTIUM-Background.docx').format(
key_path, regvalue_identifier, expected_value_string)
expected_short_message = u'{0:s}...'.format(expected_message[0:77])
self._TestGetMessageStrings(
event_object, expected_message, expected_short_message)
# Test OfficeMRUWindowsRegistryEvent.
event_object = storage_writer.events[0]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2012-03-13 18:27:15.083')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.WRITTEN_TIME)
self.assertEqual(event_object.value_string, expected_value_string)
expected_message = u'[{0:s}] Value: {1:s}'.format(
key_path, expected_value_string)
expected_short_message = u'{0:s}...'.format(expected_value_string[0:77])
self._TestGetMessageStrings(
event_object, expected_message, expected_short_message)
if __name__ == '__main__':
unittest.main()
| OfficeMRUPluginTest | identifier_name |
officemru.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the Microsoft Office MRUs Windows Registry plugin."""
import unittest
from plaso.formatters import officemru # pylint: disable=unused-import
from plaso.formatters import winreg # pylint: disable=unused-import
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers.winreg_plugins import officemru
from tests import test_lib as shared_test_lib
from tests.parsers.winreg_plugins import test_lib
__author__ = 'David Nides ([email protected])'
class OfficeMRUPluginTest(test_lib.RegistryPluginTestCase):
"""Tests for the Microsoft Office MRUs Windows Registry plugin."""
@shared_test_lib.skipUnlessHasTestFile([u'NTUSER-WIN7.DAT'])
def testProcess(self):
| # and not through the parser.
self.assertEqual(event_object.parser, plugin_object.plugin_name)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2012-03-13 18:27:15.089802')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.WRITTEN_TIME)
regvalue_identifier = u'Item 1'
expected_value_string = (
u'[F00000000][T01CD0146EA1EADB0][O00000000]*'
u'C:\\Users\\nfury\\Documents\\StarFury\\StarFury\\'
u'SA-23E Mitchell-Hyundyne Starfury.docx')
self._TestRegvalue(event_object, regvalue_identifier, expected_value_string)
expected_message = (
u'[{0:s}] '
u'{1:s}: {2:s} '
u'Item 2: [F00000000][T01CD00921FC127F0][O00000000]*'
u'C:\\Users\\nfury\\Documents\\StarFury\\StarFury\\Earthforce SA-26 '
u'Thunderbolt Star Fury.docx '
u'Item 3: [F00000000][T01CD009208780140][O00000000]*'
u'C:\\Users\\nfury\\Documents\\StarFury\\StarFury\\StarFury.docx '
u'Item 4: [F00000000][T01CCFE0B22DA9EF0][O00000000]*'
u'C:\\Users\\nfury\\Documents\\VIBRANIUM.docx '
u'Item 5: [F00000000][T01CCFCBA595DFC30][O00000000]*'
u'C:\\Users\\nfury\\Documents\\ADAMANTIUM-Background.docx').format(
key_path, regvalue_identifier, expected_value_string)
expected_short_message = u'{0:s}...'.format(expected_message[0:77])
self._TestGetMessageStrings(
event_object, expected_message, expected_short_message)
# Test OfficeMRUWindowsRegistryEvent.
event_object = storage_writer.events[0]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2012-03-13 18:27:15.083')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.WRITTEN_TIME)
self.assertEqual(event_object.value_string, expected_value_string)
expected_message = u'[{0:s}] Value: {1:s}'.format(
key_path, expected_value_string)
expected_short_message = u'{0:s}...'.format(expected_value_string[0:77])
self._TestGetMessageStrings(
event_object, expected_message, expected_short_message)
if __name__ == '__main__':
unittest.main()
| """Tests the Process function."""
test_file_entry = self._GetTestFileEntry([u'NTUSER-WIN7.DAT'])
key_path = (
u'HKEY_CURRENT_USER\\Software\\Microsoft\\Office\\14.0\\Word\\'
u'File MRU')
win_registry = self._GetWinRegistryFromFileEntry(test_file_entry)
registry_key = win_registry.GetKeyByPath(key_path)
plugin_object = officemru.OfficeMRUPlugin()
storage_writer = self._ParseKeyWithPlugin(
registry_key, plugin_object, file_entry=test_file_entry)
self.assertEqual(len(storage_writer.events), 6)
event_object = storage_writer.events[5]
self.assertEqual(event_object.pathspec, test_file_entry.path_spec)
# This should just be the plugin name, as we're invoking it directly, | identifier_body |
angular-locale_fr-gp.js | 'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"AM",
"PM"
],
"DAY": [
"dimanche",
"lundi",
"mardi",
"mercredi",
"jeudi",
"vendredi",
"samedi"
],
"ERANAMES": [
"avant J\u00e9sus-Christ",
"apr\u00e8s J\u00e9sus-Christ"
],
"ERAS": [
"av. J.-C.",
"ap. J.-C."
],
"FIRSTDAYOFWEEK": 0,
"MONTH": [
"janvier",
"f\u00e9vrier",
"mars",
"avril",
"mai",
"juin",
"juillet",
"ao\u00fbt",
"septembre",
"octobre",
"novembre",
"d\u00e9cembre"
],
"SHORTDAY": [
"dim.",
"lun.",
"mar.",
"mer.",
"jeu.",
"ven.",
"sam."
],
"SHORTMONTH": [
"janv.",
"f\u00e9vr.",
"mars",
"avr.",
"mai",
"juin",
"juil.",
"ao\u00fbt",
"sept.",
"oct.",
"nov.",
"d\u00e9c."
],
"WEEKENDRANGE": [
5,
6
],
"fullDate": "EEEE d MMMM y",
"longDate": "d MMMM y",
"medium": "d MMM y HH:mm:ss",
"mediumDate": "d MMM y",
"mediumTime": "HH:mm:ss",
"short": "dd/MM/y HH:mm",
"shortDate": "dd/MM/y",
"shortTime": "HH:mm"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "\u20ac",
"DECIMAL_SEP": ",",
"GROUP_SEP": "\u00a0",
"PATTERNS": [
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "-",
"negSuf": "\u00a0\u00a4",
"posPre": "",
"posSuf": "\u00a0\u00a4"
}
]
},
"id": "fr-gp",
"pluralCat": function(n, opt_precision) { var i = n | 0; if (i == 0 || i == 1) | return PLURAL_CATEGORY.OTHER;}
});
}]);
| { return PLURAL_CATEGORY.ONE; } | conditional_block |
angular-locale_fr-gp.js | 'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"AM",
"PM"
],
"DAY": [
"dimanche",
"lundi",
"mardi",
"mercredi",
"jeudi",
"vendredi",
"samedi"
],
"ERANAMES": [
"avant J\u00e9sus-Christ",
"apr\u00e8s J\u00e9sus-Christ"
],
"ERAS": [
"av. J.-C.",
"ap. J.-C."
],
"FIRSTDAYOFWEEK": 0,
"MONTH": [
"janvier",
"f\u00e9vrier",
"mars",
"avril",
"mai",
"juin",
"juillet",
"ao\u00fbt",
"septembre",
"octobre",
"novembre",
"d\u00e9cembre"
],
"SHORTDAY": [
"dim.",
"lun.",
"mar.",
"mer.",
"jeu.",
"ven.",
"sam."
],
"SHORTMONTH": [
"janv.",
"f\u00e9vr.",
"mars",
"avr.",
"mai",
"juin",
"juil.",
"ao\u00fbt",
"sept.",
"oct.",
"nov.",
"d\u00e9c."
],
"WEEKENDRANGE": [
5,
6 | "mediumDate": "d MMM y",
"mediumTime": "HH:mm:ss",
"short": "dd/MM/y HH:mm",
"shortDate": "dd/MM/y",
"shortTime": "HH:mm"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "\u20ac",
"DECIMAL_SEP": ",",
"GROUP_SEP": "\u00a0",
"PATTERNS": [
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "-",
"negSuf": "\u00a0\u00a4",
"posPre": "",
"posSuf": "\u00a0\u00a4"
}
]
},
"id": "fr-gp",
"pluralCat": function(n, opt_precision) { var i = n | 0; if (i == 0 || i == 1) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]); | ],
"fullDate": "EEEE d MMMM y",
"longDate": "d MMMM y",
"medium": "d MMM y HH:mm:ss", | random_line_split |
cpp.py | import hashlib
from tango.ast import *
from tango.builtin import Int, Double, String
from tango.types import FunctionType, NominalType, TypeUnion
def transpile(module, header_stream, source_stream):
transpiler = Transpiler(header_stream, source_stream)
transpiler.visit(module)
def compatibilize(name):
result = str(str(name.encode())[2:-1]).replace('\\', '')
for punct in '. ()[]<>-:':
result = result.replace(punct, '')
if result[0].isdigit():
result = '_' + result
return result
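# Illustrative note (not part of the original source): the loop above strips the
# punctuation characters listed in `punct`, so e.g. compatibilize('Tango.Int')
# would yield 'TangoInt', and a name starting with a digit gains a leading '_'.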
operator_translations = {
'+': '__add__',
'-': '__sub__',
'*': '__mul__',
'/': '__div__',
}
class Functor(object):
def __init__(self, function_type):
self.function_type = function_type
@property
def type_signature(self):
# FIXME This discriminator isn't good enough, as different signatures
# may have the same string representation, since their `__str__`
# implementation doesn't use full names.
discriminator = hashlib.sha1(str(self.function_type).encode()).hexdigest()[-8:]
return compatibilize('Sig' + str(self.function_type) + discriminator)
class Transpiler(Visitor):
def __init__(self, header_stream, source_stream):
self.header_stream = header_stream
self.source_stream = source_stream
self.indent = 0
self.containers = {}
self.functions = {}
self.functors = {}
self.types = {}
def write_header(self, data, end='\n'):
print(' ' * self.indent + data, file=self.header_stream, end=end)
def write_source(self, data, end='\n'):
print(' ' * self.indent + data, file=self.source_stream, end=end)
def visit_ModuleDecl(self, node):
self.write_source('#include "tango.hh"')
self.write_source('')
self.write_source('int main(int argc, char* argv[]) {')
self.indent += 4
self.generic_visit(node)
self.write_source('return 0;')
self.indent -= 4
self.write_source('}')
def visit_ContainerDecl(self, node):
# Write a new variable declaration.
var_type = self.translate_type(node.__info__['type'])
var_name = compatibilize(node.__info__['scope'].name + '_' + node.name)
declaration = var_type + ' ' + var_name
        # If the container has an initial value, write it as well.
if node.initial_value:
declaration += ' = ' + self.translate_expr(node.initial_value)
self.write_source(declaration + ';')
def visit_Call(self, node):
self.write_source(self.translate_expr(node) + ';')
def visit_If(self, node):
assert not node.pattern.parameters, 'TODO pattern matching in if expressions'
condition = self.translate_expr(node.pattern.expression)
self.write_source('if (' + condition + ') {')
self.indent += 4
self.visit(node.body)
self.indent -= 4
self.write_source('}')
if isinstance(node.else_clause, Block):
self.write_source('else {')
self.indent += 4
self.visit(node.else_clause)
self.indent -= 4
self.write_source('}')
elif isinstance(node.else_clause, If):
self.write_source('else')
self.visit(node.else_clause)
def translate_type(self, type_instance):
if isinstance(type_instance, NominalType):
return compatibilize(type_instance.scope.name + '_' + type_instance.name)
if isinstance(type_instance, FunctionType):
# Register a new functor for the parsed function type.
functor = self.functors.get(type_instance)
if functor is None:
functor = Functor(type_instance)
self.functors[type_instance] = functor
return 'std::shared_ptr<' + functor.type_signature + '>'
assert False, 'cannot translate {}'.format(type_instance)
def translate_expr(self, node):
if isinstance(node, Literal):
if node.__info__['type'] == String:
return '"' + node.value + '"'
return node.value
if isinstance(node, Identifier):
# If the identifier is `true` or `false`, we write it as is.
if node.name in ['true', 'false']:
return node.name
            # If the identifier isn't a keyword, first, we retrieve the entity
# the identifier is denoting.
decls = node.__info__['scope'][node.name]
# If the identifier denotes a simple container, we return its full
# name (i.e. scope + name).
if isinstance(decls[0], ContainerDecl):
return compatibilize(node.__info__['scope'].name + '_' + node.name)
# If the identifier denotes a function declaration, we have to
# know which overload and/or specialization it refers to, so as to
# create a different full name for each case.
if isinstance(decls[0], FunctionDecl):
                # If the identifier has a single, non-generic type, we can
# use it as is to discriminate the identifier.
node_type = node.__info__['type']
if not isinstance(node_type, TypeUnion) and not node_type.is_generic:
discriminating_type = node_type
# If the identifier was used as the callee of a function call,
# we can expect the type solver to add a `specialized_type`
# key in the node's metadata.
elif 'specialized_type' in node.__info__:
discriminating_type = node.__info__['specialized_type']
# It should be illegal to use an overloaded or generic
# identifier outside of a function call.
else:
assert False, (
"ambiguous use of '{}' wasn't handled by the type disambiguator"
.format(node))
# FIXME This discriminator isn't good enough, as different
# signatures may have the same string representation, since
# their `__str__` implementation doesn't use full names.
discriminator = hashlib.sha1(str(discriminating_type).encode()).hexdigest()[-8:]
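                # Illustrative only: for a function 'f' declared in scope 'main' this
                # yields 'main_f' plus an eight-hex-character suffix, e.g. a
                # hypothetical 'main_f0a1b2c3d'.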
return compatibilize(node.__info__['scope'].name + '_' + node.name + discriminator)
if isinstance(node, PrefixedExpression):
return '{}.{}({})'.format(
self.translate_type(node.operand.__info__['type']),
operator_translations[node.operator],
self.translate_expr(node.operand))
if isinstance(node, BinaryExpression):
return '{}.{}({}, {})'.format(
self.translate_type(node.left.__info__['type']),
operator_translations[node.operator],
self.translate_expr(node.left),
self.translate_expr(node.right))
if isinstance(node, Call):
callee_name = self.translate_expr(node.callee)
return '(*({}))({})'.format(
callee_name,
', '.join(map(self.translate_expr, node.arguments)))
if isinstance(node, CallArgument):
return self.translate_expr(node.value)
assert False, 'cannot translate {}'.format(node)
def find_function_implementation(node):
scope = node.callee.__info__['scope']
while scope is not None:
| scope = scope.parent
# We should always find at least one valid implementation, unless
# something went wrong with the type solver.
assert False, 'could not find the implementation of {}'.format(node.callee)
| for decl in node.callee.__info__['scope'][node.callee.name]:
# When the object denoted by the identifier is a declaration, it
# means we have to instantiate that declaration.
if isinstance(decl, FunctionDecl):
function_type = decl.__info__['type']
# We select the first non-generic function declaration that
# that matches the signature candidate of the call node.
if function_type == node.__info__['signature_candidate']:
return decl
assert not function_type.is_generic, 'TODO: {} is generic'.format(function_type)
# When the object denoted by the identifier is a type, it means
# it's been declared in another module. Hence, we should refer to
# the symbol of this other module.
else:
assert False, 'TODO: {} is declared in another module'.format(node.callee)
# Move to the enclosing scope if we couldn't find any match. | conditional_block |
cpp.py | import hashlib
from tango.ast import *
from tango.builtin import Int, Double, String
from tango.types import FunctionType, NominalType, TypeUnion
def transpile(module, header_stream, source_stream):
transpiler = Transpiler(header_stream, source_stream)
transpiler.visit(module)
| result = str(str(name.encode())[2:-1]).replace('\\', '')
for punct in '. ()[]<>-:':
result = result.replace(punct, '')
if result[0].isdigit():
result = '_' + result
return result
operator_translations = {
'+': '__add__',
'-': '__sub__',
'*': '__mul__',
'/': '__div__',
}
class Functor(object):
def __init__(self, function_type):
self.function_type = function_type
@property
def type_signature(self):
# FIXME This discriminator isn't good enough, as different signatures
# may have the same string representation, since their `__str__`
# implementation doesn't use full names.
discriminator = hashlib.sha1(str(self.function_type).encode()).hexdigest()[-8:]
return compatibilize('Sig' + str(self.function_type) + discriminator)
class Transpiler(Visitor):
def __init__(self, header_stream, source_stream):
self.header_stream = header_stream
self.source_stream = source_stream
self.indent = 0
self.containers = {}
self.functions = {}
self.functors = {}
self.types = {}
def write_header(self, data, end='\n'):
print(' ' * self.indent + data, file=self.header_stream, end=end)
def write_source(self, data, end='\n'):
print(' ' * self.indent + data, file=self.source_stream, end=end)
def visit_ModuleDecl(self, node):
self.write_source('#include "tango.hh"')
self.write_source('')
self.write_source('int main(int argc, char* argv[]) {')
self.indent += 4
self.generic_visit(node)
self.write_source('return 0;')
self.indent -= 4
self.write_source('}')
def visit_ContainerDecl(self, node):
# Write a new variable declaration.
var_type = self.translate_type(node.__info__['type'])
var_name = compatibilize(node.__info__['scope'].name + '_' + node.name)
declaration = var_type + ' ' + var_name
        # If the container has an initial value, write it as well.
if node.initial_value:
declaration += ' = ' + self.translate_expr(node.initial_value)
self.write_source(declaration + ';')
def visit_Call(self, node):
self.write_source(self.translate_expr(node) + ';')
def visit_If(self, node):
assert not node.pattern.parameters, 'TODO pattern matching in if expressions'
condition = self.translate_expr(node.pattern.expression)
self.write_source('if (' + condition + ') {')
self.indent += 4
self.visit(node.body)
self.indent -= 4
self.write_source('}')
if isinstance(node.else_clause, Block):
self.write_source('else {')
self.indent += 4
self.visit(node.else_clause)
self.indent -= 4
self.write_source('}')
elif isinstance(node.else_clause, If):
self.write_source('else')
self.visit(node.else_clause)
def translate_type(self, type_instance):
if isinstance(type_instance, NominalType):
return compatibilize(type_instance.scope.name + '_' + type_instance.name)
if isinstance(type_instance, FunctionType):
# Register a new functor for the parsed function type.
functor = self.functors.get(type_instance)
if functor is None:
functor = Functor(type_instance)
self.functors[type_instance] = functor
return 'std::shared_ptr<' + functor.type_signature + '>'
assert False, 'cannot translate {}'.format(type_instance)
def translate_expr(self, node):
if isinstance(node, Literal):
if node.__info__['type'] == String:
return '"' + node.value + '"'
return node.value
if isinstance(node, Identifier):
# If the identifier is `true` or `false`, we write it as is.
if node.name in ['true', 'false']:
return node.name
            # If the identifier isn't a keyword, first, we retrieve the entity
# the identifier is denoting.
decls = node.__info__['scope'][node.name]
# If the identifier denotes a simple container, we return its full
# name (i.e. scope + name).
if isinstance(decls[0], ContainerDecl):
return compatibilize(node.__info__['scope'].name + '_' + node.name)
# If the identifier denotes a function declaration, we have to
# know which overload and/or specialization it refers to, so as to
# create a different full name for each case.
if isinstance(decls[0], FunctionDecl):
                # If the identifier has a single, non-generic type, we can
# use it as is to discriminate the identifier.
node_type = node.__info__['type']
if not isinstance(node_type, TypeUnion) and not node_type.is_generic:
discriminating_type = node_type
# If the identifier was used as the callee of a function call,
# we can expect the type solver to add a `specialized_type`
# key in the node's metadata.
elif 'specialized_type' in node.__info__:
discriminating_type = node.__info__['specialized_type']
# It should be illegal to use an overloaded or generic
# identifier outside of a function call.
else:
assert False, (
"ambiguous use of '{}' wasn't handled by the type disambiguator"
.format(node))
# FIXME This discriminator isn't good enough, as different
# signatures may have the same string representation, since
# their `__str__` implementation doesn't use full names.
discriminator = hashlib.sha1(str(discriminating_type).encode()).hexdigest()[-8:]
return compatibilize(node.__info__['scope'].name + '_' + node.name + discriminator)
if isinstance(node, PrefixedExpression):
return '{}.{}({})'.format(
self.translate_type(node.operand.__info__['type']),
operator_translations[node.operator],
self.translate_expr(node.operand))
if isinstance(node, BinaryExpression):
return '{}.{}({}, {})'.format(
self.translate_type(node.left.__info__['type']),
operator_translations[node.operator],
self.translate_expr(node.left),
self.translate_expr(node.right))
if isinstance(node, Call):
callee_name = self.translate_expr(node.callee)
return '(*({}))({})'.format(
callee_name,
', '.join(map(self.translate_expr, node.arguments)))
if isinstance(node, CallArgument):
return self.translate_expr(node.value)
assert False, 'cannot translate {}'.format(node)
def find_function_implementation(node):
scope = node.callee.__info__['scope']
while scope is not None:
for decl in node.callee.__info__['scope'][node.callee.name]:
# When the object denoted by the identifier is a declaration, it
# means we have to instantiate that declaration.
if isinstance(decl, FunctionDecl):
function_type = decl.__info__['type']
# We select the first non-generic function declaration that
# that matches the signature candidate of the call node.
if function_type == node.__info__['signature_candidate']:
return decl
assert not function_type.is_generic, 'TODO: {} is generic'.format(function_type)
# When the object denoted by the identifier is a type, it means
# it's been declared in another module. Hence, we should refer to
# the symbol of this other module.
else:
assert False, 'TODO: {} is declared in another module'.format(node.callee)
# Move to the enclosing scope if we couldn't find any match.
scope = scope.parent
# We should always find at least one valid implementation, unless
# something went wrong with the type solver.
assert False, 'could not find the implementation of {}'.format(node.callee) | def compatibilize(name): | random_line_split |
cpp.py | import hashlib
from tango.ast import *
from tango.builtin import Int, Double, String
from tango.types import FunctionType, NominalType, TypeUnion
def transpile(module, header_stream, source_stream):
transpiler = Transpiler(header_stream, source_stream)
transpiler.visit(module)
def compatibilize(name):
result = str(str(name.encode())[2:-1]).replace('\\', '')
for punct in '. ()[]<>-:':
result = result.replace(punct, '')
if result[0].isdigit():
result = '_' + result
return result
operator_translations = {
'+': '__add__',
'-': '__sub__',
'*': '__mul__',
'/': '__div__',
}
class Functor(object):
def __init__(self, function_type):
self.function_type = function_type
@property
def type_signature(self):
# FIXME This discriminator isn't good enough, as different signatures
# may have the same string representation, since their `__str__`
# implementation doesn't use full names.
discriminator = hashlib.sha1(str(self.function_type).encode()).hexdigest()[-8:]
return compatibilize('Sig' + str(self.function_type) + discriminator)
class Transpiler(Visitor):
def __init__(self, header_stream, source_stream):
self.header_stream = header_stream
self.source_stream = source_stream
self.indent = 0
self.containers = {}
self.functions = {}
self.functors = {}
self.types = {}
def write_header(self, data, end='\n'):
print(' ' * self.indent + data, file=self.header_stream, end=end)
def | (self, data, end='\n'):
print(' ' * self.indent + data, file=self.source_stream, end=end)
def visit_ModuleDecl(self, node):
self.write_source('#include "tango.hh"')
self.write_source('')
self.write_source('int main(int argc, char* argv[]) {')
self.indent += 4
self.generic_visit(node)
self.write_source('return 0;')
self.indent -= 4
self.write_source('}')
def visit_ContainerDecl(self, node):
# Write a new variable declaration.
var_type = self.translate_type(node.__info__['type'])
var_name = compatibilize(node.__info__['scope'].name + '_' + node.name)
declaration = var_type + ' ' + var_name
        # If the container has an initial value, write it as well.
if node.initial_value:
declaration += ' = ' + self.translate_expr(node.initial_value)
self.write_source(declaration + ';')
def visit_Call(self, node):
self.write_source(self.translate_expr(node) + ';')
def visit_If(self, node):
assert not node.pattern.parameters, 'TODO pattern matching in if expressions'
condition = self.translate_expr(node.pattern.expression)
self.write_source('if (' + condition + ') {')
self.indent += 4
self.visit(node.body)
self.indent -= 4
self.write_source('}')
if isinstance(node.else_clause, Block):
self.write_source('else {')
self.indent += 4
self.visit(node.else_clause)
self.indent -= 4
self.write_source('}')
elif isinstance(node.else_clause, If):
self.write_source('else')
self.visit(node.else_clause)
def translate_type(self, type_instance):
if isinstance(type_instance, NominalType):
return compatibilize(type_instance.scope.name + '_' + type_instance.name)
if isinstance(type_instance, FunctionType):
# Register a new functor for the parsed function type.
functor = self.functors.get(type_instance)
if functor is None:
functor = Functor(type_instance)
self.functors[type_instance] = functor
return 'std::shared_ptr<' + functor.type_signature + '>'
assert False, 'cannot translate {}'.format(type_instance)
def translate_expr(self, node):
if isinstance(node, Literal):
if node.__info__['type'] == String:
return '"' + node.value + '"'
return node.value
if isinstance(node, Identifier):
# If the identifier is `true` or `false`, we write it as is.
if node.name in ['true', 'false']:
return node.name
            # If the identifier isn't a keyword, first, we retrieve the entity
# the identifier is denoting.
decls = node.__info__['scope'][node.name]
# If the identifier denotes a simple container, we return its full
# name (i.e. scope + name).
if isinstance(decls[0], ContainerDecl):
return compatibilize(node.__info__['scope'].name + '_' + node.name)
# If the identifier denotes a function declaration, we have to
# know which overload and/or specialization it refers to, so as to
# create a different full name for each case.
if isinstance(decls[0], FunctionDecl):
                # If the identifier has a single, non-generic type, we can
# use it as is to discriminate the identifier.
node_type = node.__info__['type']
if not isinstance(node_type, TypeUnion) and not node_type.is_generic:
discriminating_type = node_type
# If the identifier was used as the callee of a function call,
# we can expect the type solver to add a `specialized_type`
# key in the node's metadata.
elif 'specialized_type' in node.__info__:
discriminating_type = node.__info__['specialized_type']
# It should be illegal to use an overloaded or generic
# identifier outside of a function call.
else:
assert False, (
"ambiguous use of '{}' wasn't handled by the type disambiguator"
.format(node))
# FIXME This discriminator isn't good enough, as different
# signatures may have the same string representation, since
# their `__str__` implementation doesn't use full names.
discriminator = hashlib.sha1(str(discriminating_type).encode()).hexdigest()[-8:]
return compatibilize(node.__info__['scope'].name + '_' + node.name + discriminator)
if isinstance(node, PrefixedExpression):
return '{}.{}({})'.format(
self.translate_type(node.operand.__info__['type']),
operator_translations[node.operator],
self.translate_expr(node.operand))
if isinstance(node, BinaryExpression):
return '{}.{}({}, {})'.format(
self.translate_type(node.left.__info__['type']),
operator_translations[node.operator],
self.translate_expr(node.left),
self.translate_expr(node.right))
if isinstance(node, Call):
callee_name = self.translate_expr(node.callee)
return '(*({}))({})'.format(
callee_name,
', '.join(map(self.translate_expr, node.arguments)))
if isinstance(node, CallArgument):
return self.translate_expr(node.value)
assert False, 'cannot translate {}'.format(node)
def find_function_implementation(node):
scope = node.callee.__info__['scope']
while scope is not None:
for decl in node.callee.__info__['scope'][node.callee.name]:
# When the object denoted by the identifier is a declaration, it
# means we have to instantiate that declaration.
if isinstance(decl, FunctionDecl):
function_type = decl.__info__['type']
# We select the first non-generic function declaration that
# that matches the signature candidate of the call node.
if function_type == node.__info__['signature_candidate']:
return decl
assert not function_type.is_generic, 'TODO: {} is generic'.format(function_type)
# When the object denoted by the identifier is a type, it means
# it's been declared in another module. Hence, we should refer to
# the symbol of this other module.
else:
assert False, 'TODO: {} is declared in another module'.format(node.callee)
# Move to the enclosing scope if we couldn't find any match.
scope = scope.parent
# We should always find at least one valid implementation, unless
# something went wrong with the type solver.
assert False, 'could not find the implementation of {}'.format(node.callee)
| write_source | identifier_name |
cpp.py | import hashlib
from tango.ast import *
from tango.builtin import Int, Double, String
from tango.types import FunctionType, NominalType, TypeUnion
def transpile(module, header_stream, source_stream):
transpiler = Transpiler(header_stream, source_stream)
transpiler.visit(module)
def compatibilize(name):
result = str(str(name.encode())[2:-1]).replace('\\', '')
for punct in '. ()[]<>-:':
result = result.replace(punct, '')
if result[0].isdigit():
result = '_' + result
return result
operator_translations = {
'+': '__add__',
'-': '__sub__',
'*': '__mul__',
'/': '__div__',
}
class Functor(object):
def __init__(self, function_type):
self.function_type = function_type
@property
def type_signature(self):
# FIXME This discriminator isn't good enough, as different signatures
# may have the same string representation, since their `__str__`
# implementation doesn't use full names.
discriminator = hashlib.sha1(str(self.function_type).encode()).hexdigest()[-8:]
return compatibilize('Sig' + str(self.function_type) + discriminator)
class Transpiler(Visitor):
def __init__(self, header_stream, source_stream):
self.header_stream = header_stream
self.source_stream = source_stream
self.indent = 0
self.containers = {}
self.functions = {}
self.functors = {}
self.types = {}
def write_header(self, data, end='\n'):
print(' ' * self.indent + data, file=self.header_stream, end=end)
def write_source(self, data, end='\n'):
print(' ' * self.indent + data, file=self.source_stream, end=end)
def visit_ModuleDecl(self, node):
self.write_source('#include "tango.hh"')
self.write_source('')
self.write_source('int main(int argc, char* argv[]) {')
self.indent += 4
self.generic_visit(node)
self.write_source('return 0;')
self.indent -= 4
self.write_source('}')
def visit_ContainerDecl(self, node):
# Write a new variable declaration.
var_type = self.translate_type(node.__info__['type'])
var_name = compatibilize(node.__info__['scope'].name + '_' + node.name)
declaration = var_type + ' ' + var_name
        # If the container has an initial value, write it as well.
if node.initial_value:
declaration += ' = ' + self.translate_expr(node.initial_value)
self.write_source(declaration + ';')
def visit_Call(self, node):
self.write_source(self.translate_expr(node) + ';')
def visit_If(self, node):
assert not node.pattern.parameters, 'TODO pattern matching in if expressions'
condition = self.translate_expr(node.pattern.expression)
self.write_source('if (' + condition + ') {')
self.indent += 4
self.visit(node.body)
self.indent -= 4
self.write_source('}')
if isinstance(node.else_clause, Block):
self.write_source('else {')
self.indent += 4
self.visit(node.else_clause)
self.indent -= 4
self.write_source('}')
elif isinstance(node.else_clause, If):
self.write_source('else')
self.visit(node.else_clause)
def translate_type(self, type_instance):
|
def translate_expr(self, node):
if isinstance(node, Literal):
if node.__info__['type'] == String:
return '"' + node.value + '"'
return node.value
if isinstance(node, Identifier):
# If the identifier is `true` or `false`, we write it as is.
if node.name in ['true', 'false']:
return node.name
            # If the identifier isn't a keyword, first, we retrieve the entity
# the identifier is denoting.
decls = node.__info__['scope'][node.name]
# If the identifier denotes a simple container, we return its full
# name (i.e. scope + name).
if isinstance(decls[0], ContainerDecl):
return compatibilize(node.__info__['scope'].name + '_' + node.name)
# If the identifier denotes a function declaration, we have to
# know which overload and/or specialization it refers to, so as to
# create a different full name for each case.
if isinstance(decls[0], FunctionDecl):
                # If the identifier has a single, non-generic type, we can
# use it as is to discriminate the identifier.
node_type = node.__info__['type']
if not isinstance(node_type, TypeUnion) and not node_type.is_generic:
discriminating_type = node_type
# If the identifier was used as the callee of a function call,
# we can expect the type solver to add a `specialized_type`
# key in the node's metadata.
elif 'specialized_type' in node.__info__:
discriminating_type = node.__info__['specialized_type']
# It should be illegal to use an overloaded or generic
# identifier outside of a function call.
else:
assert False, (
"ambiguous use of '{}' wasn't handled by the type disambiguator"
.format(node))
# FIXME This discriminator isn't good enough, as different
# signatures may have the same string representation, since
# their `__str__` implementation doesn't use full names.
discriminator = hashlib.sha1(str(discriminating_type).encode()).hexdigest()[-8:]
return compatibilize(node.__info__['scope'].name + '_' + node.name + discriminator)
if isinstance(node, PrefixedExpression):
return '{}.{}({})'.format(
self.translate_type(node.operand.__info__['type']),
operator_translations[node.operator],
self.translate_expr(node.operand))
if isinstance(node, BinaryExpression):
return '{}.{}({}, {})'.format(
self.translate_type(node.left.__info__['type']),
operator_translations[node.operator],
self.translate_expr(node.left),
self.translate_expr(node.right))
if isinstance(node, Call):
callee_name = self.translate_expr(node.callee)
return '(*({}))({})'.format(
callee_name,
', '.join(map(self.translate_expr, node.arguments)))
if isinstance(node, CallArgument):
return self.translate_expr(node.value)
assert False, 'cannot translate {}'.format(node)
def find_function_implementation(node):
scope = node.callee.__info__['scope']
while scope is not None:
for decl in node.callee.__info__['scope'][node.callee.name]:
# When the object denoted by the identifier is a declaration, it
# means we have to instantiate that declaration.
if isinstance(decl, FunctionDecl):
function_type = decl.__info__['type']
# We select the first non-generic function declaration that
# that matches the signature candidate of the call node.
if function_type == node.__info__['signature_candidate']:
return decl
assert not function_type.is_generic, 'TODO: {} is generic'.format(function_type)
# When the object denoted by the identifier is a type, it means
# it's been declared in another module. Hence, we should refer to
# the symbol of this other module.
else:
assert False, 'TODO: {} is declared in another module'.format(node.callee)
# Move to the enclosing scope if we couldn't find any match.
scope = scope.parent
# We should always find at least one valid implementation, unless
# something went wrong with the type solver.
assert False, 'could not find the implementation of {}'.format(node.callee)
| if isinstance(type_instance, NominalType):
return compatibilize(type_instance.scope.name + '_' + type_instance.name)
if isinstance(type_instance, FunctionType):
# Register a new functor for the parsed function type.
functor = self.functors.get(type_instance)
if functor is None:
functor = Functor(type_instance)
self.functors[type_instance] = functor
return 'std::shared_ptr<' + functor.type_signature + '>'
assert False, 'cannot translate {}'.format(type_instance) | identifier_body |
mspe-eligibility.e2e-spec.ts | import { browser, element, by } from 'protractor';
import { EligibilityPage, BaseMSPEnrolmentTestPage } from './mspe-enrolment.po';
import { FakeDataEnrolment } from './mspe-enrolment.data';
describe('MSP Enrolment - Check Eligibility', () => { | const PERSONAL_PAGE_URL = `msp/enrolment/personal-info`;
beforeAll(() => {
console.log('START OF E2E ENROLMENT' + '\nThis test uses Seed #: ' + data.getSeed());
});
beforeEach(() => {
page = new EligibilityPage();
basePage = new BaseMSPEnrolmentTestPage();
data.setSeed();
});
it('01. should load the page without issue', () => {
page.navigateTo()
expect(browser.getCurrentUrl()).toContain(ELIGIBILITY_PAGE_URL);
});
  it('02. should NOT let the user proceed if the user has not agreed to the info collection notice', () => {
page.navigateTo();
page.clickModalContinue();
page.checkModal().then(val => {
expect(val).toBe(true);
});
expect(browser.getCurrentUrl()).toContain(ELIGIBILITY_PAGE_URL, 'should stay on page');
});
  it('03. should let the user proceed if the user has agreed to the info collection notice', () => {
page.navigateTo();
page.clickCheckBox();
page.clickModalContinue();
page.checkModal().then(val => {
expect(val).toBe(false);
});
expect(browser.getCurrentUrl()).toContain(ELIGIBILITY_PAGE_URL, 'should stay on page');
});
it('04. should NOT let the user continue if not all questions have been answered', () => {
basePage.navigateTo();
basePage.selectMSPEnrolment();
page.clickCheckBox();
page.clickModalContinue();
page.clickContinue();
// TODO: insert expect that checks if there are errors in the page
expect(browser.getCurrentUrl()).toContain(ELIGIBILITY_PAGE_URL, 'should stay on page since there are errors');
});
it('05. should be able to continue after answering all the questions', () => {
// Need to go to base page when modal is clicked previously
basePage.navigateTo();
basePage.selectMSPEnrolment();
page.clickCheckBox();
page.clickModalContinue();
page.clickRadioButton('Do you currently live in Briti', 'true');
page.clickRadioButton('Will you or anyone in your imm', 'false');
page.clickRadioButton('Is anyone you\'re applying for', 'false');
page.clickContinue();
expect(browser.getCurrentUrl()).toContain(PERSONAL_PAGE_URL, 'should navigate to the next page');
});
}); | let page: EligibilityPage;
let basePage: BaseMSPEnrolmentTestPage;
const data = new FakeDataEnrolment();
let eliData;
const ELIGIBILITY_PAGE_URL = `msp/enrolment/prepare` | random_line_split |
category-bucket-comparison.summarization.service.ts | import * as math from 'mathjs';
import { Injectable } from '@angular/core';
import { map } from 'rxjs/operators';
import { Observable, of } from 'rxjs';
import { SummarizationDataSourceService } from './summarization-data-source.service';
import { SummarizationService, BaseConfig } from './summarization.service';
import { SummaryGroup, Summary } from './types';
import { CategoricalPoint } from '../../datasets/metas/types';
import { formatY } from '../../utils/formatters';
export interface CategoryBucketComparisonConfig extends BaseConfig {
metric: string;
bucketPercentageTolerance: number;
}
export type CategoryBucketComparisonProperties = {
};
const defaultConfig: Partial<CategoryBucketComparisonConfig> = {
metric: 'sessions',
bucketPercentageTolerance: 5,
};
@Injectable({
providedIn: 'any',
})
export class CategoryBucketComparisonSummarizationService extends
SummarizationService<CategoricalPoint, CategoryBucketComparisonProperties, CategoryBucketComparisonConfig> {
constructor(
protected summarizationDataSourceService: SummarizationDataSourceService,
) {
super();
}
prepareConfig(config: BaseConfig & Partial<CategoryBucketComparisonConfig>): CategoryBucketComparisonConfig {
return { ...defaultConfig, ...config } as CategoryBucketComparisonConfig;
}
createDataProperties$(config: CategoryBucketComparisonConfig): Observable<CategoryBucketComparisonProperties> {
return of({});
}
/**
   * Create summaries that describe the difference in average y-value between buckets.
   * A bucket is a subset of the data with similar y-values.
*
* Sample Summaries:
* - Desktop has 56.4 (129.4%) more sessions than Mobile and Tablet.
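   *
   * Illustrative bucketization (hypothetical numbers, not taken from real data): with
   * the default 5% tolerance, relative y-values [100, 97, 60, 58] form the buckets
   * [100, 97] and [60, 58], and one comparison summary is produced per adjacent pair.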
*/
createSummaries$(config: CategoryBucketComparisonConfig): Observable<SummaryGroup[]> {
// The length of datumLabels should be 1 for this summarization
const { datumLabels, metric, bucketPercentageTolerance } = config;
return this.summarizationDataSourceService.pointsByLabels$(datumLabels)
.pipe(map(pointsArray => {
// datum label should be unique in data, so length of pointsArray is either 0 or 1
const points = (pointsArray.length === 0 ? [] : pointsArray[0]) as CategoricalPoint[];
const maxYValue = Math.max(...points.map(({ y }) => y));
const sortedPoints = [...points].sort(({ y: y1 }, { y: y2 }) => y2 - y1);
const sortedRelativePercentagePoints = sortedPoints.map(({ x, y }) => ({ x, y: y / maxYValue * 100 }));
const buckets = this.bucketizePoints(sortedRelativePercentagePoints, bucketPercentageTolerance);
const summaries: Summary[] = [];
for (let i = 1; i < buckets.length; i++) {
const bucketGreaterYAverage = math.mean(buckets[i - 1].map(({ y }) => y));
const bucketSmallerYAverage = math.mean(buckets[i].map(({ y }) => y));
const bucketGreaterXValuesText = buckets[i - 1].map(({ x }) => x).join(', ');
const bucketSmallerXValuesText = buckets[i].map(({ x }) => x).join(', ');
const haveText = buckets[i - 1].length === 1 ? 'has' : 'have';
const yAverageDiff = bucketGreaterYAverage - bucketSmallerYAverage;
const yAverageDiffPercentage = yAverageDiff / bucketSmallerYAverage * 100;
const yAverageDiffText = formatY(yAverageDiff);
const yAverageDiffPercentageText = formatY(yAverageDiffPercentage);
const text = `<b>${bucketGreaterXValuesText}</b> ${haveText} <b>${yAverageDiffText} (${yAverageDiffPercentageText}%)</b> more ${metric} than <b>${bucketSmallerXValuesText}</b>.`;
summaries.push({
text,
validity: 1.0,
});
}
return [{
title: `Category Bucket Comparison - ${bucketPercentageTolerance}% Bucketization Tolerance`,
summaries,
}];
}));
}
private bucketizePoints(points: CategoricalPoint[], bucketPercentageTolerance: number) {
const buckets: CategoricalPoint[][] = [];
let currentBucket: CategoricalPoint[] = [];
for (const { x, y } of points) {
// Assumes that points are sorted by greatest y -> least y
const currentBucketYMax = currentBucket[0]?.y ?? null;
if (currentBucketYMax === null || currentBucketYMax - y < bucketPercentageTolerance) {
currentBucket.push({ x, y });
} else {
buckets.push(currentBucket);
currentBucket = [{ x, y }];
}
}
if (currentBucket.length > 0) |
return buckets;
}
}
| {
buckets.push(currentBucket);
} | conditional_block |
category-bucket-comparison.summarization.service.ts | import * as math from 'mathjs';
import { Injectable } from '@angular/core';
import { map } from 'rxjs/operators';
import { Observable, of } from 'rxjs';
import { SummarizationDataSourceService } from './summarization-data-source.service';
import { SummarizationService, BaseConfig } from './summarization.service';
import { SummaryGroup, Summary } from './types';
import { CategoricalPoint } from '../../datasets/metas/types';
import { formatY } from '../../utils/formatters';
export interface CategoryBucketComparisonConfig extends BaseConfig {
metric: string;
bucketPercentageTolerance: number;
}
export type CategoryBucketComparisonProperties = {
};
const defaultConfig: Partial<CategoryBucketComparisonConfig> = {
metric: 'sessions',
bucketPercentageTolerance: 5,
};
@Injectable({
providedIn: 'any',
})
export class CategoryBucketComparisonSummarizationService extends
SummarizationService<CategoricalPoint, CategoryBucketComparisonProperties, CategoryBucketComparisonConfig> {
constructor(
protected summarizationDataSourceService: SummarizationDataSourceService,
) {
super();
}
prepareConfig(config: BaseConfig & Partial<CategoryBucketComparisonConfig>): CategoryBucketComparisonConfig {
return { ...defaultConfig, ...config } as CategoryBucketComparisonConfig;
}
createDataProperties$(config: CategoryBucketComparisonConfig): Observable<CategoryBucketComparisonProperties> {
return of({});
}
/**
   * Create summaries that describe the difference in average y-value between buckets.
   * A bucket is a subset of the data with similar y-values.
*
* Sample Summaries:
* - Desktop has 56.4 (129.4%) more sessions than Mobile and Tablet.
*/
createSummaries$(config: CategoryBucketComparisonConfig): Observable<SummaryGroup[]> {
// The length of datumLabels should be 1 for this summarization
const { datumLabels, metric, bucketPercentageTolerance } = config;
return this.summarizationDataSourceService.pointsByLabels$(datumLabels)
.pipe(map(pointsArray => {
// datum label should be unique in data, so length of pointsArray is either 0 or 1
const points = (pointsArray.length === 0 ? [] : pointsArray[0]) as CategoricalPoint[];
const maxYValue = Math.max(...points.map(({ y }) => y));
const sortedPoints = [...points].sort(({ y: y1 }, { y: y2 }) => y2 - y1);
const sortedRelativePercentagePoints = sortedPoints.map(({ x, y }) => ({ x, y: y / maxYValue * 100 }));
const buckets = this.bucketizePoints(sortedRelativePercentagePoints, bucketPercentageTolerance);
const summaries: Summary[] = [];
for (let i = 1; i < buckets.length; i++) {
const bucketGreaterYAverage = math.mean(buckets[i - 1].map(({ y }) => y));
const bucketSmallerYAverage = math.mean(buckets[i].map(({ y }) => y));
const bucketGreaterXValuesText = buckets[i - 1].map(({ x }) => x).join(', ');
const bucketSmallerXValuesText = buckets[i].map(({ x }) => x).join(', ');
const haveText = buckets[i - 1].length === 1 ? 'has' : 'have';
const yAverageDiff = bucketGreaterYAverage - bucketSmallerYAverage;
const yAverageDiffPercentage = yAverageDiff / bucketSmallerYAverage * 100;
const yAverageDiffText = formatY(yAverageDiff);
const yAverageDiffPercentageText = formatY(yAverageDiffPercentage);
const text = `<b>${bucketGreaterXValuesText}</b> ${haveText} <b>${yAverageDiffText} (${yAverageDiffPercentageText}%)</b> more ${metric} than <b>${bucketSmallerXValuesText}</b>.`;
summaries.push({
text,
validity: 1.0,
});
}
return [{
title: `Category Bucket Comparison - ${bucketPercentageTolerance}% Bucketization Tolerance`,
summaries,
}];
}));
}
private | (points: CategoricalPoint[], bucketPercentageTolerance: number) {
const buckets: CategoricalPoint[][] = [];
let currentBucket: CategoricalPoint[] = [];
for (const { x, y } of points) {
// Assumes that points are sorted by greatest y -> least y
const currentBucketYMax = currentBucket[0]?.y ?? null;
if (currentBucketYMax === null || currentBucketYMax - y < bucketPercentageTolerance) {
currentBucket.push({ x, y });
} else {
buckets.push(currentBucket);
currentBucket = [{ x, y }];
}
}
if (currentBucket.length > 0) {
buckets.push(currentBucket);
}
return buckets;
}
}
| bucketizePoints | identifier_name |
category-bucket-comparison.summarization.service.ts | import * as math from 'mathjs';
import { Injectable } from '@angular/core';
import { map } from 'rxjs/operators';
import { Observable, of } from 'rxjs';
import { SummarizationDataSourceService } from './summarization-data-source.service';
import { SummarizationService, BaseConfig } from './summarization.service';
import { SummaryGroup, Summary } from './types';
import { CategoricalPoint } from '../../datasets/metas/types';
import { formatY } from '../../utils/formatters';
export interface CategoryBucketComparisonConfig extends BaseConfig {
metric: string;
bucketPercentageTolerance: number;
}
export type CategoryBucketComparisonProperties = {
};
const defaultConfig: Partial<CategoryBucketComparisonConfig> = {
metric: 'sessions',
bucketPercentageTolerance: 5,
};
@Injectable({
providedIn: 'any',
})
export class CategoryBucketComparisonSummarizationService extends
SummarizationService<CategoricalPoint, CategoryBucketComparisonProperties, CategoryBucketComparisonConfig> {
constructor(
protected summarizationDataSourceService: SummarizationDataSourceService,
) {
super();
}
prepareConfig(config: BaseConfig & Partial<CategoryBucketComparisonConfig>): CategoryBucketComparisonConfig {
return { ...defaultConfig, ...config } as CategoryBucketComparisonConfig;
}
createDataProperties$(config: CategoryBucketComparisonConfig): Observable<CategoryBucketComparisonProperties> {
return of({});
}
/**
   * Create summaries that describe the difference in average y-value between buckets.
   * A bucket is a subset of the data with similar y-values.
*
* Sample Summaries:
* - Desktop has 56.4 (129.4%) more sessions than Mobile and Tablet.
|
return this.summarizationDataSourceService.pointsByLabels$(datumLabels)
.pipe(map(pointsArray => {
// datum label should be unique in data, so length of pointsArray is either 0 or 1
const points = (pointsArray.length === 0 ? [] : pointsArray[0]) as CategoricalPoint[];
const maxYValue = Math.max(...points.map(({ y }) => y));
const sortedPoints = [...points].sort(({ y: y1 }, { y: y2 }) => y2 - y1);
const sortedRelativePercentagePoints = sortedPoints.map(({ x, y }) => ({ x, y: y / maxYValue * 100 }));
const buckets = this.bucketizePoints(sortedRelativePercentagePoints, bucketPercentageTolerance);
const summaries: Summary[] = [];
for (let i = 1; i < buckets.length; i++) {
const bucketGreaterYAverage = math.mean(buckets[i - 1].map(({ y }) => y));
const bucketSmallerYAverage = math.mean(buckets[i].map(({ y }) => y));
const bucketGreaterXValuesText = buckets[i - 1].map(({ x }) => x).join(', ');
const bucketSmallerXValuesText = buckets[i].map(({ x }) => x).join(', ');
const haveText = buckets[i - 1].length === 1 ? 'has' : 'have';
const yAverageDiff = bucketGreaterYAverage - bucketSmallerYAverage;
const yAverageDiffPercentage = yAverageDiff / bucketSmallerYAverage * 100;
const yAverageDiffText = formatY(yAverageDiff);
const yAverageDiffPercentageText = formatY(yAverageDiffPercentage);
const text = `<b>${bucketGreaterXValuesText}</b> ${haveText} <b>${yAverageDiffText} (${yAverageDiffPercentageText}%)</b> more ${metric} than <b>${bucketSmallerXValuesText}</b>.`;
summaries.push({
text,
validity: 1.0,
});
}
return [{
title: `Category Bucket Comparison - ${bucketPercentageTolerance}% Bucketization Tolerance`,
summaries,
}];
}));
}
private bucketizePoints(points: CategoricalPoint[], bucketPercentageTolerance: number) {
const buckets: CategoricalPoint[][] = [];
let currentBucket: CategoricalPoint[] = [];
for (const { x, y } of points) {
// Assumes that points are sorted by greatest y -> least y
const currentBucketYMax = currentBucket[0]?.y ?? null;
if (currentBucketYMax === null || currentBucketYMax - y < bucketPercentageTolerance) {
currentBucket.push({ x, y });
} else {
buckets.push(currentBucket);
currentBucket = [{ x, y }];
}
}
if (currentBucket.length > 0) {
buckets.push(currentBucket);
}
return buckets;
}
} | */
createSummaries$(config: CategoryBucketComparisonConfig): Observable<SummaryGroup[]> {
// The length of datumLabels should be 1 for this summarization
const { datumLabels, metric, bucketPercentageTolerance } = config;
| random_line_split |
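// --- Worked example (added; illustrative only) -------------------------------------------
// The arithmetic used by createSummaries$ above, applied to assumed bucket averages that
// roughly mirror the "Desktop vs. Mobile, Tablet" sample from the doc comment.
const exampleGreaterBucketAverage = 100;           // Desktop, as % of the max y-value
const exampleSmallerBucketAverage = (44 + 42) / 2; // Mobile and Tablet, averaged: 43
const exampleDiff = exampleGreaterBucketAverage - exampleSmallerBucketAverage;  // 57
const exampleDiffPercentage = exampleDiff / exampleSmallerBucketAverage * 100;  // ~132.6
// The generated sentence then reads along the lines of:
// "<b>Desktop</b> has <b>57 (132.6%)</b> more sessions than <b>Mobile, Tablet</b>."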
category-bucket-comparison.summarization.service.ts | import * as math from 'mathjs';
import { Injectable } from '@angular/core';
import { map } from 'rxjs/operators';
import { Observable, of } from 'rxjs';
import { SummarizationDataSourceService } from './summarization-data-source.service';
import { SummarizationService, BaseConfig } from './summarization.service';
import { SummaryGroup, Summary } from './types';
import { CategoricalPoint } from '../../datasets/metas/types';
import { formatY } from '../../utils/formatters';
export interface CategoryBucketComparisonConfig extends BaseConfig {
metric: string;
bucketPercentageTolerance: number;
}
export type CategoryBucketComparisonProperties = {
};
const defaultConfig: Partial<CategoryBucketComparisonConfig> = {
metric: 'sessions',
bucketPercentageTolerance: 5,
};
@Injectable({
providedIn: 'any',
})
export class CategoryBucketComparisonSummarizationService extends
SummarizationService<CategoricalPoint, CategoryBucketComparisonProperties, CategoryBucketComparisonConfig> {
constructor(
protected summarizationDataSourceService: SummarizationDataSourceService,
) {
super();
}
prepareConfig(config: BaseConfig & Partial<CategoryBucketComparisonConfig>): CategoryBucketComparisonConfig |
createDataProperties$(config: CategoryBucketComparisonConfig): Observable<CategoryBucketComparisonProperties> {
return of({});
}
/**
* Create summaries that describe the difference of y-values average between buckets.
* A bucket is a subset of the data with similar y-values.
*
* Sample Summaries:
* - Desktop has 56.4 (129.4%) more sessions than Mobile and Tablet.
*/
createSummaries$(config: CategoryBucketComparisonConfig): Observable<SummaryGroup[]> {
// The length of datumLabels should be 1 for this summarization
const { datumLabels, metric, bucketPercentageTolerance } = config;
return this.summarizationDataSourceService.pointsByLabels$(datumLabels)
.pipe(map(pointsArray => {
// datum label should be unique in data, so length of pointsArray is either 0 or 1
const points = (pointsArray.length === 0 ? [] : pointsArray[0]) as CategoricalPoint[];
const maxYValue = Math.max(...points.map(({ y }) => y));
const sortedPoints = [...points].sort(({ y: y1 }, { y: y2 }) => y2 - y1);
const sortedRelativePercentagePoints = sortedPoints.map(({ x, y }) => ({ x, y: y / maxYValue * 100 }));
const buckets = this.bucketizePoints(sortedRelativePercentagePoints, bucketPercentageTolerance);
const summaries: Summary[] = [];
for (let i = 1; i < buckets.length; i++) {
const bucketGreaterYAverage = math.mean(buckets[i - 1].map(({ y }) => y));
const bucketSmallerYAverage = math.mean(buckets[i].map(({ y }) => y));
const bucketGreaterXValuesText = buckets[i - 1].map(({ x }) => x).join(', ');
const bucketSmallerXValuesText = buckets[i].map(({ x }) => x).join(', ');
const haveText = buckets[i - 1].length === 1 ? 'has' : 'have';
const yAverageDiff = bucketGreaterYAverage - bucketSmallerYAverage;
const yAverageDiffPercentage = yAverageDiff / bucketSmallerYAverage * 100;
const yAverageDiffText = formatY(yAverageDiff);
const yAverageDiffPercentageText = formatY(yAverageDiffPercentage);
const text = `<b>${bucketGreaterXValuesText}</b> ${haveText} <b>${yAverageDiffText} (${yAverageDiffPercentageText}%)</b> more ${metric} than <b>${bucketSmallerXValuesText}</b>.`;
summaries.push({
text,
validity: 1.0,
});
}
return [{
title: `Category Bucket Comparison - ${bucketPercentageTolerance}% Bucketization Tolerance`,
summaries,
}];
}));
}
private bucketizePoints(points: CategoricalPoint[], bucketPercentageTolerance: number) {
const buckets: CategoricalPoint[][] = [];
let currentBucket: CategoricalPoint[] = [];
for (const { x, y } of points) {
// Assumes that points are sorted by greatest y -> least y
const currentBucketYMax = currentBucket[0]?.y ?? null;
if (currentBucketYMax === null || currentBucketYMax - y < bucketPercentageTolerance) {
currentBucket.push({ x, y });
} else {
buckets.push(currentBucket);
currentBucket = [{ x, y }];
}
}
if (currentBucket.length > 0) {
buckets.push(currentBucket);
}
return buckets;
}
}
| {
return { ...defaultConfig, ...config } as CategoryBucketComparisonConfig;
} | identifier_body |
phaserconversionmaps.ts | // 2388 - vertical cave wall
// 2399 - horizontal cave wall
import { invert } from 'lodash';
export const VerticalDoorGids = {
// blue wall door vert
1058: true,
// undead wall door vert
1065: true,
| 1070: true,
// stone wall door vert
1077: true,
// green wall door vert
1088: true,
// town door vert
1095: true,
// town2 door vert
1116: true,
// town3 door vert
1123: true,
// cave green wall door vert
1211: true
};
export const TrueSightMap = {
// blue wall horiz, blue wall vert
14: 1064,
15: 1065,
// cave horiz, cave vert
62: 1076,
63: 1075,
// stone horiz, stone vert
78: 1087,
79: 1086,
// green stone horiz, green stone vert
94: 1094,
95: 1093,
// town horiz, town vert
111: 1100,
112: 1101,
// town2 horiz, town2 vert
175: 1123,
176: 1124,
// green cave wall horiz, green cave wall vert
350: 1217,
351: 1216
};
export const TrueSightMapReversed = invert(TrueSightMap); | // cave wall door vert | random_line_split |
categories_request_body.py | # coding: utf-8
"""
Salt Edge Account Information API
API Reference for services # noqa: E501
OpenAPI spec version: 5.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CategoriesRequestBody(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'data': 'CategoriesRequestBodyData'
}
attribute_map = {
'data': 'data'
}
def __init__(self, data=None): # noqa: E501
"""CategoriesRequestBody - a model defined in Swagger""" # noqa: E501
self._data = None
self.discriminator = None
if data is not None:
self.data = data
@property
def data(self):
"""Gets the data of this CategoriesRequestBody. # noqa: E501
:return: The data of this CategoriesRequestBody. # noqa: E501
:rtype: CategoriesRequestBodyData
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this CategoriesRequestBody.
:param data: The data of this CategoriesRequestBody. # noqa: E501
:type: CategoriesRequestBodyData
"""
self._data = data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(CategoriesRequestBody, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CategoriesRequestBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
| """Returns true if both objects are not equal"""
return not self == other | identifier_body |
|
categories_request_body.py | # coding: utf-8
"""
Salt Edge Account Information API
API Reference for services # noqa: E501
OpenAPI spec version: 5.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CategoriesRequestBody(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'data': 'CategoriesRequestBodyData'
}
attribute_map = {
'data': 'data'
}
def __init__(self, data=None): # noqa: E501
"""CategoriesRequestBody - a model defined in Swagger""" # noqa: E501
self._data = None
self.discriminator = None
if data is not None:
self.data = data
@property
def data(self):
"""Gets the data of this CategoriesRequestBody. # noqa: E501
:return: The data of this CategoriesRequestBody. # noqa: E501
:rtype: CategoriesRequestBodyData
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this CategoriesRequestBody.
:param data: The data of this CategoriesRequestBody. # noqa: E501
:type: CategoriesRequestBodyData
"""
self._data = data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(CategoriesRequestBody, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CategoriesRequestBody):
return False
return self.__dict__ == other.__dict__
def | (self, other):
"""Returns true if both objects are not equal"""
return not self == other
| __ne__ | identifier_name |
categories_request_body.py | # coding: utf-8
"""
Salt Edge Account Information API
API Reference for services # noqa: E501
OpenAPI spec version: 5.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CategoriesRequestBody(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'data': 'CategoriesRequestBodyData'
}
attribute_map = {
'data': 'data'
}
def __init__(self, data=None): # noqa: E501
"""CategoriesRequestBody - a model defined in Swagger""" # noqa: E501
self._data = None
self.discriminator = None
if data is not None:
self.data = data
@property
def data(self):
"""Gets the data of this CategoriesRequestBody. # noqa: E501
:return: The data of this CategoriesRequestBody. # noqa: E501
:rtype: CategoriesRequestBodyData
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this CategoriesRequestBody.
:param data: The data of this CategoriesRequestBody. # noqa: E501
:type: CategoriesRequestBodyData
"""
self._data = data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
|
if issubclass(CategoriesRequestBody, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CategoriesRequestBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| result[attr] = value | conditional_block |
categories_request_body.py | # coding: utf-8
"""
Salt Edge Account Information API
API Reference for services # noqa: E501
OpenAPI spec version: 5.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CategoriesRequestBody(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'data': 'CategoriesRequestBodyData'
}
attribute_map = {
'data': 'data'
}
def __init__(self, data=None): # noqa: E501
"""CategoriesRequestBody - a model defined in Swagger""" # noqa: E501
self._data = None | if data is not None:
self.data = data
@property
def data(self):
"""Gets the data of this CategoriesRequestBody. # noqa: E501
:return: The data of this CategoriesRequestBody. # noqa: E501
:rtype: CategoriesRequestBodyData
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this CategoriesRequestBody.
:param data: The data of this CategoriesRequestBody. # noqa: E501
:type: CategoriesRequestBodyData
"""
self._data = data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(CategoriesRequestBody, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CategoriesRequestBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other | self.discriminator = None | random_line_split |
PlayerSpec.js | describe("Player", function() {
var Player = require('../src/Player.js'); | player = new Player();
song = new Song();
});
it("should be able to play a Song", function() {
player.play(song);
expect(player.currentlyPlayingSong).toEqual(song);
//demonstrates use of custom matcher
expect(player).toBePlaying(song);
});
describe("when song has been paused", function() {
beforeEach(function() {
player.play(song);
player.pause();
});
it("should indicate that the song is currently paused", function() {
expect(player.isPlaying).toBeFalsy();
// demonstrates use of 'not' with a custom matcher
expect(player).not.toBePlaying(song);
});
it("should be possible to resume", function() {
player.resume();
expect(player.isPlaying).toBeTruthy();
expect(player.currentlyPlayingSong).toEqual(song);
});
});
// demonstrates use of spies to intercept and test method calls
it("tells the current song if the user has made it a favorite", function() {
spyOn(song, 'persistFavoriteStatus');
player.play(song);
player.makeFavorite();
expect(song.persistFavoriteStatus).toHaveBeenCalledWith(true);
});
//demonstrates use of expected exceptions
describe("#resume", function() {
it("should throw an exception if song is already playing", function() {
player.play(song);
expect(function() {
player.resume();
}).toThrowError("song is already playing");
});
});
}); | var Song = require('../src/Song.js');
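// --- Minimal reference implementation (added; illustrative only) -------------------------
// The spec above exercises this surface; the real ../src/Player.js and ../src/Song.js may
// differ, but an implementation consistent with these expectations looks roughly like this:
// function Song() {
// }
// Song.prototype.persistFavoriteStatus = function(value) {
//   // persist the favorite flag somewhere; the spec spies on this call
// };
// function Player() {
//   this.currentlyPlayingSong = null;
//   this.isPlaying = false;
// }
// Player.prototype.play = function(song) {
//   this.currentlyPlayingSong = song;
//   this.isPlaying = true;
// };
// Player.prototype.pause = function() {
//   this.isPlaying = false;
// };
// Player.prototype.resume = function() {
//   if (this.isPlaying) {
//     throw new Error("song is already playing");
//   }
//   this.isPlaying = true;
// };
// Player.prototype.makeFavorite = function() {
//   this.currentlyPlayingSong.persistFavoriteStatus(true);
// };
// module.exports = Player;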
var player;
var song;
beforeEach(function() { | random_line_split |
AirFileStorageProvider.js | /*
Copyright (c) 2004-2008, The Dojo Foundation All Rights Reserved.
Available via Academic Free License >= 2.1 OR the modified BSD license.
see: http://dojotoolkit.org/license for details
*/
if(!dojo._hasResource["dojox.storage.AirFileStorageProvider"]){
dojo._hasResource["dojox.storage.AirFileStorageProvider"]=true;
dojo.provide("dojox.storage.AirFileStorageProvider");
dojo.require("dojox.storage.manager");
dojo.require("dojox.storage.Provider");
if(dojo.isAIR){
(function(){
if(!_1){
var _1={};
}
_1.File=window.runtime.flash.filesystem.File;
_1.FileStream=window.runtime.flash.filesystem.FileStream;
_1.FileMode=window.runtime.flash.filesystem.FileMode;
dojo.declare("dojox.storage.AirFileStorageProvider",[dojox.storage.Provider],{initialized:false,_storagePath:"__DOJO_STORAGE/",initialize:function(){
this.initialized=false;
try{
var _2=_1.File.applicationStorageDirectory.resolvePath(this._storagePath);
if(!_2.exists){
_2.createDirectory();
}
this.initialized=true;
}
catch(e){
}
dojox.storage.manager.loaded();
},isAvailable:function(){
return true;
},put:function(_3,_4,_5,_6){
if(this.isValidKey(_3)==false){
throw new Error("Invalid key given: "+_3);
}
_6=_6||this.DEFAULT_NAMESPACE;
if(this.isValidKey(_6)==false){
throw new Error("Invalid namespace given: "+_6);
}
try{
this.remove(_3,_6);
var _7=_1.File.applicationStorageDirectory.resolvePath(this._storagePath+_6);
if(!_7.exists){
_7.createDirectory();
}
var _8=_7.resolvePath(_3);
var _9=new _1.FileStream();
_9.open(_8,_1.FileMode.WRITE);
_9.writeObject(_4);
_9.close();
}
catch(e){
_5(this.FAILED,_3,e.toString(),_6);
return;
}
if(_5){
_5(this.SUCCESS,_3,null,_6);
}
},get:function(_a,_b){
if(this.isValidKey(_a)==false){
throw new Error("Invalid key given: "+_a);
}
_b=_b||this.DEFAULT_NAMESPACE;
var _c=null;
var _d=_1.File.applicationStorageDirectory.resolvePath(this._storagePath+_b+"/"+_a);
if(_d.exists&&!_d.isDirectory){
var _e=new _1.FileStream();
_e.open(_d,_1.FileMode.READ);
_c=_e.readObject();
_e.close();
}
return _c;
},getNamespaces:function(){
var _f=[this.DEFAULT_NAMESPACE];
var dir=_1.File.applicationStorageDirectory.resolvePath(this._storagePath);
var _11=dir.getDirectoryListing(),i;
for(i=0;i<_11.length;i++){
if(_11[i].isDirectory&&_11[i].name!=this.DEFAULT_NAMESPACE){
_f.push(_11[i].name);
}
}
return _f;
},getKeys:function(_13){
_13=_13||this.DEFAULT_NAMESPACE;
if(this.isValidKey(_13)==false){
throw new Error("Invalid namespace given: "+_13);
}
var _14=[];
var dir=_1.File.applicationStorageDirectory.resolvePath(this._storagePath+_13);
if(dir.exists&&dir.isDirectory){
var _16=dir.getDirectoryListing(),i;
for(i=0;i<_16.length;i++){
_14.push(_16[i].name);
}
}
return _14;
},clear:function(_18){
if(this.isValidKey(_18)==false){
throw new Error("Invalid namespace given: "+_18);
}
var dir=_1.File.applicationStorageDirectory.resolvePath(this._storagePath+_18);
if(dir.exists&&dir.isDirectory){
dir.deleteDirectory(true);
}
},remove:function(key,_1b){
_1b=_1b||this.DEFAULT_NAMESPACE;
var _1c=_1.File.applicationStorageDirectory.resolvePath(this._storagePath+_1b+"/"+key);
if(_1c.exists&&!_1c.isDirectory){
_1c.deleteFile();
}
},putMultiple:function(_1d,_1e,_1f,_20){
if(this.isValidKeyArray(_1d)===false||!(_1e instanceof Array)||_1d.length!=_1e.length){
throw new Error("Invalid arguments: keys = ["+_1d+"], values = ["+_1e+"]");
}
if(_20==null||typeof _20=="undefined"){
_20=this.DEFAULT_NAMESPACE;
}
if(this.isValidKey(_20)==false){
throw new Error("Invalid namespace given: "+_20);
}
this._statusHandler=_1f;
try{
for(var i=0;i<_1d.length;i++){
this.put(_1d[i],_1e[i],null,_20);
}
}
catch(e){
if(_1f){
_1f(this.FAILED,_1d,e.toString(),_20);
}
return;
}
if(_1f){
_1f(this.SUCCESS,_1d,null,_20);
}
},getMultiple:function(_22,_23){
if(this.isValidKeyArray(_22)===false){
throw new Error("Invalid key array given: "+_22);
}
if(_23==null||typeof _23=="undefined"){
_23=this.DEFAULT_NAMESPACE;
}
if(this.isValidKey(_23)==false) |
var _24=[];
for(var i=0;i<_22.length;i++){
_24[i]=this.get(_22[i],_23);
}
return _24;
},removeMultiple:function(_26,_27){
_27=_27||this.DEFAULT_NAMESPACE;
for(var i=0;i<_26.length;i++){
this.remove(_26[i],_27);
}
},isPermanent:function(){
return true;
},getMaximumSize:function(){
return this.SIZE_NO_LIMIT;
},hasSettingsUI:function(){
return false;
},showSettingsUI:function(){
throw new Error(this.declaredClass+" does not support a storage settings user-interface");
},hideSettingsUI:function(){
throw new Error(this.declaredClass+" does not support a storage settings user-interface");
}});
dojox.storage.manager.register("dojox.storage.AirFileStorageProvider",new dojox.storage.AirFileStorageProvider());
dojox.storage.manager.initialize();
})();
}
}
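// --- Usage sketch (added; illustrative only) ----------------------------------------------
// Applications normally obtain the provider through dojox.storage.manager (it is registered
// above); the helper below is never called and only uses methods defined in this file.
function __exampleAirFileStorageUsage(){
var provider=new dojox.storage.AirFileStorageProvider();
provider.initialize();
provider.put("lastOpened",{file:"notes.txt"},function(status,key,message,namespace){
console.log(status,key,message,namespace);
},"session");
var value=provider.get("lastOpened","session");
provider.remove("lastOpened","session");
return value;
}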
| {
throw new Error("Invalid namespace given: "+_23);
} | conditional_block |
AirFileStorageProvider.js | /*
Copyright (c) 2004-2008, The Dojo Foundation All Rights Reserved.
Available via Academic Free License >= 2.1 OR the modified BSD license.
see: http://dojotoolkit.org/license for details
*/
if(!dojo._hasResource["dojox.storage.AirFileStorageProvider"]){
dojo._hasResource["dojox.storage.AirFileStorageProvider"]=true;
dojo.provide("dojox.storage.AirFileStorageProvider");
dojo.require("dojox.storage.manager");
dojo.require("dojox.storage.Provider");
if(dojo.isAIR){
(function(){
if(!_1){
var _1={};
}
_1.File=window.runtime.flash.filesystem.File;
_1.FileStream=window.runtime.flash.filesystem.FileStream;
_1.FileMode=window.runtime.flash.filesystem.FileMode;
dojo.declare("dojox.storage.AirFileStorageProvider",[dojox.storage.Provider],{initialized:false,_storagePath:"__DOJO_STORAGE/",initialize:function(){
this.initialized=false;
try{
var _2=_1.File.applicationStorageDirectory.resolvePath(this._storagePath);
if(!_2.exists){
_2.createDirectory();
}
this.initialized=true;
}
catch(e){
}
dojox.storage.manager.loaded();
},isAvailable:function(){
return true;
},put:function(_3,_4,_5,_6){
if(this.isValidKey(_3)==false){
throw new Error("Invalid key given: "+_3);
}
_6=_6||this.DEFAULT_NAMESPACE;
if(this.isValidKey(_6)==false){
throw new Error("Invalid namespace given: "+_6);
}
try{
this.remove(_3,_6);
var _7=_1.File.applicationStorageDirectory.resolvePath(this._storagePath+_6);
if(!_7.exists){
_7.createDirectory();
}
var _8=_7.resolvePath(_3);
var _9=new _1.FileStream();
_9.open(_8,_1.FileMode.WRITE);
_9.writeObject(_4);
_9.close();
}
catch(e){
_5(this.FAILED,_3,e.toString(),_6);
return;
}
if(_5){ | throw new Error("Invalid key given: "+_a);
}
_b=_b||this.DEFAULT_NAMESPACE;
var _c=null;
var _d=_1.File.applicationStorageDirectory.resolvePath(this._storagePath+_b+"/"+_a);
if(_d.exists&&!_d.isDirectory){
var _e=new _1.FileStream();
_e.open(_d,_1.FileMode.READ);
_c=_e.readObject();
_e.close();
}
return _c;
},getNamespaces:function(){
var _f=[this.DEFAULT_NAMESPACE];
var dir=_1.File.applicationStorageDirectory.resolvePath(this._storagePath);
var _11=dir.getDirectoryListing(),i;
for(i=0;i<_11.length;i++){
if(_11[i].isDirectory&&_11[i].name!=this.DEFAULT_NAMESPACE){
_f.push(_11[i].name);
}
}
return _f;
},getKeys:function(_13){
_13=_13||this.DEFAULT_NAMESPACE;
if(this.isValidKey(_13)==false){
throw new Error("Invalid namespace given: "+_13);
}
var _14=[];
var dir=_1.File.applicationStorageDirectory.resolvePath(this._storagePath+_13);
if(dir.exists&&dir.isDirectory){
var _16=dir.getDirectoryListing(),i;
for(i=0;i<_16.length;i++){
_14.push(_16[i].name);
}
}
return _14;
},clear:function(_18){
if(this.isValidKey(_18)==false){
throw new Error("Invalid namespace given: "+_18);
}
var dir=_1.File.applicationStorageDirectory.resolvePath(this._storagePath+_18);
if(dir.exists&&dir.isDirectory){
dir.deleteDirectory(true);
}
},remove:function(key,_1b){
_1b=_1b||this.DEFAULT_NAMESPACE;
var _1c=_1.File.applicationStorageDirectory.resolvePath(this._storagePath+_1b+"/"+key);
if(_1c.exists&&!_1c.isDirectory){
_1c.deleteFile();
}
},putMultiple:function(_1d,_1e,_1f,_20){
if(this.isValidKeyArray(_1d)===false||!(_1e instanceof Array)||_1d.length!=_1e.length){
throw new Error("Invalid arguments: keys = ["+_1d+"], values = ["+_1e+"]");
}
if(_20==null||typeof _20=="undefined"){
_20=this.DEFAULT_NAMESPACE;
}
if(this.isValidKey(_20)==false){
throw new Error("Invalid namespace given: "+_20);
}
this._statusHandler=_1f;
try{
for(var i=0;i<_1d.length;i++){
this.put(_1d[i],_1e[i],null,_20);
}
}
catch(e){
if(_1f){
_1f(this.FAILED,_1d,e.toString(),_20);
}
return;
}
if(_1f){
_1f(this.SUCCESS,_1d,null,_20);
}
},getMultiple:function(_22,_23){
if(this.isValidKeyArray(_22)===false){
throw new Error("Invalid key array given: "+_22);
}
if(_23==null||typeof _23=="undefined"){
_23=this.DEFAULT_NAMESPACE;
}
if(this.isValidKey(_23)==false){
throw new Error("Invalid namespace given: "+_23);
}
var _24=[];
for(var i=0;i<_22.length;i++){
_24[i]=this.get(_22[i],_23);
}
return _24;
},removeMultiple:function(_26,_27){
_27=_27||this.DEFAULT_NAMESPACE;
for(var i=0;i<_26.length;i++){
this.remove(_26[i],_27);
}
},isPermanent:function(){
return true;
},getMaximumSize:function(){
return this.SIZE_NO_LIMIT;
},hasSettingsUI:function(){
return false;
},showSettingsUI:function(){
throw new Error(this.declaredClass+" does not support a storage settings user-interface");
},hideSettingsUI:function(){
throw new Error(this.declaredClass+" does not support a storage settings user-interface");
}});
dojox.storage.manager.register("dojox.storage.AirFileStorageProvider",new dojox.storage.AirFileStorageProvider());
dojox.storage.manager.initialize();
})();
}
} | _5(this.SUCCESS,_3,null,_6);
}
},get:function(_a,_b){
if(this.isValidKey(_a)==false){ | random_line_split |
definitions.ts | // DRY!
class Form {
settings: object = {
method: undefined,
headers: {
'Accept': 'application/json',
'Content-Type': 'application/json'
},
credentials: 'include',
body: undefined
};
get httpMethod() {
return this.settings['method'];
}
set httpMethod(method: string) {
this.settings['method'] = method;
}
}
class PortfolioItem extends Form {
id: string = undefined;
title: string = null;
description: string = null;
imagelink: string = null;
pages: object = {};
currentPage: number = 1;
parentElement: HTMLElement = undefined;
messageElement: HTMLElement = undefined;
editor: CKEDITOR.editor = undefined;
constructor(parentElement: HTMLElement, editor?: CKEDITOR.editor) {
super()
this.parentElement = parentElement;
this.editor = editor;
}
// should be bound as an event listener and respond to clicks on the appropriate page buttons
public pageHandler = (event: MouseEvent) => {
let pageNumber: string = event.target['dataset']['button']
let currentPageButton = <HTMLElement>event.target;
this.saveContent(this.editor.getData());
this.page = pageNumber;
this.editor.setData(this.pageContent);
}
public submitHandler = (event: MouseEvent) => {
event.preventDefault()
let submitButton = <HTMLElement> event.target
if (this.httpMethod != "DELETE") |
this.send()
.then(response => {
if (response.status == 201 || response.status == 204) {
toggleClasses(submitButton, 'disabled')
console.log(this.messageElement);
toggleClasses(this.messageElement, 'on', 'off', 'animated', 'fadeIn');
}
})
}
// parent element should follow a specific order every time
private saveAllAttributes() {
this.title = this.parentElement.querySelector('input[name="title"]')['value'];
this.description = this.parentElement.querySelector('input[name="description"]')['value'];
this.imagelink = this.parentElement.querySelector('input[name="imageUrl"]')['value'];
// save the current page without the user having to click another page first
this.saveContent(this.editor.getData());
}
// parent element should follow a specific order every time
private saveToParent() {
this.parentElement.querySelector('input[name="title"]')['value'] = this.title;
this.parentElement.querySelector('input[name="description"]')['value'] = this.description;
this.parentElement.querySelector('input[name="imageUrl"]')['value'] = this.imagelink;
this.editor.setData(this.pages[this.currentPage]);
}
private toJson = () => {
this.settings["body"] = JSON.stringify({
"title": this.title,
"description": this.description,
"imagelink": this.imagelink,
"pages": this.pages
})
}
public retrieve = (itemID: number): void => {
let currentItem = itemID;
getPortfolioItem(currentItem)
.then(response => {
this.id = response['id'];
this.title = response['title'];
this.description = response['description'];
this.imagelink = response['imagelink'];
this.pages = response['content'];
console.log(this.pages);
this.saveToParent();
})
}
public send(): Promise<Response> {
if (this.httpMethod == "PUT" || this.httpMethod == "DELETE") {
return sendJsonWithObj('API_portfolio', { "itemID": this.id }, this.settings);
}
return sendJson('API_portfolio', this.settings);
}
get pageContent() {
return this.pages[this.currentPage];
}
set page(pageNumber) {
this.currentPage = pageNumber;
}
private saveContent(content: string) {
if (content.length > 0) {
this.pages[`${this.currentPage}`] = content;
}
}
}
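// --- Wiring sketch (added; illustrative only) ---------------------------------------------
// How PortfolioItem is meant to be driven from a form. The selectors, the numeric id and the
// editor instance are assumptions; only members defined on PortfolioItem above are used.
function examplePortfolioItemWiring(form: HTMLElement, editor: CKEDITOR.editor) {
  const item = new PortfolioItem(form, editor);
  item.messageElement = form.querySelector('.message') as HTMLElement; // assumed element
  item.httpMethod = 'PUT';
  item.retrieve(1); // load an existing item into the form inputs and the editor
  form.querySelector('button[type="submit"]')
    .addEventListener('click', item.submitHandler as EventListener);
  form.querySelectorAll('[data-button]')
    .forEach(btn => btn.addEventListener('click', item.pageHandler as EventListener));
}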
class Post extends Form {
title: string;
imagelink: string;
content: string;
tags: string;
constructor(title: string, imagelink: string, content: string, tags: string) {
super()
this.title = title
this.imagelink = imagelink;
this.content = content;
this.tags = tags;
}
get json() {
return JSON.stringify({
title: this.title,
imagelink: this.imagelink,
content: this.content,
tags: this.tags
});
}
send(flaskLocation: string) {
return sendJson(flaskLocation, this.settings);
}
}
| {
this.saveAllAttributes();
this.toJson()
} | conditional_block |
definitions.ts | // DRY!
class Form {
settings: object = {
method: undefined,
headers: {
'Accept': 'application/json',
'Content-Type': 'application/json'
},
credentials: 'include',
body: undefined
};
get httpMethod() {
return this.settings['method'];
}
set httpMethod(method: string) {
this.settings['method'] = method;
}
}
class PortfolioItem extends Form {
id: string = undefined;
title: string = null;
description: string = null;
imagelink: string = null;
pages: object = {};
currentPage: number = 1;
parentElement: HTMLElement = undefined;
messageElement: HTMLElement = undefined;
editor: CKEDITOR.editor = undefined;
constructor(parentElement: HTMLElement, editor?: CKEDITOR.editor) {
super()
this.parentElement = parentElement;
this.editor = editor;
}
// should be bound as an event listener and respond to clicks on the appropriate page buttons
public pageHandler = (event: MouseEvent) => {
let pageNumber: string = event.target['dataset']['button']
let currentPageButton = <HTMLElement>event.target;
this.saveContent(this.editor.getData());
this.page = pageNumber;
this.editor.setData(this.pageContent);
}
public submitHandler = (event: MouseEvent) => {
event.preventDefault()
let submitButton = <HTMLElement> event.target
if (this.httpMethod != "DELETE") {
this.saveAllAttributes();
this.toJson()
}
this.send()
.then(response => {
if (response.status == 201 || response.status == 204) {
toggleClasses(submitButton, 'disabled')
console.log(this.messageElement);
toggleClasses(this.messageElement, 'on', 'off', 'animated', 'fadeIn');
}
})
}
// parent element should follow a specific order every time
private saveAllAttributes() {
this.title = this.parentElement.querySelector('input[name="title"]')['value'];
this.description = this.parentElement.querySelector('input[name="description"]')['value'];
this.imagelink = this.parentElement.querySelector('input[name="imageUrl"]')['value'];
// save the current page without the user having to click another page first
this.saveContent(this.editor.getData());
}
// parent element should follow a specific order every time
private saveToParent() {
this.parentElement.querySelector('input[name="title"]')['value'] = this.title;
this.parentElement.querySelector('input[name="description"]')['value'] = this.description;
this.parentElement.querySelector('input[name="imageUrl"]')['value'] = this.imagelink;
this.editor.setData(this.pages[this.currentPage]);
}
private toJson = () => {
this.settings["body"] = JSON.stringify({
"title": this.title,
"description": this.description,
"imagelink": this.imagelink,
"pages": this.pages
})
}
public retrieve = (itemID: number): void => {
let currentItem = itemID;
getPortfolioItem(currentItem)
.then(response => {
this.id = response['id'];
this.title = response['title'];
this.description = response['description'];
this.imagelink = response['imagelink'];
this.pages = response['content'];
console.log(this.pages);
this.saveToParent();
})
}
public send(): Promise<Response> {
if (this.httpMethod == "PUT" || this.httpMethod == "DELETE") {
return sendJsonWithObj('API_portfolio', { "itemID": this.id }, this.settings);
}
return sendJson('API_portfolio', this.settings);
}
get pageContent() {
return this.pages[this.currentPage];
}
set page(pageNumber) {
this.currentPage = pageNumber;
}
private saveContent(content: string) |
}
class Post extends Form {
title: string;
imagelink: string;
content: string;
tags: string;
constructor(title: string, imagelink: string, content: string, tags: string) {
super()
this.title = title
this.imagelink = imagelink;
this.content = content;
this.tags = tags;
}
get json() {
return JSON.stringify({
title: this.title,
imagelink: this.imagelink,
content: this.content,
tags: this.tags
});
}
send(flaskLocation: string) {
return sendJson(flaskLocation, this.settings);
}
}
| {
if (content.length > 0) {
this.pages[`${this.currentPage}`] = content;
}
} | identifier_body |
definitions.ts | // DRY!
class Form {
settings: object = {
method: undefined,
headers: {
'Accept': 'application/json',
'Content-Type': 'application/json'
},
credentials: 'include',
body: undefined
};
get httpMethod() {
return this.settings['method'];
}
set httpMethod(method: string) {
this.settings['method'] = method;
}
}
class PortfolioItem extends Form {
id: string = undefined;
title: string = null;
description: string = null;
imagelink: string = null;
pages: object = {};
currentPage: number = 1;
parentElement: HTMLElement = undefined;
messageElement: HTMLElement = undefined;
editor: CKEDITOR.editor = undefined;
constructor(parentElement: HTMLElement, editor?: CKEDITOR.editor) {
super()
this.parentElement = parentElement;
this.editor = editor;
}
// should be bound as an event listener and respond to clicks on the appropriate page buttons
public pageHandler = (event: MouseEvent) => {
let pageNumber: string = event.target['dataset']['button']
let currentPageButton = <HTMLElement>event.target;
this.saveContent(this.editor.getData());
this.page = pageNumber;
this.editor.setData(this.pageContent);
}
public submitHandler = (event: MouseEvent) => {
event.preventDefault()
let submitButton = <HTMLElement> event.target
if (this.httpMethod != "DELETE") {
this.saveAllAttributes();
this.toJson()
}
this.send()
.then(response => {
if (response.status == 201 || response.status == 204) {
toggleClasses(submitButton, 'disabled')
console.log(this.messageElement);
toggleClasses(this.messageElement, 'on', 'off', 'animated', 'fadeIn');
}
})
}
// parent element should follow a specific order every time
private saveAllAttributes() {
this.title = this.parentElement.querySelector('input[name="title"]')['value'];
this.description = this.parentElement.querySelector('input[name="description"]')['value'];
this.imagelink = this.parentElement.querySelector('input[name="imageUrl"]')['value'];
// save the current page without the user having to click another page first
this.saveContent(this.editor.getData());
}
// parent element should follow a specific order every time
private saveToParent() {
this.parentElement.querySelector('input[name="title"]')['value'] = this.title;
this.parentElement.querySelector('input[name="description"]')['value'] = this.description;
this.parentElement.querySelector('input[name="imageUrl"]')['value'] = this.imagelink;
this.editor.setData(this.pages[this.currentPage]);
}
private toJson = () => {
this.settings["body"] = JSON.stringify({
"title": this.title,
"description": this.description,
"imagelink": this.imagelink,
"pages": this.pages
})
}
public retrieve = (itemID: number): void => {
let currentItem = itemID;
getPortfolioItem(currentItem)
.then(response => {
this.id = response['id'];
this.title = response['title'];
this.description = response['description'];
this.imagelink = response['imagelink'];
this.pages = response['content'];
console.log(this.pages);
this.saveToParent();
})
}
public send(): Promise<Response> {
if (this.httpMethod == "PUT" || this.httpMethod == "DELETE") {
return sendJsonWithObj('API_portfolio', { "itemID": this.id }, this.settings);
}
return sendJson('API_portfolio', this.settings);
}
get pageContent() {
return this.pages[this.currentPage];
}
set page(pageNumber) {
this.currentPage = pageNumber;
}
private saveContent(content: string) {
if (content.length > 0) {
this.pages[`${this.currentPage}`] = content;
}
}
}
class Post extends Form {
title: string;
imagelink: string;
content: string;
tags: string;
constructor(title: string, imagelink: string, content: string, tags: string) {
super()
this.title = title
this.imagelink = imagelink;
this.content = content;
this.tags = tags;
}
get json() {
return JSON.stringify({
title: this.title,
imagelink: this.imagelink,
content: this.content,
tags: this.tags
});
}
| send(flaskLocation: string) {
return sendJson(flaskLocation, this.settings);
}
} | random_line_split |
|
definitions.ts | // DRY!
class Form {
settings: object = {
method: undefined,
headers: {
'Accept': 'application/json',
'Content-Type': 'application/json'
},
credentials: 'include',
body: undefined
};
get httpMethod() {
return this.settings['method'];
}
set httpMethod(method: string) {
this.settings['method'] = method;
}
}
class PortfolioItem extends Form {
id: string = undefined;
title: string = null;
description: string = null;
imagelink: string = null;
pages: object = {};
currentPage: number = 1;
parentElement: HTMLElement = undefined;
messageElement: HTMLElement = undefined;
editor: CKEDITOR.editor = undefined;
constructor(parentElement: HTMLElement, editor?: CKEDITOR.editor) {
super()
this.parentElement = parentElement;
this.editor = editor;
}
// should be bound as an event listener and respond to clicks on the appropriate page buttons
public pageHandler = (event: MouseEvent) => {
let pageNumber: string = event.target['dataset']['button']
let currentPageButton = <HTMLElement>event.target;
this.saveContent(this.editor.getData());
this.page = pageNumber;
this.editor.setData(this.pageContent);
}
public submitHandler = (event: MouseEvent) => {
event.preventDefault()
let submitButton = <HTMLElement> event.target
if (this.httpMethod != "DELETE") {
this.saveAllAttributes();
this.toJson()
}
this.send()
.then(response => {
if (response.status == 201 || response.status == 204) {
toggleClasses(submitButton, 'disabled')
console.log(this.messageElement);
toggleClasses(this.messageElement, 'on', 'off', 'animated', 'fadeIn');
}
})
}
// parent element should follow a specific order every time
private saveAllAttributes() {
this.title = this.parentElement.querySelector('input[name="title"]')['value'];
this.description = this.parentElement.querySelector('input[name="description"]')['value'];
this.imagelink = this.parentElement.querySelector('input[name="imageUrl"]')['value'];
// save the current page without the user having to click another page first
this.saveContent(this.editor.getData());
}
// parent element should follow a specific order every time
private saveToParent() {
this.parentElement.querySelector('input[name="title"]')['value'] = this.title;
this.parentElement.querySelector('input[name="description"]')['value'] = this.description;
this.parentElement.querySelector('input[name="imageUrl"]')['value'] = this.imagelink;
this.editor.setData(this.pages[this.currentPage]);
}
private toJson = () => {
this.settings["body"] = JSON.stringify({
"title": this.title,
"description": this.description,
"imagelink": this.imagelink,
"pages": this.pages
})
}
public retrieve = (itemID: number): void => {
let currentItem = itemID;
getPortfolioItem(currentItem)
.then(response => {
this.id = response['id'];
this.title = response['title'];
this.description = response['description'];
this.imagelink = response['imagelink'];
this.pages = response['content'];
console.log(this.pages);
this.saveToParent();
})
}
public send(): Promise<Response> {
if (this.httpMethod == "PUT" || this.httpMethod == "DELETE") {
return sendJsonWithObj('API_portfolio', { "itemID": this.id }, this.settings);
}
return sendJson('API_portfolio', this.settings);
}
get pageContent() {
return this.pages[this.currentPage];
}
set page(pageNumber) {
this.currentPage = pageNumber;
}
private saveContent(content: string) {
if (content.length > 0) {
this.pages[`${this.currentPage}`] = content;
}
}
}
class Post extends Form {
title: string;
imagelink: string;
content: string;
tags: string;
constructor(title: string, imagelink: string, content: string, tags: string) {
super()
this.title = title
this.imagelink = imagelink;
this.content = content;
this.tags = tags;
}
get json() {
return JSON.stringify({
title: this.title,
imagelink: this.imagelink,
content: this.content,
tags: this.tags
});
}
| (flaskLocation: string) {
return sendJson(flaskLocation, this.settings);
}
}
| send | identifier_name |
AttributeMarshallingMixin.js | import { booleanAttributeValue, standardBooleanAttributes } from "./dom.js";
import { rendering } from "./internal.js";
// Memoized maps of attribute to property names and vice versa.
// We initialize this with the special case of the tabindex (lowercase "i")
// attribute, which is mapped to the tabIndex (capital "I") property.
/** @type {IndexedObject<string>} */
const attributeToPropertyNames = {
tabindex: "tabIndex",
};
/** @type {IndexedObject<string>} */
const propertyNamesToAttributes = {
tabIndex: "tabindex",
};
/**
* Sets properties when the corresponding attributes change
*
* If your component exposes a setter for a property, it's generally a good
* idea to let devs using your component set that property in HTML
* via an element attribute. You can code that yourself by writing an
* `attributeChangedCallback`, or you can use this mixin to get a degree of
* automatic support.
*
* This mixin implements an `attributeChangedCallback` that will attempt to
* convert a change in an element attribute into a call to the corresponding
* property setter. Attributes typically follow hyphenated names ("foo-bar"),
* whereas properties typically use camelCase names ("fooBar"). This mixin
* respects that convention, automatically mapping the hyphenated attribute
* name to the corresponding camelCase property name.
*
* Example: You define a component using this mixin:
*
* class MyElement extends AttributeMarshallingMixin(HTMLElement) {
* get fooBar() { return this._fooBar; }
* set fooBar(value) { this._fooBar = value; }
* }
*
* If someone then instantiates your component in HTML:
*
* <my-element foo-bar="Hello"></my-element>
*
* Then, after the element has been upgraded, the `fooBar` setter will
* automatically be invoked with the initial value "Hello".
*
* Attributes can only have string values. If you'd like to convert string
* attributes to other types (numbers, booleans), you must implement parsing
* yourself.
*
* @module AttributeMarshallingMixin
* @param {Constructor<CustomElement>} Base
*/
export default function AttributeMarshallingMixin(Base) {
// The class prototype added by the mixin.
class AttributeMarshalling extends Base {
/**
* Handle a change to the attribute with the given name.
*
* @ignore
* @param {string} attributeName
* @param {string} oldValue
* @param {string} newValue
*/
attributeChangedCallback(attributeName, oldValue, newValue) {
if (super.attributeChangedCallback) {
super.attributeChangedCallback(attributeName, oldValue, newValue);
}
// Sometimes this callback is invoked when there's not actually any
// change, in which case we skip invoking the property setter.
//
// We also skip setting properties if we're rendering. A component may
// want to reflect property values to attributes during rendering, but
// such attribute changes shouldn't trigger property updates.
if (newValue !== oldValue && !this[rendering]) {
const propertyName = attributeToPropertyName(attributeName);
// If the attribute name corresponds to a property name, set the property.
if (propertyName in this) {
// Parse standard boolean attributes.
const parsed = standardBooleanAttributes[attributeName]
? booleanAttributeValue(attributeName, newValue)
: newValue;
this[propertyName] = parsed;
}
}
}
// Because maintaining the mapping of attributes to properties is tedious,
// this provides a default implementation for `observedAttributes` that
// assumes that your component will want to expose all public properties in
// your component's API as properties.
//
// You can override this default implementation of `observedAttributes`. For
// example, if you have a system that can statically analyze which
// properties are available to your component, you could hand-author or
// programmatically generate a definition for `observedAttributes` that
// avoids the minor run-time performance cost of inspecting the component
// prototype to determine your component's public properties.
static get observedAttributes() {
return attributesForClass(this);
}
}
return AttributeMarshalling;
}
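// --- Usage sketch (added; illustrative only) ----------------------------------------------
// Mirrors the example from the comment above; the "my-element" tag name is an assumption.
// The function is not invoked anywhere; it just shows how the mixin is applied.
function defineExampleElement() {
  class MyElement extends AttributeMarshallingMixin(HTMLElement) {
    get fooBar() {
      return this._fooBar;
    }
    set fooBar(value) {
      this._fooBar = value;
    }
  }
  customElements.define("my-element", MyElement);
  // <my-element foo-bar="Hello"></my-element> now invokes the fooBar setter with "Hello"
  // once the element is upgraded, via attributeChangedCallback above.
}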
/**
* Return the custom attributes for the given class.
*
* E.g., if the supplied class defines a `fooBar` property, then the resulting
* array of attribute names will include the "foo-bar" attribute.
*
* @private
* @param {Constructor<HTMLElement>} classFn
* @returns {string[]}
*/
function attributesForClass(classFn) {
// We treat the HTMLElement base class as if it has no attributes, since we
// don't want to receive attributeChangedCallback for it (or anything further
// up the prototype chain).
if (classFn === HTMLElement) {
return [];
}
// Get attributes for parent class.
const baseClass = Object.getPrototypeOf(classFn.prototype).constructor;
// See if parent class defines observedAttributes manually.
let baseAttributes = baseClass.observedAttributes;
if (!baseAttributes) {
// Calculate parent class attributes ourselves.
baseAttributes = attributesForClass(baseClass);
}
// Get the properties for this particular class.
const propertyNames = Object.getOwnPropertyNames(classFn.prototype);
const setterNames = propertyNames.filter((propertyName) => {
const descriptor = Object.getOwnPropertyDescriptor(
classFn.prototype,
propertyName
);
return descriptor && typeof descriptor.set === "function";
});
// Map the property names to attribute names.
const attributes = setterNames.map((setterName) =>
propertyNameToAttribute(setterName)
);
// Merge the attributes for this class and its base class.
const diff = attributes.filter(
(attribute) => baseAttributes.indexOf(attribute) < 0
);
const result = baseAttributes.concat(diff);
return result;
}
/**
* Convert hyphenated foo-bar attribute name to camel case fooBar property name.
*
* @private
* @param {string} attributeName
*/
function attributeToPropertyName(attributeName) {
let propertyName = attributeToPropertyNames[attributeName];
if (!propertyName) {
// Convert and memoize.
const hyphenRegEx = /-([a-z])/g;
propertyName = attributeName.replace(hyphenRegEx, (match) =>
match[1].toUpperCase()
);
attributeToPropertyNames[attributeName] = propertyName;
}
return propertyName;
}
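// Quick illustration (added): for simple names the two converters in this file are inverses,
// e.g. attributeToPropertyName("foo-bar") === "fooBar" and
// propertyNameToAttribute("fooBar") === "foo-bar"; both results are memoized in the maps
// declared at the top of the file (seeded with the tabindex/tabIndex special case).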
/**
* Convert a camel case fooBar property name to a hyphenated foo-bar attribute.
*
* @private
* @param {string} propertyName
*/
function | (propertyName) {
let attribute = propertyNamesToAttributes[propertyName];
if (!attribute) {
// Convert and memoize.
const uppercaseRegEx = /([A-Z])/g;
attribute = propertyName.replace(uppercaseRegEx, "-$1").toLowerCase();
propertyNamesToAttributes[propertyName] = attribute;
}
return attribute;
}
| propertyNameToAttribute | identifier_name |
AttributeMarshallingMixin.js | import { booleanAttributeValue, standardBooleanAttributes } from "./dom.js";
import { rendering } from "./internal.js";
// Memoized maps of attribute to property names and vice versa.
// We initialize this with the special case of the tabindex (lowercase "i")
// attribute, which is mapped to the tabIndex (capital "I") property.
/** @type {IndexedObject<string>} */
const attributeToPropertyNames = {
tabindex: "tabIndex",
};
/** @type {IndexedObject<string>} */
const propertyNamesToAttributes = {
tabIndex: "tabindex",
};
/**
* Sets properties when the corresponding attributes change
*
* If your component exposes a setter for a property, it's generally a good
* idea to let devs using your component set that property in HTML
* via an element attribute. You can code that yourself by writing an
* `attributeChangedCallback`, or you can use this mixin to get a degree of
* automatic support.
*
* This mixin implements an `attributeChangedCallback` that will attempt to
* convert a change in an element attribute into a call to the corresponding
* property setter. Attributes typically follow hyphenated names ("foo-bar"),
* whereas properties typically use camelCase names ("fooBar"). This mixin
* respects that convention, automatically mapping the hyphenated attribute
* name to the corresponding camelCase property name.
*
* Example: You define a component using this mixin:
*
* class MyElement extends AttributeMarshallingMixin(HTMLElement) {
* get fooBar() { return this._fooBar; }
* set fooBar(value) { this._fooBar = value; }
* }
*
* If someone then instantiates your component in HTML:
*
* <my-element foo-bar="Hello"></my-element>
*
* Then, after the element has been upgraded, the `fooBar` setter will
* automatically be invoked with the initial value "Hello".
*
* Attributes can only have string values. If you'd like to convert string
* attributes to other types (numbers, booleans), you must implement parsing
* yourself.
*
* @module AttributeMarshallingMixin
* @param {Constructor<CustomElement>} Base
*/
export default function AttributeMarshallingMixin(Base) {
// The class prototype added by the mixin.
class AttributeMarshalling extends Base {
/**
* Handle a change to the attribute with the given name.
*
* @ignore
* @param {string} attributeName
* @param {string} oldValue
* @param {string} newValue
*/
attributeChangedCallback(attributeName, oldValue, newValue) | }
}
}
// Because maintaining the mapping of attributes to properties is tedious,
// this provides a default implementation for `observedAttributes` that
// assumes that your component will want to expose all public properties in
// your component's API as properties.
//
// You can override this default implementation of `observedAttributes`. For
// example, if you have a system that can statically analyze which
// properties are available to your component, you could hand-author or
// programmatically generate a definition for `observedAttributes` that
// avoids the minor run-time performance cost of inspecting the component
// prototype to determine your component's public properties.
static get observedAttributes() {
return attributesForClass(this);
}
}
return AttributeMarshalling;
}
/**
* Return the custom attributes for the given class.
*
* E.g., if the supplied class defines a `fooBar` property, then the resulting
* array of attribute names will include the "foo-bar" attribute.
*
* @private
* @param {Constructor<HTMLElement>} classFn
* @returns {string[]}
*/
function attributesForClass(classFn) {
// We treat the HTMLElement base class as if it has no attributes, since we
// don't want to receive attributeChangedCallback for it (or anything further
// up the protoype chain).
if (classFn === HTMLElement) {
return [];
}
// Get attributes for parent class.
const baseClass = Object.getPrototypeOf(classFn.prototype).constructor;
// See if parent class defines observedAttributes manually.
let baseAttributes = baseClass.observedAttributes;
if (!baseAttributes) {
// Calculate parent class attributes ourselves.
baseAttributes = attributesForClass(baseClass);
}
// Get the properties for this particular class.
const propertyNames = Object.getOwnPropertyNames(classFn.prototype);
const setterNames = propertyNames.filter((propertyName) => {
const descriptor = Object.getOwnPropertyDescriptor(
classFn.prototype,
propertyName
);
return descriptor && typeof descriptor.set === "function";
});
// Map the property names to attribute names.
const attributes = setterNames.map((setterName) =>
propertyNameToAttribute(setterName)
);
// Merge the attribute for this class and its base class.
const diff = attributes.filter(
(attribute) => baseAttributes.indexOf(attribute) < 0
);
const result = baseAttributes.concat(diff);
return result;
}
/**
* Convert hyphenated foo-bar attribute name to camel case fooBar property name.
*
* @private
* @param {string} attributeName
*/
function attributeToPropertyName(attributeName) {
let propertyName = attributeToPropertyNames[attributeName];
if (!propertyName) {
// Convert and memoize.
const hyphenRegEx = /-([a-z])/g;
propertyName = attributeName.replace(hyphenRegEx, (match) =>
match[1].toUpperCase()
);
attributeToPropertyNames[attributeName] = propertyName;
}
return propertyName;
}
/**
* Convert a camel case fooBar property name to a hyphenated foo-bar attribute.
*
* @private
* @param {string} propertyName
*/
function propertyNameToAttribute(propertyName) {
let attribute = propertyNamesToAttributes[propertyName];
if (!attribute) {
// Convert and memoize.
const uppercaseRegEx = /([A-Z])/g;
attribute = propertyName.replace(uppercaseRegEx, "-$1").toLowerCase();
propertyNamesToAttributes[propertyName] = attribute;
}
return attribute;
}
| {
if (super.attributeChangedCallback) {
super.attributeChangedCallback(attributeName, oldValue, newValue);
}
// Sometimes this callback is invoked when there's not actually any
// change, in which we skip invoking the property setter.
//
// We also skip setting properties if we're rendering. A component may
// want to reflect property values to attributes during rendering, but
// such attribute changes shouldn't trigger property updates.
if (newValue !== oldValue && !this[rendering]) {
const propertyName = attributeToPropertyName(attributeName);
// If the attribute name corresponds to a property name, set the property.
if (propertyName in this) {
// Parse standard boolean attributes.
const parsed = standardBooleanAttributes[attributeName]
? booleanAttributeValue(attributeName, newValue)
: newValue;
this[propertyName] = parsed; | identifier_body |
AttributeMarshallingMixin.js | import { booleanAttributeValue, standardBooleanAttributes } from "./dom.js";
import { rendering } from "./internal.js";
// Memoized maps of attribute to property names and vice versa.
// We initialize this with the special case of the tabindex (lowercase "i")
// attribute, which is mapped to the tabIndex (capital "I") property.
/** @type {IndexedObject<string>} */
const attributeToPropertyNames = {
tabindex: "tabIndex",
};
/** @type {IndexedObject<string>} */
const propertyNamesToAttributes = {
tabIndex: "tabindex",
};
/**
* Sets properties when the corresponding attributes change
*
* If your component exposes a setter for a property, it's generally a good
* idea to let devs using your component set that property in HTML
* via an element attribute. You can code that yourself by writing an
* `attributeChangedCallback`, or you can use this mixin to get a degree of
* automatic support.
*
* This mixin implements an `attributeChangedCallback` that will attempt to
* convert a change in an element attribute into a call to the corresponding
* property setter. Attributes typically follow hyphenated names ("foo-bar"),
* whereas properties typically use camelCase names ("fooBar"). This mixin
* respects that convention, automatically mapping the hyphenated attribute
* name to the corresponding camelCase property name.
*
* Example: You define a component using this mixin:
*
* class MyElement extends AttributeMarshallingMixin(HTMLElement) {
* get fooBar() { return this._fooBar; }
* set fooBar(value) { this._fooBar = value; }
* }
*
* If someone then instantiates your component in HTML:
*
* <my-element foo-bar="Hello"></my-element>
*
* Then, after the element has been upgraded, the `fooBar` setter will
* automatically be invoked with the initial value "Hello".
*
* Attributes can only have string values. If you'd like to convert string
* attributes to other types (numbers, booleans), you must implement parsing
* yourself.
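*
* For instance (purely an illustrative sketch, not something this mixin does
* for you), a component that wants a numeric property can coerce the string
* itself in its setter; the SizedElement name below is hypothetical:
*
* class SizedElement extends AttributeMarshallingMixin(HTMLElement) {
* get size() { return this._size; }
* set size(value) {
* // Attribute values arrive as strings; parse to a number here.
* this._size = parseInt(value, 10);
* }
* }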
*
* @module AttributeMarshallingMixin
* @param {Constructor<CustomElement>} Base
*/
export default function AttributeMarshallingMixin(Base) {
// The class prototype added by the mixin.
class AttributeMarshalling extends Base {
/**
* Handle a change to the attribute with the given name.
*
* @ignore
* @param {string} attributeName
* @param {string} oldValue
* @param {string} newValue
*/
attributeChangedCallback(attributeName, oldValue, newValue) {
if (super.attributeChangedCallback) {
super.attributeChangedCallback(attributeName, oldValue, newValue);
}
// Sometimes this callback is invoked when there's not actually any
// change, in which case we skip invoking the property setter.
//
// We also skip setting properties if we're rendering. A component may
// want to reflect property values to attributes during rendering, but
// such attribute changes shouldn't trigger property updates.
if (newValue !== oldValue && !this[rendering]) |
}
// Because maintaining the mapping of attributes to properties is tedious,
// this provides a default implementation for `observedAttributes` that
// assumes that your component will want to expose all public properties in
// your component's API as attributes.
//
// You can override this default implementation of `observedAttributes`. For
// example, if you have a system that can statically analyze which
// properties are available to your component, you could hand-author or
// programmatically generate a definition for `observedAttributes` that
// avoids the minor run-time performance cost of inspecting the component
// prototype to determine your component's public properties.
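//
// As a rough sketch (the FastElement name is made up for illustration), such
// an override can simply return a hand-authored list of attribute names:
//
//   class FastElement extends AttributeMarshallingMixin(HTMLElement) {
//     static get observedAttributes() {
//       return ["foo-bar"]; // statically known, skips prototype inspection
//     }
//     get fooBar() { return this._fooBar; }
//     set fooBar(value) { this._fooBar = value; }
//   }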
static get observedAttributes() {
return attributesForClass(this);
}
}
return AttributeMarshalling;
}
/**
* Return the custom attributes for the given class.
*
* E.g., if the supplied class defines a `fooBar` property, then the resulting
* array of attribute names will include the "foo-bar" attribute.
*
* @private
* @param {Constructor<HTMLElement>} classFn
* @returns {string[]}
*/
function attributesForClass(classFn) {
// We treat the HTMLElement base class as if it has no attributes, since we
// don't want to receive attributeChangedCallback for it (or anything further
// up the prototype chain).
if (classFn === HTMLElement) {
return [];
}
// Get attributes for parent class.
const baseClass = Object.getPrototypeOf(classFn.prototype).constructor;
// See if parent class defines observedAttributes manually.
let baseAttributes = baseClass.observedAttributes;
if (!baseAttributes) {
// Calculate parent class attributes ourselves.
baseAttributes = attributesForClass(baseClass);
}
// Get the properties for this particular class.
const propertyNames = Object.getOwnPropertyNames(classFn.prototype);
const setterNames = propertyNames.filter((propertyName) => {
const descriptor = Object.getOwnPropertyDescriptor(
classFn.prototype,
propertyName
);
return descriptor && typeof descriptor.set === "function";
});
// Map the property names to attribute names.
const attributes = setterNames.map((setterName) =>
propertyNameToAttribute(setterName)
);
// Merge the attributes for this class and its base class.
const diff = attributes.filter(
(attribute) => baseAttributes.indexOf(attribute) < 0
);
const result = baseAttributes.concat(diff);
return result;
}
/**
* Convert hyphenated foo-bar attribute name to camel case fooBar property name.
*
* @private
* @param {string} attributeName
*/
function attributeToPropertyName(attributeName) {
let propertyName = attributeToPropertyNames[attributeName];
if (!propertyName) {
// Convert and memoize.
const hyphenRegEx = /-([a-z])/g;
propertyName = attributeName.replace(hyphenRegEx, (match) =>
match[1].toUpperCase()
);
attributeToPropertyNames[attributeName] = propertyName;
}
return propertyName;
}
/**
* Convert a camel case fooBar property name to a hyphenated foo-bar attribute.
*
* @private
* @param {string} propertyName
*/
function propertyNameToAttribute(propertyName) {
let attribute = propertyNamesToAttributes[propertyName];
if (!attribute) {
// Convert and memoize.
const uppercaseRegEx = /([A-Z])/g;
attribute = propertyName.replace(uppercaseRegEx, "-$1").toLowerCase();
propertyNamesToAttributes[propertyName] = attribute;
}
return attribute;
}
| {
const propertyName = attributeToPropertyName(attributeName);
// If the attribute name corresponds to a property name, set the property.
if (propertyName in this) {
// Parse standard boolean attributes.
const parsed = standardBooleanAttributes[attributeName]
? booleanAttributeValue(attributeName, newValue)
: newValue;
this[propertyName] = parsed;
}
} | conditional_block |
AttributeMarshallingMixin.js | import { booleanAttributeValue, standardBooleanAttributes } from "./dom.js";
import { rendering } from "./internal.js";
// Memoized maps of attribute to property names and vice versa.
// We initialize this with the special case of the tabindex (lowercase "i")
// attribute, which is mapped to the tabIndex (capital "I") property.
/** @type {IndexedObject<string>} */
const attributeToPropertyNames = {
tabindex: "tabIndex",
};
/** @type {IndexedObject<string>} */
const propertyNamesToAttributes = {
tabIndex: "tabindex",
};
/**
* Sets properties when the corresponding attributes change
*
* If your component exposes a setter for a property, it's generally a good
* idea to let devs using your component set that property in HTML
* via an element attribute. You can code that yourself by writing an
* `attributeChangedCallback`, or you can use this mixin to get a degree of
* automatic support.
*
* This mixin implements an `attributeChangedCallback` that will attempt to
* convert a change in an element attribute into a call to the corresponding
* property setter. Attributes typically follow hyphenated names ("foo-bar"),
* whereas properties typically use camelCase names ("fooBar"). This mixin
* respects that convention, automatically mapping the hyphenated attribute
* name to the corresponding camelCase property name.
*
* Example: You define a component using this mixin:
*
* class MyElement extends AttributeMarshallingMixin(HTMLElement) {
* get fooBar() { return this._fooBar; }
* set fooBar(value) { this._fooBar = value; }
* }
*
* If someone then instantiates your component in HTML:
*
* <my-element foo-bar="Hello"></my-element>
*
* Then, after the element has been upgraded, the `fooBar` setter will
* automatically be invoked with the initial value "Hello".
*
* Attributes can only have string values. If you'd like to convert string
* attributes to other types (numbers, booleans), you must implement parsing
* yourself.
*
* @module AttributeMarshallingMixin
* @param {Constructor<CustomElement>} Base
*/
export default function AttributeMarshallingMixin(Base) {
// The class prototype added by the mixin.
class AttributeMarshalling extends Base {
/**
* Handle a change to the attribute with the given name.
*
* @ignore
* @param {string} attributeName
* @param {string} oldValue
* @param {string} newValue
*/
attributeChangedCallback(attributeName, oldValue, newValue) {
if (super.attributeChangedCallback) {
super.attributeChangedCallback(attributeName, oldValue, newValue);
}
// Sometimes this callback is invoked when there's not actually any
// change, in which case we skip invoking the property setter.
//
// We also skip setting properties if we're rendering. A component may
// want to reflect property values to attributes during rendering, but
// such attribute changes shouldn't trigger property updates.
if (newValue !== oldValue && !this[rendering]) {
const propertyName = attributeToPropertyName(attributeName);
// If the attribute name corresponds to a property name, set the property.
if (propertyName in this) {
// Parse standard boolean attributes.
const parsed = standardBooleanAttributes[attributeName]
? booleanAttributeValue(attributeName, newValue)
: newValue;
this[propertyName] = parsed;
}
}
}
// Because maintaining the mapping of attributes to properties is tedious,
// this provides a default implementation for `observedAttributes` that
// assumes that your component will want to expose all public properties in
// your component's API as attributes.
//
// You can override this default implementation of `observedAttributes`. For
// example, if you have a system that can statically analyze which
// properties are available to your component, you could hand-author or
// programmatically generate a definition for `observedAttributes` that
// avoids the minor run-time performance cost of inspecting the component
// prototype to determine your component's public properties.
static get observedAttributes() {
return attributesForClass(this);
}
}
return AttributeMarshalling;
}
/**
* Return the custom attributes for the given class.
*
* E.g., if the supplied class defines a `fooBar` property, then the resulting
* array of attribute names will include the "foo-bar" attribute.
*
* @private
* @param {Constructor<HTMLElement>} classFn
* @returns {string[]}
*/
function attributesForClass(classFn) {
// We treat the HTMLElement base class as if it has no attributes, since we
// don't want to receive attributeChangedCallback for it (or anything further
// up the prototype chain).
if (classFn === HTMLElement) {
return [];
}
// Get attributes for parent class.
const baseClass = Object.getPrototypeOf(classFn.prototype).constructor;
// See if parent class defines observedAttributes manually.
let baseAttributes = baseClass.observedAttributes;
if (!baseAttributes) {
// Calculate parent class attributes ourselves.
baseAttributes = attributesForClass(baseClass);
}
// Get the properties for this particular class.
const propertyNames = Object.getOwnPropertyNames(classFn.prototype);
const setterNames = propertyNames.filter((propertyName) => {
const descriptor = Object.getOwnPropertyDescriptor(
classFn.prototype,
propertyName
);
return descriptor && typeof descriptor.set === "function";
});
// Map the property names to attribute names.
const attributes = setterNames.map((setterName) =>
propertyNameToAttribute(setterName)
);
// Merge the attributes for this class and its base class.
const diff = attributes.filter(
(attribute) => baseAttributes.indexOf(attribute) < 0
);
const result = baseAttributes.concat(diff);
return result;
}
/**
* Convert hyphenated foo-bar attribute name to camel case fooBar property name.
*
* @private
* @param {string} attributeName
*/
function attributeToPropertyName(attributeName) {
let propertyName = attributeToPropertyNames[attributeName];
if (!propertyName) {
// Convert and memoize.
const hyphenRegEx = /-([a-z])/g;
propertyName = attributeName.replace(hyphenRegEx, (match) =>
match[1].toUpperCase()
);
attributeToPropertyNames[attributeName] = propertyName;
}
return propertyName;
}
/**
* Convert a camel case fooBar property name to a hyphenated foo-bar attribute.
*
* @private
* @param {string} propertyName
*/
function propertyNameToAttribute(propertyName) {
let attribute = propertyNamesToAttributes[propertyName];
if (!attribute) {
// Convert and memoize.
const uppercaseRegEx = /([A-Z])/g; | } | attribute = propertyName.replace(uppercaseRegEx, "-$1").toLowerCase();
propertyNamesToAttributes[propertyName] = attribute;
}
return attribute; | random_line_split |
netio.js | var etc=require("./etc.js"),
msgtype=require("./msgtype.js");
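// Wire format shared by constructMessage and parseMessage (all integers are
// big-endian, unsigned):
//
//   [4 bytes] total message length, header included
//   [1 byte ] message type (presumably one of the codes in msgtype.js)
//   [1 byte ] number of arguments
//   followed by, for each argument:
//   [4 bytes] argument length
//   [N bytes] argument bytes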
function constructMessage(type,args){
var len=6+args.map(function(a){return 4+a.length;}).reduce(function(a,b){return a+b;},0);
var buf=new Buffer(len);
//console.log("constructing message with len",len)
buf.writeUInt32BE(len,0);
buf.writeUInt8(type,4);
buf.writeUInt8(args.length,5);
var cursor=6;
for(var i=0;i<args.length;i++){
if(!(args[i] instanceof Buffer))args[i]=new Buffer(""+args[i]);
buf.writeUInt32BE(args[i].length,cursor);
cursor+=4;
args[i].copy(buf,cursor);
cursor+=args[i].length;
}
//console.log("constructing message with len",len,"result:",buf);
return buf;
}
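// parseMessage is the inverse of constructMessage. It returns:
//   - false when the buffer does not yet contain a complete message (the
//     caller should wait for more data),
//   - {type: null, args: null, len: len} when the length header disagrees
//     with the argument lengths (a framing error), or
//   - {type, args, len} for a well-formed message, where args is an array of
//     Buffers and len is the number of bytes consumed from the input.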
function | (buf){
var buflen=buf.length;
if(buflen<4)return false;
var len=buf.readUInt32BE(0);
if(buflen<len)return false;
console.log(buf.slice(0,len));
var type=buf.readUInt8(4);
var numargs=buf.readUInt8(5);
var cursor=6;
var args=new Array(numargs),arglen;
for(var i=0;i<numargs;i++){
//console.log("pM: i="+i);
if(cursor+4>len)return {type:null,args:null,len:len};
arglen=buf.readUInt32BE(cursor);
cursor+=4;
//console.log("pM: cursor="+cursor);
if(cursor+arglen>len)return {type:null,args:null,len:len};
args[i]=new Buffer(arglen);
buf.copy(args[i],0,cursor,cursor+arglen);
cursor+=arglen;
}
return {type:type,args:args,len:len};
}
function makeBufferedProtocolHandler(onmessage,obj){
var buffer=new Buffer(0);
return function(data){
if(typeof data=="string")data=new Buffer(data);
//console.log("received",data);
//first append new data to buffer
var tmp=new Buffer(buffer.length+data.length);
if(buffer.length)buffer.copy(tmp);
data.copy(tmp,buffer.length);
buffer=tmp;
//console.log("buffer+data",buffer);
//then while there's a message in there
do {
//try to parse it
var messageBuffer=new Buffer(buffer.length);
buffer.copy(messageBuffer);
var msg=parseMessage(messageBuffer);
if(msg==false)return; //more data needed
//console.log("messageBuffer",messageBuffer);
//console.log("msg.len",msg.len);
//replace buffer with the data that's left
if(buffer.length-msg.len>0){
tmp=new Buffer(buffer.length-msg.len);
buffer.copy(tmp,0,msg.len);
buffer=tmp;
} else {
buffer=new Buffer(0);
}
//console.log("buffer",buffer);
//now all administration is done, we've got ourselves a message
if(msg.type==null)throw new Error("Invalid message received!");
onmessage(msg,obj,messageBuffer);
} while(buffer.length);
};
}
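// Illustrative wiring only: the server below and its port are hypothetical,
// not part of this module. It shows how the returned handler is meant to be
// attached to a socket's "data" event.
//
//   var net = require("net");
//   net.createServer(function (sock) {
//     var onData = makeBufferedProtocolHandler(function (msg, obj, raw) {
//       console.log("message type", msg.type, "with", msg.args.length, "args");
//     }, sock);
//     sock.on("data", onData);
//   }).listen(8124);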
module.exports.constructMessage=constructMessage;
module.exports.parseMessage=parseMessage;
module.exports.makeBufferedProtocolHandler=makeBufferedProtocolHandler;
| parseMessage | identifier_name |