file_name
large_stringlengths 4
140
| prefix
large_stringlengths 0
12.1k
| suffix
large_stringlengths 0
12k
| middle
large_stringlengths 0
7.51k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
netio.js | var etc=require("./etc.js"),
msgtype=require("./msgtype.js");
function constructMessage(type,args){
var len=6+args.map(function(a){return 4+a.length;}).reduce(function(a,b){return a+b;},0);
var buf=new Buffer(len); | var cursor=6;
for(var i=0;i<args.length;i++){
if(!(args[i] instanceof Buffer))args[i]=new Buffer(""+args[i]);
buf.writeUInt32BE(args[i].length,cursor);
cursor+=4;
args[i].copy(buf,cursor);
cursor+=args[i].length;
}
//console.log("constructing message with len",len,"result:",buf);
return buf;
}
function parseMessage(buf){
var buflen=buf.length;
if(buflen<4)return false;
var len=buf.readUInt32BE(0);
if(buflen<len)return false;
console.log(buf.slice(0,len));
var type=buf.readUInt8(4);
var numargs=buf.readUInt8(5);
var cursor=6;
var args=new Array(numargs),arglen;
for(var i=0;i<numargs;i++){
//console.log("pM: i="+i);
if(cursor+4>len)return {type:null,args:null,len:len};
arglen=buf.readUInt32BE(cursor);
cursor+=4;
//console.log("pM: cursor="+cursor);
if(cursor+arglen>len)return {type:null,args:null,len:len};
args[i]=new Buffer(arglen);
buf.copy(args[i],0,cursor,cursor+arglen);
cursor+=arglen;
}
return {type:type,args:args,len:len};
}
function makeBufferedProtocolHandler(onmessage,obj){
var buffer=new Buffer(0);
return function(data){
if(typeof data=="string")data=new Buffer(data);
//console.log("received",data);
//first append new data to buffer
var tmp=new Buffer(buffer.length+data.length);
if(buffer.length)buffer.copy(tmp);
data.copy(tmp,buffer.length);
buffer=tmp;
//console.log("buffer+data",buffer);
//then while there's a message in there
do {
//try to parse it
var messageBuffer=new Buffer(buffer.length);
buffer.copy(messageBuffer);
var msg=parseMessage(messageBuffer);
if(msg==false)return; //more data needed
//console.log("messageBuffer",messageBuffer);
//console.log("msg.len",msg.len);
//replace buffer with the data that's left
if(buffer.length-msg.len>0){
tmp=new Buffer(buffer.length-msg.len);
buffer.copy(tmp,0,msg.len);
buffer=tmp;
} else {
buffer=new Buffer(0);
}
//console.log("buffer",buffer);
//now all administration is done, we've got ourselves a message
if(msg.type==null)throw new Error("Invalid message received!");
onmessage(msg,obj,messageBuffer);
} while(buffer.length);
};
}
module.exports.constructMessage=constructMessage;
module.exports.parseMessage=parseMessage;
module.exports.makeBufferedProtocolHandler=makeBufferedProtocolHandler; | //console.log("constructing message with len",len)
buf.writeUInt32BE(len,0);
buf.writeUInt8(type,4);
buf.writeUInt8(args.length,5); | random_line_split |
netio.js | var etc=require("./etc.js"),
msgtype=require("./msgtype.js");
function constructMessage(type,args) |
function parseMessage(buf){
var buflen=buf.length;
if(buflen<4)return false;
var len=buf.readUInt32BE(0);
if(buflen<len)return false;
console.log(buf.slice(0,len));
var type=buf.readUInt8(4);
var numargs=buf.readUInt8(5);
var cursor=6;
var args=new Array(numargs),arglen;
for(var i=0;i<numargs;i++){
//console.log("pM: i="+i);
if(cursor+4>len)return {type:null,args:null,len:len};
arglen=buf.readUInt32BE(cursor);
cursor+=4;
//console.log("pM: cursor="+cursor);
if(cursor+arglen>len)return {type:null,args:null,len:len};
args[i]=new Buffer(arglen);
buf.copy(args[i],0,cursor,cursor+arglen);
cursor+=arglen;
}
return {type:type,args:args,len:len};
}
function makeBufferedProtocolHandler(onmessage,obj){
var buffer=new Buffer(0);
return function(data){
if(typeof data=="string")data=new Buffer(data);
//console.log("received",data);
//first append new data to buffer
var tmp=new Buffer(buffer.length+data.length);
if(buffer.length)buffer.copy(tmp);
data.copy(tmp,buffer.length);
buffer=tmp;
//console.log("buffer+data",buffer);
//then while there's a message in there
do {
//try to parse it
var messageBuffer=new Buffer(buffer.length);
buffer.copy(messageBuffer);
var msg=parseMessage(messageBuffer);
if(msg==false)return; //more data needed
//console.log("messageBuffer",messageBuffer);
//console.log("msg.len",msg.len);
//replace buffer with the data that's left
if(buffer.length-msg.len>0){
tmp=new Buffer(buffer.length-msg.len);
buffer.copy(tmp,0,msg.len);
buffer=tmp;
} else {
buffer=new Buffer(0);
}
//console.log("buffer",buffer);
//now all administration is done, we've got ourselves a message
if(msg.type==null)throw new Error("Invalid message received!");
onmessage(msg,obj,messageBuffer);
} while(buffer.length);
};
}
module.exports.constructMessage=constructMessage;
module.exports.parseMessage=parseMessage;
module.exports.makeBufferedProtocolHandler=makeBufferedProtocolHandler;
| {
var len=6+args.map(function(a){return 4+a.length;}).reduce(function(a,b){return a+b;},0);
var buf=new Buffer(len);
//console.log("constructing message with len",len)
buf.writeUInt32BE(len,0);
buf.writeUInt8(type,4);
buf.writeUInt8(args.length,5);
var cursor=6;
for(var i=0;i<args.length;i++){
if(!(args[i] instanceof Buffer))args[i]=new Buffer(""+args[i]);
buf.writeUInt32BE(args[i].length,cursor);
cursor+=4;
args[i].copy(buf,cursor);
cursor+=args[i].length;
}
//console.log("constructing message with len",len,"result:",buf);
return buf;
} | identifier_body |
create-connection-pool.mock.ts | export const request = {
"headers": {
"Content-Type": "application/json",
},
"body": {
"name": "backend-pool",
"mode": "transaction",
"size": 10,
"db": "defaultdb",
"user": "doadmin"
},
};
export const response = {
"body": {
"pool": {
"user": "doadmin",
"name": "backend-pool",
"size": 10,
"db": "defaultdb",
"mode": "transaction",
"connection": {
"uri": "postgres://doadmin:wv78n3zpz42xezdk@backend-do-user-19081923-0.db.ondigitalocean.com:25061/backend-pool?sslmode=require",
"database": "backend-pool",
"host": "backend-do-user-19081923-0.db.ondigitalocean.com",
"port": 25061,
"user": "doadmin",
"password": "wv78n3zpz42xezdk",
"ssl": true
}
}
}, | "status": 200,
"ratelimit-limit": 1200,
"ratelimit-remaining": 1137,
"ratelimit-reset": 1415984218
},
}; | "headers": {
"content-type": "application/json; charset=utf-8", | random_line_split |
NOEtools.py | # Copyright 2004 by Bob Bussell. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""NOEtools: For predicting NOE coordinates from assignment data.
The input and output are modelled on nmrview peaklists.
This modules is suitable for directly generating an nmrview
peaklist with predicted crosspeaks directly from the
input assignment peaklist.
"""
from . import xpktools
def | (peaklist, originNuc, detectedNuc, originResNum, toResNum):
"""Predict the i->j NOE position based on self peak (diagonal) assignments
Parameters
----------
peaklist : xprtools.Peaklist
List of peaks from which to derive predictions
originNuc : str
Name of originating nucleus.
originResNum : int
Index of originating residue.
detectedNuc : str
Name of detected nucleus.
toResNum : int
Index of detected residue.
Returns
-------
returnLine : str
The .xpk file entry for the predicted crosspeak.
Examples
--------
Using predictNOE(peaklist,"N15","H1",10,12)
where peaklist is of the type xpktools.peaklist
would generate a .xpk file entry for a crosspeak
that originated on N15 of residue 10 and ended up
as magnetization detected on the H1 nucleus of
residue 12
Notes
=====
The initial peaklist is assumed to be diagonal (self peaks only)
and currently there is no checking done to insure that this
assumption holds true. Check your peaklist for errors and
off diagonal peaks before attempting to use predictNOE.
"""
returnLine = "" # The modified line to be returned to the caller
datamap = _data_map(peaklist.datalabels)
# Construct labels for keying into dictionary
originAssCol = datamap[originNuc + ".L"] + 1
originPPMCol = datamap[originNuc + ".P"] + 1
detectedPPMCol = datamap[detectedNuc + ".P"] + 1
# Make a list of the data lines involving the detected
if str(toResNum) in peaklist.residue_dict(detectedNuc) \
and str(originResNum) in peaklist.residue_dict(detectedNuc):
detectedList = peaklist.residue_dict(detectedNuc)[str(toResNum)]
originList = peaklist.residue_dict(detectedNuc)[str(originResNum)]
returnLine = detectedList[0]
for line in detectedList:
aveDetectedPPM = _col_ave(detectedList, detectedPPMCol)
aveOriginPPM = _col_ave(originList, originPPMCol)
originAss = originList[0].split()[originAssCol]
returnLine = xpktools.replace_entry(returnLine, originAssCol + 1, originAss)
returnLine = xpktools.replace_entry(returnLine, originPPMCol + 1, aveOriginPPM)
return returnLine
def _data_map(labelline):
# Generate a map between datalabels and column number
# based on a labelline
i = 0 # A counter
datamap = {} # The data map dictionary
labelList = labelline.split() # Get the label line
# Get the column number for each label
for i in range(len(labelList)):
datamap[labelList[i]] = i
return datamap
def _col_ave(list, col):
# Compute average values from a particular column in a string list
total = 0.0
n = 0
for element in list:
total += float(element.split()[col])
n += 1
return total / n
| predictNOE | identifier_name |
NOEtools.py | # Copyright 2004 by Bob Bussell. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""NOEtools: For predicting NOE coordinates from assignment data.
The input and output are modelled on nmrview peaklists.
This modules is suitable for directly generating an nmrview
peaklist with predicted crosspeaks directly from the
input assignment peaklist.
"""
from . import xpktools
def predictNOE(peaklist, originNuc, detectedNuc, originResNum, toResNum):
"""Predict the i->j NOE position based on self peak (diagonal) assignments
Parameters
----------
peaklist : xprtools.Peaklist
List of peaks from which to derive predictions | Name of detected nucleus.
toResNum : int
Index of detected residue.
Returns
-------
returnLine : str
The .xpk file entry for the predicted crosspeak.
Examples
--------
Using predictNOE(peaklist,"N15","H1",10,12)
where peaklist is of the type xpktools.peaklist
would generate a .xpk file entry for a crosspeak
that originated on N15 of residue 10 and ended up
as magnetization detected on the H1 nucleus of
residue 12
Notes
=====
The initial peaklist is assumed to be diagonal (self peaks only)
and currently there is no checking done to insure that this
assumption holds true. Check your peaklist for errors and
off diagonal peaks before attempting to use predictNOE.
"""
returnLine = "" # The modified line to be returned to the caller
datamap = _data_map(peaklist.datalabels)
# Construct labels for keying into dictionary
originAssCol = datamap[originNuc + ".L"] + 1
originPPMCol = datamap[originNuc + ".P"] + 1
detectedPPMCol = datamap[detectedNuc + ".P"] + 1
# Make a list of the data lines involving the detected
if str(toResNum) in peaklist.residue_dict(detectedNuc) \
and str(originResNum) in peaklist.residue_dict(detectedNuc):
detectedList = peaklist.residue_dict(detectedNuc)[str(toResNum)]
originList = peaklist.residue_dict(detectedNuc)[str(originResNum)]
returnLine = detectedList[0]
for line in detectedList:
aveDetectedPPM = _col_ave(detectedList, detectedPPMCol)
aveOriginPPM = _col_ave(originList, originPPMCol)
originAss = originList[0].split()[originAssCol]
returnLine = xpktools.replace_entry(returnLine, originAssCol + 1, originAss)
returnLine = xpktools.replace_entry(returnLine, originPPMCol + 1, aveOriginPPM)
return returnLine
def _data_map(labelline):
# Generate a map between datalabels and column number
# based on a labelline
i = 0 # A counter
datamap = {} # The data map dictionary
labelList = labelline.split() # Get the label line
# Get the column number for each label
for i in range(len(labelList)):
datamap[labelList[i]] = i
return datamap
def _col_ave(list, col):
# Compute average values from a particular column in a string list
total = 0.0
n = 0
for element in list:
total += float(element.split()[col])
n += 1
return total / n | originNuc : str
Name of originating nucleus.
originResNum : int
Index of originating residue.
detectedNuc : str | random_line_split |
NOEtools.py | # Copyright 2004 by Bob Bussell. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""NOEtools: For predicting NOE coordinates from assignment data.
The input and output are modelled on nmrview peaklists.
This modules is suitable for directly generating an nmrview
peaklist with predicted crosspeaks directly from the
input assignment peaklist.
"""
from . import xpktools
def predictNOE(peaklist, originNuc, detectedNuc, originResNum, toResNum):
|
Examples
--------
Using predictNOE(peaklist,"N15","H1",10,12)
where peaklist is of the type xpktools.peaklist
would generate a .xpk file entry for a crosspeak
that originated on N15 of residue 10 and ended up
as magnetization detected on the H1 nucleus of
residue 12
Notes
=====
The initial peaklist is assumed to be diagonal (self peaks only)
and currently there is no checking done to insure that this
assumption holds true. Check your peaklist for errors and
off diagonal peaks before attempting to use predictNOE.
"""
returnLine = "" # The modified line to be returned to the caller
datamap = _data_map(peaklist.datalabels)
# Construct labels for keying into dictionary
originAssCol = datamap[originNuc + ".L"] + 1
originPPMCol = datamap[originNuc + ".P"] + 1
detectedPPMCol = datamap[detectedNuc + ".P"] + 1
# Make a list of the data lines involving the detected
if str(toResNum) in peaklist.residue_dict(detectedNuc) \
and str(originResNum) in peaklist.residue_dict(detectedNuc):
detectedList = peaklist.residue_dict(detectedNuc)[str(toResNum)]
originList = peaklist.residue_dict(detectedNuc)[str(originResNum)]
returnLine = detectedList[0]
for line in detectedList:
aveDetectedPPM = _col_ave(detectedList, detectedPPMCol)
aveOriginPPM = _col_ave(originList, originPPMCol)
originAss = originList[0].split()[originAssCol]
returnLine = xpktools.replace_entry(returnLine, originAssCol + 1, originAss)
returnLine = xpktools.replace_entry(returnLine, originPPMCol + 1, aveOriginPPM)
return returnLine
def _data_map(labelline):
# Generate a map between datalabels and column number
# based on a labelline
i = 0 # A counter
datamap = {} # The data map dictionary
labelList = labelline.split() # Get the label line
# Get the column number for each label
for i in range(len(labelList)):
datamap[labelList[i]] = i
return datamap
def _col_ave(list, col):
# Compute average values from a particular column in a string list
total = 0.0
n = 0
for element in list:
total += float(element.split()[col])
n += 1
return total / n
| """Predict the i->j NOE position based on self peak (diagonal) assignments
Parameters
----------
peaklist : xprtools.Peaklist
List of peaks from which to derive predictions
originNuc : str
Name of originating nucleus.
originResNum : int
Index of originating residue.
detectedNuc : str
Name of detected nucleus.
toResNum : int
Index of detected residue.
Returns
-------
returnLine : str
The .xpk file entry for the predicted crosspeak. | identifier_body |
NOEtools.py | # Copyright 2004 by Bob Bussell. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""NOEtools: For predicting NOE coordinates from assignment data.
The input and output are modelled on nmrview peaklists.
This modules is suitable for directly generating an nmrview
peaklist with predicted crosspeaks directly from the
input assignment peaklist.
"""
from . import xpktools
def predictNOE(peaklist, originNuc, detectedNuc, originResNum, toResNum):
"""Predict the i->j NOE position based on self peak (diagonal) assignments
Parameters
----------
peaklist : xprtools.Peaklist
List of peaks from which to derive predictions
originNuc : str
Name of originating nucleus.
originResNum : int
Index of originating residue.
detectedNuc : str
Name of detected nucleus.
toResNum : int
Index of detected residue.
Returns
-------
returnLine : str
The .xpk file entry for the predicted crosspeak.
Examples
--------
Using predictNOE(peaklist,"N15","H1",10,12)
where peaklist is of the type xpktools.peaklist
would generate a .xpk file entry for a crosspeak
that originated on N15 of residue 10 and ended up
as magnetization detected on the H1 nucleus of
residue 12
Notes
=====
The initial peaklist is assumed to be diagonal (self peaks only)
and currently there is no checking done to insure that this
assumption holds true. Check your peaklist for errors and
off diagonal peaks before attempting to use predictNOE.
"""
returnLine = "" # The modified line to be returned to the caller
datamap = _data_map(peaklist.datalabels)
# Construct labels for keying into dictionary
originAssCol = datamap[originNuc + ".L"] + 1
originPPMCol = datamap[originNuc + ".P"] + 1
detectedPPMCol = datamap[detectedNuc + ".P"] + 1
# Make a list of the data lines involving the detected
if str(toResNum) in peaklist.residue_dict(detectedNuc) \
and str(originResNum) in peaklist.residue_dict(detectedNuc):
detectedList = peaklist.residue_dict(detectedNuc)[str(toResNum)]
originList = peaklist.residue_dict(detectedNuc)[str(originResNum)]
returnLine = detectedList[0]
for line in detectedList:
|
returnLine = xpktools.replace_entry(returnLine, originAssCol + 1, originAss)
returnLine = xpktools.replace_entry(returnLine, originPPMCol + 1, aveOriginPPM)
return returnLine
def _data_map(labelline):
# Generate a map between datalabels and column number
# based on a labelline
i = 0 # A counter
datamap = {} # The data map dictionary
labelList = labelline.split() # Get the label line
# Get the column number for each label
for i in range(len(labelList)):
datamap[labelList[i]] = i
return datamap
def _col_ave(list, col):
# Compute average values from a particular column in a string list
total = 0.0
n = 0
for element in list:
total += float(element.split()[col])
n += 1
return total / n
| aveDetectedPPM = _col_ave(detectedList, detectedPPMCol)
aveOriginPPM = _col_ave(originList, originPPMCol)
originAss = originList[0].split()[originAssCol] | conditional_block |
upload-service.ts | import { Injectable } from '@angular/core';
import { Http, Response } from '@angular/http';
import 'rxjs/add/observable/fromPromise';
import 'rxjs/add/operator/map';
import { Observable } from 'rxjs/Observable';
import { EmptyObservable } from 'rxjs/observable/EmptyObservable';
import { Entry } from '../models/entry'; | import { ImsHeaders } from '../models/ims-headers';
import { Token } from '../models/token';
import { AuthService } from './auth-service';
import { ContainerUploadService } from './container-upload-service';
import { ImsService } from './ims-service';
import { TokenService } from './token-service';
@Injectable()
export class UploadService {
constructor(public http: Http, public tokenService: TokenService, public imsService: ImsService, public containerUploadService: ContainerUploadService, public authService: AuthService) { }
public uploadImages(filterId: number, imageEntry: Entry, images: Image[]): Observable<Response> {
let observables: Observable<Response> = new EmptyObservable();
for (const image of images) {
observables = Observable.concat(observables, this.uploadImage(this.authService.filterId, imageEntry, image));
}
return observables;
}
public uploadImage(filterId: number, imageEntry: Entry, image: Image): Observable<Response> {
return this.tokenService.getToken().flatMap(token =>
this.createContainerLocation(filterId, token).flatMap(adress =>
this.containerUploadService.postToContainer(adress, token, image).flatMap(response =>
this.createImageEntry(adress, token, imageEntry))));
}
public createContainerLocation(filterId: number, token: Token): Observable<string> {
return this.imsService.getUploadsLink(filterId, token).flatMap(url =>
this.http.post(url, undefined, { headers: new ImsHeaders(this.authService.currentCredential, token) }).map(response => response.headers.get('location')));
}
public createImageEntry(url: string, token: Token, imageEntry: Entry): Observable<Response> {
return this.http.post(url, imageEntry.json(), { headers: new ImsHeaders(this.authService.currentCredential, token) });
}
} | import { Image } from '../models/image'; | random_line_split |
upload-service.ts | import { Injectable } from '@angular/core';
import { Http, Response } from '@angular/http';
import 'rxjs/add/observable/fromPromise';
import 'rxjs/add/operator/map';
import { Observable } from 'rxjs/Observable';
import { EmptyObservable } from 'rxjs/observable/EmptyObservable';
import { Entry } from '../models/entry';
import { Image } from '../models/image';
import { ImsHeaders } from '../models/ims-headers';
import { Token } from '../models/token';
import { AuthService } from './auth-service';
import { ContainerUploadService } from './container-upload-service';
import { ImsService } from './ims-service';
import { TokenService } from './token-service';
@Injectable()
export class UploadService {
constructor(public http: Http, public tokenService: TokenService, public imsService: ImsService, public containerUploadService: ContainerUploadService, public authService: AuthService) { }
public uploadImages(filterId: number, imageEntry: Entry, images: Image[]): Observable<Response> {
let observables: Observable<Response> = new EmptyObservable();
for (const image of images) {
observables = Observable.concat(observables, this.uploadImage(this.authService.filterId, imageEntry, image));
}
return observables;
}
public uploadImage(filterId: number, imageEntry: Entry, image: Image): Observable<Response> {
return this.tokenService.getToken().flatMap(token =>
this.createContainerLocation(filterId, token).flatMap(adress =>
this.containerUploadService.postToContainer(adress, token, image).flatMap(response =>
this.createImageEntry(adress, token, imageEntry))));
}
public createContainerLocation(filterId: number, token: Token): Observable<string> {
return this.imsService.getUploadsLink(filterId, token).flatMap(url =>
this.http.post(url, undefined, { headers: new ImsHeaders(this.authService.currentCredential, token) }).map(response => response.headers.get('location')));
}
public | (url: string, token: Token, imageEntry: Entry): Observable<Response> {
return this.http.post(url, imageEntry.json(), { headers: new ImsHeaders(this.authService.currentCredential, token) });
}
}
| createImageEntry | identifier_name |
upload-service.ts | import { Injectable } from '@angular/core';
import { Http, Response } from '@angular/http';
import 'rxjs/add/observable/fromPromise';
import 'rxjs/add/operator/map';
import { Observable } from 'rxjs/Observable';
import { EmptyObservable } from 'rxjs/observable/EmptyObservable';
import { Entry } from '../models/entry';
import { Image } from '../models/image';
import { ImsHeaders } from '../models/ims-headers';
import { Token } from '../models/token';
import { AuthService } from './auth-service';
import { ContainerUploadService } from './container-upload-service';
import { ImsService } from './ims-service';
import { TokenService } from './token-service';
@Injectable()
export class UploadService {
constructor(public http: Http, public tokenService: TokenService, public imsService: ImsService, public containerUploadService: ContainerUploadService, public authService: AuthService) |
public uploadImages(filterId: number, imageEntry: Entry, images: Image[]): Observable<Response> {
let observables: Observable<Response> = new EmptyObservable();
for (const image of images) {
observables = Observable.concat(observables, this.uploadImage(this.authService.filterId, imageEntry, image));
}
return observables;
}
public uploadImage(filterId: number, imageEntry: Entry, image: Image): Observable<Response> {
return this.tokenService.getToken().flatMap(token =>
this.createContainerLocation(filterId, token).flatMap(adress =>
this.containerUploadService.postToContainer(adress, token, image).flatMap(response =>
this.createImageEntry(adress, token, imageEntry))));
}
public createContainerLocation(filterId: number, token: Token): Observable<string> {
return this.imsService.getUploadsLink(filterId, token).flatMap(url =>
this.http.post(url, undefined, { headers: new ImsHeaders(this.authService.currentCredential, token) }).map(response => response.headers.get('location')));
}
public createImageEntry(url: string, token: Token, imageEntry: Entry): Observable<Response> {
return this.http.post(url, imageEntry.json(), { headers: new ImsHeaders(this.authService.currentCredential, token) });
}
}
| { } | identifier_body |
model_support.py | from cuescience_shop.models import Client, Address, Order
from natspec_utils.decorators import TextSyntax
from cart.cart import Cart
from django.test.client import Client as TestClient
class ClientTestSupport(object):
def __init__(self, test_case):
self.test_case = test_case
self.client = TestClient()
@TextSyntax("Create address #1 #2 #3 #4", types=["str", "str", "str", "str"], return_type="Address")
def create_address(self, street, number, postcode, city):
address = Address(street=street, number=number, postcode=postcode, city=city)
address.save()
return address
@TextSyntax("Create client #1 #2", types=["str", "str", "Address"], return_type="Client") | @TextSyntax("Create order", types=["Client"], return_type="Order")
def create_order(self, client):
cart = Cart(self.client)
cart.create_cart()
cart = cart.cart
order = Order(client=client, cart=cart)
order.save()
return order
@TextSyntax("Assert client number is #1", types=["str", "Client"])
def assert_client_number(self, client_number, client):
self.test_case.assertEqual(client_number, client.client_number)
@TextSyntax("Assert order number is #1", types=["str", "Order"])
def assert_order_number(self, order_number, order):
self.test_case.assertEqual(order_number, order.order_number) | def create_client(self, first_name, last_name, address):
client = Client(first_name=first_name, last_name=last_name, shipping_address=address, billing_address=address)
client.save()
return client
| random_line_split |
model_support.py | from cuescience_shop.models import Client, Address, Order
from natspec_utils.decorators import TextSyntax
from cart.cart import Cart
from django.test.client import Client as TestClient
class ClientTestSupport(object):
def __init__(self, test_case):
self.test_case = test_case
self.client = TestClient()
@TextSyntax("Create address #1 #2 #3 #4", types=["str", "str", "str", "str"], return_type="Address")
def create_address(self, street, number, postcode, city):
address = Address(street=street, number=number, postcode=postcode, city=city)
address.save()
return address
@TextSyntax("Create client #1 #2", types=["str", "str", "Address"], return_type="Client")
def create_client(self, first_name, last_name, address):
client = Client(first_name=first_name, last_name=last_name, shipping_address=address, billing_address=address)
client.save()
return client
@TextSyntax("Create order", types=["Client"], return_type="Order")
def create_order(self, client):
cart = Cart(self.client)
cart.create_cart()
cart = cart.cart
order = Order(client=client, cart=cart)
order.save()
return order
@TextSyntax("Assert client number is #1", types=["str", "Client"])
def assert_client_number(self, client_number, client):
self.test_case.assertEqual(client_number, client.client_number)
@TextSyntax("Assert order number is #1", types=["str", "Order"])
def | (self, order_number, order):
self.test_case.assertEqual(order_number, order.order_number)
| assert_order_number | identifier_name |
model_support.py | from cuescience_shop.models import Client, Address, Order
from natspec_utils.decorators import TextSyntax
from cart.cart import Cart
from django.test.client import Client as TestClient
class ClientTestSupport(object):
def __init__(self, test_case):
self.test_case = test_case
self.client = TestClient()
@TextSyntax("Create address #1 #2 #3 #4", types=["str", "str", "str", "str"], return_type="Address")
def create_address(self, street, number, postcode, city):
address = Address(street=street, number=number, postcode=postcode, city=city)
address.save()
return address
@TextSyntax("Create client #1 #2", types=["str", "str", "Address"], return_type="Client")
def create_client(self, first_name, last_name, address):
|
@TextSyntax("Create order", types=["Client"], return_type="Order")
def create_order(self, client):
cart = Cart(self.client)
cart.create_cart()
cart = cart.cart
order = Order(client=client, cart=cart)
order.save()
return order
@TextSyntax("Assert client number is #1", types=["str", "Client"])
def assert_client_number(self, client_number, client):
self.test_case.assertEqual(client_number, client.client_number)
@TextSyntax("Assert order number is #1", types=["str", "Order"])
def assert_order_number(self, order_number, order):
self.test_case.assertEqual(order_number, order.order_number)
| client = Client(first_name=first_name, last_name=last_name, shipping_address=address, billing_address=address)
client.save()
return client | identifier_body |
Animator.js | specify the values at 0% and 100%, the start and ending values. There is also a {@link #keyframe}
* event that fires after each key frame is reached.
*
* ## Example
*
* In the example below, we modify the values of the element at each fifth throughout the animation.
*
* @example
* Ext.create('Ext.fx.Animator', {
* target: Ext.getBody().createChild({
* style: {
* width: '100px',
* height: '100px',
* 'background-color': 'red'
* }
* }),
* duration: 10000, // 10 seconds
* keyframes: {
* 0: {
* opacity: 1,
* backgroundColor: 'FF0000'
* },
* 20: {
* x: 30,
* opacity: 0.5
* },
* 40: {
* x: 130,
* backgroundColor: '0000FF'
* },
* 60: {
* y: 80,
* opacity: 0.3
* },
* 80: {
* width: 200,
* y: 200
* },
* 100: {
* opacity: 1,
* backgroundColor: '00FF00'
* }
* }
* });
*/
Ext.define('Ext.fx.Animator', {
/* Begin Definitions */
mixins: {
observable: 'Ext.util.Observable'
},
requires: ['Ext.fx.Manager'],
/* End Definitions */
/**
* @property {Boolean} isAnimator
* `true` in this class to identify an object as an instantiated Animator, or subclass thereof.
*/
isAnimator: true,
/**
* @cfg {Number} duration
* Time in milliseconds for the animation to last. Defaults to 250.
*/
duration: 250,
/**
* @cfg {Number} delay
* Time to delay before starting the animation. Defaults to 0.
*/
delay: 0,
/* private used to track a delayed starting time */
delayStart: 0,
/**
* @cfg {Boolean} dynamic
* Currently only for Component Animation: Only set a component's outer element size bypassing layouts. Set to true to do full layouts for every frame of the animation. Defaults to false.
*/
dynamic: false,
/**
* @cfg {String} easing
*
* This describes how the intermediate values used during a transition will be calculated. It allows for a transition to change
* speed over its duration.
*
* - backIn
* - backOut
* - bounceIn
* - bounceOut
* - ease
* - easeIn
* - easeOut
* - easeInOut
* - elasticIn
* - elasticOut
* - cubic-bezier(x1, y1, x2, y2)
*
* Note that cubic-bezier will create a custom easing curve following the CSS3 [transition-timing-function][0]
* specification. The four values specify points P1 and P2 of the curve as (x1, y1, x2, y2). All values must
* be in the range [0, 1] or the definition is invalid.
*
* [0]: http://www.w3.org/TR/css3-transitions/#transition-timing-function_tag
*/
easing: 'ease',
/**
* Flag to determine if the animation has started
* @property running
* @type Boolean
*/
running: false,
/**
* Flag to determine if the animation is paused. Only set this to true if you need to
* keep the Anim instance around to be unpaused later; otherwise call {@link #end}.
* @property paused
* @type Boolean
*/
paused: false,
/**
* @private
*/
damper: 1,
/**
* @cfg {Number} iterations
* Number of times to execute the animation. Defaults to 1.
*/
iterations: 1,
/**
* Current iteration the animation is running.
* @property currentIteration
* @type Number
*/
currentIteration: 0,
/**
* Current keyframe step of the animation.
* @property keyframeStep
* @type Number
*/
keyframeStep: 0,
/**
* @private
*/
animKeyFramesRE: /^(from|to|\d+%?)$/,
/**
* @cfg {Ext.fx.target.Target} target
* The Ext.fx.target to apply the animation to. If not specified during initialization, this can be passed to the applyAnimator
* method to apply the same animation to many targets.
*/
/**
* @cfg {Object} keyframes
* Animation keyframes follow the CSS3 Animation configuration pattern. 'from' is always considered '0%' and 'to' | * is considered '100%'.<b>Every keyframe declaration must have a keyframe rule for 0% and 100%, possibly defined using
* "from" or "to"</b>. A keyframe declaration without these keyframe selectors is invalid and will not be available for
* animation. The keyframe declaration for a keyframe rule consists of properties and values. Properties that are unable to
* be animated are ignored in these rules, with the exception of 'easing' which can be changed at each keyframe. For example:
<pre><code>
keyframes : {
'0%': {
left: 100
},
'40%': {
left: 150
},
'60%': {
left: 75
},
'100%': {
left: 100
}
}
</code></pre>
*/
constructor: function(config) {
var me = this;
config = Ext.apply(me, config || {});
me.config = config;
me.id = Ext.id(null, 'ext-animator-');
me.addEvents(
/**
* @event beforeanimate
* Fires before the animation starts. A handler can return false to cancel the animation.
* @param {Ext.fx.Animator} this
*/
'beforeanimate',
/**
* @event keyframe
* Fires at each keyframe.
* @param {Ext.fx.Animator} this
* @param {Number} keyframe step number
*/
'keyframe',
/**
* @event afteranimate
* Fires when the animation is complete.
* @param {Ext.fx.Animator} this
* @param {Date} startTime
*/
'afteranimate'
);
me.mixins.observable.constructor.call(me, config);
me.timeline = [];
me.createTimeline(me.keyframes);
if (me.target) {
me.applyAnimator(me.target);
Ext.fx.Manager.addAnim(me);
}
},
/**
* @private
*/
sorter: function (a, b) {
return a.pct - b.pct;
},
/**
* @private
* Takes the given keyframe configuration object and converts it into an ordered array with the passed attributes per keyframe
* or applying the 'to' configuration to all keyframes. Also calculates the proper animation duration per keyframe.
*/
createTimeline: function(keyframes) {
var me = this,
attrs = [],
to = me.to || {},
duration = me.duration,
prevMs, ms, i, ln, pct, attr;
for (pct in keyframes) {
if (keyframes.hasOwnProperty(pct) && me.animKeyFramesRE.test(pct)) {
attr = {attrs: Ext.apply(keyframes[pct], to)};
// CSS3 spec allow for from/to to be specified.
if (pct == "from") {
pct = 0;
}
else if (pct == "to") {
pct = 100;
}
// convert % values into integers
attr.pct = parseInt(pct, 10);
attrs.push(attr);
}
}
// Sort by pct property
Ext.Array.sort(attrs, me.sorter);
// Only an end
//if (attrs[0].pct) {
// attrs.unshift({pct: 0, attrs: element.attrs});
//}
ln = attrs.length;
for (i = 0; i < ln; i++) {
prevMs = (attrs[i - 1]) ? duration * (attrs[i - 1].pct / 100) : 0;
ms = duration * (attrs[i].pct / 100);
me.timeline.push({
duration: ms - prevMs,
attrs: attrs[i].attrs
});
}
},
/**
* Applies animation to the Ext.fx.target
* @private
* @param target
* @type String/Object
*/
applyAnimator: function(target) {
var me = this | random_line_split |
|
Animator.js | an instantiated Animator, or subclass thereof.
*/
isAnimator: true,
/**
* @cfg {Number} duration
* Time in milliseconds for the animation to last. Defaults to 250.
*/
duration: 250,
/**
* @cfg {Number} delay
* Time to delay before starting the animation. Defaults to 0.
*/
delay: 0,
/* private used to track a delayed starting time */
delayStart: 0,
/**
* @cfg {Boolean} dynamic
* Currently only for Component Animation: Only set a component's outer element size bypassing layouts. Set to true to do full layouts for every frame of the animation. Defaults to false.
*/
dynamic: false,
/**
* @cfg {String} easing
*
* This describes how the intermediate values used during a transition will be calculated. It allows for a transition to change
* speed over its duration.
*
* - backIn
* - backOut
* - bounceIn
* - bounceOut
* - ease
* - easeIn
* - easeOut
* - easeInOut
* - elasticIn
* - elasticOut
* - cubic-bezier(x1, y1, x2, y2)
*
* Note that cubic-bezier will create a custom easing curve following the CSS3 [transition-timing-function][0]
* specification. The four values specify points P1 and P2 of the curve as (x1, y1, x2, y2). All values must
* be in the range [0, 1] or the definition is invalid.
*
* [0]: http://www.w3.org/TR/css3-transitions/#transition-timing-function_tag
*/
easing: 'ease',
/**
* Flag to determine if the animation has started
* @property running
* @type Boolean
*/
running: false,
/**
* Flag to determine if the animation is paused. Only set this to true if you need to
* keep the Anim instance around to be unpaused later; otherwise call {@link #end}.
* @property paused
* @type Boolean
*/
paused: false,
/**
* @private
*/
damper: 1,
/**
* @cfg {Number} iterations
* Number of times to execute the animation. Defaults to 1.
*/
iterations: 1,
/**
* Current iteration the animation is running.
* @property currentIteration
* @type Number
*/
currentIteration: 0,
/**
* Current keyframe step of the animation.
* @property keyframeStep
* @type Number
*/
keyframeStep: 0,
/**
* @private
*/
animKeyFramesRE: /^(from|to|\d+%?)$/,
/**
* @cfg {Ext.fx.target.Target} target
* The Ext.fx.target to apply the animation to. If not specified during initialization, this can be passed to the applyAnimator
* method to apply the same animation to many targets.
*/
/**
* @cfg {Object} keyframes
* Animation keyframes follow the CSS3 Animation configuration pattern. 'from' is always considered '0%' and 'to'
* is considered '100%'.<b>Every keyframe declaration must have a keyframe rule for 0% and 100%, possibly defined using
* "from" or "to"</b>. A keyframe declaration without these keyframe selectors is invalid and will not be available for
* animation. The keyframe declaration for a keyframe rule consists of properties and values. Properties that are unable to
* be animated are ignored in these rules, with the exception of 'easing' which can be changed at each keyframe. For example:
<pre><code>
keyframes : {
'0%': {
left: 100
},
'40%': {
left: 150
},
'60%': {
left: 75
},
'100%': {
left: 100
}
}
</code></pre>
*/
constructor: function(config) {
var me = this;
config = Ext.apply(me, config || {});
me.config = config;
me.id = Ext.id(null, 'ext-animator-');
me.addEvents(
/**
* @event beforeanimate
* Fires before the animation starts. A handler can return false to cancel the animation.
* @param {Ext.fx.Animator} this
*/
'beforeanimate',
/**
* @event keyframe
* Fires at each keyframe.
* @param {Ext.fx.Animator} this
* @param {Number} keyframe step number
*/
'keyframe',
/**
* @event afteranimate
* Fires when the animation is complete.
* @param {Ext.fx.Animator} this
* @param {Date} startTime
*/
'afteranimate'
);
me.mixins.observable.constructor.call(me, config);
me.timeline = [];
me.createTimeline(me.keyframes);
if (me.target) {
me.applyAnimator(me.target);
Ext.fx.Manager.addAnim(me);
}
},
/**
* @private
*/
sorter: function (a, b) {
return a.pct - b.pct;
},
/**
* @private
* Takes the given keyframe configuration object and converts it into an ordered array with the passed attributes per keyframe
* or applying the 'to' configuration to all keyframes. Also calculates the proper animation duration per keyframe.
*/
createTimeline: function(keyframes) {
var me = this,
attrs = [],
to = me.to || {},
duration = me.duration,
prevMs, ms, i, ln, pct, attr;
for (pct in keyframes) {
if (keyframes.hasOwnProperty(pct) && me.animKeyFramesRE.test(pct)) {
attr = {attrs: Ext.apply(keyframes[pct], to)};
// CSS3 spec allow for from/to to be specified.
if (pct == "from") {
pct = 0;
}
else if (pct == "to") {
pct = 100;
}
// convert % values into integers
attr.pct = parseInt(pct, 10);
attrs.push(attr);
}
}
// Sort by pct property
Ext.Array.sort(attrs, me.sorter);
// Only an end
//if (attrs[0].pct) {
// attrs.unshift({pct: 0, attrs: element.attrs});
//}
ln = attrs.length;
for (i = 0; i < ln; i++) {
prevMs = (attrs[i - 1]) ? duration * (attrs[i - 1].pct / 100) : 0;
ms = duration * (attrs[i].pct / 100);
me.timeline.push({
duration: ms - prevMs,
attrs: attrs[i].attrs
});
}
},
/**
* Applies animation to the Ext.fx.target
* @private
* @param target
* @type String/Object
*/
applyAnimator: function(target) {
var me = this,
anims = [],
timeline = me.timeline,
ln = timeline.length,
anim, easing, damper, attrs, i;
if (me.fireEvent('beforeanimate', me) !== false) {
for (i = 0; i < ln; i++) {
anim = timeline[i];
attrs = anim.attrs;
easing = attrs.easing || me.easing;
damper = attrs.damper || me.damper;
delete attrs.easing;
delete attrs.damper;
anim = new Ext.fx.Anim({
target: target,
easing: easing,
damper: damper,
duration: anim.duration,
paused: true,
to: attrs
});
anims.push(anim);
}
me.animations = anims;
me.target = anim.target;
for (i = 0; i < ln - 1; i++) {
anim = anims[i];
anim.nextAnim = anims[i + 1];
anim.on('afteranimate', function() {
this.nextAnim.paused = false;
});
anim.on('afteranimate', function() {
this.fireEvent('keyframe', this, ++this.keyframeStep);
}, me);
}
anims[ln - 1].on('afteranimate', function() {
this.lastFrame();
}, me);
}
},
/**
* @private
* Fires beforeanimate and sets the running flag.
*/
start: function(startTime) {
var me = this,
delay = me.delay,
delayStart = me.delayStart,
delayDelta;
if (delay) {
if (!delayStart) {
me.delayStart = startTime;
return;
}
else {
delayDelta = startTime - delayStart;
if (delayDelta < delay) | {
return;
} | conditional_block |
|
Two arrays.py | """
ou are given two integer arrays, A and B, each containing N integers. The size of the array is less than or equal to
1000. You are free to permute the order of the elements in the arrays.
Now here's the real question: Is there an permutation A', B' possible of A and B, such that, A'i+B'i >= K for all i,
where A'i denotes the ith element in the array A' and B'i denotes ith element in the array B'.
Input Format
The first line contains an integer, T, the number of test-cases. T test cases follow. Each test case has the following
format:
The first line contains two integers, N and K. The second line contains N space separated integers, denoting array A.
The third line describes array B in a same format.
"""
__author__ = 'Danyang'
class Solution(object):
def solve(self, cipher):
"""
main solution function
:param cipher: the cipher
"""
N, K, A, B = cipher
A.sort()
B.sort(reverse=True) # dynamic typed, then cannot detect list()
for i in xrange(N):
if not A[i] + B[i] >= K:
return "NO"
return "YES"
if __name__ == "__main__":
| import sys
f = open("1.in", "r")
# f = sys.stdin
testcases = int(f.readline().strip())
for t in xrange(testcases):
# construct cipher
N, K = map(int, f.readline().strip().split(" "))
A = map(int, f.readline().strip().split(' '))
B = map(int, f.readline().strip().split(' '))
cipher = N, K, A, B
# solve
s = "%s\n" % (Solution().solve(cipher))
print s, | conditional_block |
|
Two arrays.py | Now here's the real question: Is there an permutation A', B' possible of A and B, such that, A'i+B'i >= K for all i,
where A'i denotes the ith element in the array A' and B'i denotes ith element in the array B'.
Input Format
The first line contains an integer, T, the number of test-cases. T test cases follow. Each test case has the following
format:
The first line contains two integers, N and K. The second line contains N space separated integers, denoting array A.
The third line describes array B in a same format.
"""
__author__ = 'Danyang'
class Solution(object):
def solve(self, cipher):
"""
main solution function
:param cipher: the cipher
"""
N, K, A, B = cipher
A.sort()
B.sort(reverse=True) # dynamic typed, then cannot detect list()
for i in xrange(N):
if not A[i] + B[i] >= K:
return "NO"
return "YES"
if __name__ == "__main__":
import sys
f = open("1.in", "r")
# f = sys.stdin
testcases = int(f.readline().strip())
for t in xrange(testcases):
# construct cipher
N, K = map(int, f.readline().strip().split(" "))
A = map(int, f.readline().strip().split(' '))
B = map(int, f.readline().strip().split(' '))
cipher = N, K, A, B
# solve
s = "%s\n" % (Solution().solve(cipher))
print s, | """
ou are given two integer arrays, A and B, each containing N integers. The size of the array is less than or equal to
1000. You are free to permute the order of the elements in the arrays.
| random_line_split |
|
Two arrays.py | """
ou are given two integer arrays, A and B, each containing N integers. The size of the array is less than or equal to
1000. You are free to permute the order of the elements in the arrays.
Now here's the real question: Is there an permutation A', B' possible of A and B, such that, A'i+B'i >= K for all i,
where A'i denotes the ith element in the array A' and B'i denotes ith element in the array B'.
Input Format
The first line contains an integer, T, the number of test-cases. T test cases follow. Each test case has the following
format:
The first line contains two integers, N and K. The second line contains N space separated integers, denoting array A.
The third line describes array B in a same format.
"""
__author__ = 'Danyang'
class Solution(object):
def | (self, cipher):
"""
main solution function
:param cipher: the cipher
"""
N, K, A, B = cipher
A.sort()
B.sort(reverse=True) # dynamic typed, then cannot detect list()
for i in xrange(N):
if not A[i] + B[i] >= K:
return "NO"
return "YES"
if __name__ == "__main__":
import sys
f = open("1.in", "r")
# f = sys.stdin
testcases = int(f.readline().strip())
for t in xrange(testcases):
# construct cipher
N, K = map(int, f.readline().strip().split(" "))
A = map(int, f.readline().strip().split(' '))
B = map(int, f.readline().strip().split(' '))
cipher = N, K, A, B
# solve
s = "%s\n" % (Solution().solve(cipher))
print s,
| solve | identifier_name |
Two arrays.py | """
ou are given two integer arrays, A and B, each containing N integers. The size of the array is less than or equal to
1000. You are free to permute the order of the elements in the arrays.
Now here's the real question: Is there an permutation A', B' possible of A and B, such that, A'i+B'i >= K for all i,
where A'i denotes the ith element in the array A' and B'i denotes ith element in the array B'.
Input Format
The first line contains an integer, T, the number of test-cases. T test cases follow. Each test case has the following
format:
The first line contains two integers, N and K. The second line contains N space separated integers, denoting array A.
The third line describes array B in a same format.
"""
__author__ = 'Danyang'
class Solution(object):
def solve(self, cipher):
|
if __name__ == "__main__":
import sys
f = open("1.in", "r")
# f = sys.stdin
testcases = int(f.readline().strip())
for t in xrange(testcases):
# construct cipher
N, K = map(int, f.readline().strip().split(" "))
A = map(int, f.readline().strip().split(' '))
B = map(int, f.readline().strip().split(' '))
cipher = N, K, A, B
# solve
s = "%s\n" % (Solution().solve(cipher))
print s,
| """
main solution function
:param cipher: the cipher
"""
N, K, A, B = cipher
A.sort()
B.sort(reverse=True) # dynamic typed, then cannot detect list()
for i in xrange(N):
if not A[i] + B[i] >= K:
return "NO"
return "YES" | identifier_body |
StringSpecialization.ts | /**
* Based on taint analysis, could check how input strings are used,
* and inform the search about it
*/
export enum StringSpecialization {
/**
* String used as a Date with unknown format
*/
DATE_FORMAT_UNKNOWN_PATTERN = "DATE_FORMAT_UNKNOWN_PATTERN",
/**
* String used as a Date with not explicitly supported format
*/
DATE_FORMAT_PATTERN = "DATE_FORMAT_PATTERN",
/**
* String used as a Date in YYYY_MM_DD format
*/
DATE_YYYY_MM_DD = "DATE_YYYY_MM_DD",
/**
* String used as a Date in YYYY_MM_DD_HH_MM format
*/
DATE_YYYY_MM_DD_HH_MM = "DATE_YYYY_MM_DD_HH_MM",
/**
* An ISO Local Date Time (i.e. ISO_LOCAL_DATE + 'T' + ISO_LOCAL_TIME)
*/
ISO_LOCAL_DATE_TIME = "ISO_LOCAL_DATE_TIME",
/**
* An ISO Local Time (with or without no seconds)
*/
ISO_LOCAL_TIME = "ISO_LOCAL_TIME",
/**
* String used as an integer
*/
INTEGER = "INTEGER",
/**
* String used with a specific, constant value
*/
CONSTANT = "CONSTANT",
/**
* String used with a specific, constant value, ignoring its case
*/
CONSTANT_IGNORE_CASE = "CONSTANT_IGNORE_CASE",
/**
* String constrained by a regular expression
*/
REGEX = "REGEX",
/**
* String parsed to double
*/
DOUBLE = "DOUBLE",
| /**
* String parsed to long
*/
LONG = "LONG",
/**
* String parsed to boolean
*/
BOOLEAN = "BOOLEAN",
/**
* String parsed to float
*/
FLOAT = "FLOAT",
/**
* String should be equal to another string variable,
* ie 2 (or more) different variables should be keep their
* value in sync
*/
EQUAL = "EQUAL"
} | random_line_split |
|
tests.js | 'use strict';
var helpers = require('opent2t-testcase-helpers');
var uuidRegExMatch = /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/i;
function runTranslatorTests(settings) {
helpers.updateSettings(settings);
var test = settings.test;
var opent2t = settings.opent2t;
var SchemaName = settings.schemaName;
var deviceId = settings.deviceId;
var translator;
test.before(() => {
return opent2t.createTranslatorAsync(settings.translatorPath, 'thingTranslator', settings.getDeviceInfo()).then(trans => {
translator = trans;
return opent2t.invokeMethodAsync(translator, SchemaName, 'get', []).then((response) => {
if(deviceId === undefined) {
deviceId = response.entities[0].di;
}
});
});
});
test.serial('Valid Translator', t => {
t.is(typeof translator, 'object') && t.truthy(translator);
});
/**
* Verify that the ouput from GetPlatform includes all of the required properties
*/
test.serial('GetPlatform', t => {
return helpers.runTest(settings, t, () => {
return opent2t.invokeMethodAsync(translator, SchemaName, 'get', []).then((response) => {
// Verify required platform properties are present.
// This is helpful for new translators that do not have a snapshot yet.
t.truthy(response.availability, `Platform requires platform availability (availability)`);
t.truthy(response.pi, `Platform requires an id (pi)`);
t.truthy(response.pi.match(uuidRegExMatch), `Platform id must be a UUID (pi)`);
t.truthy(response.mnmn, `Platform requires a manufacturer name (mnmn)`);
t.truthy(response.mnmo, `Platform requires a model name (mnmo)`);
t.truthy(response.n, `Platform requires a friendly name (n)`);
// Verify that the platform includes the correct opent2t schema
t.not(response.rt.indexOf(SchemaName), -1, `Platform must include '${SchemaName}' in resources (rt)`);
// Verify each entity has the required properties
for(var i = 0; i < response.entities.length; i++) {
let entity = response.entities[i];
t.truthy(entity.icv, `Entity ${i} requires a core version (icv)`);
t.truthy(entity.dmv, `Entity ${i} requires a device model version (dmv)`);
t.truthy(entity.n, `Entity ${i} requires a friendly name (n)`);
t.truthy(entity.di, `Entity ${i} requires an id (di)`);
t.truthy(entity.di.match(uuidRegExMatch), `Entity ${i} id must be a UUID (di)`);
for(var j = 0; j < entity.resources.length; j++) {
let resource = entity.resources[j];
t.truthy(resource.href, `Resource ${i},${j} requires an href (href)`);
t.truthy(resource.rt, `Resource ${i},${j} requires an array of schemas (rt)`);
t.true(Array.isArray(resource.rt), `Resource ${i},${j} requires an array of schemas (rt)`); | t.true(Array.isArray(resource.if), `Resource ${i},${j} requires an array of interfaces (if)`);
t.true(resource.if.length > 0, `Resource ${i},${j} requires an array of interfaces (if)`);
// Check for oic.if.a XOR oic.if.s
t.true(
(resource.if.indexOf('oic.if.a') > -1) != (resource.if.indexOf('oic.if.s') > -1),
`Resource ${i},${j} requires an interface be either an actuator or a sensor (if)`
);
// And it needs oic.r.baseline too
t.true(resource.if.indexOf('oic.if.baseline') > -1, `Resource ${i},${j} requires an interface to include 'oic.r.baseline' (if)`);
}
}
t.snapshot(response);
});
});
});
test.serial('GetPlatformExpanded', t => {
return helpers.runTest(settings, t, () => {
return opent2t.invokeMethodAsync(translator, SchemaName, 'get', [true])
.then((response) => {
// GetPlatform covers the required properties, so just verify a snapshot here.
t.snapshot(response);
});
});
});
test.skip.serial('GetSubscribe', t => {
t.fail("Not Implemented");
});
test.skip.serial('PostSubscribe', t => {
t.fail("Not Implemented");
});
test.skip.serial('DeleteSubscribe', t => {
t.fail("Not Implemented");
});
}
module.exports = runTranslatorTests; | t.true(resource.rt.length > 0, `Resource ${i},${j} requires an array of schemas (rt)`);
t.truthy(resource.if, `Resource ${i},${j} requires an array of interfaces (if)`); | random_line_split |
tests.js | 'use strict';
var helpers = require('opent2t-testcase-helpers');
var uuidRegExMatch = /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/i;
function runTranslatorTests(settings) {
helpers.updateSettings(settings);
var test = settings.test;
var opent2t = settings.opent2t;
var SchemaName = settings.schemaName;
var deviceId = settings.deviceId;
var translator;
test.before(() => {
return opent2t.createTranslatorAsync(settings.translatorPath, 'thingTranslator', settings.getDeviceInfo()).then(trans => {
translator = trans;
return opent2t.invokeMethodAsync(translator, SchemaName, 'get', []).then((response) => {
if(deviceId === undefined) {
deviceId = response.entities[0].di;
}
});
});
});
test.serial('Valid Translator', t => {
t.is(typeof translator, 'object') && t.truthy(translator);
});
/**
* Verify that the ouput from GetPlatform includes all of the required properties
*/
test.serial('GetPlatform', t => {
return helpers.runTest(settings, t, () => {
return opent2t.invokeMethodAsync(translator, SchemaName, 'get', []).then((response) => {
// Verify required platform properties are present.
// This is helpful for new translators that do not have a snapshot yet.
t.truthy(response.availability, `Platform requires platform availability (availability)`);
t.truthy(response.pi, `Platform requires an id (pi)`);
t.truthy(response.pi.match(uuidRegExMatch), `Platform id must be a UUID (pi)`);
t.truthy(response.mnmn, `Platform requires a manufacturer name (mnmn)`);
t.truthy(response.mnmo, `Platform requires a model name (mnmo)`);
t.truthy(response.n, `Platform requires a friendly name (n)`);
// Verify that the platform includes the correct opent2t schema
t.not(response.rt.indexOf(SchemaName), -1, `Platform must include '${SchemaName}' in resources (rt)`);
// Verify each entity has the required properties
for(var i = 0; i < response.entities.length; i++) | // Check for oic.if.a XOR oic.if.s
t.true(
(resource.if.indexOf('oic.if.a') > -1) != (resource.if.indexOf('oic.if.s') > -1),
`Resource ${i},${j} requires an interface be either an actuator or a sensor (if)`
);
// And it needs oic.r.baseline too
t.true(resource.if.indexOf('oic.if.baseline') > -1, `Resource ${i},${j} requires an interface to include 'oic.r.baseline' (if)`);
}
}
t.snapshot(response);
});
});
});
test.serial('GetPlatformExpanded', t => {
return helpers.runTest(settings, t, () => {
return opent2t.invokeMethodAsync(translator, SchemaName, 'get', [true])
.then((response) => {
// GetPlatform covers the required properties, so just verify a snapshot here.
t.snapshot(response);
});
});
});
test.skip.serial('GetSubscribe', t => {
t.fail("Not Implemented");
});
test.skip.serial('PostSubscribe', t => {
t.fail("Not Implemented");
});
test.skip.serial('DeleteSubscribe', t => {
t.fail("Not Implemented");
});
}
module.exports = runTranslatorTests; | {
let entity = response.entities[i];
t.truthy(entity.icv, `Entity ${i} requires a core version (icv)`);
t.truthy(entity.dmv, `Entity ${i} requires a device model version (dmv)`);
t.truthy(entity.n, `Entity ${i} requires a friendly name (n)`);
t.truthy(entity.di, `Entity ${i} requires an id (di)`);
t.truthy(entity.di.match(uuidRegExMatch), `Entity ${i} id must be a UUID (di)`);
for(var j = 0; j < entity.resources.length; j++) {
let resource = entity.resources[j];
t.truthy(resource.href, `Resource ${i},${j} requires an href (href)`);
t.truthy(resource.rt, `Resource ${i},${j} requires an array of schemas (rt)`);
t.true(Array.isArray(resource.rt), `Resource ${i},${j} requires an array of schemas (rt)`);
t.true(resource.rt.length > 0, `Resource ${i},${j} requires an array of schemas (rt)`);
t.truthy(resource.if, `Resource ${i},${j} requires an array of interfaces (if)`);
t.true(Array.isArray(resource.if), `Resource ${i},${j} requires an array of interfaces (if)`);
t.true(resource.if.length > 0, `Resource ${i},${j} requires an array of interfaces (if)`);
| conditional_block |
tests.js | 'use strict';
var helpers = require('opent2t-testcase-helpers');
var uuidRegExMatch = /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/i;
function runTranslatorTests(settings) | t.is(typeof translator, 'object') && t.truthy(translator);
});
/**
* Verify that the ouput from GetPlatform includes all of the required properties
*/
test.serial('GetPlatform', t => {
return helpers.runTest(settings, t, () => {
return opent2t.invokeMethodAsync(translator, SchemaName, 'get', []).then((response) => {
// Verify required platform properties are present.
// This is helpful for new translators that do not have a snapshot yet.
t.truthy(response.availability, `Platform requires platform availability (availability)`);
t.truthy(response.pi, `Platform requires an id (pi)`);
t.truthy(response.pi.match(uuidRegExMatch), `Platform id must be a UUID (pi)`);
t.truthy(response.mnmn, `Platform requires a manufacturer name (mnmn)`);
t.truthy(response.mnmo, `Platform requires a model name (mnmo)`);
t.truthy(response.n, `Platform requires a friendly name (n)`);
// Verify that the platform includes the correct opent2t schema
t.not(response.rt.indexOf(SchemaName), -1, `Platform must include '${SchemaName}' in resources (rt)`);
// Verify each entity has the required properties
for(var i = 0; i < response.entities.length; i++) {
let entity = response.entities[i];
t.truthy(entity.icv, `Entity ${i} requires a core version (icv)`);
t.truthy(entity.dmv, `Entity ${i} requires a device model version (dmv)`);
t.truthy(entity.n, `Entity ${i} requires a friendly name (n)`);
t.truthy(entity.di, `Entity ${i} requires an id (di)`);
t.truthy(entity.di.match(uuidRegExMatch), `Entity ${i} id must be a UUID (di)`);
for(var j = 0; j < entity.resources.length; j++) {
let resource = entity.resources[j];
t.truthy(resource.href, `Resource ${i},${j} requires an href (href)`);
t.truthy(resource.rt, `Resource ${i},${j} requires an array of schemas (rt)`);
t.true(Array.isArray(resource.rt), `Resource ${i},${j} requires an array of schemas (rt)`);
t.true(resource.rt.length > 0, `Resource ${i},${j} requires an array of schemas (rt)`);
t.truthy(resource.if, `Resource ${i},${j} requires an array of interfaces (if)`);
t.true(Array.isArray(resource.if), `Resource ${i},${j} requires an array of interfaces (if)`);
t.true(resource.if.length > 0, `Resource ${i},${j} requires an array of interfaces (if)`);
// Check for oic.if.a XOR oic.if.s
t.true(
(resource.if.indexOf('oic.if.a') > -1) != (resource.if.indexOf('oic.if.s') > -1),
`Resource ${i},${j} requires an interface be either an actuator or a sensor (if)`
);
// And it needs oic.r.baseline too
t.true(resource.if.indexOf('oic.if.baseline') > -1, `Resource ${i},${j} requires an interface to include 'oic.r.baseline' (if)`);
}
}
t.snapshot(response);
});
});
});
test.serial('GetPlatformExpanded', t => {
return helpers.runTest(settings, t, () => {
return opent2t.invokeMethodAsync(translator, SchemaName, 'get', [true])
.then((response) => {
// GetPlatform covers the required properties, so just verify a snapshot here.
t.snapshot(response);
});
});
});
test.skip.serial('GetSubscribe', t => {
t.fail("Not Implemented");
});
test.skip.serial('PostSubscribe', t => {
t.fail("Not Implemented");
});
test.skip.serial('DeleteSubscribe', t => {
t.fail("Not Implemented");
});
}
module.exports = runTranslatorTests; | {
helpers.updateSettings(settings);
var test = settings.test;
var opent2t = settings.opent2t;
var SchemaName = settings.schemaName;
var deviceId = settings.deviceId;
var translator;
test.before(() => {
return opent2t.createTranslatorAsync(settings.translatorPath, 'thingTranslator', settings.getDeviceInfo()).then(trans => {
translator = trans;
return opent2t.invokeMethodAsync(translator, SchemaName, 'get', []).then((response) => {
if(deviceId === undefined) {
deviceId = response.entities[0].di;
}
});
});
});
test.serial('Valid Translator', t => { | identifier_body |
tests.js | 'use strict';
var helpers = require('opent2t-testcase-helpers');
var uuidRegExMatch = /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/i;
function | (settings) {
helpers.updateSettings(settings);
var test = settings.test;
var opent2t = settings.opent2t;
var SchemaName = settings.schemaName;
var deviceId = settings.deviceId;
var translator;
test.before(() => {
return opent2t.createTranslatorAsync(settings.translatorPath, 'thingTranslator', settings.getDeviceInfo()).then(trans => {
translator = trans;
return opent2t.invokeMethodAsync(translator, SchemaName, 'get', []).then((response) => {
if(deviceId === undefined) {
deviceId = response.entities[0].di;
}
});
});
});
test.serial('Valid Translator', t => {
t.is(typeof translator, 'object') && t.truthy(translator);
});
/**
* Verify that the ouput from GetPlatform includes all of the required properties
*/
test.serial('GetPlatform', t => {
return helpers.runTest(settings, t, () => {
return opent2t.invokeMethodAsync(translator, SchemaName, 'get', []).then((response) => {
// Verify required platform properties are present.
// This is helpful for new translators that do not have a snapshot yet.
t.truthy(response.availability, `Platform requires platform availability (availability)`);
t.truthy(response.pi, `Platform requires an id (pi)`);
t.truthy(response.pi.match(uuidRegExMatch), `Platform id must be a UUID (pi)`);
t.truthy(response.mnmn, `Platform requires a manufacturer name (mnmn)`);
t.truthy(response.mnmo, `Platform requires a model name (mnmo)`);
t.truthy(response.n, `Platform requires a friendly name (n)`);
// Verify that the platform includes the correct opent2t schema
t.not(response.rt.indexOf(SchemaName), -1, `Platform must include '${SchemaName}' in resources (rt)`);
// Verify each entity has the required properties
for(var i = 0; i < response.entities.length; i++) {
let entity = response.entities[i];
t.truthy(entity.icv, `Entity ${i} requires a core version (icv)`);
t.truthy(entity.dmv, `Entity ${i} requires a device model version (dmv)`);
t.truthy(entity.n, `Entity ${i} requires a friendly name (n)`);
t.truthy(entity.di, `Entity ${i} requires an id (di)`);
t.truthy(entity.di.match(uuidRegExMatch), `Entity ${i} id must be a UUID (di)`);
for(var j = 0; j < entity.resources.length; j++) {
let resource = entity.resources[j];
t.truthy(resource.href, `Resource ${i},${j} requires an href (href)`);
t.truthy(resource.rt, `Resource ${i},${j} requires an array of schemas (rt)`);
t.true(Array.isArray(resource.rt), `Resource ${i},${j} requires an array of schemas (rt)`);
t.true(resource.rt.length > 0, `Resource ${i},${j} requires an array of schemas (rt)`);
t.truthy(resource.if, `Resource ${i},${j} requires an array of interfaces (if)`);
t.true(Array.isArray(resource.if), `Resource ${i},${j} requires an array of interfaces (if)`);
t.true(resource.if.length > 0, `Resource ${i},${j} requires an array of interfaces (if)`);
// Check for oic.if.a XOR oic.if.s
t.true(
(resource.if.indexOf('oic.if.a') > -1) != (resource.if.indexOf('oic.if.s') > -1),
`Resource ${i},${j} requires an interface be either an actuator or a sensor (if)`
);
// And it needs oic.r.baseline too
t.true(resource.if.indexOf('oic.if.baseline') > -1, `Resource ${i},${j} requires an interface to include 'oic.r.baseline' (if)`);
}
}
t.snapshot(response);
});
});
});
test.serial('GetPlatformExpanded', t => {
return helpers.runTest(settings, t, () => {
return opent2t.invokeMethodAsync(translator, SchemaName, 'get', [true])
.then((response) => {
// GetPlatform covers the required properties, so just verify a snapshot here.
t.snapshot(response);
});
});
});
test.skip.serial('GetSubscribe', t => {
t.fail("Not Implemented");
});
test.skip.serial('PostSubscribe', t => {
t.fail("Not Implemented");
});
test.skip.serial('DeleteSubscribe', t => {
t.fail("Not Implemented");
});
}
module.exports = runTranslatorTests; | runTranslatorTests | identifier_name |
_state-storage.service.ts | <%#
Copyright 2013-2017 the original author or authors from the StackStack project.
This file is part of the StackStack project, see http://www.jhipster.tech/
for more information.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-%>
import { Injectable } from '@angular/core';
import { SessionStorageService } from 'ng2-webstorage';
@Injectable()
export class StateStorageService {
constructor(
private $sessionStorage: SessionStorageService
) {}
getPreviousState() {
return this.$sessionStorage.retrieve('previousState');
}
resetPreviousState() {
this.$sessionStorage.clear('previousState');
}
storePreviousState(previousStateName, previousStateParams) {
const previousState = { 'name': previousStateName, 'params': previousStateParams };
this.$sessionStorage.store('previousState', previousState);
}
getDestinationState() {
return this.$sessionStorage.retrieve('destinationState');
}
storeUrl(url: string) {
this.$sessionStorage.store('previousUrl', url);
}
getUrl() {
return this.$sessionStorage.retrieve('previousUrl'); | }
storeDestinationState(destinationState, destinationStateParams, fromState) {
const destinationInfo = {
'destination': {
'name': destinationState.name,
'data': destinationState.data,
},
'params': destinationStateParams,
'from': {
'name': fromState.name,
}
};
this.$sessionStorage.store('destinationState', destinationInfo);
}
} | random_line_split |
|
_state-storage.service.ts | <%#
Copyright 2013-2017 the original author or authors from the StackStack project.
This file is part of the StackStack project, see http://www.jhipster.tech/
for more information.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-%>
import { Injectable } from '@angular/core';
import { SessionStorageService } from 'ng2-webstorage';
@Injectable()
export class StateStorageService {
constructor(
private $sessionStorage: SessionStorageService
) {}
getPreviousState() {
return this.$sessionStorage.retrieve('previousState');
}
resetPreviousState() {
this.$sessionStorage.clear('previousState');
}
storePreviousState(previousStateName, previousStateParams) {
const previousState = { 'name': previousStateName, 'params': previousStateParams };
this.$sessionStorage.store('previousState', previousState);
}
| () {
return this.$sessionStorage.retrieve('destinationState');
}
storeUrl(url: string) {
this.$sessionStorage.store('previousUrl', url);
}
getUrl() {
return this.$sessionStorage.retrieve('previousUrl');
}
storeDestinationState(destinationState, destinationStateParams, fromState) {
const destinationInfo = {
'destination': {
'name': destinationState.name,
'data': destinationState.data,
},
'params': destinationStateParams,
'from': {
'name': fromState.name,
}
};
this.$sessionStorage.store('destinationState', destinationInfo);
}
}
| getDestinationState | identifier_name |
_state-storage.service.ts | <%#
Copyright 2013-2017 the original author or authors from the StackStack project.
This file is part of the StackStack project, see http://www.jhipster.tech/
for more information.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-%>
import { Injectable } from '@angular/core';
import { SessionStorageService } from 'ng2-webstorage';
@Injectable()
export class StateStorageService {
constructor(
private $sessionStorage: SessionStorageService
) {}
getPreviousState() {
return this.$sessionStorage.retrieve('previousState');
}
resetPreviousState() {
this.$sessionStorage.clear('previousState');
}
storePreviousState(previousStateName, previousStateParams) {
const previousState = { 'name': previousStateName, 'params': previousStateParams };
this.$sessionStorage.store('previousState', previousState);
}
getDestinationState() |
storeUrl(url: string) {
this.$sessionStorage.store('previousUrl', url);
}
getUrl() {
return this.$sessionStorage.retrieve('previousUrl');
}
storeDestinationState(destinationState, destinationStateParams, fromState) {
const destinationInfo = {
'destination': {
'name': destinationState.name,
'data': destinationState.data,
},
'params': destinationStateParams,
'from': {
'name': fromState.name,
}
};
this.$sessionStorage.store('destinationState', destinationInfo);
}
}
| {
return this.$sessionStorage.retrieve('destinationState');
} | identifier_body |
jsonInterfaces.ts |
// This corresponds to the [operator] syntactic class defined in the
// OOPSLA'15 submission. When adopting the "hybrid AST" point of view,
// an expression is decomposed as a series of tokens. The [JOperator]
// interface covers operators (assignment, comparison, boolean and
// arithmetic operators), but also *digits*.
//
// For instance, "1 + 10 = 11" will generate:
// [JOperator 1; JOperator +; JOperator 0; JOperator =; JOperator 1; JOperator 1]
export interface JOperator extends JToken { op:string; }
// A reference to a "property", i.e. something defined for an object of that
// type. There is no good way of figuring out what should the [parent] be
// when generating such properties; probably the best way is to dump a
// TouchDevelop AST.
export interface JPropertyRef extends JToken
{
name:string;
parent: JTypeRef; // if used as token this is ignored when building
// if used as JCall it's needed for operators
declId?: JNodeRef; // filled when the property is user-defined
}
export interface JStringLiteral extends JExpr {
value:string;
enumValue?:string;
}
export interface JBooleanLiteral extends JExpr { value:boolean; }
// A number literal is only used when adopting the "tree" view for
// expressions (see comment on [JExprHolder]).
export interface JNumberLiteral extends JExpr {
value:number;
// If parsing 'stringForm' yields 'value', 'stringForm' is used
// Otherwise stringified form of 'value' is used
stringForm?:string;
}
// when building expressions of these three types you can provide localId/type or name;
// if you provide both, name is ignored
export interface JLocalRef extends JExpr
{
name:string;
localId:JNodeRef;
}
export interface JPlaceholder extends JExpr
{
name:string;
type:JTypeRef;
}
// A singleton (probably) references one of the top-level categories such as
// libraries or data. When trying to call "♻ l → foo(x1, x2)", one may
// understand that the following call takes place:
// ♻ -> l -> foo(x1, x2)
// and the following AST is generated:
// JCall { name: foo, parent: l, args: [
// JCall { name: l, parent: ♻, args: [ JSingletonRef ♻ ] },
// x1,
// x2
// ]}
// this is surprising, because when calling "1 + 2", we generate a call that
// has two arguments only.
export interface JSingletonRef extends JExpr
{
name:string;
// type is ignored when building
type:JTypeRef;
libraryName?:string; // if this is a reference to a namespace in a library, this gives the name of library
}
// It seems like TouchDevelop has an extra invariant that a [JCall] must
// *always* be wrapped in a [JExprHolder].
export interface JCall extends JPropertyRef, JExpr
{
args:JExpr[];
// If we are calling a *type* T on an expression (e.g. create ->
// Collection of -> T), then T will be in there.
typeArgs?: JTypeRef[];
// The field below, if present, determines without ambiguity the nature
// of the call.
// - extension (the new special syntax)
// - field (reading a record field)
// Other types of calls can be determined by careful inspection of the
// receiver. See the C++ code emitter.
callType?: string;
}
// Expressions can be represented in two different manners.
// - The first one is as a series of tokens. This would correspond to the
// "hybrid AST" described in the OOPSLA'15 submission. In that
// representation, the [tree] field is null and the [tokens] field
// contains the list of tokens.
// - The second one is as an actual AST, with a proper tree structure. In
// that case, the [tokens] field is null and [tree] must contain a proper
// tree.
//
// TouchDevelop conflates variable binding and expressions. This means that
// every expression is flagged with the variables that are introduced at
// this stage. For instance, "var x = 1" will be translated as a
// [JExprHolder] where [locals] contains a [JLocalDef x], and either:
// - [tokens] is [JLocalRef x; JOperator :=; JOperator 1], or
// - [tree] is [JCall { name: ":=", parent: "Unknown", args: [JLocalRef x, JNumberLiteral 1] }]
//
// This is not the traditional notion of binding! The variable's scope is
// not limited to the tokens, but rather extends until the end of the parent
// block.
export interface JExprHolder extends JNode
{
// if tokens is unset, will try to use tree
tokens:JToken[];
tree:JExpr;
locals:JLocalDef[]; // locals variables defined in this expression
}
/*abstract*/ export interface JStmt extends JNode
{
// this is available when using the short form
locals?: JLocalDef[];
}
export interface JComment extends JStmt { text: string; }
export interface JFor extends JStmt
{
index:JLocalDef;
bound:JExprHolder;
body:JStmt[];
}
export interface JForeach extends JStmt
{
iterator:JLocalDef;
collection:JExprHolder;
conditions:JCondition[];
body:JStmt[];
}
/*abstract*/ export interface JCondition extends JNode {
// this is available when using the short form
locals?: JLocalDef[];
}
export interface JWhere extends JCondition { condition: JExprHolder; }
export interface JWhile extends JStmt
{
condition:JExprHolder;
body:JStmt[];
}
export interface JContinue extends JStmt {}
export interface JBreak extends JStmt {}
export interface JReturn extends JExprStmt {}
export interface JShow extends JExprStmt {}
// Sequences of if / else if / else statements are not represented the usual
// way. That is, instead of having a structured AST:
//
// if
// |- condition1
// |- then-branch1 = ...
// |- else-branch = if
// |- condition2
// |- then-branch2
// |- else-branch2
//
// the TouchDevelop AST adopts the following (unusual) representation.
//
// if
// |- condition1
// |- then-branch1 = ...
// |- else-branch = null
// if
// |- condition2
// |- then-branch2
// |- else-branch2
// |- isElseIf = true
//
// This is NOT equivalent to the representation above (condition2 may
// subsume condition1), so the extra flag "isElseIf" is set and (I suppose)
// gets some special treatment when it comes to running / compiling the
// program.
export interface JIf extends JStmt
{
condition:JExprHolder;
thenBody:JStmt[];
elseBody:JStmt[];
isElseIf:boolean;
}
export interface JBoxed extends JStmt { body:JStmt[]; }
export interface JExprStmt extends JStmt { expr:JExprHolder; }
export interface JInlineActions extends JExprStmt { actions:JInlineAction[]; }
export interface JInlineAction extends JNode
{
reference:JLocalDef;
inParameters:JLocalDef[];
outParameters:JLocalDef[];
body:JStmt[];
locals?:JLocalDef[]; // this contains the reference in short mode; it never contains anything else
isImplicit:boolean;
isOptional:boolean;
}
export interface JOptionalParameter extends JNode
{
name:string;
declId:JNodeRef;
expr:JExprHolder;
}
/*abstract*/ export interface JActionBase extends JDecl
{
inParameters:JLocalDef[];
outParameters:JLocalDef[];
// note that events should be always treated as private, but for historical reasons this field can be true or false
isPrivate:boolean;
isOffline: boolean;
isQuery: boolean;
isTest: boolean;
isAsync:boolean;
description: string;
}
export interface JActionType extends JActionBase
{
}
export interface JAction extends JActionBase { body: JStmt[]; }
export interface JPage extends JActionBase
{
initBody:JStmt[];
displayBody:JStmt[];
| /*abstract*/ export interface JExpr extends JToken { } | random_line_split |
|
util.py | from __future__ import absolute_import, division, print_function
import contextlib
import os
import platform
import socket
import sys
import textwrap
from tornado.testing import bind_unused_port
# Delegate the choice of unittest or unittest2 to tornado.testing.
from tornado.testing import unittest
skipIfNonUnix = unittest.skipIf(os.name != 'posix' or sys.platform == 'cygwin',
"non-unix platform")
# travis-ci.org runs our tests in an overworked virtual machine, which makes
# timing-related tests unreliable.
skipOnTravis = unittest.skipIf('TRAVIS' in os.environ,
'timing tests unreliable on travis')
skipOnAppEngine = unittest.skipIf('APPENGINE_RUNTIME' in os.environ,
'not available on Google App Engine')
# Set the environment variable NO_NETWORK=1 to disable any tests that
# depend on an external network.
skipIfNoNetwork = unittest.skipIf('NO_NETWORK' in os.environ,
'network access disabled')
skipBefore33 = unittest.skipIf(sys.version_info < (3, 3), 'PEP 380 (yield from) not available')
skipBefore35 = unittest.skipIf(sys.version_info < (3, 5), 'PEP 492 (async/await) not available')
skipNotCPython = unittest.skipIf(platform.python_implementation() != 'CPython',
'Not CPython implementation')
# Used for tests affected by
# https://bitbucket.org/pypy/pypy/issues/2616/incomplete-error-handling-in
# TODO: remove this after pypy3 5.8 is obsolete.
skipPypy3V58 = unittest.skipIf(platform.python_implementation() == 'PyPy' and
sys.version_info > (3,) and
sys.pypy_version_info < (5, 9),
'pypy3 5.8 has buggy ssl module')
def _detect_ipv6():
if not socket.has_ipv6:
# socket.has_ipv6 check reports whether ipv6 was present at compile
# time. It's usually true even when ipv6 doesn't work for other reasons.
return False
sock = None
try:
sock = socket.socket(socket.AF_INET6)
sock.bind(('::1', 0))
except socket.error:
return False
finally:
if sock is not None:
sock.close()
return True
skipIfNoIPv6 = unittest.skipIf(not _detect_ipv6(), 'ipv6 support not present')
def refusing_port():
"""Returns a local port number that will refuse all connections.
Return value is (cleanup_func, port); the cleanup function
must be called to free the port to be reused.
"""
# On travis-ci, port numbers are reassigned frequently. To avoid
# collisions with other tests, we use an open client-side socket's
# ephemeral port number to ensure that nothing can listen on that
# port.
server_socket, port = bind_unused_port()
server_socket.setblocking(1)
client_socket = socket.socket()
client_socket.connect(("127.0.0.1", port))
conn, client_addr = server_socket.accept()
conn.close()
server_socket.close()
return (client_socket.close, client_addr[1])
def exec_test(caller_globals, caller_locals, s):
"""Execute ``s`` in a given context and return the result namespace.
Used to define functions for tests in particular python
versions that would be syntax errors in older versions.
"""
# Flatten the real global and local namespace into our fake
# globals: it's all global from the perspective of code defined
# in s.
global_namespace = dict(caller_globals, **caller_locals) # type: ignore
local_namespace = {}
exec(textwrap.dedent(s), global_namespace, local_namespace)
return local_namespace
def | ():
"""Return whether coverage is currently running.
"""
if 'coverage' not in sys.modules:
return False
tracer = sys.gettrace()
if tracer is None:
return False
try:
mod = tracer.__module__
except AttributeError:
try:
mod = tracer.__class__.__module__
except AttributeError:
return False
return mod.startswith('coverage')
def subTest(test, *args, **kwargs):
"""Compatibility shim for unittest.TestCase.subTest.
Usage: ``with tornado.test.util.subTest(self, x=x):``
"""
try:
subTest = test.subTest # py34+
except AttributeError:
subTest = contextlib.contextmanager(lambda *a, **kw: (yield))
return subTest(*args, **kwargs)
| is_coverage_running | identifier_name |
util.py | from __future__ import absolute_import, division, print_function
import contextlib
import os
import platform
import socket
import sys
import textwrap
from tornado.testing import bind_unused_port
# Delegate the choice of unittest or unittest2 to tornado.testing.
from tornado.testing import unittest
skipIfNonUnix = unittest.skipIf(os.name != 'posix' or sys.platform == 'cygwin',
"non-unix platform")
# travis-ci.org runs our tests in an overworked virtual machine, which makes
# timing-related tests unreliable.
skipOnTravis = unittest.skipIf('TRAVIS' in os.environ,
'timing tests unreliable on travis')
skipOnAppEngine = unittest.skipIf('APPENGINE_RUNTIME' in os.environ,
'not available on Google App Engine')
# Set the environment variable NO_NETWORK=1 to disable any tests that
# depend on an external network.
skipIfNoNetwork = unittest.skipIf('NO_NETWORK' in os.environ,
'network access disabled')
skipBefore33 = unittest.skipIf(sys.version_info < (3, 3), 'PEP 380 (yield from) not available')
skipBefore35 = unittest.skipIf(sys.version_info < (3, 5), 'PEP 492 (async/await) not available')
skipNotCPython = unittest.skipIf(platform.python_implementation() != 'CPython',
'Not CPython implementation')
# Used for tests affected by
# https://bitbucket.org/pypy/pypy/issues/2616/incomplete-error-handling-in
# TODO: remove this after pypy3 5.8 is obsolete.
skipPypy3V58 = unittest.skipIf(platform.python_implementation() == 'PyPy' and
sys.version_info > (3,) and
sys.pypy_version_info < (5, 9),
'pypy3 5.8 has buggy ssl module')
def _detect_ipv6():
if not socket.has_ipv6:
# socket.has_ipv6 check reports whether ipv6 was present at compile
# time. It's usually true even when ipv6 doesn't work for other reasons.
return False
sock = None
try:
sock = socket.socket(socket.AF_INET6)
sock.bind(('::1', 0))
except socket.error:
return False
finally:
if sock is not None:
sock.close()
return True
skipIfNoIPv6 = unittest.skipIf(not _detect_ipv6(), 'ipv6 support not present')
def refusing_port():
"""Returns a local port number that will refuse all connections.
Return value is (cleanup_func, port); the cleanup function
must be called to free the port to be reused.
"""
# On travis-ci, port numbers are reassigned frequently. To avoid
# collisions with other tests, we use an open client-side socket's
# ephemeral port number to ensure that nothing can listen on that
# port.
server_socket, port = bind_unused_port()
server_socket.setblocking(1)
client_socket = socket.socket()
client_socket.connect(("127.0.0.1", port))
conn, client_addr = server_socket.accept()
conn.close()
server_socket.close()
return (client_socket.close, client_addr[1])
def exec_test(caller_globals, caller_locals, s):
"""Execute ``s`` in a given context and return the result namespace.
Used to define functions for tests in particular python
versions that would be syntax errors in older versions.
"""
# Flatten the real global and local namespace into our fake
# globals: it's all global from the perspective of code defined
# in s.
global_namespace = dict(caller_globals, **caller_locals) # type: ignore
local_namespace = {}
exec(textwrap.dedent(s), global_namespace, local_namespace)
return local_namespace
def is_coverage_running():
|
def subTest(test, *args, **kwargs):
"""Compatibility shim for unittest.TestCase.subTest.
Usage: ``with tornado.test.util.subTest(self, x=x):``
"""
try:
subTest = test.subTest # py34+
except AttributeError:
subTest = contextlib.contextmanager(lambda *a, **kw: (yield))
return subTest(*args, **kwargs)
| """Return whether coverage is currently running.
"""
if 'coverage' not in sys.modules:
return False
tracer = sys.gettrace()
if tracer is None:
return False
try:
mod = tracer.__module__
except AttributeError:
try:
mod = tracer.__class__.__module__
except AttributeError:
return False
return mod.startswith('coverage') | identifier_body |
util.py | from __future__ import absolute_import, division, print_function
import contextlib
import os
import platform
import socket
import sys
import textwrap
from tornado.testing import bind_unused_port
# Delegate the choice of unittest or unittest2 to tornado.testing.
from tornado.testing import unittest
skipIfNonUnix = unittest.skipIf(os.name != 'posix' or sys.platform == 'cygwin',
"non-unix platform")
# travis-ci.org runs our tests in an overworked virtual machine, which makes
# timing-related tests unreliable.
skipOnTravis = unittest.skipIf('TRAVIS' in os.environ,
'timing tests unreliable on travis')
skipOnAppEngine = unittest.skipIf('APPENGINE_RUNTIME' in os.environ,
'not available on Google App Engine')
# Set the environment variable NO_NETWORK=1 to disable any tests that
# depend on an external network.
skipIfNoNetwork = unittest.skipIf('NO_NETWORK' in os.environ,
'network access disabled')
skipBefore33 = unittest.skipIf(sys.version_info < (3, 3), 'PEP 380 (yield from) not available')
skipBefore35 = unittest.skipIf(sys.version_info < (3, 5), 'PEP 492 (async/await) not available')
skipNotCPython = unittest.skipIf(platform.python_implementation() != 'CPython',
'Not CPython implementation')
# Used for tests affected by
# https://bitbucket.org/pypy/pypy/issues/2616/incomplete-error-handling-in
# TODO: remove this after pypy3 5.8 is obsolete.
skipPypy3V58 = unittest.skipIf(platform.python_implementation() == 'PyPy' and
sys.version_info > (3,) and
sys.pypy_version_info < (5, 9),
'pypy3 5.8 has buggy ssl module')
def _detect_ipv6():
if not socket.has_ipv6:
# socket.has_ipv6 check reports whether ipv6 was present at compile | sock = socket.socket(socket.AF_INET6)
sock.bind(('::1', 0))
except socket.error:
return False
finally:
if sock is not None:
sock.close()
return True
skipIfNoIPv6 = unittest.skipIf(not _detect_ipv6(), 'ipv6 support not present')
def refusing_port():
"""Returns a local port number that will refuse all connections.
Return value is (cleanup_func, port); the cleanup function
must be called to free the port to be reused.
"""
# On travis-ci, port numbers are reassigned frequently. To avoid
# collisions with other tests, we use an open client-side socket's
# ephemeral port number to ensure that nothing can listen on that
# port.
server_socket, port = bind_unused_port()
server_socket.setblocking(1)
client_socket = socket.socket()
client_socket.connect(("127.0.0.1", port))
conn, client_addr = server_socket.accept()
conn.close()
server_socket.close()
return (client_socket.close, client_addr[1])
def exec_test(caller_globals, caller_locals, s):
"""Execute ``s`` in a given context and return the result namespace.
Used to define functions for tests in particular python
versions that would be syntax errors in older versions.
"""
# Flatten the real global and local namespace into our fake
# globals: it's all global from the perspective of code defined
# in s.
global_namespace = dict(caller_globals, **caller_locals) # type: ignore
local_namespace = {}
exec(textwrap.dedent(s), global_namespace, local_namespace)
return local_namespace
def is_coverage_running():
"""Return whether coverage is currently running.
"""
if 'coverage' not in sys.modules:
return False
tracer = sys.gettrace()
if tracer is None:
return False
try:
mod = tracer.__module__
except AttributeError:
try:
mod = tracer.__class__.__module__
except AttributeError:
return False
return mod.startswith('coverage')
def subTest(test, *args, **kwargs):
"""Compatibility shim for unittest.TestCase.subTest.
Usage: ``with tornado.test.util.subTest(self, x=x):``
"""
try:
subTest = test.subTest # py34+
except AttributeError:
subTest = contextlib.contextmanager(lambda *a, **kw: (yield))
return subTest(*args, **kwargs) | # time. It's usually true even when ipv6 doesn't work for other reasons.
return False
sock = None
try: | random_line_split |
util.py | from __future__ import absolute_import, division, print_function
import contextlib
import os
import platform
import socket
import sys
import textwrap
from tornado.testing import bind_unused_port
# Delegate the choice of unittest or unittest2 to tornado.testing.
from tornado.testing import unittest
skipIfNonUnix = unittest.skipIf(os.name != 'posix' or sys.platform == 'cygwin',
"non-unix platform")
# travis-ci.org runs our tests in an overworked virtual machine, which makes
# timing-related tests unreliable.
skipOnTravis = unittest.skipIf('TRAVIS' in os.environ,
'timing tests unreliable on travis')
skipOnAppEngine = unittest.skipIf('APPENGINE_RUNTIME' in os.environ,
'not available on Google App Engine')
# Set the environment variable NO_NETWORK=1 to disable any tests that
# depend on an external network.
skipIfNoNetwork = unittest.skipIf('NO_NETWORK' in os.environ,
'network access disabled')
skipBefore33 = unittest.skipIf(sys.version_info < (3, 3), 'PEP 380 (yield from) not available')
skipBefore35 = unittest.skipIf(sys.version_info < (3, 5), 'PEP 492 (async/await) not available')
skipNotCPython = unittest.skipIf(platform.python_implementation() != 'CPython',
'Not CPython implementation')
# Used for tests affected by
# https://bitbucket.org/pypy/pypy/issues/2616/incomplete-error-handling-in
# TODO: remove this after pypy3 5.8 is obsolete.
skipPypy3V58 = unittest.skipIf(platform.python_implementation() == 'PyPy' and
sys.version_info > (3,) and
sys.pypy_version_info < (5, 9),
'pypy3 5.8 has buggy ssl module')
def _detect_ipv6():
if not socket.has_ipv6:
# socket.has_ipv6 check reports whether ipv6 was present at compile
# time. It's usually true even when ipv6 doesn't work for other reasons.
return False
sock = None
try:
sock = socket.socket(socket.AF_INET6)
sock.bind(('::1', 0))
except socket.error:
return False
finally:
if sock is not None:
sock.close()
return True
skipIfNoIPv6 = unittest.skipIf(not _detect_ipv6(), 'ipv6 support not present')
def refusing_port():
"""Returns a local port number that will refuse all connections.
Return value is (cleanup_func, port); the cleanup function
must be called to free the port to be reused.
"""
# On travis-ci, port numbers are reassigned frequently. To avoid
# collisions with other tests, we use an open client-side socket's
# ephemeral port number to ensure that nothing can listen on that
# port.
server_socket, port = bind_unused_port()
server_socket.setblocking(1)
client_socket = socket.socket()
client_socket.connect(("127.0.0.1", port))
conn, client_addr = server_socket.accept()
conn.close()
server_socket.close()
return (client_socket.close, client_addr[1])
def exec_test(caller_globals, caller_locals, s):
"""Execute ``s`` in a given context and return the result namespace.
Used to define functions for tests in particular python
versions that would be syntax errors in older versions.
"""
# Flatten the real global and local namespace into our fake
# globals: it's all global from the perspective of code defined
# in s.
global_namespace = dict(caller_globals, **caller_locals) # type: ignore
local_namespace = {}
exec(textwrap.dedent(s), global_namespace, local_namespace)
return local_namespace
def is_coverage_running():
"""Return whether coverage is currently running.
"""
if 'coverage' not in sys.modules:
|
tracer = sys.gettrace()
if tracer is None:
return False
try:
mod = tracer.__module__
except AttributeError:
try:
mod = tracer.__class__.__module__
except AttributeError:
return False
return mod.startswith('coverage')
def subTest(test, *args, **kwargs):
"""Compatibility shim for unittest.TestCase.subTest.
Usage: ``with tornado.test.util.subTest(self, x=x):``
"""
try:
subTest = test.subTest # py34+
except AttributeError:
subTest = contextlib.contextmanager(lambda *a, **kw: (yield))
return subTest(*args, **kwargs)
| return False | conditional_block |
driver_decl.py | see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Declarator for registering drivers.
"""
from atom.api import Str, Dict, Property, Enum
from enaml.core.api import d_
from ...utils.traceback import format_exc
from ...utils.declarator import Declarator, GroupDeclarator, import_and_get
from ..infos import DriverInfos, INSTRUMENT_KINDS
class Driver(Declarator):
"""Declarator used to register a new driver for an instrument.
"""
#: Path to the driver object. Path should be dot separated and the class
#: name preceded by ':'.
#: TODO complete : ex: exopy_hqc_legacy.instruments.
#: The path of any parent Drivers object will be prepended to it.
driver = d_(Str())
#: Name identifying the system the driver is built on top of (lantz, hqc,
#: slave, etc ...). Allow to handle properly multiple drivers declared in
#: a single extension package for the same instrument.
architecture = d_(Str())
#: Name of the instrument manufacturer. Can be inferred from parent
#: Drivers.
manufacturer = d_(Str())
#: Serie this instrument is part of. This is optional as it does not always
#: make sense to be specified but in some cases it can help finding a
#: a driver. Can be inferred from parent Drivers.
serie = d_(Str())
#: Model of the instrument this driver has been written for.
model = d_(Str())
#: Kind of the instrument, to ease instrument look up. If no kind match,
#: leave 'Other' as the kind. Can be inferred from parent
#: Drivers.
kind = d_(Enum(None, *INSTRUMENT_KINDS))
#: Starter to use when initializing/finialzing this driver.
#: Can be inferred from parent Drivers.
starter = d_(Str())
#: Supported connections and default values for some parameters. The
#: admissible values for a given kind can be determined by looking at the
#: Connection object whose id match.
#: ex : {'VisaTCPIP' : {'port': 7500, 'resource_class': 'SOCKET'}}
#: Can be inferred from parent Drivers.
connections = d_(Dict())
#: Special settings for the driver, not fitting the connections. Multiple
#: identical connection infos with different settings can co-exist in a
#: profile. The admissible values for a given kind can be determined by
#: looking at the Settings object whose id match.
#: ex : {'lantz': {'resource_manager': '@py'}}
#: Can be inferred from parent Drivers.
settings = d_(Dict())
#: Id of the driver computed from the top-level package and the driver name
id = Property(cached=True)
def register(self, collector, traceback):
"""Collect driver and add infos to the DeclaratorCollector
contributions member.
"""
# Build the driver id by assembling the package name, the architecture
# and the class name
try:
driver_id = self.id
except KeyError: # Handle the lack of architecture
traceback[self.driver] = format_exc()
return
# Determine the path to the driver.
path = self.get_path()
try:
d_path, driver = (path + '.' + self.driver
if path else self.driver).split(':')
except ValueError:
msg = 'Incorrect %s (%s), path must be of the form a.b.c:Class'
traceback[driver_id] = msg % (driver_id, self.driver)
return
# Check that the driver does not already exist.
if driver_id in collector.contributions or driver_id in traceback:
i = 0
while True:
i += 1
err_id = '%s_duplicate%d' % (driver_id, i)
if err_id not in traceback:
break
msg = 'Duplicate definition of {}, found in {}'
traceback[err_id] = msg.format(self.architecture + '.' + driver,
d_path)
return
try:
meta_infos = {k: getattr(self, k)
for k in ('architecture', 'manufacturer', 'serie',
'model', 'kind')
}
infos = DriverInfos(id=driver_id,
infos=meta_infos,
starter=self.starter,
connections=self.connections,
settings=self.settings)
# ValueError catch wrong kind value
except (KeyError, ValueError):
traceback[driver_id] = format_exc()
return
# Get the driver class.
d_cls = import_and_get(d_path, driver, traceback, driver_id)
if d_cls is None:
return
try:
infos.cls = d_cls
except TypeError:
msg = '{} should be a callable.\n{}'
traceback[driver_id] = msg.format(d_cls, format_exc())
return
collector.contributions[driver_id] = infos
self.is_registered = True
def unregister(self, collector):
"""Remove contributed infos from the collector.
"""
if self.is_registered:
# Remove infos.
driver_id = self.id
try:
del collector.contributions[driver_id]
except KeyError:
pass
self.is_registered = False
def | (self):
"""Identify the decl by its members.
"""
members = ('driver', 'architecture', 'manufacturer', 'serie', 'model',
'kind', 'starter', 'connections', 'settings')
st = '{} whose known members are :\n{}'
st_m = '\n'.join(' - {} : {}'.format(m, v)
for m, v in [(m, getattr(self, m)) for m in members]
)
return st.format(type(self).__name__, st_m)
# =========================================================================
# --- Private API ---------------------------------------------------------
# =========================================================================
def _default_manufacturer(self):
"""Default value grabbed from parent if not provided explicitely.
"""
return self._get_inherited_member('manufacturer')
def _default_serie(self):
"""Default value grabbed from parent if not provided explicitely.
"""
return self._get_inherited_member('serie')
def _default_kind(self):
"""Default value grabbed from parent if not provided explicitely.
"""
return self._get_inherited_member('kind')
def _default_architecture(self):
"""Default value grabbed from parent if not provided explicitely.
"""
return self._get_inherited_member('architecture')
def _default_starter(self):
"""Default value grabbed from parent if not provided explicitely.
"""
return self._get_inherited_member('starter')
def _default_connections(self):
"""Default value grabbed from parent if not provided explicitely.
"""
return self._get_inherited_member('connections')
def _default_settings(self):
"""Default value grabbed from parent if not provided explicitely.
"""
return self._get_inherited_member('settings')
def _get_inherited_member(self, member, parent=None):
"""Get the value of a member found in a parent declarator.
"""
parent = parent or self.parent
if isinstance(parent, Drivers):
value = getattr(parent, member)
if value:
return value
else:
parent = parent.parent
else:
parent = None
if parent is None:
if member == 'settings':
return {} # Settings can be empty
elif member == 'serie':
return '' # An instrument can have no serie
elif member == 'kind':
return 'Other'
raise KeyError('No inherited member was found for %s' %
member)
return self._get_inherited_member(member, parent)
def _get_id(self):
"""Create the unique identifier of the driver using the top level
package the architecture and the class name.
"""
if ':' in self.driver:
path = self.get_path()
d_path, d = (path + '.' + self.driver
if path else self.driver).split(':')
# Build the driver id by assembling the package name, architecture
# and the class name
return '.'.join((d_path.split('.', 1)[0], self.architecture, d))
else:
return self.driver
class Drivers(GroupDeclarator):
"""Declarator to group driver declarations.
For the full documentation of the members values please the Driver class.
"""
#: Name identifying the system the driver is built on top of for the
#: declared children.
architecture = d_(Str())
#: Instrument manufacturer of the declared children.
manufacturer = d_(Str())
#: Serie of the declared children.
serie = d_(Str())
#: Kind of the declared children.
kind = d_(Enum(None, *INSTRUMENT_KINDS))
#: Starter to use for the declared children.
starter = d_(Str())
#: Supported connections of the declared children.
connections = d_(Dict())
#: Settings of the declared children.
settings = d_(Dict())
def __str__(self):
"""Identify the group by its mmebers and declared children.
"""
members = ('path', 'architecture', 'manufacturer', 'serie', 'kind',
'starter', 'connections', 'settings')
st = '{} whose known members are :\n{}\n and declaring :\n{}'
st_m = '\n'.join(' - {} | __str__ | identifier_name |
driver_decl.py | see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Declarator for registering drivers.
"""
from atom.api import Str, Dict, Property, Enum
from enaml.core.api import d_
from ...utils.traceback import format_exc
from ...utils.declarator import Declarator, GroupDeclarator, import_and_get
from ..infos import DriverInfos, INSTRUMENT_KINDS
class Driver(Declarator):
"""Declarator used to register a new driver for an instrument.
"""
#: Path to the driver object. Path should be dot separated and the class
#: name preceded by ':'.
#: TODO complete : ex: exopy_hqc_legacy.instruments.
#: The path of any parent Drivers object will be prepended to it.
driver = d_(Str())
#: Name identifying the system the driver is built on top of (lantz, hqc,
#: slave, etc ...). Allow to handle properly multiple drivers declared in
#: a single extension package for the same instrument.
architecture = d_(Str())
#: Name of the instrument manufacturer. Can be inferred from parent
#: Drivers.
manufacturer = d_(Str())
#: Serie this instrument is part of. This is optional as it does not always
#: make sense to be specified but in some cases it can help finding a
#: a driver. Can be inferred from parent Drivers.
serie = d_(Str())
#: Model of the instrument this driver has been written for.
model = d_(Str())
#: Kind of the instrument, to ease instrument look up. If no kind match,
#: leave 'Other' as the kind. Can be inferred from parent
#: Drivers.
kind = d_(Enum(None, *INSTRUMENT_KINDS))
#: Starter to use when initializing/finialzing this driver.
#: Can be inferred from parent Drivers.
starter = d_(Str())
#: Supported connections and default values for some parameters. The
#: admissible values for a given kind can be determined by looking at the
#: Connection object whose id match.
#: ex : {'VisaTCPIP' : {'port': 7500, 'resource_class': 'SOCKET'}}
#: Can be inferred from parent Drivers.
connections = d_(Dict())
#: Special settings for the driver, not fitting the connections. Multiple
#: identical connection infos with different settings can co-exist in a
#: profile. The admissible values for a given kind can be determined by
#: looking at the Settings object whose id match.
#: ex : {'lantz': {'resource_manager': '@py'}}
#: Can be inferred from parent Drivers.
settings = d_(Dict())
#: Id of the driver computed from the top-level package and the driver name
id = Property(cached=True)
def register(self, collector, traceback):
"""Collect driver and add infos to the DeclaratorCollector
contributions member.
"""
# Build the driver id by assembling the package name, the architecture
# and the class name
try:
driver_id = self.id
except KeyError: # Handle the lack of architecture
traceback[self.driver] = format_exc()
return
# Determine the path to the driver.
path = self.get_path()
try:
d_path, driver = (path + '.' + self.driver
if path else self.driver).split(':')
except ValueError:
msg = 'Incorrect %s (%s), path must be of the form a.b.c:Class'
traceback[driver_id] = msg % (driver_id, self.driver)
return
# Check that the driver does not already exist.
if driver_id in collector.contributions or driver_id in traceback:
i = 0
while True:
i += 1
err_id = '%s_duplicate%d' % (driver_id, i)
if err_id not in traceback:
break
msg = 'Duplicate definition of {}, found in {}'
traceback[err_id] = msg.format(self.architecture + '.' + driver,
d_path)
return
try:
meta_infos = {k: getattr(self, k)
for k in ('architecture', 'manufacturer', 'serie',
'model', 'kind')
}
infos = DriverInfos(id=driver_id,
infos=meta_infos,
starter=self.starter,
connections=self.connections,
settings=self.settings)
# ValueError catch wrong kind value
except (KeyError, ValueError):
traceback[driver_id] = format_exc()
return
# Get the driver class.
d_cls = import_and_get(d_path, driver, traceback, driver_id)
if d_cls is None:
return
try:
infos.cls = d_cls
except TypeError:
msg = '{} should be a callable.\n{}'
traceback[driver_id] = msg.format(d_cls, format_exc())
return
collector.contributions[driver_id] = infos
self.is_registered = True
def unregister(self, collector):
"""Remove contributed infos from the collector.
"""
if self.is_registered:
# Remove infos.
driver_id = self.id
try:
del collector.contributions[driver_id]
except KeyError:
pass
self.is_registered = False
def __str__(self):
"""Identify the decl by its members.
"""
members = ('driver', 'architecture', 'manufacturer', 'serie', 'model',
'kind', 'starter', 'connections', 'settings')
st = '{} whose known members are :\n{}'
st_m = '\n'.join(' - {} : {}'.format(m, v)
for m, v in [(m, getattr(self, m)) for m in members]
)
return st.format(type(self).__name__, st_m)
# =========================================================================
# --- Private API ---------------------------------------------------------
# =========================================================================
def _default_manufacturer(self):
"""Default value grabbed from parent if not provided explicitely.
"""
return self._get_inherited_member('manufacturer')
def _default_serie(self):
"""Default value grabbed from parent if not provided explicitely.
"""
return self._get_inherited_member('serie')
def _default_kind(self):
"""Default value grabbed from parent if not provided explicitely.
"""
return self._get_inherited_member('kind')
def _default_architecture(self):
"""Default value grabbed from parent if not provided explicitely.
"""
return self._get_inherited_member('architecture')
def _default_starter(self):
"""Default value grabbed from parent if not provided explicitely.
"""
return self._get_inherited_member('starter')
def _default_connections(self):
"""Default value grabbed from parent if not provided explicitely.
"""
return self._get_inherited_member('connections')
def _default_settings(self):
"""Default value grabbed from parent if not provided explicitely.
"""
return self._get_inherited_member('settings')
def _get_inherited_member(self, member, parent=None):
"""Get the value of a member found in a parent declarator.
"""
parent = parent or self.parent
if isinstance(parent, Drivers):
value = getattr(parent, member)
if value:
return value
else:
parent = parent.parent
else:
parent = None
if parent is None:
if member == 'settings':
return {} # Settings can be empty
elif member == 'serie':
return '' # An instrument can have no serie
elif member == 'kind':
return 'Other'
raise KeyError('No inherited member was found for %s' %
member)
return self._get_inherited_member(member, parent)
def _get_id(self):
|
class Drivers(GroupDeclarator):
"""Declarator to group driver declarations.
For the full documentation of the members values please the Driver class.
"""
#: Name identifying the system the driver is built on top of for the
#: declared children.
architecture = d_(Str())
#: Instrument manufacturer of the declared children.
manufacturer = d_(Str())
#: Serie of the declared children.
serie = d_(Str())
#: Kind of the declared children.
kind = d_(Enum(None, *INSTRUMENT_KINDS))
#: Starter to use for the declared children.
starter = d_(Str())
#: Supported connections of the declared children.
connections = d_(Dict())
#: Settings of the declared children.
settings = d_(Dict())
def __str__(self):
"""Identify the group by its mmebers and declared children.
"""
members = ('path', 'architecture', 'manufacturer', 'serie', 'kind',
'starter', 'connections', 'settings')
st = '{} whose known members are :\n{}\n and declaring :\n{}'
st_m = '\n'.join(' - {} : | """Create the unique identifier of the driver using the top level
package the architecture and the class name.
"""
if ':' in self.driver:
path = self.get_path()
d_path, d = (path + '.' + self.driver
if path else self.driver).split(':')
# Build the driver id by assembling the package name, architecture
# and the class name
return '.'.join((d_path.split('.', 1)[0], self.architecture, d))
else:
return self.driver | identifier_body |
driver_decl.py | see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Declarator for registering drivers.
"""
from atom.api import Str, Dict, Property, Enum
from enaml.core.api import d_
from ...utils.traceback import format_exc
from ...utils.declarator import Declarator, GroupDeclarator, import_and_get
from ..infos import DriverInfos, INSTRUMENT_KINDS
class Driver(Declarator):
"""Declarator used to register a new driver for an instrument.
"""
#: Path to the driver object. Path should be dot separated and the class
#: name preceded by ':'.
#: TODO complete : ex: exopy_hqc_legacy.instruments.
#: The path of any parent Drivers object will be prepended to it.
driver = d_(Str())
#: Name identifying the system the driver is built on top of (lantz, hqc,
#: slave, etc ...). Allow to handle properly multiple drivers declared in
#: a single extension package for the same instrument.
architecture = d_(Str())
#: Name of the instrument manufacturer. Can be inferred from parent
#: Drivers.
manufacturer = d_(Str())
#: Serie this instrument is part of. This is optional as it does not always
#: make sense to be specified but in some cases it can help finding a
#: a driver. Can be inferred from parent Drivers.
serie = d_(Str())
#: Model of the instrument this driver has been written for.
model = d_(Str())
#: Kind of the instrument, to ease instrument look up. If no kind match,
#: leave 'Other' as the kind. Can be inferred from parent
#: Drivers.
kind = d_(Enum(None, *INSTRUMENT_KINDS))
#: Starter to use when initializing/finialzing this driver.
#: Can be inferred from parent Drivers.
starter = d_(Str())
#: Supported connections and default values for some parameters. The
#: admissible values for a given kind can be determined by looking at the
#: Connection object whose id match.
#: ex : {'VisaTCPIP' : {'port': 7500, 'resource_class': 'SOCKET'}}
#: Can be inferred from parent Drivers.
connections = d_(Dict())
#: Special settings for the driver, not fitting the connections. Multiple
#: identical connection infos with different settings can co-exist in a
#: profile. The admissible values for a given kind can be determined by
#: looking at the Settings object whose id match.
#: ex : {'lantz': {'resource_manager': '@py'}}
#: Can be inferred from parent Drivers.
settings = d_(Dict())
#: Id of the driver computed from the top-level package and the driver name
id = Property(cached=True)
def register(self, collector, traceback):
"""Collect driver and add infos to the DeclaratorCollector
contributions member.
"""
# Build the driver id by assembling the package name, the architecture
# and the class name
try:
driver_id = self.id
except KeyError: # Handle the lack of architecture
traceback[self.driver] = format_exc() | path = self.get_path()
try:
d_path, driver = (path + '.' + self.driver
if path else self.driver).split(':')
except ValueError:
msg = 'Incorrect %s (%s), path must be of the form a.b.c:Class'
traceback[driver_id] = msg % (driver_id, self.driver)
return
# Check that the driver does not already exist.
if driver_id in collector.contributions or driver_id in traceback:
i = 0
while True:
i += 1
err_id = '%s_duplicate%d' % (driver_id, i)
if err_id not in traceback:
break
msg = 'Duplicate definition of {}, found in {}'
traceback[err_id] = msg.format(self.architecture + '.' + driver,
d_path)
return
try:
meta_infos = {k: getattr(self, k)
for k in ('architecture', 'manufacturer', 'serie',
'model', 'kind')
}
infos = DriverInfos(id=driver_id,
infos=meta_infos,
starter=self.starter,
connections=self.connections,
settings=self.settings)
# ValueError catch wrong kind value
except (KeyError, ValueError):
traceback[driver_id] = format_exc()
return
# Get the driver class.
d_cls = import_and_get(d_path, driver, traceback, driver_id)
if d_cls is None:
return
try:
infos.cls = d_cls
except TypeError:
msg = '{} should be a callable.\n{}'
traceback[driver_id] = msg.format(d_cls, format_exc())
return
collector.contributions[driver_id] = infos
self.is_registered = True
def unregister(self, collector):
"""Remove contributed infos from the collector.
"""
if self.is_registered:
# Remove infos.
driver_id = self.id
try:
del collector.contributions[driver_id]
except KeyError:
pass
self.is_registered = False
def __str__(self):
"""Identify the decl by its members.
"""
members = ('driver', 'architecture', 'manufacturer', 'serie', 'model',
'kind', 'starter', 'connections', 'settings')
st = '{} whose known members are :\n{}'
st_m = '\n'.join(' - {} : {}'.format(m, v)
for m, v in [(m, getattr(self, m)) for m in members]
)
return st.format(type(self).__name__, st_m)
# =========================================================================
# --- Private API ---------------------------------------------------------
# =========================================================================
def _default_manufacturer(self):
"""Default value grabbed from parent if not provided explicitely.
"""
return self._get_inherited_member('manufacturer')
def _default_serie(self):
"""Default value grabbed from parent if not provided explicitely.
"""
return self._get_inherited_member('serie')
def _default_kind(self):
"""Default value grabbed from parent if not provided explicitely.
"""
return self._get_inherited_member('kind')
def _default_architecture(self):
"""Default value grabbed from parent if not provided explicitely.
"""
return self._get_inherited_member('architecture')
def _default_starter(self):
"""Default value grabbed from parent if not provided explicitely.
"""
return self._get_inherited_member('starter')
def _default_connections(self):
"""Default value grabbed from parent if not provided explicitely.
"""
return self._get_inherited_member('connections')
def _default_settings(self):
"""Default value grabbed from parent if not provided explicitely.
"""
return self._get_inherited_member('settings')
def _get_inherited_member(self, member, parent=None):
"""Get the value of a member found in a parent declarator.
"""
parent = parent or self.parent
if isinstance(parent, Drivers):
value = getattr(parent, member)
if value:
return value
else:
parent = parent.parent
else:
parent = None
if parent is None:
if member == 'settings':
return {} # Settings can be empty
elif member == 'serie':
return '' # An instrument can have no serie
elif member == 'kind':
return 'Other'
raise KeyError('No inherited member was found for %s' %
member)
return self._get_inherited_member(member, parent)
def _get_id(self):
"""Create the unique identifier of the driver using the top level
package the architecture and the class name.
"""
if ':' in self.driver:
path = self.get_path()
d_path, d = (path + '.' + self.driver
if path else self.driver).split(':')
# Build the driver id by assembling the package name, architecture
# and the class name
return '.'.join((d_path.split('.', 1)[0], self.architecture, d))
else:
return self.driver
class Drivers(GroupDeclarator):
"""Declarator to group driver declarations.
For the full documentation of the members values please the Driver class.
"""
#: Name identifying the system the driver is built on top of for the
#: declared children.
architecture = d_(Str())
#: Instrument manufacturer of the declared children.
manufacturer = d_(Str())
#: Serie of the declared children.
serie = d_(Str())
#: Kind of the declared children.
kind = d_(Enum(None, *INSTRUMENT_KINDS))
#: Starter to use for the declared children.
starter = d_(Str())
#: Supported connections of the declared children.
connections = d_(Dict())
#: Settings of the declared children.
settings = d_(Dict())
def __str__(self):
"""Identify the group by its mmebers and declared children.
"""
members = ('path', 'architecture', 'manufacturer', 'serie', 'kind',
'starter', 'connections', 'settings')
st = '{} whose known members are :\n{}\n and declaring :\n{}'
st_m = '\n'.join(' - {} : | return
# Determine the path to the driver. | random_line_split |
driver_decl.py | see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Declarator for registering drivers.
"""
from atom.api import Str, Dict, Property, Enum
from enaml.core.api import d_
from ...utils.traceback import format_exc
from ...utils.declarator import Declarator, GroupDeclarator, import_and_get
from ..infos import DriverInfos, INSTRUMENT_KINDS
class Driver(Declarator):
"""Declarator used to register a new driver for an instrument.
"""
#: Path to the driver object. Path should be dot separated and the class
#: name preceded by ':'.
#: TODO complete : ex: exopy_hqc_legacy.instruments.
#: The path of any parent Drivers object will be prepended to it.
driver = d_(Str())
#: Name identifying the system the driver is built on top of (lantz, hqc,
#: slave, etc ...). Allow to handle properly multiple drivers declared in
#: a single extension package for the same instrument.
architecture = d_(Str())
#: Name of the instrument manufacturer. Can be inferred from parent
#: Drivers.
manufacturer = d_(Str())
#: Serie this instrument is part of. This is optional as it does not always
#: make sense to be specified but in some cases it can help finding a
#: a driver. Can be inferred from parent Drivers.
serie = d_(Str())
#: Model of the instrument this driver has been written for.
model = d_(Str())
#: Kind of the instrument, to ease instrument look up. If no kind match,
#: leave 'Other' as the kind. Can be inferred from parent
#: Drivers.
kind = d_(Enum(None, *INSTRUMENT_KINDS))
#: Starter to use when initializing/finialzing this driver.
#: Can be inferred from parent Drivers.
starter = d_(Str())
#: Supported connections and default values for some parameters. The
#: admissible values for a given kind can be determined by looking at the
#: Connection object whose id match.
#: ex : {'VisaTCPIP' : {'port': 7500, 'resource_class': 'SOCKET'}}
#: Can be inferred from parent Drivers.
connections = d_(Dict())
#: Special settings for the driver, not fitting the connections. Multiple
#: identical connection infos with different settings can co-exist in a
#: profile. The admissible values for a given kind can be determined by
#: looking at the Settings object whose id match.
#: ex : {'lantz': {'resource_manager': '@py'}}
#: Can be inferred from parent Drivers.
settings = d_(Dict())
#: Id of the driver computed from the top-level package and the driver name
id = Property(cached=True)
def register(self, collector, traceback):
"""Collect driver and add infos to the DeclaratorCollector
contributions member.
"""
# Build the driver id by assembling the package name, the architecture
# and the class name
try:
driver_id = self.id
except KeyError: # Handle the lack of architecture
traceback[self.driver] = format_exc()
return
# Determine the path to the driver.
path = self.get_path()
try:
d_path, driver = (path + '.' + self.driver
if path else self.driver).split(':')
except ValueError:
msg = 'Incorrect %s (%s), path must be of the form a.b.c:Class'
traceback[driver_id] = msg % (driver_id, self.driver)
return
# Check that the driver does not already exist.
if driver_id in collector.contributions or driver_id in traceback:
i = 0
while True:
i += 1
err_id = '%s_duplicate%d' % (driver_id, i)
if err_id not in traceback:
break
msg = 'Duplicate definition of {}, found in {}'
traceback[err_id] = msg.format(self.architecture + '.' + driver,
d_path)
return
try:
meta_infos = {k: getattr(self, k)
for k in ('architecture', 'manufacturer', 'serie',
'model', 'kind')
}
infos = DriverInfos(id=driver_id,
infos=meta_infos,
starter=self.starter,
connections=self.connections,
settings=self.settings)
# ValueError catch wrong kind value
except (KeyError, ValueError):
traceback[driver_id] = format_exc()
return
# Get the driver class.
d_cls = import_and_get(d_path, driver, traceback, driver_id)
if d_cls is None:
|
try:
infos.cls = d_cls
except TypeError:
msg = '{} should be a callable.\n{}'
traceback[driver_id] = msg.format(d_cls, format_exc())
return
collector.contributions[driver_id] = infos
self.is_registered = True
def unregister(self, collector):
"""Remove contributed infos from the collector.
"""
if self.is_registered:
# Remove infos.
driver_id = self.id
try:
del collector.contributions[driver_id]
except KeyError:
pass
self.is_registered = False
def __str__(self):
"""Identify the decl by its members.
"""
members = ('driver', 'architecture', 'manufacturer', 'serie', 'model',
'kind', 'starter', 'connections', 'settings')
st = '{} whose known members are :\n{}'
st_m = '\n'.join(' - {} : {}'.format(m, v)
for m, v in [(m, getattr(self, m)) for m in members]
)
return st.format(type(self).__name__, st_m)
# =========================================================================
# --- Private API ---------------------------------------------------------
# =========================================================================
def _default_manufacturer(self):
"""Default value grabbed from parent if not provided explicitely.
"""
return self._get_inherited_member('manufacturer')
def _default_serie(self):
"""Default value grabbed from parent if not provided explicitely.
"""
return self._get_inherited_member('serie')
def _default_kind(self):
"""Default value grabbed from parent if not provided explicitely.
"""
return self._get_inherited_member('kind')
def _default_architecture(self):
"""Default value grabbed from parent if not provided explicitely.
"""
return self._get_inherited_member('architecture')
def _default_starter(self):
"""Default value grabbed from parent if not provided explicitely.
"""
return self._get_inherited_member('starter')
def _default_connections(self):
"""Default value grabbed from parent if not provided explicitely.
"""
return self._get_inherited_member('connections')
def _default_settings(self):
"""Default value grabbed from parent if not provided explicitely.
"""
return self._get_inherited_member('settings')
def _get_inherited_member(self, member, parent=None):
"""Get the value of a member found in a parent declarator.
"""
parent = parent or self.parent
if isinstance(parent, Drivers):
value = getattr(parent, member)
if value:
return value
else:
parent = parent.parent
else:
parent = None
if parent is None:
if member == 'settings':
return {} # Settings can be empty
elif member == 'serie':
return '' # An instrument can have no serie
elif member == 'kind':
return 'Other'
raise KeyError('No inherited member was found for %s' %
member)
return self._get_inherited_member(member, parent)
def _get_id(self):
"""Create the unique identifier of the driver using the top level
package the architecture and the class name.
"""
if ':' in self.driver:
path = self.get_path()
d_path, d = (path + '.' + self.driver
if path else self.driver).split(':')
# Build the driver id by assembling the package name, architecture
# and the class name
return '.'.join((d_path.split('.', 1)[0], self.architecture, d))
else:
return self.driver
class Drivers(GroupDeclarator):
"""Declarator to group driver declarations.
For the full documentation of the members values please the Driver class.
"""
#: Name identifying the system the driver is built on top of for the
#: declared children.
architecture = d_(Str())
#: Instrument manufacturer of the declared children.
manufacturer = d_(Str())
#: Serie of the declared children.
serie = d_(Str())
#: Kind of the declared children.
kind = d_(Enum(None, *INSTRUMENT_KINDS))
#: Starter to use for the declared children.
starter = d_(Str())
#: Supported connections of the declared children.
connections = d_(Dict())
#: Settings of the declared children.
settings = d_(Dict())
def __str__(self):
"""Identify the group by its mmebers and declared children.
"""
members = ('path', 'architecture', 'manufacturer', 'serie', 'kind',
'starter', 'connections', 'settings')
st = '{} whose known members are :\n{}\n and declaring :\n{}'
st_m = '\n'.join(' - {} : | return | conditional_block |
http-driver.ts | import xs, {Stream, MemoryStream} from 'xstream';
import {Driver} from '@cycle/run';
import {adapt} from '@cycle/run/lib/adapt';
import {MainHTTPSource} from './MainHTTPSource';
import * as superagent from 'superagent';
import {
HTTPSource,
ResponseStream,
RequestOptions,
RequestInput,
Response,
} from './interfaces';
function preprocessReqOptions(reqOptions: RequestOptions): RequestOptions |
export function optionsToSuperagent(rawReqOptions: RequestOptions) {
const reqOptions = preprocessReqOptions(rawReqOptions);
if (typeof reqOptions.url !== `string`) {
throw new Error(
`Please provide a \`url\` property in the request options.`
);
}
const lowerCaseMethod = (reqOptions.method || 'GET').toLowerCase();
const sanitizedMethod =
lowerCaseMethod === `delete` ? `del` : lowerCaseMethod;
let request = superagent[sanitizedMethod](reqOptions.url);
if (typeof request.redirects === `function`) {
request = request.redirects(reqOptions.redirects);
}
if (reqOptions.type) {
request = request.type(reqOptions.type);
}
if (reqOptions.send) {
request = request.send(reqOptions.send);
}
if (reqOptions.accept) {
request = request.accept(reqOptions.accept);
}
if (reqOptions.query) {
request = request.query(reqOptions.query);
}
if (reqOptions.withCredentials) {
request = request.withCredentials();
}
if (reqOptions.agent) {
request = request.key(reqOptions.agent.key);
request = request.cert(reqOptions.agent.cert);
}
if (
typeof reqOptions.user === 'string' &&
typeof reqOptions.password === 'string'
) {
request = request.auth(reqOptions.user, reqOptions.password);
}
if (reqOptions.headers) {
for (const key in reqOptions.headers) {
if (reqOptions.headers.hasOwnProperty(key)) {
request = request.set(key, reqOptions.headers[key]);
}
}
}
if (reqOptions.field) {
for (const key in reqOptions.field) {
if (reqOptions.field.hasOwnProperty(key)) {
request = request.field(key, reqOptions.field[key]);
}
}
}
if (reqOptions.attach) {
for (let i = reqOptions.attach.length - 1; i >= 0; i--) {
const a = reqOptions.attach[i];
request = request.attach(a.name, a.path, a.filename);
}
}
if (reqOptions.responseType) {
request = request.responseType(reqOptions.responseType);
}
if (reqOptions.ok) {
request = request.ok(reqOptions.ok);
}
return request;
}
export function createResponse$(reqInput: RequestInput): Stream<Response> {
let request: any;
return xs.create<Response>({
start: function startResponseStream(listener) {
try {
const reqOptions = normalizeRequestInput(reqInput);
request = optionsToSuperagent(reqOptions);
if (reqOptions.progress) {
request = request.on('progress', (res: Response) => {
res.request = reqOptions;
listener.next(res);
});
}
request.end((err: any, res: Response) => {
if (err) {
if (err.response) {
err.response.request = reqOptions;
}
listener.error(err);
} else {
res.request = reqOptions;
listener.next(res);
listener.complete();
}
});
} catch (err) {
listener.error(err);
}
},
stop: function stopResponseStream() {
if (request && request.abort) {
request.abort();
request = null;
}
},
});
}
function softNormalizeRequestInput(reqInput: RequestInput): RequestOptions {
let reqOptions: RequestOptions;
try {
reqOptions = normalizeRequestInput(reqInput);
} catch (err) {
reqOptions = {url: 'Error', _error: err};
}
return reqOptions;
}
function normalizeRequestInput(reqInput: RequestInput): RequestOptions {
if (typeof reqInput === 'string') {
return {url: reqInput};
} else if (typeof reqInput === 'object') {
return reqInput;
} else {
throw new Error(
`Observable of requests given to HTTP Driver must emit ` +
`either URL strings or objects with parameters.`
);
}
}
export type ResponseMemoryStream = MemoryStream<Response> & ResponseStream;
function requestInputToResponse$(reqInput: RequestInput): ResponseMemoryStream {
let response$ = createResponse$(reqInput).remember();
const reqOptions = softNormalizeRequestInput(reqInput);
if (!reqOptions.lazy) {
response$.addListener({
next: () => {},
error: () => {},
complete: () => {},
});
}
response$ = adapt(response$);
Object.defineProperty(response$, 'request', {
value: reqOptions,
writable: false,
});
return response$ as ResponseMemoryStream;
}
export function makeHTTPDriver(): Driver<Stream<RequestInput>, HTTPSource> {
function httpDriver(
request$: Stream<RequestInput>,
name: string = 'HTTP'
): HTTPSource {
const response$$ = request$.map(requestInputToResponse$);
const httpSource = new MainHTTPSource(response$$, name, []);
response$$.addListener({
next: () => {},
error: () => {},
complete: () => {},
});
return httpSource;
}
return httpDriver;
}
| {
reqOptions.withCredentials = reqOptions.withCredentials || false;
reqOptions.redirects =
typeof reqOptions.redirects === 'number' ? reqOptions.redirects : 5;
reqOptions.method = reqOptions.method || `get`;
return reqOptions;
} | identifier_body |
http-driver.ts | import xs, {Stream, MemoryStream} from 'xstream';
import {Driver} from '@cycle/run';
import {adapt} from '@cycle/run/lib/adapt';
import {MainHTTPSource} from './MainHTTPSource';
import * as superagent from 'superagent';
import {
HTTPSource,
ResponseStream,
RequestOptions,
RequestInput,
Response,
} from './interfaces';
function preprocessReqOptions(reqOptions: RequestOptions): RequestOptions {
reqOptions.withCredentials = reqOptions.withCredentials || false;
reqOptions.redirects =
typeof reqOptions.redirects === 'number' ? reqOptions.redirects : 5;
reqOptions.method = reqOptions.method || `get`;
return reqOptions;
}
export function optionsToSuperagent(rawReqOptions: RequestOptions) {
const reqOptions = preprocessReqOptions(rawReqOptions);
if (typeof reqOptions.url !== `string`) {
throw new Error(
`Please provide a \`url\` property in the request options.`
);
}
const lowerCaseMethod = (reqOptions.method || 'GET').toLowerCase();
const sanitizedMethod =
lowerCaseMethod === `delete` ? `del` : lowerCaseMethod;
let request = superagent[sanitizedMethod](reqOptions.url);
if (typeof request.redirects === `function`) {
request = request.redirects(reqOptions.redirects);
}
if (reqOptions.type) {
request = request.type(reqOptions.type);
}
if (reqOptions.send) {
request = request.send(reqOptions.send);
}
if (reqOptions.accept) {
request = request.accept(reqOptions.accept);
}
if (reqOptions.query) {
request = request.query(reqOptions.query);
}
if (reqOptions.withCredentials) {
request = request.withCredentials();
}
if (reqOptions.agent) {
request = request.key(reqOptions.agent.key);
request = request.cert(reqOptions.agent.cert);
}
if (
typeof reqOptions.user === 'string' &&
typeof reqOptions.password === 'string'
) {
request = request.auth(reqOptions.user, reqOptions.password);
}
if (reqOptions.headers) {
for (const key in reqOptions.headers) {
if (reqOptions.headers.hasOwnProperty(key)) {
request = request.set(key, reqOptions.headers[key]);
}
}
}
if (reqOptions.field) {
for (const key in reqOptions.field) {
if (reqOptions.field.hasOwnProperty(key)) {
request = request.field(key, reqOptions.field[key]);
}
}
}
if (reqOptions.attach) {
for (let i = reqOptions.attach.length - 1; i >= 0; i--) {
const a = reqOptions.attach[i];
request = request.attach(a.name, a.path, a.filename);
}
}
if (reqOptions.responseType) {
request = request.responseType(reqOptions.responseType);
}
if (reqOptions.ok) {
request = request.ok(reqOptions.ok);
}
return request;
}
export function createResponse$(reqInput: RequestInput): Stream<Response> {
let request: any;
return xs.create<Response>({
start: function startResponseStream(listener) {
try {
const reqOptions = normalizeRequestInput(reqInput);
request = optionsToSuperagent(reqOptions);
if (reqOptions.progress) {
request = request.on('progress', (res: Response) => {
res.request = reqOptions;
listener.next(res);
});
}
request.end((err: any, res: Response) => {
if (err) {
if (err.response) {
err.response.request = reqOptions;
}
listener.error(err);
} else {
res.request = reqOptions;
listener.next(res);
listener.complete();
}
});
} catch (err) {
listener.error(err);
}
},
stop: function stopResponseStream() {
if (request && request.abort) {
request.abort();
request = null;
}
},
});
}
function softNormalizeRequestInput(reqInput: RequestInput): RequestOptions {
let reqOptions: RequestOptions;
try {
reqOptions = normalizeRequestInput(reqInput);
} catch (err) {
reqOptions = {url: 'Error', _error: err};
}
return reqOptions;
}
function normalizeRequestInput(reqInput: RequestInput): RequestOptions {
if (typeof reqInput === 'string') {
return {url: reqInput};
} else if (typeof reqInput === 'object') {
return reqInput;
} else {
throw new Error(
`Observable of requests given to HTTP Driver must emit ` +
`either URL strings or objects with parameters.`
);
}
}
export type ResponseMemoryStream = MemoryStream<Response> & ResponseStream;
function requestInputToResponse$(reqInput: RequestInput): ResponseMemoryStream {
let response$ = createResponse$(reqInput).remember();
const reqOptions = softNormalizeRequestInput(reqInput);
if (!reqOptions.lazy) |
response$ = adapt(response$);
Object.defineProperty(response$, 'request', {
value: reqOptions,
writable: false,
});
return response$ as ResponseMemoryStream;
}
export function makeHTTPDriver(): Driver<Stream<RequestInput>, HTTPSource> {
function httpDriver(
request$: Stream<RequestInput>,
name: string = 'HTTP'
): HTTPSource {
const response$$ = request$.map(requestInputToResponse$);
const httpSource = new MainHTTPSource(response$$, name, []);
response$$.addListener({
next: () => {},
error: () => {},
complete: () => {},
});
return httpSource;
}
return httpDriver;
}
| {
response$.addListener({
next: () => {},
error: () => {},
complete: () => {},
});
} | conditional_block |
http-driver.ts | import xs, {Stream, MemoryStream} from 'xstream';
import {Driver} from '@cycle/run';
import {adapt} from '@cycle/run/lib/adapt';
import {MainHTTPSource} from './MainHTTPSource';
import * as superagent from 'superagent';
import {
HTTPSource,
ResponseStream,
RequestOptions,
RequestInput,
Response,
} from './interfaces';
function preprocessReqOptions(reqOptions: RequestOptions): RequestOptions {
reqOptions.withCredentials = reqOptions.withCredentials || false;
reqOptions.redirects =
typeof reqOptions.redirects === 'number' ? reqOptions.redirects : 5;
reqOptions.method = reqOptions.method || `get`;
return reqOptions;
}
export function optionsToSuperagent(rawReqOptions: RequestOptions) {
const reqOptions = preprocessReqOptions(rawReqOptions);
if (typeof reqOptions.url !== `string`) {
throw new Error(
`Please provide a \`url\` property in the request options.`
);
}
const lowerCaseMethod = (reqOptions.method || 'GET').toLowerCase();
const sanitizedMethod =
lowerCaseMethod === `delete` ? `del` : lowerCaseMethod;
let request = superagent[sanitizedMethod](reqOptions.url);
if (typeof request.redirects === `function`) {
request = request.redirects(reqOptions.redirects);
}
if (reqOptions.type) {
request = request.type(reqOptions.type);
}
if (reqOptions.send) {
request = request.send(reqOptions.send);
}
if (reqOptions.accept) {
request = request.accept(reqOptions.accept);
}
if (reqOptions.query) {
request = request.query(reqOptions.query);
}
if (reqOptions.withCredentials) {
request = request.withCredentials();
}
if (reqOptions.agent) {
request = request.key(reqOptions.agent.key);
request = request.cert(reqOptions.agent.cert);
}
if (
typeof reqOptions.user === 'string' &&
typeof reqOptions.password === 'string'
) {
request = request.auth(reqOptions.user, reqOptions.password);
} | }
}
if (reqOptions.field) {
for (const key in reqOptions.field) {
if (reqOptions.field.hasOwnProperty(key)) {
request = request.field(key, reqOptions.field[key]);
}
}
}
if (reqOptions.attach) {
for (let i = reqOptions.attach.length - 1; i >= 0; i--) {
const a = reqOptions.attach[i];
request = request.attach(a.name, a.path, a.filename);
}
}
if (reqOptions.responseType) {
request = request.responseType(reqOptions.responseType);
}
if (reqOptions.ok) {
request = request.ok(reqOptions.ok);
}
return request;
}
export function createResponse$(reqInput: RequestInput): Stream<Response> {
let request: any;
return xs.create<Response>({
start: function startResponseStream(listener) {
try {
const reqOptions = normalizeRequestInput(reqInput);
request = optionsToSuperagent(reqOptions);
if (reqOptions.progress) {
request = request.on('progress', (res: Response) => {
res.request = reqOptions;
listener.next(res);
});
}
request.end((err: any, res: Response) => {
if (err) {
if (err.response) {
err.response.request = reqOptions;
}
listener.error(err);
} else {
res.request = reqOptions;
listener.next(res);
listener.complete();
}
});
} catch (err) {
listener.error(err);
}
},
stop: function stopResponseStream() {
if (request && request.abort) {
request.abort();
request = null;
}
},
});
}
function softNormalizeRequestInput(reqInput: RequestInput): RequestOptions {
let reqOptions: RequestOptions;
try {
reqOptions = normalizeRequestInput(reqInput);
} catch (err) {
reqOptions = {url: 'Error', _error: err};
}
return reqOptions;
}
function normalizeRequestInput(reqInput: RequestInput): RequestOptions {
if (typeof reqInput === 'string') {
return {url: reqInput};
} else if (typeof reqInput === 'object') {
return reqInput;
} else {
throw new Error(
`Observable of requests given to HTTP Driver must emit ` +
`either URL strings or objects with parameters.`
);
}
}
export type ResponseMemoryStream = MemoryStream<Response> & ResponseStream;
function requestInputToResponse$(reqInput: RequestInput): ResponseMemoryStream {
let response$ = createResponse$(reqInput).remember();
const reqOptions = softNormalizeRequestInput(reqInput);
if (!reqOptions.lazy) {
response$.addListener({
next: () => {},
error: () => {},
complete: () => {},
});
}
response$ = adapt(response$);
Object.defineProperty(response$, 'request', {
value: reqOptions,
writable: false,
});
return response$ as ResponseMemoryStream;
}
export function makeHTTPDriver(): Driver<Stream<RequestInput>, HTTPSource> {
function httpDriver(
request$: Stream<RequestInput>,
name: string = 'HTTP'
): HTTPSource {
const response$$ = request$.map(requestInputToResponse$);
const httpSource = new MainHTTPSource(response$$, name, []);
response$$.addListener({
next: () => {},
error: () => {},
complete: () => {},
});
return httpSource;
}
return httpDriver;
} | if (reqOptions.headers) {
for (const key in reqOptions.headers) {
if (reqOptions.headers.hasOwnProperty(key)) {
request = request.set(key, reqOptions.headers[key]);
} | random_line_split |
http-driver.ts | import xs, {Stream, MemoryStream} from 'xstream';
import {Driver} from '@cycle/run';
import {adapt} from '@cycle/run/lib/adapt';
import {MainHTTPSource} from './MainHTTPSource';
import * as superagent from 'superagent';
import {
HTTPSource,
ResponseStream,
RequestOptions,
RequestInput,
Response,
} from './interfaces';
function preprocessReqOptions(reqOptions: RequestOptions): RequestOptions {
reqOptions.withCredentials = reqOptions.withCredentials || false;
reqOptions.redirects =
typeof reqOptions.redirects === 'number' ? reqOptions.redirects : 5;
reqOptions.method = reqOptions.method || `get`;
return reqOptions;
}
export function optionsToSuperagent(rawReqOptions: RequestOptions) {
const reqOptions = preprocessReqOptions(rawReqOptions);
if (typeof reqOptions.url !== `string`) {
throw new Error(
`Please provide a \`url\` property in the request options.`
);
}
const lowerCaseMethod = (reqOptions.method || 'GET').toLowerCase();
const sanitizedMethod =
lowerCaseMethod === `delete` ? `del` : lowerCaseMethod;
let request = superagent[sanitizedMethod](reqOptions.url);
if (typeof request.redirects === `function`) {
request = request.redirects(reqOptions.redirects);
}
if (reqOptions.type) {
request = request.type(reqOptions.type);
}
if (reqOptions.send) {
request = request.send(reqOptions.send);
}
if (reqOptions.accept) {
request = request.accept(reqOptions.accept);
}
if (reqOptions.query) {
request = request.query(reqOptions.query);
}
if (reqOptions.withCredentials) {
request = request.withCredentials();
}
if (reqOptions.agent) {
request = request.key(reqOptions.agent.key);
request = request.cert(reqOptions.agent.cert);
}
if (
typeof reqOptions.user === 'string' &&
typeof reqOptions.password === 'string'
) {
request = request.auth(reqOptions.user, reqOptions.password);
}
if (reqOptions.headers) {
for (const key in reqOptions.headers) {
if (reqOptions.headers.hasOwnProperty(key)) {
request = request.set(key, reqOptions.headers[key]);
}
}
}
if (reqOptions.field) {
for (const key in reqOptions.field) {
if (reqOptions.field.hasOwnProperty(key)) {
request = request.field(key, reqOptions.field[key]);
}
}
}
if (reqOptions.attach) {
for (let i = reqOptions.attach.length - 1; i >= 0; i--) {
const a = reqOptions.attach[i];
request = request.attach(a.name, a.path, a.filename);
}
}
if (reqOptions.responseType) {
request = request.responseType(reqOptions.responseType);
}
if (reqOptions.ok) {
request = request.ok(reqOptions.ok);
}
return request;
}
export function createResponse$(reqInput: RequestInput): Stream<Response> {
let request: any;
return xs.create<Response>({
start: function startResponseStream(listener) {
try {
const reqOptions = normalizeRequestInput(reqInput);
request = optionsToSuperagent(reqOptions);
if (reqOptions.progress) {
request = request.on('progress', (res: Response) => {
res.request = reqOptions;
listener.next(res);
});
}
request.end((err: any, res: Response) => {
if (err) {
if (err.response) {
err.response.request = reqOptions;
}
listener.error(err);
} else {
res.request = reqOptions;
listener.next(res);
listener.complete();
}
});
} catch (err) {
listener.error(err);
}
},
stop: function stopResponseStream() {
if (request && request.abort) {
request.abort();
request = null;
}
},
});
}
function softNormalizeRequestInput(reqInput: RequestInput): RequestOptions {
let reqOptions: RequestOptions;
try {
reqOptions = normalizeRequestInput(reqInput);
} catch (err) {
reqOptions = {url: 'Error', _error: err};
}
return reqOptions;
}
function | (reqInput: RequestInput): RequestOptions {
if (typeof reqInput === 'string') {
return {url: reqInput};
} else if (typeof reqInput === 'object') {
return reqInput;
} else {
throw new Error(
`Observable of requests given to HTTP Driver must emit ` +
`either URL strings or objects with parameters.`
);
}
}
export type ResponseMemoryStream = MemoryStream<Response> & ResponseStream;
function requestInputToResponse$(reqInput: RequestInput): ResponseMemoryStream {
let response$ = createResponse$(reqInput).remember();
const reqOptions = softNormalizeRequestInput(reqInput);
if (!reqOptions.lazy) {
response$.addListener({
next: () => {},
error: () => {},
complete: () => {},
});
}
response$ = adapt(response$);
Object.defineProperty(response$, 'request', {
value: reqOptions,
writable: false,
});
return response$ as ResponseMemoryStream;
}
export function makeHTTPDriver(): Driver<Stream<RequestInput>, HTTPSource> {
function httpDriver(
request$: Stream<RequestInput>,
name: string = 'HTTP'
): HTTPSource {
const response$$ = request$.map(requestInputToResponse$);
const httpSource = new MainHTTPSource(response$$, name, []);
response$$.addListener({
next: () => {},
error: () => {},
complete: () => {},
});
return httpSource;
}
return httpDriver;
}
| normalizeRequestInput | identifier_name |
longbeach_crime_stats.py | = (sa+prev)*0.5
buckets.append((s,e))
s = e
prev = sa
#else
buckets.append((s,s+40))
return [buckets,[i for i,y in enumerate(buckets)]]
def create_frame(pnodes,mptbl,mptbltxt,lftmrkr):
'''
For a given page, here I use the position to tag it with a column number.
Then a data frame is created and the pivot_table option is construct back
a proper table to resemble the actual data set.
'''
df = pd.DataFrame(pnodes)
[tmptbl,tmptblval] = create_buckets(df.top.unique()) # buckets for top
dval = []
for t in tmptbl:
dvlst = df[(df["top"]>=t[0])&(df["top"]<=t[1])&(df['left']<lftmrkr)]['content'].values
#dval.append(dvlst[0] if len(dvlst)>0 else u'RD')
cval = dvlst[0] if len(dvlst)>0 else u'RD'
dval.append(cval)
#df[(df["top"]>=t[0])&(df["top"]<=t[1])]['rowval'] = cval
df['row'] = df['top'].map(lambda g:
[
dval[i] for i,x in enumerate(tmptbl)
if ((x[0]<=g)and(g<=x[1])) or None
][0]
)
dfs = df[df['row']!='RD']
dlst = dcnt = []
for i,v in dfs.iterrows():
if v.left<lftmrkr:
dcnt.append(v.content)
dlst.append(v.top)
dfs['column'] = dfs['left'].map(lambda g: [mptbltxt[i] for i,x in enumerate(mptbl)
if ((x[0]<=g)and(g<=x[1]))][0])
pvt = dfs.pivot(index='row',columns='column',values='content')
pvt.fillna(0,inplace=True)
for c in pvt.columns:
try:
pvt[c] = pvt[c].astype(int)
except:
pass
return pvt
'''
# this didn't work; need to check later
def grab_monthlypdfs():
domain='http://www.longbeach.gov'
url = 'http://www.longbeach.gov/police/statistics.asp'
res = requests.get(url)
sp = BeautifulSoup(res.text)
tbody = sp.find_all('tbody')
links = tbody[3].find_all('a')
pdfdir = os.path.join(_curdir,'files','PDF')
if not os.path.exists(pdfdir):
os.makedirs(pdfdir)
for l in links:
title = '_'.join( l['title'].split(" ") )
print title
try:
res = requests.get(domain+l['href'],stream=True)
pdffile = os.path.join(pdfdir,title+'.pdf')
with open(pdffile,'wb') as f:
for chunk in res.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
except Exception as e:
print 'FAILED: '+str(e)+l['title']+" "+l['href']
'''
def extract_nodes(p,lftmrkr):
'''
This is the code that extracts the beautiful soup html document into
a bunch of nodes for easy processing
'''
nodes = p.find_all('p' )
dlist = []
nextdat = {}
for node in nodes:
ddict = {}
attrs = node.attrs
attrssty = attrs.get('style','')
attrscls = attrs.get('class','')
if attrscls[0] == 'ft01' or attrscls[0] == 'ft03':
posns = _posdat.findall(attrssty)
if len(posns) == 2:
k,v = zip(*posns)
if ('top' in k ) and ('left' in k):
if nextdat != {}:
nextdat['top'] = int(v[0]) if k[0] == 'top' else int(v[1])
ddict = nextdat
nextdat = {}
ddict[k[0]] = int(v[0])
ddict[k[1]] = int(v[1])
cont = node.contents
if len(cont) == 1 :
ddict['content'] = cont[0].replace('\xa0','0')
elif len(cont)==3:
ddict['content'] = cont[0].replace('\xa0','0')
nextdat['content'] = cont[2].replace('\xa0','0')
nextdat['left'] = int(v[1])if k[1] == 'left' else int(v[0])
#if (ddict['left']<lftmrkr) and (ddict['content']!= 'RD'):
# currrd = ddict['content']
#ddict['rd'] = currrd
dlist.append(ddict)
return dlist
def create_html(pdffile):
'''
Given a pdf file, this calls pdftohtml.exe to convert to html
'''
try:
pdftohtml = "pdftohtml.exe "
htmldir = os.path.join(_curdir,'files','HTML')
if not os.path.exists(htmldir):
os.makedirs(htmldir)
pdffile = os.path.abspath(pdffile)
fileprefix = os.path.split(pdffile)[1].split('.pdf')[0]
cmd = pdftohtml+pdffile+" -c -noframes "+os.path.join(htmldir,fileprefix+".html")
print cmd
os.system(cmd)
except Exception as e:
print str(e)
def convert_all_pdfs(pdfdir):
'''
Convenient method to loop over all the pdf files. Calls create_html
file in a loop.
'''
for f in os.listdir(pdfdir):
if f.endswith('.pdf'):
create_html(os.path.join(pdfdir,f))
def _finalize_dataframe(ddf):
'''
Does some clean-up, check sums to validate the data. This is a basic
check. Nothing is guaranteed!
'''
# do a checksum test
if 'TOTAL_PART1' in ddf.columns:
checksum = np.sum(\
np.power(
ddf[mptbltxt[1:14]].astype(int).sum(axis=1) -
ddf['TOTAL_PART1'].astype(int)
,2)
)
if checksum:
print "Failed check sum test "+str(checksum)
else:
print "Passed checksum test"
# reorder the columns
if len(ddf.columns) == 17:
ddf = ddf[mptbltxt]
else:
ddf = ddf[mptbltxt[:15]]
del ddf['RD']
ddf.index.name = 'RD'
return ddf
def create_csv(htmlfile):
'''
This creates the csv file given a html file
'''
try:
print "Converting "+htmlfile
soup = load_html(htmlfile)
pages = grab_pages(soup)
num_nodes = len(pages[0])
leftmrkr = 75 if num_nodes > 440 else 133 # to handle two pdf formats
mptbl = maptbl_long if num_nodes > 440 else maptbl_short
#filetype = 1 if num_nodes > 480 else 0 # 1 if long type else 0
pvts = []
for i,p in enumerate(pages):
print 'Page-'+str(i)
dlist = extract_nodes(p,leftmrkr)
#df = create_frame(dlist,mptbl0,mptbltxt,leftmrkr)
df = create_frame(dlist,mptbl,mptbltxt,leftmrkr)
pvts.append(df)
ddf = pd.concat(pvts)
exclrows = set(['0'+str(i)for i in range(2000,2020,1)]) | set(['%CHG'])
exclrows = exclrows & set(ddf.index)
ddf.drop(exclrows,inplace=True)
ddf.fillna(0,inplace=True)
#cleanup
ddf = _finalize_dataframe(ddf)
csvdir = os.path.join(_curdir,'files','CSV')
if not os.path.exists(csvdir):
os.makedirs(csvdir)
htmlfile = os.path.abspath(htmlfile)
fileprefix = os.path.split(htmlfile)[1].split('.html')[0]
csvfile = os.path.join(csvdir,fileprefix+".csv")
ddf.to_csv(csvfile)
except Exception as e:
print str(e)
def convert_all_htmls(htmldir):
| '''
This is a top leve driver which calls create_csv in a loop
'''
for f in os.listdir(htmldir):
if f.endswith('.html'):
create_csv(os.path.join(htmldir,f))
#break | identifier_body |
|
longbeach_crime_stats.py | 1094,1199)]
# This provides a mapping to the column with the text
mptbltxt = ['RD','MURDER','MANSLTR','FORCED_RAPE','ROBBERY','AGGRAV_ASSAULT',
'BURGLARY_RES','BURGLARY_COM','AUTO_BURG','GRAND_THEFT','PETTY_THEFT',
'BIKE_THEFT','AUTO_THEFT','ARSON','TOTAL_PART1','TOTAL_PART2','GRAND_TOTAL']
#this a truncate version I found for some months; The numbers here bracket the columns
maptbl_short=[(0,133),(133,194.5),(194.5,264),(264,329),(329,396),(396,466),(466,531),
(531,597),(597,667.5),(667.5,736),(736,803),(803,871),(871,938),(938,1004),(1004,1300)
]
def load_html(filename):
soup = BeautifulSoup(file(filename).read())
return soup
def grab_pages(soup):
return soup.body.find_all('div')
def cleanup_data(data):
# remove  
data = data.replace(u'\xa0','')
return data
def create_buckets(arr):
'''
Here we bin the rows based on 'top' value
'''
sarr = np.sort(arr)
# coarseness ; this is used to separate different rows
crsns = 10# np.mean(sdiff)
s = 0
prev = sarr[0]
buckets = []
for sa in sarr[1:]:
if sa-prev>crsns:
e = (sa+prev)*0.5
buckets.append((s,e))
s = e
prev = sa
#else
buckets.append((s,s+40))
return [buckets,[i for i,y in enumerate(buckets)]]
def create_frame(pnodes,mptbl,mptbltxt,lftmrkr):
''' |
'''
df = pd.DataFrame(pnodes)
[tmptbl,tmptblval] = create_buckets(df.top.unique()) # buckets for top
dval = []
for t in tmptbl:
dvlst = df[(df["top"]>=t[0])&(df["top"]<=t[1])&(df['left']<lftmrkr)]['content'].values
#dval.append(dvlst[0] if len(dvlst)>0 else u'RD')
cval = dvlst[0] if len(dvlst)>0 else u'RD'
dval.append(cval)
#df[(df["top"]>=t[0])&(df["top"]<=t[1])]['rowval'] = cval
df['row'] = df['top'].map(lambda g:
[
dval[i] for i,x in enumerate(tmptbl)
if ((x[0]<=g)and(g<=x[1])) or None
][0]
)
dfs = df[df['row']!='RD']
dlst = dcnt = []
for i,v in dfs.iterrows():
if v.left<lftmrkr:
dcnt.append(v.content)
dlst.append(v.top)
dfs['column'] = dfs['left'].map(lambda g: [mptbltxt[i] for i,x in enumerate(mptbl)
if ((x[0]<=g)and(g<=x[1]))][0])
pvt = dfs.pivot(index='row',columns='column',values='content')
pvt.fillna(0,inplace=True)
for c in pvt.columns:
try:
pvt[c] = pvt[c].astype(int)
except:
pass
return pvt
'''
# this didn't work; need to check later
def grab_monthlypdfs():
domain='http://www.longbeach.gov'
url = 'http://www.longbeach.gov/police/statistics.asp'
res = requests.get(url)
sp = BeautifulSoup(res.text)
tbody = sp.find_all('tbody')
links = tbody[3].find_all('a')
pdfdir = os.path.join(_curdir,'files','PDF')
if not os.path.exists(pdfdir):
os.makedirs(pdfdir)
for l in links:
title = '_'.join( l['title'].split(" ") )
print title
try:
res = requests.get(domain+l['href'],stream=True)
pdffile = os.path.join(pdfdir,title+'.pdf')
with open(pdffile,'wb') as f:
for chunk in res.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
except Exception as e:
print 'FAILED: '+str(e)+l['title']+" "+l['href']
'''
def extract_nodes(p,lftmrkr):
'''
This is the code that extracts the beautiful soup html document into
a bunch of nodes for easy processing
'''
nodes = p.find_all('p' )
dlist = []
nextdat = {}
for node in nodes:
ddict = {}
attrs = node.attrs
attrssty = attrs.get('style','')
attrscls = attrs.get('class','')
if attrscls[0] == 'ft01' or attrscls[0] == 'ft03':
posns = _posdat.findall(attrssty)
if len(posns) == 2:
k,v = zip(*posns)
if ('top' in k ) and ('left' in k):
if nextdat != {}:
nextdat['top'] = int(v[0]) if k[0] == 'top' else int(v[1])
ddict = nextdat
nextdat = {}
ddict[k[0]] = int(v[0])
ddict[k[1]] = int(v[1])
cont = node.contents
if len(cont) == 1 :
ddict['content'] = cont[0].replace('\xa0','0')
elif len(cont)==3:
ddict['content'] = cont[0].replace('\xa0','0')
nextdat['content'] = cont[2].replace('\xa0','0')
nextdat['left'] = int(v[1])if k[1] == 'left' else int(v[0])
#if (ddict['left']<lftmrkr) and (ddict['content']!= 'RD'):
# currrd = ddict['content']
#ddict['rd'] = currrd
dlist.append(ddict)
return dlist
def create_html(pdffile):
'''
Given a pdf file, this calls pdftohtml.exe to convert to html
'''
try:
pdftohtml = "pdftohtml.exe "
htmldir = os.path.join(_curdir,'files','HTML')
if not os.path.exists(htmldir):
os.makedirs(htmldir)
pdffile = os.path.abspath(pdffile)
fileprefix = os.path.split(pdffile)[1].split('.pdf')[0]
cmd = pdftohtml+pdffile+" -c -noframes "+os.path.join(htmldir,fileprefix+".html")
print cmd
os.system(cmd)
except Exception as e:
print str(e)
def convert_all_pdfs(pdfdir):
'''
Convenient method to loop over all the pdf files. Calls create_html
file in a loop.
'''
for f in os.listdir(pdfdir):
if f.endswith('.pdf'):
create_html(os.path.join(pdfdir,f))
def _finalize_dataframe(ddf):
'''
Does some clean-up, check sums to validate the data. This is a basic
check. Nothing is guaranteed!
'''
# do a checksum test
if 'TOTAL_PART1' in ddf.columns:
checksum = np.sum(\
np.power(
ddf[mptbltxt[1:14]].astype(int).sum(axis=1) -
ddf['TOTAL_PART1'].astype(int)
,2)
)
if checksum:
print "Failed check sum test "+str(checksum)
else:
print "Passed checksum test"
# reorder the columns
if len(ddf.columns) == 17:
ddf = ddf[mptbltxt]
else:
ddf = ddf[mptbltxt[:15]]
del ddf['RD']
ddf.index.name = 'RD'
return ddf
def create_csv(htmlfile):
'''
This creates the csv file given a html file
'''
try:
print "Converting "+htmlfile
soup = load_html(htmlfile)
pages | For a given page, here I use the position to tag it with a column number.
Then a data frame is created and the pivot_table option is construct back
a proper table to resemble the actual data set. | random_line_split |
longbeach_crime_stats.py | 094,1199)]
# This provides a mapping to the column with the text
mptbltxt = ['RD','MURDER','MANSLTR','FORCED_RAPE','ROBBERY','AGGRAV_ASSAULT',
'BURGLARY_RES','BURGLARY_COM','AUTO_BURG','GRAND_THEFT','PETTY_THEFT',
'BIKE_THEFT','AUTO_THEFT','ARSON','TOTAL_PART1','TOTAL_PART2','GRAND_TOTAL']
#this a truncate version I found for some months; The numbers here bracket the columns
maptbl_short=[(0,133),(133,194.5),(194.5,264),(264,329),(329,396),(396,466),(466,531),
(531,597),(597,667.5),(667.5,736),(736,803),(803,871),(871,938),(938,1004),(1004,1300)
]
def load_html(filename):
soup = BeautifulSoup(file(filename).read())
return soup
def grab_pages(soup):
return soup.body.find_all('div')
def cleanup_data(data):
# remove  
data = data.replace(u'\xa0','')
return data
def create_buckets(arr):
'''
Here we bin the rows based on 'top' value
'''
sarr = np.sort(arr)
# coarseness ; this is used to separate different rows
crsns = 10# np.mean(sdiff)
s = 0
prev = sarr[0]
buckets = []
for sa in sarr[1:]:
if sa-prev>crsns:
e = (sa+prev)*0.5
buckets.append((s,e))
s = e
prev = sa
#else
buckets.append((s,s+40))
return [buckets,[i for i,y in enumerate(buckets)]]
def create_frame(pnodes,mptbl,mptbltxt,lftmrkr):
'''
For a given page, here I use the position to tag it with a column number.
Then a data frame is created and the pivot_table option is construct back
a proper table to resemble the actual data set.
'''
df = pd.DataFrame(pnodes)
[tmptbl,tmptblval] = create_buckets(df.top.unique()) # buckets for top
dval = []
for t in tmptbl:
dvlst = df[(df["top"]>=t[0])&(df["top"]<=t[1])&(df['left']<lftmrkr)]['content'].values
#dval.append(dvlst[0] if len(dvlst)>0 else u'RD')
cval = dvlst[0] if len(dvlst)>0 else u'RD'
dval.append(cval)
#df[(df["top"]>=t[0])&(df["top"]<=t[1])]['rowval'] = cval
df['row'] = df['top'].map(lambda g:
[
dval[i] for i,x in enumerate(tmptbl)
if ((x[0]<=g)and(g<=x[1])) or None
][0]
)
dfs = df[df['row']!='RD']
dlst = dcnt = []
for i,v in dfs.iterrows():
if v.left<lftmrkr:
dcnt.append(v.content)
dlst.append(v.top)
dfs['column'] = dfs['left'].map(lambda g: [mptbltxt[i] for i,x in enumerate(mptbl)
if ((x[0]<=g)and(g<=x[1]))][0])
pvt = dfs.pivot(index='row',columns='column',values='content')
pvt.fillna(0,inplace=True)
for c in pvt.columns:
try:
pvt[c] = pvt[c].astype(int)
except:
pass
return pvt
'''
# this didn't work; need to check later
def grab_monthlypdfs():
domain='http://www.longbeach.gov'
url = 'http://www.longbeach.gov/police/statistics.asp'
res = requests.get(url)
sp = BeautifulSoup(res.text)
tbody = sp.find_all('tbody')
links = tbody[3].find_all('a')
pdfdir = os.path.join(_curdir,'files','PDF')
if not os.path.exists(pdfdir):
os.makedirs(pdfdir)
for l in links:
title = '_'.join( l['title'].split(" ") )
print title
try:
res = requests.get(domain+l['href'],stream=True)
pdffile = os.path.join(pdfdir,title+'.pdf')
with open(pdffile,'wb') as f:
for chunk in res.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
except Exception as e:
print 'FAILED: '+str(e)+l['title']+" "+l['href']
'''
def extract_nodes(p,lftmrkr):
'''
This is the code that extracts the beautiful soup html document into
a bunch of nodes for easy processing
'''
nodes = p.find_all('p' )
dlist = []
nextdat = {}
for node in nodes:
ddict = {}
attrs = node.attrs
attrssty = attrs.get('style','')
attrscls = attrs.get('class','')
if attrscls[0] == 'ft01' or attrscls[0] == 'ft03':
posns = _posdat.findall(attrssty)
if len(posns) == 2:
k,v = zip(*posns)
if ('top' in k ) and ('left' in k):
if nextdat != {}:
nextdat['top'] = int(v[0]) if k[0] == 'top' else int(v[1])
ddict = nextdat
nextdat = {}
ddict[k[0]] = int(v[0])
ddict[k[1]] = int(v[1])
cont = node.contents
if len(cont) == 1 :
ddict['content'] = cont[0].replace('\xa0','0')
elif len(cont)==3:
ddict['content'] = cont[0].replace('\xa0','0')
nextdat['content'] = cont[2].replace('\xa0','0')
nextdat['left'] = int(v[1])if k[1] == 'left' else int(v[0])
#if (ddict['left']<lftmrkr) and (ddict['content']!= 'RD'):
# currrd = ddict['content']
#ddict['rd'] = currrd
dlist.append(ddict)
return dlist
def create_html(pdffile):
'''
Given a pdf file, this calls pdftohtml.exe to convert to html
'''
try:
pdftohtml = "pdftohtml.exe "
htmldir = os.path.join(_curdir,'files','HTML')
if not os.path.exists(htmldir):
os.makedirs(htmldir)
pdffile = os.path.abspath(pdffile)
fileprefix = os.path.split(pdffile)[1].split('.pdf')[0]
cmd = pdftohtml+pdffile+" -c -noframes "+os.path.join(htmldir,fileprefix+".html")
print cmd
os.system(cmd)
except Exception as e:
print str(e)
def | (pdfdir):
'''
Convenient method to loop over all the pdf files. Calls create_html
file in a loop.
'''
for f in os.listdir(pdfdir):
if f.endswith('.pdf'):
create_html(os.path.join(pdfdir,f))
def _finalize_dataframe(ddf):
'''
Does some clean-up, check sums to validate the data. This is a basic
check. Nothing is guaranteed!
'''
# do a checksum test
if 'TOTAL_PART1' in ddf.columns:
checksum = np.sum(\
np.power(
ddf[mptbltxt[1:14]].astype(int).sum(axis=1) -
ddf['TOTAL_PART1'].astype(int)
,2)
)
if checksum:
print "Failed check sum test "+str(checksum)
else:
print "Passed checksum test"
# reorder the columns
if len(ddf.columns) == 17:
ddf = ddf[mptbltxt]
else:
ddf = ddf[mptbltxt[:15]]
del ddf['RD']
ddf.index.name = 'RD'
return ddf
def create_csv(htmlfile):
'''
This creates the csv file given a html file
'''
try:
print "Converting "+htmlfile
soup = load_html(htmlfile)
pages | convert_all_pdfs | identifier_name |
longbeach_crime_stats.py | 1094,1199)]
# This provides a mapping to the column with the text
mptbltxt = ['RD','MURDER','MANSLTR','FORCED_RAPE','ROBBERY','AGGRAV_ASSAULT',
'BURGLARY_RES','BURGLARY_COM','AUTO_BURG','GRAND_THEFT','PETTY_THEFT',
'BIKE_THEFT','AUTO_THEFT','ARSON','TOTAL_PART1','TOTAL_PART2','GRAND_TOTAL']
#this a truncate version I found for some months; The numbers here bracket the columns
maptbl_short=[(0,133),(133,194.5),(194.5,264),(264,329),(329,396),(396,466),(466,531),
(531,597),(597,667.5),(667.5,736),(736,803),(803,871),(871,938),(938,1004),(1004,1300)
]
def load_html(filename):
soup = BeautifulSoup(file(filename).read())
return soup
def grab_pages(soup):
return soup.body.find_all('div')
def cleanup_data(data):
# remove  
data = data.replace(u'\xa0','')
return data
def create_buckets(arr):
'''
Here we bin the rows based on 'top' value
'''
sarr = np.sort(arr)
# coarseness ; this is used to separate different rows
crsns = 10# np.mean(sdiff)
s = 0
prev = sarr[0]
buckets = []
for sa in sarr[1:]:
if sa-prev>crsns:
e = (sa+prev)*0.5
buckets.append((s,e))
s = e
prev = sa
#else
buckets.append((s,s+40))
return [buckets,[i for i,y in enumerate(buckets)]]
def create_frame(pnodes,mptbl,mptbltxt,lftmrkr):
'''
For a given page, here I use the position to tag it with a column number.
Then a data frame is created and the pivot_table option is construct back
a proper table to resemble the actual data set.
'''
df = pd.DataFrame(pnodes)
[tmptbl,tmptblval] = create_buckets(df.top.unique()) # buckets for top
dval = []
for t in tmptbl:
dvlst = df[(df["top"]>=t[0])&(df["top"]<=t[1])&(df['left']<lftmrkr)]['content'].values
#dval.append(dvlst[0] if len(dvlst)>0 else u'RD')
cval = dvlst[0] if len(dvlst)>0 else u'RD'
dval.append(cval)
#df[(df["top"]>=t[0])&(df["top"]<=t[1])]['rowval'] = cval
df['row'] = df['top'].map(lambda g:
[
dval[i] for i,x in enumerate(tmptbl)
if ((x[0]<=g)and(g<=x[1])) or None
][0]
)
dfs = df[df['row']!='RD']
dlst = dcnt = []
for i,v in dfs.iterrows():
if v.left<lftmrkr:
dcnt.append(v.content)
dlst.append(v.top)
dfs['column'] = dfs['left'].map(lambda g: [mptbltxt[i] for i,x in enumerate(mptbl)
if ((x[0]<=g)and(g<=x[1]))][0])
pvt = dfs.pivot(index='row',columns='column',values='content')
pvt.fillna(0,inplace=True)
for c in pvt.columns:
try:
pvt[c] = pvt[c].astype(int)
except:
pass
return pvt
'''
# this didn't work; need to check later
def grab_monthlypdfs():
domain='http://www.longbeach.gov'
url = 'http://www.longbeach.gov/police/statistics.asp'
res = requests.get(url)
sp = BeautifulSoup(res.text)
tbody = sp.find_all('tbody')
links = tbody[3].find_all('a')
pdfdir = os.path.join(_curdir,'files','PDF')
if not os.path.exists(pdfdir):
os.makedirs(pdfdir)
for l in links:
title = '_'.join( l['title'].split(" ") )
print title
try:
res = requests.get(domain+l['href'],stream=True)
pdffile = os.path.join(pdfdir,title+'.pdf')
with open(pdffile,'wb') as f:
for chunk in res.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
except Exception as e:
print 'FAILED: '+str(e)+l['title']+" "+l['href']
'''
def extract_nodes(p,lftmrkr):
'''
This is the code that extracts the beautiful soup html document into
a bunch of nodes for easy processing
'''
nodes = p.find_all('p' )
dlist = []
nextdat = {}
for node in nodes:
ddict = {}
attrs = node.attrs
attrssty = attrs.get('style','')
attrscls = attrs.get('class','')
if attrscls[0] == 'ft01' or attrscls[0] == 'ft03':
posns = _posdat.findall(attrssty)
if len(posns) == 2:
k,v = zip(*posns)
if ('top' in k ) and ('left' in k):
if nextdat != {}:
nextdat['top'] = int(v[0]) if k[0] == 'top' else int(v[1])
ddict = nextdat
nextdat = {}
ddict[k[0]] = int(v[0])
ddict[k[1]] = int(v[1])
cont = node.contents
if len(cont) == 1 :
ddict['content'] = cont[0].replace('\xa0','0')
elif len(cont)==3:
ddict['content'] = cont[0].replace('\xa0','0')
nextdat['content'] = cont[2].replace('\xa0','0')
nextdat['left'] = int(v[1])if k[1] == 'left' else int(v[0])
#if (ddict['left']<lftmrkr) and (ddict['content']!= 'RD'):
# currrd = ddict['content']
#ddict['rd'] = currrd
dlist.append(ddict)
return dlist
def create_html(pdffile):
'''
Given a pdf file, this calls pdftohtml.exe to convert to html
'''
try:
pdftohtml = "pdftohtml.exe "
htmldir = os.path.join(_curdir,'files','HTML')
if not os.path.exists(htmldir):
os.makedirs(htmldir)
pdffile = os.path.abspath(pdffile)
fileprefix = os.path.split(pdffile)[1].split('.pdf')[0]
cmd = pdftohtml+pdffile+" -c -noframes "+os.path.join(htmldir,fileprefix+".html")
print cmd
os.system(cmd)
except Exception as e:
print str(e)
def convert_all_pdfs(pdfdir):
'''
Convenient method to loop over all the pdf files. Calls create_html
file in a loop.
'''
for f in os.listdir(pdfdir):
if f.endswith('.pdf'):
create_html(os.path.join(pdfdir,f))
def _finalize_dataframe(ddf):
'''
Does some clean-up, check sums to validate the data. This is a basic
check. Nothing is guaranteed!
'''
# do a checksum test
if 'TOTAL_PART1' in ddf.columns:
checksum = np.sum(\
np.power(
ddf[mptbltxt[1:14]].astype(int).sum(axis=1) -
ddf['TOTAL_PART1'].astype(int)
,2)
)
if checksum:
print "Failed check sum test "+str(checksum)
else:
print "Passed checksum test"
# reorder the columns
if len(ddf.columns) == 17:
|
else:
ddf = ddf[mptbltxt[:15]]
del ddf['RD']
ddf.index.name = 'RD'
return ddf
def create_csv(htmlfile):
'''
This creates the csv file given a html file
'''
try:
print "Converting "+htmlfile
soup = load_html(htmlfile)
pages | ddf = ddf[mptbltxt] | conditional_block |
arm_Redis_Cache_create_cache_with_same_name_should_fail.nock.js | // This file has been autogenerated.
var profile = require('../../../lib/util/profile');
exports.getMockedProfile = function () {
var newProfile = new profile.Profile();
newProfile.addSubscription(new profile.Subscription({
id: '00977cdb-163f-435f-9c32-39ec8ae61f4d',
name: 'node',
user: {
name: '[email protected]',
type: 'user'
},
tenantId: '72f988bf-86f1-41af-91ab-2d7cd011db47',
state: 'Enabled',
registeredProviders: [],
isDefault: true
}, newProfile.environments['AzureCloud']));
return newProfile;
};
exports.setEnvironment = function() {
process.env['AZURE_ARM_TEST_LOCATION'] = 'West US';
process.env['AZURE_ARM_TEST_RESOURCE_GROUP'] = 'xplatTestCacheRG';
};
exports.scopes = [[function (nock) {
var result =
nock('http://management.azure.com:443')
.get('/subscriptions/00977cdb-163f-435f-9c32-39ec8ae61f4d/resourceGroups/xplatTestCacheRG/providers/Microsoft.Cache/Redis/xplatTestCache1665?api-version=2015-08-01')
.reply(200, "{\"id\":\"/subscriptions/00977cdb-163f-435f-9c32-39ec8ae61f4d/resourceGroups/xplatTestCacheRG/providers/Microsoft.Cache/Redis/xplatTestCache1665\",\"location\":\"West US\",\"name\":\"xplatTestCache1665\",\"type\":\"Microsoft.Cache/Redis\",\"tags\":{},\"properties\":{\"provisioningState\":\"Creating\",\"redisVersion\":\"3.0\",\"sku\":{\"name\":\"Standard\",\"family\":\"C\",\"capacity\":1},\"enableNonSslPort\":false,\"redisConfiguration\":{\"maxmemory-policy\":\"allkeys-lru\",\"maxclients\":\"1000\",\"maxmemory-reserved\":\"50\",\"maxmemory-delta\":\"50\"},\"accessKeys\":null,\"hostName\":\"xplatTestCache1665.redis.cache.windows.net\",\"port\":6379,\"sslPort\":6380}}", { 'cache-control': 'no-cache',
pragma: 'no-cache',
'content-length': '603',
'content-type': 'application/json; charset=utf-8',
expires: '-1',
'x-ms-request-id': '86ea59b0-b293-4e9d-8ae6-c780be43b32a',
'x-rp-server-mvid': '34bb3c7e-5d41-4769-9a39-f21a2e547245',
'strict-transport-security': 'max-age=31536000; includeSubDomains',
server: 'Microsoft-HTTPAPI/2.0',
'x-ms-ratelimit-remaining-subscription-reads': '14999',
'x-ms-correlation-request-id': 'c19c7d3b-16e5-4dd8-ae9d-e752158e9eb6',
'x-ms-routing-request-id': 'WESTUS:20151106T043332Z:c19c7d3b-16e5-4dd8-ae9d-e752158e9eb6',
date: 'Fri, 06 Nov 2015 04:33:32 GMT',
connection: 'close' });
return result; },
function (nock) {
var result =
nock('https://management.azure.com:443')
.get('/subscriptions/00977cdb-163f-435f-9c32-39ec8ae61f4d/resourceGroups/xplatTestCacheRG/providers/Microsoft.Cache/Redis/xplatTestCache1665?api-version=2015-08-01')
.reply(200, "{\"id\":\"/subscriptions/00977cdb-163f-435f-9c32-39ec8ae61f4d/resourceGroups/xplatTestCacheRG/providers/Microsoft.Cache/Redis/xplatTestCache1665\",\"location\":\"West US\",\"name\":\"xplatTestCache1665\",\"type\":\"Microsoft.Cache/Redis\",\"tags\":{},\"properties\":{\"provisioningState\":\"Creating\",\"redisVersion\":\"3.0\",\"sku\":{\"name\":\"Standard\",\"family\":\"C\",\"capacity\":1},\"enableNonSslPort\":false,\"redisConfiguration\":{\"maxmemory-policy\":\"allkeys-lru\",\"maxclients\":\"1000\",\"maxmemory-reserved\":\"50\",\"maxmemory-delta\":\"50\"},\"accessKeys\":null,\"hostName\":\"xplatTestCache1665.redis.cache.windows.net\",\"port\":6379,\"sslPort\":6380}}", { 'cache-control': 'no-cache',
pragma: 'no-cache',
'content-length': '603',
'content-type': 'application/json; charset=utf-8',
expires: '-1',
'x-ms-request-id': '86ea59b0-b293-4e9d-8ae6-c780be43b32a', | 'x-ms-ratelimit-remaining-subscription-reads': '14999',
'x-ms-correlation-request-id': 'c19c7d3b-16e5-4dd8-ae9d-e752158e9eb6',
'x-ms-routing-request-id': 'WESTUS:20151106T043332Z:c19c7d3b-16e5-4dd8-ae9d-e752158e9eb6',
date: 'Fri, 06 Nov 2015 04:33:32 GMT',
connection: 'close' });
return result; }]]; | 'x-rp-server-mvid': '34bb3c7e-5d41-4769-9a39-f21a2e547245',
'strict-transport-security': 'max-age=31536000; includeSubDomains',
server: 'Microsoft-HTTPAPI/2.0', | random_line_split |
ts-errores.ts | interface User{
name: string
userName: string
id: number
date: object
UserInfoObject()
RegisterUserData(info: object)
}
let db : any[] = []
class User implements User{
name: string
userName: string
id: number
date: object
constructor(name: string, userName: string, id: number, date: object = new Date()){
this.name = name
this.userName = userName
this.id = id
this.date = date
}
UserInfoObject() |
RegisterUserData(info?: object){
db.push(info || this.UserInfoObject())
}
}
class premiumUser extends User{
premium: boolean
constructor(name: string, userName: string, id:number, date:Object = new Date(), premium: boolean = true){
super(name,userName,id,date)
this.premium = premium
}
PremiumUserInfo(){
return {id: this.id, name: this.name, userName: this.userName, date: this.date, premium: this.premium}
}
RegisterUserData(){
super.RegisterUserData(this.PremiumUserInfo())
}
}
const jose = new premiumUser("jose","jose2018",1)
jose.RegisterUserData()
const victor = new User("victor", "victorl", 2)
victor.RegisterUserData() | {
return {id: this.id, name: this.name, userName: this.userName, date: this.date }
} | identifier_body |
ts-errores.ts | interface User{
name: string
userName: string
id: number
date: object
UserInfoObject()
RegisterUserData(info: object)
}
let db : any[] = []
class User implements User{
name: string
userName: string
id: number
date: object
constructor(name: string, userName: string, id: number, date: object = new Date()){
| this.id = id
this.date = date
}
UserInfoObject(){
return {id: this.id, name: this.name, userName: this.userName, date: this.date }
}
RegisterUserData(info?: object){
db.push(info || this.UserInfoObject())
}
}
class premiumUser extends User{
premium: boolean
constructor(name: string, userName: string, id:number, date:Object = new Date(), premium: boolean = true){
super(name,userName,id,date)
this.premium = premium
}
PremiumUserInfo(){
return {id: this.id, name: this.name, userName: this.userName, date: this.date, premium: this.premium}
}
RegisterUserData(){
super.RegisterUserData(this.PremiumUserInfo())
}
}
const jose = new premiumUser("jose","jose2018",1)
jose.RegisterUserData()
const victor = new User("victor", "victorl", 2)
victor.RegisterUserData() | this.name = name
this.userName = userName
| random_line_split |
ts-errores.ts | interface User{
name: string
userName: string
id: number
date: object
UserInfoObject()
RegisterUserData(info: object)
}
let db : any[] = []
class | implements User{
name: string
userName: string
id: number
date: object
constructor(name: string, userName: string, id: number, date: object = new Date()){
this.name = name
this.userName = userName
this.id = id
this.date = date
}
UserInfoObject(){
return {id: this.id, name: this.name, userName: this.userName, date: this.date }
}
RegisterUserData(info?: object){
db.push(info || this.UserInfoObject())
}
}
class premiumUser extends User{
premium: boolean
constructor(name: string, userName: string, id:number, date:Object = new Date(), premium: boolean = true){
super(name,userName,id,date)
this.premium = premium
}
PremiumUserInfo(){
return {id: this.id, name: this.name, userName: this.userName, date: this.date, premium: this.premium}
}
RegisterUserData(){
super.RegisterUserData(this.PremiumUserInfo())
}
}
const jose = new premiumUser("jose","jose2018",1)
jose.RegisterUserData()
const victor = new User("victor", "victorl", 2)
victor.RegisterUserData() | User | identifier_name |
attempts.js | import React, { Component } from 'react';
import { Text, View } from 'react-native';
import { connect } from 'react-redux';
import styles from '../styles';
export const Attempts = (props) => {
const { attempts } = props;
const renderLetter = (letterWithScore, idx) => {
let style = styles.attemptLetter;
if (letterWithScore.score === 2) {
style = styles.attemptLetterOnRightLocation;
} else if (letterWithScore.score === 1) {
style = styles.attemptLetterOnOtherLocation;
}
return (<Text style={ style } key={ idx }>{ letterWithScore.letter }</Text>)
};
const renderAttempt = (attempt, attemptId) => {
const letters = attempt.word.split('');
const lettersWithScore = letters.map((elem, index) => {
return {
letter: elem,
score: attempt.score[index]
}
});
return (
<View style={ styles.attempt } key={ attemptId }>
{ lettersWithScore.map((elem, index) => renderLetter(elem, `${attemptId}.${index}`)) }
</View>
); | return (
<View style={ styles.attempts }>
{ attempts.map(renderAttempt) }
</View>
);
};
const mapStateToProps = (state) => {
return {
attempts: state.game.attempts
};
};
const mapDispatchToProps = (dispatch) => {
return { };
};
/**
* Props:
* attempts: [{
* word: string
* }]
*/
export default connect(mapStateToProps, mapDispatchToProps)(Attempts); | };
| random_line_split |
Interactions.py | def fail_acquire_settings(log_printer, settings_names_dict):
"""
This method throws an exception if any setting needs to be acquired.
:param log_printer: Printer responsible for logging the messages.
:param settings: A dictionary with the settings name as key and
a list containing a description in [0] and the
name of the bears who need this setting in [1]
and following.
:raises AssertionError: If any setting is required.
:raises TypeError: If `settings_names_dict` is not a dictionary.
"""
if not isinstance(settings_names_dict, dict):
raise TypeError("The settings_names_dict parameter has to be a "
"dictionary.")
required_settings = settings_names_dict.keys()
if len(required_settings) != 0:
msg = ("During execution, we found that some required"
"settings were not provided. They are:\n")
for name, setting in settings_names_dict.items():
|
log_printer.err(msg)
raise AssertionError
| msg += "{} (from {}) - {}".format(name, setting[1], setting[0]) | conditional_block |
Interactions.py | def fail_acquire_settings(log_printer, settings_names_dict):
| for name, setting in settings_names_dict.items():
msg += "{} (from {}) - {}".format(name, setting[1], setting[0])
log_printer.err(msg)
raise AssertionError
| """
This method throws an exception if any setting needs to be acquired.
:param log_printer: Printer responsible for logging the messages.
:param settings: A dictionary with the settings name as key and
a list containing a description in [0] and the
name of the bears who need this setting in [1]
and following.
:raises AssertionError: If any setting is required.
:raises TypeError: If `settings_names_dict` is not a dictionary.
"""
if not isinstance(settings_names_dict, dict):
raise TypeError("The settings_names_dict parameter has to be a "
"dictionary.")
required_settings = settings_names_dict.keys()
if len(required_settings) != 0:
msg = ("During execution, we found that some required"
"settings were not provided. They are:\n")
| identifier_body |
Interactions.py | def | (log_printer, settings_names_dict):
"""
This method throws an exception if any setting needs to be acquired.
:param log_printer: Printer responsible for logging the messages.
:param settings: A dictionary with the settings name as key and
a list containing a description in [0] and the
name of the bears who need this setting in [1]
and following.
:raises AssertionError: If any setting is required.
:raises TypeError: If `settings_names_dict` is not a dictionary.
"""
if not isinstance(settings_names_dict, dict):
raise TypeError("The settings_names_dict parameter has to be a "
"dictionary.")
required_settings = settings_names_dict.keys()
if len(required_settings) != 0:
msg = ("During execution, we found that some required"
"settings were not provided. They are:\n")
for name, setting in settings_names_dict.items():
msg += "{} (from {}) - {}".format(name, setting[1], setting[0])
log_printer.err(msg)
raise AssertionError
| fail_acquire_settings | identifier_name |
Interactions.py | def fail_acquire_settings(log_printer, settings_names_dict):
"""
This method throws an exception if any setting needs to be acquired.
:param log_printer: Printer responsible for logging the messages.
:param settings: A dictionary with the settings name as key and
a list containing a description in [0] and the
name of the bears who need this setting in [1]
and following.
:raises AssertionError: If any setting is required.
:raises TypeError: If `settings_names_dict` is not a dictionary.
"""
if not isinstance(settings_names_dict, dict):
raise TypeError("The settings_names_dict parameter has to be a "
"dictionary.")
required_settings = settings_names_dict.keys()
if len(required_settings) != 0:
msg = ("During execution, we found that some required"
"settings were not provided. They are:\n")
for name, setting in settings_names_dict.items():
msg += "{} (from {}) - {}".format(name, setting[1], setting[0])
| log_printer.err(msg)
raise AssertionError | random_line_split |
|
issue-11709.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(dead_code)]
// ignore-pretty issue #37199
// Don't panic on blocks without results
// There are several tests in this run-pass that raised
// when this bug was opened. The cases where the compiler
// panics before the fix have a comment.
struct S {x:()}
fn test(slot: &mut Option<Box<FnMut() -> Box<FnMut()>>>) -> () {
let a = slot.take();
let _a = match a {
// `{let .. a(); }` would break
Some(mut a) => { let _a = a(); },
None => (),
};
}
fn not(b: bool) -> bool {
if b {
!b
} else |
}
pub fn main() {
// {} would break
let _r = {};
let mut slot = None;
// `{ test(...); }` would break
let _s : S = S{ x: { test(&mut slot); } };
let _b = not(true);
}
| {
// `panic!(...)` would break
panic!("Break the compiler");
} | conditional_block |
issue-11709.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(dead_code)]
// ignore-pretty issue #37199
// Don't panic on blocks without results
// There are several tests in this run-pass that raised
// when this bug was opened. The cases where the compiler
// panics before the fix have a comment.
struct S {x:()}
fn test(slot: &mut Option<Box<FnMut() -> Box<FnMut()>>>) -> () {
let a = slot.take();
let _a = match a {
// `{let .. a(); }` would break
Some(mut a) => { let _a = a(); },
None => (),
};
}
fn not(b: bool) -> bool {
if b {
!b
} else {
// `panic!(...)` would break
panic!("Break the compiler");
}
}
pub fn | () {
// {} would break
let _r = {};
let mut slot = None;
// `{ test(...); }` would break
let _s : S = S{ x: { test(&mut slot); } };
let _b = not(true);
}
| main | identifier_name |
issue-11709.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(dead_code)]
// ignore-pretty issue #37199
// Don't panic on blocks without results
// There are several tests in this run-pass that raised
// when this bug was opened. The cases where the compiler
// panics before the fix have a comment.
struct S {x:()}
fn test(slot: &mut Option<Box<FnMut() -> Box<FnMut()>>>) -> () {
let a = slot.take();
let _a = match a {
// `{let .. a(); }` would break
Some(mut a) => { let _a = a(); },
None => (),
};
}
fn not(b: bool) -> bool {
if b {
!b
} else {
// `panic!(...)` would break
panic!("Break the compiler");
}
}
pub fn main() | {
// {} would break
let _r = {};
let mut slot = None;
// `{ test(...); }` would break
let _s : S = S{ x: { test(&mut slot); } };
let _b = not(true);
} | identifier_body |
|
issue-11709.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(dead_code)]
// ignore-pretty issue #37199
// Don't panic on blocks without results
// There are several tests in this run-pass that raised
// when this bug was opened. The cases where the compiler
// panics before the fix have a comment.
struct S {x:()}
fn test(slot: &mut Option<Box<FnMut() -> Box<FnMut()>>>) -> () {
let a = slot.take();
let _a = match a {
// `{let .. a(); }` would break
Some(mut a) => { let _a = a(); },
None => (),
};
}
fn not(b: bool) -> bool {
if b {
!b
} else {
// `panic!(...)` would break
panic!("Break the compiler");
}
}
pub fn main() {
// {} would break
let _r = {};
let mut slot = None; | // `{ test(...); }` would break
let _s : S = S{ x: { test(&mut slot); } };
let _b = not(true);
} | random_line_split |
|
recommendation.rs | //! Project Gutenberg etext recommendation utilities.
/*
* ashurbanipal.web: Rust Rustful-based interface to Ashurbanipal data
* Copyright 2015 Tommy M. McGuire
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301 USA.
*/
/// An etext number.
pub type Etext = usize;
/// Ranking score.
pub type Score = f64;
pub trait Recommendation : Sync {
/// Return a vector of (etext number, score) pairs if possible.
/// The vector will be sorted by etext_number.
fn scored_results(&self, etext_no : Etext) -> Option<Vec<(Etext,Score)>>;
/// Return a vector of (etext number, score) pairs if possible,
/// sorted by score.
fn | (&self, etext_no : Etext) -> Option<Vec<(Etext,Score)>> {
self.scored_results(etext_no).map( |mut results| {
results.sort_by( |&(_,l),&(_,r)| panic_unless!("recommendation results",
option: l.partial_cmp(&r)) );
results
})
}
}
| sorted_results | identifier_name |
recommendation.rs | //! Project Gutenberg etext recommendation utilities.
/*
* ashurbanipal.web: Rust Rustful-based interface to Ashurbanipal data
* Copyright 2015 Tommy M. McGuire
* | * This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301 USA.
*/
/// An etext number.
pub type Etext = usize;
/// Ranking score.
pub type Score = f64;
pub trait Recommendation : Sync {
/// Return a vector of (etext number, score) pairs if possible.
/// The vector will be sorted by etext_number.
fn scored_results(&self, etext_no : Etext) -> Option<Vec<(Etext,Score)>>;
/// Return a vector of (etext number, score) pairs if possible,
/// sorted by score.
fn sorted_results(&self, etext_no : Etext) -> Option<Vec<(Etext,Score)>> {
self.scored_results(etext_no).map( |mut results| {
results.sort_by( |&(_,l),&(_,r)| panic_unless!("recommendation results",
option: l.partial_cmp(&r)) );
results
})
}
} | * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
* | random_line_split |
recommendation.rs | //! Project Gutenberg etext recommendation utilities.
/*
* ashurbanipal.web: Rust Rustful-based interface to Ashurbanipal data
* Copyright 2015 Tommy M. McGuire
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301 USA.
*/
/// An etext number.
pub type Etext = usize;
/// Ranking score.
pub type Score = f64;
pub trait Recommendation : Sync {
/// Return a vector of (etext number, score) pairs if possible.
/// The vector will be sorted by etext_number.
fn scored_results(&self, etext_no : Etext) -> Option<Vec<(Etext,Score)>>;
/// Return a vector of (etext number, score) pairs if possible,
/// sorted by score.
fn sorted_results(&self, etext_no : Etext) -> Option<Vec<(Etext,Score)>> |
}
| {
self.scored_results(etext_no).map( |mut results| {
results.sort_by( |&(_,l),&(_,r)| panic_unless!("recommendation results",
option: l.partial_cmp(&r)) );
results
})
} | identifier_body |
on_conflict_target_decorations.rs | use crate::backend::{Backend, SupportsOnConflictClause, SupportsOnConflictTargetDecorations};
use crate::expression::Expression;
use crate::query_builder::upsert::on_conflict_target::{ConflictTarget, NoConflictTarget};
use crate::query_builder::where_clause::{NoWhereClause, WhereAnd, WhereClause};
use crate::query_builder::{AstPass, QueryFragment, QueryResult};
use crate::sql_types::BoolOrNullableBool;
pub trait UndecoratedConflictTarget {}
impl UndecoratedConflictTarget for NoConflictTarget {}
impl<T> UndecoratedConflictTarget for ConflictTarget<T> {}
/// Interface to add information to conflict targets.
/// Designed to be open for further additions to conflict targets like constraints
pub trait DecoratableTarget<P> {
/// Output type of filter_target operation
type FilterOutput;
/// equivalent to filter of FilterDsl but aimed at conflict targets
fn filter_target(self, predicate: P) -> Self::FilterOutput;
}
#[derive(Debug)]
pub struct DecoratedConflictTarget<T, U> {
target: T,
where_clause: U,
}
impl<T, P> DecoratableTarget<P> for T
where
P: Expression,
P::SqlType: BoolOrNullableBool,
T: UndecoratedConflictTarget,
{
type FilterOutput = DecoratedConflictTarget<T, WhereClause<P>>;
fn filter_target(self, predicate: P) -> Self::FilterOutput {
DecoratedConflictTarget {
target: self,
where_clause: NoWhereClause.and(predicate),
}
}
}
impl<T, U, P> DecoratableTarget<P> for DecoratedConflictTarget<T, U>
where
P: Expression,
P::SqlType: BoolOrNullableBool,
U: WhereAnd<P>,
{
type FilterOutput = DecoratedConflictTarget<T, <U as WhereAnd<P>>::Output>;
fn filter_target(self, predicate: P) -> Self::FilterOutput {
DecoratedConflictTarget {
target: self.target,
where_clause: self.where_clause.and(predicate),
}
}
}
impl<DB, T, U> QueryFragment<DB> for DecoratedConflictTarget<T, U>
where
T: QueryFragment<DB>,
U: QueryFragment<DB>, | DB: Backend + SupportsOnConflictClause + SupportsOnConflictTargetDecorations,
{
fn walk_ast(&self, mut out: AstPass<DB>) -> QueryResult<()> {
self.target.walk_ast(out.reborrow())?;
self.where_clause.walk_ast(out.reborrow())?;
Ok(())
}
} | random_line_split |
|
on_conflict_target_decorations.rs | use crate::backend::{Backend, SupportsOnConflictClause, SupportsOnConflictTargetDecorations};
use crate::expression::Expression;
use crate::query_builder::upsert::on_conflict_target::{ConflictTarget, NoConflictTarget};
use crate::query_builder::where_clause::{NoWhereClause, WhereAnd, WhereClause};
use crate::query_builder::{AstPass, QueryFragment, QueryResult};
use crate::sql_types::BoolOrNullableBool;
pub trait UndecoratedConflictTarget {}
impl UndecoratedConflictTarget for NoConflictTarget {}
impl<T> UndecoratedConflictTarget for ConflictTarget<T> {}
/// Interface to add information to conflict targets.
/// Designed to be open for further additions to conflict targets like constraints
pub trait DecoratableTarget<P> {
/// Output type of filter_target operation
type FilterOutput;
/// equivalent to filter of FilterDsl but aimed at conflict targets
fn filter_target(self, predicate: P) -> Self::FilterOutput;
}
#[derive(Debug)]
pub struct | <T, U> {
target: T,
where_clause: U,
}
impl<T, P> DecoratableTarget<P> for T
where
P: Expression,
P::SqlType: BoolOrNullableBool,
T: UndecoratedConflictTarget,
{
type FilterOutput = DecoratedConflictTarget<T, WhereClause<P>>;
fn filter_target(self, predicate: P) -> Self::FilterOutput {
DecoratedConflictTarget {
target: self,
where_clause: NoWhereClause.and(predicate),
}
}
}
impl<T, U, P> DecoratableTarget<P> for DecoratedConflictTarget<T, U>
where
P: Expression,
P::SqlType: BoolOrNullableBool,
U: WhereAnd<P>,
{
type FilterOutput = DecoratedConflictTarget<T, <U as WhereAnd<P>>::Output>;
fn filter_target(self, predicate: P) -> Self::FilterOutput {
DecoratedConflictTarget {
target: self.target,
where_clause: self.where_clause.and(predicate),
}
}
}
impl<DB, T, U> QueryFragment<DB> for DecoratedConflictTarget<T, U>
where
T: QueryFragment<DB>,
U: QueryFragment<DB>,
DB: Backend + SupportsOnConflictClause + SupportsOnConflictTargetDecorations,
{
fn walk_ast(&self, mut out: AstPass<DB>) -> QueryResult<()> {
self.target.walk_ast(out.reborrow())?;
self.where_clause.walk_ast(out.reborrow())?;
Ok(())
}
}
| DecoratedConflictTarget | identifier_name |
on_conflict_target_decorations.rs | use crate::backend::{Backend, SupportsOnConflictClause, SupportsOnConflictTargetDecorations};
use crate::expression::Expression;
use crate::query_builder::upsert::on_conflict_target::{ConflictTarget, NoConflictTarget};
use crate::query_builder::where_clause::{NoWhereClause, WhereAnd, WhereClause};
use crate::query_builder::{AstPass, QueryFragment, QueryResult};
use crate::sql_types::BoolOrNullableBool;
pub trait UndecoratedConflictTarget {}
impl UndecoratedConflictTarget for NoConflictTarget {}
impl<T> UndecoratedConflictTarget for ConflictTarget<T> {}
/// Interface to add information to conflict targets.
/// Designed to be open for further additions to conflict targets like constraints
pub trait DecoratableTarget<P> {
/// Output type of filter_target operation
type FilterOutput;
/// equivalent to filter of FilterDsl but aimed at conflict targets
fn filter_target(self, predicate: P) -> Self::FilterOutput;
}
#[derive(Debug)]
pub struct DecoratedConflictTarget<T, U> {
target: T,
where_clause: U,
}
impl<T, P> DecoratableTarget<P> for T
where
P: Expression,
P::SqlType: BoolOrNullableBool,
T: UndecoratedConflictTarget,
{
type FilterOutput = DecoratedConflictTarget<T, WhereClause<P>>;
fn filter_target(self, predicate: P) -> Self::FilterOutput {
DecoratedConflictTarget {
target: self,
where_clause: NoWhereClause.and(predicate),
}
}
}
impl<T, U, P> DecoratableTarget<P> for DecoratedConflictTarget<T, U>
where
P: Expression,
P::SqlType: BoolOrNullableBool,
U: WhereAnd<P>,
{
type FilterOutput = DecoratedConflictTarget<T, <U as WhereAnd<P>>::Output>;
fn filter_target(self, predicate: P) -> Self::FilterOutput {
DecoratedConflictTarget {
target: self.target,
where_clause: self.where_clause.and(predicate),
}
}
}
impl<DB, T, U> QueryFragment<DB> for DecoratedConflictTarget<T, U>
where
T: QueryFragment<DB>,
U: QueryFragment<DB>,
DB: Backend + SupportsOnConflictClause + SupportsOnConflictTargetDecorations,
{
fn walk_ast(&self, mut out: AstPass<DB>) -> QueryResult<()> |
}
| {
self.target.walk_ast(out.reborrow())?;
self.where_clause.walk_ast(out.reborrow())?;
Ok(())
} | identifier_body |
node-loaders.js | 'use strict';
var fs = require('fs');
var path = require('path');
var lib = require('./lib');
var Loader = require('./loader');
var chokidar = require('chokidar');
// Node <0.7.1 compatibility
var existsSync = fs.existsSync || path.existsSync;
var FileSystemLoader = Loader.extend({
init: function(searchPaths, noWatch, noCache) {
this.pathsToNames = {};
this.noCache = !!noCache;
if(searchPaths) {
searchPaths = lib.isArray(searchPaths) ? searchPaths : [searchPaths];
// For windows, convert to forward slashes
this.searchPaths = searchPaths.map(path.normalize);
}
else {
this.searchPaths = ['.'];
}
if(!noWatch) {
// Watch all the templates in the paths and fire an event when
// they change
lib.each(this.searchPaths, function(p) {
if(existsSync(p)) {
var watcher = chokidar.watch(p);
watcher.on('all', function(event, fullname) {
fullname = path.resolve(fullname);
if(event === 'change' && fullname in this.pathsToNames) {
this.emit('update', this.pathsToNames[fullname]);
}
}.bind(this));
}
}.bind(this));
}
},
getSource: function(name) {
var fullpath = null;
var paths = this.searchPaths;
for(var i=0; i<paths.length; i++) {
var basePath = path.resolve(paths[i]);
var p = path.resolve(paths[i], name);
// Only allow the current directory and anything
// underneath it to be searched
if(p.indexOf(basePath) === 0 && existsSync(p)) {
fullpath = p;
break;
}
}
if(!fullpath) {
return null;
}
this.pathsToNames[fullpath] = name;
return { src: fs.readFileSync(fullpath, 'utf-8'),
path: fullpath,
noCache: this.noCache }; | FileSystemLoader: FileSystemLoader
}; | }
});
module.exports = { | random_line_split |
node-loaders.js | 'use strict';
var fs = require('fs');
var path = require('path');
var lib = require('./lib');
var Loader = require('./loader');
var chokidar = require('chokidar');
// Node <0.7.1 compatibility
var existsSync = fs.existsSync || path.existsSync;
var FileSystemLoader = Loader.extend({
init: function(searchPaths, noWatch, noCache) {
this.pathsToNames = {};
this.noCache = !!noCache;
if(searchPaths) {
searchPaths = lib.isArray(searchPaths) ? searchPaths : [searchPaths];
// For windows, convert to forward slashes
this.searchPaths = searchPaths.map(path.normalize);
}
else {
this.searchPaths = ['.'];
}
if(!noWatch) {
// Watch all the templates in the paths and fire an event when
// they change
lib.each(this.searchPaths, function(p) {
if(existsSync(p)) |
}.bind(this));
}
},
getSource: function(name) {
var fullpath = null;
var paths = this.searchPaths;
for(var i=0; i<paths.length; i++) {
var basePath = path.resolve(paths[i]);
var p = path.resolve(paths[i], name);
// Only allow the current directory and anything
// underneath it to be searched
if(p.indexOf(basePath) === 0 && existsSync(p)) {
fullpath = p;
break;
}
}
if(!fullpath) {
return null;
}
this.pathsToNames[fullpath] = name;
return { src: fs.readFileSync(fullpath, 'utf-8'),
path: fullpath,
noCache: this.noCache };
}
});
module.exports = {
FileSystemLoader: FileSystemLoader
};
| {
var watcher = chokidar.watch(p);
watcher.on('all', function(event, fullname) {
fullname = path.resolve(fullname);
if(event === 'change' && fullname in this.pathsToNames) {
this.emit('update', this.pathsToNames[fullname]);
}
}.bind(this));
} | conditional_block |
visualstudio_multi.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from conans.model import Generator
from conans.client.generators import VisualStudioGenerator
from xml.dom import minidom
from conans.util.files import load
class VisualStudioMultiGenerator(Generator):
template = """<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ImportGroup Label="PropertySheets" >
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup />
<ItemDefinitionGroup />
<ItemGroup />
</Project>
"""
@property
def filename(self):
|
@property
def content(self):
configuration = str(self.conanfile.settings.build_type)
platform = {'x86': 'Win32', 'x86_64': 'x64'}.get(str(self.conanfile.settings.arch))
vsversion = str(self.settings.compiler.version)
# there is also ClCompile.RuntimeLibrary, but it's handling is a bit complicated, so skipping for now
condition = " '$(Configuration)' == '%s' And '$(Platform)' == '%s' And '$(VisualStudioVersion)' == '%s' "\
% (configuration, platform, vsversion + '.0')
name_multi = 'conanbuildinfo_multi.props'
name_current = ('conanbuildinfo_%s_%s_%s.props' % (configuration, platform, vsversion)).lower()
multi_path = os.path.join(self.output_path, name_multi)
if os.path.isfile(multi_path):
content_multi = load(multi_path)
else:
content_multi = self.template
dom = minidom.parseString(content_multi)
import_node = dom.createElement('Import')
import_node.setAttribute('Condition', condition)
import_node.setAttribute('Project', name_current)
import_group = dom.getElementsByTagName('ImportGroup')[0]
children = import_group.getElementsByTagName("Import")
for node in children:
if name_current == node.getAttribute("Project") and condition == node.getAttribute("Condition"):
break
else:
import_group.appendChild(import_node)
content_multi = dom.toprettyxml()
content_multi = "\n".join(line for line in content_multi.splitlines() if line.strip())
vs_generator = VisualStudioGenerator(self.conanfile)
content_current = vs_generator.content
return {name_multi: content_multi, name_current: content_current}
| pass | identifier_body |
visualstudio_multi.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from conans.model import Generator
from conans.client.generators import VisualStudioGenerator
from xml.dom import minidom
from conans.util.files import load
class VisualStudioMultiGenerator(Generator):
template = """<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ImportGroup Label="PropertySheets" >
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup />
<ItemDefinitionGroup />
<ItemGroup />
</Project>
"""
@property
def filename(self):
pass
@property
def content(self):
configuration = str(self.conanfile.settings.build_type)
platform = {'x86': 'Win32', 'x86_64': 'x64'}.get(str(self.conanfile.settings.arch))
vsversion = str(self.settings.compiler.version)
# there is also ClCompile.RuntimeLibrary, but it's handling is a bit complicated, so skipping for now
condition = " '$(Configuration)' == '%s' And '$(Platform)' == '%s' And '$(VisualStudioVersion)' == '%s' "\
% (configuration, platform, vsversion + '.0') |
name_multi = 'conanbuildinfo_multi.props'
name_current = ('conanbuildinfo_%s_%s_%s.props' % (configuration, platform, vsversion)).lower()
multi_path = os.path.join(self.output_path, name_multi)
if os.path.isfile(multi_path):
content_multi = load(multi_path)
else:
content_multi = self.template
dom = minidom.parseString(content_multi)
import_node = dom.createElement('Import')
import_node.setAttribute('Condition', condition)
import_node.setAttribute('Project', name_current)
import_group = dom.getElementsByTagName('ImportGroup')[0]
children = import_group.getElementsByTagName("Import")
for node in children:
if name_current == node.getAttribute("Project") and condition == node.getAttribute("Condition"):
break
else:
import_group.appendChild(import_node)
content_multi = dom.toprettyxml()
content_multi = "\n".join(line for line in content_multi.splitlines() if line.strip())
vs_generator = VisualStudioGenerator(self.conanfile)
content_current = vs_generator.content
return {name_multi: content_multi, name_current: content_current} | random_line_split |
|
visualstudio_multi.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from conans.model import Generator
from conans.client.generators import VisualStudioGenerator
from xml.dom import minidom
from conans.util.files import load
class VisualStudioMultiGenerator(Generator):
template = """<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ImportGroup Label="PropertySheets" >
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup />
<ItemDefinitionGroup />
<ItemGroup />
</Project>
"""
@property
def filename(self):
pass
@property
def content(self):
configuration = str(self.conanfile.settings.build_type)
platform = {'x86': 'Win32', 'x86_64': 'x64'}.get(str(self.conanfile.settings.arch))
vsversion = str(self.settings.compiler.version)
# there is also ClCompile.RuntimeLibrary, but it's handling is a bit complicated, so skipping for now
condition = " '$(Configuration)' == '%s' And '$(Platform)' == '%s' And '$(VisualStudioVersion)' == '%s' "\
% (configuration, platform, vsversion + '.0')
name_multi = 'conanbuildinfo_multi.props'
name_current = ('conanbuildinfo_%s_%s_%s.props' % (configuration, platform, vsversion)).lower()
multi_path = os.path.join(self.output_path, name_multi)
if os.path.isfile(multi_path):
content_multi = load(multi_path)
else:
content_multi = self.template
dom = minidom.parseString(content_multi)
import_node = dom.createElement('Import')
import_node.setAttribute('Condition', condition)
import_node.setAttribute('Project', name_current)
import_group = dom.getElementsByTagName('ImportGroup')[0]
children = import_group.getElementsByTagName("Import")
for node in children:
if name_current == node.getAttribute("Project") and condition == node.getAttribute("Condition"):
|
else:
import_group.appendChild(import_node)
content_multi = dom.toprettyxml()
content_multi = "\n".join(line for line in content_multi.splitlines() if line.strip())
vs_generator = VisualStudioGenerator(self.conanfile)
content_current = vs_generator.content
return {name_multi: content_multi, name_current: content_current}
| break | conditional_block |
visualstudio_multi.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from conans.model import Generator
from conans.client.generators import VisualStudioGenerator
from xml.dom import minidom
from conans.util.files import load
class VisualStudioMultiGenerator(Generator):
template = """<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ImportGroup Label="PropertySheets" >
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup />
<ItemDefinitionGroup />
<ItemGroup />
</Project>
"""
@property
def | (self):
pass
@property
def content(self):
configuration = str(self.conanfile.settings.build_type)
platform = {'x86': 'Win32', 'x86_64': 'x64'}.get(str(self.conanfile.settings.arch))
vsversion = str(self.settings.compiler.version)
# there is also ClCompile.RuntimeLibrary, but it's handling is a bit complicated, so skipping for now
condition = " '$(Configuration)' == '%s' And '$(Platform)' == '%s' And '$(VisualStudioVersion)' == '%s' "\
% (configuration, platform, vsversion + '.0')
name_multi = 'conanbuildinfo_multi.props'
name_current = ('conanbuildinfo_%s_%s_%s.props' % (configuration, platform, vsversion)).lower()
multi_path = os.path.join(self.output_path, name_multi)
if os.path.isfile(multi_path):
content_multi = load(multi_path)
else:
content_multi = self.template
dom = minidom.parseString(content_multi)
import_node = dom.createElement('Import')
import_node.setAttribute('Condition', condition)
import_node.setAttribute('Project', name_current)
import_group = dom.getElementsByTagName('ImportGroup')[0]
children = import_group.getElementsByTagName("Import")
for node in children:
if name_current == node.getAttribute("Project") and condition == node.getAttribute("Condition"):
break
else:
import_group.appendChild(import_node)
content_multi = dom.toprettyxml()
content_multi = "\n".join(line for line in content_multi.splitlines() if line.strip())
vs_generator = VisualStudioGenerator(self.conanfile)
content_current = vs_generator.content
return {name_multi: content_multi, name_current: content_current}
| filename | identifier_name |
createproduct.js | /* jshint node: true */
'use strict';
var util = require('util');
var _ = require('underscore');
var defaults = require('../defaults');
var options = require('../options');
var descriptor = defaults.defaultDescriptor({
'productName': {
name: 'Product Name',
required: true
},
'productDesc': {
name: 'Description'
},
'proxies': {
name: 'API Proxies',
required: true
},
'environments':{
name: 'Environments',
required: true
},
'approvalType': {
name: 'Approval Type',
required: true
},
'quota' : {
name: 'Quota',
},
'quotaInterval':{
name: 'Quota Interval'
},
'quotaTimeUnit': {
name:'Quota Time Unit'
},
'scopes': {
name: "Scope",
}
});
module.exports.descriptor = descriptor;
module.exports.run = function(opts, cb) {
options.validateSync(opts, descriptor);
if (opts.debug) {
console.log('createProduct: %j', opts);
}
var request = defaults.defaultRequest(opts);
createProduct(opts, request, function(err, results) {
if (err) {
cb(err);
} else {
if (opts.debug) {
console.log('results: %j', results);
}
cb(undefined, results);
}
});
};
function createProduct(opts,request,done){
var product = {
"approvalType": "auto",
"attributes":
[ {"name": "access", "value": "public"} ],
"scopes": []
}
product.name = opts.productName
product.displayName = opts.productName
product.description = opts.productDesc
product.proxies = []
if(opts.proxies){
var split = opts.proxies.split(',')
split.forEach(function(s){
if(s && s.trim()!= '') {
product.proxies.push(s.trim())
}
})
}
product.apiResources = []
if(opts.apiResources){
var split = opts.apiResources.split(',')
split.forEach(function(s){
if(s && s.trim()!= '') {
product.apiResources.push(s.trim())
}
})
}
if(opts.scopes){
var split = opts.scopes.split(',')
split.forEach(function(s){
if(s && s.trim()!= '') {
product.scopes.push(s.trim())
}
})
}
product.environments = []
if(opts.environments){
var split = opts.environments.split(',')
split.forEach(function(s){
if(s && s.trim()!= '') {
product.environments.push(s.trim())
}
})
}
if(opts.quota && opts.quotaInterval && opts.quotaTimeUnit) |
var uri = util.format('%s/v1/o/%s/apiproducts', opts.baseuri, opts.organization);
request({
uri: uri,
method:'POST',
body: product,
json:true
},function(err,res,body){
var jsonBody = body
if(err){
if (opts.debug) {
console.log('Error occured %s', err);
}
done(err)
}else if (res.statusCode === 201) {
if (opts.verbose) {
console.log('Create successful');
}
if (opts.debug) {
console.log('%s', body);
}
done(undefined, jsonBody);
}else {
if (opts.verbose) {
console.error('Create Product result: %j', body);
}
var errMsg;
if (jsonBody && (jsonBody.message)) {
errMsg = jsonBody.message;
} else {
errMsg = util.format('Create Product failed with status code %d', res.statusCode);
}
done(new Error(errMsg));
}
})
}
| {
product.quota = opts.quota
product.quotaInterval = opts.quotaInterval
product.quotaTimeUnit = opts.quotaTimeUnit
} | conditional_block |
createproduct.js | /* jshint node: true */
'use strict';
var util = require('util');
var _ = require('underscore');
var defaults = require('../defaults');
var options = require('../options');
var descriptor = defaults.defaultDescriptor({
'productName': {
name: 'Product Name',
required: true
},
'productDesc': {
name: 'Description'
},
'proxies': {
name: 'API Proxies',
required: true
},
'environments':{
name: 'Environments',
required: true
},
'approvalType': {
name: 'Approval Type',
required: true
},
'quota' : {
name: 'Quota',
},
'quotaInterval':{
name: 'Quota Interval'
},
'quotaTimeUnit': {
name:'Quota Time Unit'
},
'scopes': {
name: "Scope",
}
});
module.exports.descriptor = descriptor;
module.exports.run = function(opts, cb) {
options.validateSync(opts, descriptor);
if (opts.debug) {
console.log('createProduct: %j', opts);
}
var request = defaults.defaultRequest(opts);
createProduct(opts, request, function(err, results) {
if (err) {
cb(err);
} else {
if (opts.debug) {
console.log('results: %j', results);
}
cb(undefined, results);
}
});
};
function | (opts,request,done){
var product = {
"approvalType": "auto",
"attributes":
[ {"name": "access", "value": "public"} ],
"scopes": []
}
product.name = opts.productName
product.displayName = opts.productName
product.description = opts.productDesc
product.proxies = []
if(opts.proxies){
var split = opts.proxies.split(',')
split.forEach(function(s){
if(s && s.trim()!= '') {
product.proxies.push(s.trim())
}
})
}
product.apiResources = []
if(opts.apiResources){
var split = opts.apiResources.split(',')
split.forEach(function(s){
if(s && s.trim()!= '') {
product.apiResources.push(s.trim())
}
})
}
if(opts.scopes){
var split = opts.scopes.split(',')
split.forEach(function(s){
if(s && s.trim()!= '') {
product.scopes.push(s.trim())
}
})
}
product.environments = []
if(opts.environments){
var split = opts.environments.split(',')
split.forEach(function(s){
if(s && s.trim()!= '') {
product.environments.push(s.trim())
}
})
}
if(opts.quota && opts.quotaInterval && opts.quotaTimeUnit){
product.quota = opts.quota
product.quotaInterval = opts.quotaInterval
product.quotaTimeUnit = opts.quotaTimeUnit
}
var uri = util.format('%s/v1/o/%s/apiproducts', opts.baseuri, opts.organization);
request({
uri: uri,
method:'POST',
body: product,
json:true
},function(err,res,body){
var jsonBody = body
if(err){
if (opts.debug) {
console.log('Error occured %s', err);
}
done(err)
}else if (res.statusCode === 201) {
if (opts.verbose) {
console.log('Create successful');
}
if (opts.debug) {
console.log('%s', body);
}
done(undefined, jsonBody);
}else {
if (opts.verbose) {
console.error('Create Product result: %j', body);
}
var errMsg;
if (jsonBody && (jsonBody.message)) {
errMsg = jsonBody.message;
} else {
errMsg = util.format('Create Product failed with status code %d', res.statusCode);
}
done(new Error(errMsg));
}
})
}
| createProduct | identifier_name |
createproduct.js | /* jshint node: true */
'use strict';
var util = require('util');
var _ = require('underscore');
var defaults = require('../defaults');
var options = require('../options');
var descriptor = defaults.defaultDescriptor({
'productName': {
name: 'Product Name',
required: true
},
'productDesc': {
name: 'Description'
},
'proxies': {
name: 'API Proxies',
required: true
},
'environments':{
name: 'Environments',
required: true
},
'approvalType': {
name: 'Approval Type',
required: true
},
'quota' : {
name: 'Quota',
},
'quotaInterval':{
name: 'Quota Interval'
},
'quotaTimeUnit': {
name:'Quota Time Unit'
},
'scopes': {
name: "Scope",
}
});
module.exports.descriptor = descriptor;
module.exports.run = function(opts, cb) {
options.validateSync(opts, descriptor);
if (opts.debug) {
console.log('createProduct: %j', opts);
}
var request = defaults.defaultRequest(opts);
createProduct(opts, request, function(err, results) {
if (err) {
cb(err);
} else {
if (opts.debug) {
console.log('results: %j', results);
}
cb(undefined, results);
}
});
};
function createProduct(opts,request,done) | product.apiResources = []
if(opts.apiResources){
var split = opts.apiResources.split(',')
split.forEach(function(s){
if(s && s.trim()!= '') {
product.apiResources.push(s.trim())
}
})
}
if(opts.scopes){
var split = opts.scopes.split(',')
split.forEach(function(s){
if(s && s.trim()!= '') {
product.scopes.push(s.trim())
}
})
}
product.environments = []
if(opts.environments){
var split = opts.environments.split(',')
split.forEach(function(s){
if(s && s.trim()!= '') {
product.environments.push(s.trim())
}
})
}
if(opts.quota && opts.quotaInterval && opts.quotaTimeUnit){
product.quota = opts.quota
product.quotaInterval = opts.quotaInterval
product.quotaTimeUnit = opts.quotaTimeUnit
}
var uri = util.format('%s/v1/o/%s/apiproducts', opts.baseuri, opts.organization);
request({
uri: uri,
method:'POST',
body: product,
json:true
},function(err,res,body){
var jsonBody = body
if(err){
if (opts.debug) {
console.log('Error occured %s', err);
}
done(err)
}else if (res.statusCode === 201) {
if (opts.verbose) {
console.log('Create successful');
}
if (opts.debug) {
console.log('%s', body);
}
done(undefined, jsonBody);
}else {
if (opts.verbose) {
console.error('Create Product result: %j', body);
}
var errMsg;
if (jsonBody && (jsonBody.message)) {
errMsg = jsonBody.message;
} else {
errMsg = util.format('Create Product failed with status code %d', res.statusCode);
}
done(new Error(errMsg));
}
})
}
| {
var product = {
"approvalType": "auto",
"attributes":
[ {"name": "access", "value": "public"} ],
"scopes": []
}
product.name = opts.productName
product.displayName = opts.productName
product.description = opts.productDesc
product.proxies = []
if(opts.proxies){
var split = opts.proxies.split(',')
split.forEach(function(s){
if(s && s.trim()!= '') {
product.proxies.push(s.trim())
}
})
} | identifier_body |
createproduct.js | /* jshint node: true */
'use strict';
var util = require('util');
var _ = require('underscore');
var defaults = require('../defaults');
var options = require('../options');
var descriptor = defaults.defaultDescriptor({
'productName': {
name: 'Product Name',
required: true
},
'productDesc': {
name: 'Description'
},
'proxies': {
name: 'API Proxies',
required: true
},
'environments':{
name: 'Environments',
required: true
},
'approvalType': {
name: 'Approval Type',
required: true
},
'quota' : {
name: 'Quota',
},
'quotaInterval':{
name: 'Quota Interval'
},
'quotaTimeUnit': {
name:'Quota Time Unit'
},
'scopes': {
name: "Scope",
}
});
module.exports.descriptor = descriptor;
module.exports.run = function(opts, cb) {
options.validateSync(opts, descriptor);
if (opts.debug) {
console.log('createProduct: %j', opts);
}
var request = defaults.defaultRequest(opts);
createProduct(opts, request, function(err, results) {
if (err) {
cb(err);
} else {
if (opts.debug) {
console.log('results: %j', results);
}
cb(undefined, results);
}
});
};
function createProduct(opts,request,done){
var product = {
"approvalType": "auto",
"attributes":
[ {"name": "access", "value": "public"} ],
"scopes": []
}
product.name = opts.productName
product.displayName = opts.productName
product.description = opts.productDesc
product.proxies = []
if(opts.proxies){
var split = opts.proxies.split(',')
split.forEach(function(s){
if(s && s.trim()!= '') {
product.proxies.push(s.trim())
} | var split = opts.apiResources.split(',')
split.forEach(function(s){
if(s && s.trim()!= '') {
product.apiResources.push(s.trim())
}
})
}
if(opts.scopes){
var split = opts.scopes.split(',')
split.forEach(function(s){
if(s && s.trim()!= '') {
product.scopes.push(s.trim())
}
})
}
product.environments = []
if(opts.environments){
var split = opts.environments.split(',')
split.forEach(function(s){
if(s && s.trim()!= '') {
product.environments.push(s.trim())
}
})
}
if(opts.quota && opts.quotaInterval && opts.quotaTimeUnit){
product.quota = opts.quota
product.quotaInterval = opts.quotaInterval
product.quotaTimeUnit = opts.quotaTimeUnit
}
var uri = util.format('%s/v1/o/%s/apiproducts', opts.baseuri, opts.organization);
request({
uri: uri,
method:'POST',
body: product,
json:true
},function(err,res,body){
var jsonBody = body
if(err){
if (opts.debug) {
console.log('Error occured %s', err);
}
done(err)
}else if (res.statusCode === 201) {
if (opts.verbose) {
console.log('Create successful');
}
if (opts.debug) {
console.log('%s', body);
}
done(undefined, jsonBody);
}else {
if (opts.verbose) {
console.error('Create Product result: %j', body);
}
var errMsg;
if (jsonBody && (jsonBody.message)) {
errMsg = jsonBody.message;
} else {
errMsg = util.format('Create Product failed with status code %d', res.statusCode);
}
done(new Error(errMsg));
}
})
} | })
}
product.apiResources = []
if(opts.apiResources){ | random_line_split |
test_mxne_inverse.py | None, 'mult', 'augment', 'sign', 'zero', 'less'))
def test_split_gof_basic(mod):
"""Test splitting the goodness of fit."""
# first a trivial case
gain = np.array([[0., 1., 1.], [1., 1., 0.]]).T
M = np.ones((3, 1))
X = np.ones((2, 1))
M_est = gain @ X
assert_allclose(M_est, np.array([[1., 2., 1.]]).T) # a reasonable estimate
if mod == 'mult':
gain *= [1., -0.5]
X[1] *= -2
elif mod == 'augment':
gain = np.concatenate((gain, np.zeros((3, 1))), axis=1)
X = np.concatenate((X, [[1.]]))
elif mod == 'sign':
gain[1] *= -1
M[1] *= -1
M_est[1] *= -1
elif mod in ('zero', 'less'):
gain = np.array([[1, 1., 1.], [1., 1., 1.]]).T
if mod == 'zero':
X[:, 0] = [1., 0.]
else:
X[:, 0] = [1., 0.5]
M_est = gain @ X
else:
assert mod is None
res = M - M_est
gof = 100 * (1. - (res * res).sum() / (M * M).sum())
gof_split = _split_gof(M, X, gain)
assert_allclose(gof_split.sum(), gof)
want = gof_split[[0, 0]]
if mod == 'augment':
want = np.concatenate((want, [[0]]))
if mod in ('mult', 'less'):
assert_array_less(gof_split[1], gof_split[0])
elif mod == 'zero':
assert_allclose(gof_split[0], gof_split.sum(0))
assert_allclose(gof_split[1], 0., atol=1e-6)
else:
assert_allclose(gof_split, want, atol=1e-12)
@testing.requires_testing_data
@pytest.mark.parametrize('idx, weights', [
# empirically determined approximately orthogonal columns: 0, 15157, 19448
([0], [1]),
([0, 15157], [1, 1]),
([0, 15157], [1, 3]),
([0, 15157], [5, -1]),
([0, 15157, 19448], [1, 1, 1]),
([0, 15157, 19448], [1e-2, 1, 5]),
])
def test_split_gof_meg(forward, idx, weights):
"""Test GOF splitting on MEG data."""
gain = forward['sol']['data'][:, idx]
# close to orthogonal
norms = np.linalg.norm(gain, axis=0)
triu = np.triu_indices(len(idx), 1)
prods = np.abs(np.dot(gain.T, gain) / np.outer(norms, norms))[triu]
assert_array_less(prods, 5e-3) # approximately orthogonal
# first, split across time (one dipole per time point)
M = gain * weights
gof_split = _split_gof(M, np.diag(weights), gain)
assert_allclose(gof_split.sum(0), 100., atol=1e-5) # all sum to 100
assert_allclose(gof_split, 100 * np.eye(len(weights)), atol=1) # loc
# next, summed to a single time point (all dipoles active at one time pt)
weights = np.array(weights)[:, np.newaxis]
x = gain @ weights
assert x.shape == (gain.shape[0], 1)
gof_split = _split_gof(x, weights, gain)
want = (norms * weights.T).T ** 2
want = 100 * want / want.sum()
assert_allclose(gof_split, want, atol=1e-3, rtol=1e-2)
assert_allclose(gof_split.sum(), 100, rtol=1e-5)
@pytest.mark.parametrize('n_sensors, n_dipoles, n_times', [
(10, 15, 7),
(20, 60, 20),
])
@pytest.mark.parametrize('nnz', [2, 4])
@pytest.mark.parametrize('corr', [0.75])
@pytest.mark.parametrize('n_orient', [1, 3])
def test_mxne_inverse_sure_synthetic(n_sensors, n_dipoles, n_times, nnz, corr,
n_orient, snr=4):
"""Tests SURE criterion for automatic alpha selection on synthetic data."""
rng = np.random.RandomState(0)
sigma = np.sqrt(1 - corr ** 2)
U = rng.randn(n_sensors)
# generate gain matrix
G = np.empty([n_sensors, n_dipoles], order='F')
G[:, :n_orient] = np.expand_dims(U, axis=-1)
n_dip_per_pos = n_dipoles // n_orient
for j in range(1, n_dip_per_pos):
U *= corr
U += sigma * rng.randn(n_sensors)
G[:, j * n_orient:(j + 1) * n_orient] = np.expand_dims(U, axis=-1)
# generate coefficient matrix
support = rng.choice(n_dip_per_pos, nnz, replace=False)
X = np.zeros((n_dipoles, n_times))
for k in support:
X[k * n_orient:(k + 1) * n_orient, :] = rng.normal(
size=(n_orient, n_times))
# generate measurement matrix
M = G @ X
noise = rng.randn(n_sensors, n_times)
sigma = 1 / np.linalg.norm(noise) * np.linalg.norm(M) / snr
M += sigma * noise
# inverse modeling with sure
alpha_max = norm_l2inf(np.dot(G.T, M), n_orient, copy=False)
alpha_grid = np.geomspace(alpha_max, alpha_max / 10, num=15)
_, active_set, _ = _compute_mxne_sure(M, G, alpha_grid, sigma=sigma,
n_mxne_iter=5, maxit=3000, tol=1e-4,
n_orient=n_orient,
active_set_size=10, debias=True,
solver="auto", dgap_freq=10,
random_state=0, verbose=False)
assert np.count_nonzero(active_set, axis=-1) == n_orient * nnz
@pytest.mark.slowtest # slow on Azure
@testing.requires_testing_data
def test_mxne_inverse_sure():
"""Tests SURE criterion for automatic alpha selection on MEG data."""
def data_fun(times):
data = np.zeros(times.shape)
data[times >= 0] = 50e-9
return data
n_dipoles = 2
raw = mne.io.read_raw_fif(fname_raw)
info = mne.io.read_info(fname_data)
with info._unlock():
info['projs'] = []
noise_cov = mne.make_ad_hoc_cov(info)
label_names = ['Aud-lh', 'Aud-rh']
labels = [
mne.read_label(data_path / 'MEG' / 'sample' / 'labels' / f'{ln}.label')
for ln in label_names]
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
forward = mne.read_forward_solution(fname_fwd)
forward = mne.pick_types_forward(forward, meg="grad", eeg=False,
exclude=raw.info['bads'])
times = np.arange(100, dtype=np.float64) / raw.info['sfreq'] - 0.1
stc = simulate_sparse_stc(forward['src'], n_dipoles=n_dipoles, times=times,
random_state=1, labels=labels, data_fun=data_fun)
nave = 30
evoked = simulate_evoked(forward, stc, info, noise_cov, nave=nave,
use_cps=False, iir_filter=None)
evoked = evoked.crop(tmin=0, tmax=10e-3)
stc_ = mixed_norm(evoked, forward, noise_cov, loose=0.9, n_mxne_iter=5,
depth=0.9)
assert_array_equal(stc_.vertices, stc.vertices)
@pytest.mark.slowtest # slow on Azure
@testing.requires_testing_data
def | test_mxne_inverse_empty | identifier_name |
|
test_mxne_inverse.py | =True)
dip_mxne = mixed_norm(evoked_dip, fwd, cov, alpha=80,
n_mxne_iter=1, maxit=30, tol=1e-8,
active_set_size=10, return_as_dipoles=True)
amp_max = [np.max(d.amplitude) for d in dip_mxne]
dip_mxne = dip_mxne[np.argmax(amp_max)]
assert dip_mxne.pos[0] in src[0]['rr'][stc.vertices[0]]
dip_fit = mne.fit_dipole(evoked_dip, cov, sphere)[0]
assert np.abs(np.dot(dip_fit.ori[0], dip_mxne.ori[0])) > 0.99
dist = 1000 * np.linalg.norm(dip_fit.pos[0] - dip_mxne.pos[0])
assert dist < 4. # within 4 mm
# Do with TF-MxNE for test memory savings
alpha = 60. # overall regularization parameter
l1_ratio = 0.01 # temporal regularization proportion
stc, _ = tf_mixed_norm(evoked, fwd, cov, maxit=3, tol=1e-4,
tstep=16, wsize=32, window=0.1, alpha=alpha,
l1_ratio=l1_ratio, return_residual=True)
assert isinstance(stc, VolSourceEstimate)
assert_array_almost_equal(stc.times, evoked.times, 5)
@pytest.mark.parametrize('mod', (
None, 'mult', 'augment', 'sign', 'zero', 'less'))
def test_split_gof_basic(mod):
"""Test splitting the goodness of fit."""
# first a trivial case
gain = np.array([[0., 1., 1.], [1., 1., 0.]]).T
M = np.ones((3, 1))
X = np.ones((2, 1))
M_est = gain @ X
assert_allclose(M_est, np.array([[1., 2., 1.]]).T) # a reasonable estimate
if mod == 'mult':
gain *= [1., -0.5]
X[1] *= -2
elif mod == 'augment':
gain = np.concatenate((gain, np.zeros((3, 1))), axis=1)
X = np.concatenate((X, [[1.]]))
elif mod == 'sign':
gain[1] *= -1
M[1] *= -1
M_est[1] *= -1
elif mod in ('zero', 'less'):
gain = np.array([[1, 1., 1.], [1., 1., 1.]]).T
if mod == 'zero':
X[:, 0] = [1., 0.]
else:
X[:, 0] = [1., 0.5]
M_est = gain @ X
else:
assert mod is None
res = M - M_est
gof = 100 * (1. - (res * res).sum() / (M * M).sum())
gof_split = _split_gof(M, X, gain)
assert_allclose(gof_split.sum(), gof)
want = gof_split[[0, 0]]
if mod == 'augment':
want = np.concatenate((want, [[0]]))
if mod in ('mult', 'less'):
assert_array_less(gof_split[1], gof_split[0])
elif mod == 'zero':
assert_allclose(gof_split[0], gof_split.sum(0))
assert_allclose(gof_split[1], 0., atol=1e-6)
else:
assert_allclose(gof_split, want, atol=1e-12)
@testing.requires_testing_data
@pytest.mark.parametrize('idx, weights', [
# empirically determined approximately orthogonal columns: 0, 15157, 19448
([0], [1]),
([0, 15157], [1, 1]),
([0, 15157], [1, 3]),
([0, 15157], [5, -1]),
([0, 15157, 19448], [1, 1, 1]),
([0, 15157, 19448], [1e-2, 1, 5]),
])
def test_split_gof_meg(forward, idx, weights):
"""Test GOF splitting on MEG data."""
gain = forward['sol']['data'][:, idx]
# close to orthogonal
norms = np.linalg.norm(gain, axis=0)
triu = np.triu_indices(len(idx), 1)
prods = np.abs(np.dot(gain.T, gain) / np.outer(norms, norms))[triu]
assert_array_less(prods, 5e-3) # approximately orthogonal
# first, split across time (one dipole per time point)
M = gain * weights
gof_split = _split_gof(M, np.diag(weights), gain)
assert_allclose(gof_split.sum(0), 100., atol=1e-5) # all sum to 100
assert_allclose(gof_split, 100 * np.eye(len(weights)), atol=1) # loc
# next, summed to a single time point (all dipoles active at one time pt)
weights = np.array(weights)[:, np.newaxis]
x = gain @ weights
assert x.shape == (gain.shape[0], 1)
gof_split = _split_gof(x, weights, gain)
want = (norms * weights.T).T ** 2
want = 100 * want / want.sum()
assert_allclose(gof_split, want, atol=1e-3, rtol=1e-2)
assert_allclose(gof_split.sum(), 100, rtol=1e-5)
@pytest.mark.parametrize('n_sensors, n_dipoles, n_times', [
(10, 15, 7),
(20, 60, 20),
])
@pytest.mark.parametrize('nnz', [2, 4])
@pytest.mark.parametrize('corr', [0.75])
@pytest.mark.parametrize('n_orient', [1, 3])
def test_mxne_inverse_sure_synthetic(n_sensors, n_dipoles, n_times, nnz, corr,
n_orient, snr=4):
"""Tests SURE criterion for automatic alpha selection on synthetic data."""
rng = np.random.RandomState(0)
sigma = np.sqrt(1 - corr ** 2)
U = rng.randn(n_sensors)
# generate gain matrix
G = np.empty([n_sensors, n_dipoles], order='F')
G[:, :n_orient] = np.expand_dims(U, axis=-1)
n_dip_per_pos = n_dipoles // n_orient
for j in range(1, n_dip_per_pos):
U *= corr
U += sigma * rng.randn(n_sensors)
G[:, j * n_orient:(j + 1) * n_orient] = np.expand_dims(U, axis=-1)
# generate coefficient matrix
support = rng.choice(n_dip_per_pos, nnz, replace=False)
X = np.zeros((n_dipoles, n_times))
for k in support:
X[k * n_orient:(k + 1) * n_orient, :] = rng.normal(
size=(n_orient, n_times))
# generate measurement matrix
M = G @ X
noise = rng.randn(n_sensors, n_times)
sigma = 1 / np.linalg.norm(noise) * np.linalg.norm(M) / snr
M += sigma * noise
# inverse modeling with sure
alpha_max = norm_l2inf(np.dot(G.T, M), n_orient, copy=False)
alpha_grid = np.geomspace(alpha_max, alpha_max / 10, num=15)
_, active_set, _ = _compute_mxne_sure(M, G, alpha_grid, sigma=sigma,
n_mxne_iter=5, maxit=3000, tol=1e-4,
n_orient=n_orient,
active_set_size=10, debias=True,
solver="auto", dgap_freq=10,
random_state=0, verbose=False)
assert np.count_nonzero(active_set, axis=-1) == n_orient * nnz
@pytest.mark.slowtest # slow on Azure
@testing.requires_testing_data
def test_mxne_inverse_sure():
"""Tests SURE criterion for automatic alpha selection on MEG data."""
def data_fun(times):
data = np.zeros(times.shape)
data[times >= 0] = 50e-9
return data
n_dipoles = 2
raw = mne.io.read_raw_fif(fname_raw) | info = mne.io.read_info(fname_data)
with info._unlock():
info['projs'] = [] | random_line_split |
|
test_mxne_inverse.py | loose=loose, depth=depth,
fixed=True, use_cps=True)
stc_dspm = apply_inverse(evoked_l21, inverse_operator, lambda2=1. / 9.,
method='dSPM')
stc_dspm.data[np.abs(stc_dspm.data) < 12] = 0.0
stc_dspm.data[np.abs(stc_dspm.data) >= 12] = 1.
weights_min = 0.5
# MxNE tests
alpha = 70 # spatial regularization parameter
with _record_warnings(): # CD
stc_cd = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
depth=depth, maxit=300, tol=1e-8,
active_set_size=10, weights=stc_dspm,
weights_min=weights_min, solver='cd')
stc_bcd = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
depth=depth, maxit=300, tol=1e-8, active_set_size=10,
weights=stc_dspm, weights_min=weights_min,
solver='bcd')
assert_array_almost_equal(stc_cd.times, evoked_l21.times, 5)
assert_array_almost_equal(stc_bcd.times, evoked_l21.times, 5)
assert_allclose(stc_cd.data, stc_bcd.data, rtol=1e-3, atol=0.0)
assert stc_cd.vertices[1][0] in label.vertices
assert stc_bcd.vertices[1][0] in label.vertices
# vector
with _record_warnings(): # no convergence
stc = mixed_norm(evoked_l21, forward, cov, alpha, loose=1, maxit=2)
with _record_warnings(): # no convergence
stc_vec = mixed_norm(evoked_l21, forward, cov, alpha, loose=1, maxit=2,
pick_ori='vector')
assert_stcs_equal(stc_vec.magnitude(), stc)
with _record_warnings(), \
pytest.raises(ValueError, match='pick_ori='):
mixed_norm(evoked_l21, forward, cov, alpha, loose=0, maxit=2,
pick_ori='vector')
with _record_warnings(), catch_logging() as log: # CD
dips = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
depth=depth, maxit=300, tol=1e-8, active_set_size=10,
weights=stc_dspm, weights_min=weights_min,
solver='cd', return_as_dipoles=True, verbose=True)
stc_dip = make_stc_from_dipoles(dips, forward['src'])
assert isinstance(dips[0], Dipole)
assert stc_dip.subject == "sample"
assert_stcs_equal(stc_cd, stc_dip)
assert_var_exp_log(log.getvalue(), 51, 53) # 51.8
# Single time point things should match
with _record_warnings(), catch_logging() as log:
dips = mixed_norm(evoked_l21.copy().crop(0.081, 0.081),
forward, cov, alpha, loose=loose,
depth=depth, maxit=300, tol=1e-8, active_set_size=10,
weights=stc_dspm, weights_min=weights_min,
solver='cd', return_as_dipoles=True, verbose=True)
assert_var_exp_log(log.getvalue(), 37.8, 38.0) # 37.9
gof = sum(dip.gof[0] for dip in dips) # these are now partial exp vars
assert_allclose(gof, 37.9, atol=0.1)
with _record_warnings(), catch_logging() as log:
stc, res = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
depth=depth, maxit=300, tol=1e-8,
weights=stc_dspm, # gh-6382
active_set_size=10, return_residual=True,
solver='cd', verbose=True)
assert_array_almost_equal(stc.times, evoked_l21.times, 5)
assert stc.vertices[1][0] in label.vertices
assert_var_exp_log(log.getvalue(), 51, 53) # 51.8
assert stc.data.min() < -1e-9 # signed
assert_stc_res(evoked_l21, stc, forward, res)
# irMxNE tests
with _record_warnings(), catch_logging() as log: # CD
stc, residual = mixed_norm(
evoked_l21, forward, cov, alpha, n_mxne_iter=5, loose=0.0001,
depth=depth, maxit=300, tol=1e-8, active_set_size=10,
solver='cd', return_residual=True, pick_ori='vector', verbose=True)
assert_array_almost_equal(stc.times, evoked_l21.times, 5)
assert stc.vertices[1][0] in label.vertices
assert stc.vertices == [[63152], [79017]]
assert_var_exp_log(log.getvalue(), 51, 53) # 51.8
assert_stc_res(evoked_l21, stc, forward, residual)
# Do with TF-MxNE for test memory savings
alpha = 60. # overall regularization parameter
l1_ratio = 0.01 # temporal regularization proportion
stc, _ = tf_mixed_norm(evoked, forward, cov,
loose=loose, depth=depth, maxit=100, tol=1e-4,
tstep=4, wsize=16, window=0.1, weights=stc_dspm,
weights_min=weights_min, return_residual=True,
alpha=alpha, l1_ratio=l1_ratio)
assert_array_almost_equal(stc.times, evoked.times, 5)
assert stc.vertices[1][0] in label.vertices
# vector
stc_nrm = tf_mixed_norm(
evoked, forward, cov, loose=1, depth=depth, maxit=2, tol=1e-4,
tstep=4, wsize=16, window=0.1, weights=stc_dspm,
weights_min=weights_min, alpha=alpha, l1_ratio=l1_ratio)
stc_vec, residual = tf_mixed_norm(
evoked, forward, cov, loose=1, depth=depth, maxit=2, tol=1e-4,
tstep=4, wsize=16, window=0.1, weights=stc_dspm,
weights_min=weights_min, alpha=alpha, l1_ratio=l1_ratio,
pick_ori='vector', return_residual=True)
assert_stcs_equal(stc_vec.magnitude(), stc_nrm)
pytest.raises(ValueError, tf_mixed_norm, evoked, forward, cov,
alpha=101, l1_ratio=0.03)
pytest.raises(ValueError, tf_mixed_norm, evoked, forward, cov,
alpha=50., l1_ratio=1.01)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_mxne_vol_sphere():
"""Test (TF-)MxNE with a sphere forward and volumic source space."""
evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0))
evoked.crop(tmin=-0.05, tmax=0.2)
cov = read_cov(fname_cov)
evoked_l21 = evoked.copy()
evoked_l21.crop(tmin=0.081, tmax=0.1)
info = evoked.info
sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.080)
src = mne.setup_volume_source_space(subject=None, pos=15., m | """Test (TF-)MxNE inverse computation."""
# Read noise covariance matrix
cov = read_cov(fname_cov)
# Handling average file
loose = 0.0
depth = 0.9
evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0))
evoked.crop(tmin=-0.05, tmax=0.2)
evoked_l21 = evoked.copy()
evoked_l21.crop(tmin=0.081, tmax=0.1)
label = read_label(fname_label)
assert label.hemi == 'rh'
forward = convert_forward_solution(forward, surf_ori=True)
# Reduce source space to make test computation faster
inverse_operator = make_inverse_operator(evoked_l21.info, forward, cov, | identifier_body |
|
test_mxne_inverse.py | c, residual = mixed_norm(
evoked_l21, forward, cov, alpha, n_mxne_iter=5, loose=0.0001,
depth=depth, maxit=300, tol=1e-8, active_set_size=10,
solver='cd', return_residual=True, pick_ori='vector', verbose=True)
assert_array_almost_equal(stc.times, evoked_l21.times, 5)
assert stc.vertices[1][0] in label.vertices
assert stc.vertices == [[63152], [79017]]
assert_var_exp_log(log.getvalue(), 51, 53) # 51.8
assert_stc_res(evoked_l21, stc, forward, residual)
# Do with TF-MxNE for test memory savings
alpha = 60. # overall regularization parameter
l1_ratio = 0.01 # temporal regularization proportion
stc, _ = tf_mixed_norm(evoked, forward, cov,
loose=loose, depth=depth, maxit=100, tol=1e-4,
tstep=4, wsize=16, window=0.1, weights=stc_dspm,
weights_min=weights_min, return_residual=True,
alpha=alpha, l1_ratio=l1_ratio)
assert_array_almost_equal(stc.times, evoked.times, 5)
assert stc.vertices[1][0] in label.vertices
# vector
stc_nrm = tf_mixed_norm(
evoked, forward, cov, loose=1, depth=depth, maxit=2, tol=1e-4,
tstep=4, wsize=16, window=0.1, weights=stc_dspm,
weights_min=weights_min, alpha=alpha, l1_ratio=l1_ratio)
stc_vec, residual = tf_mixed_norm(
evoked, forward, cov, loose=1, depth=depth, maxit=2, tol=1e-4,
tstep=4, wsize=16, window=0.1, weights=stc_dspm,
weights_min=weights_min, alpha=alpha, l1_ratio=l1_ratio,
pick_ori='vector', return_residual=True)
assert_stcs_equal(stc_vec.magnitude(), stc_nrm)
pytest.raises(ValueError, tf_mixed_norm, evoked, forward, cov,
alpha=101, l1_ratio=0.03)
pytest.raises(ValueError, tf_mixed_norm, evoked, forward, cov,
alpha=50., l1_ratio=1.01)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_mxne_vol_sphere():
"""Test (TF-)MxNE with a sphere forward and volumic source space."""
evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0))
evoked.crop(tmin=-0.05, tmax=0.2)
cov = read_cov(fname_cov)
evoked_l21 = evoked.copy()
evoked_l21.crop(tmin=0.081, tmax=0.1)
info = evoked.info
sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.080)
src = mne.setup_volume_source_space(subject=None, pos=15., mri=None,
sphere=(0.0, 0.0, 0.0, 0.08),
bem=None, mindist=5.0,
exclude=2.0, sphere_units='m')
fwd = mne.make_forward_solution(info, trans=None, src=src,
bem=sphere, eeg=False, meg=True)
alpha = 80.
pytest.raises(ValueError, mixed_norm, evoked, fwd, cov, alpha,
loose=0.0, return_residual=False,
maxit=3, tol=1e-8, active_set_size=10)
pytest.raises(ValueError, mixed_norm, evoked, fwd, cov, alpha,
loose=0.2, return_residual=False,
maxit=3, tol=1e-8, active_set_size=10)
# irMxNE tests
with catch_logging() as log:
stc = mixed_norm(evoked_l21, fwd, cov, alpha,
n_mxne_iter=1, maxit=30, tol=1e-8,
active_set_size=10, verbose=True)
assert isinstance(stc, VolSourceEstimate)
assert_array_almost_equal(stc.times, evoked_l21.times, 5)
assert_var_exp_log(log.getvalue(), 9, 11) # 10.2
# Compare orientation obtained using fit_dipole and gamma_map
# for a simulated evoked containing a single dipole
stc = mne.VolSourceEstimate(50e-9 * np.random.RandomState(42).randn(1, 4),
vertices=[stc.vertices[0][:1]],
tmin=stc.tmin,
tstep=stc.tstep)
evoked_dip = mne.simulation.simulate_evoked(fwd, stc, info, cov, nave=1e9,
use_cps=True)
dip_mxne = mixed_norm(evoked_dip, fwd, cov, alpha=80,
n_mxne_iter=1, maxit=30, tol=1e-8,
active_set_size=10, return_as_dipoles=True)
amp_max = [np.max(d.amplitude) for d in dip_mxne]
dip_mxne = dip_mxne[np.argmax(amp_max)]
assert dip_mxne.pos[0] in src[0]['rr'][stc.vertices[0]]
dip_fit = mne.fit_dipole(evoked_dip, cov, sphere)[0]
assert np.abs(np.dot(dip_fit.ori[0], dip_mxne.ori[0])) > 0.99
dist = 1000 * np.linalg.norm(dip_fit.pos[0] - dip_mxne.pos[0])
assert dist < 4. # within 4 mm
# Do with TF-MxNE for test memory savings
alpha = 60. # overall regularization parameter
l1_ratio = 0.01 # temporal regularization proportion
stc, _ = tf_mixed_norm(evoked, fwd, cov, maxit=3, tol=1e-4,
tstep=16, wsize=32, window=0.1, alpha=alpha,
l1_ratio=l1_ratio, return_residual=True)
assert isinstance(stc, VolSourceEstimate)
assert_array_almost_equal(stc.times, evoked.times, 5)
@pytest.mark.parametrize('mod', (
None, 'mult', 'augment', 'sign', 'zero', 'less'))
def test_split_gof_basic(mod):
"""Test splitting the goodness of fit."""
# first a trivial case
gain = np.array([[0., 1., 1.], [1., 1., 0.]]).T
M = np.ones((3, 1))
X = np.ones((2, 1))
M_est = gain @ X
assert_allclose(M_est, np.array([[1., 2., 1.]]).T) # a reasonable estimate
if mod == 'mult':
gain *= [1., -0.5]
X[1] *= -2
elif mod == 'augment':
gain = np.concatenate((gain, np.zeros((3, 1))), axis=1)
X = np.concatenate((X, [[1.]]))
elif mod == 'sign':
gain[1] *= -1
M[1] *= -1
M_est[1] *= -1
elif mod in ('zero', 'less'):
gain = np.array([[1, 1., 1.], [1., 1., 1.]]).T
if mod == 'zero':
X[:, 0] = [1., 0.]
else:
X[:, 0] = [1., 0.5]
M_est = gain @ X
else:
assert mod is None
res = M - M_est
gof = 100 * (1. - (res * res).sum() / (M * M).sum())
gof_split = _split_gof(M, X, gain)
assert_allclose(gof_split.sum(), gof)
want = gof_split[[0, 0]]
if mod == 'augment':
want = np.concatenate((want, [[0]]))
if mod in ('mult', 'less'):
assert_array_less(gof_split[1], gof_split[0])
elif mod == 'zero':
assert_allclose(gof_split[0], gof_split.sum(0))
assert_allclose(gof_split[1], 0., atol=1e-6)
else:
| assert_allclose(gof_split, want, atol=1e-12) | conditional_block |
|
server.js | var _ = require('lodash');
var express = require('express');
var path = require('path');
var httpProxy = require('http-proxy');
var http = require('http');
var proxy = httpProxy.createProxyServer({
changeOrigin: true,
ws: true
});
var app = express();
var isProduction = process.env.NODE_ENV === 'production';
var port = isProduction ? process.env.PORT : 3000;
var publicPath = path.resolve(__dirname, 'public');
app.use(express.static(publicPath)); |
var data = [{
"quote_collateral_bp": 5000,
"denom_asset": "~BTC:SATOSHIS",
"base_asset": "USLV",
"base_principal": 50000000,
"quote_multiplier": 1.00,
"base_collateral_bp": 5000,
"base_principal_hi": 52500000,
"quote_principal": 50000000,
"quote_principal_hi": 52500000,
"principal_lo": 47500000,
"quote_principal_lo": 47500000,
"base_multiplier": 1.00,
"base_principal_lo": 47500000,
"principal_hi": 52500000,
"swap_duration": 20563200,
"funding_source": {
"tx_id": "2435d848e74bb0486cab55f02b8c47801be36e0e410371e9ab9f57bff63129cd",
"tx_out": 0
},
"quote_asset": "DSLV",
"principal": 50000000
}, {
"quote_collateral_bp": 2000,
"denom_asset": "~BTC:SATOSHIS",
"base_asset": "SDOW",
"base_principal": 15000000,
"quote_multiplier": 1.00,
"base_collateral_bp": 2000,
"base_principal_hi": 18000000,
"quote_principal": 15000000,
"quote_principal_hi": 18000000,
"principal_lo": 12000000,
"quote_principal_lo": 12000000,
"base_multiplier": 1.00,
"base_principal_lo": 12000000,
"principal_hi": 18000000,
"swap_duration": 1209600,
"funding_source": {
"tx_id": "4b346ebbd290977e2b423a1b98dc2b3d1989b2abe841084998122d948db0adb9",
"tx_out": 0
},
"quote_asset": "UDOW",
"principal": 15000000
}, {
"quote_collateral_bp": 2000,
"denom_asset": "~BTC:SATOSHIS",
"base_asset": "SDOW",
"base_principal": 15000000,
"quote_multiplier": 1.00,
"base_collateral_bp": 2000,
"base_principal_hi": 18000000,
"quote_principal": 15000000,
"quote_principal_hi": 18000000,
"principal_lo": 12000000,
"quote_principal_lo": 12000000,
"base_multiplier": 1.00,
"base_principal_lo": 12000000,
"principal_hi": 18000000,
"swap_duration": 1209600,
"funding_source": {
"tx_id": "4b346ebbd290977e2b423a1b98dc2b3d1989b2abe841084998122d948db0adb9",
"tx_out": 0
},
"quote_asset": "UDOW",
"principal": 15000000
}];
// app.post('/data', function(req, res) {
// var rows = [];
// for (var i = 0; i < 1000; i++) {
// data.forEach(function(row) {
// var inc = Math.floor(Math.random() * 11);
// var r = _.clone(row);
// r.base_principal += inc;
// rows.push(r);
// });
// }
// res.json({
// result: {
// results: rows
// }
// });
// });
app.all('/v1', function(req, res) {
proxy.web(req, res, {
target: 'https://beta.ultra-coin.com:30051'
// target: 'http://localhost:7000'
});
});
if (!isProduction) {
var bundle = require('./server/bundle.js');
bundle();
app.all('/build/*', function(req, res) {
proxy.web(req, res, {
target: 'http://127.0.0.1:3001'
});
});
app.all('/socket.io*', function(req, res) {
proxy.web(req, res, {
target: 'http://127.0.0.1:3001'
});
});
proxy.on('error', function(e) {
// Just catch it
});
// We need to use basic HTTP service to proxy
// websocket requests from webpack
var server = http.createServer(app);
server.on('upgrade', function(req, socket, head) {
proxy.ws(req, socket, head);
});
server.listen(port, function() {
console.log('Server running on port ' + port);
});
} else {
// And run the server
app.listen(port, function() {
console.log('Server running on port ' + port);
});
} | random_line_split |
|
server.js | var _ = require('lodash');
var express = require('express');
var path = require('path');
var httpProxy = require('http-proxy');
var http = require('http');
var proxy = httpProxy.createProxyServer({
changeOrigin: true,
ws: true
});
var app = express();
var isProduction = process.env.NODE_ENV === 'production';
var port = isProduction ? process.env.PORT : 3000;
var publicPath = path.resolve(__dirname, 'public');
app.use(express.static(publicPath));
var data = [{
"quote_collateral_bp": 5000,
"denom_asset": "~BTC:SATOSHIS",
"base_asset": "USLV",
"base_principal": 50000000,
"quote_multiplier": 1.00,
"base_collateral_bp": 5000,
"base_principal_hi": 52500000,
"quote_principal": 50000000,
"quote_principal_hi": 52500000,
"principal_lo": 47500000,
"quote_principal_lo": 47500000,
"base_multiplier": 1.00,
"base_principal_lo": 47500000,
"principal_hi": 52500000,
"swap_duration": 20563200,
"funding_source": {
"tx_id": "2435d848e74bb0486cab55f02b8c47801be36e0e410371e9ab9f57bff63129cd",
"tx_out": 0
},
"quote_asset": "DSLV",
"principal": 50000000
}, {
"quote_collateral_bp": 2000,
"denom_asset": "~BTC:SATOSHIS",
"base_asset": "SDOW",
"base_principal": 15000000,
"quote_multiplier": 1.00,
"base_collateral_bp": 2000,
"base_principal_hi": 18000000,
"quote_principal": 15000000,
"quote_principal_hi": 18000000,
"principal_lo": 12000000,
"quote_principal_lo": 12000000,
"base_multiplier": 1.00,
"base_principal_lo": 12000000,
"principal_hi": 18000000,
"swap_duration": 1209600,
"funding_source": {
"tx_id": "4b346ebbd290977e2b423a1b98dc2b3d1989b2abe841084998122d948db0adb9",
"tx_out": 0
},
"quote_asset": "UDOW",
"principal": 15000000
}, {
"quote_collateral_bp": 2000,
"denom_asset": "~BTC:SATOSHIS",
"base_asset": "SDOW",
"base_principal": 15000000,
"quote_multiplier": 1.00,
"base_collateral_bp": 2000,
"base_principal_hi": 18000000,
"quote_principal": 15000000,
"quote_principal_hi": 18000000,
"principal_lo": 12000000,
"quote_principal_lo": 12000000,
"base_multiplier": 1.00,
"base_principal_lo": 12000000,
"principal_hi": 18000000,
"swap_duration": 1209600,
"funding_source": {
"tx_id": "4b346ebbd290977e2b423a1b98dc2b3d1989b2abe841084998122d948db0adb9",
"tx_out": 0
},
"quote_asset": "UDOW",
"principal": 15000000
}];
// app.post('/data', function(req, res) {
// var rows = [];
// for (var i = 0; i < 1000; i++) {
// data.forEach(function(row) {
// var inc = Math.floor(Math.random() * 11);
// var r = _.clone(row);
// r.base_principal += inc;
// rows.push(r);
// });
// }
// res.json({
// result: {
// results: rows
// }
// });
// });
app.all('/v1', function(req, res) {
proxy.web(req, res, {
target: 'https://beta.ultra-coin.com:30051'
// target: 'http://localhost:7000'
});
});
if (!isProduction) {
var bundle = require('./server/bundle.js');
bundle();
app.all('/build/*', function(req, res) {
proxy.web(req, res, {
target: 'http://127.0.0.1:3001'
});
});
app.all('/socket.io*', function(req, res) {
proxy.web(req, res, {
target: 'http://127.0.0.1:3001'
});
});
proxy.on('error', function(e) {
// Just catch it
});
// We need to use basic HTTP service to proxy
// websocket requests from webpack
var server = http.createServer(app);
server.on('upgrade', function(req, socket, head) {
proxy.ws(req, socket, head);
});
server.listen(port, function() {
console.log('Server running on port ' + port);
});
} else | {
// And run the server
app.listen(port, function() {
console.log('Server running on port ' + port);
});
} | conditional_block |
|
adminPickupBase.ts | 'use strict';
export class | {
protected $http: ng.IHttpService;
PickupUtils: IPickupUtilsService;
PickupOptionsService: IPickupOptionsService;
season: ISeason;
pickup: IPickupEvent;
pickupOptions: IPickupOption[];
userEvents: IPickupUserEvent[];
pickupEventAlternatives: IPickupEvent[];
extraInformation: {};
constructor($http, PickupUtils, PickupOptionsService) {
this.$http = $http;
this.PickupUtils = PickupUtils;
this.PickupOptionsService = PickupOptionsService;
}
public $onInit() {
let resolve = (this as any).resolve;
if (_.has(resolve, 'pickup') && resolve.pickup !== null) {
this.pickup = resolve.pickup;
this.season = resolve.season;
this.load();
} else {
this.cancel();
}
}
private load() {
let scope = this;
this.$http.get('/api/pickupUserEvents/byEvent/'+this.pickup._id)
.then(result => {
scope.userEvents = result.data as IPickupUserEvent[];
scope.userEvents = _.filter(scope.userEvents, userEvent => {
return userEvent.basket !== null;
});
_.each(scope.userEvents, userEvent => {
scope.getExtras(userEvent)
scope.calculateStartTime(userEvent);
});
scope.userEvents = _.sortBy(scope.userEvents, userEvent => {
return userEvent.basket.membership.lastName + userEvent.basket.membership.firstName;
});
this.extraInformation = this.getExtraInformation();
this.$http.get('/api/pickupEvents/alternatives/'+this.pickup._id+'/')
.then(result => {
scope.pickupEventAlternatives = result.data as IPickupEvent[];
_.each(scope.pickupEventAlternatives, alternativePickup => {
alternativePickup.startDate = scope.PickupUtils.getStartDateFor(scope.season, alternativePickup.pickupOption, alternativePickup);
});
});
});
}
protected calculateStartTime(userEvent) {
if (userEvent.pickupEventOverride) {
userEvent.pickupEventOverride.startDate = this.PickupUtils.getStartDateFor(this.season, userEvent.pickupEventOverride.pickupOption, userEvent.pickupEventOverride);
}
if (userEvent.pickupEvent) {
userEvent.pickupEvent.startDate = this.PickupUtils.getStartDateFor(this.season, userEvent.pickupEvent.pickupOption, userEvent.pickupEvent);
}
}
public getRequiredBaskets() {
let requiredBaskets = 0;
let scope = this;
_.each(this.userEvents, userEvent => {
if (((userEvent.pickupEventOverride && userEvent.pickupEventOverride._id === scope.pickup._id) ||
(!userEvent.pickupEventOverride && userEvent.pickupEvent._id === scope.pickup._id))&&
!userEvent.absent) {
requiredBaskets++;
}
});
return requiredBaskets;
}
public hasExtraInformation() {
return this.pickup.availableExtras.length>0;
}
public getExtraInformation() {
let extraInfo = {};
let scope = this;
_.each(scope.pickup.availableExtras, extra => {
let fullExtra = _.find(scope.season.availableExtras, candidate => {
return (candidate._id == extra);
});
if (fullExtra) {
extraInfo[fullExtra._id] = {
'count': 0,
'name': fullExtra.name,
'unit': fullExtra.unit
};
}
});
_.each(scope.userEvents, userEvent => {
if (((userEvent.pickupEventOverride && userEvent.pickupEventOverride._id === scope.pickup._id) ||
(!userEvent.pickupEventOverride && userEvent.pickupEvent._id === scope.pickup._id))&&
!userEvent.absent) {
_.each(userEvent.basket.extras, extra => {
if (_.has(extraInfo, extra.extra)) {
extraInfo[extra.extra]['count'] += extra.quantity;
}
});
}
});
return extraInfo;
}
getExtras(userEvent) {
let pickupExtras = _.filter(this.season.availableExtras, candidate => {
return _.find(this.pickup.availableExtras, (child) => {
return candidate._id == child;
}) != null;
});
let result = [];
_.each(pickupExtras, candidate => {
_.each(userEvent.basket.extras, (child) => {
if (candidate._id == child.extra) {
result.push(
{
'quantity': child.quantity,
'unit': candidate.unit,
'name': candidate.name
});
}
}) != null;
});
userEvent.$extras = result;
}
public ok() {
(this as any).close({$value: 'ok'});
};
public cancel() {
(this as any).dismiss({$value: 'cancel'});
};
}
| AdminPickupBase | identifier_name |
adminPickupBase.ts | 'use strict';
export class AdminPickupBase {
protected $http: ng.IHttpService;
PickupUtils: IPickupUtilsService;
PickupOptionsService: IPickupOptionsService;
season: ISeason;
pickup: IPickupEvent;
pickupOptions: IPickupOption[];
userEvents: IPickupUserEvent[];
pickupEventAlternatives: IPickupEvent[];
extraInformation: {};
constructor($http, PickupUtils, PickupOptionsService) {
this.$http = $http;
this.PickupUtils = PickupUtils;
this.PickupOptionsService = PickupOptionsService;
}
public $onInit() {
let resolve = (this as any).resolve;
if (_.has(resolve, 'pickup') && resolve.pickup !== null) {
this.pickup = resolve.pickup;
this.season = resolve.season;
this.load();
} else {
this.cancel();
}
}
private load() {
let scope = this;
this.$http.get('/api/pickupUserEvents/byEvent/'+this.pickup._id)
.then(result => {
scope.userEvents = result.data as IPickupUserEvent[];
scope.userEvents = _.filter(scope.userEvents, userEvent => {
return userEvent.basket !== null;
});
_.each(scope.userEvents, userEvent => {
scope.getExtras(userEvent)
scope.calculateStartTime(userEvent);
});
scope.userEvents = _.sortBy(scope.userEvents, userEvent => {
return userEvent.basket.membership.lastName + userEvent.basket.membership.firstName;
});
this.extraInformation = this.getExtraInformation();
this.$http.get('/api/pickupEvents/alternatives/'+this.pickup._id+'/')
.then(result => {
scope.pickupEventAlternatives = result.data as IPickupEvent[];
_.each(scope.pickupEventAlternatives, alternativePickup => {
alternativePickup.startDate = scope.PickupUtils.getStartDateFor(scope.season, alternativePickup.pickupOption, alternativePickup);
});
});
});
}
protected calculateStartTime(userEvent) {
if (userEvent.pickupEventOverride) {
userEvent.pickupEventOverride.startDate = this.PickupUtils.getStartDateFor(this.season, userEvent.pickupEventOverride.pickupOption, userEvent.pickupEventOverride);
}
if (userEvent.pickupEvent) {
userEvent.pickupEvent.startDate = this.PickupUtils.getStartDateFor(this.season, userEvent.pickupEvent.pickupOption, userEvent.pickupEvent);
}
}
public getRequiredBaskets() {
let requiredBaskets = 0;
let scope = this;
_.each(this.userEvents, userEvent => {
if (((userEvent.pickupEventOverride && userEvent.pickupEventOverride._id === scope.pickup._id) ||
(!userEvent.pickupEventOverride && userEvent.pickupEvent._id === scope.pickup._id))&&
!userEvent.absent) {
requiredBaskets++;
}
});
return requiredBaskets; | }
public getExtraInformation() {
let extraInfo = {};
let scope = this;
_.each(scope.pickup.availableExtras, extra => {
let fullExtra = _.find(scope.season.availableExtras, candidate => {
return (candidate._id == extra);
});
if (fullExtra) {
extraInfo[fullExtra._id] = {
'count': 0,
'name': fullExtra.name,
'unit': fullExtra.unit
};
}
});
_.each(scope.userEvents, userEvent => {
if (((userEvent.pickupEventOverride && userEvent.pickupEventOverride._id === scope.pickup._id) ||
(!userEvent.pickupEventOverride && userEvent.pickupEvent._id === scope.pickup._id))&&
!userEvent.absent) {
_.each(userEvent.basket.extras, extra => {
if (_.has(extraInfo, extra.extra)) {
extraInfo[extra.extra]['count'] += extra.quantity;
}
});
}
});
return extraInfo;
}
getExtras(userEvent) {
let pickupExtras = _.filter(this.season.availableExtras, candidate => {
return _.find(this.pickup.availableExtras, (child) => {
return candidate._id == child;
}) != null;
});
let result = [];
_.each(pickupExtras, candidate => {
_.each(userEvent.basket.extras, (child) => {
if (candidate._id == child.extra) {
result.push(
{
'quantity': child.quantity,
'unit': candidate.unit,
'name': candidate.name
});
}
}) != null;
});
userEvent.$extras = result;
}
public ok() {
(this as any).close({$value: 'ok'});
};
public cancel() {
(this as any).dismiss({$value: 'cancel'});
};
} | }
public hasExtraInformation() {
return this.pickup.availableExtras.length>0; | random_line_split |
adminPickupBase.ts | 'use strict';
export class AdminPickupBase {
protected $http: ng.IHttpService;
PickupUtils: IPickupUtilsService;
PickupOptionsService: IPickupOptionsService;
season: ISeason;
pickup: IPickupEvent;
pickupOptions: IPickupOption[];
userEvents: IPickupUserEvent[];
pickupEventAlternatives: IPickupEvent[];
extraInformation: {};
constructor($http, PickupUtils, PickupOptionsService) {
this.$http = $http;
this.PickupUtils = PickupUtils;
this.PickupOptionsService = PickupOptionsService;
}
public $onInit() {
let resolve = (this as any).resolve;
if (_.has(resolve, 'pickup') && resolve.pickup !== null) {
this.pickup = resolve.pickup;
this.season = resolve.season;
this.load();
} else {
this.cancel();
}
}
private load() {
let scope = this;
this.$http.get('/api/pickupUserEvents/byEvent/'+this.pickup._id)
.then(result => {
scope.userEvents = result.data as IPickupUserEvent[];
scope.userEvents = _.filter(scope.userEvents, userEvent => {
return userEvent.basket !== null;
});
_.each(scope.userEvents, userEvent => {
scope.getExtras(userEvent)
scope.calculateStartTime(userEvent);
});
scope.userEvents = _.sortBy(scope.userEvents, userEvent => {
return userEvent.basket.membership.lastName + userEvent.basket.membership.firstName;
});
this.extraInformation = this.getExtraInformation();
this.$http.get('/api/pickupEvents/alternatives/'+this.pickup._id+'/')
.then(result => {
scope.pickupEventAlternatives = result.data as IPickupEvent[];
_.each(scope.pickupEventAlternatives, alternativePickup => {
alternativePickup.startDate = scope.PickupUtils.getStartDateFor(scope.season, alternativePickup.pickupOption, alternativePickup);
});
});
});
}
protected calculateStartTime(userEvent) {
if (userEvent.pickupEventOverride) {
userEvent.pickupEventOverride.startDate = this.PickupUtils.getStartDateFor(this.season, userEvent.pickupEventOverride.pickupOption, userEvent.pickupEventOverride);
}
if (userEvent.pickupEvent) {
userEvent.pickupEvent.startDate = this.PickupUtils.getStartDateFor(this.season, userEvent.pickupEvent.pickupOption, userEvent.pickupEvent);
}
}
public getRequiredBaskets() {
let requiredBaskets = 0;
let scope = this;
_.each(this.userEvents, userEvent => {
if (((userEvent.pickupEventOverride && userEvent.pickupEventOverride._id === scope.pickup._id) ||
(!userEvent.pickupEventOverride && userEvent.pickupEvent._id === scope.pickup._id))&&
!userEvent.absent) {
requiredBaskets++;
}
});
return requiredBaskets;
}
public hasExtraInformation() {
return this.pickup.availableExtras.length>0;
}
public getExtraInformation() {
let extraInfo = {};
let scope = this;
_.each(scope.pickup.availableExtras, extra => {
let fullExtra = _.find(scope.season.availableExtras, candidate => {
return (candidate._id == extra);
});
if (fullExtra) {
extraInfo[fullExtra._id] = {
'count': 0,
'name': fullExtra.name,
'unit': fullExtra.unit
};
}
});
_.each(scope.userEvents, userEvent => {
if (((userEvent.pickupEventOverride && userEvent.pickupEventOverride._id === scope.pickup._id) ||
(!userEvent.pickupEventOverride && userEvent.pickupEvent._id === scope.pickup._id))&&
!userEvent.absent) {
_.each(userEvent.basket.extras, extra => {
if (_.has(extraInfo, extra.extra)) {
extraInfo[extra.extra]['count'] += extra.quantity;
}
});
}
});
return extraInfo;
}
getExtras(userEvent) {
let pickupExtras = _.filter(this.season.availableExtras, candidate => {
return _.find(this.pickup.availableExtras, (child) => {
return candidate._id == child;
}) != null;
});
let result = [];
_.each(pickupExtras, candidate => {
_.each(userEvent.basket.extras, (child) => {
if (candidate._id == child.extra) |
}) != null;
});
userEvent.$extras = result;
}
public ok() {
(this as any).close({$value: 'ok'});
};
public cancel() {
(this as any).dismiss({$value: 'cancel'});
};
}
| {
result.push(
{
'quantity': child.quantity,
'unit': candidate.unit,
'name': candidate.name
});
} | conditional_block |
DashboardPage.test.tsx | /react';
import { Props, UnthemedDashboardPage } from './DashboardPage';
import { Props as LazyLoaderProps } from '../dashgrid/LazyLoader';
import { Router } from 'react-router-dom';
import { locationService, setDataSourceSrv } from '@grafana/runtime';
import { DashboardModel } from '../state';
import { configureStore } from '../../../store/configureStore';
import { mockToolkitActionCreator } from 'test/core/redux/mocks';
import { DashboardInitPhase, DashboardRoutes } from 'app/types';
import { notifyApp } from 'app/core/actions';
import { selectors } from '@grafana/e2e-selectors';
import { getRouteComponentProps } from 'app/core/navigation/__mocks__/routeProps';
import { createTheme } from '@grafana/data';
import { AutoSizerProps } from 'react-virtualized-auto-sizer';
import { setDashboardSrv } from '../services/DashboardSrv';
jest.mock('app/features/dashboard/dashgrid/LazyLoader', () => {
const LazyLoader = ({ children }: Pick<LazyLoaderProps, 'children'>) => {
return <>{typeof children === 'function' ? children({ isInView: true }) : children}</>;
};
return { LazyLoader };
});
jest.mock('app/features/dashboard/components/DashboardSettings/GeneralSettings', () => {
class GeneralSettings extends React.Component<{}, {}> {
render() {
return <>general settings</>;
}
}
return { GeneralSettings };
});
jest.mock('app/features/query/components/QueryGroup', () => {
return {
QueryGroup: () => null,
};
});
jest.mock('app/core/core', () => ({
appEvents: {
subscribe: () => {
return { unsubscribe: () => {} };
},
},
}));
jest.mock('react-virtualized-auto-sizer', () => {
// The size of the children need to be small enough to be outside the view.
// So it does not trigger the query to be run by the PanelQueryRunner.
return ({ children }: AutoSizerProps) => children({ height: 1, width: 1 });
});
// the mock below gets rid of this warning from recompose:
// Warning: React.createFactory() is deprecated and will be removed in a future major release. Consider using JSX or use React.createElement() directly instead.
jest.mock('@jaegertracing/jaeger-ui-components', () => ({}));
interface ScenarioContext {
dashboard?: DashboardModel | null;
container?: HTMLElement;
mount: (propOverrides?: Partial<Props>) => void;
unmount: () => void;
props: Props;
rerender: (propOverrides?: Partial<Props>) => void;
setup: (fn: () => void) => void;
}
function getTestDashboard(overrides?: any, metaOverrides?: any): DashboardModel {
const data = Object.assign(
{
title: 'My dashboard',
panels: [
{
id: 1,
type: 'timeseries',
title: 'My panel title',
gridPos: { x: 0, y: 0, w: 1, h: 1 },
},
],
},
overrides
);
const meta = Object.assign({ canSave: true, canEdit: true }, metaOverrides);
return new DashboardModel(data, meta);
}
function dashboardPageScenario(description: string, scenarioFn: (ctx: ScenarioContext) => void) {
describe(description, () => {
let setupFn: () => void;
const ctx: ScenarioContext = {
setup: (fn) => {
setupFn = fn;
},
mount: (propOverrides?: Partial<Props>) => {
const store = configureStore();
const props: Props = {
...getRouteComponentProps({
match: { params: { slug: 'my-dash', uid: '11' } } as any,
route: { routeName: DashboardRoutes.Normal } as any,
}),
initPhase: DashboardInitPhase.NotStarted,
initError: null,
initDashboard: jest.fn(),
notifyApp: mockToolkitActionCreator(notifyApp),
cleanUpDashboardAndVariables: jest.fn(),
cancelVariables: jest.fn(),
templateVarsChangedInUrl: jest.fn(),
dashboard: null,
theme: createTheme(),
};
Object.assign(props, propOverrides);
ctx.props = props;
ctx.dashboard = props.dashboard;
const { container, rerender, unmount } = render(
<Provider store={store}>
<Router history={locationService.getHistory()}>
<UnthemedDashboardPage {...props} />
</Router>
</Provider>
);
ctx.container = container;
ctx.rerender = (newProps?: Partial<Props>) => {
Object.assign(props, newProps);
rerender(
<Provider store={store}>
<Router history={locationService.getHistory()}>
<UnthemedDashboardPage {...props} />
</Router>
</Provider>
);
};
ctx.unmount = unmount;
},
props: {} as Props,
rerender: () => {},
unmount: () => {},
};
beforeEach(() => {
setupFn();
});
scenarioFn(ctx);
});
}
describe('DashboardPage', () => {
dashboardPageScenario('Given initial state', (ctx) => {
ctx.setup(() => {
ctx.mount();
});
it('Should call initDashboard on mount', () => {
expect(ctx.props.initDashboard).toBeCalledWith({
fixUrl: true,
routeName: 'normal-dashboard',
urlSlug: 'my-dash',
urlUid: '11',
});
});
});
dashboardPageScenario('Given a simple dashboard', (ctx) => {
ctx.setup(() => {
ctx.mount();
ctx.rerender({ dashboard: getTestDashboard() });
});
it('Should render panels', () => {
expect(screen.getByText('My panel title')).toBeInTheDocument();
});
it('Should update title', () => {
expect(document.title).toBe('My dashboard - Grafana');
});
});
dashboardPageScenario('When going into view mode', (ctx) => {
ctx.setup(() => {
setDataSourceSrv({
get: jest.fn().mockResolvedValue({ getRef: jest.fn(), query: jest.fn().mockResolvedValue([]) }), | getCurrent: () => getTestDashboard(),
} as any);
ctx.mount({
dashboard: getTestDashboard(),
queryParams: { viewPanel: '1' },
});
});
it('Should render panel in view mode', () => {
expect(ctx.dashboard?.panelInView).toBeDefined();
expect(ctx.dashboard?.panels[0].isViewing).toBe(true);
});
it('Should reset state when leaving', () => {
ctx.rerender({ queryParams: {} });
expect(ctx.dashboard?.panelInView).toBeUndefined();
expect(ctx.dashboard?.panels[0].isViewing).toBe(false);
});
});
dashboardPageScenario('When going into edit mode', (ctx) => {
ctx.setup(() => {
ctx.mount({
dashboard: getTestDashboard(),
queryParams: { editPanel: '1' },
});
});
it('Should render panel in edit mode', () => {
expect(ctx.dashboard?.panelInEdit).toBeDefined();
});
it('Should render panel editor', () => {
expect(screen.getByTitle('Apply changes and go back to dashboard')).toBeInTheDocument();
});
it('Should reset state when leaving', () => {
ctx.rerender({ queryParams: {} });
expect(screen.queryByTitle('Apply changes and go back to dashboard')).not.toBeInTheDocument();
});
});
dashboardPageScenario('When dashboard unmounts', (ctx) => {
ctx.setup(() => {
ctx.mount();
ctx.rerender({ dashboard: getTestDashboard() });
ctx.unmount();
});
it('Should call close action', () => {
expect(ctx.props.cleanUpDashboardAndVariables).toHaveBeenCalledTimes(1);
});
});
dashboardPageScenario('When dashboard changes', (ctx) => {
ctx.setup(() => {
ctx.mount();
ctx.rerender({ dashboard: getTestDashboard() });
ctx.rerender({
match: {
params: { uid: 'new-uid' },
} as any,
dashboard: getTestDashboard({ title: 'Another dashboard' }),
});
});
it('Should call clean up action and init', () => {
expect(ctx.props.cleanUpDashboardAndVariables).toHaveBeenCalledTimes(1);
expect(ctx.props.initDashboard).toHaveBeenCalledTimes(2);
});
});
dashboardPageScenario('No kiosk mode tv', (ctx) => {
ctx.setup(() => {
ctx.mount({ dashboard: getTestDashboard() });
ctx.rerender({ dashboard: ctx.dashboard });
});
it('should render dashboard page toolbar and submenu', () => {
expect(screen.queryAllByTestId(selectors.pages.Dashboard.DashNav.navV2)).toHaveLength(1);
expect(screen.queryAllByLabelText(selectors.pages.Dashboard.SubMenu.submenu)).toHaveLength(1);
});
});
dashboardPageScenario('When in full kiosk mode', (ctx) => {
ctx.setup(() => {
locationService.partial({ kiosk: true });
ctx.mount({
queryParams: {},
dashboard: getTestDashboard(),
});
ctx.rerender({ dashboard: ctx.dashboard });
});
it('should | getInstanceSettings: jest.fn().mockReturnValue({ meta: {} }),
getList: jest.fn(),
reload: jest.fn(),
});
setDashboardSrv({ | random_line_split |
DashboardPage.test.tsx | /react';
import { Props, UnthemedDashboardPage } from './DashboardPage';
import { Props as LazyLoaderProps } from '../dashgrid/LazyLoader';
import { Router } from 'react-router-dom';
import { locationService, setDataSourceSrv } from '@grafana/runtime';
import { DashboardModel } from '../state';
import { configureStore } from '../../../store/configureStore';
import { mockToolkitActionCreator } from 'test/core/redux/mocks';
import { DashboardInitPhase, DashboardRoutes } from 'app/types';
import { notifyApp } from 'app/core/actions';
import { selectors } from '@grafana/e2e-selectors';
import { getRouteComponentProps } from 'app/core/navigation/__mocks__/routeProps';
import { createTheme } from '@grafana/data';
import { AutoSizerProps } from 'react-virtualized-auto-sizer';
import { setDashboardSrv } from '../services/DashboardSrv';
jest.mock('app/features/dashboard/dashgrid/LazyLoader', () => {
const LazyLoader = ({ children }: Pick<LazyLoaderProps, 'children'>) => {
return <>{typeof children === 'function' ? children({ isInView: true }) : children}</>;
};
return { LazyLoader };
});
jest.mock('app/features/dashboard/components/DashboardSettings/GeneralSettings', () => {
class GeneralSettings extends React.Component<{}, {}> {
render() {
return <>general settings</>;
}
}
return { GeneralSettings };
});
jest.mock('app/features/query/components/QueryGroup', () => {
return {
QueryGroup: () => null,
};
});
jest.mock('app/core/core', () => ({
appEvents: {
subscribe: () => {
return { unsubscribe: () => {} };
},
},
}));
jest.mock('react-virtualized-auto-sizer', () => {
// The size of the children need to be small enough to be outside the view.
// So it does not trigger the query to be run by the PanelQueryRunner.
return ({ children }: AutoSizerProps) => children({ height: 1, width: 1 });
});
// the mock below gets rid of this warning from recompose:
// Warning: React.createFactory() is deprecated and will be removed in a future major release. Consider using JSX or use React.createElement() directly instead.
jest.mock('@jaegertracing/jaeger-ui-components', () => ({}));
interface ScenarioContext {
dashboard?: DashboardModel | null;
container?: HTMLElement;
mount: (propOverrides?: Partial<Props>) => void;
unmount: () => void;
props: Props;
rerender: (propOverrides?: Partial<Props>) => void;
setup: (fn: () => void) => void;
}
function getTestDashboard(overrides?: any, metaOverrides?: any): DashboardModel {
const data = Object.assign(
{
title: 'My dashboard',
panels: [
{
id: 1,
type: 'timeseries',
title: 'My panel title',
gridPos: { x: 0, y: 0, w: 1, h: 1 },
},
],
},
overrides
);
const meta = Object.assign({ canSave: true, canEdit: true }, metaOverrides);
return new DashboardModel(data, meta);
}
function | (description: string, scenarioFn: (ctx: ScenarioContext) => void) {
describe(description, () => {
let setupFn: () => void;
const ctx: ScenarioContext = {
setup: (fn) => {
setupFn = fn;
},
mount: (propOverrides?: Partial<Props>) => {
const store = configureStore();
const props: Props = {
...getRouteComponentProps({
match: { params: { slug: 'my-dash', uid: '11' } } as any,
route: { routeName: DashboardRoutes.Normal } as any,
}),
initPhase: DashboardInitPhase.NotStarted,
initError: null,
initDashboard: jest.fn(),
notifyApp: mockToolkitActionCreator(notifyApp),
cleanUpDashboardAndVariables: jest.fn(),
cancelVariables: jest.fn(),
templateVarsChangedInUrl: jest.fn(),
dashboard: null,
theme: createTheme(),
};
Object.assign(props, propOverrides);
ctx.props = props;
ctx.dashboard = props.dashboard;
const { container, rerender, unmount } = render(
<Provider store={store}>
<Router history={locationService.getHistory()}>
<UnthemedDashboardPage {...props} />
</Router>
</Provider>
);
ctx.container = container;
ctx.rerender = (newProps?: Partial<Props>) => {
Object.assign(props, newProps);
rerender(
<Provider store={store}>
<Router history={locationService.getHistory()}>
<UnthemedDashboardPage {...props} />
</Router>
</Provider>
);
};
ctx.unmount = unmount;
},
props: {} as Props,
rerender: () => {},
unmount: () => {},
};
beforeEach(() => {
setupFn();
});
scenarioFn(ctx);
});
}
describe('DashboardPage', () => {
dashboardPageScenario('Given initial state', (ctx) => {
ctx.setup(() => {
ctx.mount();
});
it('Should call initDashboard on mount', () => {
expect(ctx.props.initDashboard).toBeCalledWith({
fixUrl: true,
routeName: 'normal-dashboard',
urlSlug: 'my-dash',
urlUid: '11',
});
});
});
dashboardPageScenario('Given a simple dashboard', (ctx) => {
ctx.setup(() => {
ctx.mount();
ctx.rerender({ dashboard: getTestDashboard() });
});
it('Should render panels', () => {
expect(screen.getByText('My panel title')).toBeInTheDocument();
});
it('Should update title', () => {
expect(document.title).toBe('My dashboard - Grafana');
});
});
dashboardPageScenario('When going into view mode', (ctx) => {
ctx.setup(() => {
setDataSourceSrv({
get: jest.fn().mockResolvedValue({ getRef: jest.fn(), query: jest.fn().mockResolvedValue([]) }),
getInstanceSettings: jest.fn().mockReturnValue({ meta: {} }),
getList: jest.fn(),
reload: jest.fn(),
});
setDashboardSrv({
getCurrent: () => getTestDashboard(),
} as any);
ctx.mount({
dashboard: getTestDashboard(),
queryParams: { viewPanel: '1' },
});
});
it('Should render panel in view mode', () => {
expect(ctx.dashboard?.panelInView).toBeDefined();
expect(ctx.dashboard?.panels[0].isViewing).toBe(true);
});
it('Should reset state when leaving', () => {
ctx.rerender({ queryParams: {} });
expect(ctx.dashboard?.panelInView).toBeUndefined();
expect(ctx.dashboard?.panels[0].isViewing).toBe(false);
});
});
dashboardPageScenario('When going into edit mode', (ctx) => {
ctx.setup(() => {
ctx.mount({
dashboard: getTestDashboard(),
queryParams: { editPanel: '1' },
});
});
it('Should render panel in edit mode', () => {
expect(ctx.dashboard?.panelInEdit).toBeDefined();
});
it('Should render panel editor', () => {
expect(screen.getByTitle('Apply changes and go back to dashboard')).toBeInTheDocument();
});
it('Should reset state when leaving', () => {
ctx.rerender({ queryParams: {} });
expect(screen.queryByTitle('Apply changes and go back to dashboard')).not.toBeInTheDocument();
});
});
dashboardPageScenario('When dashboard unmounts', (ctx) => {
ctx.setup(() => {
ctx.mount();
ctx.rerender({ dashboard: getTestDashboard() });
ctx.unmount();
});
it('Should call close action', () => {
expect(ctx.props.cleanUpDashboardAndVariables).toHaveBeenCalledTimes(1);
});
});
dashboardPageScenario('When dashboard changes', (ctx) => {
ctx.setup(() => {
ctx.mount();
ctx.rerender({ dashboard: getTestDashboard() });
ctx.rerender({
match: {
params: { uid: 'new-uid' },
} as any,
dashboard: getTestDashboard({ title: 'Another dashboard' }),
});
});
it('Should call clean up action and init', () => {
expect(ctx.props.cleanUpDashboardAndVariables).toHaveBeenCalledTimes(1);
expect(ctx.props.initDashboard).toHaveBeenCalledTimes(2);
});
});
dashboardPageScenario('No kiosk mode tv', (ctx) => {
ctx.setup(() => {
ctx.mount({ dashboard: getTestDashboard() });
ctx.rerender({ dashboard: ctx.dashboard });
});
it('should render dashboard page toolbar and submenu', () => {
expect(screen.queryAllByTestId(selectors.pages.Dashboard.DashNav.navV2)).toHaveLength(1);
expect(screen.queryAllByLabelText(selectors.pages.Dashboard.SubMenu.submenu)).toHaveLength(1);
});
});
dashboardPageScenario('When in full kiosk mode', (ctx) => {
ctx.setup(() => {
locationService.partial({ kiosk: true });
ctx.mount({
queryParams: {},
dashboard: getTestDashboard(),
});
ctx.rerender({ dashboard: ctx.dashboard });
});
it(' | dashboardPageScenario | identifier_name |
DashboardPage.test.tsx | /react';
import { Props, UnthemedDashboardPage } from './DashboardPage';
import { Props as LazyLoaderProps } from '../dashgrid/LazyLoader';
import { Router } from 'react-router-dom';
import { locationService, setDataSourceSrv } from '@grafana/runtime';
import { DashboardModel } from '../state';
import { configureStore } from '../../../store/configureStore';
import { mockToolkitActionCreator } from 'test/core/redux/mocks';
import { DashboardInitPhase, DashboardRoutes } from 'app/types';
import { notifyApp } from 'app/core/actions';
import { selectors } from '@grafana/e2e-selectors';
import { getRouteComponentProps } from 'app/core/navigation/__mocks__/routeProps';
import { createTheme } from '@grafana/data';
import { AutoSizerProps } from 'react-virtualized-auto-sizer';
import { setDashboardSrv } from '../services/DashboardSrv';
jest.mock('app/features/dashboard/dashgrid/LazyLoader', () => {
const LazyLoader = ({ children }: Pick<LazyLoaderProps, 'children'>) => {
return <>{typeof children === 'function' ? children({ isInView: true }) : children}</>;
};
return { LazyLoader };
});
jest.mock('app/features/dashboard/components/DashboardSettings/GeneralSettings', () => {
class GeneralSettings extends React.Component<{}, {}> {
render() {
return <>general settings</>;
}
}
return { GeneralSettings };
});
jest.mock('app/features/query/components/QueryGroup', () => {
return {
QueryGroup: () => null,
};
});
jest.mock('app/core/core', () => ({
appEvents: {
subscribe: () => {
return { unsubscribe: () => {} };
},
},
}));
jest.mock('react-virtualized-auto-sizer', () => {
// The size of the children need to be small enough to be outside the view.
// So it does not trigger the query to be run by the PanelQueryRunner.
return ({ children }: AutoSizerProps) => children({ height: 1, width: 1 });
});
// the mock below gets rid of this warning from recompose:
// Warning: React.createFactory() is deprecated and will be removed in a future major release. Consider using JSX or use React.createElement() directly instead.
jest.mock('@jaegertracing/jaeger-ui-components', () => ({}));
interface ScenarioContext {
dashboard?: DashboardModel | null;
container?: HTMLElement;
mount: (propOverrides?: Partial<Props>) => void;
unmount: () => void;
props: Props;
rerender: (propOverrides?: Partial<Props>) => void;
setup: (fn: () => void) => void;
}
function getTestDashboard(overrides?: any, metaOverrides?: any): DashboardModel |
function dashboardPageScenario(description: string, scenarioFn: (ctx: ScenarioContext) => void) {
describe(description, () => {
let setupFn: () => void;
const ctx: ScenarioContext = {
setup: (fn) => {
setupFn = fn;
},
mount: (propOverrides?: Partial<Props>) => {
const store = configureStore();
const props: Props = {
...getRouteComponentProps({
match: { params: { slug: 'my-dash', uid: '11' } } as any,
route: { routeName: DashboardRoutes.Normal } as any,
}),
initPhase: DashboardInitPhase.NotStarted,
initError: null,
initDashboard: jest.fn(),
notifyApp: mockToolkitActionCreator(notifyApp),
cleanUpDashboardAndVariables: jest.fn(),
cancelVariables: jest.fn(),
templateVarsChangedInUrl: jest.fn(),
dashboard: null,
theme: createTheme(),
};
Object.assign(props, propOverrides);
ctx.props = props;
ctx.dashboard = props.dashboard;
const { container, rerender, unmount } = render(
<Provider store={store}>
<Router history={locationService.getHistory()}>
<UnthemedDashboardPage {...props} />
</Router>
</Provider>
);
ctx.container = container;
ctx.rerender = (newProps?: Partial<Props>) => {
Object.assign(props, newProps);
rerender(
<Provider store={store}>
<Router history={locationService.getHistory()}>
<UnthemedDashboardPage {...props} />
</Router>
</Provider>
);
};
ctx.unmount = unmount;
},
props: {} as Props,
rerender: () => {},
unmount: () => {},
};
beforeEach(() => {
setupFn();
});
scenarioFn(ctx);
});
}
describe('DashboardPage', () => {
dashboardPageScenario('Given initial state', (ctx) => {
ctx.setup(() => {
ctx.mount();
});
it('Should call initDashboard on mount', () => {
expect(ctx.props.initDashboard).toBeCalledWith({
fixUrl: true,
routeName: 'normal-dashboard',
urlSlug: 'my-dash',
urlUid: '11',
});
});
});
dashboardPageScenario('Given a simple dashboard', (ctx) => {
ctx.setup(() => {
ctx.mount();
ctx.rerender({ dashboard: getTestDashboard() });
});
it('Should render panels', () => {
expect(screen.getByText('My panel title')).toBeInTheDocument();
});
it('Should update title', () => {
expect(document.title).toBe('My dashboard - Grafana');
});
});
dashboardPageScenario('When going into view mode', (ctx) => {
ctx.setup(() => {
setDataSourceSrv({
get: jest.fn().mockResolvedValue({ getRef: jest.fn(), query: jest.fn().mockResolvedValue([]) }),
getInstanceSettings: jest.fn().mockReturnValue({ meta: {} }),
getList: jest.fn(),
reload: jest.fn(),
});
setDashboardSrv({
getCurrent: () => getTestDashboard(),
} as any);
ctx.mount({
dashboard: getTestDashboard(),
queryParams: { viewPanel: '1' },
});
});
it('Should render panel in view mode', () => {
expect(ctx.dashboard?.panelInView).toBeDefined();
expect(ctx.dashboard?.panels[0].isViewing).toBe(true);
});
it('Should reset state when leaving', () => {
ctx.rerender({ queryParams: {} });
expect(ctx.dashboard?.panelInView).toBeUndefined();
expect(ctx.dashboard?.panels[0].isViewing).toBe(false);
});
});
dashboardPageScenario('When going into edit mode', (ctx) => {
ctx.setup(() => {
ctx.mount({
dashboard: getTestDashboard(),
queryParams: { editPanel: '1' },
});
});
it('Should render panel in edit mode', () => {
expect(ctx.dashboard?.panelInEdit).toBeDefined();
});
it('Should render panel editor', () => {
expect(screen.getByTitle('Apply changes and go back to dashboard')).toBeInTheDocument();
});
it('Should reset state when leaving', () => {
ctx.rerender({ queryParams: {} });
expect(screen.queryByTitle('Apply changes and go back to dashboard')).not.toBeInTheDocument();
});
});
dashboardPageScenario('When dashboard unmounts', (ctx) => {
ctx.setup(() => {
ctx.mount();
ctx.rerender({ dashboard: getTestDashboard() });
ctx.unmount();
});
it('Should call close action', () => {
expect(ctx.props.cleanUpDashboardAndVariables).toHaveBeenCalledTimes(1);
});
});
dashboardPageScenario('When dashboard changes', (ctx) => {
ctx.setup(() => {
ctx.mount();
ctx.rerender({ dashboard: getTestDashboard() });
ctx.rerender({
match: {
params: { uid: 'new-uid' },
} as any,
dashboard: getTestDashboard({ title: 'Another dashboard' }),
});
});
it('Should call clean up action and init', () => {
expect(ctx.props.cleanUpDashboardAndVariables).toHaveBeenCalledTimes(1);
expect(ctx.props.initDashboard).toHaveBeenCalledTimes(2);
});
});
dashboardPageScenario('No kiosk mode tv', (ctx) => {
ctx.setup(() => {
ctx.mount({ dashboard: getTestDashboard() });
ctx.rerender({ dashboard: ctx.dashboard });
});
it('should render dashboard page toolbar and submenu', () => {
expect(screen.queryAllByTestId(selectors.pages.Dashboard.DashNav.navV2)).toHaveLength(1);
expect(screen.queryAllByLabelText(selectors.pages.Dashboard.SubMenu.submenu)).toHaveLength(1);
});
});
dashboardPageScenario('When in full kiosk mode', (ctx) => {
ctx.setup(() => {
locationService.partial({ kiosk: true });
ctx.mount({
queryParams: {},
dashboard: getTestDashboard(),
});
ctx.rerender({ dashboard: ctx.dashboard });
});
it | {
const data = Object.assign(
{
title: 'My dashboard',
panels: [
{
id: 1,
type: 'timeseries',
title: 'My panel title',
gridPos: { x: 0, y: 0, w: 1, h: 1 },
},
],
},
overrides
);
const meta = Object.assign({ canSave: true, canEdit: true }, metaOverrides);
return new DashboardModel(data, meta);
} | identifier_body |
autocomplete.py | """Customized autocomplete widgets"""
# Standard Library
import re
# Third Party
from dal import autocomplete
# MuckRock
from muckrock.jurisdiction.models import Jurisdiction
class MRSelect2Mixin:
"""MuckRock Model Select2 mixin"""
def __init__(self, *args, **kwargs):
attrs = {
"data-html": True,
"data-dropdown-css-class": "select2-dropdown",
"data-width": "100%",
}
attrs.update(kwargs.pop("attrs", {}))
super().__init__(*args, attrs=attrs, **kwargs)
def filter_choices_to_render(self, selected_choices):
"""Filter out non-numeric choices"""
selected_choices = [c for c in selected_choices if c.isdecimal()]
return super().filter_choices_to_render(selected_choices)
class ModelSelect2(MRSelect2Mixin, autocomplete.ModelSelect2):
"""MuckRock Model Select2"""
class ModelSelect2Multiple(MRSelect2Mixin, autocomplete.ModelSelect2Multiple):
"""MuckRock Model Select2"""
class Select2MultipleSI(MRSelect2Mixin, autocomplete.Select2Multiple):
"""MuckRock Select2 for state inclusive jurisdiction autocomplete"""
value_format = re.compile(r"\d+-(True|False)")
def filter_choices_to_render(self, selected_choices):
"""Replace self.choices with selected_choices."""
self.choices = []
for choice in selected_choices:
if not self.value_format.match(choice):
|
pk, include_local = choice.split("-")
jurisdiction = Jurisdiction.objects.get(pk=pk)
label = str(jurisdiction)
if include_local == "True":
label += " (include local)"
self.choices.append((choice, label))
| continue | conditional_block |
autocomplete.py | """Customized autocomplete widgets"""
# Standard Library
import re
# Third Party
from dal import autocomplete
# MuckRock
from muckrock.jurisdiction.models import Jurisdiction
class MRSelect2Mixin:
"""MuckRock Model Select2 mixin"""
def __init__(self, *args, **kwargs):
attrs = {
"data-html": True,
"data-dropdown-css-class": "select2-dropdown",
"data-width": "100%",
}
attrs.update(kwargs.pop("attrs", {}))
super().__init__(*args, attrs=attrs, **kwargs)
def filter_choices_to_render(self, selected_choices):
"""Filter out non-numeric choices"""
selected_choices = [c for c in selected_choices if c.isdecimal()]
return super().filter_choices_to_render(selected_choices)
class ModelSelect2(MRSelect2Mixin, autocomplete.ModelSelect2):
|
class ModelSelect2Multiple(MRSelect2Mixin, autocomplete.ModelSelect2Multiple):
"""MuckRock Model Select2"""
class Select2MultipleSI(MRSelect2Mixin, autocomplete.Select2Multiple):
"""MuckRock Select2 for state inclusive jurisdiction autocomplete"""
value_format = re.compile(r"\d+-(True|False)")
def filter_choices_to_render(self, selected_choices):
"""Replace self.choices with selected_choices."""
self.choices = []
for choice in selected_choices:
if not self.value_format.match(choice):
continue
pk, include_local = choice.split("-")
jurisdiction = Jurisdiction.objects.get(pk=pk)
label = str(jurisdiction)
if include_local == "True":
label += " (include local)"
self.choices.append((choice, label))
| """MuckRock Model Select2""" | identifier_body |
autocomplete.py | """Customized autocomplete widgets"""
# Standard Library
import re
# Third Party
from dal import autocomplete
# MuckRock
from muckrock.jurisdiction.models import Jurisdiction
class MRSelect2Mixin:
"""MuckRock Model Select2 mixin"""
def __init__(self, *args, **kwargs):
attrs = {
"data-html": True,
"data-dropdown-css-class": "select2-dropdown",
"data-width": "100%",
}
attrs.update(kwargs.pop("attrs", {}))
super().__init__(*args, attrs=attrs, **kwargs)
def filter_choices_to_render(self, selected_choices):
"""Filter out non-numeric choices"""
selected_choices = [c for c in selected_choices if c.isdecimal()]
return super().filter_choices_to_render(selected_choices)
class ModelSelect2(MRSelect2Mixin, autocomplete.ModelSelect2):
"""MuckRock Model Select2"""
class ModelSelect2Multiple(MRSelect2Mixin, autocomplete.ModelSelect2Multiple):
"""MuckRock Model Select2"""
class | (MRSelect2Mixin, autocomplete.Select2Multiple):
"""MuckRock Select2 for state inclusive jurisdiction autocomplete"""
value_format = re.compile(r"\d+-(True|False)")
def filter_choices_to_render(self, selected_choices):
"""Replace self.choices with selected_choices."""
self.choices = []
for choice in selected_choices:
if not self.value_format.match(choice):
continue
pk, include_local = choice.split("-")
jurisdiction = Jurisdiction.objects.get(pk=pk)
label = str(jurisdiction)
if include_local == "True":
label += " (include local)"
self.choices.append((choice, label))
| Select2MultipleSI | identifier_name |
autocomplete.py | """Customized autocomplete widgets"""
# Standard Library
import re
# Third Party
from dal import autocomplete
# MuckRock
from muckrock.jurisdiction.models import Jurisdiction
| attrs = {
"data-html": True,
"data-dropdown-css-class": "select2-dropdown",
"data-width": "100%",
}
attrs.update(kwargs.pop("attrs", {}))
super().__init__(*args, attrs=attrs, **kwargs)
def filter_choices_to_render(self, selected_choices):
"""Filter out non-numeric choices"""
selected_choices = [c for c in selected_choices if c.isdecimal()]
return super().filter_choices_to_render(selected_choices)
class ModelSelect2(MRSelect2Mixin, autocomplete.ModelSelect2):
"""MuckRock Model Select2"""
class ModelSelect2Multiple(MRSelect2Mixin, autocomplete.ModelSelect2Multiple):
"""MuckRock Model Select2"""
class Select2MultipleSI(MRSelect2Mixin, autocomplete.Select2Multiple):
"""MuckRock Select2 for state inclusive jurisdiction autocomplete"""
value_format = re.compile(r"\d+-(True|False)")
def filter_choices_to_render(self, selected_choices):
"""Replace self.choices with selected_choices."""
self.choices = []
for choice in selected_choices:
if not self.value_format.match(choice):
continue
pk, include_local = choice.split("-")
jurisdiction = Jurisdiction.objects.get(pk=pk)
label = str(jurisdiction)
if include_local == "True":
label += " (include local)"
self.choices.append((choice, label)) |
class MRSelect2Mixin:
"""MuckRock Model Select2 mixin"""
def __init__(self, *args, **kwargs): | random_line_split |
aggregates.rs | // Copyright 2016 Mozilla | // this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
extern crate edn;
extern crate mentat_core;
extern crate core_traits;
extern crate mentat_query_algebrizer;
extern crate mentat_query_projector;
extern crate query_projector_traits;
use core_traits::{
Attribute,
Entid,
ValueType,
};
use mentat_core::{
Schema,
};
use edn::query::{
Keyword,
};
use mentat_query_algebrizer::{
Known,
algebrize,
parse_find_string,
};
use mentat_query_projector::{
query_projection,
};
// These are helpers that tests use to build Schema instances.
fn associate_ident(schema: &mut Schema, i: Keyword, e: Entid) {
schema.entid_map.insert(e, i.clone());
schema.ident_map.insert(i.clone(), e);
}
fn add_attribute(schema: &mut Schema, e: Entid, a: Attribute) {
schema.attribute_map.insert(e, a);
}
fn prepopulated_schema() -> Schema {
let mut schema = Schema::default();
associate_ident(&mut schema, Keyword::namespaced("foo", "name"), 65);
associate_ident(&mut schema, Keyword::namespaced("foo", "age"), 68);
associate_ident(&mut schema, Keyword::namespaced("foo", "height"), 69);
add_attribute(&mut schema, 65, Attribute {
value_type: ValueType::String,
multival: false,
..Default::default()
});
add_attribute(&mut schema, 68, Attribute {
value_type: ValueType::Long,
multival: false,
..Default::default()
});
add_attribute(&mut schema, 69, Attribute {
value_type: ValueType::Long,
multival: false,
..Default::default()
});
schema
}
#[test]
fn test_aggregate_unsuitable_type() {
let schema = prepopulated_schema();
let query = r#"[:find (avg ?e)
:where
[?e :foo/age ?a]]"#;
// While the query itself algebrizes and parses…
let parsed = parse_find_string(query).expect("query input to have parsed");
let algebrized = algebrize(Known::for_schema(&schema), parsed).expect("query algebrizes");
// … when we look at the projection list, we cannot reconcile the types.
assert!(query_projection(&schema, &algebrized).is_err());
}
#[test]
fn test_the_without_max_or_min() {
let schema = prepopulated_schema();
let query = r#"[:find (the ?e) ?a
:where
[?e :foo/age ?a]]"#;
// While the query itself algebrizes and parses…
let parsed = parse_find_string(query).expect("query input to have parsed");
let algebrized = algebrize(Known::for_schema(&schema), parsed).expect("query algebrizes");
// … when we look at the projection list, we cannot reconcile the types.
let projection = query_projection(&schema, &algebrized);
assert!(projection.is_err());
use query_projector_traits::errors::{
ProjectorError,
};
match projection.err().expect("expected failure") {
ProjectorError::InvalidProjection(s) => {
assert_eq!(s.as_str(), "Warning: used `the` without `min` or `max`.");
},
_ => panic!(),
}
} | //
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use | random_line_split |
aggregates.rs | // Copyright 2016 Mozilla
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
extern crate edn;
extern crate mentat_core;
extern crate core_traits;
extern crate mentat_query_algebrizer;
extern crate mentat_query_projector;
extern crate query_projector_traits;
use core_traits::{
Attribute,
Entid,
ValueType,
};
use mentat_core::{
Schema,
};
use edn::query::{
Keyword,
};
use mentat_query_algebrizer::{
Known,
algebrize,
parse_find_string,
};
use mentat_query_projector::{
query_projection,
};
// These are helpers that tests use to build Schema instances.
fn associate_ident(schema: &mut Schema, i: Keyword, e: Entid) {
schema.entid_map.insert(e, i.clone());
schema.ident_map.insert(i.clone(), e);
}
fn add_attribute(schema: &mut Schema, e: Entid, a: Attribute) {
schema.attribute_map.insert(e, a);
}
fn prepopulated_schema() -> Schema {
let mut schema = Schema::default();
associate_ident(&mut schema, Keyword::namespaced("foo", "name"), 65);
associate_ident(&mut schema, Keyword::namespaced("foo", "age"), 68);
associate_ident(&mut schema, Keyword::namespaced("foo", "height"), 69);
add_attribute(&mut schema, 65, Attribute {
value_type: ValueType::String,
multival: false,
..Default::default()
});
add_attribute(&mut schema, 68, Attribute {
value_type: ValueType::Long,
multival: false,
..Default::default()
});
add_attribute(&mut schema, 69, Attribute {
value_type: ValueType::Long,
multival: false,
..Default::default()
});
schema
}
#[test]
fn test_aggregate_unsuitable_type() {
let schema = prepopulated_schema();
let query = r#"[:find (avg ?e)
:where
[?e :foo/age ?a]]"#;
// While the query itself algebrizes and parses…
let parsed = parse_find_string(query).expect("query input to have parsed");
let algebrized = algebrize(Known::for_schema(&schema), parsed).expect("query algebrizes");
// … when we look at the projection list, we cannot reconcile the types.
assert!(query_projection(&schema, &algebrized).is_err());
}
#[test]
fn test |
let schema = prepopulated_schema();
let query = r#"[:find (the ?e) ?a
:where
[?e :foo/age ?a]]"#;
// While the query itself algebrizes and parses…
let parsed = parse_find_string(query).expect("query input to have parsed");
let algebrized = algebrize(Known::for_schema(&schema), parsed).expect("query algebrizes");
// … when we look at the projection list, we cannot reconcile the types.
let projection = query_projection(&schema, &algebrized);
assert!(projection.is_err());
use query_projector_traits::errors::{
ProjectorError,
};
match projection.err().expect("expected failure") {
ProjectorError::InvalidProjection(s) => {
assert_eq!(s.as_str(), "Warning: used `the` without `min` or `max`.");
},
_ => panic!(),
}
}
| _the_without_max_or_min() { | identifier_name |
aggregates.rs | // Copyright 2016 Mozilla
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
extern crate edn;
extern crate mentat_core;
extern crate core_traits;
extern crate mentat_query_algebrizer;
extern crate mentat_query_projector;
extern crate query_projector_traits;
use core_traits::{
Attribute,
Entid,
ValueType,
};
use mentat_core::{
Schema,
};
use edn::query::{
Keyword,
};
use mentat_query_algebrizer::{
Known,
algebrize,
parse_find_string,
};
use mentat_query_projector::{
query_projection,
};
// These are helpers that tests use to build Schema instances.
fn associate_ident(schema: &mut Schema, i: Keyword, e: Entid) {
schema.entid_map.insert(e, i.clone());
schema.ident_map.insert(i.clone(), e);
}
fn add_attribute(schema: &mut Schema, e: Entid, a: Attribute) {
schema.attribute_map.insert(e, a);
}
fn prepopulated_schema() -> Schema | schema
}
#[test]
fn test_aggregate_unsuitable_type() {
let schema = prepopulated_schema();
let query = r#"[:find (avg ?e)
:where
[?e :foo/age ?a]]"#;
// While the query itself algebrizes and parses…
let parsed = parse_find_string(query).expect("query input to have parsed");
let algebrized = algebrize(Known::for_schema(&schema), parsed).expect("query algebrizes");
// … when we look at the projection list, we cannot reconcile the types.
assert!(query_projection(&schema, &algebrized).is_err());
}
#[test]
fn test_the_without_max_or_min() {
let schema = prepopulated_schema();
let query = r#"[:find (the ?e) ?a
:where
[?e :foo/age ?a]]"#;
// While the query itself algebrizes and parses…
let parsed = parse_find_string(query).expect("query input to have parsed");
let algebrized = algebrize(Known::for_schema(&schema), parsed).expect("query algebrizes");
// … when we look at the projection list, we cannot reconcile the types.
let projection = query_projection(&schema, &algebrized);
assert!(projection.is_err());
use query_projector_traits::errors::{
ProjectorError,
};
match projection.err().expect("expected failure") {
ProjectorError::InvalidProjection(s) => {
assert_eq!(s.as_str(), "Warning: used `the` without `min` or `max`.");
},
_ => panic!(),
}
}
| {
let mut schema = Schema::default();
associate_ident(&mut schema, Keyword::namespaced("foo", "name"), 65);
associate_ident(&mut schema, Keyword::namespaced("foo", "age"), 68);
associate_ident(&mut schema, Keyword::namespaced("foo", "height"), 69);
add_attribute(&mut schema, 65, Attribute {
value_type: ValueType::String,
multival: false,
..Default::default()
});
add_attribute(&mut schema, 68, Attribute {
value_type: ValueType::Long,
multival: false,
..Default::default()
});
add_attribute(&mut schema, 69, Attribute {
value_type: ValueType::Long,
multival: false,
..Default::default()
}); | identifier_body |
__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# === django_pgmp ---------------------------------------------------------===
# This file is part of django-pgpm. django-pgpm is copyright © 2012, RokuSigma
# Inc. and contributors. See AUTHORS and LICENSE for more details.
#
# django-pgpm is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# django-pgpm is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with django-pgpm. If not, see <http://www.gnu.org/licenses/>.
# ===----------------------------------------------------------------------===
VERSION = (0,0,4, 'alpha', 0)
def get_version():
v |
# ===----------------------------------------------------------------------===
# End of File
# ===----------------------------------------------------------------------===
| ersion = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version = '%s.%s' % (version, VERSION[2])
if VERSION[3:] == ('alpha', 0):
version = '%spre-alpha' % version
else:
if VERSION[3] != 'final':
version = "%s%s" % (version, VERSION[3])
if VERSION[4] != 0:
version = '%s%s' % (version, VERSION[4])
return version
| identifier_body |
__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# === django_pgmp ---------------------------------------------------------===
# This file is part of django-pgpm. django-pgpm is copyright © 2012, RokuSigma
# Inc. and contributors. See AUTHORS and LICENSE for more details.
#
# django-pgpm is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# django-pgpm is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with django-pgpm. If not, see <http://www.gnu.org/licenses/>.
# ===----------------------------------------------------------------------===
VERSION = (0,0,4, 'alpha', 0)
def g | ):
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version = '%s.%s' % (version, VERSION[2])
if VERSION[3:] == ('alpha', 0):
version = '%spre-alpha' % version
else:
if VERSION[3] != 'final':
version = "%s%s" % (version, VERSION[3])
if VERSION[4] != 0:
version = '%s%s' % (version, VERSION[4])
return version
# ===----------------------------------------------------------------------===
# End of File
# ===----------------------------------------------------------------------===
| et_version( | identifier_name |
__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# === django_pgmp ---------------------------------------------------------===
# This file is part of django-pgpm. django-pgpm is copyright © 2012, RokuSigma
# Inc. and contributors. See AUTHORS and LICENSE for more details.
#
# django-pgpm is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# django-pgpm is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with django-pgpm. If not, see <http://www.gnu.org/licenses/>.
# ===----------------------------------------------------------------------===
VERSION = (0,0,4, 'alpha', 0)
def get_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version = '%s.%s' % (version, VERSION[2])
if VERSION[3:] == ('alpha', 0):
version = '%spre-alpha' % version
else:
i | return version
# ===----------------------------------------------------------------------===
# End of File
# ===----------------------------------------------------------------------===
| f VERSION[3] != 'final':
version = "%s%s" % (version, VERSION[3])
if VERSION[4] != 0:
version = '%s%s' % (version, VERSION[4])
| conditional_block |
__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# === django_pgmp ---------------------------------------------------------===
# This file is part of django-pgpm. django-pgpm is copyright © 2012, RokuSigma
# Inc. and contributors. See AUTHORS and LICENSE for more details. | # any later version.
#
# django-pgpm is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with django-pgpm. If not, see <http://www.gnu.org/licenses/>.
# ===----------------------------------------------------------------------===
VERSION = (0,0,4, 'alpha', 0)
def get_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version = '%s.%s' % (version, VERSION[2])
if VERSION[3:] == ('alpha', 0):
version = '%spre-alpha' % version
else:
if VERSION[3] != 'final':
version = "%s%s" % (version, VERSION[3])
if VERSION[4] != 0:
version = '%s%s' % (version, VERSION[4])
return version
# ===----------------------------------------------------------------------===
# End of File
# ===----------------------------------------------------------------------=== | #
# django-pgpm is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) | random_line_split |
help.js | 'use strict';
const chalk = require('chalk');
const fs = require('hexo-fs');
const pathFn = require('path');
const Promise = require('bluebird');
const COMPLETION_DIR = pathFn.join(__dirname, '../../completion');
function helpConsole(args) {
if (args.v || args.version) {
return this.call('version');
} else if (args.consoleList) {
return printConsoleList(this.extend.console.list());
} else if (typeof args.completion === 'string') {
return printCompletion(args.completion);
}
const command = args._[0];
if (typeof command === 'string' && command !== 'help') {
const c = this.extend.console.get(command);
if (c) return printHelpForCommand(this.extend.console.alias[command], c);
}
return printAllHelp(this.extend.console.list());
}
function printHelpForCommand(command, data) {
const { options } = data;
const desc = options.description || options.desc || data.description || data.desc;
console.log('Usage: hexo', command, options.usage || '');
console.log('\nDescription:');
console.log(`${desc}\n`);
if (options.arguments) printList('Arguments', options.arguments);
if (options.commands) printList('Commands', options.commands);
if (options.options) printList('Options', options.options);
return Promise.resolve();
}
function printAllHelp(list) {
const keys = Object.keys(list);
const commands = [];
const { length } = keys;
for (let i = 0; i < length; i++) {
const key = keys[i];
commands.push({
name: key,
desc: list[key].desc
});
}
console.log('Usage: hexo <command>\n');
printList('Commands', commands);
printList('Global Options', [
{name: '--config', desc: 'Specify config file instead of using _config.yml'},
{name: '--cwd', desc: 'Specify the CWD'},
{name: '--debug', desc: 'Display all verbose messages in the terminal'},
{name: '--draft', desc: 'Display draft posts'},
{name: '--safe', desc: 'Disable all plugins and scripts'},
{name: '--silent', desc: 'Hide output on console'}
]);
console.log('For more help, you can use \'hexo help [command]\' for the detailed information');
console.log('or you can check the docs:', chalk.underline('http://hexo.io/docs/'));
return Promise.resolve();
}
function | (title, list) {
list.sort((a, b) => {
const nameA = a.name;
const nameB = b.name;
if (nameA < nameB) return -1;
if (nameA > nameB) return 1;
return 0;
});
const lengths = list.map(item => item.name.length);
const maxLen = lengths.reduce((prev, current) => Math.max(prev, current));
let str = `${title}:\n`;
const { length } = list;
for (let i = 0; i < length; i++) {
const { description = list[i].desc } = list[i];
const pad = ' '.repeat(maxLen - lengths[i] + 2);
str += ` ${chalk.bold(list[i].name)}${pad}${description}`;
}
console.log(str);
return Promise.resolve();
}
function printConsoleList(list) {
console.log(Object.keys(list).join('\n'));
return Promise.resolve();
}
function printCompletion(type) {
return fs.readFile(pathFn.join(COMPLETION_DIR, type)).then(content => {
console.log(content);
});
}
module.exports = helpConsole;
| printList | identifier_name |
help.js | 'use strict';
const chalk = require('chalk');
const fs = require('hexo-fs');
const pathFn = require('path');
const Promise = require('bluebird');
const COMPLETION_DIR = pathFn.join(__dirname, '../../completion');
function helpConsole(args) {
if (args.v || args.version) {
return this.call('version');
} else if (args.consoleList) {
return printConsoleList(this.extend.console.list());
} else if (typeof args.completion === 'string') {
return printCompletion(args.completion);
}
const command = args._[0];
if (typeof command === 'string' && command !== 'help') {
const c = this.extend.console.get(command);
if (c) return printHelpForCommand(this.extend.console.alias[command], c);
}
return printAllHelp(this.extend.console.list());
}
function printHelpForCommand(command, data) {
const { options } = data;
const desc = options.description || options.desc || data.description || data.desc;
console.log('Usage: hexo', command, options.usage || '');
console.log('\nDescription:');
console.log(`${desc}\n`);
if (options.arguments) printList('Arguments', options.arguments);
if (options.commands) printList('Commands', options.commands);
if (options.options) printList('Options', options.options);
return Promise.resolve();
}
function printAllHelp(list) | {name: '--cwd', desc: 'Specify the CWD'},
{name: '--debug', desc: 'Display all verbose messages in the terminal'},
{name: '--draft', desc: 'Display draft posts'},
{name: '--safe', desc: 'Disable all plugins and scripts'},
{name: '--silent', desc: 'Hide output on console'}
]);
console.log('For more help, you can use \'hexo help [command]\' for the detailed information');
console.log('or you can check the docs:', chalk.underline('http://hexo.io/docs/'));
return Promise.resolve();
}
function printList(title, list) {
list.sort((a, b) => {
const nameA = a.name;
const nameB = b.name;
if (nameA < nameB) return -1;
if (nameA > nameB) return 1;
return 0;
});
const lengths = list.map(item => item.name.length);
const maxLen = lengths.reduce((prev, current) => Math.max(prev, current));
let str = `${title}:\n`;
const { length } = list;
for (let i = 0; i < length; i++) {
const { description = list[i].desc } = list[i];
const pad = ' '.repeat(maxLen - lengths[i] + 2);
str += ` ${chalk.bold(list[i].name)}${pad}${description}`;
}
console.log(str);
return Promise.resolve();
}
function printConsoleList(list) {
console.log(Object.keys(list).join('\n'));
return Promise.resolve();
}
function printCompletion(type) {
return fs.readFile(pathFn.join(COMPLETION_DIR, type)).then(content => {
console.log(content);
});
}
module.exports = helpConsole;
| {
const keys = Object.keys(list);
const commands = [];
const { length } = keys;
for (let i = 0; i < length; i++) {
const key = keys[i];
commands.push({
name: key,
desc: list[key].desc
});
}
console.log('Usage: hexo <command>\n');
printList('Commands', commands);
printList('Global Options', [
{name: '--config', desc: 'Specify config file instead of using _config.yml'}, | identifier_body |
help.js | 'use strict';
const chalk = require('chalk');
const fs = require('hexo-fs');
const pathFn = require('path');
const Promise = require('bluebird');
const COMPLETION_DIR = pathFn.join(__dirname, '../../completion');
function helpConsole(args) {
if (args.v || args.version) {
return this.call('version');
} else if (args.consoleList) {
return printConsoleList(this.extend.console.list());
} else if (typeof args.completion === 'string') {
return printCompletion(args.completion);
}
const command = args._[0];
if (typeof command === 'string' && command !== 'help') {
const c = this.extend.console.get(command);
if (c) return printHelpForCommand(this.extend.console.alias[command], c);
}
return printAllHelp(this.extend.console.list());
}
function printHelpForCommand(command, data) {
const { options } = data;
const desc = options.description || options.desc || data.description || data.desc;
console.log('Usage: hexo', command, options.usage || '');
console.log('\nDescription:');
console.log(`${desc}\n`);
if (options.arguments) printList('Arguments', options.arguments);
if (options.commands) printList('Commands', options.commands);
if (options.options) printList('Options', options.options);
return Promise.resolve();
}
function printAllHelp(list) {
const keys = Object.keys(list);
const commands = [];
const { length } = keys;
for (let i = 0; i < length; i++) {
const key = keys[i];
commands.push({
name: key,
desc: list[key].desc
});
}
console.log('Usage: hexo <command>\n');
printList('Commands', commands);
printList('Global Options', [
{name: '--config', desc: 'Specify config file instead of using _config.yml'},
{name: '--cwd', desc: 'Specify the CWD'},
{name: '--debug', desc: 'Display all verbose messages in the terminal'}, | {name: '--silent', desc: 'Hide output on console'}
]);
console.log('For more help, you can use \'hexo help [command]\' for the detailed information');
console.log('or you can check the docs:', chalk.underline('http://hexo.io/docs/'));
return Promise.resolve();
}
function printList(title, list) {
list.sort((a, b) => {
const nameA = a.name;
const nameB = b.name;
if (nameA < nameB) return -1;
if (nameA > nameB) return 1;
return 0;
});
const lengths = list.map(item => item.name.length);
const maxLen = lengths.reduce((prev, current) => Math.max(prev, current));
let str = `${title}:\n`;
const { length } = list;
for (let i = 0; i < length; i++) {
const { description = list[i].desc } = list[i];
const pad = ' '.repeat(maxLen - lengths[i] + 2);
str += ` ${chalk.bold(list[i].name)}${pad}${description}`;
}
console.log(str);
return Promise.resolve();
}
function printConsoleList(list) {
console.log(Object.keys(list).join('\n'));
return Promise.resolve();
}
function printCompletion(type) {
return fs.readFile(pathFn.join(COMPLETION_DIR, type)).then(content => {
console.log(content);
});
}
module.exports = helpConsole; | {name: '--draft', desc: 'Display draft posts'},
{name: '--safe', desc: 'Disable all plugins and scripts'}, | random_line_split |
handler.js | const config = require('../../../server/config'),
Manager = require('./manager'),
manager = new Manager();
// Responsible for handling requests for sitemap files
module.exports = function handler(siteApp) {
const verifyResourceType = function verifyResourceType(req, res, next) {
if (!Object.prototype.hasOwnProperty.call(manager, req.params.resource)) |
next();
};
siteApp.get('/sitemap.xml', function sitemapXML(req, res) {
res.set({
'Cache-Control': 'public, max-age=' + config.get('caching:sitemap:maxAge'),
'Content-Type': 'text/xml'
});
res.send(manager.getIndexXml());
});
siteApp.get('/sitemap-:resource.xml', verifyResourceType, function sitemapResourceXML(req, res) {
var type = req.params.resource,
page = 1;
res.set({
'Cache-Control': 'public, max-age=' + config.get('caching:sitemap:maxAge'),
'Content-Type': 'text/xml'
});
res.send(manager.getSiteMapXml(type, page));
});
};
| {
return res.sendStatus(404);
} | conditional_block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.