Dataset columns (as shown in the dataset viewer):

file_name: large_string (lengths 4 to 140)
prefix:    large_string (lengths 0 to 12.1k)
suffix:    large_string (lengths 0 to 12k)
middle:    large_string (lengths 0 to 7.51k)
fim_type:  large_string (4 classes: conditional_block, random_line_split, identifier_name, identifier_body)
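Each record below pairs a code prefix and suffix with the held-out middle span, labelled by fim_type. As a minimal sketch of how one such row could be assembled into a fill-in-the-middle training prompt, assuming StarCoder-style sentinel tokens (the token strings are an illustrative convention, not something this dataset's schema mandates):

```python
# A minimal sketch, assuming StarCoder-style FIM sentinels; the token
# strings below are illustrative, not part of this dataset's schema.
def build_fim_prompt(row: dict) -> str:
    """Assemble one row into a prefix-suffix-middle (PSM) prompt."""
    return (
        "<fim_prefix>" + row["prefix"]
        + "<fim_suffix>" + row["suffix"]
        + "<fim_middle>" + row["middle"]
    )

# Toy row shaped like the records below:
row = {"prefix": "def add(a, b):\n    ", "suffix": "\n", "middle": "return a + b"}
print(build_fim_prompt(row))
```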
index.js
import { createFilter, makeLegalIdentifier } from 'rollup-pluginutils'; export default function json(options = {}) { const filter = createFilter(options.include, options.exclude); return { name: 'json', transform(json, id) { if (id.slice(-5) !== '.json') return null; if (!filter(id)) return null; const data = JSON.parse(json); let code = ''; const ast = { type: 'Program', sourceType: 'module', start: 0, end: null, body: [] }; if (Object.prototype.toString.call(data) !== '[object Object]')
else { const indent = 'indent' in options ? options.indent : '\t'; const validKeys = []; const invalidKeys = []; Object.keys(data).forEach(key => { if (key === makeLegalIdentifier(key)) { validKeys.push(key); } else { invalidKeys.push(key); } }); let char = 0; validKeys.forEach(key => { const declarationType = options.preferConst ? 'const' : 'var'; const declaration = `export ${declarationType} ${key} = ${JSON.stringify(data[key])};`; const start = char; const end = start + declaration.length; // generate fake AST node while we're here ast.body.push({ type: 'ExportNamedDeclaration', start: char, end: char + declaration.length, declaration: { type: 'VariableDeclaration', start: start + 7, // 'export '.length end, declarations: [ { type: 'VariableDeclarator', start: start + 7 + declarationType.length + 1, // `export ${declarationType} `.length end: end - 1, id: { type: 'Identifier', start: start + 7 + declarationType.length + 1, // `export ${declarationType} `.length end: start + 7 + declarationType.length + 1 + key.length, // `export ${declarationType} ${key}`.length name: key }, init: { type: 'Literal', start: start + 7 + declarationType.length + 1 + key.length + 3, // `export ${declarationType} ${key} = `.length end: end - 1, value: null, raw: 'null' } } ], kind: declarationType }, specifiers: [], source: null }); char = end + 1; code += `${declaration}\n`; }); const defaultExportNode = { type: 'ExportDefaultDeclaration', start: char, end: null, declaration: { type: 'ObjectExpression', start: char + 15, end: null, properties: [] } }; char += 17 + indent.length; // 'export default {\n\t'.length const defaultExportRows = validKeys .map(key => { const row = `${key}: ${key}`; const start = char; const end = start + row.length; defaultExportNode.declaration.properties.push({ type: 'Property', start, end, method: false, shorthand: false, computed: false, key: { type: 'Identifier', start, end: start + key.length, name: key }, value: { type: 'Identifier', start: start + key.length + 2, end, name: key }, kind: 'init' }); char += row.length + (2 + indent.length); // ',\n\t'.length return row; }) .concat( invalidKeys.map(key => `"${key}": ${JSON.stringify(data[key])}`) ); code += `export default {\n${indent}${defaultExportRows.join(`,\n${indent}`)}\n};`; ast.body.push(defaultExportNode); const end = code.length; defaultExportNode.declaration.end = end - 1; defaultExportNode.end = end; } ast.end = code.length; return { ast, code, map: { mappings: '' } }; } }; }
{ code = `export default ${json};`; ast.body.push({ type: 'ExportDefaultDeclaration', start: 0, end: code.length, declaration: { type: 'Literal', start: 15, end: code.length - 1, value: null, raw: 'null' } }); }
conditional_block
index.js
import { createFilter, makeLegalIdentifier } from 'rollup-pluginutils'; export default function json(options = {}) { const filter = createFilter(options.include, options.exclude); return { name: 'json', transform(json, id) { if (id.slice(-5) !== '.json') return null; if (!filter(id)) return null; const data = JSON.parse(json); let code = ''; const ast = { type: 'Program', sourceType: 'module', start: 0, end: null, body: [] }; if (Object.prototype.toString.call(data) !== '[object Object]') { code = `export default ${json};`; ast.body.push({ type: 'ExportDefaultDeclaration', start: 0, end: code.length, declaration: { type: 'Literal', start: 15, end: code.length - 1, value: null, raw: 'null' } }); } else { const indent = 'indent' in options ? options.indent : '\t'; const validKeys = []; const invalidKeys = []; Object.keys(data).forEach(key => { if (key === makeLegalIdentifier(key)) { validKeys.push(key); } else { invalidKeys.push(key); } }); let char = 0; validKeys.forEach(key => { const declarationType = options.preferConst ? 'const' : 'var'; const declaration = `export ${declarationType} ${key} = ${JSON.stringify(data[key])};`; const start = char; const end = start + declaration.length; // generate fake AST node while we're here ast.body.push({ type: 'ExportNamedDeclaration', start: char, end: char + declaration.length, declaration: { type: 'VariableDeclaration', start: start + 7, // 'export '.length end, declarations: [ { type: 'VariableDeclarator', start: start + 7 + declarationType.length + 1, // `export ${declarationType} `.length end: end - 1, id: { type: 'Identifier', start: start + 7 + declarationType.length + 1, // `export ${declarationType} `.length end: start + 7 + declarationType.length + 1 + key.length, // `export ${declarationType} ${key}`.length name: key }, init: { type: 'Literal', start: start + 7 + declarationType.length + 1 + key.length + 3, // `export ${declarationType} ${key} = `.length end: end - 1, value: null, raw: 'null' } } ], kind: declarationType
}); char = end + 1; code += `${declaration}\n`; }); const defaultExportNode = { type: 'ExportDefaultDeclaration', start: char, end: null, declaration: { type: 'ObjectExpression', start: char + 15, end: null, properties: [] } }; char += 17 + indent.length; // 'export default {\n\t'.length const defaultExportRows = validKeys .map(key => { const row = `${key}: ${key}`; const start = char; const end = start + row.length; defaultExportNode.declaration.properties.push({ type: 'Property', start, end, method: false, shorthand: false, computed: false, key: { type: 'Identifier', start, end: start + key.length, name: key }, value: { type: 'Identifier', start: start + key.length + 2, end, name: key }, kind: 'init' }); char += row.length + (2 + indent.length); // ',\n\t'.length return row; }) .concat( invalidKeys.map(key => `"${key}": ${JSON.stringify(data[key])}`) ); code += `export default {\n${indent}${defaultExportRows.join(`,\n${indent}`)}\n};`; ast.body.push(defaultExportNode); const end = code.length; defaultExportNode.declaration.end = end - 1; defaultExportNode.end = end; } ast.end = code.length; return { ast, code, map: { mappings: '' } }; } }; }
}, specifiers: [], source: null
random_line_split
floatf.rs
//! formatter for %f %F common-notation floating-point subs use super::super::format_field::FormatField; use super::super::formatter::{InPrefix, FormatPrimitive, Formatter}; use super::float_common::{FloatAnalysis, get_primitive_dec, primitive_to_str_common}; pub struct Floatf { as_num: f64, } impl Floatf { pub fn new() -> Floatf { Floatf { as_num: 0.0 } } } impl Formatter for Floatf { fn get_primitive(&self, field: &FormatField, inprefix: &InPrefix, str_in: &str) -> Option<FormatPrimitive> { let second_field = field.second_field.unwrap_or(6) + 1; let analysis = FloatAnalysis::analyze(&str_in, inprefix, None, Some(second_field as usize), false); let f = get_primitive_dec(inprefix, &str_in[inprefix.offset..], &analysis, second_field as usize, None); Some(f) } fn
(&self, prim: &FormatPrimitive, field: FormatField) -> String { primitive_to_str_common(prim, &field) } }
primitive_to_str
identifier_name
floatf.rs
//! formatter for %f %F common-notation floating-point subs use super::super::format_field::FormatField; use super::super::formatter::{InPrefix, FormatPrimitive, Formatter}; use super::float_common::{FloatAnalysis, get_primitive_dec, primitive_to_str_common}; pub struct Floatf { as_num: f64, } impl Floatf { pub fn new() -> Floatf { Floatf { as_num: 0.0 } } } impl Formatter for Floatf { fn get_primitive(&self, field: &FormatField, inprefix: &InPrefix, str_in: &str) -> Option<FormatPrimitive> { let second_field = field.second_field.unwrap_or(6) + 1; let analysis = FloatAnalysis::analyze(&str_in, inprefix, None, Some(second_field as usize), false); let f = get_primitive_dec(inprefix, &str_in[inprefix.offset..], &analysis, second_field as usize, None); Some(f) } fn primitive_to_str(&self, prim: &FormatPrimitive, field: FormatField) -> String
}
{ primitive_to_str_common(prim, &field) }
identifier_body
floatf.rs
//! formatter for %f %F common-notation floating-point subs use super::super::format_field::FormatField; use super::super::formatter::{InPrefix, FormatPrimitive, Formatter}; use super::float_common::{FloatAnalysis, get_primitive_dec, primitive_to_str_common}; pub struct Floatf { as_num: f64, } impl Floatf { pub fn new() -> Floatf { Floatf { as_num: 0.0 } } } impl Formatter for Floatf { fn get_primitive(&self, field: &FormatField, inprefix: &InPrefix, str_in: &str) -> Option<FormatPrimitive> { let second_field = field.second_field.unwrap_or(6) + 1; let analysis = FloatAnalysis::analyze(&str_in,
&str_in[inprefix.offset..], &analysis, second_field as usize, None); Some(f) } fn primitive_to_str(&self, prim: &FormatPrimitive, field: FormatField) -> String { primitive_to_str_common(prim, &field) } }
inprefix, None, Some(second_field as usize), false); let f = get_primitive_dec(inprefix,
random_line_split
count_custom_vhs.py
import os, sys import json import copy import numpy as np import random from multiprocessing import Pool import ipdb ################################################################################################ utils_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'nlp scripts') source_vh_dir = '/home/ronotex/Downloads/vector_hash/ingenierias_mineria' #source_vh_dir = '/home/ronotex/Downloads/vector_hash/mantenimiento_en_minernia' #treemap_name = 'carreras_rubro_mina' #adj_name = 'ing_total_adjmatrix' treemap_name = 'carreras_mantto_mina' adj_name = 'mantto_mina_adjmatrix' class LabelDict(dict): def __init__(self, label_names=[]): self.names = [] for name in label_names: self.add(name) def add(self, name): label_id = len(self.names) if name in self: #warnings.warn('Ignoring duplicated label ' + name) return self[name] self[name] = label_id self.names.append(name) return label_id def get_label_name(self, label_id): return self.names[label_id] def
(self, name): if name not in self: return -1 return self[name] def size(self): return len(self.names) ################################################################################################ hierarchy = json.loads(open('carreras_ing2.json').read()) # docname : {docname : true name} nameByFile = json.loads(open('ident_names2.json').read()) fileByName = {} temp={} for (file,name) in nameByFile.items(): temp[file.strip(' ')] = name.strip(' ') fileByName[name.strip(' ')] = file.strip(' ') nameByFile = dict(temp) ################################################################################################ def sorter(T,sizeById, file_dict): if "children" not in T: _id = file_dict.get_label_id(fileByName[T["name"]]) try: T["size"] = int(sizeById[_id]) except: T["size"] = sizeById[_id] return float(T["size"]) children = T["children"] temp = [] _total = 0 for child in children: subt_sum = sorter(child,sizeById, file_dict) _total += subt_sum temp.append(subt_sum) temp = list(zip(temp,range(len(children)))) temp.sort(reverse=True) T["children"] = [children[k[1]] for k in temp] return _total def getSortedLeaves(T, V,file_dict): if "children" not in T: fn = fileByName[ T["name"] ] V.append(file_dict.get_label_id(fn)) return for child in T["children"]: getSortedLeaves(child,V,file_dict) ################################################################################################ ################################################################################################ if __name__=='__main__': vh_dict = LabelDict() file_dict = LabelDict() graph = np.zeros([30,30]) vhsByFile = [set() for i in range(30)] freq_major = np.zeros([30]) for root,dirs,filenames in os.walk(source_vh_dir): for f in filenames: if f[-1]!='~': #file_name = f[3:] # vh_name #if file_name=='all' or file_name=='ing': # continue p = f.find('_mineria') #p = f.find('_mantto_mineria') file_name = f[3:p] # vh_name_mineria #file_name = f[14:] # mantto_min_vh_name id_file = file_dict.add(file_name) for line in open(os.path.join(source_vh_dir,f)): line = line.strip('\n') if line!='': id_vh = vh_dict.add(line) freq_major[id_file]+=1 vhsByFile[id_file].add(id_vh) count_id_vh = vh_dict.size() count_id_file = file_dict.size() print(count_id_vh) print(count_id_file) ipdb.set_trace() # node for k in range(count_id_file): # possible edges outgoing = set() for i in range(count_id_file): if k!=i: temp = vhsByFile[k] & vhsByFile[i] graph[k,i] = len(temp) outgoing |= temp graph[k,k] = freq_major[k] - len(outgoing) # GENERATE CARRERAS.JSON tot = sorter(hierarchy,freq_major,file_dict) open(treemap_name+'.json','w').write(json.dumps(hierarchy,ensure_ascii=False, indent = 2)) per_hierarchy = dict(hierarchy) temp = [format(x,'.2f') for x in 100.0*freq_major/count_id_vh] tot = sorter(per_hierarchy,temp,file_dict) open(treemap_name+'_perc.json','w').write(json.dumps(per_hierarchy,ensure_ascii=False, indent = 2)) # GENERATE ADJMATRIX.JSON sorted_ids = [] getSortedLeaves(hierarchy,sorted_ids,file_dict) adjmatrix = [] for k in sorted_ids: if freq_major[k]==0: continue u = file_dict.get_label_name(k) item = dict() item["name"] = nameByFile[u] item["size"] = int(freq_major[k]) item["imports"] = [] for i in sorted_ids: if graph[k,i]>0: v = file_dict.get_label_name(i) imp = dict({'name':nameByFile[v],'weight':int(graph[k,i])}) item["imports"].append(imp) adjmatrix.append(item) open(adj_name + '.json','w').write(json.dumps(adjmatrix,ensure_ascii=False, indent = 2))
get_label_id
identifier_name
count_custom_vhs.py
import os, sys import json import copy import numpy as np import random from multiprocessing import Pool import ipdb ################################################################################################ utils_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'nlp scripts') source_vh_dir = '/home/ronotex/Downloads/vector_hash/ingenierias_mineria' #source_vh_dir = '/home/ronotex/Downloads/vector_hash/mantenimiento_en_minernia' #treemap_name = 'carreras_rubro_mina' #adj_name = 'ing_total_adjmatrix' treemap_name = 'carreras_mantto_mina' adj_name = 'mantto_mina_adjmatrix' class LabelDict(dict): def __init__(self, label_names=[]): self.names = [] for name in label_names: self.add(name) def add(self, name): label_id = len(self.names) if name in self: #warnings.warn('Ignoring duplicated label ' + name) return self[name] self[name] = label_id self.names.append(name) return label_id def get_label_name(self, label_id): return self.names[label_id] def get_label_id(self, name): if name not in self: return -1 return self[name] def size(self): return len(self.names) ################################################################################################ hierarchy = json.loads(open('carreras_ing2.json').read()) # docname : {docname : true name} nameByFile = json.loads(open('ident_names2.json').read()) fileByName = {} temp={} for (file,name) in nameByFile.items(): temp[file.strip(' ')] = name.strip(' ') fileByName[name.strip(' ')] = file.strip(' ') nameByFile = dict(temp) ################################################################################################ def sorter(T,sizeById, file_dict): if "children" not in T: _id = file_dict.get_label_id(fileByName[T["name"]]) try: T["size"] = int(sizeById[_id]) except: T["size"] = sizeById[_id] return float(T["size"]) children = T["children"] temp = [] _total = 0 for child in children: subt_sum = sorter(child,sizeById, file_dict) _total += subt_sum temp.append(subt_sum) temp = list(zip(temp,range(len(children)))) temp.sort(reverse=True) T["children"] = [children[k[1]] for k in temp] return _total def getSortedLeaves(T, V,file_dict): if "children" not in T: fn = fileByName[ T["name"] ] V.append(file_dict.get_label_id(fn)) return for child in T["children"]: getSortedLeaves(child,V,file_dict) ################################################################################################ ################################################################################################ if __name__=='__main__': vh_dict = LabelDict() file_dict = LabelDict() graph = np.zeros([30,30]) vhsByFile = [set() for i in range(30)] freq_major = np.zeros([30]) for root,dirs,filenames in os.walk(source_vh_dir): for f in filenames: if f[-1]!='~': #file_name = f[3:] # vh_name #if file_name=='all' or file_name=='ing': # continue p = f.find('_mineria') #p = f.find('_mantto_mineria') file_name = f[3:p] # vh_name_mineria #file_name = f[14:] # mantto_min_vh_name id_file = file_dict.add(file_name) for line in open(os.path.join(source_vh_dir,f)): line = line.strip('\n') if line!='': id_vh = vh_dict.add(line) freq_major[id_file]+=1 vhsByFile[id_file].add(id_vh) count_id_vh = vh_dict.size() count_id_file = file_dict.size() print(count_id_vh) print(count_id_file) ipdb.set_trace() # node for k in range(count_id_file): # possible edges outgoing = set() for i in range(count_id_file): if k!=i: temp = vhsByFile[k] & vhsByFile[i] graph[k,i] = len(temp) outgoing |= temp graph[k,k] = freq_major[k] - len(outgoing) # GENERATE CARRERAS.JSON tot = 
sorter(hierarchy,freq_major,file_dict) open(treemap_name+'.json','w').write(json.dumps(hierarchy,ensure_ascii=False, indent = 2)) per_hierarchy = dict(hierarchy) temp = [format(x,'.2f') for x in 100.0*freq_major/count_id_vh] tot = sorter(per_hierarchy,temp,file_dict) open(treemap_name+'_perc.json','w').write(json.dumps(per_hierarchy,ensure_ascii=False, indent = 2)) # GENERATE ADJMATRIX.JSON sorted_ids = [] getSortedLeaves(hierarchy,sorted_ids,file_dict) adjmatrix = [] for k in sorted_ids: if freq_major[k]==0: continue u = file_dict.get_label_name(k) item = dict() item["name"] = nameByFile[u] item["size"] = int(freq_major[k]) item["imports"] = [] for i in sorted_ids:
adjmatrix.append(item) open(adj_name + '.json','w').write(json.dumps(adjmatrix,ensure_ascii=False, indent = 2))
if graph[k,i]>0: v = file_dict.get_label_name(i) imp = dict({'name':nameByFile[v],'weight':int(graph[k,i])}) item["imports"].append(imp)
conditional_block
count_custom_vhs.py
import os, sys import json import copy import numpy as np import random from multiprocessing import Pool import ipdb ################################################################################################ utils_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'nlp scripts') source_vh_dir = '/home/ronotex/Downloads/vector_hash/ingenierias_mineria' #source_vh_dir = '/home/ronotex/Downloads/vector_hash/mantenimiento_en_minernia' #treemap_name = 'carreras_rubro_mina' #adj_name = 'ing_total_adjmatrix' treemap_name = 'carreras_mantto_mina' adj_name = 'mantto_mina_adjmatrix' class LabelDict(dict): def __init__(self, label_names=[]): self.names = [] for name in label_names: self.add(name) def add(self, name): label_id = len(self.names) if name in self: #warnings.warn('Ignoring duplicated label ' + name) return self[name] self[name] = label_id self.names.append(name) return label_id def get_label_name(self, label_id): return self.names[label_id] def get_label_id(self, name): if name not in self: return -1 return self[name] def size(self): return len(self.names) ################################################################################################ hierarchy = json.loads(open('carreras_ing2.json').read()) # docname : {docname : true name} nameByFile = json.loads(open('ident_names2.json').read()) fileByName = {} temp={} for (file,name) in nameByFile.items(): temp[file.strip(' ')] = name.strip(' ') fileByName[name.strip(' ')] = file.strip(' ') nameByFile = dict(temp) ################################################################################################ def sorter(T,sizeById, file_dict): if "children" not in T: _id = file_dict.get_label_id(fileByName[T["name"]]) try: T["size"] = int(sizeById[_id]) except: T["size"] = sizeById[_id] return float(T["size"]) children = T["children"] temp = [] _total = 0 for child in children: subt_sum = sorter(child,sizeById, file_dict) _total += subt_sum temp.append(subt_sum) temp = list(zip(temp,range(len(children)))) temp.sort(reverse=True) T["children"] = [children[k[1]] for k in temp] return _total def getSortedLeaves(T, V,file_dict): if "children" not in T: fn = fileByName[ T["name"] ] V.append(file_dict.get_label_id(fn)) return for child in T["children"]: getSortedLeaves(child,V,file_dict) ################################################################################################ ################################################################################################ if __name__=='__main__': vh_dict = LabelDict() file_dict = LabelDict() graph = np.zeros([30,30]) vhsByFile = [set() for i in range(30)] freq_major = np.zeros([30]) for root,dirs,filenames in os.walk(source_vh_dir): for f in filenames: if f[-1]!='~': #file_name = f[3:] # vh_name #if file_name=='all' or file_name=='ing': # continue p = f.find('_mineria') #p = f.find('_mantto_mineria') file_name = f[3:p] # vh_name_mineria #file_name = f[14:] # mantto_min_vh_name id_file = file_dict.add(file_name) for line in open(os.path.join(source_vh_dir,f)): line = line.strip('\n') if line!='': id_vh = vh_dict.add(line) freq_major[id_file]+=1 vhsByFile[id_file].add(id_vh) count_id_vh = vh_dict.size() count_id_file = file_dict.size() print(count_id_vh)
print(count_id_file) ipdb.set_trace() # node for k in range(count_id_file): # possible edges outgoing = set() for i in range(count_id_file): if k!=i: temp = vhsByFile[k] & vhsByFile[i] graph[k,i] = len(temp) outgoing |= temp graph[k,k] = freq_major[k] - len(outgoing) # GENERATE CARRERAS.JSON tot = sorter(hierarchy,freq_major,file_dict) open(treemap_name+'.json','w').write(json.dumps(hierarchy,ensure_ascii=False, indent = 2)) per_hierarchy = dict(hierarchy) temp = [format(x,'.2f') for x in 100.0*freq_major/count_id_vh] tot = sorter(per_hierarchy,temp,file_dict) open(treemap_name+'_perc.json','w').write(json.dumps(per_hierarchy,ensure_ascii=False, indent = 2)) # GENERATE ADJMATRIX.JSON sorted_ids = [] getSortedLeaves(hierarchy,sorted_ids,file_dict) adjmatrix = [] for k in sorted_ids: if freq_major[k]==0: continue u = file_dict.get_label_name(k) item = dict() item["name"] = nameByFile[u] item["size"] = int(freq_major[k]) item["imports"] = [] for i in sorted_ids: if graph[k,i]>0: v = file_dict.get_label_name(i) imp = dict({'name':nameByFile[v],'weight':int(graph[k,i])}) item["imports"].append(imp) adjmatrix.append(item) open(adj_name + '.json','w').write(json.dumps(adjmatrix,ensure_ascii=False, indent = 2))
random_line_split
count_custom_vhs.py
import os, sys import json import copy import numpy as np import random from multiprocessing import Pool import ipdb ################################################################################################ utils_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'nlp scripts') source_vh_dir = '/home/ronotex/Downloads/vector_hash/ingenierias_mineria' #source_vh_dir = '/home/ronotex/Downloads/vector_hash/mantenimiento_en_minernia' #treemap_name = 'carreras_rubro_mina' #adj_name = 'ing_total_adjmatrix' treemap_name = 'carreras_mantto_mina' adj_name = 'mantto_mina_adjmatrix' class LabelDict(dict): def __init__(self, label_names=[]): self.names = [] for name in label_names: self.add(name) def add(self, name): label_id = len(self.names) if name in self: #warnings.warn('Ignoring duplicated label ' + name) return self[name] self[name] = label_id self.names.append(name) return label_id def get_label_name(self, label_id): return self.names[label_id] def get_label_id(self, name):
def size(self): return len(self.names) ################################################################################################ hierarchy = json.loads(open('carreras_ing2.json').read()) # docname : {docname : true name} nameByFile = json.loads(open('ident_names2.json').read()) fileByName = {} temp={} for (file,name) in nameByFile.items(): temp[file.strip(' ')] = name.strip(' ') fileByName[name.strip(' ')] = file.strip(' ') nameByFile = dict(temp) ################################################################################################ def sorter(T,sizeById, file_dict): if "children" not in T: _id = file_dict.get_label_id(fileByName[T["name"]]) try: T["size"] = int(sizeById[_id]) except: T["size"] = sizeById[_id] return float(T["size"]) children = T["children"] temp = [] _total = 0 for child in children: subt_sum = sorter(child,sizeById, file_dict) _total += subt_sum temp.append(subt_sum) temp = list(zip(temp,range(len(children)))) temp.sort(reverse=True) T["children"] = [children[k[1]] for k in temp] return _total def getSortedLeaves(T, V,file_dict): if "children" not in T: fn = fileByName[ T["name"] ] V.append(file_dict.get_label_id(fn)) return for child in T["children"]: getSortedLeaves(child,V,file_dict) ################################################################################################ ################################################################################################ if __name__=='__main__': vh_dict = LabelDict() file_dict = LabelDict() graph = np.zeros([30,30]) vhsByFile = [set() for i in range(30)] freq_major = np.zeros([30]) for root,dirs,filenames in os.walk(source_vh_dir): for f in filenames: if f[-1]!='~': #file_name = f[3:] # vh_name #if file_name=='all' or file_name=='ing': # continue p = f.find('_mineria') #p = f.find('_mantto_mineria') file_name = f[3:p] # vh_name_mineria #file_name = f[14:] # mantto_min_vh_name id_file = file_dict.add(file_name) for line in open(os.path.join(source_vh_dir,f)): line = line.strip('\n') if line!='': id_vh = vh_dict.add(line) freq_major[id_file]+=1 vhsByFile[id_file].add(id_vh) count_id_vh = vh_dict.size() count_id_file = file_dict.size() print(count_id_vh) print(count_id_file) ipdb.set_trace() # node for k in range(count_id_file): # possible edges outgoing = set() for i in range(count_id_file): if k!=i: temp = vhsByFile[k] & vhsByFile[i] graph[k,i] = len(temp) outgoing |= temp graph[k,k] = freq_major[k] - len(outgoing) # GENERATE CARRERAS.JSON tot = sorter(hierarchy,freq_major,file_dict) open(treemap_name+'.json','w').write(json.dumps(hierarchy,ensure_ascii=False, indent = 2)) per_hierarchy = dict(hierarchy) temp = [format(x,'.2f') for x in 100.0*freq_major/count_id_vh] tot = sorter(per_hierarchy,temp,file_dict) open(treemap_name+'_perc.json','w').write(json.dumps(per_hierarchy,ensure_ascii=False, indent = 2)) # GENERATE ADJMATRIX.JSON sorted_ids = [] getSortedLeaves(hierarchy,sorted_ids,file_dict) adjmatrix = [] for k in sorted_ids: if freq_major[k]==0: continue u = file_dict.get_label_name(k) item = dict() item["name"] = nameByFile[u] item["size"] = int(freq_major[k]) item["imports"] = [] for i in sorted_ids: if graph[k,i]>0: v = file_dict.get_label_name(i) imp = dict({'name':nameByFile[v],'weight':int(graph[k,i])}) item["imports"].append(imp) adjmatrix.append(item) open(adj_name + '.json','w').write(json.dumps(adjmatrix,ensure_ascii=False, indent = 2))
if name not in self: return -1 return self[name]
identifier_body
containers.py
#!/usr/bin/python # This file is part of PARPG. # PARPG is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # PARPG is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with PARPG. If not, see <http://www.gnu.org/licenses/>. """Contains classes defining concrete container game objects like crates, barrels, chests, etc.""" __all__ = ["WoodenCrate",] from composed import ImmovableContainer class WoodenCrate (ImmovableContainer):
def __init__ (self, ID, name = 'Wooden Crate', \ text = 'A battered crate', gfx = 'crate', **kwargs): ImmovableContainer.__init__(self, ID = ID, name = name, gfx = gfx, \ text = text, **kwargs)
identifier_body
containers.py
#!/usr/bin/python # This file is part of PARPG. # PARPG is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # PARPG is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with PARPG. If not, see <http://www.gnu.org/licenses/>. """Contains classes defining concrete container game objects like crates,
barrels, chests, etc.""" __all__ = ["WoodenCrate",] from composed import ImmovableContainer class WoodenCrate (ImmovableContainer): def __init__ (self, ID, name = 'Wooden Crate', \ text = 'A battered crate', gfx = 'crate', **kwargs): ImmovableContainer.__init__(self, ID = ID, name = name, gfx = gfx, \ text = text, **kwargs)
random_line_split
containers.py
#!/usr/bin/python # This file is part of PARPG. # PARPG is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # PARPG is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with PARPG. If not, see <http://www.gnu.org/licenses/>. """Contains classes defining concrete container game objects like crates, barrels, chests, etc.""" __all__ = ["WoodenCrate",] from composed import ImmovableContainer class
(ImmovableContainer): def __init__ (self, ID, name = 'Wooden Crate', \ text = 'A battered crate', gfx = 'crate', **kwargs): ImmovableContainer.__init__(self, ID = ID, name = name, gfx = gfx, \ text = text, **kwargs)
WoodenCrate
identifier_name
index.d.ts
// Generated by typings // Source: https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/56295f5058cac7ae458540423c50ac2dcf9fc711/log4js/log4js.d.ts // Type definitions for log4js // Project: https://github.com/nomiddlename/log4js-node // Definitions by: Kentaro Okuno <http://github.com/armorik83> // Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped declare module "log4js" { import express = require('express'); /** * Replaces the console * @param logger * @returns void */ export function replaceConsole(logger?: Logger): void; /** * Restores the console * @returns void */ export function restoreConsole(): void; /** * Get a logger instance. Instance is cached on categoryName level. * * @param {String} [categoryName] name of category to log to. * @returns {Logger} instance of logger for the category * @static */ export function getLogger(categoryName?: string): Logger; export function getBufferedLogger(categoryName?: string): Logger; /** * Has a logger instance cached on categoryName. * * @param {String} [categoryName] name of category to log to. * @returns {boolean} contains logger for the category * @static */ export function hasLogger(categoryName: string): boolean; /** * Get the default logger instance. * * @returns {Logger} instance of default logger * @static */ export function getDefaultLogger(): Logger; /** * args are appender, then zero or more categories * * @param {*[]} appenders * @returns {void} * @static */ export function addAppender(...appenders: any[]): void; /** * Clear configured appenders * * @returns {void} * @static */ export function clearAppenders(): void; /** * Shutdown all log appenders. This will first disable all writing to appenders * and then call the shutdown function for each appender. * * @param {Function} cb - The callback to be invoked once all appenders have * shutdown. If an error occurs, the callback will be given the error object * as the first argument. * @returns {void} */ export function shutdown(cb: Function): void; export function configure(filename: string, options?: any): void; export function configure(config: IConfig, options?: any): void; export function setGlobalLogLevel(level: string): void; export function setGlobalLogLevel(level: Level): void; /** * Create logger for connect middleware. * * * @returns {express.Handler} Instance of middleware. 
* @static */ export function connectLogger(logger: Logger, options: { format?: string; level?: string; nolog?: any; }): express.Handler; export function connectLogger(logger: Logger, options: { format?: string; level?: Level; nolog?: any; }): express.Handler; export var appenders: any; export var levels: { ALL: Level; TRACE: Level; DEBUG: Level; INFO: Level; WARN: Level; ERROR: Level; FATAL: Level; OFF: Level; toLevel(level: string, defaultLevel?: Level): Level; toLevel(level: Level, defaultLevel?: Level): Level; }; export interface Logger { setLevel(level: string): void; setLevel(level: Level): void; isLevelEnabled(level: Level): boolean; isTraceEnabled(): boolean; isDebugEnabled(): boolean; isInfoEnabled(): boolean; isWarnEnabled(): boolean; isErrorEnabled(): boolean; isFatalEnabled(): boolean; trace(message: string, ...args: any[]): void; debug(message: string, ...args: any[]): void; info(message: string, ...args: any[]): void; warn(message: string, ...args: any[]): void; error(message: string, ...args: any[]): void; fatal(message: string, ...args: any[]): void; } export interface Level { isEqualTo(other: string): boolean; isEqualTo(otherLevel: Level): boolean; isLessThanOrEqualTo(other: string): boolean; isLessThanOrEqualTo(otherLevel: Level): boolean; isGreaterThanOrEqualTo(other: string): boolean; isGreaterThanOrEqualTo(otherLevel: Level): boolean; } export interface IConfig { appenders: AppenderConfig[]; levels?: { [category: string]: string }; replaceConsole?: boolean; } export interface AppenderConfigBase { type: string; category?: string; } export interface ConsoleAppenderConfig extends AppenderConfigBase {} export interface FileAppenderConfig extends AppenderConfigBase { filename: string; } export interface DateFileAppenderConfig extends FileAppenderConfig { /** * The following strings are recognised in the pattern: * - yyyy : the full year, use yy for just the last two digits * - MM : the month * - dd : the day of the month * - hh : the hour of the day (24-hour clock) * - mm : the minute of the hour * - ss : seconds * - SSS : milliseconds (although I'm not sure you'd want to roll your logs every millisecond) * - O : timezone (capital letter o) */ pattern: string; alwaysIncludePattern: boolean; } export interface SmtpAppenderConfig extends AppenderConfigBase { /** Comma separated list of email recipients */ recipients: string; /** Sender of all emails (defaults to transport user) */ sender: string; /** Subject of all email messages (defaults to first event's message)*/ subject: string; /** * The time in seconds between sending attempts (defaults to 0). * All events are buffered and sent in one email during this time. * If 0 then every event sends an email */ sendInterval: number; SMTP: { host: string; secure: boolean; port: number; auth: { user: string; pass: string; } } } export interface HookIoAppenderConfig extends FileAppenderConfig { maxLogSize: number; backup: number; pollInterval: number; } export interface GelfAppenderConfig extends AppenderConfigBase { host: string; hostname: string; port: string; facility: string; } export interface MultiprocessAppenderConfig extends AppenderConfigBase { mode: string; loggerPort: number; loggerHost: string; facility: string; appender?: AppenderConfig; }
/** Loggly customer subdomain (use 'abc' for abc.loggly.com) */ subdomain: string; /** an array of strings to help segment your data & narrow down search results in Loggly */ tags: string[]; /** Enable JSON logging by setting to 'true' */ json: boolean; } export interface ClusteredAppenderConfig extends AppenderConfigBase { appenders?: AppenderConfig[]; } type CoreAppenderConfig = ConsoleAppenderConfig | FileAppenderConfig | DateFileAppenderConfig | SmtpAppenderConfig | HookIoAppenderConfig | GelfAppenderConfig | MultiprocessAppenderConfig | LogglyAppenderConfig | ClusteredAppenderConfig interface CustomAppenderConfig extends AppenderConfigBase { [prop: string]: any; } type AppenderConfig = CoreAppenderConfig | CustomAppenderConfig; }
export interface LogglyAppenderConfig extends AppenderConfigBase { /** Loggly customer token - https://www.loggly.com/docs/api-sending-data/ */ token: string;
random_line_split
paste.py
""" Contains a base Site class for pastebin-like sites. Each child class only needs to specify a base url, the relative url to the public pastes archive, and a lambda function to get the paste links out of the page. """ import logging import re from urllib.parse import urljoin from bs4 import BeautifulSoup import requests LOGGER = logging.getLogger(__name__) class Site(object): """ Base class for all paste sites to inherit from. """ def __init__(self, url_base, url_archive, paste_tag, target_patterns, paste): self.url_base = url_base self.url_archive = url_archive self.paste_tag = paste_tag self.target_patterns = target_patterns self.headers = {'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36' \ '(KHTML, like Gecko) Chrome/41.0.2272.76 Safari/537.36'} self.paste = paste def get(self): """ Gets the archive page for the paste site. Returns list of links to pastes. """ req = requests.get(self.url_base+self.url_archive, headers=self.headers) LOGGER.debug('Got the response for the archive page') while req.status_code != 200: LOGGER.error('Response was not 200. Trying again...') req = requests.get(self.url_base+self.url_archive) soup = BeautifulSoup(req.text, 'lxml') links = soup.find_all(self.paste_tag) LOGGER.debug('Got %d links from page', len(links)) return [self.paste(urljoin(self.url_base, link.a.get('href'))) for link in links] def get_paste(self, paste): """ Gets the supplied paste url. Returns list of tuples of matches in paste. """ req = requests.get(paste.url, headers=self.headers) LOGGER.debug('Got response for paste') while req.status_code != 200: LOGGER.error('Response was not 200. Trying again...') req = requests.get(paste.url) found = [] for name, pattern in self.target_patterns: LOGGER.debug('Trying pattern: %s', pattern) matches = re.findall(pattern, req.text) LOGGER.debug('Got %d matches', len(matches)) if matches:
return found class Paste(object): """ Base class for pastes. Parses paste ID from supplied URL """ def __init__(self, url): _id = url.split('/')[-1] self._id = _id class PastebinPaste(Paste): """ Paste for Pastebin """ def __init__(self, url): super().__init__(url) self.url = 'http://pastebin.com/raw.php?i={}'.format(self._id) class PastiePaste(Paste): """ Paste for Pastie """ def __init__(self, url): super().__init__(url) self.url = 'http://pastie.org/pastes/{}/text'.format(self._id) class SlexyPaste(Paste): """ Paste for Slexy """ def __init__(self, url): super().__init__(url) self.url = 'http://slexy.org/raw/{}'.format(self._id) class Pastebin(Site): """ Pastebin class """ def __init__(self, target_patterns): self.url_base = 'http://pastebin.com/' self.url_archive = '/archive' self.paste_tag = lambda tag: tag.name == 'td' and tag.a and \ '/archive/' not in tag.a['href'] and tag.a['href'][1:] super().__init__(self.url_base, self.url_archive, self.paste_tag, target_patterns, PastebinPaste) class Pastie(Site): """ Pastie class """ def __init__(self, target_patterns): self.url_base = 'http://pastie.org' self.url_archive = '/pastes' self.paste_tag = lambda tag: tag.name == 'p' and tag.a and self.url_base in tag.a['href'] super().__init__(self.url_base, self.url_archive, self.paste_tag, target_patterns, PastiePaste) class Slexy(Site): """ Slexy site """ def __init__(self, target_patterns): self.url_base = 'http://slexy.org' self.url_archive = '/recent' self.paste_tag = lambda tag: tag.name == 'td' and tag.a and '/view/' in tag.a['href'] super().__init__(self.url_base, self.url_archive, self.paste_tag, target_patterns, SlexyPaste)
found.append((name, len(matches)))
conditional_block
paste.py
""" Contains a base Site class for pastebin-like sites. Each child class only needs to specify a base url, the relative url to the public pastes archive, and a lambda function to get the paste links out of the page. """ import logging import re from urllib.parse import urljoin from bs4 import BeautifulSoup import requests LOGGER = logging.getLogger(__name__) class Site(object): """ Base class for all paste sites to inherit from. """ def __init__(self, url_base, url_archive, paste_tag, target_patterns, paste): self.url_base = url_base self.url_archive = url_archive self.paste_tag = paste_tag self.target_patterns = target_patterns self.headers = {'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36' \ '(KHTML, like Gecko) Chrome/41.0.2272.76 Safari/537.36'} self.paste = paste def get(self): """ Gets the archive page for the paste site. Returns list of links to pastes. """ req = requests.get(self.url_base+self.url_archive, headers=self.headers) LOGGER.debug('Got the response for the archive page') while req.status_code != 200: LOGGER.error('Response was not 200. Trying again...') req = requests.get(self.url_base+self.url_archive) soup = BeautifulSoup(req.text, 'lxml') links = soup.find_all(self.paste_tag) LOGGER.debug('Got %d links from page', len(links)) return [self.paste(urljoin(self.url_base, link.a.get('href'))) for link in links] def get_paste(self, paste): """ Gets the supplied paste url. Returns list of tuples of matches in paste. """ req = requests.get(paste.url, headers=self.headers) LOGGER.debug('Got response for paste') while req.status_code != 200: LOGGER.error('Response was not 200. Trying again...') req = requests.get(paste.url) found = [] for name, pattern in self.target_patterns: LOGGER.debug('Trying pattern: %s', pattern) matches = re.findall(pattern, req.text) LOGGER.debug('Got %d matches', len(matches)) if matches: found.append((name, len(matches))) return found class Paste(object): """ Base class for pastes. Parses paste ID from supplied URL """ def __init__(self, url): _id = url.split('/')[-1] self._id = _id class PastebinPaste(Paste): """ Paste for Pastebin """ def
(self, url): super().__init__(url) self.url = 'http://pastebin.com/raw.php?i={}'.format(self._id) class PastiePaste(Paste): """ Paste for Pastie """ def __init__(self, url): super().__init__(url) self.url = 'http://pastie.org/pastes/{}/text'.format(self._id) class SlexyPaste(Paste): """ Paste for Slexy """ def __init__(self, url): super().__init__(url) self.url = 'http://slexy.org/raw/{}'.format(self._id) class Pastebin(Site): """ Pastebin class """ def __init__(self, target_patterns): self.url_base = 'http://pastebin.com/' self.url_archive = '/archive' self.paste_tag = lambda tag: tag.name == 'td' and tag.a and \ '/archive/' not in tag.a['href'] and tag.a['href'][1:] super().__init__(self.url_base, self.url_archive, self.paste_tag, target_patterns, PastebinPaste) class Pastie(Site): """ Pastie class """ def __init__(self, target_patterns): self.url_base = 'http://pastie.org' self.url_archive = '/pastes' self.paste_tag = lambda tag: tag.name == 'p' and tag.a and self.url_base in tag.a['href'] super().__init__(self.url_base, self.url_archive, self.paste_tag, target_patterns, PastiePaste) class Slexy(Site): """ Slexy site """ def __init__(self, target_patterns): self.url_base = 'http://slexy.org' self.url_archive = '/recent' self.paste_tag = lambda tag: tag.name == 'td' and tag.a and '/view/' in tag.a['href'] super().__init__(self.url_base, self.url_archive, self.paste_tag, target_patterns, SlexyPaste)
__init__
identifier_name
paste.py
""" Contains a base Site class for pastebin-like sites. Each child class only needs to specify a base url, the relative url to the public pastes archive, and a lambda function to get the paste links out of the page. """ import logging import re from urllib.parse import urljoin from bs4 import BeautifulSoup import requests LOGGER = logging.getLogger(__name__) class Site(object): """ Base class for all paste sites to inherit from. """ def __init__(self, url_base, url_archive, paste_tag, target_patterns, paste): self.url_base = url_base self.url_archive = url_archive self.paste_tag = paste_tag self.target_patterns = target_patterns self.headers = {'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36' \ '(KHTML, like Gecko) Chrome/41.0.2272.76 Safari/537.36'} self.paste = paste def get(self): """ Gets the archive page for the paste site. Returns list of links to pastes. """ req = requests.get(self.url_base+self.url_archive, headers=self.headers) LOGGER.debug('Got the response for the archive page') while req.status_code != 200: LOGGER.error('Response was not 200. Trying again...') req = requests.get(self.url_base+self.url_archive) soup = BeautifulSoup(req.text, 'lxml') links = soup.find_all(self.paste_tag) LOGGER.debug('Got %d links from page', len(links)) return [self.paste(urljoin(self.url_base, link.a.get('href'))) for link in links] def get_paste(self, paste): """ Gets the supplied paste url. Returns list of tuples of matches in paste. """ req = requests.get(paste.url, headers=self.headers) LOGGER.debug('Got response for paste') while req.status_code != 200: LOGGER.error('Response was not 200. Trying again...') req = requests.get(paste.url) found = [] for name, pattern in self.target_patterns: LOGGER.debug('Trying pattern: %s', pattern) matches = re.findall(pattern, req.text) LOGGER.debug('Got %d matches', len(matches)) if matches: found.append((name, len(matches))) return found class Paste(object): """ Base class for pastes. Parses paste ID from supplied URL """ def __init__(self, url): _id = url.split('/')[-1] self._id = _id class PastebinPaste(Paste): """ Paste for Pastebin """ def __init__(self, url): super().__init__(url) self.url = 'http://pastebin.com/raw.php?i={}'.format(self._id) class PastiePaste(Paste): """ Paste for Pastie """
self.url = 'http://pastie.org/pastes/{}/text'.format(self._id) class SlexyPaste(Paste): """ Paste for Slexy """ def __init__(self, url): super().__init__(url) self.url = 'http://slexy.org/raw/{}'.format(self._id) class Pastebin(Site): """ Pastebin class """ def __init__(self, target_patterns): self.url_base = 'http://pastebin.com/' self.url_archive = '/archive' self.paste_tag = lambda tag: tag.name == 'td' and tag.a and \ '/archive/' not in tag.a['href'] and tag.a['href'][1:] super().__init__(self.url_base, self.url_archive, self.paste_tag, target_patterns, PastebinPaste) class Pastie(Site): """ Pastie class """ def __init__(self, target_patterns): self.url_base = 'http://pastie.org' self.url_archive = '/pastes' self.paste_tag = lambda tag: tag.name == 'p' and tag.a and self.url_base in tag.a['href'] super().__init__(self.url_base, self.url_archive, self.paste_tag, target_patterns, PastiePaste) class Slexy(Site): """ Slexy site """ def __init__(self, target_patterns): self.url_base = 'http://slexy.org' self.url_archive = '/recent' self.paste_tag = lambda tag: tag.name == 'td' and tag.a and '/view/' in tag.a['href'] super().__init__(self.url_base, self.url_archive, self.paste_tag, target_patterns, SlexyPaste)
def __init__(self, url): super().__init__(url)
random_line_split
paste.py
""" Contains a base Site class for pastebin-like sites. Each child class only needs to specify a base url, the relative url to the public pastes archive, and a lambda function to get the paste links out of the page. """ import logging import re from urllib.parse import urljoin from bs4 import BeautifulSoup import requests LOGGER = logging.getLogger(__name__) class Site(object): """ Base class for all paste sites to inherit from. """ def __init__(self, url_base, url_archive, paste_tag, target_patterns, paste): self.url_base = url_base self.url_archive = url_archive self.paste_tag = paste_tag self.target_patterns = target_patterns self.headers = {'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36' \ '(KHTML, like Gecko) Chrome/41.0.2272.76 Safari/537.36'} self.paste = paste def get(self): """ Gets the archive page for the paste site. Returns list of links to pastes. """ req = requests.get(self.url_base+self.url_archive, headers=self.headers) LOGGER.debug('Got the response for the archive page') while req.status_code != 200: LOGGER.error('Response was not 200. Trying again...') req = requests.get(self.url_base+self.url_archive) soup = BeautifulSoup(req.text, 'lxml') links = soup.find_all(self.paste_tag) LOGGER.debug('Got %d links from page', len(links)) return [self.paste(urljoin(self.url_base, link.a.get('href'))) for link in links] def get_paste(self, paste): """ Gets the supplied paste url. Returns list of tuples of matches in paste. """ req = requests.get(paste.url, headers=self.headers) LOGGER.debug('Got response for paste') while req.status_code != 200: LOGGER.error('Response was not 200. Trying again...') req = requests.get(paste.url) found = [] for name, pattern in self.target_patterns: LOGGER.debug('Trying pattern: %s', pattern) matches = re.findall(pattern, req.text) LOGGER.debug('Got %d matches', len(matches)) if matches: found.append((name, len(matches))) return found class Paste(object): """ Base class for pastes. Parses paste ID from supplied URL """ def __init__(self, url): _id = url.split('/')[-1] self._id = _id class PastebinPaste(Paste): """ Paste for Pastebin """ def __init__(self, url): super().__init__(url) self.url = 'http://pastebin.com/raw.php?i={}'.format(self._id) class PastiePaste(Paste): """ Paste for Pastie """ def __init__(self, url): super().__init__(url) self.url = 'http://pastie.org/pastes/{}/text'.format(self._id) class SlexyPaste(Paste): """ Paste for Slexy """ def __init__(self, url): super().__init__(url) self.url = 'http://slexy.org/raw/{}'.format(self._id) class Pastebin(Site):
class Pastie(Site): """ Pastie class """ def __init__(self, target_patterns): self.url_base = 'http://pastie.org' self.url_archive = '/pastes' self.paste_tag = lambda tag: tag.name == 'p' and tag.a and self.url_base in tag.a['href'] super().__init__(self.url_base, self.url_archive, self.paste_tag, target_patterns, PastiePaste) class Slexy(Site): """ Slexy site """ def __init__(self, target_patterns): self.url_base = 'http://slexy.org' self.url_archive = '/recent' self.paste_tag = lambda tag: tag.name == 'td' and tag.a and '/view/' in tag.a['href'] super().__init__(self.url_base, self.url_archive, self.paste_tag, target_patterns, SlexyPaste)
""" Pastebin class """ def __init__(self, target_patterns): self.url_base = 'http://pastebin.com/' self.url_archive = '/archive' self.paste_tag = lambda tag: tag.name == 'td' and tag.a and \ '/archive/' not in tag.a['href'] and tag.a['href'][1:] super().__init__(self.url_base, self.url_archive, self.paste_tag, target_patterns, PastebinPaste)
identifier_body
interactive.js
/** * class InteractiveBehavior < Behavior * * `Physics.behavior('interactive')`. * * User interaction helper. * * Used to get mouse/touch events and add a mouse grab interaction. * * Additional options include: * - el: The element of the renderer. What you input as the `el` for the renderer. * - moveThrottle: The min time between move events (default: `10`). * - minVel: The minimum velocity clamp [[Vectorish]] (default: { x: -5, y: -5 }) to restrict velocity a user can give to a body * - maxVel: The maximum velocity clamp [[Vectorish]] (default: { x: 5, y: 5 }) to restrict velocity a user can give to a body * * The behavior also triggers the following events on the world: * ```javascript * // a body has been grabbed * world.on('interact:grab', function( data ){ * data.x; // the x coord * data.y; // the y coord * data.body; // the body that was grabbed * }); * // no body was grabbed, but the renderer area was clicked, or touched * world.on('interact:poke', function( data ){ * data.x; // the x coord * data.y; // the y coord * }); * world.on('interact:move', function( data ){ * data.x; // the x coord * data.y; // the y coord * data.body; // the body that was grabbed (if applicable) * }); * // when the viewport is released (mouseup, touchend) * world.on('interact:release', function( data ){ * data.x; // the x coord * data.y; // the y coord * }); * ``` **/ Physics.behavior('interactive', function( parent ){ if ( !document ){ // must be in node environment return {}; } var defaults = { // the element to monitor el: null, // time between move events moveThrottle: 1000 / 100 | 0, // minimum velocity clamp minVel: { x: -5, y: -5 }, // maximum velocity clamp maxVel: { x: 5, y: 5 } } ,getElementOffset = function( el ){ var curleft = 0 ,curtop = 0 ; if (el.offsetParent) { do { curleft += el.offsetLeft; curtop += el.offsetTop; } while (el = el.offsetParent); } return { left: curleft, top: curtop }; } ,getCoords = function( e ){ var offset = getElementOffset( e.target ) ,obj = ( e.changedTouches && e.changedTouches[0] ) || e ,x = obj.pageX - offset.left
,y = obj.pageY - offset.top ; return { x: x ,y: y }; } ; return { // extended init: function( options ){ var self = this ,prevTreatment ,time ; // call parent init method parent.init.call( this ); this.options.defaults( defaults ); this.options( options ); // vars this.mousePos = new Physics.vector(); this.mousePosOld = new Physics.vector(); this.offset = new Physics.vector(); this.el = typeof this.options.el === 'string' ? document.getElementById(this.options.el) : this.options.el; if ( !this.el ){ throw "No DOM element specified"; } // init events var grab = function grab( e ){ var pos = getCoords( e ) ,body ; time = Physics.util.ticker.now(); if ( self._world ){ body = self._world.findOne({ $at: new Physics.vector( pos.x, pos.y ) }); if ( body ){ // we're trying to grab a body // fix the body in place prevTreatment = body.treatment; body.treatment = 'kinematic'; body.state.vel.zero(); body.state.angular.vel = 0; // remember the currently grabbed body self.body = body; // remember the mouse offset self.mousePos.clone( pos ); self.mousePosOld.clone( pos ); self.offset.clone( pos ).vsub( body.state.pos ); pos.body = body; self._world.emit('interact:grab', pos); } else { self._world.emit('interact:poke', pos); } } }; var move = Physics.util.throttle(function move( e ){ var pos = getCoords( e ) ,state ; if ( self.body ){ time = Physics.util.ticker.now(); self.mousePosOld.clone( self.mousePos ); // get new mouse position self.mousePos.set(pos.x, pos.y); pos.body = self.body; } self._world.emit('interact:move', pos); }, self.options.moveThrottle); var release = function release( e ){ var pos = getCoords( e ) ,body ,dt = Math.max(Physics.util.ticker.now() - time, self.options.moveThrottle) ; // get new mouse position self.mousePos.set(pos.x, pos.y); // release the body if (self.body){ self.body.treatment = prevTreatment; // calculate the release velocity self.body.state.vel.clone( self.mousePos ).vsub( self.mousePosOld ).mult( 1 / dt ); // make sure it's not too big self.body.state.vel.clamp( self.options.minVel, self.options.maxVel ); self.body = false; } if ( self._world ){ self._world.emit('interact:release', pos); } }; this.el.addEventListener('mousedown', grab); this.el.addEventListener('touchstart', grab); this.el.addEventListener('mousemove', move); this.el.addEventListener('touchmove', move); this.el.addEventListener('mouseup', release); this.el.addEventListener('touchend', release); }, // extended connect: function( world ){ // subscribe the .behave() method to the position integration step world.on('integrate:positions', this.behave, this); }, // extended disconnect: function( world ){ // unsubscribe when disconnected world.off('integrate:positions', this.behave); }, // extended behave: function( data ){ var self = this ,state ,dt = Math.max(data.dt, self.options.moveThrottle) ; if ( self.body ){ // if we have a body, we need to move it to the new mouse position. // we'll do this by adjusting the velocity so it gets there at the next step state = self.body.state; state.vel.clone( self.mousePos ).vsub( self.offset ).vsub( state.pos ).mult( 1 / dt ); } } }; });
random_line_split
interactive.js
/** * class InteractiveBehavior < Behavior * * `Physics.behavior('interactive')`. * * User interaction helper. * * Used to get mouse/touch events and add a mouse grab interaction. * * Additional options include: * - el: The element of the renderer. What you input as the `el` for the renderer. * - moveThrottle: The min time between move events (default: `10`). * - minVel: The minimum velocity clamp [[Vectorish]] (default: { x: -5, y: -5 }) to restrict velocity a user can give to a body * - maxVel: The maximum velocity clamp [[Vectorish]] (default: { x: 5, y: 5 }) to restrict velocity a user can give to a body * * The behavior also triggers the following events on the world: * ```javascript * // a body has been grabbed * world.on('interact:grab', function( data ){ * data.x; // the x coord * data.y; // the y coord * data.body; // the body that was grabbed * }); * // no body was grabbed, but the renderer area was clicked, or touched * world.on('interact:poke', function( data ){ * data.x; // the x coord * data.y; // the y coord * }); * world.on('interact:move', function( data ){ * data.x; // the x coord * data.y; // the y coord * data.body; // the body that was grabbed (if applicable) * }); * // when the viewport is released (mouseup, touchend) * world.on('interact:release', function( data ){ * data.x; // the x coord * data.y; // the y coord * }); * ``` **/ Physics.behavior('interactive', function( parent ){ if ( !document )
var defaults = { // the element to monitor el: null, // time between move events moveThrottle: 1000 / 100 | 0, // minimum velocity clamp minVel: { x: -5, y: -5 }, // maximum velocity clamp maxVel: { x: 5, y: 5 } } ,getElementOffset = function( el ){ var curleft = 0 ,curtop = 0 ; if (el.offsetParent) { do { curleft += el.offsetLeft; curtop += el.offsetTop; } while (el = el.offsetParent); } return { left: curleft, top: curtop }; } ,getCoords = function( e ){ var offset = getElementOffset( e.target ) ,obj = ( e.changedTouches && e.changedTouches[0] ) || e ,x = obj.pageX - offset.left ,y = obj.pageY - offset.top ; return { x: x ,y: y }; } ; return { // extended init: function( options ){ var self = this ,prevTreatment ,time ; // call parent init method parent.init.call( this ); this.options.defaults( defaults ); this.options( options ); // vars this.mousePos = new Physics.vector(); this.mousePosOld = new Physics.vector(); this.offset = new Physics.vector(); this.el = typeof this.options.el === 'string' ? document.getElementById(this.options.el) : this.options.el; if ( !this.el ){ throw "No DOM element specified"; } // init events var grab = function grab( e ){ var pos = getCoords( e ) ,body ; time = Physics.util.ticker.now(); if ( self._world ){ body = self._world.findOne({ $at: new Physics.vector( pos.x, pos.y ) }); if ( body ){ // we're trying to grab a body // fix the body in place prevTreatment = body.treatment; body.treatment = 'kinematic'; body.state.vel.zero(); body.state.angular.vel = 0; // remember the currently grabbed body self.body = body; // remember the mouse offset self.mousePos.clone( pos ); self.mousePosOld.clone( pos ); self.offset.clone( pos ).vsub( body.state.pos ); pos.body = body; self._world.emit('interact:grab', pos); } else { self._world.emit('interact:poke', pos); } } }; var move = Physics.util.throttle(function move( e ){ var pos = getCoords( e ) ,state ; if ( self.body ){ time = Physics.util.ticker.now(); self.mousePosOld.clone( self.mousePos ); // get new mouse position self.mousePos.set(pos.x, pos.y); pos.body = self.body; } self._world.emit('interact:move', pos); }, self.options.moveThrottle); var release = function release( e ){ var pos = getCoords( e ) ,body ,dt = Math.max(Physics.util.ticker.now() - time, self.options.moveThrottle) ; // get new mouse position self.mousePos.set(pos.x, pos.y); // release the body if (self.body){ self.body.treatment = prevTreatment; // calculate the release velocity self.body.state.vel.clone( self.mousePos ).vsub( self.mousePosOld ).mult( 1 / dt ); // make sure it's not too big self.body.state.vel.clamp( self.options.minVel, self.options.maxVel ); self.body = false; } if ( self._world ){ self._world.emit('interact:release', pos); } }; this.el.addEventListener('mousedown', grab); this.el.addEventListener('touchstart', grab); this.el.addEventListener('mousemove', move); this.el.addEventListener('touchmove', move); this.el.addEventListener('mouseup', release); this.el.addEventListener('touchend', release); }, // extended connect: function( world ){ // subscribe the .behave() method to the position integration step world.on('integrate:positions', this.behave, this); }, // extended disconnect: function( world ){ // unsubscribe when disconnected world.off('integrate:positions', this.behave); }, // extended behave: function( data ){ var self = this ,state ,dt = Math.max(data.dt, self.options.moveThrottle) ; if ( self.body ){ // if we have a body, we need to move it to the new mouse position.
// we'll do this by adjusting the velocity so it gets there at the next step state = self.body.state; state.vel.clone( self.mousePos ).vsub( self.offset ).vsub( state.pos ).mult( 1 / dt ); } } }; });
{ /* must be in node environment */ return {}; }
conditional_block
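The release handler in the interactive behavior above turns the last two sampled pointer positions into a throw velocity: vel = (mousePos - mousePosOld) * (1 / dt), clamped component-wise to the minVel/maxVel options so a fast flick cannot inject unbounded energy. A minimal Python sketch of that calculation (the function name and tuple representation are illustrative, not part of PhysicsJS):

def release_velocity(pos, pos_old, dt, min_vel=(-5.0, -5.0), max_vel=(5.0, 5.0)):
    # velocity = displacement over the time elapsed since the last move event
    vx = (pos[0] - pos_old[0]) / dt
    vy = (pos[1] - pos_old[1]) / dt
    # clamp each component to the configured bounds
    vx = max(min_vel[0], min(vx, max_vel[0]))
    vy = max(min_vel[1], min(vy, max_vel[1]))
    return (vx, vy)

# a 60px horizontal drag sampled 10ms apart would release at 6 px/ms,
# which the clamp cuts back to the +5 bound:
# release_velocity((160, 40), (100, 40), 10.0) == (5.0, 0.0)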
table_datablock.py
import attr import struct import math import re import copy from datablock import Datablock from record import Record from rowid import Rowid @attr.s class TableDatablock(Datablock): header = attr.ib(default=[]) records = attr.ib(default=[]) def get_data(self): """ Convert header and records to bytes """ records_buffer = bytearray(self.records_size()) for i in range(0,len(self.records)): struct.pack_into('%ss' % self.header[2*i+1], records_buffer, self.header[2*i], self.records[i].pack()) fmt = 'BH%sH%ss' % (len(self.header), len(records_buffer)) data = struct.pack(fmt, self.type, self.count_record, *self.header, records_buffer) return data def save_record(self, record): """ Saves a Record to the datablock """ if type(record) is not Record: raise TypeError("Wrong type for save_record()") # TODO: check if there's room in the Datablock # TODO: save to the Datablock def records_size(self): return TableDatablock.DATABLOCK_SIZE - ((len(self.header) * 2)) def free_contiguous_space(self, space_needed):
return -1 def write_data(self, record, position=None): if(position is None): position = self.free_contiguous_space(record.size()+4) if(position == -1): print('Error writing data') return False # Insert Header in the right position place = -1 for i in range(0, len(self.header), 2): if(self.header[i] == position and self.header[i+1] == 0): # Going to use header that was deleted place = i self.header[i+1] = record.size() return self._insert_new_record(record, place, True) elif(self.header[i] > position): place = i self.header.insert(i, position) self.header.insert(i+1, record.size()) return self._insert_new_record(record, place) if(place == -1): place = len(self.header) self.header.append(position) self.header.append(record.size()) return self._insert_new_record(record, place) def update_record(self, record, desc): tmp_record = copy.copy(record) tmp_record.description = desc pos = record.rowid.pos*2 can_store = False if(pos+2 >= len(self.header)): can_store =((self.header[pos+1] + (self.records_size() - (self.header[pos]+self.header[pos+1]))) >= tmp_record.size()) else: can_store = ((self.header[pos+1]+(self.header[pos+2]-self.header[pos+1])) >= tmp_record.size()) #Check for space between records if(can_store): record.description = desc self.header[pos+1] = record.size() self._dirty = True return True else: self.delete_record(record) return None def delete_record(self, record): pos = record.rowid.pos self.header[pos*2+1] = 0 #set record removed size to 0 to mark it was removed self.records[pos].deleted = True self._dirty = True return True def search_by(self, value, field): found_records = [] for record in self.records: if(field == 'code'): if(record.code == value and not record.deleted): return [record] elif(field == 'description'): if(record.description == value and not record.deleted): found_records.append(record) return found_records def get_record_by_pos(self, position): """ Get specific record by its position """ return self.records[position] @classmethod def from_bytes(cls, address, data=None, count_record=0): """ Creates a new TableDatablock in memory from a string of bytes """ if(count_record == 0 and data is None): return cls(address=address, count_record=count_record, type=1, header=[], records=[]) header = [] header_info, record_info = TableDatablock.unpack(count_record, data) for i in range(0, count_record * 2, 2): header.append(header_info[i+2]) #Get record begin position header.append(header_info[i + 2 + 1]) #Get record length records = TableDatablock.unpack_records(record_info[0], header, address) return cls(address=address, count_record=count_record, type=1, header=header, records=records) @staticmethod def unpack(count_record, data): records_size = TableDatablock.DATABLOCK_SIZE - ((count_record * 4) + 4) # Calculate the remaining space in the record data area fmt_header = 'BH%sH%sx' % (count_record * 2, records_size) fmt_record = '%ss' % records_size header = struct.unpack(fmt_header, data) # Get binary header data records = struct.unpack_from(fmt_record, data, (count_record * 4) + 4) # Get binary records data return header, records @staticmethod def unpack_records(record_str, header, address): """ Returns a list of Records included in the datablock """ records = [] for i in range(0, len(header), 2): if(header[i+1] != 0): info = struct.unpack_from('I%ss' % (header[i+1]-4), record_str, header[i]) rowid = Rowid(dblock=address, pos=int(math.ceil(i/2.0))) desc = re.sub(r'[^\w]', '', info[1].decode()) records.append(Record(code=info[0], description=desc, rowid=rowid)) else: rowid
= Rowid(dblock=address, pos=int(math.ceil(i/2.0))) records.append(Record(code=0, description='', rowid=rowid, deleted=True)) return records def _insert_new_record(self, record, place, reuse=False): if(record.rowid is None): record.rowid = Rowid(dblock=self.address, pos=int(math.ceil(place/2.0))) if(reuse): self.records[place] = record else: self.records.insert(place, record) self._dirty = True self.count_record = len(self.records) return record
if(len(self.header) == 0): return 0 last_offset = 0 for i in range(0, len(self.header), 2): if self.header[last_offset] < self.header[i]: last_offset = i #Check for space between records if(i+2 < len(self.header)): space_between = self.header[i+2]-(self.header[i]+self.header[i+1]) if(self.header[i+1] == 0): #If header wanted is deleted, ignore header space space_between += 4 if(space_needed <= space_between): return self.header[i]+self.header[i+1] #Check for space in the end if(self.records_size() -(self.header[last_offset]+self.header[last_offset+1]) >= space_needed): return self.header[last_offset]+self.header[last_offset+1]
identifier_body
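The free_contiguous_space middle above scans the header, a flat list of (offset, length) pairs, and returns the first offset where space_needed bytes fit, either in a hole between two records or in the tail after the last one, with -1 meaning the block is full. A simplified first-fit sketch of the same idea (it assumes the header is sorted by offset and ignores the original's 4-byte rebate for deleted header entries):

def first_fit(header, space_needed, data_area_size):
    # header is a flat list [offset0, len0, offset1, len1, ...] sorted by offset
    if not header:
        return 0
    for i in range(0, len(header), 2):
        end_of_record = header[i] + header[i + 1]
        if i + 2 < len(header):
            gap = header[i + 2] - end_of_record   # hole before the next record
        else:
            gap = data_area_size - end_of_record  # tail space after the last record
        if gap >= space_needed:
            return end_of_record
    return -1  # no contiguous hole is large enough

# records at offsets 0 (len 10) and 30 (len 5) leave a 20-byte hole at offset 10,
# so a 15-byte record lands there: first_fit([0, 10, 30, 5], 15, 64) == 10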
table_datablock.py
import attr import struct import math import re import copy from datablock import Datablock from record import Record from rowid import Rowid @attr.s class TableDatablock(Datablock): header = attr.ib(default=[]) records = attr.ib(default=[]) def get_data(self): """ Convert header and records to bytes """ records_buffer = bytearray(self.records_size()) for i in range(0,len(self.records)): struct.pack_into('%ss' % self.header[2*i+1], records_buffer, self.header[2*i], self.records[i].pack()) fmt = 'BH%sH%ss' % (len(self.header), len(records_buffer)) data = struct.pack(fmt, self.type, self.count_record, *self.header, records_buffer) return data def save_record(self, record): """ Saves a Record to the datablock """ if type(record) is not Record: raise TypeError("Wrong type for save_record()") # TODO: check if there's room in the Datablock # TODO: save to the Datablock def records_size(self): return TableDatablock.DATABLOCK_SIZE - ((len(self.header) * 2)) def free_contiguous_space(self, space_needed): if(len(self.header) == 0): return 0 last_offset = 0 for i in range(0, len(self.header), 2): if self.header[last_offset] < self.header[i]: last_offset = i #Check for space between records if(i+2 < len(self.header)): space_between = self.header[i+2]-(self.header[i]+self.header[i+1]) if(self.header[i+1] == 0): #If header wanted is deleted, ignore header space space_between += 4 if(space_needed <= space_between): return self.header[i]+self.header[i+1] #Check for space in the end if(self.records_size() -(self.header[last_offset]+self.header[last_offset+1]) >= space_needed): return self.header[last_offset]+self.header[last_offset+1] return -1 def write_data(self, record, position=None): if(position is None): position = self.free_contiguous_space(record.size()+4) if(position == -1): print('Error writing data') return False # Insert Header in the right position place = -1 for i in range(0, len(self.header), 2): if(self.header[i] == position and self.header[i+1] == 0): # Going to use header that was deleted place = i self.header[i+1] = record.size() return self._insert_new_record(record, place, True) elif(self.header[i] > position): place = i self.header.insert(i, position) self.header.insert(i+1, record.size()) return self._insert_new_record(record, place) if(place == -1): place = len(self.header) self.header.append(position) self.header.append(record.size()) return self._insert_new_record(record, place) def update_record(self, record, desc): tmp_record = copy.copy(record) tmp_record.description = desc pos = record.rowid.pos*2 can_store = False if(pos+2 >= len(self.header)): can_store =((self.header[pos+1] + (self.records_size() - (self.header[pos]+self.header[pos+1]))) >= tmp_record.size()) else: can_store = ((self.header[pos+1]+(self.header[pos+2]-self.header[pos+1])) >= tmp_record.size()) #Check for space between records if(can_store): record.description = desc self.header[pos+1] = record.size() self._dirty = True return True else: self.delete_record(record) return None def delete_record(self, record): pos = record.rowid.pos self.header[pos*2+1] = 0 #set record removed size to 0 to mark it was removed self.records[pos].deleted = True self._dirty = True return True def search_by(self, value, field): found_records = [] for record in self.records: if(field == 'code'): if(record.code == value and not record.deleted): return [record] elif(field == 'description'): if(record.description == value and not record.deleted): found_records.append(record) return found_records def get_record_by_pos(self, position):
""" Get specific record by its position """ return self.records[position] @classmethod def
(cls, address, data=None, count_record=0): """ Creates a new TableDatablock in memory from a string of bytes """ if(count_record == 0 and data is None): return cls(address=address, count_record=count_record, type=1, header=[], records=[]) header = [] header_info, record_info = TableDatablock.unpack(count_record, data) for i in range(0, count_record * 2, 2): header.append(header_info[i+2]) #Get record begin position header.append(header_info[i + 2 + 1]) #Get record length records = TableDatablock.unpack_records(record_info[0], header, address) return cls(address=address, count_record=count_record, type=1, header=header, records=records) @staticmethod def unpack(count_record, data): records_size = TableDatablock.DATABLOCK_SIZE - ((count_record * 4) + 4) # Calculate the remaining space in the record data area fmt_header = 'BH%sH%sx' % (count_record * 2, records_size) fmt_record = '%ss' % records_size header = struct.unpack(fmt_header, data) # Get binary header data records = struct.unpack_from(fmt_record, data, (count_record * 4) + 4) # Get binary records data return header, records @staticmethod def unpack_records(record_str, header, address): """ Returns a list of Records included in the datablock """ records = [] for i in range(0, len(header), 2): if(header[i+1] != 0): info = struct.unpack_from('I%ss' % (header[i+1]-4), record_str, header[i]) rowid = Rowid(dblock=address, pos=int(math.ceil(i/2.0))) desc = re.sub(r'[^\w]', '', info[1].decode()) records.append(Record(code=info[0], description=desc, rowid=rowid)) else: rowid = Rowid(dblock=address, pos=int(math.ceil(i/2.0))) records.append(Record(code=0, description='', rowid=rowid, deleted=True)) return records def _insert_new_record(self, record, place, reuse=False): if(record.rowid is None): record.rowid = Rowid(dblock=self.address, pos=int(math.ceil(place/2.0))) if(reuse): self.records[place] = record else: self.records.insert(place, record) self._dirty = True self.count_record = len(self.records) return record
from_bytes
identifier_name
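The unpack helpers in these records pin down the on-disk layout: a type byte ('B'), a record-count short ('H'), two shorts per record for (offset, length), and the raw record area filling out the block. A small round-trip sketch of that layout (it uses an explicit little-endian format and a toy 24-byte data area instead of the class's native alignment and DATABLOCK_SIZE):

import struct

DATA_AREA = 24                                       # toy record-area size
header = [0, 10, 10, 8]                              # (offset, length) for two records
payload = b"ABCDEFGHIJ" + b"KLMNOPQR" + b"\x00" * 6  # 24 bytes of record data

fmt = '<BH%dH%ds' % (len(header), DATA_AREA)         # type, count, header shorts, data
blob = struct.pack(fmt, 1, 2, *header, payload)

kind, count, o0, l0, o1, l1, data = struct.unpack(fmt, blob)
assert (kind, count) == (1, 2)
assert data[o0:o0 + l0] == b"ABCDEFGHIJ"             # slice records back out by header
assert data[o1:o1 + l1] == b"KLMNOPQR"

Keeping offsets relative to the start of the data area, as here, is what lets the header grow or shrink without rewriting the record bytes themselves.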
table_datablock.py
import attr import struct import math import re import copy from datablock import Datablock from record import Record from rowid import Rowid @attr.s class TableDatablock(Datablock): header = attr.ib(default=[]) records = attr.ib(default=[]) def get_data(self): """ Convert header and records to bytes """ records_buffer = bytearray(self.records_size()) for i in range(0,len(self.records)): struct.pack_into('%ss' % self.header[2*i+1], records_buffer, self.header[2*i], self.records[i].pack()) fmt = 'BH%sH%ss' % (len(self.header), len(records_buffer)) data = struct.pack(fmt, self.type, self.count_record, *self.header, records_buffer) return data def save_record(self, record): """ Saves a Record to the datablock """ if type(record) is not Record: raise TypeError("Wrong type for save_record()") # TODO: check if there's room in the Datablock # TODO: save to the Datablock def records_size(self): return TableDatablock.DATABLOCK_SIZE - ((len(self.header) * 2)) def free_contiguous_space(self, space_needed): if(len(self.header) == 0): return 0 last_offset = 0 for i in range(0, len(self.header), 2): if self.header[last_offset] < self.header[i]: last_offset = i #Check for space between records if(i+2 < len(self.header)): space_between = self.header[i+2]-(self.header[i]+self.header[i+1]) if(self.header[i+1] == 0): #If header wanted is deleted, ignore header space space_between += 4 if(space_needed <= space_between): return self.header[i]+self.header[i+1] #Check for space in the end if(self.records_size() -(self.header[last_offset]+self.header[last_offset+1]) >= space_needed): return self.header[last_offset]+self.header[last_offset+1] return -1 def write_data(self, record, position=None): if(position is None): position = self.free_contiguous_space(record.size()+4) if(position == -1): print('Error writing data') return False # Insert Header in the right position place = -1 for i in range(0, len(self.header), 2): if(self.header[i] == position and self.header[i+1] == 0): # Going to use header that was deleted place = i self.header[i+1] = record.size() return self._insert_new_record(record, place, True) elif(self.header[i] > position): place = i self.header.insert(i, position) self.header.insert(i+1, record.size()) return self._insert_new_record(record, place) if(place == -1): place = len(self.header) self.header.append(position) self.header.append(record.size()) return self._insert_new_record(record, place) def update_record(self, record, desc): tmp_record = copy.copy(record) tmp_record.description = desc pos = record.rowid.pos*2 can_store = False if(pos+2 >= len(self.header)): can_store =((self.header[pos+1] + (self.records_size() - (self.header[pos]+self.header[pos+1]))) >= tmp_record.size()) else: can_store = ((self.header[pos+1]+(self.header[pos+2]-self.header[pos+1])) >= tmp_record.size()) #Check for space between records if(can_store): record.description = desc self.header[pos+1] = record.size() self._dirty = True return True else: self.delete_record(record) return None def delete_record(self, record): pos = record.rowid.pos self.header[pos*2+1] = 0 #set record removed size to 0 to mark it was removed self.records[pos].deleted = True self._dirty = True return True def search_by(self, value, field): found_records = [] for record in self.records: if(field == 'code'): if(record.code == value and not record.deleted): return [record] elif(field == 'description'): if(record.description == value and not record.deleted): found_records.append(record) return found_records def get_record_by_pos(self, position):
""" Get specific record by its position """
return self.records[position] @classmethod def from_bytes(cls, address, data=None, count_record=0): """ Creates a new TableDatablock in memory from a string of bytes """ if(count_record == 0 and data is None): return cls(address=address, count_record=count_record, type=1, header=[], records=[]) header = [] header_info, record_info = TableDatablock.unpack(count_record, data) for i in range(0, count_record * 2, 2): header.append(header_info[i+2]) #Get record begin position header.append(header_info[i + 2 + 1]) #Get record length records = TableDatablock.unpack_records(record_info[0], header, address) return cls(address=address, count_record=count_record, type=1, header=header, records=records) @staticmethod def unpack(count_record, data): records_size = TableDatablock.DATABLOCK_SIZE - ((count_record * 4) + 4) # Calculate the remaining space in the record data area fmt_header = 'BH%sH%sx' % (count_record * 2, records_size) fmt_record = '%ss' % records_size header = struct.unpack(fmt_header, data) # Get binary header data records = struct.unpack_from(fmt_record, data, (count_record * 4) + 4) # Get binary records data return header, records @staticmethod def unpack_records(record_str, header, address): """ Returns a list of Records included in the datablock """ records = [] for i in range(0, len(header), 2): if(header[i+1] != 0): info = struct.unpack_from('I%ss' % (header[i+1]-4), record_str, header[i]) rowid = Rowid(dblock=address, pos=int(math.ceil(i/2.0))) desc = re.sub(r'[^\w]', '', info[1].decode()) records.append(Record(code=info[0], description=desc, rowid=rowid)) else: rowid = Rowid(dblock=address, pos=int(math.ceil(i/2.0))) records.append(Record(code=0, description='', rowid=rowid, deleted=True)) return records def _insert_new_record(self, record, place, reuse=False): if(record.rowid is None): record.rowid = Rowid(dblock=self.address, pos=int(math.ceil(place/2.0))) if(reuse): self.records[place] = record else: self.records.insert(place, record) self._dirty = True self.count_record = len(self.records) return record
random_line_split
table_datablock.py
import attr import struct import math import re import copy from datablock import Datablock from record import Record from rowid import Rowid @attr.s class TableDatablock(Datablock): header = attr.ib(default=[]) records = attr.ib(default=[]) def get_data(self): """ Convert header and records to bytes """ records_buffer = bytearray(self.records_size()) for i in range(0,len(self.records)): struct.pack_into('%ss' % self.header[2*i+1], records_buffer, self.header[2*i], self.records[i].pack()) fmt = 'BH%sH%ss' % (len(self.header), len(records_buffer)) data = struct.pack(fmt, self.type, self.count_record, *self.header, records_buffer) return data def save_record(self, record): """ Saves a Record to the datablock """ if type(record) is not Record: raise TypeError("Wrong type for save_record()") # TODO: check if there's room in the Datablock # TODO: save to the Datablock def records_size(self): return TableDatablock.DATABLOCK_SIZE - ((len(self.header) * 2)) def free_contiguous_space(self, space_needed): if(len(self.header) == 0): return 0 last_offset = 0 for i in range(0, len(self.header), 2): if self.header[last_offset] < self.header[i]: last_offset = i #Check for space between records if(i+2 < len(self.header)): space_between = self.header[i+2]-(self.header[i]+self.header[i+1]) if(self.header[i+1] == 0): #If header wanted is deleted, ignore header space space_between += 4 if(space_needed <= space_between): return self.header[i]+self.header[i+1] #Check for space in the end if(self.records_size() -(self.header[last_offset]+self.header[last_offset+1]) >= space_needed): return self.header[last_offset]+self.header[last_offset+1] return -1 def write_data(self, record, position=None): if(position is None): position = self.free_contiguous_space(record.size()+4) if(position == -1): print('Error writing data') return False # Insert Header in the right position place = -1 for i in range(0, len(self.header), 2): if(self.header[i] == position and self.header[i+1] == 0): # Going to use header that was deleted place = i self.header[i+1] = record.size() return self._insert_new_record(record, place, True) elif(self.header[i] > position): place = i self.header.insert(i, position) self.header.insert(i+1, record.size()) return self._insert_new_record(record, place) if(place == -1): place = len(self.header) self.header.append(position) self.header.append(record.size()) return self._insert_new_record(record, place) def update_record(self, record, desc): tmp_record = copy.copy(record) tmp_record.description = desc pos = record.rowid.pos*2 can_store = False if(pos+2 >= len(self.header)):
else: can_store = ((self.header[pos+1]+(self.header[pos+2]-self.header[pos+1])) >= tmp_record.size()) #Check for space between records if(can_store): record.description = desc self.header[pos+1] = record.size() self._dirty = True return True else: self.delete_record(record) return None def delete_record(self, record): pos = record.rowid.pos self.header[pos*2+1] = 0 #set record removed size to 0 to mark it was removed self.records[pos].deleted = True self._dirty = True return True def search_by(self, value, field): found_records = [] for record in self.records: if(field == 'code'): if(record.code == value and not record.deleted): return [record] elif(field == 'description'): if(record.description == value and not record.deleted): found_records.append(record) return found_records def get_record_by_pos(self, position): """ Get specific record by its position """ return self.records[position] @classmethod def from_bytes(cls, address, data=None, count_record=0): """ Creates a new TableDatablock in memory from a string of bytes """ if(count_record == 0 and data is None): return cls(address=address, count_record=count_record, type=1, header=[], records=[]) header = [] header_info, record_info = TableDatablock.unpack(count_record, data) for i in range(0, count_record * 2, 2): header.append(header_info[i+2]) #Get record begin position header.append(header_info[i + 2 + 1]) #Get record length records = TableDatablock.unpack_records(record_info[0], header, address) return cls(address=address, count_record=count_record, type=1, header=header, records=records) @staticmethod def unpack(count_record, data): records_size = TableDatablock.DATABLOCK_SIZE - ((count_record * 4) + 4) # Calculate the remaining space in the record data area fmt_header = 'BH%sH%sx' % (count_record * 2, records_size) fmt_record = '%ss' % records_size header = struct.unpack(fmt_header, data) # Get binary header data records = struct.unpack_from(fmt_record, data, (count_record * 4) + 4) # Get binary records data return header, records @staticmethod def unpack_records(record_str, header, address): """ Returns a list of Records included in the datablock """ records = [] for i in range(0, len(header), 2): if(header[i+1] != 0): info = struct.unpack_from('I%ss' % (header[i+1]-4), record_str, header[i]) rowid = Rowid(dblock=address, pos=int(math.ceil(i/2.0))) desc = re.sub(r'[^\w]', '', info[1].decode()) records.append(Record(code=info[0], description=desc, rowid=rowid)) else: rowid = Rowid(dblock=address, pos=int(math.ceil(i/2.0))) records.append(Record(code=0, description='', rowid=rowid, deleted=True)) return records def _insert_new_record(self, record, place, reuse=False): if(record.rowid is None): record.rowid = Rowid(dblock=self.address, pos=int(math.ceil(place/2.0))) if(reuse): self.records[place] = record else: self.records.insert(place, record) self._dirty = True self.count_record = len(self.records) return record
can_store =((self.header[pos+1] + (self.records_size() - (self.header[pos]+self.header[pos+1]))) >= tmp_record.size())
conditional_block
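update_record in the record above tries an in-place rewrite first: the new description may use the slot's current bytes plus any slack before the next record (or before the end of the data area when it is the last record); if that fails, it deletes the record so the caller can reinsert it elsewhere. A condensed sketch of that fit test (reading the interior case as room = next_offset - own_offset, which appears to be what the original expression intends):

def fits_in_place(header, pos, new_size, data_area_size):
    # pos indexes the record; its (offset, length) pair sits at header[2*pos:2*pos+2]
    i = pos * 2
    if i + 2 >= len(header):
        # last record: it may grow into the tail of the data area
        room = header[i + 1] + (data_area_size - (header[i] + header[i + 1]))
    else:
        # interior record: it may grow up to the next record's offset
        room = header[i + 2] - header[i]
    return new_size <= room

# the record at offset 10 (len 8) with a neighbour at offset 30 can grow
# to 20 bytes in place, but not to 21:
# fits_in_place([0, 10, 10, 8, 30, 5], 1, 20, 64) == True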
validating.py
# coding: utf-8 """Utilities for argument validation """ import re import ujson import types
from girlfriend.exception import InvalidArgumentException class Rule(object): """Describes an argument validation rule and runs the validation """ @args2fields() def __init__(self, name, type=None, required=False, min=None, max=None, regex=None, logic=None, default=None): """ :param name the argument name, usually used in error messages :param required if True, the argument is mandatory :param min for a string this is the minimum length (equal to it is valid); for a number (numbers.Number) it is the minimum value (equal to it is valid) :param max same as above :param regex regular-expression validation :param type type validation; a tuple may be passed for multiple types :param logic a predicate function for more complex business checks, such as looking up in the database whether an email exists; instead of returning True or False it returns an error-message string when something is wrong, and None when nothing is :param default the default value for this item """ pass @property def name(self): return self._name @property def default(self): return self._default @property def required(self): return self._required def validate(self, value): """Run the validation :param value the value to validate """ if self._required and self._is_empty(value): raise InvalidArgumentException( u"Parameter '{}' is required and cannot be empty".format(self._name)) # if the argument is optional and empty, the remaining checks need not run if self._is_empty(value): return # check type self._validate_type(value) # check size and length self._validate_min_max(value) # check regex self._validate_regex(value) # check logic self._validate_logic(value) def _validate_type(self, value): if not self._type: return if not isinstance(value, self._type): raise InvalidArgumentException( u"Parameter '{name}' has a wrong type; only these types are allowed: {types}".format( name=self._name, types=self._type ) ) def _validate_min_max(self, value): if self._min is not None: if isinstance(value, numbers.Number): if self._min > value: raise InvalidArgumentException( u"The value of parameter '{name}' cannot be less than {min}".format( name=self._name, min=self._min) ) else: if self._min > len(value): raise InvalidArgumentException( u"The length of parameter '{name}' cannot be less than {min}".format( name=self._name, min=self._min) ) if self._max is not None: if isinstance(value, numbers.Number): if self._max < value: raise InvalidArgumentException( u"The value of parameter '{name}' cannot be greater than {max}".format( name=self._name, max=self._max) ) else: if self._max < len(value): raise InvalidArgumentException( u"The length of parameter '{name}' cannot be greater than {max}".format( name=self._name, max=self._max) ) def _validate_regex(self, value): if not self._regex: return value = str(value) if not re.search(self._regex, value): raise InvalidArgumentException( u"Parameter '{name}' does not match the regular expression '{regex}'".format( name=self._name, regex=self._regex) ) def _validate_logic(self, value): if self._logic is None: return msg = self._logic(value) if msg: raise InvalidArgumentException(msg) def _is_empty(self, value): """Determine whether a value is empty Returns True if the value is None Returns True if the value is an empty string A value of 0 does not count as empty, and returns False """ if value is None: return True if isinstance(value, types.StringType) and not value: return True return False def be_json(name): def _be_json(value): try: ujson.loads(value) except: return u"Parameter '{}' must be in json format".format(name) return _be_json
import numbers from girlfriend.util.lang import args2fields
random_line_split
validating.py
# coding: utf-8 """Utilities for argument validation """ import re import ujson import types import numbers from girlfriend.util.lang import args2fields from girlfriend.exception import InvalidArgumentException class Rule(object): """Describes an argument validation rule and runs the validation """ @args2fields() def __init__(self, name, type=None, required=False, min=None, max=None, regex=None, logic=None, default=None): """ :param name the argument name, usually used in error messages :param required if True, the argument is mandatory :param min for a string this is the minimum length (equal to it is valid); for a number (numbers.Number) it is the minimum value (equal to it is valid) :param max same as above :param regex regular-expression validation :param type type validation; a tuple may be passed for multiple types :param logic a predicate function for more complex business checks, such as looking up in the database whether an email exists; instead of returning True or False it returns an error-message string when something is wrong, and None when nothing is :param default the default value for this item """ pass @property def name(self): return self._name @property def default(self): return self._default @property def required(self): return self._required def validate(self, value): """Run the validation :param value the value to validate """ if self._required and self._is_empty(value): raise InvalidArgumentException( u"Parameter '{}' is required and cannot be empty".format(self._name)) # if the argument is optional and empty, the remaining checks need not run if self._is_empty(value): return # check type self._validate_type(value) # check size and length self._validate_min_max(value) # check regex self._validate_regex(value) # check logic self._validate_logic(value) def _validate_type(self, value): if not self._type: return if not isinstance(value, self._type): raise InvalidArgumentException( u"Parameter '{name}' has a wrong type; only these types are allowed: {types}".format( name=self._name, types=self._type ) ) def _validate_min_max(self, value): if self._min is not None: if isinstance(value, numbers.Number): if self._min > value: raise InvalidArgumentException( u"The value of parameter '{name}' cannot be less than {min}".format( name=self._name, min=self._min) ) else: if self._min > len(value): raise InvalidArgumentException( u"The length of parameter '{name}' cannot be less than {min}".format( name=self._name, min=self._min) ) if self._max is not None: if isinstance(value, numbers.Number): if self._max < value: raise InvalidArgumentException( u"The value of parameter '{name}' cannot be greater than {max}".format( name=self._name, max=self._max) ) else: if self._max < len(value): raise InvalidArgumentException( u"The length of parameter '{name}' cannot be greater than {max}".format( name=self._name, max=self._max) ) def _validate_regex(self, value): if not self._regex: return value = str(value) if not re.search(self._regex, value): raise InvalidArgumentException( u"Parameter '{name}' does not match the regular expression '{regex}'".format( name=self._name, regex=self._regex) ) def _validate_logic(self, value): if self._logic is None: return msg = self._logic(value) if msg: raise InvalidArgumentException(msg) def _is_empty(self, value): """Determine whether a value is empty Returns True if the value is None Returns True if the value is an empty string A value of 0 does not count as empty, and returns False """ if value is None: return True if isinstance(value, types.StringType) and not value: return True return False def be_json(name): def _be_json(value): try: ujson.loads(value) except: return u"Parameter '{}' must be in json format".format(name) return _be_json
identifier_name
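Given the Rule class in the records above, validation is a single call: build the rule once, then validate() raises InvalidArgumentException from the first failing check, in the order required, type, min/max, regex, logic. A usage sketch (the 'age' rule itself is made up for illustration):

from girlfriend.exception import InvalidArgumentException

age_rule = Rule("age", type=int, required=True, min=0, max=150)

age_rule.validate(42)        # passes silently

try:
    age_rule.validate(200)   # exceeds max, so this raises
except InvalidArgumentException as e:
    print(e)                 # the message names the parameter and the violated bound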
validating.py
# coding: utf-8 """Utilities for argument validation """ import re import ujson import types import numbers from girlfriend.util.lang import args2fields from girlfriend.exception import InvalidArgumentException class Rule(object): """Describes an argument validation rule and runs the validation """ @args2fields() def __init__(self, name, type=None, required=False, min=None, max=None, regex=None, logic=None, default=None): """ :param name the argument name, usually used in error messages :param required if True, the argument is mandatory :param min for a string this is the minimum length (equal to it is valid); for a number (numbers.Number) it is the minimum value (equal to it is valid) :param max same as above :param regex regular-expression validation :param type type validation; a tuple may be passed for multiple types :param logic a predicate function for more complex business checks, such as looking up in the database whether an email exists; instead of returning True or False it returns an error-message string when something is wrong, and None when nothing is :param default the default value for this item """ pass @property def name(self): return self._name @property def default(self): return self._default @property def required(self): return self._required def validate(self, value): """Run the validation :param value the value to validate """ if self._required and self._is_empty(value): raise InvalidArgumentException( u"Parameter '{}' is required and cannot be empty".format(self._name)) # if the argument is optional and empty, the remaining checks need not run if self._is_empty(value): return # check type self._validate_type(value) # check size and length self._validate_min_max(value) # check regex self._validate_regex(value) # check logic self._validate_logic(value) def _validate_type(self, value): if not self._type: return if not isinstance(value, self._type): raise InvalidArgumentException( u"Parameter '{name}' has a wrong type; only these types are allowed: {types}".format( name=self._name, types=self._type ) ) def _validate_min_max(self, value): if self._min is not None: if isinstance(value, numbers.Number): if self._min > value: raise InvalidArgumentException( u"The value of parameter '{name}' cannot be less than {min}".format( name=self._name, min=self._min) ) else: if self._min > len(value): raise InvalidArgumentException( u"The length of parameter '{name}' cannot be less than {min}".format( name=self._name, min=self._min) ) if self._max is not None: if isinstance(value, numbers.Number): if self._max < value: raise InvalidArgumentException( u"The value of parameter '{name}' cannot be greater than {max}".format( name=self._name, max=self._max) ) else: if self._max < len(value): raise InvalidArgumentException( u"The length of parameter '{name}' cannot be greater than {max}".format( name=self._name, max=self._max) ) def _validate_regex(self, value): if not self._regex: return value = str(value) if not re.s
alue): if self._logic is None: return msg = self._logic(value) if msg: raise InvalidArgumentException(msg) def _is_empty(self, value): """Determine whether a value is empty Returns True if the value is None Returns True if the value is an empty string A value of 0 does not count as empty, and returns False """ if value is None: return True if isinstance(value, types.StringType) and not value: return True return False def be_json(name): def _be_json(value): try: ujson.loads(value) except: return u"Parameter '{}' must be in json format".format(name) return _be_json
earch(self._regex, value): raise InvalidArgumentException( u"Parameter '{name}' does not match the regular expression '{regex}'".format( name=self._name, regex=self._regex) ) def _validate_logic(self, v
conditional_block
validating.py
# coding: utf-8 """Utilities for argument validation """ import re import ujson import types import numbers from girlfriend.util.lang import args2fields from girlfriend.exception import InvalidArgumentException class Rule(object): """Describes an argument validation rule and runs the validation """ @args2fields() def __init__(self, name, type=None, required=False, min=None, max=None, regex=None, logic=None, default=None): """ :param name the argument name, usually used in error messages :param required if True, the argument is mandatory :param min for a string this is the minimum length (equal to it is valid); for a number (numbers.Number) it is the minimum value (equal to it is valid) :param max same as above :param regex regular-expression validation :param type type validation; a tuple may be passed for multiple types :param logic a predicate function for more complex business checks, such as looking up in the database whether an email exists; instead of returning True or False it returns an error-message string when something is wrong, and None when nothing is :param default the default value for this item """ pass @property def name(self): return self._name @property def default(self): return self._default @property def required(self): return self._required def validate(self, value): """Run the validation :param value the value to validate """ if self._required and self._is_empty(value): raise InvalidArgumentException( u"Parameter '{}' is required and cannot be empty".format(self._name)) # if the argument is optional and empty, the remaining checks need not run if self._is_empty(value): return # check type self._validate_type(val
gth self._validate_min_max(value) # check regex self._validate_regex(value) # check logic self._validate_logic(value) def _validate_type(self, value): if not self._type: return if not isinstance(value, self._type): raise InvalidArgumentException( u"Parameter '{name}' has a wrong type; only these types are allowed: {types}".format( name=self._name, types=self._type ) ) def _validate_min_max(self, value): if self._min is not None: if isinstance(value, numbers.Number): if self._min > value: raise InvalidArgumentException( u"The value of parameter '{name}' cannot be less than {min}".format( name=self._name, min=self._min) ) else: if self._min > len(value): raise InvalidArgumentException( u"The length of parameter '{name}' cannot be less than {min}".format( name=self._name, min=self._min) ) if self._max is not None: if isinstance(value, numbers.Number): if self._max < value: raise InvalidArgumentException( u"The value of parameter '{name}' cannot be greater than {max}".format( name=self._name, max=self._max) ) else: if self._max < len(value): raise InvalidArgumentException( u"The length of parameter '{name}' cannot be greater than {max}".format( name=self._name, max=self._max) ) def _validate_regex(self, value): if not self._regex: return value = str(value) if not re.search(self._regex, value): raise InvalidArgumentException( u"Parameter '{name}' does not match the regular expression '{regex}'".format( name=self._name, regex=self._regex) ) def _validate_logic(self, value): if self._logic is None: return msg = self._logic(value) if msg: raise InvalidArgumentException(msg) def _is_empty(self, value): """Determine whether a value is empty Returns True if the value is None Returns True if the value is an empty string A value of 0 does not count as empty, and returns False """ if value is None: return True if isinstance(value, types.StringType) and not value: return True return False def be_json(name): def _be_json(value): try: ujson.loads(value) except: return u"Parameter '{}' must be in json format".format(name) return _be_json
ue) # check size and len
identifier_body
main.py
#!/usr/bin/env python # coding=utf-8 from webapp.web import Application from handlers.index import IndexHandler from handlers.register import RegisterHandler from handlers.user import UserHandler from handlers.signin import SigninHandler from handlers.signout import SignoutHandler from handlers.upload import UploadHandler from handlers.avatar import AvatarHandler from handlers.error import ErrorHandler from handlers.password import PasswordHandler from handlers.ftypeerror import FiletypeErrorHandler URLS = ( ("/", "IndexHandler"), ("/register?", "RegisterHandler"), ("/user", "UserHandler"), ("/signin", "SigninHandler"), ("/signout", "SignoutHandler"), ("/upload", "UploadHandler"), ("/avatar/(.*)", "AvatarHandler"), ("/error", "ErrorHandler"), ("/pwdchange", "PasswordHandler"), ("/ftypeerror", "FiletypeErrorHandler") ) if __name__ == '__main__':
app = Application(globals(), URLS) app.run()
conditional_block
main.py
#!/usr/bin/env python # coding=utf-8 from webapp.web import Application from handlers.index import IndexHandler from handlers.register import RegisterHandler from handlers.user import UserHandler from handlers.signin import SigninHandler from handlers.signout import SignoutHandler from handlers.upload import UploadHandler from handlers.avatar import AvatarHandler from handlers.error import ErrorHandler from handlers.password import PasswordHandler from handlers.ftypeerror import FiletypeErrorHandler URLS = ( ("/", "IndexHandler"), ("/register?", "RegisterHandler"), ("/user", "UserHandler"),
("/error", "ErrorHandler"), ("/pwdchange", "PasswordHandler"), ("/ftypeerror", "FiletypeErrorHandler") ) if __name__ == '__main__': app = Application(globals(), URLS) app.run()
("/signin", "SigninHandler"), ("/signout", "SignoutHandler"), ("/upload", "UploadHandler"), ("/avatar/(.*)", "AvatarHandler"),
random_line_split
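The URLS tuples in these main.py records pair a path regex with a handler class name, and main.py hands globals() to Application, which suggests the framework resolves each name to a class and passes captured groups (such as the avatar filename in "/avatar/(.*)") to the handler. The real webapp.web internals are not shown in this file, so the following dispatch loop is an assumption-level sketch:

import re

URLS = (
    ("/", "IndexHandler"),
    ("/avatar/(.*)", "AvatarHandler"),
)

def dispatch(path):
    for pattern, name in URLS:
        match = re.match("^%s$" % pattern, path)  # anchor so "/" matches only itself
        if match:
            return name, match.groups()           # groups become handler arguments
    return "ErrorHandler", ()

# dispatch("/avatar/alice.png") == ("AvatarHandler", ("alice.png",))
# an unknown path falls through: dispatch("/nope") == ("ErrorHandler", ())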
tooltip.js
https://github.com/angular/material * @license MIT * v0.9.0-rc2-master-041ffe9 */ goog.provide('ng.material.components.tooltip'); goog.require('ng.material.core'); /** * @ngdoc module * @name material.components.tooltip */ angular .module('material.components.tooltip', [ 'material.core' ]) .directive('mdTooltip', MdTooltipDirective); /** * @ngdoc directive * @name mdTooltip * @module material.components.tooltip * @description * Tooltips are used to describe elements that are interactive and primarily graphical (not textual). * * Place a `<md-tooltip>` as a child of the element it describes. * * A tooltip will activate when the user focuses, hovers over, or touches the parent. * * @usage * <hljs lang="html"> * <md-button class="md-fab md-accent" aria-label="Play"> * <md-tooltip> * Play Music * </md-tooltip> * <md-icon icon="/img/icons/ic_play_arrow_24px.svg"></md-icon> * </md-button> * </hljs> * * @param {expression=} md-visible Boolean bound to whether the tooltip is * currently visible. * @param {number=} md-delay How many milliseconds to wait to show the tooltip after the user focuses, hovers, or touches the parent. Defaults to 400ms. * @param {string=} md-direction Which direction would you like the tooltip to go? Supports left, right, top, and bottom. Defaults to bottom. * @param {boolean=} md-autohide If present or provided with a boolean value, the tooltip will hide on mouse leave, regardless of focus */ function MdTooltipDirective($timeout, $window, $$rAF, $document, $mdUtil, $mdTheming, $rootElement, $animate, $q) { var TOOLTIP_SHOW_DELAY = 300; var TOOLTIP_WINDOW_EDGE_SPACE = 8; return { restrict: 'E', transclude: true, template: '\ <div class="md-background"></div>\ <div class="md-content" ng-transclude></div>', scope: { visible: '=?mdVisible', delay: '=?mdDelay', autohide: '=?mdAutohide' }, link: postLink }; function postLink(scope, element, attr) { $mdTheming(element); var parent = getParentWithPointerEvents(), background = angular.element(element[0].getElementsByClassName('md-background')[0]), content = angular.element(element[0].getElementsByClassName('md-content')[0]), direction = attr.mdDirection, current = getNearestContentElement(), tooltipParent = angular.element(current || document.body), debouncedOnResize = $$rAF.throttle(function () { if (scope.visible) positionTooltip(); }); return init(); function init () { setDefaults(); manipulateElement(); bindEvents(); configureWatchers(); } function setDefaults () { if (!angular.isDefined(attr.mdDelay)) scope.delay = TOOLTIP_SHOW_DELAY; } function configureWatchers () { scope.$watch('visible', function (isVisible) { if (isVisible) showTooltip(); else hideTooltip(); }); scope.$on('$destroy', function() { scope.visible = false; element.remove(); angular.element($window).off('resize', debouncedOnResize); }); } function manipulateElement ()
function getParentWithPointerEvents () { var parent = element.parent(); while ($window.getComputedStyle(parent[0])['pointer-events'] == 'none') { parent = parent.parent(); } return parent; } function getNearestContentElement () { var current = element.parent()[0]; // Look for the nearest parent md-content, stopping at the rootElement. while (current && current !== $rootElement[0] && current !== document.body) { if (current.tagName && current.tagName.toLowerCase() == 'md-content') break; current = current.parentNode; } return current; } function bindEvents () { var autohide = scope.hasOwnProperty('autohide') ? scope.autohide : attr.hasOwnProperty('mdAutohide'); parent.on('focus mouseenter touchstart', function() { setVisible(true); }); parent.on('blur mouseleave touchend touchcancel', function() { if ($document[0].activeElement !== parent[0] || autohide) setVisible(false); }); angular.element($window).on('resize', debouncedOnResize); } function setVisible (value) { setVisible.value = !!value; if (!setVisible.queued) { if (value) { setVisible.queued = true; $timeout(function() { scope.visible = setVisible.value; setVisible.queued = false; }, scope.delay); } else { $timeout(function() { scope.visible = false; }); } } } function showTooltip() { // Insert the element before positioning it, so we can get the position // and check if we should display it tooltipParent.append(element); // Check if we should display it or not. // This handles hide-* and show-* along with any user defined css var computedStyles = $window.getComputedStyle(element[0]); if (angular.isDefined(computedStyles.display) && computedStyles.display == 'none') { element.detach(); return; } parent.attr('aria-describedby', element.attr('id')); positionTooltip(); angular.forEach([element, background, content], function (element) { $animate.addClass(element, 'md-show'); }); } function hideTooltip() { parent.removeAttr('aria-describedby'); $q.all([ $animate.removeClass(content, 'md-show'), $animate.removeClass(background, 'md-show'), $animate.removeClass(element, 'md-show') ]).then(function () { if (!scope.visible) element.detach(); }); } function positionTooltip() { var tipRect = $mdUtil.offsetRect(element, tooltipParent); var parentRect = $mdUtil.offsetRect(parent, tooltipParent); var newPosition = getPosition(direction); // If the user provided a direction, just nudge the tooltip onto the screen // Otherwise, recalculate based on 'top' since default is 'bottom' if (direction) { newPosition = fitInParent(newPosition); } else if (newPosition.top > element.prop('offsetParent').scrollHeight - tipRect.height - TOOLTIP_WINDOW_EDGE_SPACE) { newPosition = fitInParent(getPosition('top')); } element.css({top: newPosition.top + 'px', left: newPosition.left + 'px'}); positionBackground(); function positionBackground () { var size = direction === 'left' || direction === 'right' ? Math.sqrt(Math.pow(tipRect.width, 2) + Math.pow(tipRect.height / 2, 2)) * 2 : Math.sqrt(Math.pow(tipRect.width / 2, 2) + Math.pow(tipRect.height, 2)) * 2, position = direction === 'left' ? { left: 100, top: 50 } : direction === 'right' ? { left: 0, top: 50 } : direction === 'top' ? 
{ left: 50, top: 100 } : { left: 50, top: 0 }; background.css({ width: size + 'px', height: size + 'px', left: position.left + '%', top: position.top + '%' }); } function fitInParent (pos) { var newPosition = { left: pos.left, top: pos.top }; newPosition.left = Math.min( newPosition.left, tooltipParent.prop('scrollWidth') - tipRect.width - TOOLTIP_WINDOW_EDGE_SPACE ); newPosition.left = Math.max( newPosition.left, TOOLTIP_WINDOW_EDGE_SPACE ); newPosition.top = Math.min( newPosition.top, tooltipParent.prop('scrollHeight') - tipRect.height - TOOLTIP_WINDOW_EDGE_SPACE ); newPosition.top = Math.max( newPosition.top, TOOLTIP_WINDOW_EDGE_SPACE ); return newPosition; } function getPosition (dir) { return dir === 'left' ? { left: parentRect.left - tipRect.width - TOOLTIP_WINDOW_EDGE_SPACE, top: parentRect.top + parentRect.height / 2 - tipRect.height / 2 } : dir === 'right' ? { left: parentRect.left + parentRect.width + TOOLTIP_WINDOW_EDGE_SPACE, top: parentRect.top + parentRect.height / 2 - tipRect.height / 2 } : dir === 'top' ? { left: parentRect.left + parentRect.width / 2 - tipRect.width / 2, top: parentRect.top - tipRect.height - TOOLTIP_WINDOW_EDGE_SPACE } : { left: parentRect.left + parentRect.width / 2 - tipRect.width / 2, top: parentRect.top + parentRect.height + TOOLTIP_WINDOW_EDGE_SPACE }; } } } } MdTooltipDirective.$inject = ["$timeout", "$window", "$$rAF", "$document", "$mdUtil", "$mdTheming", "$rootElement
{ element.detach(); element.attr('role', 'tooltip'); element.attr('id', attr.id || ('tooltip_' + $mdUtil.nextUid())); }
identifier_body
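setVisible in the tooltip records queues at most one pending update: a show only takes effect after scope.delay milliseconds and reads whatever value was last requested when the timer fires, so a quick hover-in followed by hover-out never shows the tip, while a hide is applied on the next tick. A rough Python model of that latch (the schedule callback stands in for Angular's $timeout and is an assumption of this sketch):

class VisibleLatch(object):
    def __init__(self, delay_ms, schedule):
        self.delay_ms = delay_ms
        self.schedule = schedule  # schedule(fn, ms): call fn after ms milliseconds
        self.value = False        # last requested value
        self.queued = False       # is a delayed show already pending?
        self.visible = False      # what the UI currently shows

    def set(self, value):
        self.value = value
        if self.queued:
            return  # the pending timer will read self.value when it fires
        if value:
            self.queued = True
            self.schedule(self._fire, self.delay_ms)  # delayed show
        else:
            self.schedule(self._hide, 0)              # hide on the next tick

    def _fire(self):
        # a hover-out during the delay flipped self.value back to False,
        # so the tooltip stays hidden in that case
        self.visible = self.value
        self.queued = False

    def _hide(self):
        self.visible = False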
tooltip.js
https://github.com/angular/material * @license MIT * v0.9.0-rc2-master-041ffe9 */ goog.provide('ng.material.components.tooltip'); goog.require('ng.material.core'); /** * @ngdoc module * @name material.components.tooltip */ angular .module('material.components.tooltip', [ 'material.core' ]) .directive('mdTooltip', MdTooltipDirective); /** * @ngdoc directive * @name mdTooltip * @module material.components.tooltip * @description * Tooltips are used to describe elements that are interactive and primarily graphical (not textual). * * Place a `<md-tooltip>` as a child of the element it describes. * * A tooltip will activate when the user focuses, hovers over, or touches the parent. * * @usage * <hljs lang="html"> * <md-button class="md-fab md-accent" aria-label="Play"> * <md-tooltip> * Play Music * </md-tooltip> * <md-icon icon="/img/icons/ic_play_arrow_24px.svg"></md-icon> * </md-button> * </hljs> * * @param {expression=} md-visible Boolean bound to whether the tooltip is * currently visible. * @param {number=} md-delay How many milliseconds to wait to show the tooltip after the user focuses, hovers, or touches the parent. Defaults to 400ms. * @param {string=} md-direction Which direction would you like the tooltip to go? Supports left, right, top, and bottom. Defaults to bottom. * @param {boolean=} md-autohide If present or provided with a boolean value, the tooltip will hide on mouse leave, regardless of focus */ function MdTooltipDirective($timeout, $window, $$rAF, $document, $mdUtil, $mdTheming, $rootElement, $animate, $q) { var TOOLTIP_SHOW_DELAY = 300; var TOOLTIP_WINDOW_EDGE_SPACE = 8; return { restrict: 'E', transclude: true, template: '\ <div class="md-background"></div>\ <div class="md-content" ng-transclude></div>', scope: { visible: '=?mdVisible', delay: '=?mdDelay', autohide: '=?mdAutohide' }, link: postLink }; function postLink(scope, element, attr) { $mdTheming(element); var parent = getParentWithPointerEvents(), background = angular.element(element[0].getElementsByClassName('md-background')[0]), content = angular.element(element[0].getElementsByClassName('md-content')[0]), direction = attr.mdDirection, current = getNearestContentElement(), tooltipParent = angular.element(current || document.body), debouncedOnResize = $$rAF.throttle(function () { if (scope.visible) positionTooltip(); }); return init(); function init () { setDefaults(); manipulateElement(); bindEvents(); configureWatchers(); } function setDefaults () { if (!angular.isDefined(attr.mdDelay)) scope.delay = TOOLTIP_SHOW_DELAY; } function configureWatchers () { scope.$watch('visible', function (isVisible) { if (isVisible) showTooltip(); else hideTooltip(); }); scope.$on('$destroy', function() { scope.visible = false; element.remove(); angular.element($window).off('resize', debouncedOnResize); }); } function manipulateElement () { element.detach(); element.attr('role', 'tooltip'); element.attr('id', attr.id || ('tooltip_' + $mdUtil.nextUid())); } function getParentWithPointerEvents () { var parent = element.parent(); while ($window.getComputedStyle(parent[0])['pointer-events'] == 'none') { parent = parent.parent(); } return parent; } function getNearestContentElement () { var current = element.parent()[0]; // Look for the nearest parent md-content, stopping at the rootElement. while (current && current !== $rootElement[0] && current !== document.body) { if (current.tagName && current.tagName.toLowerCase() == 'md-content') break; current = current.parentNode; } return current; } function bindEvents () { var autohide = scope.hasOwnProperty('autohide') ? scope.autohide : attr.hasOwnProperty('mdAutohide'); parent.on('focus mouseenter touchstart', function() { setVisible(true); }); parent.on('blur mouseleave touchend touchcancel', function() { if ($document[0].activeElement !== parent[0] || autohide) setVisible(false); }); angular.element($window).on('resize', debouncedOnResize); } function
(value) { setVisible.value = !!value; if (!setVisible.queued) { if (value) { setVisible.queued = true; $timeout(function() { scope.visible = setVisible.value; setVisible.queued = false; }, scope.delay); } else { $timeout(function() { scope.visible = false; }); } } } function showTooltip() { // Insert the element before positioning it, so we can get the position // and check if we should display it tooltipParent.append(element); // Check if we should display it or not. // This handles hide-* and show-* along with any user defined css var computedStyles = $window.getComputedStyle(element[0]); if (angular.isDefined(computedStyles.display) && computedStyles.display == 'none') { element.detach(); return; } parent.attr('aria-describedby', element.attr('id')); positionTooltip(); angular.forEach([element, background, content], function (element) { $animate.addClass(element, 'md-show'); }); } function hideTooltip() { parent.removeAttr('aria-describedby'); $q.all([ $animate.removeClass(content, 'md-show'), $animate.removeClass(background, 'md-show'), $animate.removeClass(element, 'md-show') ]).then(function () { if (!scope.visible) element.detach(); }); } function positionTooltip() { var tipRect = $mdUtil.offsetRect(element, tooltipParent); var parentRect = $mdUtil.offsetRect(parent, tooltipParent); var newPosition = getPosition(direction); // If the user provided a direction, just nudge the tooltip onto the screen // Otherwise, recalculate based on 'top' since default is 'bottom' if (direction) { newPosition = fitInParent(newPosition); } else if (newPosition.top > element.prop('offsetParent').scrollHeight - tipRect.height - TOOLTIP_WINDOW_EDGE_SPACE) { newPosition = fitInParent(getPosition('top')); } element.css({top: newPosition.top + 'px', left: newPosition.left + 'px'}); positionBackground(); function positionBackground () { var size = direction === 'left' || direction === 'right' ? Math.sqrt(Math.pow(tipRect.width, 2) + Math.pow(tipRect.height / 2, 2)) * 2 : Math.sqrt(Math.pow(tipRect.width / 2, 2) + Math.pow(tipRect.height, 2)) * 2, position = direction === 'left' ? { left: 100, top: 50 } : direction === 'right' ? { left: 0, top: 50 } : direction === 'top' ? { left: 50, top: 100 } : { left: 50, top: 0 }; background.css({ width: size + 'px', height: size + 'px', left: position.left + '%', top: position.top + '%' }); } function fitInParent (pos) { var newPosition = { left: pos.left, top: pos.top }; newPosition.left = Math.min( newPosition.left, tooltipParent.prop('scrollWidth') - tipRect.width - TOOLTIP_WINDOW_EDGE_SPACE ); newPosition.left = Math.max( newPosition.left, TOOLTIP_WINDOW_EDGE_SPACE ); newPosition.top = Math.min( newPosition.top, tooltipParent.prop('scrollHeight') - tipRect.height - TOOLTIP_WINDOW_EDGE_SPACE ); newPosition.top = Math.max( newPosition.top, TOOLTIP_WINDOW_EDGE_SPACE ); return newPosition; } function getPosition (dir) { return dir === 'left' ? { left: parentRect.left - tipRect.width - TOOLTIP_WINDOW_EDGE_SPACE, top: parentRect.top + parentRect.height / 2 - tipRect.height / 2 } : dir === 'right' ? { left: parentRect.left + parentRect.width + TOOLTIP_WINDOW_EDGE_SPACE, top: parentRect.top + parentRect.height / 2 - tipRect.height / 2 } : dir === 'top' ? 
{ left: parentRect.left + parentRect.width / 2 - tipRect.width / 2, top: parentRect.top - tipRect.height - TOOLTIP_WINDOW_EDGE_SPACE } : { left: parentRect.left + parentRect.width / 2 - tipRect.width / 2, top: parentRect.top + parentRect.height + TOOLTIP_WINDOW_EDGE_SPACE }; } } } } MdTooltipDirective.$inject = ["$timeout", "$window", "$$rAF", "$document", "$mdUtil", "$mdTheming", "$rootElement",
setVisible
identifier_name
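fitInParent in the tooltip records above keeps the tip inside its scrollable parent by clamping each coordinate to the range [EDGE, parent_size - tip_size - EDGE], with EDGE being the 8px TOOLTIP_WINDOW_EDGE_SPACE. The same clamp as a small Python sketch (the function and argument names are illustrative):

EDGE = 8  # TOOLTIP_WINDOW_EDGE_SPACE in the directive above

def fit_in_parent(left, top, tip_w, tip_h, parent_w, parent_h):
    # pull each axis back so the tip stays EDGE px inside every parent edge
    left = max(EDGE, min(left, parent_w - tip_w - EDGE))
    top = max(EDGE, min(top, parent_h - tip_h - EDGE))
    return left, top

# a tip computed at (590, -3) in a 600x400 parent with a 100x30 tip
# is pulled back on-screen: fit_in_parent(590, -3, 100, 30, 600, 400) == (492, 8)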
tooltip.js
https://github.com/angular/material * @license MIT * v0.9.0-rc2-master-041ffe9 */ goog.provide('ng.material.components.tooltip'); goog.require('ng.material.core'); /** * @ngdoc module * @name material.components.tooltip */ angular .module('material.components.tooltip', [ 'material.core' ]) .directive('mdTooltip', MdTooltipDirective); /** * @ngdoc directive * @name mdTooltip * @module material.components.tooltip * @description * Tooltips are used to describe elements that are interactive and primarily graphical (not textual). * * Place a `<md-tooltip>` as a child of the element it describes. * * A tooltip will activate when the user focuses, hovers over, or touches the parent. * * @usage * <hljs lang="html"> * <md-button class="md-fab md-accent" aria-label="Play"> * <md-tooltip> * Play Music * </md-tooltip> * <md-icon icon="/img/icons/ic_play_arrow_24px.svg"></md-icon> * </md-button> * </hljs> * * @param {expression=} md-visible Boolean bound to whether the tooltip is * currently visible. * @param {number=} md-delay How many milliseconds to wait to show the tooltip after the user focuses, hovers, or touches the parent. Defaults to 400ms. * @param {string=} md-direction Which direction would you like the tooltip to go? Supports left, right, top, and bottom. Defaults to bottom. * @param {boolean=} md-autohide If present or provided with a boolean value, the tooltip will hide on mouse leave, regardless of focus */ function MdTooltipDirective($timeout, $window, $$rAF, $document, $mdUtil, $mdTheming, $rootElement, $animate, $q) { var TOOLTIP_SHOW_DELAY = 300; var TOOLTIP_WINDOW_EDGE_SPACE = 8; return { restrict: 'E', transclude: true, template: '\ <div class="md-background"></div>\ <div class="md-content" ng-transclude></div>', scope: { visible: '=?mdVisible', delay: '=?mdDelay', autohide: '=?mdAutohide' }, link: postLink }; function postLink(scope, element, attr) { $mdTheming(element); var parent = getParentWithPointerEvents(), background = angular.element(element[0].getElementsByClassName('md-background')[0]), content = angular.element(element[0].getElementsByClassName('md-content')[0]), direction = attr.mdDirection, current = getNearestContentElement(), tooltipParent = angular.element(current || document.body), debouncedOnResize = $$rAF.throttle(function () { if (scope.visible) positionTooltip(); }); return init(); function init () { setDefaults(); manipulateElement(); bindEvents(); configureWatchers(); } function setDefaults () { if (!angular.isDefined(attr.mdDelay)) scope.delay = TOOLTIP_SHOW_DELAY; } function configureWatchers () { scope.$watch('visible', function (isVisible) { if (isVisible) showTooltip(); else hideTooltip(); }); scope.$on('$destroy', function() { scope.visible = false; element.remove(); angular.element($window).off('resize', debouncedOnResize); }); } function manipulateElement () { element.detach(); element.attr('role', 'tooltip'); element.attr('id', attr.id || ('tooltip_' + $mdUtil.nextUid())); } function getParentWithPointerEvents () { var parent = element.parent(); while ($window.getComputedStyle(parent[0])['pointer-events'] == 'none') { parent = parent.parent(); } return parent; } function getNearestContentElement () { var current = element.parent()[0]; // Look for the nearest parent md-content, stopping at the rootElement. while (current && current !== $rootElement[0] && current !== document.body) { if (current.tagName && current.tagName.toLowerCase() == 'md-content') break; current = current.parentNode; } return current; } function bindEvents () { var autohide = scope.hasOwnProperty('autohide') ? scope.autohide : attr.hasOwnProperty('mdAutohide'); parent.on('focus mouseenter touchstart', function() { setVisible(true); }); parent.on('blur mouseleave touchend touchcancel', function() { if ($document[0].activeElement !== parent[0] || autohide) setVisible(false); }); angular.element($window).on('resize', debouncedOnResize); } function setVisible (value) { setVisible.value = !!value; if (!setVisible.queued) { if (value) { setVisible.queued = true; $timeout(function() { scope.visible = setVisible.value; setVisible.queued = false; }, scope.delay); } else { $timeout(function() { scope.visible = false; }); } } } function showTooltip() { // Insert the element before positioning it, so we can get the position // and check if we should display it tooltipParent.append(element); // Check if we should display it or not. // This handles hide-* and show-* along with any user defined css var computedStyles = $window.getComputedStyle(element[0]); if (angular.isDefined(computedStyles.display) && computedStyles.display == 'none') { element.detach(); return; } parent.attr('aria-describedby', element.attr('id')); positionTooltip(); angular.forEach([element, background, content], function (element) { $animate.addClass(element, 'md-show'); }); } function hideTooltip() { parent.removeAttr('aria-describedby'); $q.all([ $animate.removeClass(content, 'md-show'), $animate.removeClass(background, 'md-show'), $animate.removeClass(element, 'md-show') ]).then(function () { if (!scope.visible) element.detach(); }); } function positionTooltip() { var tipRect = $mdUtil.offsetRect(element, tooltipParent); var parentRect = $mdUtil.offsetRect(parent, tooltipParent); var newPosition = getPosition(direction); // If the user provided a direction, just nudge the tooltip onto the screen // Otherwise, recalculate based on 'top' since default is 'bottom' if (direction) { newPosition = fitInParent(newPosition); } else if (newPosition.top > element.prop('offsetParent').scrollHeight - tipRect.height - TOOLTIP_WINDOW_EDGE_SPACE) { newPosition = fitInParent(getPosition('top')); } element.css({top: newPosition.top + 'px', left: newPosition.left + 'px'}); positionBackground(); function positionBackground () { var size = direction === 'left' || direction === 'right' ? Math.sqrt(Math.pow(tipRect.width, 2) + Math.pow(tipRect.height / 2, 2)) * 2 : Math.sqrt(Math.pow(tipRect.width / 2, 2) + Math.pow(tipRect.height, 2)) * 2, position = direction === 'left' ? { left: 100, top: 50 } : direction === 'right' ? { left: 0, top: 50 } : direction === 'top' ? { left: 50, top: 100 } : { left: 50, top: 0 };
stopping at the rootElement.\n while (current && current !== $rootElement[0] && current !== document.body) {\n if (current.tagName && current.tagName.toLowerCase() == 'md-content') break;\n current = current.parentNode;\n }\n return current;\n }\n\n function bindEvents () {\n var autohide = scope.hasOwnProperty('autohide') ? scope.autohide : attr.hasOwnProperty('mdAutohide');\n parent.on('focus mouseenter touchstart', function() { setVisible(true); });\n parent.on('blur mouseleave touchend touchcancel', function() { if ($document[0].activeElement !== parent[0] || autohide) setVisible(false); });\n angular.element($window).on('resize', debouncedOnResize);\n }\n\n function setVisible (value) {\n setVisible.value = !!value;\n if (!setVisible.queued) {\n if (value) {\n setVisible.queued = true;\n $timeout(function() {\n scope.visible = setVisible.value;\n setVisible.queued = false;\n }, scope.delay);\n } else {\n $timeout(function() { scope.visible = false; });\n }\n }\n }\n\n function showTooltip() {\n // Insert the element before positioning it, so we can get the position\n // and check if we should display it\n tooltipParent.append(element);\n\n // Check if we should display it or not.\n // This handles hide-* and show-* along with any user defined css\n var computedStyles = $window.getComputedStyle(element[0]);\n if (angular.isDefined(computedStyles.display) && computedStyles.display == 'none') {\n element.detach();\n return;\n }\n\n parent.attr('aria-describedby', element.attr('id'));\n\n positionTooltip();\n angular.forEach([element, background, content], function (element) {\n $animate.addClass(element, 'md-show');\n });\n }\n\n function hideTooltip() {\n parent.removeAttr('aria-describedby');\n $q.all([\n $animate.removeClass(content, 'md-show'),\n $animate.removeClass(background, 'md-show'),\n $animate.removeClass(element, 'md-show')\n ]).then(function () {\n if (!scope.visible) element.detach();\n });\n }\n\n function positionTooltip() {\n var tipRect = $mdUtil.offsetRect(element, tooltipParent);\n var parentRect = $mdUtil.offsetRect(parent, tooltipParent);\n var newPosition = getPosition(direction);\n\n // If the user provided a direction, just nudge the tooltip onto the screen\n // Otherwise, recalculate based on 'top' since default is 'bottom'\n if (direction) {\n newPosition = fitInParent(newPosition);\n } else if (newPosition.top > element.prop('offsetParent').scrollHeight - tipRect.height - TOOLTIP_WINDOW_EDGE_SPACE) {\n newPosition = fitInParent(getPosition('top'));\n }\n\n element.css({top: newPosition.top + 'px', left: newPosition.left + 'px'});\n\n positionBackground();\n\n function positionBackground () {\n var size = direction === 'left' || direction === 'right'\n ? Math.sqrt(Math.pow(tipRect.width, 2) + Math.pow(tipRect.height / 2, 2)) * 2\n : Math.sqrt(Math.pow(tipRect.width / 2, 2) + Math.pow(tipRect.height, 2)) * 2,\n position = direction === 'left' ? { left: 100, top: 50 }\n : direction === 'right' ? { left: 0, top: 50 }\n : direction === 'top' ? { left: 50, top: 100 }\n : { left: 50, top: 0 };"
          height: size + 'px',
          left: position.left + '%',
          top: position.top + '%'
        });
      }

      function fitInParent (pos) {
        var newPosition = { left: pos.left, top: pos.top };
        newPosition.left = Math.min( newPosition.left, tooltipParent.prop('scrollWidth') - tipRect.width - TOOLTIP_WINDOW_EDGE_SPACE );
        newPosition.left = Math.max( newPosition.left, TOOLTIP_WINDOW_EDGE_SPACE );
        newPosition.top = Math.min( newPosition.top, tooltipParent.prop('scrollHeight') - tipRect.height - TOOLTIP_WINDOW_EDGE_SPACE );
        newPosition.top = Math.max( newPosition.top, TOOLTIP_WINDOW_EDGE_SPACE );
        return newPosition;
      }

      function getPosition (dir) {
        return dir === 'left' ? { left: parentRect.left - tipRect.width - TOOLTIP_WINDOW_EDGE_SPACE,
                                  top: parentRect.top + parentRect.height / 2 - tipRect.height / 2 }
          : dir === 'right' ? { left: parentRect.left + parentRect.width + TOOLTIP_WINDOW_EDGE_SPACE,
                                top: parentRect.top + parentRect.height / 2 - tipRect.height / 2 }
          : dir === 'top' ? { left: parentRect.left + parentRect.width / 2 - tipRect.width / 2,
                              top: parentRect.top - tipRect.height - TOOLTIP_WINDOW_EDGE_SPACE }
          : { left: parentRect.left + parentRect.width / 2 - tipRect.width / 2,
              top: parentRect.top + parentRect.height + TOOLTIP_WINDOW_EDGE_SPACE };
      }
    }
  }
}

MdTooltipDirective.$inject = ["$timeout", "$window", "$$rAF", "$document", "$mdUtil", "$mdTheming", "$rootElement", "$animate", "$q"];
background.css({ width: size + 'px',
random_line_split
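The `fitInParent` helper in this record is a plain min/max clamp: the tooltip's proposed top-left corner is pulled back inside the scrollable parent, keeping `TOOLTIP_WINDOW_EDGE_SPACE` (8px) of margin on every side. A minimal Python sketch of the same clamping rule follows; the function name and the sizes in the check are invented purely for illustration.

# Sketch of the fitInParent clamp; EDGE stands in for TOOLTIP_WINDOW_EDGE_SPACE.
EDGE = 8

def fit_in_parent(left, top, tip_w, tip_h, parent_w, parent_h):
    # Clamp the tooltip's corner so the tip stays EDGE px inside the parent.
    left = max(min(left, parent_w - tip_w - EDGE), EDGE)
    top = max(min(top, parent_h - tip_h - EDGE), EDGE)
    return left, top

# A tip proposed past the right edge is pulled back inside:
assert fit_in_parent(990, 10, 100, 40, 1000, 600) == (892, 10)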
ihex_writer.rs
        // In groups of 16 bytes.
        .chunks(16)
        // Create a tuple of (Length, Record).
        .map(|chunk| {
          (chunk.len() as u16, Record::Data { offset: *record_address.borrow(), value: Vec::from(chunk) })
        })
        // Increment the address counter by the number of bytes incorporated into the record.
        .inspect(|&(length, _)| {
          *record_address.borrow_mut() += length;
        })
        // Discard the length from the tuple.
        .map(|(_, record)| record)
        // Collect the records into a Vec<Record>.
        .collect::<Vec<_>>()
    );
  }

  // All ihex files end in an EOF marker.
  records.push(Record::EndOfFile);

  // Obtain the formatted representation of each record and join with newlines for display.
  writer::create_object_file_representation(records.as_slice()).unwrap() + &"\n"
}

// MARK: - Tests

#[cfg(test)]
mod tests {

  use twelve_bit::u12::*;
  use assembler::data_range::DataRange;
  use super::ihex_representation_of_data_ranges;

  #[test]
  fn test_ihex_representation_of_data_ranges_no_ranges() {
    // An empty set of data ranges yields just an EOF marker.
    assert_eq!(ihex_representation_of_data_ranges(&[]), String::from(":00000001FF\n"));
  }

  #[test]
  fn test_ihex_representation_of_data_ranges_one_range() {
    // Build an average-case ihex record.
    let mut data_range = DataRange::new(u12![0x100]);
    data_range.append(&vec![0x21,0x46,0x01,0x36,0x01,0x21,0x47,0x01,0x36,0x00,0x7E,0xFE,0x09,0xD2,0x19,0x01]);
    data_range.append(&vec![0x21,0x46,0x01,0x7E,0x17,0xC2,0x00,0x01,0xFF,0x5F,0x16,0x00,0x21,0x48,0x01,0x19]);
    data_range.append(&vec![0x19,0x4E,0x79,0x23,0x46,0x23,0x96,0x57,0x78,0x23,0x9E,0xDA,0x3F,0x01,0xB2,0xCA]);
    data_range.append(&vec![0x3F,0x01,0x56,0x70,0x2B,0x5E,0x71,0x2B,0x72,0x2B,0x73,0x21,0x46,0x01,0x34,0x21]);

    // Validate the average case yielded the anticipated result.
    let ihex_rep_average = ihex_representation_of_data_ranges(&[data_range]);
    let expected_ihex_rep_average = String::new() +
      &":10010000214601360121470136007EFE09D2190140\n" +
      &":100110002146017E17C20001FF5F16002148011928\n" +
      &":10012000194E79234623965778239EDA3F01B2CAA7\n" +
      &":100130003F0156702B5E712B722B732146013421C7\n" +
      &":00000001FF\n";
    assert_eq!(ihex_rep_average, expected_ihex_rep_average);
  }

  #[test]
  fn test_ihex_representation_of_data_ranges_adjacent_ranges() {
    // Build a pair of adjacent data ranges.
    let mut range_a = DataRange::new(u12![0x100]);
    range_a.append(&vec![0x21,0x46,0x01,0x36,0x01,0x21,0x47,0x01,0x36,0x00,0x7E,0xFE,0x09,0xD2,0x19,0x01]);
    let mut range_b = DataRange::new(u12![0x110]);
    range_b.append(&vec![0x21,0x46,0x01,0x7E,0x17,0xC2,0x00,0x01,0xFF,0x5F,0x16,0x00,0x21,0x48,0x01,0x19]);

    // Validate the average case yielded the anticipated result.
    let ihex_rep_adjacent = ihex_representation_of_data_ranges(&[range_a, range_b]);
    let expected_ihex_rep_adjacent = String::new() +
      &":10010000214601360121470136007EFE09D2190140\n" +
      &":100110002146017E17C20001FF5F16002148011928\n" +
      &":00000001FF\n";
    assert_eq!(ihex_rep_adjacent, expected_ihex_rep_adjacent);
  }

  #[test]
  fn test_ihex_representation_of_data_ranges_disjoint_ranges() {
    // Build a disjoint pair of data ranges.
    let mut range_a = DataRange::new(u12![0x100]);
    range_a.append(&vec![0x21,0x46,0x01,0x36,0x01,0x21,0x47,0x01,0x36,0x00,0x7E,0xFE,0x09,0xD2,0x19,0x01]);
    let mut range_b = DataRange::new(u12![0x130]);
    range_b.append(&vec![0x3F,0x01,0x56,0x70,0x2B,0x5E,0x71,0x2B,0x72,0x2B,0x73,0x21,0x46,0x01,0x34,0x21]);

    // Validate the average case yielded the anticipated result.
    let ihex_rep_disjoint = ihex_representation_of_data_ranges(&[range_a, range_b]);
    let expected_ihex_rep_disjoint = String::new() +
      &":10010000214601360121470136007EFE09D2190140\n" +
      &":100130003F0156702B5E712B722B732146013421C7\n" +
      &":00000001FF\n";
    assert_eq!(ihex_rep_disjoint, expected_ihex_rep_disjoint);
  }

  #[test]
  fn test_ihex_representation_of_data_ranges_uneven_ranges() {
    // Build an uneven set of data ranges.
    let mut range_a = DataRange::new(u12![0x100]);
    range_a.append(&vec![0x21,0x46,0x01,0x36,0x01,0x21,0x47,0x01,0x36,0x00,0x7E,0xFE,0x09,0xD2,0x19]);
    let mut range_b = DataRange::new(u12![0x130]);
    range_b.append(&vec![0x3F,0x01,0x56,0x70,0x2B,0x5E,0x71,0x2B,0x72,0x2B,0x73,0x21,0x46,0x01,0x34,0x21,0x22]);
    let mut range_c = DataRange::new(u12![0x200]);
    range_c.append(&vec![0x3F]);
random_line_split
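The expected strings in these tests follow the Intel HEX record layout `:LLAAAATT<data>CC` — byte count, 16-bit offset, record type (00 for data, 01 for EOF), payload, and a checksum that is the two's complement of the sum of all preceding record bytes. A rough Python sketch of that formatting (illustrative only — the Rust code above delegates it to the ihex crate's writer):

def ihex_data_record(offset, payload):
    # One Intel HEX data record: ':' LL AAAA TT(=00) <data> CC.
    body = [len(payload), (offset >> 8) & 0xFF, offset & 0xFF, 0x00] + list(payload)
    checksum = (-sum(body)) & 0xFF  # two's complement of the summed record bytes
    return ':' + ''.join('%02X' % b for b in body) + '%02X' % checksum

# Reproduces the first expected line of test_..._one_range:
row = [0x21, 0x46, 0x01, 0x36, 0x01, 0x21, 0x47, 0x01,
       0x36, 0x00, 0x7E, 0xFE, 0x09, 0xD2, 0x19, 0x01]
assert ihex_data_record(0x100, row) == ':10010000214601360121470136007EFE09D2190140'
# The EOF marker is a zero-length type-01 record, hence the ':00000001FF' every test expects.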
ihex_writer.rs
- Tests

#[cfg(test)]
mod tests {

  use twelve_bit::u12::*;
  use assembler::data_range::DataRange;
  use super::ihex_representation_of_data_ranges;

  #[test]
  fn test_ihex_representation_of_data_ranges_no_ranges() {
    // An empty set of data ranges yields just an EOF marker.
    assert_eq!(ihex_representation_of_data_ranges(&[]), String::from(":00000001FF\n"));
  }

  #[test]
  fn test_ihex_representation_of_data_ranges_one_range() {
    // Build an average-case ihex record.
    let mut data_range = DataRange::new(u12![0x100]);
    data_range.append(&vec![0x21,0x46,0x01,0x36,0x01,0x21,0x47,0x01,0x36,0x00,0x7E,0xFE,0x09,0xD2,0x19,0x01]);
    data_range.append(&vec![0x21,0x46,0x01,0x7E,0x17,0xC2,0x00,0x01,0xFF,0x5F,0x16,0x00,0x21,0x48,0x01,0x19]);
    data_range.append(&vec![0x19,0x4E,0x79,0x23,0x46,0x23,0x96,0x57,0x78,0x23,0x9E,0xDA,0x3F,0x01,0xB2,0xCA]);
    data_range.append(&vec![0x3F,0x01,0x56,0x70,0x2B,0x5E,0x71,0x2B,0x72,0x2B,0x73,0x21,0x46,0x01,0x34,0x21]);

    // Validate the average case yielded the anticipated result.
    let ihex_rep_average = ihex_representation_of_data_ranges(&[data_range]);
    let expected_ihex_rep_average = String::new() +
      &":10010000214601360121470136007EFE09D2190140\n" +
      &":100110002146017E17C20001FF5F16002148011928\n" +
      &":10012000194E79234623965778239EDA3F01B2CAA7\n" +
      &":100130003F0156702B5E712B722B732146013421C7\n" +
      &":00000001FF\n";
    assert_eq!(ihex_rep_average, expected_ihex_rep_average);
  }

  #[test]
  fn test_ihex_representation_of_data_ranges_adjacent_ranges() {
    // Build a pair of adjacent data ranges.
    let mut range_a = DataRange::new(u12![0x100]);
    range_a.append(&vec![0x21,0x46,0x01,0x36,0x01,0x21,0x47,0x01,0x36,0x00,0x7E,0xFE,0x09,0xD2,0x19,0x01]);
    let mut range_b = DataRange::new(u12![0x110]);
    range_b.append(&vec![0x21,0x46,0x01,0x7E,0x17,0xC2,0x00,0x01,0xFF,0x5F,0x16,0x00,0x21,0x48,0x01,0x19]);

    // Validate the average case yielded the anticipated result.
    let ihex_rep_adjacent = ihex_representation_of_data_ranges(&[range_a, range_b]);
    let expected_ihex_rep_adjacent = String::new() +
      &":10010000214601360121470136007EFE09D2190140\n" +
      &":100110002146017E17C20001FF5F16002148011928\n" +
      &":00000001FF\n";
    assert_eq!(ihex_rep_adjacent, expected_ihex_rep_adjacent);
  }

  #[test]
  fn test_ihex_representation_of_data_ranges_disjoint_ranges() {
    // Build a disjoint pair of data ranges.
    let mut range_a = DataRange::new(u12![0x100]);
    range_a.append(&vec![0x21,0x46,0x01,0x36,0x01,0x21,0x47,0x01,0x36,0x00,0x7E,0xFE,0x09,0xD2,0x19,0x01]);
    let mut range_b = DataRange::new(u12![0x130]);
    range_b.append(&vec![0x3F,0x01,0x56,0x70,0x2B,0x5E,0x71,0x2B,0x72,0x2B,0x73,0x21,0x46,0x01,0x34,0x21]);

    // Validate the average case yielded the anticipated result.
    let ihex_rep_disjoint = ihex_representation_of_data_ranges(&[range_a, range_b]);
    let expected_ihex_rep_disjoint = String::new() +
      &":10010000214601360121470136007EFE09D2190140\n" +
      &":100130003F0156702B5E712B722B732146013421C7\n" +
      &":00000001FF\n";
    assert_eq!(ihex_rep_disjoint, expected_ihex_rep_disjoint);
  }

  #[test]
  fn test_ihex_representation_of_data_ranges_uneven_ranges()
{
    // Build an uneven set of data ranges.
    let mut range_a = DataRange::new(u12![0x100]);
    range_a.append(&vec![0x21,0x46,0x01,0x36,0x01,0x21,0x47,0x01,0x36,0x00,0x7E,0xFE,0x09,0xD2,0x19]);
    let mut range_b = DataRange::new(u12![0x130]);
    range_b.append(&vec![0x3F,0x01,0x56,0x70,0x2B,0x5E,0x71,0x2B,0x72,0x2B,0x73,0x21,0x46,0x01,0x34,0x21,0x22]);
    let mut range_c = DataRange::new(u12![0x200]);
    range_c.append(&vec![0x3F]);

    // Validate the average case yielded the anticipated result.
    let ihex_rep = ihex_representation_of_data_ranges(&[range_a, range_b, range_c]);
    let expected_ihex_rep = String::new() +
      &":0F010000214601360121470136007EFE09D21942\n" +
      &":100130003F0156702B5E712B722B732146013421C7\n" +
      &":01014000229C\n" +
      &":010200003FBE\n" +
      &":00000001FF\n";
    assert_eq!(ihex_rep, expected_ihex_rep);
  }
identifier_body
ihex_writer.rs
<'a>(ranges: &'a [DataRange]) -> String {
  assert!(data_range::find_overlapping_ranges(ranges).len() == 0);

  // All records are collected into a list.
  let mut records = Vec::<Record>::new();

  for range in ranges.iter() {
    // The range will be sub-divided into chunks of up to 16 bytes, so sub-address must be tracked.
    let record_address = RefCell::new(u16::from(range.address_range().start));

    // Sub-divide the range into 16-byte Record::Data objects.
    records.append(
      &mut range
        // Inspect the data in the range.
        .data()
        // As a u8 slice.
        .as_slice()
        // In groups of 16 bytes.
        .chunks(16)
        // Create a tuple of (Length, Record).
        .map(|chunk| {
          (chunk.len() as u16, Record::Data { offset: *record_address.borrow(), value: Vec::from(chunk) })
        })
        // Increment the address counter by the number of bytes incorporated into the record.
        .inspect(|&(length, _)| {
          *record_address.borrow_mut() += length;
        })
        // Discard the length from the tuple.
        .map(|(_, record)| record)
        // Collect the records into a Vec<Record>.
        .collect::<Vec<_>>()
    );
  }

  // All ihex files end in an EOF marker.
  records.push(Record::EndOfFile);

  // Obtain the formatted representation of each record and join with newlines for display.
  writer::create_object_file_representation(records.as_slice()).unwrap() + &"\n"
}

// MARK: - Tests

#[cfg(test)]
mod tests {

  use twelve_bit::u12::*;
  use assembler::data_range::DataRange;
  use super::ihex_representation_of_data_ranges;

  #[test]
  fn test_ihex_representation_of_data_ranges_no_ranges() {
    // An empty set of data ranges yields just an EOF marker.
    assert_eq!(ihex_representation_of_data_ranges(&[]), String::from(":00000001FF\n"));
  }

  #[test]
  fn test_ihex_representation_of_data_ranges_one_range() {
    // Build an average-case ihex record.
    let mut data_range = DataRange::new(u12![0x100]);
    data_range.append(&vec![0x21,0x46,0x01,0x36,0x01,0x21,0x47,0x01,0x36,0x00,0x7E,0xFE,0x09,0xD2,0x19,0x01]);
    data_range.append(&vec![0x21,0x46,0x01,0x7E,0x17,0xC2,0x00,0x01,0xFF,0x5F,0x16,0x00,0x21,0x48,0x01,0x19]);
    data_range.append(&vec![0x19,0x4E,0x79,0x23,0x46,0x23,0x96,0x57,0x78,0x23,0x9E,0xDA,0x3F,0x01,0xB2,0xCA]);
    data_range.append(&vec![0x3F,0x01,0x56,0x70,0x2B,0x5E,0x71,0x2B,0x72,0x2B,0x73,0x21,0x46,0x01,0x34,0x21]);

    // Validate the average case yielded the anticipated result.
    let ihex_rep_average = ihex_representation_of_data_ranges(&[data_range]);
    let expected_ihex_rep_average = String::new() +
      &":10010000214601360121470136007EFE09D2190140\n" +
      &":100110002146017E17C20001FF5F16002148011928\n" +
      &":10012000194E79234623965778239EDA3F01B2CAA7\n" +
      &":100130003F0156702B5E712B722B732146013421C7\n" +
      &":00000001FF\n";
    assert_eq!(ihex_rep_average, expected_ihex_rep_average);
  }

  #[test]
  fn test_ihex_representation_of_data_ranges_adjacent_ranges() {
    // Build a pair of adjacent data ranges.
    let mut range_a = DataRange::new(u12![0x100]);
    range_a.append(&vec![0x21,0x46,0x01,0x36,0x01,0x21,0x47,0x01,0x36,0x00,0x7E,0xFE,0x09,0xD2,0x19,0x01]);
    let mut range_b = DataRange::new(u12![0x110]);
    range_b.append(&vec![0x21,0x46,0x01,0x7E,0x17,0xC2,0x00,0x01,0xFF,0x5F,0x16,0x00,0x21,0x48,0x01,0x19]);

    // Validate the average case yielded the anticipated result.
    let ihex_rep_adjacent = ihex_representation_of_data_ranges(&[range_a, range_b]);
    let expected_ihex_rep_adjacent = String::new() +
      &":10010000214601360121470136007EFE09D2190140\n" +
      &":100110002146017E17C20001FF5F16002148011928\n" +
      &":00000001FF\n";
    assert_eq!(ihex_rep_adjacent, expected_ihex_rep_adjacent);
  }

  #[test]
  fn test_ihex_representation_of_data_ranges_disjoint_ranges() {
    // Build a disjoint pair of data ranges.
    let mut range_a = DataRange::new(u12![0x100]);
    range_a.append(&vec![0x21,0x46,0x01,0x36,0x01,0x21,0x47,0x01,0x36,0x00,0x7E,0xFE,0x09,0xD2,0x19,0x01]);
    let mut range_b = DataRange::new(u12![0x130]);
    range_b.append(&vec![0x3F,0x01,0x56,0x70,0x2B,0x5E,0x71,0x2B,0x72,0x2B,0x73,0x21,0x46,0x01,0x34,0x21]);

    // Validate the average case yielded the anticipated result.
    let ihex_rep_disjoint = ihex_representation_of_data_ranges(&[range_a, range_b]);
    let expected_ihex_rep_disjoint = String::new() +
      &":10010000214601360121470136007EFE09D2190140\n" +
      &":100130003F0156702B5E712B722B732146013421C7\n" +
      &":00000001FF\n";
    assert_eq!(ihex_rep_disjoint, expected_ihex_rep_disjoint);
  }

  #[test]
  fn test_ihex_representation_of_data_ranges_uneven_ranges() {
    // Build an uneven set of data ranges.
    let mut range_a = DataRange::new(u12![0x100]);
    range_a.append(&vec![0x21,0x46,0x01,0x36,0x01,0x21,0x47,0x01,0x36,0x0
ihex_representation_of_data_ranges
identifier_name
WeatherForecasts.ts
import { fetch, addTask } from 'domain-task';
import { Action, Reducer, ActionCreator } from 'redux';
import { AppThunkAction } from './';

// -----------------
// STATE - This defines the type of data maintained in the Redux store.

export interface WeatherForecastsState {
    isLoading: boolean;
    startDateIndex: number;
    forecasts: WeatherForecast[];
}

export interface WeatherForecast {
    dateFormatted: string;
    temperatureC: number;
    temperatureF: number;
    summary: string;
}

// -----------------
// ACTIONS - These are serializable (hence replayable) descriptions of state transitions.
// They do not themselves have any side-effects; they just describe something that is going to happen.

interface RequestWeatherForecastsAction {
    type: 'REQUEST_WEATHER_FORECASTS',
    startDateIndex: number;
}

interface ReceiveWeatherForecastsAction {
    type: 'RECEIVE_WEATHER_FORECASTS',
    startDateIndex: number;
    forecasts: WeatherForecast[]
}

// Declare a 'discriminated union' type. This guarantees that all references to 'type' properties contain one of the
// declared type strings (and not any other arbitrary string).
type KnownAction = RequestWeatherForecastsAction | ReceiveWeatherForecastsAction;

// ----------------
// ACTION CREATORS - These are functions exposed to UI components that will trigger a state transition.
// They don't directly mutate state, but they can have external side-effects (such as loading data).
export const actionCreators = {
    requestWeatherForecasts: (startDateIndex: number): AppThunkAction<KnownAction> => (dispatch, getState) => {
        // Only load data if it's something we don't already have (and are not already loading)
        if (startDateIndex !== getState().weatherForecasts.startDateIndex) {
            let fetchTask = fetch(`/api/SampleData/WeatherForecasts?startDateIndex=${ startDateIndex }`)
                .then(response => response.json() as Promise<WeatherForecast[]>)
                .then(data => {
                    dispatch({ type: 'RECEIVE_WEATHER_FORECASTS', startDateIndex: startDateIndex, forecasts: data });
                });

            addTask(fetchTask); // Ensure server-side prerendering waits for this to complete
            dispatch({ type: 'REQUEST_WEATHER_FORECASTS', startDateIndex: startDateIndex });
        }
    }
};

// ----------------
// REDUCER - For a given state and action, returns the new state. To support time travel, this must not mutate the old state.

const unloadedState: WeatherForecastsState = { startDateIndex: null, forecasts: [], isLoading: false };

export const reducer: Reducer<WeatherForecastsState> = (state: WeatherForecastsState, action: KnownAction) => {
    switch (action.type) {
        case 'REQUEST_WEATHER_FORECASTS':
            return { startDateIndex: action.startDateIndex, forecasts: state.forecasts, isLoading: true };
        case 'RECEIVE_WEATHER_FORECASTS':
            // Only accept the incoming data if it matches the most recent request. This ensures we correctly
            // handle out-of-order responses.
            if (action.startDateIndex === state.startDateIndex) {
                return { startDateIndex: action.startDateIndex, forecasts: action.forecasts, isLoading: false };
            }
            break;
        default:
            // The following line guarantees that every action in the KnownAction union has been covered by a case above
            const exhaustiveCheck: never = action;
    }

    return state || unloadedState;
};
random_line_split
WeatherForecasts.ts
import { fetch, addTask } from 'domain-task';
import { Action, Reducer, ActionCreator } from 'redux';
import { AppThunkAction } from './';

// -----------------
// STATE - This defines the type of data maintained in the Redux store.

export interface WeatherForecastsState {
    isLoading: boolean;
    startDateIndex: number;
    forecasts: WeatherForecast[];
}

export interface WeatherForecast {
    dateFormatted: string;
    temperatureC: number;
    temperatureF: number;
    summary: string;
}

// -----------------
// ACTIONS - These are serializable (hence replayable) descriptions of state transitions.
// They do not themselves have any side-effects; they just describe something that is going to happen.

interface RequestWeatherForecastsAction {
    type: 'REQUEST_WEATHER_FORECASTS',
    startDateIndex: number;
}

interface ReceiveWeatherForecastsAction {
    type: 'RECEIVE_WEATHER_FORECASTS',
    startDateIndex: number;
    forecasts: WeatherForecast[]
}

// Declare a 'discriminated union' type. This guarantees that all references to 'type' properties contain one of the
// declared type strings (and not any other arbitrary string).
type KnownAction = RequestWeatherForecastsAction | ReceiveWeatherForecastsAction;

// ----------------
// ACTION CREATORS - These are functions exposed to UI components that will trigger a state transition.
// They don't directly mutate state, but they can have external side-effects (such as loading data).

export const actionCreators = {
    requestWeatherForecasts: (startDateIndex: number): AppThunkAction<KnownAction> => (dispatch, getState) => {
        // Only load data if it's something we don't already have (and are not already loading)
        if (startDateIndex !== getState().weatherForecasts.startDateIndex)
    }
};

// ----------------
// REDUCER - For a given state and action, returns the new state. To support time travel, this must not mutate the old state.

const unloadedState: WeatherForecastsState = { startDateIndex: null, forecasts: [], isLoading: false };

export const reducer: Reducer<WeatherForecastsState> = (state: WeatherForecastsState, action: KnownAction) => {
    switch (action.type) {
        case 'REQUEST_WEATHER_FORECASTS':
            return { startDateIndex: action.startDateIndex, forecasts: state.forecasts, isLoading: true };
        case 'RECEIVE_WEATHER_FORECASTS':
            // Only accept the incoming data if it matches the most recent request. This ensures we correctly
            // handle out-of-order responses.
            if (action.startDateIndex === state.startDateIndex) {
                return { startDateIndex: action.startDateIndex, forecasts: action.forecasts, isLoading: false };
            }
            break;
        default:
            // The following line guarantees that every action in the KnownAction union has been covered by a case above
            const exhaustiveCheck: never = action;
    }

    return state || unloadedState;
};
{
            let fetchTask = fetch(`/api/SampleData/WeatherForecasts?startDateIndex=${ startDateIndex }`)
                .then(response => response.json() as Promise<WeatherForecast[]>)
                .then(data => {
                    dispatch({ type: 'RECEIVE_WEATHER_FORECASTS', startDateIndex: startDateIndex, forecasts: data });
                });

            addTask(fetchTask); // Ensure server-side prerendering waits for this to complete
            dispatch({ type: 'REQUEST_WEATHER_FORECASTS', startDateIndex: startDateIndex });
        }
conditional_block
pmovsxbd.rs
use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
fn pmovsxbd_1() { run_test(&Instruction { mnemonic: Mnemonic::PMOVSXBD, operand1: Some(Direct(XMM1)), operand2: Some(Direct(XMM5)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 56, 33, 205], OperandSize::Dword) }

fn pmovsxbd_2() { run_test(&Instruction { mnemonic: Mnemonic::PMOVSXBD, operand1: Some(Direct(XMM4)), operand2: Some(IndirectDisplaced(EDI, 977278461, Some(OperandSize::Dword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 56, 33, 167, 253, 21, 64, 58], OperandSize::Dword) }

fn pmovsxbd_3() { run_test(&Instruction { mnemonic: Mnemonic::PMOVSXBD, operand1: Some(Direct(XMM6)), operand2: Some(Direct(XMM6)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 56, 33, 246], OperandSize::Qword) }

fn pmovsxbd_4() { run_test(&Instruction { mnemonic: Mnemonic::PMOVSXBD, operand1: Some(Direct(XMM5)), operand2: Some(IndirectScaledIndexedDisplaced(RDI, RBX, Two, 1709813562, Some(OperandSize::Dword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 56, 33, 172, 95, 58, 175, 233, 101], OperandSize::Qword) }
use ::RegScale::*;
random_line_split
pmovsxbd.rs
use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;

fn pmovsxbd_1() { run_test(&Instruction { mnemonic: Mnemonic::PMOVSXBD, operand1: Some(Direct(XMM1)), operand2: Some(Direct(XMM5)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 56, 33, 205], OperandSize::Dword) }

fn pmovsxbd_2() { run_test(&Instruction { mnemonic: Mnemonic::PMOVSXBD, operand1: Some(Direct(XMM4)), operand2: Some(IndirectDisplaced(EDI, 977278461, Some(OperandSize::Dword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 56, 33, 167, 253, 21, 64, 58], OperandSize::Dword) }

fn
() { run_test(&Instruction { mnemonic: Mnemonic::PMOVSXBD, operand1: Some(Direct(XMM6)), operand2: Some(Direct(XMM6)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 56, 33, 246], OperandSize::Qword) }

fn pmovsxbd_4() { run_test(&Instruction { mnemonic: Mnemonic::PMOVSXBD, operand1: Some(Direct(XMM5)), operand2: Some(IndirectScaledIndexedDisplaced(RDI, RBX, Two, 1709813562, Some(OperandSize::Dword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 56, 33, 172, 95, 58, 175, 233, 101], OperandSize::Qword) }
pmovsxbd_3
identifier_name
pmovsxbd.rs
use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;

fn pmovsxbd_1() { run_test(&Instruction { mnemonic: Mnemonic::PMOVSXBD, operand1: Some(Direct(XMM1)), operand2: Some(Direct(XMM5)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 56, 33, 205], OperandSize::Dword) }

fn pmovsxbd_2() { run_test(&Instruction { mnemonic: Mnemonic::PMOVSXBD, operand1: Some(Direct(XMM4)), operand2: Some(IndirectDisplaced(EDI, 977278461, Some(OperandSize::Dword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 56, 33, 167, 253, 21, 64, 58], OperandSize::Dword) }

fn pmovsxbd_3() { run_test(&Instruction { mnemonic: Mnemonic::PMOVSXBD, operand1: Some(Direct(XMM6)), operand2: Some(Direct(XMM6)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 56, 33, 246], OperandSize::Qword) }

fn pmovsxbd_4()
{ run_test(&Instruction { mnemonic: Mnemonic::PMOVSXBD, operand1: Some(Direct(XMM5)), operand2: Some(IndirectScaledIndexedDisplaced(RDI, RBX, Two, 1709813562, Some(OperandSize::Dword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 56, 33, 172, 95, 58, 175, 233, 101], OperandSize::Qword) }
identifier_body
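The four records above exercise encodings of PMOVSXBD, which sign-extends each of the four low bytes of the source into a 32-bit lane of the destination. A rough Python model of that lane semantics, purely for illustration — the tests themselves check instruction encodings, not execution:

def pmovsxbd(src_bytes):
    # Sign-extend the four low source bytes into four 32-bit lanes (sketch).
    def sign_extend_8(b):
        return b - 0x100 if b >= 0x80 else b
    return [sign_extend_8(b) & 0xFFFFFFFF for b in src_bytes[:4]]

assert pmovsxbd([0x7F, 0x80, 0x01, 0xFF]) == [0x7F, 0xFFFFFF80, 0x01, 0xFFFFFFFF]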
13.rs
use std::fmt;
use std::fs::File;
use std::io::prelude::*;
use std::collections::HashMap;

fn get_input() -> std::io::Result<String> {
    let mut file = File::open("13.txt")?;
    let mut contents = String::new();
    file.read_to_string(&mut contents)?;
    Ok(contents)
}

type Point = (isize, isize);
type Tracks = HashMap<Point, Vec<Point>>;

#[derive(Clone)]
struct Cart {
    position: Point,
    direction: Point,
    decision: usize
}

impl fmt::Debug for Cart {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:?}", (self.position, match self.direction {
            (-1, 0) => '<',
            (1, 0) => '>',
            (0, -1) => '^',
            (0, 1) => 'v',
            _ => ' '
        }))
    }
}

impl Cart {
    fn new(position: Point, direction: Point) -> Self {
        Self { position, direction, decision: 0 }
    }

    fn move_cart(&mut self, neighbors: &[Point]) {
        let (x, y) = self.position;
        let (dx, dy) = self.direction;
        // Left, Straight, Right
        let targets = [(0, -1), (1, 0), (0, 1)].into_iter()
            .map(|(za, zb)| (x + dx * za - dy * zb, y + dx * zb + dy * za))
            .filter(|target| neighbors.contains(target))
            .collect::<Vec<_>>();
        let (nx, ny) = match targets.len() {
            1 => targets[0],
            3 => {
                let target = targets[self.decision];
                self.decision = (self.decision + 1) % 3;
                target
            },
            _ => return
        };
        self.position = (nx, ny);
        self.direction = (nx - x, ny - y);
    }
}

fn
(input: &str) -> (Tracks, Vec<Cart>) {
    input.lines()
        .enumerate()
        .flat_map(|(j, line)| {
            let chars: Vec<char> = line.chars().collect();
            chars.iter()
                .cloned()
                .enumerate()
                .filter_map(|(i, c)| {
                    let (x, y) = (i as isize, j as isize);
                    let horizontal_chars = ['-', '+', '<', '>'];
                    let neighbors = match (c, chars.get(i + 1)) {
                        ('/', Some(nc)) if horizontal_chars.contains(nc) => Some(vec![(x + 1, y), (x, y + 1)]),
                        ('/', _) => Some(vec![(x - 1, y), (x, y - 1)]),
                        ('\\', Some(nc)) if horizontal_chars.contains(nc) => Some(vec![(x + 1, y), (x, y - 1)]),
                        ('\\', _) => Some(vec![(x - 1, y), (x, y + 1)]),
                        ('+', _) => Some(vec![(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]),
                        ('-', _) | ('<', _) | ('>', _) => Some(vec![(x - 1, y), (x + 1, y)]),
                        ('|', _) | ('^', _) | ('v', _) => Some(vec![(x, y - 1), (x, y + 1)]),
                        _ => None
                    };
                    let cart_direction = match c {
                        '<' => Some((-1, 0)),
                        '>' => Some((1, 0)),
                        '^' => Some((0, -1)),
                        'v' => Some((0, 1)),
                        _ => None
                    };
                    neighbors
                        .map(|n| ((x, y), n, cart_direction.map(|d| Cart::new((x, y), d))))
                })
                .collect::<Vec<_>>()
        })
        .fold((Tracks::new(), Vec::new()), |(mut tracks, mut carts), (k, v, cart)| {
            tracks.insert(k, v);
            cart.map(|c| carts.push(c));
            (tracks, carts)
        })
}

fn tick(tracks: &Tracks, carts: &mut Vec<Cart>) -> Vec<Point> {
    let mut collisions = Vec::new();
    let mut i = 0;
    carts.sort_unstable_by_key(|cart| (cart.position.1, cart.position.0));
    while i < carts.len() {
        let new_position = {
            let cart = carts.get_mut(i).unwrap();
            let neighbors = tracks.get(&cart.position).unwrap();
            cart.move_cart(neighbors);
            cart.position
        };
        if let Some(j) = carts.iter().enumerate().position(|(j, cart)| j != i && cart.position == new_position) {
            carts.retain(|cart| cart.position != new_position);
            collisions.push(new_position);
            if j < i { i -= 1; }
        }
        i += 1;
    }
    collisions
}

fn main() {
    let input = get_input().unwrap();
    let (tracks, mut carts) = parse(&input);
    let original_carts = carts.clone();
    loop {
        let collisions = tick(&tracks, &mut carts);
        if let Some((x, y)) = collisions.into_iter().next() {
            println!("Part 1: {},{}", x, y);
            break;
        }
    }
    let mut carts = original_carts;
    while carts.len() > 1 {
        tick(&tracks, &mut carts);
    }
    carts.into_iter().next()
        .map(|cart| println!("Part 2: {},{}", cart.position.0, cart.position.1));
}
parse
identifier_name
13.rs
use std::fmt;
use std::fs::File;
use std::io::prelude::*;
use std::collections::HashMap;

fn get_input() -> std::io::Result<String> {
    let mut file = File::open("13.txt")?;
    let mut contents = String::new();
    file.read_to_string(&mut contents)?;
    Ok(contents)
}

type Point = (isize, isize);
type Tracks = HashMap<Point, Vec<Point>>;

#[derive(Clone)]
struct Cart {
    position: Point,
    direction: Point,
    decision: usize
}

impl fmt::Debug for Cart {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:?}", (self.position, match self.direction {
            (-1, 0) => '<',
            (1, 0) => '>',
            (0, -1) => '^',
            (0, 1) => 'v',
            _ => ' '
        }))
    }
}

impl Cart {
    fn new(position: Point, direction: Point) -> Self {
        Self { position, direction, decision: 0 }
    }

    fn move_cart(&mut self, neighbors: &[Point]) {
        let (x, y) = self.position;
        let (dx, dy) = self.direction;
        // Left, Straight, Right
        let targets = [(0, -1), (1, 0), (0, 1)].into_iter()
            .map(|(za, zb)| (x + dx * za - dy * zb, y + dx * zb + dy * za))
            .filter(|target| neighbors.contains(target))
            .collect::<Vec<_>>();
        let (nx, ny) = match targets.len() {
            1 => targets[0],
            3 => {
                let target = targets[self.decision];
                self.decision = (self.decision + 1) % 3;
                target
            },
            _ => return
        };
        self.position = (nx, ny);
        self.direction = (nx - x, ny - y);
    }
}

fn parse(input: &str) -> (Tracks, Vec<Cart>) {
    input.lines()
        .enumerate()
        .flat_map(|(j, line)| {
            let chars: Vec<char> = line.chars().collect();
            chars.iter()
                .cloned()
                .enumerate()
                .filter_map(|(i, c)| {
                    let (x, y) = (i as isize, j as isize);
                    let horizontal_chars = ['-', '+', '<', '>'];
                    let neighbors = match (c, chars.get(i + 1)) {
                        ('/', Some(nc)) if horizontal_chars.contains(nc) => Some(vec![(x + 1, y), (x, y + 1)]),
                        ('/', _) => Some(vec![(x - 1, y), (x, y - 1)]),
                        ('\\', Some(nc)) if horizontal_chars.contains(nc) => Some(vec![(x + 1, y), (x, y - 1)]),
                        ('\\', _) => Some(vec![(x - 1, y), (x, y + 1)]),
                        ('+', _) => Some(vec![(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]),
                        ('-', _) | ('<', _) | ('>', _) => Some(vec![(x - 1, y), (x + 1, y)]),
                        ('|', _) | ('^', _) | ('v', _) => Some(vec![(x, y - 1), (x, y + 1)]),
                        _ => None
                        '>' => Some((1, 0)),
                        '^' => Some((0, -1)),
                        'v' => Some((0, 1)),
                        _ => None
                    };
                    neighbors
                        .map(|n| ((x, y), n, cart_direction.map(|d| Cart::new((x, y), d))))
                })
                .collect::<Vec<_>>()
        })
        .fold((Tracks::new(), Vec::new()), |(mut tracks, mut carts), (k, v, cart)| {
            tracks.insert(k, v);
            cart.map(|c| carts.push(c));
            (tracks, carts)
        })
}

fn tick(tracks: &Tracks, carts: &mut Vec<Cart>) -> Vec<Point> {
    let mut collisions = Vec::new();
    let mut i = 0;
    carts.sort_unstable_by_key(|cart| (cart.position.1, cart.position.0));
    while i < carts.len() {
        let new_position = {
            let cart = carts.get_mut(i).unwrap();
            let neighbors = tracks.get(&cart.position).unwrap();
            cart.move_cart(neighbors);
            cart.position
        };
        if let Some(j) = carts.iter().enumerate().position(|(j, cart)| j != i && cart.position == new_position) {
            carts.retain(|cart| cart.position != new_position);
            collisions.push(new_position);
            if j < i { i -= 1; }
        }
        i += 1;
    }
    collisions
}

fn main() {
    let input = get_input().unwrap();
    let (tracks, mut carts) = parse(&input);
    let original_carts = carts.clone();
    loop {
        let collisions = tick(&tracks, &mut carts);
        if let Some((x, y)) = collisions.into_iter().next() {
            println!("Part 1: {},{}", x, y);
            break;
        }
    }
    let mut carts = original_carts;
    while carts.len() > 1 {
        tick(&tracks, &mut carts);
    }
    carts.into_iter().next()
        .map(|cart| println!("Part 2: {},{}", cart.position.0, cart.position.1));
}
                    };
                    let cart_direction = match c {
                        '<' => Some((-1, 0)),
random_line_split
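The `(x + dx * za - dy * zb, y + dx * zb + dy * za)` expression in `move_cart` is a 2D rotation: the local offsets (0,-1), (1,0), (0,1) — left, straight, right relative to the cart — are rotated into world coordinates by the heading (dx, dy). A small Python check of that arithmetic (function name invented for illustration):

def candidate_targets(x, y, dx, dy):
    # Rotate the local offsets (left, straight, right) by the heading (dx, dy),
    # mirroring the (za, zb) arithmetic in the Rust above.
    return [(x + dx * za - dy * zb, y + dx * zb + dy * za)
            for za, zb in [(0, -1), (1, 0), (0, 1)]]

# A cart at (5, 5) heading right (dx=1, dy=0) may turn up, go straight, or turn down:
assert candidate_targets(5, 5, 1, 0) == [(5, 4), (6, 5), (5, 6)]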
metadata_builder.py
# -*- coding: utf-8 -*-

# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Base abstract class for metadata builders."""

import abc

_ABC = abc.ABCMeta("ABC", (object,), {"__slots__": ()})


class MetadataBuilder(_ABC):
    """Abstract base class for metadata builders."""

    @abc.abstractmethod
    def get_metadata(self):
        """Returns the current metadata as a dictionary."""

    @abc.abstractmethod
    def get_metadata_protobuf(self):
        """Returns the current metadata as ExplanationMetadata protobuf"""
# you may not use this file except in compliance with the License.
random_line_split
metadata_builder.py
# -*- coding: utf-8 -*-

# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Base abstract class for metadata builders."""

import abc

_ABC = abc.ABCMeta("ABC", (object,), {"__slots__": ()})


class MetadataBuilder(_ABC):
    """Abstract base class for metadata builders."""

    @abc.abstractmethod
    def get_metadata(self):
        """Returns the current metadata as a dictionary."""

    @abc.abstractmethod
    def
(self):
        """Returns the current metadata as ExplanationMetadata protobuf"""
get_metadata_protobuf
identifier_name
metadata_builder.py
# -*- coding: utf-8 -*-

# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Base abstract class for metadata builders."""

import abc

_ABC = abc.ABCMeta("ABC", (object,), {"__slots__": ()})


class MetadataBuilder(_ABC):
    """Abstract base class for metadata builders."""

    @abc.abstractmethod
    def get_metadata(self):
    @abc.abstractmethod
    def get_metadata_protobuf(self):
        """Returns the current metadata as ExplanationMetadata protobuf"""
"""Returns the current metadata as a dictionary."""
identifier_body
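The `_ABC = abc.ABCMeta("ABC", (object,), {"__slots__": ()})` line is a Python 2/3-compatible spelling of `abc.ABC`: a base class whose abstract methods must all be overridden before the class can be instantiated. A hedged sketch of how a concrete builder would plug in — the subclass name and its fields are invented for illustration:

import abc

class MetadataBuilder(abc.ABC):  # condensed re-declaration of the class above
    @abc.abstractmethod
    def get_metadata(self): ...
    @abc.abstractmethod
    def get_metadata_protobuf(self): ...

class DictMetadataBuilder(MetadataBuilder):  # hypothetical concrete builder
    def __init__(self, metadata):
        self._metadata = dict(metadata)
    def get_metadata(self):
        return dict(self._metadata)
    def get_metadata_protobuf(self):
        raise NotImplementedError("a real builder would return ExplanationMetadata here")

assert DictMetadataBuilder({"inputs": {}}).get_metadata() == {"inputs": {}}
# Instantiating MetadataBuilder itself raises TypeError: its methods are abstract.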
kendo-dojo.js
(function($, window) {
    var dojo = {
        postSnippet: function (snippet, baseUrl) {
            snippet = dojo.fixCDNReferences(snippet);
            snippet = dojo.addBaseRedirectTag(snippet, baseUrl);
            snippet = dojo.addConsoleScript(snippet);
            snippet = dojo.fixLineEndings(snippet);
            snippet = dojo.replaceCommon(snippet, window.kendoCommonFile);
            snippet = dojo.replaceTheme(snippet, window.kendoTheme);
            snippet = window.btoa(encodeURIComponent(snippet));

            var form = $('<form method="post" action="' + dojo.configuration.url + '" target="_blank" />').hide().appendTo(document.body);
            $("<input name='snippet'>").val(snippet).appendTo(form);
            if ($("#mobile-application-container").length) {
                $("<input name='mode'>").val("ios7").appendTo(form);
            }
            form.submit();
        },

        replaceCommon: function(code, common) {
            if (common)
            return code;
        },

        replaceTheme: function(code, theme) {
            if (theme) {
                code = code.replace(/default\.min\.css/g, theme + ".min.css");
            }
            return code;
        },

        addBaseRedirectTag: function (code, baseUrl) {
            return code.replace(
                '<head>',
                '<head>\n' +
                ' <base href="' + baseUrl + '">\n' +
                ' <style>html { font-size: 14px; font-family: Arial, Helvetica, sans-serif; }</style>'
            );
        },

        addConsoleScript: function (code) {
            if (code.indexOf("kendoConsole") !== -1) {
                var styleReference = ' <link rel="stylesheet" href="../content/shared/styles/examples-offline.css">\n';
                var scriptReference = ' <script src="../content/shared/js/console.js"></script>\n';
                code = code.replace("</head>", styleReference + scriptReference + "</head>");
            }
            return code;
        },

        fixLineEndings: function (code) {
            return code.replace(/\n/g, "&#10;");
        },

        fixCDNReferences: function (code) {
            return code.replace(/<head>[\s\S]*<\/head>/, function (match) {
                return match
                    .replace(/src="\/?/g, "src=\"" + dojo.configuration.cdnRoot + "/")
                    .replace(/href="\/?/g, "href=\"" + dojo.configuration.cdnRoot + "/");
            });
        }
    };

    $.extend(window, { dojo: dojo });
})(jQuery, window);
{ code = code.replace(/common\.min\.css/, common + ".min.css"); }
conditional_block
kendo-dojo.js
(function($, window) {
    var dojo = {
        postSnippet: function (snippet, baseUrl) {
            snippet = dojo.fixCDNReferences(snippet);
            snippet = dojo.addBaseRedirectTag(snippet, baseUrl);
            snippet = dojo.addConsoleScript(snippet);
            snippet = dojo.fixLineEndings(snippet);
            snippet = dojo.replaceCommon(snippet, window.kendoCommonFile);
            snippet = dojo.replaceTheme(snippet, window.kendoTheme);
            snippet = window.btoa(encodeURIComponent(snippet));

            var form = $('<form method="post" action="' + dojo.configuration.url + '" target="_blank" />').hide().appendTo(document.body);
            $("<input name='snippet'>").val(snippet).appendTo(form);
            if ($("#mobile-application-container").length) {
                $("<input name='mode'>").val("ios7").appendTo(form);
            }
            form.submit();
        },

        replaceCommon: function(code, common) {
            if (common) {
                code = code.replace(/common\.min\.css/, common + ".min.css");
            }
            return code;
        },

        replaceTheme: function(code, theme) {
            if (theme) {
                code = code.replace(/default\.min\.css/g, theme + ".min.css");
            }
            return code;
        },

        addBaseRedirectTag: function (code, baseUrl) {
                ' <style>html { font-size: 14px; font-family: Arial, Helvetica, sans-serif; }</style>'
            );
        },

        addConsoleScript: function (code) {
            if (code.indexOf("kendoConsole") !== -1) {
                var styleReference = ' <link rel="stylesheet" href="../content/shared/styles/examples-offline.css">\n';
                var scriptReference = ' <script src="../content/shared/js/console.js"></script>\n';
                code = code.replace("</head>", styleReference + scriptReference + "</head>");
            }
            return code;
        },

        fixLineEndings: function (code) {
            return code.replace(/\n/g, "&#10;");
        },

        fixCDNReferences: function (code) {
            return code.replace(/<head>[\s\S]*<\/head>/, function (match) {
                return match
                    .replace(/src="\/?/g, "src=\"" + dojo.configuration.cdnRoot + "/")
                    .replace(/href="\/?/g, "href=\"" + dojo.configuration.cdnRoot + "/");
            });
        }
    };

    $.extend(window, { dojo: dojo });
})(jQuery, window);
            return code.replace(
                '<head>',
                '<head>\n' +
                ' <base href="' + baseUrl + '">\n' +
random_line_split
logIn.js
/*jslint browser: true*/
/*global $, jQuery, alert*/

(function ($) {
    "use strict";

    $(document).ready(function () {
        $("input[name=dob]").datepicker({
            dateFormat: 'yy-mm-dd',
            inline: true,
            showOtherMonths: true
        });
    });

    $(document).ready(function () {
        $("input[name='rep_password']").focusout(function () {
            var p1 = $('input[name="password"]').val(),
                p2 = $('input[name="rep_password"]').val();
            if (p1 !== p2) {
                $('#passDM').show(300);
            } else if (p1 === "") {
                $('#passDM').show(300);
            } else {
                $('#passDM').hide(300);
            }
        });
    });

    $(document).ready(function () {
        $("input[name=password]").focusin(function () {
            $('#passDM').hide(300);
        });
$("input[name=rep_password]").focusin(function () { $('#passDM').hide(300); }); }); }(jQuery));
random_line_split
logIn.js
/*jslint browser: true*/
/*global $, jQuery, alert*/

(function ($) {
    "use strict";

    $(document).ready(function () {
        $("input[name=dob]").datepicker({
            dateFormat: 'yy-mm-dd',
            inline: true,
            showOtherMonths: true
        });
    });

    $(document).ready(function () {
        $("input[name='rep_password']").focusout(function () {
            var p1 = $('input[name="password"]').val(),
                p2 = $('input[name="rep_password"]').val();
            if (p1 !== p2)
else if (p1 === "") { $('#passDM').show(300); } else { $('#passDM').hide(300); } }); }); $(document).ready(function () { $("input[name=password]").focusin(function () { $('#passDM').hide(300); }); $("input[name=rep_password]").focusin(function () { $('#passDM').hide(300); }); }); }(jQuery));
{ $('#passDM').show(300); }
conditional_block
transpiler.py
import os
import os.path
from collections import OrderedDict
from typing import List, Dict, Any, Tuple, Union

# noinspection PyPackageRequirements
import yaml  # from pyyaml

import dectree.propfuncs as propfuncs
from .codegen import gen_code
from .types import TypeDefs


def transpile(src_file, out_file=None, **options: Dict[str, Any]) -> str:
    """
    Generate a decision tree function by transpiling *src_file* to *out_file* using the given *options*.
    Return the path to the generated file, if any, otherwise return ``None``.

    :param src_file: A file descriptor or a path-like object to the decision tree definition source file (YAML format)
    :param out_file: A file descriptor or a path-like object to the module output file (Python)
    :param options: options, refer to `dectree --help`
    :return: A path to the written module output file (Python) or None if *out_file* is a file descriptor
    """
    try:
        fd = open(src_file)
        src_path = src_file
    except TypeError:
        fd = src_file
        src_path = None
    try:
        src_code = yaml.load(fd)
    finally:
        if src_path:
            fd.close()

    if not src_code:
        raise ValueError('Empty decision tree definition')

    sections = ('types', 'inputs', 'outputs', 'rules')
    if not all([section in src_code for section in sections]):
        raise ValueError('Invalid decision tree definition: missing section {} or all of them'.format(sections))

    for section in sections:
        if not src_code[section]:
            raise ValueError("Invalid decision tree definition: section '{}' is empty".format(section))

    types = _normalize_types(_to_omap(src_code['types'], recursive=True))
    input_defs = _to_omap(src_code['inputs'])
    output_defs = _to_omap(src_code['outputs'])
    rules = _normalize_rules(src_code['rules'])

    src_options = dict(src_code.get('options') or {})
    src_options.update(options or {})

    py_code = gen_code(types, input_defs, output_defs, rules, **src_options)

    if out_file:
        try:
            fd = open(out_file, 'w')
            out_path = out_file
        except TypeError:
            fd = out_file
            out_path = None
    else:
        assert src_path
        dir_name = os.path.dirname(src_path)
        base_name = os.path.splitext(os.path.basename(src_path))[0]
        out_path = os.path.join(dir_name, base_name + ".py")
        fd = open(out_path, mode='w')

    fd.write(py_code)
    if out_path is not None:
        fd.close()

    return out_path


def _normalize_types(types: Dict[str, Dict[str, str]]) -> TypeDefs:
    type_defs = OrderedDict()
    for type_name, type_properties in types.items():
        type_def = {}
        type_defs[type_name] = type_def
        for prop_name, prop_value in type_properties.items():
            try:
                prop_result = eval(prop_value, vars(propfuncs), {})
            except Exception:
                raise ValueError('Illegal value for property "{}" of type "{}": {}'.format(prop_name, type_name, prop_value))
            func_params, func_body = prop_result
            type_def[prop_name] = prop_value, func_params, func_body
    return type_defs


def _normalize_rules(raw_rules):
    return [_normalize_rule(raw_rule) for raw_rule in raw_rules]


def _normalize_rule(raw_rule: Union[str, List]):
    if isinstance(raw_rule, str):
        raw_rule = _load_raw_rule(raw_rule)
    return _parse_raw_rule(raw_rule)


def _parse_raw_rule(raw_rule: List[Union[Dict, List]]) -> List[Union[Tuple, List]]:
    # print(raw_rule)
    n = len(raw_rule)
    parsed_rule = []
    for i in range(n):
        item = raw_rule[i]
        stmt_part, stmt_body, assignment = None, None, None
        if isinstance(item, dict):
            stmt_part, stmt_body = dict(item).popitem()
        else:
            assignment = item
        if stmt_part:
            stmt_tokens = stmt_part.split(None, 1)
            if len(stmt_tokens) == 0:
                raise ValueError('illegal rule part: {}'.format(stmt_part))
            keyword = stmt_tokens[0]
            if keyword == 'if':
                if i != 0:
                    raise ValueError('"if" must be first in rule: {}'.format(stmt_part))
                if len(stmt_tokens) != 2 or not stmt_tokens[1]:
                    raise ValueError('illegal rule part: {}'.format(stmt_part))
                condition = stmt_tokens[1]
            elif keyword == 'else':
                if len(stmt_tokens) == 1:
                    if i < n - 2:
                        raise ValueError('"else" must be last in rule: {}'.format(stmt_part))
                    condition = None
                else:
                    elif_stmt_tokens = stmt_tokens[1].split(None, 1)
                    if elif_stmt_tokens[0] == 'if':
                        keyword, condition = 'elif', elif_stmt_tokens[1]
                    else:
                        raise ValueError('illegal rule part: {}'.format(stmt_part))
            elif keyword == 'elif':
                if len(stmt_tokens) != 2 or not stmt_tokens[1]:
                    raise ValueError('illegal rule part: {}'.format(stmt_part))
                condition = stmt_tokens[1]
            else:
                raise ValueError('illegal rule part: {}'.format(stmt_part))
            if condition:
                parsed_rule.append((keyword, condition, _parse_raw_rule(stmt_body)))
            else:
                parsed_rule.append((keyword, _parse_raw_rule(stmt_body)))
        elif assignment:
            # noinspection PyUnresolvedReferences
            assignment_parts = assignment.split(None, 2)
            if len(assignment_parts) != 3 \
                    or not assignment_parts[0].isidentifier() \
                    or assignment_parts[1] != '=' \
                    or not assignment_parts[2]:
                raise ValueError('illegal rule part: {}'.format(assignment))
            parsed_rule.append(('=', assignment_parts[0], assignment_parts[2]))
        else:
            raise ValueError('illegal rule part: {}'.format(item))
    return parsed_rule


def
(rule_code: str):
    raw_lines = rule_code.split('\n')
    yml_lines = []
    for raw_line in raw_lines:
        i = _count_leading_spaces(raw_line)
        indent = raw_line[0:i]
        content = raw_line[i:]
        if content:
            if content[0] != '#':
                yml_lines.append(indent + '- ' + content)
            else:
                yml_lines.append(indent + content)
    return yaml.load('\n'.join(yml_lines))


def _count_leading_spaces(s: str):
    i = 0
    for i in range(len(s)):
        if not s[i].isspace():
            return i
    return i


def _to_omap(list_or_dict, recursive=False):
    if not list_or_dict:
        return list_or_dict
    if _is_list_of_one_key_dicts(list_or_dict):
        dict_copy = OrderedDict()
        for item in list_or_dict:
            key, item = dict(item).popitem()
            dict_copy[key] = _to_omap(item) if recursive else item
        return dict_copy
    if recursive:
        if isinstance(list_or_dict, list):
            list_copy = []
            for item in list_or_dict:
                list_copy.append(_to_omap(item, recursive=True))
            return list_copy
        if isinstance(list_or_dict, dict):
            dict_copy = OrderedDict()
            for key, item in list_or_dict.items():
                dict_copy[key] = _to_omap(item, recursive=True)
            return dict_copy
    return list_or_dict


def _is_list_of_one_key_dicts(l):
    try:
        for item in l:
            # noinspection PyUnusedLocal
            (k, v), = item.items()
    except (AttributeError, TypeError):
        return False
    return True
_load_raw_rule
identifier_name
transpiler.py
import os
import os.path
from collections import OrderedDict
from typing import List, Dict, Any, Tuple, Union

# noinspection PyPackageRequirements
import yaml  # from pyyaml

import dectree.propfuncs as propfuncs
from .codegen import gen_code
from .types import TypeDefs


def transpile(src_file, out_file=None, **options: Dict[str, Any]) -> str:
    """
    Generate a decision tree function by transpiling *src_file* to *out_file* using the given *options*.
    Return the path to the generated file, if any, otherwise return ``None``.

    :param src_file: A file descriptor or a path-like object to the decision tree definition source file (YAML format)
    :param out_file: A file descriptor or a path-like object to the module output file (Python)
    :param options: options, refer to `dectree --help`
    :return: A path to the written module output file (Python) or None if *out_file* is a file descriptor
    """
    try:
        fd = open(src_file)
        src_path = src_file
    except TypeError:
        fd = src_file
        src_path = None
    try:
        src_code = yaml.load(fd)
    finally:
        if src_path:
            fd.close()

    if not src_code:
        raise ValueError('Empty decision tree definition')

    sections = ('types', 'inputs', 'outputs', 'rules')
    if not all([section in src_code for section in sections]):
        raise ValueError('Invalid decision tree definition: missing section {} or all of them'.format(sections))

    for section in sections:
        if not src_code[section]:
            raise ValueError("Invalid decision tree definition: section '{}' is empty".format(section))

    types = _normalize_types(_to_omap(src_code['types'], recursive=True))
    input_defs = _to_omap(src_code['inputs'])
    output_defs = _to_omap(src_code['outputs'])
    rules = _normalize_rules(src_code['rules'])

    src_options = dict(src_code.get('options') or {})
    src_options.update(options or {})

    py_code = gen_code(types, input_defs, output_defs, rules, **src_options)

    if out_file:
        try:
            fd = open(out_file, 'w')
            out_path = out_file
        except TypeError:
            fd = out_file
            out_path = None
    else:
        assert src_path
        dir_name = os.path.dirname(src_path)
        base_name = os.path.splitext(os.path.basename(src_path))[0]
        out_path = os.path.join(dir_name, base_name + ".py")
        fd = open(out_path, mode='w')

    fd.write(py_code)
    if out_path is not None:
        fd.close()

    return out_path


def _normalize_types(types: Dict[str, Dict[str, str]]) -> TypeDefs:
    type_defs = OrderedDict()
    for type_name, type_properties in types.items():
        type_def = {}
        type_defs[type_name] = type_def
        for prop_name, prop_value in type_properties.items():
            try:
                prop_result = eval(prop_value, vars(propfuncs), {})
            except Exception:
                raise ValueError('Illegal value for property "{}" of type "{}": {}'.format(prop_name, type_name, prop_value))
            func_params, func_body = prop_result
            type_def[prop_name] = prop_value, func_params, func_body
    return type_defs


def _normalize_rules(raw_rules):
    return [_normalize_rule(raw_rule) for raw_rule in raw_rules]


def _normalize_rule(raw_rule: Union[str, List]):
    if isinstance(raw_rule, str):
        raw_rule = _load_raw_rule(raw_rule)
    return _parse_raw_rule(raw_rule)


def _parse_raw_rule(raw_rule: List[Union[Dict, List]]) -> List[Union[Tuple, List]]:
    # print(raw_rule)
    n = len(raw_rule)
    parsed_rule = []
    for i in range(n):
        item = raw_rule[i]
        stmt_part, stmt_body, assignment = None, None, None
        if isinstance(item, dict):
            stmt_part, stmt_body = dict(item).popitem()
        else:
            assignment = item
        if stmt_part:
            stmt_tokens = stmt_part.split(None, 1)
            if len(stmt_tokens) == 0:
                raise ValueError('illegal rule part: {}'.format(stmt_part))
            keyword = stmt_tokens[0]
            if keyword == 'if':
                if i != 0:
                    raise ValueError('"if" must be first in rule: {}'.format(stmt_part))
                if len(stmt_tokens) != 2 or not stmt_tokens[1]:
                    raise ValueError('illegal rule part: {}'.format(stmt_part))
                condition = stmt_tokens[1]
            elif keyword == 'else':
                if len(stmt_tokens) == 1:
                    if i < n - 2:
                        raise ValueError('"else" must be last in rule: {}'.format(stmt_part))
                    condition = None
                else:
                    elif_stmt_tokens = stmt_tokens[1].split(None, 1)
                    if elif_stmt_tokens[0] == 'if':
                        keyword, condition = 'elif', elif_stmt_tokens[1]
                    else:
                        raise ValueError('illegal rule part: {}'.format(stmt_part))
            elif keyword == 'elif':
                if len(stmt_tokens) != 2 or not stmt_tokens[1]:
                    raise ValueError('illegal rule part: {}'.format(stmt_part))
                condition = stmt_tokens[1]
            else:
                raise ValueError('illegal rule part: {}'.format(stmt_part))
            if condition:
                parsed_rule.append((keyword, condition, _parse_raw_rule(stmt_body)))
            else:
                parsed_rule.append((keyword, _parse_raw_rule(stmt_body)))
        elif assignment:
            # noinspection PyUnresolvedReferences
            assignment_parts = assignment.split(None, 2)
            if len(assignment_parts) != 3 \
                    or not assignment_parts[0].isidentifier() \
                    or assignment_parts[1] != '=' \
                    or not assignment_parts[2]:
                raise ValueError('illegal rule part: {}'.format(assignment))
            parsed_rule.append(('=', assignment_parts[0], assignment_parts[2]))
        else:
    return parsed_rule


def _load_raw_rule(rule_code: str):
    raw_lines = rule_code.split('\n')
    yml_lines = []
    for raw_line in raw_lines:
        i = _count_leading_spaces(raw_line)
        indent = raw_line[0:i]
        content = raw_line[i:]
        if content:
            if content[0] != '#':
                yml_lines.append(indent + '- ' + content)
            else:
                yml_lines.append(indent + content)
    return yaml.load('\n'.join(yml_lines))


def _count_leading_spaces(s: str):
    i = 0
    for i in range(len(s)):
        if not s[i].isspace():
            return i
    return i


def _to_omap(list_or_dict, recursive=False):
    if not list_or_dict:
        return list_or_dict
    if _is_list_of_one_key_dicts(list_or_dict):
        dict_copy = OrderedDict()
        for item in list_or_dict:
            key, item = dict(item).popitem()
            dict_copy[key] = _to_omap(item) if recursive else item
        return dict_copy
    if recursive:
        if isinstance(list_or_dict, list):
            list_copy = []
            for item in list_or_dict:
                list_copy.append(_to_omap(item, recursive=True))
            return list_copy
        if isinstance(list_or_dict, dict):
            dict_copy = OrderedDict()
            for key, item in list_or_dict.items():
                dict_copy[key] = _to_omap(item, recursive=True)
            return dict_copy
    return list_or_dict


def _is_list_of_one_key_dicts(l):
    try:
        for item in l:
            # noinspection PyUnusedLocal
            (k, v), = item.items()
    except (AttributeError, TypeError):
        return False
    return True
            raise ValueError('illegal rule part: {}'.format(item))
conditional_block
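To make the rule pipeline concrete: `_load_raw_rule` rewrites each content line of a rule as a YAML list item (prefixing `- `), so an indented rule parses into nested lists of one-key dicts and strings, which `_parse_raw_rule` then turns into keyword tuples. A sketch of the intermediate shapes for a toy two-branch rule — the rule text and names are invented; only the tuple layout follows the code above:

# Toy rule; each non-comment line becomes a YAML list item.
rule_text = """
if radiance == high
    cloudy = true
else
    cloudy = false
"""
# After _load_raw_rule, yaml.load yields a list of one-key dicts and strings:
raw_rule = [
    {'if radiance == high': ['cloudy = true']},
    {'else': ['cloudy = false']},
]
# _parse_raw_rule then emits keyword tuples with recursively parsed bodies:
parsed = [
    ('if', 'radiance == high', [('=', 'cloudy', 'true')]),
    ('else', [('=', 'cloudy', 'false')]),
]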
transpiler.py
import os
import os.path
from collections import OrderedDict
from typing import List, Dict, Any, Tuple, Union

# noinspection PyPackageRequirements
import yaml  # from pyyaml

import dectree.propfuncs as propfuncs
from .codegen import gen_code
from .types import TypeDefs


def transpile(src_file, out_file=None, **options: Dict[str, Any]) -> str:
    """
    Generate a decision tree function by transpiling *src_file* to *out_file* using the given *options*.
    Return the path to the generated file, if any, otherwise return ``None``.

    :param src_file: A file descriptor or a path-like object to the decision tree definition source file (YAML format)
    :param out_file: A file descriptor or a path-like object to the module output file (Python)
    :param options: options, refer to `dectree --help`
    :return: A path to the written module output file (Python) or None if *out_file* is a file descriptor
    """
    try:
        fd = open(src_file)
        src_path = src_file
    except TypeError:
        fd = src_file
        src_path = None
    try:
        src_code = yaml.load(fd)
    finally:
        if src_path:
            fd.close()

    if not src_code:
        raise ValueError('Empty decision tree definition')

    sections = ('types', 'inputs', 'outputs', 'rules')
    if not all([section in src_code for section in sections]):
        raise ValueError('Invalid decision tree definition: missing section {} or all of them'.format(sections))

    for section in sections:
        if not src_code[section]:
            raise ValueError("Invalid decision tree definition: section '{}' is empty".format(section))

    types = _normalize_types(_to_omap(src_code['types'], recursive=True))
    input_defs = _to_omap(src_code['inputs'])
    output_defs = _to_omap(src_code['outputs'])
    rules = _normalize_rules(src_code['rules'])

    src_options = dict(src_code.get('options') or {})
    src_options.update(options or {})

    py_code = gen_code(types, input_defs, output_defs, rules, **src_options)

    if out_file:
        try:
            fd = open(out_file, 'w')
            out_path = out_file
        dir_name = os.path.dirname(src_path)
        base_name = os.path.splitext(os.path.basename(src_path))[0]
        out_path = os.path.join(dir_name, base_name + ".py")
        fd = open(out_path, mode='w')

    fd.write(py_code)
    if out_path is not None:
        fd.close()

    return out_path


def _normalize_types(types: Dict[str, Dict[str, str]]) -> TypeDefs:
    type_defs = OrderedDict()
    for type_name, type_properties in types.items():
        type_def = {}
        type_defs[type_name] = type_def
        for prop_name, prop_value in type_properties.items():
            try:
                prop_result = eval(prop_value, vars(propfuncs), {})
            except Exception:
                raise ValueError('Illegal value for property "{}" of type "{}": {}'.format(prop_name, type_name, prop_value))
            func_params, func_body = prop_result
            type_def[prop_name] = prop_value, func_params, func_body
    return type_defs


def _normalize_rules(raw_rules):
    return [_normalize_rule(raw_rule) for raw_rule in raw_rules]


def _normalize_rule(raw_rule: Union[str, List]):
    if isinstance(raw_rule, str):
        raw_rule = _load_raw_rule(raw_rule)
    return _parse_raw_rule(raw_rule)


def _parse_raw_rule(raw_rule: List[Union[Dict, List]]) -> List[Union[Tuple, List]]:
    # print(raw_rule)
    n = len(raw_rule)
    parsed_rule = []
    for i in range(n):
        item = raw_rule[i]
        stmt_part, stmt_body, assignment = None, None, None
        if isinstance(item, dict):
            stmt_part, stmt_body = dict(item).popitem()
        else:
            assignment = item
        if stmt_part:
            stmt_tokens = stmt_part.split(None, 1)
            if len(stmt_tokens) == 0:
                raise ValueError('illegal rule part: {}'.format(stmt_part))
            keyword = stmt_tokens[0]
            if keyword == 'if':
                if i != 0:
                    raise ValueError('"if" must be first in rule: {}'.format(stmt_part))
                if len(stmt_tokens) != 2 or not stmt_tokens[1]:
                    raise ValueError('illegal rule part: {}'.format(stmt_part))
                condition = stmt_tokens[1]
            elif keyword == 'else':
                if len(stmt_tokens) == 1:
                    if i < n - 2:
                        raise ValueError('"else" must be last in rule: {}'.format(stmt_part))
                    condition = None
                else:
                    elif_stmt_tokens = stmt_tokens[1].split(None, 1)
                    if elif_stmt_tokens[0] == 'if':
                        keyword, condition = 'elif', elif_stmt_tokens[1]
                    else:
                        raise ValueError('illegal rule part: {}'.format(stmt_part))
            elif keyword == 'elif':
                if len(stmt_tokens) != 2 or not stmt_tokens[1]:
                    raise ValueError('illegal rule part: {}'.format(stmt_part))
                condition = stmt_tokens[1]
            else:
                raise ValueError('illegal rule part: {}'.format(stmt_part))
            if condition:
                parsed_rule.append((keyword, condition, _parse_raw_rule(stmt_body)))
            else:
                parsed_rule.append((keyword, _parse_raw_rule(stmt_body)))
        elif assignment:
            # noinspection PyUnresolvedReferences
            assignment_parts = assignment.split(None, 2)
            if len(assignment_parts) != 3 \
                    or not assignment_parts[0].isidentifier() \
                    or assignment_parts[1] != '=' \
                    or not assignment_parts[2]:
                raise ValueError('illegal rule part: {}'.format(stmt_part))
            parsed_rule.append(('=', assignment_parts[0], assignment_parts[2]))
        else:
            raise ValueError('illegal rule part: {}'.format(stmt_part))
    return parsed_rule


def _load_raw_rule(rule_code: str):
    raw_lines = rule_code.split('\n')
    yml_lines = []
    for raw_line in raw_lines:
        i = _count_leading_spaces(raw_line)
        indent = raw_line[0:i]
        content = raw_line[i:]
        if content:
            if content[0] != '#':
                yml_lines.append(indent + '- ' + content)
            else:
                yml_lines.append(indent + content)
    return yaml.load('\n'.join(yml_lines))


def _count_leading_spaces(s: str):
    i = 0
    for i in range(len(s)):
        if not s[i].isspace():
            return i
    return i


def _to_omap(list_or_dict, recursive=False):
    if not list_or_dict:
        return list_or_dict
    if _is_list_of_one_key_dicts(list_or_dict):
        dict_copy = OrderedDict()
        for item in list_or_dict:
            key, item = dict(item).popitem()
            dict_copy[key] = _to_omap(item) if recursive else item
        return dict_copy
    if recursive:
        if isinstance(list_or_dict, list):
            list_copy = []
            for item in list_or_dict:
                list_copy.append(_to_omap(item, recursive=True))
            return list_copy
        if isinstance(list_or_dict, dict):
            dict_copy = OrderedDict()
            for key, item in list_or_dict.items():
                dict_copy[key] = _to_omap(item, recursive=True)
            return dict_copy
    return list_or_dict


def _is_list_of_one_key_dicts(l):
    try:
        for item in l:
            # noinspection PyUnusedLocal
            (k, v), = item.items()
    except (AttributeError, TypeError):
        return False
    return True
        except TypeError:
            fd = out_file
            out_path = None
    else:
        assert src_path
random_line_split
transpiler.py
import os
import os.path
from collections import OrderedDict
from typing import List, Dict, Any, Tuple, Union

# noinspection PyPackageRequirements
import yaml  # from pyyaml

import dectree.propfuncs as propfuncs
from .codegen import gen_code
from .types import TypeDefs


def transpile(src_file, out_file=None, **options: Dict[str, Any]) -> str:
        if src_path:
            fd.close()

    if not src_code:
        raise ValueError('Empty decision tree definition')

    sections = ('types', 'inputs', 'outputs', 'rules')
    if not all([section in src_code for section in sections]):
        raise ValueError('Invalid decision tree definition: missing section {} or all of them'.format(sections))

    for section in sections:
        if not src_code[section]:
            raise ValueError("Invalid decision tree definition: section '{}' is empty".format(section))

    types = _normalize_types(_to_omap(src_code['types'], recursive=True))
    input_defs = _to_omap(src_code['inputs'])
    output_defs = _to_omap(src_code['outputs'])
    rules = _normalize_rules(src_code['rules'])

    src_options = dict(src_code.get('options') or {})
    src_options.update(options or {})

    py_code = gen_code(types, input_defs, output_defs, rules, **src_options)

    if out_file:
        try:
            fd = open(out_file, 'w')
            out_path = out_file
        except TypeError:
            fd = out_file
            out_path = None
    else:
        assert src_path
        dir_name = os.path.dirname(src_path)
        base_name = os.path.splitext(os.path.basename(src_path))[0]
        out_path = os.path.join(dir_name, base_name + ".py")
        fd = open(out_path, mode='w')

    fd.write(py_code)
    if out_path is not None:
        fd.close()

    return out_path


def _normalize_types(types: Dict[str, Dict[str, str]]) -> TypeDefs:
    type_defs = OrderedDict()
    for type_name, type_properties in types.items():
        type_def = {}
        type_defs[type_name] = type_def
        for prop_name, prop_value in type_properties.items():
            try:
                prop_result = eval(prop_value, vars(propfuncs), {})
            except Exception:
                raise ValueError('Illegal value for property "{}" of type "{}": {}'.format(prop_name, type_name, prop_value))
            func_params, func_body = prop_result
            type_def[prop_name] = prop_value, func_params, func_body
    return type_defs


def _normalize_rules(raw_rules):
    return [_normalize_rule(raw_rule) for raw_rule in raw_rules]


def _normalize_rule(raw_rule: Union[str, List]):
    if isinstance(raw_rule, str):
        raw_rule = _load_raw_rule(raw_rule)
    return _parse_raw_rule(raw_rule)


def _parse_raw_rule(raw_rule: List[Union[Dict, List]]) -> List[Union[Tuple, List]]:
    # print(raw_rule)
    n = len(raw_rule)
    parsed_rule = []
    for i in range(n):
        item = raw_rule[i]
        stmt_part, stmt_body, assignment = None, None, None
        if isinstance(item, dict):
            stmt_part, stmt_body = dict(item).popitem()
        else:
            assignment = item
        if stmt_part:
            stmt_tokens = stmt_part.split(None, 1)
            if len(stmt_tokens) == 0:
                raise ValueError('illegal rule part: {}'.format(stmt_part))
            keyword = stmt_tokens[0]
            if keyword == 'if':
                if i != 0:
                    raise ValueError('"if" must be first in rule: {}'.format(stmt_part))
                if len(stmt_tokens) != 2 or not stmt_tokens[1]:
                    raise ValueError('illegal rule part: {}'.format(stmt_part))
                condition = stmt_tokens[1]
            elif keyword == 'else':
                if len(stmt_tokens) == 1:
                    if i < n - 2:
                        raise ValueError('"else" must be last in rule: {}'.format(stmt_part))
                    condition = None
                else:
                    elif_stmt_tokens = stmt_tokens[1].split(None, 1)
                    if elif_stmt_tokens[0] == 'if':
                        keyword, condition = 'elif', elif_stmt_tokens[1]
                    else:
                        raise ValueError('illegal rule part: {}'.format(stmt_part))
            elif keyword == 'elif':
                if len(stmt_tokens) != 2 or not stmt_tokens[1]:
                    raise ValueError('illegal rule part: {}'.format(stmt_part))
                condition = stmt_tokens[1]
            else:
                raise ValueError('illegal rule part: {}'.format(stmt_part))
            if condition:
                parsed_rule.append((keyword, condition, _parse_raw_rule(stmt_body)))
            else:
                parsed_rule.append((keyword, _parse_raw_rule(stmt_body)))
        elif assignment:
            # noinspection PyUnresolvedReferences
            assignment_parts = assignment.split(None, 2)
            if len(assignment_parts) != 3 \
                    or not assignment_parts[0].isidentifier() \
                    or assignment_parts[1] != '=' \
                    or not assignment_parts[2]:
                raise ValueError('illegal rule part: {}'.format(stmt_part))
            parsed_rule.append(('=', assignment_parts[0], assignment_parts[2]))
        else:
            raise ValueError('illegal rule part: {}'.format(stmt_part))
    return parsed_rule


def _load_raw_rule(rule_code: str):
    raw_lines = rule_code.split('\n')
    yml_lines = []
    for raw_line in raw_lines:
        i = _count_leading_spaces(raw_line)
        indent = raw_line[0:i]
        content = raw_line[i:]
        if content:
            if content[0] != '#':
                yml_lines.append(indent + '- ' + content)
            else:
                yml_lines.append(indent + content)
    return yaml.load('\n'.join(yml_lines))


def _count_leading_spaces(s: str):
    i = 0
    for i in range(len(s)):
        if not s[i].isspace():
            return i
    return i


def _to_omap(list_or_dict, recursive=False):
    if not list_or_dict:
        return list_or_dict
    if _is_list_of_one_key_dicts(list_or_dict):
        dict_copy = OrderedDict()
        for item in list_or_dict:
            key, item = dict(item).popitem()
            dict_copy[key] = _to_omap(item) if recursive else item
        return dict_copy
    if recursive:
        if isinstance(list_or_dict, list):
            list_copy = []
            for item in list_or_dict:
                list_copy.append(_to_omap(item, recursive=True))
            return list_copy
        if isinstance(list_or_dict, dict):
            dict_copy = OrderedDict()
            for key, item in list_or_dict.items():
                dict_copy[key] = _to_omap(item, recursive=True)
            return dict_copy
    return list_or_dict


def _is_list_of_one_key_dicts(l):
    try:
        for item in l:
            # noinspection PyUnusedLocal
            (k, v), = item.items()
    except (AttributeError, TypeError):
        return False
    return True
""" Generate a decision tree function by transpiling *src_file* to *out_file* using the given *options*. Return the path to the generated file, if any, otherwise return ``None``. :param src_file: A file descriptor or a path-like object to the decision tree definition source file (YAML format) :param out_file: A file descriptor or a path-like object to the module output file (Python) :param options: options, refer to `dectree --help` :return: A path to the written module output file (Python) or None if *out_file* is a file descriptor """ try: fd = open(src_file) src_path = src_file except TypeError: fd = src_file src_path = None try: src_code = yaml.load(fd) finally:
identifier_body
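The three transpiler.py rows above all exercise the same public entry point. A minimal usage sketch (not part of the dataset; the dectree.transpiler module path and the file names are assumptions inferred from the imports):

from dectree.transpiler import transpile

# Writes tree.py next to tree.yml, per the dirname/basename logic in transpile()
out_path = transpile('tree.yml')
out_path = transpile('tree.yml', out_file='decision_tree.py')  # explicit output file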
auth.service.ts
import { Injectable } from '@angular/core';
import { HttpRequest } from '@angular/common/http';

import * as jwt_decode_ from 'jwt-decode';
import * as localForage from 'localforage';

const jwt_decode = jwt_decode_;

const TOKEN = 'token';
const CLIENT = 'client';
const UID = 'uid';
const CREATEDAT = 'createdAt';

@Injectable()
export class AuthService {
  private _token: string = '';
  private _clientToken: string = '';
  private _uid: string = '';
  private _createdAt: Date = null;
  private _cachedRequests: Array<HttpRequest<any>> = [];

  constructor() {}

  set token(token: string) {
    this._token = token;
    localForage.setItem(TOKEN, token);
  }

  get token(): string {
    if (!this._token) {
      localForage.getItem(TOKEN).then((val: string) => {
        this._token = val;
      });
    }
    return this._token;
  }

  set personId(uid: string) {
    localForage.setItem(UID, uid);
    this._uid = uid;
  }

  get personId(): string {
    localForage
      .getItem(UID)
      .then((val: string) => {
        this._uid = val;
      })
      .catch((err: any) => {
        // console.log('Error fetching from forage:', err);
      });
    return this._uid;
  }

  set createdAt(dateTime: Date) {
    localForage.setItem(CREATEDAT, dateTime);
    this._createdAt = dateTime;
  }

  get createdAt(): Date {
    localForage
      .getItem(CREATEDAT)
      .then((val: Date) => {
        this._createdAt = val;
      })
      .catch((err: any) => {
        // console.log('Error fetching from forage:', err);
      });
    return this._createdAt;
  }

  clearStore(): Promise<void> {
    this.createdAt = undefined;
    this._cachedRequests = [];
    this.token = '';
    this.personId = '';
    return localForage.clear();
  }
(): Date {
    if (!this.token) {
      return undefined;
    }
    const decoded: any = jwt_decode(this.token);
    if (!decoded.hasOwnProperty('exp')) {
      return undefined;
    }
    const date = new Date(0);
    date.setUTCSeconds(decoded.exp);
    return date;
  }

  isTokenExpired(): boolean {
    if (!this.token) return true;
    const date = this.getTokenExpirationDate();
    if (date === undefined) {
      return false;
    }
    return !(date.valueOf() > new Date().valueOf());
  }

  collectFailedRequest(request): void {
    this._cachedRequests.push(request);
  }

  retryFailedRequests(): void {
    // retry the requests. this method can
    // be called after the token is refreshed
    // console.log(this._cachedRequests);
  }
}
getTokenExpirationDate
identifier_name
auth.service.ts
import { Injectable } from '@angular/core';
import { HttpRequest } from '@angular/common/http';

import * as jwt_decode_ from 'jwt-decode';
import * as localForage from 'localforage';

const jwt_decode = jwt_decode_;

const TOKEN = 'token';
const CLIENT = 'client';
const UID = 'uid';
const CREATEDAT = 'createdAt';

@Injectable()
export class AuthService {
  private _token: string = '';
  private _clientToken: string = '';
  private _uid: string = '';
  private _createdAt: Date = null;
  private _cachedRequests: Array<HttpRequest<any>> = [];

  constructor() {}

  set token(token: string) {
    this._token = token;
    localForage.setItem(TOKEN, token);
  }

  get token(): string {
    if (!this._token) {
      localForage.getItem(TOKEN).then((val: string) => {
        this._token = val;
      });
    }
    return this._token;
  }

  set personId(uid: string) {
    localForage.setItem(UID, uid);
    this._uid = uid;
  }

  get personId(): string {
    localForage
      .getItem(UID)
      .then((val: string) => {
        this._uid = val;
      })
      .catch((err: any) => {
        // console.log('Error fetching from forage:', err);
      });
    return this._uid;
  }

  set createdAt(dateTime: Date) {
    localForage.setItem(CREATEDAT, dateTime);
    this._createdAt = dateTime;
  }

  get createdAt(): Date {
    localForage
      .getItem(CREATEDAT)
      .then((val: Date) => {
        this._createdAt = val;
      })
      .catch((err: any) => {
        // console.log('Error fetching from forage:', err);
      });
    return this._createdAt;
  }

  clearStore(): Promise<void> {
    this.createdAt = undefined;
    this._cachedRequests = [];
    this.token = '';
    this.personId = '';
    return localForage.clear();
  }

  getTokenExpirationDate(): Date {
    if (!this.token) {
      return undefined;
    }
    const decoded: any = jwt_decode(this.token);
    if (!decoded.hasOwnProperty('exp')) {
      return undefined;
    }
    const date = new Date(0);
    date.setUTCSeconds(decoded.exp);
    return date;
  }

  isTokenExpired(): boolean {
    if (!this.token) return true;
    const date = this.getTokenExpirationDate();
    if (date === undefined) {
      return false;
    }
    return !(date.valueOf() > new Date().valueOf());
  }

  collectFailedRequest(request): void
  retryFailedRequests(): void {
    // retry the requests. this method can
    // be called after the token is refreshed
    // console.log(this._cachedRequests);
  }
}
{ this._cachedRequests.push(request); }
identifier_body
auth.service.ts
import { Injectable } from '@angular/core';
import { HttpRequest } from '@angular/common/http';

import * as jwt_decode_ from 'jwt-decode';
import * as localForage from 'localforage';

const jwt_decode = jwt_decode_;

const TOKEN = 'token';
const CLIENT = 'client';
const UID = 'uid';
const CREATEDAT = 'createdAt';

@Injectable()
export class AuthService {
  private _token: string = '';
  private _clientToken: string = '';
  private _uid: string = '';
  private _createdAt: Date = null;
  private _cachedRequests: Array<HttpRequest<any>> = [];

  constructor() {}

  set token(token: string) {
    this._token = token;
    localForage.setItem(TOKEN, token);
  }

  get token(): string {
    if (!this._token) {
      localForage.getItem(TOKEN).then((val: string) => {
        this._token = val;
      });
    }
    return this._token;
  }

  set personId(uid: string) {
    localForage.setItem(UID, uid);
    this._uid = uid;
  }

  get personId(): string {
    localForage
      .getItem(UID)
      .then((val: string) => {
        this._uid = val;
      })
      .catch((err: any) => {
        // console.log('Error fetching from forage:', err);
      });
    return this._uid;
  }

  set createdAt(dateTime: Date) {
    localForage.setItem(CREATEDAT, dateTime);
    this._createdAt = dateTime;
  }

  get createdAt(): Date {
    localForage
      .getItem(CREATEDAT)
      .then((val: Date) => {
        this._createdAt = val;
      })
      .catch((err: any) => {
        // console.log('Error fetching from forage:', err);
      });
    return this._createdAt;
  }
    this.createdAt = undefined;
    this._cachedRequests = [];
    this.token = '';
    this.personId = '';
    return localForage.clear();
  }

  getTokenExpirationDate(): Date {
    if (!this.token) {
      return undefined;
    }
    const decoded: any = jwt_decode(this.token);
    if (!decoded.hasOwnProperty('exp')) {
      return undefined;
    }
    const date = new Date(0);
    date.setUTCSeconds(decoded.exp);
    return date;
  }

  isTokenExpired(): boolean {
    if (!this.token) return true;
    const date = this.getTokenExpirationDate();
    if (date === undefined) {
      return false;
    }
    return !(date.valueOf() > new Date().valueOf());
  }

  collectFailedRequest(request): void {
    this._cachedRequests.push(request);
  }

  retryFailedRequests(): void {
    // retry the requests. this method can
    // be called after the token is refreshed
    // console.log(this._cachedRequests);
  }
}
clearStore(): Promise<void> {
random_line_split
auth.service.ts
import { Injectable } from '@angular/core';
import { HttpRequest } from '@angular/common/http';

import * as jwt_decode_ from 'jwt-decode';
import * as localForage from 'localforage';

const jwt_decode = jwt_decode_;

const TOKEN = 'token';
const CLIENT = 'client';
const UID = 'uid';
const CREATEDAT = 'createdAt';

@Injectable()
export class AuthService {
  private _token: string = '';
  private _clientToken: string = '';
  private _uid: string = '';
  private _createdAt: Date = null;
  private _cachedRequests: Array<HttpRequest<any>> = [];

  constructor() {}

  set token(token: string) {
    this._token = token;
    localForage.setItem(TOKEN, token);
  }

  get token(): string {
    if (!this._token) {
      localForage.getItem(TOKEN).then((val: string) => {
        this._token = val;
      });
    }
    return this._token;
  }

  set personId(uid: string) {
    localForage.setItem(UID, uid);
    this._uid = uid;
  }

  get personId(): string {
    localForage
      .getItem(UID)
      .then((val: string) => {
        this._uid = val;
      })
      .catch((err: any) => {
        // console.log('Error fetching from forage:', err);
      });
    return this._uid;
  }

  set createdAt(dateTime: Date) {
    localForage.setItem(CREATEDAT, dateTime);
    this._createdAt = dateTime;
  }

  get createdAt(): Date {
    localForage
      .getItem(CREATEDAT)
      .then((val: Date) => {
        this._createdAt = val;
      })
      .catch((err: any) => {
        // console.log('Error fetching from forage:', err);
      });
    return this._createdAt;
  }

  clearStore(): Promise<void> {
    this.createdAt = undefined;
    this._cachedRequests = [];
    this.token = '';
    this.personId = '';
    return localForage.clear();
  }

  getTokenExpirationDate(): Date {
    if (!this.token) {
      return undefined;
    }
    const decoded: any = jwt_decode(this.token);
    if (!decoded.hasOwnProperty('exp'))
    const date = new Date(0);
    date.setUTCSeconds(decoded.exp);
    return date;
  }

  isTokenExpired(): boolean {
    if (!this.token) return true;
    const date = this.getTokenExpirationDate();
    if (date === undefined) {
      return false;
    }
    return !(date.valueOf() > new Date().valueOf());
  }

  collectFailedRequest(request): void {
    this._cachedRequests.push(request);
  }

  retryFailedRequests(): void {
    // retry the requests. this method can
    // be called after the token is refreshed
    // console.log(this._cachedRequests);
  }
}
{ return undefined; }
conditional_block
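All four auth.service.ts rows center on the same JWT-expiry logic. A stdlib-only Python sketch of the equivalent check (an illustration, not the service's actual code); like jwt_decode here, it reads the payload without verifying the signature:

import base64
import json
import time

def token_expired(token: str) -> bool:
    payload = token.split('.')[1]
    payload += '=' * (-len(payload) % 4)  # restore stripped base64url padding
    claims = json.loads(base64.urlsafe_b64decode(payload))
    # mirrors isTokenExpired(): a token without an 'exp' claim never expires
    return 'exp' in claims and claims['exp'] <= time.time()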
lib.rs
{let res = self.0.num_tables() as usize; (res, Some(res))}
}

impl<'a, T: TagListTable<'a> + Tagged> ExactSizeIterator for TagOffsetIterator<'a, T> {}

#[derive(Clone, Copy, PartialEq, Eq)]
pub struct Script;
pub type ScriptList<'a> = TagOffsetList<'a, Script>;

impl<'a> TagListTable<'a> for Script {type Table = ScriptTable<'a>; fn new(data: &'a[u8]) -> Result<Self::Table, CorruptFont<'a>> {ScriptTable::new(data)}}
impl Tagged for Script {}
impl Indexed for Script {}

impl<'a> ScriptList<'a> {
	pub fn new(data: &'a[u8]) -> Result<ScriptList<'a>, CorruptFont<'a>> {ScriptList::new_list(data)}

	pub fn features_for(&self, selector: Option<(Tag<Script>, Option<Tag<LangSys>>)>) -> Result<LangSysTable<'a>, CorruptFont<'a>> {
		let search = AutoSearch::new(self.num_tables() as usize*6);
		if let Some((script, lang_sys_opt)) = selector {
			match search.search(0..self.num_tables(), &mut move|&i| Ok(self.tag(Index::new(i)).unwrap().0.cmp(&script.0))) {
				Ok(idx) => {
					let script_table = try!(self.table(Index::new(idx)).unwrap());
					if let Some(lang_sys) = lang_sys_opt {
						unimplemented!()
					} else {
						return script_table.default_lang_sys()
					}
				},
				Err(Ok(_)) => {println!("default");return Ok(Default::default())},
				Err(Err((_, e))) => return Err(e)
			}
		}
		match search.search(0..self.num_tables(), &mut move|&i| Ok(self.tag(Index::new(i)).unwrap().0.cmp(&DFLT_TAG.0))) {
			Ok(i) => {
				let script_table = try!(self.table(Index::new(i)).unwrap());
				try!(script_table.validate_dflt());
				script_table.default_lang_sys()
			},
			Err(Ok(_)) => Ok(Default::default()),
			Err(Err((_, e))) => Err(e)
		}
	}
}

static DFLT_TAG: Tag<Script> = Tag(0x44464c54, PhantomData);

#[derive(Clone, Copy, PartialEq, Eq)]
pub struct Feature;
pub type FeatureList<'a> = TagOffsetList<'a, Feature>;

impl<'a> TagListTable<'a> for Feature {type Table = FeatureTable<'a>; fn new(data: &'a[u8]) -> Result<Self::Table, CorruptFont<'a>> {FeatureTable::new(data)}}
impl Tagged for Feature {}
impl Indexed for Feature {}

impl<'a> FeatureList<'a> {
	fn new(data: &'a[u8]) -> Result<FeatureList<'a>, CorruptFont<'a>> {FeatureList::new_list(data)}
}

pub struct FeatureTable<'a>(&'a[u8]);

impl<'a> FeatureTable<'a> {
	fn new(data: &'a[u8]) -> Result<FeatureTable<'a>, CorruptFont<'a>> {
		if data.len() < 4 {return Err(CorruptFont(data, TableTooShort))}
		if read_u16(data).unwrap() != 0 {return Err(CorruptFont(data, ReservedFeature))}
		let len = read_u16(&data[2..]).unwrap();
		if len as usize*2+4 > data.len() {return Err(CorruptFont(data, TableTooShort))}
		Ok(FeatureTable(&data[4..len as usize*2+4]))
	}

	fn lookups(&self) -> IndexList<'a, Lookup> {IndexList(&self.0[4..], PhantomData)}
}

pub struct IndexList<'a, T: Indexed>(&'a[u8], PhantomData<&'static T>);

impl<'a, T: Indexed> IndexList<'a, T> {
	pub fn len(&self) -> usize {self.0.len()/2}
}

impl<'a, T: Indexed> ExactSizeIterator for IndexList<'a, T> {}

impl<'a, T: Indexed> Iterator for IndexList<'a, T> {
	type Item = Index<T>;
	fn next(&mut self) -> Option<Index<T>> {
		if self.0.len() < 2 {
			None
		} else {
			let res = read_u16(self.0).unwrap();
			self.0 = &self.0[2..];
			Some(Index::new(res))
		}
	}
	fn size_hint(&self) -> (usize, Option<usize>) {(self.len(), Some(self.len()))}
}

pub struct Lookup;
impl Indexed for Lookup {}

pub trait LookupContainer<'a>: 'static + Sized {
	type Lookup;
	fn new_lookup(data: &'a[u8], lut: LookupList<'a, Self>) -> Result<Self::Lookup, CorruptFont<'a>>;
}

#[derive(Clone, Copy, PartialEq, Eq)]
pub struct LookupList<'a, T: LookupContainer<'a>>(&'a[u8], PhantomData<&'static T>);

impl<'a, T: LookupContainer<'a>> LookupList<'a, T> {
	fn new(data: &'a[u8]) -> Result<LookupList<'a, T>, CorruptFont<'a>> {
		if data.len() < 2 {return Err(CorruptFont(data, TableTooShort))}
		let res = LookupList(data, PhantomData);
		if data.len() < res.len() as usize*2+2 {return Err(CorruptFont(data, TableTooShort))}
		Ok(res)
	}

	fn len(&self) -> u16 {read_u16(self.0).unwrap()}

	fn get_lookup(self, Index(idx, _): Index<Lookup>) -> Option<Result<T::Lookup, CorruptFont<'a>>> {
		if idx >= self.len() {return None}
		let offset = read_u16(&self.0[2+idx as usize*2..]).unwrap();
		Some(if offset as usize > self.0.len() {
			Err(CorruptFont(self.0, OffsetOutOfBounds))
		} else {
			T::new_lookup(&self.0[offset as usize..], self)
		})
	}
}

fn read_range(range: &[u8]) -> Result<(u16, u16, u16), CorruptFont> {
	let last = read_u16(&range[2..]).unwrap();
	let first = read_u16(range).unwrap();
	if last < first {return Err(CorruptFont(&range[..4], InvalidRange))}
	let offset = read_u16(&range[4..]).unwrap();
	if 0xffff-(last-first) < offset {return Err(CorruptFont(range, WrappingCoverageIndex))}
	Ok((first, last, offset))
}

pub enum Coverage<'a>{
	Single(&'a[u8]),
	Range(&'a[u8])
}

impl<'a> Coverage<'a> {
	fn new(data: &'a[u8]) -> Result<Coverage<'a>, CorruptFont<'a>> {
		if data.len() < 4 {return Err(CorruptFont(data, TableTooShort))}
		match read_u16(data).unwrap() {
			ver @ 1 | ver @ 2 => {
				let len = read_u16(&data[2..]).unwrap();
				match ver {
					1 => if data.len() < len as usize*2 {
						Err(CorruptFont(data, TableTooShort))
					} else {
						Ok(Coverage::Single(&data[4..][..len as usize*2]))
					},
					2 => if data.len() < len as usize*6 {
						Err(CorruptFont(data, TableTooShort))
					} else {
						Ok(Coverage::Range(&data[4..][..len as usize*6]))
					},
					_ => unreachable!()
				}
			},
			_ => Err(CorruptFont(data, ReservedFeature))
		}
	}

	fn check(&self, GlyphIndex(glyph): GlyphIndex) -> Result<Option<Index<CoveredGlyph>>, CorruptFont<'a>> {
		let (data, step) = match self {
			&Coverage::Single(data) => (data, 2),
			&Coverage::Range(data) => (data, 6)
		};
		match AutoSearch::new(data.len()).search(0..(data.len()/step) as u16, &mut move|i|Ok(read_u16(&data[*i as usize*step..]).unwrap().cmp(&glyph))) {
			Ok(i) => Ok(Some(Index::new(if step == 6 {read_u16(&data[i as usize*6+4..]).unwrap()} else {i}))),
			Err(Ok(i)) => {
				let range = &data[i as usize*6..][..6];
				if step == 2 {return Ok(None)}
				let (first, last, offset) = try!(read_range(range));
				Ok(if last >= glyph {
					Some(Index::new(glyph-first+offset))
				} else {
					None
				})
			},
			Err(Err((_, CorruptFont(..)))) => unreachable!()
		}
	}
}

struct Co
veredGlyph;
identifier_name
lib.rs
Corruption);

impl<'a> Error for CorruptFont<'a> {
	fn description(&self) -> &str {match self.1 {
		Unimplemented => "The font uses a feature that is not implemented",
		ReservedFeature => "A reserved field differed from the default value",
		TableTooShort => "Unexpected end of table",
		OffsetOutOfBounds => "An Offset pointed outside of the respective table",
		IncorrectDfltScript => "'DFLT' script with missing DefaultLangSys or LangSysCount ≠ 0",
		CmapInvalidSegmentCount => "The segment count in the character mapping is invalid",
		OddSegsX2 => "The doubled segment count in the character mapping is not an even number",
		CmapMissingGuard => "The character mapping is missing a guard value",
		NoCmap => "No character mapping found",
		UnknownTableVersion => "The font uses a table version that is not recognised",
		InvalidRange => "Invalid index range (last < first) found in font",
		WrappingCoverageIndex => "Index could wrap in Coverage Range"
	}}
}

impl<'a> ::std::fmt::Display for CorruptFont<'a> {
	fn fmt(&self, f: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> {
		write!(f, "{}", self.description())
	}
}

mod search;
use search::{AutoSearch, SearchStrategy};

pub trait Indexed: 'static {}

pub struct Index<T: Indexed>(pub u16, PhantomData<&'static T>);

impl<T: Indexed> Index<T> {pub fn new(x: u16) -> Index<T> {Index(x, PhantomData)}}
impl<T: Indexed> std::fmt::Debug for Index<T> {fn fmt(&self, fmt: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {self.0.fmt(fmt)}}

pub struct LangSysTable<'a>(&'a[u8]);

impl<'a> LangSysTable<'a> {
	pub fn new(data: &'a[u8]) -> Result<LangSysTable<'a>, CorruptFont<'a>> {
		if data.len() < 6 {return Err(CorruptFont(data, TableTooShort))}
		if read_u16(data).unwrap() != 0 {return Err(CorruptFont(data, ReservedFeature))}
		let num_features = read_u16(&data[4..]).unwrap();
		if data.len() - 6 < num_features as usize*2 {return Err(CorruptFont(data, TableTooShort))}
		Ok(LangSysTable(&data[2..num_features as usize*2 + 6]))
	}

	pub fn num_features(&self) -> u16 {(self.0.len() / 2 - 2) as u16}

	pub fn required_feature(&self) -> Option<Index<Feature>> {
		let res = read_u16(self.0).unwrap();
		if res == 0xffff {None} else {Some(Index::new(res))}
	}

	pub fn get_feature(&self, idx: u16) -> Option<Index<Feature>> {read_u16(&self.0[4 + idx as usize*2..]).map(|x| Index::new(x))}

	pub fn features(&self) -> IndexList<'a, Feature> {IndexList(&self.0[4..], PhantomData)}
}

static DEFAULT_LANGSYS_TABLE: [u8; 4] = [255, 255, 0, 0];

impl<'a> Default for LangSysTable<'a> {
	fn default() -> LangSysTable<'a> {LangSysTable(&DEFAULT_LANGSYS_TABLE)}
}

#[derive(Clone, Copy, PartialEq, Eq)]
pub struct LangSys;
pub type ScriptTable<'a> = TagOffsetList<'a, LangSys>;

impl<'a> TagListTable<'a> for LangSys {
	type Table = LangSysTable<'a>;
	fn bias() -> usize {2}
	fn new(data: &'a[u8]) -> Result<Self::Table, CorruptFont<'a>> {LangSysTable::new(data)}
}
impl Tagged for LangSys {}
impl Indexed for LangSys {}

impl<'a> ScriptTable<'a> {
	pub fn new(data: &'a[u8]) -> Result<ScriptTable<'a>, CorruptFont<'a>> {ScriptTable::new_list(data)}

	pub fn default_lang_sys(&self) -> Result<LangSysTable<'a>, CorruptFont<'a>> {
		let offset = read_u16(self.0).unwrap() as usize;
		println!("LS offset {:x}", offset);
		if offset == 0 {
			Ok(Default::default())
		} else {
			if self.0.len() < offset {return Err(CorruptFont(self.0, OffsetOutOfBounds))}
			LangSysTable::new(&self.0[offset..])
		}
	}

	pub fn validate_dflt(&self) -> Result<(), CorruptFont<'a>> {
		if read_u16(self.0).unwrap() != 0 && self.num_tables() == 0 {
			Ok(())
		} else {
			Err(CorruptFont(self.0, IncorrectDfltScript))
		}
	}
}

use std::marker::PhantomData;

pub trait TagListTable<'a>: 'static + Indexed + Tagged {
	type Table;
	fn bias() -> usize {0}
	fn new(data: &'a[u8]) -> Result<Self::Table, CorruptFont<'a>>;
}

#[derive(Clone, Copy, PartialEq, Eq)]
pub struct TagOffsetList<'a, Table: TagListTable<'a>>(&'a[u8], PhantomData<&'static Table>);

pub trait Tagged {}

pub struct Tag<Table: Tagged>(pub u32, PhantomData<Table>);

impl<T: Tagged> Tag<T> {pub fn new(v: u32) -> Tag<T> {Tag(v, PhantomData)}}

impl<'a, Table: TagListTable<'a>> TagOffsetList<'a, Table> {
	fn new_list(data: &'a[u8]) -> Result<TagOffsetList<'a, Table>, CorruptFont<'a>> {
		if data.len() < 2 + Table::bias() {return Err(CorruptFont(data, TableTooShort))}
		let res = TagOffsetList(data, PhantomData);
		if data.len() < res.num_tables() as usize*6 + 2 + Table::bias() {return Err(CorruptFont(data, TableTooShort))}
		Ok(res)
	}

	fn num_tables(&self) -> u16 {read_u16(&self.0[Table::bias()..]).unwrap()}

	pub fn tag(&self, Index(idx, _): Index<Table>) -> Option<Tag<Table>> {read_u32(&self.0[idx as usize*6+2+Table::bias()..]).map(|x|Tag(x, PhantomData))}

	pub fn table(&self, Index(index, _): Index<Table>) -> Option<Result<Table::Table, CorruptFont<'a>>> {
		let offset_pos = &self.0[index as usize*6 + 6 + Table::bias()..];
		let offset = read_u16(offset_pos).unwrap() as usize;
		if self.0.len() < offset {return None}
		println!("offset {:x}", offset);
		Some(Table::new(&self.0[offset..]))
	}
}

impl<'a, Table: TagListTable<'a>> IntoIterator for TagOffsetList<'a, Table> {
	type Item = (Tag<Table>, Result<Table::Table, CorruptFont<'a>>);
	type IntoIter = TagOffsetIterator<'a, Table>;
	fn into_iter(self) -> TagOffsetIterator<'a, Table> {TagOffsetIterator(self, 0, PhantomData)}
}

#[derive(Clone, Copy)]
pub struct TagOffsetIterator<'a, Table: TagListTable<'a>>(TagOffsetList<'a, Table>, u16, PhantomData<Table>);

impl<'a, Table: TagListTable<'a>> Iterator for TagOffsetIterator<'a, Table> {
	type Item = (Tag<Table>, Result<Table::Table, CorruptFont<'a>>);
	fn next(&mut self) -> Option<<Self as Iterator>::Item> {
		if self.1 >= self.0.num_tables() {
			None
		} else {
			self.1 += 1;
			Some((self.0.tag(Index::new(self.1 - 1)).unwrap(), self.0.table(Index::new(self.1 - 1)).unwrap()))
		}
	}
	fn size_hint(&self) -> (usize, Option<usize>) {let res = self.0.num_tables() as usize; (res, Some(res))}
}

impl<'a, T: TagListTable<'a> + Tagged> ExactSizeIterator for TagOffsetIterator<'a, T> {}

#[derive(Clone, Copy, PartialEq, Eq)]
pub struct Script;
pub type ScriptList<'a> = TagOffsetList<'a, Script>;

impl<'a> TagListTable<'a> for Script {type Table = ScriptTable<'a>; fn new(data: &'a[u8]) -> Result<Self::Table, CorruptFont<'a>> {ScriptTable::new(data)}}
impl Tagged for Script {}
impl Indexed for Script {}

impl<'a> ScriptList<'a> {
	pub fn new(data: &'a[u8]) -> Result<ScriptList<'a>, CorruptFont<'a>> {S
criptList::new_list(data)}
identifier_body
lib.rs
let offset = read_u16(self.0).unwrap() as usize;
		println!("LS offset {:x}", offset);
		if offset == 0 {
			Ok(Default::default())
		} else {
			if self.0.len() < offset {return Err(CorruptFont(self.0, OffsetOutOfBounds))}
			LangSysTable::new(&self.0[offset..])
		}
	}

	pub fn validate_dflt(&self) -> Result<(), CorruptFont<'a>> {
		if read_u16(self.0).unwrap() != 0 && self.num_tables() == 0 {
			Ok(())
		} else {
			Err(CorruptFont(self.0, IncorrectDfltScript))
		}
	}
}

use std::marker::PhantomData;

pub trait TagListTable<'a>: 'static + Indexed + Tagged {
	type Table;
	fn bias() -> usize {0}
	fn new(data: &'a[u8]) -> Result<Self::Table, CorruptFont<'a>>;
}

#[derive(Clone, Copy, PartialEq, Eq)]
pub struct TagOffsetList<'a, Table: TagListTable<'a>>(&'a[u8], PhantomData<&'static Table>);

pub trait Tagged {}

pub struct Tag<Table: Tagged>(pub u32, PhantomData<Table>);

impl<T: Tagged> Tag<T> {pub fn new(v: u32) -> Tag<T> {Tag(v, PhantomData)}}

impl<'a, Table: TagListTable<'a>> TagOffsetList<'a, Table> {
	fn new_list(data: &'a[u8]) -> Result<TagOffsetList<'a, Table>, CorruptFont<'a>> {
		if data.len() < 2 + Table::bias() {return Err(CorruptFont(data, TableTooShort))}
		let res = TagOffsetList(data, PhantomData);
		if data.len() < res.num_tables() as usize*6 + 2 + Table::bias() {return Err(CorruptFont(data, TableTooShort))}
		Ok(res)
	}

	fn num_tables(&self) -> u16 {read_u16(&self.0[Table::bias()..]).unwrap()}

	pub fn tag(&self, Index(idx, _): Index<Table>) -> Option<Tag<Table>> {read_u32(&self.0[idx as usize*6+2+Table::bias()..]).map(|x|Tag(x, PhantomData))}

	pub fn table(&self, Index(index, _): Index<Table>) -> Option<Result<Table::Table, CorruptFont<'a>>> {
		let offset_pos = &self.0[index as usize*6 + 6 + Table::bias()..];
		let offset = read_u16(offset_pos).unwrap() as usize;
		if self.0.len() < offset {return None}
		println!("offset {:x}", offset);
		Some(Table::new(&self.0[offset..]))
	}
}

impl<'a, Table: TagListTable<'a>> IntoIterator for TagOffsetList<'a, Table> {
	type Item = (Tag<Table>, Result<Table::Table, CorruptFont<'a>>);
	type IntoIter = TagOffsetIterator<'a, Table>;
	fn into_iter(self) -> TagOffsetIterator<'a, Table> {TagOffsetIterator(self, 0, PhantomData)}
}

#[derive(Clone, Copy)]
pub struct TagOffsetIterator<'a, Table: TagListTable<'a>>(TagOffsetList<'a, Table>, u16, PhantomData<Table>);

impl<'a, Table: TagListTable<'a>> Iterator for TagOffsetIterator<'a, Table> {
	type Item = (Tag<Table>, Result<Table::Table, CorruptFont<'a>>);
	fn next(&mut self) -> Option<<Self as Iterator>::Item> {
		if self.1 >= self.0.num_tables() {
			None
		} else {
			self.1 += 1;
			Some((self.0.tag(Index::new(self.1 - 1)).unwrap(), self.0.table(Index::new(self.1 - 1)).unwrap()))
		}
	}
	fn size_hint(&self) -> (usize, Option<usize>) {let res = self.0.num_tables() as usize; (res, Some(res))}
}

impl<'a, T: TagListTable<'a> + Tagged> ExactSizeIterator for TagOffsetIterator<'a, T> {}

#[derive(Clone, Copy, PartialEq, Eq)]
pub struct Script;
pub type ScriptList<'a> = TagOffsetList<'a, Script>;

impl<'a> TagListTable<'a> for Script {type Table = ScriptTable<'a>; fn new(data: &'a[u8]) -> Result<Self::Table, CorruptFont<'a>> {ScriptTable::new(data)}}
impl Tagged for Script {}
impl Indexed for Script {}

impl<'a> ScriptList<'a> {
	pub fn new(data: &'a[u8]) -> Result<ScriptList<'a>, CorruptFont<'a>> {ScriptList::new_list(data)}

	pub fn features_for(&self, selector: Option<(Tag<Script>, Option<Tag<LangSys>>)>) -> Result<LangSysTable<'a>, CorruptFont<'a>> {
		let search = AutoSearch::new(self.num_tables() as usize*6);
		if let Some((script, lang_sys_opt)) = selector {
			match search.search(0..self.num_tables(), &mut move|&i| Ok(self.tag(Index::new(i)).unwrap().0.cmp(&script.0))) {
				Ok(idx) => {
					let script_table = try!(self.table(Index::new(idx)).unwrap());
					if let Some(lang_sys) = lang_sys_opt {
						unimplemented!()
					} else {
						return script_table.default_lang_sys()
					}
				},
				Err(Ok(_)) => {println!("default");return Ok(Default::default())},
				Err(Err((_, e))) => return Err(e)
			}
		}
		match search.search(0..self.num_tables(), &mut move|&i| Ok(self.tag(Index::new(i)).unwrap().0.cmp(&DFLT_TAG.0))) {
			Ok(i) => {
				let script_table = try!(self.table(Index::new(i)).unwrap());
				try!(script_table.validate_dflt());
				script_table.default_lang_sys()
			},
			Err(Ok(_)) => Ok(Default::default()),
			Err(Err((_, e))) => Err(e)
		}
	}
}

static DFLT_TAG: Tag<Script> = Tag(0x44464c54, PhantomData);

#[derive(Clone, Copy, PartialEq, Eq)]
pub struct Feature;
pub type FeatureList<'a> = TagOffsetList<'a, Feature>;

impl<'a> TagListTable<'a> for Feature {type Table = FeatureTable<'a>; fn new(data: &'a[u8]) -> Result<Self::Table, CorruptFont<'a>> {FeatureTable::new(data)}}
impl Tagged for Feature {}
impl Indexed for Feature {}

impl<'a> FeatureList<'a> {
	fn new(data: &'a[u8]) -> Result<FeatureList<'a>, CorruptFont<'a>> {FeatureList::new_list(data)}
}

pub struct FeatureTable<'a>(&'a[u8]);

impl<'a> FeatureTable<'a> {
	fn new(data: &'a[u8]) -> Result<FeatureTable<'a>, CorruptFont<'a>> {
		if data.len() < 4 {return Err(CorruptFont(data, TableTooShort))}
		if read_u16(data).unwrap() != 0 {return Err(CorruptFont(data, ReservedFeature))}
		let len = read_u16(&data[2..]).unwrap();
		if len as usize*2+4 > data.len() {return Err(CorruptFont(data, TableTooShort))}
		Ok(FeatureTable(&data[4..len as usize*2+4]))
	}

	fn lookups(&self) -> IndexList<'a, Lookup> {IndexList(&self.0[4..], PhantomData)}
}

pub struct IndexList<'a, T: Indexed>(&'a[u8], PhantomData<&'static T>);

impl<'a, T: Indexed> IndexList<'a, T> {
	pub fn len(&self) -> usize {self.0.len()/2}
}

impl<'a, T: Indexed> ExactSizeIterator for IndexList<'a, T> {}

impl<'a, T: Indexed> Iterator for IndexList<'a, T> {
	type Item = Index<T>;
	fn next(&mut self) -> Option<Index<T>> {
		if self.0.len() < 2 {
			None
		} else {
			let res = read_u16(self.0).unwrap();
			self.0 = &self.0[2..];
			Some(Index::new(res))
		}
	}
	fn size_hint(&self) -> (usize, Option<usize>) {(self.len(), Some(self.len()))}
}

pub struct Lookup;
impl Indexed for Lookup {}

pub trait LookupContainer<'a>: 'static + Sized {
	type Lookup;
	fn new_lookup(data: &'a[u8], lut: LookupList<'a, Self>) -> Result<Self::Lookup, CorruptFont<'a>>;
}

#[derive(Clone, Copy, PartialEq, Eq)]
pub struct LookupList<'a, T: LookupContainer<'a>>(&'a[u8], PhantomData<&'static T>);

impl<'a, T: LookupContainer<'a>> LookupList<'a, T> {
random_line_split
lib.rs
Table>, Result<Table::Table, CorruptFont<'a>>);
	fn next(&mut self) -> Option<<Self as Iterator>::Item> {
		if self.1 >= self.0.num_tables() {
			None
		} else {
			self.1 += 1;
			Some((self.0.tag(Index::new(self.1 - 1)).unwrap(), self.0.table(Index::new(self.1 - 1)).unwrap()))
		}
	}
	fn size_hint(&self) -> (usize, Option<usize>) {let res = self.0.num_tables() as usize; (res, Some(res))}
}

impl<'a, T: TagListTable<'a> + Tagged> ExactSizeIterator for TagOffsetIterator<'a, T> {}

#[derive(Clone, Copy, PartialEq, Eq)]
pub struct Script;
pub type ScriptList<'a> = TagOffsetList<'a, Script>;

impl<'a> TagListTable<'a> for Script {type Table = ScriptTable<'a>; fn new(data: &'a[u8]) -> Result<Self::Table, CorruptFont<'a>> {ScriptTable::new(data)}}
impl Tagged for Script {}
impl Indexed for Script {}

impl<'a> ScriptList<'a> {
	pub fn new(data: &'a[u8]) -> Result<ScriptList<'a>, CorruptFont<'a>> {ScriptList::new_list(data)}

	pub fn features_for(&self, selector: Option<(Tag<Script>, Option<Tag<LangSys>>)>) -> Result<LangSysTable<'a>, CorruptFont<'a>> {
		let search = AutoSearch::new(self.num_tables() as usize*6);
		if let Some((script, lang_sys_opt)) = selector {
			match search.search(0..self.num_tables(), &mut move|&i| Ok(self.tag(Index::new(i)).unwrap().0.cmp(&script.0))) {
				Ok(idx) => {
					let script_table = try!(self.table(Index::new(idx)).unwrap());
					if let Some(lang_sys) = lang_sys_opt {
						unimplemented!()
					} else {
						return script_table.default_lang_sys()
					}
				},
				Err(Ok(_)) => {println!("default");return Ok(Default::default())},
				Err(Err((_, e))) => return Err(e)
			}
		}
		match search.search(0..self.num_tables(), &mut move|&i| Ok(self.tag(Index::new(i)).unwrap().0.cmp(&DFLT_TAG.0))) {
			Ok(i) => {
				let script_table = try!(self.table(Index::new(i)).unwrap());
				try!(script_table.validate_dflt());
				script_table.default_lang_sys()
			},
			Err(Ok(_)) => Ok(Default::default()),
			Err(Err((_, e))) => Err(e)
		}
	}
}

static DFLT_TAG: Tag<Script> = Tag(0x44464c54, PhantomData);

#[derive(Clone, Copy, PartialEq, Eq)]
pub struct Feature;
pub type FeatureList<'a> = TagOffsetList<'a, Feature>;

impl<'a> TagListTable<'a> for Feature {type Table = FeatureTable<'a>; fn new(data: &'a[u8]) -> Result<Self::Table, CorruptFont<'a>> {FeatureTable::new(data)}}
impl Tagged for Feature {}
impl Indexed for Feature {}

impl<'a> FeatureList<'a> {
	fn new(data: &'a[u8]) -> Result<FeatureList<'a>, CorruptFont<'a>> {FeatureList::new_list(data)}
}

pub struct FeatureTable<'a>(&'a[u8]);

impl<'a> FeatureTable<'a> {
	fn new(data: &'a[u8]) -> Result<FeatureTable<'a>, CorruptFont<'a>> {
		if data.len() < 4 {return Err(CorruptFont(data, TableTooShort))}
		if read_u16(data).unwrap() != 0 {return Err(CorruptFont(data, ReservedFeature))}
		let len = read_u16(&data[2..]).unwrap();
		if len as usize*2+4 > data.len() {return Err(CorruptFont(data, TableTooShort))}
		Ok(FeatureTable(&data[4..len as usize*2+4]))
	}

	fn lookups(&self) -> IndexList<'a, Lookup> {IndexList(&self.0[4..], PhantomData)}
}

pub struct IndexList<'a, T: Indexed>(&'a[u8], PhantomData<&'static T>);

impl<'a, T: Indexed> IndexList<'a, T> {
	pub fn len(&self) -> usize {self.0.len()/2}
}

impl<'a, T: Indexed> ExactSizeIterator for IndexList<'a, T> {}

impl<'a, T: Indexed> Iterator for IndexList<'a, T> {
	type Item = Index<T>;
	fn next(&mut self) -> Option<Index<T>> {
		if self.0.len() < 2 {
			None
		} else {
			let res = read_u16(self.0).unwrap();
			self.0 = &self.0[2..];
			Some(Index::new(res))
		}
	}
	fn size_hint(&self) -> (usize, Option<usize>) {(self.len(), Some(self.len()))}
}

pub struct Lookup;
impl Indexed for Lookup {}

pub trait LookupContainer<'a>: 'static + Sized {
	type Lookup;
	fn new_lookup(data: &'a[u8], lut: LookupList<'a, Self>) -> Result<Self::Lookup, CorruptFont<'a>>;
}

#[derive(Clone, Copy, PartialEq, Eq)]
pub struct LookupList<'a, T: LookupContainer<'a>>(&'a[u8], PhantomData<&'static T>);

impl<'a, T: LookupContainer<'a>> LookupList<'a, T> {
	fn new(data: &'a[u8]) -> Result<LookupList<'a, T>, CorruptFont<'a>> {
		if data.len() < 2 {return Err(CorruptFont(data, TableTooShort))}
		let res = LookupList(data, PhantomData);
		if data.len() < res.len() as usize*2+2 {return Err(CorruptFont(data, TableTooShort))}
		Ok(res)
	}

	fn len(&self) -> u16 {read_u16(self.0).unwrap()}

	fn get_lookup(self, Index(idx, _): Index<Lookup>) -> Option<Result<T::Lookup, CorruptFont<'a>>> {
		if idx >= self.len() {return None}
		let offset = read_u16(&self.0[2+idx as usize*2..]).unwrap();
		Some(if offset as usize > self.0.len() {
			Err(CorruptFont(self.0, OffsetOutOfBounds))
		} else {
			T::new_lookup(&self.0[offset as usize..], self)
		})
	}
}

fn read_range(range: &[u8]) -> Result<(u16, u16, u16), CorruptFont> {
	let last = read_u16(&range[2..]).unwrap();
	let first = read_u16(range).unwrap();
	if last < first {return Err(CorruptFont(&range[..4], InvalidRange))}
	let offset = read_u16(&range[4..]).unwrap();
	if 0xffff-(last-first) < offset {return Err(CorruptFont(range, WrappingCoverageIndex))}
	Ok((first, last, offset))
}

pub enum Coverage<'a>{
	Single(&'a[u8]),
	Range(&'a[u8])
}

impl<'a> Coverage<'a> {
	fn new(data: &'a[u8]) -> Result<Coverage<'a>, CorruptFont<'a>> {
		if data.len() < 4 {return Err(CorruptFont(data, TableTooShort))}
		match read_u16(data).unwrap() {
			ver @ 1 | ver @ 2 => {
				let len = read_u16(&data[2..]).unwrap();
				match ver {
					1 => if data.len() < len as usize*2 {
						Err(CorruptFont(data, TableTooShort))
					} else {
						Ok(Coverage::Single(&data[4..][..len as usize*2]))
					},
					2 => if data.len() < len as usize*6 {
						Err(CorruptFont(data, TableTooShort))
					} else {
						Ok(Coverage::Range(&data[4..][..len as usize*6]))
					},
					_ => unreachable!()
				}
			},
			_ => Err(CorruptFont(data, ReservedFeature))
		}
	}

	fn check(&self, GlyphIndex(glyph): GlyphIndex) -> Result<Option<Index<CoveredGlyph>>, CorruptFont<'a>> {
		let (data, step) = match self {
			&Coverage::Single(data) => (data, 2),
			&Coverage::Range(data) => (data, 6)
		};
		match AutoSearch::new(data.len()).search(0..(data.len()/step) as u16, &mut move|i|Ok(read_u16(&data[*i as usize*step..]).unwrap().cmp(&glyph))) {
			Ok(i) => Ok(Some(Index::new(if step == 6 {read_u16(&data[i as usize*6+4..]).unwrap()} else {i
}))
conditional_block
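The lib.rs rows lean on read_u16/read_u32 helpers whose definitions sit outside these slices. A rough Python counterpart (an assumption about their behavior, inferred from how the call sites bounds-check and map over an Option); OpenType tables store integers big-endian, so '>H'/'>I' unpacking mirrors them:

import struct

def read_u16(data: bytes, offset: int = 0):
    # returns None when fewer than 2 bytes remain, like the Option the Rust code maps over
    return struct.unpack_from('>H', data, offset)[0] if len(data) - offset >= 2 else None

def read_u32(data: bytes, offset: int = 0):
    return struct.unpack_from('>I', data, offset)[0] if len(data) - offset >= 4 else None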
buckets.ts
/**
 * Copyright (c) 2018 mol* contributors, licensed under MIT, See LICENSE file for more info.
 *
 * @author David Sehnal <[email protected]>
 */

import { sort, arraySwap } from './sort';
import { AssignableArrayLike } from '../../mol-util/type-helpers';

type Bucket = {
    key: any,
    count: number,
    offset: number
}

function sortAsc(bs: Bucket[], i: number, j: number) {
    return bs[i].key < bs[j].key ? -1 : 1;
}

function _makeBuckets(indices: AssignableArrayLike<number>, getKey: (i: number) => any, sortBuckets: boolean, start: number, end: number) {
    const buckets = new Map<any, Bucket>();
    const bucketList: Bucket[] = [];

    let prevKey = getKey(indices[0]);
    let isBucketed = true;
    for (let i = start; i < end; i++) {
        const key = getKey(indices[i]);
        if (buckets.has(key)) {
            buckets.get(key)!.count++;
            if (prevKey !== key) isBucketed = false;
        } else {
            const bucket: Bucket = { key, count: 1, offset: i };
            buckets.set(key, bucket);
            bucketList[bucketList.length] = bucket;
        }
        prevKey = key;
    }

    const bucketOffsets = new Int32Array(bucketList.length + 1);
    bucketOffsets[bucketList.length] = end;

    let sorted = true;
    if (sortBuckets) {
        for (let i = 1, _i = bucketList.length; i < _i; i++) {
            if (bucketList[i - 1].key > bucketList[i].key) {
                sorted = false;
                break;
            }
        }
    }

    if (isBucketed && sorted) {
        for (let i = 0; i < bucketList.length; i++) bucketOffsets[i] = bucketList[i].offset;
        return bucketOffsets;
    }

    if (sortBuckets && !sorted) {
        sort(bucketList, 0, bucketList.length, sortAsc, arraySwap);
    }

    let offset = 0;
    for (let i = 0; i < bucketList.length; i++) {
        const b = bucketList[i];
        b.offset = offset;
        offset += b.count;
    }

    const reorderedIndices = new Int32Array(end - start);
    for (let i = start; i < end; i++) {
        const key = getKey(indices[i]);
        const bucket = buckets.get(key)!;
        reorderedIndices[bucket.offset++] = indices[i];
    }

    for (let i = 0, _i = reorderedIndices.length; i < _i; i++) {
        indices[i + start] = reorderedIndices[i];
    }

    bucketOffsets[0] = start;
    for (let i = 1; i < bucketList.length; i++) bucketOffsets[i] = bucketList[i - 1].offset + start;

    return bucketOffsets;
}

export interface MakeBucketsOptions<K> {
    // If specified, will be sorted
    sort?: boolean,
    // inclusive start index
    start?: number,
    // exclusive end index
    end?: number
}

/**
 * Reorders indices so that the same keys are next to each other, [start, end)
 * Returns the offsets of buckets. So that [offsets[i], offsets[i + 1]) determines the range.
 */
export function
<K extends string | number>(
    indices: AssignableArrayLike<number>, getKey: (i: number) => K, options?: MakeBucketsOptions<K>): ArrayLike<number> {
    const s = (options && options.start) || 0;
    const e = (options && options.end) || indices.length;

    if (e - s <= 0) throw new Error('Can only bucket non-empty collections.');

    return _makeBuckets(indices, getKey, !!(options && options.sort), s, e);
}
makeBuckets
identifier_name
buckets.ts
/**
 * Copyright (c) 2018 mol* contributors, licensed under MIT, See LICENSE file for more info.
 *
 * @author David Sehnal <[email protected]>
 */

import { sort, arraySwap } from './sort';
import { AssignableArrayLike } from '../../mol-util/type-helpers';

type Bucket = {
    key: any,
    count: number,
    offset: number
}

function sortAsc(bs: Bucket[], i: number, j: number) {
    return bs[i].key < bs[j].key ? -1 : 1;
}

function _makeBuckets(indices: AssignableArrayLike<number>, getKey: (i: number) => any, sortBuckets: boolean, start: number, end: number) {
    const buckets = new Map<any, Bucket>();
    const bucketList: Bucket[] = [];

    let prevKey = getKey(indices[0]);
    let isBucketed = true;
    for (let i = start; i < end; i++) {
        const key = getKey(indices[i]);
        if (buckets.has(key)) {
            buckets.get(key)!.count++;
            if (prevKey !== key) isBucketed = false;
        } else {
            const bucket: Bucket = { key, count: 1, offset: i };
            buckets.set(key, bucket);
            bucketList[bucketList.length] = bucket;
        }
        prevKey = key;
    }

    const bucketOffsets = new Int32Array(bucketList.length + 1);
    bucketOffsets[bucketList.length] = end;

    let sorted = true;
    if (sortBuckets) {
        for (let i = 1, _i = bucketList.length; i < _i; i++)
    }

    if (isBucketed && sorted) {
        for (let i = 0; i < bucketList.length; i++) bucketOffsets[i] = bucketList[i].offset;
        return bucketOffsets;
    }

    if (sortBuckets && !sorted) {
        sort(bucketList, 0, bucketList.length, sortAsc, arraySwap);
    }

    let offset = 0;
    for (let i = 0; i < bucketList.length; i++) {
        const b = bucketList[i];
        b.offset = offset;
        offset += b.count;
    }

    const reorderedIndices = new Int32Array(end - start);
    for (let i = start; i < end; i++) {
        const key = getKey(indices[i]);
        const bucket = buckets.get(key)!;
        reorderedIndices[bucket.offset++] = indices[i];
    }

    for (let i = 0, _i = reorderedIndices.length; i < _i; i++) {
        indices[i + start] = reorderedIndices[i];
    }

    bucketOffsets[0] = start;
    for (let i = 1; i < bucketList.length; i++) bucketOffsets[i] = bucketList[i - 1].offset + start;

    return bucketOffsets;
}

export interface MakeBucketsOptions<K> {
    // If specified, will be sorted
    sort?: boolean,
    // inclusive start index
    start?: number,
    // exclusive end index
    end?: number
}

/**
 * Reorders indices so that the same keys are next to each other, [start, end)
 * Returns the offsets of buckets. So that [offsets[i], offsets[i + 1]) determines the range.
 */
export function makeBuckets<K extends string | number>(
    indices: AssignableArrayLike<number>, getKey: (i: number) => K, options?: MakeBucketsOptions<K>): ArrayLike<number> {
    const s = (options && options.start) || 0;
    const e = (options && options.end) || indices.length;

    if (e - s <= 0) throw new Error('Can only bucket non-empty collections.');

    return _makeBuckets(indices, getKey, !!(options && options.sort), s, e);
}
{
            if (bucketList[i - 1].key > bucketList[i].key) {
                sorted = false;
                break;
            }
        }
conditional_block
buckets.ts
/**
 * Copyright (c) 2018 mol* contributors, licensed under MIT, See LICENSE file for more info.
 *
 * @author David Sehnal <[email protected]>
 */

import { sort, arraySwap } from './sort';
import { AssignableArrayLike } from '../../mol-util/type-helpers';

type Bucket = {
    key: any,
    count: number,
    offset: number
}

function sortAsc(bs: Bucket[], i: number, j: number) {
    return bs[i].key < bs[j].key ? -1 : 1;
}

function _makeBuckets(indices: AssignableArrayLike<number>, getKey: (i: number) => any, sortBuckets: boolean, start: number, end: number) {
    const buckets = new Map<any, Bucket>();
    const bucketList: Bucket[] = [];

    let prevKey = getKey(indices[0]);
    let isBucketed = true;
    for (let i = start; i < end; i++) {
        const key = getKey(indices[i]);
        if (buckets.has(key)) {
            buckets.get(key)!.count++;
            if (prevKey !== key) isBucketed = false;
        } else {
            const bucket: Bucket = { key, count: 1, offset: i };
            buckets.set(key, bucket);
            bucketList[bucketList.length] = bucket;
        }
        prevKey = key;
    }

    const bucketOffsets = new Int32Array(bucketList.length + 1);
    bucketOffsets[bucketList.length] = end;

    let sorted = true;
    if (sortBuckets) {
        for (let i = 1, _i = bucketList.length; i < _i; i++) {
            if (bucketList[i - 1].key > bucketList[i].key) {
                sorted = false;
                break;
            }
    }

    if (isBucketed && sorted) {
        for (let i = 0; i < bucketList.length; i++) bucketOffsets[i] = bucketList[i].offset;
        return bucketOffsets;
    }

    if (sortBuckets && !sorted) {
        sort(bucketList, 0, bucketList.length, sortAsc, arraySwap);
    }

    let offset = 0;
    for (let i = 0; i < bucketList.length; i++) {
        const b = bucketList[i];
        b.offset = offset;
        offset += b.count;
    }

    const reorderedIndices = new Int32Array(end - start);
    for (let i = start; i < end; i++) {
        const key = getKey(indices[i]);
        const bucket = buckets.get(key)!;
        reorderedIndices[bucket.offset++] = indices[i];
    }

    for (let i = 0, _i = reorderedIndices.length; i < _i; i++) {
        indices[i + start] = reorderedIndices[i];
    }

    bucketOffsets[0] = start;
    for (let i = 1; i < bucketList.length; i++) bucketOffsets[i] = bucketList[i - 1].offset + start;

    return bucketOffsets;
}

export interface MakeBucketsOptions<K> {
    // If specified, will be sorted
    sort?: boolean,
    // inclusive start index
    start?: number,
    // exclusive end index
    end?: number
}

/**
 * Reorders indices so that the same keys are next to each other, [start, end)
 * Returns the offsets of buckets. So that [offsets[i], offsets[i + 1]) determines the range.
 */
export function makeBuckets<K extends string | number>(
    indices: AssignableArrayLike<number>, getKey: (i: number) => K, options?: MakeBucketsOptions<K>): ArrayLike<number> {
    const s = (options && options.start) || 0;
    const e = (options && options.end) || indices.length;

    if (e - s <= 0) throw new Error('Can only bucket non-empty collections.');

    return _makeBuckets(indices, getKey, !!(options && options.sort), s, e);
}
}
random_line_split
buckets.ts
/**
 * Copyright (c) 2018 mol* contributors, licensed under MIT, See LICENSE file for more info.
 *
 * @author David Sehnal <[email protected]>
 */

import { sort, arraySwap } from './sort';
import { AssignableArrayLike } from '../../mol-util/type-helpers';

type Bucket = {
    key: any,
    count: number,
    offset: number
}

function sortAsc(bs: Bucket[], i: number, j: number) {
    return bs[i].key < bs[j].key ? -1 : 1;
}

function _makeBuckets(indices: AssignableArrayLike<number>, getKey: (i: number) => any, sortBuckets: boolean, start: number, end: number) {
    const buckets = new Map<any, Bucket>();
    const bucketList: Bucket[] = [];

    let prevKey = getKey(indices[0]);
    let isBucketed = true;
    for (let i = start; i < end; i++) {
        const key = getKey(indices[i]);
        if (buckets.has(key)) {
            buckets.get(key)!.count++;
            if (prevKey !== key) isBucketed = false;
        } else {
            const bucket: Bucket = { key, count: 1, offset: i };
            buckets.set(key, bucket);
            bucketList[bucketList.length] = bucket;
        }
        prevKey = key;
    }

    const bucketOffsets = new Int32Array(bucketList.length + 1);
    bucketOffsets[bucketList.length] = end;

    let sorted = true;
    if (sortBuckets) {
        for (let i = 1, _i = bucketList.length; i < _i; i++) {
            if (bucketList[i - 1].key > bucketList[i].key) {
                sorted = false;
                break;
            }
        }
    }

    if (isBucketed && sorted) {
        for (let i = 0; i < bucketList.length; i++) bucketOffsets[i] = bucketList[i].offset;
        return bucketOffsets;
    }

    if (sortBuckets && !sorted) {
        sort(bucketList, 0, bucketList.length, sortAsc, arraySwap);
    }

    let offset = 0;
    for (let i = 0; i < bucketList.length; i++) {
        const b = bucketList[i];
        b.offset = offset;
        offset += b.count;
    }

    const reorderedIndices = new Int32Array(end - start);
    for (let i = start; i < end; i++) {
        const key = getKey(indices[i]);
        const bucket = buckets.get(key)!;
        reorderedIndices[bucket.offset++] = indices[i];
    }

    for (let i = 0, _i = reorderedIndices.length; i < _i; i++) {
        indices[i + start] = reorderedIndices[i];
    }

    bucketOffsets[0] = start;
    for (let i = 1; i < bucketList.length; i++) bucketOffsets[i] = bucketList[i - 1].offset + start;

    return bucketOffsets;
}

export interface MakeBucketsOptions<K> {
    // If specified, will be sorted
    sort?: boolean,
    // inclusive start index
    start?: number,
    // exclusive end index
    end?: number
}

/**
 * Reorders indices so that the same keys are next to each other, [start, end)
 * Returns the offsets of buckets. So that [offsets[i], offsets[i + 1]) determines the range.
 */
export function makeBuckets<K extends string | number>(
    indices: AssignableArrayLike<number>, getKey: (i: number) => K, options?: MakeBucketsOptions<K>): ArrayLike<number>
{
    const s = (options && options.start) || 0;
    const e = (options && options.end) || indices.length;

    if (e - s <= 0) throw new Error('Can only bucket non-empty collections.');

    return _makeBuckets(indices, getKey, !!(options && options.sort), s, e);
}
identifier_body
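A compact Python sketch of the idea makeBuckets() implements in the rows above (illustrative only): reorder indices so equal keys sit together and return the bucket offsets.

from collections import OrderedDict

def make_buckets(indices, get_key):
    buckets = OrderedDict()           # first-seen key order, like bucketList
    for i in indices:
        buckets.setdefault(get_key(i), []).append(i)
    reordered, offsets, pos = [], [], 0
    for members in buckets.values():
        offsets.append(pos)
        reordered.extend(members)
        pos += len(members)
    offsets.append(pos)               # [offsets[i], offsets[i + 1]) delimits bucket i
    return reordered, offsets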
settings.py
""" Django settings for example_site project. Generated by 'django-admin startproject' using Django 1.8.dev20150302062936. For more information on this file, see https://docs.djangoproject.com/en/dev/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/dev/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os import environ BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) env = environ.Env() # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = "fbaa1unu0e8z5@9mm%k#+*d@iny*=-)ma2b#ymq)o9z^3%ijh)" # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( "address", "person", "django.contrib.admin", "django.contrib.auth", "django.contrib.contenttypes", "django.contrib.sessions", "django.contrib.messages", "django.contrib.staticfiles", ) MIDDLEWARE = ( "django.middleware.security.SecurityMiddleware", "django.contrib.sessions.middleware.SessionMiddleware", "django.middleware.common.CommonMiddleware", "django.middleware.csrf.CsrfViewMiddleware", "django.contrib.auth.middleware.AuthenticationMiddleware", "django.contrib.messages.middleware.MessageMiddleware", "django.middleware.clickjacking.XFrameOptionsMiddleware", ) ROOT_URLCONF = "example_site.urls" TEMPLATES = [ { "BACKEND": "django.template.backends.django.DjangoTemplates", "DIRS": [], "APP_DIRS": True, "OPTIONS": { "context_processors": [ "django.template.context_processors.debug", "django.template.context_processors.request", "django.contrib.auth.context_processors.auth", "django.contrib.messages.context_processors.messages", ], }, }, ] WSGI_APPLICATION = "example_site.wsgi.application" # Specify your Google API key as environment variable GOOGLE_API_KEY # You may also specify it here, though be sure not to commit it to a repository GOOGLE_API_KEY = "" # Specify your Google API key here GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY", GOOGLE_API_KEY) # Database # https://docs.djangoproject.com/en/dev/ref/settings/#databases DATABASES = { "default": env.db(), } # Password validation # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", }, { "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator", }, { "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator", }, { "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator", }, ] # Internationalization # https://docs.djangoproject.com/en/dev/topics/i18n/ LANGUAGE_CODE = "en-us" TIME_ZONE = "UTC" USE_I18N = True USE_L10N = True
# https://docs.djangoproject.com/en/dev/howto/static-files/ STATIC_URL = "/static/" DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
USE_TZ = True # Static files (CSS, JavaScript, Images)
random_line_split
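By contrast, the settings.py record above is a random_line_split sample: the cut points fall on line boundaries chosen at random, so the middle (USE_TZ = True plus a section comment) carries no structural meaning. A minimal sketch, assuming the two boundaries are drawn uniformly without replacement:

import random

def random_line_split(source: str, rng: random.Random):
    """Return (prefix, middle, suffix) split at two random line
    boundaries -- a sketch of a random_line_split FIM sample."""
    lines = source.splitlines(keepends=True)
    i, j = sorted(rng.sample(range(len(lines) + 1), 2))
    return ''.join(lines[:i]), ''.join(lines[i:j]), ''.join(lines[j:])

parts = random_line_split('a = 1\nb = 2\nc = 3\n', random.Random(0))
assert ''.join(parts) == 'a = 1\nb = 2\nc = 3\n'  # the split is lossless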
test_metrics.py
# Copyright (c) 2014 Red Hat, Inc. # # This software is licensed to you under the GNU General Public # License as published by the Free Software Foundation; either version # 2 of the License (GPLv2) or (at your option) any later version. # There is NO WARRANTY for this software, express or implied, # including the implied warranties of MERCHANTABILITY, # NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should # have received a copy of GPLv2 along with this software; if not, see # http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. from unittest import TestCase from datetime import datetime from mock import patch from gofer.metrics import Timer, timestamp class TestUtils(TestCase): @patch('gofer.metrics.datetime') def test_timestamp(self, dt): dt.utcnow.return_value = datetime(2014, 12, 25, 9, 30, 0) ts = timestamp() self.assertEqual(ts, '2014-12-25T09:30:00Z') class
(TestCase): def test_init(self): t = Timer() self.assertEqual(t.started, 0) self.assertEqual(t.stopped, 0) @patch('time.time') def test_start(self, _time): _time.return_value = 10.0 t = Timer() t.start() self.assertEqual(t.started, 10.0) self.assertEqual(t.stopped, 0) @patch('time.time') def test_stop(self, _time): _time.return_value = 20.0 t = Timer() t.started = 10.0 t.stop() self.assertEqual(t.started, 10.0) self.assertEqual(t.stopped, 20.0) def test_duration(self): t = Timer() t.started = 10.0 t.stopped = 100.0 self.assertEqual(t.duration(), 90.0) def test_unicode(self): t = Timer() # not started self.assertEqual(unicode(t), 'not-running') # started but not stopped t.started = 1 self.assertEqual(unicode(t), 'started: %d (running)' % t.started) # milliseconds t.started = 0.10 t.stopped = 0.25 self.assertEqual(unicode(t), '150 (ms)') # seconds t.started = 10.0 t.stopped = 25.0 self.assertEqual(unicode(t), '15.000 (seconds)') # minutes t.started = 10.0 t.stopped = 100.0 self.assertEqual(unicode(t), '1.500 (minutes)') def test_str(self): t = Timer() # not started self.assertEqual(str(t), 'not-running') # started but not stopped t.started = 1 self.assertEqual(str(t), 'started: %d (running)' % t.started) # milliseconds t.started = 0.10 t.stopped = 0.25 self.assertEqual(str(t), '150 (ms)') # seconds t.started = 10.0 t.stopped = 25.0 self.assertEqual(str(t), '15.000 (seconds)') # minutes t.started = 10.0 t.stopped = 100.0 self.assertEqual(str(t), '1.500 (minutes)')
TestTimer
identifier_name
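The record above is an identifier_name sample: the middle is a single identifier, here the class name TestTimer. A sketch that locates such a target with Python's tokenize module (the choice of which occurrence to mask is an assumption):

import io
import tokenize

def split_identifier_name(source: str, name: str):
    """Return (prefix, middle, suffix) where the middle is the first
    occurrence of identifier `name` -- a sketch of an identifier_name sample."""
    lines = source.splitlines(keepends=True)
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        if tok.type == tokenize.NAME and tok.string == name:
            row, col = tok.start  # 1-based row, 0-based column
            start = sum(len(l) for l in lines[:row - 1]) + col
            return source[:start], name, source[start + len(name):]
    raise ValueError(f'identifier {name!r} not found')

prefix, middle, suffix = split_identifier_name('class TestTimer(TestCase):\n    pass\n', 'TestTimer')
assert prefix.endswith('class ') and suffix.startswith('(TestCase)')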
test_metrics.py
# Copyright (c) 2014 Red Hat, Inc. # # This software is licensed to you under the GNU General Public # License as published by the Free Software Foundation; either version # 2 of the License (GPLv2) or (at your option) any later version. # There is NO WARRANTY for this software, express or implied, # including the implied warranties of MERCHANTABILITY, # NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should # have received a copy of GPLv2 along with this software; if not, see # http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. from unittest import TestCase from datetime import datetime from mock import patch from gofer.metrics import Timer, timestamp class TestUtils(TestCase): @patch('gofer.metrics.datetime') def test_timestamp(self, dt): dt.utcnow.return_value = datetime(2014, 12, 25, 9, 30, 0) ts = timestamp() self.assertEqual(ts, '2014-12-25T09:30:00Z') class TestTimer(TestCase): def test_init(self): t = Timer() self.assertEqual(t.started, 0) self.assertEqual(t.stopped, 0) @patch('time.time') def test_start(self, _time): _time.return_value = 10.0 t = Timer() t.start() self.assertEqual(t.started, 10.0) self.assertEqual(t.stopped, 0) @patch('time.time') def test_stop(self, _time): _time.return_value = 20.0 t = Timer() t.started = 10.0 t.stop() self.assertEqual(t.started, 10.0) self.assertEqual(t.stopped, 20.0) def test_duration(self):
def test_unicode(self): t = Timer() # not started self.assertEqual(unicode(t), 'not-running') # started but not stopped t.started = 1 self.assertEqual(unicode(t), 'started: %d (running)' % t.started) # milliseconds t.started = 0.10 t.stopped = 0.25 self.assertEqual(unicode(t), '150 (ms)') # seconds t.started = 10.0 t.stopped = 25.0 self.assertEqual(unicode(t), '15.000 (seconds)') # minutes t.started = 10.0 t.stopped = 100.0 self.assertEqual(unicode(t), '1.500 (minutes)') def test_str(self): t = Timer() # not started self.assertEqual(str(t), 'not-running') # started but not stopped t.started = 1 self.assertEqual(str(t), 'started: %d (running)' % t.started) # milliseconds t.started = 0.10 t.stopped = 0.25 self.assertEqual(str(t), '150 (ms)') # seconds t.started = 10.0 t.stopped = 25.0 self.assertEqual(str(t), '15.000 (seconds)') # minutes t.started = 10.0 t.stopped = 100.0 self.assertEqual(str(t), '1.500 (minutes)')
t = Timer() t.started = 10.0 t.stopped = 100.0 self.assertEqual(t.duration(), 90.0)
identifier_body
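Taken together, the test_metrics.py records pin down gofer's Timer almost completely: started and stopped default to 0, start() and stop() sample time.time(), duration() is their difference, and the string form switches units at one second and one minute. The sketch below is reverse-engineered from those assertions, not gofer's actual implementation, and is ported to Python 3's __str__ (the originals also exercise a Python 2 unicode() form). The round() call is needed so that 0.25 - 0.10 renders as '150 (ms)' despite floating-point error.

import time

class Timer(object):
    """Minimal timer consistent with the assertions in test_metrics.py."""

    def __init__(self):
        self.started = 0
        self.stopped = 0

    def start(self):
        self.started = time.time()

    def stop(self):
        self.stopped = time.time()

    def duration(self):
        return self.stopped - self.started

    def __str__(self):
        if not self.started:
            return 'not-running'
        if not self.stopped:
            return 'started: %d (running)' % self.started
        d = self.duration()
        if d < 1:
            return '%d (ms)' % round(d * 1000)   # 0.15 s -> '150 (ms)'
        if d < 60:
            return '%.3f (seconds)' % d          # 15 s   -> '15.000 (seconds)'
        return '%.3f (minutes)' % (d / 60)       # 90 s   -> '1.500 (minutes)'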
test_metrics.py
# Copyright (c) 2014 Red Hat, Inc. # # This software is licensed to you under the GNU General Public # License as published by the Free Software Foundation; either version # 2 of the License (GPLv2) or (at your option) any later version. # There is NO WARRANTY for this software, express or implied, # including the implied warranties of MERCHANTABILITY, # NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should # have received a copy of GPLv2 along with this software; if not, see # http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. from unittest import TestCase from datetime import datetime from mock import patch from gofer.metrics import Timer, timestamp
class TestUtils(TestCase): @patch('gofer.metrics.datetime') def test_timestamp(self, dt): dt.utcnow.return_value = datetime(2014, 12, 25, 9, 30, 0) ts = timestamp() self.assertEqual(ts, '2014-12-25T09:30:00Z') class TestTimer(TestCase): def test_init(self): t = Timer() self.assertEqual(t.started, 0) self.assertEqual(t.stopped, 0) @patch('time.time') def test_start(self, _time): _time.return_value = 10.0 t = Timer() t.start() self.assertEqual(t.started, 10.0) self.assertEqual(t.stopped, 0) @patch('time.time') def test_stop(self, _time): _time.return_value = 20.0 t = Timer() t.started = 10.0 t.stop() self.assertEqual(t.started, 10.0) self.assertEqual(t.stopped, 20.0) def test_duration(self): t = Timer() t.started = 10.0 t.stopped = 100.0 self.assertEqual(t.duration(), 90.0) def test_unicode(self): t = Timer() # not started self.assertEqual(unicode(t), 'not-running') # started but not stopped t.started = 1 self.assertEqual(unicode(t), 'started: %d (running)' % t.started) # milliseconds t.started = 0.10 t.stopped = 0.25 self.assertEqual(unicode(t), '150 (ms)') # seconds t.started = 10.0 t.stopped = 25.0 self.assertEqual(unicode(t), '15.000 (seconds)') # minutes t.started = 10.0 t.stopped = 100.0 self.assertEqual(unicode(t), '1.500 (minutes)') def test_str(self): t = Timer() # not started self.assertEqual(str(t), 'not-running') # started but not stopped t.started = 1 self.assertEqual(str(t), 'started: %d (running)' % t.started) # milliseconds t.started = 0.10 t.stopped = 0.25 self.assertEqual(str(t), '150 (ms)') # seconds t.started = 10.0 t.stopped = 25.0 self.assertEqual(str(t), '15.000 (seconds)') # minutes t.started = 10.0 t.stopped = 100.0 self.assertEqual(str(t), '1.500 (minutes)')
random_line_split
core.externs.js
/** @externs */ /** * @externs * @suppress {duplicate,checkTypes} */ // NOTE: generated by tsickle, do not edit. // externs from /private/var/folders/7d/r6b3nrdj7bn9t_w_dclm6y9r00kg80/T/angular-release-latest.XXXXXXX.FjdRL2Ds/sandbox/darwin-sandbox/169/execroot/angular/packages/core/src/render3/interfaces/player.ts: /** @const */
/** @typedef {?} */ angular$packages$core$src$render3$interfaces$player.DirectiveInstance; // externs from /private/var/folders/7d/r6b3nrdj7bn9t_w_dclm6y9r00kg80/T/angular-release-latest.XXXXXXX.FjdRL2Ds/sandbox/darwin-sandbox/169/execroot/angular/packages/core/src/render3/util/global_utils.ts: /** @const */ var angular$packages$core$src$render3$util$global_utils = {}; /** @typedef {?} */ angular$packages$core$src$render3$util$global_utils.GlobalDevModeContainer; // externs from /private/var/folders/7d/r6b3nrdj7bn9t_w_dclm6y9r00kg80/T/angular-release-latest.XXXXXXX.FjdRL2Ds/sandbox/darwin-sandbox/169/execroot/angular/packages/core/src/linker/system_js_ng_module_factory_loader.ts: /** @const */ var angular$packages$core$src$linker$system_js_ng_module_factory_loader = {}; /** @type {?} */ var System; // externs from /private/var/folders/7d/r6b3nrdj7bn9t_w_dclm6y9r00kg80/T/angular-release-latest.XXXXXXX.FjdRL2Ds/sandbox/darwin-sandbox/169/execroot/angular/packages/core/src/testability/testability.ts: /** @const */ var angular$packages$core$src$testability$testability = {}; /** * @record * @struct */ angular$packages$core$src$testability$testability.PublicTestability = function() {}; /** * @return {?} */ angular$packages$core$src$testability$testability.PublicTestability.prototype.isStable = function() {}; /** * @param {?} callback * @param {?=} timeout * @param {?=} updateCallback * @return {?} */ angular$packages$core$src$testability$testability.PublicTestability.prototype.whenStable = function(callback, timeout, updateCallback) {}; /** * @param {?} using * @param {?} provider * @param {?} exactMatch * @return {?} */ angular$packages$core$src$testability$testability.PublicTestability.prototype.findProviders = function(using, provider, exactMatch) {};
var angular$packages$core$src$render3$interfaces$player = {}; /** @typedef {?} */ angular$packages$core$src$render3$interfaces$player.ComponentInstance;
random_line_split
view.js
(function () { // The default state core singleton for {@link SceneJS.View} nodes var defaultCore = { type:"view", stateId:SceneJS._baseStateId++, scissorTestEnabled:false }; var coreStack = []; var stackLen = 0; SceneJS_events.addListener( SceneJS_events.SCENE_COMPILING, function (params) { params.engine.display.view = defaultCore; stackLen = 0; }); /** * @class Scene graph node which configures view parameters such as depth range, scissor test and viewport * @extends SceneJS.Node * void depthRange(float zNear, float zFar) zNear: Clamped to the range 0 to 1 Must be <= zFar zFar: Clamped to the range 0 to 1. void scissor(int x, int y, long width, long height) void viewport(int x, int y, long width, long height) */ SceneJS.View = SceneJS_NodeFactory.createNodeType("view"); SceneJS.View.prototype._init = function (params) { if (params.scissorTestEnabled != undefined) { this.setScissorTestEnabled(params.scissorTestEnabled); } else if (this._core.useCount == 1)
}; /** * Enables or disables the scissor test. * * When enabled, the scissor test will discard fragments that are outside the scissor box. * * Scissor test is initially disabled. * * @param scissorTestEnabled Specifies whether scissor test is enabled or not * @return {*} */ SceneJS.View.prototype.setScissorTestEnabled = function (scissorTestEnabled) { if (this._core.scissorTestEnabled != scissorTestEnabled) { this._core.scissorTestEnabled = scissorTestEnabled; this._engine.display.imageDirty = true; } return this; }; /** * Get whether or not scissor test is enabled. * Initial value will be false. * * @return Boolean */ SceneJS.View.prototype.getScissorTestEnabled = function () { return this._core.scissorTestEnabled; }; SceneJS.View.prototype._compile = function (ctx) { this._engine.display.view = coreStack[stackLen++] = this._core; this._compileNodes(ctx); this._engine.display.view = (--stackLen > 0) ? coreStack[stackLen - 1] : defaultCore; }; })();
{ // This node defines the core this.setScissorTestEnabled(false); }
conditional_block
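The view.js record above is a conditional_block sample: the middle is the body of a branch, here the else-if arm of _init (elsewhere in this dump the same label also covers loop bodies, such as the ax2.barh loop in one of the plot_missing_values.py records further down). A sketch of carving out such a middle, assuming Python's ast module and simply taking the first branch or loop body found:

import ast

def split_conditional_block(source: str):
    """Return (prefix, middle, suffix) where the middle is the body of the
    first if/for/while found -- a sketch of a conditional_block FIM sample."""
    lines = source.splitlines(keepends=True)
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, (ast.If, ast.For, ast.While)):
            first, last = node.body[0], node.body[-1]
            start = sum(len(l) for l in lines[:first.lineno - 1]) + first.col_offset
            end = sum(len(l) for l in lines[:last.end_lineno - 1]) + last.end_col_offset
            return source[:start], source[start:end], source[end:]
    raise ValueError('no conditional or loop found')

src = 'if ready:\n    launch()\n    log()\nelse:\n    wait()\n'
prefix, middle, suffix = split_conditional_block(src)
assert middle == 'launch()\n    log()'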
view.js
(function () { // The default state core singleton for {@link SceneJS.View} nodes var defaultCore = { type:"view", stateId:SceneJS._baseStateId++, scissorTestEnabled:false }; var coreStack = []; var stackLen = 0; SceneJS_events.addListener( SceneJS_events.SCENE_COMPILING, function (params) { params.engine.display.view = defaultCore; stackLen = 0; }); /** * @class Scene graph node which configures view parameters such as depth range, scissor test and viewport * @extends SceneJS.Node * void depthRange(float zNear, float zFar) zNear: Clamped to the range 0 to 1 Must be <= zFar zFar: Clamped to the range 0 to 1. void scissor(int x, int y, long width, long height) void viewport(int x, int y, long width, long height) */ SceneJS.View = SceneJS_NodeFactory.createNodeType("view"); SceneJS.View.prototype._init = function (params) { if (params.scissorTestEnabled != undefined) { this.setScissorTestEnabled(params.scissorTestEnabled); } else if (this._core.useCount == 1) { // This node defines the core
this.setScissorTestEnabled(false); } }; /** * Enables or disables the scissor test. * * When enabled, the scissor test will discard fragments that are outside the scissor box. * * Scissor test is initially disabled. * * @param scissorTestEnabled Specifies whether scissor test is enabled or not * @return {*} */ SceneJS.View.prototype.setScissorTestEnabled = function (scissorTestEnabled) { if (this._core.scissorTestEnabled != scissorTestEnabled) { this._core.scissorTestEnabled = scissorTestEnabled; this._engine.display.imageDirty = true; } return this; }; /** * Get whether or not scissor test is enabled. * Initial value will be false. * * @return Boolean */ SceneJS.View.prototype.getScissorTestEnabled = function () { return this._core.scissorTestEnabled; }; SceneJS.View.prototype._compile = function (ctx) { this._engine.display.view = coreStack[stackLen++] = this._core; this._compileNodes(ctx); this._engine.display.view = (--stackLen > 0) ? coreStack[stackLen - 1] : defaultCore; }; })();
random_line_split
plot_missing_values.py
""" ==================================================== Imputing missing values before building an estimator ==================================================== Missing values can be replaced by the mean, the median or the most frequent value using the basic :class:`sklearn.impute.SimpleImputer`. The median is a more robust estimator for data with high magnitude variables which could dominate results (otherwise known as a 'long tail'). Another option is the :class:`sklearn.impute.IterativeImputer`. This uses round-robin linear regression, treating every variable as an output in turn. The version implemented assumes Gaussian (output) variables. If your features are obviously non-Normal, consider transforming them to look more Normal so as to potentially improve performance. In addition of using an imputing method, we can also keep an indication of the missing information using :func:`sklearn.impute.MissingIndicator` which might carry some information. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt # To use the experimental IterativeImputer, we need to explicitly ask for it: from sklearn.experimental import enable_iterative_imputer # noqa from sklearn.datasets import load_diabetes from sklearn.datasets import load_boston from sklearn.ensemble import RandomForestRegressor from sklearn.pipeline import make_pipeline, make_union from sklearn.impute import SimpleImputer, IterativeImputer, MissingIndicator from sklearn.model_selection import cross_val_score rng = np.random.RandomState(0) N_SPLITS = 5 REGRESSOR = RandomForestRegressor(random_state=0) def
(imputer, X_missing, y_missing): estimator = make_pipeline( make_union(imputer, MissingIndicator(missing_values=0)), REGRESSOR) impute_scores = cross_val_score(estimator, X_missing, y_missing, scoring='neg_mean_squared_error', cv=N_SPLITS) return impute_scores def get_results(dataset): X_full, y_full = dataset.data, dataset.target n_samples = X_full.shape[0] n_features = X_full.shape[1] # Estimate the score on the entire dataset, with no missing values full_scores = cross_val_score(REGRESSOR, X_full, y_full, scoring='neg_mean_squared_error', cv=N_SPLITS) # Add missing values in 75% of the lines missing_rate = 0.75 n_missing_samples = int(np.floor(n_samples * missing_rate)) missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples, dtype=np.bool), np.ones(n_missing_samples, dtype=np.bool))) rng.shuffle(missing_samples) missing_features = rng.randint(0, n_features, n_missing_samples) X_missing = X_full.copy() X_missing[np.where(missing_samples)[0], missing_features] = 0 y_missing = y_full.copy() # Estimate the score after replacing missing values by 0 imputer = SimpleImputer(missing_values=0, strategy='constant', fill_value=0) zero_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing) # Estimate the score after imputation (mean strategy) of the missing values imputer = SimpleImputer(missing_values=0, strategy="mean") mean_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing) # Estimate the score after iterative imputation of the missing values imputer = IterativeImputer(missing_values=0, random_state=0, n_nearest_features=5, sample_posterior=True) iterative_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing) return ((full_scores.mean(), full_scores.std()), (zero_impute_scores.mean(), zero_impute_scores.std()), (mean_impute_scores.mean(), mean_impute_scores.std()), (iterative_impute_scores.mean(), iterative_impute_scores.std())) results_diabetes = np.array(get_results(load_diabetes())) mses_diabetes = results_diabetes[:, 0] * -1 stds_diabetes = results_diabetes[:, 1] results_boston = np.array(get_results(load_boston())) mses_boston = results_boston[:, 0] * -1 stds_boston = results_boston[:, 1] n_bars = len(mses_diabetes) xval = np.arange(n_bars) x_labels = ['Full data', 'Zero imputation', 'Mean Imputation', 'Multivariate Imputation'] colors = ['r', 'g', 'b', 'orange'] # plot diabetes results plt.figure(figsize=(12, 6)) ax1 = plt.subplot(121) for j in xval: ax1.barh(j, mses_diabetes[j], xerr=stds_diabetes[j], color=colors[j], alpha=0.6, align='center') ax1.set_title('Imputation Techniques with Diabetes Data') ax1.set_xlim(left=np.min(mses_diabetes) * 0.9, right=np.max(mses_diabetes) * 1.1) ax1.set_yticks(xval) ax1.set_xlabel('MSE') ax1.invert_yaxis() ax1.set_yticklabels(x_labels) # plot boston results ax2 = plt.subplot(122) for j in xval: ax2.barh(j, mses_boston[j], xerr=stds_boston[j], color=colors[j], alpha=0.6, align='center') ax2.set_title('Imputation Techniques with Boston Data') ax2.set_yticks(xval) ax2.set_xlabel('MSE') ax2.invert_yaxis() ax2.set_yticklabels([''] * n_bars) plt.show()
get_scores_for_imputer
identifier_name
plot_missing_values.py
""" ==================================================== Imputing missing values before building an estimator ==================================================== Missing values can be replaced by the mean, the median or the most frequent value using the basic :class:`sklearn.impute.SimpleImputer`. The median is a more robust estimator for data with high magnitude variables which could dominate results (otherwise known as a 'long tail'). Another option is the :class:`sklearn.impute.IterativeImputer`. This uses round-robin linear regression, treating every variable as an output in turn. The version implemented assumes Gaussian (output) variables. If your features are obviously non-Normal, consider transforming them to look more Normal so as to potentially improve performance. In addition of using an imputing method, we can also keep an indication of the missing information using :func:`sklearn.impute.MissingIndicator` which might carry some information. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt # To use the experimental IterativeImputer, we need to explicitly ask for it: from sklearn.experimental import enable_iterative_imputer # noqa from sklearn.datasets import load_diabetes from sklearn.datasets import load_boston from sklearn.ensemble import RandomForestRegressor from sklearn.pipeline import make_pipeline, make_union from sklearn.impute import SimpleImputer, IterativeImputer, MissingIndicator from sklearn.model_selection import cross_val_score rng = np.random.RandomState(0) N_SPLITS = 5 REGRESSOR = RandomForestRegressor(random_state=0)
make_union(imputer, MissingIndicator(missing_values=0)), REGRESSOR) impute_scores = cross_val_score(estimator, X_missing, y_missing, scoring='neg_mean_squared_error', cv=N_SPLITS) return impute_scores def get_results(dataset): X_full, y_full = dataset.data, dataset.target n_samples = X_full.shape[0] n_features = X_full.shape[1] # Estimate the score on the entire dataset, with no missing values full_scores = cross_val_score(REGRESSOR, X_full, y_full, scoring='neg_mean_squared_error', cv=N_SPLITS) # Add missing values in 75% of the lines missing_rate = 0.75 n_missing_samples = int(np.floor(n_samples * missing_rate)) missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples, dtype=np.bool), np.ones(n_missing_samples, dtype=np.bool))) rng.shuffle(missing_samples) missing_features = rng.randint(0, n_features, n_missing_samples) X_missing = X_full.copy() X_missing[np.where(missing_samples)[0], missing_features] = 0 y_missing = y_full.copy() # Estimate the score after replacing missing values by 0 imputer = SimpleImputer(missing_values=0, strategy='constant', fill_value=0) zero_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing) # Estimate the score after imputation (mean strategy) of the missing values imputer = SimpleImputer(missing_values=0, strategy="mean") mean_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing) # Estimate the score after iterative imputation of the missing values imputer = IterativeImputer(missing_values=0, random_state=0, n_nearest_features=5, sample_posterior=True) iterative_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing) return ((full_scores.mean(), full_scores.std()), (zero_impute_scores.mean(), zero_impute_scores.std()), (mean_impute_scores.mean(), mean_impute_scores.std()), (iterative_impute_scores.mean(), iterative_impute_scores.std())) results_diabetes = np.array(get_results(load_diabetes())) mses_diabetes = results_diabetes[:, 0] * -1 stds_diabetes = results_diabetes[:, 1] results_boston = np.array(get_results(load_boston())) mses_boston = results_boston[:, 0] * -1 stds_boston = results_boston[:, 1] n_bars = len(mses_diabetes) xval = np.arange(n_bars) x_labels = ['Full data', 'Zero imputation', 'Mean Imputation', 'Multivariate Imputation'] colors = ['r', 'g', 'b', 'orange'] # plot diabetes results plt.figure(figsize=(12, 6)) ax1 = plt.subplot(121) for j in xval: ax1.barh(j, mses_diabetes[j], xerr=stds_diabetes[j], color=colors[j], alpha=0.6, align='center') ax1.set_title('Imputation Techniques with Diabetes Data') ax1.set_xlim(left=np.min(mses_diabetes) * 0.9, right=np.max(mses_diabetes) * 1.1) ax1.set_yticks(xval) ax1.set_xlabel('MSE') ax1.invert_yaxis() ax1.set_yticklabels(x_labels) # plot boston results ax2 = plt.subplot(122) for j in xval: ax2.barh(j, mses_boston[j], xerr=stds_boston[j], color=colors[j], alpha=0.6, align='center') ax2.set_title('Imputation Techniques with Boston Data') ax2.set_yticks(xval) ax2.set_xlabel('MSE') ax2.invert_yaxis() ax2.set_yticklabels([''] * n_bars) plt.show()
def get_scores_for_imputer(imputer, X_missing, y_missing): estimator = make_pipeline(
random_line_split
plot_missing_values.py
""" ==================================================== Imputing missing values before building an estimator ==================================================== Missing values can be replaced by the mean, the median or the most frequent value using the basic :class:`sklearn.impute.SimpleImputer`. The median is a more robust estimator for data with high magnitude variables which could dominate results (otherwise known as a 'long tail'). Another option is the :class:`sklearn.impute.IterativeImputer`. This uses round-robin linear regression, treating every variable as an output in turn. The version implemented assumes Gaussian (output) variables. If your features are obviously non-Normal, consider transforming them to look more Normal so as to potentially improve performance. In addition of using an imputing method, we can also keep an indication of the missing information using :func:`sklearn.impute.MissingIndicator` which might carry some information. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt # To use the experimental IterativeImputer, we need to explicitly ask for it: from sklearn.experimental import enable_iterative_imputer # noqa from sklearn.datasets import load_diabetes from sklearn.datasets import load_boston from sklearn.ensemble import RandomForestRegressor from sklearn.pipeline import make_pipeline, make_union from sklearn.impute import SimpleImputer, IterativeImputer, MissingIndicator from sklearn.model_selection import cross_val_score rng = np.random.RandomState(0) N_SPLITS = 5 REGRESSOR = RandomForestRegressor(random_state=0) def get_scores_for_imputer(imputer, X_missing, y_missing): estimator = make_pipeline( make_union(imputer, MissingIndicator(missing_values=0)), REGRESSOR) impute_scores = cross_val_score(estimator, X_missing, y_missing, scoring='neg_mean_squared_error', cv=N_SPLITS) return impute_scores def get_results(dataset): X_full, y_full = dataset.data, dataset.target n_samples = X_full.shape[0] n_features = X_full.shape[1] # Estimate the score on the entire dataset, with no missing values full_scores = cross_val_score(REGRESSOR, X_full, y_full, scoring='neg_mean_squared_error', cv=N_SPLITS) # Add missing values in 75% of the lines missing_rate = 0.75 n_missing_samples = int(np.floor(n_samples * missing_rate)) missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples, dtype=np.bool), np.ones(n_missing_samples, dtype=np.bool))) rng.shuffle(missing_samples) missing_features = rng.randint(0, n_features, n_missing_samples) X_missing = X_full.copy() X_missing[np.where(missing_samples)[0], missing_features] = 0 y_missing = y_full.copy() # Estimate the score after replacing missing values by 0 imputer = SimpleImputer(missing_values=0, strategy='constant', fill_value=0) zero_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing) # Estimate the score after imputation (mean strategy) of the missing values imputer = SimpleImputer(missing_values=0, strategy="mean") mean_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing) # Estimate the score after iterative imputation of the missing values imputer = IterativeImputer(missing_values=0, random_state=0, n_nearest_features=5, sample_posterior=True) iterative_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing) return ((full_scores.mean(), full_scores.std()), (zero_impute_scores.mean(), zero_impute_scores.std()), (mean_impute_scores.mean(), mean_impute_scores.std()), (iterative_impute_scores.mean(), iterative_impute_scores.std())) results_diabetes 
= np.array(get_results(load_diabetes())) mses_diabetes = results_diabetes[:, 0] * -1 stds_diabetes = results_diabetes[:, 1] results_boston = np.array(get_results(load_boston())) mses_boston = results_boston[:, 0] * -1 stds_boston = results_boston[:, 1] n_bars = len(mses_diabetes) xval = np.arange(n_bars) x_labels = ['Full data', 'Zero imputation', 'Mean Imputation', 'Multivariate Imputation'] colors = ['r', 'g', 'b', 'orange'] # plot diabetes results plt.figure(figsize=(12, 6)) ax1 = plt.subplot(121) for j in xval: ax1.barh(j, mses_diabetes[j], xerr=stds_diabetes[j], color=colors[j], alpha=0.6, align='center') ax1.set_title('Imputation Techniques with Diabetes Data') ax1.set_xlim(left=np.min(mses_diabetes) * 0.9, right=np.max(mses_diabetes) * 1.1) ax1.set_yticks(xval) ax1.set_xlabel('MSE') ax1.invert_yaxis() ax1.set_yticklabels(x_labels) # plot boston results ax2 = plt.subplot(122) for j in xval:
ax2.set_title('Imputation Techniques with Boston Data') ax2.set_yticks(xval) ax2.set_xlabel('MSE') ax2.invert_yaxis() ax2.set_yticklabels([''] * n_bars) plt.show()
ax2.barh(j, mses_boston[j], xerr=stds_boston[j], color=colors[j], alpha=0.6, align='center')
conditional_block
plot_missing_values.py
""" ==================================================== Imputing missing values before building an estimator ==================================================== Missing values can be replaced by the mean, the median or the most frequent value using the basic :class:`sklearn.impute.SimpleImputer`. The median is a more robust estimator for data with high magnitude variables which could dominate results (otherwise known as a 'long tail'). Another option is the :class:`sklearn.impute.IterativeImputer`. This uses round-robin linear regression, treating every variable as an output in turn. The version implemented assumes Gaussian (output) variables. If your features are obviously non-Normal, consider transforming them to look more Normal so as to potentially improve performance. In addition of using an imputing method, we can also keep an indication of the missing information using :func:`sklearn.impute.MissingIndicator` which might carry some information. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt # To use the experimental IterativeImputer, we need to explicitly ask for it: from sklearn.experimental import enable_iterative_imputer # noqa from sklearn.datasets import load_diabetes from sklearn.datasets import load_boston from sklearn.ensemble import RandomForestRegressor from sklearn.pipeline import make_pipeline, make_union from sklearn.impute import SimpleImputer, IterativeImputer, MissingIndicator from sklearn.model_selection import cross_val_score rng = np.random.RandomState(0) N_SPLITS = 5 REGRESSOR = RandomForestRegressor(random_state=0) def get_scores_for_imputer(imputer, X_missing, y_missing):
def get_results(dataset): X_full, y_full = dataset.data, dataset.target n_samples = X_full.shape[0] n_features = X_full.shape[1] # Estimate the score on the entire dataset, with no missing values full_scores = cross_val_score(REGRESSOR, X_full, y_full, scoring='neg_mean_squared_error', cv=N_SPLITS) # Add missing values in 75% of the lines missing_rate = 0.75 n_missing_samples = int(np.floor(n_samples * missing_rate)) missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples, dtype=np.bool), np.ones(n_missing_samples, dtype=np.bool))) rng.shuffle(missing_samples) missing_features = rng.randint(0, n_features, n_missing_samples) X_missing = X_full.copy() X_missing[np.where(missing_samples)[0], missing_features] = 0 y_missing = y_full.copy() # Estimate the score after replacing missing values by 0 imputer = SimpleImputer(missing_values=0, strategy='constant', fill_value=0) zero_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing) # Estimate the score after imputation (mean strategy) of the missing values imputer = SimpleImputer(missing_values=0, strategy="mean") mean_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing) # Estimate the score after iterative imputation of the missing values imputer = IterativeImputer(missing_values=0, random_state=0, n_nearest_features=5, sample_posterior=True) iterative_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing) return ((full_scores.mean(), full_scores.std()), (zero_impute_scores.mean(), zero_impute_scores.std()), (mean_impute_scores.mean(), mean_impute_scores.std()), (iterative_impute_scores.mean(), iterative_impute_scores.std())) results_diabetes = np.array(get_results(load_diabetes())) mses_diabetes = results_diabetes[:, 0] * -1 stds_diabetes = results_diabetes[:, 1] results_boston = np.array(get_results(load_boston())) mses_boston = results_boston[:, 0] * -1 stds_boston = results_boston[:, 1] n_bars = len(mses_diabetes) xval = np.arange(n_bars) x_labels = ['Full data', 'Zero imputation', 'Mean Imputation', 'Multivariate Imputation'] colors = ['r', 'g', 'b', 'orange'] # plot diabetes results plt.figure(figsize=(12, 6)) ax1 = plt.subplot(121) for j in xval: ax1.barh(j, mses_diabetes[j], xerr=stds_diabetes[j], color=colors[j], alpha=0.6, align='center') ax1.set_title('Imputation Techniques with Diabetes Data') ax1.set_xlim(left=np.min(mses_diabetes) * 0.9, right=np.max(mses_diabetes) * 1.1) ax1.set_yticks(xval) ax1.set_xlabel('MSE') ax1.invert_yaxis() ax1.set_yticklabels(x_labels) # plot boston results ax2 = plt.subplot(122) for j in xval: ax2.barh(j, mses_boston[j], xerr=stds_boston[j], color=colors[j], alpha=0.6, align='center') ax2.set_title('Imputation Techniques with Boston Data') ax2.set_yticks(xval) ax2.set_xlabel('MSE') ax2.invert_yaxis() ax2.set_yticklabels([''] * n_bars) plt.show()
estimator = make_pipeline( make_union(imputer, MissingIndicator(missing_values=0)), REGRESSOR) impute_scores = cross_val_score(estimator, X_missing, y_missing, scoring='neg_mean_squared_error', cv=N_SPLITS) return impute_scores
identifier_body
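A side note on the pipeline inside get_scores_for_imputer above: make_union stacks the imputer's filled-in matrix next to MissingIndicator's boolean mask, so the downstream regressor sees both the imputed values and where imputation happened. A toy run (assuming scikit-learn is installed) makes the resulting feature layout concrete; the tiny matrix is made up for illustration:

import numpy as np
from sklearn.impute import MissingIndicator, SimpleImputer
from sklearn.pipeline import make_union

# 0 marks a missing value, matching missing_values=0 in the record above.
X = np.array([[1.0, 0.0],
              [0.0, 3.0],
              [4.0, 5.0]])
union = make_union(SimpleImputer(missing_values=0, strategy='mean'),
                   MissingIndicator(missing_values=0))
print(union.fit_transform(X))
# Result (3 x 4): [[1. , 4. , 0., 1.], [2.5, 3. , 1., 0.], [4. , 5. , 0., 0.]]
# -- left two columns: imputed values; right two: 1.0 where a value was filled in.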
fmt.rs
#[macro_use] extern crate custom_derive; #[macro_use] extern crate newtype_derive; use std::fmt::{self, Binary, Debug, Display, LowerExp, LowerHex, Octal, Pointer, UpperExp, UpperHex}; macro_rules! impl_fmt { (impl $tr:ident for $name:ident: $msg:expr) => { impl $tr for $name { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { write!(fmt, $msg) } } }; } struct Dummy; impl_fmt!(impl Binary for Dummy: "binary"); impl_fmt!(impl Debug for Dummy: "debug"); impl_fmt!(impl Display for Dummy: "display"); impl_fmt!(impl LowerExp for Dummy: "lowerexp"); impl_fmt!(impl LowerHex for Dummy: "lowerhex"); impl_fmt!(impl Octal for Dummy: "octal"); impl_fmt!(impl Pointer for Dummy: "pointer"); impl_fmt!(impl UpperExp for Dummy: "upperexp"); impl_fmt!(impl UpperHex for Dummy: "upperhex"); custom_derive! { #[derive( NewtypeBinary, NewtypeDebug, NewtypeDisplay, NewtypeLowerExp, NewtypeLowerHex, NewtypeOctal,
NewtypePointer, NewtypeUpperExp, NewtypeUpperHex )] struct Wrapper(Dummy); } #[test] fn test_fmt() { let a = Wrapper(Dummy); assert_eq!(&*format!("{:b}", a), "binary"); assert_eq!(&*format!("{:?}", a), "debug"); assert_eq!(&*format!("{}", a), "display"); assert_eq!(&*format!("{:e}", a), "lowerexp"); assert_eq!(&*format!("{:x}", a), "lowerhex"); assert_eq!(&*format!("{:o}", a), "octal"); assert_eq!(&*format!("{:p}", a), "pointer"); assert_eq!(&*format!("{:E}", a), "upperexp"); assert_eq!(&*format!("{:X}", a), "upperhex"); }
random_line_split
fmt.rs
#[macro_use] extern crate custom_derive; #[macro_use] extern crate newtype_derive; use std::fmt::{self, Binary, Debug, Display, LowerExp, LowerHex, Octal, Pointer, UpperExp, UpperHex}; macro_rules! impl_fmt { (impl $tr:ident for $name:ident: $msg:expr) => { impl $tr for $name { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { write!(fmt, $msg) } } }; } struct
; impl_fmt!(impl Binary for Dummy: "binary"); impl_fmt!(impl Debug for Dummy: "debug"); impl_fmt!(impl Display for Dummy: "display"); impl_fmt!(impl LowerExp for Dummy: "lowerexp"); impl_fmt!(impl LowerHex for Dummy: "lowerhex"); impl_fmt!(impl Octal for Dummy: "octal"); impl_fmt!(impl Pointer for Dummy: "pointer"); impl_fmt!(impl UpperExp for Dummy: "upperexp"); impl_fmt!(impl UpperHex for Dummy: "upperhex"); custom_derive! { #[derive( NewtypeBinary, NewtypeDebug, NewtypeDisplay, NewtypeLowerExp, NewtypeLowerHex, NewtypeOctal, NewtypePointer, NewtypeUpperExp, NewtypeUpperHex )] struct Wrapper(Dummy); } #[test] fn test_fmt() { let a = Wrapper(Dummy); assert_eq!(&*format!("{:b}", a), "binary"); assert_eq!(&*format!("{:?}", a), "debug"); assert_eq!(&*format!("{}", a), "display"); assert_eq!(&*format!("{:e}", a), "lowerexp"); assert_eq!(&*format!("{:x}", a), "lowerhex"); assert_eq!(&*format!("{:o}", a), "octal"); assert_eq!(&*format!("{:p}", a), "pointer"); assert_eq!(&*format!("{:E}", a), "upperexp"); assert_eq!(&*format!("{:X}", a), "upperhex"); }
Dummy
identifier_name
criteo.js
var CONSTANTS = require('../constants.json'); var utils = require('../utils.js'); var bidfactory = require('../bidfactory.js'); var bidmanager = require('../bidmanager.js'); var adloader = require('../adloader'); /** * Adapter for requesting bids from Criteo. * * @returns {{callBids: _callBids}} * @constructor */ var CriteoAdapter = function CriteoAdapter() { var bids; function _callBids(params) { bids = params.bids || []; // Only make one request per "nid" _getUniqueNids(bids).forEach(_requestBid); } function _getUniqueNids(bids) { var key; var map = {}; var nids = []; bids.forEach(function(bid) { map[bid.params.nid] = bid; }); for (key in map) { if (map.hasOwnProperty(key))
} return nids; } function _requestBid(bid) { var varname = 'crtg_varname_' + bid.params.nid; var scriptUrl = '//rtax.criteo.com/delivery/rta/rta.js?netId=' + encodeURI(bid.params.nid) + '&cookieName=' + encodeURI(bid.params.cookiename) + '&rnd=' + Math.floor(Math.random() * 99999999999) + '&varName=' + encodeURI(varname); adloader.loadScript(scriptUrl, function(response) { var adResponse; var content = window[varname]; // Add a response for each bid matching the "nid" bids.forEach(function(existingBid) { if (existingBid.params.nid === bid.params.nid) { if (content) { adResponse = bidfactory.createBid(1); adResponse.bidderCode = 'criteo'; adResponse.keys = content.split(';'); } else { // Indicate an ad was not returned adResponse = bidfactory.createBid(2); adResponse.bidderCode = 'criteo'; } bidmanager.addBidResponse(existingBid.placementCode, adResponse); } }); }); } return { callBids: _callBids }; }; module.exports = CriteoAdapter;
{ nids.push(map[key]); }
conditional_block
criteo.js
var CONSTANTS = require('../constants.json'); var utils = require('../utils.js'); var bidfactory = require('../bidfactory.js'); var bidmanager = require('../bidmanager.js'); var adloader = require('../adloader'); /** * Adapter for requesting bids from Criteo. * * @returns {{callBids: _callBids}} * @constructor */ var CriteoAdapter = function CriteoAdapter() { var bids; function _callBids(params) { bids = params.bids || []; // Only make one request per "nid" _getUniqueNids(bids).forEach(_requestBid); } function _getUniqueNids(bids) { var key; var map = {}; var nids = []; bids.forEach(function(bid) { map[bid.params.nid] = bid; }); for (key in map) { if (map.hasOwnProperty(key)) { nids.push(map[key]);
function _requestBid(bid) { var varname = 'crtg_varname_' + bid.params.nid; var scriptUrl = '//rtax.criteo.com/delivery/rta/rta.js?netId=' + encodeURI(bid.params.nid) + '&cookieName=' + encodeURI(bid.params.cookiename) + '&rnd=' + Math.floor(Math.random() * 99999999999) + '&varName=' + encodeURI(varname); adloader.loadScript(scriptUrl, function(response) { var adResponse; var content = window[varname]; // Add a response for each bid matching the "nid" bids.forEach(function(existingBid) { if (existingBid.params.nid === bid.params.nid) { if (content) { adResponse = bidfactory.createBid(1); adResponse.bidderCode = 'criteo'; adResponse.keys = content.split(';'); } else { // Indicate an ad was not returned adResponse = bidfactory.createBid(2); adResponse.bidderCode = 'criteo'; } bidmanager.addBidResponse(existingBid.placementCode, adResponse); } }); }); } return { callBids: _callBids }; }; module.exports = CriteoAdapter;
} } return nids; }
random_line_split
criteo.js
var CONSTANTS = require('../constants.json'); var utils = require('../utils.js'); var bidfactory = require('../bidfactory.js'); var bidmanager = require('../bidmanager.js'); var adloader = require('../adloader'); /** * Adapter for requesting bids from Criteo. * * @returns {{callBids: _callBids}} * @constructor */ var CriteoAdapter = function CriteoAdapter() { var bids; function _callBids(params) { bids = params.bids || []; // Only make one request per "nid" _getUniqueNids(bids).forEach(_requestBid); } function _getUniqueNids(bids) { var key; var map = {}; var nids = []; bids.forEach(function(bid) { map[bid.params.nid] = bid; }); for (key in map) { if (map.hasOwnProperty(key)) { nids.push(map[key]); } } return nids; } function _requestBid(bid)
// Indicate an ad was not returned adResponse = bidfactory.createBid(2); adResponse.bidderCode = 'criteo'; } bidmanager.addBidResponse(existingBid.placementCode, adResponse); } }); }); } return { callBids: _callBids }; }; module.exports = CriteoAdapter;
{ var varname = 'crtg_varname_' + bid.params.nid; var scriptUrl = '//rtax.criteo.com/delivery/rta/rta.js?netId=' + encodeURI(bid.params.nid) + '&cookieName=' + encodeURI(bid.params.cookiename) + '&rnd=' + Math.floor(Math.random() * 99999999999) + '&varName=' + encodeURI(varname); adloader.loadScript(scriptUrl, function(response) { var adResponse; var content = window[varname]; // Add a response for each bid matching the "nid" bids.forEach(function(existingBid) { if (existingBid.params.nid === bid.params.nid) { if (content) { adResponse = bidfactory.createBid(1); adResponse.bidderCode = 'criteo'; adResponse.keys = content.split(';'); } else {
identifier_body
criteo.js
var CONSTANTS = require('../constants.json'); var utils = require('../utils.js'); var bidfactory = require('../bidfactory.js'); var bidmanager = require('../bidmanager.js'); var adloader = require('../adloader'); /** * Adapter for requesting bids from Criteo. * * @returns {{callBids: _callBids}} * @constructor */ var CriteoAdapter = function CriteoAdapter() { var bids; function _callBids(params) { bids = params.bids || []; // Only make one request per "nid" _getUniqueNids(bids).forEach(_requestBid); } function
(bids) { var key; var map = {}; var nids = []; bids.forEach(function(bid) { map[bid.params.nid] = bid; }); for (key in map) { if (map.hasOwnProperty(key)) { nids.push(map[key]); } } return nids; } function _requestBid(bid) { var varname = 'crtg_varname_' + bid.params.nid; var scriptUrl = '//rtax.criteo.com/delivery/rta/rta.js?netId=' + encodeURI(bid.params.nid) + '&cookieName=' + encodeURI(bid.params.cookiename) + '&rnd=' + Math.floor(Math.random() * 99999999999) + '&varName=' + encodeURI(varname); adloader.loadScript(scriptUrl, function(response) { var adResponse; var content = window[varname]; // Add a response for each bid matching the "nid" bids.forEach(function(existingBid) { if (existingBid.params.nid === bid.params.nid) { if (content) { adResponse = bidfactory.createBid(1); adResponse.bidderCode = 'criteo'; adResponse.keys = content.split(';'); } else { // Indicate an ad was not returned adResponse = bidfactory.createBid(2); adResponse.bidderCode = 'criteo'; } bidmanager.addBidResponse(existingBid.placementCode, adResponse); } }); }); } return { callBids: _callBids }; }; module.exports = CriteoAdapter;
_getUniqueNids
identifier_name
20210319-tbd-first-round.js
/* eslint no-magic-numbers:0 */ "use strict" const test = require("tape") const utils = require("../utils") const testEvents = (t) => (err, events) => { t.notOk(err) t.equal(events[0].region, "nba finals") t.end() } test("works with tbd first round", (t) => {
t.equal(events.length, 16) t.end() }) }) test.skip("works with tbd first round", (t) => { utils.parseUrl( "https://www.espn.com/mens-college-basketball/scoreboard/_/group/100/date/20210319", (err, events) => { t.equal(events.length, 16) t.end() } ) })
utils.parseFile("20210319-tbd-first-round", (err, events) => {
random_line_split
ManualEntryPanel.spec.tsx
import React from 'react'; import { act } from 'react-dom/test-utils'; import { mount } from 'enzyme'; import { RcThemeProvider } from '@ringcentral-integration/rcui'; import { ManualEntryPanel, ManualEntryPanelProps } from './ManualEntryPanel'; let wrapper; const currentLocale = 'en-US'; const defaultTransferCountryOptions = [ { countryId: 'CAN', countryName: 'Canada' }, { countryId: 'FRA', countryName: 'France' }, { countryId: 'GER', countryName: 'Germany' }, { countryId: 'MEX', countryName: 'Mexico' }, { countryId: 'MTQ', countryName: 'Martinique' }, { countryId: 'USA', countryName: 'US' }, { countryId: 'USX', countryName: 'US Extended' }, ]; function setup({ goBack = () => {}, transferRecipientCountryId = 'USA', changeRecipientNumber = () => {}, changeRecipientCountryId = () => {}, transferRecipientNumber = '6508653454', allowManualInternationalTransfer = false, }: Partial<ManualEntryPanelProps>) { return mount( <RcThemeProvider> <ManualEntryPanel currentLocale={currentLocale} goBack={goBack} transferRecipientCountryId={transferRecipientCountryId} changeRecipientNumber={changeRecipientNumber} changeRecipientCountryId={changeRecipientCountryId} transferCountryOptions={defaultTransferCountryOptions} transferRecipientNumber={transferRecipientNumber} allowManualInternationalTransfer={allowManualInternationalTransfer} /> </RcThemeProvider>, ); } afterEach(async () => { wrapper.unmount();
describe('<ManualEntryPanel />', async () => { it('Display Back Button and when user click it, function goBack will be called', () => { const goBack = jest.fn(() => {}); wrapper = setup({ goBack }); wrapper .find('[data-sign="backButton"]') .at(0) .find('button') .simulate('click'); expect(goBack).toBeCalled(); }); it('Display Next Button and when user click it, function changeRecipientNumber will be called', () => { const changeRecipientNumber = jest.fn(() => {}); wrapper = setup({ changeRecipientNumber }); const userInput = '343535435'; const eventObj = { target: { value: userInput } }; wrapper .find('RecipientsInput') .at(0) .find('input') .simulate('change', eventObj); wrapper .find('[data-sign="nextButton"]') .at(0) .find('button') .simulate('click'); expect(changeRecipientNumber).toBeCalledWith(userInput); }); });
});
random_line_split
ManualEntryPanel.spec.tsx
import React from 'react'; import { act } from 'react-dom/test-utils'; import { mount } from 'enzyme'; import { RcThemeProvider } from '@ringcentral-integration/rcui'; import { ManualEntryPanel, ManualEntryPanelProps } from './ManualEntryPanel'; let wrapper; const currentLocale = 'en-US'; const defaultTransferCountryOptions = [ { countryId: 'CAN', countryName: 'Canada' }, { countryId: 'FRA', countryName: 'France' }, { countryId: 'GER', countryName: 'Germany' }, { countryId: 'MEX', countryName: 'Mexico' }, { countryId: 'MTQ', countryName: 'Martinique' }, { countryId: 'USA', countryName: 'US' }, { countryId: 'USX', countryName: 'US Extended' }, ]; function setup({ goBack = () => {}, transferRecipientCountryId = 'USA', changeRecipientNumber = () => {}, changeRecipientCountryId = () => {}, transferRecipientNumber = '6508653454', allowManualInternationalTransfer = false, }: Partial<ManualEntryPanelProps>)
afterEach(async () => { wrapper.unmount(); }); describe('<ManualEntryPanel />', async () => { it('Display Back Button and when user click it, function goBack will be called', () => { const goBack = jest.fn(() => {}); wrapper = setup({ goBack }); wrapper .find('[data-sign="backButton"]') .at(0) .find('button') .simulate('click'); expect(goBack).toBeCalled(); }); it('Display Next Button and when user click it, function changeRecipientNumber will be called', () => { const changeRecipientNumber = jest.fn(() => {}); wrapper = setup({ changeRecipientNumber }); const userInput = '343535435'; const eventObj = { target: { value: userInput } }; wrapper .find('RecipientsInput') .at(0) .find('input') .simulate('change', eventObj); wrapper .find('[data-sign="nextButton"]') .at(0) .find('button') .simulate('click'); expect(changeRecipientNumber).toBeCalledWith(userInput); }); });
{ return mount( <RcThemeProvider> <ManualEntryPanel currentLocale={currentLocale} goBack={goBack} transferRecipientCountryId={transferRecipientCountryId} changeRecipientNumber={changeRecipientNumber} changeRecipientCountryId={changeRecipientCountryId} transferCountryOptions={defaultTransferCountryOptions} transferRecipientNumber={transferRecipientNumber} allowManualInternationalTransfer={allowManualInternationalTransfer} /> </RcThemeProvider>, ); }
identifier_body
ManualEntryPanel.spec.tsx
import React from 'react'; import { act } from 'react-dom/test-utils'; import { mount } from 'enzyme'; import { RcThemeProvider } from '@ringcentral-integration/rcui'; import { ManualEntryPanel, ManualEntryPanelProps } from './ManualEntryPanel'; let wrapper; const currentLocale = 'en-US'; const defaultTransferCountryOptions = [ { countryId: 'CAN', countryName: 'Canada' }, { countryId: 'FRA', countryName: 'France' }, { countryId: 'GER', countryName: 'Germany' }, { countryId: 'MEX', countryName: 'Mexico' }, { countryId: 'MTQ', countryName: 'Martinique' }, { countryId: 'USA', countryName: 'US' }, { countryId: 'USX', countryName: 'US Extended' }, ]; function
({ goBack = () => {}, transferRecipientCountryId = 'USA', changeRecipientNumber = () => {}, changeRecipientCountryId = () => {}, transferRecipientNumber = '6508653454', allowManualInternationalTransfer = false, }: Partial<ManualEntryPanelProps>) { return mount( <RcThemeProvider> <ManualEntryPanel currentLocale={currentLocale} goBack={goBack} transferRecipientCountryId={transferRecipientCountryId} changeRecipientNumber={changeRecipientNumber} changeRecipientCountryId={changeRecipientCountryId} transferCountryOptions={defaultTransferCountryOptions} transferRecipientNumber={transferRecipientNumber} allowManualInternationalTransfer={allowManualInternationalTransfer} /> </RcThemeProvider>, ); } afterEach(async () => { wrapper.unmount(); }); describe('<ManualEntryPanel />', async () => { it('Display Back Button and when user click it, function goBack will be called', () => { const goBack = jest.fn(() => {}); wrapper = setup({ goBack }); wrapper .find('[data-sign="backButton"]') .at(0) .find('button') .simulate('click'); expect(goBack).toBeCalled(); }); it('Display Next Button and when user click it, function changeRecipientNumber will be called', () => { const changeRecipientNumber = jest.fn(() => {}); wrapper = setup({ changeRecipientNumber }); const userInput = '343535435'; const eventObj = { target: { value: userInput } }; wrapper .find('RecipientsInput') .at(0) .find('input') .simulate('change', eventObj); wrapper .find('[data-sign="nextButton"]') .at(0) .find('button') .simulate('click'); expect(changeRecipientNumber).toBeCalledWith(userInput); }); });
setup
identifier_name
list.tsx
import React, { useRef, useState } from 'react'; import BodyContent from '@/layouts/compents/body_content'; import { Avatar, message } from 'antd'; import { formatTimestamp } from '@/utils/utils'; import { searchAdmins, lockAdmin, setAdminType } from './service'; import { AdminInfo } from './data_d'; import { PlusOutlined, LockOutlined, UnlockOutlined } from '@ant-design/icons'; import AddAdmin from './compents/add_admin'; import { FuncCodes } from '@/utils/resp_d'; import { FormItemFactoryType, FormItemFactoryProps } from '@/components/form/form_item_factory'; import SearchTable, { getTextFromTableStatus, SearchTableAction, } from '@/components/search/search_table'; import SearchForm from '@/components/search/search_form'; import TableFetchButtons from '@/components/button/table_Fetch_buttons'; import AccessButton from '@/components/button/access_button'; const AdminList: React.FC<{}> = () => { const tableRef = useRef<SearchTableAction>(); // const [formRef] = Form.useForm(); const [addAdminVis, setAddAdminVis] = useState(false); const searchFormItems: FormItemFactoryProps[] = [ { type: FormItemFactoryType.input, label: '名称', name: 'admin_name', }, ]; const tableStatus = [ { label: '正常', value: '0' }, { label: '待绑定', value: '-20' }, { label: '已锁定', value: '-100' }, { label: '全部', value: '-999' }, ]; const statusButtons = [ { condition: (r: AdminInfo) => r.status >= -20, buttons: [ { btn_text: '锁定', fetch: (item: AdminInfo) => lockAdmin(item, true), fetch_desp: '锁定当前管理员', func_code: FuncCodes.Portal_AdminLock, icon: <LockOutlined />, }, ], }, { condition: (r: AdminInfo) => r.status == -100, buttons: [ { btn_text: '解锁', fetch: (item: AdminInfo) => lockAdmin(item, false), fetch_desp: '解锁当前管理员', func_code: FuncCodes.Portal_AdminUnLock, icon: <UnlockOutlined />, },
buttons: [ { btn_text: '取消超管权限', fetch: (item: AdminInfo) => setAdminType(item, 0), fetch_desp: '将超级管理员降为普通管理员', func_code: FuncCodes.Portal_AdminSetType, // icon: <settt />, }, ], }, { condition: (r: AdminInfo) => r.admin_type == 0, buttons: [ { btn_text: '设置超管权限', fetch: (item: AdminInfo) => setAdminType(item, 100), fetch_desp: '设置当前管理员为超级管理员', func_code: FuncCodes.Portal_AdminSetType, // icon: <settt />, }, ], }, ]; const tableColumns = [ { title: '头像', hideInSearch: true, dataIndex: 'avatar', render: (_: any, record: AdminInfo) => ( <a> <Avatar src={record.avatar + '/s100'} size={60} /> </a> ), }, { title: '名称', dataIndex: 'admin_name', }, { title: '创建时间', dataIndex: 'add_time', render: (_: any, record: AdminInfo) => formatTimestamp(record.add_time), }, { title: '状态', dataIndex: 'status', render: (v: any, _: any) => getTextFromTableStatus(v, tableStatus), }, { title: '操作', dataIndex: 'id', render: (_: any, r: AdminInfo) => ( <TableFetchButtons record={r} callback={(res, item, aName) => { if (res.is_ok) tableRef.current?.refresh(); }} fetchKey={(item) => item.id} condition_buttons={statusButtons} ></TableFetchButtons> ), }, ]; const defaultFilter = { status: '0' }; return ( <BodyContent> <SearchForm items={searchFormItems} // form={formRef} initialValues={defaultFilter} onFinish={(vals) => { tableRef.current?.reload(vals); }} top_radios={{ name: 'status', options: tableStatus, }} > <AccessButton type="primary" onClick={() => { setAddAdminVis(true); }} func_code={FuncCodes.Portal_AdminCreate} > <PlusOutlined /> 新增管理员 </AccessButton> </SearchForm> <SearchTable<AdminInfo> rowKey="id" columns={tableColumns} search_table_ref={tableRef} search_fetch={searchAdmins} default_filters={defaultFilter} /> <AddAdmin visible={addAdminVis} onClose={() => { setAddAdminVis(false); }} callback={(res) => { if (res.is_failed) { message.error(res.msg); return; } setAddAdminVis(false); tableRef.current?.refresh(true); }} /> </BodyContent> ); }; export default AdminList;
], }, { condition: (r: AdminInfo) => r.admin_type == 100,
random_line_split
sjisprober.rs
use std::ops::Deref; use std::ops::DerefMut; use super::enums::MachineState; use super::mbcharsetprober::MultiByteCharsetProber; use super::charsetprober::CharsetProber; use super::enums::ProbingState; use super::codingstatemachine::CodingStateMachine; use super::mbcssm::SJIS_SM_MODEL; use super::chardistribution::SJISDistributionAnalysis; use super::jpcntx::{JapaneseContextAnalysis, SJISContextAnalysis}; pub struct SJISProber<'a> { base: MultiByteCharsetProber<'a>, m_context_analyzer: SJISContextAnalysis, } impl<'x> Deref for SJISProber<'x> { type Target = MultiByteCharsetProber<'x>; fn deref<'a>(&'a self) -> &'a MultiByteCharsetProber<'x> { &self.base } } impl<'x> DerefMut for SJISProber<'x> { fn deref_mut<'a>(&'a mut self) -> &'a mut MultiByteCharsetProber<'x> { &mut self.base } } impl<'a> CharsetProber for SJISProber<'a> { fn reset(&mut self) { self.base.reset(); self.m_context_analyzer.reset(); } fn feed(&mut self, byte_str: &[u8]) -> &ProbingState { { let sm = self.base.m_coding_sm.as_mut().unwrap(); let da = self.base.m_distribution_analyzer.as_mut().unwrap(); for i in 0..byte_str.len() { match sm.next_state(byte_str[i]) { MachineState::START =>
MachineState::ERROR => { self.base.m_state = ProbingState::NotMe; break; } MachineState::ITS_ME => { self.base.m_state = ProbingState::FoundIt; break; } _ => {} } } } self.base.m_last_char[0] = byte_str[byte_str.len() - 1]; if self.base.m_state == ProbingState::Detecting { if (self.m_context_analyzer.got_enough_data()) && (self.get_confidence() > 0.95) { self.base.m_state = ProbingState::FoundIt; } } &self.base.m_state } fn get_charset(&self) -> String { self.m_context_analyzer.get_charset() } fn get_confidence(&self) -> f32 { let a = self.base.get_confidence(); let b = self.m_context_analyzer.get_confidence(); if a>b { a } else { b } } fn get_language(&self) -> String { "Japanese".to_string() } fn get_state(&self) -> &ProbingState { self.base.get_state() } } impl<'a> SJISProber<'a> { pub fn new() -> SJISProber<'a> { let mut x = SJISProber { base: MultiByteCharsetProber::new(), m_context_analyzer: SJISContextAnalysis::new(), }; x.base.m_coding_sm = Some(CodingStateMachine::new(&SJIS_SM_MODEL)); x.base.m_distribution_analyzer = Some(Box::new(SJISDistributionAnalysis::new())); x } }
{ let char_len = sm.get_current_charlen(); if i == 0 { self.base.m_last_char[1] = byte_str[0]; self.m_context_analyzer.feed( &self.base.m_last_char[(2 - char_len) as usize..], char_len as usize, ); da.feed(&self.base.m_last_char[..], char_len); } else { self.m_context_analyzer.feed( &byte_str[i + 1 - char_len as usize..], char_len as usize, ); da.feed(&byte_str[i - 1..i + 1], char_len); } }
conditional_block
sjisprober.rs
use std::ops::Deref; use std::ops::DerefMut; use super::enums::MachineState; use super::mbcharsetprober::MultiByteCharsetProber; use super::charsetprober::CharsetProber; use super::enums::ProbingState; use super::codingstatemachine::CodingStateMachine; use super::mbcssm::SJIS_SM_MODEL; use super::chardistribution::SJISDistributionAnalysis; use super::jpcntx::{JapaneseContextAnalysis, SJISContextAnalysis}; pub struct SJISProber<'a> { base: MultiByteCharsetProber<'a>, m_context_analyzer: SJISContextAnalysis, } impl<'x> Deref for SJISProber<'x> { type Target = MultiByteCharsetProber<'x>; fn deref<'a>(&'a self) -> &'a MultiByteCharsetProber<'x> { &self.base } } impl<'x> DerefMut for SJISProber<'x> { fn deref_mut<'a>(&'a mut self) -> &'a mut MultiByteCharsetProber<'x> { &mut self.base } } impl<'a> CharsetProber for SJISProber<'a> { fn reset(&mut self) { self.base.reset(); self.m_context_analyzer.reset(); } fn feed(&mut self, byte_str: &[u8]) -> &ProbingState { { let sm = self.base.m_coding_sm.as_mut().unwrap(); let da = self.base.m_distribution_analyzer.as_mut().unwrap(); for i in 0..byte_str.len() { match sm.next_state(byte_str[i]) { MachineState::START => { let char_len = sm.get_current_charlen(); if i == 0 { self.base.m_last_char[1] = byte_str[0]; self.m_context_analyzer.feed( &self.base.m_last_char[(2 - char_len) as usize..], char_len as usize, ); da.feed(&self.base.m_last_char[..], char_len); } else { self.m_context_analyzer.feed( &byte_str[i + 1 - char_len as usize..], char_len as usize, ); da.feed(&byte_str[i - 1..i + 1], char_len); } } MachineState::ERROR => { self.base.m_state = ProbingState::NotMe; break; } MachineState::ITS_ME => { self.base.m_state = ProbingState::FoundIt; break; } _ => {} } } } self.base.m_last_char[0] = byte_str[byte_str.len() - 1]; if self.base.m_state == ProbingState::Detecting { if (self.m_context_analyzer.got_enough_data()) && (self.get_confidence() > 0.95) { self.base.m_state = ProbingState::FoundIt; } } &self.base.m_state } fn get_charset(&self) -> String { self.m_context_analyzer.get_charset() } fn get_confidence(&self) -> f32 { let a = self.base.get_confidence(); let b = self.m_context_analyzer.get_confidence(); if a>b { a } else { b } } fn get_language(&self) -> String {
} } impl<'a> SJISProber<'a> { pub fn new() -> SJISProber<'a> { let mut x = SJISProber { base: MultiByteCharsetProber::new(), m_context_analyzer: SJISContextAnalysis::new(), }; x.base.m_coding_sm = Some(CodingStateMachine::new(&SJIS_SM_MODEL)); x.base.m_distribution_analyzer = Some(Box::new(SJISDistributionAnalysis::new())); x } }
"Japanese".to_string() } fn get_state(&self) -> &ProbingState { self.base.get_state()
random_line_split
sjisprober.rs
use std::ops::Deref; use std::ops::DerefMut; use super::enums::MachineState; use super::mbcharsetprober::MultiByteCharsetProber; use super::charsetprober::CharsetProber; use super::enums::ProbingState; use super::codingstatemachine::CodingStateMachine; use super::mbcssm::SJIS_SM_MODEL; use super::chardistribution::SJISDistributionAnalysis; use super::jpcntx::{JapaneseContextAnalysis, SJISContextAnalysis}; pub struct SJISProber<'a> { base: MultiByteCharsetProber<'a>, m_context_analyzer: SJISContextAnalysis, } impl<'x> Deref for SJISProber<'x> { type Target = MultiByteCharsetProber<'x>; fn deref<'a>(&'a self) -> &'a MultiByteCharsetProber<'x> { &self.base } } impl<'x> DerefMut for SJISProber<'x> { fn deref_mut<'a>(&'a mut self) -> &'a mut MultiByteCharsetProber<'x> { &mut self.base } } impl<'a> CharsetProber for SJISProber<'a> { fn reset(&mut self) { self.base.reset(); self.m_context_analyzer.reset(); } fn feed(&mut self, byte_str: &[u8]) -> &ProbingState { { let sm = self.base.m_coding_sm.as_mut().unwrap(); let da = self.base.m_distribution_analyzer.as_mut().unwrap(); for i in 0..byte_str.len() { match sm.next_state(byte_str[i]) { MachineState::START => { let char_len = sm.get_current_charlen(); if i == 0 { self.base.m_last_char[1] = byte_str[0]; self.m_context_analyzer.feed( &self.base.m_last_char[(2 - char_len) as usize..], char_len as usize, ); da.feed(&self.base.m_last_char[..], char_len); } else { self.m_context_analyzer.feed( &byte_str[i + 1 - char_len as usize..], char_len as usize, ); da.feed(&byte_str[i - 1..i + 1], char_len); } } MachineState::ERROR => { self.base.m_state = ProbingState::NotMe; break; } MachineState::ITS_ME => { self.base.m_state = ProbingState::FoundIt; break; } _ => {} } } } self.base.m_last_char[0] = byte_str[byte_str.len() - 1]; if self.base.m_state == ProbingState::Detecting { if (self.m_context_analyzer.got_enough_data()) && (self.get_confidence() > 0.95) { self.base.m_state = ProbingState::FoundIt; } } &self.base.m_state } fn get_charset(&self) -> String { self.m_context_analyzer.get_charset() } fn get_confidence(&self) -> f32 { let a = self.base.get_confidence(); let b = self.m_context_analyzer.get_confidence(); if a>b { a } else { b } } fn get_language(&self) -> String { "Japanese".to_string() } fn get_state(&self) -> &ProbingState
} impl<'a> SJISProber<'a> { pub fn new() -> SJISProber<'a> { let mut x = SJISProber { base: MultiByteCharsetProber::new(), m_context_analyzer: SJISContextAnalysis::new(), }; x.base.m_coding_sm = Some(CodingStateMachine::new(&SJIS_SM_MODEL)); x.base.m_distribution_analyzer = Some(Box::new(SJISDistributionAnalysis::new())); x } }
{ self.base.get_state() }
identifier_body
sjisprober.rs
use std::ops::Deref; use std::ops::DerefMut; use super::enums::MachineState; use super::mbcharsetprober::MultiByteCharsetProber; use super::charsetprober::CharsetProber; use super::enums::ProbingState; use super::codingstatemachine::CodingStateMachine; use super::mbcssm::SJIS_SM_MODEL; use super::chardistribution::SJISDistributionAnalysis; use super::jpcntx::{JapaneseContextAnalysis, SJISContextAnalysis}; pub struct SJISProber<'a> { base: MultiByteCharsetProber<'a>, m_context_analyzer: SJISContextAnalysis, } impl<'x> Deref for SJISProber<'x> { type Target = MultiByteCharsetProber<'x>; fn deref<'a>(&'a self) -> &'a MultiByteCharsetProber<'x> { &self.base } } impl<'x> DerefMut for SJISProber<'x> { fn
<'a>(&'a mut self) -> &'a mut MultiByteCharsetProber<'x> { &mut self.base } } impl<'a> CharsetProber for SJISProber<'a> { fn reset(&mut self) { self.base.reset(); self.m_context_analyzer.reset(); } fn feed(&mut self, byte_str: &[u8]) -> &ProbingState { { let sm = self.base.m_coding_sm.as_mut().unwrap(); let da = self.base.m_distribution_analyzer.as_mut().unwrap(); for i in 0..byte_str.len() { match sm.next_state(byte_str[i]) { MachineState::START => { let char_len = sm.get_current_charlen(); if i == 0 { self.base.m_last_char[1] = byte_str[0]; self.m_context_analyzer.feed( &self.base.m_last_char[(2 - char_len) as usize..], char_len as usize, ); da.feed(&self.base.m_last_char[..], char_len); } else { self.m_context_analyzer.feed( &byte_str[i + 1 - char_len as usize..], char_len as usize, ); da.feed(&byte_str[i - 1..i + 1], char_len); } } MachineState::ERROR => { self.base.m_state = ProbingState::NotMe; break; } MachineState::ITS_ME => { self.base.m_state = ProbingState::FoundIt; break; } _ => {} } } } self.base.m_last_char[0] = byte_str[byte_str.len() - 1]; if self.base.m_state == ProbingState::Detecting { if (self.m_context_analyzer.got_enough_data()) && (self.get_confidence() > 0.95) { self.base.m_state = ProbingState::FoundIt; } } &self.base.m_state } fn get_charset(&self) -> String { self.m_context_analyzer.get_charset() } fn get_confidence(&self) -> f32 { let a = self.base.get_confidence(); let b = self.m_context_analyzer.get_confidence(); if a>b { a } else { b } } fn get_language(&self) -> String { "Japanese".to_string() } fn get_state(&self) -> &ProbingState { self.base.get_state() } } impl<'a> SJISProber<'a> { pub fn new() -> SJISProber<'a> { let mut x = SJISProber { base: MultiByteCharsetProber::new(), m_context_analyzer: SJISContextAnalysis::new(), }; x.base.m_coding_sm = Some(CodingStateMachine::new(&SJIS_SM_MODEL)); x.base.m_distribution_analyzer = Some(Box::new(SJISDistributionAnalysis::new())); x } }
deref_mut
identifier_name
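The four sjisprober.rs records above all split the same feed loop: bytes run through a coding state machine, and the prober reacts to a few distinguished states. A language-agnostic Python sketch of that control flow; the state names mirror the Rust enum, while `next_state` is a caller-supplied stand-in rather than chardet's actual API:

```python
# Hedged sketch of the prober's feed loop: ERROR means the byte stream cannot
# be this charset, ITS_ME means it definitely is, and anything else keeps
# scanning. Illustrative only; not the rust-chardet implementation.
from enum import Enum, auto

class MachineState(Enum):
    START = auto()
    ERROR = auto()
    ITS_ME = auto()

def feed(byte_str: bytes, next_state) -> str:
    for b in byte_str:
        state = next_state(b)
        if state is MachineState.ERROR:
            return "not_me"    # impossible sequence for this encoding
        if state is MachineState.ITS_ME:
            return "found_it"  # unambiguous marker sequence
        # Any other state continues the scan; on START (a completed character)
        # the Rust version also feeds its context/distribution analysers.
    return "detecting"
```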
artsyXapp.ts
import artsyXapp from "@artsy/xapp" const { API_URL, CLIENT_ID, CLIENT_SECRET } = process.env export function initializeArtsyXapp(startServerCallback) { console.log("[Force] Initializing artsyXapp...") /** * If we can't get an xapp token, start the server but retry every 30 seconds. * Until an xapp token is fetched, the `ARTSY_XAPP_TOKEN` sharify value will * not be present, and any requests made via the Force server (or a user's * browser) directly to gravity will fail. * When an xapp token is fetched, any subsequent requests to Force will have * `ARTSY_XAPP_TOKEN` set and direct gravity requests will resolve. */ artsyXapp.on("error", err => { startServerCallback() console.error(` Force could not fetch an xapp token. This can be due to \`API_URL\`, \`CLIENT_ID\` and \`CLIENT_SECRET\` not being set, but could also be gravity being down. Retrying...`) console.error(err) setTimeout(() => { artsyXapp.init({ id: CLIENT_ID, secret: CLIENT_SECRET, url: API_URL }) }, 30000) })
artsyXapp.init( { id: CLIENT_ID, secret: CLIENT_SECRET, url: API_URL }, error => { if (error) { console.error(error) } if (!error) { console.log("[Force] Successfully fetched xapp token.") startServerCallback() } } ) }
// Get an xapp token
random_line_split
artsyXapp.ts
import artsyXapp from "@artsy/xapp" const { API_URL, CLIENT_ID, CLIENT_SECRET } = process.env export function
(startServerCallback) { console.log("[Force] Initializing artsyXapp...") /** * If we can't get an xapp token, start the server but retry every 30 seconds. * Until an xapp token is fetched, the `ARTSY_XAPP_TOKEN` sharify value will * not be present, and any requests made via the Force server (or a user's * browser) directly to gravity will fail. * When an xapp token is fetched, any subsequent requests to Force will have * `ARTSY_XAPP_TOKEN` set and direct gravity requests will resolve. */ artsyXapp.on("error", err => { startServerCallback() console.error(` Force could not fetch an xapp token. This can be due to \`API_URL\`, \`CLIENT_ID\` and \`CLIENT_SECRET\` not being set, but could also be gravity being down. Retrying...`) console.error(err) setTimeout(() => { artsyXapp.init({ id: CLIENT_ID, secret: CLIENT_SECRET, url: API_URL }) }, 30000) }) // Get an xapp token artsyXapp.init( { id: CLIENT_ID, secret: CLIENT_SECRET, url: API_URL }, error => { if (error) { console.error(error) } if (!error) { console.log("[Force] Successfully fetched xapp token.") startServerCallback() } } ) }
initializeArtsyXapp
identifier_name
artsyXapp.ts
import artsyXapp from "@artsy/xapp" const { API_URL, CLIENT_ID, CLIENT_SECRET } = process.env export function initializeArtsyXapp(startServerCallback) { console.log("[Force] Initializing artsyXapp...") /** * If we can't get an xapp token, start the server but retry every 30 seconds. * Until an xapp token is fetched, the `ARTSY_XAPP_TOKEN` sharify value will * not be present, and any requests made via the Force server (or a user's * browser) directly to gravity will fail. * When an xapp token is fetched, any subsequent requests to Force will have * `ARTSY_XAPP_TOKEN` set and direct gravity requests will resolve. */ artsyXapp.on("error", err => { startServerCallback() console.error(` Force could not fetch an xapp token. This can be due to \`API_URL\`, \`CLIENT_ID\` and \`CLIENT_SECRET\` not being set, but could also be gravity being down. Retrying...`) console.error(err) setTimeout(() => { artsyXapp.init({ id: CLIENT_ID, secret: CLIENT_SECRET, url: API_URL }) }, 30000) }) // Get an xapp token artsyXapp.init( { id: CLIENT_ID, secret: CLIENT_SECRET, url: API_URL }, error => { if (error) { console.error(error) } if (!error)
} ) }
{ console.log("[Force] Successfully fetched xapp token.") startServerCallback() }
conditional_block
artsyXapp.ts
import artsyXapp from "@artsy/xapp" const { API_URL, CLIENT_ID, CLIENT_SECRET } = process.env export function initializeArtsyXapp(startServerCallback)
setTimeout(() => { artsyXapp.init({ id: CLIENT_ID, secret: CLIENT_SECRET, url: API_URL }) }, 30000) }) // Get an xapp token artsyXapp.init( { id: CLIENT_ID, secret: CLIENT_SECRET, url: API_URL }, error => { if (error) { console.error(error) } if (!error) { console.log("[Force] Successfully fetched xapp token.") startServerCallback() } } ) }
{ console.log("[Force] Initializing artsyXapp...") /** * If we can't get an xapp token, start the server but retry every 30 seconds. * Until an xapp token is fetched, the `ARTSY_XAPP_TOKEN` sharify value will * not be present, and any requests made via the Force server (or a user's * browser) directly to gravity will fail. * When an xapp token is fetched, any subsequent requests to Force will have * `ARTSY_XAPP_TOKEN` set and direct gravity requests will resolve. */ artsyXapp.on("error", err => { startServerCallback() console.error(` Force could not fetch an xapp token. This can be due to \`API_URL\`, \`CLIENT_ID\` and \`CLIENT_SECRET\` not being set, but could also be gravity being down. Retrying...`) console.error(err)
identifier_body
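The artsyXapp records repeat one pattern: start the server whether or not the token fetch succeeds, then keep retrying the fetch on a 30-second timer. A rough Python equivalent of that control flow; `fetch_token` and `start_server` are hypothetical stand-ins for the real calls, and `start_server` is assumed to be safe to call repeatedly, as in the original:

```python
# Sketch of the start-anyway-and-retry pattern above; not the @artsy/xapp API.
import threading

RETRY_SECONDS = 30

def init_token(fetch_token, start_server):
    try:
        fetch_token()
        print("Successfully fetched xapp token.")
    except Exception as err:
        print(f"Could not fetch an xapp token ({err!r}); retrying...")
        threading.Timer(RETRY_SECONDS, init_token,
                        args=(fetch_token, start_server)).start()
    finally:
        # Serve either way; token-dependent requests fail until a retry succeeds.
        start_server()
```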
github_stats.py
#!/usr/bin/env python """Simple tools to query github.com and gather stats about issues. """ #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from __future__ import print_function import json import re import sys from datetime import datetime, timedelta from subprocess import check_output from urllib2 import urlopen #----------------------------------------------------------------------------- # Globals #----------------------------------------------------------------------------- ISO8601 = "%Y-%m-%dT%H:%M:%SZ" PER_PAGE = 100 element_pat = re.compile(r'<(.+?)>') rel_pat = re.compile(r'rel=[\'"](\w+)[\'"]') #----------------------------------------------------------------------------- # Functions #----------------------------------------------------------------------------- def parse_link_header(headers): link_s = headers.get('link', '') urls = element_pat.findall(link_s) rels = rel_pat.findall(link_s) d = {} for rel,url in zip(rels, urls): d[rel] = url return d def get_paged_request(url): """get a full list, handling APIv3's paging""" results = [] while url: print("fetching %s" % url, file=sys.stderr) f = urlopen(url) results.extend(json.load(f)) links = parse_link_header(f.headers) url = links.get('next') return results def get_issues(project="matplotlib/matplotlib", state="closed", pulls=False): """Get a list of the issues from the Github API.""" which = 'pulls' if pulls else 'issues' url = "https://api.github.com/repos/%s/%s?state=%s&per_page=%i" % (project, which, state, PER_PAGE) return get_paged_request(url) def _parse_datetime(s): """Parse dates in the format returned by the Github API.""" if s: return datetime.strptime(s, ISO8601) else: return datetime.fromtimestamp(0) def issues2dict(issues): """Convert a list of issues to a dict, keyed by issue number.""" idict = {} for i in issues: idict[i['number']] = i return idict def is_pull_request(issue): """Return True if the given issue is a pull request.""" return 'pull_request_url' in issue def issues_closed_since(period=timedelta(days=365), project="matplotlib/matplotlib", pulls=False): """Get all issues closed since a particular point in time. period can either be a datetime object, or a timedelta object. In the latter case, it is used as a time before the present.""" which = 'pulls' if pulls else 'issues' if isinstance(period, timedelta): period = datetime.now() - period url = "https://api.github.com/repos/%s/%s?state=closed&sort=updated&since=%s&per_page=%i" % (project, which, period.strftime(ISO8601), PER_PAGE) allclosed = get_paged_request(url) # allclosed = get_issues(project=project, state='closed', pulls=pulls, since=period) filtered = [i for i in allclosed if _parse_datetime(i['closed_at']) > period] # exclude rejected PRs if pulls: filtered = [ pr for pr in filtered if pr['merged_at'] ] return filtered def sorted_by_field(issues, field='closed_at', reverse=False): """Return a list of issues sorted by closing date.""" return sorted(issues, key = lambda i:i[field], reverse=reverse) def report(issues, show_urls=False): """Summary report about a list of issues, printing number and title. 
""" # titles may have unicode in them, so we must encode everything below if show_urls: for i in issues: role = 'ghpull' if 'merged_at' in i else 'ghissue' print('* :%s:`%d`: %s' % (role, i['number'], i['title'].encode('utf-8'))) else: for i in issues: print('* %d: %s' % (i['number'], i['title'].encode('utf-8'))) #----------------------------------------------------------------------------- # Main script #----------------------------------------------------------------------------- if __name__ == "__main__": # Whether to add reST urls for all issues in printout. show_urls = True # By default, search one month back tag = None if len(sys.argv) > 1: try: days = int(sys.argv[1]) except: tag = sys.argv[1] else: tag = check_output(['git', 'describe', '--abbrev=0']).strip() if tag: cmd = ['git', 'log', '-1', '--format=%ai', tag] tagday, tz = check_output(cmd).strip().rsplit(' ', 1) since = datetime.strptime(tagday, "%Y-%m-%d %H:%M:%S") else: since = datetime.now() - timedelta(days=days) print("fetching GitHub stats since %s (tag: %s)" % (since, tag), file=sys.stderr) # turn off to play interactively without redownloading, use %run -i if 1: issues = issues_closed_since(since, pulls=False) pulls = issues_closed_since(since, pulls=True) # For regular reports, it's nice to show them in reverse chronological order issues = sorted_by_field(issues, reverse=True) pulls = sorted_by_field(pulls, reverse=True) n_issues, n_pulls = map(len, (issues, pulls)) n_total = n_issues + n_pulls # Print summary report we can directly include into release notes. print() since_day = since.strftime("%Y/%m/%d") today = datetime.today().strftime("%Y/%m/%d") print(".. _github-stats:") print('Github stats') print('============') print() print("GitHub stats for %s - %s (tag: %s)" % (since_day, today, tag)) print() print("These lists are automatically generated, and may be incomplete or contain duplicates.") print() if tag: # print git info, in addition to GitHub info: since_tag = tag+'..' cmd = ['git', 'log', '--oneline', since_tag] ncommits = len(check_output(cmd).splitlines()) author_cmd = ['git', 'log', '--format=* %aN', since_tag] all_authors = check_output(author_cmd).splitlines() unique_authors = sorted(set(all_authors)) print("The following %i authors contributed %i commits." % (len(unique_authors), ncommits)) print() print('\n'.join(unique_authors)) print() print() print("We closed a total of %d issues, %d pull requests and %d regular issues;\n"
"this is the full list (generated with the script \n" ":file:`tools/github_stats.py`):" % (n_total, n_pulls, n_issues)) print() print('Pull Requests (%d):\n' % n_pulls) report(pulls, show_urls) print() print('Issues (%d):\n' % n_issues) report(issues, show_urls)
random_line_split
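The script above targets Python 2 (`urllib2`, `__future__` print_function). For readers on Python 3, here is a minimal, untested sketch of the same Link-header paging helpers under `urllib.request`; error handling and authentication are omitted:

```python
# Python 3 sketch of the paging helpers in the rows above.
import json
import re
from urllib.request import urlopen

element_pat = re.compile(r'<(.+?)>')
rel_pat = re.compile(r'rel=[\'"](\w+)[\'"]')

def parse_link_header(link_s: str) -> dict:
    # Map each rel name ("next", "last", ...) to its URL.
    return dict(zip(rel_pat.findall(link_s), element_pat.findall(link_s)))

def get_paged_request(url: str) -> list:
    """Follow rel="next" links until the listing is exhausted."""
    results = []
    while url:
        with urlopen(url) as f:
            results.extend(json.load(f))
            url = parse_link_header(f.headers.get('link', '')).get('next')
    return results
```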
github_stats.py
#!/usr/bin/env python """Simple tools to query github.com and gather stats about issues. """ #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from __future__ import print_function import json import re import sys from datetime import datetime, timedelta from subprocess import check_output from urllib2 import urlopen #----------------------------------------------------------------------------- # Globals #----------------------------------------------------------------------------- ISO8601 = "%Y-%m-%dT%H:%M:%SZ" PER_PAGE = 100 element_pat = re.compile(r'<(.+?)>') rel_pat = re.compile(r'rel=[\'"](\w+)[\'"]') #----------------------------------------------------------------------------- # Functions #----------------------------------------------------------------------------- def parse_link_header(headers): link_s = headers.get('link', '') urls = element_pat.findall(link_s) rels = rel_pat.findall(link_s) d = {} for rel,url in zip(rels, urls): d[rel] = url return d def get_paged_request(url): """get a full list, handling APIv3's paging""" results = [] while url: print("fetching %s" % url, file=sys.stderr) f = urlopen(url) results.extend(json.load(f)) links = parse_link_header(f.headers) url = links.get('next') return results def
(project="matplotlib/matplotlib", state="closed", pulls=False): """Get a list of the issues from the Github API.""" which = 'pulls' if pulls else 'issues' url = "https://api.github.com/repos/%s/%s?state=%s&per_page=%i" % (project, which, state, PER_PAGE) return get_paged_request(url) def _parse_datetime(s): """Parse dates in the format returned by the Github API.""" if s: return datetime.strptime(s, ISO8601) else: return datetime.fromtimestamp(0) def issues2dict(issues): """Convert a list of issues to a dict, keyed by issue number.""" idict = {} for i in issues: idict[i['number']] = i return idict def is_pull_request(issue): """Return True if the given issue is a pull request.""" return 'pull_request_url' in issue def issues_closed_since(period=timedelta(days=365), project="matplotlib/matplotlib", pulls=False): """Get all issues closed since a particular point in time. period can either be a datetime object, or a timedelta object. In the latter case, it is used as a time before the present.""" which = 'pulls' if pulls else 'issues' if isinstance(period, timedelta): period = datetime.now() - period url = "https://api.github.com/repos/%s/%s?state=closed&sort=updated&since=%s&per_page=%i" % (project, which, period.strftime(ISO8601), PER_PAGE) allclosed = get_paged_request(url) # allclosed = get_issues(project=project, state='closed', pulls=pulls, since=period) filtered = [i for i in allclosed if _parse_datetime(i['closed_at']) > period] # exclude rejected PRs if pulls: filtered = [ pr for pr in filtered if pr['merged_at'] ] return filtered def sorted_by_field(issues, field='closed_at', reverse=False): """Return a list of issues sorted by closing date.""" return sorted(issues, key = lambda i:i[field], reverse=reverse) def report(issues, show_urls=False): """Summary report about a list of issues, printing number and title. """ # titles may have unicode in them, so we must encode everything below if show_urls: for i in issues: role = 'ghpull' if 'merged_at' in i else 'ghissue' print('* :%s:`%d`: %s' % (role, i['number'], i['title'].encode('utf-8'))) else: for i in issues: print('* %d: %s' % (i['number'], i['title'].encode('utf-8'))) #----------------------------------------------------------------------------- # Main script #----------------------------------------------------------------------------- if __name__ == "__main__": # Whether to add reST urls for all issues in printout. show_urls = True # By default, search one month back tag = None if len(sys.argv) > 1: try: days = int(sys.argv[1]) except: tag = sys.argv[1] else: tag = check_output(['git', 'describe', '--abbrev=0']).strip() if tag: cmd = ['git', 'log', '-1', '--format=%ai', tag] tagday, tz = check_output(cmd).strip().rsplit(' ', 1) since = datetime.strptime(tagday, "%Y-%m-%d %H:%M:%S") else: since = datetime.now() - timedelta(days=days) print("fetching GitHub stats since %s (tag: %s)" % (since, tag), file=sys.stderr) # turn off to play interactively without redownloading, use %run -i if 1: issues = issues_closed_since(since, pulls=False) pulls = issues_closed_since(since, pulls=True) # For regular reports, it's nice to show them in reverse chronological order issues = sorted_by_field(issues, reverse=True) pulls = sorted_by_field(pulls, reverse=True) n_issues, n_pulls = map(len, (issues, pulls)) n_total = n_issues + n_pulls # Print summary report we can directly include into release notes. print() since_day = since.strftime("%Y/%m/%d") today = datetime.today().strftime("%Y/%m/%d") print(".. 
_github-stats:") print('Github stats') print('============') print() print("GitHub stats for %s - %s (tag: %s)" % (since_day, today, tag)) print() print("These lists are automatically generated, and may be incomplete or contain duplicates.") print() if tag: # print git info, in addition to GitHub info: since_tag = tag+'..' cmd = ['git', 'log', '--oneline', since_tag] ncommits = len(check_output(cmd).splitlines()) author_cmd = ['git', 'log', '--format=* %aN', since_tag] all_authors = check_output(author_cmd).splitlines() unique_authors = sorted(set(all_authors)) print("The following %i authors contributed %i commits." % (len(unique_authors), ncommits)) print() print('\n'.join(unique_authors)) print() print() print("We closed a total of %d issues, %d pull requests and %d regular issues;\n" "this is the full list (generated with the script \n" ":file:`tools/github_stats.py`):" % (n_total, n_pulls, n_issues)) print() print('Pull Requests (%d):\n' % n_pulls) report(pulls, show_urls) print() print('Issues (%d):\n' % n_issues) report(issues, show_urls)
get_issues
identifier_name
github_stats.py
#!/usr/bin/env python """Simple tools to query github.com and gather stats about issues. """ #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from __future__ import print_function import json import re import sys from datetime import datetime, timedelta from subprocess import check_output from urllib2 import urlopen #----------------------------------------------------------------------------- # Globals #----------------------------------------------------------------------------- ISO8601 = "%Y-%m-%dT%H:%M:%SZ" PER_PAGE = 100 element_pat = re.compile(r'<(.+?)>') rel_pat = re.compile(r'rel=[\'"](\w+)[\'"]') #----------------------------------------------------------------------------- # Functions #----------------------------------------------------------------------------- def parse_link_header(headers): link_s = headers.get('link', '') urls = element_pat.findall(link_s) rels = rel_pat.findall(link_s) d = {} for rel,url in zip(rels, urls): d[rel] = url return d def get_paged_request(url): """get a full list, handling APIv3's paging""" results = [] while url: print("fetching %s" % url, file=sys.stderr) f = urlopen(url) results.extend(json.load(f)) links = parse_link_header(f.headers) url = links.get('next') return results def get_issues(project="matplotlib/matplotlib", state="closed", pulls=False): """Get a list of the issues from the Github API.""" which = 'pulls' if pulls else 'issues' url = "https://api.github.com/repos/%s/%s?state=%s&per_page=%i" % (project, which, state, PER_PAGE) return get_paged_request(url) def _parse_datetime(s): """Parse dates in the format returned by the Github API.""" if s: return datetime.strptime(s, ISO8601) else: return datetime.fromtimestamp(0) def issues2dict(issues): """Convert a list of issues to a dict, keyed by issue number.""" idict = {} for i in issues: idict[i['number']] = i return idict def is_pull_request(issue): """Return True if the given issue is a pull request.""" return 'pull_request_url' in issue def issues_closed_since(period=timedelta(days=365), project="matplotlib/matplotlib", pulls=False): """Get all issues closed since a particular point in time. period can either be a datetime object, or a timedelta object. In the latter case, it is used as a time before the present.""" which = 'pulls' if pulls else 'issues' if isinstance(period, timedelta): period = datetime.now() - period url = "https://api.github.com/repos/%s/%s?state=closed&sort=updated&since=%s&per_page=%i" % (project, which, period.strftime(ISO8601), PER_PAGE) allclosed = get_paged_request(url) # allclosed = get_issues(project=project, state='closed', pulls=pulls, since=period) filtered = [i for i in allclosed if _parse_datetime(i['closed_at']) > period] # exclude rejected PRs if pulls: filtered = [ pr for pr in filtered if pr['merged_at'] ] return filtered def sorted_by_field(issues, field='closed_at', reverse=False): """Return a list of issues sorted by closing date.""" return sorted(issues, key = lambda i:i[field], reverse=reverse) def report(issues, show_urls=False): """Summary report about a list of issues, printing number and title. 
""" # titles may have unicode in them, so we must encode everything below if show_urls: for i in issues: role = 'ghpull' if 'merged_at' in i else 'ghissue' print('* :%s:`%d`: %s' % (role, i['number'], i['title'].encode('utf-8'))) else: for i in issues: print('* %d: %s' % (i['number'], i['title'].encode('utf-8'))) #----------------------------------------------------------------------------- # Main script #----------------------------------------------------------------------------- if __name__ == "__main__": # Whether to add reST urls for all issues in printout. show_urls = True # By default, search one month back tag = None if len(sys.argv) > 1: try: days = int(sys.argv[1]) except: tag = sys.argv[1] else: tag = check_output(['git', 'describe', '--abbrev=0']).strip() if tag: cmd = ['git', 'log', '-1', '--format=%ai', tag] tagday, tz = check_output(cmd).strip().rsplit(' ', 1) since = datetime.strptime(tagday, "%Y-%m-%d %H:%M:%S") else: since = datetime.now() - timedelta(days=days) print("fetching GitHub stats since %s (tag: %s)" % (since, tag), file=sys.stderr) # turn off to play interactively without redownloading, use %run -i if 1:
# For regular reports, it's nice to show them in reverse chronological order issues = sorted_by_field(issues, reverse=True) pulls = sorted_by_field(pulls, reverse=True) n_issues, n_pulls = map(len, (issues, pulls)) n_total = n_issues + n_pulls # Print summary report we can directly include into release notes. print() since_day = since.strftime("%Y/%m/%d") today = datetime.today().strftime("%Y/%m/%d") print(".. _github-stats:") print('Github stats') print('============') print() print("GitHub stats for %s - %s (tag: %s)" % (since_day, today, tag)) print() print("These lists are automatically generated, and may be incomplete or contain duplicates.") print() if tag: # print git info, in addition to GitHub info: since_tag = tag+'..' cmd = ['git', 'log', '--oneline', since_tag] ncommits = len(check_output(cmd).splitlines()) author_cmd = ['git', 'log', '--format=* %aN', since_tag] all_authors = check_output(author_cmd).splitlines() unique_authors = sorted(set(all_authors)) print("The following %i authors contributed %i commits." % (len(unique_authors), ncommits)) print() print('\n'.join(unique_authors)) print() print() print("We closed a total of %d issues, %d pull requests and %d regular issues;\n" "this is the full list (generated with the script \n" ":file:`tools/github_stats.py`):" % (n_total, n_pulls, n_issues)) print() print('Pull Requests (%d):\n' % n_pulls) report(pulls, show_urls) print() print('Issues (%d):\n' % n_issues) report(issues, show_urls)
issues = issues_closed_since(since, pulls=False) pulls = issues_closed_since(since, pulls=True)
conditional_block
github_stats.py
#!/usr/bin/env python """Simple tools to query github.com and gather stats about issues. """ #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from __future__ import print_function import json import re import sys from datetime import datetime, timedelta from subprocess import check_output from urllib2 import urlopen #----------------------------------------------------------------------------- # Globals #----------------------------------------------------------------------------- ISO8601 = "%Y-%m-%dT%H:%M:%SZ" PER_PAGE = 100 element_pat = re.compile(r'<(.+?)>') rel_pat = re.compile(r'rel=[\'"](\w+)[\'"]') #----------------------------------------------------------------------------- # Functions #----------------------------------------------------------------------------- def parse_link_header(headers): link_s = headers.get('link', '') urls = element_pat.findall(link_s) rels = rel_pat.findall(link_s) d = {} for rel,url in zip(rels, urls): d[rel] = url return d def get_paged_request(url): """get a full list, handling APIv3's paging""" results = [] while url: print("fetching %s" % url, file=sys.stderr) f = urlopen(url) results.extend(json.load(f)) links = parse_link_header(f.headers) url = links.get('next') return results def get_issues(project="matplotlib/matplotlib", state="closed", pulls=False):
def _parse_datetime(s): """Parse dates in the format returned by the Github API.""" if s: return datetime.strptime(s, ISO8601) else: return datetime.fromtimestamp(0) def issues2dict(issues): """Convert a list of issues to a dict, keyed by issue number.""" idict = {} for i in issues: idict[i['number']] = i return idict def is_pull_request(issue): """Return True if the given issue is a pull request.""" return 'pull_request_url' in issue def issues_closed_since(period=timedelta(days=365), project="matplotlib/matplotlib", pulls=False): """Get all issues closed since a particular point in time. period can either be a datetime object, or a timedelta object. In the latter case, it is used as a time before the present.""" which = 'pulls' if pulls else 'issues' if isinstance(period, timedelta): period = datetime.now() - period url = "https://api.github.com/repos/%s/%s?state=closed&sort=updated&since=%s&per_page=%i" % (project, which, period.strftime(ISO8601), PER_PAGE) allclosed = get_paged_request(url) # allclosed = get_issues(project=project, state='closed', pulls=pulls, since=period) filtered = [i for i in allclosed if _parse_datetime(i['closed_at']) > period] # exclude rejected PRs if pulls: filtered = [ pr for pr in filtered if pr['merged_at'] ] return filtered def sorted_by_field(issues, field='closed_at', reverse=False): """Return a list of issues sorted by closing date.""" return sorted(issues, key = lambda i:i[field], reverse=reverse) def report(issues, show_urls=False): """Summary report about a list of issues, printing number and title. """ # titles may have unicode in them, so we must encode everything below if show_urls: for i in issues: role = 'ghpull' if 'merged_at' in i else 'ghissue' print('* :%s:`%d`: %s' % (role, i['number'], i['title'].encode('utf-8'))) else: for i in issues: print('* %d: %s' % (i['number'], i['title'].encode('utf-8'))) #----------------------------------------------------------------------------- # Main script #----------------------------------------------------------------------------- if __name__ == "__main__": # Whether to add reST urls for all issues in printout. show_urls = True # By default, search one month back tag = None if len(sys.argv) > 1: try: days = int(sys.argv[1]) except: tag = sys.argv[1] else: tag = check_output(['git', 'describe', '--abbrev=0']).strip() if tag: cmd = ['git', 'log', '-1', '--format=%ai', tag] tagday, tz = check_output(cmd).strip().rsplit(' ', 1) since = datetime.strptime(tagday, "%Y-%m-%d %H:%M:%S") else: since = datetime.now() - timedelta(days=days) print("fetching GitHub stats since %s (tag: %s)" % (since, tag), file=sys.stderr) # turn off to play interactively without redownloading, use %run -i if 1: issues = issues_closed_since(since, pulls=False) pulls = issues_closed_since(since, pulls=True) # For regular reports, it's nice to show them in reverse chronological order issues = sorted_by_field(issues, reverse=True) pulls = sorted_by_field(pulls, reverse=True) n_issues, n_pulls = map(len, (issues, pulls)) n_total = n_issues + n_pulls # Print summary report we can directly include into release notes. print() since_day = since.strftime("%Y/%m/%d") today = datetime.today().strftime("%Y/%m/%d") print(".. 
_github-stats:") print('Github stats') print('============') print() print("GitHub stats for %s - %s (tag: %s)" % (since_day, today, tag)) print() print("These lists are automatically generated, and may be incomplete or contain duplicates.") print() if tag: # print git info, in addition to GitHub info: since_tag = tag+'..' cmd = ['git', 'log', '--oneline', since_tag] ncommits = len(check_output(cmd).splitlines()) author_cmd = ['git', 'log', '--format=* %aN', since_tag] all_authors = check_output(author_cmd).splitlines() unique_authors = sorted(set(all_authors)) print("The following %i authors contributed %i commits." % (len(unique_authors), ncommits)) print() print('\n'.join(unique_authors)) print() print() print("We closed a total of %d issues, %d pull requests and %d regular issues;\n" "this is the full list (generated with the script \n" ":file:`tools/github_stats.py`):" % (n_total, n_pulls, n_issues)) print() print('Pull Requests (%d):\n' % n_pulls) report(pulls, show_urls) print() print('Issues (%d):\n' % n_issues) report(issues, show_urls)
"""Get a list of the issues from the Github API.""" which = 'pulls' if pulls else 'issues' url = "https://api.github.com/repos/%s/%s?state=%s&per_page=%i" % (project, which, state, PER_PAGE) return get_paged_request(url)
identifier_body
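The core of `issues_closed_since` in these records is a date-window filter over the `closed_at` field. Isolated as a small, self-contained Python 3 sketch, reusing the script's ISO8601 format; the issue dicts are hypothetical minimal examples:

```python
from datetime import datetime, timedelta

ISO8601 = "%Y-%m-%dT%H:%M:%SZ"

def closed_since(issues, period=timedelta(days=365)):
    """Keep issues whose close date falls inside the window."""
    cutoff = datetime.now() - period
    return [i for i in issues
            if i.get("closed_at")
            and datetime.strptime(i["closed_at"], ISO8601) > cutoff]

issues = [
    {"number": 1, "closed_at": "2012-01-01T00:00:00Z"},   # too old
    {"number": 2, "closed_at": None},                      # still open
    {"number": 3, "closed_at": datetime.now().strftime(ISO8601)},
]
print([i["number"] for i in closed_since(issues)])  # -> [3]
```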
itSystemService.ts
module Kitos.Services { "use strict"; interface ISystemRoleModel { Id: number; HasReadAccess: boolean; HasWriteAccess: boolean;
Description?: any; ObjectOwnerId: number; LastChanged: Date; LastChangedByUserId: number; } interface ISystemRightsModel { Id: number; UserId: number; RoleId: number; ObjectId: number; ObjectOwnerId: number; LastChanged: Date; LastChangedByUserId: number; } export class ItSystemService { public static $inject: string[] = ["$http"]; constructor(private $http: IHttpServiceWithCustomConfig) { } GetSystemById = (id: number) => { return this.$http.get<Models.ItSystem.IItSystem>(`odata/ItSystems(${id})`); } GetAllSystems = () => { return this.$http.get<Models.ItSystem.IItSystem>(`odata/ItSystems`); } GetSystemRoleById = (roleId: number) => { return this.$http.get<ISystemRoleModel>(`odata/ItSystemRoles(${roleId})`); } GetAllSystemRoles = () => { return this.$http.get<ISystemRoleModel>(`odata/ItSystemRoles`); } GetSystemRightsById = (id: number) => { return this.$http.get<ISystemRightsModel>(`odata/ItSystemRights?$filter=UserId eq (${id})`); } GetSystemDataById = (id: number) => { return this.$http.get(`odata/ItSystemRights?$expand=role,object&$filter=UserId eq (${id})`); } GetSystemDataByIdFiltered = (id: number, orgId: number) => { return this.$http .get(`odata/ItSystemRights?$expand=role($select=Name),object($select=Id;$expand=ItSystem($select=Id,Name))&$filter=Object/OrganizationId eq (${orgId}) AND UserId eq (${id})&$select=Id`); } } app.service("ItSystemService", ItSystemService); }
Name: string; IsActive: boolean;
random_line_split
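Finally, a small sketch for summarising a dump like this one by its mask category; `rows` is a hypothetical iterable of record dicts, and only the `fim_type` field is assumed:

```python
from collections import Counter

def fim_type_counts(rows):
    """Tally records per mask category (conditional_block, random_line_split,
    identifier_body, identifier_name)."""
    return Counter(row["fim_type"] for row in rows)

sample = [{"fim_type": t} for t in
          ["conditional_block", "random_line_split", "identifier_body",
           "identifier_name", "random_line_split"]]
print(fim_type_counts(sample))
# Counter({'random_line_split': 2, 'conditional_block': 1,
#          'identifier_body': 1, 'identifier_name': 1})
```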