Dataset columns:

column     type           stats
file_name  large_string   lengths 4-140
prefix     large_string   lengths 0-12.1k
suffix     large_string   lengths 0-12k
middle     large_string   lengths 0-7.51k
fim_type   large_string   4 classes
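Each row below is one fill-in-the-middle (FIM) example: the file's prefix, the held-out middle, and the trailing suffix, plus a fim_type label for how the split was made (the 4 classes are identifier_name, identifier_body, conditional_block, and random_line_split). As a minimal sketch, a row might be assembled into a FIM training string like this; the sentinel token names are an illustrative assumption, not part of the schema:

    def to_fim_string(row):
        # Prefix and suffix come first so the model predicts the middle last
        # (the common "PSM" ordering; the sentinel names depend on your tokenizer).
        return ('<fim_prefix>' + row['prefix'] +
                '<fim_suffix>' + row['suffix'] +
                '<fim_middle>' + row['middle'])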
testidentifysourcefiles.py
import unittest import sys import os import errno import commands from xml.dom import minidom sys.path.append('bin') from umdinst import wrap from testsuccessfulcompiledata import getfield, timezonecheck, xmlifystring from testcapturecompile import programcheck def createemptyfile(fname): """Creates an empty file. Throws an exception if the file already exists""" if os.access(fname,os.R_OK): raise ValueError,"File already exists" f = open(fname,'w') f.close() class TestIdentifySourcefiles(unittest.TestCase): def setUp(self): # Create some source files createemptyfile("foo.c") createemptyfile("bar.cpp") createemptyfile("baz.upc") createemptyfile("quux.f77") self.args = "foo.c bar.cpp baz.upc quux.f77 others x.o y.exe -Dgoomba".split() self.argsdasho = "foo.c -o baz.upc bar.cpp".split() def testBasic(self): files = wrap.identify_sourcefiles(self.args) self.assertEquals(files, ['foo.c','bar.cpp','baz.upc','quux.f77']) def testWithDashO(self): files = wrap.identify_sourcefiles(self.argsdasho) self.assertEquals(files,['foo.c','bar.cpp']) def
(self): os.remove("foo.c") os.remove("bar.cpp") os.remove("baz.upc") os.remove("quux.f77") if __name__ == '__main__': unittest.main()
tearDown
identifier_name
testidentifysourcefiles.py
import unittest import sys import os import errno import commands from xml.dom import minidom sys.path.append('bin') from umdinst import wrap from testsuccessfulcompiledata import getfield, timezonecheck, xmlifystring from testcapturecompile import programcheck def createemptyfile(fname): """Creates an empty file. Throws an exception if the file already exists""" if os.access(fname,os.R_OK): raise ValueError,"File already exists" f = open(fname,'w') f.close()
def setUp(self): # Create some source files createemptyfile("foo.c") createemptyfile("bar.cpp") createemptyfile("baz.upc") createemptyfile("quux.f77") self.args = "foo.c bar.cpp baz.upc quux.f77 others x.o y.exe -Dgoomba".split() self.argsdasho = "foo.c -o baz.upc bar.cpp".split() def testBasic(self): files = wrap.identify_sourcefiles(self.args) self.assertEquals(files, ['foo.c','bar.cpp','baz.upc','quux.f77']) def testWithDashO(self): files = wrap.identify_sourcefiles(self.argsdasho) self.assertEquals(files,['foo.c','bar.cpp']) def tearDown(self): os.remove("foo.c") os.remove("bar.cpp") os.remove("baz.upc") os.remove("quux.f77") if __name__ == '__main__': unittest.main()
class TestIdentifySourcefiles(unittest.TestCase):
random_line_split
testidentifysourcefiles.py
import unittest import sys import os import errno import commands from xml.dom import minidom sys.path.append('bin') from umdinst import wrap from testsuccessfulcompiledata import getfield, timezonecheck, xmlifystring from testcapturecompile import programcheck def createemptyfile(fname): """Creates an empty file. Throws an exception if the file already exists""" if os.access(fname,os.R_OK): raise ValueError,"File already exists" f = open(fname,'w') f.close() class TestIdentifySourcefiles(unittest.TestCase): def setUp(self): # Create some source files
def testBasic(self): files = wrap.identify_sourcefiles(self.args) self.assertEquals(files, ['foo.c','bar.cpp','baz.upc','quux.f77']) def testWithDashO(self): files = wrap.identify_sourcefiles(self.argsdasho) self.assertEquals(files,['foo.c','bar.cpp']) def tearDown(self): os.remove("foo.c") os.remove("bar.cpp") os.remove("baz.upc") os.remove("quux.f77") if __name__ == '__main__': unittest.main()
createemptyfile("foo.c") createemptyfile("bar.cpp") createemptyfile("baz.upc") createemptyfile("quux.f77") self.args = "foo.c bar.cpp baz.upc quux.f77 others x.o y.exe -Dgoomba".split() self.argsdasho = "foo.c -o baz.upc bar.cpp".split()
identifier_body
testidentifysourcefiles.py
import unittest import sys import os import errno import commands from xml.dom import minidom sys.path.append('bin') from umdinst import wrap from testsuccessfulcompiledata import getfield, timezonecheck, xmlifystring from testcapturecompile import programcheck def createemptyfile(fname): """Creates an empty file. Throws an exception if the file already exists""" if os.access(fname,os.R_OK):
f = open(fname,'w') f.close() class TestIdentifySourcefiles(unittest.TestCase): def setUp(self): # Create some source files createemptyfile("foo.c") createemptyfile("bar.cpp") createemptyfile("baz.upc") createemptyfile("quux.f77") self.args = "foo.c bar.cpp baz.upc quux.f77 others x.o y.exe -Dgoomba".split() self.argsdasho = "foo.c -o baz.upc bar.cpp".split() def testBasic(self): files = wrap.identify_sourcefiles(self.args) self.assertEquals(files, ['foo.c','bar.cpp','baz.upc','quux.f77']) def testWithDashO(self): files = wrap.identify_sourcefiles(self.argsdasho) self.assertEquals(files,['foo.c','bar.cpp']) def tearDown(self): os.remove("foo.c") os.remove("bar.cpp") os.remove("baz.upc") os.remove("quux.f77") if __name__ == '__main__': unittest.main()
raise ValueError,"File already exists"
conditional_block
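The assertions in these rows pin down the expected behavior of wrap.identify_sourcefiles: arguments with a known source extension (.c, .cpp, .upc, .f77) are kept in order, objects, executables, and flags are ignored, and the argument following -o is skipped. A minimal sketch consistent with those tests (not the actual umdinst implementation, whose extension list may differ):

    SOURCE_EXTENSIONS = ('.c', '.cpp', '.upc', '.f77')  # inferred from the tests

    def identify_sourcefiles(args):
        """Return arguments that look like source files, skipping the -o target."""
        sources = []
        skip_next = False
        for arg in args:
            if skip_next:
                skip_next = False
            elif arg == '-o':
                skip_next = True
            elif arg.endswith(SOURCE_EXTENSIONS):
                sources.append(arg)
        return sources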
lib.rs
r in rand_container.iter() { coef.push(*r); } coefficients.push(coef); } SecretData { secret_data: Some(secret.to_string()), coefficients, } } pub fn get_share(&self, id: u8) -> Result<Vec<u8>, ShamirError> { if id == 0 { return Err(ShamirError::InvalidShareCount); } let mut share_bytes: Vec<u8> = vec![]; let coefficients = self.coefficients.clone(); for coefficient in coefficients { let b = try!(SecretData::accumulate_share_bytes(id, coefficient)); share_bytes.push(b); } share_bytes.insert(0, id); Ok(share_bytes) } pub fn is_valid_share(&self, share: &[u8]) -> bool { let id = share[0]; match self.get_share(id) { Ok(s) => s == share, _ => false, } } pub fn recover_secret(threshold: u8, shares: Vec<Vec<u8>>) -> Option<String> { if threshold as usize > shares.len() { println!("Number of shares is below the threshold"); return None; } let mut xs: Vec<u8> = vec![]; for share in shares.iter() { if xs.contains(&share[0]) { println!("Multiple shares with the same first byte"); return None; } if share.len() != shares[0].len() { println!("Shares have different lengths"); return None; } xs.push(share[0].to_owned()); } let mut mycoefficients: Vec<String> = vec![]; let mut mysecretdata: Vec<u8> = vec![]; let rounds = shares[0].len() - 1; for byte_to_use in 0..rounds { let mut fxs: Vec<u8> = vec![]; for share in shares.clone() { fxs.push(share[1..][byte_to_use]); } match SecretData::full_lagrange(&xs, &fxs) { None => return None, Some(resulting_poly) => { mycoefficients.push(String::from_utf8_lossy(&resulting_poly[..]).to_string()); mysecretdata.push(resulting_poly[0]); } } } match String::from_utf8(mysecretdata) { Ok(s) => Some(s), Err(e) => { println!("{:?}", e); None } } } fn accumulate_share_bytes(id: u8, coefficient_bytes: Vec<u8>) -> Result<u8, ShamirError> { if id == 0 { return Err(ShamirError::InvalidShareCount); } let mut accumulator: u8 = 0; let mut x_i: u8 = 1; for c in coefficient_bytes { accumulator = SecretData::gf256_add(accumulator, SecretData::gf256_mul(c, x_i)); x_i = SecretData::gf256_mul(x_i, id); } Ok(accumulator) } fn full_lagrange(xs: &[u8], fxs: &[u8]) -> Option<Vec<u8>> { let mut returned_coefficients: Vec<u8> = vec![]; let len = fxs.len(); for i in 0..len { let mut this_polynomial: Vec<u8> = vec![1]; for j in 0..len { if i == j { continue; } let denominator = SecretData::gf256_sub(xs[i], xs[j]); let first_term = SecretData::gf256_checked_div(xs[j], denominator); let second_term = SecretData::gf256_checked_div(1, denominator); match (first_term, second_term) { (Some(a), Some(b)) => { let this_term = vec![a, b]; this_polynomial = SecretData::multiply_polynomials(&this_polynomial, &this_term); } (_, _) => return None, }; } if fxs.len() + 1 >= i { this_polynomial = SecretData::multiply_polynomials(&this_polynomial, &[fxs[i]]) } returned_coefficients = SecretData::add_polynomials(&returned_coefficients, &this_polynomial); } Some(returned_coefficients) } #[inline] fn gf256_add(a: u8, b: u8) -> u8 { a ^ b } #[inline] fn gf256_sub(a: u8, b: u8) -> u8 { SecretData::gf256_add(a, b) } #[inline] fn gf256_mul(a: u8, b: u8) -> u8 { if a == 0 || b == 0 { 0 } else { GF256_EXP[((u16::from(GF256_LOG[a as usize]) + u16::from(GF256_LOG[b as usize])) % 255) as usize] } } #[inline] fn gf256_checked_div(a: u8, b: u8) -> Option<u8> { if a == 0 { Some(0) } else if b == 0 { None } else { let a_log = i16::from(GF256_LOG[a as usize]); let b_log = i16::from(GF256_LOG[b as usize]); let mut diff = a_log - b_log; if diff < 0 { diff += 255; } Some(GF256_EXP[(diff % 255) as usize]) } } #[inline] fn 
multiply_polynomials(a: &[u8], b: &[u8]) -> Vec<u8> { let mut resultterms: Vec<u8> = vec![]; let mut termpadding: Vec<u8> = vec![]; for bterm in b { let mut thisvalue = termpadding.clone(); for aterm in a { thisvalue.push(SecretData::gf256_mul(*aterm, *bterm)); } resultterms = SecretData::add_polynomials(&resultterms, &thisvalue); termpadding.push(0); } resultterms } #[inline] fn add_polynomials(a: &[u8], b: &[u8]) -> Vec<u8>
} static GF256_EXP: [u8; 256] = [ 0x01, 0x03, 0x05, 0x0f, 0x11, 0x33, 0x55, 0xff, 0x1a, 0x2e, 0x72, 0x96, 0xa1, 0xf8, 0x13, 0x35, 0x5f, 0xe1, 0x38, 0x48, 0xd8, 0x73, 0x95, 0xa4, 0xf7, 0x02, 0x06, 0x0a, 0x1e, 0x22, 0x66, 0xaa, 0xe5, 0x34, 0x5c, 0xe4, 0x37, 0x59, 0xeb, 0x26, 0x6a, 0xbe, 0xd9, 0x70, 0x90, 0xab, 0xe6, 0x31, 0x53, 0xf5, 0x04, 0x0c, 0x14, 0x3c, 0x44, 0xcc, 0x4f, 0xd1, 0x68, 0xb8, 0xd3, 0x6e, 0xb2, 0xcd, 0x4c, 0xd4, 0x67, 0xa9, 0xe0, 0x3b, 0x4d, 0xd7, 0x62, 0xa6, 0xf1, 0x08, 0x18, 0x28, 0x
{ let mut a = a.to_owned(); let mut b = b.to_owned(); if a.len() < b.len() { let mut t = vec![0; b.len() - a.len()]; a.append(&mut t); } else if a.len() > b.len() { let mut t = vec![0; a.len() - b.len()]; b.append(&mut t); } let mut results: Vec<u8> = vec![]; for i in 0..a.len() { results.push(SecretData::gf256_add(a[i], b[i])); } results }
identifier_body
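In the row above, add_polynomials pads the shorter coefficient list with zeros and combines term by term; because addition in GF(2^8) is XOR (see gf256_add), there are no carries. A short Python sketch of the same idea, assuming the same lowest-degree-first coefficient layout:

    def add_polynomials(a, b):
        # Pad to a common length, then XOR coefficient-wise.
        length = max(len(a), len(b))
        a = a + [0] * (length - len(a))
        b = b + [0] * (length - len(b))
        return [x ^ y for x, y in zip(a, b)]

    assert add_polynomials([1, 1], [0, 1]) == [1, 0]  # (x + 1) + x == 1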
lib.rs
} #[test] fn it_can_recover_secret() { let s1 = vec![1, 184, 190, 251, 87, 232, 39, 47, 17, 4, 36, 190, 245]; let s2 = vec![2, 231, 107, 52, 138, 34, 221, 9, 221, 67, 79, 33, 16]; let s3 = vec![3, 23, 176, 163, 177, 165, 218, 113, 163, 53, 7, 251, 196]; let new_secret = SecretData::recover_secret(3, vec![s1, s2, s3]).unwrap(); assert_eq!(&new_secret[..], "Hello World!"); } #[test] fn it_can_recover_a_generated_secret() { let secret_data = SecretData::with_secret("Hello, world!", 3); let s1 = secret_data.get_share(1).unwrap(); println!("s1: {:?}", s1); let s2 = secret_data.get_share(2).unwrap(); println!("s2: {:?}", s2); let s3 = secret_data.get_share(3).unwrap(); println!("s3: {:?}", s3); let new_secret = SecretData::recover_secret(3, vec![s1, s2, s3]).unwrap(); assert_eq!(&new_secret[..], "Hello, world!"); } #[test] fn it_requires_enough_shares() { fn try_recover(n: u8, shares: &Vec<Vec<u8>>) -> Option<String> { let shares = shares.iter().take(n as usize).cloned().collect::<Vec<_>>(); SecretData::recover_secret(n, shares) } let secret_data = SecretData::with_secret("Hello World!", 5); let shares = vec![ secret_data.get_share(1).unwrap(), secret_data.get_share(2).unwrap(), secret_data.get_share(3).unwrap(), secret_data.get_share(4).unwrap(), secret_data.get_share(5).unwrap(), ]; let recovered = try_recover(5, &shares); assert!(recovered.is_some()); let recovered = try_recover(3, &shares); assert!(recovered.is_none()); } } pub struct SecretData { pub secret_data: Option<String>, pub coefficients: Vec<Vec<u8>>, } #[derive(Debug)] pub enum ShamirError { /// The number of shares must be between 1 and 255 InvalidShareCount, } impl SecretData { pub fn with_secret(secret: &str, threshold: u8) -> SecretData { let mut coefficients: Vec<Vec<u8>> = vec![]; let mut rng = thread_rng(); let mut rand_container = vec![0u8; (threshold - 1) as usize]; for c in secret.as_bytes() { rng.fill_bytes(&mut rand_container); let mut coef: Vec<u8> = vec![*c]; for r in rand_container.iter() { coef.push(*r); } coefficients.push(coef); } SecretData { secret_data: Some(secret.to_string()), coefficients, } } pub fn get_share(&self, id: u8) -> Result<Vec<u8>, ShamirError> { if id == 0 { return Err(ShamirError::InvalidShareCount); } let mut share_bytes: Vec<u8> = vec![]; let coefficients = self.coefficients.clone(); for coefficient in coefficients { let b = try!(SecretData::accumulate_share_bytes(id, coefficient)); share_bytes.push(b); } share_bytes.insert(0, id); Ok(share_bytes) } pub fn is_valid_share(&self, share: &[u8]) -> bool { let id = share[0]; match self.get_share(id) { Ok(s) => s == share, _ => false, } } pub fn recover_secret(threshold: u8, shares: Vec<Vec<u8>>) -> Option<String> { if threshold as usize > shares.len() { println!("Number of shares is below the threshold"); return None; } let mut xs: Vec<u8> = vec![]; for share in shares.iter() { if xs.contains(&share[0]) { println!("Multiple shares with the same first byte"); return None; } if share.len() != shares[0].len() { println!("Shares have different lengths"); return None; } xs.push(share[0].to_owned()); } let mut mycoefficients: Vec<String> = vec![]; let mut mysecretdata: Vec<u8> = vec![]; let rounds = shares[0].len() - 1; for byte_to_use in 0..rounds { let mut fxs: Vec<u8> = vec![]; for share in shares.clone() { fxs.push(share[1..][byte_to_use]); } match SecretData::full_lagrange(&xs, &fxs) { None => return None, Some(resulting_poly) => { mycoefficients.push(String::from_utf8_lossy(&resulting_poly[..]).to_string()); mysecretdata.push(resulting_poly[0]); 
} } } match String::from_utf8(mysecretdata) { Ok(s) => Some(s), Err(e) => { println!("{:?}", e); None } } } fn accumulate_share_bytes(id: u8, coefficient_bytes: Vec<u8>) -> Result<u8, ShamirError> { if id == 0 { return Err(ShamirError::InvalidShareCount); } let mut accumulator: u8 = 0; let mut x_i: u8 = 1; for c in coefficient_bytes { accumulator = SecretData::gf256_add(accumulator, SecretData::gf256_mul(c, x_i)); x_i = SecretData::gf256_mul(x_i, id); } Ok(accumulator) } fn full_lagrange(xs: &[u8], fxs: &[u8]) -> Option<Vec<u8>> { let mut returned_coefficients: Vec<u8> = vec![]; let len = fxs.len(); for i in 0..len { let mut this_polynomial: Vec<u8> = vec![1]; for j in 0..len { if i == j { continue; } let denominator = SecretData::gf256_sub(xs[i], xs[j]); let first_term = SecretData::gf256_checked_div(xs[j], denominator); let second_term = SecretData::gf256_checked_div(1, denominator); match (first_term, second_term) { (Some(a), Some(b)) => { let this_term = vec![a, b]; this_polynomial = SecretData::multiply_polynomials(&this_polynomial, &this_term); } (_, _) => return None, }; } if fxs.len() + 1 >= i { this_polynomial = SecretData::multiply_polynomials(&this_polynomial, &[fxs[i]]) } returned_coefficients = SecretData::add_polynomials(&returned_coefficients, &this_polynomial); } Some(returned_coefficients) } #[inline] fn gf256_add(a: u8, b: u8) -> u8 { a ^ b } #[inline] fn gf256_sub(a: u8, b: u8) -> u8 { SecretData::gf256_add(a, b) } #[inline] fn gf256_mul(a: u8, b: u8) -> u8 { if a == 0 || b == 0 { 0 } else { GF256_EXP[((u16::from(GF256_LOG[a as usize]) + u16::from(GF256_LOG[b as usize])) % 255) as usize] } } #[inline] fn gf256_checked_div(a: u8, b: u8) -> Option<u8> { if a == 0 { Some(0) } else if b == 0 { None } else { let a_log = i16::from(GF256_LOG[a as usize]); let b_log = i16::from(GF256_LOG[b as usize]); let mut diff = a_log - b_log; if diff < 0 { diff += 255; } Some(GF256_EXP[(diff % 25
assert!(secret_data.is_valid_share(&s1)); let s2 = secret_data.get_share(1).unwrap(); assert_eq!(s1, s2);
random_line_split
lib.rs
{ /// The number of shares must be between 1 and 255 InvalidShareCount, } impl SecretData { pub fn with_secret(secret: &str, threshold: u8) -> SecretData { let mut coefficients: Vec<Vec<u8>> = vec![]; let mut rng = thread_rng(); let mut rand_container = vec![0u8; (threshold - 1) as usize]; for c in secret.as_bytes() { rng.fill_bytes(&mut rand_container); let mut coef: Vec<u8> = vec![*c]; for r in rand_container.iter() { coef.push(*r); } coefficients.push(coef); } SecretData { secret_data: Some(secret.to_string()), coefficients, } } pub fn get_share(&self, id: u8) -> Result<Vec<u8>, ShamirError> { if id == 0 { return Err(ShamirError::InvalidShareCount); } let mut share_bytes: Vec<u8> = vec![]; let coefficients = self.coefficients.clone(); for coefficient in coefficients { let b = try!(SecretData::accumulate_share_bytes(id, coefficient)); share_bytes.push(b); } share_bytes.insert(0, id); Ok(share_bytes) } pub fn is_valid_share(&self, share: &[u8]) -> bool { let id = share[0]; match self.get_share(id) { Ok(s) => s == share, _ => false, } } pub fn recover_secret(threshold: u8, shares: Vec<Vec<u8>>) -> Option<String> { if threshold as usize > shares.len() { println!("Number of shares is below the threshold"); return None; } let mut xs: Vec<u8> = vec![]; for share in shares.iter() { if xs.contains(&share[0]) { println!("Multiple shares with the same first byte"); return None; } if share.len() != shares[0].len() { println!("Shares have different lengths"); return None; } xs.push(share[0].to_owned()); } let mut mycoefficients: Vec<String> = vec![]; let mut mysecretdata: Vec<u8> = vec![]; let rounds = shares[0].len() - 1; for byte_to_use in 0..rounds { let mut fxs: Vec<u8> = vec![]; for share in shares.clone() { fxs.push(share[1..][byte_to_use]); } match SecretData::full_lagrange(&xs, &fxs) { None => return None, Some(resulting_poly) => { mycoefficients.push(String::from_utf8_lossy(&resulting_poly[..]).to_string()); mysecretdata.push(resulting_poly[0]); } } } match String::from_utf8(mysecretdata) { Ok(s) => Some(s), Err(e) => { println!("{:?}", e); None } } } fn accumulate_share_bytes(id: u8, coefficient_bytes: Vec<u8>) -> Result<u8, ShamirError> { if id == 0 { return Err(ShamirError::InvalidShareCount); } let mut accumulator: u8 = 0; let mut x_i: u8 = 1; for c in coefficient_bytes { accumulator = SecretData::gf256_add(accumulator, SecretData::gf256_mul(c, x_i)); x_i = SecretData::gf256_mul(x_i, id); } Ok(accumulator) } fn full_lagrange(xs: &[u8], fxs: &[u8]) -> Option<Vec<u8>> { let mut returned_coefficients: Vec<u8> = vec![]; let len = fxs.len(); for i in 0..len { let mut this_polynomial: Vec<u8> = vec![1]; for j in 0..len { if i == j { continue; } let denominator = SecretData::gf256_sub(xs[i], xs[j]); let first_term = SecretData::gf256_checked_div(xs[j], denominator); let second_term = SecretData::gf256_checked_div(1, denominator); match (first_term, second_term) { (Some(a), Some(b)) => { let this_term = vec![a, b]; this_polynomial = SecretData::multiply_polynomials(&this_polynomial, &this_term); } (_, _) => return None, }; } if fxs.len() + 1 >= i { this_polynomial = SecretData::multiply_polynomials(&this_polynomial, &[fxs[i]]) } returned_coefficients = SecretData::add_polynomials(&returned_coefficients, &this_polynomial); } Some(returned_coefficients) } #[inline] fn gf256_add(a: u8, b: u8) -> u8 { a ^ b } #[inline] fn gf256_sub(a: u8, b: u8) -> u8 { SecretData::gf256_add(a, b) } #[inline] fn gf256_mul(a: u8, b: u8) -> u8 { if a == 0 || b == 0 { 0 } else { GF256_EXP[((u16::from(GF256_LOG[a as 
usize]) + u16::from(GF256_LOG[b as usize])) % 255) as usize] } } #[inline] fn gf256_checked_div(a: u8, b: u8) -> Option<u8> { if a == 0 { Some(0) } else if b == 0 { None } else { let a_log = i16::from(GF256_LOG[a as usize]); let b_log = i16::from(GF256_LOG[b as usize]); let mut diff = a_log - b_log; if diff < 0 { diff += 255; } Some(GF256_EXP[(diff % 255) as usize]) } } #[inline] fn multiply_polynomials(a: &[u8], b: &[u8]) -> Vec<u8> { let mut resultterms: Vec<u8> = vec![]; let mut termpadding: Vec<u8> = vec![]; for bterm in b { let mut thisvalue = termpadding.clone(); for aterm in a { thisvalue.push(SecretData::gf256_mul(*aterm, *bterm)); } resultterms = SecretData::add_polynomials(&resultterms, &thisvalue); termpadding.push(0); } resultterms } #[inline] fn add_polynomials(a: &[u8], b: &[u8]) -> Vec<u8> { let mut a = a.to_owned(); let mut b = b.to_owned(); if a.len() < b.len() { let mut t = vec![0; b.len() - a.len()]; a.append(&mut t); } else if a.len() > b.len() { let mut t = vec![0; a.len() - b.len()]; b.append(&mut t); } let mut results: Vec<u8> = vec![]; for i in 0..a.len() { results.push(SecretData::gf256_add(a[i], b[i])); } results } } static GF256_EXP: [u8; 256] = [ 0x01, 0x03, 0x05, 0x0f, 0x11, 0x33, 0x55, 0xff, 0x1a, 0x2e, 0x72, 0x96, 0xa1, 0xf8, 0x13, 0x35, 0x5f, 0xe1, 0x38, 0x48, 0xd8, 0x73, 0x95, 0xa4, 0xf7, 0x02, 0x06, 0x0a, 0x1e, 0x22, 0x66, 0xaa, 0xe5, 0x34, 0x5c, 0xe4, 0x37, 0x59, 0xeb, 0x26, 0x6a, 0xbe, 0xd9, 0x70, 0x90, 0xab, 0xe6, 0x31, 0x53, 0xf5, 0x04, 0x0c, 0x14, 0x3c, 0x44,
ShamirError
identifier_name
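The gf256_mul and gf256_checked_div helpers in these rows multiply and divide through log/exp tables; the exp table's opening values (0x01, 0x03, 0x05, 0x0f, ...) are the standard generator-3 table for the field with reduction polynomial 0x11b. Assuming that field, the same products can be cross-checked without tables by carry-less shift-and-add:

    def gf256_mul(a, b):
        """Multiply in GF(2^8) modulo x^8 + x^4 + x^3 + x + 1 (0x11b)."""
        product = 0
        for _ in range(8):
            if b & 1:
                product ^= a
            carry = a & 0x80
            a = (a << 1) & 0xFF
            if carry:
                a ^= 0x1B  # low byte of 0x11b, applied when x^8 overflows
            b >>= 1
        return product

    assert gf256_mul(3, 3) == 5           # matches GF256_EXP[2] == 0x05
    assert gf256_mul(0x53, 0xCA) == 0x01  # a well-known inverse pair in 0x11b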
microversion.py
copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Microversion handling.""" # NOTE(cdent): This code is taken from enamel: # https://github.com/jaypipes/enamel and was the original source of # the code now used in microversion_parse library. import collections import inspect import microversion_parse import webob # NOTE(cdent): avoid cyclical import conflict between util and # microversion import nova.api.openstack.placement.util from nova.i18n import _ SERVICE_TYPE = 'placement' MICROVERSION_ENVIRON = '%s.microversion' % SERVICE_TYPE VERSIONED_METHODS = collections.defaultdict(list) # The Canonical Version List VERSIONS = [ '1.0', '1.1', # initial support for aggregate.get_aggregates and set_aggregates '1.2', # Adds /resource_classes resource endpoint '1.3', # Adds 'member_of' query parameter to get resource providers # that are members of any of the listed aggregates '1.4', # Adds resources query string parameter in GET /resource_providers '1.5', # Adds DELETE /resource_providers/{uuid}/inventories '1.6', # Adds /traits and /resource_providers{uuid}/traits resource # endpoints '1.7', # PUT /resource_classes/{name} is bodiless create or update '1.8', # Adds 'project_id' and 'user_id' required request parameters to # PUT /allocations '1.9', # Adds GET /usages '1.10', # Adds GET /allocation_candidates resource endpoint ] def max_version_string(): return VERSIONS[-1] def min_version_string(): return VERSIONS[0] def parse_version_string(version_string): """Turn a version string into a Version :param version_string: A string of two numerals, X.Y, or 'latest' :returns: a Version :raises: TypeError """ if version_string == 'latest': version_string = max_version_string() try: # The combination of int and a limited split with the # named tuple means that this incantation will raise # ValueError or TypeError when the incoming data is # poorly formed but will, however, naturally adapt to # extraneous whitespace. return Version(*(int(value) for value in version_string.split('.', 1))) except (ValueError, TypeError) as exc: raise TypeError('invalid version string: %s; %s' % ( version_string, exc)) def raise_http_status_code_if_not_version(req, status_code, min_version, max_version=None): """Utility to raise a http status code if the wanted microversion does not match. 
:param req: The HTTP request for the placement api :param status_code: HTTP status code (integer value) to be raised :param min_version: Minimum placement microversion level :param max_version: Maximum placement microversion level :returns: None :raises: HTTP status code if the specified microversion does not match :raises: KeyError if status_code is not a valid HTTP status code """ if not isinstance(min_version, tuple): min_version = parse_version_string(min_version) if max_version and not isinstance(max_version, tuple): max_version = parse_version_string(max_version) want_version = req.environ[MICROVERSION_ENVIRON] if not want_version.matches(min_version, max_version): raise webob.exc.status_map[status_code] class MicroversionMiddleware(object): """WSGI middleware for getting microversion info.""" def __init__(self, application): self.application = application @webob.dec.wsgify def __call__(self, req): util = nova.api.openstack.placement.util try: microversion = extract_version(req.headers) except ValueError as exc: raise webob.exc.HTTPNotAcceptable( _('Invalid microversion: %(error)s') % {'error': exc}, json_formatter=util.json_error_formatter) except TypeError as exc: raise webob.exc.HTTPBadRequest( _('Invalid microversion: %(error)s') % {'error': exc}, json_formatter=util.json_error_formatter) req.environ[MICROVERSION_ENVIRON] = microversion microversion_header = '%s %s' % (SERVICE_TYPE, microversion) try: response = req.get_response(self.application) except webob.exc.HTTPError as exc: # If there was an error in the application we still need # to send the microversion header, so add the header and # re-raise the exception. exc.headers.add(Version.HEADER, microversion_header) raise exc response.headers.add(Version.HEADER, microversion_header) response.headers.add('vary', Version.HEADER) return response class Version(collections.namedtuple('Version', 'major minor')): """A namedtuple containing major and minor values. Since it is a tuple, it is automatically comparable. """ HEADER = 'OpenStack-API-Version' MIN_VERSION = None MAX_VERSION = None def __str__(self): return '%s.%s' % (self.major, self.minor) @property def max_version(self): if not self.MAX_VERSION: self.MAX_VERSION = parse_version_string(max_version_string()) return self.MAX_VERSION @property def min_version(self): if not self.MIN_VERSION: self.MIN_VERSION = parse_version_string(min_version_string()) return self.MIN_VERSION def matches(self, min_version=None, max_version=None): if min_version is None: min_version = self.min_version if max_version is None: max_version = self.max_version return min_version <= self <= max_version def
(headers): """Extract the microversion from Version.HEADER There may be multiple headers and some which don't match our service. """ found_version = microversion_parse.get_version(headers, service_type=SERVICE_TYPE) version_string = found_version or min_version_string() request_version = parse_version_string(version_string) # We need a version that is in VERSIONS and within MIN and MAX. # This gives us the option to administratively disable a # version if we really need to. if (str(request_version) in VERSIONS and request_version.matches()): return request_version raise ValueError('Unacceptable version header: %s' % version_string) # From twisted # https://github.com/twisted/twisted/blob/trunk/twisted/python/deprecate.py def _fully_qualified_name(obj): """Return the fully qualified name of a module, class, method or function. Classes and functions need to be module level ones to be correctly qualified. """ try: name = obj.__qualname__ except AttributeError: name = obj.__name__ if inspect.isclass(obj) or inspect.isfunction(obj): moduleName = obj.__module__ return "%s.%s" % (moduleName, name) elif inspect.ismethod(obj): try: cls = obj.im_class except AttributeError: # Python 3 eliminates im_class, substitutes __module__ and # __qualname__ to provide similar information. return "%s.%s" % (obj.__module__, obj.__qualname__) else: className = _fully_qualified_name(cls) return "%s.%s" % (className, name) return name def _find_method(f, version): """Look in VERSIONED_METHODS for method with right name matching version. If no match is found raise a 404. """ qualified_name = _fully_qualified_name(f) # A KeyError shouldn't be possible here, but let's be robust # just in case. method_list = VERSIONED_METHODS.get(qualified_name, []) for min_version, max_version, func in method_list: if min_version <= version <= max_version: return func raise webob.exc.HTTPNotFound() def version_handler(min_ver, max_ver=None): """Decorator for versioning API methods. Add as a decorator to a placement API handler to constrain the microversions at which it will run. Add after the ``wsgify`` decorator. This does not check for version intersections. That's the domain of tests. :param min_ver: A string of two numerals, X.Y indicating the minimum version allowed for the decorated method. :param max_ver: A string of two numerals, X.Y, indicating the maximum version allowed for the decorated method. """ def decorator(f): min_version = parse_version_string(min_ver) if max_ver: max_version = parse_version_string(max_ver) else: max_version = parse_version_string(max_version_string()) qualified_name = _fully_qualified_name(f) VERSIONED_METHODS[qualified_name].append( (min_version, max_version, f
extract_version
identifier_name
microversion.py
required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Microversion handling.""" # NOTE(cdent): This code is taken from enamel: # https://github.com/jaypipes/enamel and was the original source of # the code now used in microversion_parse library. import collections import inspect import microversion_parse import webob # NOTE(cdent): avoid cyclical import conflict between util and # microversion import nova.api.openstack.placement.util from nova.i18n import _ SERVICE_TYPE = 'placement' MICROVERSION_ENVIRON = '%s.microversion' % SERVICE_TYPE VERSIONED_METHODS = collections.defaultdict(list) # The Canonical Version List VERSIONS = [ '1.0', '1.1', # initial support for aggregate.get_aggregates and set_aggregates '1.2', # Adds /resource_classes resource endpoint '1.3', # Adds 'member_of' query parameter to get resource providers # that are members of any of the listed aggregates '1.4', # Adds resources query string parameter in GET /resource_providers '1.5', # Adds DELETE /resource_providers/{uuid}/inventories '1.6', # Adds /traits and /resource_providers{uuid}/traits resource # endpoints '1.7', # PUT /resource_classes/{name} is bodiless create or update '1.8', # Adds 'project_id' and 'user_id' required request parameters to # PUT /allocations '1.9', # Adds GET /usages '1.10', # Adds GET /allocation_candidates resource endpoint ] def max_version_string(): return VERSIONS[-1] def min_version_string(): return VERSIONS[0] def parse_version_string(version_string): """Turn a version string into a Version :param version_string: A string of two numerals, X.Y, or 'latest' :returns: a Version :raises: TypeError """ if version_string == 'latest': version_string = max_version_string() try: # The combination of int and a limited split with the # named tuple means that this incantation will raise # ValueError or TypeError when the incoming data is # poorly formed but will, however, naturally adapt to # extraneous whitespace. return Version(*(int(value) for value in version_string.split('.', 1))) except (ValueError, TypeError) as exc: raise TypeError('invalid version string: %s; %s' % ( version_string, exc)) def raise_http_status_code_if_not_version(req, status_code, min_version, max_version=None): """Utility to raise a http status code if the wanted microversion does not match. 
:param req: The HTTP request for the placement api :param status_code: HTTP status code (integer value) to be raised :param min_version: Minimum placement microversion level :param max_version: Maximum placement microversion level :returns: None :raises: HTTP status code if the specified microversion does not match :raises: KeyError if status_code is not a valid HTTP status code """ if not isinstance(min_version, tuple): min_version = parse_version_string(min_version) if max_version and not isinstance(max_version, tuple): max_version = parse_version_string(max_version) want_version = req.environ[MICROVERSION_ENVIRON] if not want_version.matches(min_version, max_version): raise webob.exc.status_map[status_code] class MicroversionMiddleware(object): """WSGI middleware for getting microversion info.""" def __init__(self, application): self.application = application @webob.dec.wsgify def __call__(self, req): util = nova.api.openstack.placement.util try: microversion = extract_version(req.headers) except ValueError as exc: raise webob.exc.HTTPNotAcceptable( _('Invalid microversion: %(error)s') % {'error': exc}, json_formatter=util.json_error_formatter) except TypeError as exc: raise webob.exc.HTTPBadRequest( _('Invalid microversion: %(error)s') % {'error': exc}, json_formatter=util.json_error_formatter) req.environ[MICROVERSION_ENVIRON] = microversion microversion_header = '%s %s' % (SERVICE_TYPE, microversion) try: response = req.get_response(self.application) except webob.exc.HTTPError as exc: # If there was an error in the application we still need # to send the microversion header, so add the header and # re-raise the exception. exc.headers.add(Version.HEADER, microversion_header) raise exc response.headers.add(Version.HEADER, microversion_header) response.headers.add('vary', Version.HEADER) return response class Version(collections.namedtuple('Version', 'major minor')): """A namedtuple containing major and minor values. Since it is a tuple, it is automatically comparable. """ HEADER = 'OpenStack-API-Version' MIN_VERSION = None MAX_VERSION = None def __str__(self): return '%s.%s' % (self.major, self.minor) @property def max_version(self): if not self.MAX_VERSION: self.MAX_VERSION = parse_version_string(max_version_string()) return self.MAX_VERSION @property def min_version(self): if not self.MIN_VERSION: self.MIN_VERSION = parse_version_string(min_version_string()) return self.MIN_VERSION def matches(self, min_version=None, max_version=None): if min_version is None: min_version = self.min_version if max_version is None: max_version = self.max_version return min_version <= self <= max_version def extract_version(headers): """Extract the microversion from Version.HEADER There may be multiple headers and some which don't match our service. """ found_version = microversion_parse.get_version(headers, service_type=SERVICE_TYPE) version_string = found_version or min_version_string() request_version = parse_version_string(version_string) # We need a version that is in VERSIONS and within MIN and MAX. # This gives us the option to administratively disable a # version if we really need to. if (str(request_version) in VERSIONS and request_version.matches()): return request_version raise ValueError('Unacceptable version header: %s' % version_string) # From twisted # https://github.com/twisted/twisted/blob/trunk/twisted/python/deprecate.py def _fully_qualified_name(obj): """Return the fully qualified name of a module, class, method or function. 
Classes and functions need to be module level ones to be correctly qualified. """ try: name = obj.__qualname__ except AttributeError: name = obj.__name__ if inspect.isclass(obj) or inspect.isfunction(obj): moduleName = obj.__module__ return "%s.%s" % (moduleName, name) elif inspect.ismethod(obj): try: cls = obj.im_class except AttributeError: # Python 3 eliminates im_class, substitutes __module__ and # __qualname__ to provide similar information. return "%s.%s" % (obj.__module__, obj.__qualname__) else: className = _fully_qualified_name(cls) return "%s.%s" % (className, name) return name def _find_method(f, version): """Look in VERSIONED_METHODS for method with right name matching version. If no match is found raise a 404. """ qualified_name = _fully_qualified_name(f) # A KeyError shouldn't be possible here, but let's be robust # just in case. method_list = VERSIONED_METHODS.get(qualified_name, []) for min_version, max_version, func in method_list: if min_version <= version <= max_version: return func raise webob.exc.HTTPNotFound() def version_handler(min_ver, max_ver=None): """Decorator for versioning API methods. Add as a decorator to a placement API handler to constrain the microversions at which it will run. Add after the ``wsgify`` decorator. This does not check for version intersections. That's the domain of tests. :param min_ver: A string of two numerals, X.Y indicating the minimum version allowed for the decorated method. :param max_ver: A string of two numerals, X.Y, indicating the maximum version allowed for the decorated method. """ def decorator(f): min_version = parse_version_string(min_ver) if max_ver: max_version = parse_version_string(max_ver) else: max_version = parse_version_string(max_version_string()) qualified_name = _fully_qualified_name(f)
VERSIONED_METHODS[qualified_name].append( (min_version, max_version, f)) def decorated_func(req, *args, **kwargs): version = req.environ[MICROVERSION_ENVIRON]
random_line_split
microversion.py
copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Microversion handling.""" # NOTE(cdent): This code is taken from enamel: # https://github.com/jaypipes/enamel and was the original source of # the code now used in microversion_parse library. import collections import inspect import microversion_parse import webob # NOTE(cdent): avoid cyclical import conflict between util and # microversion import nova.api.openstack.placement.util from nova.i18n import _ SERVICE_TYPE = 'placement' MICROVERSION_ENVIRON = '%s.microversion' % SERVICE_TYPE VERSIONED_METHODS = collections.defaultdict(list) # The Canonical Version List VERSIONS = [ '1.0', '1.1', # initial support for aggregate.get_aggregates and set_aggregates '1.2', # Adds /resource_classes resource endpoint '1.3', # Adds 'member_of' query parameter to get resource providers # that are members of any of the listed aggregates '1.4', # Adds resources query string parameter in GET /resource_providers '1.5', # Adds DELETE /resource_providers/{uuid}/inventories '1.6', # Adds /traits and /resource_providers{uuid}/traits resource # endpoints '1.7', # PUT /resource_classes/{name} is bodiless create or update '1.8', # Adds 'project_id' and 'user_id' required request parameters to # PUT /allocations '1.9', # Adds GET /usages '1.10', # Adds GET /allocation_candidates resource endpoint ] def max_version_string(): return VERSIONS[-1] def min_version_string(): return VERSIONS[0] def parse_version_string(version_string): """Turn a version string into a Version :param version_string: A string of two numerals, X.Y, or 'latest' :returns: a Version :raises: TypeError """ if version_string == 'latest': version_string = max_version_string() try: # The combination of int and a limited split with the # named tuple means that this incantation will raise # ValueError or TypeError when the incoming data is # poorly formed but will, however, naturally adapt to # extraneous whitespace. return Version(*(int(value) for value in version_string.split('.', 1))) except (ValueError, TypeError) as exc: raise TypeError('invalid version string: %s; %s' % ( version_string, exc)) def raise_http_status_code_if_not_version(req, status_code, min_version, max_version=None): """Utility to raise a http status code if the wanted microversion does not match. 
:param req: The HTTP request for the placement api :param status_code: HTTP status code (integer value) to be raised :param min_version: Minimum placement microversion level :param max_version: Maximum placement microversion level :returns: None :raises: HTTP status code if the specified microversion does not match :raises: KeyError if status_code is not a valid HTTP status code """ if not isinstance(min_version, tuple): min_version = parse_version_string(min_version) if max_version and not isinstance(max_version, tuple): max_version = parse_version_string(max_version) want_version = req.environ[MICROVERSION_ENVIRON] if not want_version.matches(min_version, max_version): raise webob.exc.status_map[status_code] class MicroversionMiddleware(object): """WSGI middleware for getting microversion info.""" def __init__(self, application): self.application = application @webob.dec.wsgify def __call__(self, req): util = nova.api.openstack.placement.util try: microversion = extract_version(req.headers) except ValueError as exc: raise webob.exc.HTTPNotAcceptable( _('Invalid microversion: %(error)s') % {'error': exc}, json_formatter=util.json_error_formatter) except TypeError as exc: raise webob.exc.HTTPBadRequest( _('Invalid microversion: %(error)s') % {'error': exc}, json_formatter=util.json_error_formatter) req.environ[MICROVERSION_ENVIRON] = microversion microversion_header = '%s %s' % (SERVICE_TYPE, microversion) try: response = req.get_response(self.application) except webob.exc.HTTPError as exc: # If there was an error in the application we still need # to send the microversion header, so add the header and # re-raise the exception. exc.headers.add(Version.HEADER, microversion_header) raise exc response.headers.add(Version.HEADER, microversion_header) response.headers.add('vary', Version.HEADER) return response class Version(collections.namedtuple('Version', 'major minor')): """A namedtuple containing major and minor values. Since it is a tuple, it is automatically comparable. """ HEADER = 'OpenStack-API-Version' MIN_VERSION = None MAX_VERSION = None def __str__(self): return '%s.%s' % (self.major, self.minor) @property def max_version(self): if not self.MAX_VERSION: self.MAX_VERSION = parse_version_string(max_version_string()) return self.MAX_VERSION @property def min_version(self): if not self.MIN_VERSION: self.MIN_VERSION = parse_version_string(min_version_string()) return self.MIN_VERSION def matches(self, min_version=None, max_version=None): if min_version is None: min_version = self.min_version if max_version is None: max_version = self.max_version return min_version <= self <= max_version def extract_version(headers): """Extract the microversion from Version.HEADER There may be multiple headers and some which don't match our service. """ found_version = microversion_parse.get_version(headers, service_type=SERVICE_TYPE) version_string = found_version or min_version_string() request_version = parse_version_string(version_string) # We need a version that is in VERSIONS and within MIN and MAX. # This gives us the option to administratively disable a # version if we really need to. if (str(request_version) in VERSIONS and request_version.matches()): return request_version raise ValueError('Unacceptable version header: %s' % version_string) # From twisted # https://github.com/twisted/twisted/blob/trunk/twisted/python/deprecate.py def _fully_qualified_name(obj): """Return the fully qualified name of a module, class, method or function. 
Classes and functions need to be module level ones to be correctly qualified. """ try: name = obj.__qualname__ except AttributeError: name = obj.__name__ if inspect.isclass(obj) or inspect.isfunction(obj): moduleName = obj.__module__ return "%s.%s" % (moduleName, name) elif inspect.ismethod(obj):
return name def _find_method(f, version): """Look in VERSIONED_METHODS for method with right name matching version. If no match is found raise a 404. """ qualified_name = _fully_qualified_name(f) # A KeyError shouldn't be possible here, but let's be robust # just in case. method_list = VERSIONED_METHODS.get(qualified_name, []) for min_version, max_version, func in method_list: if min_version <= version <= max_version: return func raise webob.exc.HTTPNotFound() def version_handler(min_ver, max_ver=None): """Decorator for versioning API methods. Add as a decorator to a placement API handler to constrain the microversions at which it will run. Add after the ``wsgify`` decorator. This does not check for version intersections. That's the domain of tests. :param min_ver: A string of two numerals, X.Y indicating the minimum version allowed for the decorated method. :param max_ver: A string of two numerals, X.Y, indicating the maximum version allowed for the decorated method. """ def decorator(f): min_version = parse_version_string(min_ver) if max_ver: max_version = parse_version_string(max_ver) else: max_version = parse_version_string(max_version_string()) qualified_name = _fully_qualified_name(f) VERSIONED_METHODS[qualified_name].append( (min_version, max_version, f
try: cls = obj.im_class except AttributeError: # Python 3 eliminates im_class, substitutes __module__ and # __qualname__ to provide similar information. return "%s.%s" % (obj.__module__, obj.__qualname__) else: className = _fully_qualified_name(cls) return "%s.%s" % (className, name)
conditional_block
microversion.py
copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Microversion handling.""" # NOTE(cdent): This code is taken from enamel: # https://github.com/jaypipes/enamel and was the original source of # the code now used in microversion_parse library. import collections import inspect import microversion_parse import webob # NOTE(cdent): avoid cyclical import conflict between util and # microversion import nova.api.openstack.placement.util from nova.i18n import _ SERVICE_TYPE = 'placement' MICROVERSION_ENVIRON = '%s.microversion' % SERVICE_TYPE VERSIONED_METHODS = collections.defaultdict(list) # The Canonical Version List VERSIONS = [ '1.0', '1.1', # initial support for aggregate.get_aggregates and set_aggregates '1.2', # Adds /resource_classes resource endpoint '1.3', # Adds 'member_of' query parameter to get resource providers # that are members of any of the listed aggregates '1.4', # Adds resources query string parameter in GET /resource_providers '1.5', # Adds DELETE /resource_providers/{uuid}/inventories '1.6', # Adds /traits and /resource_providers{uuid}/traits resource # endpoints '1.7', # PUT /resource_classes/{name} is bodiless create or update '1.8', # Adds 'project_id' and 'user_id' required request parameters to # PUT /allocations '1.9', # Adds GET /usages '1.10', # Adds GET /allocation_candidates resource endpoint ] def max_version_string(): return VERSIONS[-1] def min_version_string(): return VERSIONS[0] def parse_version_string(version_string): """Turn a version string into a Version :param version_string: A string of two numerals, X.Y, or 'latest' :returns: a Version :raises: TypeError """ if version_string == 'latest': version_string = max_version_string() try: # The combination of int and a limited split with the # named tuple means that this incantation will raise # ValueError or TypeError when the incoming data is # poorly formed but will, however, naturally adapt to # extraneous whitespace. return Version(*(int(value) for value in version_string.split('.', 1))) except (ValueError, TypeError) as exc: raise TypeError('invalid version string: %s; %s' % ( version_string, exc)) def raise_http_status_code_if_not_version(req, status_code, min_version, max_version=None):
class MicroversionMiddleware(object): """WSGI middleware for getting microversion info.""" def __init__(self, application): self.application = application @webob.dec.wsgify def __call__(self, req): util = nova.api.openstack.placement.util try: microversion = extract_version(req.headers) except ValueError as exc: raise webob.exc.HTTPNotAcceptable( _('Invalid microversion: %(error)s') % {'error': exc}, json_formatter=util.json_error_formatter) except TypeError as exc: raise webob.exc.HTTPBadRequest( _('Invalid microversion: %(error)s') % {'error': exc}, json_formatter=util.json_error_formatter) req.environ[MICROVERSION_ENVIRON] = microversion microversion_header = '%s %s' % (SERVICE_TYPE, microversion) try: response = req.get_response(self.application) except webob.exc.HTTPError as exc: # If there was an error in the application we still need # to send the microversion header, so add the header and # re-raise the exception. exc.headers.add(Version.HEADER, microversion_header) raise exc response.headers.add(Version.HEADER, microversion_header) response.headers.add('vary', Version.HEADER) return response class Version(collections.namedtuple('Version', 'major minor')): """A namedtuple containing major and minor values. Since it is a tuple, it is automatically comparable. """ HEADER = 'OpenStack-API-Version' MIN_VERSION = None MAX_VERSION = None def __str__(self): return '%s.%s' % (self.major, self.minor) @property def max_version(self): if not self.MAX_VERSION: self.MAX_VERSION = parse_version_string(max_version_string()) return self.MAX_VERSION @property def min_version(self): if not self.MIN_VERSION: self.MIN_VERSION = parse_version_string(min_version_string()) return self.MIN_VERSION def matches(self, min_version=None, max_version=None): if min_version is None: min_version = self.min_version if max_version is None: max_version = self.max_version return min_version <= self <= max_version def extract_version(headers): """Extract the microversion from Version.HEADER There may be multiple headers and some which don't match our service. """ found_version = microversion_parse.get_version(headers, service_type=SERVICE_TYPE) version_string = found_version or min_version_string() request_version = parse_version_string(version_string) # We need a version that is in VERSIONS and within MIN and MAX. # This gives us the option to administratively disable a # version if we really need to. if (str(request_version) in VERSIONS and request_version.matches()): return request_version raise ValueError('Unacceptable version header: %s' % version_string) # From twisted # https://github.com/twisted/twisted/blob/trunk/twisted/python/deprecate.py def _fully_qualified_name(obj): """Return the fully qualified name of a module, class, method or function. Classes and functions need to be module level ones to be correctly qualified. """ try: name = obj.__qualname__ except AttributeError: name = obj.__name__ if inspect.isclass(obj) or inspect.isfunction(obj): moduleName = obj.__module__ return "%s.%s" % (moduleName, name) elif inspect.ismethod(obj): try: cls = obj.im_class except AttributeError: # Python 3 eliminates im_class, substitutes __module__ and # __qualname__ to provide similar information. return "%s.%s" % (obj.__module__, obj.__qualname__) else: className = _fully_qualified_name(cls) return "%s.%s" % (className, name) return name def _find_method(f, version): """Look in VERSIONED_METHODS for method with right name matching version. If no match is found raise a 404. 
""" qualified_name = _fully_qualified_name(f) # A KeyError shouldn't be possible here, but let's be robust # just in case. method_list = VERSIONED_METHODS.get(qualified_name, []) for min_version, max_version, func in method_list: if min_version <= version <= max_version: return func raise webob.exc.HTTPNotFound() def version_handler(min_ver, max_ver=None): """Decorator for versioning API methods. Add as a decorator to a placement API handler to constrain the microversions at which it will run. Add after the ``wsgify`` decorator. This does not check for version intersections. That's the domain of tests. :param min_ver: A string of two numerals, X.Y indicating the minimum version allowed for the decorated method. :param min_ver: A string of two numerals, X.Y, indicating the maximum version allowed for the decorated method. """ def decorator(f): min_version = parse_version_string(min_ver) if max_ver: max_version = parse_version_string(max_ver) else: max_version = parse_version_string(max_version_string()) qualified_name = _fully_qualified_name(f) VERSIONED_METHODS[qualified_name].append( (min_version, max_version, f
"""Utility to raise a http status code if the wanted microversion does not match. :param req: The HTTP request for the placement api :param status_code: HTTP status code (integer value) to be raised :param min_version: Minimum placement microversion level :param max_version: Maximum placement microversion level :returns: None :raises: HTTP status code if the specified microversion does not match :raises: KeyError if status_code is not a valid HTTP status code """ if not isinstance(min_version, tuple): min_version = parse_version_string(min_version) if max_version and not isinstance(max_version, tuple): max_version = parse_version_string(max_version) want_version = req.environ[MICROVERSION_ENVIRON] if not want_version.matches(min_version, max_version): raise webob.exc.status_map[status_code]
identifier_body
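Taken together, version_handler registers (min, max, function) triples under the handler's fully qualified name so that _find_method can dispatch on the request's microversion, and Version is an ordered namedtuple, so range checks are plain tuple comparisons. A hedged usage sketch against the module above (the handler name and body are hypothetical):

    @webob.dec.wsgify
    @version_handler('1.2', '1.5')  # served only for microversions 1.2 through 1.5
    def get_resource_classes(req):
        ...

    assert parse_version_string('1.4') == Version(1, 4)
    assert parse_version_string('latest') == Version(1, 10)   # current maximum
    assert Version(1, 4).matches(Version(1, 2), Version(1, 5))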
build.js
(function() { 'use strict'; var getModulesList = function(modules) { return modules.map(function(moduleName) { return {name: moduleName}; }); };
return { namespace: 'RequireJS', /** * List the modules that will be optimized. All their immediate and deep * dependencies will be included in the module's file when the build is * done. */ modules: getModulesList([ 'course_bookmarks/js/course_bookmarks_factory', 'course_search/js/course_search_factory', 'course_search/js/dashboard_search_factory', 'discussion/js/discussion_board_factory', 'discussion/js/discussion_profile_page_factory', 'js/api_admin/catalog_preview_factory', 'js/courseware/courseware_factory', 'js/discovery/discovery_factory', 'js/edxnotes/views/notes_visibility_factory', 'js/edxnotes/views/page_factory', 'js/financial-assistance/financial_assistance_form_factory', 'js/groups/views/cohorts_dashboard_factory', 'js/discussions_management/views/discussions_dashboard_factory', 'js/header_factory', 'js/learner_dashboard/course_entitlement_factory', 'js/learner_dashboard/unenrollment_factory', 'js/learner_dashboard/entitlement_unenrollment_factory', 'js/learner_dashboard/program_details_factory', 'js/learner_dashboard/program_list_factory', 'js/student_account/logistration_factory', 'js/student_account/views/account_settings_factory', 'js/student_account/views/finish_auth_factory', 'js/views/message_banner', 'learner_profile/js/learner_profile_factory', 'lms/js/preview/preview_factory', 'support/js/certificates_factory', 'support/js/enrollment_factory', 'support/js/manage_user_factory', 'teams/js/teams_tab_factory', 'js/dateutil_factory' ]), /** * By default all the configuration for optimization happens from the command * line or by properties in the config file, and configuration that was * passed to requirejs as part of the app's runtime "main" JS file is *not* * considered. However, if you prefer the "main" JS file configuration * to be read for the build so that you do not have to duplicate the values * in a separate configuration, set this property to the location of that * main JS file. The first requirejs({}), require({}), requirejs.config({}), * or require.config({}) call found in that file will be used. * As of 2.1.10, mainConfigFile can be an array of values, with the last * value's config taking precedence over previous values in the array. */ mainConfigFile: 'require-config.js', /** * Set paths for modules. If relative paths, set relative to baseUrl above. * If a special value of "empty:" is used for the path value, then that * acts like mapping the path to an empty file. It allows the optimizer to * resolve the dependency to path, but then does not include it in the output. * Useful to map module names that are to resources on a CDN or other * http: URL when running in the browser and during an optimization that * file should be skipped because it has no dependencies. */ paths: { gettext: 'empty:', 'coffee/src/ajax_prefix': 'empty:', jquery: 'empty:', 'jquery-migrate': 'empty:', 'jquery.cookie': 'empty:', 'jquery.url': 'empty:', backbone: 'empty:', underscore: 'empty:', 'underscore.string': 'empty:', logger: 'empty:', utility: 'empty:', URI: 'empty:', 'common/js/discussion/views/discussion_inline_view': 'empty:', modernizr: 'empty:', 'which-country': 'empty:', // Don't bundle UI Toolkit helpers as they are loaded into the "edx" namespace 'edx-ui-toolkit/js/utils/html-utils': 'empty:', 'edx-ui-toolkit/js/utils/string-utils': 'empty:' }, /** * Inline requireJS text templates. */ inlineText: true, /** * Stub out requireJS text in the optimized file, but leave available for non-optimized development use. 
*/ stubModules: ['text'], /** * If shim config is used in the app during runtime, duplicate the config * here. Necessary if shim config is used, so that the shim's dependencies * are included in the build. Using "mainConfigFile" is a better way to * pass this information though, so that it is only listed in one place. * However, if mainConfigFile is not an option, the shim config can be * inlined in the build config. */ shim: {}, /** * Introduced in 2.1.2: If using "dir" for an output directory, normally the * optimize setting is used to optimize the build bundles (the "modules" * section of the config) and any other JS file in the directory. However, if * the non-build bundle JS files will not be loaded after a build, you can * skip the optimization of those files, to speed up builds. Set this value * to true if you want to skip optimizing those other non-build bundle JS * files. */ skipDirOptimize: true, /** * When the optimizer copies files from the source location to the * destination directory, it will skip directories and files that start * with a ".". If you want to copy .directories or certain .files, for * instance if you keep some packages in a .packages directory, or copy * over .htaccess files, you can set this to null. If you want to change * the exclusion rules, change it to a different regexp. If the regexp * matches, it means the directory will be excluded. This used to be * called dirExclusionRegExp before the 1.0.2 release. * As of 1.0.3, this value can also be a string that is converted to a * RegExp via new RegExp(). */ fileExclusionRegExp: /^\.|spec|spec_helpers/, /** * Allow CSS optimizations. Allowed values: * - "standard": @import inlining and removal of comments, unnecessary * whitespace and line returns. * Removing line returns may have problems in IE, depending on the type * of CSS. * - "standard.keepLines": like "standard" but keeps line returns. * - "none": skip CSS optimizations. * - "standard.keepComments": keeps the file comments, but removes line * returns. (r.js 1.0.8+) * - "standard.keepComments.keepLines": keeps the file comments and line * returns. (r.js 1.0.8+) * - "standard.keepWhitespace": like "standard" but keeps unnecessary whitespace. */ optimizeCss: 'none', /** * How to optimize all the JS files in the build output directory. * Right now only the following values are supported: * - "uglify": Uses UglifyJS to minify the code. * - "uglify2": Uses UglifyJS2. * - "closure": Uses Google's Closure Compiler in simple optimization * mode to minify the code. Only available if REQUIRE_ENVIRONMENT is "rhino" (the default). * - "none": No minification will be done. */ optimize: jsOptimize, /** * Sets the logging level. It is a number: * TRACE: 0, * INFO: 1, * WARN: 2, * ERROR: 3, * SILENT: 4 * Default is 0. */ logLevel: 1 }; }());
var jsOptimize = process.env.REQUIRE_BUILD_PROFILE_OPTIMIZE !== undefined ? process.env.REQUIRE_BUILD_PROFILE_OPTIMIZE : 'uglify2';
random_line_split
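The optimize entry above is driven by the REQUIRE_BUILD_PROFILE_OPTIMIZE environment variable, so the same profile can produce minified or unminified output. A minimal sketch of flipping that switch from Python; the r.js path and the build.js filename are assumptions for illustration, not taken from the sample:

import os
import subprocess

# Hypothetical invocation: assumes the requirejs npm package is installed
# locally and that the profile above is saved as build.js.
env = dict(os.environ, REQUIRE_BUILD_PROFILE_OPTIMIZE='none')  # skip minification
subprocess.check_call(
    ['node', 'node_modules/requirejs/bin/r.js', '-o', 'build.js'],
    env=env,
)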
sitecustomize.py
import os
import socket
import sys

input_host = '127.0.0.1'
input_port = 65000

batch_enabled = int(os.environ.get('_BACKEND_BATCH_MODE', '0'))
if batch_enabled:
    # Since latest Python 2 has `builtins` and `input`,
    # we cannot detect Python 2 with the existence of them.
    if sys.version_info.major > 2:
        import builtins

        def _input(prompt=''):
            sys.stdout.write(prompt)
            sys.stdout.flush()
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
                try:
                    sock.connect((input_host, input_port))
                    userdata = sock.recv(1024)
                except ConnectionRefusedError:
                    userdata = b'<user-input-unavailable>'
            return userdata.decode()

        builtins._input = input  # type: ignore
        builtins.input = _input
        import __builtin__
        builtins = __builtin__

        def _raw_input(prompt=''):
            sys.stdout.write(prompt)
            sys.stdout.flush()
            try:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.connect((input_host, input_port))
                userdata = sock.recv(1024)
            except socket.error:
                userdata = b'<user-input-unavailable>'
            finally:
                sock.close()
            return userdata.decode()

        builtins._raw_input = builtins.raw_input  # type: ignore
        builtins.raw_input = _raw_input  # type: ignore
    else:
        # __builtins__ is an alias dict for __builtin__ in modules other than __main__.
        # Thus, we have to explicitly import __builtin__ module in Python 2.
random_line_split
sitecustomize.py
import os
import socket
import sys

input_host = '127.0.0.1'
input_port = 65000

batch_enabled = int(os.environ.get('_BACKEND_BATCH_MODE', '0'))
if batch_enabled:
    # Since latest Python 2 has `builtins` and `input`,
    # we cannot detect Python 2 with the existence of them.
    if sys.version_info.major > 2:
        import builtins

        def _input(prompt=''):
            sys.stdout.write(prompt)
            sys.stdout.flush()
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
                try:
                    sock.connect((input_host, input_port))
                    userdata = sock.recv(1024)
                except ConnectionRefusedError:
                    userdata = b'<user-input-unavailable>'
            return userdata.decode()

        builtins._input = input  # type: ignore
        builtins.input = _input
    else:
        # __builtins__ is an alias dict for __builtin__ in modules other than __main__.
        # Thus, we have to explicitly import __builtin__ module in Python 2.
        import __builtin__
        builtins = __builtin__

        def _raw_input(prompt=''):
        builtins._raw_input = builtins.raw_input  # type: ignore
        builtins.raw_input = _raw_input  # type: ignore
            sys.stdout.write(prompt)
            sys.stdout.flush()
            try:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.connect((input_host, input_port))
                userdata = sock.recv(1024)
            except socket.error:
                userdata = b'<user-input-unavailable>'
            finally:
                sock.close()
            return userdata.decode()
identifier_body
sitecustomize.py
import os
import socket
import sys

input_host = '127.0.0.1'
input_port = 65000

batch_enabled = int(os.environ.get('_BACKEND_BATCH_MODE', '0'))
if batch_enabled:
    # Since latest Python 2 has `builtins` and `input`,
    # we cannot detect Python 2 with the existence of them.
    if sys.version_info.major > 2:
        import builtins

        def
(prompt=''):
            sys.stdout.write(prompt)
            sys.stdout.flush()
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
                try:
                    sock.connect((input_host, input_port))
                    userdata = sock.recv(1024)
                except ConnectionRefusedError:
                    userdata = b'<user-input-unavailable>'
            return userdata.decode()

        builtins._input = input  # type: ignore
        builtins.input = _input
    else:
        # __builtins__ is an alias dict for __builtin__ in modules other than __main__.
        # Thus, we have to explicitly import __builtin__ module in Python 2.
        import __builtin__
        builtins = __builtin__

        def _raw_input(prompt=''):
            sys.stdout.write(prompt)
            sys.stdout.flush()
            try:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.connect((input_host, input_port))
                userdata = sock.recv(1024)
            except socket.error:
                userdata = b'<user-input-unavailable>'
            finally:
                sock.close()
            return userdata.decode()

        builtins._raw_input = builtins.raw_input  # type: ignore
        builtins.raw_input = _raw_input  # type: ignore
_input
identifier_name
sitecustomize.py
import os
import socket
import sys

input_host = '127.0.0.1'
input_port = 65000

batch_enabled = int(os.environ.get('_BACKEND_BATCH_MODE', '0'))
if batch_enabled:
    # Since latest Python 2 has `builtins` and `input`,
    # we cannot detect Python 2 with the existence of them.
    if sys.version_info.major > 2:
    else:
        # __builtins__ is an alias dict for __builtin__ in modules other than __main__.
        # Thus, we have to explicitly import __builtin__ module in Python 2.
        import __builtin__
        builtins = __builtin__

        def _raw_input(prompt=''):
            sys.stdout.write(prompt)
            sys.stdout.flush()
            try:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.connect((input_host, input_port))
                userdata = sock.recv(1024)
            except socket.error:
                userdata = b'<user-input-unavailable>'
            finally:
                sock.close()
            return userdata.decode()

        builtins._raw_input = builtins.raw_input  # type: ignore
        builtins.raw_input = _raw_input  # type: ignore
        import builtins

        def _input(prompt=''):
            sys.stdout.write(prompt)
            sys.stdout.flush()
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
                try:
                    sock.connect((input_host, input_port))
                    userdata = sock.recv(1024)
                except ConnectionRefusedError:
                    userdata = b'<user-input-unavailable>'
            return userdata.decode()

        builtins._input = input  # type: ignore
        builtins.input = _input
conditional_block
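Both branches of the patched builtin connect to input_host:input_port (127.0.0.1:65000) and read at most 1024 bytes, so the counterpart on the host side is a small TCP server that hands one queued line of user input to each connection. A minimal sketch of such a feeder, assuming a single-shot design; the function name is mine, not part of the original file:

import socket

def serve_one_input(data, host='127.0.0.1', port=65000):
    # Hand one queued line of user input to the next input()/raw_input() call.
    # The patched builtins read at most 1024 bytes, so keep `data` short.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
        srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        srv.bind((host, port))
        srv.listen(1)
        conn, _addr = srv.accept()
        with conn:
            conn.sendall(data)

serve_one_input(b'42\n')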
index.d.ts
// Type definitions for karma-jasmine-spec-tags 1.2
// Project: https://github.com/mnasyrov/karma-jasmine-spec-tags#readme
// Definitions by: Piotr Błażejewicz (Peter Blazejewicz) <https://github.com/peterblazejewicz>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
// TypeScript Version: 3.2

import 'karma';

// A plugin for karma-jasmine which helps to filter tests (specs) by tags.
declare module 'karma' {
    /**
     * Default values can be configured using client map in Karma configuration
     */
    interface ClientOptions {
        /**
         * defines a prefix for a tag name
         * @default '#'
         */
        tagPrefix?: string;
        /**
         * defines a comma-separated list of tag names:
         * * if `names` is defined then specs which match to tags will be executed.
         * * If `names` is not defined then all specs with a tag will be executed.
        tags?: string | string[] | boolean;
        /**
         * defines a comma-separated list of tag names
         * * If `names` is defined then specs which match to tags will be skipped.
         * * If `names` is not defined then all specs with a tag will be skipped.
         */
        skipTags?: string | string[] | boolean;
    }
}
*/
random_line_split
pagerank.rs
use super::super::{ Network, NodeId };

/// Runs the pagerank algorithm on a graph until convergence.
/// Convergence is reached when the last ranks vector and the new one
/// differ by less than `eps` in their Euclidean (L2) norm.
/// `beta` is the teleport probability. CAUTION: Never use a teleport
/// probability of `beta == 0.0`!!! Due to precision errors in the double
/// values, the sum of the ranks vector elements can exceed `1.0` which
/// will be caught by an assertion and the algorithm will panic.
/// The result will be the pagerank for each node in the network.
pub fn pagerank<N: Network>(network: &N, beta: f64, eps: f64) -> Vec<f64> {
    let init_value = 1.0 / (network.num_nodes() as f64);
    let mut ranks = vec![0.0; network.num_nodes()];
    let mut new_ranks = vec![init_value; network.num_nodes()];
    let adj_lists = build_adj_list(network);
    let inv_out_deg = inv_out_deg(network);
    let mut i = 0;
    while !is_converged(&ranks, &new_ranks, eps) {
        print!("iteration {}: ", i);
        ranks = new_ranks;
        new_ranks = mult_matrix_vec(&adj_lists, &inv_out_deg, beta, &ranks);
        normalize(&mut new_ranks);
        i += 1;
    }
    ranks
}

/// Calculates the inverse of the out degree for each node in the network.
/// For out degree `0`, the inverse will also be `0`, guaranteeing that we
/// add `0.0` to the pagerank of the respective node.
fn inv_out_deg<N: Network>(network: &N) -> Vec<f64> {
    let mut inv_out_deg = Vec::with_capacity(network.num_nodes());
    for i in 0..network.num_nodes() {
        let out_deg = network.adjacent(i as NodeId).len() as f64;
        if out_deg > 0.0 {
            inv_out_deg.push(1.0 / out_deg);
        } else {
            inv_out_deg.push(0.0);
        }
    }
    inv_out_deg
}

/// Converts the network into a slightly faster traversable adjacency list.
fn build_adj_list<N: Network>(network: &N) -> Vec<Vec<usize>> {
    let mut adj_list = Vec::with_capacity(network.num_nodes());
    for i in 0..network.num_nodes() {
        let adj_nodes = network.adjacent(i as NodeId);
        let mut i_th_adj_nodes = Vec::with_capacity(adj_nodes.len());
        for j in adj_nodes {
            i_th_adj_nodes.push(j as usize);
        }
        adj_list.push(i_th_adj_nodes);
    }
    adj_list
}

/// Normalize the vector to \sum_i v_i = 1. Remaining mass is distributed
/// evenly over all nodes. (Also known as smoothing.)
/// # Panics
/// If the sum of all elements is greater than `1.0`
fn normalize(vector: &mut Vec<f64>) {
    let mut sum = 0.0;
    for i in 0..vector.len() {
        sum += vector[i];
    }
    assert!(sum <= 1.0);
    let corrective_value = (1.0 - sum) / (vector.len() as f64);
    for i in 0..vector.len() {
        vector[i] += corrective_value;
    }
}

/// Multiply the ranks vector with the adjacency matrix. Every entry is
/// damped by `1.0 - beta`. The vector is multiplied from the left!
fn mult_matrix_vec(adj_list: &Vec<Vec<usize>>, inv_out_degs: &Vec<f64>, beta: f64, current: &Vec<f64>) -> Vec<f64>
/// Determines convergence for two vectors with respect to the tolerance.
fn is_converged(old: &Vec<f64>, new: &Vec<f64>, eps: f64) -> bool {
    assert!(old.len() == new.len());
    let mut sum = 0.0;
    for i in 0..old.len() {
        sum += (old[i] - new[i]).powi(2);
    }
    println!("{:e} ({:e})", sum.sqrt(), eps);
    sum.sqrt() <= eps
}

#[test]
fn test_inv_out_deg() {
    use super::super::compact_star::compact_star_from_edge_vec;
    let mut edges = vec![
        (0,1,0.0,0.0), (0,2,0.0,0.0), (0,3,0.0,0.0),
        (1,2,0.0,0.0), (1,3,0.0,0.0),
        (2,0,0.0,0.0),
        (3,0,0.0,0.0), (3,2,0.0,0.0)];
    let compact_star = compact_star_from_edge_vec(4, &mut edges);
    assert_eq!(vec![1.0/3.0, 1.0/2.0, 1.0/1.0, 1.0/2.0], inv_out_deg(&compact_star));
}

#[test]
fn test_build_adj_list() {
    use super::super::compact_star::compact_star_from_edge_vec;
    let mut edges = vec![
        (0,1,0.0,0.0), (0,2,0.0,0.0), (0,3,0.0,0.0),
        (1,2,0.0,0.0), (1,3,0.0,0.0),
        (2,0,0.0,0.0),
        (3,0,0.0,0.0), (3,2,0.0,0.0)];
    let compact_star = compact_star_from_edge_vec(4, &mut edges);
    let adj_list = vec![vec![1,2,3], vec![2,3], vec![0], vec![0,2]];
    assert_eq!(adj_list, build_adj_list(&compact_star));
}

#[test]
fn test_normalize() {
    let mut to_normalize = vec![0.125, 0.125, 0.125, 0.125];
    normalize(&mut to_normalize);
    assert_eq!(vec![0.25, 0.25, 0.25, 0.25], to_normalize);
}

#[test]
fn test_is_converged() {
    let v1 = vec![0.0; 5];
    let v2 = vec![1.0; 5];
    let v3 = vec![1.0, 1.0, 1.0, 1.0, 1.00000001];
    assert!(is_converged(&v1, &v1, 1e-6));
    assert!(!is_converged(&v1, &v2, 1e-6));
    assert!(is_converged(&v2, &v3, 1e-4));
}

#[test]
fn test_pagerank() {
    use super::super::compact_star::compact_star_from_edge_vec;
    let mut edges = vec![
        (0,1,0.0,0.0), (0,2,0.0,0.0), (0,3,0.0,0.0),
        (1,2,0.0,0.0), (1,3,0.0,0.0),
        (2,0,0.0,0.0),
        (3,0,0.0,0.0), (3,2,0.0,0.0)];
    let compact_star = compact_star_from_edge_vec(4, &mut edges);
    let ranks = pagerank(&compact_star, 1e-10,1e-3);
    assert_eq!(vec![0.38,0.12,0.29,0.19], ranks);
}
{
    let mut new_ranks = vec![0.0; current.len()];
    for source_node in 0..current.len() {
        let inv_out_deg = inv_out_degs[source_node];
        for target_node in &adj_list[source_node] {
            new_ranks[*target_node] += (1.0-beta) * inv_out_deg * current[source_node];
        }
    }
    new_ranks
}
identifier_body
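Reading mult_matrix_vec and normalize together, one iteration of this implementation can be summarized as follows (my notation, not from the source); with out-degree d^+_i and n nodes:

r'_j = (1-\beta) \sum_{i \to j} \frac{r_i}{d^{+}_i},
\qquad
r_j \leftarrow r'_j + \frac{1}{n}\Bigl(1 - \sum_k r'_k\Bigr)

The second step spreads the teleport mass beta, plus any mass lost at dangling nodes (whose inverse out-degree is forced to 0), uniformly over all nodes; this is also why the assertion in normalize requires the intermediate sum to stay at most 1.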
pagerank.rs
use super::super::{ Network, NodeId }; /// Runs pagerank algorithm on a graph until convergence. /// Convergence is reached, when the last ranks vector and the new one /// differ by less than `eps` in their L1-norm. /// `beta` is the teleport probability. CAUTION: Never use a teleport /// probability of `beta == 0.0`!!! Due to precision errors in the double /// values, the sum of the ranks vector elements can exceed `1.0` which /// will be caught by an assertion and the algorithm will panic. /// The result will be the pagerank for each node in the network. pub fn pagerank<N: Network>(network: &N, beta: f64, eps: f64) -> Vec<f64> { let init_value = 1.0 / (network.num_nodes() as f64); let mut ranks = vec![0.0; network.num_nodes()]; let mut new_ranks = vec![init_value; network.num_nodes()]; let adj_lists = build_adj_list(network); let inv_out_deg = inv_out_deg(network); let mut i = 0; while !is_converged(&ranks, &new_ranks, eps) { print!("iteration {}: ", i); ranks = new_ranks; new_ranks = mult_matrix_vec(&adj_lists, &inv_out_deg, beta, &ranks); normalize(&mut new_ranks); i+=1; } ranks } /// Calculates the inverse of the out degree for each node in the network. /// For out degree `0`, the inverse will also be `0`, guaranteeing that we /// add `0.0` to the pagerank of the respective node. fn inv_out_deg<N: Network>(network: &N) -> Vec<f64> { let mut inv_out_deg = Vec::with_capacity(network.num_nodes()); for i in 0..network.num_nodes() { let out_deg = network.adjacent(i as NodeId).len() as f64; if out_deg > 0.0 { inv_out_deg.push(1.0 / out_deg); } else { inv_out_deg.push(0.0); } } inv_out_deg } /// Converts the network in a slightly faster traversable adjacency list. fn build_adj_list<N: Network>(network: &N) -> Vec<Vec<usize>> { let mut adj_list = Vec::with_capacity(network.num_nodes()); for i in 0..network.num_nodes() { let adj_nodes = network.adjacent(i as NodeId); let mut i_th_adj_nodes = Vec::with_capacity(adj_nodes.len()); for j in adj_nodes { i_th_adj_nodes.push(j as usize); } adj_list.push(i_th_adj_nodes); } adj_list } /// Normalize the vector to \sum_i v_i = 1. Remaining mass is distributed /// evenly over all nodes. (Also known as smoothing.) /// # Panics /// If the sum of all elements is greater than `1.0` fn normalize(vector: &mut Vec<f64>) { let mut sum = 0.0; for i in 0..vector.len() { sum += vector[i]; } assert!(sum <= 1.0); let corrective_value = (1.0 - sum)/(vector.len() as f64); for i in 0..vector.len() { vector[i] += corrective_value; } } /// Multiply the ranks vector with the adjacency matrix. Every entry is /// damped by `1.0 - beta`. The vector is multiplied from the left! fn mult_matrix_vec(adj_list: &Vec<Vec<usize>>, inv_out_degs: &Vec<f64>, beta: f64, current: &Vec<f64>) -> Vec<f64> { let mut new_ranks = vec![0.0; current.len()]; for source_node in 0..current.len() { let inv_out_deg = inv_out_degs[source_node]; for target_node in &adj_list[source_node] { new_ranks[*target_node] += (1.0-beta) * inv_out_deg * current[source_node]; } } new_ranks } /// Determines convergence for two vectors with respect to the tolerance. fn is_converged(old: &Vec<f64>, new: &Vec<f64>, eps: f64) -> bool { assert!(old.len() == new.len()); let mut sum = 0.0; for i in 0..old.len() { sum += (old[i] - new[i]).powi(2); } println!("{:e} ({:e})", sum.sqrt(), eps); sum.sqrt() <= eps } #[test] fn test_inv_out_deg() { use super::super::compact_star::compact_star_from_edge_vec; let mut edges = vec![ (0,1,0.0,0.0), (0,2,0.0,0.0),
(0,3,0.0,0.0), (1,2,0.0,0.0), (1,3,0.0,0.0), (2,0,0.0,0.0), (3,0,0.0,0.0), (3,2,0.0,0.0)]; let compact_star = compact_star_from_edge_vec(4, &mut edges); assert_eq!(vec![1.0/3.0, 1.0/2.0, 1.0/1.0, 1.0/2.0], inv_out_deg(&compact_star)); } #[test] fn test_build_adj_list() { use super::super::compact_star::compact_star_from_edge_vec; let mut edges = vec![ (0,1,0.0,0.0), (0,2,0.0,0.0), (0,3,0.0,0.0), (1,2,0.0,0.0), (1,3,0.0,0.0), (2,0,0.0,0.0), (3,0,0.0,0.0), (3,2,0.0,0.0)]; let compact_star = compact_star_from_edge_vec(4, &mut edges); let adj_list = vec![vec![1,2,3], vec![2,3], vec![0], vec![0,2]]; assert_eq!(adj_list, build_adj_list(&compact_star)); } #[test] fn test_normalize() { let mut to_normalize = vec![0.125, 0.125, 0.125, 0.125]; normalize(&mut to_normalize); assert_eq!(vec![0.25, 0.25, 0.25, 0.25], to_normalize); } #[test] fn test_is_converged() { let v1 = vec![0.0; 5]; let v2 = vec![1.0; 5]; let v3 = vec![1.0, 1.0, 1.0, 1.0, 1.00000001]; assert!(is_converged(&v1, &v1, 1e-6)); assert!(!is_converged(&v1, &v2, 1e-6)); assert!(is_converged(&v2, &v3, 1e-4)); } #[test] fn test_pagerank() { use super::super::compact_star::compact_star_from_edge_vec; let mut edges = vec![ (0,1,0.0,0.0), (0,2,0.0,0.0), (0,3,0.0,0.0), (1,2,0.0,0.0), (1,3,0.0,0.0), (2,0,0.0,0.0), (3,0,0.0,0.0), (3,2,0.0,0.0)]; let compact_star = compact_star_from_edge_vec(4, &mut edges); let ranks = pagerank(&compact_star, 1e-10,1e-3); assert_eq!(vec![0.38,0.12,0.29,0.19], ranks); }
random_line_split
pagerank.rs
use super::super::{ Network, NodeId };

/// Runs the pagerank algorithm on a graph until convergence.
/// Convergence is reached when the last ranks vector and the new one
/// differ by less than `eps` in their Euclidean (L2) norm.
/// `beta` is the teleport probability. CAUTION: Never use a teleport
/// probability of `beta == 0.0`!!! Due to precision errors in the double
/// values, the sum of the ranks vector elements can exceed `1.0` which
/// will be caught by an assertion and the algorithm will panic.
/// The result will be the pagerank for each node in the network.
pub fn pagerank<N: Network>(network: &N, beta: f64, eps: f64) -> Vec<f64> {
    let init_value = 1.0 / (network.num_nodes() as f64);
    let mut ranks = vec![0.0; network.num_nodes()];
    let mut new_ranks = vec![init_value; network.num_nodes()];
    let adj_lists = build_adj_list(network);
    let inv_out_deg = inv_out_deg(network);
    let mut i = 0;
    while !is_converged(&ranks, &new_ranks, eps) {
        print!("iteration {}: ", i);
        ranks = new_ranks;
        new_ranks = mult_matrix_vec(&adj_lists, &inv_out_deg, beta, &ranks);
        normalize(&mut new_ranks);
        i += 1;
    }
    ranks
}

/// Calculates the inverse of the out degree for each node in the network.
/// For out degree `0`, the inverse will also be `0`, guaranteeing that we
/// add `0.0` to the pagerank of the respective node.
fn
<N: Network>(network: &N) -> Vec<f64> { let mut inv_out_deg = Vec::with_capacity(network.num_nodes()); for i in 0..network.num_nodes() { let out_deg = network.adjacent(i as NodeId).len() as f64; if out_deg > 0.0 { inv_out_deg.push(1.0 / out_deg); } else { inv_out_deg.push(0.0); } } inv_out_deg } /// Converts the network in a slightly faster traversable adjacency list. fn build_adj_list<N: Network>(network: &N) -> Vec<Vec<usize>> { let mut adj_list = Vec::with_capacity(network.num_nodes()); for i in 0..network.num_nodes() { let adj_nodes = network.adjacent(i as NodeId); let mut i_th_adj_nodes = Vec::with_capacity(adj_nodes.len()); for j in adj_nodes { i_th_adj_nodes.push(j as usize); } adj_list.push(i_th_adj_nodes); } adj_list } /// Normalize the vector to \sum_i v_i = 1. Remaining mass is distributed /// evenly over all nodes. (Also known as smoothing.) /// # Panics /// If the sum of all elements is greater than `1.0` fn normalize(vector: &mut Vec<f64>) { let mut sum = 0.0; for i in 0..vector.len() { sum += vector[i]; } assert!(sum <= 1.0); let corrective_value = (1.0 - sum)/(vector.len() as f64); for i in 0..vector.len() { vector[i] += corrective_value; } } /// Multiply the ranks vector with the adjacency matrix. Every entry is /// damped by `1.0 - beta`. The vector is multiplied from the left! fn mult_matrix_vec(adj_list: &Vec<Vec<usize>>, inv_out_degs: &Vec<f64>, beta: f64, current: &Vec<f64>) -> Vec<f64> { let mut new_ranks = vec![0.0; current.len()]; for source_node in 0..current.len() { let inv_out_deg = inv_out_degs[source_node]; for target_node in &adj_list[source_node] { new_ranks[*target_node] += (1.0-beta) * inv_out_deg * current[source_node]; } } new_ranks } /// Determines convergence for two vectors with respect to the tolerance. fn is_converged(old: &Vec<f64>, new: &Vec<f64>, eps: f64) -> bool { assert!(old.len() == new.len()); let mut sum = 0.0; for i in 0..old.len() { sum += (old[i] - new[i]).powi(2); } println!("{:e} ({:e})", sum.sqrt(), eps); sum.sqrt() <= eps } #[test] fn test_inv_out_deg() { use super::super::compact_star::compact_star_from_edge_vec; let mut edges = vec![ (0,1,0.0,0.0), (0,2,0.0,0.0), (0,3,0.0,0.0), (1,2,0.0,0.0), (1,3,0.0,0.0), (2,0,0.0,0.0), (3,0,0.0,0.0), (3,2,0.0,0.0)]; let compact_star = compact_star_from_edge_vec(4, &mut edges); assert_eq!(vec![1.0/3.0, 1.0/2.0, 1.0/1.0, 1.0/2.0], inv_out_deg(&compact_star)); } #[test] fn test_build_adj_list() { use super::super::compact_star::compact_star_from_edge_vec; let mut edges = vec![ (0,1,0.0,0.0), (0,2,0.0,0.0), (0,3,0.0,0.0), (1,2,0.0,0.0), (1,3,0.0,0.0), (2,0,0.0,0.0), (3,0,0.0,0.0), (3,2,0.0,0.0)]; let compact_star = compact_star_from_edge_vec(4, &mut edges); let adj_list = vec![vec![1,2,3], vec![2,3], vec![0], vec![0,2]]; assert_eq!(adj_list, build_adj_list(&compact_star)); } #[test] fn test_normalize() { let mut to_normalize = vec![0.125, 0.125, 0.125, 0.125]; normalize(&mut to_normalize); assert_eq!(vec![0.25, 0.25, 0.25, 0.25], to_normalize); } #[test] fn test_is_converged() { let v1 = vec![0.0; 5]; let v2 = vec![1.0; 5]; let v3 = vec![1.0, 1.0, 1.0, 1.0, 1.00000001]; assert!(is_converged(&v1, &v1, 1e-6)); assert!(!is_converged(&v1, &v2, 1e-6)); assert!(is_converged(&v2, &v3, 1e-4)); } #[test] fn test_pagerank() { use super::super::compact_star::compact_star_from_edge_vec; let mut edges = vec![ (0,1,0.0,0.0), (0,2,0.0,0.0), (0,3,0.0,0.0), (1,2,0.0,0.0), (1,3,0.0,0.0), (2,0,0.0,0.0), (3,0,0.0,0.0), (3,2,0.0,0.0)]; let compact_star = compact_star_from_edge_vec(4, &mut edges); let ranks = 
pagerank(&compact_star, 1e-10,1e-3);
    assert_eq!(vec![0.38,0.12,0.29,0.19], ranks);
}
inv_out_deg
identifier_name
pagerank.rs
use super::super::{ Network, NodeId };

/// Runs the pagerank algorithm on a graph until convergence.
/// Convergence is reached when the last ranks vector and the new one
/// differ by less than `eps` in their Euclidean (L2) norm.
/// `beta` is the teleport probability. CAUTION: Never use a teleport
/// probability of `beta == 0.0`!!! Due to precision errors in the double
/// values, the sum of the ranks vector elements can exceed `1.0` which
/// will be caught by an assertion and the algorithm will panic.
/// The result will be the pagerank for each node in the network.
pub fn pagerank<N: Network>(network: &N, beta: f64, eps: f64) -> Vec<f64> {
    let init_value = 1.0 / (network.num_nodes() as f64);
    let mut ranks = vec![0.0; network.num_nodes()];
    let mut new_ranks = vec![init_value; network.num_nodes()];
    let adj_lists = build_adj_list(network);
    let inv_out_deg = inv_out_deg(network);
    let mut i = 0;
    while !is_converged(&ranks, &new_ranks, eps) {
        print!("iteration {}: ", i);
        ranks = new_ranks;
        new_ranks = mult_matrix_vec(&adj_lists, &inv_out_deg, beta, &ranks);
        normalize(&mut new_ranks);
        i += 1;
    }
    ranks
}

/// Calculates the inverse of the out degree for each node in the network.
/// For out degree `0`, the inverse will also be `0`, guaranteeing that we
/// add `0.0` to the pagerank of the respective node.
fn inv_out_deg<N: Network>(network: &N) -> Vec<f64> {
    let mut inv_out_deg = Vec::with_capacity(network.num_nodes());
    for i in 0..network.num_nodes() {
        let out_deg = network.adjacent(i as NodeId).len() as f64;
        if out_deg > 0.0 {
            inv_out_deg.push(1.0 / out_deg);
        } else
} inv_out_deg } /// Converts the network in a slightly faster traversable adjacency list. fn build_adj_list<N: Network>(network: &N) -> Vec<Vec<usize>> { let mut adj_list = Vec::with_capacity(network.num_nodes()); for i in 0..network.num_nodes() { let adj_nodes = network.adjacent(i as NodeId); let mut i_th_adj_nodes = Vec::with_capacity(adj_nodes.len()); for j in adj_nodes { i_th_adj_nodes.push(j as usize); } adj_list.push(i_th_adj_nodes); } adj_list } /// Normalize the vector to \sum_i v_i = 1. Remaining mass is distributed /// evenly over all nodes. (Also known as smoothing.) /// # Panics /// If the sum of all elements is greater than `1.0` fn normalize(vector: &mut Vec<f64>) { let mut sum = 0.0; for i in 0..vector.len() { sum += vector[i]; } assert!(sum <= 1.0); let corrective_value = (1.0 - sum)/(vector.len() as f64); for i in 0..vector.len() { vector[i] += corrective_value; } } /// Multiply the ranks vector with the adjacency matrix. Every entry is /// damped by `1.0 - beta`. The vector is multiplied from the left! fn mult_matrix_vec(adj_list: &Vec<Vec<usize>>, inv_out_degs: &Vec<f64>, beta: f64, current: &Vec<f64>) -> Vec<f64> { let mut new_ranks = vec![0.0; current.len()]; for source_node in 0..current.len() { let inv_out_deg = inv_out_degs[source_node]; for target_node in &adj_list[source_node] { new_ranks[*target_node] += (1.0-beta) * inv_out_deg * current[source_node]; } } new_ranks } /// Determines convergence for two vectors with respect to the tolerance. fn is_converged(old: &Vec<f64>, new: &Vec<f64>, eps: f64) -> bool { assert!(old.len() == new.len()); let mut sum = 0.0; for i in 0..old.len() { sum += (old[i] - new[i]).powi(2); } println!("{:e} ({:e})", sum.sqrt(), eps); sum.sqrt() <= eps } #[test] fn test_inv_out_deg() { use super::super::compact_star::compact_star_from_edge_vec; let mut edges = vec![ (0,1,0.0,0.0), (0,2,0.0,0.0), (0,3,0.0,0.0), (1,2,0.0,0.0), (1,3,0.0,0.0), (2,0,0.0,0.0), (3,0,0.0,0.0), (3,2,0.0,0.0)]; let compact_star = compact_star_from_edge_vec(4, &mut edges); assert_eq!(vec![1.0/3.0, 1.0/2.0, 1.0/1.0, 1.0/2.0], inv_out_deg(&compact_star)); } #[test] fn test_build_adj_list() { use super::super::compact_star::compact_star_from_edge_vec; let mut edges = vec![ (0,1,0.0,0.0), (0,2,0.0,0.0), (0,3,0.0,0.0), (1,2,0.0,0.0), (1,3,0.0,0.0), (2,0,0.0,0.0), (3,0,0.0,0.0), (3,2,0.0,0.0)]; let compact_star = compact_star_from_edge_vec(4, &mut edges); let adj_list = vec![vec![1,2,3], vec![2,3], vec![0], vec![0,2]]; assert_eq!(adj_list, build_adj_list(&compact_star)); } #[test] fn test_normalize() { let mut to_normalize = vec![0.125, 0.125, 0.125, 0.125]; normalize(&mut to_normalize); assert_eq!(vec![0.25, 0.25, 0.25, 0.25], to_normalize); } #[test] fn test_is_converged() { let v1 = vec![0.0; 5]; let v2 = vec![1.0; 5]; let v3 = vec![1.0, 1.0, 1.0, 1.0, 1.00000001]; assert!(is_converged(&v1, &v1, 1e-6)); assert!(!is_converged(&v1, &v2, 1e-6)); assert!(is_converged(&v2, &v3, 1e-4)); } #[test] fn test_pagerank() { use super::super::compact_star::compact_star_from_edge_vec; let mut edges = vec![ (0,1,0.0,0.0), (0,2,0.0,0.0), (0,3,0.0,0.0), (1,2,0.0,0.0), (1,3,0.0,0.0), (2,0,0.0,0.0), (3,0,0.0,0.0), (3,2,0.0,0.0)]; let compact_star = compact_star_from_edge_vec(4, &mut edges); let ranks = pagerank(&compact_star, 1e-10,1e-3); assert_eq!(vec![0.38,0.12,0.29,0.19], ranks); }
{
            inv_out_deg.push(0.0);
        }
conditional_block
header-test.js
const { expect } = require('chai')
const path = require('path')
const ldnode = require('../../index')
const supertest = require('supertest')

const serverOptions = {
  root: path.join(__dirname, '../resources/headers'),
  multiuser: false,
  webid: true,
  sslKey: path.join(__dirname, '../keys/key.pem'),
  sslCert: path.join(__dirname, '../keys/cert.pem'),
  forceUser: 'https://ruben.verborgh.org/profile/#me'
}

describe('Header handler', () => {
  let request
  before(() => {
    const server = ldnode.createServer(serverOptions)
    request = supertest(server)
  })

  describe('WAC-Allow', () => {
    describeHeaderTest('read/append for the public', {
      resource: '/public-ra',
      headers: {
        'WAC-Allow': 'user="read append",public="read append"',
        'Access-Control-Expose-Headers': /(^|,\s*)WAC-Allow(,|$)/
      }
    })
    describeHeaderTest('read/write for the user, read for the public', {
      resource: '/user-rw-public-r',
      headers: {
        'WAC-Allow': 'user="read write append",public="read"',
        'Access-Control-Expose-Headers': /(^|,\s*)WAC-Allow(,|$)/
      }
    })
    describeHeaderTest('read/write/append/control for the user, nothing for the public', {
      resource: '/user-rwac-public-0',
      headers: {
        'WAC-Allow': 'user="read write append control",public=""',
        'Access-Control-Expose-Headers': /(^|,\s*)WAC-Allow(,|$)/
      }
    })
  })

  function describeHeaderTest (label, { resource, headers })
  }
})
{
    describe(`a resource that is ${label}`, () => {
      let response
      before(() => request.get(resource).then(res => {
        response = res
      }))

      for (const header in headers) {
        const value = headers[header]
        const name = header.toLowerCase()
        if (value instanceof RegExp) {
          it(`has a ${header} header matching ${value}`, () => {
            expect(response.headers).to.have.property(name)
            expect(response.headers[name]).to.match(value)
          })
        } else {
          it(`has a ${header} header of ${value}`, () => {
            expect(response.headers).to.have.property(name, value)
          })
        }
      }
    })
identifier_body
header-test.js
const { expect } = require('chai')
const path = require('path')
const ldnode = require('../../index')
const supertest = require('supertest')

const serverOptions = {
  root: path.join(__dirname, '../resources/headers'),
  multiuser: false,
  webid: true,
  sslKey: path.join(__dirname, '../keys/key.pem'),
  sslCert: path.join(__dirname, '../keys/cert.pem'),
  forceUser: 'https://ruben.verborgh.org/profile/#me'
}

describe('Header handler', () => {
  let request
  before(() => {
    const server = ldnode.createServer(serverOptions)
    request = supertest(server)
  })

  describe('WAC-Allow', () => {
    describeHeaderTest('read/append for the public', {
headers: {
        'WAC-Allow': 'user="read append",public="read append"',
        'Access-Control-Expose-Headers': /(^|,\s*)WAC-Allow(,|$)/
      }
    })
    describeHeaderTest('read/write for the user, read for the public', {
      resource: '/user-rw-public-r',
      headers: {
        'WAC-Allow': 'user="read write append",public="read"',
        'Access-Control-Expose-Headers': /(^|,\s*)WAC-Allow(,|$)/
      }
    })
    describeHeaderTest('read/write/append/control for the user, nothing for the public', {
      resource: '/user-rwac-public-0',
      headers: {
        'WAC-Allow': 'user="read write append control",public=""',
        'Access-Control-Expose-Headers': /(^|,\s*)WAC-Allow(,|$)/
      }
    })
  })

  function describeHeaderTest (label, { resource, headers }) {
    describe(`a resource that is ${label}`, () => {
      let response
      before(() => request.get(resource).then(res => {
        response = res
      }))

      for (const header in headers) {
        const value = headers[header]
        const name = header.toLowerCase()
        if (value instanceof RegExp) {
          it(`has a ${header} header matching ${value}`, () => {
            expect(response.headers).to.have.property(name)
            expect(response.headers[name]).to.match(value)
          })
        } else {
          it(`has a ${header} header of ${value}`, () => {
            expect(response.headers).to.have.property(name, value)
          })
        }
      }
    })
  }
})
resource: '/public-ra',
random_line_split
header-test.js
const { expect } = require('chai')
const path = require('path')
const ldnode = require('../../index')
const supertest = require('supertest')

const serverOptions = {
  root: path.join(__dirname, '../resources/headers'),
  multiuser: false,
  webid: true,
  sslKey: path.join(__dirname, '../keys/key.pem'),
  sslCert: path.join(__dirname, '../keys/cert.pem'),
  forceUser: 'https://ruben.verborgh.org/profile/#me'
}

describe('Header handler', () => {
  let request
  before(() => {
    const server = ldnode.createServer(serverOptions)
    request = supertest(server)
  })

  describe('WAC-Allow', () => {
    describeHeaderTest('read/append for the public', {
      resource: '/public-ra',
      headers: {
        'WAC-Allow': 'user="read append",public="read append"',
        'Access-Control-Expose-Headers': /(^|,\s*)WAC-Allow(,|$)/
      }
    })
    describeHeaderTest('read/write for the user, read for the public', {
      resource: '/user-rw-public-r',
      headers: {
        'WAC-Allow': 'user="read write append",public="read"',
        'Access-Control-Expose-Headers': /(^|,\s*)WAC-Allow(,|$)/
      }
    })
    describeHeaderTest('read/write/append/control for the user, nothing for the public', {
      resource: '/user-rwac-public-0',
      headers: {
        'WAC-Allow': 'user="read write append control",public=""',
        'Access-Control-Expose-Headers': /(^|,\s*)WAC-Allow(,|$)/
      }
    })
  })

  function describeHeaderTest (label, { resource, headers }) {
    describe(`a resource that is ${label}`, () => {
      let response
      before(() => request.get(resource).then(res => {
        response = res
      }))

      for (const header in headers) {
        const value = headers[header]
        const name = header.toLowerCase()
        if (value instanceof RegExp)
else {
          it(`has a ${header} header of ${value}`, () => {
            expect(response.headers).to.have.property(name, value)
          })
        }
      }
    })
  }
})
{
          it(`has a ${header} header matching ${value}`, () => {
            expect(response.headers).to.have.property(name)
            expect(response.headers[name]).to.match(value)
          })
        }
conditional_block
header-test.js
const { expect } = require('chai')
const path = require('path')
const ldnode = require('../../index')
const supertest = require('supertest')

const serverOptions = {
  root: path.join(__dirname, '../resources/headers'),
  multiuser: false,
  webid: true,
  sslKey: path.join(__dirname, '../keys/key.pem'),
  sslCert: path.join(__dirname, '../keys/cert.pem'),
  forceUser: 'https://ruben.verborgh.org/profile/#me'
}

describe('Header handler', () => {
  let request
  before(() => {
    const server = ldnode.createServer(serverOptions)
    request = supertest(server)
  })

  describe('WAC-Allow', () => {
    describeHeaderTest('read/append for the public', {
      resource: '/public-ra',
      headers: {
        'WAC-Allow': 'user="read append",public="read append"',
        'Access-Control-Expose-Headers': /(^|,\s*)WAC-Allow(,|$)/
      }
    })
    describeHeaderTest('read/write for the user, read for the public', {
      resource: '/user-rw-public-r',
      headers: {
        'WAC-Allow': 'user="read write append",public="read"',
        'Access-Control-Expose-Headers': /(^|,\s*)WAC-Allow(,|$)/
      }
    })
    describeHeaderTest('read/write/append/control for the user, nothing for the public', {
      resource: '/user-rwac-public-0',
      headers: {
        'WAC-Allow': 'user="read write append control",public=""',
        'Access-Control-Expose-Headers': /(^|,\s*)WAC-Allow(,|$)/
      }
    })
  })

  function
(label, { resource, headers }) {
    describe(`a resource that is ${label}`, () => {
      let response
      before(() => request.get(resource).then(res => {
        response = res
      }))

      for (const header in headers) {
        const value = headers[header]
        const name = header.toLowerCase()
        if (value instanceof RegExp) {
          it(`has a ${header} header matching ${value}`, () => {
            expect(response.headers).to.have.property(name)
            expect(response.headers[name]).to.match(value)
          })
        } else {
          it(`has a ${header} header of ${value}`, () => {
            expect(response.headers).to.have.property(name, value)
          })
        }
      }
    })
  }
})
describeHeaderTest
identifier_name
errors.py
# -*- coding: utf-8 -*-

# Copyright 2009-2013 Jaap Karssenberg <[email protected]>

# The Error class needed to be put in a separate file to avoid recursive
# imports.

'''This module contains the base class for all errors in zim'''

import sys
import logging

logger = logging.getLogger('zim')

use_gtk_errordialog = False

def set_use_gtk(use_gtk):
    '''Set whether or not L{show_error} and L{exception_handler}
    should use the L{ErrorDialog} or not.
    @param use_gtk: set C{True} for interactive gui, C{False} for
    terminal mode
    '''
    global use_gtk_errordialog
    use_gtk_errordialog = use_gtk

def get_error_msg(error):
    '''Returns the message to show for an error
    @param error: error object or string
    @returns: 2-tuple of: message string and a boolean
    whether a traceback should be shown or not
    '''
    if isinstance(error, Error):
        # An "expected" error
        return error.msg, False
    elif isinstance(error, EnvironmentError):
        # Normal error, e.g. OSError or IOError
        msg = error.strerror
        if hasattr(error, 'filename') and error.filename:
            msg += ': ' + error.filename
        return msg, False
    else:
        # An unexpected error, all other Exception's
        msg = _('Looks like you found a bug') # T: generic error dialog
        return msg, True

def log_error(error, debug=None):
    '''Log error and traceback
    @param error: error as understood by L{get_error_msg()}
    @param debug: optional debug message, defaults to the error itself
    '''
    msg, show_trace = get_error_msg(error)
    if debug is None:
        debug = msg

    if show_trace:
        # unexpected error - will be logged with traceback
        logger.exception(debug)
    else:
        # expected error - log trace to debug
        logger.debug(debug, exc_info=1)

    logger.error(msg)

def _run_error_dialog(error):
    #~ try:
    from zim.gui.widgets import ErrorDialog
    ErrorDialog(None, error, do_logging=False).run()
    #~ except:
    #~ logger.error('Failed to run error dialog')

def show_error(error):
    '''Show an error by calling L{log_error()} and when running
    interactive also calling L{ErrorDialog}.
    @param error: the error object
    '''
    log_error(error)
    if use_gtk_errordialog:
        _run_error_dialog(error)

def exception_handler(debug):
    '''Like C{show_error()} but with debug message instead of the actual
    error. Intended to be used in C{except} blocks as a catch-all for
    both intended and unintended errors.
    @param debug: debug message for logging
    '''
    # We use debug as log message, rather than the error itself
    # the error itself shows up in the traceback anyway
    error = exc_info[1]
    del exc_info # recommended by manual

    log_error(error, debug=debug)
    if use_gtk_errordialog:
        _run_error_dialog(error)

class Error(Exception):
    '''Base class for all errors in zim.

    This class is intended for application and usage errors, these will
    be caught in the user interface and presented as error dialogs.
    In contrast an Exception that does I{not} derive from this base class
    will result in a "You found a bug" dialog. Do not use this class
    e.g. to catch programming errors.

    Subclasses should define two attributes. The first is 'msg', which is
    the short description of the error. Typically this gives the specific
    input / page / ... which caused the error. In there should be an
    attribute 'description' (either as class attribute or object
    attribute) with a verbose description. This description can be less
    specific but should explain the error in a user friendly way. The
    default behavior is to take 'msg' as the single argument for the
    constructor. So a minimal subclass only needs to define a class
    attribute 'description'.

    For a typical error dialog in the Gtk interface the short string from
    'msg' will be shown as the title in bold letters while the longer
    'description' is shown below it in normal letters.

    As a guideline error classes that are used in the gui or that can
    e.g. be raised on invalid input from the user should be translated.
    '''

    description = ''
    msg = '<Unknown Error>' # in case subclass does not define instance attribute

    def __init__(self, msg, description=None):
        self.msg = msg
        if description:
            self.description = description
        # else use class attribute

    def __str__(self):
        msg = self.__unicode__()
        return msg.encode('utf-8')

    def __unicode__(self):
        msg = u'' + self.msg.strip()
        if self.description:
            msg += '\n\n' + self.description.strip() + '\n'
        return msg

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.msg)

# Defined here because these errors are not specific to files, but can
# occur in different storage models as well

class TrashNotSupportedError(Error):
    '''Error raised when trashing is not supported and delete should
    be used instead
    '''
    pass

class TrashCancelledError(Error):
    '''Error raised when a trashing operation is cancelled. (E.g. on
    Windows the system will prompt the user with a confirmation dialog
    which has a Cancel button.)
    '''
    pass
exc_info = sys.exc_info()
random_line_split
errors.py
# -*- coding: utf-8 -*- # Copyright 2009-2013 Jaap Karssenberg <[email protected]> # The Error class needed to be put in a separate file to avoid recursive # imports. '''This module contains the base class for all errors in zim''' import sys import logging logger = logging.getLogger('zim') use_gtk_errordialog = False def set_use_gtk(use_gtk): '''Set whether or not L{show_error} and L{exception_handler} shold use the L{ErrorDialog} or not. @param use_gtk: set C{True} for interactive gui, C{False} for terminal mode ''' global use_gtk_errordialog use_gtk_errordialog = use_gtk def get_error_msg(error): '''Returns the message to show for an error @param error: error object or string @returns: 2-tuple of: message string and a boolean whether a traceback should be shown or not ''' if isinstance(error, Error): # An "expected" error return error.msg, False elif isinstance(error, EnvironmentError): # Normal error, e.g. OSError or IOError msg = error.strerror if hasattr(error, 'filename') and error.filename: msg += ': ' + error.filename return msg, False else: # An unexpected error, all other Exception's msg = _('Looks like you found a bug') # T: generic error dialog return msg, True def log_error(error, debug=None): '''Log error and traceback @param error: error as understood by L{get_error_msg()} @param debug: optional debug message, defaults to the error itself ''' msg, show_trace = get_error_msg(error) if debug is None: debug = msg if show_trace: # unexpected error - will be logged with traceback logger.exception(debug) else: # expected error - log trace to debug logger.debug(debug, exc_info=1) logger.error(msg) def _run_error_dialog(error): #~ try: from zim.gui.widgets import ErrorDialog ErrorDialog(None, error, do_logging=False).run() #~ except: #~ logger.error('Failed to run error dialog') def show_error(error): '''Show an error by calling L{log_error()} and when running interactive also calling L{ErrorDialog}. @param error: the error object ''' log_error(error) if use_gtk_errordialog: _run_error_dialog(error) def exception_handler(debug): '''Like C{show_error()} but with debug message instead of the actual error. Intended to be used in C{except} blocks as a catch-all for both intended and unintended errors. @param debug: debug message for logging ''' # We use debug as log message, rather than the error itself # the error itself shows up in the traceback anyway exc_info = sys.exc_info() error = exc_info[1] del exc_info # recommended by manual log_error(error, debug=debug) if use_gtk_errordialog: _run_error_dialog(error) class Error(Exception): '''Base class for all errors in zim. This class is intended for application and usage errors, these will be caught in the user interface and presented as error dialogs. In contrast and Exception that does I{not} derive from this base class will result in a "You found a bug" dialog. Do not use this class e.g. to catch programming errors. Subclasses should define two attributes. The first is 'msg', which is the short description of the error. Typically this gives the specific input / page / ... which caused the error. In there should be an attribute 'description' (either as class attribute or object attribute) with a verbose description. This description can be less specific but should explain the error in a user friendly way. The default behavior is to take 'msg' as the single argument for the constructor. So a minimal subclass only needs to define a class attribute 'description'. 
    For a typical error dialog in the Gtk interface the short string from
    'msg' will be shown as the title in bold letters while the longer
    'description' is shown below it in normal letters.

    As a guideline error classes that are used in the gui or that can
    e.g. be raised on invalid input from the user should be translated.
    '''

    description = ''
    msg = '<Unknown Error>' # in case subclass does not define instance attribute

    def
(self, msg, description=None):
        self.msg = msg
        if description:
            self.description = description
        # else use class attribute

    def __str__(self):
        msg = self.__unicode__()
        return msg.encode('utf-8')

    def __unicode__(self):
        msg = u'' + self.msg.strip()
        if self.description:
            msg += '\n\n' + self.description.strip() + '\n'
        return msg

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.msg)

# Defined here because these errors are not specific to files, but can
# occur in different storage models as well

class TrashNotSupportedError(Error):
    '''Error raised when trashing is not supported and delete should
    be used instead
    '''
    pass

class TrashCancelledError(Error):
    '''Error raised when a trashing operation is cancelled. (E.g. on
    Windows the system will prompt the user with a confirmation dialog
    which has a Cancel button.)
    '''
    pass
__init__
identifier_name
errors.py
# -*- coding: utf-8 -*-

# Copyright 2009-2013 Jaap Karssenberg <[email protected]>

# The Error class needed to be put in a separate file to avoid recursive
# imports.

'''This module contains the base class for all errors in zim'''

import sys
import logging

logger = logging.getLogger('zim')

use_gtk_errordialog = False

def set_use_gtk(use_gtk):
    '''Set whether or not L{show_error} and L{exception_handler}
    should use the L{ErrorDialog} or not.
    @param use_gtk: set C{True} for interactive gui, C{False} for
    terminal mode
    '''
    global use_gtk_errordialog
    use_gtk_errordialog = use_gtk

def get_error_msg(error):
    '''Returns the message to show for an error
    @param error: error object or string
    @returns: 2-tuple of: message string and a boolean
    whether a traceback should be shown or not
    '''
    if isinstance(error, Error):
        # An "expected" error
        return error.msg, False
    elif isinstance(error, EnvironmentError):
        # Normal error, e.g. OSError or IOError
        msg = error.strerror
        if hasattr(error, 'filename') and error.filename:
            msg += ': ' + error.filename
        return msg, False
    else:
        # An unexpected error, all other Exception's
        msg = _('Looks like you found a bug') # T: generic error dialog
        return msg, True

def log_error(error, debug=None):
def _run_error_dialog(error): #~ try: from zim.gui.widgets import ErrorDialog ErrorDialog(None, error, do_logging=False).run() #~ except: #~ logger.error('Failed to run error dialog') def show_error(error): '''Show an error by calling L{log_error()} and when running interactive also calling L{ErrorDialog}. @param error: the error object ''' log_error(error) if use_gtk_errordialog: _run_error_dialog(error) def exception_handler(debug): '''Like C{show_error()} but with debug message instead of the actual error. Intended to be used in C{except} blocks as a catch-all for both intended and unintended errors. @param debug: debug message for logging ''' # We use debug as log message, rather than the error itself # the error itself shows up in the traceback anyway exc_info = sys.exc_info() error = exc_info[1] del exc_info # recommended by manual log_error(error, debug=debug) if use_gtk_errordialog: _run_error_dialog(error) class Error(Exception): '''Base class for all errors in zim. This class is intended for application and usage errors, these will be caught in the user interface and presented as error dialogs. In contrast and Exception that does I{not} derive from this base class will result in a "You found a bug" dialog. Do not use this class e.g. to catch programming errors. Subclasses should define two attributes. The first is 'msg', which is the short description of the error. Typically this gives the specific input / page / ... which caused the error. In there should be an attribute 'description' (either as class attribute or object attribute) with a verbose description. This description can be less specific but should explain the error in a user friendly way. The default behavior is to take 'msg' as the single argument for the constructor. So a minimal subclass only needs to define a class attribute 'description'. For a typical error dialog in the Gtk interface the short string from 'msg' will be shown as the title in bold letters while the longer 'description' is shown below it in normal letters. As a guideline error classes that are used in the gui or that can be e.g. be raised on invalid input from the user should be translated. ''' description = '' msg = '<Unknown Error>' # in case subclass does not define instance attribute def __init__(self, msg, description=None): self.msg = msg if description: self.description = description # else use class attribute def __str__(self): msg = self.__unicode__() return msg.encode('utf-8') def __unicode__(self): msg = u'' + self.msg.strip() if self.description: msg += '\n\n' + self.description.strip() + '\n' return msg def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self.msg) # Defined here because these errors are not specific to files, but can # occur in different storage models as well class TrashNotSupportedError(Error): '''Error raised when trashing is not supported and delete should be used instead ''' pass class TrashCancelledError(Error): '''Error raised when a trashign operation is cancelled. (E.g. on windows the system will prompt the user with a confirmation dialog which has a Cancel button.) ''' pass
    '''Log error and traceback
    @param error: error as understood by L{get_error_msg()}
    @param debug: optional debug message, defaults to the error itself
    '''
    msg, show_trace = get_error_msg(error)
    if debug is None:
        debug = msg

    if show_trace:
        # unexpected error - will be logged with traceback
        logger.exception(debug)
    else:
        # expected error - log trace to debug
        logger.debug(debug, exc_info=1)

    logger.error(msg)
identifier_body
errors.py
# -*- coding: utf-8 -*-

# Copyright 2009-2013 Jaap Karssenberg <[email protected]>

# The Error class needed to be put in a separate file to avoid recursive
# imports.

'''This module contains the base class for all errors in zim'''

import sys
import logging

logger = logging.getLogger('zim')

use_gtk_errordialog = False

def set_use_gtk(use_gtk):
    '''Set whether or not L{show_error} and L{exception_handler}
    should use the L{ErrorDialog} or not.
    @param use_gtk: set C{True} for interactive gui, C{False} for
    terminal mode
    '''
    global use_gtk_errordialog
    use_gtk_errordialog = use_gtk

def get_error_msg(error):
    '''Returns the message to show for an error
    @param error: error object or string
    @returns: 2-tuple of: message string and a boolean
    whether a traceback should be shown or not
    '''
    if isinstance(error, Error):
        # An "expected" error
elif isinstance(error, EnvironmentError): # Normal error, e.g. OSError or IOError msg = error.strerror if hasattr(error, 'filename') and error.filename: msg += ': ' + error.filename return msg, False else: # An unexpected error, all other Exception's msg = _('Looks like you found a bug') # T: generic error dialog return msg, True def log_error(error, debug=None): '''Log error and traceback @param error: error as understood by L{get_error_msg()} @param debug: optional debug message, defaults to the error itself ''' msg, show_trace = get_error_msg(error) if debug is None: debug = msg if show_trace: # unexpected error - will be logged with traceback logger.exception(debug) else: # expected error - log trace to debug logger.debug(debug, exc_info=1) logger.error(msg) def _run_error_dialog(error): #~ try: from zim.gui.widgets import ErrorDialog ErrorDialog(None, error, do_logging=False).run() #~ except: #~ logger.error('Failed to run error dialog') def show_error(error): '''Show an error by calling L{log_error()} and when running interactive also calling L{ErrorDialog}. @param error: the error object ''' log_error(error) if use_gtk_errordialog: _run_error_dialog(error) def exception_handler(debug): '''Like C{show_error()} but with debug message instead of the actual error. Intended to be used in C{except} blocks as a catch-all for both intended and unintended errors. @param debug: debug message for logging ''' # We use debug as log message, rather than the error itself # the error itself shows up in the traceback anyway exc_info = sys.exc_info() error = exc_info[1] del exc_info # recommended by manual log_error(error, debug=debug) if use_gtk_errordialog: _run_error_dialog(error) class Error(Exception): '''Base class for all errors in zim. This class is intended for application and usage errors, these will be caught in the user interface and presented as error dialogs. In contrast and Exception that does I{not} derive from this base class will result in a "You found a bug" dialog. Do not use this class e.g. to catch programming errors. Subclasses should define two attributes. The first is 'msg', which is the short description of the error. Typically this gives the specific input / page / ... which caused the error. In there should be an attribute 'description' (either as class attribute or object attribute) with a verbose description. This description can be less specific but should explain the error in a user friendly way. The default behavior is to take 'msg' as the single argument for the constructor. So a minimal subclass only needs to define a class attribute 'description'. For a typical error dialog in the Gtk interface the short string from 'msg' will be shown as the title in bold letters while the longer 'description' is shown below it in normal letters. As a guideline error classes that are used in the gui or that can be e.g. be raised on invalid input from the user should be translated. 
    '''

    description = ''
    msg = '<Unknown Error>' # in case subclass does not define instance attribute

    def __init__(self, msg, description=None):
        self.msg = msg
        if description:
            self.description = description
        # else use class attribute

    def __str__(self):
        msg = self.__unicode__()
        return msg.encode('utf-8')

    def __unicode__(self):
        msg = u'' + self.msg.strip()
        if self.description:
            msg += '\n\n' + self.description.strip() + '\n'
        return msg

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.msg)

# Defined here because these errors are not specific to files, but can
# occur in different storage models as well

class TrashNotSupportedError(Error):
    '''Error raised when trashing is not supported and delete should
    be used instead
    '''
    pass

class TrashCancelledError(Error):
    '''Error raised when a trashing operation is cancelled. (E.g. on
    Windows the system will prompt the user with a confirmation dialog
    which has a Cancel button.)
    '''
    pass
return error.msg, False
conditional_block
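The Error docstring above says a minimal subclass only needs to define a class attribute 'description'. A sketch of what that looks like in practice; the subclass name and message strings are hypothetical, not taken from zim:

class PageNotFoundError(Error):
    # Hypothetical subclass for illustration: only 'description' is defined
    # here, while 'msg' is supplied per instance via the base constructor.
    description = '''The page you asked for does not exist.
Maybe you need to create it first?'''

try:
    raise PageNotFoundError('No such page: foo')
except PageNotFoundError as error:
    show_error(error)  # logs 'No such page: foo' plus the description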
results_fetcher.py
Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import collections
import logging
import json
import re

import six.moves.urllib.request
import six.moves.urllib.parse
import six.moves.urllib.error

from blinkpy.common.memoized import memoized
from blinkpy.common.net.web import Web
from blinkpy.common.net.web_test_results import WebTestResults
from blinkpy.common.system.filesystem import FileSystem
from blinkpy.web_tests.builder_list import BuilderList
from blinkpy.web_tests.layout_package import json_results_generator

_log = logging.getLogger(__name__)

TEST_RESULTS_SERVER = 'https://test-results.appspot.com'
RESULTS_URL_BASE = '%s/data/layout_results' % TEST_RESULTS_SERVER
RESULTS_SUMMARY_URL_BASE = 'https://storage.googleapis.com/chromium-layout-test-archives'

class Build(collections.namedtuple('Build', ('builder_name', 'build_number', 'build_id'))):
    """Represents a combination of builder and build number.

    If build number is None, this represents the latest build
    for a given builder.
    """

    def __new__(cls, builder_name, build_number=None, build_id=None):
        return super(Build, cls).__new__(cls, builder_name, build_number,
                                         build_id)

class TestResultsFetcher(object):
    """This class represents an interface to test results for particular builds.

    This includes fetching web test results from Google Storage;
    for more information about the web test result format, see:
        https://www.chromium.org/developers/the-json-test-results-format
    """

    def
(self):
        self.web = Web()
        self.builders = BuilderList.load_default_builder_list(FileSystem())

    def results_url(self, builder_name, build_number=None, step_name=None):
        """Returns a URL for one set of archived web test results.

        If a build number is given, this will be results for a particular
        run; otherwise it will be the accumulated results URL, which should
        have the latest results.
        """
        if build_number:
            assert str(build_number).isdigit(), \
                'expected numeric build number, got %s' % build_number
            url_base = self.builder_results_url_base(builder_name)
            if step_name is None:
                step_name = self.get_layout_test_step_name(
                    Build(builder_name, build_number))
            if step_name:
                return '%s/%s/%s/layout-test-results' % (
                    url_base, build_number,
                    six.moves.urllib.parse.quote(step_name))
            return '%s/%s/layout-test-results' % (url_base, build_number)
        return self.accumulated_results_url_base(builder_name)

    def get_full_builder_url(self, url_base, builder_name):
        """ Returns the url for a builder directory in google storage.

        Each builder has a directory in the GS bucket, and the directory
        name is the builder name transformed to be more URL-friendly by
        replacing all spaces, periods and parentheses with underscores.
        """
        return '%s/%s' % (url_base, re.sub('[ .()]', '_', builder_name))

    def builder_results_url_base(self, builder_name):
        """Returns the URL for the given builder's directory in Google Storage.
        """
        return self.get_full_builder_url(RESULTS_URL_BASE, builder_name)

    def builder_retry_results_url_base(self, builder_name):
        """Returns the URL for the given builder's directory in Google Storage.

        This is used for fetching the retry data which is now contained in
        test_results_summary.json which cannot be fetched from
        https://test-results.appspot.com anymore. Migrating this tool to
        use resultDB is the ideal solution.
        """
        return self.get_full_builder_url(RESULTS_SUMMARY_URL_BASE,
                                         builder_name)

    @memoized
    def fetch_retry_summary_json(self, build):
        """Fetches and returns the text of the archived test_results_summary.json file.

        This file is expected to contain the results of retrying web tests
        with and without a patch in a try job. It includes lists of tests
        that failed only with the patch ("failures"), and tests that failed
        both with and without ("ignored").
        """
        url_base = '%s/%s' % (self.builder_retry_results_url_base(
            build.builder_name), build.build_number)
        # NOTE(crbug.com/1082907): We used to fetch retry_with_patch_summary.json from
        # test-results.appspot.com. The file has been renamed and can no longer be
        # accessed via test-results, so we download it from GCS directly.
        # There is still a bug in uploading this json file for other platforms than linux.
        # see https://crbug.com/1157202
        return self.web.get_binary('%s/%s' % (url_base,
                                              'test_results_summary.json'),
                                   return_none_on_404=True)

    def accumulated_results_url_base(self, builder_name):
        return self.builder_results_url_base(
            builder_name) + '/results/layout-test-results'

    @memoized
    def fetch_results(self, build, full=False, step_name=None):
        """Returns a WebTestResults object for results from a given Build.

        Uses full_results.json if full is True, otherwise failing_results.json.
""" if not build.builder_name or not build.build_number: _log.debug('Builder name or build number is None') return None step_name = step_name or self.get_layout_test_step_name(build) return self.fetch_web_test_results( self.results_url( build.builder_name, build.build_number, step_name=step_name), full, step_name) @memoized def get_layout_test_step_name(self, build): if not build.builder_name or not build.build_number: _log.debug('Builder name or build number is None') return None # We were not able to retrieve step name for some builders from # https://test-results.appspot.com. Read from config file instead step_name = self.builders.step_name_for_builder(build.builder_name) if step_name: return step_name url = '%s/testfile?%s' % ( TEST_RESULTS_SERVER, six.moves.urllib.parse.urlencode([ ('buildnumber', build.build_number), # This forces the server to gives us JSON rather than an HTML page. ('callback', json_results_generator.JSON_CALLBACK), ('builder', build.builder_name), ('name', 'full_results.json') ])) data = self.web.get_binary(url, return_none_on_404=True) if not data: _log.debug('Got 404 response from:\n%s', url) return None # Strip out the callback data = json.loads(json_results_generator.strip_json_wrapper(data)) suites = [ entry['TestType'] for entry in data # Some suite names are like 'blink_web_tests on Intel GPU (with # patch)'. Only make sure it starts with blink_web_tests and # runs with a patch. This should be changed eventually to use actual # structured data from the test results server. if re.match( r'(blink_web_tests|wpt_tests_suite|high_dpi_blink_web_tests).*\(with patch\)$', entry['TestType']) ] # In manual testing, I sometimes saw results where the same suite was # repeated twice. De-duplicate here to try to catch this. suites = list(set(suites)) if len(suites) != 1: raise Exception( 'build %s on builder %s expected to only have one web test ' 'step, instead has %s' % (build.build_number, build.builder_name, suites)) return suites[0] @memoized def fetch_web_test_results(self, results_url, full=False, step_name=None): """Returns a WebTestResults object for results fetched from a given URL. Uses full_results.json if full is True, otherwise failing_results.json. """ base_filename = 'full_results.json' if full else 'failing_results.json' results_file = self.web.get_binary( '%s/%s' % (results_url, base_filename), return_none_on_404=True) if results_file is None: _log.debug('Got 404 response from:\n%s/%s', results_url, base_filename) return None return WebTestResults.results_from_string(results_file, step_name) def fetch_webdriver_test_results(self, build, master): if not build.builder_name or not build.build_number or not master: _log.debug('Builder name or build number or master is None') return None url = '%s/testfile?%s' % (TEST_RESULTS
__init__
identifier_name
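A note on the pattern in the record above: the Build class subclasses collections.namedtuple and overrides __new__ purely to supply default argument values. A minimal standalone sketch of the same idiom (the Point name and fields here are illustrative, not taken from the record):

import collections

class Point(collections.namedtuple('Point', ('x', 'y', 'label'))):
    """Namedtuple subclass with defaults supplied via __new__."""

    def __new__(cls, x, y=0, label=None):
        # namedtuple itself gained a `defaults` parameter only in
        # Python 3.6.1+, so older code injects defaults here instead.
        return super(Point, cls).__new__(cls, x, y, label)

p = Point(3)
assert p.y == 0 and p.label is None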
results_fetcher.py
FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import collections
import logging
import json
import re

import six.moves.urllib.request
import six.moves.urllib.parse
import six.moves.urllib.error

from blinkpy.common.memoized import memoized
from blinkpy.common.net.web import Web
from blinkpy.common.net.web_test_results import WebTestResults
from blinkpy.common.system.filesystem import FileSystem
from blinkpy.web_tests.builder_list import BuilderList
from blinkpy.web_tests.layout_package import json_results_generator

_log = logging.getLogger(__name__)

TEST_RESULTS_SERVER = 'https://test-results.appspot.com'
RESULTS_URL_BASE = '%s/data/layout_results' % TEST_RESULTS_SERVER
RESULTS_SUMMARY_URL_BASE = 'https://storage.googleapis.com/chromium-layout-test-archives'


class Build(collections.namedtuple('Build', ('builder_name', 'build_number', 'build_id'))):
    """Represents a combination of builder and build number.

    If build number is None, this represents the latest build for a given
    builder.
    """

    def __new__(cls, builder_name, build_number=None, build_id=None):
        return super(Build, cls).__new__(cls, builder_name, build_number,
                                         build_id)


class TestResultsFetcher(object):
    """This class represents an interface to test results for particular builds.

    This includes fetching web test results from Google Storage;
    for more information about the web test result format, see:
        https://www.chromium.org/developers/the-json-test-results-format
    """

    def __init__(self):
        self.web = Web()
        self.builders = BuilderList.load_default_builder_list(FileSystem())

    def results_url(self, builder_name, build_number=None, step_name=None):
        """Returns a URL for one set of archived web test results.

        If a build number is given, this will be results for a particular run;
        otherwise it will be the accumulated results URL, which should have
        the latest results.
        """
        if build_number:
            assert str(build_number).isdigit(), \
                'expected numeric build number, got %s' % build_number
            url_base = self.builder_results_url_base(builder_name)
            if step_name is None:
                step_name = self.get_layout_test_step_name(
                    Build(builder_name, build_number))
            if step_name:
                return '%s/%s/%s/layout-test-results' % (
                    url_base, build_number,
                    six.moves.urllib.parse.quote(step_name))
            return '%s/%s/layout-test-results' % (url_base, build_number)
        return self.accumulated_results_url_base(builder_name)

    def get_full_builder_url(self, url_base, builder_name):
        """ Returns the url for a builder directory in google storage.

        Each builder has a directory in the GS bucket, and the directory
        name is the builder name transformed to be more URL-friendly by
        replacing all spaces, periods and parentheses with underscores.
        """
        return '%s/%s' % (url_base, re.sub('[ .()]', '_', builder_name))

    def builder_results_url_base(self, builder_name):
        """Returns the URL for the given builder's directory in Google Storage.
""" return self.get_full_builder_url(RESULTS_URL_BASE, builder_name) def builder_retry_results_url_base(self, builder_name): """Returns the URL for the given builder's directory in Google Storage. This is used for fetching the retry data which is now contained in test_results_summary.json which cannot be fetched from https://test-results.appspot.com anymore. Migrating this tool to use resultDB is the ideal solution. """ return self.get_full_builder_url(RESULTS_SUMMARY_URL_BASE, builder_name) @memoized def fetch_retry_summary_json(self, build): """Fetches and returns the text of the archived test_results_summary.json file. This file is expected to contain the results of retrying web tests with and without a patch in a try job. It includes lists of tests that failed only with the patch ("failures"), and tests that failed both with and without ("ignored"). """ url_base = '%s/%s' % (self.builder_retry_results_url_base( build.builder_name), build.build_number) # NOTE(crbug.com/1082907): We used to fetch retry_with_patch_summary.json from # test-results.appspot.com. The file has been renamed and can no longer be # accessed via test-results, so we download it from GCS directly. # There is still a bug in uploading this json file for other platforms than linux. # see https://crbug.com/1157202 return self.web.get_binary('%s/%s' % (url_base, 'test_results_summary.json'), return_none_on_404=True) def accumulated_results_url_base(self, builder_name): return self.builder_results_url_base( builder_name) + '/results/layout-test-results' @memoized def fetch_results(self, build, full=False, step_name=None): """Returns a WebTestResults object for results from a given Build. Uses full_results.json if full is True, otherwise failing_results.json. """ if not build.builder_name or not build.build_number: _log.debug('Builder name or build number is None') return None step_name = step_name or self.get_layout_test_step_name(build) return self.fetch_web_test_results( self.results_url( build.builder_name, build.build_number, step_name=step_name), full, step_name) @memoized def get_layout_test_step_name(self, build): if not build.builder_name or not build.build_number: _log.debug('Builder name or build number is None') return None # We were not able to retrieve step name for some builders from # https://test-results.appspot.com. Read from config file instead step_name = self.builders.step_name_for_builder(build.builder_name) if step_name: return step_name url = '%s/testfile?%s' % ( TEST_RESULTS_SERVER, six.moves.urllib.parse.urlencode([ ('buildnumber', build.build_number), # This forces the server to gives us JSON rather than an HTML page. ('callback', json_results_generator.JSON_CALLBACK), ('builder', build.builder_name), ('name', 'full_results.json') ])) data = self.web.get_binary(url, return_none_on_404=True) if not data: _log.debug('Got 404 response from:\n%s', url) return None # Strip out the callback data = json.loads(json_results_generator.strip_json_wrapper(data)) suites = [ entry['TestType'] for entry in data # Some suite names are like 'blink_web_tests on Intel GPU (with # patch)'. Only make sure it starts with blink_web_tests and # runs with a patch. This should be changed eventually to use actual # structured data from the test results server. if re.match( r'(blink_web_tests|wpt_tests_suite|high_dpi_blink_web_tests).*\(with patch\)$', entry['TestType']) ] # In manual testing, I sometimes saw results where the same suite was # repeated twice. De-duplicate here to try to catch this. 
suites = list(set(suites))
        if len(suites) != 1:
            raise Exception(
                'build %s on builder %s expected to only have one web test '
                'step, instead has %s' % (build.build_number,
                                          build.builder_name, suites))
        return suites[0]

    @memoized
    def fetch_web_test_results(self, results_url, full=False, step_name=None):
        """Returns a WebTestResults object for results fetched from a given URL.

        Uses full_results.json if full is True, otherwise failing_results.json.
        """
        base_filename = 'full_results.json' if full else 'failing_results.json'
        results_file = self.web.get_binary(
            '%s/%s' % (results_url, base_filename), return_none_on_404=True)
        if results_file is None:
            _log.debug('Got 404 response from:\n%s/%s', results_url,
                       base_filename)
            return None
        return WebTestResults.results_from_string(results_file, step_name)

    def fetch_webdriver_test_results(self, build, master):
        if not build.builder_name or not build.build_number or not master:
            _log.debug('Builder name or build number or master is None')
            return None
        url = '%s/testfile?%s' % (TEST_RESULTS_SERVER,
                                  six.moves.urllib.parse.urlencode(
                                      [('buildnumber', build.build_number),
                                       ('master', master),
                                       ('builder', build.builder_name),
                                       ('testtype', 'webdriver_tests_suite (with patch)'),
('name', 'full_results.json')]))
        data = self.web.get_binary(url, return_none_on_404=True)
random_line_split
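The get_full_builder_url helper in this record relies on a single re.sub to make builder names URL-safe before they are used as Google Storage directory names. A small sketch of that transform in isolation (the builder name below is hypothetical):

import re

def gs_directory_name(builder_name):
    # Spaces, periods and parentheses all collapse to underscores,
    # mirroring the re.sub in get_full_builder_url above.
    return re.sub(r'[ .()]', '_', builder_name)

assert gs_directory_name('WebKit Mac10.13 (retina)') == 'WebKit_Mac10_13__retina_'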
results_fetcher.py
Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import collections
import logging
import json
import re

import six.moves.urllib.request
import six.moves.urllib.parse
import six.moves.urllib.error

from blinkpy.common.memoized import memoized
from blinkpy.common.net.web import Web
from blinkpy.common.net.web_test_results import WebTestResults
from blinkpy.common.system.filesystem import FileSystem
from blinkpy.web_tests.builder_list import BuilderList
from blinkpy.web_tests.layout_package import json_results_generator

_log = logging.getLogger(__name__)

TEST_RESULTS_SERVER = 'https://test-results.appspot.com'
RESULTS_URL_BASE = '%s/data/layout_results' % TEST_RESULTS_SERVER
RESULTS_SUMMARY_URL_BASE = 'https://storage.googleapis.com/chromium-layout-test-archives'


class Build(collections.namedtuple('Build', ('builder_name', 'build_number', 'build_id'))):
    """Represents a combination of builder and build number.

    If build number is None, this represents the latest build for a given
    builder.
    """

    def __new__(cls, builder_name, build_number=None, build_id=None):
        return super(Build, cls).__new__(cls, builder_name, build_number,
                                         build_id)


class TestResultsFetcher(object):
    """This class represents an interface to test results for particular builds.

    This includes fetching web test results from Google Storage;
    for more information about the web test result format, see:
        https://www.chromium.org/developers/the-json-test-results-format
    """

    def __init__(self):
        self.web = Web()
        self.builders = BuilderList.load_default_builder_list(FileSystem())

    def results_url(self, builder_name, build_number=None, step_name=None):
        """Returns a URL for one set of archived web test results.

        If a build number is given, this will be results for a particular run;
        otherwise it will be the accumulated results URL, which should have
        the latest results.
        """
        if build_number:
            assert str(build_number).isdigit(), \
                'expected numeric build number, got %s' % build_number
            url_base = self.builder_results_url_base(builder_name)
            if step_name is None:
                step_name = self.get_layout_test_step_name(
                    Build(builder_name, build_number))
            if step_name:
                return '%s/%s/%s/layout-test-results' % (
                    url_base, build_number,
                    six.moves.urllib.parse.quote(step_name))
            return '%s/%s/layout-test-results' % (url_base, build_number)
        return self.accumulated_results_url_base(builder_name)

    def get_full_builder_url(self, url_base, builder_name):
        """ Returns the url for a builder directory in google storage.
Each builder has a directory in the GS bucket, and the directory
        name is the builder name transformed to be more URL-friendly by
        replacing all spaces, periods and parentheses with underscores.
        """
        return '%s/%s' % (url_base, re.sub('[ .()]', '_', builder_name))

    def builder_results_url_base(self, builder_name):
        """Returns the URL for the given builder's directory in Google Storage.
        """
        return self.get_full_builder_url(RESULTS_URL_BASE, builder_name)

    def builder_retry_results_url_base(self, builder_name):
        """Returns the URL for the given builder's directory in Google Storage.

        This is used for fetching the retry data which is now contained in
        test_results_summary.json which cannot be fetched from
        https://test-results.appspot.com anymore. Migrating this tool to use
        resultDB is the ideal solution.
        """
        return self.get_full_builder_url(RESULTS_SUMMARY_URL_BASE, builder_name)

    @memoized
    def fetch_retry_summary_json(self, build):
        """Fetches and returns the text of the archived test_results_summary.json file.

        This file is expected to contain the results of retrying web tests
        with and without a patch in a try job. It includes lists of tests
        that failed only with the patch ("failures"), and tests that failed
        both with and without ("ignored").
        """
        url_base = '%s/%s' % (self.builder_retry_results_url_base(
            build.builder_name), build.build_number)
        # NOTE(crbug.com/1082907): We used to fetch retry_with_patch_summary.json from
        # test-results.appspot.com. The file has been renamed and can no longer be
        # accessed via test-results, so we download it from GCS directly.
        # There is still a bug in uploading this json file for other platforms than linux.
        # see https://crbug.com/1157202
        return self.web.get_binary('%s/%s' % (url_base, 'test_results_summary.json'),
                                   return_none_on_404=True)

    def accumulated_results_url_base(self, builder_name):
        return self.builder_results_url_base(
            builder_name) + '/results/layout-test-results'

    @memoized
    def fetch_results(self, build, full=False, step_name=None):
        """Returns a WebTestResults object for results from a given Build.

        Uses full_results.json if full is True, otherwise failing_results.json.
        """
        if not build.builder_name or not build.build_number:
            _log.debug('Builder name or build number is None')
            return None
        step_name = step_name or self.get_layout_test_step_name(build)
        return self.fetch_web_test_results(
            self.results_url(
                build.builder_name, build.build_number, step_name=step_name),
            full, step_name)

    @memoized
    def get_layout_test_step_name(self, build):
        if not build.builder_name or not build.build_number:
# We were not able to retrieve step name for some builders from
        # https://test-results.appspot.com. Read from config file instead
        step_name = self.builders.step_name_for_builder(build.builder_name)
        if step_name:
            return step_name
        url = '%s/testfile?%s' % (
            TEST_RESULTS_SERVER,
            six.moves.urllib.parse.urlencode([
                ('buildnumber', build.build_number),
                # This forces the server to give us JSON rather than an HTML page.
                ('callback', json_results_generator.JSON_CALLBACK),
                ('builder', build.builder_name),
                ('name', 'full_results.json')
            ]))
        data = self.web.get_binary(url, return_none_on_404=True)
        if not data:
            _log.debug('Got 404 response from:\n%s', url)
            return None
        # Strip out the callback
        data = json.loads(json_results_generator.strip_json_wrapper(data))
        suites = [
            entry['TestType'] for entry in data
            # Some suite names are like 'blink_web_tests on Intel GPU (with
            # patch)'. Only make sure it starts with blink_web_tests and
            # runs with a patch. This should be changed eventually to use actual
            # structured data from the test results server.
            if re.match(
                r'(blink_web_tests|wpt_tests_suite|high_dpi_blink_web_tests).*\(with patch\)$',
                entry['TestType'])
        ]
        # In manual testing, I sometimes saw results where the same suite was
        # repeated twice. De-duplicate here to try to catch this.
        suites = list(set(suites))
        if len(suites) != 1:
            raise Exception(
                'build %s on builder %s expected to only have one web test '
                'step, instead has %s' % (build.build_number,
                                          build.builder_name, suites))
        return suites[0]

    @memoized
    def fetch_web_test_results(self, results_url, full=False, step_name=None):
        """Returns a WebTestResults object for results fetched from a given URL.

        Uses full_results.json if full is True, otherwise failing_results.json.
        """
        base_filename = 'full_results.json' if full else 'failing_results.json'
        results_file = self.web.get_binary(
            '%s/%s' % (results_url, base_filename), return_none_on_404=True)
        if results_file is None:
            _log.debug('Got 404 response from:\n%s/%s', results_url,
                       base_filename)
            return None
        return WebTestResults.results_from_string(results_file, step_name)

    def fetch_webdriver_test_results(self, build, master):
        if not build.builder_name or not build.build_number or not master:
            _log.debug('Builder name or build number or master is None')
            return None
        url = '%s/testfile?%s' % (TEST_RESULTS_SERVER,
_log.debug('Builder name or build number is None')
            return None
conditional_block
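Several methods in these records are wrapped in @memoized, so repeated lookups for the same build hit a cache instead of the network. blinkpy's actual decorator is imported rather than shown here, so the following is an assumed minimal equivalent, not its real source:

import functools

def memoized(func):
    """Cache the result per positional-argument tuple."""
    cache = {}

    @functools.wraps(func)
    def wrapper(*args):
        if args not in cache:
            cache[args] = func(*args)
        return cache[args]
    return wrapper

@memoized
def step_name_lookup(builder, build_number):
    return '%s-%s' % (builder, build_number)  # stands in for a network fetch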
results_fetcher.py
py.web_tests.builder_list import BuilderList
from blinkpy.web_tests.layout_package import json_results_generator

_log = logging.getLogger(__name__)

TEST_RESULTS_SERVER = 'https://test-results.appspot.com'
RESULTS_URL_BASE = '%s/data/layout_results' % TEST_RESULTS_SERVER
RESULTS_SUMMARY_URL_BASE = 'https://storage.googleapis.com/chromium-layout-test-archives'


class Build(collections.namedtuple('Build', ('builder_name', 'build_number', 'build_id'))):
    """Represents a combination of builder and build number.

    If build number is None, this represents the latest build for a given
    builder.
    """

    def __new__(cls, builder_name, build_number=None, build_id=None):
        return super(Build, cls).__new__(cls, builder_name, build_number,
                                         build_id)


class TestResultsFetcher(object):
    """This class represents an interface to test results for particular builds.

    This includes fetching web test results from Google Storage;
    for more information about the web test result format, see:
        https://www.chromium.org/developers/the-json-test-results-format
    """

    def __init__(self):
        self.web = Web()
        self.builders = BuilderList.load_default_builder_list(FileSystem())

    def results_url(self, builder_name, build_number=None, step_name=None):
        """Returns a URL for one set of archived web test results.

        If a build number is given, this will be results for a particular run;
        otherwise it will be the accumulated results URL, which should have
        the latest results.
        """
        if build_number:
            assert str(build_number).isdigit(), \
                'expected numeric build number, got %s' % build_number
            url_base = self.builder_results_url_base(builder_name)
            if step_name is None:
                step_name = self.get_layout_test_step_name(
                    Build(builder_name, build_number))
            if step_name:
                return '%s/%s/%s/layout-test-results' % (
                    url_base, build_number,
                    six.moves.urllib.parse.quote(step_name))
            return '%s/%s/layout-test-results' % (url_base, build_number)
        return self.accumulated_results_url_base(builder_name)

    def get_full_builder_url(self, url_base, builder_name):
        """ Returns the url for a builder directory in google storage.

        Each builder has a directory in the GS bucket, and the directory
        name is the builder name transformed to be more URL-friendly by
        replacing all spaces, periods and parentheses with underscores.
        """
        return '%s/%s' % (url_base, re.sub('[ .()]', '_', builder_name))

    def builder_results_url_base(self, builder_name):
        """Returns the URL for the given builder's directory in Google Storage.
        """
        return self.get_full_builder_url(RESULTS_URL_BASE, builder_name)

    def builder_retry_results_url_base(self, builder_name):
        """Returns the URL for the given builder's directory in Google Storage.

        This is used for fetching the retry data which is now contained in
        test_results_summary.json which cannot be fetched from
        https://test-results.appspot.com anymore. Migrating this tool to use
        resultDB is the ideal solution.
        """
        return self.get_full_builder_url(RESULTS_SUMMARY_URL_BASE, builder_name)

    @memoized
    def fetch_retry_summary_json(self, build):
        """Fetches and returns the text of the archived test_results_summary.json file.

        This file is expected to contain the results of retrying web tests
        with and without a patch in a try job. It includes lists of tests
        that failed only with the patch ("failures"), and tests that failed
        both with and without ("ignored").
        """
        url_base = '%s/%s' % (self.builder_retry_results_url_base(
            build.builder_name), build.build_number)
        # NOTE(crbug.com/1082907): We used to fetch retry_with_patch_summary.json from
        # test-results.appspot.com.
The file has been renamed and can no longer be
        # accessed via test-results, so we download it from GCS directly.
        # There is still a bug in uploading this json file for other platforms than linux.
        # see https://crbug.com/1157202
        return self.web.get_binary('%s/%s' % (url_base, 'test_results_summary.json'),
                                   return_none_on_404=True)

    def accumulated_results_url_base(self, builder_name):
        return self.builder_results_url_base(
            builder_name) + '/results/layout-test-results'

    @memoized
    def fetch_results(self, build, full=False, step_name=None):
        """Returns a WebTestResults object for results from a given Build.

        Uses full_results.json if full is True, otherwise failing_results.json.
        """
        if not build.builder_name or not build.build_number:
            _log.debug('Builder name or build number is None')
            return None
        step_name = step_name or self.get_layout_test_step_name(build)
        return self.fetch_web_test_results(
            self.results_url(
                build.builder_name, build.build_number, step_name=step_name),
            full, step_name)

    @memoized
    def get_layout_test_step_name(self, build):
        if not build.builder_name or not build.build_number:
            _log.debug('Builder name or build number is None')
            return None
        # We were not able to retrieve step name for some builders from
        # https://test-results.appspot.com. Read from config file instead
        step_name = self.builders.step_name_for_builder(build.builder_name)
        if step_name:
            return step_name
        url = '%s/testfile?%s' % (
            TEST_RESULTS_SERVER,
            six.moves.urllib.parse.urlencode([
                ('buildnumber', build.build_number),
                # This forces the server to give us JSON rather than an HTML page.
                ('callback', json_results_generator.JSON_CALLBACK),
                ('builder', build.builder_name),
                ('name', 'full_results.json')
            ]))
        data = self.web.get_binary(url, return_none_on_404=True)
        if not data:
            _log.debug('Got 404 response from:\n%s', url)
            return None
        # Strip out the callback
        data = json.loads(json_results_generator.strip_json_wrapper(data))
        suites = [
            entry['TestType'] for entry in data
            # Some suite names are like 'blink_web_tests on Intel GPU (with
            # patch)'. Only make sure it starts with blink_web_tests and
            # runs with a patch. This should be changed eventually to use actual
            # structured data from the test results server.
            if re.match(
                r'(blink_web_tests|wpt_tests_suite|high_dpi_blink_web_tests).*\(with patch\)$',
                entry['TestType'])
        ]
        # In manual testing, I sometimes saw results where the same suite was
        # repeated twice. De-duplicate here to try to catch this.
        suites = list(set(suites))
        if len(suites) != 1:
            raise Exception(
                'build %s on builder %s expected to only have one web test '
                'step, instead has %s' % (build.build_number,
                                          build.builder_name, suites))
        return suites[0]

    @memoized
    def fetch_web_test_results(self, results_url, full=False, step_name=None):
        """Returns a WebTestResults object for results fetched from a given URL.

        Uses full_results.json if full is True, otherwise failing_results.json.
""" base_filename = 'full_results.json' if full else 'failing_results.json' results_file = self.web.get_binary( '%s/%s' % (results_url, base_filename), return_none_on_404=True) if results_file is None: _log.debug('Got 404 response from:\n%s/%s', results_url, base_filename) return None return WebTestResults.results_from_string(results_file, step_name) def fetch_webdriver_test_results(self, build, master): if not build.builder_name or not build.build_number or not master: _log.debug('Builder name or build number or master is None') return None url = '%s/testfile?%s' % (TEST_RESULTS_SERVER, six.moves.urllib.parse.urlencode( [('buildnumber', build.build_number), ('master', master), ('builder', build.builder_name), ('testtype', 'webdriver_tests_suite (with patch)'), ('name', 'full_results.json')])) data = self.web.get_binary(url, return_none_on_404=True) if not data: _log.debug('Got 404 response from:\n%s', url) return None return WebTestResults.results_from_string(data) def filter_latest_builds(builds):
"""Filters Build objects to include only the latest for each builder. Args: builds: A collection of Build objects. Returns: A list of Build objects; only one Build object per builder name. If there are only Builds with no build number, then one is kept; if there are Builds with build numbers, then the one with the highest build number is kept. """ latest_builds = {} for build in builds: builder = build.builder_name if builder not in latest_builds or ( build.build_number and build.build_number > latest_builds[builder].build_number): latest_builds[builder] = build return sorted(latest_builds.values())
identifier_body
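The filter_latest_builds function in this record keeps one Build per builder by letting later, higher-numbered builds overwrite earlier dict entries. A hypothetical usage sketch, reusing the Build namedtuple from the records above:

# Hypothetical input; Build is the namedtuple defined in the records above.
builds = [
    Build('linux-rel', 100),
    Build('linux-rel', 102),
    Build('mac-rel', 7),
]
assert filter_latest_builds(builds) == [
    Build('linux-rel', 102),
    Build('mac-rel', 7),
]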
power_measurement_suite_new.py
import SCPI
import time
import numpy

totalSamples = 10
sampleFreq = 100

#freq= SCPI.SCPI("172.17.5.121")
dmm = SCPI.SCPI("172.17.5.131")

#setup freq gen
#freq.setSquare()
#freq.setVoltage(0,3)
#freq.setFrequency(sampleFreq)

#setup voltage meter
#dmm.setVoltageDC("10V", "MAX")
# set external trigger
#dmm.setTriggerSource("INT")
#dmm.setTriggerCount(str(totalSamples))
# wait for trigger
dmm.setInitiate()

dmm.setCurrentDC("500mA", "MAX")
dmm.setTriggerSource("INT")
dmm.setTriggerCount(str(totalSamples))
dmm.setInitiate()

time.sleep(1)
#freq.setOutput(1)

currentMeasurements = []
#voltageMeasurements = []
while 1:
#freq.setOutput(0)

s = 0
for i in range(0, totalSamples):
    print float(currentMeasurements[i])

#print "Average Power Consumption: ", s/float(totalSamples), "W avg volt: ", numpy.mean(voltageMeasurements), "V avg current: ", numpy.mean(currentMeasurements), "A"
if len(currentMeasurements) < totalSamples:
        currentMeasurements += dmm.getMeasurements()
    if (len(currentMeasurements) >= totalSamples):
        break
    time.sleep(0.1)
conditional_block
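The conditional_block middle above is a classic poll-until-done loop: accumulate readings, exit once enough have arrived, and sleep between polls. A generic, reusable sketch of that loop (poll_until and its parameters are my naming, with an added timeout the original lacks):

import time

def poll_until(fetch, target_count, interval=0.1, timeout=30.0):
    """Collect readings from fetch() until target_count have arrived."""
    samples = []
    deadline = time.time() + timeout
    while len(samples) < target_count:
        samples += fetch()
        if len(samples) >= target_count:
            break
        if time.time() > deadline:
            raise RuntimeError('timed out waiting for samples')
        time.sleep(interval)
    return samples[:target_count]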
power_measurement_suite_new.py
totalSamples = 10
sampleFreq = 100

#freq= SCPI.SCPI("172.17.5.121")
dmm = SCPI.SCPI("172.17.5.131")

#setup freq gen
#freq.setSquare()
#freq.setVoltage(0,3)
#freq.setFrequency(sampleFreq)

#setup voltage meter
#dmm.setVoltageDC("10V", "MAX")
# set external trigger
#dmm.setTriggerSource("INT")
#dmm.setTriggerCount(str(totalSamples))
# wait for trigger
dmm.setInitiate()

dmm.setCurrentDC("500mA", "MAX")
dmm.setTriggerSource("INT")
dmm.setTriggerCount(str(totalSamples))
dmm.setInitiate()

time.sleep(1)
#freq.setOutput(1)

currentMeasurements = []
#voltageMeasurements = []
while 1:
    if len(currentMeasurements) < totalSamples:
        currentMeasurements += dmm.getMeasurements()
    if (len(currentMeasurements) >= totalSamples):
        break
    time.sleep(0.1)

#freq.setOutput(0)

s = 0
for i in range(0, totalSamples):
    print float(currentMeasurements[i])

#print "Average Power Consumption: ", s/float(totalSamples), "W avg volt: ", numpy.mean(voltageMeasurements), "V avg current: ", numpy.mean(currentMeasurements), "A"
import SCPI
import time
import numpy
random_line_split
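The commented-out print at the end of this script hints at the intended post-processing: averaging the collected samples with numpy. A sketch of that summary step (the function name and units are assumptions):

import numpy

def summarize(voltages, currents):
    """Mean voltage, current and power from paired samples
    (assumed units: volts and amps)."""
    v = numpy.asarray(voltages, dtype=float)
    i = numpy.asarray(currents, dtype=float)
    return {
        'avg_volt': v.mean(),
        'avg_current': i.mean(),
        'avg_power_w': (v * i).mean(),
    }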
api-data.js
import supertest from 'supertest';
import { publicChannelName, privateChannelName } from './channel.js';
import { roleNameUsers, roleNameSubscriptions, roleScopeUsers, roleScopeSubscriptions, roleDescription } from './role.js';
import { username, email, adminUsername, adminPassword } from './user.js';

export const request = supertest('http://localhost:3000');
const prefix = '/api/v1/';

export function wait(cb, time) {
	return () => setTimeout(cb, time);
}

export const apiUsername = `api${ username }`;
export const apiEmail = `api${ email }`;
export const apiPublicChannelName = `api${ publicChannelName }`;
export const apiPrivateChannelName = `api${ privateChannelName }`;
export const apiRoleNameUsers = `api${ roleNameUsers }`;
export const apiRoleNameSubscriptions = `api${ roleNameSubscriptions }`;
export const apiRoleScopeUsers = `${ roleScopeUsers }`;
export const apiRoleScopeSubscriptions = `${ roleScopeSubscriptions }`;
export const apiRoleDescription = `api${ roleDescription }`;

export const reservedWords = [
	'admin',
	'administrator',
	'system',
	'user',
];

export const targetUser = {};
export const channel = {};
export const group = {};
export const message = {};
export const directMessage = {};
export const integration = {};
export const credentials = {
	'X-Auth-Token': undefined,
	'X-User-Id': undefined,
};
export const login = {
	user: adminUsername,
	password: adminPassword,
};

export function api(path) {
	return prefix + path;
}

export function methodCall(methodName)
export function log(res) {
	console.log(res.req.path);
	console.log({
		body: res.body,
		headers: res.headers,
	});
}

export function getCredentials(done = function() {}) {
	request.post(api('login'))
		.send(login)
		.expect('Content-Type', 'application/json')
		.expect(200)
		.expect((res) => {
			credentials['X-Auth-Token'] = res.body.data.authToken;
			credentials['X-User-Id'] = res.body.data.userId;
		})
		.end(done);
}
{
	return api(`method.call/${ methodName }`);
}
identifier_body
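getCredentials in this record logs in once and stashes the auth token and user id headers for later requests. A rough Python analogue using requests (the endpoint shape is inferred from the Rocket.Chat REST API these tests target, so treat it as an assumption):

import requests

def get_credentials(base_url, user, password):
    resp = requests.post(base_url + '/api/v1/login',
                         json={'user': user, 'password': password})
    resp.raise_for_status()
    data = resp.json()['data']
    # Same header pair the JS helper stores on `credentials`.
    return {'X-Auth-Token': data['authToken'], 'X-User-Id': data['userId']}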
api-data.js
import supertest from 'supertest';
import { publicChannelName, privateChannelName } from './channel.js';
import { roleNameUsers, roleNameSubscriptions, roleScopeUsers, roleScopeSubscriptions, roleDescription } from './role.js';
import { username, email, adminUsername, adminPassword } from './user.js';

export const request = supertest('http://localhost:3000');
const prefix = '/api/v1/';

export function wait(cb, time) {
	return () => setTimeout(cb, time);
}

export const apiUsername = `api${ username }`;
export const apiEmail = `api${ email }`;
export const apiPublicChannelName = `api${ publicChannelName }`;
export const apiPrivateChannelName = `api${ privateChannelName }`;
export const apiRoleNameUsers = `api${ roleNameUsers }`;
export const apiRoleNameSubscriptions = `api${ roleNameSubscriptions }`;
export const apiRoleScopeUsers = `${ roleScopeUsers }`;
export const apiRoleScopeSubscriptions = `${ roleScopeSubscriptions }`;
export const apiRoleDescription = `api${ roleDescription }`;

export const reservedWords = [
	'admin',
	'administrator',
	'system',
	'user',
];

export const targetUser = {};
export const channel = {};
export const group = {};
export const message = {};
'X-User-Id': undefined,
};
export const login = {
	user: adminUsername,
	password: adminPassword,
};

export function api(path) {
	return prefix + path;
}

export function methodCall(methodName) {
	return api(`method.call/${ methodName }`);
}

export function log(res) {
	console.log(res.req.path);
	console.log({
		body: res.body,
		headers: res.headers,
	});
}

export function getCredentials(done = function() {}) {
	request.post(api('login'))
		.send(login)
		.expect('Content-Type', 'application/json')
		.expect(200)
		.expect((res) => {
			credentials['X-Auth-Token'] = res.body.data.authToken;
			credentials['X-User-Id'] = res.body.data.userId;
		})
		.end(done);
}
export const directMessage = {};
export const integration = {};
export const credentials = {
	'X-Auth-Token': undefined,
random_line_split
api-data.js
import supertest from 'supertest';
import { publicChannelName, privateChannelName } from './channel.js';
import { roleNameUsers, roleNameSubscriptions, roleScopeUsers, roleScopeSubscriptions, roleDescription } from './role.js';
import { username, email, adminUsername, adminPassword } from './user.js';

export const request = supertest('http://localhost:3000');
const prefix = '/api/v1/';

export function
(cb, time) {
	return () => setTimeout(cb, time);
}

export const apiUsername = `api${ username }`;
export const apiEmail = `api${ email }`;
export const apiPublicChannelName = `api${ publicChannelName }`;
export const apiPrivateChannelName = `api${ privateChannelName }`;
export const apiRoleNameUsers = `api${ roleNameUsers }`;
export const apiRoleNameSubscriptions = `api${ roleNameSubscriptions }`;
export const apiRoleScopeUsers = `${ roleScopeUsers }`;
export const apiRoleScopeSubscriptions = `${ roleScopeSubscriptions }`;
export const apiRoleDescription = `api${ roleDescription }`;

export const reservedWords = [
	'admin',
	'administrator',
	'system',
	'user',
];

export const targetUser = {};
export const channel = {};
export const group = {};
export const message = {};
export const directMessage = {};
export const integration = {};
export const credentials = {
	'X-Auth-Token': undefined,
	'X-User-Id': undefined,
};
export const login = {
	user: adminUsername,
	password: adminPassword,
};

export function api(path) {
	return prefix + path;
}

export function methodCall(methodName) {
	return api(`method.call/${ methodName }`);
}

export function log(res) {
	console.log(res.req.path);
	console.log({
		body: res.body,
		headers: res.headers,
	});
}

export function getCredentials(done = function() {}) {
	request.post(api('login'))
		.send(login)
		.expect('Content-Type', 'application/json')
		.expect(200)
		.expect((res) => {
			credentials['X-Auth-Token'] = res.body.data.authToken;
			credentials['X-User-Id'] = res.body.data.userId;
		})
		.end(done);
}
wait
identifier_name
activity.py
""" The MIT License (MIT) Copyright (c) 2015-present Rapptz Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from __future__ import annotations from typing import List, Literal, Optional, TypedDict from .user import User from .snowflake import Snowflake StatusType = Literal['idle', 'dnd', 'online', 'offline'] class PartialPresenceUpdate(TypedDict): user: User guild_id: Snowflake status: StatusType activities: List[Activity] client_status: ClientStatus class ClientStatus(TypedDict, total=False): desktop: StatusType mobile: StatusType web: StatusType class ActivityTimestamps(TypedDict, total=False): start: int end: int class ActivityParty(TypedDict, total=False): id: str size: List[int] class ActivityAssets(TypedDict, total=False): large_image: str large_text: str small_image: str small_text: str class ActivitySecrets(TypedDict, total=False): join: str spectate: str match: str class _ActivityEmojiOptional(TypedDict, total=False): id: Snowflake animated: bool class ActivityEmoji(_ActivityEmojiOptional): name: str
class ActivityButton(TypedDict):
    label: str
    url: str


class _SendableActivityOptional(TypedDict, total=False):
    url: Optional[str]


ActivityType = Literal[0, 1, 2, 4, 5]


class SendableActivity(_SendableActivityOptional):
    name: str
    type: ActivityType


class _BaseActivity(SendableActivity):
    created_at: int


class Activity(_BaseActivity, total=False):
    state: Optional[str]
    details: Optional[str]
    timestamps: ActivityTimestamps
    assets: ActivityAssets
    party: ActivityParty
    application_id: Snowflake
    flags: int
    emoji: Optional[ActivityEmoji]
    secrets: ActivitySecrets
    session_id: Optional[str]
    instance: bool
    buttons: List[ActivityButton]
random_line_split
activity.py
""" The MIT License (MIT) Copyright (c) 2015-present Rapptz Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from __future__ import annotations from typing import List, Literal, Optional, TypedDict from .user import User from .snowflake import Snowflake StatusType = Literal['idle', 'dnd', 'online', 'offline'] class
(TypedDict):
    user: User
    guild_id: Snowflake
    status: StatusType
    activities: List[Activity]
    client_status: ClientStatus


class ClientStatus(TypedDict, total=False):
    desktop: StatusType
    mobile: StatusType
    web: StatusType


class ActivityTimestamps(TypedDict, total=False):
    start: int
    end: int


class ActivityParty(TypedDict, total=False):
    id: str
    size: List[int]


class ActivityAssets(TypedDict, total=False):
    large_image: str
    large_text: str
    small_image: str
    small_text: str


class ActivitySecrets(TypedDict, total=False):
    join: str
    spectate: str
    match: str


class _ActivityEmojiOptional(TypedDict, total=False):
    id: Snowflake
    animated: bool


class ActivityEmoji(_ActivityEmojiOptional):
    name: str


class ActivityButton(TypedDict):
    label: str
    url: str


class _SendableActivityOptional(TypedDict, total=False):
    url: Optional[str]


ActivityType = Literal[0, 1, 2, 4, 5]


class SendableActivity(_SendableActivityOptional):
    name: str
    type: ActivityType


class _BaseActivity(SendableActivity):
    created_at: int


class Activity(_BaseActivity, total=False):
    state: Optional[str]
    details: Optional[str]
    timestamps: ActivityTimestamps
    assets: ActivityAssets
    party: ActivityParty
    application_id: Snowflake
    flags: int
    emoji: Optional[ActivityEmoji]
    secrets: ActivitySecrets
    session_id: Optional[str]
    instance: bool
    buttons: List[ActivityButton]
PartialPresenceUpdate
identifier_name
activity.py
""" The MIT License (MIT) Copyright (c) 2015-present Rapptz Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from __future__ import annotations from typing import List, Literal, Optional, TypedDict from .user import User from .snowflake import Snowflake StatusType = Literal['idle', 'dnd', 'online', 'offline'] class PartialPresenceUpdate(TypedDict): user: User guild_id: Snowflake status: StatusType activities: List[Activity] client_status: ClientStatus class ClientStatus(TypedDict, total=False): desktop: StatusType mobile: StatusType web: StatusType class ActivityTimestamps(TypedDict, total=False): start: int end: int class ActivityParty(TypedDict, total=False): id: str size: List[int] class ActivityAssets(TypedDict, total=False): large_image: str large_text: str small_image: str small_text: str class ActivitySecrets(TypedDict, total=False): join: str spectate: str match: str class _ActivityEmojiOptional(TypedDict, total=False): id: Snowflake animated: bool class ActivityEmoji(_ActivityEmojiOptional):
class ActivityButton(TypedDict):
    label: str
    url: str


class _SendableActivityOptional(TypedDict, total=False):
    url: Optional[str]


ActivityType = Literal[0, 1, 2, 4, 5]


class SendableActivity(_SendableActivityOptional):
    name: str
    type: ActivityType


class _BaseActivity(SendableActivity):
    created_at: int


class Activity(_BaseActivity, total=False):
    state: Optional[str]
    details: Optional[str]
    timestamps: ActivityTimestamps
    assets: ActivityAssets
    party: ActivityParty
    application_id: Snowflake
    flags: int
    emoji: Optional[ActivityEmoji]
    secrets: ActivitySecrets
    session_id: Optional[str]
    instance: bool
    buttons: List[ActivityButton]
name: str
identifier_body
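These three activity.py records all lean on the same TypedDict trick: required keys live on a subclass whose base is declared total=False, so inherited keys stay optional while new ones are required. A compact restatement of the pattern (int stands in for the Snowflake alias):

from typing import TypedDict

class _EmojiOptional(TypedDict, total=False):
    id: int        # stands in for the Snowflake alias
    animated: bool

class Emoji(_EmojiOptional):
    name: str      # required; inherited keys remain optional

ok: Emoji = {'name': 'thinking'}
also_ok: Emoji = {'name': 'party', 'animated': True}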
FormProps.ts
import * as React from "react";
import * as PropTypes from "prop-types";
import { FormContext } from "./FormContext";
import { ModelInterface, ModelValue } from "../Model";

export type StorageRequiredInterface = Pick<Storage, "setItem" | "getItem">;

export const StorageRequiredInterfaceTypes: {[P in keyof StorageRequiredInterface]: PropTypes.Validator<any>} = {
    getItem: PropTypes.func.isRequired,
    setItem: PropTypes.func.isRequired
};

export interface FormProps<M extends ModelInterface> {
    instantiate: () => M; /* This method will be used for creating model instance in Form state */
    method?: string; /* Name of method of model, which will be called on form submit */
    onSubmit?: (model: M, childContext: FormContext) => Promise<void>; // will be called if no method provided
    storageKey?: string; /* If provided Model will be saved to localStorage on unmount and loaded on mount */
    resetAfterSubmit?: boolean;
    afterSubmit?: () => void;
    storage?: StorageRequiredInterface;
    onValidate?: (groups: Array<{ name: string, isValid: boolean }>) => void;
    formRef?: (node: HTMLFormElement) => void;
}

export const FormPropTypes: {[P in keyof FormProps<any>]: PropTypes.Validator<any>} = {
    instantiate: PropTypes.func.isRequired,
    method: PropTypes.string,
    onSubmit: PropTypes.func,
    storageKey: PropTypes.string,
    resetAfterSubmit: PropTypes.bool,
    afterSubmit: PropTypes.func,
    storage: PropTypes.shape(StorageRequiredInterfaceTypes),
    onValidate: PropTypes.func,
    formRef: PropTypes.func,
};
random_line_split
acceptance_tests.rs
#![cfg(test)]

#[macro_use]
extern crate lazy_static;

mod acceptance {
    use std::process::{Command, Output};

    fn run_tests() -> Output {
        Command::new("cargo")
            .args(&["test", "test_cases"])
            .output()
            .expect("cargo command failed to start")
    }

    lazy_static! {
        static ref ACTUAL: String = {
            let output = run_tests().stdout;
            String::from_utf8_lossy(&output).to_string()
        };
    }

    fn actual<'a>() -> &'a str {
        ACTUAL.as_ref()
    }

    #[test]
    fn runs_all_tests() {
        assert!(actual().contains("running 32 tests"));
    }

    #[test]
    fn escapes_unnecessary_leading_underscore() {
        assert!(actual().contains("test test_cases::leading_underscore_in_test_name::dummy ... ok"));
    }

    #[test]
    fn escapes_names_starting_with_digit() {
        assert!(actual().contains("test test_cases::basic_test::_1 ... ok"));
    }

    #[test]
    fn removes_repeated_underscores() {
        assert!(actual().contains("test test_cases::arg_expressions::_2_4_6_to_string ... ok"));
    }

    #[test]
    fn escapes_rust_keywords() {
        assert!(actual().contains("test test_cases::keyword_test::_true ... ok"));
    }

    #[test]
    fn lowers_test_case_name()
#[test]
    fn marks_inconclusive_tests_as_ignored() {
        assert!(actual().contains("test test_cases::inconclusive_tests::should_not_take_into_account_keyword_on_argument_position ... ok"));
        assert!(actual().contains("test test_cases::inconclusive_tests::this_test_is_inconclusive_and_will_always_be ... ignored"));
    }
}
{
        assert!(actual().contains("test test_cases::lowercase_test_name::dummy_code ... ok"));
    }
identifier_body
acceptance_tests.rs
#![cfg(test)]

#[macro_use]
extern crate lazy_static;

mod acceptance {
    use std::process::{Command, Output};

    fn run_tests() -> Output {
        Command::new("cargo")
            .args(&["test", "test_cases"])
            .output()
            .expect("cargo command failed to start")
    }

    lazy_static! {
        static ref ACTUAL: String = {
            let output = run_tests().stdout;
            String::from_utf8_lossy(&output).to_string()
        };
    }

    fn actual<'a>() -> &'a str {
        ACTUAL.as_ref()
    }

    #[test]
    fn runs_all_tests() {
        assert!(actual().contains("running 32 tests"));
    }

    #[test]
    fn escapes_unnecessary_leading_underscore() {
        assert!(actual().contains("test test_cases::leading_underscore_in_test_name::dummy ... ok"));
}

    #[test]
    fn escapes_names_starting_with_digit() {
        assert!(actual().contains("test test_cases::basic_test::_1 ... ok"));
    }

    #[test]
    fn removes_repeated_underscores() {
        assert!(actual().contains("test test_cases::arg_expressions::_2_4_6_to_string ... ok"));
    }

    #[test]
    fn escapes_rust_keywords() {
        assert!(actual().contains("test test_cases::keyword_test::_true ... ok"));
    }

    #[test]
    fn lowers_test_case_name() {
        assert!(actual().contains("test test_cases::lowercase_test_name::dummy_code ... ok"));
    }

    #[test]
    fn marks_inconclusive_tests_as_ignored() {
        assert!(actual().contains("test test_cases::inconclusive_tests::should_not_take_into_account_keyword_on_argument_position ... ok"));
        assert!(actual().contains("test test_cases::inconclusive_tests::this_test_is_inconclusive_and_will_always_be ... ignored"));
    }
}
random_line_split
acceptance_tests.rs
#![cfg(test)]

#[macro_use]
extern crate lazy_static;

mod acceptance {
    use std::process::{Command, Output};

    fn run_tests() -> Output {
        Command::new("cargo")
            .args(&["test", "test_cases"])
            .output()
            .expect("cargo command failed to start")
    }

    lazy_static! {
        static ref ACTUAL: String = {
            let output = run_tests().stdout;
            String::from_utf8_lossy(&output).to_string()
        };
    }

    fn actual<'a>() -> &'a str {
        ACTUAL.as_ref()
    }

    #[test]
    fn runs_all_tests() {
        assert!(actual().contains("running 32 tests"));
    }

    #[test]
    fn escapes_unnecessary_leading_underscore() {
        assert!(actual().contains("test test_cases::leading_underscore_in_test_name::dummy ... ok"));
    }

    #[test]
    fn
() {
        assert!(actual().contains("test test_cases::basic_test::_1 ... ok"));
    }

    #[test]
    fn removes_repeated_underscores() {
        assert!(actual().contains("test test_cases::arg_expressions::_2_4_6_to_string ... ok"));
    }

    #[test]
    fn escapes_rust_keywords() {
        assert!(actual().contains("test test_cases::keyword_test::_true ... ok"));
    }

    #[test]
    fn lowers_test_case_name() {
        assert!(actual().contains("test test_cases::lowercase_test_name::dummy_code ... ok"));
    }

    #[test]
    fn marks_inconclusive_tests_as_ignored() {
        assert!(actual().contains("test test_cases::inconclusive_tests::should_not_take_into_account_keyword_on_argument_position ... ok"));
        assert!(actual().contains("test test_cases::inconclusive_tests::this_test_is_inconclusive_and_will_always_be ... ignored"));
    }
}
escapes_names_starting_with_digit
identifier_name
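Taken together, the three acceptance_tests.rs records assert a set of test-name mangling rules: lowercase, underscores for odd characters, no repeated underscores, and escapes for keywords and leading digits. A Python sketch of rules consistent with those assertions (my reading of the tests, not the test_case crate's actual implementation):

import re

RUST_KEYWORDS = {'true', 'false', 'fn', 'let', 'match'}  # abridged, illustrative

def sanitize_test_name(raw):
    name = re.sub(r'[^a-z0-9]+', '_', raw.lower()).strip('_')
    if not name or name[0].isdigit() or name in RUST_KEYWORDS:
        name = '_' + name
    return name

assert sanitize_test_name('2 + 4 = 6 to string') == '_2_4_6_to_string'
assert sanitize_test_name('true') == '_true'
assert sanitize_test_name('Dummy Code') == 'dummy_code'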
lib.rs
use std::collections::VecDeque;
use std::char;

macro_rules! try_option {
    ($o:expr) => {
        match $o {
            Some(s) => s,
            None => return None,
        }
    }
}

// Takes in a string with backslash escapes written out with literal backslash characters and
// converts it to a string with the proper escaped characters.
pub fn unescape(s: &str) -> Option<String> {
    let mut queue : VecDeque<_> = String::from(s).chars().collect();
    let mut s = String::new();

    while let Some(c) = queue.pop_front() {
        if c != '\\' {
            s.push(c);
            continue;
        }

        match queue.pop_front() {
            Some('b') => s.push('\u{0008}'),
            Some('f') => s.push('\u{000C}'),
            Some('n') => s.push('\n'),
            Some('r') => s.push('\r'),
            Some('t') => s.push('\t'),
            Some('\'') => s.push('\''),
            Some('\"') => s.push('\"'),
            Some('\\') => s.push('\\'),
            Some('u') => s.push(try_option!(unescape_unicode(&mut queue))),
            Some('x') => s.push(try_option!(unescape_byte(&mut queue))),
            Some(c) if c.is_digit(8) => s.push(try_option!(unescape_octal(c, &mut queue))),
            _ => return None
        };
    }

    Some(s)
}

fn unescape_unicode(queue: &mut VecDeque<char>) -> Option<char> {
    let mut s = String::new();

    for _ in 0..4 {
        s.push(try_option!(queue.pop_front()));
    }

    let u = try_option!(u32::from_str_radix(&s, 16).ok());
    char::from_u32(u)
}

fn unescape_byte(queue: &mut VecDeque<char>) -> Option<char> {
    let mut s = String::new();

    for _ in 0..2 {
        s.push(try_option!(queue.pop_front()));
    }

    let u = try_option!(u32::from_str_radix(&s, 16).ok());
    char::from_u32(u)
}

fn
(c: char, queue: &mut VecDeque<char>) -> Option<char> {
    match unescape_octal_leading(c, queue) {
        Some(ch) => {
            let _ = queue.pop_front();
            let _ = queue.pop_front();
            Some(ch)
        }
        None => unescape_octal_no_leading(c, queue)
    }
}

fn unescape_octal_leading(c: char, queue: &VecDeque<char>) -> Option<char> {
    if c != '0' && c != '1' && c != '2' && c != '3' {
        return None;
    }

    let mut s = String::new();
    s.push(c);
    s.push(*try_option!(queue.get(0)));
    s.push(*try_option!(queue.get(1)));

    let u = try_option!(u32::from_str_radix(&s, 8).ok());
    char::from_u32(u)
}

fn unescape_octal_no_leading(c: char, queue: &mut VecDeque<char>) -> Option<char> {
    let mut s = String::new();
    s.push(c);
    s.push(try_option!(queue.pop_front()));

    let u = try_option!(u32::from_str_radix(&s, 8).ok());
    char::from_u32(u)
}
unescape_octal
identifier_name
lib.rs
use std::collections::VecDeque;
use std::char;

macro_rules! try_option {
    ($o:expr) => {
        match $o {
            Some(s) => s,
            None => return None,
        }
    }
}

// Takes in a string with backslash escapes written out with literal backslash characters and
// converts it to a string with the proper escaped characters.
pub fn unescape(s: &str) -> Option<String> {
    let mut queue : VecDeque<_> = String::from(s).chars().collect();
    let mut s = String::new();

    while let Some(c) = queue.pop_front() {
        if c != '\\' {
            s.push(c);
            continue;
        }

        match queue.pop_front() {
            Some('b') => s.push('\u{0008}'),
            Some('f') => s.push('\u{000C}'),
            Some('n') => s.push('\n'),
            Some('r') => s.push('\r'),
            Some('t') => s.push('\t'),
            Some('\'') => s.push('\''),
            Some('\"') => s.push('\"'),
            Some('\\') => s.push('\\'),
            Some('u') => s.push(try_option!(unescape_unicode(&mut queue))),
            Some('x') => s.push(try_option!(unescape_byte(&mut queue))),
            Some(c) if c.is_digit(8) => s.push(try_option!(unescape_octal(c, &mut queue))),
            _ => return None
        };
    }

    Some(s)
}

fn unescape_unicode(queue: &mut VecDeque<char>) -> Option<char> {
    let mut s = String::new();

    for _ in 0..4 {
        s.push(try_option!(queue.pop_front()));
    }

    let u = try_option!(u32::from_str_radix(&s, 16).ok());
    char::from_u32(u)
}

fn unescape_byte(queue: &mut VecDeque<char>) -> Option<char> {
    let mut s = String::new();

    for _ in 0..2 {
        s.push(try_option!(queue.pop_front()));
    }

    let u = try_option!(u32::from_str_radix(&s, 16).ok());
    char::from_u32(u)
}

fn unescape_octal(c: char, queue: &mut VecDeque<char>) -> Option<char> {
    match unescape_octal_leading(c, queue) {
        Some(ch) => {
            let _ = queue.pop_front();
            let _ = queue.pop_front();
            Some(ch)
        }
        None => unescape_octal_no_leading(c, queue)
    }
}

fn unescape_octal_leading(c: char, queue: &VecDeque<char>) -> Option<char> {
    if c != '0' && c != '1' && c != '2' && c != '3' {
        return None;
    }

    let mut s = String::new();
    s.push(c);
    s.push(*try_option!(queue.get(0)));
    s.push(*try_option!(queue.get(1)));

    let u = try_option!(u32::from_str_radix(&s, 8).ok());
    char::from_u32(u)
}

fn unescape_octal_no_leading(c: char, queue: &mut VecDeque<char>) -> Option<char> {
    let mut s = String::new();
    s.push(c);
    s.push(try_option!(queue.pop_front()));

    let u = try_option!(u32::from_str_radix(&s, 8).ok());
}
char::from_u32(u)
random_line_split
lib.rs
use std::collections::VecDeque;
use std::char;

macro_rules! try_option {
    ($o:expr) => {
        match $o {
            Some(s) => s,
            None => return None,
        }
    }
}

// Takes in a string with backslash escapes written out with literal backslash characters and
// converts it to a string with the proper escaped characters.
pub fn unescape(s: &str) -> Option<String> {
    let mut queue : VecDeque<_> = String::from(s).chars().collect();
    let mut s = String::new();

    while let Some(c) = queue.pop_front() {
        if c != '\\' {
            s.push(c);
            continue;
        }

        match queue.pop_front() {
            Some('b') => s.push('\u{0008}'),
            Some('f') => s.push('\u{000C}'),
            Some('n') => s.push('\n'),
            Some('r') => s.push('\r'),
            Some('t') => s.push('\t'),
            Some('\'') => s.push('\''),
            Some('\"') => s.push('\"'),
            Some('\\') => s.push('\\'),
            Some('u') => s.push(try_option!(unescape_unicode(&mut queue))),
            Some('x') => s.push(try_option!(unescape_byte(&mut queue))),
            Some(c) if c.is_digit(8) => s.push(try_option!(unescape_octal(c, &mut queue))),
            _ => return None
        };
    }

    Some(s)
}

fn unescape_unicode(queue: &mut VecDeque<char>) -> Option<char> {
    let mut s = String::new();

    for _ in 0..4 {
        s.push(try_option!(queue.pop_front()));
    }

    let u = try_option!(u32::from_str_radix(&s, 16).ok());
    char::from_u32(u)
}

fn unescape_byte(queue: &mut VecDeque<char>) -> Option<char>
fn unescape_octal(c: char, queue: &mut VecDeque<char>) -> Option<char> {
    match unescape_octal_leading(c, queue) {
        Some(ch) => {
            let _ = queue.pop_front();
            let _ = queue.pop_front();
            Some(ch)
        }
        None => unescape_octal_no_leading(c, queue)
    }
}

fn unescape_octal_leading(c: char, queue: &VecDeque<char>) -> Option<char> {
    if c != '0' && c != '1' && c != '2' && c != '3' {
        return None;
    }

    let mut s = String::new();
    s.push(c);
    s.push(*try_option!(queue.get(0)));
    s.push(*try_option!(queue.get(1)));

    let u = try_option!(u32::from_str_radix(&s, 8).ok());
    char::from_u32(u)
}

fn unescape_octal_no_leading(c: char, queue: &mut VecDeque<char>) -> Option<char> {
    let mut s = String::new();
    s.push(c);
    s.push(try_option!(queue.pop_front()));

    let u = try_option!(u32::from_str_radix(&s, 8).ok());
    char::from_u32(u)
}
{
    let mut s = String::new();

    for _ in 0..2 {
        s.push(try_option!(queue.pop_front()));
    }

    let u = try_option!(u32::from_str_radix(&s, 16).ok());
    char::from_u32(u)
}
identifier_body
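The lib.rs records implement backslash-unescaping, with octal escapes tried greedily as three digits (when the lead digit is 0-3) before falling back to two. A simplified Python port of just the octal branch (error handling trimmed; a sketch, not a full unescape):

def unescape_octal(chars):
    """chars is a list of digit characters following a backslash."""
    c = chars.pop(0)
    if c in '0123' and len(chars) >= 2:
        digits = c + chars.pop(0) + chars.pop(0)   # leading form: 3 digits
    else:
        digits = c + chars.pop(0)                  # fallback: 2 digits
    return chr(int(digits, 8))

assert unescape_octal(list('101')) == 'A'   # \101 -> 'A'
assert unescape_octal(list('41')) == '!'    # \41  -> '!'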
screen-snippet-handler.ts
import { app, BrowserWindow } from 'electron';
import * as fs from 'fs';
import * as os from 'os';
import * as path from 'path';

import { ChildProcess, ExecException, execFile } from 'child_process';
import * as util from 'util';
import { IScreenSnippet } from '../common/api-interface';
import { isDevEnv, isLinux, isMac, isWindowsOS } from '../common/env';
import { i18n } from '../common/i18n';
import { logger } from '../common/logger';
import { updateAlwaysOnTop } from './window-actions';
import { windowHandler } from './window-handler';
import { windowExists } from './window-utils';

const readFile = util.promisify(fs.readFile);

class ScreenSnippet {
    private readonly tempDir: string;
    private readonly captureUtil: string;
    private outputFileName: string | undefined;
    private captureUtilArgs: ReadonlyArray<string> | undefined;
    private child: ChildProcess | undefined;
    private focusedWindow: BrowserWindow | null = null;
    private shouldUpdateAlwaysOnTop: boolean = false;

    constructor() {
        this.tempDir = os.tmpdir();
        this.captureUtil = isMac
            ? '/usr/sbin/screencapture'
            : isDevEnv
                ? path.join(__dirname, '../../../node_modules/screen-snippet/ScreenSnippet.exe')
                : path.join(path.dirname(app.getPath('exe')), 'ScreenSnippet.exe');
        if (isLinux) {
            this.captureUtil = '/usr/bin/gnome-screenshot';
        }
    }

    /**
     * Captures a user selected portion of the monitor and returns jpeg image
     * encoded in base64 format.
     *
     * @param webContents {Electron.webContents}
     */
    public async capture(webContents: Electron.webContents) {
        const mainWindow = windowHandler.getMainWindow();
        if (mainWindow && windowExists(mainWindow) && isWindowsOS) {
            this.shouldUpdateAlwaysOnTop = mainWindow.isAlwaysOnTop();
            if (this.shouldUpdateAlwaysOnTop) {
                await updateAlwaysOnTop(false, false, false);
            }
        }
        logger.info(`screen-snippet-handler: Starting screen capture!`);
        this.outputFileName = path.join(this.tempDir, 'symphonyImage-' + Date.now() + '.png');
        this.captureUtilArgs = isMac
            ? [ '-i', '-s', '-t', 'png', this.outputFileName ]
            : [ this.outputFileName, i18n.getLocale() ];
        this.focusedWindow = BrowserWindow.getFocusedWindow();
        if (isLinux) {
            this.captureUtilArgs = ['-a', '-f', this.outputFileName];
        }
        logger.info(`screen-snippet-handler: Capturing snippet with file ${this.outputFileName} and args ${this.captureUtilArgs}!`);

        // only allow one screen capture at a time.
        if (this.child) {
            logger.info(`screen-snippet-handler: Child screen capture exists, killing it and keeping only 1 instance!`);
            this.killChildProcess();
        }
        try {
            await this.execCmd(this.captureUtil, this.captureUtilArgs);
            const { message, data, type }: IScreenSnippet = await this.convertFileToData();
            logger.info(`screen-snippet-handler: Snippet captured!
Sending data to SFE`); webContents.send('screen-snippet-data', { message, data, type }); await this.verifyAndUpdateAlwaysOnTop(); } catch (error) { await this.verifyAndUpdateAlwaysOnTop(); logger.error(`screen-snippet-handler: screen capture failed with error: ${error}!`); } } /** * Cancels a screen capture and closes the snippet window */ public async cancelCapture() { if (!isWindowsOS) { return; } logger.info(`screen-snippet-handler: Cancel screen capture!`); this.focusedWindow = BrowserWindow.getFocusedWindow(); try { await this.execCmd(this.captureUtil, []); await this.verifyAndUpdateAlwaysOnTop(); } catch (error) { await this.verifyAndUpdateAlwaysOnTop(); logger.error(`screen-snippet-handler: screen capture cancel failed with error: ${error}!`); } } /** * Kills the child process when the application is reloaded */ public killChildProcess(): void { if (this.child && typeof this.child.kill === 'function') { this.child.kill(); } } /** * Executes the given command via a child process * * Windows: uses the custom-built Windows screen capture tool * macOS: uses the built-in screencapture tool, which has been * available since OS X ver 10.2. * * @param captureUtil {string} * @param captureUtilArgs {ReadonlyArray<string>} * @example execCmd('/usr/sbin/screencapture', ['-i', '-s', '/user/desktop/symphonyImage-1544025391698.png']) */ private execCmd(captureUtil: string, captureUtilArgs: ReadonlyArray<string>): Promise<ChildProcess> { logger.info(`screen-snippet-handler: execCmd ${captureUtil} ${captureUtilArgs}`); return new Promise<ChildProcess>((resolve, reject) => { return this.child = execFile(captureUtil, captureUtilArgs, (error: ExecException | null) => { if (error && error.killed) { // process was killed; reject so the caller can handle the error. return reject(error); } resolve(); }); }); } /** * Converts the temporarily stored file into base64 * and removes the temp file * * @return Promise<IScreenSnippet> { message, data, type } */ private async convertFileToData(): Promise<IScreenSnippet> { try { if (!this.outputFileName) { logger.info(`screen-snippet-handler: screen capture failed! output file doesn't exist!`); return { message: 'output file name is required', type: 'ERROR' }; } const data = await readFile(this.outputFileName); if (!data) { logger.info(`screen-snippet-handler: screen capture failed! data doesn't exist!`); return { message: `no file data provided`, type: 'ERROR' }; } // convert binary data to base64 encoded string const output = Buffer.from(data).toString('base64'); return { message: 'success', data: output, type: 'image/png;base64' }; } catch (error) { // no such file exists or user likely aborted // creating snippet. also include any error when // creating child process. return error && error.code === 'ENOENT' ? { message: `file does not exist`, type: 'ERROR' } : { message: `${error}`, type: 'ERROR' }; } finally { if (this.focusedWindow && windowExists(this.focusedWindow)) {
logger.info(`screen-snippet-handler: cleaning up temp snippet file: ${this.outputFileName}!`); if (removeErr) { logger.error(`screen-snippet-handler: error removing temp snippet file: ${this.outputFileName}, err: ${removeErr}`); } }); } } } /** * Verifies and updates the always-on-top state */ private async verifyAndUpdateAlwaysOnTop(): Promise<void> { if (this.shouldUpdateAlwaysOnTop) { await updateAlwaysOnTop(true, false, false); this.shouldUpdateAlwaysOnTop = false; } } } const screenSnippet = new ScreenSnippet(); export { screenSnippet };
this.focusedWindow.moveTop(); } // remove tmp file (async) if (this.outputFileName) { fs.unlink(this.outputFileName, (removeErr) => {
random_line_split
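The execCmd helper in this record wraps Node's callback-style execFile in a promise. A minimal standalone sketch of that pattern follows; the screencapture path comes from the record itself, while the output path and flags here are only illustrative:

```typescript
import { execFile, ExecException } from 'child_process';

// Resolve when the spawned capture tool exits cleanly, reject otherwise.
function runCaptureTool(tool: string, args: ReadonlyArray<string>): Promise<void> {
    return new Promise<void>((resolve, reject) => {
        execFile(tool, args, (error: ExecException | null) => {
            if (error) {
                // Covers both a killed process and a non-zero exit code.
                return reject(error);
            }
            resolve();
        });
    });
}

// Illustrative usage only; the real handler picks the tool per platform.
runCaptureTool('/usr/sbin/screencapture', ['-i', '-s', '/tmp/snippet.png'])
    .then(() => console.log('capture finished'))
    .catch((err) => console.error('capture failed', err));
```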
screen-snippet-handler.ts
import { app, BrowserWindow } from 'electron'; import * as fs from 'fs'; import * as os from 'os'; import * as path from 'path'; import { ChildProcess, ExecException, execFile } from 'child_process'; import * as util from 'util'; import { IScreenSnippet } from '../common/api-interface'; import { isDevEnv, isLinux, isMac, isWindowsOS } from '../common/env'; import { i18n } from '../common/i18n'; import { logger } from '../common/logger'; import { updateAlwaysOnTop } from './window-actions'; import { windowHandler } from './window-handler'; import { windowExists } from './window-utils'; const readFile = util.promisify(fs.readFile); class ScreenSnippet { private readonly tempDir: string; private readonly captureUtil: string; private outputFileName: string | undefined; private captureUtilArgs: ReadonlyArray<string> | undefined; private child: ChildProcess | undefined; private focusedWindow: BrowserWindow | null = null; private shouldUpdateAlwaysOnTop: boolean = false; constructor()
/** * Captures a user-selected portion of the monitor and returns a PNG image * encoded in base64 format. * * @param webContents {Electron.webContents} */ public async capture(webContents: Electron.webContents) { const mainWindow = windowHandler.getMainWindow(); if (mainWindow && windowExists(mainWindow) && isWindowsOS) { this.shouldUpdateAlwaysOnTop = mainWindow.isAlwaysOnTop(); if (this.shouldUpdateAlwaysOnTop) { await updateAlwaysOnTop(false, false, false); } } logger.info(`screen-snippet-handler: Starting screen capture!`); this.outputFileName = path.join(this.tempDir, 'symphonyImage-' + Date.now() + '.png'); this.captureUtilArgs = isMac ? [ '-i', '-s', '-t', 'png', this.outputFileName ] : [ this.outputFileName, i18n.getLocale() ]; this.focusedWindow = BrowserWindow.getFocusedWindow(); if (isLinux) { this.captureUtilArgs = ['-a', '-f', this.outputFileName]; } logger.info(`screen-snippet-handler: Capturing snippet with file ${this.outputFileName} and args ${this.captureUtilArgs}!`); // only allow one screen capture at a time. if (this.child) { logger.info(`screen-snippet-handler: Child screen capture exists, killing it and keeping only 1 instance!`); this.killChildProcess(); } try { await this.execCmd(this.captureUtil, this.captureUtilArgs); const { message, data, type }: IScreenSnippet = await this.convertFileToData(); logger.info(`screen-snippet-handler: Snippet captured! Sending data to SFE`); webContents.send('screen-snippet-data', { message, data, type }); await this.verifyAndUpdateAlwaysOnTop(); } catch (error) { await this.verifyAndUpdateAlwaysOnTop(); logger.error(`screen-snippet-handler: screen capture failed with error: ${error}!`); } } /** * Cancels a screen capture and closes the snippet window */ public async cancelCapture() { if (!isWindowsOS) { return; } logger.info(`screen-snippet-handler: Cancel screen capture!`); this.focusedWindow = BrowserWindow.getFocusedWindow(); try { await this.execCmd(this.captureUtil, []); await this.verifyAndUpdateAlwaysOnTop(); } catch (error) { await this.verifyAndUpdateAlwaysOnTop(); logger.error(`screen-snippet-handler: screen capture cancel failed with error: ${error}!`); } } /** * Kills the child process when the application is reloaded */ public killChildProcess(): void { if (this.child && typeof this.child.kill === 'function') { this.child.kill(); } } /** * Executes the given command via a child process * * Windows: uses the custom-built Windows screen capture tool * macOS: uses the built-in screencapture tool, which has been * available since OS X ver 10.2. * * @param captureUtil {string} * @param captureUtilArgs {ReadonlyArray<string>} * @example execCmd('/usr/sbin/screencapture', ['-i', '-s', '/user/desktop/symphonyImage-1544025391698.png']) */ private execCmd(captureUtil: string, captureUtilArgs: ReadonlyArray<string>): Promise<ChildProcess> { logger.info(`screen-snippet-handler: execCmd ${captureUtil} ${captureUtilArgs}`); return new Promise<ChildProcess>((resolve, reject) => { return this.child = execFile(captureUtil, captureUtilArgs, (error: ExecException | null) => { if (error && error.killed) { // process was killed; reject so the caller can handle the error. return reject(error); } resolve(); }); }); } /** * Converts the temporarily stored file into base64 * and removes the temp file * * @return Promise<IScreenSnippet> { message, data, type } */ private async convertFileToData(): Promise<IScreenSnippet> { try { if (!this.outputFileName) { logger.info(`screen-snippet-handler: screen capture failed! output file doesn't exist!`); return { message: 'output file name is required', type: 'ERROR' }; } const data = await readFile(this.outputFileName); if (!data) { logger.info(`screen-snippet-handler: screen capture failed! data doesn't exist!`); return { message: `no file data provided`, type: 'ERROR' }; } // convert binary data to base64 encoded string const output = Buffer.from(data).toString('base64'); return { message: 'success', data: output, type: 'image/png;base64' }; } catch (error) { // no such file exists or user likely aborted // creating snippet. also include any error when // creating child process. return error && error.code === 'ENOENT' ? { message: `file does not exist`, type: 'ERROR' } : { message: `${error}`, type: 'ERROR' }; } finally { if (this.focusedWindow && windowExists(this.focusedWindow)) { this.focusedWindow.moveTop(); } // remove tmp file (async) if (this.outputFileName) { fs.unlink(this.outputFileName, (removeErr) => { logger.info(`screen-snippet-handler: cleaning up temp snippet file: ${this.outputFileName}!`); if (removeErr) { logger.error(`screen-snippet-handler: error removing temp snippet file: ${this.outputFileName}, err: ${removeErr}`); } }); } } } /** * Verifies and updates the always-on-top state */ private async verifyAndUpdateAlwaysOnTop(): Promise<void> { if (this.shouldUpdateAlwaysOnTop) { await updateAlwaysOnTop(true, false, false); this.shouldUpdateAlwaysOnTop = false; } } } const screenSnippet = new ScreenSnippet(); export { screenSnippet };
{ this.tempDir = os.tmpdir(); this.captureUtil = isMac ? '/usr/sbin/screencapture' : isDevEnv ? path.join(__dirname, '../../../node_modules/screen-snippet/ScreenSnippet.exe') : path.join(path.dirname(app.getPath('exe')), 'ScreenSnippet.exe'); if (isLinux) { this.captureUtil = '/usr/bin/gnome-screenshot'; } }
identifier_body
screen-snippet-handler.ts
import { app, BrowserWindow } from 'electron'; import * as fs from 'fs'; import * as os from 'os'; import * as path from 'path'; import { ChildProcess, ExecException, execFile } from 'child_process'; import * as util from 'util'; import { IScreenSnippet } from '../common/api-interface'; import { isDevEnv, isLinux, isMac, isWindowsOS } from '../common/env'; import { i18n } from '../common/i18n'; import { logger } from '../common/logger'; import { updateAlwaysOnTop } from './window-actions'; import { windowHandler } from './window-handler'; import { windowExists } from './window-utils'; const readFile = util.promisify(fs.readFile); class ScreenSnippet { private readonly tempDir: string; private readonly captureUtil: string; private outputFileName: string | undefined; private captureUtilArgs: ReadonlyArray<string> | undefined; private child: ChildProcess | undefined; private focusedWindow: BrowserWindow | null = null; private shouldUpdateAlwaysOnTop: boolean = false; constructor() { this.tempDir = os.tmpdir(); this.captureUtil = isMac ? '/usr/sbin/screencapture' : isDevEnv ? path.join(__dirname, '../../../node_modules/screen-snippet/ScreenSnippet.exe') : path.join(path.dirname(app.getPath('exe')), 'ScreenSnippet.exe'); if (isLinux) { this.captureUtil = '/usr/bin/gnome-screenshot'; } } /** * Captures a user-selected portion of the monitor and returns a PNG image * encoded in base64 format. * * @param webContents {Electron.webContents} */ public async capture(webContents: Electron.webContents) { const mainWindow = windowHandler.getMainWindow(); if (mainWindow && windowExists(mainWindow) && isWindowsOS)
logger.info(`screen-snippet-handler: Starting screen capture!`); this.outputFileName = path.join(this.tempDir, 'symphonyImage-' + Date.now() + '.png'); this.captureUtilArgs = isMac ? [ '-i', '-s', '-t', 'png', this.outputFileName ] : [ this.outputFileName, i18n.getLocale() ]; this.focusedWindow = BrowserWindow.getFocusedWindow(); if (isLinux) { this.captureUtilArgs = ['-a', '-f', this.outputFileName]; } logger.info(`screen-snippet-handler: Capturing snippet with file ${this.outputFileName} and args ${this.captureUtilArgs}!`); // only allow one screen capture at a time. if (this.child) { logger.info(`screen-snippet-handler: Child screen capture exists, killing it and keeping only 1 instance!`); this.killChildProcess(); } try { await this.execCmd(this.captureUtil, this.captureUtilArgs); const { message, data, type }: IScreenSnippet = await this.convertFileToData(); logger.info(`screen-snippet-handler: Snippet captured! Sending data to SFE`); webContents.send('screen-snippet-data', { message, data, type }); await this.verifyAndUpdateAlwaysOnTop(); } catch (error) { await this.verifyAndUpdateAlwaysOnTop(); logger.error(`screen-snippet-handler: screen capture failed with error: ${error}!`); } } /** * Cancels a screen capture and closes the snippet window */ public async cancelCapture() { if (!isWindowsOS) { return; } logger.info(`screen-snippet-handler: Cancel screen capture!`); this.focusedWindow = BrowserWindow.getFocusedWindow(); try { await this.execCmd(this.captureUtil, []); await this.verifyAndUpdateAlwaysOnTop(); } catch (error) { await this.verifyAndUpdateAlwaysOnTop(); logger.error(`screen-snippet-handler: screen capture cancel failed with error: ${error}!`); } } /** * Kills the child process when the application is reloaded */ public killChildProcess(): void { if (this.child && typeof this.child.kill === 'function') { this.child.kill(); } } /** * Executes the given command via a child process * * Windows: uses the custom-built Windows screen capture tool * macOS: uses the built-in screencapture tool, which has been * available since OS X ver 10.2. * * @param captureUtil {string} * @param captureUtilArgs {ReadonlyArray<string>} * @example execCmd('/usr/sbin/screencapture', ['-i', '-s', '/user/desktop/symphonyImage-1544025391698.png']) */ private execCmd(captureUtil: string, captureUtilArgs: ReadonlyArray<string>): Promise<ChildProcess> { logger.info(`screen-snippet-handler: execCmd ${captureUtil} ${captureUtilArgs}`); return new Promise<ChildProcess>((resolve, reject) => { return this.child = execFile(captureUtil, captureUtilArgs, (error: ExecException | null) => { if (error && error.killed) { // process was killed; reject so the caller can handle the error. return reject(error); } resolve(); }); }); } /** * Converts the temporarily stored file into base64 * and removes the temp file * * @return Promise<IScreenSnippet> { message, data, type } */ private async convertFileToData(): Promise<IScreenSnippet> { try { if (!this.outputFileName) { logger.info(`screen-snippet-handler: screen capture failed! output file doesn't exist!`); return { message: 'output file name is required', type: 'ERROR' }; } const data = await readFile(this.outputFileName); if (!data) { logger.info(`screen-snippet-handler: screen capture failed! data doesn't exist!`); return { message: `no file data provided`, type: 'ERROR' }; } // convert binary data to base64 encoded string const output = Buffer.from(data).toString('base64'); return { message: 'success', data: output, type: 'image/png;base64' }; } catch (error) { // no such file exists or user likely aborted // creating snippet. also include any error when // creating child process. return error && error.code === 'ENOENT' ? { message: `file does not exist`, type: 'ERROR' } : { message: `${error}`, type: 'ERROR' }; } finally { if (this.focusedWindow && windowExists(this.focusedWindow)) { this.focusedWindow.moveTop(); } // remove tmp file (async) if (this.outputFileName) { fs.unlink(this.outputFileName, (removeErr) => { logger.info(`screen-snippet-handler: cleaning up temp snippet file: ${this.outputFileName}!`); if (removeErr) { logger.error(`screen-snippet-handler: error removing temp snippet file: ${this.outputFileName}, err: ${removeErr}`); } }); } } } /** * Verifies and updates the always-on-top state */ private async verifyAndUpdateAlwaysOnTop(): Promise<void> { if (this.shouldUpdateAlwaysOnTop) { await updateAlwaysOnTop(true, false, false); this.shouldUpdateAlwaysOnTop = false; } } } const screenSnippet = new ScreenSnippet(); export { screenSnippet };
{ this.shouldUpdateAlwaysOnTop = mainWindow.isAlwaysOnTop(); if (this.shouldUpdateAlwaysOnTop) { await updateAlwaysOnTop(false, false, false); } }
conditional_block
screen-snippet-handler.ts
import { app, BrowserWindow } from 'electron'; import * as fs from 'fs'; import * as os from 'os'; import * as path from 'path'; import { ChildProcess, ExecException, execFile } from 'child_process'; import * as util from 'util'; import { IScreenSnippet } from '../common/api-interface'; import { isDevEnv, isLinux, isMac, isWindowsOS } from '../common/env'; import { i18n } from '../common/i18n'; import { logger } from '../common/logger'; import { updateAlwaysOnTop } from './window-actions'; import { windowHandler } from './window-handler'; import { windowExists } from './window-utils'; const readFile = util.promisify(fs.readFile); class ScreenSnippet { private readonly tempDir: string; private readonly captureUtil: string; private outputFileName: string | undefined; private captureUtilArgs: ReadonlyArray<string> | undefined; private child: ChildProcess | undefined; private focusedWindow: BrowserWindow | null = null; private shouldUpdateAlwaysOnTop: boolean = false; constructor() { this.tempDir = os.tmpdir(); this.captureUtil = isMac ? '/usr/sbin/screencapture' : isDevEnv ? path.join(__dirname, '../../../node_modules/screen-snippet/ScreenSnippet.exe') : path.join(path.dirname(app.getPath('exe')), 'ScreenSnippet.exe'); if (isLinux) { this.captureUtil = '/usr/bin/gnome-screenshot'; } } /** * Captures a user-selected portion of the monitor and returns a PNG image * encoded in base64 format. * * @param webContents {Electron.webContents} */ public async capture(webContents: Electron.webContents) { const mainWindow = windowHandler.getMainWindow(); if (mainWindow && windowExists(mainWindow) && isWindowsOS) { this.shouldUpdateAlwaysOnTop = mainWindow.isAlwaysOnTop(); if (this.shouldUpdateAlwaysOnTop) { await updateAlwaysOnTop(false, false, false); } } logger.info(`screen-snippet-handler: Starting screen capture!`); this.outputFileName = path.join(this.tempDir, 'symphonyImage-' + Date.now() + '.png'); this.captureUtilArgs = isMac ? [ '-i', '-s', '-t', 'png', this.outputFileName ] : [ this.outputFileName, i18n.getLocale() ]; this.focusedWindow = BrowserWindow.getFocusedWindow(); if (isLinux) { this.captureUtilArgs = ['-a', '-f', this.outputFileName]; } logger.info(`screen-snippet-handler: Capturing snippet with file ${this.outputFileName} and args ${this.captureUtilArgs}!`); // only allow one screen capture at a time. if (this.child) { logger.info(`screen-snippet-handler: Child screen capture exists, killing it and keeping only 1 instance!`); this.killChildProcess(); } try { await this.execCmd(this.captureUtil, this.captureUtilArgs); const { message, data, type }: IScreenSnippet = await this.convertFileToData(); logger.info(`screen-snippet-handler: Snippet captured! Sending data to SFE`); webContents.send('screen-snippet-data', { message, data, type }); await this.verifyAndUpdateAlwaysOnTop(); } catch (error) { await this.verifyAndUpdateAlwaysOnTop(); logger.error(`screen-snippet-handler: screen capture failed with error: ${error}!`); } } /** * Cancels a screen capture and closes the snippet window */ public async cancelCapture() { if (!isWindowsOS) { return; } logger.info(`screen-snippet-handler: Cancel screen capture!`); this.focusedWindow = BrowserWindow.getFocusedWindow(); try { await this.execCmd(this.captureUtil, []); await this.verifyAndUpdateAlwaysOnTop(); } catch (error) { await this.verifyAndUpdateAlwaysOnTop(); logger.error(`screen-snippet-handler: screen capture cancel failed with error: ${error}!`); } } /** * Kills the child process when the application is reloaded */ public killChildProcess(): void { if (this.child && typeof this.child.kill === 'function') { this.child.kill(); } } /** * Executes the given command via a child process * * Windows: uses the custom-built Windows screen capture tool * macOS: uses the built-in screencapture tool, which has been * available since OS X ver 10.2. * * @param captureUtil {string} * @param captureUtilArgs {ReadonlyArray<string>} * @example execCmd('/usr/sbin/screencapture', ['-i', '-s', '/user/desktop/symphonyImage-1544025391698.png']) */ private execCmd(captureUtil: string, captureUtilArgs: ReadonlyArray<string>): Promise<ChildProcess> { logger.info(`screen-snippet-handler: execCmd ${captureUtil} ${captureUtilArgs}`); return new Promise<ChildProcess>((resolve, reject) => { return this.child = execFile(captureUtil, captureUtilArgs, (error: ExecException | null) => { if (error && error.killed) { // process was killed; reject so the caller can handle the error. return reject(error); } resolve(); }); }); } /** * Converts the temporarily stored file into base64 * and removes the temp file * * @return Promise<IScreenSnippet> { message, data, type } */ private async convertFileToData(): Promise<IScreenSnippet> { try { if (!this.outputFileName) { logger.info(`screen-snippet-handler: screen capture failed! output file doesn't exist!`); return { message: 'output file name is required', type: 'ERROR' }; } const data = await readFile(this.outputFileName); if (!data) { logger.info(`screen-snippet-handler: screen capture failed! data doesn't exist!`); return { message: `no file data provided`, type: 'ERROR' }; } // convert binary data to base64 encoded string const output = Buffer.from(data).toString('base64'); return { message: 'success', data: output, type: 'image/png;base64' }; } catch (error) { // no such file exists or user likely aborted // creating snippet. also include any error when // creating child process. return error && error.code === 'ENOENT' ? { message: `file does not exist`, type: 'ERROR' } : { message: `${error}`, type: 'ERROR' }; } finally { if (this.focusedWindow && windowExists(this.focusedWindow)) { this.focusedWindow.moveTop(); } // remove tmp file (async) if (this.outputFileName) { fs.unlink(this.outputFileName, (removeErr) => { logger.info(`screen-snippet-handler: cleaning up temp snippet file: ${this.outputFileName}!`); if (removeErr) { logger.error(`screen-snippet-handler: error removing temp snippet file: ${this.outputFileName}, err: ${removeErr}`); } }); } } } /** * Verifies and updates the always-on-top state */ private async
(): Promise<void> { if (this.shouldUpdateAlwaysOnTop) { await updateAlwaysOnTop(true, false, false); this.shouldUpdateAlwaysOnTop = false; } } } const screenSnippet = new ScreenSnippet(); export { screenSnippet };
verifyAndUpdateAlwaysOnTop
identifier_name
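convertFileToData in the records above reads the temp PNG, base64-encodes it, and removes the file in a finally block. A stripped-down sketch of that conversion pattern, independent of the handler class:

```typescript
import * as fs from 'fs';
import * as util from 'util';

const readFileAsync = util.promisify(fs.readFile);

// Read a captured image and return it as a base64 string, or null on failure.
async function fileToBase64(filePath: string): Promise<string | null> {
    try {
        const data = await readFileAsync(filePath);
        return Buffer.from(data).toString('base64');
    } catch (error) {
        // ENOENT here usually means the user aborted the capture.
        return null;
    } finally {
        // Clean up the temp file without blocking the caller.
        fs.unlink(filePath, () => { /* ignore removal errors */ });
    }
}
```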
test_parameters.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst # STDLIB from types import MappingProxyType # THIRD PARTY import numpy as np import pytest # LOCAL from astropy.cosmology import parameters, realizations def test_realizations_in_dir():
@pytest.mark.parametrize("name", parameters.available) def test_getting_parameters(name): """ Test getting 'parameters' and that it is derived from the corresponding realization. """ params = getattr(parameters, name) assert isinstance(params, MappingProxyType) assert params["name"] == name # Check parameters have the right keys and values cosmo = getattr(realizations, name) assert params["name"] == cosmo.name assert params["cosmology"] == cosmo.__class__.__qualname__ # All the cosmology parameters are equal for n in cosmo.__parameters__: assert np.array_equal(params[n], getattr(cosmo, n)) # All the metadata is included. Parameter values take precedence, so only # checking the keys. assert set(cosmo.meta.keys()).issubset(params.keys()) # Lastly, check the generation process. m = cosmo.to_format("mapping", cosmology_as_str=True, move_from_meta=True) assert params == m
"""Test the realizations are in ``dir`` of :mod:`astropy.cosmology.parameters`.""" d = dir(parameters) assert set(d) == set(parameters.__all__) for n in parameters.available: assert n in d
identifier_body
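The test above asserts that the parameters object is a MappingProxyType. A small illustration of why that type matters (a read-only view over a dict; the values here are illustrative):

```python
from types import MappingProxyType

params = MappingProxyType({"name": "Planck18", "H0": 67.66})

print(params["name"])    # reads work: Planck18
try:
    params["H0"] = 70.0  # writes do not: mappingproxy rejects assignment
except TypeError as exc:
    print(f"read-only: {exc}")
```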
test_parameters.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst # STDLIB from types import MappingProxyType # THIRD PARTY import numpy as np import pytest # LOCAL from astropy.cosmology import parameters, realizations def test_realizations_in_dir(): """Test the realizations are in ``dir`` of :mod:`astropy.cosmology.parameters`.""" d = dir(parameters) assert set(d) == set(parameters.__all__) for n in parameters.available: assert n in d @pytest.mark.parametrize("name", parameters.available) def test_getting_parameters(name): """ Test getting 'parameters' and that it is derived from the corresponding realization. """ params = getattr(parameters, name) assert isinstance(params, MappingProxyType) assert params["name"] == name # Check parameters have the right keys and values cosmo = getattr(realizations, name) assert params["name"] == cosmo.name assert params["cosmology"] == cosmo.__class__.__qualname__ # All the cosmology parameters are equal for n in cosmo.__parameters__:
# All the metadata is included. Parameter values take precedence, so only # checking the keys. assert set(cosmo.meta.keys()).issubset(params.keys()) # Lastly, check the generation process. m = cosmo.to_format("mapping", cosmology_as_str=True, move_from_meta=True) assert params == m
assert np.array_equal(params[n], getattr(cosmo, n))
conditional_block
test_parameters.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst # STDLIB from types import MappingProxyType # THIRD PARTY import numpy as np import pytest # LOCAL from astropy.cosmology import parameters, realizations def test_realizations_in_dir(): """Test the realizations are in ``dir`` of :mod:`astropy.cosmology.parameters`.""" d = dir(parameters) assert set(d) == set(parameters.__all__) for n in parameters.available: assert n in d @pytest.mark.parametrize("name", parameters.available) def test_getting_parameters(name): """ Test getting 'parameters' and that it is derived from the corresponding realization. """ params = getattr(parameters, name)
assert params["name"] == name # Check parameters have the right keys and values cosmo = getattr(realizations, name) assert params["name"] == cosmo.name assert params["cosmology"] == cosmo.__class__.__qualname__ # All the cosmology parameters are equal for n in cosmo.__parameters__: assert np.array_equal(params[n], getattr(cosmo, n)) # All the metadata is included. Parameter values take precedence, so only # checking the keys. assert set(cosmo.meta.keys()).issubset(params.keys()) # Lastly, check the generation process. m = cosmo.to_format("mapping", cosmology_as_str=True, move_from_meta=True) assert params == m
assert isinstance(params, MappingProxyType)
random_line_split
test_parameters.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst # STDLIB from types import MappingProxyType # THIRD PARTY import numpy as np import pytest # LOCAL from astropy.cosmology import parameters, realizations def test_realizations_in_dir(): """Test the realizations are in ``dir`` of :mod:`astropy.cosmology.parameters`.""" d = dir(parameters) assert set(d) == set(parameters.__all__) for n in parameters.available: assert n in d @pytest.mark.parametrize("name", parameters.available) def
(name): """ Test getting 'parameters' and that it is derived from the corresponding realization. """ params = getattr(parameters, name) assert isinstance(params, MappingProxyType) assert params["name"] == name # Check parameters have the right keys and values cosmo = getattr(realizations, name) assert params["name"] == cosmo.name assert params["cosmology"] == cosmo.__class__.__qualname__ # All the cosmology parameters are equal for n in cosmo.__parameters__: assert np.array_equal(params[n], getattr(cosmo, n)) # All the metadata is included. Parameter values take precedence, so only # checking the keys. assert set(cosmo.meta.keys()).issubset(params.keys()) # Lastly, check the generation process. m = cosmo.to_format("mapping", cosmology_as_str=True, move_from_meta=True) assert params == m
test_getting_parameters
identifier_name
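The suite drives one test body over every realization name via pytest's parametrize plus a getattr lookup. The same pattern in miniature, with a hypothetical stand-in module rather than the astropy API:

```python
import pytest


class _FakeModule:
    """Hypothetical stand-in for a module exposing named realizations."""
    available = ("alpha", "beta")
    alpha = {"name": "alpha"}
    beta = {"name": "beta"}


fake = _FakeModule()


@pytest.mark.parametrize("name", fake.available)
def test_lookup_by_name(name):
    # getattr turns the string parameter into an attribute lookup,
    # mirroring getattr(parameters, name) in the real suite.
    obj = getattr(fake, name)
    assert obj["name"] == name
```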
data_manager_view_mixin.py
from django.contrib import messages from django.views.generic.base import ContextMixin from edc_constants.constants import OPEN from ..models import DataActionItem from ..model_wrappers import DataActionItemModelWrapper from .user_details_check_view_mixin import UserDetailsCheckViewMixin class DataActionItemsViewMixin(UserDetailsCheckViewMixin, ContextMixin): data_action_item_template = 'edc_data_manager/data_manager.html' @property def
(self): """Returns a wrapped saved or unsaved data action item. """ model_obj = DataActionItem(subject_identifier=self.subject_identifier) return DataActionItemModelWrapper(model_obj=model_obj) def data_action_items(self): """Return a list of action items. """ wrapped_data_action_items = [] status = [OPEN, 'stalled', 'resolved'] data_action_items = DataActionItem.objects.filter( subject_identifier=self.subject_identifier, status__in=status).order_by('issue_number') for data_action_item in data_action_items: wrapped_data_action_items.append(data_action_item) return wrapped_data_action_items def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) status = [OPEN, 'stalled', 'resolved'] data_action_items = DataActionItem.objects.filter( subject_identifier=self.subject_identifier, status__in=status).order_by('issue_number') msg = '' for data_action_item in data_action_items: msg = (f'Issue {data_action_item.issue_number}. Pending action' f' created by {data_action_item.user_created}. ' f'{data_action_item.subject} Assigned to ' f'{data_action_item.assigned}') messages.add_message( self.request, messages.ERROR, msg) context.update( data_action_item_template=self.data_action_item_template, data_action_item_add_url=self.data_action_item.href, data_action_items=self.data_action_items) return context
data_action_item
identifier_name
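The data_action_item property above wraps an unsaved model instance so the template can build an add-form link from it. A generic sketch of that wrapper-property pattern; both classes here are hypothetical stand-ins, not the edc API:

```python
class Wrapper:
    """Hypothetical stand-in for a model wrapper."""

    def __init__(self, model_obj):
        self.model_obj = model_obj

    @property
    def href(self):
        return f"/add/?subject={self.model_obj['subject_identifier']}"


class ViewMixin:
    subject_identifier = "ABC-123"

    @property
    def data_action_item(self):
        # Wrap an unsaved instance so the template can link to an add form.
        model_obj = {"subject_identifier": self.subject_identifier}
        return Wrapper(model_obj=model_obj)


print(ViewMixin().data_action_item.href)  # /add/?subject=ABC-123
```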
data_manager_view_mixin.py
from django.contrib import messages from django.views.generic.base import ContextMixin from edc_constants.constants import OPEN from ..models import DataActionItem from ..model_wrappers import DataActionItemModelWrapper from .user_details_check_view_mixin import UserDetailsCheckViewMixin class DataActionItemsViewMixin(UserDetailsCheckViewMixin, ContextMixin): data_action_item_template = 'edc_data_manager/data_manager.html' @property def data_action_item(self): """Returns a wrapped saved or unsaved data action item. """ model_obj = DataActionItem(subject_identifier=self.subject_identifier) return DataActionItemModelWrapper(model_obj=model_obj) def data_action_items(self): """Return a list of action items. """ wrapped_data_action_items = [] status = [OPEN, 'stalled', 'resolved'] data_action_items = DataActionItem.objects.filter( subject_identifier=self.subject_identifier, status__in=status).order_by('issue_number') for data_action_item in data_action_items: wrapped_data_action_items.append(data_action_item) return wrapped_data_action_items def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) status = [OPEN, 'stalled', 'resolved'] data_action_items = DataActionItem.objects.filter( subject_identifier=self.subject_identifier,
for data_action_item in data_action_items: msg = (f'Issue {data_action_item.issue_number}. Pending action' f' created by {data_action_item.user_created}. ' f'{data_action_item.subject} Assigned to ' f'{data_action_item.assigned}') messages.add_message( self.request, messages.ERROR, msg) context.update( data_action_item_template=self.data_action_item_template, data_action_item_add_url=self.data_action_item.href, data_action_items=self.data_action_items) return context
status__in=status).order_by('issue_number') msg = ''
random_line_split
data_manager_view_mixin.py
from django.contrib import messages from django.views.generic.base import ContextMixin from edc_constants.constants import OPEN from ..models import DataActionItem from ..model_wrappers import DataActionItemModelWrapper from .user_details_check_view_mixin import UserDetailsCheckViewMixin class DataActionItemsViewMixin(UserDetailsCheckViewMixin, ContextMixin): data_action_item_template = 'edc_data_manager/data_manager.html' @property def data_action_item(self): """Returns a wrapped saved or unsaved data action item. """ model_obj = DataActionItem(subject_identifier=self.subject_identifier) return DataActionItemModelWrapper(model_obj=model_obj) def data_action_items(self): """Return a list of action items. """ wrapped_data_action_items = [] status = [OPEN, 'stalled', 'resolved'] data_action_items = DataActionItem.objects.filter( subject_identifier=self.subject_identifier, status__in=status).order_by('issue_number') for data_action_item in data_action_items: wrapped_data_action_items.append(data_action_item) return wrapped_data_action_items def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) status = [OPEN, 'stalled', 'resolved'] data_action_items = DataActionItem.objects.filter( subject_identifier=self.subject_identifier, status__in=status).order_by('issue_number') msg = '' for data_action_item in data_action_items:
context.update( data_action_item_template=self.data_action_item_template, data_action_item_add_url=self.data_action_item.href, data_action_items=self.data_action_items) return context
msg = (f'Issue {data_action_item.issue_number}. Pending action' f' created by {data_action_item.user_created}. ' f'{data_action_item.subject} Assigned to ' f'{data_action_item.assigned}') messages.add_message( self.request, messages.ERROR, msg)
conditional_block
data_manager_view_mixin.py
from django.contrib import messages from django.views.generic.base import ContextMixin from edc_constants.constants import OPEN from ..models import DataActionItem from ..model_wrappers import DataActionItemModelWrapper from .user_details_check_view_mixin import UserDetailsCheckViewMixin class DataActionItemsViewMixin(UserDetailsCheckViewMixin, ContextMixin): data_action_item_template = 'edc_data_manager/data_manager.html' @property def data_action_item(self): """Returns a wrapped saved or unsaved data action item. """ model_obj = DataActionItem(subject_identifier=self.subject_identifier) return DataActionItemModelWrapper(model_obj=model_obj) def data_action_items(self): """Return a list of action items. """ wrapped_data_action_items = [] status = [OPEN, 'stalled', 'resolved'] data_action_items = DataActionItem.objects.filter( subject_identifier=self.subject_identifier, status__in=status).order_by('issue_number') for data_action_item in data_action_items: wrapped_data_action_items.append(data_action_item) return wrapped_data_action_items def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs) status = [OPEN, 'stalled', 'resolved'] data_action_items = DataActionItem.objects.filter( subject_identifier=self.subject_identifier, status__in=status).order_by('issue_number') msg = '' for data_action_item in data_action_items: msg = (f'Issue {data_action_item.issue_number}. Pending action' f' created by {data_action_item.user_created}. ' f'{data_action_item.subject} Assigned to ' f'{data_action_item.assigned}') messages.add_message( self.request, messages.ERROR, msg) context.update( data_action_item_template=self.data_action_item_template, data_action_item_add_url=self.data_action_item.href, data_action_items=self.data_action_items) return context
identifier_body
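get_context_data above calls super() first and then layers this mixin's keys on top, which is what keeps chained Django mixins composable. A self-contained sketch of that override pattern, with a minimal stand-in for the ContextMixin base:

```python
class ContextMixinSketch:
    """Minimal stand-in for Django's ContextMixin chain."""

    def get_context_data(self, **kwargs):
        return dict(kwargs)


class DataItemsMixinSketch(ContextMixinSketch):
    open_items = ({"issue_number": 1, "assigned": "reviewer"},)

    def get_context_data(self, **kwargs):
        # Call super() first so upstream mixins contribute their context,
        # then update with this mixin's keys -- the same shape as above.
        context = super().get_context_data(**kwargs)
        context.update(data_action_items=self.open_items)
        return context


print(DataItemsMixinSketch().get_context_data(page="home"))
```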
parse.js
var assert = require('assert'); var cookie = require('..'); suite('parse'); test('basic', function() { assert.deepEqual({ foo: 'bar' }, cookie.parse('foo=bar')); assert.deepEqual({ foo: '123' }, cookie.parse('foo=123')); }); test('ignore spaces', function() { assert.deepEqual({ FOO: 'bar', baz: 'raz' }, cookie.parse('FOO = bar; baz = raz')); }); test('escaping', function() { assert.deepEqual({ foo: 'bar=123456789&name=Magic+Mouse' }, cookie.parse('foo="bar=123456789&name=Magic+Mouse"')); assert.deepEqual({ email: ' ",;/' }, cookie.parse('email=%20%22%2c%3b%2f')); }); test('ignore escaping error and return original value', function() {
assert.deepEqual({ foo: '%1', bar: 'bar' }, cookie.parse('foo=%1;bar=bar')); });
random_line_split
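The assertions in this record document cookie.parse's contract: pairs split on ';', whitespace trimmed, values URL-decoded, and malformed escapes left untouched. The same behavior as a plain usage snippet; the require path assumes the published 'cookie' package rather than the repo-local '..':

```js
var assert = require('assert');
var cookie = require('cookie');

// Pairs are split on ';', keys and values trimmed, values URL-decoded.
assert.deepEqual(cookie.parse('foo=bar; baz=raz'), { foo: 'bar', baz: 'raz' });
assert.deepEqual(cookie.parse('email=%20%22%2c%3b%2f'), { email: ' ",;/' });

// Malformed escapes fall back to the original value instead of throwing.
assert.deepEqual(cookie.parse('foo=%1;bar=bar'), { foo: '%1', bar: 'bar' });
console.log('all cookie.parse checks passed');
```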
event-target-legacy.ts
/** * @license * Copyright Google Inc. All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ export function eventTargetLegacyPatch(_global: any, api: _ZonePrivate) { const {eventNames, globalSources, zoneSymbolEventNames, TRUE_STR, FALSE_STR, ZONE_SYMBOL_PREFIX} = api.getGlobalObjects() !; const WTF_ISSUE_555 = 'Anchor,Area,Audio,BR,Base,BaseFont,Body,Button,Canvas,Content,DList,Directory,Div,Embed,FieldSet,Font,Form,Frame,FrameSet,HR,Head,Heading,Html,IFrame,Image,Input,Keygen,LI,Label,Legend,Link,Map,Marquee,Media,Menu,Meta,Meter,Mod,OList,Object,OptGroup,Option,Output,Paragraph,Pre,Progress,Quote,Script,Select,Source,Span,Style,TableCaption,TableCell,TableCol,Table,TableRow,TableSection,TextArea,Title,Track,UList,Unknown,Video'; const NO_EVENT_TARGET = 'ApplicationCache,EventSource,FileReader,InputMethodContext,MediaController,MessagePort,Node,Performance,SVGElementInstance,SharedWorker,TextTrack,TextTrackCue,TextTrackList,WebKitNamedFlow,Window,Worker,WorkerGlobalScope,XMLHttpRequest,XMLHttpRequestEventTarget,XMLHttpRequestUpload,IDBRequest,IDBOpenDBRequest,IDBDatabase,IDBTransaction,IDBCursor,DBIndex,WebSocket' .split(','); const EVENT_TARGET = 'EventTarget'; let apis: any[] = []; const isWtf = _global['wtf']; const WTF_ISSUE_555_ARRAY = WTF_ISSUE_555.split(','); if (isWtf) { // Workaround for: https://github.com/google/tracing-framework/issues/555 apis = WTF_ISSUE_555_ARRAY.map((v) => 'HTML' + v + 'Element').concat(NO_EVENT_TARGET); } else if (_global[EVENT_TARGET]) { apis.push(EVENT_TARGET); } else { // Note: EventTarget is not available in all browsers, // if it's not available, we instead patch the APIs in the IDL that inherit from EventTarget apis = NO_EVENT_TARGET; } const isDisableIECheck = _global['__Zone_disable_IE_check'] || false; const isEnableCrossContextCheck = _global['__Zone_enable_cross_context_check'] || false; const ieOrEdge = api.isIEOrEdge(); const ADD_EVENT_LISTENER_SOURCE = '.addEventListener:'; const FUNCTION_WRAPPER = '[object FunctionWrapper]'; const BROWSER_TOOLS = 'function __BROWSERTOOLS_CONSOLE_SAFEFUNC() { [native code] }'; const pointerEventsMap: {[key: string]: string} = { 'MSPointerCancel': 'pointercancel', 'MSPointerDown': 'pointerdown', 'MSPointerEnter': 'pointerenter', 'MSPointerHover': 'pointerhover', 'MSPointerLeave': 'pointerleave', 'MSPointerMove': 'pointermove', 'MSPointerOut': 'pointerout', 'MSPointerOver': 'pointerover', 'MSPointerUp': 'pointerup' }; // predefine all __zone_symbol__ + eventName + true/false string for (let i = 0; i < eventNames.length; i++) { const eventName = eventNames[i]; const falseEventName = eventName + FALSE_STR; const trueEventName = eventName + TRUE_STR; const symbol = ZONE_SYMBOL_PREFIX + falseEventName; const symbolCapture = ZONE_SYMBOL_PREFIX + trueEventName; zoneSymbolEventNames[eventName] = {}; zoneSymbolEventNames[eventName][FALSE_STR] = symbol; zoneSymbolEventNames[eventName][TRUE_STR] = symbolCapture; } // predefine all task.source string for (let i = 0; i < WTF_ISSUE_555_ARRAY.length; i++) { const target: any = WTF_ISSUE_555_ARRAY[i]; const targets: any = globalSources[target] = {}; for (let j = 0; j < eventNames.length; j++) { const eventName = eventNames[j]; targets[eventName] = target + ADD_EVENT_LISTENER_SOURCE + eventName; } } const checkIEAndCrossContext = function( nativeDelegate: any, delegate: any, target: any, args: any) { if (!isDisableIECheck && ieOrEdge) { if (isEnableCrossContextCheck) { 
try { const testString = delegate.toString(); if ((testString === FUNCTION_WRAPPER || testString == BROWSER_TOOLS)) { nativeDelegate.apply(target, args); return false; } } catch (error) { nativeDelegate.apply(target, args); return false; } } else { const testString = delegate.toString(); if ((testString === FUNCTION_WRAPPER || testString == BROWSER_TOOLS)) { nativeDelegate.apply(target, args); return false; } } } else if (isEnableCrossContextCheck) { try { delegate.toString(); } catch (error) { nativeDelegate.apply(target, args); return false; } } return true; }; const apiTypes: any[] = []; for (let i = 0; i < apis.length; i++) { const type = _global[apis[i]]; apiTypes.push(type && type.prototype); } // vh is validateHandler to check event handler // is valid or not(for security check) api.patchEventTarget(_global, apiTypes, { vh: checkIEAndCrossContext, transferEventName: (eventName: string) => { const pointerEventName = pointerEventsMap[eventName]; return pointerEventName || eventName; } }); (Zone as any)[api.symbol('patchEventTarget')] = !!_global[EVENT_TARGET]; return true; } export function
(global: any, api: _ZonePrivate) { api.patchEventPrototype(global, api); }
patchEvent
identifier_name
event-target-legacy.ts
/** * @license * Copyright Google Inc. All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ export function eventTargetLegacyPatch(_global: any, api: _ZonePrivate) { const {eventNames, globalSources, zoneSymbolEventNames, TRUE_STR, FALSE_STR, ZONE_SYMBOL_PREFIX} = api.getGlobalObjects() !; const WTF_ISSUE_555 = 'Anchor,Area,Audio,BR,Base,BaseFont,Body,Button,Canvas,Content,DList,Directory,Div,Embed,FieldSet,Font,Form,Frame,FrameSet,HR,Head,Heading,Html,IFrame,Image,Input,Keygen,LI,Label,Legend,Link,Map,Marquee,Media,Menu,Meta,Meter,Mod,OList,Object,OptGroup,Option,Output,Paragraph,Pre,Progress,Quote,Script,Select,Source,Span,Style,TableCaption,TableCell,TableCol,Table,TableRow,TableSection,TextArea,Title,Track,UList,Unknown,Video'; const NO_EVENT_TARGET = 'ApplicationCache,EventSource,FileReader,InputMethodContext,MediaController,MessagePort,Node,Performance,SVGElementInstance,SharedWorker,TextTrack,TextTrackCue,TextTrackList,WebKitNamedFlow,Window,Worker,WorkerGlobalScope,XMLHttpRequest,XMLHttpRequestEventTarget,XMLHttpRequestUpload,IDBRequest,IDBOpenDBRequest,IDBDatabase,IDBTransaction,IDBCursor,DBIndex,WebSocket' .split(','); const EVENT_TARGET = 'EventTarget'; let apis: any[] = []; const isWtf = _global['wtf']; const WTF_ISSUE_555_ARRAY = WTF_ISSUE_555.split(','); if (isWtf) { // Workaround for: https://github.com/google/tracing-framework/issues/555 apis = WTF_ISSUE_555_ARRAY.map((v) => 'HTML' + v + 'Element').concat(NO_EVENT_TARGET); } else if (_global[EVENT_TARGET]) { apis.push(EVENT_TARGET); } else { // Note: EventTarget is not available in all browsers, // if it's not available, we instead patch the APIs in the IDL that inherit from EventTarget apis = NO_EVENT_TARGET; } const isDisableIECheck = _global['__Zone_disable_IE_check'] || false; const isEnableCrossContextCheck = _global['__Zone_enable_cross_context_check'] || false; const ieOrEdge = api.isIEOrEdge(); const ADD_EVENT_LISTENER_SOURCE = '.addEventListener:'; const FUNCTION_WRAPPER = '[object FunctionWrapper]'; const BROWSER_TOOLS = 'function __BROWSERTOOLS_CONSOLE_SAFEFUNC() { [native code] }'; const pointerEventsMap: {[key: string]: string} = { 'MSPointerCancel': 'pointercancel', 'MSPointerDown': 'pointerdown', 'MSPointerEnter': 'pointerenter', 'MSPointerHover': 'pointerhover', 'MSPointerLeave': 'pointerleave', 'MSPointerMove': 'pointermove', 'MSPointerOut': 'pointerout', 'MSPointerOver': 'pointerover', 'MSPointerUp': 'pointerup' }; // predefine all __zone_symbol__ + eventName + true/false string for (let i = 0; i < eventNames.length; i++) { const eventName = eventNames[i]; const falseEventName = eventName + FALSE_STR; const trueEventName = eventName + TRUE_STR; const symbol = ZONE_SYMBOL_PREFIX + falseEventName; const symbolCapture = ZONE_SYMBOL_PREFIX + trueEventName; zoneSymbolEventNames[eventName] = {}; zoneSymbolEventNames[eventName][FALSE_STR] = symbol; zoneSymbolEventNames[eventName][TRUE_STR] = symbolCapture; } // predefine all task.source string for (let i = 0; i < WTF_ISSUE_555_ARRAY.length; i++) { const target: any = WTF_ISSUE_555_ARRAY[i]; const targets: any = globalSources[target] = {}; for (let j = 0; j < eventNames.length; j++) { const eventName = eventNames[j]; targets[eventName] = target + ADD_EVENT_LISTENER_SOURCE + eventName; } } const checkIEAndCrossContext = function( nativeDelegate: any, delegate: any, target: any, args: any) { if (!isDisableIECheck && ieOrEdge) { if (isEnableCrossContextCheck) { 
try { const testString = delegate.toString(); if ((testString === FUNCTION_WRAPPER || testString == BROWSER_TOOLS)) { nativeDelegate.apply(target, args); return false; } } catch (error) { nativeDelegate.apply(target, args); return false; } } else { const testString = delegate.toString(); if ((testString === FUNCTION_WRAPPER || testString == BROWSER_TOOLS)) { nativeDelegate.apply(target, args); return false; } } } else if (isEnableCrossContextCheck) { try { delegate.toString(); } catch (error) { nativeDelegate.apply(target, args); return false; } } return true; }; const apiTypes: any[] = []; for (let i = 0; i < apis.length; i++) { const type = _global[apis[i]]; apiTypes.push(type && type.prototype); } // vh is validateHandler to check event handler // is valid or not(for security check) api.patchEventTarget(_global, apiTypes, { vh: checkIEAndCrossContext, transferEventName: (eventName: string) => { const pointerEventName = pointerEventsMap[eventName]; return pointerEventName || eventName; } }); (Zone as any)[api.symbol('patchEventTarget')] = !!_global[EVENT_TARGET]; return true; } export function patchEvent(global: any, api: _ZonePrivate)
{ api.patchEventPrototype(global, api); }
identifier_body
event-target-legacy.ts
/** * @license * Copyright Google Inc. All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ export function eventTargetLegacyPatch(_global: any, api: _ZonePrivate) { const {eventNames, globalSources, zoneSymbolEventNames, TRUE_STR, FALSE_STR, ZONE_SYMBOL_PREFIX} = api.getGlobalObjects() !; const WTF_ISSUE_555 = 'Anchor,Area,Audio,BR,Base,BaseFont,Body,Button,Canvas,Content,DList,Directory,Div,Embed,FieldSet,Font,Form,Frame,FrameSet,HR,Head,Heading,Html,IFrame,Image,Input,Keygen,LI,Label,Legend,Link,Map,Marquee,Media,Menu,Meta,Meter,Mod,OList,Object,OptGroup,Option,Output,Paragraph,Pre,Progress,Quote,Script,Select,Source,Span,Style,TableCaption,TableCell,TableCol,Table,TableRow,TableSection,TextArea,Title,Track,UList,Unknown,Video'; const NO_EVENT_TARGET = 'ApplicationCache,EventSource,FileReader,InputMethodContext,MediaController,MessagePort,Node,Performance,SVGElementInstance,SharedWorker,TextTrack,TextTrackCue,TextTrackList,WebKitNamedFlow,Window,Worker,WorkerGlobalScope,XMLHttpRequest,XMLHttpRequestEventTarget,XMLHttpRequestUpload,IDBRequest,IDBOpenDBRequest,IDBDatabase,IDBTransaction,IDBCursor,DBIndex,WebSocket' .split(','); const EVENT_TARGET = 'EventTarget'; let apis: any[] = []; const isWtf = _global['wtf']; const WTF_ISSUE_555_ARRAY = WTF_ISSUE_555.split(','); if (isWtf) { // Workaround for: https://github.com/google/tracing-framework/issues/555 apis = WTF_ISSUE_555_ARRAY.map((v) => 'HTML' + v + 'Element').concat(NO_EVENT_TARGET); } else if (_global[EVENT_TARGET]) { apis.push(EVENT_TARGET); } else { // Note: EventTarget is not available in all browsers, // if it's not available, we instead patch the APIs in the IDL that inherit from EventTarget apis = NO_EVENT_TARGET; } const isDisableIECheck = _global['__Zone_disable_IE_check'] || false; const isEnableCrossContextCheck = _global['__Zone_enable_cross_context_check'] || false;
const ieOrEdge = api.isIEOrEdge(); const ADD_EVENT_LISTENER_SOURCE = '.addEventListener:'; const FUNCTION_WRAPPER = '[object FunctionWrapper]'; const BROWSER_TOOLS = 'function __BROWSERTOOLS_CONSOLE_SAFEFUNC() { [native code] }'; const pointerEventsMap: {[key: string]: string} = { 'MSPointerCancel': 'pointercancel', 'MSPointerDown': 'pointerdown', 'MSPointerEnter': 'pointerenter', 'MSPointerHover': 'pointerhover', 'MSPointerLeave': 'pointerleave', 'MSPointerMove': 'pointermove', 'MSPointerOut': 'pointerout', 'MSPointerOver': 'pointerover', 'MSPointerUp': 'pointerup' }; // predefine all __zone_symbol__ + eventName + true/false string for (let i = 0; i < eventNames.length; i++) { const eventName = eventNames[i]; const falseEventName = eventName + FALSE_STR; const trueEventName = eventName + TRUE_STR; const symbol = ZONE_SYMBOL_PREFIX + falseEventName; const symbolCapture = ZONE_SYMBOL_PREFIX + trueEventName; zoneSymbolEventNames[eventName] = {}; zoneSymbolEventNames[eventName][FALSE_STR] = symbol; zoneSymbolEventNames[eventName][TRUE_STR] = symbolCapture; } // predefine all task.source string for (let i = 0; i < WTF_ISSUE_555_ARRAY.length; i++) { const target: any = WTF_ISSUE_555_ARRAY[i]; const targets: any = globalSources[target] = {}; for (let j = 0; j < eventNames.length; j++) { const eventName = eventNames[j]; targets[eventName] = target + ADD_EVENT_LISTENER_SOURCE + eventName; } } const checkIEAndCrossContext = function( nativeDelegate: any, delegate: any, target: any, args: any) { if (!isDisableIECheck && ieOrEdge) { if (isEnableCrossContextCheck) { try { const testString = delegate.toString(); if ((testString === FUNCTION_WRAPPER || testString == BROWSER_TOOLS)) { nativeDelegate.apply(target, args); return false; } } catch (error) { nativeDelegate.apply(target, args); return false; } } else { const testString = delegate.toString(); if ((testString === FUNCTION_WRAPPER || testString == BROWSER_TOOLS)) { nativeDelegate.apply(target, args); return false; } } } else if (isEnableCrossContextCheck) { try { delegate.toString(); } catch (error) { nativeDelegate.apply(target, args); return false; } } return true; }; const apiTypes: any[] = []; for (let i = 0; i < apis.length; i++) { const type = _global[apis[i]]; apiTypes.push(type && type.prototype); } // vh is validateHandler to check event handler // is valid or not(for security check) api.patchEventTarget(_global, apiTypes, { vh: checkIEAndCrossContext, transferEventName: (eventName: string) => { const pointerEventName = pointerEventsMap[eventName]; return pointerEventName || eventName; } }); (Zone as any)[api.symbol('patchEventTarget')] = !!_global[EVENT_TARGET]; return true; } export function patchEvent(global: any, api: _ZonePrivate) { api.patchEventPrototype(global, api); }
random_line_split
event-target-legacy.ts
/** * @license * Copyright Google Inc. All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ export function eventTargetLegacyPatch(_global: any, api: _ZonePrivate) { const {eventNames, globalSources, zoneSymbolEventNames, TRUE_STR, FALSE_STR, ZONE_SYMBOL_PREFIX} = api.getGlobalObjects() !; const WTF_ISSUE_555 = 'Anchor,Area,Audio,BR,Base,BaseFont,Body,Button,Canvas,Content,DList,Directory,Div,Embed,FieldSet,Font,Form,Frame,FrameSet,HR,Head,Heading,Html,IFrame,Image,Input,Keygen,LI,Label,Legend,Link,Map,Marquee,Media,Menu,Meta,Meter,Mod,OList,Object,OptGroup,Option,Output,Paragraph,Pre,Progress,Quote,Script,Select,Source,Span,Style,TableCaption,TableCell,TableCol,Table,TableRow,TableSection,TextArea,Title,Track,UList,Unknown,Video'; const NO_EVENT_TARGET = 'ApplicationCache,EventSource,FileReader,InputMethodContext,MediaController,MessagePort,Node,Performance,SVGElementInstance,SharedWorker,TextTrack,TextTrackCue,TextTrackList,WebKitNamedFlow,Window,Worker,WorkerGlobalScope,XMLHttpRequest,XMLHttpRequestEventTarget,XMLHttpRequestUpload,IDBRequest,IDBOpenDBRequest,IDBDatabase,IDBTransaction,IDBCursor,DBIndex,WebSocket' .split(','); const EVENT_TARGET = 'EventTarget'; let apis: any[] = []; const isWtf = _global['wtf']; const WTF_ISSUE_555_ARRAY = WTF_ISSUE_555.split(','); if (isWtf) { // Workaround for: https://github.com/google/tracing-framework/issues/555 apis = WTF_ISSUE_555_ARRAY.map((v) => 'HTML' + v + 'Element').concat(NO_EVENT_TARGET); } else if (_global[EVENT_TARGET]) { apis.push(EVENT_TARGET); } else
const isDisableIECheck = _global['__Zone_disable_IE_check'] || false; const isEnableCrossContextCheck = _global['__Zone_enable_cross_context_check'] || false; const ieOrEdge = api.isIEOrEdge(); const ADD_EVENT_LISTENER_SOURCE = '.addEventListener:'; const FUNCTION_WRAPPER = '[object FunctionWrapper]'; const BROWSER_TOOLS = 'function __BROWSERTOOLS_CONSOLE_SAFEFUNC() { [native code] }'; const pointerEventsMap: {[key: string]: string} = { 'MSPointerCancel': 'pointercancel', 'MSPointerDown': 'pointerdown', 'MSPointerEnter': 'pointerenter', 'MSPointerHover': 'pointerhover', 'MSPointerLeave': 'pointerleave', 'MSPointerMove': 'pointermove', 'MSPointerOut': 'pointerout', 'MSPointerOver': 'pointerover', 'MSPointerUp': 'pointerup' }; // predefine all __zone_symbol__ + eventName + true/false string for (let i = 0; i < eventNames.length; i++) { const eventName = eventNames[i]; const falseEventName = eventName + FALSE_STR; const trueEventName = eventName + TRUE_STR; const symbol = ZONE_SYMBOL_PREFIX + falseEventName; const symbolCapture = ZONE_SYMBOL_PREFIX + trueEventName; zoneSymbolEventNames[eventName] = {}; zoneSymbolEventNames[eventName][FALSE_STR] = symbol; zoneSymbolEventNames[eventName][TRUE_STR] = symbolCapture; } // predefine all task.source string for (let i = 0; i < WTF_ISSUE_555_ARRAY.length; i++) { const target: any = WTF_ISSUE_555_ARRAY[i]; const targets: any = globalSources[target] = {}; for (let j = 0; j < eventNames.length; j++) { const eventName = eventNames[j]; targets[eventName] = target + ADD_EVENT_LISTENER_SOURCE + eventName; } } const checkIEAndCrossContext = function( nativeDelegate: any, delegate: any, target: any, args: any) { if (!isDisableIECheck && ieOrEdge) { if (isEnableCrossContextCheck) { try { const testString = delegate.toString(); if ((testString === FUNCTION_WRAPPER || testString == BROWSER_TOOLS)) { nativeDelegate.apply(target, args); return false; } } catch (error) { nativeDelegate.apply(target, args); return false; } } else { const testString = delegate.toString(); if ((testString === FUNCTION_WRAPPER || testString == BROWSER_TOOLS)) { nativeDelegate.apply(target, args); return false; } } } else if (isEnableCrossContextCheck) { try { delegate.toString(); } catch (error) { nativeDelegate.apply(target, args); return false; } } return true; }; const apiTypes: any[] = []; for (let i = 0; i < apis.length; i++) { const type = _global[apis[i]]; apiTypes.push(type && type.prototype); } // vh is validateHandler to check event handler // is valid or not(for security check) api.patchEventTarget(_global, apiTypes, { vh: checkIEAndCrossContext, transferEventName: (eventName: string) => { const pointerEventName = pointerEventsMap[eventName]; return pointerEventName || eventName; } }); (Zone as any)[api.symbol('patchEventTarget')] = !!_global[EVENT_TARGET]; return true; } export function patchEvent(global: any, api: _ZonePrivate) { api.patchEventPrototype(global, api); }
{ // Note: EventTarget is not available in all browsers, // if it's not available, we instead patch the APIs in the IDL that inherit from EventTarget apis = NO_EVENT_TARGET; }
conditional_block
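The patch above precomputes '__zone_symbol__' keys for every event name and capture flag so the addEventListener hooks can do cheap property lookups instead of repeated string concatenation. A self-contained sketch of that lookup-table construction; the constants mirror the ones destructured from api.getGlobalObjects():

```typescript
const ZONE_SYMBOL_PREFIX = '__zone_symbol__';
const TRUE_STR = 'true';
const FALSE_STR = 'false';

type SymbolTable = { [event: string]: { [capture: string]: string } };

// Build both capture variants up front, one entry per event name.
function buildZoneSymbolEventNames(eventNames: string[]): SymbolTable {
    const table: SymbolTable = {};
    for (const eventName of eventNames) {
        table[eventName] = {
            [FALSE_STR]: ZONE_SYMBOL_PREFIX + eventName + FALSE_STR,
            [TRUE_STR]: ZONE_SYMBOL_PREFIX + eventName + TRUE_STR,
        };
    }
    return table;
}

console.log(buildZoneSymbolEventNames(['click'])['click'][TRUE_STR]);
// -> __zone_symbol__clicktrue
```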
inject.js
/* Copyright (C) 2016 R&D Solutions Ltd. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ 'use strict'; var path = require('path'); var gulp = require('gulp'); var conf = require('./conf'); var $ = require('gulp-load-plugins')(); var wiredep = require('wiredep').stream; var _ = require('lodash'); var browserSync = require('browser-sync'); gulp.task('inject-reload', ['inject'], function () { browserSync.reload(); }); gulp.task('inject', ['scripts'], function () {
var injectStyles = gulp.src([ path.join(conf.paths.src, '/app/**/*.css') ], {read: false}); var injectScripts = gulp.src([ path.join(conf.paths.src, '/app/**/*.main.js'), path.join(conf.paths.src, '/app/**/*.js'), path.join('!' + conf.paths.src, '/app/dataTables/*.js'), path.join('!' + conf.paths.src, '/app/**/bootstrap.js'), path.join('!' + conf.paths.src, '/app/**/quick-sidebar.js'), path.join('!' + conf.paths.src, '/app/**/app.js'), path.join('!' + conf.paths.src, '/app/**/layout.js'), path.join('!' + conf.paths.src, '/app/**/*.spec.js'), path.join('!' + conf.paths.src, '/app/**/*.mock.js'), path.join('!' + conf.paths.src, '/app/**/jstree.min.js'), path.join('!' + conf.paths.src, '/app/**/ngJsTree.min.js'), path.join('!' + conf.paths.src, '/app/**/ng-infinite-scroll.min.js'), path.join('!' + conf.paths.src, '/app/**/bootstrap-switch.js') ]) .pipe($.angularFilesort()).on('error', conf.errorHandler('AngularFilesort')); // var injectCustomScripts = gulp.src([ // path.join(conf.paths.src, '/app/js/app.js'), // path.join(conf.paths.src, '/app/js/layout.js'), // path.join(conf.paths.src, '/app/js/quick-sidebar.js') // ]).pipe($.angularFilesort()).on('error', conf.errorHandler('AngularFilesort')); var injectOptions = { ignorePath: [conf.paths.src, path.join(conf.paths.tmp, '/serve')], addRootSlash: false }; return gulp.src(path.join(conf.paths.src, '/*.html')) .pipe($.inject(injectStyles, injectOptions)) .pipe($.inject(injectScripts, injectOptions)) .pipe(wiredep(_.extend({}, conf.wiredep))) .pipe(gulp.dest(path.join(conf.paths.tmp, '/serve'))); });
random_line_split
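For context, a trimmed-down sketch of the task above shows the essential gulp-inject wiring: collect style and script streams with `read: false`, then pipe the HTML through inject with `ignorePath` so the generated tags are relative to the serve directory. Paths are placeholders, gulp-inject is required directly instead of via gulp-load-plugins, and the angular-filesort and wiredep steps are omitted.

// Trimmed-down sketch of the inject task above (placeholder globs;
// angular-filesort and wiredep omitted).
const gulp = require('gulp');
const inject = require('gulp-inject');

gulp.task('inject-minimal', function () {
  const styles = gulp.src(['src/app/**/*.css'], {read: false});
  const scripts = gulp.src(['src/app/**/*.js', '!src/app/**/*.spec.js'], {read: false});

  return gulp.src('src/*.html')
    .pipe(inject(styles, {ignorePath: 'src', addRootSlash: false}))
    .pipe(inject(scripts, {ignorePath: 'src', addRootSlash: false}))
    .pipe(gulp.dest('.tmp/serve'));
});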
filter_library.py
import os
import re
import urllib.request
import warnings
import xml.etree.ElementTree as ET
from collections import defaultdict

import astropy.units as u
import numpy as np
import pandas as pd
import yaml
from astropy.io import votable

import speclite.filters as spec_filter

from threeML.io.configuration import get_user_data_path
from threeML.io.file_utils import (
    if_directory_not_existing_then_make,
    file_existing_and_readable,
)
from threeML.io.network import internet_connection_is_active
from threeML.io.package_data import get_path_of_data_dir


def get_speclite_filter_path():
    return os.path.join(get_path_of_data_dir(), "optical_filters")


def to_valid_python_name(name):
    new_name = name.replace("-", "_")

    try:
        int(new_name[0])

        new_name = "f_%s" % new_name

        return new_name

    except (ValueError):
        return new_name


class ObservatoryNode(object):
    def __init__(self, sub_dict):
        self._sub_dict = sub_dict

    def __repr__(self):
        return yaml.dump(self._sub_dict, default_flow_style=False)


class FilterLibrary(object):
    def __init__(self, library_file):
        """
        holds all the observatories/instruments/filters

        :param library_file:
        """

        # get the filter file

        with open(library_file) as f:
            self._library = yaml.load(f, Loader=yaml.SafeLoader)

        self._instruments = []

        # create attributes which are lib.observatory.instrument
        # and the instrument attributes are speclite FilterResponse objects

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")

            print("Loading optical filters")

            for observatory, value in self._library.items():
                # create a node for the observatory
                this_node = ObservatoryNode(value)

                # attach it to the object
                setattr(self, observatory, this_node)

                # now get the instruments
                for instrument, value2 in value.items():
                    # update the instruments
                    self._instruments.append(instrument)

                    # create the filter response via speclite
                    filter_path = os.path.join(
                        get_speclite_filter_path(), observatory, instrument
                    )

                    filters_to_load = [
                        "%s-%s.ecsv" % (filter_path, filter) for filter in value2
                    ]

                    this_filter = spec_filter.load_filters(*filters_to_load)

                    # attach the filters to the observatory
                    setattr(this_node, instrument, this_filter)

        self._instruments.sort()

    @property
    def instruments(self):
        return self._instruments

    def __repr__(self):
        return yaml.dump(self._library, default_flow_style=False)


def add_svo_filter_to_speclite(observatory, instrument, ffilter, update=False):
    """
    download an SVO filter file and then add it to the user library
    :param observatory:
    :param instrument:
    :param ffilter:
    :return:
    """

    # make a directory for this observatory and instrument
    filter_path = os.path.join(
        get_speclite_filter_path(), to_valid_python_name(observatory)
    )

    if_directory_not_existing_then_make(filter_path)

    # grab the filter file from SVO

    # reconvert 2MASS so we can grab it

    if observatory == "TwoMASS":
        observatory = "2MASS"

    if (
        not file_existing_and_readable(
            os.path.join(
                filter_path,
                "%s-%s.ecsv"
                % (to_valid_python_name(instrument), to_valid_python_name(ffilter)),
            )
        )
        or update
    ):
        url_response = urllib.request.urlopen(
            "http://svo2.cab.inta-csic.es/svo/theory/fps/fps.php?PhotCalID=%s/%s.%s/AB"
            % (observatory, instrument, ffilter)
        )

        # now parse it
        data = votable.parse_single_table(url_response).to_table()

        # save the waveunit

        waveunit = data["Wavelength"].unit

        # the filter files are masked arrays, which do not go to zero on
        # the boundaries. This confuses speclite and will throw an error.
# so we add a zero on the boundaries if data["Transmission"][0] != 0.0: w1 = data["Wavelength"][0] * 0.9 data.insert_row(0, [w1, 0]) if data["Transmission"][-1] != 0.0: w2 = data["Wavelength"][-1] * 1.1 data.add_row([w2, 0]) # filter any negative values idx = data["Transmission"] < 0 data["Transmission"][idx] = 0 # build the transmission. # we will force all the wavelengths # to Angstroms because sometimes AA is misunderstood try: transmission = spec_filter.FilterResponse( wavelength=data["Wavelength"] * waveunit.to("Angstrom") * u.Angstrom, response=data["Transmission"], meta=dict( group_name=to_valid_python_name(instrument), band_name=to_valid_python_name(ffilter), ), ) # save the filter transmission.save(filter_path) success = True except (ValueError): success = False print( "%s:%s:%s has an invalid wave table, SKIPPING" % (observatory, instrument, ffilter) ) return success else: return True def download_SVO_filters(filter_dict, update=False): """ download the filters sets from the SVO repository :return: """ # to group the observatory / instrument / filters search_name = re.compile("^(.*)\/(.*)\.(.*)$") # load the SVO meta XML file svo_url = "http://svo2.cab.inta-csic.es/svo/theory/fps/fps.php?" url_response = urllib.request.urlopen(svo_url) # the normal VO parser cannot read the XML table # so we manually do it to obtain all the instrument names tree = ET.parse(url_response) observatories = [] for elem in tree.iter(tag="PARAM"): if elem.attrib["name"] == "INPUT:Facility": for child in list(elem): if child.tag == "VALUES": for child2 in list(child): val = child2.attrib["value"] if val != "": observatories.append(val) # now we are going to build a multi-layer dictionary # observatory:instrument:filter for obs in observatories: # fix 2MASS to a valid name
print("Downloading %s filters" % (obs)) for x in instruments: _, instrument, subfilter = search_name.match(x).groups() success = add_svo_filter_to_speclite(obs, instrument, subfilter, update) if success: instrument_dict[to_valid_python_name(instrument)].append( to_valid_python_name(subfilter) ) # attach this to the big dictionary filter_dict[to_valid_python_name(obs)] = dict(instrument_dict) except (IndexError): pass return filter_dict def download_grond(filter_dict): save_path = os.path.join(get_speclite_filter_path(), "ESO") if_directory_not_existing_then_make(save_path) grond_filter_url = "http://www.mpe.mpg.de/~jcg/GROND/GROND_filtercurves.txt" url_response = urllib.request.urlopen(grond_filter_url) grond_table = pd.read_table(url_response) wave = grond_table["A"].as_matrix() bands = ["g", "r", "i", "z", "H", "J", "K"] for band in bands: curve = np.array(grond_table["%sBand" % band]) curve[curve < 0] = 0 curve[0] = 0 curve[-1] = 0 grond_spec = spec_filter.FilterResponse( wavelength=wave * u.nm, response=curve, meta=dict(group_name="GROND", band_name=band), ) grond_spec.save(directory_name=save_path) filter_dict["ESO"] = {"GROND": bands} return filter_dict def build_filter_library(): if not file_existing_and_readable( os.path.join(get_speclite_filter_path(), "filter_lib.yml") ): print("Downloading optical filters. This will take a while.\n") if internet_connection_is_active(): filter_dict = {} filter_dict = download_SVO_filters(filter_dict) filter_dict = download_grond(filter_dict) # Ok, finally, we want to keep track of the SVO filters we have # so we will save this to a YAML file for future reference with open( os.path.join(get_speclite_filter_path(), "filter_lib.yml"), "w"
if obs == "2MASS": obs = "TwoMASS" url_response = urllib.request.urlopen( "http://svo2.cab.inta-csic.es/svo/theory/fps/fps.php?Facility=%s" % obs ) try: # parse the VO table v = votable.parse(url_response) instrument_dict = defaultdict(list) # get the filter names for this observatory instruments = v.get_first_table().to_table()["filterID"].tolist()
conditional_block
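The zero-padding step in add_svo_filter_to_speclite above is what makes SVO's masked transmission tables acceptable to speclite. The same boundary logic, isolated as a pure function (TypeScript sketch with a hypothetical padCurve name, plain arrays instead of astropy tables):

// The boundary zero-padding from add_svo_filter_to_speclite above,
// isolated as a pure function.
function padCurve(wavelength: number[], transmission: number[]): [number[], number[]] {
  const w = [...wavelength];
  const t = transmission.map(v => Math.max(v, 0)); // clip negatives, as above
  if (t[0] !== 0) { w.unshift(w[0] * 0.9); t.unshift(0); }
  if (t[t.length - 1] !== 0) { w.push(w[w.length - 1] * 1.1); t.push(0); }
  return [w, t];
}

console.log(padCurve([4000, 5000], [0.2, 0.3]));
// -> [ [ 3600, 4000, 5000, 5500 ], [ 0, 0.2, 0.3, 0 ] ]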
filter_library.py
import os
import re
import urllib.request
import warnings
import xml.etree.ElementTree as ET
from collections import defaultdict

import astropy.units as u
import numpy as np
import pandas as pd
import yaml
from astropy.io import votable

import speclite.filters as spec_filter

from threeML.io.configuration import get_user_data_path
from threeML.io.file_utils import (
    if_directory_not_existing_then_make,
    file_existing_and_readable,
)
from threeML.io.network import internet_connection_is_active
from threeML.io.package_data import get_path_of_data_dir


def get_speclite_filter_path():
    return os.path.join(get_path_of_data_dir(), "optical_filters")


def to_valid_python_name(name):
    new_name = name.replace("-", "_")

    try:
        int(new_name[0])

        new_name = "f_%s" % new_name

        return new_name

    except (ValueError):
        return new_name


class ObservatoryNode(object):
    def __init__(self, sub_dict):
        self._sub_dict = sub_dict

    def __repr__(self):
        return yaml.dump(self._sub_dict, default_flow_style=False)


class FilterLibrary(object):
    def __init__(self, library_file):
        """
        holds all the observatories/instruments/filters

        :param library_file:
        """

        # get the filter file

        with open(library_file) as f:
            self._library = yaml.load(f, Loader=yaml.SafeLoader)

        self._instruments = []

        # create attributes which are lib.observatory.instrument
        # and the instrument attributes are speclite FilterResponse objects

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")

            print("Loading optical filters")

            for observatory, value in self._library.items():
                # create a node for the observatory
                this_node = ObservatoryNode(value)

                # attach it to the object
                setattr(self, observatory, this_node)

                # now get the instruments
                for instrument, value2 in value.items():
                    # update the instruments
                    self._instruments.append(instrument)

                    # create the filter response via speclite
                    filter_path = os.path.join(
                        get_speclite_filter_path(), observatory, instrument
                    )

                    filters_to_load = [
                        "%s-%s.ecsv" % (filter_path, filter) for filter in value2
                    ]

                    this_filter = spec_filter.load_filters(*filters_to_load)

                    # attach the filters to the observatory
                    setattr(this_node, instrument, this_filter)

        self._instruments.sort()

    @property
    def instruments(self):
        return self._instruments

    def __repr__(self):
        return yaml.dump(self._library, default_flow_style=False)


def add_svo_filter_to_speclite(observatory, instrument, ffilter, update=False):
    """
    download an SVO filter file and then add it to the user library
    :param observatory:
    :param instrument:
    :param ffilter:
    :return:
    """

    # make a directory for this observatory and instrument
    filter_path = os.path.join(
        get_speclite_filter_path(), to_valid_python_name(observatory)
    )

    if_directory_not_existing_then_make(filter_path)

    # grab the filter file from SVO

    # reconvert 2MASS so we can grab it

    if observatory == "TwoMASS":
        observatory = "2MASS"

    if (
        not file_existing_and_readable(
            os.path.join(
                filter_path,
                "%s-%s.ecsv"
                % (to_valid_python_name(instrument), to_valid_python_name(ffilter)),
            )
        )
        or update
    ):
        url_response = urllib.request.urlopen(
            "http://svo2.cab.inta-csic.es/svo/theory/fps/fps.php?PhotCalID=%s/%s.%s/AB"
            % (observatory, instrument, ffilter)
        )

        # now parse it
        data = votable.parse_single_table(url_response).to_table()

        # save the waveunit

        waveunit = data["Wavelength"].unit

        # the filter files are masked arrays, which do not go to zero on
        # the boundaries. This confuses speclite and will throw an error.
# we will force all the wavelengths # to Angstroms because sometimes AA is misunderstood try: transmission = spec_filter.FilterResponse( wavelength=data["Wavelength"] * waveunit.to("Angstrom") * u.Angstrom, response=data["Transmission"], meta=dict( group_name=to_valid_python_name(instrument), band_name=to_valid_python_name(ffilter), ), ) # save the filter transmission.save(filter_path) success = True except (ValueError): success = False print( "%s:%s:%s has an invalid wave table, SKIPPING" % (observatory, instrument, ffilter) ) return success else: return True def download_SVO_filters(filter_dict, update=False): """ download the filters sets from the SVO repository :return: """ # to group the observatory / instrument / filters search_name = re.compile("^(.*)\/(.*)\.(.*)$") # load the SVO meta XML file svo_url = "http://svo2.cab.inta-csic.es/svo/theory/fps/fps.php?" url_response = urllib.request.urlopen(svo_url) # the normal VO parser cannot read the XML table # so we manually do it to obtain all the instrument names tree = ET.parse(url_response) observatories = [] for elem in tree.iter(tag="PARAM"): if elem.attrib["name"] == "INPUT:Facility": for child in list(elem): if child.tag == "VALUES": for child2 in list(child): val = child2.attrib["value"] if val != "": observatories.append(val) # now we are going to build a multi-layer dictionary # observatory:instrument:filter for obs in observatories: # fix 2MASS to a valid name if obs == "2MASS": obs = "TwoMASS" url_response = urllib.request.urlopen( "http://svo2.cab.inta-csic.es/svo/theory/fps/fps.php?Facility=%s" % obs ) try: # parse the VO table v = votable.parse(url_response) instrument_dict = defaultdict(list) # get the filter names for this observatory instruments = v.get_first_table().to_table()["filterID"].tolist() print("Downloading %s filters" % (obs)) for x in instruments: _, instrument, subfilter = search_name.match(x).groups() success = add_svo_filter_to_speclite(obs, instrument, subfilter, update) if success: instrument_dict[to_valid_python_name(instrument)].append( to_valid_python_name(subfilter) ) # attach this to the big dictionary filter_dict[to_valid_python_name(obs)] = dict(instrument_dict) except (IndexError): pass return filter_dict def download_grond(filter_dict): save_path = os.path.join(get_speclite_filter_path(), "ESO") if_directory_not_existing_then_make(save_path) grond_filter_url = "http://www.mpe.mpg.de/~jcg/GROND/GROND_filtercurves.txt" url_response = urllib.request.urlopen(grond_filter_url) grond_table = pd.read_table(url_response) wave = grond_table["A"].as_matrix() bands = ["g", "r", "i", "z", "H", "J", "K"] for band in bands: curve = np.array(grond_table["%sBand" % band]) curve[curve < 0] = 0 curve[0] = 0 curve[-1] = 0 grond_spec = spec_filter.FilterResponse( wavelength=wave * u.nm, response=curve, meta=dict(group_name="GROND", band_name=band), ) grond_spec.save(directory_name=save_path) filter_dict["ESO"] = {"GROND": bands} return filter_dict def build_filter_library(): if not file_existing_and_readable( os.path.join(get_speclite_filter_path(), "filter_lib.yml") ): print("Downloading optical filters. 
This will take a while.\n") if internet_connection_is_active(): filter_dict = {} filter_dict = download_SVO_filters(filter_dict) filter_dict = download_grond(filter_dict) # Ok, finally, we want to keep track of the SVO filters we have # so we will save this to a YAML file for future reference with open( os.path.join(get_speclite_filter_path(), "filter_lib.yml"), "w" ) as f: yaml.safe_dump(filter_dict, f, default_flow_style=False) return True else: print( "You do not have the 3ML filter library and you do not have an active internet connection." ) print("Please connect to the internet to use the 3ML filter library.") print("pyspeclite filter library is still available.")
random_line_split
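to_valid_python_name above is the small but load-bearing helper: filter IDs like `2MASS` or `SDSS-u` must become legal Python attribute names before setattr can expose them. A TypeScript port of the same rule, with the digit test made explicit:

// TypeScript port of to_valid_python_name above: hyphens become
// underscores, and a leading digit gets the "f_" prefix.
function toValidPythonName(name: string): string {
  const newName = name.replace(/-/g, '_');
  return /^[0-9]/.test(newName) ? `f_${newName}` : newName;
}

console.log(toValidPythonName('2MASS'));  // f_2MASS
console.log(toValidPythonName('SDSS-u')); // SDSS_u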
filter_library.py
import os
import re
import urllib.request
import warnings
import xml.etree.ElementTree as ET
from collections import defaultdict

import astropy.units as u
import numpy as np
import pandas as pd
import yaml
from astropy.io import votable

import speclite.filters as spec_filter

from threeML.io.configuration import get_user_data_path
from threeML.io.file_utils import (
    if_directory_not_existing_then_make,
    file_existing_and_readable,
)
from threeML.io.network import internet_connection_is_active
from threeML.io.package_data import get_path_of_data_dir


def get_speclite_filter_path():
    return os.path.join(get_path_of_data_dir(), "optical_filters")


def to_valid_python_name(name):
    new_name = name.replace("-", "_")

    try:
        int(new_name[0])

        new_name = "f_%s" % new_name

        return new_name

    except (ValueError):
        return new_name


class
(object): def __init__(self, sub_dict): self._sub_dict = sub_dict def __repr__(self): return yaml.dump(self._sub_dict, default_flow_style=False) class FilterLibrary(object): def __init__(self, library_file): """ holds all the observatories/instruments/filters :param library_file: """ # get the filter file with open(library_file) as f: self._library = yaml.load(f, Loader=yaml.SafeLoader) self._instruments = [] # create attributes which are lib.observatory.instrument # and the instrument attributes are speclite FilterResponse objects with warnings.catch_warnings(): warnings.simplefilter("ignore") print("Loading optical filters") for observatory, value in self._library.items(): # create a node for the observatory this_node = ObservatoryNode(value) # attach it to the object setattr(self, observatory, this_node) # now get the instruments for instrument, value2 in value.items(): # update the instruments self._instruments.append(instrument) # create the filter response via speclite filter_path = os.path.join( get_speclite_filter_path(), observatory, instrument ) filters_to_load = [ "%s-%s.ecsv" % (filter_path, filter) for filter in value2 ] this_filter = spec_filter.load_filters(*filters_to_load) # attach the filters to the observatory setattr(this_node, instrument, this_filter) self._instruments.sort() @property def instruments(self): return self._instruments def __repr__(self): return yaml.dump(self._library, default_flow_style=False) def add_svo_filter_to_speclite(observatory, instrument, ffilter, update=False): """ download an SVO filter file and then add it to the user library :param observatory: :param instrument: :param ffilter: :return: """ # make a directory for this observatory and instrument filter_path = os.path.join( get_speclite_filter_path(), to_valid_python_name(observatory) ) if_directory_not_existing_then_make(filter_path) # grab the filter file from SVO # reconvert 2MASS so we can grab it if observatory == "TwoMASS": observatory = "2MASS" if ( not file_existing_and_readable( os.path.join( filter_path, "%s-%s.ecsv" % (to_valid_python_name(instrument), to_valid_python_name(ffilter)), ) ) or update ): url_response = urllib.request.urlopen( "http://svo2.cab.inta-csic.es/svo/theory/fps/fps.php?PhotCalID=%s/%s.%s/AB" % (observatory, instrument, ffilter) ) # now parse it data = votable.parse_single_table(url_response).to_table() # save the waveunit waveunit = data["Wavelength"].unit # the filter files are masked arrays, which do not go to zero on # the boundaries. This confuses speclite and will throw an error. # so we add a zero on the boundaries if data["Transmission"][0] != 0.0: w1 = data["Wavelength"][0] * 0.9 data.insert_row(0, [w1, 0]) if data["Transmission"][-1] != 0.0: w2 = data["Wavelength"][-1] * 1.1 data.add_row([w2, 0]) # filter any negative values idx = data["Transmission"] < 0 data["Transmission"][idx] = 0 # build the transmission. 
# we will force all the wavelengths # to Angstroms because sometimes AA is misunderstood try: transmission = spec_filter.FilterResponse( wavelength=data["Wavelength"] * waveunit.to("Angstrom") * u.Angstrom, response=data["Transmission"], meta=dict( group_name=to_valid_python_name(instrument), band_name=to_valid_python_name(ffilter), ), ) # save the filter transmission.save(filter_path) success = True except (ValueError): success = False print( "%s:%s:%s has an invalid wave table, SKIPPING" % (observatory, instrument, ffilter) ) return success else: return True def download_SVO_filters(filter_dict, update=False): """ download the filters sets from the SVO repository :return: """ # to group the observatory / instrument / filters search_name = re.compile("^(.*)\/(.*)\.(.*)$") # load the SVO meta XML file svo_url = "http://svo2.cab.inta-csic.es/svo/theory/fps/fps.php?" url_response = urllib.request.urlopen(svo_url) # the normal VO parser cannot read the XML table # so we manually do it to obtain all the instrument names tree = ET.parse(url_response) observatories = [] for elem in tree.iter(tag="PARAM"): if elem.attrib["name"] == "INPUT:Facility": for child in list(elem): if child.tag == "VALUES": for child2 in list(child): val = child2.attrib["value"] if val != "": observatories.append(val) # now we are going to build a multi-layer dictionary # observatory:instrument:filter for obs in observatories: # fix 2MASS to a valid name if obs == "2MASS": obs = "TwoMASS" url_response = urllib.request.urlopen( "http://svo2.cab.inta-csic.es/svo/theory/fps/fps.php?Facility=%s" % obs ) try: # parse the VO table v = votable.parse(url_response) instrument_dict = defaultdict(list) # get the filter names for this observatory instruments = v.get_first_table().to_table()["filterID"].tolist() print("Downloading %s filters" % (obs)) for x in instruments: _, instrument, subfilter = search_name.match(x).groups() success = add_svo_filter_to_speclite(obs, instrument, subfilter, update) if success: instrument_dict[to_valid_python_name(instrument)].append( to_valid_python_name(subfilter) ) # attach this to the big dictionary filter_dict[to_valid_python_name(obs)] = dict(instrument_dict) except (IndexError): pass return filter_dict def download_grond(filter_dict): save_path = os.path.join(get_speclite_filter_path(), "ESO") if_directory_not_existing_then_make(save_path) grond_filter_url = "http://www.mpe.mpg.de/~jcg/GROND/GROND_filtercurves.txt" url_response = urllib.request.urlopen(grond_filter_url) grond_table = pd.read_table(url_response) wave = grond_table["A"].as_matrix() bands = ["g", "r", "i", "z", "H", "J", "K"] for band in bands: curve = np.array(grond_table["%sBand" % band]) curve[curve < 0] = 0 curve[0] = 0 curve[-1] = 0 grond_spec = spec_filter.FilterResponse( wavelength=wave * u.nm, response=curve, meta=dict(group_name="GROND", band_name=band), ) grond_spec.save(directory_name=save_path) filter_dict["ESO"] = {"GROND": bands} return filter_dict def build_filter_library(): if not file_existing_and_readable( os.path.join(get_speclite_filter_path(), "filter_lib.yml") ): print("Downloading optical filters. This will take a while.\n") if internet_connection_is_active(): filter_dict = {} filter_dict = download_SVO_filters(filter_dict) filter_dict = download_grond(filter_dict) # Ok, finally, we want to keep track of the SVO filters we have # so we will save this to a YAML file for future reference with open( os.path.join(get_speclite_filter_path(), "filter_lib.yml"), "w
ObservatoryNode
identifier_name
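FilterLibrary turns the YAML mapping into attribute access (for example `lib.ESO.GROND`) by attaching one ObservatoryNode per observatory via setattr. The equivalent lookup structure as a plain nested object, using the GROND bands defined in download_grond later in this file:

// The lib.<observatory>.<instrument> lookup that FilterLibrary builds via
// setattr, expressed as a plain nested object.
const library: {[obs: string]: {[instrument: string]: string[]}} = {
  ESO: {GROND: ['g', 'r', 'i', 'z', 'H', 'J', 'K']},
};

console.log(library.ESO.GROND.join(', ')); // g, r, i, z, H, J, K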
filter_library.py
import os
import re
import urllib.request
import warnings
import xml.etree.ElementTree as ET
from collections import defaultdict

import astropy.units as u
import numpy as np
import pandas as pd
import yaml
from astropy.io import votable

import speclite.filters as spec_filter

from threeML.io.configuration import get_user_data_path
from threeML.io.file_utils import (
    if_directory_not_existing_then_make,
    file_existing_and_readable,
)
from threeML.io.network import internet_connection_is_active
from threeML.io.package_data import get_path_of_data_dir


def get_speclite_filter_path():
    return os.path.join(get_path_of_data_dir(), "optical_filters")


def to_valid_python_name(name):
    new_name = name.replace("-", "_")

    try:
        int(new_name[0])

        new_name = "f_%s" % new_name

        return new_name

    except (ValueError):
        return new_name


class ObservatoryNode(object):
    def __init__(self, sub_dict):
        self._sub_dict = sub_dict

    def __repr__(self):
        return yaml.dump(self._sub_dict, default_flow_style=False)


class FilterLibrary(object):
    def __init__(self, library_file):
print("Loading optical filters") for observatory, value in self._library.items(): # create a node for the observatory this_node = ObservatoryNode(value) # attach it to the object setattr(self, observatory, this_node) # now get the instruments for instrument, value2 in value.items(): # update the instruments self._instruments.append(instrument) # create the filter response via speclite filter_path = os.path.join( get_speclite_filter_path(), observatory, instrument ) filters_to_load = [ "%s-%s.ecsv" % (filter_path, filter) for filter in value2 ] this_filter = spec_filter.load_filters(*filters_to_load) # attach the filters to the observatory setattr(this_node, instrument, this_filter) self._instruments.sort() @property def instruments(self): return self._instruments def __repr__(self): return yaml.dump(self._library, default_flow_style=False) def add_svo_filter_to_speclite(observatory, instrument, ffilter, update=False): """ download an SVO filter file and then add it to the user library :param observatory: :param instrument: :param ffilter: :return: """ # make a directory for this observatory and instrument filter_path = os.path.join( get_speclite_filter_path(), to_valid_python_name(observatory) ) if_directory_not_existing_then_make(filter_path) # grab the filter file from SVO # reconvert 2MASS so we can grab it if observatory == "TwoMASS": observatory = "2MASS" if ( not file_existing_and_readable( os.path.join( filter_path, "%s-%s.ecsv" % (to_valid_python_name(instrument), to_valid_python_name(ffilter)), ) ) or update ): url_response = urllib.request.urlopen( "http://svo2.cab.inta-csic.es/svo/theory/fps/fps.php?PhotCalID=%s/%s.%s/AB" % (observatory, instrument, ffilter) ) # now parse it data = votable.parse_single_table(url_response).to_table() # save the waveunit waveunit = data["Wavelength"].unit # the filter files are masked arrays, which do not go to zero on # the boundaries. This confuses speclite and will throw an error. # so we add a zero on the boundaries if data["Transmission"][0] != 0.0: w1 = data["Wavelength"][0] * 0.9 data.insert_row(0, [w1, 0]) if data["Transmission"][-1] != 0.0: w2 = data["Wavelength"][-1] * 1.1 data.add_row([w2, 0]) # filter any negative values idx = data["Transmission"] < 0 data["Transmission"][idx] = 0 # build the transmission. # we will force all the wavelengths # to Angstroms because sometimes AA is misunderstood try: transmission = spec_filter.FilterResponse( wavelength=data["Wavelength"] * waveunit.to("Angstrom") * u.Angstrom, response=data["Transmission"], meta=dict( group_name=to_valid_python_name(instrument), band_name=to_valid_python_name(ffilter), ), ) # save the filter transmission.save(filter_path) success = True except (ValueError): success = False print( "%s:%s:%s has an invalid wave table, SKIPPING" % (observatory, instrument, ffilter) ) return success else: return True def download_SVO_filters(filter_dict, update=False): """ download the filters sets from the SVO repository :return: """ # to group the observatory / instrument / filters search_name = re.compile("^(.*)\/(.*)\.(.*)$") # load the SVO meta XML file svo_url = "http://svo2.cab.inta-csic.es/svo/theory/fps/fps.php?" 
url_response = urllib.request.urlopen(svo_url) # the normal VO parser cannot read the XML table # so we manually do it to obtain all the instrument names tree = ET.parse(url_response) observatories = [] for elem in tree.iter(tag="PARAM"): if elem.attrib["name"] == "INPUT:Facility": for child in list(elem): if child.tag == "VALUES": for child2 in list(child): val = child2.attrib["value"] if val != "": observatories.append(val) # now we are going to build a multi-layer dictionary # observatory:instrument:filter for obs in observatories: # fix 2MASS to a valid name if obs == "2MASS": obs = "TwoMASS" url_response = urllib.request.urlopen( "http://svo2.cab.inta-csic.es/svo/theory/fps/fps.php?Facility=%s" % obs ) try: # parse the VO table v = votable.parse(url_response) instrument_dict = defaultdict(list) # get the filter names for this observatory instruments = v.get_first_table().to_table()["filterID"].tolist() print("Downloading %s filters" % (obs)) for x in instruments: _, instrument, subfilter = search_name.match(x).groups() success = add_svo_filter_to_speclite(obs, instrument, subfilter, update) if success: instrument_dict[to_valid_python_name(instrument)].append( to_valid_python_name(subfilter) ) # attach this to the big dictionary filter_dict[to_valid_python_name(obs)] = dict(instrument_dict) except (IndexError): pass return filter_dict def download_grond(filter_dict): save_path = os.path.join(get_speclite_filter_path(), "ESO") if_directory_not_existing_then_make(save_path) grond_filter_url = "http://www.mpe.mpg.de/~jcg/GROND/GROND_filtercurves.txt" url_response = urllib.request.urlopen(grond_filter_url) grond_table = pd.read_table(url_response) wave = grond_table["A"].as_matrix() bands = ["g", "r", "i", "z", "H", "J", "K"] for band in bands: curve = np.array(grond_table["%sBand" % band]) curve[curve < 0] = 0 curve[0] = 0 curve[-1] = 0 grond_spec = spec_filter.FilterResponse( wavelength=wave * u.nm, response=curve, meta=dict(group_name="GROND", band_name=band), ) grond_spec.save(directory_name=save_path) filter_dict["ESO"] = {"GROND": bands} return filter_dict def build_filter_library(): if not file_existing_and_readable( os.path.join(get_speclite_filter_path(), "filter_lib.yml") ): print("Downloading optical filters. This will take a while.\n") if internet_connection_is_active(): filter_dict = {} filter_dict = download_SVO_filters(filter_dict) filter_dict = download_grond(filter_dict) # Ok, finally, we want to keep track of the SVO filters we have # so we will save this to a YAML file for future reference with open( os.path.join(get_speclite_filter_path(), "filter_lib.yml"), "w"
""" holds all the observatories/instruments/filters :param library_file: """ # get the filter file with open(library_file) as f: self._library = yaml.load(f, Loader=yaml.SafeLoader) self._instruments = [] # create attributes which are lib.observatory.instrument # and the instrument attributes are speclite FilterResponse objects with warnings.catch_warnings(): warnings.simplefilter("ignore")
identifier_body
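Both the loader and add_svo_filter_to_speclite agree on one on-disk convention: `<data_dir>/optical_filters/<observatory>/<instrument>-<filter>.ecsv`. A sketch of that path scheme; the data directory shown is a placeholder, since 3ML resolves it through get_path_of_data_dir():

// The on-disk convention shared by the loader and add_svo_filter_to_speclite:
// <data_dir>/optical_filters/<observatory>/<instrument>-<filter>.ecsv
import * as path from 'path';

function filterFile(dataDir: string, obs: string, instrument: string, band: string): string {
  return path.join(dataDir, 'optical_filters', obs, `${instrument}-${band}.ecsv`);
}

console.log(filterFile('/home/user/.threeml', 'ESO', 'GROND', 'g'));
// /home/user/.threeml/optical_filters/ESO/GROND-g.ecsv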
ShardingManager.js
const path = require('path'); const fs = require('fs'); const EventEmitter = require('events').EventEmitter; const Shard = require('./Shard'); const Collection = require('../util/Collection'); const Util = require('../util/Util'); /** * This is a utility class that can be used to help you spawn shards of your client. Each shard is completely separate * from the other. The Shard Manager takes a path to a file and spawns it under the specified amount of shards safely. * If you do not select an amount of shards, the manager will automatically decide the best amount. * @extends {EventEmitter} */ class ShardingManager extends EventEmitter { /** * @param {string} file Path to your shard script file * @param {Object} [options] Options for the sharding manager * @param {number|string} [options.totalShards='auto'] Number of shards to spawn, or "auto" * @param {boolean} [options.respawn=true] Whether shards should automatically respawn upon exiting * @param {string[]} [options.shardArgs=[]] Arguments to pass to the shard script when spawning * @param {string} [options.token] Token to use for automatic shard count and passing to shards */ constructor(file, options = {}) { super(); options = Util.mergeDefault({ totalShards: 'auto', respawn: true, shardArgs: [], token: null, }, options); /** * Path to the shard script file * @type {string} */ this.file = file; if (!file) throw new Error('File must be specified.'); if (!path.isAbsolute(file)) this.file = path.resolve(process.cwd(), file); const stats = fs.statSync(this.file); if (!stats.isFile()) throw new Error('File path does not point to a file.'); /** * Amount of shards that this manager is going to spawn * @type {number|string} */ this.totalShards = options.totalShards; if (this.totalShards !== 'auto') { if (typeof this.totalShards !== 'number' || isNaN(this.totalShards)) { throw new TypeError('Amount of shards must be a number.'); } if (this.totalShards < 1) throw new RangeError('Amount of shards must be at least 1.'); if (this.totalShards !== Math.floor(this.totalShards)) { throw new RangeError('Amount of shards must be an integer.'); } } /** * Whether shards should automatically respawn upon exiting * @type {boolean} */ this.respawn = options.respawn; /** * An array of arguments to pass to shards * @type {string[]} */ this.shardArgs = options.shardArgs; /** * Token to use for obtaining the automatic shard count, and passing to shards * @type {?string} */ this.token = options.token ? options.token.replace(/^Bot\s*/i, '') : null; /** * A collection of shards that this manager has spawned * @type {Collection<number, Shard>} */ this.shards = new Collection(); } /** * Spawns a single shard. * @param {number} id The ID of the shard to spawn. **This is usually not necessary** * @returns {Promise<Shard>} */ createShard(id = this.shards.size) { const shard = new Shard(this, id, this.shardArgs); this.shards.set(id, shard); /** * Emitted upon launching a shard. * @event ShardingManager#launch * @param {Shard} shard Shard that was launched */ this.emit('launch', shard); return Promise.resolve(shard); } /** * Spawns multiple shards. 
* @param {number} [amount=this.totalShards] Number of shards to spawn * @param {number} [delay=7500] How long to wait in between spawning each shard (in milliseconds) * @returns {Promise<Collection<number, Shard>>} */ spawn(amount = this.totalShards, delay = 7500) { if (amount === 'auto') { return Util.fetchRecommendedShards(this.token).then(count => { this.totalShards = count; return this._spawn(count, delay); }); } else { if (typeof amount !== 'number' || isNaN(amount)) throw new TypeError('Amount of shards must be a number.'); if (amount < 1) throw new RangeError('Amount of shards must be at least 1.'); if (amount !== Math.floor(amount)) throw new TypeError('Amount of shards must be an integer.'); return this._spawn(amount, delay); } } /** * Actually spawns shards, unlike that poser above >:( * @param {number} amount Number of shards to spawn * @param {number} delay How long to wait in between spawning each shard (in milliseconds) * @returns {Promise<Collection<number, Shard>>} * @private */
(amount, delay) { return new Promise(resolve => { if (this.shards.size >= amount) throw new Error(`Already spawned ${this.shards.size} shards.`); this.totalShards = amount; this.createShard(); if (this.shards.size >= this.totalShards) { resolve(this.shards); return; } if (delay <= 0) { while (this.shards.size < this.totalShards) this.createShard(); resolve(this.shards); } else { const interval = setInterval(() => { this.createShard(); if (this.shards.size >= this.totalShards) { clearInterval(interval); resolve(this.shards); } }, delay); } }); } /** * Send a message to all shards. * @param {*} message Message to be sent to the shards * @returns {Promise<Shard[]>} */ broadcast(message) { const promises = []; for (const shard of this.shards.values()) promises.push(shard.send(message)); return Promise.all(promises); } /** * Evaluates a script on all shards, in the context of the Clients. * @param {string} script JavaScript to run on each shard * @returns {Promise<Array>} Results of the script execution */ broadcastEval(script) { const promises = []; for (const shard of this.shards.values()) promises.push(shard.eval(script)); return Promise.all(promises); } /** * Fetches a client property value of each shard. * @param {string} prop Name of the client property to get, using periods for nesting * @returns {Promise<Array>} * @example * manager.fetchClientValues('guilds.size') * .then(results => { * console.log(`${results.reduce((prev, val) => prev + val, 0)} total guilds`); * }) * .catch(console.error); */ fetchClientValues(prop) { if (this.shards.size === 0) return Promise.reject(new Error('No shards have been spawned.')); if (this.shards.size !== this.totalShards) return Promise.reject(new Error('Still spawning shards.')); const promises = []; for (const shard of this.shards.values()) promises.push(shard.fetchClientValue(prop)); return Promise.all(promises); } } module.exports = ShardingManager;
_spawn
identifier_name
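Typical use of the manager defined above: point it at the shard script, let `totalShards: 'auto'` fetch the recommended count, and listen for `launch` events. The path and token below are placeholders, and the sketch assumes Shard exposes an `id` property, consistent with its constructor arguments.

// Usage sketch for the ShardingManager above (placeholder path and token).
const ShardingManager = require('./ShardingManager');

const manager = new ShardingManager('./bot.js', {
  totalShards: 'auto',
  token: 'YOUR_BOT_TOKEN',
});

manager.on('launch', shard => console.log(`Launched shard ${shard.id}`));
manager.spawn()
  .then(shards => console.log(`Spawned ${shards.size} shards.`))
  .catch(console.error);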
ShardingManager.js
const path = require('path'); const fs = require('fs'); const EventEmitter = require('events').EventEmitter; const Shard = require('./Shard'); const Collection = require('../util/Collection'); const Util = require('../util/Util'); /** * This is a utility class that can be used to help you spawn shards of your client. Each shard is completely separate * from the other. The Shard Manager takes a path to a file and spawns it under the specified amount of shards safely. * If you do not select an amount of shards, the manager will automatically decide the best amount. * @extends {EventEmitter} */ class ShardingManager extends EventEmitter { /** * @param {string} file Path to your shard script file * @param {Object} [options] Options for the sharding manager * @param {number|string} [options.totalShards='auto'] Number of shards to spawn, or "auto" * @param {boolean} [options.respawn=true] Whether shards should automatically respawn upon exiting * @param {string[]} [options.shardArgs=[]] Arguments to pass to the shard script when spawning * @param {string} [options.token] Token to use for automatic shard count and passing to shards */ constructor(file, options = {}) { super(); options = Util.mergeDefault({ totalShards: 'auto', respawn: true, shardArgs: [], token: null, }, options); /** * Path to the shard script file * @type {string} */ this.file = file; if (!file) throw new Error('File must be specified.'); if (!path.isAbsolute(file)) this.file = path.resolve(process.cwd(), file); const stats = fs.statSync(this.file); if (!stats.isFile()) throw new Error('File path does not point to a file.'); /** * Amount of shards that this manager is going to spawn * @type {number|string} */ this.totalShards = options.totalShards; if (this.totalShards !== 'auto') { if (typeof this.totalShards !== 'number' || isNaN(this.totalShards)) { throw new TypeError('Amount of shards must be a number.'); } if (this.totalShards < 1) throw new RangeError('Amount of shards must be at least 1.'); if (this.totalShards !== Math.floor(this.totalShards)) { throw new RangeError('Amount of shards must be an integer.'); } } /** * Whether shards should automatically respawn upon exiting * @type {boolean} */ this.respawn = options.respawn; /** * An array of arguments to pass to shards * @type {string[]} */ this.shardArgs = options.shardArgs; /** * Token to use for obtaining the automatic shard count, and passing to shards * @type {?string} */ this.token = options.token ? options.token.replace(/^Bot\s*/i, '') : null; /** * A collection of shards that this manager has spawned * @type {Collection<number, Shard>} */ this.shards = new Collection(); } /** * Spawns a single shard. * @param {number} id The ID of the shard to spawn. **This is usually not necessary** * @returns {Promise<Shard>} */ createShard(id = this.shards.size) { const shard = new Shard(this, id, this.shardArgs); this.shards.set(id, shard); /** * Emitted upon launching a shard. * @event ShardingManager#launch * @param {Shard} shard Shard that was launched */ this.emit('launch', shard); return Promise.resolve(shard); } /** * Spawns multiple shards. 
* @param {number} [amount=this.totalShards] Number of shards to spawn * @param {number} [delay=7500] How long to wait in between spawning each shard (in milliseconds) * @returns {Promise<Collection<number, Shard>>} */ spawn(amount = this.totalShards, delay = 7500) { if (amount === 'auto') { return Util.fetchRecommendedShards(this.token).then(count => { this.totalShards = count; return this._spawn(count, delay); }); } else { if (typeof amount !== 'number' || isNaN(amount)) throw new TypeError('Amount of shards must be a number.'); if (amount < 1) throw new RangeError('Amount of shards must be at least 1.'); if (amount !== Math.floor(amount)) throw new TypeError('Amount of shards must be an integer.'); return this._spawn(amount, delay); } } /** * Actually spawns shards, unlike that poser above >:( * @param {number} amount Number of shards to spawn * @param {number} delay How long to wait in between spawning each shard (in milliseconds) * @returns {Promise<Collection<number, Shard>>} * @private */ _spawn(amount, delay) { return new Promise(resolve => { if (this.shards.size >= amount) throw new Error(`Already spawned ${this.shards.size} shards.`); this.totalShards = amount; this.createShard(); if (this.shards.size >= this.totalShards) { resolve(this.shards); return; } if (delay <= 0) { while (this.shards.size < this.totalShards) this.createShard(); resolve(this.shards); } else { const interval = setInterval(() => { this.createShard(); if (this.shards.size >= this.totalShards)
}, delay); } }); } /** * Send a message to all shards. * @param {*} message Message to be sent to the shards * @returns {Promise<Shard[]>} */ broadcast(message) { const promises = []; for (const shard of this.shards.values()) promises.push(shard.send(message)); return Promise.all(promises); } /** * Evaluates a script on all shards, in the context of the Clients. * @param {string} script JavaScript to run on each shard * @returns {Promise<Array>} Results of the script execution */ broadcastEval(script) { const promises = []; for (const shard of this.shards.values()) promises.push(shard.eval(script)); return Promise.all(promises); } /** * Fetches a client property value of each shard. * @param {string} prop Name of the client property to get, using periods for nesting * @returns {Promise<Array>} * @example * manager.fetchClientValues('guilds.size') * .then(results => { * console.log(`${results.reduce((prev, val) => prev + val, 0)} total guilds`); * }) * .catch(console.error); */ fetchClientValues(prop) { if (this.shards.size === 0) return Promise.reject(new Error('No shards have been spawned.')); if (this.shards.size !== this.totalShards) return Promise.reject(new Error('Still spawning shards.')); const promises = []; for (const shard of this.shards.values()) promises.push(shard.fetchClientValue(prop)); return Promise.all(promises); } } module.exports = ShardingManager;
{ clearInterval(interval); resolve(this.shards); }
conditional_block
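The heart of _spawn above is the stagger: one createShard() immediately, then one per `delay` milliseconds via setInterval, resolving once the target count is reached. The same pattern isolated from the class, with a hypothetical staggeredStart helper:

// Standalone sketch of the stagger in _spawn above: start one unit of
// work immediately, then one every `delay` ms until `total` have started.
function staggeredStart(total: number, delay: number, start: (i: number) => void): Promise<void> {
  return new Promise(resolve => {
    let started = 0;
    const tick = () => {
      start(started++);
      if (started >= total) {
        clearInterval(interval);
        resolve();
      }
    };
    const interval = setInterval(tick, delay);
    tick(); // mirrors the single createShard() call before the interval
  });
}

staggeredStart(3, 1000, i => console.log(`starting shard ${i}`))
  .then(() => console.log('all started'));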
ShardingManager.js
const path = require('path'); const fs = require('fs'); const EventEmitter = require('events').EventEmitter; const Shard = require('./Shard'); const Collection = require('../util/Collection'); const Util = require('../util/Util'); /** * This is a utility class that can be used to help you spawn shards of your client. Each shard is completely separate * from the other. The Shard Manager takes a path to a file and spawns it under the specified amount of shards safely. * If you do not select an amount of shards, the manager will automatically decide the best amount. * @extends {EventEmitter} */ class ShardingManager extends EventEmitter { /** * @param {string} file Path to your shard script file * @param {Object} [options] Options for the sharding manager * @param {number|string} [options.totalShards='auto'] Number of shards to spawn, or "auto" * @param {boolean} [options.respawn=true] Whether shards should automatically respawn upon exiting * @param {string[]} [options.shardArgs=[]] Arguments to pass to the shard script when spawning * @param {string} [options.token] Token to use for automatic shard count and passing to shards */ constructor(file, options = {}) { super(); options = Util.mergeDefault({ totalShards: 'auto', respawn: true, shardArgs: [], token: null, }, options);
* @type {string} */ this.file = file; if (!file) throw new Error('File must be specified.'); if (!path.isAbsolute(file)) this.file = path.resolve(process.cwd(), file); const stats = fs.statSync(this.file); if (!stats.isFile()) throw new Error('File path does not point to a file.'); /** * Amount of shards that this manager is going to spawn * @type {number|string} */ this.totalShards = options.totalShards; if (this.totalShards !== 'auto') { if (typeof this.totalShards !== 'number' || isNaN(this.totalShards)) { throw new TypeError('Amount of shards must be a number.'); } if (this.totalShards < 1) throw new RangeError('Amount of shards must be at least 1.'); if (this.totalShards !== Math.floor(this.totalShards)) { throw new RangeError('Amount of shards must be an integer.'); } } /** * Whether shards should automatically respawn upon exiting * @type {boolean} */ this.respawn = options.respawn; /** * An array of arguments to pass to shards * @type {string[]} */ this.shardArgs = options.shardArgs; /** * Token to use for obtaining the automatic shard count, and passing to shards * @type {?string} */ this.token = options.token ? options.token.replace(/^Bot\s*/i, '') : null; /** * A collection of shards that this manager has spawned * @type {Collection<number, Shard>} */ this.shards = new Collection(); } /** * Spawns a single shard. * @param {number} id The ID of the shard to spawn. **This is usually not necessary** * @returns {Promise<Shard>} */ createShard(id = this.shards.size) { const shard = new Shard(this, id, this.shardArgs); this.shards.set(id, shard); /** * Emitted upon launching a shard. * @event ShardingManager#launch * @param {Shard} shard Shard that was launched */ this.emit('launch', shard); return Promise.resolve(shard); } /** * Spawns multiple shards. * @param {number} [amount=this.totalShards] Number of shards to spawn * @param {number} [delay=7500] How long to wait in between spawning each shard (in milliseconds) * @returns {Promise<Collection<number, Shard>>} */ spawn(amount = this.totalShards, delay = 7500) { if (amount === 'auto') { return Util.fetchRecommendedShards(this.token).then(count => { this.totalShards = count; return this._spawn(count, delay); }); } else { if (typeof amount !== 'number' || isNaN(amount)) throw new TypeError('Amount of shards must be a number.'); if (amount < 1) throw new RangeError('Amount of shards must be at least 1.'); if (amount !== Math.floor(amount)) throw new TypeError('Amount of shards must be an integer.'); return this._spawn(amount, delay); } } /** * Actually spawns shards, unlike that poser above >:( * @param {number} amount Number of shards to spawn * @param {number} delay How long to wait in between spawning each shard (in milliseconds) * @returns {Promise<Collection<number, Shard>>} * @private */ _spawn(amount, delay) { return new Promise(resolve => { if (this.shards.size >= amount) throw new Error(`Already spawned ${this.shards.size} shards.`); this.totalShards = amount; this.createShard(); if (this.shards.size >= this.totalShards) { resolve(this.shards); return; } if (delay <= 0) { while (this.shards.size < this.totalShards) this.createShard(); resolve(this.shards); } else { const interval = setInterval(() => { this.createShard(); if (this.shards.size >= this.totalShards) { clearInterval(interval); resolve(this.shards); } }, delay); } }); } /** * Send a message to all shards. 
* @param {*} message Message to be sent to the shards * @returns {Promise<Shard[]>} */ broadcast(message) { const promises = []; for (const shard of this.shards.values()) promises.push(shard.send(message)); return Promise.all(promises); } /** * Evaluates a script on all shards, in the context of the Clients. * @param {string} script JavaScript to run on each shard * @returns {Promise<Array>} Results of the script execution */ broadcastEval(script) { const promises = []; for (const shard of this.shards.values()) promises.push(shard.eval(script)); return Promise.all(promises); } /** * Fetches a client property value of each shard. * @param {string} prop Name of the client property to get, using periods for nesting * @returns {Promise<Array>} * @example * manager.fetchClientValues('guilds.size') * .then(results => { * console.log(`${results.reduce((prev, val) => prev + val, 0)} total guilds`); * }) * .catch(console.error); */ fetchClientValues(prop) { if (this.shards.size === 0) return Promise.reject(new Error('No shards have been spawned.')); if (this.shards.size !== this.totalShards) return Promise.reject(new Error('Still spawning shards.')); const promises = []; for (const shard of this.shards.values()) promises.push(shard.fetchClientValue(prop)); return Promise.all(promises); } } module.exports = ShardingManager;
/** * Path to the shard script file
random_line_split
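The constructor and spawn() above repeat the same three-part shard-count validation. Factored into one hypothetical helper for clarity, with the error messages copied from the class and RangeError used for both range checks, as the constructor does:

// The three shard-count checks repeated above, factored into one helper.
function assertShardCount(amount: any): number {
  if (typeof amount !== 'number' || isNaN(amount)) throw new TypeError('Amount of shards must be a number.');
  if (amount < 1) throw new RangeError('Amount of shards must be at least 1.');
  if (amount !== Math.floor(amount)) throw new RangeError('Amount of shards must be an integer.');
  return amount;
}

assertShardCount(4);      // ok
// assertShardCount(1.5); // throws RangeError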
ShardingManager.js
const path = require('path'); const fs = require('fs'); const EventEmitter = require('events').EventEmitter; const Shard = require('./Shard'); const Collection = require('../util/Collection'); const Util = require('../util/Util'); /** * This is a utility class that can be used to help you spawn shards of your client. Each shard is completely separate * from the other. The Shard Manager takes a path to a file and spawns it under the specified amount of shards safely. * If you do not select an amount of shards, the manager will automatically decide the best amount. * @extends {EventEmitter} */ class ShardingManager extends EventEmitter { /** * @param {string} file Path to your shard script file * @param {Object} [options] Options for the sharding manager * @param {number|string} [options.totalShards='auto'] Number of shards to spawn, or "auto" * @param {boolean} [options.respawn=true] Whether shards should automatically respawn upon exiting * @param {string[]} [options.shardArgs=[]] Arguments to pass to the shard script when spawning * @param {string} [options.token] Token to use for automatic shard count and passing to shards */ constructor(file, options = {}) { super(); options = Util.mergeDefault({ totalShards: 'auto', respawn: true, shardArgs: [], token: null, }, options); /** * Path to the shard script file * @type {string} */ this.file = file; if (!file) throw new Error('File must be specified.'); if (!path.isAbsolute(file)) this.file = path.resolve(process.cwd(), file); const stats = fs.statSync(this.file); if (!stats.isFile()) throw new Error('File path does not point to a file.'); /** * Amount of shards that this manager is going to spawn * @type {number|string} */ this.totalShards = options.totalShards; if (this.totalShards !== 'auto') { if (typeof this.totalShards !== 'number' || isNaN(this.totalShards)) { throw new TypeError('Amount of shards must be a number.'); } if (this.totalShards < 1) throw new RangeError('Amount of shards must be at least 1.'); if (this.totalShards !== Math.floor(this.totalShards)) { throw new RangeError('Amount of shards must be an integer.'); } } /** * Whether shards should automatically respawn upon exiting * @type {boolean} */ this.respawn = options.respawn; /** * An array of arguments to pass to shards * @type {string[]} */ this.shardArgs = options.shardArgs; /** * Token to use for obtaining the automatic shard count, and passing to shards * @type {?string} */ this.token = options.token ? options.token.replace(/^Bot\s*/i, '') : null; /** * A collection of shards that this manager has spawned * @type {Collection<number, Shard>} */ this.shards = new Collection(); } /** * Spawns a single shard. * @param {number} id The ID of the shard to spawn. **This is usually not necessary** * @returns {Promise<Shard>} */ createShard(id = this.shards.size) { const shard = new Shard(this, id, this.shardArgs); this.shards.set(id, shard); /** * Emitted upon launching a shard. * @event ShardingManager#launch * @param {Shard} shard Shard that was launched */ this.emit('launch', shard); return Promise.resolve(shard); } /** * Spawns multiple shards. * @param {number} [amount=this.totalShards] Number of shards to spawn * @param {number} [delay=7500] How long to wait in between spawning each shard (in milliseconds) * @returns {Promise<Collection<number, Shard>>} */ spawn(amount = this.totalShards, delay = 7500)
/** * Actually spawns shards, unlike that poser above >:( * @param {number} amount Number of shards to spawn * @param {number} delay How long to wait in between spawning each shard (in milliseconds) * @returns {Promise<Collection<number, Shard>>} * @private */ _spawn(amount, delay) { return new Promise(resolve => { if (this.shards.size >= amount) throw new Error(`Already spawned ${this.shards.size} shards.`); this.totalShards = amount; this.createShard(); if (this.shards.size >= this.totalShards) { resolve(this.shards); return; } if (delay <= 0) { while (this.shards.size < this.totalShards) this.createShard(); resolve(this.shards); } else { const interval = setInterval(() => { this.createShard(); if (this.shards.size >= this.totalShards) { clearInterval(interval); resolve(this.shards); } }, delay); } }); } /** * Send a message to all shards. * @param {*} message Message to be sent to the shards * @returns {Promise<Shard[]>} */ broadcast(message) { const promises = []; for (const shard of this.shards.values()) promises.push(shard.send(message)); return Promise.all(promises); } /** * Evaluates a script on all shards, in the context of the Clients. * @param {string} script JavaScript to run on each shard * @returns {Promise<Array>} Results of the script execution */ broadcastEval(script) { const promises = []; for (const shard of this.shards.values()) promises.push(shard.eval(script)); return Promise.all(promises); } /** * Fetches a client property value of each shard. * @param {string} prop Name of the client property to get, using periods for nesting * @returns {Promise<Array>} * @example * manager.fetchClientValues('guilds.size') * .then(results => { * console.log(`${results.reduce((prev, val) => prev + val, 0)} total guilds`); * }) * .catch(console.error); */ fetchClientValues(prop) { if (this.shards.size === 0) return Promise.reject(new Error('No shards have been spawned.')); if (this.shards.size !== this.totalShards) return Promise.reject(new Error('Still spawning shards.')); const promises = []; for (const shard of this.shards.values()) promises.push(shard.fetchClientValue(prop)); return Promise.all(promises); } } module.exports = ShardingManager;
{ if (amount === 'auto') { return Util.fetchRecommendedShards(this.token).then(count => { this.totalShards = count; return this._spawn(count, delay); }); } else { if (typeof amount !== 'number' || isNaN(amount)) throw new TypeError('Amount of shards must be a number.'); if (amount < 1) throw new RangeError('Amount of shards must be at least 1.'); if (amount !== Math.floor(amount)) throw new TypeError('Amount of shards must be an integer.'); return this._spawn(amount, delay); } }
identifier_body
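Once shards are spawned, the broadcast helpers above aggregate across processes. Continuing the earlier usage sketch (`manager` as defined there), a broadcastEval call mirroring the fetchClientValues example in the docs:

// Continuing the usage sketch above (`manager` as defined earlier),
// mirroring the fetchClientValues example with broadcastEval.
manager.broadcastEval('this.guilds.size')
  .then(results => console.log(`${results.reduce((prev, val) => prev + val, 0)} total guilds`))
  .catch(console.error);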
sha1.js
/** * * Secure Hash Algorithm (SHA1) * http://www.webtoolkit.info/ * **/ export function SHA1(msg) { function rotate_left(n, s) { var t4 = (n << s) | (n >>> (32 - s)); return t4; }; function lsb_hex(val) { var str = ""; var i; var vh; var vl; for (i = 0; i <= 6; i += 2) { vh = (val >>> (i * 4 + 4)) & 0x0f; vl = (val >>> (i * 4)) & 0x0f; str += vh.toString(16) + vl.toString(16); } return str; }; function cvt_hex(val) { var str = ""; var i; var v; for (i = 7; i >= 0; i--) { v = (val >>> (i * 4)) & 0x0f; str += v.toString(16); } return str; }; function
(string) { string = string.replace(/\r\n/g, "\n"); var utftext = ""; for (var n = 0; n < string.length; n++) { var c = string.charCodeAt(n); if (c < 128) { utftext += String.fromCharCode(c); } else if ((c > 127) && (c < 2048)) { utftext += String.fromCharCode((c >> 6) | 192); utftext += String.fromCharCode((c & 63) | 128); } else { utftext += String.fromCharCode((c >> 12) | 224); utftext += String.fromCharCode(((c >> 6) & 63) | 128); utftext += String.fromCharCode((c & 63) | 128); } } return utftext; }; var blockstart; var i, j; var W = new Array(80); var H0 = 0x67452301; var H1 = 0xEFCDAB89; var H2 = 0x98BADCFE; var H3 = 0x10325476; var H4 = 0xC3D2E1F0; var A, B, C, D, E; var temp; msg = Utf8Encode(msg); var msg_len = msg.length; var word_array = new Array(); for (i = 0; i < msg_len - 3; i += 4) { j = msg.charCodeAt(i) << 24 | msg.charCodeAt(i + 1) << 16 | msg.charCodeAt(i + 2) << 8 | msg.charCodeAt(i + 3); word_array.push(j); } switch (msg_len % 4) { case 0: i = 0x080000000; break; case 1: i = msg.charCodeAt(msg_len - 1) << 24 | 0x0800000; break; case 2: i = msg.charCodeAt(msg_len - 2) << 24 | msg.charCodeAt(msg_len - 1) << 16 | 0x08000; break; case 3: i = msg.charCodeAt(msg_len - 3) << 24 | msg.charCodeAt(msg_len - 2) << 16 | msg.charCodeAt(msg_len - 1) << 8 | 0x80; break; } word_array.push(i); while ((word_array.length % 16) != 14) word_array.push(0); word_array.push(msg_len >>> 29); word_array.push((msg_len << 3) & 0x0ffffffff); for (blockstart = 0; blockstart < word_array.length; blockstart += 16) { for (i = 0; i < 16; i++) W[i] = word_array[blockstart + i]; for (i = 16; i <= 79; i++) W[i] = rotate_left(W[i - 3] ^ W[i - 8] ^ W[i - 14] ^ W[i - 16], 1); A = H0; B = H1; C = H2; D = H3; E = H4; for (i = 0; i <= 19; i++) { temp = (rotate_left(A, 5) + ((B & C) | (~B & D)) + E + W[i] + 0x5A827999) & 0x0ffffffff; E = D; D = C; C = rotate_left(B, 30); B = A; A = temp; } for (i = 20; i <= 39; i++) { temp = (rotate_left(A, 5) + (B ^ C ^ D) + E + W[i] + 0x6ED9EBA1) & 0x0ffffffff; E = D; D = C; C = rotate_left(B, 30); B = A; A = temp; } for (i = 40; i <= 59; i++) { temp = (rotate_left(A, 5) + ((B & C) | (B & D) | (C & D)) + E + W[i] + 0x8F1BBCDC) & 0x0ffffffff; E = D; D = C; C = rotate_left(B, 30); B = A; A = temp; } for (i = 60; i <= 79; i++) { temp = (rotate_left(A, 5) + (B ^ C ^ D) + E + W[i] + 0xCA62C1D6) & 0x0ffffffff; E = D; D = C; C = rotate_left(B, 30); B = A; A = temp; } H0 = (H0 + A) & 0x0ffffffff; H1 = (H1 + B) & 0x0ffffffff; H2 = (H2 + C) & 0x0ffffffff; H3 = (H3 + D) & 0x0ffffffff; H4 = (H4 + E) & 0x0ffffffff; } var temp = cvt_hex(H0) + cvt_hex(H1) + cvt_hex(H2) + cvt_hex(H3) + cvt_hex(H4); return temp.toLowerCase(); };
Utf8Encode
identifier_name
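rotate_left above is the standard 32-bit circular shift SHA-1 depends on. In TypeScript, a trailing `>>> 0` pins the result to an unsigned 32-bit value, which the signed `|` operator would otherwise render negative:

// 32-bit left rotation, as in rotate_left above; >>> 0 keeps the
// result as an unsigned 32-bit integer.
function rotl32(n: number, s: number): number {
  return ((n << s) | (n >>> (32 - s))) >>> 0;
}

console.log(rotl32(0x80000000, 1).toString(16)); // "1"
console.log(rotl32(0x12345678, 8).toString(16)); // "34567812"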
sha1.js
/** * * Secure Hash Algorithm (SHA1) * http://www.webtoolkit.info/ * **/ export function SHA1(msg) { function rotate_left(n, s) { var t4 = (n << s) | (n >>> (32 - s)); return t4; }; function lsb_hex(val) { var str = ""; var i; var vh; var vl; for (i = 0; i <= 6; i += 2) { vh = (val >>> (i * 4 + 4)) & 0x0f; vl = (val >>> (i * 4)) & 0x0f; str += vh.toString(16) + vl.toString(16); } return str; }; function cvt_hex(val) { var str = ""; var i; var v; for (i = 7; i >= 0; i--) { v = (val >>> (i * 4)) & 0x0f; str += v.toString(16); } return str; }; function Utf8Encode(string) { string = string.replace(/\r\n/g, "\n"); var utftext = ""; for (var n = 0; n < string.length; n++) { var c = string.charCodeAt(n); if (c < 128) { utftext += String.fromCharCode(c); } else if ((c > 127) && (c < 2048)) { utftext += String.fromCharCode((c >> 6) | 192); utftext += String.fromCharCode((c & 63) | 128); } else { utftext += String.fromCharCode((c >> 12) | 224); utftext += String.fromCharCode(((c >> 6) & 63) | 128); utftext += String.fromCharCode((c & 63) | 128); } } return utftext; }; var blockstart; var i, j; var W = new Array(80); var H0 = 0x67452301; var H1 = 0xEFCDAB89; var H2 = 0x98BADCFE; var H3 = 0x10325476; var H4 = 0xC3D2E1F0; var A, B, C, D, E; var temp; msg = Utf8Encode(msg); var msg_len = msg.length; var word_array = new Array(); for (i = 0; i < msg_len - 3; i += 4) { j = msg.charCodeAt(i) << 24 | msg.charCodeAt(i + 1) << 16 | msg.charCodeAt(i + 2) << 8 | msg.charCodeAt(i + 3); word_array.push(j); } switch (msg_len % 4) { case 0: i = 0x080000000; break; case 1: i = msg.charCodeAt(msg_len - 1) << 24 | 0x0800000; break; case 2: i = msg.charCodeAt(msg_len - 2) << 24 | msg.charCodeAt(msg_len - 1) << 16 | 0x08000; break; case 3: i = msg.charCodeAt(msg_len - 3) << 24 | msg.charCodeAt(msg_len - 2) << 16 | msg.charCodeAt(msg_len - 1) << 8 | 0x80; break; } word_array.push(i); while ((word_array.length % 16) != 14) word_array.push(0); word_array.push(msg_len >>> 29); word_array.push((msg_len << 3) & 0x0ffffffff); for (blockstart = 0; blockstart < word_array.length; blockstart += 16) { for (i = 0; i < 16; i++) W[i] = word_array[blockstart + i]; for (i = 16; i <= 79; i++) W[i] = rotate_left(W[i - 3] ^ W[i - 8] ^ W[i - 14] ^ W[i - 16], 1); A = H0; B = H1; C = H2; D = H3; E = H4;
E = D; D = C; C = rotate_left(B, 30); B = A; A = temp; } for (i = 20; i <= 39; i++) { temp = (rotate_left(A, 5) + (B ^ C ^ D) + E + W[i] + 0x6ED9EBA1) & 0x0ffffffff; E = D; D = C; C = rotate_left(B, 30); B = A; A = temp; } for (i = 40; i <= 59; i++) { temp = (rotate_left(A, 5) + ((B & C) | (B & D) | (C & D)) + E + W[i] + 0x8F1BBCDC) & 0x0ffffffff; E = D; D = C; C = rotate_left(B, 30); B = A; A = temp; } for (i = 60; i <= 79; i++) { temp = (rotate_left(A, 5) + (B ^ C ^ D) + E + W[i] + 0xCA62C1D6) & 0x0ffffffff; E = D; D = C; C = rotate_left(B, 30); B = A; A = temp; } H0 = (H0 + A) & 0x0ffffffff; H1 = (H1 + B) & 0x0ffffffff; H2 = (H2 + C) & 0x0ffffffff; H3 = (H3 + D) & 0x0ffffffff; H4 = (H4 + E) & 0x0ffffffff; } var temp = cvt_hex(H0) + cvt_hex(H1) + cvt_hex(H2) + cvt_hex(H3) + cvt_hex(H4); return temp.toLowerCase(); };
for (i = 0; i <= 19; i++) { temp = (rotate_left(A, 5) + ((B & C) | (~B & D)) + E + W[i] + 0x5A827999) & 0x0ffffffff;
random_line_split
sha1.js
/** * * Secure Hash Algorithm (SHA1) * http://www.webtoolkit.info/ * **/ export function SHA1(msg)
function cvt_hex(val) { var str = ""; var i; var v; for (i = 7; i >= 0; i--) { v = (val >>> (i * 4)) & 0x0f; str += v.toString(16); } return str; }; function Utf8Encode(string) { string = string.replace(/\r\n/g, "\n"); var utftext = ""; for (var n = 0; n < string.length; n++) { var c = string.charCodeAt(n); if (c < 128) { utftext += String.fromCharCode(c); } else if ((c > 127) && (c < 2048)) { utftext += String.fromCharCode((c >> 6) | 192); utftext += String.fromCharCode((c & 63) | 128); } else { utftext += String.fromCharCode((c >> 12) | 224); utftext += String.fromCharCode(((c >> 6) & 63) | 128); utftext += String.fromCharCode((c & 63) | 128); } } return utftext; }; var blockstart; var i, j; var W = new Array(80); var H0 = 0x67452301; var H1 = 0xEFCDAB89; var H2 = 0x98BADCFE; var H3 = 0x10325476; var H4 = 0xC3D2E1F0; var A, B, C, D, E; var temp; msg = Utf8Encode(msg); var msg_len = msg.length; var word_array = new Array(); for (i = 0; i < msg_len - 3; i += 4) { j = msg.charCodeAt(i) << 24 | msg.charCodeAt(i + 1) << 16 | msg.charCodeAt(i + 2) << 8 | msg.charCodeAt(i + 3); word_array.push(j); } switch (msg_len % 4) { case 0: i = 0x080000000; break; case 1: i = msg.charCodeAt(msg_len - 1) << 24 | 0x0800000; break; case 2: i = msg.charCodeAt(msg_len - 2) << 24 | msg.charCodeAt(msg_len - 1) << 16 | 0x08000; break; case 3: i = msg.charCodeAt(msg_len - 3) << 24 | msg.charCodeAt(msg_len - 2) << 16 | msg.charCodeAt(msg_len - 1) << 8 | 0x80; break; } word_array.push(i); while ((word_array.length % 16) != 14) word_array.push(0); word_array.push(msg_len >>> 29); word_array.push((msg_len << 3) & 0x0ffffffff); for (blockstart = 0; blockstart < word_array.length; blockstart += 16) { for (i = 0; i < 16; i++) W[i] = word_array[blockstart + i]; for (i = 16; i <= 79; i++) W[i] = rotate_left(W[i - 3] ^ W[i - 8] ^ W[i - 14] ^ W[i - 16], 1); A = H0; B = H1; C = H2; D = H3; E = H4; for (i = 0; i <= 19; i++) { temp = (rotate_left(A, 5) + ((B & C) | (~B & D)) + E + W[i] + 0x5A827999) & 0x0ffffffff; E = D; D = C; C = rotate_left(B, 30); B = A; A = temp; } for (i = 20; i <= 39; i++) { temp = (rotate_left(A, 5) + (B ^ C ^ D) + E + W[i] + 0x6ED9EBA1) & 0x0ffffffff; E = D; D = C; C = rotate_left(B, 30); B = A; A = temp; } for (i = 40; i <= 59; i++) { temp = (rotate_left(A, 5) + ((B & C) | (B & D) | (C & D)) + E + W[i] + 0x8F1BBCDC) & 0x0ffffffff; E = D; D = C; C = rotate_left(B, 30); B = A; A = temp; } for (i = 60; i <= 79; i++) { temp = (rotate_left(A, 5) + (B ^ C ^ D) + E + W[i] + 0xCA62C1D6) & 0x0ffffffff; E = D; D = C; C = rotate_left(B, 30); B = A; A = temp; } H0 = (H0 + A) & 0x0ffffffff; H1 = (H1 + B) & 0x0ffffffff; H2 = (H2 + C) & 0x0ffffffff; H3 = (H3 + D) & 0x0ffffffff; H4 = (H4 + E) & 0x0ffffffff; } var temp = cvt_hex(H0) + cvt_hex(H1) + cvt_hex(H2) + cvt_hex(H3) + cvt_hex(H4); return temp.toLowerCase(); } ;
{ function rotate_left(n, s) { var t4 = (n << s) | (n >>> (32 - s)); return t4; }; function lsb_hex(val) { var str = ""; var i; var vh; var vl; for (i = 0; i <= 6; i += 2) { vh = (val >>> (i * 4 + 4)) & 0x0f; vl = (val >>> (i * 4)) & 0x0f; str += vh.toString(16) + vl.toString(16); } return str; };
identifier_body
gr-keyboard-shortcuts-dialog.ts
/** * @license * Copyright (C) 2016 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import '../../shared/gr-button/gr-button'; import '../gr-key-binding-display/gr-key-binding-display'; import {sharedStyles} from '../../../styles/shared-styles'; import {fontStyles} from '../../../styles/gr-font-styles'; import {LitElement, css, html} from 'lit'; import {customElement, property} from 'lit/decorators'; import { ShortcutSection, SectionView, } from '../../../mixins/keyboard-shortcut-mixin/keyboard-shortcut-mixin'; import {getAppContext} from '../../../services/app-context'; import {ShortcutViewListener} from '../../../services/shortcuts/shortcuts-service'; declare global { interface HTMLElementTagNameMap { 'gr-keyboard-shortcuts-dialog': GrKeyboardShortcutsDialog; } } interface SectionShortcut { section: ShortcutSection; shortcuts?: SectionView; } @customElement('gr-keyboard-shortcuts-dialog') export class GrKeyboardShortcutsDialog extends LitElement { /** * Fired when the user presses the close button. * * @event close */ @property({type: Array}) _left?: SectionShortcut[]; @property({type: Array}) _right?: SectionShortcut[]; private readonly shortcutListener: ShortcutViewListener; private readonly shortcuts = getAppContext().shortcutsService; constructor() { super(); this.shortcutListener = (d?: Map<ShortcutSection, SectionView>) => this._onDirectoryUpdated(d); } static override get styles() { return [ sharedStyles, fontStyles, css` :host { display: block; max-height: 100vh; overflow-y: auto; } header { padding: var(--spacing-l); } main { display: flex; padding: 0 var(--spacing-xxl) var(--spacing-xxl); } .column { flex: 50%; } header { align-items: center; border-bottom: 1px solid var(--border-color); display: flex; justify-content: space-between; } table caption { font-weight: var(--font-weight-bold); padding-top: var(--spacing-l); text-align: left; } tr { height: 32px; } td { padding: var(--spacing-xs) 0; } td:first-child, th:first-child { padding-right: var(--spacing-m); text-align: right; width: 160px; color: var(--deemphasized-text-color); } td:second-child { min-width: 200px; } th { color: var(--deemphasized-text-color); text-align: left; } .header { font-weight: var(--font-weight-bold); padding-top: var(--spacing-l); } .modifier { font-weight: var(--font-weight-normal); } `, ]; } override render() { return html`<header> <h3 class="heading-3">Keyboard shortcuts</h3> <gr-button link="" @click=${this.handleCloseTap}>Close</gr-button> </header> <main> <div class="column"> ${this._left?.map(section => this.renderSection(section))} </div> <div class="column"> ${this._right?.map(section => this.renderSection(section))} </div> </main> <footer></footer>`; } private renderSection(section: SectionShortcut) { return html`<table> <caption> ${section.section} </caption> <thead> <tr> <th>Key</th> <th>Action</th> </tr> </thead> <tbody> ${section.shortcuts?.map( shortcut => html`<tr> <td> <gr-key-binding-display .binding=${shortcut.binding}> </gr-key-binding-display> </td> 
<td>${shortcut.text}</td> </tr>` )} </tbody> </table>`; } override connectedCallback() { super.connectedCallback(); this.shortcuts.addListener(this.shortcutListener); } override disconnectedCallback() { this.shortcuts.removeListener(this.shortcutListener); super.disconnectedCallback(); } private handleCloseTap(e: MouseEvent) { e.preventDefault(); e.stopPropagation(); this.dispatchEvent( new CustomEvent('close', { composed: true, bubbles: false, }) ); } _onDirectoryUpdated(directory?: Map<ShortcutSection, SectionView>) { if (!directory) { return; } const left = [] as SectionShortcut[]; const right = [] as SectionShortcut[]; if (directory.has(ShortcutSection.EVERYWHERE)) { left.push({ section: ShortcutSection.EVERYWHERE, shortcuts: directory.get(ShortcutSection.EVERYWHERE), }); } if (directory.has(ShortcutSection.NAVIGATION)) { left.push({ section: ShortcutSection.NAVIGATION, shortcuts: directory.get(ShortcutSection.NAVIGATION), }); }
shortcuts: directory.get(ShortcutSection.ACTIONS), }); } if (directory.has(ShortcutSection.REPLY_DIALOG)) { right.push({ section: ShortcutSection.REPLY_DIALOG, shortcuts: directory.get(ShortcutSection.REPLY_DIALOG), }); } if (directory.has(ShortcutSection.FILE_LIST)) { right.push({ section: ShortcutSection.FILE_LIST, shortcuts: directory.get(ShortcutSection.FILE_LIST), }); } if (directory.has(ShortcutSection.DIFFS)) { right.push({ section: ShortcutSection.DIFFS, shortcuts: directory.get(ShortcutSection.DIFFS), }); } this._right = right; this._left = left; } }
if (directory.has(ShortcutSection.ACTIONS)) { right.push({ section: ShortcutSection.ACTIONS,
random_line_split
gr-keyboard-shortcuts-dialog.ts
/** * @license * Copyright (C) 2016 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import '../../shared/gr-button/gr-button'; import '../gr-key-binding-display/gr-key-binding-display'; import {sharedStyles} from '../../../styles/shared-styles'; import {fontStyles} from '../../../styles/gr-font-styles'; import {LitElement, css, html} from 'lit'; import {customElement, property} from 'lit/decorators'; import { ShortcutSection, SectionView, } from '../../../mixins/keyboard-shortcut-mixin/keyboard-shortcut-mixin'; import {getAppContext} from '../../../services/app-context'; import {ShortcutViewListener} from '../../../services/shortcuts/shortcuts-service'; declare global { interface HTMLElementTagNameMap { 'gr-keyboard-shortcuts-dialog': GrKeyboardShortcutsDialog; } } interface SectionShortcut { section: ShortcutSection; shortcuts?: SectionView; } @customElement('gr-keyboard-shortcuts-dialog') export class GrKeyboardShortcutsDialog extends LitElement { /** * Fired when the user presses the close button. * * @event close */ @property({type: Array}) _left?: SectionShortcut[]; @property({type: Array}) _right?: SectionShortcut[]; private readonly shortcutListener: ShortcutViewListener; private readonly shortcuts = getAppContext().shortcutsService; constructor() { super(); this.shortcutListener = (d?: Map<ShortcutSection, SectionView>) => this._onDirectoryUpdated(d); } static override get styles() { return [ sharedStyles, fontStyles, css` :host { display: block; max-height: 100vh; overflow-y: auto; } header { padding: var(--spacing-l); } main { display: flex; padding: 0 var(--spacing-xxl) var(--spacing-xxl); } .column { flex: 50%; } header { align-items: center; border-bottom: 1px solid var(--border-color); display: flex; justify-content: space-between; } table caption { font-weight: var(--font-weight-bold); padding-top: var(--spacing-l); text-align: left; } tr { height: 32px; } td { padding: var(--spacing-xs) 0; } td:first-child, th:first-child { padding-right: var(--spacing-m); text-align: right; width: 160px; color: var(--deemphasized-text-color); } td:second-child { min-width: 200px; } th { color: var(--deemphasized-text-color); text-align: left; } .header { font-weight: var(--font-weight-bold); padding-top: var(--spacing-l); } .modifier { font-weight: var(--font-weight-normal); } `, ]; } override render() { return html`<header> <h3 class="heading-3">Keyboard shortcuts</h3> <gr-button link="" @click=${this.handleCloseTap}>Close</gr-button> </header> <main> <div class="column"> ${this._left?.map(section => this.renderSection(section))} </div> <div class="column"> ${this._right?.map(section => this.renderSection(section))} </div> </main> <footer></footer>`; } private renderSection(section: SectionShortcut) { return html`<table> <caption> ${section.section} </caption> <thead> <tr> <th>Key</th> <th>Action</th> </tr> </thead> <tbody> ${section.shortcuts?.map( shortcut => html`<tr> <td> <gr-key-binding-display .binding=${shortcut.binding}> </gr-key-binding-display> </td> 
<td>${shortcut.text}</td> </tr>` )} </tbody> </table>`; } override connectedCallback() { super.connectedCallback(); this.shortcuts.addListener(this.shortcutListener); } override disconnectedCallback() { this.shortcuts.removeListener(this.shortcutListener); super.disconnectedCallback(); } private handleCloseTap(e: MouseEvent) { e.preventDefault(); e.stopPropagation(); this.dispatchEvent( new CustomEvent('close', { composed: true, bubbles: false, }) ); } _onDirectoryUpdated(directory?: Map<ShortcutSection, SectionView>) { if (!directory) { return; } const left = [] as SectionShortcut[]; const right = [] as SectionShortcut[]; if (directory.has(ShortcutSection.EVERYWHERE)) { left.push({ section: ShortcutSection.EVERYWHERE, shortcuts: directory.get(ShortcutSection.EVERYWHERE), }); } if (directory.has(ShortcutSection.NAVIGATION)) { left.push({ section: ShortcutSection.NAVIGATION, shortcuts: directory.get(ShortcutSection.NAVIGATION), }); } if (directory.has(ShortcutSection.ACTIONS)) { right.push({ section: ShortcutSection.ACTIONS, shortcuts: directory.get(ShortcutSection.ACTIONS), }); } if (directory.has(ShortcutSection.REPLY_DIALOG)) { right.push({ section: ShortcutSection.REPLY_DIALOG, shortcuts: directory.get(ShortcutSection.REPLY_DIALOG), }); } if (directory.has(ShortcutSection.FILE_LIST)) { right.push({ section: ShortcutSection.FILE_LIST, shortcuts: directory.get(ShortcutSection.FILE_LIST), }); } if (directory.has(ShortcutSection.DIFFS))
this._right = right; this._left = left; } }
{ right.push({ section: ShortcutSection.DIFFS, shortcuts: directory.get(ShortcutSection.DIFFS), }); }
conditional_block
gr-keyboard-shortcuts-dialog.ts
/** * @license * Copyright (C) 2016 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import '../../shared/gr-button/gr-button'; import '../gr-key-binding-display/gr-key-binding-display'; import {sharedStyles} from '../../../styles/shared-styles'; import {fontStyles} from '../../../styles/gr-font-styles'; import {LitElement, css, html} from 'lit'; import {customElement, property} from 'lit/decorators'; import { ShortcutSection, SectionView, } from '../../../mixins/keyboard-shortcut-mixin/keyboard-shortcut-mixin'; import {getAppContext} from '../../../services/app-context'; import {ShortcutViewListener} from '../../../services/shortcuts/shortcuts-service'; declare global { interface HTMLElementTagNameMap { 'gr-keyboard-shortcuts-dialog': GrKeyboardShortcutsDialog; } } interface SectionShortcut { section: ShortcutSection; shortcuts?: SectionView; } @customElement('gr-keyboard-shortcuts-dialog') export class GrKeyboardShortcutsDialog extends LitElement { /** * Fired when the user presses the close button. * * @event close */ @property({type: Array}) _left?: SectionShortcut[]; @property({type: Array}) _right?: SectionShortcut[]; private readonly shortcutListener: ShortcutViewListener; private readonly shortcuts = getAppContext().shortcutsService; constructor() { super(); this.shortcutListener = (d?: Map<ShortcutSection, SectionView>) => this._onDirectoryUpdated(d); } static override get styles() { return [ sharedStyles, fontStyles, css` :host { display: block; max-height: 100vh; overflow-y: auto; } header { padding: var(--spacing-l); } main { display: flex; padding: 0 var(--spacing-xxl) var(--spacing-xxl); } .column { flex: 50%; } header { align-items: center; border-bottom: 1px solid var(--border-color); display: flex; justify-content: space-between; } table caption { font-weight: var(--font-weight-bold); padding-top: var(--spacing-l); text-align: left; } tr { height: 32px; } td { padding: var(--spacing-xs) 0; } td:first-child, th:first-child { padding-right: var(--spacing-m); text-align: right; width: 160px; color: var(--deemphasized-text-color); } td:second-child { min-width: 200px; } th { color: var(--deemphasized-text-color); text-align: left; } .header { font-weight: var(--font-weight-bold); padding-top: var(--spacing-l); } .modifier { font-weight: var(--font-weight-normal); } `, ]; } override render() { return html`<header> <h3 class="heading-3">Keyboard shortcuts</h3> <gr-button link="" @click=${this.handleCloseTap}>Close</gr-button> </header> <main> <div class="column"> ${this._left?.map(section => this.renderSection(section))} </div> <div class="column"> ${this._right?.map(section => this.renderSection(section))} </div> </main> <footer></footer>`; } private renderSection(section: SectionShortcut) { return html`<table> <caption> ${section.section} </caption> <thead> <tr> <th>Key</th> <th>Action</th> </tr> </thead> <tbody> ${section.shortcuts?.map( shortcut => html`<tr> <td> <gr-key-binding-display .binding=${shortcut.binding}> </gr-key-binding-display> </td> 
<td>${shortcut.text}</td> </tr>` )} </tbody> </table>`; } override
() { super.connectedCallback(); this.shortcuts.addListener(this.shortcutListener); } override disconnectedCallback() { this.shortcuts.removeListener(this.shortcutListener); super.disconnectedCallback(); } private handleCloseTap(e: MouseEvent) { e.preventDefault(); e.stopPropagation(); this.dispatchEvent( new CustomEvent('close', { composed: true, bubbles: false, }) ); } _onDirectoryUpdated(directory?: Map<ShortcutSection, SectionView>) { if (!directory) { return; } const left = [] as SectionShortcut[]; const right = [] as SectionShortcut[]; if (directory.has(ShortcutSection.EVERYWHERE)) { left.push({ section: ShortcutSection.EVERYWHERE, shortcuts: directory.get(ShortcutSection.EVERYWHERE), }); } if (directory.has(ShortcutSection.NAVIGATION)) { left.push({ section: ShortcutSection.NAVIGATION, shortcuts: directory.get(ShortcutSection.NAVIGATION), }); } if (directory.has(ShortcutSection.ACTIONS)) { right.push({ section: ShortcutSection.ACTIONS, shortcuts: directory.get(ShortcutSection.ACTIONS), }); } if (directory.has(ShortcutSection.REPLY_DIALOG)) { right.push({ section: ShortcutSection.REPLY_DIALOG, shortcuts: directory.get(ShortcutSection.REPLY_DIALOG), }); } if (directory.has(ShortcutSection.FILE_LIST)) { right.push({ section: ShortcutSection.FILE_LIST, shortcuts: directory.get(ShortcutSection.FILE_LIST), }); } if (directory.has(ShortcutSection.DIFFS)) { right.push({ section: ShortcutSection.DIFFS, shortcuts: directory.get(ShortcutSection.DIFFS), }); } this._right = right; this._left = left; } }
connectedCallback
identifier_name
gr-keyboard-shortcuts-dialog.ts
/** * @license * Copyright (C) 2016 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import '../../shared/gr-button/gr-button'; import '../gr-key-binding-display/gr-key-binding-display'; import {sharedStyles} from '../../../styles/shared-styles'; import {fontStyles} from '../../../styles/gr-font-styles'; import {LitElement, css, html} from 'lit'; import {customElement, property} from 'lit/decorators'; import { ShortcutSection, SectionView, } from '../../../mixins/keyboard-shortcut-mixin/keyboard-shortcut-mixin'; import {getAppContext} from '../../../services/app-context'; import {ShortcutViewListener} from '../../../services/shortcuts/shortcuts-service'; declare global { interface HTMLElementTagNameMap { 'gr-keyboard-shortcuts-dialog': GrKeyboardShortcutsDialog; } } interface SectionShortcut { section: ShortcutSection; shortcuts?: SectionView; } @customElement('gr-keyboard-shortcuts-dialog') export class GrKeyboardShortcutsDialog extends LitElement { /** * Fired when the user presses the close button. * * @event close */ @property({type: Array}) _left?: SectionShortcut[]; @property({type: Array}) _right?: SectionShortcut[]; private readonly shortcutListener: ShortcutViewListener; private readonly shortcuts = getAppContext().shortcutsService; constructor() { super(); this.shortcutListener = (d?: Map<ShortcutSection, SectionView>) => this._onDirectoryUpdated(d); } static override get styles() { return [ sharedStyles, fontStyles, css` :host { display: block; max-height: 100vh; overflow-y: auto; } header { padding: var(--spacing-l); } main { display: flex; padding: 0 var(--spacing-xxl) var(--spacing-xxl); } .column { flex: 50%; } header { align-items: center; border-bottom: 1px solid var(--border-color); display: flex; justify-content: space-between; } table caption { font-weight: var(--font-weight-bold); padding-top: var(--spacing-l); text-align: left; } tr { height: 32px; } td { padding: var(--spacing-xs) 0; } td:first-child, th:first-child { padding-right: var(--spacing-m); text-align: right; width: 160px; color: var(--deemphasized-text-color); } td:second-child { min-width: 200px; } th { color: var(--deemphasized-text-color); text-align: left; } .header { font-weight: var(--font-weight-bold); padding-top: var(--spacing-l); } .modifier { font-weight: var(--font-weight-normal); } `, ]; } override render() { return html`<header> <h3 class="heading-3">Keyboard shortcuts</h3> <gr-button link="" @click=${this.handleCloseTap}>Close</gr-button> </header> <main> <div class="column"> ${this._left?.map(section => this.renderSection(section))} </div> <div class="column"> ${this._right?.map(section => this.renderSection(section))} </div> </main> <footer></footer>`; } private renderSection(section: SectionShortcut) { return html`<table> <caption> ${section.section} </caption> <thead> <tr> <th>Key</th> <th>Action</th> </tr> </thead> <tbody> ${section.shortcuts?.map( shortcut => html`<tr> <td> <gr-key-binding-display .binding=${shortcut.binding}> </gr-key-binding-display> </td> 
<td>${shortcut.text}</td> </tr>` )} </tbody> </table>`; } override connectedCallback() { super.connectedCallback(); this.shortcuts.addListener(this.shortcutListener); } override disconnectedCallback() { this.shortcuts.removeListener(this.shortcutListener); super.disconnectedCallback(); } private handleCloseTap(e: MouseEvent)
_onDirectoryUpdated(directory?: Map<ShortcutSection, SectionView>) { if (!directory) { return; } const left = [] as SectionShortcut[]; const right = [] as SectionShortcut[]; if (directory.has(ShortcutSection.EVERYWHERE)) { left.push({ section: ShortcutSection.EVERYWHERE, shortcuts: directory.get(ShortcutSection.EVERYWHERE), }); } if (directory.has(ShortcutSection.NAVIGATION)) { left.push({ section: ShortcutSection.NAVIGATION, shortcuts: directory.get(ShortcutSection.NAVIGATION), }); } if (directory.has(ShortcutSection.ACTIONS)) { right.push({ section: ShortcutSection.ACTIONS, shortcuts: directory.get(ShortcutSection.ACTIONS), }); } if (directory.has(ShortcutSection.REPLY_DIALOG)) { right.push({ section: ShortcutSection.REPLY_DIALOG, shortcuts: directory.get(ShortcutSection.REPLY_DIALOG), }); } if (directory.has(ShortcutSection.FILE_LIST)) { right.push({ section: ShortcutSection.FILE_LIST, shortcuts: directory.get(ShortcutSection.FILE_LIST), }); } if (directory.has(ShortcutSection.DIFFS)) { right.push({ section: ShortcutSection.DIFFS, shortcuts: directory.get(ShortcutSection.DIFFS), }); } this._right = right; this._left = left; } }
{ e.preventDefault(); e.stopPropagation(); this.dispatchEvent( new CustomEvent('close', { composed: true, bubbles: false, }) ); }
identifier_body
menu.js
/* Author: mg12 Update: 2009/08/07 Author URI: http://www.neoease.com/ */ (function() { var Class = { create: function() { return function() { this.initialize.apply(this, arguments); } } } var GhostlyMenu = Class.create(); GhostlyMenu.prototype = { initialize: function(target, align, sub) { this.obj = cleanWhitespace(target); this.align = align || 'left'; this.sub = sub || -1; this.menu = this.obj.childNodes; if (this.menu.length < 2) { return; } this.title = this.menu[0]; this.body = this.menu[1]; cleanWhitespace(this.body).lastChild.getElementsByTagName('a')[0].className += ' last'; setStyle(this.body, 'visibility', 'hidden'); setStyle(this.body, 'display', 'block'); addListener(this.obj, 'mouseover', bind(this, this.activate), false); addListener(this.obj, 'mouseout', bind(this, this.deactivate), false); }, activate: function() { if(this.sub == 1) { var pos = currentOffset(this.title); var top = pos[1] - 1; var left = getWidth(this.body) - 2; if (this.align == 'right') { var left = getWidth(this.body) * (-1); } } else { var pos = cumulativeOffset(this.title); var top = pos[1] + getHeight(this.title); var left = pos[0]; if (this.align == 'right') { left += getWidth(this.title) - getWidth(this.body); } } if(!/current/.test(this.title.className)) { this.title.className += ' current'; } setStyle(this.body, 'left', left + 'px'); setStyle(this.body, 'top', top + 'px'); setStyle(this.body, 'visibility', 'visible'); }, deactivate: function(){ this.title.className = this.title.className.replace('current', '');
var tid = setInterval( function() { clearInterval(tid); if (!/current/.test(thismenu.title.className)) { setStyle(thismenu.body, 'visibility', 'hidden'); } return false; }, 400); } } $A = function(iterable) { if(!iterable) { return []; } if(iterable.toArray) { return iterable.toArray(); } else { var results = []; for(var i = 0; i < iterable.length; i++) { results.push(iterable[i]); } return results; } } bind = function() { var array = this.$A(arguments); var func = array[array.length - 1]; var method = func, args = array, object = args.shift(); return function() { return method.apply(object, args.concat(array)); } } getHeight = function(element) { return element.offsetHeight; } getWidth = function(element) { return element.offsetWidth; } setStyle = function(element, key, value) { element.style[key] = value; } cleanWhitespace = function(list) { var node = list.firstChild; while (node) { var nextNode = node.nextSibling; if(node.nodeType == 3 && !/\S/.test(node.nodeValue)) { list.removeChild(node); } node = nextNode; } return list; } currentOffset = function(element) { var valueT = element.offsetTop || 0; var valueL = element.offsetLeft || 0; return [valueL, valueT]; } cumulativeOffset = function(element) { var valueT = 0, valueL = 0; do { valueT += element.offsetTop || 0; valueL += element.offsetLeft || 0; element = element.offsetParent; } while (element); return [valueL, valueT]; } addListener = function(element, name, observer, useCapture) { if(element.addEventListener) { element.addEventListener(name, observer, useCapture); } else if(element.attachEvent) { element.attachEvent('on' + name, observer); } } function loadMenus() { var align = 'left'; for(var i = 0; (a = document.getElementsByTagName('link')[i]); i++) { if((a.getAttribute('rel') == 'stylesheet') && (a.getAttribute('href').indexOf('rtl.css') != -1)) { align = 'right'; } } var subscribe = document.getElementById('subscribe'); if (subscribe) { new GhostlyMenu(subscribe, align); } var menubar = document.getElementById('menus'); if (menubar) { var list = menubar.getElementsByTagName('ul'); for (var i = 0; i < list.length; i++) { var menu = list[i].parentNode; if(menu.parentNode === menubar) { new GhostlyMenu(menu, align); } else { new GhostlyMenu(menu, align, 1); menu.firstChild.className += ' subtitle'; } } } } if (document.addEventListener) { document.addEventListener("DOMContentLoaded", loadMenus, false); } else if (/MSIE/i.test(navigator.userAgent)) { document.write('<script id="__ie_onload_for_Julie" defer src="javascript:void(0)"></script>'); var script = document.getElementById('__ie_onload_for_Julie'); script.onreadystatechange = function() { if (this.readyState == 'complete') { loadMenus(); } } } else if (/WebKit/i.test(navigator.userAgent)) { var _timer = setInterval( function() { if (/loaded|complete/.test(document.readyState)) { clearInterval(_timer); loadMenus(); } }, 10); } else { window.onload = function(e) { loadMenus(); } } })();
var thismenu = this;
random_line_split
menu.js
/* Author: mg12 Update: 2009/08/07 Author URI: http://www.neoease.com/ */ (function() { var Class = { create: function() { return function() { this.initialize.apply(this, arguments); } } } var GhostlyMenu = Class.create(); GhostlyMenu.prototype = { initialize: function(target, align, sub) { this.obj = cleanWhitespace(target); this.align = align || 'left'; this.sub = sub || -1; this.menu = this.obj.childNodes; if (this.menu.length < 2) { return; } this.title = this.menu[0]; this.body = this.menu[1]; cleanWhitespace(this.body).lastChild.getElementsByTagName('a')[0].className += ' last'; setStyle(this.body, 'visibility', 'hidden'); setStyle(this.body, 'display', 'block'); addListener(this.obj, 'mouseover', bind(this, this.activate), false); addListener(this.obj, 'mouseout', bind(this, this.deactivate), false); }, activate: function() { if(this.sub == 1) { var pos = currentOffset(this.title); var top = pos[1] - 1; var left = getWidth(this.body) - 2; if (this.align == 'right') { var left = getWidth(this.body) * (-1); } } else { var pos = cumulativeOffset(this.title); var top = pos[1] + getHeight(this.title); var left = pos[0]; if (this.align == 'right') { left += getWidth(this.title) - getWidth(this.body); } } if(!/current/.test(this.title.className)) { this.title.className += ' current'; } setStyle(this.body, 'left', left + 'px'); setStyle(this.body, 'top', top + 'px'); setStyle(this.body, 'visibility', 'visible'); }, deactivate: function(){ this.title.className = this.title.className.replace('current', ''); var thismenu = this; var tid = setInterval( function() { clearInterval(tid); if (!/current/.test(thismenu.title.className)) { setStyle(thismenu.body, 'visibility', 'hidden'); } return false; }, 400); } } $A = function(iterable) { if(!iterable) { return []; } if(iterable.toArray) { return iterable.toArray(); } else { var results = []; for(var i = 0; i < iterable.length; i++) { results.push(iterable[i]); } return results; } } bind = function() { var array = this.$A(arguments); var func = array[array.length - 1]; var method = func, args = array, object = args.shift(); return function() { return method.apply(object, args.concat(array)); } } getHeight = function(element) { return element.offsetHeight; } getWidth = function(element) { return element.offsetWidth; } setStyle = function(element, key, value) { element.style[key] = value; } cleanWhitespace = function(list) { var node = list.firstChild; while (node) { var nextNode = node.nextSibling; if(node.nodeType == 3 && !/\S/.test(node.nodeValue)) { list.removeChild(node); } node = nextNode; } return list; } currentOffset = function(element) { var valueT = element.offsetTop || 0; var valueL = element.offsetLeft || 0; return [valueL, valueT]; } cumulativeOffset = function(element) { var valueT = 0, valueL = 0; do { valueT += element.offsetTop || 0; valueL += element.offsetLeft || 0; element = element.offsetParent; } while (element); return [valueL, valueT]; } addListener = function(element, name, observer, useCapture) { if(element.addEventListener) { element.addEventListener(name, observer, useCapture); } else if(element.attachEvent) { element.attachEvent('on' + name, observer); } } function loadMenus()
} else { new GhostlyMenu(menu, align, 1); menu.firstChild.className += ' subtitle'; } } } } if (document.addEventListener) { document.addEventListener("DOMContentLoaded", loadMenus, false); } else if (/MSIE/i.test(navigator.userAgent)) { document.write('<script id="__ie_onload_for_Julie" defer src="javascript:void(0)"></script>'); var script = document.getElementById('__ie_onload_for_Julie'); script.onreadystatechange = function() { if (this.readyState == 'complete') { loadMenus(); } } } else if (/WebKit/i.test(navigator.userAgent)) { var _timer = setInterval( function() { if (/loaded|complete/.test(document.readyState)) { clearInterval(_timer); loadMenus(); } }, 10); } else { window.onload = function(e) { loadMenus(); } } })();
{ var align = 'left'; for(var i = 0; (a = document.getElementsByTagName('link')[i]); i++) { if((a.getAttribute('rel') == 'stylesheet') && (a.getAttribute('href').indexOf('rtl.css') != -1)) { align = 'right'; } } var subscribe = document.getElementById('subscribe'); if (subscribe) { new GhostlyMenu(subscribe, align); } var menubar = document.getElementById('menus'); if (menubar) { var list = menubar.getElementsByTagName('ul'); for (var i = 0; i < list.length; i++) { var menu = list[i].parentNode; if(menu.parentNode === menubar) { new GhostlyMenu(menu, align);
identifier_body
menu.js
/* Author: mg12 Update: 2009/08/07 Author URI: http://www.neoease.com/ */ (function() { var Class = { create: function() { return function() { this.initialize.apply(this, arguments); } } } var GhostlyMenu = Class.create(); GhostlyMenu.prototype = { initialize: function(target, align, sub) { this.obj = cleanWhitespace(target); this.align = align || 'left'; this.sub = sub || -1; this.menu = this.obj.childNodes; if (this.menu.length < 2) { return; } this.title = this.menu[0]; this.body = this.menu[1]; cleanWhitespace(this.body).lastChild.getElementsByTagName('a')[0].className += ' last'; setStyle(this.body, 'visibility', 'hidden'); setStyle(this.body, 'display', 'block'); addListener(this.obj, 'mouseover', bind(this, this.activate), false); addListener(this.obj, 'mouseout', bind(this, this.deactivate), false); }, activate: function() { if(this.sub == 1) { var pos = currentOffset(this.title); var top = pos[1] - 1; var left = getWidth(this.body) - 2; if (this.align == 'right') { var left = getWidth(this.body) * (-1); } } else { var pos = cumulativeOffset(this.title); var top = pos[1] + getHeight(this.title); var left = pos[0]; if (this.align == 'right') { left += getWidth(this.title) - getWidth(this.body); } } if(!/current/.test(this.title.className)) { this.title.className += ' current'; } setStyle(this.body, 'left', left + 'px'); setStyle(this.body, 'top', top + 'px'); setStyle(this.body, 'visibility', 'visible'); }, deactivate: function(){ this.title.className = this.title.className.replace('current', ''); var thismenu = this; var tid = setInterval( function() { clearInterval(tid); if (!/current/.test(thismenu.title.className)) { setStyle(thismenu.body, 'visibility', 'hidden'); } return false; }, 400); } } $A = function(iterable) { if(!iterable) { return []; } if(iterable.toArray) { return iterable.toArray(); } else { var results = []; for(var i = 0; i < iterable.length; i++) { results.push(iterable[i]); } return results; } } bind = function() { var array = this.$A(arguments); var func = array[array.length - 1]; var method = func, args = array, object = args.shift(); return function() { return method.apply(object, args.concat(array)); } } getHeight = function(element) { return element.offsetHeight; } getWidth = function(element) { return element.offsetWidth; } setStyle = function(element, key, value) { element.style[key] = value; } cleanWhitespace = function(list) { var node = list.firstChild; while (node) { var nextNode = node.nextSibling; if(node.nodeType == 3 && !/\S/.test(node.nodeValue)) { list.removeChild(node); } node = nextNode; } return list; } currentOffset = function(element) { var valueT = element.offsetTop || 0; var valueL = element.offsetLeft || 0; return [valueL, valueT]; } cumulativeOffset = function(element) { var valueT = 0, valueL = 0; do { valueT += element.offsetTop || 0; valueL += element.offsetLeft || 0; element = element.offsetParent; } while (element); return [valueL, valueT]; } addListener = function(element, name, observer, useCapture) { if(element.addEventListener) { element.addEventListener(name, observer, useCapture); } else if(element.attachEvent) { element.attachEvent('on' + name, observer); } } function
() { var align = 'left'; for(var i = 0; (a = document.getElementsByTagName('link')[i]); i++) { if((a.getAttribute('rel') == 'stylesheet') && (a.getAttribute('href').indexOf('rtl.css') != -1)) { align = 'right'; } } var subscribe = document.getElementById('subscribe'); if (subscribe) { new GhostlyMenu(subscribe, align); } var menubar = document.getElementById('menus'); if (menubar) { var list = menubar.getElementsByTagName('ul'); for (var i = 0; i < list.length; i++) { var menu = list[i].parentNode; if(menu.parentNode === menubar) { new GhostlyMenu(menu, align); } else { new GhostlyMenu(menu, align, 1); menu.firstChild.className += ' subtitle'; } } } } if (document.addEventListener) { document.addEventListener("DOMContentLoaded", loadMenus, false); } else if (/MSIE/i.test(navigator.userAgent)) { document.write('<script id="__ie_onload_for_Julie" defer src="javascript:void(0)"></script>'); var script = document.getElementById('__ie_onload_for_Julie'); script.onreadystatechange = function() { if (this.readyState == 'complete') { loadMenus(); } } } else if (/WebKit/i.test(navigator.userAgent)) { var _timer = setInterval( function() { if (/loaded|complete/.test(document.readyState)) { clearInterval(_timer); loadMenus(); } }, 10); } else { window.onload = function(e) { loadMenus(); } } })();
loadMenus
identifier_name
meus_marcados-service.ts
import { MappingsService } from './_mappings-service';
import { VitrineVO } from './../../model/vitrineVO';
import { FirebaseService } from './../database/firebase-service';
import { Injectable } from '@angular/core';

@Injectable()
export class MeusMarcadosService {

  private meusMarcadosRef: any;

  constructor(private fbService: FirebaseService,
    private mapSrv: MappingsService)
  public getMeusMarcadosRef() {
    return this.meusMarcadosRef;
  }

  public getMeusMarcadosPorUsuario(uidUsuario: string) {
    return this.meusMarcadosRef.child(uidUsuario).orderByKey().once('value');
  }

  public salvar(uidUsuario: string, vitrine: VitrineVO) {
    var newKey = this.meusMarcadosRef.child(uidUsuario).push().key
    this.meusMarcadosRef.child(uidUsuario).child(newKey).set(vitrine);
  }

  public excluir(uidUsuario: string, uidMinhaVitrine: string) {
    let self = this;
    var promise = new Promise(function (resolve, reject) {
      self.meusMarcadosRef.child(uidUsuario).child(uidMinhaVitrine).remove()
        .then(() => {
          resolve(true);
        }).catch(() => {
          reject(false);
        });
    });
    return promise;
  }

  // public pesquisaPorUidVitrine(uidUsuario: string, uidVitrine: string) {
  //   return this.minhaVitrineRef.child(uidUsuario).child(uidVitrine).once('value');
  // }

  public pesquisaPorUidVitrine(uidUsuario: string, uidVitrine: string) {
    return this.meusMarcadosRef.child(uidUsuario).orderByChild('vitr_sq_id').equalTo(uidVitrine).once('value');
  }
}
{
    this.meusMarcadosRef = this.fbService.getDataBase().ref('minhavitrine');
  }
identifier_body
meus_marcados-service.ts
import { MappingsService } from './_mappings-service';
import { VitrineVO } from './../../model/vitrineVO';
import { FirebaseService } from './../database/firebase-service';
import { Injectable } from '@angular/core';

@Injectable()
export class MeusMarcadosService {

  private meusMarcadosRef: any;

  constructor(private fbService: FirebaseService,
}

  public getMeusMarcadosRef() {
    return this.meusMarcadosRef;
  }

  public getMeusMarcadosPorUsuario(uidUsuario: string) {
    return this.meusMarcadosRef.child(uidUsuario).orderByKey().once('value');
  }

  public salvar(uidUsuario: string, vitrine: VitrineVO) {
    var newKey = this.meusMarcadosRef.child(uidUsuario).push().key
    this.meusMarcadosRef.child(uidUsuario).child(newKey).set(vitrine);
  }

  public excluir(uidUsuario: string, uidMinhaVitrine: string) {
    let self = this;
    var promise = new Promise(function (resolve, reject) {
      self.meusMarcadosRef.child(uidUsuario).child(uidMinhaVitrine).remove()
        .then(() => {
          resolve(true);
        }).catch(() => {
          reject(false);
        });
    });
    return promise;
  }

  // public pesquisaPorUidVitrine(uidUsuario: string, uidVitrine: string) {
  //   return this.minhaVitrineRef.child(uidUsuario).child(uidVitrine).once('value');
  // }

  public pesquisaPorUidVitrine(uidUsuario: string, uidVitrine: string) {
    return this.meusMarcadosRef.child(uidUsuario).orderByChild('vitr_sq_id').equalTo(uidVitrine).once('value');
  }
}
private mapSrv: MappingsService) {
    this.meusMarcadosRef = this.fbService.getDataBase().ref('minhavitrine');
random_line_split
meus_marcados-service.ts
import { MappingsService } from './_mappings-service';
import { VitrineVO } from './../../model/vitrineVO';
import { FirebaseService } from './../database/firebase-service';
import { Injectable } from '@angular/core';

@Injectable()
export class MeusMarcadosService {

  private meusMarcadosRef: any;

  constructor(private fbService: FirebaseService,
    private mapSrv: MappingsService) {
    this.meusMarcadosRef = this.fbService.getDataBase().ref('minhavitrine');
  }

  public
() {
    return this.meusMarcadosRef;
  }

  public getMeusMarcadosPorUsuario(uidUsuario: string) {
    return this.meusMarcadosRef.child(uidUsuario).orderByKey().once('value');
  }

  public salvar(uidUsuario: string, vitrine: VitrineVO) {
    var newKey = this.meusMarcadosRef.child(uidUsuario).push().key
    this.meusMarcadosRef.child(uidUsuario).child(newKey).set(vitrine);
  }

  public excluir(uidUsuario: string, uidMinhaVitrine: string) {
    let self = this;
    var promise = new Promise(function (resolve, reject) {
      self.meusMarcadosRef.child(uidUsuario).child(uidMinhaVitrine).remove()
        .then(() => {
          resolve(true);
        }).catch(() => {
          reject(false);
        });
    });
    return promise;
  }

  // public pesquisaPorUidVitrine(uidUsuario: string, uidVitrine: string) {
  //   return this.minhaVitrineRef.child(uidUsuario).child(uidVitrine).once('value');
  // }

  public pesquisaPorUidVitrine(uidUsuario: string, uidVitrine: string) {
    return this.meusMarcadosRef.child(uidUsuario).orderByChild('vitr_sq_id').equalTo(uidVitrine).once('value');
  }
}
getMeusMarcadosRef
identifier_name
base.py
# -*- coding: utf-8 -*-
import re
from math import modf
from datetime import datetime, timedelta

## {{{ http://code.activestate.com/recipes/65215/ (r5)
EMAIL_PATTERN = re.compile('^.+\\@(\\[?)[a-zA-Z0-9\\-\\.]' \
    '+\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\]?)$')


def to_unicode(text):
    '''
    Converts an input text to a unicode object.
    @param text:object Input text
    @returns:unicode
    '''
    return text.decode("UTF-8") if type(text) == str else unicode(text)


def to_byte_string(text):
    '''
    Converts an input text to a byte string.
    @param text:object Input text
    @returns:str
    '''
    return text.encode("UTF-8") if type(text) == unicode else str(text)


def is_valid_email(s):
    '''
    Returns a value indicating whether the submitted string
    is a valid email address.
    @param s:str Email
    @return: bool
    '''
    return (s and len(s) > 7 and EMAIL_PATTERN.match(s))


def timestamp_to_datetime(s):
    '''
    Parses a timestamp to a datetime instance.
    @param s:str Timestamp string.
    @return: datetime
    '''
    f, i = modf(long(s) / float(1000))
    return datetime.fromtimestamp(i) + timedelta(milliseconds=f * 1000)


def datetime_to_timestamp(d):
    '''
    Converts a datetime instance into a timestamp string.
    @param d:datetime Date instance
    @return:long
    '''
    return long(d.strftime("%s") + "%03d" % (d.time().microsecond / 1000))


def extract_id_from_uri(s):
    '''
    Returns the ID section of a URI.
    @param s:str URI
    @return: str
    '''
    return [ item for item in s.split("/") if item ][-1]


def
(data):
    '''
    Gets the size in bytes of a str.
    @return: long
    '''
    return len(data)
size_in_bytes
identifier_name
base.py
# -*- coding: utf-8 -*-
import re
from math import modf
from datetime import datetime, timedelta

## {{{ http://code.activestate.com/recipes/65215/ (r5)
EMAIL_PATTERN = re.compile('^.+\\@(\\[?)[a-zA-Z0-9\\-\\.]' \
    '+\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\]?)$')


def to_unicode(text):
    '''
    Converts an input text to a unicode object.
    @param text:object Input text
    @returns:unicode
    '''
    return text.decode("UTF-8") if type(text) == str else unicode(text)


def to_byte_string(text):
    '''
    Converts an input text to a byte string.
    @param text:object Input text
    @returns:str
    '''
    return text.encode("UTF-8") if type(text) == unicode else str(text)


def is_valid_email(s):
    '''
    Returns a value indicating whether the submitted string
    is a valid email address.
    @param s:str Email
    @return: bool
    '''
    return (s and len(s) > 7 and EMAIL_PATTERN.match(s))


def timestamp_to_datetime(s):
    '''
    Parses a timestamp to a datetime instance.
    @param s:str Timestamp string.
    @return: datetime
    '''
    f, i = modf(long(s) / float(1000))
    return datetime.fromtimestamp(i) + timedelta(milliseconds=f * 1000)


def datetime_to_timestamp(d):
def extract_id_from_uri(s):
    '''
    Returns the ID section of a URI.
    @param s:str URI
    @return: str
    '''
    return [ item for item in s.split("/") if item ][-1]


def size_in_bytes(data):
    '''
    Gets the size in bytes of a str.
    @return: long
    '''
    return len(data)
    '''
    Converts a datetime instance into a timestamp string.
    @param d:datetime Date instance
    @return:long
    '''
    return long(d.strftime("%s") + "%03d" % (d.time().microsecond / 1000))
identifier_body
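# --- Illustrative aside (not part of the dataset records above) ---------------
# A quick round-trip of the two timestamp helpers defined in the base.py
# records: datetime_to_timestamp() appends zero-padded milliseconds to the
# epoch seconds, and timestamp_to_datetime() uses modf() to split them back
# out. The module name `base` is an assumption, and Python 2 semantics are
# assumed throughout, since the helpers rely on the py2-only `long`/`unicode`
# builtins and the platform-specific strftime("%s").
from datetime import datetime

from base import datetime_to_timestamp, timestamp_to_datetime

d = datetime(2017, 5, 4, 12, 30, 15, 250000)
ts = datetime_to_timestamp(d)      # e.g. 1493901015250 (epoch seconds + "250")
back = timestamp_to_datetime(ts)   # modf() recovers the 0.250 s fraction
assert back.microsecond == 250000  # sub-second part survives the round trip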
base.py
# -*- coding: utf-8 -*-
import re
from math import modf
from datetime import datetime, timedelta

## {{{ http://code.activestate.com/recipes/65215/ (r5)
def to_unicode(text):
    '''
    Converts an input text to a unicode object.
    @param text:object Input text
    @returns:unicode
    '''
    return text.decode("UTF-8") if type(text) == str else unicode(text)


def to_byte_string(text):
    '''
    Converts an input text to a byte string.
    @param text:object Input text
    @returns:str
    '''
    return text.encode("UTF-8") if type(text) == unicode else str(text)


def is_valid_email(s):
    '''
    Returns a value indicating whether the submitted string
    is a valid email address.
    @param s:str Email
    @return: bool
    '''
    return (s and len(s) > 7 and EMAIL_PATTERN.match(s))


def timestamp_to_datetime(s):
    '''
    Parses a timestamp to a datetime instance.
    @param s:str Timestamp string.
    @return: datetime
    '''
    f, i = modf(long(s) / float(1000))
    return datetime.fromtimestamp(i) + timedelta(milliseconds=f * 1000)


def datetime_to_timestamp(d):
    '''
    Converts a datetime instance into a timestamp string.
    @param d:datetime Date instance
    @return:long
    '''
    return long(d.strftime("%s") + "%03d" % (d.time().microsecond / 1000))


def extract_id_from_uri(s):
    '''
    Returns the ID section of a URI.
    @param s:str URI
    @return: str
    '''
    return [ item for item in s.split("/") if item ][-1]


def size_in_bytes(data):
    '''
    Gets the size in bytes of a str.
    @return: long
    '''
    return len(data)
EMAIL_PATTERN = re.compile('^.+\\@(\\[?)[a-zA-Z0-9\\-\\.]' \
    '+\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\]?)$')
random_line_split
wsgi.py
""" WSGI config for influencetx project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ import os import sys from django.core.wsgi import get_wsgi_application # This allows easy placement of apps within the interior # influencetx directory. app_path = os.path.dirname(os.path.abspath(__file__)).replace('/config', '') sys.path.append(os.path.join(app_path, 'influencetx')) if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production': from raven.contrib.django.raven_compat.middleware.wsgi import Sentry # We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks # if running multiple sites in the same mod_wsgi process. To fix this, use # mod_wsgi daemon mode with each site in its own daemon process, or use # os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production" os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production") # This application object is used by any WSGI server configured to use this
#if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
# application = Sentry(application)

# Apply WSGI middleware here.
#from influencetx.wsgi import influencetx
#application = influencetx(application)
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
random_line_split
wsgi.py
""" WSGI config for influencetx project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ import os import sys from django.core.wsgi import get_wsgi_application # This allows easy placement of apps within the interior # influencetx directory. app_path = os.path.dirname(os.path.abspath(__file__)).replace('/config', '') sys.path.append(os.path.join(app_path, 'influencetx')) if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()

#if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
# application = Sentry(application)

# Apply WSGI middleware here.
#from influencetx.wsgi import influencetx
#application = influencetx(application)
    from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
conditional_block
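# --- Illustrative aside (not part of the dataset records above) ---------------
# The wsgi.py records leave "#application = influencetx(application)" commented
# out; this is only a generic PEP 3333-style sketch of the shape such a wrapper
# could take. The function and header names below are invented for illustration
# and do not come from the influencetx project.
def add_served_by_header(app):
    def middleware(environ, start_response):
        def custom_start_response(status, headers, exc_info=None):
            # Append one response header before delegating to the wrapped app.
            headers.append(('X-Served-By', 'influencetx'))
            return start_response(status, headers, exc_info)
        return app(environ, custom_start_response)
    return middleware

# Usage would mirror the commented-out hook in the records:
# application = add_served_by_header(application)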
seg_queue.rs
use std::sync::atomic::Ordering::{Acquire, Release, Relaxed};
use std::sync::atomic::{AtomicBool, AtomicUsize};
use std::{ptr, mem};
use std::cmp;
use std::cell::UnsafeCell;

use mem::epoch::{self, Atomic, Owned};

const SEG_SIZE: usize = 32;

/// A Michael-Scott queue that allocates "segments" (arrays of nodes)
/// for efficiency.
///
/// Usable with any number of producers and consumers.
pub struct SegQueue<T> {
    head: Atomic<Segment<T>>,
    tail: Atomic<Segment<T>>,
}

struct Segment<T> {
    low: AtomicUsize,
    data: [UnsafeCell<T>; SEG_SIZE],
    ready: [AtomicBool; SEG_SIZE],
    high: AtomicUsize,
    next: Atomic<Segment<T>>,
}

unsafe impl<T> Sync for Segment<T> {}

impl<T> Segment<T> {
    fn new() -> Segment<T> {
        Segment {
            data: unsafe { mem::uninitialized() },
            ready: unsafe { mem::transmute([0usize; SEG_SIZE]) },
            low: AtomicUsize::new(0),
            high: AtomicUsize::new(0),
            next: Atomic::null(),
        }
    }
}

impl<T> SegQueue<T> {
    /// Create a new, empty queue.
    pub fn new() -> SegQueue<T> {
        let q = SegQueue {
            head: Atomic::null(),
            tail: Atomic::null(),
        };
        let sentinel = Owned::new(Segment::new());
        let guard = epoch::pin();
        let sentinel = q.head.store_and_ref(sentinel, Relaxed, &guard);
        q.tail.store_shared(Some(sentinel), Relaxed);
        q
    }

    /// Add `t` to the back of the queue.
    pub fn push(&self, t: T) {
        let guard = epoch::pin();
        loop {
            let tail = self.tail.load(Acquire, &guard).unwrap();
            if tail.high.load(Relaxed) >= SEG_SIZE { continue }

            let i = tail.high.fetch_add(1, Relaxed);
            unsafe {
                if i < SEG_SIZE {
                    *(*tail).data.get_unchecked(i).get() = t;
                    tail.ready.get_unchecked(i).store(true, Release);

                    if i + 1 == SEG_SIZE {
                        let tail = tail.next.store_and_ref(Owned::new(Segment::new()), Release, &guard);
                        self.tail.store_shared(Some(tail), Release);
                    }

                    return
                }
            }
        }
    }

    /// Attempt to dequeue from the front.
    ///
    /// Returns `None` if the queue is observed to be empty.
    pub fn pop(&self) -> Option<T> {
        let guard = epoch::pin();
        loop {
            let head = self.head.load(Acquire, &guard).unwrap();
            loop {
                let low = head.low.load(Relaxed);
                if low >= cmp::min(head.high.load(Relaxed), SEG_SIZE) { break }
                if head.low.compare_and_swap(low, low+1, Relaxed) == low {
                    loop {
                        if unsafe { head.ready.get_unchecked(low).load(Acquire) } { break }
                    }
                    if low + 1 == SEG_SIZE {
                        loop {
                            if let Some(next) = head.next.load(Acquire, &guard) {
                                self.head.store_shared(Some(next), Release);
                                break
                            }
                        }
                    }
                    return Some(unsafe { ptr::read((*head).data.get_unchecked(low).get()) })
                }
            }
            if head.next.load(Relaxed, &guard).is_none() { return None }
        }
    }
}

#[cfg(test)]
mod test {
    const CONC_COUNT: i64 = 1000000;

    use std::io::stderr;
    use std::io::prelude::*;

    use mem::epoch;
    use scope;
    use super::*;

    #[test]
    fn smoke_queue() {
        let q: SegQueue<i64> = SegQueue::new();
    }

    #[test]
    fn push_pop_1() {
        let q: SegQueue<i64> = SegQueue::new();
        q.push(37);
        assert_eq!(q.pop(), Some(37));
    }

    #[test]
    fn push_pop_2() {
        let q: SegQueue<i64> = SegQueue::new();
        q.push(37);
        q.push(48);
        assert_eq!(q.pop(), Some(37));
        assert_eq!(q.pop(), Some(48));
    }

    #[test]
    fn push_pop_many_seq() {
        let q: SegQueue<i64> = SegQueue::new();
        for i in 0..200 {
            q.push(i)
        }
        writeln!(stderr(), "done pushing");
        for i in 0..200 {
            assert_eq!(q.pop(), Some(i));
        }
    }

    #[test]
    fn push_pop_many_spsc() {
        let q: SegQueue<i64> = SegQueue::new();

        scope(|scope| {
            scope.spawn(|| {
                let mut next = 0;
                while next < CONC_COUNT {
                    if let Some(elem) = q.pop() {
                        assert_eq!(elem, next);
                        next += 1;
                    }
                }
            });

            for i in 0..CONC_COUNT {
                q.push(i)
            }
        });
    }

    #[test]
    fn push_pop_many_spmc() {
        use std::time::Duration;

        fn recv(t: i32, q: &SegQueue<i64>) {
            let mut cur = -1;
            for i in 0..CONC_COUNT {
                if let Some(elem) = q.pop() {
                    if elem <= cur {
                        writeln!(stderr(), "{}: {} <= {}", t, elem, cur);
                    }
                    assert!(elem > cur);
                    cur = elem;

                    if cur == CONC_COUNT - 1 { break }
                }
                if i % 10000 == 0 {
                    //writeln!(stderr(), "{}: {} @ {}", t, i, cur);
                }
            }
        }

        let q: SegQueue<i64> = SegQueue::new();
        let qr = &q;
        scope(|scope| {
            for i in 0..3 {
                scope.spawn(move || recv(i, qr));
            }

            scope.spawn(|| {
                for i in 0..CONC_COUNT {
                    q.push(i);
                    if i % 10000 == 0 {
                        //writeln!(stderr(), "Push: {}", i);
                    }
                }
            })
        });
    }

    #[test]
    fn push_pop_many_mpmc()
                    for _i in 0..CONC_COUNT {
                        match q.pop() {
                            Some(LR::Left(x)) => vl.push(x),
                            Some(LR::Right(x)) => vr.push(x),
                            _ => {}
                        }
                    }

                    let mut vl2 = vl.clone();
                    let mut vr2 = vr.clone();
                    vl2.sort();
                    vr2.sort();

                    assert_eq!(vl, vl2);
                    assert_eq!(vr, vr2);
                });
            }
        });
    }
}
{
        enum LR { Left(i64), Right(i64) }

        let q: SegQueue<LR> = SegQueue::new();

        scope(|scope| {
            for _t in 0..2 {
                scope.spawn(|| {
                    for i in CONC_COUNT-1..CONC_COUNT {
                        q.push(LR::Left(i))
                    }
                });
                scope.spawn(|| {
                    for i in CONC_COUNT-1..CONC_COUNT {
                        q.push(LR::Right(i))
                    }
                });
                scope.spawn(|| {
                    let mut vl = vec![];
                    let mut vr = vec![];
identifier_body
seg_queue.rs
use std::sync::atomic::Ordering::{Acquire, Release, Relaxed};
use std::sync::atomic::{AtomicBool, AtomicUsize};
use std::{ptr, mem};
use std::cmp;
use std::cell::UnsafeCell;

use mem::epoch::{self, Atomic, Owned};

const SEG_SIZE: usize = 32;

/// A Michael-Scott queue that allocates "segments" (arrays of nodes)
/// for efficiency.
///
/// Usable with any number of producers and consumers.
pub struct SegQueue<T> {
    head: Atomic<Segment<T>>,
    tail: Atomic<Segment<T>>,
}

struct Segment<T> {
    low: AtomicUsize,
    data: [UnsafeCell<T>; SEG_SIZE],
    ready: [AtomicBool; SEG_SIZE],
    high: AtomicUsize,
    next: Atomic<Segment<T>>,
}

unsafe impl<T> Sync for Segment<T> {}

impl<T> Segment<T> {
    fn new() -> Segment<T> {
        Segment {
            data: unsafe { mem::uninitialized() },
            ready: unsafe { mem::transmute([0usize; SEG_SIZE]) },
            low: AtomicUsize::new(0),
            high: AtomicUsize::new(0),
            next: Atomic::null(),
        }
    }
}

impl<T> SegQueue<T> {
    /// Create a new, empty queue.
    pub fn new() -> SegQueue<T> {
        let q = SegQueue {
            head: Atomic::null(),
            tail: Atomic::null(),
        };
        let sentinel = Owned::new(Segment::new());
        let guard = epoch::pin();
        let sentinel = q.head.store_and_ref(sentinel, Relaxed, &guard);
        q.tail.store_shared(Some(sentinel), Relaxed);
        q
    }

    /// Add `t` to the back of the queue.
    pub fn push(&self, t: T) {
        let guard = epoch::pin();
        loop {
            let tail = self.tail.load(Acquire, &guard).unwrap();
            if tail.high.load(Relaxed) >= SEG_SIZE { continue }

            let i = tail.high.fetch_add(1, Relaxed);
            unsafe {
                if i < SEG_SIZE {
                    *(*tail).data.get_unchecked(i).get() = t;
                    tail.ready.get_unchecked(i).store(true, Release);

                    if i + 1 == SEG_SIZE {
                        let tail = tail.next.store_and_ref(Owned::new(Segment::new()), Release, &guard);
                        self.tail.store_shared(Some(tail), Release);
                    }

                    return
                }
            }
        }
    }

    /// Attempt to dequeue from the front.
    ///
    /// Returns `None` if the queue is observed to be empty.
    pub fn pop(&self) -> Option<T> {
        let guard = epoch::pin();
        loop {
            let head = self.head.load(Acquire, &guard).unwrap();
            loop {
                let low = head.low.load(Relaxed);
                if low >= cmp::min(head.high.load(Relaxed), SEG_SIZE) { break }
                if head.low.compare_and_swap(low, low+1, Relaxed) == low {
                    loop {
                        if unsafe { head.ready.get_unchecked(low).load(Acquire) } { break }
                    }
                    if low + 1 == SEG_SIZE {
                        loop {
                            if let Some(next) = head.next.load(Acquire, &guard) {
                                self.head.store_shared(Some(next), Release);
                                break
                            }
                        }
                    }
                    return Some(unsafe { ptr::read((*head).data.get_unchecked(low).get()) })
                }
            }
            if head.next.load(Relaxed, &guard).is_none() { return None }
        }
    }
}

#[cfg(test)]
mod test {
    const CONC_COUNT: i64 = 1000000;

    use std::io::stderr;
    use std::io::prelude::*;

    use mem::epoch;
    use scope;
    use super::*;

    #[test]
    fn smoke_queue() {
        let q: SegQueue<i64> = SegQueue::new();
    }

    #[test]
    fn push_pop_1() {
        let q: SegQueue<i64> = SegQueue::new();
        q.push(37);
        assert_eq!(q.pop(), Some(37));
    }

    #[test]
    fn push_pop_2() {
        let q: SegQueue<i64> = SegQueue::new();
        q.push(37);
        q.push(48);
        assert_eq!(q.pop(), Some(37));
        assert_eq!(q.pop(), Some(48));
    }

    #[test]
    fn push_pop_many_seq() {
        let q: SegQueue<i64> = SegQueue::new();
        for i in 0..200 {
            q.push(i)
        }
        writeln!(stderr(), "done pushing");
        for i in 0..200 {
            assert_eq!(q.pop(), Some(i));
        }
    }

    #[test]
    fn push_pop_many_spsc() {
        let q: SegQueue<i64> = SegQueue::new();

        scope(|scope| {
            scope.spawn(|| {
                let mut next = 0;
                while next < CONC_COUNT {
                    if let Some(elem) = q.pop() {
                        assert_eq!(elem, next);
                        next += 1;
                    }
                }
            });

            for i in 0..CONC_COUNT {
                q.push(i)
            }
        });
    }

    #[test]
    fn push_pop_many_spmc() {
        use std::time::Duration;

        fn recv(t: i32, q: &SegQueue<i64>) {
            let mut cur = -1;
            for i in 0..CONC_COUNT {
                if let Some(elem) = q.pop() {
                    if elem <= cur {
                        writeln!(stderr(), "{}: {} <= {}", t, elem, cur);
                    }
                    assert!(elem > cur);
                    cur = elem;

                    if cur == CONC_COUNT - 1 { break }
                }
                if i % 10000 == 0 {
                    //writeln!(stderr(), "{}: {} @ {}", t, i, cur);
                }
            }
        }

        let q: SegQueue<i64> = SegQueue::new();
        let qr = &q;
        scope(|scope| {
            for i in 0..3 {
                scope.spawn(move || recv(i, qr));
            }

            scope.spawn(|| {
                for i in 0..CONC_COUNT {
                    q.push(i);
                    if i % 10000 == 0 {
                        //writeln!(stderr(), "Push: {}", i);
                    }
                }
            })
        });
    }

    #[test]
    fn push_pop_many_mpmc() {
        enum LR { Left(i64), Right(i64) }

        let q: SegQueue<LR> = SegQueue::new();

        scope(|scope| {
            for _t in 0..2 {
                scope.spawn(|| {
                    for i in CONC_COUNT-1..CONC_COUNT {
                        q.push(LR::Left(i))
                    }
                });
                scope.spawn(|| {
                    for i in CONC_COUNT-1..CONC_COUNT {
                        q.push(LR::Right(i))
                    }
                });
                scope.spawn(|| {
                    let mut vl = vec![];
                    let mut vr = vec![];
                    for _i in 0..CONC_COUNT {
                        match q.pop() {
                            Some(LR::Left(x)) => vl.push(x),
                            Some(LR::Right(x)) => vr.push(x),
                            _ =>
} } let mut vl2 = vl.clone(); let mut vr2 = vr.clone(); vl2.sort(); vr2.sort(); assert_eq!(vl, vl2); assert_eq!(vr, vr2); }); } }); } }
{}
conditional_block
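The `{}` middle above fills the catch-all `_ => {}` arm in the consumer's `match`; more broadly, the tests rely on scoped threads so closures can borrow the stack-allocated queue. Below is a hedged sketch of that pattern, assuming an early-crossbeam-style `crossbeam::scope` entry point (the file itself imports it as a bare `scope` from the crate root):

extern crate crossbeam;

use crossbeam::sync::SegQueue;

fn main() {
    let q: SegQueue<i64> = SegQueue::new();
    crossbeam::scope(|scope| {
        // The producer borrows `q`; `scope` joins the thread before
        // `q` can be dropped, which is what makes the borrow legal.
        scope.spawn(|| {
            for i in 0..1_000 {
                q.push(i);
            }
        });
        // The consumer spins until every element has been observed.
        scope.spawn(|| {
            let mut seen = 0;
            while seen < 1_000 {
                if q.pop().is_some() {
                    seen += 1;
                }
            }
        });
    });
}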
seg_queue.rs
use std::sync::atomic::Ordering::{Acquire, Release, Relaxed}; use std::sync::atomic::{AtomicBool, AtomicUsize}; use std::{ptr, mem}; use std::cmp; use std::cell::UnsafeCell; use mem::epoch::{self, Atomic, Owned}; const SEG_SIZE: usize = 32; /// A Michael-Scott queue that allocates "segments" (arrays of nodes) /// for efficiency. /// /// Usable with any number of producers and consumers. pub struct SegQueue<T> { head: Atomic<Segment<T>>, tail: Atomic<Segment<T>>, } struct Segment<T> { low: AtomicUsize, data: [UnsafeCell<T>; SEG_SIZE], ready: [AtomicBool; SEG_SIZE], high: AtomicUsize, next: Atomic<Segment<T>>, } unsafe impl<T> Sync for Segment<T> {} impl<T> Segment<T> { fn new() -> Segment<T> { Segment { data: unsafe { mem::uninitialized() }, ready: unsafe { mem::transmute([0usize; SEG_SIZE]) }, low: AtomicUsize::new(0), high: AtomicUsize::new(0), next: Atomic::null(), } } } impl<T> SegQueue<T> { /// Create a new, empty queue. pub fn new() -> SegQueue<T> { let q = SegQueue { head: Atomic::null(), tail: Atomic::null(), }; let sentinel = Owned::new(Segment::new()); let guard = epoch::pin(); let sentinel = q.head.store_and_ref(sentinel, Relaxed, &guard); q.tail.store_shared(Some(sentinel), Relaxed); q } /// Add `t` to the back of the queue. pub fn push(&self, t: T) { let guard = epoch::pin(); loop { let tail = self.tail.load(Acquire, &guard).unwrap(); if tail.high.load(Relaxed) >= SEG_SIZE { continue } let i = tail.high.fetch_add(1, Relaxed); unsafe { if i < SEG_SIZE { *(*tail).data.get_unchecked(i).get() = t; tail.ready.get_unchecked(i).store(true, Release); if i + 1 == SEG_SIZE { let tail = tail.next.store_and_ref(Owned::new(Segment::new()), Release, &guard); self.tail.store_shared(Some(tail), Release); } return } } } } /// Attempt to dequeue from the front. /// /// Returns `None` if the queue is observed to be empty. pub fn pop(&self) -> Option<T> { let guard = epoch::pin(); loop { let head = self.head.load(Acquire, &guard).unwrap(); loop { let low = head.low.load(Relaxed); if low >= cmp::min(head.high.load(Relaxed), SEG_SIZE) { break } if head.low.compare_and_swap(low, low+1, Relaxed) == low { loop { if unsafe { head.ready.get_unchecked(low).load(Acquire) } { break } } if low + 1 == SEG_SIZE { loop { if let Some(next) = head.next.load(Acquire, &guard) { self.head.store_shared(Some(next), Release); break } } } return Some(unsafe { ptr::read((*head).data.get_unchecked(low).get()) }) } } if head.next.load(Relaxed, &guard).is_none() { return None } } } } #[cfg(test)] mod test { const CONC_COUNT: i64 = 1000000; use std::io::stderr; use std::io::prelude::*; use mem::epoch; use scope; use super::*; #[test] fn smoke_queue() { let q: SegQueue<i64> = SegQueue::new(); } #[test] fn push_pop_1() { let q: SegQueue<i64> = SegQueue::new(); q.push(37); assert_eq!(q.pop(), Some(37)); } #[test] fn
() { let q: SegQueue<i64> = SegQueue::new(); q.push(37); q.push(48); assert_eq!(q.pop(), Some(37)); assert_eq!(q.pop(), Some(48)); } #[test] fn push_pop_many_seq() { let q: SegQueue<i64> = SegQueue::new(); for i in 0..200 { q.push(i) } writeln!(stderr(), "done pushing"); for i in 0..200 { assert_eq!(q.pop(), Some(i)); } } #[test] fn push_pop_many_spsc() { let q: SegQueue<i64> = SegQueue::new(); scope(|scope| { scope.spawn(|| { let mut next = 0; while next < CONC_COUNT { if let Some(elem) = q.pop() { assert_eq!(elem, next); next += 1; } } }); for i in 0..CONC_COUNT { q.push(i) } }); } #[test] fn push_pop_many_spmc() { use std::time::Duration; fn recv(t: i32, q: &SegQueue<i64>) { let mut cur = -1; for i in 0..CONC_COUNT { if let Some(elem) = q.pop() { if elem <= cur { writeln!(stderr(), "{}: {} <= {}", t, elem, cur); } assert!(elem > cur); cur = elem; if cur == CONC_COUNT - 1 { break } } if i % 10000 == 0 { //writeln!(stderr(), "{}: {} @ {}", t, i, cur); } } } let q: SegQueue<i64> = SegQueue::new(); let qr = &q; scope(|scope| { for i in 0..3 { scope.spawn(move || recv(i, qr)); } scope.spawn(|| { for i in 0..CONC_COUNT { q.push(i); if i % 10000 == 0 { //writeln!(stderr(), "Push: {}", i); } } }) }); } #[test] fn push_pop_many_mpmc() { enum LR { Left(i64), Right(i64) } let q: SegQueue<LR> = SegQueue::new(); scope(|scope| { for _t in 0..2 { scope.spawn(|| { for i in CONC_COUNT-1..CONC_COUNT { q.push(LR::Left(i)) } }); scope.spawn(|| { for i in CONC_COUNT-1..CONC_COUNT { q.push(LR::Right(i)) } }); scope.spawn(|| { let mut vl = vec![]; let mut vr = vec![]; for _i in 0..CONC_COUNT { match q.pop() { Some(LR::Left(x)) => vl.push(x), Some(LR::Right(x)) => vr.push(x), _ => {} } } let mut vl2 = vl.clone(); let mut vr2 = vr.clone(); vl2.sort(); vr2.sort(); assert_eq!(vl, vl2); assert_eq!(vr, vr2); }); } }); } }
push_pop_2
identifier_name
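The hidden identifier in this record is `push_pop_2`, but the instructive detail in its prefix is how `pop` claims a slot: a `compare_and_swap` on `low` rather than a blind `fetch_add`, so a losing claimant retries with a fresh index instead of overshooting `high`. The standalone toy below isolates that claim loop; `claim` is a name invented for this sketch, and `compare_and_swap` matches the era of the file (modern Rust deprecates it in favor of `compare_exchange`):

use std::sync::atomic::{AtomicUsize, Ordering};

// Claim the next index in [low, high); None once the window is empty.
fn claim(low: &AtomicUsize, high: usize) -> Option<usize> {
    loop {
        let i = low.load(Ordering::Relaxed);
        if i >= high {
            return None;
        }
        // compare_and_swap returns the previous value; equality with
        // `i` means this thread won the claim on slot `i`.
        if low.compare_and_swap(i, i + 1, Ordering::Relaxed) == i {
            return Some(i);
        }
    }
}

fn main() {
    let low = AtomicUsize::new(0);
    assert_eq!(claim(&low, 2), Some(0));
    assert_eq!(claim(&low, 2), Some(1));
    assert_eq!(claim(&low, 2), None);
}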
seg_queue.rs
use std::sync::atomic::Ordering::{Acquire, Release, Relaxed}; use std::sync::atomic::{AtomicBool, AtomicUsize}; use std::{ptr, mem}; use std::cmp; use std::cell::UnsafeCell; use mem::epoch::{self, Atomic, Owned}; const SEG_SIZE: usize = 32; /// A Michael-Scott queue that allocates "segments" (arrays of nodes) /// for efficiency. /// /// Usable with any number of producers and consumers. pub struct SegQueue<T> { head: Atomic<Segment<T>>, tail: Atomic<Segment<T>>, } struct Segment<T> { low: AtomicUsize, data: [UnsafeCell<T>; SEG_SIZE], ready: [AtomicBool; SEG_SIZE], high: AtomicUsize, next: Atomic<Segment<T>>, } unsafe impl<T> Sync for Segment<T> {} impl<T> Segment<T> { fn new() -> Segment<T> { Segment { data: unsafe { mem::uninitialized() }, ready: unsafe { mem::transmute([0usize; SEG_SIZE]) },
next: Atomic::null(), } } } impl<T> SegQueue<T> { /// Create a new, empty queue. pub fn new() -> SegQueue<T> { let q = SegQueue { head: Atomic::null(), tail: Atomic::null(), }; let sentinel = Owned::new(Segment::new()); let guard = epoch::pin(); let sentinel = q.head.store_and_ref(sentinel, Relaxed, &guard); q.tail.store_shared(Some(sentinel), Relaxed); q } /// Add `t` to the back of the queue. pub fn push(&self, t: T) { let guard = epoch::pin(); loop { let tail = self.tail.load(Acquire, &guard).unwrap(); if tail.high.load(Relaxed) >= SEG_SIZE { continue } let i = tail.high.fetch_add(1, Relaxed); unsafe { if i < SEG_SIZE { *(*tail).data.get_unchecked(i).get() = t; tail.ready.get_unchecked(i).store(true, Release); if i + 1 == SEG_SIZE { let tail = tail.next.store_and_ref(Owned::new(Segment::new()), Release, &guard); self.tail.store_shared(Some(tail), Release); } return } } } } /// Attempt to dequeue from the front. /// /// Returns `None` if the queue is observed to be empty. pub fn pop(&self) -> Option<T> { let guard = epoch::pin(); loop { let head = self.head.load(Acquire, &guard).unwrap(); loop { let low = head.low.load(Relaxed); if low >= cmp::min(head.high.load(Relaxed), SEG_SIZE) { break } if head.low.compare_and_swap(low, low+1, Relaxed) == low { loop { if unsafe { head.ready.get_unchecked(low).load(Acquire) } { break } } if low + 1 == SEG_SIZE { loop { if let Some(next) = head.next.load(Acquire, &guard) { self.head.store_shared(Some(next), Release); break } } } return Some(unsafe { ptr::read((*head).data.get_unchecked(low).get()) }) } } if head.next.load(Relaxed, &guard).is_none() { return None } } } } #[cfg(test)] mod test { const CONC_COUNT: i64 = 1000000; use std::io::stderr; use std::io::prelude::*; use mem::epoch; use scope; use super::*; #[test] fn smoke_queue() { let q: SegQueue<i64> = SegQueue::new(); } #[test] fn push_pop_1() { let q: SegQueue<i64> = SegQueue::new(); q.push(37); assert_eq!(q.pop(), Some(37)); } #[test] fn push_pop_2() { let q: SegQueue<i64> = SegQueue::new(); q.push(37); q.push(48); assert_eq!(q.pop(), Some(37)); assert_eq!(q.pop(), Some(48)); } #[test] fn push_pop_many_seq() { let q: SegQueue<i64> = SegQueue::new(); for i in 0..200 { q.push(i) } writeln!(stderr(), "done pushing"); for i in 0..200 { assert_eq!(q.pop(), Some(i)); } } #[test] fn push_pop_many_spsc() { let q: SegQueue<i64> = SegQueue::new(); scope(|scope| { scope.spawn(|| { let mut next = 0; while next < CONC_COUNT { if let Some(elem) = q.pop() { assert_eq!(elem, next); next += 1; } } }); for i in 0..CONC_COUNT { q.push(i) } }); } #[test] fn push_pop_many_spmc() { use std::time::Duration; fn recv(t: i32, q: &SegQueue<i64>) { let mut cur = -1; for i in 0..CONC_COUNT { if let Some(elem) = q.pop() { if elem <= cur { writeln!(stderr(), "{}: {} <= {}", t, elem, cur); } assert!(elem > cur); cur = elem; if cur == CONC_COUNT - 1 { break } } if i % 10000 == 0 { //writeln!(stderr(), "{}: {} @ {}", t, i, cur); } } } let q: SegQueue<i64> = SegQueue::new(); let qr = &q; scope(|scope| { for i in 0..3 { scope.spawn(move || recv(i, qr)); } scope.spawn(|| { for i in 0..CONC_COUNT { q.push(i); if i % 10000 == 0 { //writeln!(stderr(), "Push: {}", i); } } }) }); } #[test] fn push_pop_many_mpmc() { enum LR { Left(i64), Right(i64) } let q: SegQueue<LR> = SegQueue::new(); scope(|scope| { for _t in 0..2 { scope.spawn(|| { for i in CONC_COUNT-1..CONC_COUNT { q.push(LR::Left(i)) } }); scope.spawn(|| { for i in CONC_COUNT-1..CONC_COUNT { q.push(LR::Right(i)) } }); scope.spawn(|| { let mut vl = vec![]; let mut vr = 
vec![]; for _i in 0..CONC_COUNT { match q.pop() { Some(LR::Left(x)) => vl.push(x), Some(LR::Right(x)) => vr.push(x), _ => {} } } let mut vl2 = vl.clone(); let mut vr2 = vr.clone(); vl2.sort(); vr2.sort(); assert_eq!(vl, vl2); assert_eq!(vr, vr2); }); } }); } }
low: AtomicUsize::new(0), high: AtomicUsize::new(0),
random_line_split
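The `random_line_split` middle above lands inside `Segment::new`, which obtains storage from `mem::uninitialized()` and zeroes the `ready` flags via `transmute`; that was idiomatic when this file was written but is rejected by modern Rust. Below is a hedged sketch of the present-day shape using `MaybeUninit`. `Slots` is a simplified stand-in, not a drop-in patch for `Segment<T>`, and `std::array::from_fn` requires Rust 1.63+:

use std::mem::MaybeUninit;
use std::sync::atomic::{AtomicBool, Ordering};

const SEG_SIZE: usize = 32;

struct Slots<T> {
    // Uninitialized storage is spelled out in the type instead of
    // being produced by mem::uninitialized().
    data: [MaybeUninit<T>; SEG_SIZE],
    ready: [AtomicBool; SEG_SIZE],
}

impl<T> Slots<T> {
    fn new() -> Self {
        Slots {
            // An array of MaybeUninit is allowed to start uninitialized.
            data: unsafe { MaybeUninit::uninit().assume_init() },
            ready: std::array::from_fn(|_| AtomicBool::new(false)),
        }
    }
}

fn main() {
    let s: Slots<u64> = Slots::new();
    assert!(!s.ready[0].load(Ordering::Relaxed));
}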
commandlineparser.ts
import * as types from "./types"; import * as yargs from "yargs"; export const SECURITIES_DEFAULT = "*"; export const SOURCE_EXTENSION_DEFAULT = "csv"; export interface CommandLineParserResult { options: types.LeanDataKitConversionOptions; error: string; } class CommandLineParserResultImpl implements CommandLineParserResult { public options: types.LeanDataKitConversionOptions; public error: string; constructor(options: types.LeanDataKitConversionOptions, error: string) { this.options = options; this.error = error; } } export class CommandLineParser { public parse(args: string[]): CommandLineParserResult { let parseErrors: string = ""; function
(message: string): any { parseErrors = message; console.error(message); } let options: any = yargs.alias("output-directory", "o") .alias("securities", "s") .alias("securities-file", "f") .alias("input-directory", "i") .alias("source-extension", "e") .alias("data-provider", "p") .demand("p", "the name of the data provider that supports the input data format.") .alias("resolution", "r") .choices("r", ["daily", "hourly", "minute", "second", "tick"]) .default("r", "daily") .alias("type", "t") .choices("t", ["equity", "option", "forex"]) .default("t", "equity") .default("source-extension", SOURCE_EXTENSION_DEFAULT) .default("securities", SECURITIES_DEFAULT) .demand("input-directory") .demand("output-directory") .fail(fail) .parse(args); return new CommandLineParserResultImpl({ "outputDirectory": options["output-directory"], "inputDirectory": options["input-directory"], "sourceFileExtension": options["source-extension"], "securities": options.securities.split(","), "securitiesFile": options["securities-file"], "dataProvider": options["data-provider"], "resolution": (<any> types.Resolution)[options.resolution], "type": (<any> types.SecurityType)[options.type], }, parseErrors); } }
fail
identifier_name
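For the commandlineparser.ts records, a call-site sketch makes the yargs configuration easier to read. It assumes the module compiles as shown and that its `types` import defines the `Resolution` and `SecurityType` enums referenced in the return value (inferred from the imports, not verified); the provider name is hypothetical:

import { CommandLineParser } from "./commandlineparser";

const parser = new CommandLineParser();
const result = parser.parse([
    "--input-directory", "./raw",
    "--output-directory", "./lean",
    "--data-provider", "algoseek",   // hypothetical provider name
    "--securities", "SPY,QQQ",
]);

if (result.error) {
    console.error(`parse failed: ${result.error}`);
} else {
    // resolution and type fall back to their defaults ("daily", "equity").
    console.log(result.options.securities); // ["SPY", "QQQ"]
}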
commandlineparser.ts
import * as types from "./types"; import * as yargs from "yargs"; export const SECURITIES_DEFAULT = "*"; export const SOURCE_EXTENSION_DEFAULT = "csv"; export interface CommandLineParserResult { options: types.LeanDataKitConversionOptions; error: string; } class CommandLineParserResultImpl implements CommandLineParserResult { public options: types.LeanDataKitConversionOptions; public error: string; constructor(options: types.LeanDataKitConversionOptions, error: string) { this.options = options; this.error = error; } } export class CommandLineParser { public parse(args: string[]): CommandLineParserResult { let parseErrors: string = ""; function fail(message: string): any { parseErrors = message; console.error(message); }
let options: any = yargs.alias("output-directory", "o") .alias("securities", "s") .alias("securities-file", "f") .alias("input-directory", "i") .alias("source-extension", "e") .alias("data-provider", "p") .demand("p", "the name of the data provider that supports the input data format.") .alias("resolution", "r") .choices("r", ["daily", "hourly", "minute", "second", "tick"]) .default("r", "daily") .alias("type", "t") .choices("t", ["equity", "option", "forex"]) .default("t", "equity") .default("source-extension", SOURCE_EXTENSION_DEFAULT) .default("securities", SECURITIES_DEFAULT) .demand("input-directory") .demand("output-directory") .fail(fail) .parse(args); return new CommandLineParserResultImpl({ "outputDirectory": options["output-directory"], "inputDirectory": options["input-directory"], "sourceFileExtension": options["source-extension"], "securities": options.securities.split(","), "securitiesFile": options["securities-file"], "dataProvider": options["data-provider"], "resolution": (<any> types.Resolution)[options.resolution], "type": (<any> types.SecurityType)[options.type], }, parseErrors); } }
random_line_split
commandlineparser.ts
import * as types from "./types"; import * as yargs from "yargs"; export const SECURITIES_DEFAULT = "*"; export const SOURCE_EXTENSION_DEFAULT = "csv"; export interface CommandLineParserResult { options: types.LeanDataKitConversionOptions; error: string; } class CommandLineParserResultImpl implements CommandLineParserResult { public options: types.LeanDataKitConversionOptions; public error: string; constructor(options: types.LeanDataKitConversionOptions, error: string)
} export class CommandLineParser { public parse(args: string[]): CommandLineParserResult { let parseErrors: string = ""; function fail(message: string): any { parseErrors = message; console.error(message); } let options: any = yargs.alias("output-directory", "o") .alias("securities", "s") .alias("securities-file", "f") .alias("input-directory", "i") .alias("source-extension", "e") .alias("data-provider", "p") .demand("p", "the name of the data provider that supports the input data format.") .alias("resolution", "r") .choices("r", ["daily", "hourly", "minute", "second", "tick"]) .default("r", "daily") .alias("type", "t") .choices("t", ["equity", "option", "forex"]) .default("t", "equity") .default("source-extension", SOURCE_EXTENSION_DEFAULT) .default("securities", SECURITIES_DEFAULT) .demand("input-directory") .demand("output-directory") .fail(fail) .parse(args); return new CommandLineParserResultImpl({ "outputDirectory": options["output-directory"], "inputDirectory": options["input-directory"], "sourceFileExtension": options["source-extension"], "securities": options.securities.split(","), "securitiesFile": options["securities-file"], "dataProvider": options["data-provider"], "resolution": (<any> types.Resolution)[options.resolution], "type": (<any> types.SecurityType)[options.type], }, parseErrors); } }
{ this.options = options; this.error = error; }
identifier_body
test_multimeter.py
import unittest import nest from nix4nest.nest_api.models.multimeter import NestMultimeter class TestNode(unittest.TestCase): def setUp(self): nest.ResetKernel() self.neuron_id = nest.Create('iaf_neuron')[0] rec_params = {'record_from': ['V_m'], 'withtime': True} self.mm_id = nest.Create('multimeter', params=rec_params)[0] nest.Connect([self.mm_id], [self.neuron_id]) self.mm = NestMultimeter(self.mm_id, 'V_m') def
(self): nest.ResetKernel() def test_properties(self): for k in nest.GetStatus([self.mm_id])[0].keys(): assert(k in self.mm.properties) def test_data(self): assert(len(self.mm.data) == 0) nest.Simulate(50) assert(len(self.mm.data) == 0) self.mm.refresh() assert(len(self.mm.data) == 49) assert(self.neuron_id in self.mm.senders) assert((self.mm.senders == self.neuron_id).all())
tearDown
identifier_name
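The final record targets NEST's 2.x-era PyNEST API, in which `nest.Create` returns plain ID lists and `nest.Connect`, `nest.Simulate`, and `nest.GetStatus` operate on them. A minimal stand-alone session in that style, assuming that legacy API and an installed PyNEST, and that recorded samples appear under the standard `'events'` status field:

import nest

nest.ResetKernel()

# One integrate-and-fire neuron and a multimeter recording V_m.
neuron = nest.Create('iaf_neuron')
mm = nest.Create('multimeter',
                 params={'record_from': ['V_m'], 'withtime': True})
nest.Connect(mm, neuron)

nest.Simulate(50.0)

# Recorded samples live in the multimeter's 'events' status field.
events = nest.GetStatus(mm)[0]['events']
print(len(events['V_m']), 'samples of V_m')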