file_name
large_stringlengths 4
140
| prefix
large_stringlengths 0
12.1k
| suffix
large_stringlengths 0
12k
| middle
large_stringlengths 0
7.51k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
configurable_validator_signer.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{Error, PersistentSafetyStorage};
use diem_crypto::{
ed25519::{Ed25519PrivateKey, Ed25519PublicKey, Ed25519Signature},
hash::CryptoHash,
};
use diem_global_constants::CONSENSUS_KEY;
use diem_types::{account_address::AccountAddress, validator_signer::ValidatorSigner};
use serde::Serialize;
/// A ConfigurableValidatorSigner is a ValidatorSigner wrapper that offers either
/// a ValidatorSigner instance or a ValidatorHandle instance, depending on the
/// configuration chosen. This abstracts away the complexities of handling either
/// instance, while offering the same API as a ValidatorSigner.
pub enum ConfigurableValidatorSigner {
Signer(ValidatorSigner),
Handle(ValidatorHandle),
}
impl ConfigurableValidatorSigner {
/// Returns a new ValidatorSigner instance
pub fn new_signer(author: AccountAddress, consensus_key: Ed25519PrivateKey) -> Self {
let signer = ValidatorSigner::new(author, consensus_key);
ConfigurableValidatorSigner::Signer(signer)
}
/// Returns a new ValidatorHandle instance
pub fn new_handle(author: AccountAddress, key_version: Ed25519PublicKey) -> Self {
let handle = ValidatorHandle::new(author, key_version);
ConfigurableValidatorSigner::Handle(handle)
}
/// Returns the author associated with the signer configuration.
pub fn author(&self) -> AccountAddress {
match self {
ConfigurableValidatorSigner::Signer(signer) => signer.author(),
ConfigurableValidatorSigner::Handle(handle) => handle.author(),
}
}
/// Returns the public key associated with the signer configuration.
pub fn public_key(&self) -> Ed25519PublicKey {
match self {
ConfigurableValidatorSigner::Signer(signer) => signer.public_key(),
ConfigurableValidatorSigner::Handle(handle) => handle.key_version(),
}
}
/// Signs a given message using the signer configuration.
pub fn sign<T: Serialize + CryptoHash>(
&self,
message: &T,
storage: &PersistentSafetyStorage,
) -> Result<Ed25519Signature, Error> {
match self {
ConfigurableValidatorSigner::Signer(signer) => Ok(signer.sign(message)),
ConfigurableValidatorSigner::Handle(handle) => handle.sign(message, storage),
}
}
}
/// A ValidatorHandle associates a validator with a consensus key version held in storage.
/// In contrast to a ValidatorSigner, ValidatorHandle does not hold the private
/// key directly but rather holds a reference to that private key which should be
/// accessed using the handle and the secure storage backend.
pub struct ValidatorHandle {
author: AccountAddress,
key_version: Ed25519PublicKey,
}
impl ValidatorHandle {
pub fn new(author: AccountAddress, key_version: Ed25519PublicKey) -> Self |
/// Returns the author associated with this handle.
pub fn author(&self) -> AccountAddress {
self.author
}
/// Returns the public key version associated with this handle.
pub fn key_version(&self) -> Ed25519PublicKey {
self.key_version.clone()
}
/// Signs a given message using this handle and a given secure storage backend.
pub fn sign<T: Serialize + CryptoHash>(
&self,
message: &T,
storage: &PersistentSafetyStorage,
) -> Result<Ed25519Signature, Error> {
storage.sign(CONSENSUS_KEY.into(), self.key_version(), message)
}
}
| {
ValidatorHandle {
author,
key_version,
}
} | identifier_body |
configurable_validator_signer.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{Error, PersistentSafetyStorage};
use diem_crypto::{
ed25519::{Ed25519PrivateKey, Ed25519PublicKey, Ed25519Signature},
hash::CryptoHash,
};
use diem_global_constants::CONSENSUS_KEY;
use diem_types::{account_address::AccountAddress, validator_signer::ValidatorSigner};
use serde::Serialize;
/// A ConfigurableValidatorSigner is a ValidatorSigner wrapper that offers either
/// a ValidatorSigner instance or a ValidatorHandle instance, depending on the
/// configuration chosen. This abstracts away the complexities of handling either
/// instance, while offering the same API as a ValidatorSigner.
pub enum ConfigurableValidatorSigner {
Signer(ValidatorSigner),
Handle(ValidatorHandle),
}
impl ConfigurableValidatorSigner {
/// Returns a new ValidatorSigner instance
pub fn new_signer(author: AccountAddress, consensus_key: Ed25519PrivateKey) -> Self {
let signer = ValidatorSigner::new(author, consensus_key);
ConfigurableValidatorSigner::Signer(signer)
}
/// Returns a new ValidatorHandle instance
pub fn new_handle(author: AccountAddress, key_version: Ed25519PublicKey) -> Self {
let handle = ValidatorHandle::new(author, key_version);
ConfigurableValidatorSigner::Handle(handle)
}
/// Returns the author associated with the signer configuration.
pub fn author(&self) -> AccountAddress {
match self {
ConfigurableValidatorSigner::Signer(signer) => signer.author(),
ConfigurableValidatorSigner::Handle(handle) => handle.author(),
}
}
/// Returns the public key associated with the signer configuration.
pub fn public_key(&self) -> Ed25519PublicKey {
match self {
ConfigurableValidatorSigner::Signer(signer) => signer.public_key(),
ConfigurableValidatorSigner::Handle(handle) => handle.key_version(),
}
}
/// Signs a given message using the signer configuration.
pub fn sign<T: Serialize + CryptoHash>(
&self,
message: &T,
storage: &PersistentSafetyStorage,
) -> Result<Ed25519Signature, Error> {
match self {
ConfigurableValidatorSigner::Signer(signer) => Ok(signer.sign(message)),
ConfigurableValidatorSigner::Handle(handle) => handle.sign(message, storage),
}
}
}
/// A ValidatorHandle associates a validator with a consensus key version held in storage.
/// In contrast to a ValidatorSigner, ValidatorHandle does not hold the private
/// key directly but rather holds a reference to that private key which should be
/// accessed using the handle and the secure storage backend.
pub struct ValidatorHandle {
author: AccountAddress,
key_version: Ed25519PublicKey,
}
impl ValidatorHandle {
pub fn new(author: AccountAddress, key_version: Ed25519PublicKey) -> Self {
ValidatorHandle {
author,
key_version,
} | }
/// Returns the public key version associated with this handle.
pub fn key_version(&self) -> Ed25519PublicKey {
self.key_version.clone()
}
/// Signs a given message using this handle and a given secure storage backend.
pub fn sign<T: Serialize + CryptoHash>(
&self,
message: &T,
storage: &PersistentSafetyStorage,
) -> Result<Ed25519Signature, Error> {
storage.sign(CONSENSUS_KEY.into(), self.key_version(), message)
}
} | }
/// Returns the author associated with this handle.
pub fn author(&self) -> AccountAddress {
self.author | random_line_split |
calculateTransform.js | import { Transform } from './transform.js';
/**
* Calculate the transform for a Cornerstone enabled element
*
* @param {EnabledElement} enabledElement The Cornerstone Enabled Element
* @param {Number} [scale] The viewport scale
* @return {Transform} The current transform
*/
export default function (enabledElement, scale) {
const transform = new Transform();
transform.translate(enabledElement.canvas.width / 2, enabledElement.canvas.height / 2);
// Apply the rotation before scaling for non square pixels
const angle = enabledElement.viewport.rotation;
if (angle !== 0) {
transform.rotate(angle * Math.PI / 180);
}
// Apply the scale
let widthScale = enabledElement.viewport.scale;
let heightScale = enabledElement.viewport.scale;
if (enabledElement.image.rowPixelSpacing < enabledElement.image.columnPixelSpacing) {
widthScale *= (enabledElement.image.columnPixelSpacing / enabledElement.image.rowPixelSpacing);
} else if (enabledElement.image.columnPixelSpacing < enabledElement.image.rowPixelSpacing) {
heightScale *= (enabledElement.image.rowPixelSpacing / enabledElement.image.columnPixelSpacing);
}
transform.scale(widthScale, heightScale);
// Unrotate to so we can translate unrotated
if (angle !== 0) {
transform.rotate(-angle * Math.PI / 180);
}
// Apply the pan offset
transform.translate(enabledElement.viewport.translation.x, enabledElement.viewport.translation.y);
// Rotate again so we can apply general scale
if (angle !== 0) {
transform.rotate(angle * Math.PI / 180);
}
if (scale !== undefined) {
// Apply the font scale
transform.scale(scale, scale); | }
if (enabledElement.viewport.vflip) {
transform.scale(1, -1);
}
// Translate the origin back to the corner of the image so the event handlers can draw in image coordinate system
transform.translate(-enabledElement.image.width / 2, -enabledElement.image.height / 2);
return transform;
} | }
// Apply Flip if required
if (enabledElement.viewport.hflip) {
transform.scale(-1, 1); | random_line_split |
calculateTransform.js | import { Transform } from './transform.js';
/**
* Calculate the transform for a Cornerstone enabled element
*
* @param {EnabledElement} enabledElement The Cornerstone Enabled Element
* @param {Number} [scale] The viewport scale
* @return {Transform} The current transform
*/
export default function (enabledElement, scale) {
const transform = new Transform();
transform.translate(enabledElement.canvas.width / 2, enabledElement.canvas.height / 2);
// Apply the rotation before scaling for non square pixels
const angle = enabledElement.viewport.rotation;
if (angle !== 0) {
transform.rotate(angle * Math.PI / 180);
}
// Apply the scale
let widthScale = enabledElement.viewport.scale;
let heightScale = enabledElement.viewport.scale;
if (enabledElement.image.rowPixelSpacing < enabledElement.image.columnPixelSpacing) {
widthScale *= (enabledElement.image.columnPixelSpacing / enabledElement.image.rowPixelSpacing);
} else if (enabledElement.image.columnPixelSpacing < enabledElement.image.rowPixelSpacing) {
heightScale *= (enabledElement.image.rowPixelSpacing / enabledElement.image.columnPixelSpacing);
}
transform.scale(widthScale, heightScale);
// Unrotate to so we can translate unrotated
if (angle !== 0) {
transform.rotate(-angle * Math.PI / 180);
}
// Apply the pan offset
transform.translate(enabledElement.viewport.translation.x, enabledElement.viewport.translation.y);
// Rotate again so we can apply general scale
if (angle !== 0) {
transform.rotate(angle * Math.PI / 180);
}
if (scale !== undefined) {
// Apply the font scale
transform.scale(scale, scale);
}
// Apply Flip if required
if (enabledElement.viewport.hflip) {
transform.scale(-1, 1);
}
if (enabledElement.viewport.vflip) |
// Translate the origin back to the corner of the image so the event handlers can draw in image coordinate system
transform.translate(-enabledElement.image.width / 2, -enabledElement.image.height / 2);
return transform;
}
| {
transform.scale(1, -1);
} | conditional_block |
layout.js | import * as React from "react"
import loadable from "@loadable/component"
import PropTypes from "prop-types"
import { useStaticQuery, graphql } from "gatsby"
import "./layout.css"
const Header = loadable(() => import("./header"))
const Layout = ({ children }) => {
const data = useStaticQuery(graphql`
query SiteTitleQuery {
site {
siteMetadata {
title
}
}
}
`)
return (
<>
<Header siteTitle={data.site.siteMetadata?.title || `Title`} />
<div
style={{
margin: `0 auto`,
maxWidth: 960,
padding: `0 1.0875rem 1.45rem`, | marginTop: `2rem`,
}}
>
© {new Date().getFullYear()}, Built with
{` `}
<a href="https://www.gatsbyjs.com">Gatsby</a>
</footer>
</div>
</>
)
}
Layout.propTypes = {
children: PropTypes.node.isRequired,
}
export default Layout | }}
>
<main>{children}</main>
<footer
style={{ | random_line_split |
labels.js | var inText = {top: "2.75rem", left: "0.75rem"};
var outText = {top: "0.5rem", left: 0};
function check() |
$(document).ready(function() {
// console.log("test2")
check();
$(".login-form").on("change", ".login", check);
$(".login-form").on("focus", ".login", function() {
$(this).parent().children("label").animate(outText, 250);
});
$(".login-form").on("focusout", ".login", function() {
if ($(this).val() == "") {
$(this).parent().children("label").animate(inText, 250);
}
});
});
| {
$(".login").each(function() {
if ($(this).val() != "") {
$(this).parent().children("label").animate(outText, 250);
} else {
$(this).parent().children("label").animate(inText, 250);
}
})
} | identifier_body |
labels.js | var inText = {top: "2.75rem", left: "0.75rem"};
var outText = {top: "0.5rem", left: 0};
function | () {
$(".login").each(function() {
if ($(this).val() != "") {
$(this).parent().children("label").animate(outText, 250);
} else {
$(this).parent().children("label").animate(inText, 250);
}
})
}
$(document).ready(function() {
// console.log("test2")
check();
$(".login-form").on("change", ".login", check);
$(".login-form").on("focus", ".login", function() {
$(this).parent().children("label").animate(outText, 250);
});
$(".login-form").on("focusout", ".login", function() {
if ($(this).val() == "") {
$(this).parent().children("label").animate(inText, 250);
}
});
});
| check | identifier_name |
labels.js | var inText = {top: "2.75rem", left: "0.75rem"};
var outText = {top: "0.5rem", left: 0};
function check() {
$(".login").each(function() {
if ($(this).val() != "") {
$(this).parent().children("label").animate(outText, 250);
} else {
$(this).parent().children("label").animate(inText, 250);
}
}) | // console.log("test2")
check();
$(".login-form").on("change", ".login", check);
$(".login-form").on("focus", ".login", function() {
$(this).parent().children("label").animate(outText, 250);
});
$(".login-form").on("focusout", ".login", function() {
if ($(this).val() == "") {
$(this).parent().children("label").animate(inText, 250);
}
});
}); | }
$(document).ready(function() { | random_line_split |
labels.js | var inText = {top: "2.75rem", left: "0.75rem"};
var outText = {top: "0.5rem", left: 0};
function check() {
$(".login").each(function() {
if ($(this).val() != "") {
$(this).parent().children("label").animate(outText, 250);
} else {
$(this).parent().children("label").animate(inText, 250);
}
})
}
$(document).ready(function() {
// console.log("test2")
check();
$(".login-form").on("change", ".login", check);
$(".login-form").on("focus", ".login", function() {
$(this).parent().children("label").animate(outText, 250);
});
$(".login-form").on("focusout", ".login", function() {
if ($(this).val() == "") |
});
});
| {
$(this).parent().children("label").animate(inText, 250);
} | conditional_block |
bfi.rs | use crate::core::bits::Bits;
use crate::core::instruction::{BfiParams, Instruction};
use crate::core::register::Reg;
#[allow(non_snake_case)]
pub fn decode_BFI_t1(opcode: u32) -> Instruction {
let rn: u8 = opcode.get_bits(16..20) as u8;
let rd: u8 = opcode.get_bits(8..12) as u8;
let imm3: u8 = opcode.get_bits(12..15) as u8; |
// msbit = lsbit + width -1 <=>
// width = msbit - lsbit + 1
let width = msbit - lsbit + 1;
Instruction::BFI {
params: BfiParams {
rd: Reg::from(rd),
rn: Reg::from(rn),
lsbit: lsbit as usize,
width: width as usize,
},
}
} | let imm2: u8 = opcode.get_bits(6..8) as u8;
let lsbit = u32::from((imm3 << 2) + imm2);
let msbit = opcode.get_bits(0..5); | random_line_split |
bfi.rs | use crate::core::bits::Bits;
use crate::core::instruction::{BfiParams, Instruction};
use crate::core::register::Reg;
#[allow(non_snake_case)]
pub fn decode_BFI_t1(opcode: u32) -> Instruction | },
}
}
| {
let rn: u8 = opcode.get_bits(16..20) as u8;
let rd: u8 = opcode.get_bits(8..12) as u8;
let imm3: u8 = opcode.get_bits(12..15) as u8;
let imm2: u8 = opcode.get_bits(6..8) as u8;
let lsbit = u32::from((imm3 << 2) + imm2);
let msbit = opcode.get_bits(0..5);
// msbit = lsbit + width -1 <=>
// width = msbit - lsbit + 1
let width = msbit - lsbit + 1;
Instruction::BFI {
params: BfiParams {
rd: Reg::from(rd),
rn: Reg::from(rn),
lsbit: lsbit as usize,
width: width as usize, | identifier_body |
bfi.rs | use crate::core::bits::Bits;
use crate::core::instruction::{BfiParams, Instruction};
use crate::core::register::Reg;
#[allow(non_snake_case)]
pub fn | (opcode: u32) -> Instruction {
let rn: u8 = opcode.get_bits(16..20) as u8;
let rd: u8 = opcode.get_bits(8..12) as u8;
let imm3: u8 = opcode.get_bits(12..15) as u8;
let imm2: u8 = opcode.get_bits(6..8) as u8;
let lsbit = u32::from((imm3 << 2) + imm2);
let msbit = opcode.get_bits(0..5);
// msbit = lsbit + width -1 <=>
// width = msbit - lsbit + 1
let width = msbit - lsbit + 1;
Instruction::BFI {
params: BfiParams {
rd: Reg::from(rd),
rn: Reg::from(rn),
lsbit: lsbit as usize,
width: width as usize,
},
}
}
| decode_BFI_t1 | identifier_name |
helpers.rs |
use errors::*;
use crypto::digest::Digest;
use crypto::blake2b::Blake2b;
/// Helper to calculate a discovery key from a public key. 'key' should be 32 bytes; the returned
/// array will also be 32 bytes long.
///
/// dat discovery keys are calculated as a BLAKE2b "keyed hash" (using the passed key) of the string
/// "hypercore" (with no trailing null byte).
pub fn | (key: &[u8]) -> Vec<u8> {
let mut discovery_key = [0; 32];
let mut hash = Blake2b::new_keyed(32, key);
hash.input(&"hypercore".as_bytes());
hash.result(&mut discovery_key);
discovery_key.to_vec()
}
/// Helper to parse a dat address (aka, public key) in string format.
///
/// Address can start with 'dat://'. It should contain 64 hexadecimal characters.
pub fn parse_dat_address(input: &str) -> Result<Vec<u8>> {
let raw_key = if input.starts_with("dat://") {
&input[6..]
} else {
input
};
if raw_key.len() != 32 * 2 {
bail!("dat key not correct length");
}
let mut key_bytes = vec![];
for i in 0..32 {
let r = u8::from_str_radix(&raw_key[2 * i..2 * i + 2], 16);
match r {
Ok(b) => key_bytes.push(b),
Err(e) => bail!("Problem with hex: {}", e),
};
}
Ok(key_bytes)
}
#[test]
fn test_parse_dat_address() {
assert!(parse_dat_address(
"c7638882870abd4044d6467b0738f15e3a36f57c3a7f7f3417fd7e4e0841d597").is_ok());
assert!(parse_dat_address(
"C7638882870ABD4044D6467B0738F15E3A36F57C3A7F7F3417FD7E4E0841D597").is_ok());
assert!(parse_dat_address(
"dat://c7638882870abd4044d6467b0738f15e3a36f57c3a7f7f3417fd7e4e0841d597").is_ok());
assert!(parse_dat_address(
"c7638882870ab").is_err());
assert!(parse_dat_address(
"g7638882870abd4044d6467b0738f15e3a36f57c3a7f7f3417fd7e4e0841d597").is_err());
assert!(parse_dat_address(
"dat://c7638882870abd4044d6467b0738f15e3a36f57c3a7f7f3417fd7e4e0841d5970").is_err());
assert!(parse_dat_address(
"dat://c7638882870abd4044d6467b0738f15e3a36f57c3a7f7f3417fd7e4e0841d59").is_err());
}
| make_discovery_key | identifier_name |
helpers.rs | use errors::*;
use crypto::digest::Digest;
use crypto::blake2b::Blake2b;
/// Helper to calculate a discovery key from a public key. 'key' should be 32 bytes; the returned
/// array will also be 32 bytes long.
///
/// dat discovery keys are calculated as a BLAKE2b "keyed hash" (using the passed key) of the string
/// "hypercore" (with no trailing null byte).
pub fn make_discovery_key(key: &[u8]) -> Vec<u8> {
let mut discovery_key = [0; 32];
let mut hash = Blake2b::new_keyed(32, key);
hash.input(&"hypercore".as_bytes());
hash.result(&mut discovery_key);
discovery_key.to_vec()
}
/// Helper to parse a dat address (aka, public key) in string format.
///
/// Address can start with 'dat://'. It should contain 64 hexadecimal characters.
pub fn parse_dat_address(input: &str) -> Result<Vec<u8>> {
let raw_key = if input.starts_with("dat://") {
&input[6..]
} else {
input
};
if raw_key.len() != 32 * 2 {
bail!("dat key not correct length");
}
let mut key_bytes = vec![];
for i in 0..32 {
let r = u8::from_str_radix(&raw_key[2 * i..2 * i + 2], 16);
match r {
Ok(b) => key_bytes.push(b),
Err(e) => bail!("Problem with hex: {}", e),
};
} | Ok(key_bytes)
}
#[test]
fn test_parse_dat_address() {
assert!(parse_dat_address(
"c7638882870abd4044d6467b0738f15e3a36f57c3a7f7f3417fd7e4e0841d597").is_ok());
assert!(parse_dat_address(
"C7638882870ABD4044D6467B0738F15E3A36F57C3A7F7F3417FD7E4E0841D597").is_ok());
assert!(parse_dat_address(
"dat://c7638882870abd4044d6467b0738f15e3a36f57c3a7f7f3417fd7e4e0841d597").is_ok());
assert!(parse_dat_address(
"c7638882870ab").is_err());
assert!(parse_dat_address(
"g7638882870abd4044d6467b0738f15e3a36f57c3a7f7f3417fd7e4e0841d597").is_err());
assert!(parse_dat_address(
"dat://c7638882870abd4044d6467b0738f15e3a36f57c3a7f7f3417fd7e4e0841d5970").is_err());
assert!(parse_dat_address(
"dat://c7638882870abd4044d6467b0738f15e3a36f57c3a7f7f3417fd7e4e0841d59").is_err());
} | random_line_split |
|
helpers.rs |
use errors::*;
use crypto::digest::Digest;
use crypto::blake2b::Blake2b;
/// Helper to calculate a discovery key from a public key. 'key' should be 32 bytes; the returned
/// array will also be 32 bytes long.
///
/// dat discovery keys are calculated as a BLAKE2b "keyed hash" (using the passed key) of the string
/// "hypercore" (with no trailing null byte).
pub fn make_discovery_key(key: &[u8]) -> Vec<u8> {
let mut discovery_key = [0; 32];
let mut hash = Blake2b::new_keyed(32, key);
hash.input(&"hypercore".as_bytes());
hash.result(&mut discovery_key);
discovery_key.to_vec()
}
/// Helper to parse a dat address (aka, public key) in string format.
///
/// Address can start with 'dat://'. It should contain 64 hexadecimal characters.
pub fn parse_dat_address(input: &str) -> Result<Vec<u8>> |
#[test]
fn test_parse_dat_address() {
assert!(parse_dat_address(
"c7638882870abd4044d6467b0738f15e3a36f57c3a7f7f3417fd7e4e0841d597").is_ok());
assert!(parse_dat_address(
"C7638882870ABD4044D6467B0738F15E3A36F57C3A7F7F3417FD7E4E0841D597").is_ok());
assert!(parse_dat_address(
"dat://c7638882870abd4044d6467b0738f15e3a36f57c3a7f7f3417fd7e4e0841d597").is_ok());
assert!(parse_dat_address(
"c7638882870ab").is_err());
assert!(parse_dat_address(
"g7638882870abd4044d6467b0738f15e3a36f57c3a7f7f3417fd7e4e0841d597").is_err());
assert!(parse_dat_address(
"dat://c7638882870abd4044d6467b0738f15e3a36f57c3a7f7f3417fd7e4e0841d5970").is_err());
assert!(parse_dat_address(
"dat://c7638882870abd4044d6467b0738f15e3a36f57c3a7f7f3417fd7e4e0841d59").is_err());
}
| {
let raw_key = if input.starts_with("dat://") {
&input[6..]
} else {
input
};
if raw_key.len() != 32 * 2 {
bail!("dat key not correct length");
}
let mut key_bytes = vec![];
for i in 0..32 {
let r = u8::from_str_radix(&raw_key[2 * i..2 * i + 2], 16);
match r {
Ok(b) => key_bytes.push(b),
Err(e) => bail!("Problem with hex: {}", e),
};
}
Ok(key_bytes)
} | identifier_body |
img.rs | use std::iter::Iterator;
use std::path;
use image;
use image::{DynamicImage, GenericImageView};
pub struct Image {
pub width: u32,
pub height: u32,
img_buf: DynamicImage,
}
impl Image {
pub fn new<P: AsRef<path::Path> + ToString>(path: P) -> Image {
let img_buf = image::open(&path).unwrap();
let (width, height) = img_buf.dimensions();
Image {
width,
height,
img_buf,
}
}
pub fn | (img_buf: DynamicImage) -> Image {
let (width, height) = img_buf.dimensions();
Image {
width,
height,
img_buf,
}
}
#[cfg(feature = "qrcode_builder")]
pub fn from_qr(code: &str, width: u32) -> Image {
use image::ImageBuffer;
use qrcode::QrCode;
let code = QrCode::new(code.as_bytes()).unwrap();
let code_width = code.width() as u32;
let point_width = width / (code_width + 2);
// QR code quite zone width
let quite_width = (width % (code_width + 2)) / 2 + point_width;
#[allow(clippy::many_single_char_names)]
let img_buf = ImageBuffer::from_fn(width, width, |x, y| {
let is_white = x < quite_width
|| y < quite_width
|| x >= (width - quite_width)
|| y >= (width - quite_width)
|| !code[(
((x - quite_width) / point_width) as usize,
((y - quite_width) / point_width) as usize,
)];
if is_white {
image::Rgb([0xFF, 0xFF, 0xFF])
} else {
image::Rgb([0, 0, 0])
}
});
Image {
width,
height: width,
img_buf: DynamicImage::ImageRgb8(img_buf),
}
}
pub fn is_blank_pixel(&self, x: u32, y: u32) -> bool {
let pixel = self.img_buf.get_pixel(x, y);
// full transprant OR is white
pixel[3] == 0 || (pixel[0] & pixel[1] & pixel[2]) == 0xFF
}
pub fn bitimage_lines(&self, density: u32) -> BitimageLines {
BitimageLines {
line: 0,
density,
image: self,
}
}
#[allow(clippy::many_single_char_names)]
fn get_line(&self, num: u32, density: u32) -> Option<Box<[u8]>> {
let n = self.height as u32 / density;
let y = num - 1;
if y >= n {
return None;
}
let c = density / 8;
let mut data: Vec<u8> = vec![0; (self.width * c) as usize];
// println!(">>> num={}, density={}, n={}, y={}, c={}, data.len()={}",
// num, density, n, y, c, data.len());
for x in 0..self.width {
for b in 0..density {
let i = x * c + (b >> 3);
// println!("x={}, b={}, i={}, b>>8={}", x, b, i, b>>3);
let l = y * density + b;
if l < self.height && !self.is_blank_pixel(x, l) {
data[i as usize] += 0x80 >> (b & 0x07);
}
}
}
Some(data.into_boxed_slice())
}
#[allow(clippy::many_single_char_names)]
pub fn get_raster(&self) -> Box<[u8]> {
let n = (self.width + 7) / 8; // Number of bytes per line
let mut data: Vec<u8> = vec![0; (n * self.height) as usize];
for y in 0..self.height {
for x in 0..n {
for b in 0..8 {
let i = x * 8 + b;
if i < self.width && !self.is_blank_pixel(i, y) {
data[(y * n + x) as usize] += 0x80 >> (b & 0x7);
}
}
}
}
data.into_boxed_slice()
}
}
pub struct BitimageLines<'a> {
line: u32,
density: u32,
image: &'a Image,
}
impl<'a> Iterator for BitimageLines<'a> {
type Item = Box<[u8]>;
fn next(&mut self) -> Option<Box<[u8]>> {
self.line += 1;
self.image.get_line(self.line, self.density)
}
}
| from | identifier_name |
img.rs | use std::iter::Iterator;
use std::path;
use image;
use image::{DynamicImage, GenericImageView};
pub struct Image {
pub width: u32,
pub height: u32,
img_buf: DynamicImage,
}
impl Image {
pub fn new<P: AsRef<path::Path> + ToString>(path: P) -> Image {
let img_buf = image::open(&path).unwrap();
let (width, height) = img_buf.dimensions();
Image {
width,
height,
img_buf,
}
}
pub fn from(img_buf: DynamicImage) -> Image {
let (width, height) = img_buf.dimensions();
Image {
width,
height,
img_buf,
}
}
#[cfg(feature = "qrcode_builder")]
pub fn from_qr(code: &str, width: u32) -> Image {
use image::ImageBuffer;
use qrcode::QrCode;
let code = QrCode::new(code.as_bytes()).unwrap();
let code_width = code.width() as u32;
let point_width = width / (code_width + 2);
// QR code quite zone width
let quite_width = (width % (code_width + 2)) / 2 + point_width;
#[allow(clippy::many_single_char_names)]
let img_buf = ImageBuffer::from_fn(width, width, |x, y| {
let is_white = x < quite_width
|| y < quite_width
|| x >= (width - quite_width)
|| y >= (width - quite_width)
|| !code[(
((x - quite_width) / point_width) as usize,
((y - quite_width) / point_width) as usize,
)];
if is_white | else {
image::Rgb([0, 0, 0])
}
});
Image {
width,
height: width,
img_buf: DynamicImage::ImageRgb8(img_buf),
}
}
pub fn is_blank_pixel(&self, x: u32, y: u32) -> bool {
let pixel = self.img_buf.get_pixel(x, y);
// full transprant OR is white
pixel[3] == 0 || (pixel[0] & pixel[1] & pixel[2]) == 0xFF
}
pub fn bitimage_lines(&self, density: u32) -> BitimageLines {
BitimageLines {
line: 0,
density,
image: self,
}
}
#[allow(clippy::many_single_char_names)]
fn get_line(&self, num: u32, density: u32) -> Option<Box<[u8]>> {
let n = self.height as u32 / density;
let y = num - 1;
if y >= n {
return None;
}
let c = density / 8;
let mut data: Vec<u8> = vec![0; (self.width * c) as usize];
// println!(">>> num={}, density={}, n={}, y={}, c={}, data.len()={}",
// num, density, n, y, c, data.len());
for x in 0..self.width {
for b in 0..density {
let i = x * c + (b >> 3);
// println!("x={}, b={}, i={}, b>>8={}", x, b, i, b>>3);
let l = y * density + b;
if l < self.height && !self.is_blank_pixel(x, l) {
data[i as usize] += 0x80 >> (b & 0x07);
}
}
}
Some(data.into_boxed_slice())
}
#[allow(clippy::many_single_char_names)]
pub fn get_raster(&self) -> Box<[u8]> {
let n = (self.width + 7) / 8; // Number of bytes per line
let mut data: Vec<u8> = vec![0; (n * self.height) as usize];
for y in 0..self.height {
for x in 0..n {
for b in 0..8 {
let i = x * 8 + b;
if i < self.width && !self.is_blank_pixel(i, y) {
data[(y * n + x) as usize] += 0x80 >> (b & 0x7);
}
}
}
}
data.into_boxed_slice()
}
}
pub struct BitimageLines<'a> {
line: u32,
density: u32,
image: &'a Image,
}
impl<'a> Iterator for BitimageLines<'a> {
type Item = Box<[u8]>;
fn next(&mut self) -> Option<Box<[u8]>> {
self.line += 1;
self.image.get_line(self.line, self.density)
}
}
| {
image::Rgb([0xFF, 0xFF, 0xFF])
} | conditional_block |
img.rs | use std::iter::Iterator;
use std::path;
use image;
use image::{DynamicImage, GenericImageView};
pub struct Image {
pub width: u32,
pub height: u32,
img_buf: DynamicImage,
}
impl Image {
pub fn new<P: AsRef<path::Path> + ToString>(path: P) -> Image {
let img_buf = image::open(&path).unwrap();
let (width, height) = img_buf.dimensions();
Image {
width,
height,
img_buf,
}
}
pub fn from(img_buf: DynamicImage) -> Image {
let (width, height) = img_buf.dimensions();
Image {
width,
height,
img_buf,
}
}
#[cfg(feature = "qrcode_builder")]
pub fn from_qr(code: &str, width: u32) -> Image {
use image::ImageBuffer;
use qrcode::QrCode;
let code = QrCode::new(code.as_bytes()).unwrap();
let code_width = code.width() as u32;
let point_width = width / (code_width + 2);
// QR code quite zone width
let quite_width = (width % (code_width + 2)) / 2 + point_width;
#[allow(clippy::many_single_char_names)]
let img_buf = ImageBuffer::from_fn(width, width, |x, y| {
let is_white = x < quite_width
|| y < quite_width
|| x >= (width - quite_width)
|| y >= (width - quite_width)
|| !code[(
((x - quite_width) / point_width) as usize,
((y - quite_width) / point_width) as usize,
)];
if is_white {
image::Rgb([0xFF, 0xFF, 0xFF])
} else {
image::Rgb([0, 0, 0])
}
});
Image {
width,
height: width,
img_buf: DynamicImage::ImageRgb8(img_buf),
}
}
pub fn is_blank_pixel(&self, x: u32, y: u32) -> bool {
let pixel = self.img_buf.get_pixel(x, y);
// full transprant OR is white
pixel[3] == 0 || (pixel[0] & pixel[1] & pixel[2]) == 0xFF
}
pub fn bitimage_lines(&self, density: u32) -> BitimageLines {
BitimageLines {
line: 0,
density,
image: self,
}
}
#[allow(clippy::many_single_char_names)]
fn get_line(&self, num: u32, density: u32) -> Option<Box<[u8]>> {
let n = self.height as u32 / density;
let y = num - 1;
if y >= n {
return None;
}
let c = density / 8;
let mut data: Vec<u8> = vec![0; (self.width * c) as usize];
// println!(">>> num={}, density={}, n={}, y={}, c={}, data.len()={}",
// num, density, n, y, c, data.len());
for x in 0..self.width {
for b in 0..density {
let i = x * c + (b >> 3);
// println!("x={}, b={}, i={}, b>>8={}", x, b, i, b>>3);
let l = y * density + b;
if l < self.height && !self.is_blank_pixel(x, l) {
data[i as usize] += 0x80 >> (b & 0x07);
}
}
}
Some(data.into_boxed_slice())
}
#[allow(clippy::many_single_char_names)]
pub fn get_raster(&self) -> Box<[u8]> {
let n = (self.width + 7) / 8; // Number of bytes per line
let mut data: Vec<u8> = vec![0; (n * self.height) as usize];
for y in 0..self.height {
for x in 0..n {
for b in 0..8 {
let i = x * 8 + b;
if i < self.width && !self.is_blank_pixel(i, y) {
data[(y * n + x) as usize] += 0x80 >> (b & 0x7);
}
}
}
}
data.into_boxed_slice()
}
}
pub struct BitimageLines<'a> {
line: u32,
density: u32,
image: &'a Image,
}
impl<'a> Iterator for BitimageLines<'a> {
type Item = Box<[u8]>;
fn next(&mut self) -> Option<Box<[u8]>> {
self.line += 1;
self.image.get_line(self.line, self.density)
} | } | random_line_split |
|
msg_log.py | import sys
import os
PACKAGE_PARENT = '../..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
from proxy.repr_to_bytes import repr_bytes_to_bytes_gen
class Message():
def __init__(self, side, log=None, side_log=None):
self.side = side
self.msg = b''
self.pin_pong = self.pin_pong_f(log, side_log)
def pin_pong_f(self, _log, _side_log):
log = {'client' : [], 'server' : [], }
last_string = b''
last_side = 'client'
for i in range(len(_log)):
if i == 0:
last_string = _log[i]
last_side = 'client'
elif i in _side_log[last_side]:
last_string = b''.join([last_string, _log[i]])
elif i not in _side_log[last_side]:
log[last_side].append(last_string)
last_string = _log[i]
last_side = 'server' if last_side == 'client' else 'client'
_pin_pong = {}
if self.side == 'client':
_pin_pong.update({b'':log['client'].pop(0)})
while log['client'] and log['server']:
_pin_pong.update({log['server'].pop(0):log['client'].pop(0)})
elif self.side == 'server':
while log['client'] and log['server']:
_pin_pong.update({log['client'].pop(0):log['server'].pop(0)})
return _pin_pong
def __call__(self, msg=b''):
self.msg = b''.join([self.msg, msg])
if self.msg in self.pin_pong:
yield self.pin_pong[self.msg]
self.msg = b''
@staticmethod
def game_log_from_import(log):
_log = []
_side_log = {'client': [], 'server': []}
i = 0
while log:
if log and log[0].get('C') != None:
_side_log['client'].append(i)
_log.append(log.pop(0)['C'])
elif log and log[0].get('S') != None:
_side_log['server'].append(i)
_log.append(log.pop(0)['S'])
else:
raise Exception("S/C key wrong")
i += 1
return (_log, _side_log,)
@staticmethod
def get_log_from_file(f, pattern):
log = []
with open(f, 'rb') as f:
for line in f:
|
return log
| line = b''.join(repr_bytes_to_bytes_gen(line))
if line[0:len(pattern['c'])] == pattern['c']:
log.append({"C": line[pattern['start']:pattern['end']]})
elif line[0:len(pattern['s'])] == pattern['s']:
log.append({"S": line[pattern['start']:pattern['end']]}) | conditional_block |
msg_log.py | import sys
import os
PACKAGE_PARENT = '../..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
from proxy.repr_to_bytes import repr_bytes_to_bytes_gen
class Message():
def __init__(self, side, log=None, side_log=None):
self.side = side
self.msg = b''
self.pin_pong = self.pin_pong_f(log, side_log)
def pin_pong_f(self, _log, _side_log):
log = {'client' : [], 'server' : [], }
last_string = b''
last_side = 'client'
for i in range(len(_log)):
if i == 0:
last_string = _log[i]
last_side = 'client'
elif i in _side_log[last_side]:
last_string = b''.join([last_string, _log[i]])
elif i not in _side_log[last_side]:
log[last_side].append(last_string)
last_string = _log[i]
last_side = 'server' if last_side == 'client' else 'client'
_pin_pong = {}
if self.side == 'client':
_pin_pong.update({b'':log['client'].pop(0)})
while log['client'] and log['server']:
_pin_pong.update({log['server'].pop(0):log['client'].pop(0)})
elif self.side == 'server':
while log['client'] and log['server']:
_pin_pong.update({log['client'].pop(0):log['server'].pop(0)})
return _pin_pong
def __call__(self, msg=b''):
self.msg = b''.join([self.msg, msg])
if self.msg in self.pin_pong:
yield self.pin_pong[self.msg]
self.msg = b''
@staticmethod
def game_log_from_import(log):
|
@staticmethod
def get_log_from_file(f, pattern):
log = []
with open(f, 'rb') as f:
for line in f:
line = b''.join(repr_bytes_to_bytes_gen(line))
if line[0:len(pattern['c'])] == pattern['c']:
log.append({"C": line[pattern['start']:pattern['end']]})
elif line[0:len(pattern['s'])] == pattern['s']:
log.append({"S": line[pattern['start']:pattern['end']]})
return log
| _log = []
_side_log = {'client': [], 'server': []}
i = 0
while log:
if log and log[0].get('C') != None:
_side_log['client'].append(i)
_log.append(log.pop(0)['C'])
elif log and log[0].get('S') != None:
_side_log['server'].append(i)
_log.append(log.pop(0)['S'])
else:
raise Exception("S/C key wrong")
i += 1
return (_log, _side_log,) | identifier_body |
msg_log.py | import sys
import os
PACKAGE_PARENT = '../..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
from proxy.repr_to_bytes import repr_bytes_to_bytes_gen
class Message():
def __init__(self, side, log=None, side_log=None):
self.side = side
self.msg = b''
self.pin_pong = self.pin_pong_f(log, side_log)
def pin_pong_f(self, _log, _side_log):
log = {'client' : [], 'server' : [], }
last_string = b''
last_side = 'client'
for i in range(len(_log)):
if i == 0:
last_string = _log[i]
last_side = 'client'
elif i in _side_log[last_side]:
last_string = b''.join([last_string, _log[i]])
elif i not in _side_log[last_side]:
log[last_side].append(last_string)
last_string = _log[i]
last_side = 'server' if last_side == 'client' else 'client'
_pin_pong = {}
if self.side == 'client':
_pin_pong.update({b'':log['client'].pop(0)})
while log['client'] and log['server']:
_pin_pong.update({log['server'].pop(0):log['client'].pop(0)})
elif self.side == 'server':
while log['client'] and log['server']:
_pin_pong.update({log['client'].pop(0):log['server'].pop(0)})
return _pin_pong |
def __call__(self, msg=b''):
self.msg = b''.join([self.msg, msg])
if self.msg in self.pin_pong:
yield self.pin_pong[self.msg]
self.msg = b''
@staticmethod
def game_log_from_import(log):
_log = []
_side_log = {'client': [], 'server': []}
i = 0
while log:
if log and log[0].get('C') != None:
_side_log['client'].append(i)
_log.append(log.pop(0)['C'])
elif log and log[0].get('S') != None:
_side_log['server'].append(i)
_log.append(log.pop(0)['S'])
else:
raise Exception("S/C key wrong")
i += 1
return (_log, _side_log,)
@staticmethod
def get_log_from_file(f, pattern):
log = []
with open(f, 'rb') as f:
for line in f:
line = b''.join(repr_bytes_to_bytes_gen(line))
if line[0:len(pattern['c'])] == pattern['c']:
log.append({"C": line[pattern['start']:pattern['end']]})
elif line[0:len(pattern['s'])] == pattern['s']:
log.append({"S": line[pattern['start']:pattern['end']]})
return log | random_line_split |
|
msg_log.py | import sys
import os
PACKAGE_PARENT = '../..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
from proxy.repr_to_bytes import repr_bytes_to_bytes_gen
class Message():
def __init__(self, side, log=None, side_log=None):
self.side = side
self.msg = b''
self.pin_pong = self.pin_pong_f(log, side_log)
def | (self, _log, _side_log):
log = {'client' : [], 'server' : [], }
last_string = b''
last_side = 'client'
for i in range(len(_log)):
if i == 0:
last_string = _log[i]
last_side = 'client'
elif i in _side_log[last_side]:
last_string = b''.join([last_string, _log[i]])
elif i not in _side_log[last_side]:
log[last_side].append(last_string)
last_string = _log[i]
last_side = 'server' if last_side == 'client' else 'client'
_pin_pong = {}
if self.side == 'client':
_pin_pong.update({b'':log['client'].pop(0)})
while log['client'] and log['server']:
_pin_pong.update({log['server'].pop(0):log['client'].pop(0)})
elif self.side == 'server':
while log['client'] and log['server']:
_pin_pong.update({log['client'].pop(0):log['server'].pop(0)})
return _pin_pong
def __call__(self, msg=b''):
self.msg = b''.join([self.msg, msg])
if self.msg in self.pin_pong:
yield self.pin_pong[self.msg]
self.msg = b''
@staticmethod
def game_log_from_import(log):
_log = []
_side_log = {'client': [], 'server': []}
i = 0
while log:
if log and log[0].get('C') != None:
_side_log['client'].append(i)
_log.append(log.pop(0)['C'])
elif log and log[0].get('S') != None:
_side_log['server'].append(i)
_log.append(log.pop(0)['S'])
else:
raise Exception("S/C key wrong")
i += 1
return (_log, _side_log,)
@staticmethod
def get_log_from_file(f, pattern):
log = []
with open(f, 'rb') as f:
for line in f:
line = b''.join(repr_bytes_to_bytes_gen(line))
if line[0:len(pattern['c'])] == pattern['c']:
log.append({"C": line[pattern['start']:pattern['end']]})
elif line[0:len(pattern['s'])] == pattern['s']:
log.append({"S": line[pattern['start']:pattern['end']]})
return log
| pin_pong_f | identifier_name |
json-crud.d.ts | export = JsonDB;
declare function JsonDB(path?: string; options?: Options): Promise<JsonDBInstance>;
declare namespace JsonDB {
export type Id = string | number;
export type IdOrError = Id | Error;
export type Results = { [key: Id]: any };
export type Data = { [key: Id]: any } | any[];
export type FieldFilter =
/// Matches values that are equal to a specified value.
{ $eq: any }
/// Matches values that are greater than a specified value.
| { $gt: any }
/// Matches values that are greater than or equal to a specified value.
| { $gte: any }
/// Matches values that are less than a specified value.
| { $lt: any }
/// Matches values that are less than or equal to a specified value.
| { $lte: any }
/// Matches all values that are not equal to a specified value.
| { $ne: any }
/// Matches any of the values specified in an array.
| { $in: any[] }
/// Matches none of the values specified in an array.
| { $nin: any[] };
export type Filter = Id | Id[] | {
/// Field value comparison
[field: string]: FieldFilter | any,
/// Logical AND
$and?: Filter[],
/// Logical OR
$or?: Filter[],
/// Logical NOT
$not?: Filter,
};
export interface JsonDBInstance {
/**
* Inserts data into the JSON database.
*
* @param data Either an object of key-value pairs, an array
* containing key/value pairs ([key, value,...]) or, if the key field has
* been specified, an array of object values each with the key field set
*
* @returns {Promise} A promise that will resolve with an array containing
* keys or errors in creating the data of the inserted data.
*/
create: (data: Data) => Promise<IdOrError[]>;
/**
* Retrieve values from the database | * @returns A promise that will resolve to an object containing the
* key/values of the values matched
*/
read: (filter?: Filter) => Promise<Results>;
/**
* Updates data in the JSON database. Data can either be given as
* key-value parameter pairs, OR if the key field has been specified Object
* values. New values will be merge into any existing values.
*
* @param data Either:
* - an array of key-value pairs,
* - an array of object value(s) containing the key value (if a key has
* been specified)
* @param filter true if the existing items should be replaced
* instead of or false to merge existing values (if values are mergeable)
*
* @returns {Promise} A promise that will resolve with an array containing
* keys of the updated data.
*/
update: (data: Data, filter: boolean) => Promise<IdOrError[]>;
/**
* Updates data in the JSON database. Data can either be given as
* key-value parameter pairs, OR if the key field has been specified Object
* values. New values will be merge into any existing values.
*
* @param data Property values to update of any objects that
* match the given filter
* @param [filter] A filter to select the items that
* should be updated
*
* @returns {Promise} A promise that will resolve with an array containing
* keys of the updated data.
*/
update: (data: { [ property: string]: any }, filter: Filter) => Promise<IdOrError[]>;
/**
* Deletes values from the database
*
* @param [filter] Filter to use to match the values to delete. If true
* all values will be deleted
*
* @returns A promise resolving to an array of the Ids of the values
* deleted
*/
delete: (filter: Filter | true) => Promise;
/**
* Closes the CRUD database instance
*/
close: () => void;
};
export interface Options {
/// Path to file/folder to contain JSON database
path: string;
/// Field of data objects to be used as the key for the object
id: string;
/// Cache keys of the values in the database
cacheKeys: boolean;
/// Cache the values in the database
cacheValues: boolean;
};
}; | *
* @param [filter] Filter to use to match the values to return. If not
* given, all values will be returned
* | random_line_split |
index.d.ts | // Type definitions for geojson 7946.0
// Project: https://geojson.org/
// Definitions by: Jacob Bruun <https://github.com/cobster>
// Arne Schubert <https://github.com/atd-schubert>
// Jeff Jacobson <https://github.com/JeffJacobson>
// Ilia Choly <https://github.com/icholy>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
// TypeScript Version: 2.3
// Note: as of the RFC 7946 version of GeoJSON, Coordinate Reference Systems
// are no longer supported. (See https://tools.ietf.org/html/rfc7946#appendix-B)}
export as namespace GeoJSON;
/**
* The valid values for the "type" property of GeoJSON geometry objects.
* https://tools.ietf.org/html/rfc7946#section-1.4
*/
export type GeoJsonGeometryTypes = "Point" | "LineString" | "MultiPoint" | "Polygon" | "MultiLineString" |
"MultiPolygon" | "GeometryCollection";
/**
* The value values for the "type" property of GeoJSON Objects.
* https://tools.ietf.org/html/rfc7946#section-1.4
*/
export type GeoJsonTypes = "FeatureCollection" | "Feature" | GeoJsonGeometryTypes;
/**
* Bounding box
* https://tools.ietf.org/html/rfc7946#section-5
*/
export type BBox = [number, number, number, number] | [number, number, number, number, number, number];
/**
* A Position is an array of coordinates.
* https://tools.ietf.org/html/rfc7946#section-3.1.1
* Array should contain between two and three elements.
* The previous GeoJSON specification allowed more elements (e.g., which could be used to represent M values),
* but the current specification only allows X, Y, and (optionally) Z to be defined.
*/
export type Position = number[]; // [number, number] | [number, number, number];
/**
* The base GeoJSON object.
* https://tools.ietf.org/html/rfc7946#section-3
* The GeoJSON specification also allows foreign members
* (https://tools.ietf.org/html/rfc7946#section-6.1)
* Developers should use "&" type in TypeScript or extend the interface
* to add these foreign members.
*/ | // [key: string]: any;
/**
* Specifies the type of GeoJSON object.
*/
type: GeoJsonTypes;
/**
* Bounding box of the coordinate range of the object's Geometries, Features, or Feature Collections.
* https://tools.ietf.org/html/rfc7946#section-5
*/
bbox?: BBox;
}
/**
* Union of GeoJSON objects.
*/
export type GeoJSON = Geometry | Feature | FeatureCollection;
/**
* A geometry object.
* https://tools.ietf.org/html/rfc7946#section-3
*/
export interface GeometryObject extends GeoJsonObject {
type: GeoJsonGeometryTypes;
}
/**
* Union of geometry objects.
* https://tools.ietf.org/html/rfc7946#section-3
*/
export type Geometry = Point | MultiPoint | LineString | MultiLineString | Polygon | MultiPolygon | GeometryCollection;
/**
* Point geometry object.
* https://tools.ietf.org/html/rfc7946#section-3.1.2
*/
export interface Point extends GeometryObject {
type: "Point";
coordinates: Position;
}
/**
* MultiPoint geometry object.
* https://tools.ietf.org/html/rfc7946#section-3.1.3
*/
export interface MultiPoint extends GeometryObject {
type: "MultiPoint";
coordinates: Position[];
}
/**
* LineString geometry object.
* https://tools.ietf.org/html/rfc7946#section-3.1.4
*/
export interface LineString extends GeometryObject {
type: "LineString";
coordinates: Position[];
}
/**
* MultiLineString geometry object.
* https://tools.ietf.org/html/rfc7946#section-3.1.5
*/
export interface MultiLineString extends GeometryObject {
type: "MultiLineString";
coordinates: Position[][];
}
/**
* Polygon geometry object.
* https://tools.ietf.org/html/rfc7946#section-3.1.6
*/
export interface Polygon extends GeometryObject {
type: "Polygon";
coordinates: Position[][];
}
/**
* MultiPolygon geometry object.
* https://tools.ietf.org/html/rfc7946#section-3.1.7
*/
export interface MultiPolygon extends GeometryObject {
type: "MultiPolygon";
coordinates: Position[][][];
}
/**
* Geometry Collection
* https://tools.ietf.org/html/rfc7946#section-3.1.8
*/
export interface GeometryCollection extends GeometryObject {
type: "GeometryCollection";
geometries: Geometry[];
}
export type GeoJsonProperties = { [name: string]: any; } | null;
/**
* A feature object which contains a geometry and associated properties.
* https://tools.ietf.org/html/rfc7946#section-3.2
*/
export interface Feature<G extends GeometryObject | null = Geometry, P = GeoJsonProperties> extends GeoJsonObject {
type: "Feature";
/**
* The feature's geometry
*/
geometry: G;
/**
* A value that uniquely identifies this feature in a
* https://tools.ietf.org/html/rfc7946#section-3.2.
*/
id?: string | number;
/**
* Properties associated with this feature.
*/
properties: P;
}
/**
* A collection of feature objects.
* https://tools.ietf.org/html/rfc7946#section-3.3
*/
export interface FeatureCollection<G extends GeometryObject | null = Geometry, P = GeoJsonProperties> extends GeoJsonObject {
type: "FeatureCollection";
features: Array<Feature<G, P>>;
} | export interface GeoJsonObject {
// Don't include foreign members directly into this type def.
// in order to preserve type safety. | random_line_split |
chrome_loader.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use net::chrome_loader::resolve_chrome_url;
use url::Url;
#[test]
fn test_relative() {
let url = Url::parse("chrome://../something").unwrap();
assert!(resolve_chrome_url(&url).is_err());
}
#[test]
fn test_relative_2() {
let url = Url::parse("chrome://subdir/../something").unwrap();
assert!(resolve_chrome_url(&url).is_err());
}
#[test]
#[cfg(not(target_os = "windows"))]
fn test_absolute() {
let url = Url::parse("chrome:///etc/passwd").unwrap();
assert!(resolve_chrome_url(&url).is_err());
}
| fn test_absolute_2() {
let url = Url::parse("chrome://C:\\Windows").unwrap();
assert!(resolve_chrome_url(&url).is_err());
}
#[test]
#[cfg(target_os = "windows")]
fn test_absolute_3() {
let url = Url::parse("chrome://\\\\server/C$").unwrap();
assert!(resolve_chrome_url(&url).is_err());
}
#[test]
fn test_valid() {
let url = Url::parse("chrome://badcert.jpg").unwrap();
let resolved = resolve_chrome_url(&url).unwrap();
assert_eq!(resolved.scheme, "file");
} | #[test]
#[cfg(target_os = "windows")] | random_line_split |
chrome_loader.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use net::chrome_loader::resolve_chrome_url;
use url::Url;
#[test]
fn test_relative() {
let url = Url::parse("chrome://../something").unwrap();
assert!(resolve_chrome_url(&url).is_err());
}
#[test]
fn test_relative_2() {
let url = Url::parse("chrome://subdir/../something").unwrap();
assert!(resolve_chrome_url(&url).is_err());
}
#[test]
#[cfg(not(target_os = "windows"))]
fn test_absolute() {
let url = Url::parse("chrome:///etc/passwd").unwrap();
assert!(resolve_chrome_url(&url).is_err());
}
#[test]
#[cfg(target_os = "windows")]
fn test_absolute_2() |
#[test]
#[cfg(target_os = "windows")]
fn test_absolute_3() {
let url = Url::parse("chrome://\\\\server/C$").unwrap();
assert!(resolve_chrome_url(&url).is_err());
}
#[test]
fn test_valid() {
let url = Url::parse("chrome://badcert.jpg").unwrap();
let resolved = resolve_chrome_url(&url).unwrap();
assert_eq!(resolved.scheme, "file");
}
| {
let url = Url::parse("chrome://C:\\Windows").unwrap();
assert!(resolve_chrome_url(&url).is_err());
} | identifier_body |
chrome_loader.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use net::chrome_loader::resolve_chrome_url;
use url::Url;
#[test]
fn test_relative() {
let url = Url::parse("chrome://../something").unwrap();
assert!(resolve_chrome_url(&url).is_err());
}
#[test]
fn test_relative_2() {
let url = Url::parse("chrome://subdir/../something").unwrap();
assert!(resolve_chrome_url(&url).is_err());
}
#[test]
#[cfg(not(target_os = "windows"))]
fn | () {
let url = Url::parse("chrome:///etc/passwd").unwrap();
assert!(resolve_chrome_url(&url).is_err());
}
#[test]
#[cfg(target_os = "windows")]
fn test_absolute_2() {
let url = Url::parse("chrome://C:\\Windows").unwrap();
assert!(resolve_chrome_url(&url).is_err());
}
#[test]
#[cfg(target_os = "windows")]
fn test_absolute_3() {
let url = Url::parse("chrome://\\\\server/C$").unwrap();
assert!(resolve_chrome_url(&url).is_err());
}
#[test]
fn test_valid() {
let url = Url::parse("chrome://badcert.jpg").unwrap();
let resolved = resolve_chrome_url(&url).unwrap();
assert_eq!(resolved.scheme, "file");
}
| test_absolute | identifier_name |
defaulter-list-resource.service.spec.ts | import { TestBed, async, inject, fakeAsync } from '@angular/core/testing';
import { LocalStorageService } from '../utils/local-storage.service';
import { AppSettingsService } from '../app-settings/app-settings.service';
import { DefaulterListResourceService } from './defaulter-list-resource.service';
import { CacheModule, CacheService } from 'ionic-cache';
import { DataCacheService } from '../shared/services/data-cache.service';
import {
HttpClientTestingModule,
HttpTestingController
} from '@angular/common/http/testing';
import { CacheStorageService } from 'ionic-cache/dist/cache-storage';
class MockCacheStorageService {
constructor(a, b) {}
public ready() |
}
const expectedResults = {
startIndex: 0,
size: 3,
result: [
{
patient_uuid: 'patient-uuid',
person_id: 102322,
encounter_id: 636033226,
location_id: 1,
location_uuid: '08feae7c-1352-11df-a1f1-0026b9348838',
days_since_rtc: 30,
encounter_datetime: '2016-09-19T21:00:00.000Z',
rtc_date: '2017-02-06T21:00:00.000Z',
arv_start_date: '2009-09-15T21:00:00.000Z',
encounter_type_name: 'ADULTNONCLINICALMEDICATION',
person_name: 'Peter kenya Munya',
phone_number: null,
identifiers: '24371MT-9, 009421138-0, 15204-21078',
filed_id: '38-11-42-09-0',
gender: 'M',
birthdate: '1965-12-31T21:00:00.000Z',
birthdate_estimated: 0,
dead: 0,
death_date: null,
cause_of_death: null,
creator: 50842,
date_created: '2009-09-19T05:28:30.000Z',
changed_by: 131180,
date_changed: '2010-02-15T06:40:49.000Z',
voided: 0,
voided_by: null,
date_voided: null,
void_reason: null,
uuid: 'a4ce27ae-f1e5-4893-9248-50332de6281e',
deathdate_estimated: 0,
birthtime: null,
age: 51
},
{
patient_uuid: 'patient-uuid',
person_id: 35432803,
encounter_id: 658032945,
location_id: 1,
location_uuid: 'location-uuid',
days_since_rtc: 31,
encounter_datetime: '2017-01-08T21:00:00.000Z',
rtc_date: '2017-02-05T21:00:00.000Z',
arv_start_date: '2011-05-16T21:00:00.000Z',
encounter_type_name: 'ADULTRETURN',
person_name: 'man me toa',
phone_number: '0727091346',
identifiers: '295169210-8, 15204-25723',
filed_id: '10-92-16-95-2',
gender: 'F',
birthdate: '1975-12-31T21:00:00.000Z',
birthdate_estimated: 0,
dead: 0,
death_date: null,
cause_of_death: null,
creator: 83039,
date_created: '2011-05-03T07:38:31.000Z',
changed_by: 165060,
date_changed: '2013-08-05T09:02:05.000Z',
voided: 0,
voided_by: null,
date_voided: null,
void_reason: null,
uuid: '2f8213c6-5c26-4889-ba28-7f28dadd237e',
deathdate_estimated: 0,
birthtime: null,
age: 41
}
]
};
describe('DefaulterListResourceService Tests', () => {
let s;
let httpMock;
beforeEach(() => {
TestBed.configureTestingModule({
declarations: [],
imports: [CacheModule, HttpClientTestingModule],
providers: [
DefaulterListResourceService,
AppSettingsService,
LocalStorageService,
CacheService,
DataCacheService,
{
provide: CacheStorageService,
useFactory: () => {
return new MockCacheStorageService(null, null);
}
}
]
});
httpMock = TestBed.get(HttpTestingController);
s = TestBed.get(DefaulterListResourceService);
});
afterEach(() => {
TestBed.resetTestingModule();
});
it('should be defined', () => {
expect(s).toBeTruthy();
});
it('all defaulter list resource methods should be defined', () => {
expect(s.getDefaulterList).toBeDefined();
expect(s.getUrl).toBeDefined();
});
it(
'should return a list containing list of defaulters for a given ' +
' date range and location ',
() => {
s.getDefaulterList({
defaulterPeriod: 30,
maxDefaultPeriod: 100,
startIndex: undefined,
locationUuids: 'uuid',
limit: undefined
}).subscribe((result) => {
expect(result).toBeDefined();
expect(result).toEqual(expectedResults.result);
});
}
);
});
| {
return true;
} | identifier_body |
defaulter-list-resource.service.spec.ts | import { TestBed, async, inject, fakeAsync } from '@angular/core/testing';
import { LocalStorageService } from '../utils/local-storage.service';
import { AppSettingsService } from '../app-settings/app-settings.service';
import { DefaulterListResourceService } from './defaulter-list-resource.service';
import { CacheModule, CacheService } from 'ionic-cache';
import { DataCacheService } from '../shared/services/data-cache.service';
import {
HttpClientTestingModule,
HttpTestingController
} from '@angular/common/http/testing';
import { CacheStorageService } from 'ionic-cache/dist/cache-storage';
class MockCacheStorageService {
constructor(a, b) {}
public ready() {
return true;
}
}
const expectedResults = {
startIndex: 0,
size: 3,
result: [
{
patient_uuid: 'patient-uuid',
person_id: 102322,
encounter_id: 636033226,
location_id: 1,
location_uuid: '08feae7c-1352-11df-a1f1-0026b9348838',
days_since_rtc: 30,
encounter_datetime: '2016-09-19T21:00:00.000Z',
rtc_date: '2017-02-06T21:00:00.000Z',
arv_start_date: '2009-09-15T21:00:00.000Z',
encounter_type_name: 'ADULTNONCLINICALMEDICATION',
person_name: 'Peter kenya Munya',
phone_number: null,
identifiers: '24371MT-9, 009421138-0, 15204-21078',
filed_id: '38-11-42-09-0',
gender: 'M',
birthdate: '1965-12-31T21:00:00.000Z',
birthdate_estimated: 0,
dead: 0,
death_date: null,
cause_of_death: null,
creator: 50842,
date_created: '2009-09-19T05:28:30.000Z',
changed_by: 131180,
date_changed: '2010-02-15T06:40:49.000Z',
voided: 0,
voided_by: null,
date_voided: null,
void_reason: null,
uuid: 'a4ce27ae-f1e5-4893-9248-50332de6281e',
deathdate_estimated: 0,
birthtime: null,
age: 51
},
{
patient_uuid: 'patient-uuid',
person_id: 35432803,
encounter_id: 658032945,
location_id: 1,
location_uuid: 'location-uuid',
days_since_rtc: 31,
encounter_datetime: '2017-01-08T21:00:00.000Z',
rtc_date: '2017-02-05T21:00:00.000Z',
arv_start_date: '2011-05-16T21:00:00.000Z',
encounter_type_name: 'ADULTRETURN',
person_name: 'man me toa',
phone_number: '0727091346',
identifiers: '295169210-8, 15204-25723',
filed_id: '10-92-16-95-2',
gender: 'F',
birthdate: '1975-12-31T21:00:00.000Z',
birthdate_estimated: 0,
dead: 0,
death_date: null,
cause_of_death: null,
creator: 83039,
date_created: '2011-05-03T07:38:31.000Z', | date_changed: '2013-08-05T09:02:05.000Z',
voided: 0,
voided_by: null,
date_voided: null,
void_reason: null,
uuid: '2f8213c6-5c26-4889-ba28-7f28dadd237e',
deathdate_estimated: 0,
birthtime: null,
age: 41
}
]
};
describe('DefaulterListResourceService Tests', () => {
let s;
let httpMock;
beforeEach(() => {
TestBed.configureTestingModule({
declarations: [],
imports: [CacheModule, HttpClientTestingModule],
providers: [
DefaulterListResourceService,
AppSettingsService,
LocalStorageService,
CacheService,
DataCacheService,
{
provide: CacheStorageService,
useFactory: () => {
return new MockCacheStorageService(null, null);
}
}
]
});
httpMock = TestBed.get(HttpTestingController);
s = TestBed.get(DefaulterListResourceService);
});
afterEach(() => {
TestBed.resetTestingModule();
});
it('should be defined', () => {
expect(s).toBeTruthy();
});
it('all defaulter list resource methods should be defined', () => {
expect(s.getDefaulterList).toBeDefined();
expect(s.getUrl).toBeDefined();
});
it(
'should return a list containing list of defaulters for a given ' +
' date range and location ',
() => {
s.getDefaulterList({
defaulterPeriod: 30,
maxDefaultPeriod: 100,
startIndex: undefined,
locationUuids: 'uuid',
limit: undefined
}).subscribe((result) => {
expect(result).toBeDefined();
expect(result).toEqual(expectedResults.result);
});
}
);
}); | changed_by: 165060, | random_line_split |
defaulter-list-resource.service.spec.ts | import { TestBed, async, inject, fakeAsync } from '@angular/core/testing';
import { LocalStorageService } from '../utils/local-storage.service';
import { AppSettingsService } from '../app-settings/app-settings.service';
import { DefaulterListResourceService } from './defaulter-list-resource.service';
import { CacheModule, CacheService } from 'ionic-cache';
import { DataCacheService } from '../shared/services/data-cache.service';
import {
HttpClientTestingModule,
HttpTestingController
} from '@angular/common/http/testing';
import { CacheStorageService } from 'ionic-cache/dist/cache-storage';
class MockCacheStorageService {
constructor(a, b) {}
public | () {
return true;
}
}
const expectedResults = {
startIndex: 0,
size: 3,
result: [
{
patient_uuid: 'patient-uuid',
person_id: 102322,
encounter_id: 636033226,
location_id: 1,
location_uuid: '08feae7c-1352-11df-a1f1-0026b9348838',
days_since_rtc: 30,
encounter_datetime: '2016-09-19T21:00:00.000Z',
rtc_date: '2017-02-06T21:00:00.000Z',
arv_start_date: '2009-09-15T21:00:00.000Z',
encounter_type_name: 'ADULTNONCLINICALMEDICATION',
person_name: 'Peter kenya Munya',
phone_number: null,
identifiers: '24371MT-9, 009421138-0, 15204-21078',
filed_id: '38-11-42-09-0',
gender: 'M',
birthdate: '1965-12-31T21:00:00.000Z',
birthdate_estimated: 0,
dead: 0,
death_date: null,
cause_of_death: null,
creator: 50842,
date_created: '2009-09-19T05:28:30.000Z',
changed_by: 131180,
date_changed: '2010-02-15T06:40:49.000Z',
voided: 0,
voided_by: null,
date_voided: null,
void_reason: null,
uuid: 'a4ce27ae-f1e5-4893-9248-50332de6281e',
deathdate_estimated: 0,
birthtime: null,
age: 51
},
{
patient_uuid: 'patient-uuid',
person_id: 35432803,
encounter_id: 658032945,
location_id: 1,
location_uuid: 'location-uuid',
days_since_rtc: 31,
encounter_datetime: '2017-01-08T21:00:00.000Z',
rtc_date: '2017-02-05T21:00:00.000Z',
arv_start_date: '2011-05-16T21:00:00.000Z',
encounter_type_name: 'ADULTRETURN',
person_name: 'man me toa',
phone_number: '0727091346',
identifiers: '295169210-8, 15204-25723',
filed_id: '10-92-16-95-2',
gender: 'F',
birthdate: '1975-12-31T21:00:00.000Z',
birthdate_estimated: 0,
dead: 0,
death_date: null,
cause_of_death: null,
creator: 83039,
date_created: '2011-05-03T07:38:31.000Z',
changed_by: 165060,
date_changed: '2013-08-05T09:02:05.000Z',
voided: 0,
voided_by: null,
date_voided: null,
void_reason: null,
uuid: '2f8213c6-5c26-4889-ba28-7f28dadd237e',
deathdate_estimated: 0,
birthtime: null,
age: 41
}
]
};
describe('DefaulterListResourceService Tests', () => {
let s;
let httpMock;
beforeEach(() => {
TestBed.configureTestingModule({
declarations: [],
imports: [CacheModule, HttpClientTestingModule],
providers: [
DefaulterListResourceService,
AppSettingsService,
LocalStorageService,
CacheService,
DataCacheService,
{
provide: CacheStorageService,
useFactory: () => {
return new MockCacheStorageService(null, null);
}
}
]
});
httpMock = TestBed.get(HttpTestingController);
s = TestBed.get(DefaulterListResourceService);
});
afterEach(() => {
TestBed.resetTestingModule();
});
it('should be defined', () => {
expect(s).toBeTruthy();
});
it('all defaulter list resource methods should be defined', () => {
expect(s.getDefaulterList).toBeDefined();
expect(s.getUrl).toBeDefined();
});
it(
'should return a list containing list of defaulters for a given ' +
' date range and location ',
() => {
s.getDefaulterList({
defaulterPeriod: 30,
maxDefaultPeriod: 100,
startIndex: undefined,
locationUuids: 'uuid',
limit: undefined
}).subscribe((result) => {
expect(result).toBeDefined();
expect(result).toEqual(expectedResults.result);
});
}
);
});
| ready | identifier_name |
config-file.ts | import { promises as fs } from "fs"
import * as tr from "text-runner-core"
import * as YAML from "yamljs"
import * as config from "./configuration"
/** provides the config file content as a Configuration instance */
export async function load(cmdLineArgs: config.Data): Promise<config.Data> {
return parse(await read(cmdLineArgs))
}
/** creates a new Text-Runner configuration file */
export async function create(cmdLineArgs: config.Data): Promise<void> { | # To exclude the "vendor" folder: '{,!(vendor)/**/}*.md'
files: "**/*.md"
# black-list of files not to test
# This is applied after the white-list above.
exclude: []
# the formatter to use (detailed, dot, progress, summary)
format: detailed
# Define which folders of your Markdown source get compiled to HTML
# and published under a different URL path.
#
# In this example, the public URL "/blog/foo"
# is hosted as "post/foo.md":
# publications:
# - localPath: /posts/
# publicPath: /blog
# publicExtension: ''
# Name of the default filename in folders.
# If this setting is given, and a link points to a folder,
# the link is assumed to point to the default file in that folder.
# defaultFile: index.md
# prefix that makes anchor tags active regions
regionMarker: type
# whether to display/emit skipped activities
showSkipped: false
# whether to run the tests in an external temp directory,
# uses ./tmp if false,
# you can also provide a custom directory path here
systemTmp: false
# whether to verify online files/links (warning: this makes tests flaky)
online: false
# whether to delete all files in the workspace folder before running the tests
emptyWorkspace: true`
)
}
/** provides the textual config file content */
async function read(cmdLineArgs: config.Data): Promise<string> {
if (cmdLineArgs.configFileName) {
try {
const result = await fs.readFile(cmdLineArgs.configFileName, "utf8")
return result
} catch (e) {
throw new tr.UserError(`cannot read configuration file "${cmdLineArgs.configFileName}"`, e.message)
}
}
try {
const result = await fs.readFile("text-run.yml", "utf8")
return result
} catch (e) {
return ""
}
}
/** parses the textual config file content into a Configuration instance */
function parse(fileContent: string): config.Data {
if (fileContent === "") {
return new config.Data({})
}
const fileData = YAML.parse(fileContent)
return new config.Data({
regionMarker: fileData.regionMarker,
defaultFile: fileData.defaultFile,
emptyWorkspace: fileData.emptyWorkspace,
exclude: fileData.exclude,
files: fileData.files,
formatterName: fileData.format,
online: fileData.online,
publications: fileData.publications,
showSkipped: fileData.showSkipped,
systemTmp: fileData.systemTmp,
workspace: fileData.workspace,
})
} | await fs.writeFile(
cmdLineArgs.configFileName || "text-run.yml",
`# white-list for files to test
# This is a glob expression, see https://github.com/isaacs/node-glob#glob-primer
# The folder "node_modules" is already excluded. | random_line_split |
config-file.ts | import { promises as fs } from "fs"
import * as tr from "text-runner-core"
import * as YAML from "yamljs"
import * as config from "./configuration"
/** provides the config file content as a Configuration instance */
export async function load(cmdLineArgs: config.Data): Promise<config.Data> {
return parse(await read(cmdLineArgs))
}
/** creates a new Text-Runner configuration file */
export async function create(cmdLineArgs: config.Data): Promise<void> {
await fs.writeFile(
cmdLineArgs.configFileName || "text-run.yml",
`# white-list for files to test
# This is a glob expression, see https://github.com/isaacs/node-glob#glob-primer
# The folder "node_modules" is already excluded.
# To exclude the "vendor" folder: '{,!(vendor)/**/}*.md'
files: "**/*.md"
# black-list of files not to test
# This is applied after the white-list above.
exclude: []
# the formatter to use (detailed, dot, progress, summary)
format: detailed
# Define which folders of your Markdown source get compiled to HTML
# and published under a different URL path.
#
# In this example, the public URL "/blog/foo"
# is hosted as "post/foo.md":
# publications:
# - localPath: /posts/
# publicPath: /blog
# publicExtension: ''
# Name of the default filename in folders.
# If this setting is given, and a link points to a folder,
# the link is assumed to point to the default file in that folder.
# defaultFile: index.md
# prefix that makes anchor tags active regions
regionMarker: type
# whether to display/emit skipped activities
showSkipped: false
# whether to run the tests in an external temp directory,
# uses ./tmp if false,
# you can also provide a custom directory path here
systemTmp: false
# whether to verify online files/links (warning: this makes tests flaky)
online: false
# whether to delete all files in the workspace folder before running the tests
emptyWorkspace: true`
)
}
/** provides the textual config file content */
async function read(cmdLineArgs: config.Data): Promise<string> {
if (cmdLineArgs.configFileName) {
try {
const result = await fs.readFile(cmdLineArgs.configFileName, "utf8")
return result
} catch (e) {
throw new tr.UserError(`cannot read configuration file "${cmdLineArgs.configFileName}"`, e.message)
}
}
try {
const result = await fs.readFile("text-run.yml", "utf8")
return result
} catch (e) {
return ""
}
}
/** parses the textual config file content into a Configuration instance */
function | (fileContent: string): config.Data {
if (fileContent === "") {
return new config.Data({})
}
const fileData = YAML.parse(fileContent)
return new config.Data({
regionMarker: fileData.regionMarker,
defaultFile: fileData.defaultFile,
emptyWorkspace: fileData.emptyWorkspace,
exclude: fileData.exclude,
files: fileData.files,
formatterName: fileData.format,
online: fileData.online,
publications: fileData.publications,
showSkipped: fileData.showSkipped,
systemTmp: fileData.systemTmp,
workspace: fileData.workspace,
})
}
| parse | identifier_name |
config-file.ts | import { promises as fs } from "fs"
import * as tr from "text-runner-core"
import * as YAML from "yamljs"
import * as config from "./configuration"
/** provides the config file content as a Configuration instance */
export async function load(cmdLineArgs: config.Data): Promise<config.Data> {
return parse(await read(cmdLineArgs))
}
/** creates a new Text-Runner configuration file */
export async function create(cmdLineArgs: config.Data): Promise<void> {
await fs.writeFile(
cmdLineArgs.configFileName || "text-run.yml",
`# white-list for files to test
# This is a glob expression, see https://github.com/isaacs/node-glob#glob-primer
# The folder "node_modules" is already excluded.
# To exclude the "vendor" folder: '{,!(vendor)/**/}*.md'
files: "**/*.md"
# black-list of files not to test
# This is applied after the white-list above.
exclude: []
# the formatter to use (detailed, dot, progress, summary)
format: detailed
# Define which folders of your Markdown source get compiled to HTML
# and published under a different URL path.
#
# In this example, the public URL "/blog/foo"
# is hosted as "post/foo.md":
# publications:
# - localPath: /posts/
# publicPath: /blog
# publicExtension: ''
# Name of the default filename in folders.
# If this setting is given, and a link points to a folder,
# the link is assumed to point to the default file in that folder.
# defaultFile: index.md
# prefix that makes anchor tags active regions
regionMarker: type
# whether to display/emit skipped activities
showSkipped: false
# whether to run the tests in an external temp directory,
# uses ./tmp if false,
# you can also provide a custom directory path here
systemTmp: false
# whether to verify online files/links (warning: this makes tests flaky)
online: false
# whether to delete all files in the workspace folder before running the tests
emptyWorkspace: true`
)
}
/** provides the textual config file content */
async function read(cmdLineArgs: config.Data): Promise<string> {
if (cmdLineArgs.configFileName) {
try {
const result = await fs.readFile(cmdLineArgs.configFileName, "utf8")
return result
} catch (e) {
throw new tr.UserError(`cannot read configuration file "${cmdLineArgs.configFileName}"`, e.message)
}
}
try {
const result = await fs.readFile("text-run.yml", "utf8")
return result
} catch (e) {
return ""
}
}
/** parses the textual config file content into a Configuration instance */
function parse(fileContent: string): config.Data {
if (fileContent === "") |
const fileData = YAML.parse(fileContent)
return new config.Data({
regionMarker: fileData.regionMarker,
defaultFile: fileData.defaultFile,
emptyWorkspace: fileData.emptyWorkspace,
exclude: fileData.exclude,
files: fileData.files,
formatterName: fileData.format,
online: fileData.online,
publications: fileData.publications,
showSkipped: fileData.showSkipped,
systemTmp: fileData.systemTmp,
workspace: fileData.workspace,
})
}
| {
return new config.Data({})
} | conditional_block |
config-file.ts | import { promises as fs } from "fs"
import * as tr from "text-runner-core"
import * as YAML from "yamljs"
import * as config from "./configuration"
/** provides the config file content as a Configuration instance */
export async function load(cmdLineArgs: config.Data): Promise<config.Data> {
return parse(await read(cmdLineArgs))
}
/** creates a new Text-Runner configuration file */
export async function create(cmdLineArgs: config.Data): Promise<void> | # is hosted as "post/foo.md":
# publications:
# - localPath: /posts/
# publicPath: /blog
# publicExtension: ''
# Name of the default filename in folders.
# If this setting is given, and a link points to a folder,
# the link is assumed to point to the default file in that folder.
# defaultFile: index.md
# prefix that makes anchor tags active regions
regionMarker: type
# whether to display/emit skipped activities
showSkipped: false
# whether to run the tests in an external temp directory,
# uses ./tmp if false,
# you can also provide a custom directory path here
systemTmp: false
# whether to verify online files/links (warning: this makes tests flaky)
online: false
# whether to delete all files in the workspace folder before running the tests
emptyWorkspace: true`
)
}
/** provides the textual config file content */
async function read(cmdLineArgs: config.Data): Promise<string> {
if (cmdLineArgs.configFileName) {
try {
const result = await fs.readFile(cmdLineArgs.configFileName, "utf8")
return result
} catch (e) {
throw new tr.UserError(`cannot read configuration file "${cmdLineArgs.configFileName}"`, e.message)
}
}
try {
const result = await fs.readFile("text-run.yml", "utf8")
return result
} catch (e) {
return ""
}
}
/** parses the textual config file content into a Configuration instance */
function parse(fileContent: string): config.Data {
if (fileContent === "") {
return new config.Data({})
}
const fileData = YAML.parse(fileContent)
return new config.Data({
regionMarker: fileData.regionMarker,
defaultFile: fileData.defaultFile,
emptyWorkspace: fileData.emptyWorkspace,
exclude: fileData.exclude,
files: fileData.files,
formatterName: fileData.format,
online: fileData.online,
publications: fileData.publications,
showSkipped: fileData.showSkipped,
systemTmp: fileData.systemTmp,
workspace: fileData.workspace,
})
}
| {
await fs.writeFile(
cmdLineArgs.configFileName || "text-run.yml",
`# white-list for files to test
# This is a glob expression, see https://github.com/isaacs/node-glob#glob-primer
# The folder "node_modules" is already excluded.
# To exclude the "vendor" folder: '{,!(vendor)/**/}*.md'
files: "**/*.md"
# black-list of files not to test
# This is applied after the white-list above.
exclude: []
# the formatter to use (detailed, dot, progress, summary)
format: detailed
# Define which folders of your Markdown source get compiled to HTML
# and published under a different URL path.
#
# In this example, the public URL "/blog/foo" | identifier_body |
InformationLabel.tsx | import React, { FC, useContext } from 'react';
import ReactiveFormControl from '../form-controls/ReactiveFormControl';
import { Label, Link, Icon } from 'office-ui-fabric-react';
import { style } from 'typestyle';
import { ThemeContext } from '../../ThemeContext';
import { ThemeExtended } from '../../theme/SemanticColorsExtended';
export interface CustomInformationLabelProps {
id: string;
value: string;
label: string;
link?: string;
labelProps?: {
icon?: string;
type?: string;
};
className?: string;
onClick?: () => void;
}
const labelIconStyle = style({
fontSize: '12px',
marginRight: '4px',
});
const getLabelColor = (type: 'success' | 'error' | undefined, theme: ThemeExtended) => {
if (type === 'success') {
return theme.semanticColors.inlineSuccessText;
} else if (type === 'error') {
return theme.semanticColors.inlineErrorText;
} else {
return theme.semanticColors.textColor;
}
};
const defaultLabelStyle = (theme: ThemeExtended) =>
style({
color: theme.semanticColors.textColor,
});
const getLabelStyle = (labelProps: any, theme: ThemeExtended) => {
return labelProps && labelProps.type
? style({
color: getLabelColor(labelProps.type, theme),
})
: defaultLabelStyle(theme);
};
const InformationLabel: FC<CustomInformationLabelProps> = props => {
const { value, id, link, labelProps, className, onClick } = props;
const theme = useContext(ThemeContext);
const getClassNameFromProps = () => {
if (className) {
return className;
}
return labelProps ? getLabelStyle(labelProps, theme) : '';
};
return (
<ReactiveFormControl {...props}>
{link ? (
<Link id={`${id}-value-link`} href={link} target="_blank" aria-labelledby={`${id}-label`}>
{value}
</Link>
) : (
<Label id={`${id}-value`} aria-labelledby={`${id}-label`} onClick={onClick} className={getClassNameFromProps()}> | )}
</ReactiveFormControl>
);
};
export default InformationLabel; | {labelProps && labelProps.icon && <Icon iconName={labelProps.icon} className={labelIconStyle} />}
<span>{value}</span>
</Label> | random_line_split |
InformationLabel.tsx | import React, { FC, useContext } from 'react';
import ReactiveFormControl from '../form-controls/ReactiveFormControl';
import { Label, Link, Icon } from 'office-ui-fabric-react';
import { style } from 'typestyle';
import { ThemeContext } from '../../ThemeContext';
import { ThemeExtended } from '../../theme/SemanticColorsExtended';
export interface CustomInformationLabelProps {
id: string;
value: string;
label: string;
link?: string;
labelProps?: {
icon?: string;
type?: string;
};
className?: string;
onClick?: () => void;
}
const labelIconStyle = style({
fontSize: '12px',
marginRight: '4px',
});
const getLabelColor = (type: 'success' | 'error' | undefined, theme: ThemeExtended) => {
if (type === 'success') {
return theme.semanticColors.inlineSuccessText;
} else if (type === 'error') {
return theme.semanticColors.inlineErrorText;
} else |
};
const defaultLabelStyle = (theme: ThemeExtended) =>
style({
color: theme.semanticColors.textColor,
});
const getLabelStyle = (labelProps: any, theme: ThemeExtended) => {
return labelProps && labelProps.type
? style({
color: getLabelColor(labelProps.type, theme),
})
: defaultLabelStyle(theme);
};
const InformationLabel: FC<CustomInformationLabelProps> = props => {
const { value, id, link, labelProps, className, onClick } = props;
const theme = useContext(ThemeContext);
const getClassNameFromProps = () => {
if (className) {
return className;
}
return labelProps ? getLabelStyle(labelProps, theme) : '';
};
return (
<ReactiveFormControl {...props}>
{link ? (
<Link id={`${id}-value-link`} href={link} target="_blank" aria-labelledby={`${id}-label`}>
{value}
</Link>
) : (
<Label id={`${id}-value`} aria-labelledby={`${id}-label`} onClick={onClick} className={getClassNameFromProps()}>
{labelProps && labelProps.icon && <Icon iconName={labelProps.icon} className={labelIconStyle} />}
<span>{value}</span>
</Label>
)}
</ReactiveFormControl>
);
};
export default InformationLabel;
| {
return theme.semanticColors.textColor;
} | conditional_block |
main.rs | #![feature(slice_position_elem, iter_arith)]
#[macro_use] extern crate libeuler;
/// 2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.
///
/// What is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?
fn main() {
solutions!{
inputs: (max_factor: i64 = 20)
sol naive {
let primes = prime_factors_less_than(&max_factor);
let mut needed_factors = Vec::new();
for factors in primes.iter() {
let mut f = needed_factors.clone();
let still_needed: Vec<&i64> = factors.iter()
.filter(|&fac| {
if f.contains(fac) {
let pos = f.position_elem(fac).unwrap();
f.swap_remove(pos);
false
} else {
true
}
}).collect();
for v in still_needed {
needed_factors.push(v.clone());
}
}
needed_factors.iter().map(|&i| i).product::<i64>()
}
};
}
fn factors(value: &i64) -> Vec<i64> {
let mut factor = 2;
let mut v = value.clone();
let mut retval = Vec::new();
while v > 1 {
if v % factor == 0 | else {
factor += 1;
}
}
retval
}
fn prime_factors_less_than(max: &i64) -> Vec<Vec<i64>> {
let mut retval = Vec::new();
for i in 1..*max {
retval.push(factors(&i));
}
retval
}
| {
retval.push(factor);
v /= factor;
} | conditional_block |
main.rs | #![feature(slice_position_elem, iter_arith)]
#[macro_use] extern crate libeuler;
/// 2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.
///
/// What is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?
fn main() |
for v in still_needed {
needed_factors.push(v.clone());
}
}
needed_factors.iter().map(|&i| i).product::<i64>()
}
};
}
fn factors(value: &i64) -> Vec<i64> {
let mut factor = 2;
let mut v = value.clone();
let mut retval = Vec::new();
while v > 1 {
if v % factor == 0 {
retval.push(factor);
v /= factor;
} else {
factor += 1;
}
}
retval
}
fn prime_factors_less_than(max: &i64) -> Vec<Vec<i64>> {
let mut retval = Vec::new();
for i in 1..*max {
retval.push(factors(&i));
}
retval
}
| {
solutions!{
inputs: (max_factor: i64 = 20)
sol naive {
let primes = prime_factors_less_than(&max_factor);
let mut needed_factors = Vec::new();
for factors in primes.iter() {
let mut f = needed_factors.clone();
let still_needed: Vec<&i64> = factors.iter()
.filter(|&fac| {
if f.contains(fac) {
let pos = f.position_elem(fac).unwrap();
f.swap_remove(pos);
false
} else {
true
}
}).collect(); | identifier_body |
main.rs | #![feature(slice_position_elem, iter_arith)]
#[macro_use] extern crate libeuler;
/// 2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.
///
/// What is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?
fn main() {
solutions!{
inputs: (max_factor: i64 = 20)
sol naive {
let primes = prime_factors_less_than(&max_factor);
let mut needed_factors = Vec::new();
for factors in primes.iter() {
let mut f = needed_factors.clone();
let still_needed: Vec<&i64> = factors.iter()
.filter(|&fac| {
if f.contains(fac) {
let pos = f.position_elem(fac).unwrap();
f.swap_remove(pos);
false
} else {
true
}
}).collect();
for v in still_needed {
needed_factors.push(v.clone());
}
}
needed_factors.iter().map(|&i| i).product::<i64>()
}
};
}
fn | (value: &i64) -> Vec<i64> {
let mut factor = 2;
let mut v = value.clone();
let mut retval = Vec::new();
while v > 1 {
if v % factor == 0 {
retval.push(factor);
v /= factor;
} else {
factor += 1;
}
}
retval
}
fn prime_factors_less_than(max: &i64) -> Vec<Vec<i64>> {
let mut retval = Vec::new();
for i in 1..*max {
retval.push(factors(&i));
}
retval
}
| factors | identifier_name |
main.rs | #![feature(slice_position_elem, iter_arith)]
#[macro_use] extern crate libeuler;
/// 2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.
///
/// What is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?
fn main() {
solutions!{
inputs: (max_factor: i64 = 20)
sol naive {
let primes = prime_factors_less_than(&max_factor);
let mut needed_factors = Vec::new();
for factors in primes.iter() {
let mut f = needed_factors.clone();
let still_needed: Vec<&i64> = factors.iter()
.filter(|&fac| {
if f.contains(fac) {
let pos = f.position_elem(fac).unwrap();
f.swap_remove(pos);
false
} else {
true
}
}).collect();
for v in still_needed { |
needed_factors.iter().map(|&i| i).product::<i64>()
}
};
}
fn factors(value: &i64) -> Vec<i64> {
let mut factor = 2;
let mut v = value.clone();
let mut retval = Vec::new();
while v > 1 {
if v % factor == 0 {
retval.push(factor);
v /= factor;
} else {
factor += 1;
}
}
retval
}
fn prime_factors_less_than(max: &i64) -> Vec<Vec<i64>> {
let mut retval = Vec::new();
for i in 1..*max {
retval.push(factors(&i));
}
retval
} | needed_factors.push(v.clone());
}
} | random_line_split |
modelling.py | from gensim import corpora,models,utils
import numpy as np
from pymongo import MongoClient
from preprocess_text import clean
from corpus_dictionary import custom_corpus, CompleteCorpus
client = MongoClient()
db = client['crawled_news']
collection = db['crawled_news']
def tagged_docs():
for d in collection.find():
cleaned = clean(d["content"])
doc_id = str(d["_id"])
yield models.doc2vec.TaggedDocument(cleaned.split(),[doc_id])
def update_doc2vec_model(model_path="model/doc2vec.model",size=400,min_count=5):
doc2vec = models.doc2vec.Doc2Vec(tagged_docs(), size=size, window=8, min_count=min_count, workers=6)
doc2vec.save(model_path)
return doc2vec
def update_lda_model(corpus,model_path="model/lda.model",size=100):
print("LDA Topic Modelling")
lda = models.ldamulticore.LdaMulticore(corpus,num_topics=size,eta="auto",workers=6)
lda.save(model_path)
return lda
def mini_lda_model(collection,num_topics=10):
corpus = custom_corpus(collection)
lda = models.ldamulticore.LdaMulticore(corpus,num_topics=num_topics,eta="auto",workers=6)
return lda,corpus.dictionary
def google_news_model(model_target_path="model/google_news_model.model",model_source_path="/home/amir/makmal/ular_makan_surat_khabar/word_embeddings/GoogleNews-vectors-negative300.bin"):
doc2vec = models.doc2vec.Doc2Vec(min_count=10)
doc2vec.load_word2vec_format(model_source_path,binary=True)
# doc2vec.init_sims(replace=False)
doc2vec.save(model_target_path)
return doc2vec
def save_lda_topics_to_db(lda_model_path,dictionary):
lda = models.ldamulticore.LdaMulticore.load(lda_model_path)
i = 1
for document in collection.find():
if i%100 == 0:
print(i)
i += 1
document_id = document["_id"]
cleaned = clean(document["content"])
doc_bow = dictionary.doc2bow(cleaned.split())
modified = document
modified["lda_topics"] = lda[doc_bow]
del(modified["_id"])
collection.replace_one({"_id":document_id},modified)
# topic_ids = []
# topic_probabilities = []
# for topic_id,topic_probability in lda[doc_bow]:
# topic_ids.append(topic_id) | corpus.dictionary.save("dictionary/temp_complete_dictionary.dict")
dictionary = corpora.dictionary.Dictionary.load("dictionary/temp_complete_dictionary.dict")
c = corpora.mmcorpus.MmCorpus("corpus/temp_complete_corpus.mm")
lda = update_lda_model(c,model_path,size)
save_lda_topics_to_db(model_path,dictionary)
return lda, corpus.dictionary
if __name__ == "__main__":
# for el in quranic_sentences_with_docs():
# print(el)
# print("doc2vec 400")
# update_doc2vec_model()
# quran_dictionary = corpora.dictionary.Dictionary.load("dictionary/quranic_ayat_en.dict")
# LDA Topic Modelling
corpus = CompleteCorpus()
# update_lda_model(corpus)
save_lda_topics_to_db("model/lda.model",corpus.dictionary)
# Doc2Vec Modelling
#
# print("doc2vec modelling") | # topic_probabilities.append(topic_probability)
def compute_complete_lda_topics(model_path,size=100):
corpus = CompleteCorpus()
corpora.MmCorpus.serialize("corpus/temp_complete_corpus.mm", corpus) | random_line_split |
modelling.py | from gensim import corpora,models,utils
import numpy as np
from pymongo import MongoClient
from preprocess_text import clean
from corpus_dictionary import custom_corpus, CompleteCorpus
client = MongoClient()
db = client['crawled_news']
collection = db['crawled_news']
def tagged_docs():
for d in collection.find():
|
def update_doc2vec_model(model_path="model/doc2vec.model",size=400,min_count=5):
doc2vec = models.doc2vec.Doc2Vec(tagged_docs(), size=size, window=8, min_count=min_count, workers=6)
doc2vec.save(model_path)
return doc2vec
def update_lda_model(corpus,model_path="model/lda.model",size=100):
print("LDA Topic Modelling")
lda = models.ldamulticore.LdaMulticore(corpus,num_topics=size,eta="auto",workers=6)
lda.save(model_path)
return lda
def mini_lda_model(collection,num_topics=10):
corpus = custom_corpus(collection)
lda = models.ldamulticore.LdaMulticore(corpus,num_topics=num_topics,eta="auto",workers=6)
return lda,corpus.dictionary
def google_news_model(model_target_path="model/google_news_model.model",model_source_path="/home/amir/makmal/ular_makan_surat_khabar/word_embeddings/GoogleNews-vectors-negative300.bin"):
doc2vec = models.doc2vec.Doc2Vec(min_count=10)
doc2vec.load_word2vec_format(model_source_path,binary=True)
# doc2vec.init_sims(replace=False)
doc2vec.save(model_target_path)
return doc2vec
def save_lda_topics_to_db(lda_model_path,dictionary):
lda = models.ldamulticore.LdaMulticore.load(lda_model_path)
i = 1
for document in collection.find():
if i%100 == 0:
print(i)
i += 1
document_id = document["_id"]
cleaned = clean(document["content"])
doc_bow = dictionary.doc2bow(cleaned.split())
modified = document
modified["lda_topics"] = lda[doc_bow]
del(modified["_id"])
collection.replace_one({"_id":document_id},modified)
# topic_ids = []
# topic_probabilities = []
# for topic_id,topic_probability in lda[doc_bow]:
# topic_ids.append(topic_id)
# topic_probabilities.append(topic_probability)
def compute_complete_lda_topics(model_path,size=100):
corpus = CompleteCorpus()
corpora.MmCorpus.serialize("corpus/temp_complete_corpus.mm", corpus)
corpus.dictionary.save("dictionary/temp_complete_dictionary.dict")
dictionary = corpora.dictionary.Dictionary.load("dictionary/temp_complete_dictionary.dict")
c = corpora.mmcorpus.MmCorpus("corpus/temp_complete_corpus.mm")
lda = update_lda_model(c,model_path,size)
save_lda_topics_to_db(model_path,dictionary)
return lda, corpus.dictionary
if __name__ == "__main__":
# for el in quranic_sentences_with_docs():
# print(el)
# print("doc2vec 400")
# update_doc2vec_model()
# quran_dictionary = corpora.dictionary.Dictionary.load("dictionary/quranic_ayat_en.dict")
# LDA Topic Modelling
corpus = CompleteCorpus()
# update_lda_model(corpus)
save_lda_topics_to_db("model/lda.model",corpus.dictionary)
# Doc2Vec Modelling
#
# print("doc2vec modelling")
| cleaned = clean(d["content"])
doc_id = str(d["_id"])
yield models.doc2vec.TaggedDocument(cleaned.split(),[doc_id]) | conditional_block |
modelling.py | from gensim import corpora,models,utils
import numpy as np
from pymongo import MongoClient
from preprocess_text import clean
from corpus_dictionary import custom_corpus, CompleteCorpus
client = MongoClient()
db = client['crawled_news']
collection = db['crawled_news']
def tagged_docs():
for d in collection.find():
cleaned = clean(d["content"])
doc_id = str(d["_id"])
yield models.doc2vec.TaggedDocument(cleaned.split(),[doc_id])
def update_doc2vec_model(model_path="model/doc2vec.model",size=400,min_count=5):
doc2vec = models.doc2vec.Doc2Vec(tagged_docs(), size=size, window=8, min_count=min_count, workers=6)
doc2vec.save(model_path)
return doc2vec
def update_lda_model(corpus,model_path="model/lda.model",size=100):
print("LDA Topic Modelling")
lda = models.ldamulticore.LdaMulticore(corpus,num_topics=size,eta="auto",workers=6)
lda.save(model_path)
return lda
def mini_lda_model(collection,num_topics=10):
corpus = custom_corpus(collection)
lda = models.ldamulticore.LdaMulticore(corpus,num_topics=num_topics,eta="auto",workers=6)
return lda,corpus.dictionary
def google_news_model(model_target_path="model/google_news_model.model",model_source_path="/home/amir/makmal/ular_makan_surat_khabar/word_embeddings/GoogleNews-vectors-negative300.bin"):
|
def save_lda_topics_to_db(lda_model_path,dictionary):
lda = models.ldamulticore.LdaMulticore.load(lda_model_path)
i = 1
for document in collection.find():
if i%100 == 0:
print(i)
i += 1
document_id = document["_id"]
cleaned = clean(document["content"])
doc_bow = dictionary.doc2bow(cleaned.split())
modified = document
modified["lda_topics"] = lda[doc_bow]
del(modified["_id"])
collection.replace_one({"_id":document_id},modified)
# topic_ids = []
# topic_probabilities = []
# for topic_id,topic_probability in lda[doc_bow]:
# topic_ids.append(topic_id)
# topic_probabilities.append(topic_probability)
def compute_complete_lda_topics(model_path,size=100):
corpus = CompleteCorpus()
corpora.MmCorpus.serialize("corpus/temp_complete_corpus.mm", corpus)
corpus.dictionary.save("dictionary/temp_complete_dictionary.dict")
dictionary = corpora.dictionary.Dictionary.load("dictionary/temp_complete_dictionary.dict")
c = corpora.mmcorpus.MmCorpus("corpus/temp_complete_corpus.mm")
lda = update_lda_model(c,model_path,size)
save_lda_topics_to_db(model_path,dictionary)
return lda, corpus.dictionary
if __name__ == "__main__":
# for el in quranic_sentences_with_docs():
# print(el)
# print("doc2vec 400")
# update_doc2vec_model()
# quran_dictionary = corpora.dictionary.Dictionary.load("dictionary/quranic_ayat_en.dict")
# LDA Topic Modelling
corpus = CompleteCorpus()
# update_lda_model(corpus)
save_lda_topics_to_db("model/lda.model",corpus.dictionary)
# Doc2Vec Modelling
#
# print("doc2vec modelling")
| doc2vec = models.doc2vec.Doc2Vec(min_count=10)
doc2vec.load_word2vec_format(model_source_path,binary=True)
# doc2vec.init_sims(replace=False)
doc2vec.save(model_target_path)
return doc2vec | identifier_body |
modelling.py | from gensim import corpora,models,utils
import numpy as np
from pymongo import MongoClient
from preprocess_text import clean
from corpus_dictionary import custom_corpus, CompleteCorpus
client = MongoClient()
db = client['crawled_news']
collection = db['crawled_news']
def tagged_docs():
for d in collection.find():
cleaned = clean(d["content"])
doc_id = str(d["_id"])
yield models.doc2vec.TaggedDocument(cleaned.split(),[doc_id])
def update_doc2vec_model(model_path="model/doc2vec.model",size=400,min_count=5):
doc2vec = models.doc2vec.Doc2Vec(tagged_docs(), size=size, window=8, min_count=min_count, workers=6)
doc2vec.save(model_path)
return doc2vec
def update_lda_model(corpus,model_path="model/lda.model",size=100):
print("LDA Topic Modelling")
lda = models.ldamulticore.LdaMulticore(corpus,num_topics=size,eta="auto",workers=6)
lda.save(model_path)
return lda
def mini_lda_model(collection,num_topics=10):
corpus = custom_corpus(collection)
lda = models.ldamulticore.LdaMulticore(corpus,num_topics=num_topics,eta="auto",workers=6)
return lda,corpus.dictionary
def google_news_model(model_target_path="model/google_news_model.model",model_source_path="/home/amir/makmal/ular_makan_surat_khabar/word_embeddings/GoogleNews-vectors-negative300.bin"):
doc2vec = models.doc2vec.Doc2Vec(min_count=10)
doc2vec.load_word2vec_format(model_source_path,binary=True)
# doc2vec.init_sims(replace=False)
doc2vec.save(model_target_path)
return doc2vec
def save_lda_topics_to_db(lda_model_path,dictionary):
lda = models.ldamulticore.LdaMulticore.load(lda_model_path)
i = 1
for document in collection.find():
if i%100 == 0:
print(i)
i += 1
document_id = document["_id"]
cleaned = clean(document["content"])
doc_bow = dictionary.doc2bow(cleaned.split())
modified = document
modified["lda_topics"] = lda[doc_bow]
del(modified["_id"])
collection.replace_one({"_id":document_id},modified)
# topic_ids = []
# topic_probabilities = []
# for topic_id,topic_probability in lda[doc_bow]:
# topic_ids.append(topic_id)
# topic_probabilities.append(topic_probability)
def | (model_path,size=100):
corpus = CompleteCorpus()
corpora.MmCorpus.serialize("corpus/temp_complete_corpus.mm", corpus)
corpus.dictionary.save("dictionary/temp_complete_dictionary.dict")
dictionary = corpora.dictionary.Dictionary.load("dictionary/temp_complete_dictionary.dict")
c = corpora.mmcorpus.MmCorpus("corpus/temp_complete_corpus.mm")
lda = update_lda_model(c,model_path,size)
save_lda_topics_to_db(model_path,dictionary)
return lda, corpus.dictionary
if __name__ == "__main__":
# for el in quranic_sentences_with_docs():
# print(el)
# print("doc2vec 400")
# update_doc2vec_model()
# quran_dictionary = corpora.dictionary.Dictionary.load("dictionary/quranic_ayat_en.dict")
# LDA Topic Modelling
corpus = CompleteCorpus()
# update_lda_model(corpus)
save_lda_topics_to_db("model/lda.model",corpus.dictionary)
# Doc2Vec Modelling
#
# print("doc2vec modelling")
| compute_complete_lda_topics | identifier_name |
editSelect.js | /*
* 此方法用于 图书搜索页面 的 搜索结果中的 “编辑” 按钮进去 编辑页面后 三个 下拉框的默认选择。
* 分别是 “图书类型” “书架” “出版社”
*/
| function selectOption( name )
{
nameVal = $('.' + name).find('select').attr('value');
nameElm = $('.' + name);
nameElm.find('li').each(function(){
if ( $(this).attr('data-raw-value') == nameVal) {
text = $(this).text();
// 显示层 select 改变值
trigger = nameElm.find('div[class=trigger]');
trigger.text( text );
}
});
nameElm.find('option').each(function(){
if ( $(this).val() == nameVal ){
// 实际的 <option> 也要改变
$(this).attr('selected', 'selected');
}
});
} | random_line_split |
|
editSelect.js | /*
* 此方法用于 图书搜索页面 的 搜索结果中的 “编辑” 按钮进去 编辑页面后 三个 下拉框的默认选择。
* 分别是 “图书类型” “书架” “出版社”
*/
function selectOption( name )
{
nameVal = $('.' + name).find('select').attr('value');
nameElm = $('.' + name);
nameElm.find( | unction(){
if ( $(this).attr('data-raw-value') == nameVal) {
text = $(this).text();
// 显示层 select 改变值
trigger = nameElm.find('div[class=trigger]');
trigger.text( text );
}
});
nameElm.find('option').each(function(){
if ( $(this).val() == nameVal ){
// 实际的 <option> 也要改变
$(this).attr('selected', 'selected');
}
});
}
| 'li').each(f | identifier_name |
editSelect.js | /*
* 此方法用于 图书搜索页面 的 搜索结果中的 “编辑” 按钮进去 编辑页面后 三个 下拉框的默认选择。
* 分别是 “图书类型” “书架” “出版社”
*/
function selectOption( name )
{
nameVal = $('.' + name).find('select').attr('value');
nameElm = $('.' + name);
nameElm.find('li').each(function(){
if ( $(this).attr('data-raw-value') == nameVal) {
text = $(this).text();
// 显示层 select 改变值
trigger = nameElm.find('div[class=trigger]');
trigger.text( | cted', 'selected');
}
});
}
| text );
}
});
nameElm.find('option').each(function(){
if ( $(this).val() == nameVal ){
// 实际的 <option> 也要改变
$(this).attr('sele | conditional_block |
editSelect.js | /*
* 此方法用于 图书搜索页面 的 搜索结果中的 “编辑” 按钮进去 编辑页面后 三个 下拉框的默认选择。
* 分别是 “图书类型” “书架” “出版社”
*/
function selectOption( name )
{
nameVal = $('.' + name).find('select').attr('value');
nameElm = $('.' + name);
nameElm.find('li').each(function() | {
if ( $(this).attr('data-raw-value') == nameVal) {
text = $(this).text();
// 显示层 select 改变值
trigger = nameElm.find('div[class=trigger]');
trigger.text( text );
}
});
nameElm.find('option').each(function(){
if ( $(this).val() == nameVal ){
// 实际的 <option> 也要改变
$(this).attr('selected', 'selected');
}
});
} | identifier_body |
|
usage.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class | (Model):
"""Describes Storage Resource Usage.
:param unit: The unit of measurement. Possible values include: 'Count',
'Bytes', 'Seconds', 'Percent', 'CountsPerSecond', 'BytesPerSecond'
:type unit: str or :class:`UsageUnit
<azure.mgmt.storage.v2015_06_15.models.UsageUnit>`
:param current_value: The current count of the allocated resources in the
subscription.
:type current_value: int
:param limit: The maximum count of the resources that can be allocated in
the subscription.
:type limit: int
:param name: The name of the type of usage.
:type name: :class:`UsageName
<azure.mgmt.storage.v2015_06_15.models.UsageName>`
"""
_validation = {
'unit': {'required': True},
'current_value': {'required': True},
'limit': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'unit': {'key': 'unit', 'type': 'UsageUnit'},
'current_value': {'key': 'currentValue', 'type': 'int'},
'limit': {'key': 'limit', 'type': 'int'},
'name': {'key': 'name', 'type': 'UsageName'},
}
def __init__(self, unit, current_value, limit, name):
self.unit = unit
self.current_value = current_value
self.limit = limit
self.name = name
| Usage | identifier_name |
usage.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Usage(Model):
"""Describes Storage Resource Usage.
:param unit: The unit of measurement. Possible values include: 'Count',
'Bytes', 'Seconds', 'Percent', 'CountsPerSecond', 'BytesPerSecond'
:type unit: str or :class:`UsageUnit
<azure.mgmt.storage.v2015_06_15.models.UsageUnit>`
:param current_value: The current count of the allocated resources in the
subscription.
:type current_value: int
:param limit: The maximum count of the resources that can be allocated in
the subscription.
:type limit: int
:param name: The name of the type of usage.
:type name: :class:`UsageName
<azure.mgmt.storage.v2015_06_15.models.UsageName>`
"""
_validation = {
'unit': {'required': True},
'current_value': {'required': True},
'limit': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'unit': {'key': 'unit', 'type': 'UsageUnit'},
'current_value': {'key': 'currentValue', 'type': 'int'},
'limit': {'key': 'limit', 'type': 'int'},
'name': {'key': 'name', 'type': 'UsageName'},
}
def __init__(self, unit, current_value, limit, name):
| self.unit = unit
self.current_value = current_value
self.limit = limit
self.name = name | identifier_body |
|
usage.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Usage(Model):
"""Describes Storage Resource Usage.
:param unit: The unit of measurement. Possible values include: 'Count',
'Bytes', 'Seconds', 'Percent', 'CountsPerSecond', 'BytesPerSecond'
:type unit: str or :class:`UsageUnit
<azure.mgmt.storage.v2015_06_15.models.UsageUnit>`
:param current_value: The current count of the allocated resources in the
subscription.
:type current_value: int
:param limit: The maximum count of the resources that can be allocated in
the subscription.
:type limit: int
:param name: The name of the type of usage.
:type name: :class:`UsageName
<azure.mgmt.storage.v2015_06_15.models.UsageName>`
"""
_validation = {
'unit': {'required': True},
'current_value': {'required': True},
'limit': {'required': True},
'name': {'required': True}, | 'unit': {'key': 'unit', 'type': 'UsageUnit'},
'current_value': {'key': 'currentValue', 'type': 'int'},
'limit': {'key': 'limit', 'type': 'int'},
'name': {'key': 'name', 'type': 'UsageName'},
}
def __init__(self, unit, current_value, limit, name):
self.unit = unit
self.current_value = current_value
self.limit = limit
self.name = name | }
_attribute_map = { | random_line_split |
components.ts | import styled, { css } from "styled-components";
import { Colors } from "../../util/constants";
export const InspectorContainer = styled.div<{ visible?: boolean }>`
background: ${Colors.Inspector.Background};
box-shadow: inset 1px 1px 0 0 hsla(0, 0%, 100%, 0.1);
display: ${({ visible }) => (visible ? "flex" : "none")};
flex-direction: column;
height: 100%;
overflow-y: scroll;
overflow-x: hidden;
top: 25px;
width: 250px;
@media print {
display: none;
}
`;
export const SectionTitle = styled.h1<{ marginTop?: boolean }>`
color: ${Colors.Inspector.SectionTitleText};
font-weight: bold;
font-size: 12px;
padding: 0 0.8em;
margin-bottom: 0.667em;
${({ marginTop }) =>
marginTop &&
css`
margin-top: 1em;
`};
`;
export const ControlGrid = styled.div<{ compressedMargins?: boolean }>`
display: grid;
grid-template-columns: repeat(2, calc(50% - 0.4em));
grid-gap: 0.8em;
padding: 0 0.8em;
margin-bottom: 1.5em;
${({ compressedMargins }) =>
compressedMargins &&
css`
margin-bottom: 1em;
`};
`;
export const VerticalControlGrid = styled.div`
display: grid;
grid-template-rows: auto;
grid-gap: 0.8em; | export const Separator = styled.div`
height: 1px;
background-color: hsla(0, 0%, 0%, 0.25);
`;
export const Text = styled.p<{ bold?: boolean }>`
color: #fff;
margin: 0 0.8em;
${({ bold }) =>
bold &&
css`
font-weight: bold;
`};
`;
export const Tabs = styled.nav`
background: ${Colors.App.Background};
display: grid;
grid-template-columns: repeat(2, 50%);
`;
export const Tab = styled.div<{ active: boolean }>`
align-items: center;
background: ${({ active }) =>
active
? Colors.Inspector.TabActiveBackground
: Colors.Inspector.TabInactiveBackground};
box-shadow: inset 0 -1px 0 hsla(0, 0%, 100%, 0.2);
color: ${Colors.Inspector.Text};
cursor: default;
display: flex;
font-size: 12px;
font-weight: bold;
margin-top: 2px;
justify-content: center;
height: 23px;
${({ active }) =>
active &&
css`
border-radius: 4px 4px 0 0;
box-shadow: inset 1px 1px 0 0 hsla(0, 0%, 100%, 0.1);
`};
`; | padding: 0 0.8em;
margin-bottom: 1em;
`;
| random_line_split |
MeetingControllerTests.ts | ///ts:import=MeetingController
import MeetingController = require('../../Client/MeetingController'); ///ts:import:generated
///ts:import=IMeetingSvc
import IMeetingSvc = require('../../Client/IMeetingSvc'); ///ts:import:generated
///ts:import=MeetingData
import MeetingData = require('../../Common/MeetingData'); ///ts:import:generated
///ts:import=app
import app = require('../../Client/app'); ///ts:import:generated
///ts:import=IUserSvc
import IUserSvc = require('../../Client/IUserSvc'); ///ts:import:generated
describe("MeetingController", function () {
var $httpBackend: ng.IHttpBackendService;
var $http: ng.IHttpService;
var $routeParams;
var $rootScope;
var controller: MeetingController;
var meetingSvc: IMeetingSvc;
var mockMeetingSvc: SinonMock;
var userSvc: IUserSvc;
var mockUserSvc: SinonMock;
beforeEach(inject(function (_$httpBackend_, _$http_, _$rootScope_) { | $rootScope = _$rootScope_.$new();
meetingSvc = <IMeetingSvc>{ createMeeting: function () {}};
mockMeetingSvc = sinon.mock(meetingSvc);
mockUserSvc = sinon.mock(userSvc);
}));
afterEach(function () {
$httpBackend.verifyNoOutstandingExpectation();
$httpBackend.verifyNoOutstandingRequest();
});
it("CannotGetMeetings", function () {
$httpBackend.expectGET('/api/GetMeetingById/3').respond(401);
controller = new MeetingController($rootScope, $http, $routeParams, meetingSvc, userSvc);
$httpBackend.flush();
expect($rootScope.contentLoaded).to.equal(false);
});
it("GetMeetings", function () {
mockMeetingSvc.expects("createMeeting").once().returns(null);
$httpBackend.expectGET('/api/GetMeetingById/3').respond(200, new MeetingData());
controller = new MeetingController($rootScope, $http, $routeParams, meetingSvc, userSvc);
$httpBackend.flush();
expect($rootScope.contentLoaded).to.equal(true);
});
}); | $httpBackend = _$httpBackend_;
$http = _$http_;
$routeParams = { id: 3 }; | random_line_split |
ligature-subdivision.js | description('Test that the caret is positioned correctly when its offset occurrs in the middle of a ligature,\
and that hit-testing in the middle of a ligature works correctly.');
var latin = document.createElement("div");
latin.innerHTML = "office";
latin.style.fontSize = "72px";
latin.style.textRendering = "optimizelegibility";
latin.style.position = "absolute";
latin.style.top = "0";
latin.style.left = "0";
document.body.appendChild(latin);
var y = latin.offsetTop + latin.offsetHeight / 2;
document.body.offsetTop;
shouldBe('document.caretRangeFromPoint(10, y).startOffset', '0');
shouldBe('document.caretRangeFromPoint(30, y).startOffset', '1');
shouldBe('document.caretRangeFromPoint(60, y).startOffset', '2');
shouldBe('document.caretRangeFromPoint(80, y).startOffset', '3');
shouldBe('document.caretRangeFromPoint(100, y).startOffset', '4');
shouldBe('document.caretRangeFromPoint(120, y).startOffset', '5');
var range = document.createRange();
range.setStart(latin.firstChild, 0);
range.setEnd(latin.firstChild, 3);
shouldBe('range.getBoundingClientRect().width', '80');
document.body.removeChild(latin);
var arabic = document.createElement("div");
arabic.innerHTML = "حلاح";
arabic.style.fontSize = "72px";
arabic.style.direction = "rtl";
arabic.style.position = "absolute";
arabic.style.top = "0";
arabic.style.right = "0";
document.body.appendChild(arabic);
y = arabic.offsetTop + arabic.offsetHeight / 2;
var x = arabic.offsetLeft + arabic.offsetWidth;
shouldBe('document.caretRangeFromPoint(x - 20, y).startOffset', '0');
shouldBe('document.caretRangeFromPoint(x - 50, y).startOffset', '1');
shouldBe('document.caretRangeFromPoint(x - 64, y).startOffset', '2'); | shouldBe('document.caretRangeFromPoint(x - 90, y).startOffset', '3');
range.setStart(arabic.firstChild, 0);
range.setEnd(arabic.firstChild, 2);
var w = range.getBoundingClientRect().width;
// Widths vary between Mac OS X Leopard, Snow Leopard, and current Mac OS X.
shouldBeTrue('w === 66 || w === 65 || w === 61');
document.body.removeChild(arabic);
var successfullyParsed = true; | random_line_split |
|
truncate_reverse_primer.py | #!/usr/bin/env python
# File created February 29, 2012
from __future__ import division
__author__ = "William Walters"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["William Walters", "Emily TerAvest"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "William Walters"
__email__ = "[email protected]"
from os.path import join, basename
from skbio.parse.sequences import parse_fasta
from skbio.sequence import DNA
from qiime.split_libraries import local_align_primer_seq
from qiime.check_id_map import process_id_map
def get_rev_primer_seqs(mapping_fp):
|
for header in range(len(hds)):
for curr_data in mapping_data:
id_map[curr_data[0]][hds[header]] = curr_data[header]
reverse_primers = {}
for curr_id in id_map.keys():
try:
reverse_primers[curr_id] =\
[str(DNA(curr_rev_primer).rc()) for curr_rev_primer in
id_map[curr_id]['ReversePrimer'].split(',')]
except KeyError:
raise KeyError("Reverse primer not found in mapping file, " +
"please include a 'ReversePrimer' column.")
# Check for valid reverse primers
# Will have been detected as warnings from mapping file
for curr_err in errors:
if curr_err.startswith("Invalid DNA sequence detected"):
raise ValueError("Problems found with reverse primers, please " +
"check mapping file with validate_mapping_file.py")
return reverse_primers
def get_output_filepaths(output_dir,
                         fasta_fp):
    """ Returns output fasta filepath and log filepath

    fasta_fp: fasta filepath
    output_dir: output directory
    """
    fasta_extensions = ['.fa', '.fasta', '.fna']
    curr_fasta_out = basename(fasta_fp)
    # Strip a recognized fasta extension only when it is a true trailing
    # suffix. (The previous str.replace() approach also removed matching
    # substrings in the middle of the name, e.g. "my.fasta.fna" -> "mysta".)
    for fasta_extension in fasta_extensions:
        if curr_fasta_out.endswith(fasta_extension):
            curr_fasta_out = curr_fasta_out[:-len(fasta_extension)]
            break
    curr_fasta_out += "_rev_primer_truncated.fna"
    output_fp = join(output_dir, curr_fasta_out)
    log_fp = join(output_dir, "rev_primer_truncation.log")
    return output_fp, log_fp
def truncate_rev_primers(fasta_f,
                         output_fp,
                         reverse_primers,
                         truncate_option='truncate_only',
                         primer_mismatches=2):
    """ Locally aligns reverse primers, trucates or removes seqs

    fasta_f: open file of fasta file
    output_fp: open filepath to write truncated fasta to
    reverse_primers: dictionary of SampleID:reverse primer sequence
    truncate_option: either truncate_only, truncate_remove
    primer_mismatches: number of allowed primer mismatches
    """
    # Running tallies consumed later by write_log_file.
    log_data = {
        'sample_id_not_found': 0,
        'reverse_primer_not_found': 0,
        'total_seqs': 0,
        'seqs_written': 0
    }
    for label, seq in parse_fasta(fasta_f):
        # SampleID is the portion of the fasta label before the first '_'.
        curr_label = label.split('_')[0]
        log_data['total_seqs'] += 1
        # Check fasta label for valid SampleID, if not found, just write seq
        try:
            curr_rev_primer = reverse_primers[curr_label]
        except KeyError:
            log_data['sample_id_not_found'] += 1
            output_fp.write('>%s\n%s\n' % (label, seq))
            log_data['seqs_written'] += 1
            continue
        # Align each candidate reverse primer against the read and keep the
        # alignment with the fewest mismatches. NOTE: on mismatch-count ties
        # the last-aligned primer wins, because the dict key is overwritten.
        mm_tests = {}
        for rev_primer in curr_rev_primer:
            rev_primer_mm, rev_primer_index =\
                local_align_primer_seq(rev_primer, seq)
            mm_tests[rev_primer_mm] = rev_primer_index
        rev_primer_mm = min(mm_tests.keys())
        rev_primer_index = mm_tests[rev_primer_mm]
        if rev_primer_mm > primer_mismatches:
            # Primer not found within the allowed mismatches: either drop the
            # read ('truncate_remove') or write it through unmodified.
            if truncate_option == "truncate_remove":
                log_data['reverse_primer_not_found'] += 1
            else:
                log_data['reverse_primer_not_found'] += 1
                log_data['seqs_written'] += 1
                output_fp.write('>%s\n%s\n' % (label, seq))
        else:
            # Check for zero seq length after truncation, will not write seq
            if rev_primer_index > 0:
                log_data['seqs_written'] += 1
                output_fp.write('>%s\n%s\n' % (label, seq[0:rev_primer_index]))
    return log_data
def write_log_file(log_data,
                   log_f):
    """Write a summary of the reverse-primer removal run.

    log_data: dictionary of counters/settings gathered during processing
    log_f: open, writable file object for the log
    """
    summary = [
        "Details for removal of reverse primers",
        "Original fasta filepath: %s" % log_data['fasta_fp'],
        "Total seqs in fasta: %d" % log_data['total_seqs'],
        "Mapping filepath: %s" % log_data['mapping_fp'],
        "Truncation option: %s" % log_data['truncate_option'],
        "Mismatches allowed: %d" % log_data['primer_mismatches'],
        "Total seqs written: %d" % log_data['seqs_written'],
        "SampleIDs not found: %d" % log_data['sample_id_not_found'],
        "Reverse primers not found: %d" % log_data['reverse_primer_not_found'],
    ]
    log_f.write("\n".join(summary) + "\n")
def truncate_reverse_primer(fasta_fp,
                            mapping_fp,
                            output_dir=".",
                            truncate_option='truncate_only',
                            primer_mismatches=2):
    """ Main program function for finding, removing reverse primer seqs

    fasta_fp: fasta filepath
    mapping_fp: mapping filepath
    output_dir: output directory
    truncate_option: truncation option, either truncate_only, truncate_remove
    primer_mismatches: Number is mismatches allowed in reverse primer"""
    # Use context managers so all file handles are closed even when one of
    # the processing steps raises (the previous version leaked four handles).
    with open(mapping_fp, "U") as mapping_f:
        reverse_primers = get_rev_primer_seqs(mapping_f)
    output_fp, log_fp = get_output_filepaths(output_dir, fasta_fp)
    with open(fasta_fp, "U") as fasta_f, open(output_fp, "w") as out_f:
        log_data = truncate_rev_primers(fasta_f, out_f, reverse_primers,
                                        truncate_option, primer_mismatches)
    # Record the run settings alongside the counters for the log file.
    log_data['fasta_fp'] = fasta_fp
    log_data['mapping_fp'] = mapping_fp
    log_data['truncate_option'] = truncate_option
    log_data['primer_mismatches'] = primer_mismatches
    with open(log_fp, "w") as log_f:
        write_log_file(log_data, log_f)
| """ Parses mapping file to get dictionary of SampleID:Rev primer
mapping_fp: mapping filepath
"""
hds, mapping_data, run_description, errors, warnings = \
process_id_map(mapping_fp, has_barcodes=False,
disable_primer_check=True)
if errors:
for curr_err in errors:
if curr_err.startswith("Duplicate SampleID"):
raise ValueError('Errors were found with mapping file, ' +
'please run validate_mapping_file.py to ' +
'identify problems.')
# create dict of dicts with SampleID:{each header:mapping data}
id_map = {}
for curr_data in mapping_data:
id_map[curr_data[0]] = {} | identifier_body |
truncate_reverse_primer.py | #!/usr/bin/env python
# File created February 29, 2012
from __future__ import division
__author__ = "William Walters"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["William Walters", "Emily TerAvest"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "William Walters"
__email__ = "[email protected]"
from os.path import join, basename
from skbio.parse.sequences import parse_fasta
from skbio.sequence import DNA
from qiime.split_libraries import local_align_primer_seq
from qiime.check_id_map import process_id_map
def get_rev_primer_seqs(mapping_fp):
    """Build a {SampleID: [reverse-complemented reverse primers]} mapping.

    mapping_fp: open mapping file
    """
    hds, mapping_data, run_description, errors, warnings = \
        process_id_map(mapping_fp, has_barcodes=False,
                       disable_primer_check=True)
    # Abort early on duplicated sample identifiers reported by the parser.
    if any(err.startswith("Duplicate SampleID") for err in errors):
        raise ValueError('Errors were found with mapping file, ' +
                         'please run validate_mapping_file.py to ' +
                         'identify problems.')
    # One {header: value} dict per sample, keyed by SampleID (column 0).
    id_map = {}
    for row in mapping_data:
        id_map[row[0]] = {}
    for col, header in enumerate(hds):
        for row in mapping_data:
            id_map[row[0]][header] = row[col]
    reverse_primers = {}
    for sample_id in id_map.keys():
        try:
            primer_field = id_map[sample_id]['ReversePrimer']
        except KeyError:
            raise KeyError("Reverse primer not found in mapping file, " +
                           "please include a 'ReversePrimer' column.")
        # Store each comma-separated primer as its reverse complement.
        reverse_primers[sample_id] = [str(DNA(p).rc())
                                      for p in primer_field.split(',')]
    # Invalid primer sequences surface as errors from the mapping parser.
    if any(err.startswith("Invalid DNA sequence detected") for err in errors):
        raise ValueError("Problems found with reverse primers, please " +
                         "check mapping file with validate_mapping_file.py")
    return reverse_primers
def get_output_filepaths(output_dir, | """ Returns output fasta filepath and log filepath
fasta_fp: fasta filepath
output_dir: output directory
"""
fasta_extensions = ['.fa', '.fasta', '.fna']
curr_fasta_out = basename(fasta_fp)
for fasta_extension in fasta_extensions:
curr_fasta_out = curr_fasta_out.replace(fasta_extension, '')
curr_fasta_out += "_rev_primer_truncated.fna"
output_fp = join(output_dir, curr_fasta_out)
log_fp = join(output_dir, "rev_primer_truncation.log")
return output_fp, log_fp
def truncate_rev_primers(fasta_f,
                         output_fp,
                         reverse_primers,
                         truncate_option='truncate_only',
                         primer_mismatches=2):
    """ Locally aligns reverse primers, trucates or removes seqs

    fasta_f: open file of fasta file
    output_fp: open filepath to write truncated fasta to
    reverse_primers: dictionary of SampleID:reverse primer sequence
    truncate_option: either truncate_only, truncate_remove
    primer_mismatches: number of allowed primer mismatches
    """
    # Running tallies consumed later by write_log_file.
    log_data = {
        'sample_id_not_found': 0,
        'reverse_primer_not_found': 0,
        'total_seqs': 0,
        'seqs_written': 0
    }
    for label, seq in parse_fasta(fasta_f):
        # SampleID is the portion of the fasta label before the first '_'.
        curr_label = label.split('_')[0]
        log_data['total_seqs'] += 1
        # Check fasta label for valid SampleID, if not found, just write seq
        try:
            curr_rev_primer = reverse_primers[curr_label]
        except KeyError:
            log_data['sample_id_not_found'] += 1
            output_fp.write('>%s\n%s\n' % (label, seq))
            log_data['seqs_written'] += 1
            continue
        # Align each candidate reverse primer against the read and keep the
        # alignment with the fewest mismatches. NOTE: on mismatch-count ties
        # the last-aligned primer wins, because the dict key is overwritten.
        mm_tests = {}
        for rev_primer in curr_rev_primer:
            rev_primer_mm, rev_primer_index =\
                local_align_primer_seq(rev_primer, seq)
            mm_tests[rev_primer_mm] = rev_primer_index
        rev_primer_mm = min(mm_tests.keys())
        rev_primer_index = mm_tests[rev_primer_mm]
        if rev_primer_mm > primer_mismatches:
            # Primer not found within the allowed mismatches: either drop the
            # read ('truncate_remove') or write it through unmodified.
            if truncate_option == "truncate_remove":
                log_data['reverse_primer_not_found'] += 1
            else:
                log_data['reverse_primer_not_found'] += 1
                log_data['seqs_written'] += 1
                output_fp.write('>%s\n%s\n' % (label, seq))
        else:
            # Check for zero seq length after truncation, will not write seq
            if rev_primer_index > 0:
                log_data['seqs_written'] += 1
                output_fp.write('>%s\n%s\n' % (label, seq[0:rev_primer_index]))
    return log_data
def write_log_file(log_data,
                   log_f):
    """ Writes log file

    log_data: dictionary of details about reverse primer removal
    log_f: open file object to write log details to
    """
    # One summary line per counter/setting gathered during processing.
    log_f.write("Details for removal of reverse primers\n")
    log_f.write("Original fasta filepath: %s\n" % log_data['fasta_fp'])
    log_f.write("Total seqs in fasta: %d\n" % log_data['total_seqs'])
    log_f.write("Mapping filepath: %s\n" % log_data['mapping_fp'])
    log_f.write("Truncation option: %s\n" % log_data['truncate_option'])
    log_f.write("Mismatches allowed: %d\n" % log_data['primer_mismatches'])
    log_f.write("Total seqs written: %d\n" % log_data['seqs_written'])
    log_f.write("SampleIDs not found: %d\n" % log_data['sample_id_not_found'])
    log_f.write("Reverse primers not found: %d\n" %
                log_data['reverse_primer_not_found'])
def truncate_reverse_primer(fasta_fp,
mapping_fp,
output_dir=".",
truncate_option='truncate_only',
primer_mismatches=2):
""" Main program function for finding, removing reverse primer seqs
fasta_fp: fasta filepath
mapping_fp: mapping filepath
output_dir: output directory
truncate_option: truncation option, either truncate_only, truncate_remove
primer_mismatches: Number is mismatches allowed in reverse primer"""
reverse_primers = get_rev_primer_seqs(open(mapping_fp, "U"))
output_fp, log_fp = get_output_filepaths(output_dir, fasta_fp)
log_data = truncate_rev_primers(open(fasta_fp, "U"),
open(
output_fp, "w"), reverse_primers, truncate_option,
primer_mismatches)
log_data['fasta_fp'] = fasta_fp
log_data['mapping_fp'] = mapping_fp
log_data['truncate_option'] = truncate_option
log_data['primer_mismatches'] = primer_mismatches
write_log_file(log_data, open(log_fp, "w")) | fasta_fp): | random_line_split |
truncate_reverse_primer.py | #!/usr/bin/env python
# File created February 29, 2012
from __future__ import division
__author__ = "William Walters"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["William Walters", "Emily TerAvest"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "William Walters"
__email__ = "[email protected]"
from os.path import join, basename
from skbio.parse.sequences import parse_fasta
from skbio.sequence import DNA
from qiime.split_libraries import local_align_primer_seq
from qiime.check_id_map import process_id_map
def | (mapping_fp):
""" Parses mapping file to get dictionary of SampleID:Rev primer
mapping_fp: mapping filepath
"""
hds, mapping_data, run_description, errors, warnings = \
process_id_map(mapping_fp, has_barcodes=False,
disable_primer_check=True)
if errors:
for curr_err in errors:
if curr_err.startswith("Duplicate SampleID"):
raise ValueError('Errors were found with mapping file, ' +
'please run validate_mapping_file.py to ' +
'identify problems.')
# create dict of dicts with SampleID:{each header:mapping data}
id_map = {}
for curr_data in mapping_data:
id_map[curr_data[0]] = {}
for header in range(len(hds)):
for curr_data in mapping_data:
id_map[curr_data[0]][hds[header]] = curr_data[header]
reverse_primers = {}
for curr_id in id_map.keys():
try:
reverse_primers[curr_id] =\
[str(DNA(curr_rev_primer).rc()) for curr_rev_primer in
id_map[curr_id]['ReversePrimer'].split(',')]
except KeyError:
raise KeyError("Reverse primer not found in mapping file, " +
"please include a 'ReversePrimer' column.")
# Check for valid reverse primers
# Will have been detected as warnings from mapping file
for curr_err in errors:
if curr_err.startswith("Invalid DNA sequence detected"):
raise ValueError("Problems found with reverse primers, please " +
"check mapping file with validate_mapping_file.py")
return reverse_primers
def get_output_filepaths(output_dir,
                         fasta_fp):
    """ Returns output fasta filepath and log filepath

    fasta_fp: fasta filepath
    output_dir: output directory
    """
    fasta_extensions = ['.fa', '.fasta', '.fna']
    curr_fasta_out = basename(fasta_fp)
    # NOTE(review): str.replace removes the extension substring wherever it
    # occurs, not only as a trailing suffix (e.g. "my.fasta.fna" -> "mysta");
    # an endswith() check would be safer.
    for fasta_extension in fasta_extensions:
        curr_fasta_out = curr_fasta_out.replace(fasta_extension, '')
    # Name the truncated output after the input fasta, inside output_dir.
    curr_fasta_out += "_rev_primer_truncated.fna"
    output_fp = join(output_dir, curr_fasta_out)
    log_fp = join(output_dir, "rev_primer_truncation.log")
    return output_fp, log_fp
def truncate_rev_primers(fasta_f,
output_fp,
reverse_primers,
truncate_option='truncate_only',
primer_mismatches=2):
""" Locally aligns reverse primers, trucates or removes seqs
fasta_f: open file of fasta file
output_fp: open filepath to write truncated fasta to
reverse_primers: dictionary of SampleID:reverse primer sequence
truncate_option: either truncate_only, truncate_remove
primer_mismatches: number of allowed primer mismatches
"""
log_data = {
'sample_id_not_found': 0,
'reverse_primer_not_found': 0,
'total_seqs': 0,
'seqs_written': 0
}
for label, seq in parse_fasta(fasta_f):
curr_label = label.split('_')[0]
log_data['total_seqs'] += 1
# Check fasta label for valid SampleID, if not found, just write seq
try:
curr_rev_primer = reverse_primers[curr_label]
except KeyError:
log_data['sample_id_not_found'] += 1
output_fp.write('>%s\n%s\n' % (label, seq))
log_data['seqs_written'] += 1
continue
mm_tests = {}
for rev_primer in curr_rev_primer:
rev_primer_mm, rev_primer_index =\
local_align_primer_seq(rev_primer, seq)
mm_tests[rev_primer_mm] = rev_primer_index
rev_primer_mm = min(mm_tests.keys())
rev_primer_index = mm_tests[rev_primer_mm]
if rev_primer_mm > primer_mismatches:
if truncate_option == "truncate_remove":
log_data['reverse_primer_not_found'] += 1
else:
log_data['reverse_primer_not_found'] += 1
log_data['seqs_written'] += 1
output_fp.write('>%s\n%s\n' % (label, seq))
else:
# Check for zero seq length after truncation, will not write seq
if rev_primer_index > 0:
log_data['seqs_written'] += 1
output_fp.write('>%s\n%s\n' % (label, seq[0:rev_primer_index]))
return log_data
def write_log_file(log_data,
                   log_f):
    """ Writes log file

    log_data: dictionary of details about reverse primer removal
    log_f: open file object to write log details to
    """
    # One summary line per counter/setting gathered during processing.
    log_f.write("Details for removal of reverse primers\n")
    log_f.write("Original fasta filepath: %s\n" % log_data['fasta_fp'])
    log_f.write("Total seqs in fasta: %d\n" % log_data['total_seqs'])
    log_f.write("Mapping filepath: %s\n" % log_data['mapping_fp'])
    log_f.write("Truncation option: %s\n" % log_data['truncate_option'])
    log_f.write("Mismatches allowed: %d\n" % log_data['primer_mismatches'])
    log_f.write("Total seqs written: %d\n" % log_data['seqs_written'])
    log_f.write("SampleIDs not found: %d\n" % log_data['sample_id_not_found'])
    log_f.write("Reverse primers not found: %d\n" %
                log_data['reverse_primer_not_found'])
def truncate_reverse_primer(fasta_fp,
mapping_fp,
output_dir=".",
truncate_option='truncate_only',
primer_mismatches=2):
""" Main program function for finding, removing reverse primer seqs
fasta_fp: fasta filepath
mapping_fp: mapping filepath
output_dir: output directory
truncate_option: truncation option, either truncate_only, truncate_remove
primer_mismatches: Number is mismatches allowed in reverse primer"""
reverse_primers = get_rev_primer_seqs(open(mapping_fp, "U"))
output_fp, log_fp = get_output_filepaths(output_dir, fasta_fp)
log_data = truncate_rev_primers(open(fasta_fp, "U"),
open(
output_fp, "w"), reverse_primers, truncate_option,
primer_mismatches)
log_data['fasta_fp'] = fasta_fp
log_data['mapping_fp'] = mapping_fp
log_data['truncate_option'] = truncate_option
log_data['primer_mismatches'] = primer_mismatches
write_log_file(log_data, open(log_fp, "w"))
| get_rev_primer_seqs | identifier_name |
truncate_reverse_primer.py | #!/usr/bin/env python
# File created February 29, 2012
from __future__ import division
__author__ = "William Walters"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["William Walters", "Emily TerAvest"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "William Walters"
__email__ = "[email protected]"
from os.path import join, basename
from skbio.parse.sequences import parse_fasta
from skbio.sequence import DNA
from qiime.split_libraries import local_align_primer_seq
from qiime.check_id_map import process_id_map
def get_rev_primer_seqs(mapping_fp):
""" Parses mapping file to get dictionary of SampleID:Rev primer
mapping_fp: mapping filepath
"""
hds, mapping_data, run_description, errors, warnings = \
process_id_map(mapping_fp, has_barcodes=False,
disable_primer_check=True)
if errors:
for curr_err in errors:
if curr_err.startswith("Duplicate SampleID"):
raise ValueError('Errors were found with mapping file, ' +
'please run validate_mapping_file.py to ' +
'identify problems.')
# create dict of dicts with SampleID:{each header:mapping data}
id_map = {}
for curr_data in mapping_data:
|
for header in range(len(hds)):
for curr_data in mapping_data:
id_map[curr_data[0]][hds[header]] = curr_data[header]
reverse_primers = {}
for curr_id in id_map.keys():
try:
reverse_primers[curr_id] =\
[str(DNA(curr_rev_primer).rc()) for curr_rev_primer in
id_map[curr_id]['ReversePrimer'].split(',')]
except KeyError:
raise KeyError("Reverse primer not found in mapping file, " +
"please include a 'ReversePrimer' column.")
# Check for valid reverse primers
# Will have been detected as warnings from mapping file
for curr_err in errors:
if curr_err.startswith("Invalid DNA sequence detected"):
raise ValueError("Problems found with reverse primers, please " +
"check mapping file with validate_mapping_file.py")
return reverse_primers
def get_output_filepaths(output_dir,
                         fasta_fp):
    """Return the output fasta filepath and the log filepath.

    fasta_fp: input fasta filepath (its basename is reused for the output)
    output_dir: directory the output and log files are placed in

    The recognized fasta extension, if present, is stripped from the end of
    the filename before "_rev_primer_truncated.fna" is appended.
    """
    fasta_extensions = ['.fasta', '.fna', '.fa']
    curr_fasta_out = basename(fasta_fp)
    # Strip at most one recognized extension, and only from the end of the
    # name. The original replace()-based loop removed '.fa' anywhere in the
    # name first, turning e.g. 'sample.fasta' into 'samplesta'.
    for fasta_extension in fasta_extensions:
        if curr_fasta_out.endswith(fasta_extension):
            curr_fasta_out = curr_fasta_out[:-len(fasta_extension)]
            break
    curr_fasta_out += "_rev_primer_truncated.fna"
    output_fp = join(output_dir, curr_fasta_out)
    log_fp = join(output_dir, "rev_primer_truncation.log")
    return output_fp, log_fp
def truncate_rev_primers(fasta_f,
                         output_fp,
                         reverse_primers,
                         truncate_option='truncate_only',
                         primer_mismatches=2):
    """Locally align each read's reverse primer; truncate or drop the read.

    fasta_f: open file handle for the input fasta
    output_fp: open, writable file handle for the truncated fasta output
    reverse_primers: dict of SampleID -> list of reverse primer sequences
        (already reverse-complemented, as built by get_rev_primer_seqs)
    truncate_option: 'truncate_only' writes reads whose primer was not found
        unmodified; 'truncate_remove' drops such reads from the output
    primer_mismatches: maximum mismatches tolerated in the primer alignment

    Returns a dict of counters: total_seqs, seqs_written,
    sample_id_not_found, reverse_primer_not_found.
    """
    log_data = {
        'sample_id_not_found': 0,
        'reverse_primer_not_found': 0,
        'total_seqs': 0,
        'seqs_written': 0
    }
    for label, seq in parse_fasta(fasta_f):
        # Labels are assumed to be "<SampleID>_<suffix>"; the SampleID is
        # everything before the first underscore.
        curr_label = label.split('_')[0]
        log_data['total_seqs'] += 1
        # Check fasta label for valid SampleID, if not found, just write seq
        try:
            curr_rev_primer = reverse_primers[curr_label]
        except KeyError:
            log_data['sample_id_not_found'] += 1
            output_fp.write('>%s\n%s\n' % (label, seq))
            log_data['seqs_written'] += 1
            continue
        # Align every candidate primer and keep the best (fewest mismatches).
        # NOTE(review): mm_tests is keyed by mismatch count, so when two
        # primers tie, only the later primer's index survives — confirm
        # that is acceptable.
        mm_tests = {}
        for rev_primer in curr_rev_primer:
            rev_primer_mm, rev_primer_index =\
                local_align_primer_seq(rev_primer, seq)
            mm_tests[rev_primer_mm] = rev_primer_index
        rev_primer_mm = min(mm_tests.keys())
        rev_primer_index = mm_tests[rev_primer_mm]
        if rev_primer_mm > primer_mismatches:
            # Best alignment is still too poor: primer counts as not found.
            if truncate_option == "truncate_remove":
                # Drop the read entirely: count it, write nothing.
                log_data['reverse_primer_not_found'] += 1
            else:
                # Default: keep the read unmodified.
                log_data['reverse_primer_not_found'] += 1
                log_data['seqs_written'] += 1
                output_fp.write('>%s\n%s\n' % (label, seq))
        else:
            # Check for zero seq length after truncation, will not write seq
            if rev_primer_index > 0:
                log_data['seqs_written'] += 1
                output_fp.write('>%s\n%s\n' % (label, seq[0:rev_primer_index]))
    return log_data
def write_log_file(log_data,
                   log_f):
    """Write a human-readable summary of the reverse-primer removal run.

    log_data: dict of run details (filepaths, options, and counters)
    log_f: open, writable file handle for the log
    """
    summary = [
        "Details for removal of reverse primers",
        "Original fasta filepath: %s" % log_data['fasta_fp'],
        "Total seqs in fasta: %d" % log_data['total_seqs'],
        "Mapping filepath: %s" % log_data['mapping_fp'],
        "Truncation option: %s" % log_data['truncate_option'],
        "Mismatches allowed: %d" % log_data['primer_mismatches'],
        "Total seqs written: %d" % log_data['seqs_written'],
        "SampleIDs not found: %d" % log_data['sample_id_not_found'],
        "Reverse primers not found: %d" % log_data['reverse_primer_not_found'],
    ]
    log_f.write("\n".join(summary) + "\n")
def truncate_reverse_primer(fasta_fp,
                            mapping_fp,
                            output_dir=".",
                            truncate_option='truncate_only',
                            primer_mismatches=2):
    """Main program function for finding and removing reverse primer seqs.

    fasta_fp: fasta filepath
    mapping_fp: mapping filepath (must contain a 'ReversePrimer' column)
    output_dir: output directory
    truncate_option: either 'truncate_only' or 'truncate_remove'
    primer_mismatches: number of mismatches allowed in the reverse primer
    """
    # "U" preserves the original universal-newline behavior (Python 2 era).
    # NOTE(review): the "U" mode was removed in Python 3.11 — drop it if
    # this ever runs on modern Python 3.
    with open(mapping_fp, "U") as mapping_f:
        reverse_primers = get_rev_primer_seqs(mapping_f)
    output_fp, log_fp = get_output_filepaths(output_dir, fasta_fp)
    # Context managers fix the original's leaked file handles; the output
    # fasta is now guaranteed to be flushed and closed before we log.
    with open(fasta_fp, "U") as fasta_f, open(output_fp, "w") as out_f:
        log_data = truncate_rev_primers(fasta_f, out_f, reverse_primers,
                                        truncate_option, primer_mismatches)
    log_data['fasta_fp'] = fasta_fp
    log_data['mapping_fp'] = mapping_fp
    log_data['truncate_option'] = truncate_option
    log_data['primer_mismatches'] = primer_mismatches
    with open(log_fp, "w") as log_f:
        write_log_file(log_data, log_f)
| id_map[curr_data[0]] = {} | conditional_block |
SupervisorJob.py | from jinja2 import Template
class SupervisorJob(object):
"""docstring for SupervisorJob"""
def | (self, config):
""" Specify the configuration options for a job.
'config' must be a dictionary containing the following keys:
- env_vars: dict containing the key/value pairs to be specified as
environment variables
e.g. {"ES_HOME" : "/home/es"}
- name: job name, as used by Supervisor to uniquely identify the job
e.g. "elasticsearch"
- base_dir: Base directory for the supervisor job
e.g. "/home/es"
- cmd: Full command (with args) that supervisor will run
e.g. "elasticsearch -p /home/es/es.pid"
- stdout_file: Full path to the file where stdout will be dumped to by
Supervisor
e.g. "/home/es/logs/es.out"
- stderr_file: Full path to the file where stderr will be dumped to by
Supervisor
e.g. "/home/es/logs/es.err"
"""
super(SupervisorJob, self).__init__()
self.config = config
self['env_vars'] = config.get('env_vars', {})
# self.env_vars = env_vars
# self.base_dir = base_dir
# self.cmd = cmd
# self.name = name
# self.stdout_file = stdout_file
# self.stderr_file = stderr_file
def prepare(self):
raise NotImplementedError("base class")
def __getitem__(self, k):
return self.config[k]
def __setitem__(self, k, v):
self.config[k] = v
def __contains__(self, k):
return k in self.config
def add_env(self, k, v):
self['env_vars'][k] = v
def as_supervisor_program(self):
config = """[program:{{program_name}}]
command = {{cmd}}
directory = {{base_dir}}
autostart = true
autorestart = true
stopsignal = KILL
killasgroup = true
stopasgroup = true
environment = {{env}}
stdout_logfile = {{stdout}}
stderr_logfile = {{stderr}}
"""
env = Template(config)
return env.render({
"program_name" : self.config['name'],
"base_dir" : self.config['base_dir'],
"env" : self.get_env_str(),
"cmd" : self.config['cmd'],
"stdout" : self.config['stdout_file'],
"stderr" : self.config['stderr_file'],
})
def as_exports(self):
res = ""
for k, v in self['env_vars'].items():
res += "export %s=%s\n" % (k, v)
return res
def get_env_str(self):
return ", ".join([k + "=\"" + str(v) + "\"" for k,v in self['env_vars'].items()])
| __init__ | identifier_name |
SupervisorJob.py | from jinja2 import Template
class SupervisorJob(object):
"""docstring for SupervisorJob"""
def __init__(self, config):
    """Create a job description from a configuration dictionary.

    'config' must be a dictionary containing the following keys:
    - env_vars: dict of environment variables for the job,
      e.g. {"ES_HOME": "/home/es"} (optional; defaults to {})
    - name: job name, as used by Supervisor to uniquely identify the job,
      e.g. "elasticsearch"
    - base_dir: base directory for the supervisor job, e.g. "/home/es"
    - cmd: full command (with args) that supervisor will run,
      e.g. "elasticsearch -p /home/es/es.pid"
    - stdout_file: full path to the file where stdout will be dumped by
      Supervisor, e.g. "/home/es/logs/es.out"
    - stderr_file: full path to the file where stderr will be dumped by
      Supervisor, e.g. "/home/es/logs/es.err"
    """
    super(SupervisorJob, self).__init__()
    self.config = config
    # Guarantee 'env_vars' exists (possibly empty) so add_env() and
    # get_env_str() never have to special-case a missing key.
    self['env_vars'] = config.get('env_vars', {})
def prepare(self):
    # Hook for subclasses: perform any setup (directories, config files,
    # downloads) needed before the job can be handed to Supervisor.
    raise NotImplementedError("base class")
def __getitem__(self, k):
    # Dict-style read access: job['name'] proxies to the config dict.
    return self.config[k]
def __setitem__(self, k, v):
    # Dict-style write access: job['key'] = value updates the config dict.
    self.config[k] = v
def __contains__(self, k):
    # Membership test ('key' in job) proxies to the config dict.
    return k in self.config
def add_env(self, k, v):
    # Add (or overwrite) a single environment variable for the job.
    self['env_vars'][k] = v
def as_supervisor_program(self):
    """Render this job as a [program:x] section for a supervisord config."""
    template_text = """[program:{{program_name}}]
command = {{cmd}}
directory = {{base_dir}}
autostart = true
autorestart = true
stopsignal = KILL
killasgroup = true
stopasgroup = true
environment = {{env}}
stdout_logfile = {{stdout}}
stderr_logfile = {{stderr}}
"""
    # Values substituted into the Jinja2 template above.
    context = {
        "program_name": self.config['name'],
        "base_dir": self.config['base_dir'],
        "env": self.get_env_str(),
        "cmd": self.config['cmd'],
        "stdout": self.config['stdout_file'],
        "stderr": self.config['stderr_file'],
    }
    return Template(template_text).render(context)
def as_exports(self):
res = ""
for k, v in self['env_vars'].items():
|
return res
def get_env_str(self):
    """Format env_vars as 'KEY="value", ...' for supervisord's environment=."""
    entries = [key + '="' + str(value) + '"'
               for key, value in self['env_vars'].items()]
    return ", ".join(entries)
| res += "export %s=%s\n" % (k, v) | conditional_block |
SupervisorJob.py | from jinja2 import Template
class SupervisorJob(object):
"""docstring for SupervisorJob"""
def __init__(self, config):
""" Specify the configuration options for a job.
'config' must be a dictionary containing the following keys:
- env_vars: dict containing the key/value pairs to be specified as
environment variables
e.g. {"ES_HOME" : "/home/es"}
- name: job name, as used by Supervisor to uniquely identify the job
e.g. "elasticsearch"
- base_dir: Base directory for the supervisor job
e.g. "/home/es"
- cmd: Full command (with args) that supervisor will run
e.g. "elasticsearch -p /home/es/es.pid"
- stdout_file: Full path to the file where stdout will be dumped to by
Supervisor
e.g. "/home/es/logs/es.out"
- stderr_file: Full path to the file where stderr will be dumped to by
Supervisor
e.g. "/home/es/logs/es.err"
"""
super(SupervisorJob, self).__init__()
self.config = config
self['env_vars'] = config.get('env_vars', {})
# self.env_vars = env_vars
# self.base_dir = base_dir
# self.cmd = cmd
# self.name = name
# self.stdout_file = stdout_file
# self.stderr_file = stderr_file
def prepare(self):
raise NotImplementedError("base class")
def __getitem__(self, k):
return self.config[k]
def __setitem__(self, k, v):
self.config[k] = v
def __contains__(self, k):
return k in self.config
def add_env(self, k, v):
self['env_vars'][k] = v
def as_supervisor_program(self):
config = """[program:{{program_name}}]
command = {{cmd}}
directory = {{base_dir}}
autostart = true
autorestart = true
stopsignal = KILL
killasgroup = true
stopasgroup = true
environment = {{env}}
stdout_logfile = {{stdout}}
stderr_logfile = {{stderr}} | env = Template(config)
return env.render({
"program_name" : self.config['name'],
"base_dir" : self.config['base_dir'],
"env" : self.get_env_str(),
"cmd" : self.config['cmd'],
"stdout" : self.config['stdout_file'],
"stderr" : self.config['stderr_file'],
})
def as_exports(self):
res = ""
for k, v in self['env_vars'].items():
res += "export %s=%s\n" % (k, v)
return res
def get_env_str(self):
return ", ".join([k + "=\"" + str(v) + "\"" for k,v in self['env_vars'].items()]) |
"""
| random_line_split |
SupervisorJob.py | from jinja2 import Template
class SupervisorJob(object):
"""docstring for SupervisorJob"""
def __init__(self, config):
""" Specify the configuration options for a job.
'config' must be a dictionary containing the following keys:
- env_vars: dict containing the key/value pairs to be specified as
environment variables
e.g. {"ES_HOME" : "/home/es"}
- name: job name, as used by Supervisor to uniquely identify the job
e.g. "elasticsearch"
- base_dir: Base directory for the supervisor job
e.g. "/home/es"
- cmd: Full command (with args) that supervisor will run
e.g. "elasticsearch -p /home/es/es.pid"
- stdout_file: Full path to the file where stdout will be dumped to by
Supervisor
e.g. "/home/es/logs/es.out"
- stderr_file: Full path to the file where stderr will be dumped to by
Supervisor
e.g. "/home/es/logs/es.err"
"""
super(SupervisorJob, self).__init__()
self.config = config
self['env_vars'] = config.get('env_vars', {})
# self.env_vars = env_vars
# self.base_dir = base_dir
# self.cmd = cmd
# self.name = name
# self.stdout_file = stdout_file
# self.stderr_file = stderr_file
def prepare(self):
|
def __getitem__(self, k):
return self.config[k]
def __setitem__(self, k, v):
self.config[k] = v
def __contains__(self, k):
return k in self.config
def add_env(self, k, v):
self['env_vars'][k] = v
def as_supervisor_program(self):
config = """[program:{{program_name}}]
command = {{cmd}}
directory = {{base_dir}}
autostart = true
autorestart = true
stopsignal = KILL
killasgroup = true
stopasgroup = true
environment = {{env}}
stdout_logfile = {{stdout}}
stderr_logfile = {{stderr}}
"""
env = Template(config)
return env.render({
"program_name" : self.config['name'],
"base_dir" : self.config['base_dir'],
"env" : self.get_env_str(),
"cmd" : self.config['cmd'],
"stdout" : self.config['stdout_file'],
"stderr" : self.config['stderr_file'],
})
def as_exports(self):
res = ""
for k, v in self['env_vars'].items():
res += "export %s=%s\n" % (k, v)
return res
def get_env_str(self):
return ", ".join([k + "=\"" + str(v) + "\"" for k,v in self['env_vars'].items()])
| raise NotImplementedError("base class") | identifier_body |
mqtt_helper.py | import uuid
from paho.mqtt import publish
from paho.mqtt.client import MQTTv31
from conf.mqttconf import *
def | (msg, user_list, qos=2, retain=False):
"""
发布mqtt消息
:param msg:消息内容,可以是字符串、int、bytearray
:param user_list: 用户列表数组(不带前缀的),例如:["zhangsan","lilei"]
:param qos: 消息质量(0:至多一次,1:至少一次,2:只有一次)
:param retain:设置是否保存消息,为True时当订阅者不在线时发送的消息等上线后会得到通知,否则只发送给在线的设备
:return:
"""
auth = {"username": MOSQUITTO_PUB_USER, "password": MOSQUITTO_PUB_PWD}
client_id = MOSQUITTO_PREFIX + str(uuid.uuid1())
msgs = []
for i in user_list:
print(i)
msg_obj = dict()
msg_obj["qos"] = qos
msg_obj["retain"] = retain
msg_obj["topic"] = MOSQUITTO_TOPIC_PREFIX + str(i)
msg_obj["payload"] = msg
msgs.append(msg_obj)
if len(msgs) > 0 and msg:
print(msgs)
try:
publish.multiple(msgs, hostname=MOSQUITTO_HOST, port=MOSQUITTO_PORT, client_id=client_id, keepalive=60,
will=None, auth=auth, tls=None, protocol=MQTTv31)
ret = 1
except Exception as e:
print(str(e))
ret = -1
else:
ret = -2
return ret | send | identifier_name |
mqtt_helper.py | import uuid
from paho.mqtt import publish
from paho.mqtt.client import MQTTv31
from conf.mqttconf import *
def send(msg, user_list, qos=2, retain=False):
| print(msgs)
try:
publish.multiple(msgs, hostname=MOSQUITTO_HOST, port=MOSQUITTO_PORT, client_id=client_id, keepalive=60,
will=None, auth=auth, tls=None, protocol=MQTTv31)
ret = 1
except Exception as e:
print(str(e))
ret = -1
else:
ret = -2
return ret | """
发布mqtt消息
:param msg:消息内容,可以是字符串、int、bytearray
:param user_list: 用户列表数组(不带前缀的),例如:["zhangsan","lilei"]
:param qos: 消息质量(0:至多一次,1:至少一次,2:只有一次)
:param retain:设置是否保存消息,为True时当订阅者不在线时发送的消息等上线后会得到通知,否则只发送给在线的设备
:return:
"""
auth = {"username": MOSQUITTO_PUB_USER, "password": MOSQUITTO_PUB_PWD}
client_id = MOSQUITTO_PREFIX + str(uuid.uuid1())
msgs = []
for i in user_list:
print(i)
msg_obj = dict()
msg_obj["qos"] = qos
msg_obj["retain"] = retain
msg_obj["topic"] = MOSQUITTO_TOPIC_PREFIX + str(i)
msg_obj["payload"] = msg
msgs.append(msg_obj)
if len(msgs) > 0 and msg: | identifier_body |
mqtt_helper.py | import uuid
from paho.mqtt import publish
from paho.mqtt.client import MQTTv31
from conf.mqttconf import *
def send(msg, user_list, qos=2, retain=False):
    """Publish an MQTT message to every user's topic.

    :param msg: payload; may be a str, int, or bytearray
    :param user_list: iterable of bare user names (no topic prefix),
        e.g. ["zhangsan", "lilei"]
    :param qos: quality of service (0: at most once, 1: at least once,
        2: exactly once)
    :param retain: if True the broker stores the message so subscribers that
        connect later still receive it; if False only online subscribers do
    :return: 1 on success, -1 on publish failure, -2 when there was
        nothing to send
    """
    auth = {"username": MOSQUITTO_PUB_USER, "password": MOSQUITTO_PUB_PWD}
    client_id = MOSQUITTO_PREFIX + str(uuid.uuid1())
    msgs = []
    for user in user_list:
        print(user)
        msgs.append({
            "qos": qos,
            "retain": retain,
            "topic": MOSQUITTO_TOPIC_PREFIX + str(user),
            "payload": msg,
        })
    if msgs and msg:
        print(msgs)
        try:
            publish.multiple(msgs, hostname=MOSQUITTO_HOST,
                             port=MOSQUITTO_PORT, client_id=client_id,
                             keepalive=60, will=None, auth=auth, tls=None,
                             protocol=MQTTv31)
            return 1
        except Exception as e:
            print(str(e))
            return -1
    return -2
||
mqtt_helper.py | import uuid
from paho.mqtt import publish
from paho.mqtt.client import MQTTv31
from conf.mqttconf import *
def send(msg, user_list, qos=2, retain=False):
""" | :param user_list: 用户列表数组(不带前缀的),例如:["zhangsan","lilei"]
:param qos: 消息质量(0:至多一次,1:至少一次,2:只有一次)
:param retain:设置是否保存消息,为True时当订阅者不在线时发送的消息等上线后会得到通知,否则只发送给在线的设备
:return:
"""
auth = {"username": MOSQUITTO_PUB_USER, "password": MOSQUITTO_PUB_PWD}
client_id = MOSQUITTO_PREFIX + str(uuid.uuid1())
msgs = []
for i in user_list:
print(i)
msg_obj = dict()
msg_obj["qos"] = qos
msg_obj["retain"] = retain
msg_obj["topic"] = MOSQUITTO_TOPIC_PREFIX + str(i)
msg_obj["payload"] = msg
msgs.append(msg_obj)
if len(msgs) > 0 and msg:
print(msgs)
try:
publish.multiple(msgs, hostname=MOSQUITTO_HOST, port=MOSQUITTO_PORT, client_id=client_id, keepalive=60,
will=None, auth=auth, tls=None, protocol=MQTTv31)
ret = 1
except Exception as e:
print(str(e))
ret = -1
else:
ret = -2
return ret | 发布mqtt消息
:param msg:消息内容,可以是字符串、int、bytearray | random_line_split |
index.d.ts | // Type definitions for minilog 2.0
// Project: https://github.com/mixu/minilog
// Definitions by: Guido <http://guido.io>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
//These type definitions are not complete, although basic usage should be typed.
/**
 * A namespaced logger. Each method logs at its severity level and returns
 * the logger itself so calls can be chained.
 */
interface Minilog {
    debug(...msg: any[]): Minilog;
    info(...msg: any[]): Minilog;
    log(...msg: any[]): Minilog;
    warn(...msg: any[]): Minilog;
    error(...msg: any[]): Minilog;
}
declare function Minilog(namespace: string): Minilog;
declare namespace Minilog {
export function enable(): Minilog;
export function disable() : Minilog;
export function pipe(dest: any): Transform;
export var suggest: Filter;
export var backends: Minilog.MinilogBackends;
export var defaultBackend: any;
export var defaultFormatter: string;
/**
 * A whitelist/blacklist filter for log lines; lines are matched by
 * namespace name and level.
 */
export class Filter extends Transform{
    /**
     * Adds an entry to the whitelist.
     * Returns this filter (chainable).
     */
    allow(name: any, level?: any): Filter;
    /**
     * Adds an entry to the blacklist.
     * Returns this filter (chainable).
     */
    deny(name: any, level?: any): Filter;
    /**
     * Empties the whitelist and blacklist.
     * Returns this filter (chainable).
     */
    clear(): Filter;
    /** Returns whether a line with this name and level passes the filter. */
    test(name:any, level:any): boolean;
    /**
     * specifies the behavior when a log line doesn't match either the whitelist or the blacklist.
     The default is true (= "allow by default") - lines that do not match the whitelist or the blacklist are not filtered.
     If you want to flip the default so that lines are filtered unless they are on the whitelist, set this to false (= "deny by default").
     */
    defaultResult: boolean;
    /**
     * controls whether the filter is enabled. Default: true
     */
    enabled: boolean;
}
/** The built-in output backends Minilog can pipe log events to. */
export interface MinilogBackends {
    array: any;
    browser: any;
    console: Console;
    localstorage: any;
    jQuery: any;
}
/**
 * Console backend. Also exposes the formatter transforms; which formatters
 * are present differs between the browser and Node builds (see comments).
 */
export class Console extends Transform{
    /**
     * List of available formatters
     */
    formatters: string[];
    //Only available on client
    color: Transform;
    minilog: Transform;
    //Only available on backend
    formatClean: Transform;
    formatColor: Transform;
    formatNpm: Transform;
    formatLearnboost: Transform;
    formatMinilog: Transform;
    formatWithStack: Transform;
}
export class | {
write(name: any, level: any, args: any): void;
pipe(dest: any): any;
unpipe(from: any): Transform;
mixin(dest: any): void;
}
}
export = Minilog;
export as namespace Minilog;
| Transform | identifier_name |
index.d.ts | // Type definitions for minilog 2.0
// Project: https://github.com/mixu/minilog
// Definitions by: Guido <http://guido.io>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
//These type definitions are not complete, although basic usage should be typed.
interface Minilog {
debug(...msg: any[]): Minilog;
info(...msg: any[]): Minilog;
log(...msg: any[]): Minilog;
warn(...msg: any[]): Minilog;
error(...msg: any[]): Minilog;
}
declare function Minilog(namespace: string): Minilog;
declare namespace Minilog { | export var suggest: Filter;
export var backends: Minilog.MinilogBackends;
export var defaultBackend: any;
export var defaultFormatter: string;
export class Filter extends Transform{
/**
* Adds an entry to the whitelist
* Returns this filter
*/
allow(name: any, level?: any): Filter;
/**
* Adds an entry to the blacklist
* Returns this filter
*/
deny(name: any, level?: any): Filter;
/**
* Empties the whitelist and blacklist
* Returns this filter
*/
clear(): Filter;
test(name:any, level:any): boolean;
/**
* specifies the behavior when a log line doesn't match either the whitelist or the blacklist.
The default is true (= "allow by default") - lines that do not match the whitelist or the blacklist are not filtered (e.g. ).
If you want to flip the default so that lines are filtered unless they are on the whitelist, set this to false (= "deny by default").
*/
defaultResult: boolean;
/**
* controls whether the filter is enabled. Default: true
*/
enabled: boolean;
}
export interface MinilogBackends {
array: any;
browser: any;
console: Console;
localstorage: any;
jQuery: any;
}
export class Console extends Transform{
/**
* List of available formatters
*/
formatters: string[];
//Only available on client
color: Transform;
minilog: Transform;
//Only available on backend
formatClean: Transform;
formatColor: Transform;
formatNpm: Transform;
formatLearnboost: Transform;
formatMinilog: Transform;
formatWithStack: Transform;
}
export class Transform {
write(name: any, level: any, args: any): void;
pipe(dest: any): any;
unpipe(from: any): Transform;
mixin(dest: any): void;
}
}
export = Minilog;
export as namespace Minilog; | export function enable(): Minilog;
export function disable() : Minilog;
export function pipe(dest: any): Transform;
| random_line_split |
analyzer.ts | /**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be |
import * as ts from 'typescript';
import {ModuleResolver} from '../../imports';
import {PartialEvaluator} from '../../partial_evaluator';
import {scanForRouteEntryPoints} from './lazy';
import {RouterEntryPointManager} from './route';
/**
 * Raw route-relevant expressions gathered from one @NgModule declaration:
 * the unevaluated imports/exports/providers expressions plus the module's
 * identity (source file and class name).
 */
export interface NgModuleRawRouteData {
  sourceFile: ts.SourceFile;
  moduleName: string;
  imports: ts.Expression|null;
  exports: ts.Expression|null;
  providers: ts.Expression|null;
}
/**
 * A resolved lazy route: the `loadChildren` route string, the module that
 * declares it, and the module the route string resolves to.
 */
export interface LazyRoute {
  route: string;
  module: {name: string, filePath: string};
  referencedModule: {name: string, filePath: string};
}
export class NgModuleRouteAnalyzer {
private modules = new Map<string, NgModuleRawRouteData>();
private entryPointManager: RouterEntryPointManager;
// The entry-point manager resolves route strings to modules via the
// provided module resolver; the evaluator is used when scanning routes.
constructor(moduleResolver: ModuleResolver, private evaluator: PartialEvaluator) {
  this.entryPointManager = new RouterEntryPointManager(moduleResolver);
}
/**
 * Register the route-relevant expressions of one NgModule.
 *
 * Throws if a module with the same source file and name was already added.
 */
add(sourceFile: ts.SourceFile, moduleName: string, imports: ts.Expression|null,
    exports: ts.Expression|null, providers: ts.Expression|null): void {
  const moduleKey = `${sourceFile.fileName}#${moduleName}`;
  if (this.modules.has(moduleKey)) {
    throw new Error(`Double route analyzing ${moduleKey}`);
  }
  this.modules.set(moduleKey, {sourceFile, moduleName, imports, exports, providers});
}
/**
 * Scan every registered NgModule for lazy-route entry points and return
 * all discovered routes.
 */
listLazyRoutes(): LazyRoute[] {
  const routes: LazyRoute[] = [];
  // Iterate the stored data directly (insertion order, same as before);
  // this avoids materializing the key array and the non-null `get(key) !`
  // assertion the keys()+get() round trip required.
  for (const data of this.modules.values()) {
    const entryPoints = scanForRouteEntryPoints(
        data.sourceFile, data.moduleName, data, this.entryPointManager, this.evaluator);
    routes.push(...entryPoints.map(entryPoint => ({
                  route: entryPoint.loadChildren,
                  module: entryPoint.from,
                  referencedModule: entryPoint.resolvedTo,
                })));
  }
  return routes;
}
} | * found in the LICENSE file at https://angular.io/license
*/ | random_line_split |
analyzer.ts | /**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import * as ts from 'typescript';
import {ModuleResolver} from '../../imports';
import {PartialEvaluator} from '../../partial_evaluator';
import {scanForRouteEntryPoints} from './lazy';
import {RouterEntryPointManager} from './route';
export interface NgModuleRawRouteData {
sourceFile: ts.SourceFile;
moduleName: string;
imports: ts.Expression|null;
exports: ts.Expression|null;
providers: ts.Expression|null;
}
export interface LazyRoute {
route: string;
module: {name: string, filePath: string};
referencedModule: {name: string, filePath: string};
}
export class NgModuleRouteAnalyzer {
private modules = new Map<string, NgModuleRawRouteData>();
private entryPointManager: RouterEntryPointManager;
constructor(moduleResolver: ModuleResolver, private evaluator: PartialEvaluator) |
add(sourceFile: ts.SourceFile, moduleName: string, imports: ts.Expression|null,
exports: ts.Expression|null, providers: ts.Expression|null): void {
const key = `${sourceFile.fileName}#${moduleName}`;
if (this.modules.has(key)) {
throw new Error(`Double route analyzing ${key}`);
}
this.modules.set(
key, {
sourceFile, moduleName, imports, exports, providers,
});
}
listLazyRoutes(): LazyRoute[] {
const routes: LazyRoute[] = [];
for (const key of Array.from(this.modules.keys())) {
const data = this.modules.get(key) !;
const entryPoints = scanForRouteEntryPoints(
data.sourceFile, data.moduleName, data, this.entryPointManager, this.evaluator);
routes.push(...entryPoints.map(entryPoint => ({
route: entryPoint.loadChildren,
module: entryPoint.from,
referencedModule: entryPoint.resolvedTo,
})));
}
return routes;
}
}
| {
this.entryPointManager = new RouterEntryPointManager(moduleResolver);
} | identifier_body |
analyzer.ts | /**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import * as ts from 'typescript';
import {ModuleResolver} from '../../imports';
import {PartialEvaluator} from '../../partial_evaluator';
import {scanForRouteEntryPoints} from './lazy';
import {RouterEntryPointManager} from './route';
export interface NgModuleRawRouteData {
sourceFile: ts.SourceFile;
moduleName: string;
imports: ts.Expression|null;
exports: ts.Expression|null;
providers: ts.Expression|null;
}
export interface LazyRoute {
route: string;
module: {name: string, filePath: string};
referencedModule: {name: string, filePath: string};
}
export class NgModuleRouteAnalyzer {
private modules = new Map<string, NgModuleRawRouteData>();
private entryPointManager: RouterEntryPointManager;
constructor(moduleResolver: ModuleResolver, private evaluator: PartialEvaluator) {
this.entryPointManager = new RouterEntryPointManager(moduleResolver);
}
add(sourceFile: ts.SourceFile, moduleName: string, imports: ts.Expression|null,
exports: ts.Expression|null, providers: ts.Expression|null): void {
const key = `${sourceFile.fileName}#${moduleName}`;
if (this.modules.has(key)) |
this.modules.set(
key, {
sourceFile, moduleName, imports, exports, providers,
});
}
listLazyRoutes(): LazyRoute[] {
const routes: LazyRoute[] = [];
for (const key of Array.from(this.modules.keys())) {
const data = this.modules.get(key) !;
const entryPoints = scanForRouteEntryPoints(
data.sourceFile, data.moduleName, data, this.entryPointManager, this.evaluator);
routes.push(...entryPoints.map(entryPoint => ({
route: entryPoint.loadChildren,
module: entryPoint.from,
referencedModule: entryPoint.resolvedTo,
})));
}
return routes;
}
}
| {
throw new Error(`Double route analyzing ${key}`);
} | conditional_block |
analyzer.ts | /**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import * as ts from 'typescript';
import {ModuleResolver} from '../../imports';
import {PartialEvaluator} from '../../partial_evaluator';
import {scanForRouteEntryPoints} from './lazy';
import {RouterEntryPointManager} from './route';
export interface NgModuleRawRouteData {
sourceFile: ts.SourceFile;
moduleName: string;
imports: ts.Expression|null;
exports: ts.Expression|null;
providers: ts.Expression|null;
}
export interface LazyRoute {
route: string;
module: {name: string, filePath: string};
referencedModule: {name: string, filePath: string};
}
export class NgModuleRouteAnalyzer {
private modules = new Map<string, NgModuleRawRouteData>();
private entryPointManager: RouterEntryPointManager;
| (moduleResolver: ModuleResolver, private evaluator: PartialEvaluator) {
this.entryPointManager = new RouterEntryPointManager(moduleResolver);
}
add(sourceFile: ts.SourceFile, moduleName: string, imports: ts.Expression|null,
exports: ts.Expression|null, providers: ts.Expression|null): void {
const key = `${sourceFile.fileName}#${moduleName}`;
if (this.modules.has(key)) {
throw new Error(`Double route analyzing ${key}`);
}
this.modules.set(
key, {
sourceFile, moduleName, imports, exports, providers,
});
}
listLazyRoutes(): LazyRoute[] {
const routes: LazyRoute[] = [];
for (const key of Array.from(this.modules.keys())) {
const data = this.modules.get(key) !;
const entryPoints = scanForRouteEntryPoints(
data.sourceFile, data.moduleName, data, this.entryPointManager, this.evaluator);
routes.push(...entryPoints.map(entryPoint => ({
route: entryPoint.loadChildren,
module: entryPoint.from,
referencedModule: entryPoint.resolvedTo,
})));
}
return routes;
}
}
| constructor | identifier_name |
single-book.js | jQuery( document ).ready(function() {
// hide the bottom buy links if the excerpt is closed
if (jQuery('.mbm-book-excerpt-read-more').is(":visible")) |
// handle the editions hide/show
jQuery('.mbm-book-editions-toggle').on('click', function() {
jQuery(this).siblings('.mbm-book-editions-subinfo').toggle();
jQuery(this).toggleClass('mbm-book-editions-open');
});
// open the excerpt
jQuery('.mbm-book-excerpt-read-more').on( 'click', function() {
jQuery(this).siblings('.mbm-book-excerpt-text-hidden').toggle();
jQuery(this).toggle();
jQuery('#mbm-book-links2').show();
});
// close the excerpt
jQuery('.mbm-book-excerpt-collapse').on( 'click', function() {
jQuery(this).parent().toggle();
jQuery(this).parent().siblings('.mbm-book-excerpt-read-more').toggle();
jQuery('#mbm-book-links2').hide();
jQuery('html, body').animate({
scrollTop: (jQuery('.mbm-book-excerpt-read-more').offset().top - 100)
},500);
});
}); // ready
| {
jQuery('#mbm-book-links2').hide();
} | conditional_block |
single-book.js | jQuery( document ).ready(function() {
// hide the bottom buy links if the excerpt is closed
if (jQuery('.mbm-book-excerpt-read-more').is(":visible")) {
jQuery('#mbm-book-links2').hide();
}
// handle the editions hide/show
jQuery('.mbm-book-editions-toggle').on('click', function() {
jQuery(this).siblings('.mbm-book-editions-subinfo').toggle();
jQuery(this).toggleClass('mbm-book-editions-open');
});
// open the excerpt
jQuery('.mbm-book-excerpt-read-more').on( 'click', function() {
jQuery(this).siblings('.mbm-book-excerpt-text-hidden').toggle();
jQuery(this).toggle();
jQuery('#mbm-book-links2').show();
});
// close the excerpt | jQuery(this).parent().toggle();
jQuery(this).parent().siblings('.mbm-book-excerpt-read-more').toggle();
jQuery('#mbm-book-links2').hide();
jQuery('html, body').animate({
scrollTop: (jQuery('.mbm-book-excerpt-read-more').offset().top - 100)
},500);
});
}); // ready | jQuery('.mbm-book-excerpt-collapse').on( 'click', function() { | random_line_split |
lib.rs | #[macro_use]
extern crate bitflags;
extern crate nix;
extern crate libc;
mod itimer_spec;
mod sys;
use std::mem;
use std::ptr;
use std::os::unix::io::{RawFd, AsRawFd};
use nix::{Errno, Error};
use nix::unistd;
use libc::{c_int, clockid_t};
pub use self::itimer_spec::*;
#[doc(hidden)]
const TIMERFD_DATA_SIZE: usize = 8;
bitflags! {
/// Flags for TimerFd
#[derive(Default)]
pub struct TFDFlags: c_int {
/// Set close-on-exec on TimerFd
const TFD_CLOSEXEC = 524288;
/// Set TimerFd to non-block mode
const TFD_NONBLOCK = 2048;
}
}
bitflags! {
/// Flags for TimerFd::set_time
#[derive(Default)]
pub struct TFDTimerFlags: c_int {
/// Set an absolute timer
const TFD_TIMER_ABSTIME = 1;
}
}
#[inline]
pub fn timerfd_create(clock_id: clockid_t, flags: TFDFlags) -> nix::Result<RawFd> {
unsafe { Errno::result(sys::timerfd_create(clock_id, flags.bits())) }
}
#[inline]
pub fn timerfd_settime(
fd: RawFd,
flags: TFDTimerFlags,
utmr: &ITimerSpec,
otmr: Option<&mut ITimerSpec>,
) -> nix::Result<()> {
let res = unsafe {
sys::timerfd_settime(
fd,
flags.bits(),
utmr as *const ITimerSpec,
otmr.map(|x| x as *mut ITimerSpec)
.unwrap_or(ptr::null_mut()),
)
};
if res == -1 {
return Err(Error::last());
}
Ok(())
}
#[inline]
pub fn timerfd_gettime(fd: RawFd, otmr: &mut ITimerSpec) -> nix::Result<()> {
let res = unsafe { sys::timerfd_gettime(fd, otmr as *mut ITimerSpec) };
if res == -1 {
return Err(Error::last());
}
Ok(())
}
#[repr(i32)]
#[derive(Debug, Copy, Clone)]
pub enum ClockId {
/// A settable system-wide clock
Realtime = CLOCK_REALTIME,
/// A nonsettable clock which is not affected discontinuous changes in the system clock
Monotonic = CLOCK_MONOTONIC,
}
/// A helper struct for creating, reading, and closing a `timerfd` instance.
///
/// ## Example
///
/// ```
/// use timerfd::{TimerFd, ClockId, ITimerSpec};
///
/// let mut timerfd = TimerFd::new(ClockId::Monotonic).unwrap();
///
/// // Set timer
/// timerfd.set_time(&ITimerSpec::seconds(3), None).unwrap();
///
/// match timerfd.read_time() {
/// // Timer is expired
/// Ok(Some(expirations)) => {},
/// // There is no expired timer. (Only happend when TFD_NONBLOCK set)
/// Ok(None) => {},
/// Err(err) => {}, // Some error happend
/// }
/// ```
#[derive(Debug)]
pub struct TimerFd(RawFd);
impl TimerFd {
/// Create a new TimerFd
pub fn new(clock_id: ClockId) -> nix::Result<TimerFd> {
Self::with_flags(clock_id, Default::default())
}
/// Create a new TimerFd with flags
pub fn with_flags(clock_id: ClockId, flags: TFDFlags) -> nix::Result<TimerFd> {
Ok(TimerFd(timerfd_create(clock_id as clockid_t, flags)?))
}
/// Start or stop a timer
pub fn set_time(
&mut self,
itmr: &ITimerSpec,
otmr: Option<&mut ITimerSpec>,
) -> nix::Result<()> {
self.set_time_with_flags(Default::default(), itmr, otmr)
}
/// Return current timer
pub fn get_time(&self, otmr: &mut ITimerSpec) -> nix::Result<()> {
timerfd_gettime(self.0, otmr)
}
/// Set a timer with flags
pub fn set_time_with_flags(
&mut self,
flags: TFDTimerFlags,
itmr: &ITimerSpec,
otmr: Option<&mut ITimerSpec>,
) -> nix::Result<()> {
timerfd_settime(self.0, flags, itmr, otmr)
}
pub fn read_time(&mut self) -> nix::Result<Option<u64>> {
let mut buf: [u8; TIMERFD_DATA_SIZE] = unsafe { mem::uninitialized() };
match unistd::read(self.0, &mut buf) {
Ok(TIMERFD_DATA_SIZE) => Ok(Some(unsafe { mem::transmute(buf) })),
Ok(_) => unreachable!("partial read of timerfd"),
Err(Error::Sys(Errno::EAGAIN)) => Ok(None),
Err(err) => Err(err),
}
}
}
impl Iterator for TimerFd {
type Item = u64;
fn next(&mut self) -> Option<Self::Item> {
self.read_time().unwrap_or(None)
}
}
impl AsRawFd for TimerFd {
fn as_raw_fd(&self) -> RawFd {
self.0
}
}
impl Drop for TimerFd {
fn | (&mut self) {
let _ = unistd::close(self.0);
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::{time, thread};
use nix::sys::time::{TimeSpec, TimeValLike};
#[test]
fn test_read_timerfd() {
let mut timer =
TimerFd::with_flags(ClockId::Monotonic, TFD_NONBLOCK).expect("Fail to create timerfd");
assert_eq!(timer.read_time(), Ok(None));
timer
.set_time(
&ITimerSpec {
it_value: TimeSpec::seconds(3),
it_interval: TimeSpec::seconds(0),
},
None,
)
.expect("Fail to set time");
assert_eq!(timer.read_time(), Ok(None));
thread::sleep(time::Duration::from_secs(3));
assert!(timer.read_time().unwrap().is_some());
}
}
| drop | identifier_name |
lib.rs | #[macro_use]
extern crate bitflags;
extern crate nix;
extern crate libc;
mod itimer_spec;
mod sys;
use std::mem;
use std::ptr;
use std::os::unix::io::{RawFd, AsRawFd};
use nix::{Errno, Error};
use nix::unistd;
use libc::{c_int, clockid_t};
pub use self::itimer_spec::*;
#[doc(hidden)]
const TIMERFD_DATA_SIZE: usize = 8;
bitflags! {
/// Flags for TimerFd
#[derive(Default)]
pub struct TFDFlags: c_int {
/// Set close-on-exec on TimerFd
const TFD_CLOSEXEC = 524288;
/// Set TimerFd to non-block mode
const TFD_NONBLOCK = 2048;
}
}
bitflags! {
/// Flags for TimerFd::set_time
#[derive(Default)]
pub struct TFDTimerFlags: c_int {
/// Set an absolute timer
const TFD_TIMER_ABSTIME = 1;
}
}
#[inline]
pub fn timerfd_create(clock_id: clockid_t, flags: TFDFlags) -> nix::Result<RawFd> |
#[inline]
pub fn timerfd_settime(
fd: RawFd,
flags: TFDTimerFlags,
utmr: &ITimerSpec,
otmr: Option<&mut ITimerSpec>,
) -> nix::Result<()> {
let res = unsafe {
sys::timerfd_settime(
fd,
flags.bits(),
utmr as *const ITimerSpec,
otmr.map(|x| x as *mut ITimerSpec)
.unwrap_or(ptr::null_mut()),
)
};
if res == -1 {
return Err(Error::last());
}
Ok(())
}
#[inline]
pub fn timerfd_gettime(fd: RawFd, otmr: &mut ITimerSpec) -> nix::Result<()> {
let res = unsafe { sys::timerfd_gettime(fd, otmr as *mut ITimerSpec) };
if res == -1 {
return Err(Error::last());
}
Ok(())
}
#[repr(i32)]
#[derive(Debug, Copy, Clone)]
pub enum ClockId {
/// A settable system-wide clock
Realtime = CLOCK_REALTIME,
/// A nonsettable clock which is not affected discontinuous changes in the system clock
Monotonic = CLOCK_MONOTONIC,
}
/// A helper struct for creating, reading, and closing a `timerfd` instance.
///
/// ## Example
///
/// ```
/// use timerfd::{TimerFd, ClockId, ITimerSpec};
///
/// let mut timerfd = TimerFd::new(ClockId::Monotonic).unwrap();
///
/// // Set timer
/// timerfd.set_time(&ITimerSpec::seconds(3), None).unwrap();
///
/// match timerfd.read_time() {
/// // Timer is expired
/// Ok(Some(expirations)) => {},
/// // There is no expired timer. (Only happend when TFD_NONBLOCK set)
/// Ok(None) => {},
/// Err(err) => {}, // Some error happend
/// }
/// ```
#[derive(Debug)]
pub struct TimerFd(RawFd);
impl TimerFd {
/// Create a new TimerFd
pub fn new(clock_id: ClockId) -> nix::Result<TimerFd> {
Self::with_flags(clock_id, Default::default())
}
/// Create a new TimerFd with flags
pub fn with_flags(clock_id: ClockId, flags: TFDFlags) -> nix::Result<TimerFd> {
Ok(TimerFd(timerfd_create(clock_id as clockid_t, flags)?))
}
/// Start or stop a timer
pub fn set_time(
&mut self,
itmr: &ITimerSpec,
otmr: Option<&mut ITimerSpec>,
) -> nix::Result<()> {
self.set_time_with_flags(Default::default(), itmr, otmr)
}
/// Return current timer
pub fn get_time(&self, otmr: &mut ITimerSpec) -> nix::Result<()> {
timerfd_gettime(self.0, otmr)
}
/// Set a timer with flags
pub fn set_time_with_flags(
&mut self,
flags: TFDTimerFlags,
itmr: &ITimerSpec,
otmr: Option<&mut ITimerSpec>,
) -> nix::Result<()> {
timerfd_settime(self.0, flags, itmr, otmr)
}
pub fn read_time(&mut self) -> nix::Result<Option<u64>> {
let mut buf: [u8; TIMERFD_DATA_SIZE] = unsafe { mem::uninitialized() };
match unistd::read(self.0, &mut buf) {
Ok(TIMERFD_DATA_SIZE) => Ok(Some(unsafe { mem::transmute(buf) })),
Ok(_) => unreachable!("partial read of timerfd"),
Err(Error::Sys(Errno::EAGAIN)) => Ok(None),
Err(err) => Err(err),
}
}
}
impl Iterator for TimerFd {
type Item = u64;
fn next(&mut self) -> Option<Self::Item> {
self.read_time().unwrap_or(None)
}
}
impl AsRawFd for TimerFd {
fn as_raw_fd(&self) -> RawFd {
self.0
}
}
impl Drop for TimerFd {
fn drop(&mut self) {
let _ = unistd::close(self.0);
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::{time, thread};
use nix::sys::time::{TimeSpec, TimeValLike};
#[test]
fn test_read_timerfd() {
let mut timer =
TimerFd::with_flags(ClockId::Monotonic, TFD_NONBLOCK).expect("Fail to create timerfd");
assert_eq!(timer.read_time(), Ok(None));
timer
.set_time(
&ITimerSpec {
it_value: TimeSpec::seconds(3),
it_interval: TimeSpec::seconds(0),
},
None,
)
.expect("Fail to set time");
assert_eq!(timer.read_time(), Ok(None));
thread::sleep(time::Duration::from_secs(3));
assert!(timer.read_time().unwrap().is_some());
}
}
| {
unsafe { Errno::result(sys::timerfd_create(clock_id, flags.bits())) }
} | identifier_body |
lib.rs | #[macro_use]
extern crate bitflags;
extern crate nix;
extern crate libc;
mod itimer_spec;
mod sys;
use std::mem;
use std::ptr;
use std::os::unix::io::{RawFd, AsRawFd};
use nix::{Errno, Error};
use nix::unistd;
use libc::{c_int, clockid_t};
pub use self::itimer_spec::*;
#[doc(hidden)]
const TIMERFD_DATA_SIZE: usize = 8;
bitflags! {
/// Flags for TimerFd
#[derive(Default)]
pub struct TFDFlags: c_int {
/// Set close-on-exec on TimerFd
const TFD_CLOSEXEC = 524288;
/// Set TimerFd to non-block mode
const TFD_NONBLOCK = 2048;
}
}
bitflags! {
/// Flags for TimerFd::set_time
#[derive(Default)]
pub struct TFDTimerFlags: c_int {
/// Set an absolute timer
const TFD_TIMER_ABSTIME = 1;
}
}
#[inline]
pub fn timerfd_create(clock_id: clockid_t, flags: TFDFlags) -> nix::Result<RawFd> {
unsafe { Errno::result(sys::timerfd_create(clock_id, flags.bits())) }
}
#[inline]
pub fn timerfd_settime(
fd: RawFd,
flags: TFDTimerFlags,
utmr: &ITimerSpec,
otmr: Option<&mut ITimerSpec>,
) -> nix::Result<()> {
let res = unsafe {
sys::timerfd_settime(
fd,
flags.bits(),
utmr as *const ITimerSpec,
otmr.map(|x| x as *mut ITimerSpec)
.unwrap_or(ptr::null_mut()),
)
};
if res == -1 {
return Err(Error::last());
}
Ok(())
}
#[inline]
pub fn timerfd_gettime(fd: RawFd, otmr: &mut ITimerSpec) -> nix::Result<()> {
let res = unsafe { sys::timerfd_gettime(fd, otmr as *mut ITimerSpec) };
if res == -1 {
return Err(Error::last());
}
Ok(())
}
#[repr(i32)]
#[derive(Debug, Copy, Clone)]
pub enum ClockId {
/// A settable system-wide clock
Realtime = CLOCK_REALTIME,
/// A nonsettable clock which is not affected discontinuous changes in the system clock
Monotonic = CLOCK_MONOTONIC,
}
/// A helper struct for creating, reading, and closing a `timerfd` instance.
///
/// ## Example
///
/// ```
/// use timerfd::{TimerFd, ClockId, ITimerSpec};
///
/// let mut timerfd = TimerFd::new(ClockId::Monotonic).unwrap();
///
/// // Set timer
/// timerfd.set_time(&ITimerSpec::seconds(3), None).unwrap();
///
/// match timerfd.read_time() {
/// // Timer is expired
/// Ok(Some(expirations)) => {},
/// // There is no expired timer. (Only happend when TFD_NONBLOCK set)
/// Ok(None) => {},
/// Err(err) => {}, // Some error happend
/// }
/// ```
#[derive(Debug)]
pub struct TimerFd(RawFd);
impl TimerFd {
/// Create a new TimerFd
pub fn new(clock_id: ClockId) -> nix::Result<TimerFd> {
Self::with_flags(clock_id, Default::default())
}
/// Create a new TimerFd with flags
pub fn with_flags(clock_id: ClockId, flags: TFDFlags) -> nix::Result<TimerFd> {
Ok(TimerFd(timerfd_create(clock_id as clockid_t, flags)?))
}
/// Start or stop a timer
pub fn set_time(
&mut self,
itmr: &ITimerSpec,
otmr: Option<&mut ITimerSpec>,
) -> nix::Result<()> {
self.set_time_with_flags(Default::default(), itmr, otmr)
}
/// Return current timer
pub fn get_time(&self, otmr: &mut ITimerSpec) -> nix::Result<()> {
timerfd_gettime(self.0, otmr)
}
/// Set a timer with flags
pub fn set_time_with_flags(
&mut self,
flags: TFDTimerFlags,
itmr: &ITimerSpec,
otmr: Option<&mut ITimerSpec>,
) -> nix::Result<()> {
timerfd_settime(self.0, flags, itmr, otmr)
}
pub fn read_time(&mut self) -> nix::Result<Option<u64>> {
let mut buf: [u8; TIMERFD_DATA_SIZE] = unsafe { mem::uninitialized() };
match unistd::read(self.0, &mut buf) {
Ok(TIMERFD_DATA_SIZE) => Ok(Some(unsafe { mem::transmute(buf) })),
Ok(_) => unreachable!("partial read of timerfd"),
Err(Error::Sys(Errno::EAGAIN)) => Ok(None),
Err(err) => Err(err),
}
}
}
impl Iterator for TimerFd {
type Item = u64;
fn next(&mut self) -> Option<Self::Item> {
self.read_time().unwrap_or(None)
}
}
impl AsRawFd for TimerFd {
fn as_raw_fd(&self) -> RawFd {
self.0
}
}
impl Drop for TimerFd {
fn drop(&mut self) {
let _ = unistd::close(self.0);
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::{time, thread};
use nix::sys::time::{TimeSpec, TimeValLike};
#[test]
fn test_read_timerfd() {
let mut timer =
TimerFd::with_flags(ClockId::Monotonic, TFD_NONBLOCK).expect("Fail to create timerfd");
assert_eq!(timer.read_time(), Ok(None));
timer
.set_time(
&ITimerSpec {
it_value: TimeSpec::seconds(3),
it_interval: TimeSpec::seconds(0),
},
None,
)
.expect("Fail to set time");
assert_eq!(timer.read_time(), Ok(None));
thread::sleep(time::Duration::from_secs(3)); | assert!(timer.read_time().unwrap().is_some());
}
} | random_line_split |
|
opening.rs | use std::io::prelude::*;
use std::path::Path;
use std::fs::File;
use std::io::Result;
use bitboard::Board;
use bitboard::Move;
use bitboard::Turn;
///
///Opening book file format:
///(16 bytes: hash table array size (n))
///n * (20 bytes: (1 byte used)(16 bytes: board position, normalized)(1 byte: white best move)(1 byte: black best move))
///
struct OpeningNode {
hash_val_1 : u32,
hash_val_2 : u32,
black_minimax : i16,
white_minimax : i16,
best_alt_move : u16,
alt_score : i16,
flags : u16,
}
pub struct | {
file : File,
}
impl Opening {
pub fn new(filename : String) -> Result<Opening> {
let open_file = File::open(&filename)?;
Ok(Opening {
file : open_file,
})
}
pub fn get_move(&mut self, bb : Board, t : Turn) -> Result<Move> {
Ok(Move::null())
}
}
| Opening | identifier_name |
opening.rs | use std::io::prelude::*;
use std::path::Path;
use std::fs::File;
use std::io::Result;
use bitboard::Board;
use bitboard::Move;
use bitboard::Turn;
/// | ///
struct OpeningNode {
hash_val_1 : u32,
hash_val_2 : u32,
black_minimax : i16,
white_minimax : i16,
best_alt_move : u16,
alt_score : i16,
flags : u16,
}
pub struct Opening {
file : File,
}
impl Opening {
pub fn new(filename : String) -> Result<Opening> {
let open_file = File::open(&filename)?;
Ok(Opening {
file : open_file,
})
}
pub fn get_move(&mut self, bb : Board, t : Turn) -> Result<Move> {
Ok(Move::null())
}
} | ///Opening book file format:
///(16 bytes: hash table array size (n))
///n * (20 bytes: (1 byte used)(16 bytes: board position, normalized)(1 byte: white best move)(1 byte: black best move)) | random_line_split |
unix-sockets.js | 'use strict';
const assert = require('assert');
let http = require('http');
let https = require('https');
const os = require('os');
const fs = require('fs');
const express = require('../support/express');
const request = require('../support/client');
const app = express();
const key = fs.readFileSync(`${__dirname}/fixtures/key.pem`);
const cert = fs.readFileSync(`${__dirname}/fixtures/cert.pem`);
const cacert = fs.readFileSync(`${__dirname}/fixtures/ca.cert.pem`);
const httpSockPath = [os.tmpdir(), 'superagent-http.sock'].join('/');
const httpsSockPath = [os.tmpdir(), 'superagent-https.sock'].join('/');
let httpServer;
let httpsServer;
if (process.env.HTTP2_TEST) {
http = https = require('http2');
}
app.get('/', (req, res) => {
res.send('root ok!');
});
app.get('/request/path', (req, res) => {
res.send('request path ok!');
});
describe('[unix-sockets] http', () => {
if (process.platform === 'win32') {
return;
}
before(done => { | if (fs.existsSync(httpSockPath) === true) {
// try unlink if sock file exists
fs.unlinkSync(httpSockPath);
}
httpServer = http.createServer(app);
httpServer.listen(httpSockPath, done);
});
const base = `http+unix://${httpSockPath.replace(/\//g, '%2F')}`;
describe('request', () => {
it('path: / (root)', done => {
request.get(`${base}/`).end((err, res) => {
assert(res.ok);
assert.strictEqual('root ok!', res.text);
done();
});
});
it('path: /request/path', done => {
request.get(`${base}/request/path`).end((err, res) => {
assert(res.ok);
assert.strictEqual('request path ok!', res.text);
done();
});
});
});
after(() => {
if (typeof httpServer.close === 'function') {
httpServer.close();
} else httpServer.destroy();
});
});
describe('[unix-sockets] https', () => {
if (process.platform === 'win32') {
return;
}
before(done => {
if (fs.existsSync(httpsSockPath) === true) {
// try unlink if sock file exists
fs.unlinkSync(httpsSockPath);
}
if (process.env.HTTP2_TEST) {
httpsServer = https.createSecureServer({ key, cert }, app);
} else {
httpsServer = https.createServer({ key, cert }, app);
}
httpsServer.listen(httpsSockPath, done);
});
const base = `https+unix://${httpsSockPath.replace(/\//g, '%2F')}`;
describe('request', () => {
it('path: / (root)', done => {
request
.get(`${base}/`)
.ca(cacert)
.end((err, res) => {
assert.ifError(err);
assert(res.ok);
assert.strictEqual('root ok!', res.text);
done();
});
});
it('path: /request/path', done => {
request
.get(`${base}/request/path`)
.ca(cacert)
.end((err, res) => {
assert.ifError(err);
assert(res.ok);
assert.strictEqual('request path ok!', res.text);
done();
});
});
});
after(done => {
httpsServer.close(done);
});
}); | random_line_split |
|
unix-sockets.js | 'use strict';
const assert = require('assert');
let http = require('http');
let https = require('https');
const os = require('os');
const fs = require('fs');
const express = require('../support/express');
const request = require('../support/client');
const app = express();
const key = fs.readFileSync(`${__dirname}/fixtures/key.pem`);
const cert = fs.readFileSync(`${__dirname}/fixtures/cert.pem`);
const cacert = fs.readFileSync(`${__dirname}/fixtures/ca.cert.pem`);
const httpSockPath = [os.tmpdir(), 'superagent-http.sock'].join('/');
const httpsSockPath = [os.tmpdir(), 'superagent-https.sock'].join('/');
let httpServer;
let httpsServer;
if (process.env.HTTP2_TEST) {
http = https = require('http2');
}
app.get('/', (req, res) => {
res.send('root ok!');
});
app.get('/request/path', (req, res) => {
res.send('request path ok!');
});
describe('[unix-sockets] http', () => {
if (process.platform === 'win32') {
return;
}
before(done => {
if (fs.existsSync(httpSockPath) === true) {
// try unlink if sock file exists
fs.unlinkSync(httpSockPath);
}
httpServer = http.createServer(app);
httpServer.listen(httpSockPath, done);
});
const base = `http+unix://${httpSockPath.replace(/\//g, '%2F')}`;
describe('request', () => {
it('path: / (root)', done => {
request.get(`${base}/`).end((err, res) => {
assert(res.ok);
assert.strictEqual('root ok!', res.text);
done();
});
});
it('path: /request/path', done => {
request.get(`${base}/request/path`).end((err, res) => {
assert(res.ok);
assert.strictEqual('request path ok!', res.text);
done();
});
});
});
after(() => {
if (typeof httpServer.close === 'function') {
httpServer.close();
} else httpServer.destroy();
});
});
describe('[unix-sockets] https', () => {
if (process.platform === 'win32') {
return;
}
before(done => {
if (fs.existsSync(httpsSockPath) === true) {
// try unlink if sock file exists
fs.unlinkSync(httpsSockPath);
}
if (process.env.HTTP2_TEST) | else {
httpsServer = https.createServer({ key, cert }, app);
}
httpsServer.listen(httpsSockPath, done);
});
const base = `https+unix://${httpsSockPath.replace(/\//g, '%2F')}`;
describe('request', () => {
it('path: / (root)', done => {
request
.get(`${base}/`)
.ca(cacert)
.end((err, res) => {
assert.ifError(err);
assert(res.ok);
assert.strictEqual('root ok!', res.text);
done();
});
});
it('path: /request/path', done => {
request
.get(`${base}/request/path`)
.ca(cacert)
.end((err, res) => {
assert.ifError(err);
assert(res.ok);
assert.strictEqual('request path ok!', res.text);
done();
});
});
});
after(done => {
httpsServer.close(done);
});
});
| {
httpsServer = https.createSecureServer({ key, cert }, app);
} | conditional_block |
block_device_mapping_v1.py | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The legacy block device mappings extension."""
from webob import exc
from nova.api.openstack import extensions
from nova import block_device
from nova import exception
from nova.i18n import _
from nova.openstack.common import strutils
ALIAS = "os-block-device-mapping-v1"
ATTRIBUTE_NAME = "block_device_mapping"
ATTRIBUTE_NAME_V2 = "block_device_mapping_v2"
class BlockDeviceMappingV1(extensions.V3APIExtensionBase):
| if block_device_mapping and block_device_mapping_v2:
expl = _('Using different block_device_mapping syntaxes '
'is not allowed in the same request.')
raise exc.HTTPBadRequest(explanation=expl)
for bdm in block_device_mapping:
try:
block_device.validate_device_name(bdm.get("device_name"))
block_device.validate_and_default_volume_size(bdm)
except exception.InvalidBDMFormat as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
if 'delete_on_termination' in bdm:
bdm['delete_on_termination'] = strutils.bool_from_string(
bdm['delete_on_termination'])
if block_device_mapping:
create_kwargs['block_device_mapping'] = block_device_mapping
# Sets the legacy_bdm flag if we got a legacy block device mapping.
create_kwargs['legacy_bdm'] = True
| """Block device mapping boot support."""
name = "BlockDeviceMappingV1"
alias = ALIAS
version = 1
def get_resources(self):
return []
def get_controller_extensions(self):
return []
# use nova.api.extensions.server.extensions entry point to modify
# server create kwargs
# NOTE(gmann): This function is not supposed to use 'body_deprecated_param'
# parameter as this is placed to handle scheduler_hint extension for V2.1.
def server_create(self, server_dict, create_kwargs, body_deprecated_param):
block_device_mapping = server_dict.get(ATTRIBUTE_NAME, [])
block_device_mapping_v2 = server_dict.get(ATTRIBUTE_NAME_V2, [])
| identifier_body |
block_device_mapping_v1.py | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The legacy block device mappings extension."""
from webob import exc
from nova.api.openstack import extensions
from nova import block_device
from nova import exception
from nova.i18n import _
from nova.openstack.common import strutils
ALIAS = "os-block-device-mapping-v1"
ATTRIBUTE_NAME = "block_device_mapping"
ATTRIBUTE_NAME_V2 = "block_device_mapping_v2"
class | (extensions.V3APIExtensionBase):
"""Block device mapping boot support."""
name = "BlockDeviceMappingV1"
alias = ALIAS
version = 1
def get_resources(self):
return []
def get_controller_extensions(self):
return []
# use nova.api.extensions.server.extensions entry point to modify
# server create kwargs
# NOTE(gmann): This function is not supposed to use 'body_deprecated_param'
# parameter as this is placed to handle scheduler_hint extension for V2.1.
def server_create(self, server_dict, create_kwargs, body_deprecated_param):
block_device_mapping = server_dict.get(ATTRIBUTE_NAME, [])
block_device_mapping_v2 = server_dict.get(ATTRIBUTE_NAME_V2, [])
if block_device_mapping and block_device_mapping_v2:
expl = _('Using different block_device_mapping syntaxes '
'is not allowed in the same request.')
raise exc.HTTPBadRequest(explanation=expl)
for bdm in block_device_mapping:
try:
block_device.validate_device_name(bdm.get("device_name"))
block_device.validate_and_default_volume_size(bdm)
except exception.InvalidBDMFormat as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
if 'delete_on_termination' in bdm:
bdm['delete_on_termination'] = strutils.bool_from_string(
bdm['delete_on_termination'])
if block_device_mapping:
create_kwargs['block_device_mapping'] = block_device_mapping
# Sets the legacy_bdm flag if we got a legacy block device mapping.
create_kwargs['legacy_bdm'] = True
| BlockDeviceMappingV1 | identifier_name |
block_device_mapping_v1.py | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The legacy block device mappings extension."""
from webob import exc
from nova.api.openstack import extensions
from nova import block_device
from nova import exception
from nova.i18n import _
from nova.openstack.common import strutils
ALIAS = "os-block-device-mapping-v1"
ATTRIBUTE_NAME = "block_device_mapping"
ATTRIBUTE_NAME_V2 = "block_device_mapping_v2"
class BlockDeviceMappingV1(extensions.V3APIExtensionBase):
"""Block device mapping boot support."""
name = "BlockDeviceMappingV1"
alias = ALIAS
version = 1
def get_resources(self):
return []
def get_controller_extensions(self):
return []
# use nova.api.extensions.server.extensions entry point to modify
# server create kwargs
# NOTE(gmann): This function is not supposed to use 'body_deprecated_param'
# parameter as this is placed to handle scheduler_hint extension for V2.1.
def server_create(self, server_dict, create_kwargs, body_deprecated_param):
block_device_mapping = server_dict.get(ATTRIBUTE_NAME, [])
block_device_mapping_v2 = server_dict.get(ATTRIBUTE_NAME_V2, [])
if block_device_mapping and block_device_mapping_v2:
expl = _('Using different block_device_mapping syntaxes '
'is not allowed in the same request.')
raise exc.HTTPBadRequest(explanation=expl)
for bdm in block_device_mapping:
try:
block_device.validate_device_name(bdm.get("device_name"))
block_device.validate_and_default_volume_size(bdm)
except exception.InvalidBDMFormat as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
if 'delete_on_termination' in bdm:
bdm['delete_on_termination'] = strutils.bool_from_string(
bdm['delete_on_termination'])
if block_device_mapping:
| create_kwargs['block_device_mapping'] = block_device_mapping
# Sets the legacy_bdm flag if we got a legacy block device mapping.
create_kwargs['legacy_bdm'] = True | conditional_block |
|
block_device_mapping_v1.py | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The legacy block device mappings extension."""
from webob import exc
from nova.api.openstack import extensions
from nova import block_device
from nova import exception | ATTRIBUTE_NAME_V2 = "block_device_mapping_v2"
class BlockDeviceMappingV1(extensions.V3APIExtensionBase):
"""Block device mapping boot support."""
name = "BlockDeviceMappingV1"
alias = ALIAS
version = 1
def get_resources(self):
return []
def get_controller_extensions(self):
return []
# use nova.api.extensions.server.extensions entry point to modify
# server create kwargs
# NOTE(gmann): This function is not supposed to use 'body_deprecated_param'
# parameter as this is placed to handle scheduler_hint extension for V2.1.
def server_create(self, server_dict, create_kwargs, body_deprecated_param):
block_device_mapping = server_dict.get(ATTRIBUTE_NAME, [])
block_device_mapping_v2 = server_dict.get(ATTRIBUTE_NAME_V2, [])
if block_device_mapping and block_device_mapping_v2:
expl = _('Using different block_device_mapping syntaxes '
'is not allowed in the same request.')
raise exc.HTTPBadRequest(explanation=expl)
for bdm in block_device_mapping:
try:
block_device.validate_device_name(bdm.get("device_name"))
block_device.validate_and_default_volume_size(bdm)
except exception.InvalidBDMFormat as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
if 'delete_on_termination' in bdm:
bdm['delete_on_termination'] = strutils.bool_from_string(
bdm['delete_on_termination'])
if block_device_mapping:
create_kwargs['block_device_mapping'] = block_device_mapping
# Sets the legacy_bdm flag if we got a legacy block device mapping.
create_kwargs['legacy_bdm'] = True | from nova.i18n import _
from nova.openstack.common import strutils
ALIAS = "os-block-device-mapping-v1"
ATTRIBUTE_NAME = "block_device_mapping" | random_line_split |
carmen.py | #!/usr/bin/python
# Copyright 2013 Sumana Harihareswara
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
A silly game in Python. See the README.
"""
import sys
import random
import textwrap
import mock
def anykey():
x = raw_input("Press Return to continue. ") + "a"
class City(object):
"""Each City has a name, a set of destinations one step away, and a clue."""
def __init__(self, n, c):
self.dests = []
self.name = n
self.clue = c
class Villain(object):
def __init__(self):
self.name = random.choice(["Carmen", "Waldo", "Edward Snowden", "Lyra"])
self.location = random.choice([cbl, chmr, ftl])
class Hero(object):
|
class Game():
def __init__(self):
self.player = Hero()
self.nemesis = Villain()
def __repr__(self):
return """A session of the game, with a hero and villain set up with names and locations."""
def wincondition(self):
"""The player wins when s/he is in the same City as the nemesis."""
return self.player.location == self.nemesis.location
def playturn(self):
print "%s, you are now in %s and you can head to:" % (self.player.name, self.player.location.name)
self.where2go()
print "You ask around about %s and learn that %s" % (self.nemesis.name, self.nemesis.location.clue)
choice = raw_input('OK, now which way will you go? Choose a number. ')
self.choose(choice)
self.wincondition()
def wingame(self):
print "You found her in %s so you win!" % currentsession.nemesis.location.name
playagain=raw_input('Would you like to play again? Y/N: ')
if (playagain == "N") or (playagain == "n"):
sys.exit()
else:
self.player.location = ind
self.nemesis = Villain()
print "Get ready for a new game!"
anykey()
def where2go(self):
for i,x in enumerate(self.player.location.dests):
print "%d. %s" % (i+1, x.name)
def choose(self, path):
try:
path = int(path)
except ValueError:
print "That doesn't make sense, %s, because it's not the number for one of your possible destinations." % self.player.name
print "So you stay in %s." % self.player.location.name
return
if path < 1 or path > (len(self.player.location.dests)):
return "That doesn't make sense, %s, so you stay in %s." % (self.player.name, self.player.location.name)
else:
self.player.location = self.player.location.dests[path-1]
if self.wincondition(): self.wingame()
self.nemesis.location = random.choice(self.nemesis.location.dests)
return "You follow %s to %s." % (self.nemesis.name, self.player.location.name)
ind = City("Independence", "she thought she'd stock up for a journey -- bullets, yokes of oxen, and whatnot.")
sjo = City("Saint Joseph", "she had a headache and needed to find some baby aspirin.")
cbl = City("Council Bluffs", "she knew that you can't beat City Hall, but thought another municipal body might back down more easily.")
fkn = City("Fort Kearney", "she wanted to visit the easternmost point of the Platte River Valley's natural roadway.")
chmr = City("Chimney Rock", "the tow-headed woman was tired of spelunking and wanted to try climbing.")
ftl = City("Fort Laramie", "she had a lot of questions about the American Fur Company.")
vc = City("Virginia City", "she wanted to see the birthplace of Calamity Jane.")
sp = City("South Pass", "she said she was fixin' to cross the Continental Divide!")
slc = City("Salt Lake City", "she said she was planning on having coffee with the Prophet... they didn't have the heart to tell her.")
fh = City("Fort Hall", "she asked about the Snake River country.")
pdx = City("Portland", "she said she longed to see the future home of Open Source Bridge, the yearly conference by the Stumptown Syndicate.")
# Clue wit by Leonard. Thank you @leonardr.
ind.dests = [fkn]
sjo.dests = [fkn]
cbl.dests = [fkn]
fkn.dests = [cbl, ind, ftl, sjo, vc, chmr]
chmr.dests = [fkn]
ftl.dests = [vc, sp, fkn]
vc.dests = [ftl, fkn]
sp.dests = [fh, ftl, slc]
slc.dests = [sp, fh]
fh.dests = [sp, pdx, slc]
pdx.dests = [fh]
def test_bidirectionalpaths():
for city in [ind, sjo, cbl, fkn, chmr, ftl, vc, sp, slc, fh, pdx]:
for dest in city.dests:
try:
assert city in dest.dests
except AssertionError:
print "bidirectional fail! City" , city.name , "was not in" , dest.name , "destinations."
dest.dests.append(city)
print "fixed it!" , city.name , "now in destinations for" , dest.name , "in this list:", map(lambda x: x.name,dest.dests)
test_bidirectionalpaths()
def test_pathfinding():
# try to get ind-fkn-ftl-sp-slc-fh-pdx
# FIXME: does not work yet due to syntax error
try:
map(lambda x,y:assert y in x.dests,[[ind,fkn], [fkn,ftl], [ftl,sp], [sp,slc], [slc,fh], [fh,pdx]])
except AssertionError:
print "whoops!",y,"not in the destination list for",x
# would be good to do pathfinding
gpl = """You are now playing:
Where On The Oregon Trail is Carmen Sandiego?
Copyright (C) 2013 Sumana Harihareswara and licensed under the GNU Public License.
This program comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it under certain conditions; see https://www.gnu.org/licenses/gpl.txt for details."""
if __name__=="__main__":
print gpl
currentsession = Game()
currentrank = "Okay, %s, your current rank is: Carpenter. Welcome to %s." % (currentsession.player.name, currentsession.player.location.name)
print textwrap.fill(currentrank,70,replace_whitespace=False)
assert currentsession.nemesis != currentsession.player
anykey()
print "%s has stolen a wagon tongue and Interpol has assigned you to catch her! Get ready for a chase!" % currentsession.nemesis.name
while True:
currentsession.playturn()
| def __init__(self):
self.location = ind
self.name = raw_input("Detective at keyboard, identify yourself: ") | identifier_body |
carmen.py | #!/usr/bin/python
# Copyright 2013 Sumana Harihareswara
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License | # along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
A silly game in Python. See the README.
"""
import sys
import random
import textwrap
import mock
def anykey():
x = raw_input("Press Return to continue. ") + "a"
class City(object):
"""Each City has a name, a set of destinations one step away, and a clue."""
def __init__(self, n, c):
self.dests = []
self.name = n
self.clue = c
class Villain(object):
def __init__(self):
self.name = random.choice(["Carmen", "Waldo", "Edward Snowden", "Lyra"])
self.location = random.choice([cbl, chmr, ftl])
class Hero(object):
def __init__(self):
self.location = ind
self.name = raw_input("Detective at keyboard, identify yourself: ")
class Game():
def __init__(self):
self.player = Hero()
self.nemesis = Villain()
def __repr__(self):
return """A session of the game, with a hero and villain set up with names and locations."""
def wincondition(self):
"""The player wins when s/he is in the same City as the nemesis."""
return self.player.location == self.nemesis.location
def playturn(self):
print "%s, you are now in %s and you can head to:" % (self.player.name, self.player.location.name)
self.where2go()
print "You ask around about %s and learn that %s" % (self.nemesis.name, self.nemesis.location.clue)
choice = raw_input('OK, now which way will you go? Choose a number. ')
self.choose(choice)
self.wincondition()
def wingame(self):
print "You found her in %s so you win!" % currentsession.nemesis.location.name
playagain=raw_input('Would you like to play again? Y/N: ')
if (playagain == "N") or (playagain == "n"):
sys.exit()
else:
self.player.location = ind
self.nemesis = Villain()
print "Get ready for a new game!"
anykey()
def where2go(self):
for i,x in enumerate(self.player.location.dests):
print "%d. %s" % (i+1, x.name)
def choose(self, path):
try:
path = int(path)
except ValueError:
print "That doesn't make sense, %s, because it's not the number for one of your possible destinations." % self.player.name
print "So you stay in %s." % self.player.location.name
return
if path < 1 or path > (len(self.player.location.dests)):
return "That doesn't make sense, %s, so you stay in %s." % (self.player.name, self.player.location.name)
else:
self.player.location = self.player.location.dests[path-1]
if self.wincondition(): self.wingame()
self.nemesis.location = random.choice(self.nemesis.location.dests)
return "You follow %s to %s." % (self.nemesis.name, self.player.location.name)
ind = City("Independence", "she thought she'd stock up for a journey -- bullets, yokes of oxen, and whatnot.")
sjo = City("Saint Joseph", "she had a headache and needed to find some baby aspirin.")
cbl = City("Council Bluffs", "she knew that you can't beat City Hall, but thought another municipal body might back down more easily.")
fkn = City("Fort Kearney", "she wanted to visit the easternmost point of the Platte River Valley's natural roadway.")
chmr = City("Chimney Rock", "the tow-headed woman was tired of spelunking and wanted to try climbing.")
ftl = City("Fort Laramie", "she had a lot of questions about the American Fur Company.")
vc = City("Virginia City", "she wanted to see the birthplace of Calamity Jane.")
sp = City("South Pass", "she said she was fixin' to cross the Continental Divide!")
slc = City("Salt Lake City", "she said she was planning on having coffee with the Prophet... they didn't have the heart to tell her.")
fh = City("Fort Hall", "she asked about the Snake River country.")
pdx = City("Portland", "she said she longed to see the future home of Open Source Bridge, the yearly conference by the Stumptown Syndicate.")
# Clue wit by Leonard. Thank you @leonardr.
ind.dests = [fkn]
sjo.dests = [fkn]
cbl.dests = [fkn]
fkn.dests = [cbl, ind, ftl, sjo, vc, chmr]
chmr.dests = [fkn]
ftl.dests = [vc, sp, fkn]
vc.dests = [ftl, fkn]
sp.dests = [fh, ftl, slc]
slc.dests = [sp, fh]
fh.dests = [sp, pdx, slc]
pdx.dests = [fh]
def test_bidirectionalpaths():
for city in [ind, sjo, cbl, fkn, chmr, ftl, vc, sp, slc, fh, pdx]:
for dest in city.dests:
try:
assert city in dest.dests
except AssertionError:
print "bidirectional fail! City" , city.name , "was not in" , dest.name , "destinations."
dest.dests.append(city)
print "fixed it!" , city.name , "now in destinations for" , dest.name , "in this list:", map(lambda x: x.name,dest.dests)
test_bidirectionalpaths()
def test_pathfinding():
# try to get ind-fkn-ftl-sp-slc-fh-pdx
# FIXME: does not work yet due to syntax error
try:
map(lambda x,y:assert y in x.dests,[[ind,fkn], [fkn,ftl], [ftl,sp], [sp,slc], [slc,fh], [fh,pdx]])
except AssertionError:
print "whoops!",y,"not in the destination list for",x
# would be good to do pathfinding
gpl = """You are now playing:
Where On The Oregon Trail is Carmen Sandiego?
Copyright (C) 2013 Sumana Harihareswara and licensed under the GNU Public License.
This program comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it under certain conditions; see https://www.gnu.org/licenses/gpl.txt for details."""
if __name__=="__main__":
print gpl
currentsession = Game()
currentrank = "Okay, %s, your current rank is: Carpenter. Welcome to %s." % (currentsession.player.name, currentsession.player.location.name)
print textwrap.fill(currentrank,70,replace_whitespace=False)
assert currentsession.nemesis != currentsession.player
anykey()
print "%s has stolen a wagon tongue and Interpol has assigned you to catch her! Get ready for a chase!" % currentsession.nemesis.name
while True:
currentsession.playturn() | random_line_split |
|
carmen.py | #!/usr/bin/python
# Copyright 2013 Sumana Harihareswara
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
A silly game in Python. See the README.
"""
import sys
import random
import textwrap
import mock
def anykey():
x = raw_input("Press Return to continue. ") + "a"
class | (object):
"""Each City has a name, a set of destinations one step away, and a clue."""
def __init__(self, n, c):
self.dests = []
self.name = n
self.clue = c
class Villain(object):
def __init__(self):
self.name = random.choice(["Carmen", "Waldo", "Edward Snowden", "Lyra"])
self.location = random.choice([cbl, chmr, ftl])
class Hero(object):
def __init__(self):
self.location = ind
self.name = raw_input("Detective at keyboard, identify yourself: ")
class Game():
def __init__(self):
self.player = Hero()
self.nemesis = Villain()
def __repr__(self):
return """A session of the game, with a hero and villain set up with names and locations."""
def wincondition(self):
"""The player wins when s/he is in the same City as the nemesis."""
return self.player.location == self.nemesis.location
def playturn(self):
print "%s, you are now in %s and you can head to:" % (self.player.name, self.player.location.name)
self.where2go()
print "You ask around about %s and learn that %s" % (self.nemesis.name, self.nemesis.location.clue)
choice = raw_input('OK, now which way will you go? Choose a number. ')
self.choose(choice)
self.wincondition()
def wingame(self):
print "You found her in %s so you win!" % currentsession.nemesis.location.name
playagain=raw_input('Would you like to play again? Y/N: ')
if (playagain == "N") or (playagain == "n"):
sys.exit()
else:
self.player.location = ind
self.nemesis = Villain()
print "Get ready for a new game!"
anykey()
def where2go(self):
for i,x in enumerate(self.player.location.dests):
print "%d. %s" % (i+1, x.name)
def choose(self, path):
try:
path = int(path)
except ValueError:
print "That doesn't make sense, %s, because it's not the number for one of your possible destinations." % self.player.name
print "So you stay in %s." % self.player.location.name
return
if path < 1 or path > (len(self.player.location.dests)):
return "That doesn't make sense, %s, so you stay in %s." % (self.player.name, self.player.location.name)
else:
self.player.location = self.player.location.dests[path-1]
if self.wincondition(): self.wingame()
self.nemesis.location = random.choice(self.nemesis.location.dests)
return "You follow %s to %s." % (self.nemesis.name, self.player.location.name)
ind = City("Independence", "she thought she'd stock up for a journey -- bullets, yokes of oxen, and whatnot.")
sjo = City("Saint Joseph", "she had a headache and needed to find some baby aspirin.")
cbl = City("Council Bluffs", "she knew that you can't beat City Hall, but thought another municipal body might back down more easily.")
fkn = City("Fort Kearney", "she wanted to visit the easternmost point of the Platte River Valley's natural roadway.")
chmr = City("Chimney Rock", "the tow-headed woman was tired of spelunking and wanted to try climbing.")
ftl = City("Fort Laramie", "she had a lot of questions about the American Fur Company.")
vc = City("Virginia City", "she wanted to see the birthplace of Calamity Jane.")
sp = City("South Pass", "she said she was fixin' to cross the Continental Divide!")
slc = City("Salt Lake City", "she said she was planning on having coffee with the Prophet... they didn't have the heart to tell her.")
fh = City("Fort Hall", "she asked about the Snake River country.")
pdx = City("Portland", "she said she longed to see the future home of Open Source Bridge, the yearly conference by the Stumptown Syndicate.")
# Clue wit by Leonard. Thank you @leonardr.
ind.dests = [fkn]
sjo.dests = [fkn]
cbl.dests = [fkn]
fkn.dests = [cbl, ind, ftl, sjo, vc, chmr]
chmr.dests = [fkn]
ftl.dests = [vc, sp, fkn]
vc.dests = [ftl, fkn]
sp.dests = [fh, ftl, slc]
slc.dests = [sp, fh]
fh.dests = [sp, pdx, slc]
pdx.dests = [fh]
def test_bidirectionalpaths():
for city in [ind, sjo, cbl, fkn, chmr, ftl, vc, sp, slc, fh, pdx]:
for dest in city.dests:
try:
assert city in dest.dests
except AssertionError:
print "bidirectional fail! City" , city.name , "was not in" , dest.name , "destinations."
dest.dests.append(city)
print "fixed it!" , city.name , "now in destinations for" , dest.name , "in this list:", map(lambda x: x.name,dest.dests)
test_bidirectionalpaths()
def test_pathfinding():
# try to get ind-fkn-ftl-sp-slc-fh-pdx
# FIXME: does not work yet due to syntax error
try:
map(lambda x,y:assert y in x.dests,[[ind,fkn], [fkn,ftl], [ftl,sp], [sp,slc], [slc,fh], [fh,pdx]])
except AssertionError:
print "whoops!",y,"not in the destination list for",x
# would be good to do pathfinding
gpl = """You are now playing:
Where On The Oregon Trail is Carmen Sandiego?
Copyright (C) 2013 Sumana Harihareswara and licensed under the GNU Public License.
This program comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it under certain conditions; see https://www.gnu.org/licenses/gpl.txt for details."""
if __name__=="__main__":
print gpl
currentsession = Game()
currentrank = "Okay, %s, your current rank is: Carpenter. Welcome to %s." % (currentsession.player.name, currentsession.player.location.name)
print textwrap.fill(currentrank,70,replace_whitespace=False)
assert currentsession.nemesis != currentsession.player
anykey()
print "%s has stolen a wagon tongue and Interpol has assigned you to catch her! Get ready for a chase!" % currentsession.nemesis.name
while True:
currentsession.playturn()
| City | identifier_name |
carmen.py | #!/usr/bin/python
# Copyright 2013 Sumana Harihareswara
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
A silly game in Python. See the README.
"""
import sys
import random
import textwrap
import mock
def anykey():
x = raw_input("Press Return to continue. ") + "a"
class City(object):
"""Each City has a name, a set of destinations one step away, and a clue."""
def __init__(self, n, c):
self.dests = []
self.name = n
self.clue = c
class Villain(object):
def __init__(self):
self.name = random.choice(["Carmen", "Waldo", "Edward Snowden", "Lyra"])
self.location = random.choice([cbl, chmr, ftl])
class Hero(object):
def __init__(self):
self.location = ind
self.name = raw_input("Detective at keyboard, identify yourself: ")
class Game():
def __init__(self):
self.player = Hero()
self.nemesis = Villain()
def __repr__(self):
return """A session of the game, with a hero and villain set up with names and locations."""
def wincondition(self):
"""The player wins when s/he is in the same City as the nemesis."""
return self.player.location == self.nemesis.location
def playturn(self):
print "%s, you are now in %s and you can head to:" % (self.player.name, self.player.location.name)
self.where2go()
print "You ask around about %s and learn that %s" % (self.nemesis.name, self.nemesis.location.clue)
choice = raw_input('OK, now which way will you go? Choose a number. ')
self.choose(choice)
self.wincondition()
def wingame(self):
print "You found her in %s so you win!" % currentsession.nemesis.location.name
playagain=raw_input('Would you like to play again? Y/N: ')
if (playagain == "N") or (playagain == "n"):
sys.exit()
else:
self.player.location = ind
self.nemesis = Villain()
print "Get ready for a new game!"
anykey()
def where2go(self):
for i,x in enumerate(self.player.location.dests):
|
def choose(self, path):
try:
path = int(path)
except ValueError:
print "That doesn't make sense, %s, because it's not the number for one of your possible destinations." % self.player.name
print "So you stay in %s." % self.player.location.name
return
if path < 1 or path > (len(self.player.location.dests)):
return "That doesn't make sense, %s, so you stay in %s." % (self.player.name, self.player.location.name)
else:
self.player.location = self.player.location.dests[path-1]
if self.wincondition(): self.wingame()
self.nemesis.location = random.choice(self.nemesis.location.dests)
return "You follow %s to %s." % (self.nemesis.name, self.player.location.name)
ind = City("Independence", "she thought she'd stock up for a journey -- bullets, yokes of oxen, and whatnot.")
sjo = City("Saint Joseph", "she had a headache and needed to find some baby aspirin.")
cbl = City("Council Bluffs", "she knew that you can't beat City Hall, but thought another municipal body might back down more easily.")
fkn = City("Fort Kearney", "she wanted to visit the easternmost point of the Platte River Valley's natural roadway.")
chmr = City("Chimney Rock", "the tow-headed woman was tired of spelunking and wanted to try climbing.")
ftl = City("Fort Laramie", "she had a lot of questions about the American Fur Company.")
vc = City("Virginia City", "she wanted to see the birthplace of Calamity Jane.")
sp = City("South Pass", "she said she was fixin' to cross the Continental Divide!")
slc = City("Salt Lake City", "she said she was planning on having coffee with the Prophet... they didn't have the heart to tell her.")
fh = City("Fort Hall", "she asked about the Snake River country.")
pdx = City("Portland", "she said she longed to see the future home of Open Source Bridge, the yearly conference by the Stumptown Syndicate.")
# Clue wit by Leonard. Thank you @leonardr.
ind.dests = [fkn]
sjo.dests = [fkn]
cbl.dests = [fkn]
fkn.dests = [cbl, ind, ftl, sjo, vc, chmr]
chmr.dests = [fkn]
ftl.dests = [vc, sp, fkn]
vc.dests = [ftl, fkn]
sp.dests = [fh, ftl, slc]
slc.dests = [sp, fh]
fh.dests = [sp, pdx, slc]
pdx.dests = [fh]
def test_bidirectionalpaths():
for city in [ind, sjo, cbl, fkn, chmr, ftl, vc, sp, slc, fh, pdx]:
for dest in city.dests:
try:
assert city in dest.dests
except AssertionError:
print "bidirectional fail! City" , city.name , "was not in" , dest.name , "destinations."
dest.dests.append(city)
print "fixed it!" , city.name , "now in destinations for" , dest.name , "in this list:", map(lambda x: x.name,dest.dests)
test_bidirectionalpaths()
def test_pathfinding():
# try to get ind-fkn-ftl-sp-slc-fh-pdx
# FIXME: does not work yet due to syntax error
try:
map(lambda x,y:assert y in x.dests,[[ind,fkn], [fkn,ftl], [ftl,sp], [sp,slc], [slc,fh], [fh,pdx]])
except AssertionError:
print "whoops!",y,"not in the destination list for",x
# would be good to do pathfinding
gpl = """You are now playing:
Where On The Oregon Trail is Carmen Sandiego?
Copyright (C) 2013 Sumana Harihareswara and licensed under the GNU Public License.
This program comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it under certain conditions; see https://www.gnu.org/licenses/gpl.txt for details."""
if __name__=="__main__":
print gpl
currentsession = Game()
currentrank = "Okay, %s, your current rank is: Carpenter. Welcome to %s." % (currentsession.player.name, currentsession.player.location.name)
print textwrap.fill(currentrank,70,replace_whitespace=False)
assert currentsession.nemesis != currentsession.player
anykey()
print "%s has stolen a wagon tongue and Interpol has assigned you to catch her! Get ready for a chase!" % currentsession.nemesis.name
while True:
currentsession.playturn()
| print "%d. %s" % (i+1, x.name) | conditional_block |
use_sax.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from xml.parsers.expat import ParserCreate
class DefaultSaxHandler(object):
def start_element(self, name, attrs):
print('sax:start_element: %s, attrs: %s' % (name, str(attrs)))
def | (self, name):
print('sax:end_element: %s' % name)
def char_data(self, text):
print('sax:char_data: %s' % text)
xml = r'''<?xml version="1.0"?>
<ol>
<li><a href="/python">Python</a></li>
<li><a href="/ruby">Ruby</a></li>
</ol>
'''
handler = DefaultSaxHandler()
parser = ParserCreate()
parser.StartElementHandler = handler.start_element
parser.EndElementHandler = handler.end_element
parser.CharacterDataHandler = handler.char_data
parser.Parse(xml)
| end_element | identifier_name |
use_sax.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from xml.parsers.expat import ParserCreate
class DefaultSaxHandler(object):
def start_element(self, name, attrs):
|
def end_element(self, name):
print('sax:end_element: %s' % name)
def char_data(self, text):
print('sax:char_data: %s' % text)
xml = r'''<?xml version="1.0"?>
<ol>
<li><a href="/python">Python</a></li>
<li><a href="/ruby">Ruby</a></li>
</ol>
'''
handler = DefaultSaxHandler()
parser = ParserCreate()
parser.StartElementHandler = handler.start_element
parser.EndElementHandler = handler.end_element
parser.CharacterDataHandler = handler.char_data
parser.Parse(xml)
| print('sax:start_element: %s, attrs: %s' % (name, str(attrs))) | identifier_body |
use_sax.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from xml.parsers.expat import ParserCreate
class DefaultSaxHandler(object):
def start_element(self, name, attrs):
print('sax:start_element: %s, attrs: %s' % (name, str(attrs)))
def end_element(self, name):
print('sax:end_element: %s' % name)
def char_data(self, text): | xml = r'''<?xml version="1.0"?>
<ol>
<li><a href="/python">Python</a></li>
<li><a href="/ruby">Ruby</a></li>
</ol>
'''
handler = DefaultSaxHandler()
parser = ParserCreate()
parser.StartElementHandler = handler.start_element
parser.EndElementHandler = handler.end_element
parser.CharacterDataHandler = handler.char_data
parser.Parse(xml) | print('sax:char_data: %s' % text)
| random_line_split |
screenbuffer-rect-test.js | /*
Terminal Kit
Copyright (c) 2009 - 2021 Cédric Ronvel
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
"use strict" ;
var termkit = require( '../lib/termkit.js' ) ;
var term = termkit.terminal ;
var ScreenBuffer = termkit.ScreenBuffer ;
var Rect = termkit.Rect ;
describe( "ScreenBuffer.Rect" , function() {
it( "Rect.create( Terminal )" , function() {
expect( Rect.create( term ) ).to.be.like( {
xmin: 1 ,
ymin: 1 ,
xmax: term.width ,
ymax: term.height ,
width: term.width ,
height: term.height ,
isNull: false
} ) ;
} ) ;
it( "Rect.create( xmin , ymin , xmax , ymax )" , function() {
expect( Rect.create( 1 , 2 , 3 , 4 ) ).to.be.like( {
xmin: 1 ,
ymin: 2 ,
xmax: 3 ,
ymax: 4 ,
width: 3 ,
height: 3 ,
isNull: false
} ) ;
} ) ;
it( ".clip() should adjust accordingly" , function() {
var srcRect , dstRect ;
dstRect = Rect.create( { xmin: 0 , ymin: 20 , xmax: 25 , ymax: 45 , isNull: false } ) ;
srcRect = Rect.create( { xmin: 10 , ymin: 10 , xmax: 30 , ymax: 40 , isNull: false } ) ;
srcRect.clip( dstRect , 0 , 0 , true ) ;
expect( dstRect ).to.be.like( { xmin: 10, ymin: 20, xmax: 25, ymax: 40 , width: 16 , height: 21 , isNull: false } ) ;
expect( srcRect ).to.be.like( { xmin: 10, ymin: 20, xmax: 25, ymax: 40 , width: 16 , height: 21 , isNull: false } ) ;
dstRect = Rect.create( { xmin: 0 , ymin: 20 , xmax: 25 , ymax: 45 } ) ;
srcRect = Rect.create( { xmin: 10 , ymin: 10 , xmax: 30 , ymax: 40 } ) ;
srcRect.clip( dstRect , 5 , 0 , true ) ;
expect( dstRect ).to.be.like( { xmin: 15, ymin: 20, xmax: 25, ymax: 40 , width: 11 , height: 21 , isNull: false } ) ;
expect( srcRect ).to.be.like( { xmin: 10, ymin: 20, xmax: 20, ymax: 40 , width: 11 , height: 21 , isNull: false } ) ;
dstRect = Rect.create( { xmin: 0 , ymin: 20 , xmax: 25 , ymax: 45 } ) ;
srcRect = Rect.create( { xmin: 10 , ymin: 10 , xmax: 30 , ymax: 40 } ) ;
srcRect.clip( dstRect , -8 , 0 , true ) ;
expect( dstRect ).to.be.like( { xmin: 2, ymin: 20, xmax: 22, ymax: 40 , width: 21 , height: 21 , isNull: false } ) ;
expect( srcRect ).to.be.like( { xmin: 10, ymin: 20, xmax: 30, ymax: 40 , width: 21 , height: 21 , isNull: false } ) ;
dstRect = Rect.create( { xmin: 0 , ymin: 20 , xmax: 25 , ymax: 45 } ) ;
srcRect = Rect.create( { xmin: 10 , ymin: 10 , xmax: 30 , ymax: 40 } ) ;
srcRect.clip( dstRect , -31 , 0 , true ) ;
expect( dstRect.isNull ).to.be( true ) ;
expect( srcRect.isNull ).to.be( true ) ;
dstRect = Rect.create( { xmin: 0 , ymin: 20 , xmax: 25 , ymax: 45 } ) ;
srcRect = Rect.create( { xmin: 10 , ymin: 10 , xmax: 30 , ymax: 40 } ) ;
srcRect.clip( dstRect , -8 , 5 , true ) ;
expect( dstRect ).to.be.like( { xmin: 2, ymin: 20, xmax: 22, ymax: 45 , width: 21 , height: 26 , isNull: false } ) ;
expect( srcRect ).to.be.like( { xmin: 10, ymin: 15, xmax: 30, ymax: 40 , width: 21 , height: 26 , isNull: false } ) ;
dstRect = Rect.create( { xmin: 0 , ymin: 20 , xmax: 25 , ymax: 45 } ) ;
srcRect = Rect.create( { xmin: 10 , ymin: 10 , xmax: 30 , ymax: 40 } ) ; | } ) ; | srcRect.clip( dstRect , 0 , -21 , true ) ;
expect( dstRect.isNull ).to.be( true ) ;
expect( srcRect.isNull ).to.be( true ) ;
} ) ; | random_line_split |
app.js | 'use strict';
angular.module('plankApp', [
'ngCookies',
'ngResource',
'ngSanitize',
'ui.router',
'ui.bootstrap'
])
.config(function ($stateProvider, $urlRouterProvider, $locationProvider, $httpProvider) {
$urlRouterProvider.otherwise('/');
$locationProvider.html5Mode(true); // true mean drop the # on urls
$httpProvider.interceptors.push('authInterceptor');
})
.factory('authInterceptor', function ($rootScope, $q, $cookieStore, $location) {
return {
// Add authorization token to headers
request: function (config) { | config.headers = config.headers || {};
if ($cookieStore.get('token')) {
config.headers.Authorization = 'Bearer ' + $cookieStore.get('token');
}
return config;
},
// Intercept 401s and redirect you to login
responseError: function(response) {
if(response.status === 401) {
$location.path('/login');
// remove any stale tokens
$cookieStore.remove('token');
return $q.reject(response);
}
else {
return $q.reject(response);
}
}
};
})
.run(function ($rootScope, $location, Auth, $state) {
// Redirect to login if route requires auth and you're not logged in
$rootScope.$on('$stateChangeStart', function (event, next) {
Auth.isLoggedInAsync(function(loggedIn) {
if (next.authenticate && !loggedIn) {
$location.path('/login');
}
});
});
$rootScope.isLoggedIn = Auth.isLoggedIn;
$rootScope.isAdmin = Auth.isAdmin;
$rootScope.getCurrentUser = Auth.getCurrentUser;
$rootScope.logout = function() {
Auth.logout();
$location.path('/login');
};
$rootScope.state = function() {
return $state.$current.name;
};
}); | random_line_split |
|
app.js | 'use strict';
angular.module('plankApp', [
'ngCookies',
'ngResource',
'ngSanitize',
'ui.router',
'ui.bootstrap'
])
.config(function ($stateProvider, $urlRouterProvider, $locationProvider, $httpProvider) {
$urlRouterProvider.otherwise('/');
$locationProvider.html5Mode(true); // true mean drop the # on urls
$httpProvider.interceptors.push('authInterceptor');
})
.factory('authInterceptor', function ($rootScope, $q, $cookieStore, $location) {
return {
// Add authorization token to headers
request: function (config) {
config.headers = config.headers || {};
if ($cookieStore.get('token')) {
config.headers.Authorization = 'Bearer ' + $cookieStore.get('token');
}
return config;
},
// Intercept 401s and redirect you to login
responseError: function(response) {
if(response.status === 401) {
$location.path('/login');
// remove any stale tokens
$cookieStore.remove('token');
return $q.reject(response);
}
else |
}
};
})
.run(function ($rootScope, $location, Auth, $state) {
// Redirect to login if route requires auth and you're not logged in
$rootScope.$on('$stateChangeStart', function (event, next) {
Auth.isLoggedInAsync(function(loggedIn) {
if (next.authenticate && !loggedIn) {
$location.path('/login');
}
});
});
$rootScope.isLoggedIn = Auth.isLoggedIn;
$rootScope.isAdmin = Auth.isAdmin;
$rootScope.getCurrentUser = Auth.getCurrentUser;
$rootScope.logout = function() {
Auth.logout();
$location.path('/login');
};
$rootScope.state = function() {
return $state.$current.name;
};
}); | {
return $q.reject(response);
} | conditional_block |
dir_68267d1309a1af8e8297ef4c3efbcdba.js | ] ],
[ "BeagleGooP.cpp", "_beagle_goo_p_8cpp.html", "_beagle_goo_p_8cpp" ],
[ "GPIOoo.cpp", "_g_p_i_ooo_8cpp.html", null ],
[ "gpiotest.c", "gpiotest_8c.html", "gpiotest_8c" ],
[ "HD44780.cpp", "_h_d44780_8cpp.html", null ],
[ "HD44780gpioPhy.cpp", "_h_d44780gpio_phy_8cpp.html", "_h_d44780gpio_phy_8cpp" ],
[ "SPI.cpp", "_s_p_i_8cpp.html", "_s_p_i_8cpp" ],
[ "TLC5946chain.cpp", "_t_l_c5946chain_8cpp.html", null ],
[ "TLC5946phy.cpp", "_t_l_c5946phy_8cpp.html", null ],
[ "TLC5946PRUSSphy.cpp", "_t_l_c5946_p_r_u_s_sphy_8cpp.html", null ]
]; | var dir_68267d1309a1af8e8297ef4c3efbcdba =
[
[ "BeagleGoo.cpp", "_beagle_goo_8cpp.html", [
[ "BeagleGoo", "struct_beagle_goo.html", "struct_beagle_goo" ] | random_line_split |
|
timestamp.ts | import { OperatorFunction, Timestamp as TimestampInterface, TimestampProvider, Timestamp } from '../types';
import { map } from './map';
/**
* Attaches a timestamp to each item emitted by an observable indicating when it was emitted
*
* The `timestamp` operator maps the *source* observable stream to an object of type
* `{value: T, timestamp: R}`. The properties are generically typed. The `value` property contains the value
* and type of the *source* observable. The `timestamp` is generated by the schedulers `now` function. By
* default it uses the *async* scheduler which simply returns `Date.now()` (milliseconds since 1970/01/01
* 00:00:00:000) and therefore is of type `number`.
*
* 
*
* ## Example
*
* In this example there is a timestamp attached to the documents click event.
*
* ```ts
* import { fromEvent } from 'rxjs';
* import { timestamp } from 'rxjs/operators';
*
* const clickWithTimestamp = fromEvent(document, 'click').pipe(
* timestamp()
* );
*
* // Emits data of type {value: MouseEvent, timestamp: number}
* clickWithTimestamp.subscribe(data => {
* console.log(data);
* });
* ```
*
* @param timestampProvider An object with a `now()` method used to get the current timestamp.
*/
export function timestamp<T>(timestampProvider: TimestampProvider = Date): OperatorFunction<T, Timestamp<T>> | {
return map((value: T) => ({ value, timestamp: timestampProvider.now()}));
} | identifier_body |
|
timestamp.ts | import { OperatorFunction, Timestamp as TimestampInterface, TimestampProvider, Timestamp } from '../types';
import { map } from './map';
/**
* Attaches a timestamp to each item emitted by an observable indicating when it was emitted
*
* The `timestamp` operator maps the *source* observable stream to an object of type
* `{value: T, timestamp: R}`. The properties are generically typed. The `value` property contains the value
* and type of the *source* observable. The `timestamp` is generated by the schedulers `now` function. By
* default it uses the *async* scheduler which simply returns `Date.now()` (milliseconds since 1970/01/01
* 00:00:00:000) and therefore is of type `number`.
*
* 
*
* ## Example
*
* In this example there is a timestamp attached to the documents click event.
*
* ```ts
* import { fromEvent } from 'rxjs';
* import { timestamp } from 'rxjs/operators';
*
* const clickWithTimestamp = fromEvent(document, 'click').pipe(
* timestamp()
* );
*
* // Emits data of type {value: MouseEvent, timestamp: number}
* clickWithTimestamp.subscribe(data => {
* console.log(data);
* });
* ```
*
* @param timestampProvider An object with a `now()` method used to get the current timestamp.
*/
export function | <T>(timestampProvider: TimestampProvider = Date): OperatorFunction<T, Timestamp<T>> {
return map((value: T) => ({ value, timestamp: timestampProvider.now()}));
} | timestamp | identifier_name |
timestamp.ts | import { OperatorFunction, Timestamp as TimestampInterface, TimestampProvider, Timestamp } from '../types';
import { map } from './map';
/**
* Attaches a timestamp to each item emitted by an observable indicating when it was emitted
*
* The `timestamp` operator maps the *source* observable stream to an object of type
* `{value: T, timestamp: R}`. The properties are generically typed. The `value` property contains the value
* and type of the *source* observable. The `timestamp` is generated by the schedulers `now` function. By
* default it uses the *async* scheduler which simply returns `Date.now()` (milliseconds since 1970/01/01
* 00:00:00:000) and therefore is of type `number`.
*
* 
*
* ## Example
*
* In this example there is a timestamp attached to the documents click event.
*
* ```ts
* import { fromEvent } from 'rxjs';
* import { timestamp } from 'rxjs/operators';
*
* const clickWithTimestamp = fromEvent(document, 'click').pipe(
* timestamp()
* );
*
* // Emits data of type {value: MouseEvent, timestamp: number}
* clickWithTimestamp.subscribe(data => {
* console.log(data);
* });
* ```
*
* @param timestampProvider An object with a `now()` method used to get the current timestamp.
*/
export function timestamp<T>(timestampProvider: TimestampProvider = Date): OperatorFunction<T, Timestamp<T>> { | } | return map((value: T) => ({ value, timestamp: timestampProvider.now()})); | random_line_split |
regions-free-region-ordering-callee.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests that callees correctly infer an ordering between free regions
// that appear in their parameter list. See also
// regions-free-region-ordering-caller.rs
fn ordering1<'a, 'b>(x: &'a &'b uint) -> &'a uint {
// It is safe to assume that 'a <= 'b due to the type of x
let y: &'b uint = &**x;
return y;
}
fn ordering2<'a, 'b>(x: &'a &'b uint, y: &'a uint) -> &'b uint {
// However, it is not safe to assume that 'b <= 'a
&*y //~ ERROR cannot infer
}
fn ordering3<'a, 'b>(x: &'a uint, y: &'b uint) -> &'a &'b uint {
// Do not infer an ordering from the return value.
let z: &'b uint = &*x;
//~^ ERROR cannot infer
fail!();
} |
fn ordering4<'a, 'b>(a: &'a uint, b: &'b uint, x: |&'a &'b uint|) {
// Do not infer ordering from closure argument types.
let z: Option<&'a &'b uint> = None;
//~^ ERROR reference has a longer lifetime than the data it references
}
fn ordering5<'a, 'b>(a: &'a uint, b: &'b uint, x: Option<&'a &'b uint>) {
let z: Option<&'a &'b uint> = None;
}
fn main() {} | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.