file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-12.1k) | suffix (large_string, lengths 0-12k) | middle (large_string, lengths 0-7.51k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
packed-struct-vec.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-android: FIXME(#9116) Bus error
use std::sys;
#[packed]
#[deriving(Eq)]
struct Foo {
bar: u8,
baz: u64
}
fn main() | {
let foos = [Foo { bar: 1, baz: 2 }, .. 10];
assert_eq!(sys::size_of::<[Foo, .. 10]>(), 90);
for i in range(0u, 10) {
assert_eq!(foos[i], Foo { bar: 1, baz: 2});
}
for &foo in foos.iter() {
assert_eq!(foo, Foo { bar: 1, baz: 2 });
}
} | identifier_body |
|
GroupStore.js | /*
* Copyright (C) 2015 Actor LLC. <https://actor.im>
*/
import { EventEmitter } from 'events';
import ActorClient from 'utils/ActorClient';
import DialogStore from 'stores/DialogStore'
import { register, waitFor } from 'dispatcher/ActorAppDispatcher';
import { ActionTypes, AsyncActionStates } from 'constants/ActorAppConstants';
const CHANGE_EVENT = 'change';
let _integrationToken = null;
class GroupStore extends EventEmitter {
getGroup(gid) {
return ActorClient.getGroup(gid);
}
getIntegrationToken() {
return _integrationToken;
}
emitChange() {
this.emit(CHANGE_EVENT);
}
addChangeListener(callback) {
this.on(CHANGE_EVENT, callback);
}
| (callback) {
this.removeListener(CHANGE_EVENT, callback);
}
}
let GroupStoreInstance = new GroupStore();
GroupStoreInstance.dispatchToken = register(action => {
switch (action.type) {
case ActionTypes.LEFT_GROUP:
GroupStoreInstance.emitChange();
break;
case ActionTypes.GET_INTEGRATION_TOKEN:
waitFor([DialogStore.dispatchToken]);
GroupStoreInstance.emitChange();
break;
case ActionTypes.GET_INTEGRATION_TOKEN_SUCCESS:
_integrationToken = action.response;
GroupStoreInstance.emitChange();
break;
case ActionTypes.GET_INTEGRATION_TOKEN_ERROR:
_integrationToken = null;
GroupStoreInstance.emitChange();
break;
}
});
export default GroupStoreInstance;
| removeChangeListener | identifier_name |
GroupStore.js | /*
* Copyright (C) 2015 Actor LLC. <https://actor.im>
*/
import { EventEmitter } from 'events';
import ActorClient from 'utils/ActorClient';
import DialogStore from 'stores/DialogStore'
import { register, waitFor } from 'dispatcher/ActorAppDispatcher';
import { ActionTypes, AsyncActionStates } from 'constants/ActorAppConstants';
const CHANGE_EVENT = 'change';
let _integrationToken = null;
class GroupStore extends EventEmitter { | return ActorClient.getGroup(gid);
}
getIntegrationToken() {
return _integrationToken;
}
emitChange() {
this.emit(CHANGE_EVENT);
}
addChangeListener(callback) {
this.on(CHANGE_EVENT, callback);
}
removeChangeListener(callback) {
this.removeListener(CHANGE_EVENT, callback);
}
}
let GroupStoreInstance = new GroupStore();
GroupStoreInstance.dispatchToken = register(action => {
switch (action.type) {
case ActionTypes.LEFT_GROUP:
GroupStoreInstance.emitChange();
break;
case ActionTypes.GET_INTEGRATION_TOKEN:
waitFor([DialogStore.dispatchToken]);
GroupStoreInstance.emitChange();
break;
case ActionTypes.GET_INTEGRATION_TOKEN_SUCCESS:
_integrationToken = action.response;
GroupStoreInstance.emitChange();
break;
case ActionTypes.GET_INTEGRATION_TOKEN_ERROR:
_integrationToken = null;
GroupStoreInstance.emitChange();
break;
}
});
export default GroupStoreInstance; | getGroup(gid) { | random_line_split |
GroupStore.js | /*
* Copyright (C) 2015 Actor LLC. <https://actor.im>
*/
import { EventEmitter } from 'events';
import ActorClient from 'utils/ActorClient';
import DialogStore from 'stores/DialogStore'
import { register, waitFor } from 'dispatcher/ActorAppDispatcher';
import { ActionTypes, AsyncActionStates } from 'constants/ActorAppConstants';
const CHANGE_EVENT = 'change';
let _integrationToken = null;
class GroupStore extends EventEmitter {
getGroup(gid) {
return ActorClient.getGroup(gid);
}
getIntegrationToken() {
return _integrationToken;
}
emitChange() {
this.emit(CHANGE_EVENT);
}
addChangeListener(callback) |
removeChangeListener(callback) {
this.removeListener(CHANGE_EVENT, callback);
}
}
let GroupStoreInstance = new GroupStore();
GroupStoreInstance.dispatchToken = register(action => {
switch (action.type) {
case ActionTypes.LEFT_GROUP:
GroupStoreInstance.emitChange();
break;
case ActionTypes.GET_INTEGRATION_TOKEN:
waitFor([DialogStore.dispatchToken]);
GroupStoreInstance.emitChange();
break;
case ActionTypes.GET_INTEGRATION_TOKEN_SUCCESS:
_integrationToken = action.response;
GroupStoreInstance.emitChange();
break;
case ActionTypes.GET_INTEGRATION_TOKEN_ERROR:
_integrationToken = null;
GroupStoreInstance.emitChange();
break;
}
});
export default GroupStoreInstance;
| {
this.on(CHANGE_EVENT, callback);
} | identifier_body |
localeTool.js | var path = require('path');
var fs = require('fs');
var localeDir = path.resolve(__dirname, '../../src/config/i18n');
var distFile = path.resolve(__dirname, '../tmp/i18n.json');
/**
* 查找所有的多语言文件
*/
var findLangs = function(dir) {
if (!fs.existsSync(dir)) {
return null;
}
var dirs = fs.readdirSync(dir);
if (!dirs || dirs.length === 0) {
return null;
}
return dirs.filter(function(fileName) {
return /\.json/.test(fileName);
}).map(function(fileName) {
return path.resolve(localeDir, fileName); | */
var mergeLangs = function(files) {
if (!files || files.length === 0) {
return {cn: {}};
}
return files.reduce(function(ret, item) {
var reg = /(\w+)\.json$/;
var m = item.match(reg);
if (m && m.length === 2) {
var langName = m[1];
var content = fs.readFileSync(item, 'utf-8');
ret[langName] = JSON.parse(content);
}
return ret;
}, {});
};
module.exports = {
make: function() {
var merged = mergeLangs(findLangs(localeDir));
fs.writeFileSync(distFile, JSON.stringify(merged), 'utf-8');
}
}; | });
};
/**
* 合并多语言文件到 locales.json | random_line_split |
localeTool.js | var path = require('path');
var fs = require('fs');
var localeDir = path.resolve(__dirname, '../../src/config/i18n');
var distFile = path.resolve(__dirname, '../tmp/i18n.json');
/**
* 查找所有的多语言文件
*/
var findLangs = function(dir) {
if (!fs.existsSync(dir)) {
return nul | readdirSync(dir);
if (!dirs || dirs.length === 0) {
return null;
}
return dirs.filter(function(fileName) {
return /\.json/.test(fileName);
}).map(function(fileName) {
return path.resolve(localeDir, fileName);
});
};
/**
* 合并多语言文件到 locales.json
*/
var mergeLangs = function(files) {
if (!files || files.length === 0) {
return {cn: {}};
}
return files.reduce(function(ret, item) {
var reg = /(\w+)\.json$/;
var m = item.match(reg);
if (m && m.length === 2) {
var langName = m[1];
var content = fs.readFileSync(item, 'utf-8');
ret[langName] = JSON.parse(content);
}
return ret;
}, {});
};
module.exports = {
make: function() {
var merged = mergeLangs(findLangs(localeDir));
fs.writeFileSync(distFile, JSON.stringify(merged), 'utf-8');
}
};
| l;
}
var dirs = fs. | conditional_block |
index.d.ts | // Type definitions for jsUri 1.3
// Project: https://github.com/derek-watson/jsUri
// Definitions by: Chris Charabaruk <https://github.com/coldacid>, Florian Wagner <https://github.com/flqw>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
declare namespace jsuri {
type Primitive = string | number | boolean;
export class Uri {
/**
* Creates a new Uri object
* @constructor
* @param {string} str
*/
constructor(str?: string);
/**
* Define getter/setter methods
*/
protocol(val?: string): string;
userInfo(val?: string): string;
host(val?: string): string;
port(val?: number): number;
path(val?: string): string;
anchor(val?: string): string;
/**
* if there is no protocol, the leading // can be enabled or disabled
* @param {Boolean} val
* @return {Boolean}
*/
hasAuthorityPrefix(val?: boolean): boolean;
isColonUri(val?: boolean): boolean;
/**
* Serializes the internal state of the query pairs
* @param {string} [val] set a new query string
* @return {string} query string
*/
query(val?: string): string;
/**
* returns the first query param value found for the key
* @param {string} key query key
* @return {string} first value found for key
*/
getQueryParamValue(key: string): string;
/**
* returns an array of query param values for the key
* @param {string} key query key
* @return {array} array of values
*/
getQueryParamValues(key: string): string[];
/**
* removes query parameters
* @param {string} key remove values for key
* @param {val} [val] remove a specific value, otherwise removes all
* @return {Uri} returns self for fluent chaining
*/
deleteQueryParam(key: string, val?: string): Uri;
/**
* adds a query parameter
* @param {string} key add values for key
* @param {string} val value to add
* @param {integer} [index] specific index to add the value at
* @return {Uri} returns self for fluent chaining
*/
addQueryParam(key: string, val: Primitive, index?: number): Uri;
/**
* test for the existence of a query parameter
* @param {string} key check values for key
* @return {Boolean} true if key exists, otherwise false
*/
hasQueryParam(key: string): boolean;
/** | * @param {string} [oldVal] replace only one specific value (otherwise replaces all)
* @return {Uri} returns self for fluent chaining
*/
replaceQueryParam(key: string, newVal: Primitive, oldVal?: Primitive): Uri;
/**
* Define fluent setter methods (setProtocol, setHasAuthorityPrefix, etc)
*/
setProtocol(val: string): Uri;
setHasAuthorityPrefix(val: boolean): Uri;
setIsColonUri(val: boolean): Uri;
setUserInfo(val: string): Uri;
setHost(val: string): Uri;
setPort(val: number): Uri;
setPath(val: string): Uri;
setQuery(val: string): Uri;
setAnchor(val: string): Uri;
/**
* Scheme name, colon and doubleslash, as required
* @return {string} http:// or possibly just //
*/
scheme(): string;
/**
* Same as Mozilla nsIURI.prePath
* @return {string} scheme://user:password@host:port
* @see https://developer.mozilla.org/en/nsIURI
*/
origin(): string;
/**
* Adds a trailing slash to the path
*/
addTrailingSlash(): Uri;
/**
* Serializes the internal state of the Uri object
* @return {string}
*/
toString(): string;
/**
* Clone a Uri object
* @return {Uri} duplicate copy of the Uri
*/
clone(): Uri;
}
}
declare type Uri = jsuri.Uri;
declare module 'jsuri' {
export = jsuri.Uri;
}
declare module 'jsUri' {
export = jsuri.Uri;
} | * replaces query param values
* @param {string} key key to replace value for
* @param {string} newVal new value | random_line_split |
index.d.ts | // Type definitions for jsUri 1.3
// Project: https://github.com/derek-watson/jsUri
// Definitions by: Chris Charabaruk <https://github.com/coldacid>, Florian Wagner <https://github.com/flqw>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
declare namespace jsuri {
type Primitive = string | number | boolean;
export class | {
/**
* Creates a new Uri object
* @constructor
* @param {string} str
*/
constructor(str?: string);
/**
* Define getter/setter methods
*/
protocol(val?: string): string;
userInfo(val?: string): string;
host(val?: string): string;
port(val?: number): number;
path(val?: string): string;
anchor(val?: string): string;
/**
* if there is no protocol, the leading // can be enabled or disabled
* @param {Boolean} val
* @return {Boolean}
*/
hasAuthorityPrefix(val?: boolean): boolean;
isColonUri(val?: boolean): boolean;
/**
* Serializes the internal state of the query pairs
* @param {string} [val] set a new query string
* @return {string} query string
*/
query(val?: string): string;
/**
* returns the first query param value found for the key
* @param {string} key query key
* @return {string} first value found for key
*/
getQueryParamValue(key: string): string;
/**
* returns an array of query param values for the key
* @param {string} key query key
* @return {array} array of values
*/
getQueryParamValues(key: string): string[];
/**
* removes query parameters
* @param {string} key remove values for key
* @param {val} [val] remove a specific value, otherwise removes all
* @return {Uri} returns self for fluent chaining
*/
deleteQueryParam(key: string, val?: string): Uri;
/**
* adds a query parameter
* @param {string} key add values for key
* @param {string} val value to add
* @param {integer} [index] specific index to add the value at
* @return {Uri} returns self for fluent chaining
*/
addQueryParam(key: string, val: Primitive, index?: number): Uri;
/**
* test for the existence of a query parameter
* @param {string} key check values for key
* @return {Boolean} true if key exists, otherwise false
*/
hasQueryParam(key: string): boolean;
/**
* replaces query param values
* @param {string} key key to replace value for
* @param {string} newVal new value
* @param {string} [oldVal] replace only one specific value (otherwise replaces all)
* @return {Uri} returns self for fluent chaining
*/
replaceQueryParam(key: string, newVal: Primitive, oldVal?: Primitive): Uri;
/**
* Define fluent setter methods (setProtocol, setHasAuthorityPrefix, etc)
*/
setProtocol(val: string): Uri;
setHasAuthorityPrefix(val: boolean): Uri;
setIsColonUri(val: boolean): Uri;
setUserInfo(val: string): Uri;
setHost(val: string): Uri;
setPort(val: number): Uri;
setPath(val: string): Uri;
setQuery(val: string): Uri;
setAnchor(val: string): Uri;
/**
* Scheme name, colon and doubleslash, as required
* @return {string} http:// or possibly just //
*/
scheme(): string;
/**
* Same as Mozilla nsIURI.prePath
* @return {string} scheme://user:password@host:port
* @see https://developer.mozilla.org/en/nsIURI
*/
origin(): string;
/**
* Adds a trailing slash to the path
*/
addTrailingSlash(): Uri;
/**
* Serializes the internal state of the Uri object
* @return {string}
*/
toString(): string;
/**
* Clone a Uri object
* @return {Uri} duplicate copy of the Uri
*/
clone(): Uri;
}
}
declare type Uri = jsuri.Uri;
declare module 'jsuri' {
export = jsuri.Uri;
}
declare module 'jsUri' {
export = jsuri.Uri;
}
| Uri | identifier_name |
references.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{
paths::{self, Path},
shared::*,
};
use std::{
cmp::Ordering,
collections::{BTreeMap, BTreeSet},
fmt,
fmt::Debug,
};
//**************************************************************************************************
// Definitions
//**************************************************************************************************
/// Unique identifier for the reference
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct RefID(pub(crate) usize);
impl RefID {
/// Creates a new reference id from the given number
pub const fn new(x: usize) -> Self {
RefID(x)
}
/// Returns the number representing this reference id.
pub fn number(&self) -> usize {
self.0
}
}
/// An edge in the borrow graph
#[derive(Clone)]
pub(crate) struct BorrowEdge<Loc: Copy, Lbl: Clone + Ord> {
/// true if it is an exact (strong) edge,
/// false if it is a prefix (weak) edge
pub(crate) strong: bool,
/// The path (either exact/prefix strong/weak) for the borrow relationship of this edge
pub(crate) path: Path<Lbl>,
/// Location information for the edge
pub(crate) loc: Loc,
}
/// Represents outgoing edges in the borrow graph
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) struct BorrowEdges<Loc: Copy, Lbl: Clone + Ord>(
pub(crate) BTreeMap<RefID, BTreeSet<BorrowEdge<Loc, Lbl>>>,
);
/// Represents the borrow relationships and information for a node in the borrow graph, i.e
/// for a single reference
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) struct Ref<Loc: Copy, Lbl: Clone + Ord> {
/// Parent to child
/// 'self' is borrowed by _
pub(crate) borrowed_by: BorrowEdges<Loc, Lbl>,
/// Child to parent
/// 'self' borrows from _
/// Needed for efficient querying, but should be in one-to-one corespondence with borrowed by
/// i.e. x is borrowed by y IFF y borrows from x
pub(crate) borrows_from: BTreeSet<RefID>,
/// true if mutable, false otherwise
pub(crate) mutable: bool,
}
//**************************************************************************************************
// Impls
//**************************************************************************************************
impl<Loc: Copy, Lbl: Clone + Ord> BorrowEdge<Loc, Lbl> {
pub(crate) fn leq(&self, other: &Self) -> bool {
self == other || (!self.strong && paths::leq(&self.path, &other.path))
}
}
impl<Loc: Copy, Lbl: Clone + Ord> BorrowEdges<Loc, Lbl> {
pub(crate) fn new() -> Self {
Self(BTreeMap::new())
}
}
impl<Loc: Copy, Lbl: Clone + Ord> Ref<Loc, Lbl> {
pub(crate) fn new(mutable: bool) -> Self {
let borrowed_by = BorrowEdges::new();
let borrows_from = BTreeSet::new();
Self {
borrowed_by,
borrows_from,
mutable,
}
}
}
//**********************************************************************************************
// Remap
//**********************************************************************************************
impl<Loc: Copy, Lbl: Clone + Ord> BorrowEdges<Loc, Lbl> {
/// Utility for remapping the reference ids according the `id_map` provided
/// If it is not in the map, the id remains the same
pub(crate) fn remap_refs(&mut self, id_map: &BTreeMap<RefID, RefID>) {
for (old, new) in id_map {
if let Some(edges) = self.0.remove(old) |
}
}
}
impl<Loc: Copy, Lbl: Clone + Ord> Ref<Loc, Lbl> {
/// Utility for remapping the reference ids according the `id_map` provided
/// If it is not in the map, the id remains the same
pub(crate) fn remap_refs(&mut self, id_map: &BTreeMap<RefID, RefID>) {
self.borrowed_by.remap_refs(id_map);
remap_set(&mut self.borrows_from, id_map)
}
}
//**********************************************************************************************
// Traits
//**********************************************************************************************
/// Dummy struct used to implement traits for BorrowEdge that skips over the loc field
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
struct BorrowEdgeNoLoc<'a, Lbl: Clone> {
strong: bool,
path: &'a Path<Lbl>,
}
impl<'a, Lbl: Clone + Ord> BorrowEdgeNoLoc<'a, Lbl> {
fn new<Loc: Copy>(e: &'a BorrowEdge<Loc, Lbl>) -> Self {
BorrowEdgeNoLoc {
strong: e.strong,
path: &e.path,
}
}
}
impl<Loc: Copy, Lbl: Clone + Ord> PartialEq for BorrowEdge<Loc, Lbl> {
fn eq(&self, other: &BorrowEdge<Loc, Lbl>) -> bool {
BorrowEdgeNoLoc::new(self) == BorrowEdgeNoLoc::new(other)
}
}
impl<Loc: Copy, Lbl: Clone + Ord> Eq for BorrowEdge<Loc, Lbl> {}
impl<Loc: Copy, Lbl: Clone + Ord> PartialOrd for BorrowEdge<Loc, Lbl> {
fn partial_cmp(&self, other: &BorrowEdge<Loc, Lbl>) -> Option<Ordering> {
BorrowEdgeNoLoc::new(self).partial_cmp(&BorrowEdgeNoLoc::new(other))
}
}
impl<Loc: Copy, Lbl: Clone + Ord> Ord for BorrowEdge<Loc, Lbl> {
fn cmp(&self, other: &BorrowEdge<Loc, Lbl>) -> Ordering {
BorrowEdgeNoLoc::new(self).cmp(&BorrowEdgeNoLoc::new(other))
}
}
impl<Loc: Copy, Lbl: Clone + Ord + Debug> Debug for BorrowEdge<Loc, Lbl> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
BorrowEdgeNoLoc::new(self).fmt(f)
}
}
| {
self.0.insert(*new, edges);
} | conditional_block |
references.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{
paths::{self, Path},
shared::*,
};
use std::{
cmp::Ordering,
collections::{BTreeMap, BTreeSet},
fmt,
fmt::Debug,
};
//**************************************************************************************************
// Definitions
//**************************************************************************************************
/// Unique identifier for the reference
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct RefID(pub(crate) usize);
impl RefID {
/// Creates a new reference id from the given number
pub const fn new(x: usize) -> Self {
RefID(x)
}
/// Returns the number representing this reference id.
pub fn number(&self) -> usize {
self.0
}
}
/// An edge in the borrow graph
#[derive(Clone)]
pub(crate) struct BorrowEdge<Loc: Copy, Lbl: Clone + Ord> {
/// true if it is an exact (strong) edge,
/// false if it is a prefix (weak) edge
pub(crate) strong: bool,
/// The path (either exact/prefix strong/weak) for the borrow relationship of this edge
pub(crate) path: Path<Lbl>,
/// Location information for the edge
pub(crate) loc: Loc,
}
/// Represents outgoing edges in the borrow graph
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) struct BorrowEdges<Loc: Copy, Lbl: Clone + Ord>(
pub(crate) BTreeMap<RefID, BTreeSet<BorrowEdge<Loc, Lbl>>>,
);
/// Represents the borrow relationships and information for a node in the borrow graph, i.e
/// for a single reference
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) struct Ref<Loc: Copy, Lbl: Clone + Ord> {
/// Parent to child
/// 'self' is borrowed by _
pub(crate) borrowed_by: BorrowEdges<Loc, Lbl>,
/// Child to parent
/// 'self' borrows from _
/// Needed for efficient querying, but should be in one-to-one corespondence with borrowed by
/// i.e. x is borrowed by y IFF y borrows from x
pub(crate) borrows_from: BTreeSet<RefID>,
/// true if mutable, false otherwise
pub(crate) mutable: bool,
}
//**************************************************************************************************
// Impls
//**************************************************************************************************
impl<Loc: Copy, Lbl: Clone + Ord> BorrowEdge<Loc, Lbl> {
pub(crate) fn leq(&self, other: &Self) -> bool {
self == other || (!self.strong && paths::leq(&self.path, &other.path))
}
}
impl<Loc: Copy, Lbl: Clone + Ord> BorrowEdges<Loc, Lbl> {
pub(crate) fn new() -> Self {
Self(BTreeMap::new())
}
}
impl<Loc: Copy, Lbl: Clone + Ord> Ref<Loc, Lbl> {
pub(crate) fn new(mutable: bool) -> Self {
let borrowed_by = BorrowEdges::new();
let borrows_from = BTreeSet::new();
Self {
borrowed_by,
borrows_from,
mutable,
}
}
}
//**********************************************************************************************
// Remap
//**********************************************************************************************
impl<Loc: Copy, Lbl: Clone + Ord> BorrowEdges<Loc, Lbl> { | for (old, new) in id_map {
if let Some(edges) = self.0.remove(old) {
self.0.insert(*new, edges);
}
}
}
}
impl<Loc: Copy, Lbl: Clone + Ord> Ref<Loc, Lbl> {
/// Utility for remapping the reference ids according the `id_map` provided
/// If it is not in the map, the id remains the same
pub(crate) fn remap_refs(&mut self, id_map: &BTreeMap<RefID, RefID>) {
self.borrowed_by.remap_refs(id_map);
remap_set(&mut self.borrows_from, id_map)
}
}
//**********************************************************************************************
// Traits
//**********************************************************************************************
/// Dummy struct used to implement traits for BorrowEdge that skips over the loc field
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
struct BorrowEdgeNoLoc<'a, Lbl: Clone> {
strong: bool,
path: &'a Path<Lbl>,
}
impl<'a, Lbl: Clone + Ord> BorrowEdgeNoLoc<'a, Lbl> {
fn new<Loc: Copy>(e: &'a BorrowEdge<Loc, Lbl>) -> Self {
BorrowEdgeNoLoc {
strong: e.strong,
path: &e.path,
}
}
}
impl<Loc: Copy, Lbl: Clone + Ord> PartialEq for BorrowEdge<Loc, Lbl> {
fn eq(&self, other: &BorrowEdge<Loc, Lbl>) -> bool {
BorrowEdgeNoLoc::new(self) == BorrowEdgeNoLoc::new(other)
}
}
impl<Loc: Copy, Lbl: Clone + Ord> Eq for BorrowEdge<Loc, Lbl> {}
impl<Loc: Copy, Lbl: Clone + Ord> PartialOrd for BorrowEdge<Loc, Lbl> {
fn partial_cmp(&self, other: &BorrowEdge<Loc, Lbl>) -> Option<Ordering> {
BorrowEdgeNoLoc::new(self).partial_cmp(&BorrowEdgeNoLoc::new(other))
}
}
impl<Loc: Copy, Lbl: Clone + Ord> Ord for BorrowEdge<Loc, Lbl> {
fn cmp(&self, other: &BorrowEdge<Loc, Lbl>) -> Ordering {
BorrowEdgeNoLoc::new(self).cmp(&BorrowEdgeNoLoc::new(other))
}
}
impl<Loc: Copy, Lbl: Clone + Ord + Debug> Debug for BorrowEdge<Loc, Lbl> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
BorrowEdgeNoLoc::new(self).fmt(f)
}
} | /// Utility for remapping the reference ids according the `id_map` provided
/// If it is not in the map, the id remains the same
pub(crate) fn remap_refs(&mut self, id_map: &BTreeMap<RefID, RefID>) { | random_line_split |
references.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{
paths::{self, Path},
shared::*,
};
use std::{
cmp::Ordering,
collections::{BTreeMap, BTreeSet},
fmt,
fmt::Debug,
};
//**************************************************************************************************
// Definitions
//**************************************************************************************************
/// Unique identifier for the reference
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct RefID(pub(crate) usize);
impl RefID {
/// Creates a new reference id from the given number
pub const fn new(x: usize) -> Self {
RefID(x)
}
/// Returns the number representing this reference id.
pub fn number(&self) -> usize {
self.0
}
}
/// An edge in the borrow graph
#[derive(Clone)]
pub(crate) struct BorrowEdge<Loc: Copy, Lbl: Clone + Ord> {
/// true if it is an exact (strong) edge,
/// false if it is a prefix (weak) edge
pub(crate) strong: bool,
/// The path (either exact/prefix strong/weak) for the borrow relationship of this edge
pub(crate) path: Path<Lbl>,
/// Location information for the edge
pub(crate) loc: Loc,
}
/// Represents outgoing edges in the borrow graph
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) struct BorrowEdges<Loc: Copy, Lbl: Clone + Ord>(
pub(crate) BTreeMap<RefID, BTreeSet<BorrowEdge<Loc, Lbl>>>,
);
/// Represents the borrow relationships and information for a node in the borrow graph, i.e
/// for a single reference
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) struct Ref<Loc: Copy, Lbl: Clone + Ord> {
/// Parent to child
/// 'self' is borrowed by _
pub(crate) borrowed_by: BorrowEdges<Loc, Lbl>,
/// Child to parent
/// 'self' borrows from _
/// Needed for efficient querying, but should be in one-to-one corespondence with borrowed by
/// i.e. x is borrowed by y IFF y borrows from x
pub(crate) borrows_from: BTreeSet<RefID>,
/// true if mutable, false otherwise
pub(crate) mutable: bool,
}
//**************************************************************************************************
// Impls
//**************************************************************************************************
impl<Loc: Copy, Lbl: Clone + Ord> BorrowEdge<Loc, Lbl> {
pub(crate) fn leq(&self, other: &Self) -> bool {
self == other || (!self.strong && paths::leq(&self.path, &other.path))
}
}
impl<Loc: Copy, Lbl: Clone + Ord> BorrowEdges<Loc, Lbl> {
pub(crate) fn new() -> Self {
Self(BTreeMap::new())
}
}
impl<Loc: Copy, Lbl: Clone + Ord> Ref<Loc, Lbl> {
pub(crate) fn new(mutable: bool) -> Self {
let borrowed_by = BorrowEdges::new();
let borrows_from = BTreeSet::new();
Self {
borrowed_by,
borrows_from,
mutable,
}
}
}
//**********************************************************************************************
// Remap
//**********************************************************************************************
impl<Loc: Copy, Lbl: Clone + Ord> BorrowEdges<Loc, Lbl> {
/// Utility for remapping the reference ids according the `id_map` provided
/// If it is not in the map, the id remains the same
pub(crate) fn remap_refs(&mut self, id_map: &BTreeMap<RefID, RefID>) {
for (old, new) in id_map {
if let Some(edges) = self.0.remove(old) {
self.0.insert(*new, edges);
}
}
}
}
impl<Loc: Copy, Lbl: Clone + Ord> Ref<Loc, Lbl> {
/// Utility for remapping the reference ids according the `id_map` provided
/// If it is not in the map, the id remains the same
pub(crate) fn remap_refs(&mut self, id_map: &BTreeMap<RefID, RefID>) {
self.borrowed_by.remap_refs(id_map);
remap_set(&mut self.borrows_from, id_map)
}
}
//**********************************************************************************************
// Traits
//**********************************************************************************************
/// Dummy struct used to implement traits for BorrowEdge that skips over the loc field
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
struct BorrowEdgeNoLoc<'a, Lbl: Clone> {
strong: bool,
path: &'a Path<Lbl>,
}
impl<'a, Lbl: Clone + Ord> BorrowEdgeNoLoc<'a, Lbl> {
fn new<Loc: Copy>(e: &'a BorrowEdge<Loc, Lbl>) -> Self |
}
impl<Loc: Copy, Lbl: Clone + Ord> PartialEq for BorrowEdge<Loc, Lbl> {
fn eq(&self, other: &BorrowEdge<Loc, Lbl>) -> bool {
BorrowEdgeNoLoc::new(self) == BorrowEdgeNoLoc::new(other)
}
}
impl<Loc: Copy, Lbl: Clone + Ord> Eq for BorrowEdge<Loc, Lbl> {}
impl<Loc: Copy, Lbl: Clone + Ord> PartialOrd for BorrowEdge<Loc, Lbl> {
fn partial_cmp(&self, other: &BorrowEdge<Loc, Lbl>) -> Option<Ordering> {
BorrowEdgeNoLoc::new(self).partial_cmp(&BorrowEdgeNoLoc::new(other))
}
}
impl<Loc: Copy, Lbl: Clone + Ord> Ord for BorrowEdge<Loc, Lbl> {
fn cmp(&self, other: &BorrowEdge<Loc, Lbl>) -> Ordering {
BorrowEdgeNoLoc::new(self).cmp(&BorrowEdgeNoLoc::new(other))
}
}
impl<Loc: Copy, Lbl: Clone + Ord + Debug> Debug for BorrowEdge<Loc, Lbl> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
BorrowEdgeNoLoc::new(self).fmt(f)
}
}
| {
BorrowEdgeNoLoc {
strong: e.strong,
path: &e.path,
}
} | identifier_body |
references.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{
paths::{self, Path},
shared::*,
};
use std::{
cmp::Ordering,
collections::{BTreeMap, BTreeSet},
fmt,
fmt::Debug,
};
//**************************************************************************************************
// Definitions
//**************************************************************************************************
/// Unique identifier for the reference
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct RefID(pub(crate) usize);
impl RefID {
/// Creates a new reference id from the given number
pub const fn new(x: usize) -> Self {
RefID(x)
}
/// Returns the number representing this reference id.
pub fn number(&self) -> usize {
self.0
}
}
/// An edge in the borrow graph
#[derive(Clone)]
pub(crate) struct BorrowEdge<Loc: Copy, Lbl: Clone + Ord> {
/// true if it is an exact (strong) edge,
/// false if it is a prefix (weak) edge
pub(crate) strong: bool,
/// The path (either exact/prefix strong/weak) for the borrow relationship of this edge
pub(crate) path: Path<Lbl>,
/// Location information for the edge
pub(crate) loc: Loc,
}
/// Represents outgoing edges in the borrow graph
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) struct BorrowEdges<Loc: Copy, Lbl: Clone + Ord>(
pub(crate) BTreeMap<RefID, BTreeSet<BorrowEdge<Loc, Lbl>>>,
);
/// Represents the borrow relationships and information for a node in the borrow graph, i.e
/// for a single reference
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) struct | <Loc: Copy, Lbl: Clone + Ord> {
/// Parent to child
/// 'self' is borrowed by _
pub(crate) borrowed_by: BorrowEdges<Loc, Lbl>,
/// Child to parent
/// 'self' borrows from _
/// Needed for efficient querying, but should be in one-to-one corespondence with borrowed by
/// i.e. x is borrowed by y IFF y borrows from x
pub(crate) borrows_from: BTreeSet<RefID>,
/// true if mutable, false otherwise
pub(crate) mutable: bool,
}
//**************************************************************************************************
// Impls
//**************************************************************************************************
impl<Loc: Copy, Lbl: Clone + Ord> BorrowEdge<Loc, Lbl> {
pub(crate) fn leq(&self, other: &Self) -> bool {
self == other || (!self.strong && paths::leq(&self.path, &other.path))
}
}
impl<Loc: Copy, Lbl: Clone + Ord> BorrowEdges<Loc, Lbl> {
pub(crate) fn new() -> Self {
Self(BTreeMap::new())
}
}
impl<Loc: Copy, Lbl: Clone + Ord> Ref<Loc, Lbl> {
pub(crate) fn new(mutable: bool) -> Self {
let borrowed_by = BorrowEdges::new();
let borrows_from = BTreeSet::new();
Self {
borrowed_by,
borrows_from,
mutable,
}
}
}
//**********************************************************************************************
// Remap
//**********************************************************************************************
impl<Loc: Copy, Lbl: Clone + Ord> BorrowEdges<Loc, Lbl> {
/// Utility for remapping the reference ids according the `id_map` provided
/// If it is not in the map, the id remains the same
pub(crate) fn remap_refs(&mut self, id_map: &BTreeMap<RefID, RefID>) {
for (old, new) in id_map {
if let Some(edges) = self.0.remove(old) {
self.0.insert(*new, edges);
}
}
}
}
impl<Loc: Copy, Lbl: Clone + Ord> Ref<Loc, Lbl> {
/// Utility for remapping the reference ids according the `id_map` provided
/// If it is not in the map, the id remains the same
pub(crate) fn remap_refs(&mut self, id_map: &BTreeMap<RefID, RefID>) {
self.borrowed_by.remap_refs(id_map);
remap_set(&mut self.borrows_from, id_map)
}
}
//**********************************************************************************************
// Traits
//**********************************************************************************************
/// Dummy struct used to implement traits for BorrowEdge that skips over the loc field
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
struct BorrowEdgeNoLoc<'a, Lbl: Clone> {
strong: bool,
path: &'a Path<Lbl>,
}
impl<'a, Lbl: Clone + Ord> BorrowEdgeNoLoc<'a, Lbl> {
fn new<Loc: Copy>(e: &'a BorrowEdge<Loc, Lbl>) -> Self {
BorrowEdgeNoLoc {
strong: e.strong,
path: &e.path,
}
}
}
impl<Loc: Copy, Lbl: Clone + Ord> PartialEq for BorrowEdge<Loc, Lbl> {
fn eq(&self, other: &BorrowEdge<Loc, Lbl>) -> bool {
BorrowEdgeNoLoc::new(self) == BorrowEdgeNoLoc::new(other)
}
}
impl<Loc: Copy, Lbl: Clone + Ord> Eq for BorrowEdge<Loc, Lbl> {}
impl<Loc: Copy, Lbl: Clone + Ord> PartialOrd for BorrowEdge<Loc, Lbl> {
fn partial_cmp(&self, other: &BorrowEdge<Loc, Lbl>) -> Option<Ordering> {
BorrowEdgeNoLoc::new(self).partial_cmp(&BorrowEdgeNoLoc::new(other))
}
}
impl<Loc: Copy, Lbl: Clone + Ord> Ord for BorrowEdge<Loc, Lbl> {
fn cmp(&self, other: &BorrowEdge<Loc, Lbl>) -> Ordering {
BorrowEdgeNoLoc::new(self).cmp(&BorrowEdgeNoLoc::new(other))
}
}
impl<Loc: Copy, Lbl: Clone + Ord + Debug> Debug for BorrowEdge<Loc, Lbl> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
BorrowEdgeNoLoc::new(self).fmt(f)
}
}
| Ref | identifier_name |
artifact.rs | use byteorder::*;
use core::io::{BinaryComponent, DecodeError, WrResult};
#[derive(Clone, Eq, PartialEq, Debug)]
pub struct ArtifactData {
spec: u16, // Artifact code. Big-endian 0xXXXY, where X is the namespace and Y is the subtype.
body: Vec<u8> // Actual artifact format is specified in a higher layer.
}
impl ArtifactData {
pub fn new(spec: u16, body: Vec<u8>) -> ArtifactData {
ArtifactData {
spec: spec,
body: body
}
}
}
impl BinaryComponent for ArtifactData {
fn from_reader<R: ReadBytesExt>(read: &mut R) -> Result<Self, DecodeError> {
let sp = read.read_u16::<BigEndian>()?;
let mut b = vec![0; read.read_u64::<BigEndian>()? as usize];
read.read(b.as_mut_slice())?; | Ok(ArtifactData {
spec: sp,
body: b
})
}
fn to_writer<W: WriteBytesExt>(&self, write: &mut W) -> WrResult {
write.write_u16::<BigEndian>(self.spec)?;
write.write_u64::<BigEndian>(self.body.len() as u64)?;
write.write_all(self.body.as_slice())?;
Ok(())
}
} | random_line_split |
|
artifact.rs | use byteorder::*;
use core::io::{BinaryComponent, DecodeError, WrResult};
#[derive(Clone, Eq, PartialEq, Debug)]
pub struct ArtifactData {
spec: u16, // Artifact code. Big-endian 0xXXXY, where X is the namespace and Y is the subtype.
body: Vec<u8> // Actual artifact format is specified in a higher layer.
}
impl ArtifactData {
pub fn new(spec: u16, body: Vec<u8>) -> ArtifactData {
ArtifactData {
spec: spec,
body: body
}
}
}
impl BinaryComponent for ArtifactData {
fn | <R: ReadBytesExt>(read: &mut R) -> Result<Self, DecodeError> {
let sp = read.read_u16::<BigEndian>()?;
let mut b = vec![0; read.read_u64::<BigEndian>()? as usize];
read.read(b.as_mut_slice())?;
Ok(ArtifactData {
spec: sp,
body: b
})
}
fn to_writer<W: WriteBytesExt>(&self, write: &mut W) -> WrResult {
write.write_u16::<BigEndian>(self.spec)?;
write.write_u64::<BigEndian>(self.body.len() as u64)?;
write.write_all(self.body.as_slice())?;
Ok(())
}
}
| from_reader | identifier_name |
global-data.js | * Created by Rodrigo on 25/01/2017.
*/
const data = {};
const subscribers = {};
const EVENTS = {
SET: 'set',
SUBSCRIBE : 'subscribe',
UNSUBSCRIBE: 'unsubscribe'
};
/**
* Notify to all the subscriber the new value.
* A subscriber must implement a _onEvent method.
* @param event
* @param detail
* @param path
* @private
*/
const _notify = (event, detail, path = '') => {
// This method will notify to elements subscribed to the path,
// as well as the element who are subscribed to every single action, as the global-data element.
const pathSubscribers = subscribers[path] || [];
const wildCardSubscribers = subscribers['*'] || [];
[...pathSubscribers, ...wildCardSubscribers].forEach(subscriber => {
subscriber._onEvent(event, detail);
});
};
const _addSubscriber = (path, subscriber) => {
subscribers[path] = subscribers[path] || [];
subscribers[path].push(subscriber);
};
const _deleteSubscriber = (path, subscriber) => {
const index = subscribers[path].indexOf(subscriber);
if (index >= 0) subscribers[path].splice(index, 1);
if (subscribers[path].length === 0) delete subscribers[path];
};
const get = path => data[path];
const set = (path, value) => {
if (data[path] === value) {
return
}
const { SET } = EVENTS;
data[path] = value;
_notify(SET, {
path,
value
}, path);
};
const subscribe = (path, subscriber) => {
const { SUBSCRIBE } = EVENTS;
_addSubscriber(path, subscriber)
_notify(SUBSCRIBE, {
path,
element: subscriber
})
};
const unsubscribe = (path, subscriber) => {
if (!subscribers[path]) {
return
}
const { UNSUBSCRIBE } = EVENTS;
_deleteSubscriber(path, subscriber);
_notify(UNSUBSCRIBE, {
path,
element: subscriber
});
};
/**
* If the element is subscribed, will return the path of the subscription. If not, will return `false`
* @param subscriber
* @returns {String | false}
*/
const isSubcribed = subscriber =>
Object
.entries(subscribers)
.reduce((acc, [path, subs]) =>
subs.contains(subscriber) ? path : acc
, false)
export default {
EVENTS,
get,
set,
subscribe,
unsubscribe,
isSubcribed
}; | /** | random_line_split |
|
linear_gradient.rs | // svgcleaner could help you to clean up your SVG files
// from unnecessary data.
// Copyright (C) 2012-2018 Evgeniy Reizner
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, write to the Free Software Foundation, Inc.,
// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
use svgdom::{
Document,
Node,
};
use task::short::{EId, AId};
pub fn remove_dupl_linear_gradients(doc: &Document) | return false;
}
true
});
}
#[cfg(test)]
mod tests {
use super::*;
use svgdom::{Document, ToStringWithOptions};
use task;
macro_rules! test {
($name:ident, $in_text:expr, $out_text:expr) => (
#[test]
fn $name() {
let doc = Document::from_str($in_text).unwrap();
task::resolve_linear_gradient_attributes(&doc);
remove_dupl_linear_gradients(&doc);
assert_eq_text!(doc.to_string_with_opt(&write_opt_for_tests!()), $out_text);
}
)
}
test!(rm_1,
"<svg>
<defs>
<linearGradient id='lg1'>
<stop offset='0' stop-color='#ff0000'/>
<stop offset='1' stop-color='#0000ff'/>
</linearGradient>
<linearGradient id='lg2'>
<stop offset='0' stop-color='#ff0000'/>
<stop offset='1' stop-color='#0000ff'/>
</linearGradient>
</defs>
<rect fill='url(#lg2)'/>
</svg>",
"<svg>
<defs>
<linearGradient id='lg1'>
<stop offset='0' stop-color='#ff0000'/>
<stop offset='1' stop-color='#0000ff'/>
</linearGradient>
</defs>
<rect fill='url(#lg1)'/>
</svg>
");
test!(rm_2,
"<svg>
<defs>
<linearGradient id='lg1'>
<stop offset='0' stop-color='#ff0000'/>
<stop offset='1' stop-color='#0000ff'/>
</linearGradient>
<linearGradient id='lg2'>
<stop offset='0' stop-color='#ff0000'/>
<stop offset='1' stop-color='#0000ff'/>
</linearGradient>
<linearGradient id='lg3'>
<stop offset='0' stop-color='#ff0000'/>
<stop offset='1' stop-color='#0000ff'/>
</linearGradient>
</defs>
<rect fill='url(#lg2)'/>
<rect fill='url(#lg3)'/>
</svg>",
"<svg>
<defs>
<linearGradient id='lg1'>
<stop offset='0' stop-color='#ff0000'/>
<stop offset='1' stop-color='#0000ff'/>
</linearGradient>
</defs>
<rect fill='url(#lg1)'/>
<rect fill='url(#lg1)'/>
</svg>
");
// Different default attributes.
test!(rm_3,
"<svg>
<defs>
<linearGradient id='lg1' x1='0%'/>
<linearGradient id='lg2' x2='100%'/>
</defs>
<rect fill='url(#lg2)'/>
</svg>",
"<svg>
<defs>
<linearGradient id='lg1' x1='0%'/>
</defs>
<rect fill='url(#lg1)'/>
</svg>
");
// No 'stop' elements.
test!(rm_4,
"<svg>
<defs>
<linearGradient id='lg1'/>
<linearGradient id='lg2'/>
</defs>
<rect fill='url(#lg2)'/>
</svg>",
"<svg>
<defs>
<linearGradient id='lg1'/>
</defs>
<rect fill='url(#lg1)'/>
</svg>
");
test!(rm_5,
"<svg>
<linearGradient id='lg1'>
<stop/>
</linearGradient>
<linearGradient id='lg2' xlink:href='#lg1'/>
<linearGradient id='lg3' xlink:href='#lg1'/>
<rect fill='url(#lg2)'/>
<rect fill='url(#lg3)'/>
</svg>",
"<svg>
<linearGradient id='lg1'>
<stop/>
</linearGradient>
<linearGradient id='lg2' xlink:href='#lg1'/>
<rect fill='url(#lg2)'/>
<rect fill='url(#lg2)'/>
</svg>
");
test!(rm_6,
"<svg>
<linearGradient id='lg1' xlink:href='#lg2'/>
<linearGradient id='lg2'/>
</svg>",
"<svg>
<linearGradient id='lg1'/>
</svg>
");
}
| {
let attrs = [
AId::X1,
AId::Y1,
AId::X2,
AId::Y2,
AId::GradientUnits,
AId::SpreadMethod,
];
let mut nodes = doc.descendants()
.filter(|n| n.is_tag_name(EId::LinearGradient))
.collect::<Vec<Node>>();
super::rm_loop(&mut nodes, |node1, node2| {
if !super::is_gradient_attrs_equal(node1, node2, &attrs) {
return false;
}
if !super::is_equal_stops(node1, node2) { | identifier_body |
linear_gradient.rs | // svgcleaner could help you to clean up your SVG files
// from unnecessary data.
// Copyright (C) 2012-2018 Evgeniy Reizner
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version. | //
// You should have received a copy of the GNU General Public License along
// with this program; if not, write to the Free Software Foundation, Inc.,
// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
use svgdom::{
Document,
Node,
};
use task::short::{EId, AId};
pub fn remove_dupl_linear_gradients(doc: &Document) {
let attrs = [
AId::X1,
AId::Y1,
AId::X2,
AId::Y2,
AId::GradientUnits,
AId::SpreadMethod,
];
let mut nodes = doc.descendants()
.filter(|n| n.is_tag_name(EId::LinearGradient))
.collect::<Vec<Node>>();
super::rm_loop(&mut nodes, |node1, node2| {
if !super::is_gradient_attrs_equal(node1, node2, &attrs) {
return false;
}
if !super::is_equal_stops(node1, node2) {
return false;
}
true
});
}
#[cfg(test)]
mod tests {
use super::*;
use svgdom::{Document, ToStringWithOptions};
use task;
macro_rules! test {
($name:ident, $in_text:expr, $out_text:expr) => (
#[test]
fn $name() {
let doc = Document::from_str($in_text).unwrap();
task::resolve_linear_gradient_attributes(&doc);
remove_dupl_linear_gradients(&doc);
assert_eq_text!(doc.to_string_with_opt(&write_opt_for_tests!()), $out_text);
}
)
}
test!(rm_1,
"<svg>
<defs>
<linearGradient id='lg1'>
<stop offset='0' stop-color='#ff0000'/>
<stop offset='1' stop-color='#0000ff'/>
</linearGradient>
<linearGradient id='lg2'>
<stop offset='0' stop-color='#ff0000'/>
<stop offset='1' stop-color='#0000ff'/>
</linearGradient>
</defs>
<rect fill='url(#lg2)'/>
</svg>",
"<svg>
<defs>
<linearGradient id='lg1'>
<stop offset='0' stop-color='#ff0000'/>
<stop offset='1' stop-color='#0000ff'/>
</linearGradient>
</defs>
<rect fill='url(#lg1)'/>
</svg>
");
test!(rm_2,
"<svg>
<defs>
<linearGradient id='lg1'>
<stop offset='0' stop-color='#ff0000'/>
<stop offset='1' stop-color='#0000ff'/>
</linearGradient>
<linearGradient id='lg2'>
<stop offset='0' stop-color='#ff0000'/>
<stop offset='1' stop-color='#0000ff'/>
</linearGradient>
<linearGradient id='lg3'>
<stop offset='0' stop-color='#ff0000'/>
<stop offset='1' stop-color='#0000ff'/>
</linearGradient>
</defs>
<rect fill='url(#lg2)'/>
<rect fill='url(#lg3)'/>
</svg>",
"<svg>
<defs>
<linearGradient id='lg1'>
<stop offset='0' stop-color='#ff0000'/>
<stop offset='1' stop-color='#0000ff'/>
</linearGradient>
</defs>
<rect fill='url(#lg1)'/>
<rect fill='url(#lg1)'/>
</svg>
");
// Different default attributes.
test!(rm_3,
"<svg>
<defs>
<linearGradient id='lg1' x1='0%'/>
<linearGradient id='lg2' x2='100%'/>
</defs>
<rect fill='url(#lg2)'/>
</svg>",
"<svg>
<defs>
<linearGradient id='lg1' x1='0%'/>
</defs>
<rect fill='url(#lg1)'/>
</svg>
");
// No 'stop' elements.
test!(rm_4,
"<svg>
<defs>
<linearGradient id='lg1'/>
<linearGradient id='lg2'/>
</defs>
<rect fill='url(#lg2)'/>
</svg>",
"<svg>
<defs>
<linearGradient id='lg1'/>
</defs>
<rect fill='url(#lg1)'/>
</svg>
");
test!(rm_5,
"<svg>
<linearGradient id='lg1'>
<stop/>
</linearGradient>
<linearGradient id='lg2' xlink:href='#lg1'/>
<linearGradient id='lg3' xlink:href='#lg1'/>
<rect fill='url(#lg2)'/>
<rect fill='url(#lg3)'/>
</svg>",
"<svg>
<linearGradient id='lg1'>
<stop/>
</linearGradient>
<linearGradient id='lg2' xlink:href='#lg1'/>
<rect fill='url(#lg2)'/>
<rect fill='url(#lg2)'/>
</svg>
");
test!(rm_6,
"<svg>
<linearGradient id='lg1' xlink:href='#lg2'/>
<linearGradient id='lg2'/>
</svg>",
"<svg>
<linearGradient id='lg1'/>
</svg>
");
} | //
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details. | random_line_split |
linear_gradient.rs | // svgcleaner could help you to clean up your SVG files
// from unnecessary data.
// Copyright (C) 2012-2018 Evgeniy Reizner
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, write to the Free Software Foundation, Inc.,
// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
use svgdom::{
Document,
Node,
};
use task::short::{EId, AId};
pub fn | (doc: &Document) {
let attrs = [
AId::X1,
AId::Y1,
AId::X2,
AId::Y2,
AId::GradientUnits,
AId::SpreadMethod,
];
let mut nodes = doc.descendants()
.filter(|n| n.is_tag_name(EId::LinearGradient))
.collect::<Vec<Node>>();
super::rm_loop(&mut nodes, |node1, node2| {
if !super::is_gradient_attrs_equal(node1, node2, &attrs) {
return false;
}
if !super::is_equal_stops(node1, node2) {
return false;
}
true
});
}
#[cfg(test)]
mod tests {
use super::*;
use svgdom::{Document, ToStringWithOptions};
use task;
macro_rules! test {
($name:ident, $in_text:expr, $out_text:expr) => (
#[test]
fn $name() {
let doc = Document::from_str($in_text).unwrap();
task::resolve_linear_gradient_attributes(&doc);
remove_dupl_linear_gradients(&doc);
assert_eq_text!(doc.to_string_with_opt(&write_opt_for_tests!()), $out_text);
}
)
}
test!(rm_1,
"<svg>
<defs>
<linearGradient id='lg1'>
<stop offset='0' stop-color='#ff0000'/>
<stop offset='1' stop-color='#0000ff'/>
</linearGradient>
<linearGradient id='lg2'>
<stop offset='0' stop-color='#ff0000'/>
<stop offset='1' stop-color='#0000ff'/>
</linearGradient>
</defs>
<rect fill='url(#lg2)'/>
</svg>",
"<svg>
<defs>
<linearGradient id='lg1'>
<stop offset='0' stop-color='#ff0000'/>
<stop offset='1' stop-color='#0000ff'/>
</linearGradient>
</defs>
<rect fill='url(#lg1)'/>
</svg>
");
test!(rm_2,
"<svg>
<defs>
<linearGradient id='lg1'>
<stop offset='0' stop-color='#ff0000'/>
<stop offset='1' stop-color='#0000ff'/>
</linearGradient>
<linearGradient id='lg2'>
<stop offset='0' stop-color='#ff0000'/>
<stop offset='1' stop-color='#0000ff'/>
</linearGradient>
<linearGradient id='lg3'>
<stop offset='0' stop-color='#ff0000'/>
<stop offset='1' stop-color='#0000ff'/>
</linearGradient>
</defs>
<rect fill='url(#lg2)'/>
<rect fill='url(#lg3)'/>
</svg>",
"<svg>
<defs>
<linearGradient id='lg1'>
<stop offset='0' stop-color='#ff0000'/>
<stop offset='1' stop-color='#0000ff'/>
</linearGradient>
</defs>
<rect fill='url(#lg1)'/>
<rect fill='url(#lg1)'/>
</svg>
");
// Different default attributes.
test!(rm_3,
"<svg>
<defs>
<linearGradient id='lg1' x1='0%'/>
<linearGradient id='lg2' x2='100%'/>
</defs>
<rect fill='url(#lg2)'/>
</svg>",
"<svg>
<defs>
<linearGradient id='lg1' x1='0%'/>
</defs>
<rect fill='url(#lg1)'/>
</svg>
");
// No 'stop' elements.
test!(rm_4,
"<svg>
<defs>
<linearGradient id='lg1'/>
<linearGradient id='lg2'/>
</defs>
<rect fill='url(#lg2)'/>
</svg>",
"<svg>
<defs>
<linearGradient id='lg1'/>
</defs>
<rect fill='url(#lg1)'/>
</svg>
");
test!(rm_5,
"<svg>
<linearGradient id='lg1'>
<stop/>
</linearGradient>
<linearGradient id='lg2' xlink:href='#lg1'/>
<linearGradient id='lg3' xlink:href='#lg1'/>
<rect fill='url(#lg2)'/>
<rect fill='url(#lg3)'/>
</svg>",
"<svg>
<linearGradient id='lg1'>
<stop/>
</linearGradient>
<linearGradient id='lg2' xlink:href='#lg1'/>
<rect fill='url(#lg2)'/>
<rect fill='url(#lg2)'/>
</svg>
");
test!(rm_6,
"<svg>
<linearGradient id='lg1' xlink:href='#lg2'/>
<linearGradient id='lg2'/>
</svg>",
"<svg>
<linearGradient id='lg1'/>
</svg>
");
}
| remove_dupl_linear_gradients | identifier_name |
linear_gradient.rs | // svgcleaner could help you to clean up your SVG files
// from unnecessary data.
// Copyright (C) 2012-2018 Evgeniy Reizner
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, write to the Free Software Foundation, Inc.,
// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
use svgdom::{
Document,
Node,
};
use task::short::{EId, AId};
pub fn remove_dupl_linear_gradients(doc: &Document) {
let attrs = [
AId::X1,
AId::Y1,
AId::X2,
AId::Y2,
AId::GradientUnits,
AId::SpreadMethod,
];
let mut nodes = doc.descendants()
.filter(|n| n.is_tag_name(EId::LinearGradient))
.collect::<Vec<Node>>();
super::rm_loop(&mut nodes, |node1, node2| {
if !super::is_gradient_attrs_equal(node1, node2, &attrs) {
return false;
}
if !super::is_equal_stops(node1, node2) |
true
});
}
#[cfg(test)]
mod tests {
use super::*;
use svgdom::{Document, ToStringWithOptions};
use task;
macro_rules! test {
($name:ident, $in_text:expr, $out_text:expr) => (
#[test]
fn $name() {
let doc = Document::from_str($in_text).unwrap();
task::resolve_linear_gradient_attributes(&doc);
remove_dupl_linear_gradients(&doc);
assert_eq_text!(doc.to_string_with_opt(&write_opt_for_tests!()), $out_text);
}
)
}
test!(rm_1,
"<svg>
<defs>
<linearGradient id='lg1'>
<stop offset='0' stop-color='#ff0000'/>
<stop offset='1' stop-color='#0000ff'/>
</linearGradient>
<linearGradient id='lg2'>
<stop offset='0' stop-color='#ff0000'/>
<stop offset='1' stop-color='#0000ff'/>
</linearGradient>
</defs>
<rect fill='url(#lg2)'/>
</svg>",
"<svg>
<defs>
<linearGradient id='lg1'>
<stop offset='0' stop-color='#ff0000'/>
<stop offset='1' stop-color='#0000ff'/>
</linearGradient>
</defs>
<rect fill='url(#lg1)'/>
</svg>
");
test!(rm_2,
"<svg>
<defs>
<linearGradient id='lg1'>
<stop offset='0' stop-color='#ff0000'/>
<stop offset='1' stop-color='#0000ff'/>
</linearGradient>
<linearGradient id='lg2'>
<stop offset='0' stop-color='#ff0000'/>
<stop offset='1' stop-color='#0000ff'/>
</linearGradient>
<linearGradient id='lg3'>
<stop offset='0' stop-color='#ff0000'/>
<stop offset='1' stop-color='#0000ff'/>
</linearGradient>
</defs>
<rect fill='url(#lg2)'/>
<rect fill='url(#lg3)'/>
</svg>",
"<svg>
<defs>
<linearGradient id='lg1'>
<stop offset='0' stop-color='#ff0000'/>
<stop offset='1' stop-color='#0000ff'/>
</linearGradient>
</defs>
<rect fill='url(#lg1)'/>
<rect fill='url(#lg1)'/>
</svg>
");
// Different default attributes.
test!(rm_3,
"<svg>
<defs>
<linearGradient id='lg1' x1='0%'/>
<linearGradient id='lg2' x2='100%'/>
</defs>
<rect fill='url(#lg2)'/>
</svg>",
"<svg>
<defs>
<linearGradient id='lg1' x1='0%'/>
</defs>
<rect fill='url(#lg1)'/>
</svg>
");
// No 'stop' elements.
test!(rm_4,
"<svg>
<defs>
<linearGradient id='lg1'/>
<linearGradient id='lg2'/>
</defs>
<rect fill='url(#lg2)'/>
</svg>",
"<svg>
<defs>
<linearGradient id='lg1'/>
</defs>
<rect fill='url(#lg1)'/>
</svg>
");
test!(rm_5,
"<svg>
<linearGradient id='lg1'>
<stop/>
</linearGradient>
<linearGradient id='lg2' xlink:href='#lg1'/>
<linearGradient id='lg3' xlink:href='#lg1'/>
<rect fill='url(#lg2)'/>
<rect fill='url(#lg3)'/>
</svg>",
"<svg>
<linearGradient id='lg1'>
<stop/>
</linearGradient>
<linearGradient id='lg2' xlink:href='#lg1'/>
<rect fill='url(#lg2)'/>
<rect fill='url(#lg2)'/>
</svg>
");
test!(rm_6,
"<svg>
<linearGradient id='lg1' xlink:href='#lg2'/>
<linearGradient id='lg2'/>
</svg>",
"<svg>
<linearGradient id='lg1'/>
</svg>
");
}
| {
return false;
} | conditional_block |
index.js | /**
* webdriverio
* https://github.com/Camme/webdriverio
*
* A WebDriver module for nodejs. Either use the super easy help commands or use the base
* Webdriver wire protocol commands. Its totally inspired by jellyfishs webdriver, but the
* goal is to make all the webdriver protocol items available, as near the original as possible.
*
* Copyright (c) 2013 Camilo Tapia <[email protected]>
* Licensed under the MIT license.
*
* Contributors: | * Vincent Voyer <[email protected]>
*/
import WebdriverIO from './lib/webdriverio'
import Multibrowser from './lib/multibrowser'
import ErrorHandler from './lib/utils/ErrorHandler'
import getImplementedCommands from './lib/helpers/getImplementedCommands'
import pkg from './package.json'
const IMPLEMENTED_COMMANDS = getImplementedCommands()
const VERSION = pkg.version
let remote = function (options = {}, modifier) {
/**
* initialise monad
*/
let wdio = WebdriverIO(options, modifier)
/**
* build prototype: commands
*/
for (let commandName of Object.keys(IMPLEMENTED_COMMANDS)) {
wdio.lift(commandName, IMPLEMENTED_COMMANDS[commandName])
}
let prototype = wdio()
prototype.defer.resolve()
return prototype
}
let multiremote = function (options) {
let multibrowser = new Multibrowser()
for (let browserName of Object.keys(options)) {
multibrowser.addInstance(
browserName,
remote(options[browserName], multibrowser.getInstanceModifier())
)
}
return remote(options, multibrowser.getModifier())
}
export { remote, multiremote, VERSION, ErrorHandler } | * Dan Jenkins <[email protected]>
* Christian Bromann <[email protected]> | random_line_split |
update.rs | use std::collections::HashMap;
use util::H256;
use header::BlockNumber;
use blockchain::block_info::BlockInfo;
use blooms::BloomGroup;
use super::extras::{BlockDetails, BlockReceipts, TransactionAddress, LogGroupPosition};
/// Block extras update info.
pub struct | <'a> {
/// Block info.
pub info: BlockInfo,
/// Current block uncompressed rlp bytes
pub block: &'a [u8],
/// Modified block hashes.
pub block_hashes: HashMap<BlockNumber, H256>,
/// Modified block details.
pub block_details: HashMap<H256, BlockDetails>,
/// Modified block receipts.
pub block_receipts: HashMap<H256, BlockReceipts>,
/// Modified blocks blooms.
pub blocks_blooms: HashMap<LogGroupPosition, BloomGroup>,
/// Modified transaction addresses (None signifies removed transactions).
pub transactions_addresses: HashMap<H256, Option<TransactionAddress>>,
}
| ExtrasUpdate | identifier_name |
update.rs | use std::collections::HashMap;
use util::H256;
use header::BlockNumber;
use blockchain::block_info::BlockInfo;
use blooms::BloomGroup;
use super::extras::{BlockDetails, BlockReceipts, TransactionAddress, LogGroupPosition};
/// Block extras update info.
pub struct ExtrasUpdate<'a> {
/// Block info.
pub info: BlockInfo,
/// Current block uncompressed rlp bytes
pub block: &'a [u8],
/// Modified block hashes.
pub block_hashes: HashMap<BlockNumber, H256>,
/// Modified block details.
pub block_details: HashMap<H256, BlockDetails>,
/// Modified block receipts.
pub block_receipts: HashMap<H256, BlockReceipts>,
/// Modified blocks blooms.
pub blocks_blooms: HashMap<LogGroupPosition, BloomGroup>,
/// Modified transaction addresses (None signifies removed transactions). | } | pub transactions_addresses: HashMap<H256, Option<TransactionAddress>>, | random_line_split |
meshopt-compression.ts | * Meshopt compression (based on the [meshoptimizer](https://github.com/zeux/meshoptimizer)
* library) offers a lightweight decoder with very fast runtime decompression, and is
* appropriate for models of any size. Meshopt can reduce the transmission sizes of geometry,
* morph targets, animation, and other numeric data stored in buffer views. When textures are
* large, other complementary compression methods should be used as well.
*
* For the full benefits of meshopt compression, **apply gzip, brotli, or another lossless
* compression method** to the resulting .glb, .gltf, or .bin files. Meshopt specifically
* pre-optimizes assets for this purpose — without this secondary compression, the size
* reduction is considerably less.
*
* Be aware that decompression happens before uploading to the GPU. While Meshopt decoding is
* considerably faster than Draco decoding, neither compression method will improve runtime
* performance directly. To improve framerate, you'll need to simplify the geometry by reducing
* vertex count or draw calls — not just compress it. Finally, be aware that Meshopt compression is
* lossy: repeatedly compressing and decompressing a model in a pipeline will lose precision, so
* compression should generally be the last stage of an art workflow, and uncompressed original
* files should be kept.
*
* The meshoptimizer library ([github](https://github.com/zeux/meshoptimizer/tree/master/js),
* [npm](https://www.npmjs.com/package/meshoptimizer)) is a required dependency for reading or
* writing files, and must be provided by the application. Compression may alternatively be applied
* with the [gltfpack](https://github.com/zeux/meshoptimizer/tree/master/gltf) tool.
*
* ### Example
*
* ```typescript
* import { NodeIO } from '@gltf-transform/core';
* import { MeshoptCompression } from '@gltf-transform/extensions';
* import { MeshoptDecoder } from 'meshoptimizer';
*
* await MeshoptDecoder.ready;
*
* const io = new NodeIO()
* .registerExtensions([MeshoptCompression])
* .registerDependencies({
* 'meshopt.decoder': MeshoptDecoder,
* 'meshopt.encoder': MeshoptEncoder,
* });
*
* // Read and decode.
* const document = io.read('compressed.glb');
*
* // Write and encode. (Medium, -c)
* await document.transform(reorder(), quantize());
* document.createExtension(MeshoptCompression)
* .setRequired(true)
* .setEncoderOptions({ method: MeshoptCompression.EncoderMethod.QUANTIZE });
* io.write('compressed-medium.glb', document);
*
* // Write and encode. (High, -cc)
* await document.transform(
* reorder(),
* quantize({pattern: /^(POSITION|TEXCOORD|JOINTS|WEIGHTS)(_\d+)?$/}),
* );
* document.createExtension(MeshoptCompression)
* .setRequired(true)
* .setEncoderOptions({ method: MeshoptCompression.EncoderMethod.FILTER });
* io.write('compressed-high.glb', document);
* ```
*/
export class MeshoptCompression extends Extension {
public readonly extensionName = NAME;
public readonly prereadTypes = [PropertyType.BUFFER, PropertyType.PRIMITIVE];
public readonly prewriteTypes = [PropertyType.BUFFER, PropertyType.ACCESSOR];
public readonly readDependencies = ['meshopt.decoder'];
public readonly writeDependencies = ['meshopt.encoder'];
public static readonly EXTENSION_NAME = NAME;
public static readonly EncoderMethod = EncoderMethod;
private _decoder: typeof MeshoptDecoder | null = null;
private _decoderFallbackBufferMap = new Map<Buffer, Buffer>();
private _encoder: typeof MeshoptEncoder | null = null;
private _encoderOptions: Required<EncoderOptions> = DEFAULT_ENCODER_OPTIONS;
private _encoderFallbackBuffer: Buffer | null = null;
private _encoderBufferViews: {[key: string]: EncodedBufferView} = {};
private _encoderBufferViewData: {[key: string]: ArrayBuffer[]} = {};
private _encoderBufferViewAccessors: {[key: string]: GLTF.IAccessor[]} = {};
public install(key: string, dependency: unknown): this {
if (key === 'meshopt.decoder') {
this._decoder = dependency as typeof MeshoptDecoder;
}
if (key === 'meshopt.encoder') {
this._encoder = dependency as typeof MeshoptEncoder;
}
return this;
}
/**
* Configures Meshopt options for quality/compression tuning. The two methods rely on different
* pre-processing before compression, and should be compared on the basis of (a) quality/loss
* and (b) final asset size after _also_ applying a lossless compression such as gzip or brotli.
*
* - QUANTIZE: Default. Pre-process with {@link quantize quantize()} (lossy to specified
* precision) before applying lossless Meshopt compression. Offers a considerable compression
* ratio with or without further supercompression. Equivalent to `gltfpack -c`.
* - FILTER: Pre-process with lossy filters to improve compression, before applying lossless
* Meshopt compression. While output may initially be larger than with the QUANTIZE method,
* this method will benefit more from supercompression (e.g. gzip or brotli). Equivalent to
* `gltfpack -cc`.
*
* Output with the FILTER method will generally be smaller after supercompression (e.g. gzip or
* brotli) is applied, but may be larger than QUANTIZE output without it. Decoding is very fast
* with both methods.
*
* Example:
*
* ```ts
* doc.createExtension(MeshoptCompression)
* .setRequired(true)
* .setEncoderOptions({
* method: MeshoptCompression.EncoderMethod.QUANTIZE
* });
* ```
*/
public setEncoderOptions(options: EncoderOptions): this {
this._encoderOptions = {...DEFAULT_ENCODER_OPTIONS, ...options};
return this;
}
/**********************************************************************************************
* Decoding.
*/
/** @internal Checks preconditions, decodes buffer views, and creates decoded primitives. */
public preread(context: ReaderContext, propertyType: PropertyType): this {
if (!this._decoder) {
if (!this.isRequired()) return this;
throw new Error(`[${NAME}] Please install extension dependency, "meshopt.decoder".`);
}
if (!this._decoder.supported) {
if (!this.isRequired()) return this;
throw new Error(`[${NAME}]: Missing WASM support.`);
}
if (propertyType === PropertyType.BUFFER) {
this._prereadBuffers(context);
} else if (propertyType === PropertyType.PRIMITIVE) {
this._prereadPrimitives(context);
}
return this;
}
/** @internal Decode buffer views. */
private _prereadBuffers(context: ReaderContext): void {
const jsonDoc = context.jsonDoc;
const viewDefs = jsonDoc.json.bufferViews || [];
viewDefs.forEach((viewDef, index) => {
if (!viewDef.extensions || !viewDef.extensions[NAME]) return;
const meshoptDef = viewDef.extensions[NAME] as MeshoptBufferViewExtension;
const byteOffset = meshoptDef.byteOffset || 0;
const byteLength = meshoptDef.byteLength || 0;
const count = meshoptDef.count;
const stride = meshoptDef.byteStride;
const result = new Uint8Array(new ArrayBuffer(count * stride));
const bufferDef = jsonDoc.json.buffers![viewDef.buffer];
const resource = bufferDef.uri
? jsonDoc.resources[bufferDef.uri]
: jsonDoc.resources[GLB_BUFFER];
const source = new Uint8Array(resource, byteOffset, byteLength);
this._decoder!.decodeGltfBuffer(
result, count, stride, source, meshoptDef.mode, meshoptDef.filter
);
context.bufferViews[index] = result;
});
}
/**
* Mark fallback buffers and replacements.
*
* Note: Alignment with primitives is arbitrary; this just needs to happen
* after Buffers have been parsed.
* @internal
*/
private _prereadPrimitives(context: ReaderContext): void {
const jsonDoc = context.jsonDoc;
const viewDefs = jsonDoc.json.bufferViews || [];
//
viewDefs.forEach((viewDef) => {
if (!viewDef.extensions || !viewDef.extensions[NAME]) return;
const meshoptDef = viewDef.extensions[NAME] as MeshoptBufferViewExtension;
const buffer = context.buffers[meshoptDef.buffer];
const fallbackBuffer = context.buffers[viewDef.buffer];
const fallbackBufferDef = jsonDoc.json.buffers![viewDef.buffer];
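                // The EXT_meshopt_compression spec allows the buffer referenced by the view
                // to be a "fallback" buffer (an uncompressed stand-in for loaders without
                // Meshopt support); if so, remember which compressed buffer replaces it.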
if (isFallbackBuffer(fallbackBufferDef)) {
this._decoderFallbackBufferMap.set(fallbackBuffer, buffer);
}
});
}
/** @hidden Removes Fallback buffers, if extension is required. */
public read(_context: ReaderContext): this {
if (!this.isRequired()) return this;
// Replace fallback buffers.
for (const [fallbackBuffer, buffer] of this._decoderFallbackBufferMap) {
for (const parent of fallbackBuffer.listParents()) {
if (parent instanceof Accessor) | *
* [[include:VENDOR_EXTENSIONS_NOTE.md]]
* | random_line_split |
|
meshopt-compression.ts | should generally be the last stage of an art workflow, and uncompressed original
* files should be kept.
*
* The meshoptimizer library ([github](https://github.com/zeux/meshoptimizer/tree/master/js),
* [npm](https://www.npmjs.com/package/meshoptimizer)) is a required dependency for reading or
* writing files, and must be provided by the application. Compression may alternatively be applied
* with the [gltfpack](https://github.com/zeux/meshoptimizer/tree/master/gltf) tool.
*
* ### Example
*
* ```typescript
* import { NodeIO } from '@gltf-transform/core';
* import { MeshoptCompression } from '@gltf-transform/extensions';
* import { MeshoptDecoder } from 'meshoptimizer';
*
* await MeshoptDecoder.ready;
*
* const io = new NodeIO()
* .registerExtensions([MeshoptCompression])
* .registerDependencies({
* 'meshopt.decoder': MeshoptDecoder,
* 'meshopt.encoder': MeshoptEncoder,
* });
*
* // Read and decode.
* const document = io.read('compressed.glb');
*
* // Write and encode. (Medium, -c)
* await document.transform(reorder(), quantize());
* document.createExtension(MeshoptCompression)
* .setRequired(true)
* .setEncoderOptions({ method: MeshoptCompression.EncoderMethod.QUANTIZE });
* io.write('compressed-medium.glb', document);
*
* // Write and encode. (High, -cc)
* await document.transform(
* reorder(),
* quantize({pattern: /^(POSITION|TEXCOORD|JOINTS|WEIGHTS)(_\d+)?$/}),
* );
* document.createExtension(MeshoptCompression)
* .setRequired(true)
* .setEncoderOptions({ method: MeshoptCompression.EncoderMethod.FILTER });
* io.write('compressed-high.glb', document);
* ```
*/
export class Mesh | ends Extension {
public readonly extensionName = NAME;
public readonly prereadTypes = [PropertyType.BUFFER, PropertyType.PRIMITIVE];
public readonly prewriteTypes = [PropertyType.BUFFER, PropertyType.ACCESSOR];
public readonly readDependencies = ['meshopt.decoder'];
public readonly writeDependencies = ['meshopt.encoder'];
public static readonly EXTENSION_NAME = NAME;
public static readonly EncoderMethod = EncoderMethod;
private _decoder: typeof MeshoptDecoder | null = null;
private _decoderFallbackBufferMap = new Map<Buffer, Buffer>();
private _encoder: typeof MeshoptEncoder | null = null;
private _encoderOptions: Required<EncoderOptions> = DEFAULT_ENCODER_OPTIONS;
private _encoderFallbackBuffer: Buffer | null = null;
private _encoderBufferViews: {[key: string]: EncodedBufferView} = {};
private _encoderBufferViewData: {[key: string]: ArrayBuffer[]} = {};
private _encoderBufferViewAccessors: {[key: string]: GLTF.IAccessor[]} = {};
public install(key: string, dependency: unknown): this {
if (key === 'meshopt.decoder') {
this._decoder = dependency as typeof MeshoptDecoder;
}
if (key === 'meshopt.encoder') {
this._encoder = dependency as typeof MeshoptEncoder;
}
return this;
}
/**
* Configures Meshopt options for quality/compression tuning. The two methods rely on different
* pre-processing before compression, and should be compared on the basis of (a) quality/loss
* and (b) final asset size after _also_ applying a lossless compression such as gzip or brotli.
*
* - QUANTIZE: Default. Pre-process with {@link quantize quantize()} (lossy to specified
* precision) before applying lossless Meshopt compression. Offers a considerable compression
* ratio with or without further supercompression. Equivalent to `gltfpack -c`.
* - FILTER: Pre-process with lossy filters to improve compression, before applying lossless
* Meshopt compression. While output may initially be larger than with the QUANTIZE method,
* this method will benefit more from supercompression (e.g. gzip or brotli). Equivalent to
* `gltfpack -cc`.
*
* Output with the FILTER method will generally be smaller after supercompression (e.g. gzip or
* brotli) is applied, but may be larger than QUANTIZE output without it. Decoding is very fast
* with both methods.
*
* Example:
*
* ```ts
* doc.createExtension(MeshoptCompression)
* .setRequired(true)
* .setEncoderOptions({
* method: MeshoptCompression.EncoderMethod.QUANTIZE
* });
* ```
*/
public setEncoderOptions(options: EncoderOptions): this {
this._encoderOptions = {...DEFAULT_ENCODER_OPTIONS, ...options};
return this;
}
/**********************************************************************************************
* Decoding.
*/
/** @internal Checks preconditions, decodes buffer views, and creates decoded primitives. */
public preread(context: ReaderContext, propertyType: PropertyType): this {
if (!this._decoder) {
if (!this.isRequired()) return this;
throw new Error(`[${NAME}] Please install extension dependency, "meshopt.decoder".`);
}
if (!this._decoder.supported) {
if (!this.isRequired()) return this;
throw new Error(`[${NAME}]: Missing WASM support.`);
}
if (propertyType === PropertyType.BUFFER) {
this._prereadBuffers(context);
} else if (propertyType === PropertyType.PRIMITIVE) {
this._prereadPrimitives(context);
}
return this;
}
/** @internal Decode buffer views. */
private _prereadBuffers(context: ReaderContext): void {
const jsonDoc = context.jsonDoc;
const viewDefs = jsonDoc.json.bufferViews || [];
viewDefs.forEach((viewDef, index) => {
if (!viewDef.extensions || !viewDef.extensions[NAME]) return;
const meshoptDef = viewDef.extensions[NAME] as MeshoptBufferViewExtension;
const byteOffset = meshoptDef.byteOffset || 0;
const byteLength = meshoptDef.byteLength || 0;
const count = meshoptDef.count;
const stride = meshoptDef.byteStride;
const result = new Uint8Array(new ArrayBuffer(count * stride));
const bufferDef = jsonDoc.json.buffers![viewDef.buffer];
const resource = bufferDef.uri
? jsonDoc.resources[bufferDef.uri]
: jsonDoc.resources[GLB_BUFFER];
const source = new Uint8Array(resource, byteOffset, byteLength);
this._decoder!.decodeGltfBuffer(
result, count, stride, source, meshoptDef.mode, meshoptDef.filter
);
context.bufferViews[index] = result;
});
}
/**
* Mark fallback buffers and replacements.
*
* Note: Alignment with primitives is arbitrary; this just needs to happen
* after Buffers have been parsed.
* @internal
*/
private _prereadPrimitives(context: ReaderContext): void {
const jsonDoc = context.jsonDoc;
const viewDefs = jsonDoc.json.bufferViews || [];
//
viewDefs.forEach((viewDef) => {
if (!viewDef.extensions || !viewDef.extensions[NAME]) return;
const meshoptDef = viewDef.extensions[NAME] as MeshoptBufferViewExtension;
const buffer = context.buffers[meshoptDef.buffer];
const fallbackBuffer = context.buffers[viewDef.buffer];
const fallbackBufferDef = jsonDoc.json.buffers![viewDef.buffer];
if (isFallbackBuffer(fallbackBufferDef)) {
this._decoderFallbackBufferMap.set(fallbackBuffer, buffer);
}
});
}
/** @hidden Removes Fallback buffers, if extension is required. */
public read(_context: ReaderContext): this {
if (!this.isRequired()) return this;
// Replace fallback buffers.
for (const [fallbackBuffer, buffer] of this._decoderFallbackBufferMap) {
for (const parent of fallbackBuffer.listParents()) {
if (parent instanceof Accessor) {
parent.swap(fallbackBuffer, buffer);
}
}
fallbackBuffer.dispose();
}
return this;
}
/**********************************************************************************************
* Encoding.
*/
/** @internal Claims accessors that can be compressed and writes compressed buffer views. */
public prewrite(context: WriterContext, propertyType: PropertyType): this {
if (propertyType === PropertyType.ACCESSOR) {
this._prewriteAccessors(context);
} else if (propertyType === PropertyType.BUFFER) {
this._prewriteBuffers(context);
}
return this;
}
/** @internal Claims accessors that can be compressed. */
private _prewriteAccessors(context: WriterContext): void {
const json = context.jsonDoc.json;
const encoder = this._encoder!;
const options = this._encoderOptions;
const fallbackBuffer = this.doc.createBuffer(); // Disposed on write.
const fallbackBufferIndex = this.doc.getRoot().listBuffers().indexOf(fallbackBuffer);
this._encoderFallbackBuffer = fallbackBuffer;
this._encoderBufferViews = {};
this._encoderBufferViewData = {};
this._encoderBufferViewAccessors = {};
for (const accessor of this.doc.getRoot().listAccessors()) {
// See: https | optCompression ext | identifier_name |
meshopt-compression.ts | should generally be the last stage of an art workflow, and uncompressed original
* files should be kept.
*
* The meshoptimizer library ([github](https://github.com/zeux/meshoptimizer/tree/master/js),
* [npm](https://www.npmjs.com/package/meshoptimizer)) is a required dependency for reading or
* writing files, and must be provided by the application. Compression may alternatively be applied
* with the [gltfpack](https://github.com/zeux/meshoptimizer/tree/master/gltf) tool.
*
* ### Example
*
* ```typescript
* import { NodeIO } from '@gltf-transform/core';
* import { MeshoptCompression } from '@gltf-transform/extensions';
* import { MeshoptDecoder } from 'meshoptimizer';
*
* await MeshoptDecoder.ready;
*
* const io = new NodeIO()
* .registerExtensions([MeshoptCompression])
* .registerDependencies({
* 'meshopt.decoder': MeshoptDecoder,
* 'meshopt.encoder': MeshoptEncoder,
* });
*
* // Read and decode.
* const document = io.read('compressed.glb');
*
* // Write and encode. (Medium, -c)
* await document.transform(reorder(), quantize());
* document.createExtension(MeshoptCompression)
* .setRequired(true)
* .setEncoderOptions({ method: MeshoptCompression.EncoderMethod.QUANTIZE });
* io.write('compressed-medium.glb', document);
*
* // Write and encode. (High, -cc)
* await document.transform(
* reorder(),
* quantize({pattern: /^(POSITION|TEXCOORD|JOINTS|WEIGHTS)(_\d+)?$/}),
* );
* document.createExtension(MeshoptCompression)
* .setRequired(true)
* .setEncoderOptions({ method: MeshoptCompression.EncoderMethod.FILTER });
* io.write('compressed-high.glb', document);
* ```
*/
export class MeshoptCompression extends Extension {
public readonly extensionName = NAME;
public readonly prereadTypes = [PropertyType.BUFFER, PropertyType.PRIMITIVE];
public readonly prewriteTypes = [PropertyType.BUFFER, PropertyType.ACCESSOR];
public readonly readDependencies = ['meshopt.decoder'];
public readonly writeDependencies = ['meshopt.encoder'];
public static readonly EXTENSION_NAME = NAME;
public static readonly EncoderMethod = EncoderMethod;
private _decoder: typeof MeshoptDecoder | null = null;
private _decoderFallbackBufferMap = new Map<Buffer, Buffer>();
private _encoder: typeof MeshoptEncoder | null = null;
private _encoderOptions: Required<EncoderOptions> = DEFAULT_ENCODER_OPTIONS;
private _encoderFallbackBuffer: Buffer | null = null;
private _encoderBufferViews: {[key: string]: EncodedBufferView} = {};
private _encoderBufferViewData: {[key: string]: ArrayBuffer[]} = {};
private _encoderBufferViewAccessors: {[key: string]: GLTF.IAccessor[]} = {};
public install(key: string, dependency: unknown): this {
if (key === 'meshopt.decoder') {
this._decoder = dependency as typeof MeshoptDecoder;
}
if (key === 'meshopt.encoder') {
this._encoder = dependency as typeof MeshoptEncoder;
}
return this;
}
/**
* Configures Meshopt options for quality/compression tuning. The two methods rely on different
* pre-processing before compression, and should be compared on the basis of (a) quality/loss
* and (b) final asset size after _also_ applying a lossless compression such as gzip or brotli.
*
* - QUANTIZE: Default. Pre-process with {@link quantize quantize()} (lossy to specified
* precision) before applying lossless Meshopt compression. Offers a considerable compression
* ratio with or without further supercompression. Equivalent to `gltfpack -c`.
* - FILTER: Pre-process with lossy filters to improve compression, before applying lossless
* Meshopt compression. While output may initially be larger than with the QUANTIZE method,
* this method will benefit more from supercompression (e.g. gzip or brotli). Equivalent to
* `gltfpack -cc`.
*
* Output with the FILTER method will generally be smaller after supercompression (e.g. gzip or
* brotli) is applied, but may be larger than QUANTIZE output without it. Decoding is very fast
* with both methods.
*
* Example:
*
* ```ts
* doc.createExtension(MeshoptCompression)
* .setRequired(true)
* .setEncoderOptions({
* method: MeshoptCompression.EncoderMethod.QUANTIZE
* });
* ```
*/
public setEncoderOptions(options: EncoderOptions): this {
this._encoderOptions = {...DEFAULT_ENCODER_OPTIONS, ...options};
return this;
}
/**********************************************************************************************
* Decoding.
*/
/** @internal Checks preconditions, decodes buffer views, and creates decoded primitives. */
public preread(context: ReaderContext, propertyType: PropertyType): this {
| ** @internal Decode buffer views. */
private _prereadBuffers(context: ReaderContext): void {
const jsonDoc = context.jsonDoc;
const viewDefs = jsonDoc.json.bufferViews || [];
viewDefs.forEach((viewDef, index) => {
if (!viewDef.extensions || !viewDef.extensions[NAME]) return;
const meshoptDef = viewDef.extensions[NAME] as MeshoptBufferViewExtension;
const byteOffset = meshoptDef.byteOffset || 0;
const byteLength = meshoptDef.byteLength || 0;
const count = meshoptDef.count;
const stride = meshoptDef.byteStride;
const result = new Uint8Array(new ArrayBuffer(count * stride));
const bufferDef = jsonDoc.json.buffers![viewDef.buffer];
const resource = bufferDef.uri
? jsonDoc.resources[bufferDef.uri]
: jsonDoc.resources[GLB_BUFFER];
const source = new Uint8Array(resource, byteOffset, byteLength);
this._decoder!.decodeGltfBuffer(
result, count, stride, source, meshoptDef.mode, meshoptDef.filter
);
context.bufferViews[index] = result;
});
}
/**
* Mark fallback buffers and replacements.
*
* Note: Alignment with primitives is arbitrary; this just needs to happen
* after Buffers have been parsed.
* @internal
*/
private _prereadPrimitives(context: ReaderContext): void {
const jsonDoc = context.jsonDoc;
const viewDefs = jsonDoc.json.bufferViews || [];
//
viewDefs.forEach((viewDef) => {
if (!viewDef.extensions || !viewDef.extensions[NAME]) return;
const meshoptDef = viewDef.extensions[NAME] as MeshoptBufferViewExtension;
const buffer = context.buffers[meshoptDef.buffer];
const fallbackBuffer = context.buffers[viewDef.buffer];
const fallbackBufferDef = jsonDoc.json.buffers![viewDef.buffer];
if (isFallbackBuffer(fallbackBufferDef)) {
this._decoderFallbackBufferMap.set(fallbackBuffer, buffer);
}
});
}
/** @hidden Removes Fallback buffers, if extension is required. */
public read(_context: ReaderContext): this {
if (!this.isRequired()) return this;
// Replace fallback buffers.
for (const [fallbackBuffer, buffer] of this._decoderFallbackBufferMap) {
for (const parent of fallbackBuffer.listParents()) {
if (parent instanceof Accessor) {
parent.swap(fallbackBuffer, buffer);
}
}
fallbackBuffer.dispose();
}
return this;
}
/**********************************************************************************************
* Encoding.
*/
/** @internal Claims accessors that can be compressed and writes compressed buffer views. */
public prewrite(context: WriterContext, propertyType: PropertyType): this {
if (propertyType === PropertyType.ACCESSOR) {
this._prewriteAccessors(context);
} else if (propertyType === PropertyType.BUFFER) {
this._prewriteBuffers(context);
}
return this;
}
/** @internal Claims accessors that can be compressed. */
private _prewriteAccessors(context: WriterContext): void {
const json = context.jsonDoc.json;
const encoder = this._encoder!;
const options = this._encoderOptions;
const fallbackBuffer = this.doc.createBuffer(); // Disposed on write.
const fallbackBufferIndex = this.doc.getRoot().listBuffers().indexOf(fallbackBuffer);
this._encoderFallbackBuffer = fallbackBuffer;
this._encoderBufferViews = {};
this._encoderBufferViewData = {};
this._encoderBufferViewAccessors = {};
for (const accessor of this.doc.getRoot().listAccessors()) {
// See: https | if (!this._decoder) {
if (!this.isRequired()) return this;
throw new Error(`[${NAME}] Please install extension dependency, "meshopt.decoder".`);
}
if (!this._decoder.supported) {
if (!this.isRequired()) return this;
throw new Error(`[${NAME}]: Missing WASM support.`);
}
if (propertyType === PropertyType.BUFFER) {
this._prereadBuffers(context);
} else if (propertyType === PropertyType.PRIMITIVE) {
this._prereadPrimitives(context);
}
return this;
}
/ | identifier_body |
meshopt-compression.ts | should generally be the last stage of an art workflow, and uncompressed original
* files should be kept.
*
* The meshoptimizer library ([github](https://github.com/zeux/meshoptimizer/tree/master/js),
* [npm](https://www.npmjs.com/package/meshoptimizer)) is a required dependency for reading or
* writing files, and must be provided by the application. Compression may alternatively be applied
* with the [gltfpack](https://github.com/zeux/meshoptimizer/tree/master/gltf) tool.
*
* ### Example
*
* ```typescript
* import { NodeIO } from '@gltf-transform/core';
* import { MeshoptCompression } from '@gltf-transform/extensions';
* import { MeshoptDecoder } from 'meshoptimizer';
*
* await MeshoptDecoder.ready;
*
* const io = new NodeIO()
* .registerExtensions([MeshoptCompression])
* .registerDependencies({
* 'meshopt.decoder': MeshoptDecoder,
* 'meshopt.encoder': MeshoptEncoder,
* });
*
* // Read and decode.
* const document = io.read('compressed.glb');
*
* // Write and encode. (Medium, -c)
* await document.transform(reorder(), quantize());
* document.createExtension(MeshoptCompression)
* .setRequired(true)
* .setEncoderOptions({ method: MeshoptCompression.EncoderMethod.QUANTIZE });
* io.write('compressed-medium.glb', document);
*
* // Write and encode. (High, -cc)
* await document.transform(
* reorder(),
* quantize({pattern: /^(POSITION|TEXCOORD|JOINTS|WEIGHTS)(_\d+)?$/}),
* );
* document.createExtension(MeshoptCompression)
* .setRequired(true)
* .setEncoderOptions({ method: MeshoptCompression.EncoderMethod.FILTER });
* io.write('compressed-high.glb', document);
* ```
*/
export class MeshoptCompression extends Extension {
public readonly extensionName = NAME;
public readonly prereadTypes = [PropertyType.BUFFER, PropertyType.PRIMITIVE];
public readonly prewriteTypes = [PropertyType.BUFFER, PropertyType.ACCESSOR];
public readonly readDependencies = ['meshopt.decoder'];
public readonly writeDependencies = ['meshopt.encoder'];
public static readonly EXTENSION_NAME = NAME;
public static readonly EncoderMethod = EncoderMethod;
private _decoder: typeof MeshoptDecoder | null = null;
private _decoderFallbackBufferMap = new Map<Buffer, Buffer>();
private _encoder: typeof MeshoptEncoder | null = null;
private _encoderOptions: Required<EncoderOptions> = DEFAULT_ENCODER_OPTIONS;
private _encoderFallbackBuffer: Buffer | null = null;
private _encoderBufferViews: {[key: string]: EncodedBufferView} = {};
private _encoderBufferViewData: {[key: string]: ArrayBuffer[]} = {};
private _encoderBufferViewAccessors: {[key: string]: GLTF.IAccessor[]} = {};
public install(key: string, dependency: unknown): this {
if (key === 'meshopt.decoder') {
| f (key === 'meshopt.encoder') {
this._encoder = dependency as typeof MeshoptEncoder;
}
return this;
}
/**
* Configures Meshopt options for quality/compression tuning. The two methods rely on different
* pre-processing before compression, and should be compared on the basis of (a) quality/loss
* and (b) final asset size after _also_ applying a lossless compression such as gzip or brotli.
*
* - QUANTIZE: Default. Pre-process with {@link quantize quantize()} (lossy to specified
* precision) before applying lossless Meshopt compression. Offers a considerable compression
* ratio with or without further supercompression. Equivalent to `gltfpack -c`.
* - FILTER: Pre-process with lossy filters to improve compression, before applying lossless
* Meshopt compression. While output may initially be larger than with the QUANTIZE method,
* this method will benefit more from supercompression (e.g. gzip or brotli). Equivalent to
* `gltfpack -cc`.
*
* Output with the FILTER method will generally be smaller after supercompression (e.g. gzip or
* brotli) is applied, but may be larger than QUANTIZE output without it. Decoding is very fast
* with both methods.
*
* Example:
*
* ```ts
* doc.createExtension(MeshoptCompression)
* .setRequired(true)
* .setEncoderOptions({
* method: MeshoptCompression.EncoderMethod.QUANTIZE
* });
* ```
*/
public setEncoderOptions(options: EncoderOptions): this {
this._encoderOptions = {...DEFAULT_ENCODER_OPTIONS, ...options};
return this;
}
/**********************************************************************************************
* Decoding.
*/
/** @internal Checks preconditions, decodes buffer views, and creates decoded primitives. */
public preread(context: ReaderContext, propertyType: PropertyType): this {
if (!this._decoder) {
if (!this.isRequired()) return this;
throw new Error(`[${NAME}] Please install extension dependency, "meshopt.decoder".`);
}
if (!this._decoder.supported) {
if (!this.isRequired()) return this;
throw new Error(`[${NAME}]: Missing WASM support.`);
}
if (propertyType === PropertyType.BUFFER) {
this._prereadBuffers(context);
} else if (propertyType === PropertyType.PRIMITIVE) {
this._prereadPrimitives(context);
}
return this;
}
/** @internal Decode buffer views. */
private _prereadBuffers(context: ReaderContext): void {
const jsonDoc = context.jsonDoc;
const viewDefs = jsonDoc.json.bufferViews || [];
viewDefs.forEach((viewDef, index) => {
if (!viewDef.extensions || !viewDef.extensions[NAME]) return;
const meshoptDef = viewDef.extensions[NAME] as MeshoptBufferViewExtension;
const byteOffset = meshoptDef.byteOffset || 0;
const byteLength = meshoptDef.byteLength || 0;
const count = meshoptDef.count;
const stride = meshoptDef.byteStride;
const result = new Uint8Array(new ArrayBuffer(count * stride));
const bufferDef = jsonDoc.json.buffers![viewDef.buffer];
const resource = bufferDef.uri
? jsonDoc.resources[bufferDef.uri]
: jsonDoc.resources[GLB_BUFFER];
const source = new Uint8Array(resource, byteOffset, byteLength);
this._decoder!.decodeGltfBuffer(
result, count, stride, source, meshoptDef.mode, meshoptDef.filter
);
context.bufferViews[index] = result;
});
}
/**
* Mark fallback buffers and replacements.
*
* Note: Alignment with primitives is arbitrary; this just needs to happen
* after Buffers have been parsed.
* @internal
*/
private _prereadPrimitives(context: ReaderContext): void {
const jsonDoc = context.jsonDoc;
const viewDefs = jsonDoc.json.bufferViews || [];
//
viewDefs.forEach((viewDef) => {
if (!viewDef.extensions || !viewDef.extensions[NAME]) return;
const meshoptDef = viewDef.extensions[NAME] as MeshoptBufferViewExtension;
const buffer = context.buffers[meshoptDef.buffer];
const fallbackBuffer = context.buffers[viewDef.buffer];
const fallbackBufferDef = jsonDoc.json.buffers![viewDef.buffer];
if (isFallbackBuffer(fallbackBufferDef)) {
this._decoderFallbackBufferMap.set(fallbackBuffer, buffer);
}
});
}
/** @hidden Removes Fallback buffers, if extension is required. */
public read(_context: ReaderContext): this {
if (!this.isRequired()) return this;
// Replace fallback buffers.
for (const [fallbackBuffer, buffer] of this._decoderFallbackBufferMap) {
for (const parent of fallbackBuffer.listParents()) {
if (parent instanceof Accessor) {
parent.swap(fallbackBuffer, buffer);
}
}
fallbackBuffer.dispose();
}
return this;
}
/**********************************************************************************************
* Encoding.
*/
/** @internal Claims accessors that can be compressed and writes compressed buffer views. */
public prewrite(context: WriterContext, propertyType: PropertyType): this {
if (propertyType === PropertyType.ACCESSOR) {
this._prewriteAccessors(context);
} else if (propertyType === PropertyType.BUFFER) {
this._prewriteBuffers(context);
}
return this;
}
/** @internal Claims accessors that can be compressed. */
private _prewriteAccessors(context: WriterContext): void {
const json = context.jsonDoc.json;
const encoder = this._encoder!;
const options = this._encoderOptions;
const fallbackBuffer = this.doc.createBuffer(); // Disposed on write.
const fallbackBufferIndex = this.doc.getRoot().listBuffers().indexOf(fallbackBuffer);
this._encoderFallbackBuffer = fallbackBuffer;
this._encoderBufferViews = {};
this._encoderBufferViewData = {};
this._encoderBufferViewAccessors = {};
for (const accessor of this.doc.getRoot().listAccessors()) {
// See: https | this._decoder = dependency as typeof MeshoptDecoder;
}
i | conditional_block |
platform_directives_and_pipes.ts | * available in every component of the application.
*
* ### Example
*
* ```typescript
* import {PLATFORM_DIRECTIVES} from '@angular/core';
* import {OtherDirective} from './myDirectives';
*
* @Component({
* selector: 'my-component',
* template: `
* <!-- can use other directive even though the component does not list it in `directives` -->
* <other-directive></other-directive>
* `
* })
* export class MyComponent {
* ...
* }
*
* bootstrap(MyComponent, [provide(PLATFORM_DIRECTIVES, {useValue: [OtherDirective], multi:true})]);
* ```
*/
export const PLATFORM_DIRECTIVES: OpaqueToken =
/*@ts2dart_const*/ new OpaqueToken("Platform Directives");
/**
 * A token that can be provided when bootstrapping an application to make an array of pipes
* available in every component of the application.
*
* ### Example
*
* ```typescript
* import {PLATFORM_PIPES} from '@angular/core';
* import {OtherPipe} from './myPipe';
*
* @Component({
* selector: 'my-component',
* template: `
* {{123 | other-pipe}}
* `
* })
* export class MyComponent {
* ...
* }
*
* bootstrap(MyComponent, [provide(PLATFORM_PIPES, {useValue: [OtherPipe], multi:true})]);
* ```
*/
export const PLATFORM_PIPES: OpaqueToken = /*@ts2dart_const*/ new OpaqueToken("Platform Pipes"); | import {OpaqueToken} from './di';
/**
 * A token that can be provided when bootstrapping an application to make an array of directives | random_line_split
|
extract_and_upload_iris_classifier.py | #!/usr/bin/env python
# Load common imports and system envs to build the core object |
# Load the Environment:
os.environ["ENV_DEPLOYMENT_TYPE"] = "JustRedis"
from src.common.inits_for_python import *
#####################################################################
#
# Start Arg Processing:
#
action = "Extract and Upload IRIS Models to S3"
parser = argparse.ArgumentParser(description="Parser for Action: " + str(action))
parser.add_argument('-u', '--url', help='URL to Download', dest='url')
parser.add_argument('-b', '--s3bucket', help='S3 Bucket (Optional)', dest='s_bucket')
parser.add_argument('-k', '--s3key', help='S3 Key (Optional)', dest='s_key')
parser.add_argument("-d", "--debug", help="Debug Flag", dest='debug', action='store_true')
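# Example invocation (bucket and key values are only illustrative):
#   ./extract_and_upload_iris_classifier.py -b unique-bucket-name-for-datasets -k dataset_IRIS_CLASSIFIER.cache.pickle.zlib -d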
args = parser.parse_args()
if args.debug:
debug = True
core.enable_debug()
data_dir = str(os.getenv("ENV_DATA_DST_DIR", "/opt/work/data/dst"))
if not os.path.exists(data_dir):
    os.mkdir(data_dir, 0o777)
ds_name = "iris_classifier"
cur_date_str = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
s3_bucket = "unique-bucket-name-for-datasets"
s3_key = "dataset_" + core.to_upper(ds_name) + ".cache.pickle.zlib"
s3_loc = ""
if args.s_bucket:
s3_bucket = str(args.s_bucket)
if args.s_key:
s3_key = str(args.s_key)
#
# End Arg Processing
#
#####################################################################
s3_loc = str(s3_bucket) + ":" + str(s3_key)
lg("-------------------------------------------------", 6)
lg("Extracting and Uploading Models from CACHE to S3Loc(" + str(s3_loc) + ")", 6)
lg("", 6)
cache_req = {
"RAName" : "CACHE", # Redis instance name holding the models
"DSName" : str(ds_name), # Dataset name for pulling out of the cache
"S3Loc" : str(s3_loc), # S3 location to store the model file
"DeleteAfter" : False, # Optional delete after upload
"SaveDir" : data_dir, # Optional dir to save the model file - default is ENV_DATA_DST_DIR
"TrackingID" : "" # Future support for using the tracking id
}
upload_results = core.ml_upload_cached_dataset_to_s3(cache_req, core.get_rds(), core.get_dbs(), debug)
if upload_results["Status"] == "SUCCESS":
lg("Done Uploading Model and Analysis DSName(" + str(ds_name) + ") S3Loc(" + str(cache_req["S3Loc"]) + ")", 6)
else:
lg("", 6)
lg("ERROR: Failed Upload Model and Analysis Caches as file for DSName(" + str(ds_name) + ")", 6)
lg(upload_results["Error"], 6)
lg("", 6)
sys.exit(1)
# end of if extract + upload worked
lg("", 6)
lg("Extract and Upload Completed", 5)
lg("", 6)
sys.exit(0) | import sys, os | random_line_split |
extract_and_upload_iris_classifier.py | #!/usr/bin/env python
# Load common imports and system envs to build the core object
import sys, os
# Load the Environment:
os.environ["ENV_DEPLOYMENT_TYPE"] = "JustRedis"
from src.common.inits_for_python import *
#####################################################################
#
# Start Arg Processing:
#
action = "Extract and Upload IRIS Models to S3"
parser = argparse.ArgumentParser(description="Parser for Action: " + str(action))
parser.add_argument('-u', '--url', help='URL to Download', dest='url')
parser.add_argument('-b', '--s3bucket', help='S3 Bucket (Optional)', dest='s_bucket')
parser.add_argument('-k', '--s3key', help='S3 Key (Optional)', dest='s_key')
parser.add_argument("-d", "--debug", help="Debug Flag", dest='debug', action='store_true')
args = parser.parse_args()
if args.debug:
debug = True
core.enable_debug()
data_dir = str(os.getenv("ENV_DATA_DST_DIR", "/opt/work/data/dst"))
if not os.path.exists(data_dir):
    os.mkdir(data_dir, 0o777)
ds_name = "iris_classifier"
cur_date_str = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
s3_bucket = "unique-bucket-name-for-datasets"
s3_key = "dataset_" + core.to_upper(ds_name) + ".cache.pickle.zlib"
s3_loc = ""
if args.s_bucket:
s3_bucket = str(args.s_bucket)
if args.s_key:
s3_key = str(args.s_key)
#
# End Arg Processing
#
#####################################################################
s3_loc = str(s3_bucket) + ":" + str(s3_key)
lg("-------------------------------------------------", 6)
lg("Extracting and Uploading Models from CACHE to S3Loc(" + str(s3_loc) + ")", 6)
lg("", 6)
cache_req = {
"RAName" : "CACHE", # Redis instance name holding the models
"DSName" : str(ds_name), # Dataset name for pulling out of the cache
"S3Loc" : str(s3_loc), # S3 location to store the model file
"DeleteAfter" : False, # Optional delete after upload
"SaveDir" : data_dir, # Optional dir to save the model file - default is ENV_DATA_DST_DIR
"TrackingID" : "" # Future support for using the tracking id
}
upload_results = core.ml_upload_cached_dataset_to_s3(cache_req, core.get_rds(), core.get_dbs(), debug)
if upload_results["Status"] == "SUCCESS":
|
else:
lg("", 6)
lg("ERROR: Failed Upload Model and Analysis Caches as file for DSName(" + str(ds_name) + ")", 6)
lg(upload_results["Error"], 6)
lg("", 6)
sys.exit(1)
# end of if extract + upload worked
lg("", 6)
lg("Extract and Upload Completed", 5)
lg("", 6)
sys.exit(0)
| lg("Done Uploading Model and Analysis DSName(" + str(ds_name) + ") S3Loc(" + str(cache_req["S3Loc"]) + ")", 6) | conditional_block |
styles.js | 'use strict';
var path = require('path');
var gulp = require('gulp');
var conf = require('./conf');
var browserSync = require('browser-sync');
var $ = require('gulp-load-plugins')();
var wiredep = require('wiredep').stream;
var _ = require('lodash');
gulp.task('styles', function () {
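  // Build the app stylesheet: inject every app/**/*.scss partial into index.scss,
  // wire Bower dependencies, compile Sass with sourcemaps, add vendor prefixes,
  // write the result to the serve dir, and stream it to BrowserSync.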
var sassOptions = {
style: 'expanded'
};
var injectFiles = gulp.src([
path.join(conf.paths.src, '/app/**/*.scss'),
path.join('!' + conf.paths.src, '/app/index.scss')
], { read: false }); | },
starttag: '// injector',
endtag: '// endinjector',
addRootSlash: false
};
return gulp.src([
path.join(conf.paths.src, '/app/index.scss')
])
.pipe($.inject(injectFiles, injectOptions))
.pipe(wiredep(_.extend({}, conf.wiredep)))
.pipe($.sourcemaps.init())
.pipe($.sass(sassOptions)).on('error', conf.errorHandler('Sass'))
.pipe($.autoprefixer()).on('error', conf.errorHandler('Autoprefixer'))
.pipe($.sourcemaps.write())
.pipe(gulp.dest(path.join(conf.paths.build, '/serve/app/')))
.pipe(browserSync.reload({ stream: true }));
}); |
var injectOptions = {
transform: function(filePath) {
filePath = filePath.replace(conf.paths.src + '/app/', '');
return '@import "' + filePath + '";'; | random_line_split |
queries.ts | import Api = require('../api')
import Filters = require('./filters');
import Selects = require('./selects');
import QueryBuilder = require('./query-builder');
import Q = require('q');
import request = require('superagent');
import _ = require('underscore');
module Queries {
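    // ConnectQuery is an immutable, chainable query builder: select, filter,
    // groupBy, timeframe, interval, timezone and custom each return a new
    // ConnectQuery instance, so partially built queries can be shared safely.
    // execute() builds the API query and tracks the in-flight request so it
    // can later be cancelled via abort().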
export class ConnectQuery {
_client: Api.Client;
_collection: string;
_selects: Selects.QuerySelects;
_filters: Filters.QueryFilter[];
_groups: string[];
_timeframe: Api.Timeframe;
_interval: string;
_timezone: Api.Timezone;
_customQueryOptions: any;
_runningRequests: Array<Api.ClientDeferredQuery>;
constructor(
client: Api.Client,
collection: string,
selects?: Selects.QuerySelects,
filters?: Filters.QueryFilter[],
groups?: string[],
timeframe?: Api.Timeframe,
interval?: string,
timezone?: Api.Timezone,
customQueryOptions?: any) {
this._client = client;
this._collection = collection;
this._selects = selects || {};
this._filters = filters || [];
this._groups = groups || [];
this._timeframe = timeframe || null;
this._interval = interval || null;
this._timezone = timezone || null;
this._customQueryOptions = customQueryOptions || {};
this._runningRequests = new Array<Api.ClientDeferredQuery>();
}
public collection(): string {
return this._collection;
}
public select(selects: Selects.QuerySelects): ConnectQuery {
for(var key in selects) {
var select = selects[key];
if(!_.isString(select) && Object.keys(select).length > 1)
throw new Error('You can only provide one aggregation function per select.');
}
return new ConnectQuery(this._client, this._collection, selects, this._filters, this._groups, this._timeframe, this._interval, this._timezone, this._customQueryOptions);
}
public filter(filterSpecification: any): ConnectQuery {
var filters = _.chain(filterSpecification)
.map(Filters.queryFilterBuilder)
.flatten()
.value()
.concat(this._filters);
filters = _.uniq(filters, filter => filter.field + '|' + filter.operator);
return new ConnectQuery(this._client, this._collection, this._selects, filters, this._groups, this._timeframe, this._interval, this._timezone, this._customQueryOptions);
}
public groupBy(field: string|string[]) {
var groups;
if(typeof field === 'string') {
groups = this._groups.concat([field]);
} else |
return new ConnectQuery(this._client, this._collection, this._selects, this._filters, groups, this._timeframe, this._interval, this._timezone, this._customQueryOptions);
}
public timeframe(timeframe: Api.Timeframe): ConnectQuery {
return new ConnectQuery(this._client, this._collection, this._selects, this._filters, this._groups, timeframe, this._interval, this._timezone, this._customQueryOptions);
}
public interval(interval: string): ConnectQuery {
return new ConnectQuery(this._client, this._collection, this._selects, this._filters, this._groups, this._timeframe, interval, this._timezone, this._customQueryOptions);
}
public timezone(timezone: Api.Timezone): ConnectQuery {
if(!this._timeframe && !this._interval)
throw new Error('You can only set a timezone when a valid timeframe or interval has been set.');
return new ConnectQuery(this._client, this._collection, this._selects, this._filters, this._groups, this._timeframe, this._interval, timezone, this._customQueryOptions);
}
public custom(options: any): ConnectQuery {
var newOptions = {};
for (var name in this._customQueryOptions)
newOptions[name] = this._customQueryOptions[name];
for (var name in options)
newOptions[name] = options[name];
return new ConnectQuery(this._client, this._collection, this._selects, this._filters, this._groups, this._timeframe, this._interval, this._timezone, newOptions);
}
public execute(): Q.IPromise<Api.QueryResults> {
var queryBuilder = new QueryBuilder(),
apiQuery = queryBuilder.build(this._selects, this._filters, this._groups, this._timeframe, this._interval, this._timezone, this._customQueryOptions);
var executeQuery = this._client.query(this._collection, apiQuery);
this._addToRunningQueries(executeQuery);
return executeQuery.deferred.promise;
}
public abort() {
var length = this._runningRequests.length;
_.each(this._runningRequests, request => {
request.request.abort();
request.deferred.reject('request aborted');
});
this._runningRequests.splice(0, length);
}
public isExecuting() {
return this._runningRequests.length > 0;
}
private _addToRunningQueries(executeQuery:Api.ClientDeferredQuery) {
this._runningRequests.push(executeQuery);
var removeFromRunningQueries = () => {
var finishedQueryIndex = this._runningRequests.indexOf(executeQuery);
if(finishedQueryIndex < 0) return;
this._runningRequests.splice(finishedQueryIndex, 1);
};
executeQuery.deferred.promise.then(removeFromRunningQueries, removeFromRunningQueries);
}
}
}
export = Queries;
| {
groups = this._groups.concat(field);
} | conditional_block |
queries.ts | import Api = require('../api')
import Filters = require('./filters');
import Selects = require('./selects');
import QueryBuilder = require('./query-builder');
import Q = require('q');
import request = require('superagent');
import _ = require('underscore');
module Queries {
export class ConnectQuery {
_client: Api.Client;
_collection: string;
_selects: Selects.QuerySelects;
_filters: Filters.QueryFilter[];
_groups: string[];
_timeframe: Api.Timeframe;
_interval: string;
_timezone: Api.Timezone;
_customQueryOptions: any;
_runningRequests: Array<Api.ClientDeferredQuery>;
constructor(
client: Api.Client,
collection: string,
selects?: Selects.QuerySelects,
filters?: Filters.QueryFilter[],
groups?: string[],
timeframe?: Api.Timeframe,
interval?: string,
timezone?: Api.Timezone,
customQueryOptions?: any) {
this._client = client;
this._collection = collection;
this._selects = selects || {};
this._filters = filters || [];
this._groups = groups || [];
this._timeframe = timeframe || null;
this._interval = interval || null;
this._timezone = timezone || null;
this._customQueryOptions = customQueryOptions || {};
this._runningRequests = new Array<Api.ClientDeferredQuery>();
}
public collection(): string {
return this._collection;
}
public select(selects: Selects.QuerySelects): ConnectQuery {
for(var key in selects) {
var select = selects[key];
if(!_.isString(select) && Object.keys(select).length > 1)
throw new Error('You can only provide one aggregation function per select.');
}
return new ConnectQuery(this._client, this._collection, selects, this._filters, this._groups, this._timeframe, this._interval, this._timezone, this._customQueryOptions);
}
public filter(filterSpecification: any): ConnectQuery {
var filters = _.chain(filterSpecification)
.map(Filters.queryFilterBuilder)
.flatten()
.value()
.concat(this._filters);
filters = _.uniq(filters, filter => filter.field + '|' + filter.operator);
return new ConnectQuery(this._client, this._collection, this._selects, filters, this._groups, this._timeframe, this._interval, this._timezone, this._customQueryOptions);
}
public groupBy(field: string|string[]) {
var groups;
if(typeof field === 'string') {
groups = this._groups.concat([field]);
} else {
groups = this._groups.concat(field);
}
return new ConnectQuery(this._client, this._collection, this._selects, this._filters, groups, this._timeframe, this._interval, this._timezone, this._customQueryOptions);
}
public timeframe(timeframe: Api.Timeframe): ConnectQuery {
return new ConnectQuery(this._client, this._collection, this._selects, this._filters, this._groups, timeframe, this._interval, this._timezone, this._customQueryOptions);
}
public interval(interval: string): ConnectQuery {
return new ConnectQuery(this._client, this._collection, this._selects, this._filters, this._groups, this._timeframe, interval, this._timezone, this._customQueryOptions);
}
public timezone(timezone: Api.Timezone): ConnectQuery {
if(!this._timeframe && !this._interval)
throw new Error('You can only set a timezone when a valid timeframe or interval has been set.');
return new ConnectQuery(this._client, this._collection, this._selects, this._filters, this._groups, this._timeframe, this._interval, timezone, this._customQueryOptions);
}
public custom(options: any): ConnectQuery {
var newOptions = {};
for (var name in this._customQueryOptions)
newOptions[name] = this._customQueryOptions[name];
for (var name in options)
newOptions[name] = options[name];
return new ConnectQuery(this._client, this._collection, this._selects, this._filters, this._groups, this._timeframe, this._interval, this._timezone, newOptions);
}
public execute(): Q.IPromise<Api.QueryResults> {
var queryBuilder = new QueryBuilder(),
apiQuery = queryBuilder.build(this._selects, this._filters, this._groups, this._timeframe, this._interval, this._timezone, this._customQueryOptions);
var executeQuery = this._client.query(this._collection, apiQuery);
this._addToRunningQueries(executeQuery);
return executeQuery.deferred.promise;
}
public abort() {
var length = this._runningRequests.length;
_.each(this._runningRequests, request => {
request.request.abort();
request.deferred.reject('request aborted');
});
this._runningRequests.splice(0, length);
}
public isExecuting() {
return this._runningRequests.length > 0; |
private _addToRunningQueries(executeQuery:Api.ClientDeferredQuery) {
this._runningRequests.push(executeQuery);
var removeFromRunningQueries = () => {
var finishedQueryIndex = this._runningRequests.indexOf(executeQuery);
if(finishedQueryIndex < 0) return;
this._runningRequests.splice(finishedQueryIndex, 1);
};
executeQuery.deferred.promise.then(removeFromRunningQueries, removeFromRunningQueries);
}
}
}
export = Queries; | } | random_line_split |
queries.ts | import Api = require('../api')
import Filters = require('./filters');
import Selects = require('./selects');
import QueryBuilder = require('./query-builder');
import Q = require('q');
import request = require('superagent');
import _ = require('underscore');
module Queries {
export class ConnectQuery {
_client: Api.Client;
_collection: string;
_selects: Selects.QuerySelects;
_filters: Filters.QueryFilter[];
_groups: string[];
_timeframe: Api.Timeframe;
_interval: string;
_timezone: Api.Timezone;
_customQueryOptions: any;
_runningRequests: Array<Api.ClientDeferredQuery>;
constructor(
client: Api.Client,
collection: string,
selects?: Selects.QuerySelects,
filters?: Filters.QueryFilter[],
groups?: string[],
timeframe?: Api.Timeframe,
interval?: string,
timezone?: Api.Timezone,
customQueryOptions?: any) {
this._client = client;
this._collection = collection;
this._selects = selects || {};
this._filters = filters || [];
this._groups = groups || [];
this._timeframe = timeframe || null;
this._interval = interval || null;
this._timezone = timezone || null;
this._customQueryOptions = customQueryOptions || {};
this._runningRequests = new Array<Api.ClientDeferredQuery>();
}
public collection(): string {
return this._collection;
}
public select(selects: Selects.QuerySelects): ConnectQuery {
for(var key in selects) {
var select = selects[key];
if(!_.isString(select) && Object.keys(select).length > 1)
throw new Error('You can only provide one aggregation function per select.');
}
return new ConnectQuery(this._client, this._collection, selects, this._filters, this._groups, this._timeframe, this._interval, this._timezone, this._customQueryOptions);
}
public filter(filterSpecification: any): ConnectQuery {
var filters = _.chain(filterSpecification)
.map(Filters.queryFilterBuilder)
.flatten()
.value()
.concat(this._filters);
filters = _.uniq(filters, filter => filter.field + '|' + filter.operator);
return new ConnectQuery(this._client, this._collection, this._selects, filters, this._groups, this._timeframe, this._interval, this._timezone, this._customQueryOptions);
}
public groupBy(field: string|string[]) {
var groups;
if(typeof field === 'string') {
groups = this._groups.concat([field]);
} else {
groups = this._groups.concat(field);
}
return new ConnectQuery(this._client, this._collection, this._selects, this._filters, groups, this._timeframe, this._interval, this._timezone, this._customQueryOptions);
}
public timeframe(timeframe: Api.Timeframe): ConnectQuery {
return new ConnectQuery(this._client, this._collection, this._selects, this._filters, this._groups, timeframe, this._interval, this._timezone, this._customQueryOptions);
}
public interval(interval: string): ConnectQuery {
return new ConnectQuery(this._client, this._collection, this._selects, this._filters, this._groups, this._timeframe, interval, this._timezone, this._customQueryOptions);
}
public timezone(timezone: Api.Timezone): ConnectQuery {
if(!this._timeframe && !this._interval)
throw new Error('You can only set a timezone when a valid timeframe or interval has been set.');
return new ConnectQuery(this._client, this._collection, this._selects, this._filters, this._groups, this._timeframe, this._interval, timezone, this._customQueryOptions);
}
public custom(options: any): ConnectQuery {
var newOptions = {};
for (var name in this._customQueryOptions)
newOptions[name] = this._customQueryOptions[name];
for (var name in options)
newOptions[name] = options[name];
return new ConnectQuery(this._client, this._collection, this._selects, this._filters, this._groups, this._timeframe, this._interval, this._timezone, newOptions);
}
public execute(): Q.IPromise<Api.QueryResults> {
var queryBuilder = new QueryBuilder(),
apiQuery = queryBuilder.build(this._selects, this._filters, this._groups, this._timeframe, this._interval, this._timezone, this._customQueryOptions);
var executeQuery = this._client.query(this._collection, apiQuery);
this._addToRunningQueries(executeQuery);
return executeQuery.deferred.promise;
}
public abort() |
public isExecuting() {
return this._runningRequests.length > 0;
}
private _addToRunningQueries(executeQuery:Api.ClientDeferredQuery) {
this._runningRequests.push(executeQuery);
var removeFromRunningQueries = () => {
var finishedQueryIndex = this._runningRequests.indexOf(executeQuery);
if(finishedQueryIndex < 0) return;
this._runningRequests.splice(finishedQueryIndex, 1);
};
executeQuery.deferred.promise.then(removeFromRunningQueries, removeFromRunningQueries);
}
}
}
export = Queries;
| {
var length = this._runningRequests.length;
_.each(this._runningRequests, request => {
request.request.abort();
request.deferred.reject('request aborted');
});
this._runningRequests.splice(0, length);
} | identifier_body |
queries.ts | import Api = require('../api')
import Filters = require('./filters');
import Selects = require('./selects');
import QueryBuilder = require('./query-builder');
import Q = require('q');
import request = require('superagent');
import _ = require('underscore');
module Queries {
export class ConnectQuery {
_client: Api.Client;
_collection: string;
_selects: Selects.QuerySelects;
_filters: Filters.QueryFilter[];
_groups: string[];
_timeframe: Api.Timeframe;
_interval: string;
_timezone: Api.Timezone;
_customQueryOptions: any;
_runningRequests: Array<Api.ClientDeferredQuery>;
constructor(
client: Api.Client,
collection: string,
selects?: Selects.QuerySelects,
filters?: Filters.QueryFilter[],
groups?: string[],
timeframe?: Api.Timeframe,
interval?: string,
timezone?: Api.Timezone,
customQueryOptions?: any) {
this._client = client;
this._collection = collection;
this._selects = selects || {};
this._filters = filters || [];
this._groups = groups || [];
this._timeframe = timeframe || null;
this._interval = interval || null;
this._timezone = timezone || null;
this._customQueryOptions = customQueryOptions || {};
this._runningRequests = new Array<Api.ClientDeferredQuery>();
}
public collection(): string {
return this._collection;
}
public select(selects: Selects.QuerySelects): ConnectQuery {
for(var key in selects) {
var select = selects[key];
if(!_.isString(select) && Object.keys(select).length > 1)
throw new Error('You can only provide one aggregation function per select.');
}
return new ConnectQuery(this._client, this._collection, selects, this._filters, this._groups, this._timeframe, this._interval, this._timezone, this._customQueryOptions);
}
public | (filterSpecification: any): ConnectQuery {
var filters = _.chain(filterSpecification)
.map(Filters.queryFilterBuilder)
.flatten()
.value()
.concat(this._filters);
filters = _.uniq(filters, filter => filter.field + '|' + filter.operator);
return new ConnectQuery(this._client, this._collection, this._selects, filters, this._groups, this._timeframe, this._interval, this._timezone, this._customQueryOptions);
}
public groupBy(field: string|string[]) {
var groups;
if(typeof field === 'string') {
groups = this._groups.concat([field]);
} else {
groups = this._groups.concat(field);
}
return new ConnectQuery(this._client, this._collection, this._selects, this._filters, groups, this._timeframe, this._interval, this._timezone, this._customQueryOptions);
}
public timeframe(timeframe: Api.Timeframe): ConnectQuery {
return new ConnectQuery(this._client, this._collection, this._selects, this._filters, this._groups, timeframe, this._interval, this._timezone, this._customQueryOptions);
}
public interval(interval: string): ConnectQuery {
return new ConnectQuery(this._client, this._collection, this._selects, this._filters, this._groups, this._timeframe, interval, this._timezone, this._customQueryOptions);
}
public timezone(timezone: Api.Timezone): ConnectQuery {
if(!this._timeframe && !this._interval)
throw new Error('You can only set a timezone when a valid timeframe or interval has been set.');
return new ConnectQuery(this._client, this._collection, this._selects, this._filters, this._groups, this._timeframe, this._interval, timezone, this._customQueryOptions);
}
public custom(options: any): ConnectQuery {
var newOptions = {};
for (var name in this._customQueryOptions)
newOptions[name] = this._customQueryOptions[name];
for (var name in options)
newOptions[name] = options[name];
return new ConnectQuery(this._client, this._collection, this._selects, this._filters, this._groups, this._timeframe, this._interval, this._timezone, newOptions);
}
public execute(): Q.IPromise<Api.QueryResults> {
var queryBuilder = new QueryBuilder(),
apiQuery = queryBuilder.build(this._selects, this._filters, this._groups, this._timeframe, this._interval, this._timezone, this._customQueryOptions);
var executeQuery = this._client.query(this._collection, apiQuery);
this._addToRunningQueries(executeQuery);
return executeQuery.deferred.promise;
}
public abort() {
var length = this._runningRequests.length;
_.each(this._runningRequests, request => {
request.request.abort();
request.deferred.reject('request aborted');
});
this._runningRequests.splice(0, length);
}
public isExecuting() {
return this._runningRequests.length > 0;
}
private _addToRunningQueries(executeQuery:Api.ClientDeferredQuery) {
this._runningRequests.push(executeQuery);
var removeFromRunningQueries = () => {
var finishedQueryIndex = this._runningRequests.indexOf(executeQuery);
if(finishedQueryIndex < 0) return;
this._runningRequests.splice(finishedQueryIndex, 1);
};
executeQuery.deferred.promise.then(removeFromRunningQueries, removeFromRunningQueries);
}
}
}
export = Queries;
| filter | identifier_name |
main.js | ;(function() {
"use strict";
var app = angular.module("AddressApp",[]);
app.controller("AddressBookController", function() {
var vm = this;
vm.contacts = [
{
name: "Hank",
email: "[email protected]",
phone: 9876543210,
address: "Know Your Role Blvd "
},
{
name: "Cookie Monster",
email: "[email protected]",
phone: 1234567890,
address: "Sesame Street"
},
{
name: "Clifford",
email: "[email protected]",
phone: 127361239,
address: "Too big for an address"
},
{
name: "Hank",
email: "[email protected]",
phone: 9876543210,
address: "Know Your Role Blvd "
},
{ | email: "[email protected]",
phone: 1234567890,
address: "Sesame Street"
},
{
name: "Clifford",
email: "[email protected]",
phone: 127361239,
address: "Too big for an address"
}
];
vm.addContact = function() {
vm.contacts.push(vm.newContact);
vm.newContact = null;
};
vm.removeContact = function(contact) {
var index = vm.contacts.indexOf(contact);
vm.contacts.splice(index, 1);
};
});
})(); | name: "Cookie Monster", | random_line_split |
invalidation.py | import collections
import functools
import hashlib
import logging
import socket
from django.conf import settings
from django.core.cache import cache as default_cache, get_cache, parse_backend_uri
from django.core.cache.backends.base import InvalidCacheBackendError
from django.utils import encoding, translation
try:
import redis as redislib
except ImportError:
redislib = None
# Look for our own cache first before falling back to the default cache
try:
cache = get_cache('cache_machine')
except (InvalidCacheBackendError, ValueError):
cache = default_cache
CACHE_PREFIX = getattr(settings, 'CACHE_PREFIX', '')
FETCH_BY_ID = getattr(settings, 'FETCH_BY_ID', False)
FLUSH = CACHE_PREFIX + ':flush:'
log = logging.getLogger('caching.invalidation')
def make_key(k, with_locale=True):
"""Generate the full key for ``k``, with a prefix."""
key = encoding.smart_str('%s:%s' % (CACHE_PREFIX, k))
if with_locale:
key += encoding.smart_str(translation.get_language())
# memcached keys must be < 250 bytes and w/o whitespace, but it's nice
# to see the keys when using locmem.
return hashlib.md5(key).hexdigest()
def flush_key(obj):
"""We put flush lists in the flush: namespace."""
key = obj if isinstance(obj, basestring) else obj.cache_key
return FLUSH + make_key(key, with_locale=False)
def byid(obj):
key = obj if isinstance(obj, basestring) else obj.cache_key
return make_key('byid:' + key)
def safe_redis(return_type):
"""
Decorator to catch and log any redis errors.
return_type (optionally a callable) will be returned if there is an error.
"""
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kw):
try:
return f(*args, **kw)
except (socket.error, redislib.RedisError), e:
log.error('redis error: %s' % e)
# log.error('%r\n%r : %r' % (f.__name__, args[1:], kw))
if hasattr(return_type, '__call__'):
return return_type()
else:
return return_type
return wrapper
return decorator
class Invalidator(object):
def invalidate_keys(self, keys):
"""Invalidate all the flush lists named by the list of ``keys``."""
if not keys:
return
flush, flush_keys = self.find_flush_lists(keys)
if flush:
cache.delete_many(flush)
if flush_keys:
self.clear_flush_lists(flush_keys)
def cache_objects(self, objects, query_key, query_flush):
# Add this query to the flush list of each object. We include
# query_flush so that other things can be cached against the queryset
# and still participate in invalidation.
flush_keys = [o.flush_key() for o in objects]
flush_lists = collections.defaultdict(set)
for key in flush_keys:
flush_lists[key].add(query_flush)
flush_lists[query_flush].add(query_key)
# Add each object to the flush lists of its foreign keys.
for obj in objects:
obj_flush = obj.flush_key()
for key in map(flush_key, obj._cache_keys()):
if key != obj_flush:
flush_lists[key].add(obj_flush)
if FETCH_BY_ID:
flush_lists[key].add(byid(obj))
self.add_to_flush_list(flush_lists)
def | (self, keys):
"""
Recursively search for flush lists and objects to invalidate.
The search starts with the lists in `keys` and expands to any flush
lists found therein. Returns ({objects to flush}, {flush keys found}).
"""
new_keys = keys = set(map(flush_key, keys))
flush = set(keys)
# Add other flush keys from the lists, which happens when a parent
# object includes a foreign key.
while 1:
to_flush = self.get_flush_lists(new_keys)
flush.update(to_flush)
new_keys = set(k for k in to_flush if k.startswith(FLUSH))
diff = new_keys.difference(keys)
if diff:
keys.update(new_keys)
else:
return flush, keys
def add_to_flush_list(self, mapping):
"""Update flush lists with the {flush_key: [query_key,...]} map."""
flush_lists = collections.defaultdict(set)
flush_lists.update(cache.get_many(mapping.keys()))
for key, list_ in mapping.items():
if flush_lists[key] is None:
flush_lists[key] = set(list_)
else:
flush_lists[key].update(list_)
cache.set_many(flush_lists)
def get_flush_lists(self, keys):
"""Return a set of object keys from the lists in `keys`."""
return set(e for flush_list in
filter(None, cache.get_many(keys).values())
for e in flush_list)
def clear_flush_lists(self, keys):
"""Remove the given keys from the database."""
cache.delete_many(keys)
class RedisInvalidator(Invalidator):
def safe_key(self, key):
if ' ' in key or '\n' in key:
log.warning('BAD KEY: "%s"' % key)
return ''
return key
@safe_redis(None)
def add_to_flush_list(self, mapping):
"""Update flush lists with the {flush_key: [query_key,...]} map."""
pipe = redis.pipeline(transaction=False)
for key, list_ in mapping.items():
for query_key in list_:
pipe.sadd(self.safe_key(key), query_key)
pipe.execute()
@safe_redis(set)
def get_flush_lists(self, keys):
return redis.sunion(map(self.safe_key, keys))
@safe_redis(None)
def clear_flush_lists(self, keys):
redis.delete(*map(self.safe_key, keys))
class NullInvalidator(Invalidator):
def add_to_flush_list(self, mapping):
return
def get_redis_backend():
"""Connect to redis from a string like CACHE_BACKEND."""
# From django-redis-cache.
_, server, params = parse_backend_uri(settings.REDIS_BACKEND)
db = params.pop('db', 1)
try:
db = int(db)
except (ValueError, TypeError):
db = 1
try:
socket_timeout = float(params.pop('socket_timeout'))
except (KeyError, ValueError):
socket_timeout = None
password = params.pop('password', None)
if ':' in server:
host, port = server.split(':')
try:
port = int(port)
except (ValueError, TypeError):
port = 6379
else:
host = 'localhost'
port = 6379
return redislib.Redis(host=host, port=port, db=db, password=password,
socket_timeout=socket_timeout)
if getattr(settings, 'CACHE_MACHINE_NO_INVALIDATION', False):
invalidator = NullInvalidator()
elif getattr(settings, 'CACHE_MACHINE_USE_REDIS', False):
redis = get_redis_backend()
invalidator = RedisInvalidator()
else:
invalidator = Invalidator()
| find_flush_lists | identifier_name |
invalidation.py | import collections
import functools
import hashlib
import logging
import socket
from django.conf import settings
from django.core.cache import cache as default_cache, get_cache, parse_backend_uri
from django.core.cache.backends.base import InvalidCacheBackendError
from django.utils import encoding, translation
try:
import redis as redislib
except ImportError:
redislib = None
# Look for our own cache first before falling back to the default cache
try:
cache = get_cache('cache_machine')
except (InvalidCacheBackendError, ValueError):
cache = default_cache
CACHE_PREFIX = getattr(settings, 'CACHE_PREFIX', '')
FETCH_BY_ID = getattr(settings, 'FETCH_BY_ID', False)
FLUSH = CACHE_PREFIX + ':flush:'
log = logging.getLogger('caching.invalidation')
def make_key(k, with_locale=True):
"""Generate the full key for ``k``, with a prefix."""
key = encoding.smart_str('%s:%s' % (CACHE_PREFIX, k))
if with_locale:
key += encoding.smart_str(translation.get_language())
# memcached keys must be < 250 bytes and w/o whitespace, but it's nice
# to see the keys when using locmem.
return hashlib.md5(key).hexdigest()
def flush_key(obj):
"""We put flush lists in the flush: namespace."""
key = obj if isinstance(obj, basestring) else obj.cache_key
return FLUSH + make_key(key, with_locale=False)
def byid(obj):
key = obj if isinstance(obj, basestring) else obj.cache_key
return make_key('byid:' + key)
def safe_redis(return_type):
"""
Decorator to catch and log any redis errors.
return_type (optionally a callable) will be returned if there is an error.
"""
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kw):
try:
return f(*args, **kw)
except (socket.error, redislib.RedisError), e:
log.error('redis error: %s' % e)
# log.error('%r\n%r : %r' % (f.__name__, args[1:], kw))
if hasattr(return_type, '__call__'):
return return_type()
else:
return return_type
return wrapper
return decorator
class Invalidator(object):
def invalidate_keys(self, keys):
"""Invalidate all the flush lists named by the list of ``keys``."""
if not keys:
return
flush, flush_keys = self.find_flush_lists(keys)
if flush:
cache.delete_many(flush)
if flush_keys:
self.clear_flush_lists(flush_keys)
def cache_objects(self, objects, query_key, query_flush):
# Add this query to the flush list of each object. We include
# query_flush so that other things can be cached against the queryset
# and still participate in invalidation.
flush_keys = [o.flush_key() for o in objects]
flush_lists = collections.defaultdict(set)
for key in flush_keys:
flush_lists[key].add(query_flush)
flush_lists[query_flush].add(query_key)
# Add each object to the flush lists of its foreign keys.
for obj in objects:
obj_flush = obj.flush_key()
for key in map(flush_key, obj._cache_keys()):
if key != obj_flush:
flush_lists[key].add(obj_flush)
if FETCH_BY_ID:
flush_lists[key].add(byid(obj))
self.add_to_flush_list(flush_lists)
def find_flush_lists(self, keys):
"""
Recursively search for flush lists and objects to invalidate.
The search starts with the lists in `keys` and expands to any flush
lists found therein. Returns ({objects to flush}, {flush keys found}).
"""
new_keys = keys = set(map(flush_key, keys))
flush = set(keys)
# Add other flush keys from the lists, which happens when a parent
# object includes a foreign key.
while 1:
to_flush = self.get_flush_lists(new_keys)
flush.update(to_flush)
new_keys = set(k for k in to_flush if k.startswith(FLUSH))
diff = new_keys.difference(keys)
if diff:
keys.update(new_keys)
else:
return flush, keys
def add_to_flush_list(self, mapping):
"""Update flush lists with the {flush_key: [query_key,...]} map."""
flush_lists = collections.defaultdict(set)
flush_lists.update(cache.get_many(mapping.keys()))
for key, list_ in mapping.items():
if flush_lists[key] is None:
flush_lists[key] = set(list_)
else:
flush_lists[key].update(list_)
cache.set_many(flush_lists)
def get_flush_lists(self, keys):
"""Return a set of object keys from the lists in `keys`."""
return set(e for flush_list in
filter(None, cache.get_many(keys).values())
for e in flush_list)
def clear_flush_lists(self, keys):
"""Remove the given keys from the database."""
cache.delete_many(keys)
class RedisInvalidator(Invalidator):
def safe_key(self, key):
if ' ' in key or '\n' in key:
log.warning('BAD KEY: "%s"' % key)
return ''
return key
@safe_redis(None)
def add_to_flush_list(self, mapping):
"""Update flush lists with the {flush_key: [query_key,...]} map."""
pipe = redis.pipeline(transaction=False)
for key, list_ in mapping.items():
for query_key in list_:
pipe.sadd(self.safe_key(key), query_key)
pipe.execute()
@safe_redis(set)
def get_flush_lists(self, keys):
return redis.sunion(map(self.safe_key, keys))
@safe_redis(None)
def clear_flush_lists(self, keys):
|
class NullInvalidator(Invalidator):
def add_to_flush_list(self, mapping):
return
def get_redis_backend():
"""Connect to redis from a string like CACHE_BACKEND."""
# From django-redis-cache.
_, server, params = parse_backend_uri(settings.REDIS_BACKEND)
db = params.pop('db', 1)
try:
db = int(db)
except (ValueError, TypeError):
db = 1
try:
socket_timeout = float(params.pop('socket_timeout'))
except (KeyError, ValueError):
socket_timeout = None
password = params.pop('password', None)
if ':' in server:
host, port = server.split(':')
try:
port = int(port)
except (ValueError, TypeError):
port = 6379
else:
host = 'localhost'
port = 6379
return redislib.Redis(host=host, port=port, db=db, password=password,
socket_timeout=socket_timeout)
if getattr(settings, 'CACHE_MACHINE_NO_INVALIDATION', False):
invalidator = NullInvalidator()
elif getattr(settings, 'CACHE_MACHINE_USE_REDIS', False):
redis = get_redis_backend()
invalidator = RedisInvalidator()
else:
invalidator = Invalidator()
| redis.delete(*map(self.safe_key, keys)) | identifier_body |
invalidation.py | import collections
import functools
import hashlib
import logging
import socket
from django.conf import settings
from django.core.cache import cache as default_cache, get_cache, parse_backend_uri
from django.core.cache.backends.base import InvalidCacheBackendError
from django.utils import encoding, translation
try:
import redis as redislib
except ImportError:
redislib = None
# Look for our own cache first before falling back to the default cache
try:
cache = get_cache('cache_machine')
except (InvalidCacheBackendError, ValueError):
cache = default_cache
CACHE_PREFIX = getattr(settings, 'CACHE_PREFIX', '')
FETCH_BY_ID = getattr(settings, 'FETCH_BY_ID', False)
FLUSH = CACHE_PREFIX + ':flush:'
log = logging.getLogger('caching.invalidation')
def make_key(k, with_locale=True):
"""Generate the full key for ``k``, with a prefix."""
key = encoding.smart_str('%s:%s' % (CACHE_PREFIX, k))
if with_locale:
|
# memcached keys must be < 250 bytes and w/o whitespace, but it's nice
# to see the keys when using locmem.
return hashlib.md5(key).hexdigest()
def flush_key(obj):
"""We put flush lists in the flush: namespace."""
key = obj if isinstance(obj, basestring) else obj.cache_key
return FLUSH + make_key(key, with_locale=False)
def byid(obj):
key = obj if isinstance(obj, basestring) else obj.cache_key
return make_key('byid:' + key)
def safe_redis(return_type):
"""
Decorator to catch and log any redis errors.
return_type (optionally a callable) will be returned if there is an error.
"""
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kw):
try:
return f(*args, **kw)
except (socket.error, redislib.RedisError), e:
log.error('redis error: %s' % e)
# log.error('%r\n%r : %r' % (f.__name__, args[1:], kw))
if hasattr(return_type, '__call__'):
return return_type()
else:
return return_type
return wrapper
return decorator
class Invalidator(object):
def invalidate_keys(self, keys):
"""Invalidate all the flush lists named by the list of ``keys``."""
if not keys:
return
flush, flush_keys = self.find_flush_lists(keys)
if flush:
cache.delete_many(flush)
if flush_keys:
self.clear_flush_lists(flush_keys)
def cache_objects(self, objects, query_key, query_flush):
# Add this query to the flush list of each object. We include
# query_flush so that other things can be cached against the queryset
# and still participate in invalidation.
flush_keys = [o.flush_key() for o in objects]
flush_lists = collections.defaultdict(set)
for key in flush_keys:
flush_lists[key].add(query_flush)
flush_lists[query_flush].add(query_key)
# Add each object to the flush lists of its foreign keys.
for obj in objects:
obj_flush = obj.flush_key()
for key in map(flush_key, obj._cache_keys()):
if key != obj_flush:
flush_lists[key].add(obj_flush)
if FETCH_BY_ID:
flush_lists[key].add(byid(obj))
self.add_to_flush_list(flush_lists)
def find_flush_lists(self, keys):
"""
Recursively search for flush lists and objects to invalidate.
The search starts with the lists in `keys` and expands to any flush
lists found therein. Returns ({objects to flush}, {flush keys found}).
"""
new_keys = keys = set(map(flush_key, keys))
flush = set(keys)
# Add other flush keys from the lists, which happens when a parent
# object includes a foreign key.
while 1:
to_flush = self.get_flush_lists(new_keys)
flush.update(to_flush)
new_keys = set(k for k in to_flush if k.startswith(FLUSH))
diff = new_keys.difference(keys)
if diff:
keys.update(new_keys)
else:
return flush, keys
def add_to_flush_list(self, mapping):
"""Update flush lists with the {flush_key: [query_key,...]} map."""
flush_lists = collections.defaultdict(set)
flush_lists.update(cache.get_many(mapping.keys()))
for key, list_ in mapping.items():
if flush_lists[key] is None:
flush_lists[key] = set(list_)
else:
flush_lists[key].update(list_)
cache.set_many(flush_lists)
def get_flush_lists(self, keys):
"""Return a set of object keys from the lists in `keys`."""
return set(e for flush_list in
filter(None, cache.get_many(keys).values())
for e in flush_list)
def clear_flush_lists(self, keys):
"""Remove the given keys from the database."""
cache.delete_many(keys)
class RedisInvalidator(Invalidator):
def safe_key(self, key):
if ' ' in key or '\n' in key:
log.warning('BAD KEY: "%s"' % key)
return ''
return key
@safe_redis(None)
def add_to_flush_list(self, mapping):
"""Update flush lists with the {flush_key: [query_key,...]} map."""
pipe = redis.pipeline(transaction=False)
for key, list_ in mapping.items():
for query_key in list_:
pipe.sadd(self.safe_key(key), query_key)
pipe.execute()
@safe_redis(set)
def get_flush_lists(self, keys):
return redis.sunion(map(self.safe_key, keys))
@safe_redis(None)
def clear_flush_lists(self, keys):
redis.delete(*map(self.safe_key, keys))
class NullInvalidator(Invalidator):
def add_to_flush_list(self, mapping):
return
def get_redis_backend():
"""Connect to redis from a string like CACHE_BACKEND."""
# From django-redis-cache.
_, server, params = parse_backend_uri(settings.REDIS_BACKEND)
db = params.pop('db', 1)
try:
db = int(db)
except (ValueError, TypeError):
db = 1
try:
socket_timeout = float(params.pop('socket_timeout'))
except (KeyError, ValueError):
socket_timeout = None
password = params.pop('password', None)
if ':' in server:
host, port = server.split(':')
try:
port = int(port)
except (ValueError, TypeError):
port = 6379
else:
host = 'localhost'
port = 6379
return redislib.Redis(host=host, port=port, db=db, password=password,
socket_timeout=socket_timeout)
if getattr(settings, 'CACHE_MACHINE_NO_INVALIDATION', False):
invalidator = NullInvalidator()
elif getattr(settings, 'CACHE_MACHINE_USE_REDIS', False):
redis = get_redis_backend()
invalidator = RedisInvalidator()
else:
invalidator = Invalidator()
| key += encoding.smart_str(translation.get_language()) | conditional_block |
invalidation.py | import collections
import functools
import hashlib
import logging
import socket
from django.conf import settings
from django.core.cache import cache as default_cache, get_cache, parse_backend_uri
from django.core.cache.backends.base import InvalidCacheBackendError
from django.utils import encoding, translation
try:
import redis as redislib
except ImportError: | redislib = None
# Look for our own cache first before falling back to the default cache
try:
cache = get_cache('cache_machine')
except (InvalidCacheBackendError, ValueError):
cache = default_cache
CACHE_PREFIX = getattr(settings, 'CACHE_PREFIX', '')
FETCH_BY_ID = getattr(settings, 'FETCH_BY_ID', False)
FLUSH = CACHE_PREFIX + ':flush:'
log = logging.getLogger('caching.invalidation')
def make_key(k, with_locale=True):
"""Generate the full key for ``k``, with a prefix."""
key = encoding.smart_str('%s:%s' % (CACHE_PREFIX, k))
if with_locale:
key += encoding.smart_str(translation.get_language())
# memcached keys must be < 250 bytes and w/o whitespace, but it's nice
# to see the keys when using locmem.
return hashlib.md5(key).hexdigest()
def flush_key(obj):
"""We put flush lists in the flush: namespace."""
key = obj if isinstance(obj, basestring) else obj.cache_key
return FLUSH + make_key(key, with_locale=False)
def byid(obj):
key = obj if isinstance(obj, basestring) else obj.cache_key
return make_key('byid:' + key)
def safe_redis(return_type):
"""
Decorator to catch and log any redis errors.
return_type (optionally a callable) will be returned if there is an error.
"""
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kw):
try:
return f(*args, **kw)
except (socket.error, redislib.RedisError), e:
log.error('redis error: %s' % e)
# log.error('%r\n%r : %r' % (f.__name__, args[1:], kw))
if hasattr(return_type, '__call__'):
return return_type()
else:
return return_type
return wrapper
return decorator
class Invalidator(object):
def invalidate_keys(self, keys):
"""Invalidate all the flush lists named by the list of ``keys``."""
if not keys:
return
flush, flush_keys = self.find_flush_lists(keys)
if flush:
cache.delete_many(flush)
if flush_keys:
self.clear_flush_lists(flush_keys)
def cache_objects(self, objects, query_key, query_flush):
# Add this query to the flush list of each object. We include
# query_flush so that other things can be cached against the queryset
# and still participate in invalidation.
flush_keys = [o.flush_key() for o in objects]
flush_lists = collections.defaultdict(set)
for key in flush_keys:
flush_lists[key].add(query_flush)
flush_lists[query_flush].add(query_key)
# Add each object to the flush lists of its foreign keys.
for obj in objects:
obj_flush = obj.flush_key()
for key in map(flush_key, obj._cache_keys()):
if key != obj_flush:
flush_lists[key].add(obj_flush)
if FETCH_BY_ID:
flush_lists[key].add(byid(obj))
self.add_to_flush_list(flush_lists)
def find_flush_lists(self, keys):
"""
Recursively search for flush lists and objects to invalidate.
The search starts with the lists in `keys` and expands to any flush
lists found therein. Returns ({objects to flush}, {flush keys found}).
"""
new_keys = keys = set(map(flush_key, keys))
flush = set(keys)
# Add other flush keys from the lists, which happens when a parent
# object includes a foreign key.
while 1:
to_flush = self.get_flush_lists(new_keys)
flush.update(to_flush)
new_keys = set(k for k in to_flush if k.startswith(FLUSH))
diff = new_keys.difference(keys)
if diff:
keys.update(new_keys)
else:
return flush, keys
def add_to_flush_list(self, mapping):
"""Update flush lists with the {flush_key: [query_key,...]} map."""
flush_lists = collections.defaultdict(set)
flush_lists.update(cache.get_many(mapping.keys()))
for key, list_ in mapping.items():
if flush_lists[key] is None:
flush_lists[key] = set(list_)
else:
flush_lists[key].update(list_)
cache.set_many(flush_lists)
def get_flush_lists(self, keys):
"""Return a set of object keys from the lists in `keys`."""
return set(e for flush_list in
filter(None, cache.get_many(keys).values())
for e in flush_list)
def clear_flush_lists(self, keys):
"""Remove the given keys from the database."""
cache.delete_many(keys)
class RedisInvalidator(Invalidator):
def safe_key(self, key):
if ' ' in key or '\n' in key:
log.warning('BAD KEY: "%s"' % key)
return ''
return key
@safe_redis(None)
def add_to_flush_list(self, mapping):
"""Update flush lists with the {flush_key: [query_key,...]} map."""
pipe = redis.pipeline(transaction=False)
for key, list_ in mapping.items():
for query_key in list_:
pipe.sadd(self.safe_key(key), query_key)
pipe.execute()
@safe_redis(set)
def get_flush_lists(self, keys):
return redis.sunion(map(self.safe_key, keys))
@safe_redis(None)
def clear_flush_lists(self, keys):
redis.delete(*map(self.safe_key, keys))
class NullInvalidator(Invalidator):
def add_to_flush_list(self, mapping):
return
def get_redis_backend():
"""Connect to redis from a string like CACHE_BACKEND."""
# From django-redis-cache.
_, server, params = parse_backend_uri(settings.REDIS_BACKEND)
db = params.pop('db', 1)
try:
db = int(db)
except (ValueError, TypeError):
db = 1
try:
socket_timeout = float(params.pop('socket_timeout'))
except (KeyError, ValueError):
socket_timeout = None
password = params.pop('password', None)
if ':' in server:
host, port = server.split(':')
try:
port = int(port)
except (ValueError, TypeError):
port = 6379
else:
host = 'localhost'
port = 6379
return redislib.Redis(host=host, port=port, db=db, password=password,
socket_timeout=socket_timeout)
if getattr(settings, 'CACHE_MACHINE_NO_INVALIDATION', False):
invalidator = NullInvalidator()
elif getattr(settings, 'CACHE_MACHINE_USE_REDIS', False):
redis = get_redis_backend()
invalidator = RedisInvalidator()
else:
invalidator = Invalidator() | random_line_split |
|
textutil.py | #!/usr/bin/env python
# http://stackoverflow.com/questions/517923/what-is-the-best-way-to-remove-accents-in-a-python-unicode-string
import re
import unicodedata
def strip_accents(text):
"""
Strip accents from input String.
:param text: The input string.
:type text: String.
:returns: The processed String.
:rtype: String.
"""
try:
text = unicode(text, 'utf-8')
except NameError: # unicode is a default on python 3
pass
text = unicodedata.normalize('NFD', text)
text = text.encode('ascii', 'ignore')
text = text.decode("utf-8")
return str(text)
| :param text: The input string.
:type text: String.
:returns: The processed String.
:rtype: String.
"""
text = strip_accents(text.lower())
text = re.sub('[ ]+', '_', text)
text = re.sub('[^0-9a-zA-Z_-]', '', text)
return text | def text_to_id(text):
"""
Convert input text to id.
| random_line_split |
textutil.py | #!/usr/bin/env python
# http://stackoverflow.com/questions/517923/what-is-the-best-way-to-remove-accents-in-a-python-unicode-string
import re
import unicodedata
def strip_accents(text):
|
def text_to_id(text):
"""
Convert input text to id.
:param text: The input string.
:type text: String.
:returns: The processed String.
:rtype: String.
"""
text = strip_accents(text.lower())
text = re.sub('[ ]+', '_', text)
text = re.sub('[^0-9a-zA-Z_-]', '', text)
return text
| """
Strip accents from input String.
:param text: The input string.
:type text: String.
:returns: The processed String.
:rtype: String.
"""
try:
text = unicode(text, 'utf-8')
except NameError: # unicode is a default on python 3
pass
text = unicodedata.normalize('NFD', text)
text = text.encode('ascii', 'ignore')
text = text.decode("utf-8")
return str(text) | identifier_body |
textutil.py | #!/usr/bin/env python
# http://stackoverflow.com/questions/517923/what-is-the-best-way-to-remove-accents-in-a-python-unicode-string
import re
import unicodedata
def | (text):
"""
Strip accents from input String.
:param text: The input string.
:type text: String.
:returns: The processed String.
:rtype: String.
"""
try:
text = unicode(text, 'utf-8')
except NameError: # unicode is a default on python 3
pass
text = unicodedata.normalize('NFD', text)
text = text.encode('ascii', 'ignore')
text = text.decode("utf-8")
return str(text)
def text_to_id(text):
"""
Convert input text to id.
:param text: The input string.
:type text: String.
:returns: The processed String.
:rtype: String.
"""
text = strip_accents(text.lower())
text = re.sub('[ ]+', '_', text)
text = re.sub('[^0-9a-zA-Z_-]', '', text)
return text
| strip_accents | identifier_name |
test_data_source.py | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from heat.common import exception
from heat.common import template_format
from heat.engine.resources.openstack.sahara import data_source
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
data_source_template = """
heat_template_version: 2015-10-15
resources:
data-source:
type: OS::Sahara::DataSource
properties:
name: my-ds
type: swift
url: swift://container.sahara/text
credentials:
user: admin
password: swordfish
"""
class SaharaDataSourceTest(common.HeatTestCase):
def setUp(self):
super(SaharaDataSourceTest, self).setUp()
t = template_format.parse(data_source_template)
self.stack = utils.parse_stack(t)
resource_defns = self.stack.t.resource_definitions(self.stack)
self.rsrc_defn = resource_defns['data-source']
self.client = mock.Mock()
self.patchobject(data_source.DataSource, 'client',
return_value=self.client)
def _create_resource(self, name, snippet, stack):
ds = data_source.DataSource(name, snippet, stack)
value = mock.MagicMock(id='12345')
self.client.data_sources.create.return_value = value
scheduler.TaskRunner(ds.create)()
return ds
def test_create(self):
ds = self._create_resource('data-source', self.rsrc_defn, self.stack)
args = self.client.data_sources.create.call_args[1]
expected_args = {
'name': 'my-ds',
'description': '',
'data_source_type': 'swift',
'url': 'swift://container.sahara/text',
'credential_user': 'admin',
'credential_pass': 'swordfish'
}
self.assertEqual(expected_args, args)
self.assertEqual('12345', ds.resource_id)
expected_state = (ds.CREATE, ds.COMPLETE)
self.assertEqual(expected_state, ds.state)
def test_update(self):
|
def test_show_attribute(self):
ds = self._create_resource('data-source', self.rsrc_defn, self.stack)
value = mock.MagicMock()
value.to_dict.return_value = {'ds': 'info'}
self.client.data_sources.get.return_value = value
self.assertEqual({'ds': 'info'}, ds.FnGetAtt('show'))
def test_validate_password_without_user(self):
props = self.stack.t.t['resources']['data-source']['properties'].copy()
del props['credentials']['user']
self.rsrc_defn = self.rsrc_defn.freeze(properties=props)
ds = data_source.DataSource('data-source', self.rsrc_defn, self.stack)
ex = self.assertRaises(exception.StackValidationFailed, ds.validate)
error_msg = ('Property error: resources.data-source.properties.'
'credentials: Property user not assigned')
self.assertEqual(error_msg, str(ex))
| ds = self._create_resource('data-source', self.rsrc_defn,
self.stack)
props = self.stack.t.t['resources']['data-source']['properties'].copy()
props['type'] = 'hdfs'
props['url'] = 'my/path'
self.rsrc_defn = self.rsrc_defn.freeze(properties=props)
scheduler.TaskRunner(ds.update, self.rsrc_defn)()
data = {
'name': 'my-ds',
'description': '',
'type': 'hdfs',
'url': 'my/path',
'credentials': {
'user': 'admin',
'password': 'swordfish'
}
}
self.client.data_sources.update.assert_called_once_with(
'12345', data)
self.assertEqual((ds.UPDATE, ds.COMPLETE), ds.state) | identifier_body |
test_data_source.py | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from heat.common import exception
from heat.common import template_format
from heat.engine.resources.openstack.sahara import data_source
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
data_source_template = """
heat_template_version: 2015-10-15
resources:
data-source:
type: OS::Sahara::DataSource
properties:
name: my-ds
type: swift
url: swift://container.sahara/text
credentials:
user: admin
password: swordfish
"""
class | (common.HeatTestCase):
def setUp(self):
super(SaharaDataSourceTest, self).setUp()
t = template_format.parse(data_source_template)
self.stack = utils.parse_stack(t)
resource_defns = self.stack.t.resource_definitions(self.stack)
self.rsrc_defn = resource_defns['data-source']
self.client = mock.Mock()
self.patchobject(data_source.DataSource, 'client',
return_value=self.client)
def _create_resource(self, name, snippet, stack):
ds = data_source.DataSource(name, snippet, stack)
value = mock.MagicMock(id='12345')
self.client.data_sources.create.return_value = value
scheduler.TaskRunner(ds.create)()
return ds
def test_create(self):
ds = self._create_resource('data-source', self.rsrc_defn, self.stack)
args = self.client.data_sources.create.call_args[1]
expected_args = {
'name': 'my-ds',
'description': '',
'data_source_type': 'swift',
'url': 'swift://container.sahara/text',
'credential_user': 'admin',
'credential_pass': 'swordfish'
}
self.assertEqual(expected_args, args)
self.assertEqual('12345', ds.resource_id)
expected_state = (ds.CREATE, ds.COMPLETE)
self.assertEqual(expected_state, ds.state)
def test_update(self):
ds = self._create_resource('data-source', self.rsrc_defn,
self.stack)
props = self.stack.t.t['resources']['data-source']['properties'].copy()
props['type'] = 'hdfs'
props['url'] = 'my/path'
self.rsrc_defn = self.rsrc_defn.freeze(properties=props)
scheduler.TaskRunner(ds.update, self.rsrc_defn)()
data = {
'name': 'my-ds',
'description': '',
'type': 'hdfs',
'url': 'my/path',
'credentials': {
'user': 'admin',
'password': 'swordfish'
}
}
self.client.data_sources.update.assert_called_once_with(
'12345', data)
self.assertEqual((ds.UPDATE, ds.COMPLETE), ds.state)
def test_show_attribute(self):
ds = self._create_resource('data-source', self.rsrc_defn, self.stack)
value = mock.MagicMock()
value.to_dict.return_value = {'ds': 'info'}
self.client.data_sources.get.return_value = value
self.assertEqual({'ds': 'info'}, ds.FnGetAtt('show'))
def test_validate_password_without_user(self):
props = self.stack.t.t['resources']['data-source']['properties'].copy()
del props['credentials']['user']
self.rsrc_defn = self.rsrc_defn.freeze(properties=props)
ds = data_source.DataSource('data-source', self.rsrc_defn, self.stack)
ex = self.assertRaises(exception.StackValidationFailed, ds.validate)
error_msg = ('Property error: resources.data-source.properties.'
'credentials: Property user not assigned')
self.assertEqual(error_msg, str(ex))
| SaharaDataSourceTest | identifier_name |
test_data_source.py | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# | #
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from heat.common import exception
from heat.common import template_format
from heat.engine.resources.openstack.sahara import data_source
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
data_source_template = """
heat_template_version: 2015-10-15
resources:
data-source:
type: OS::Sahara::DataSource
properties:
name: my-ds
type: swift
url: swift://container.sahara/text
credentials:
user: admin
password: swordfish
"""
class SaharaDataSourceTest(common.HeatTestCase):
def setUp(self):
super(SaharaDataSourceTest, self).setUp()
t = template_format.parse(data_source_template)
self.stack = utils.parse_stack(t)
resource_defns = self.stack.t.resource_definitions(self.stack)
self.rsrc_defn = resource_defns['data-source']
self.client = mock.Mock()
self.patchobject(data_source.DataSource, 'client',
return_value=self.client)
def _create_resource(self, name, snippet, stack):
ds = data_source.DataSource(name, snippet, stack)
value = mock.MagicMock(id='12345')
self.client.data_sources.create.return_value = value
scheduler.TaskRunner(ds.create)()
return ds
def test_create(self):
ds = self._create_resource('data-source', self.rsrc_defn, self.stack)
args = self.client.data_sources.create.call_args[1]
expected_args = {
'name': 'my-ds',
'description': '',
'data_source_type': 'swift',
'url': 'swift://container.sahara/text',
'credential_user': 'admin',
'credential_pass': 'swordfish'
}
self.assertEqual(expected_args, args)
self.assertEqual('12345', ds.resource_id)
expected_state = (ds.CREATE, ds.COMPLETE)
self.assertEqual(expected_state, ds.state)
def test_update(self):
ds = self._create_resource('data-source', self.rsrc_defn,
self.stack)
props = self.stack.t.t['resources']['data-source']['properties'].copy()
props['type'] = 'hdfs'
props['url'] = 'my/path'
self.rsrc_defn = self.rsrc_defn.freeze(properties=props)
scheduler.TaskRunner(ds.update, self.rsrc_defn)()
data = {
'name': 'my-ds',
'description': '',
'type': 'hdfs',
'url': 'my/path',
'credentials': {
'user': 'admin',
'password': 'swordfish'
}
}
self.client.data_sources.update.assert_called_once_with(
'12345', data)
self.assertEqual((ds.UPDATE, ds.COMPLETE), ds.state)
def test_show_attribute(self):
ds = self._create_resource('data-source', self.rsrc_defn, self.stack)
value = mock.MagicMock()
value.to_dict.return_value = {'ds': 'info'}
self.client.data_sources.get.return_value = value
self.assertEqual({'ds': 'info'}, ds.FnGetAtt('show'))
def test_validate_password_without_user(self):
props = self.stack.t.t['resources']['data-source']['properties'].copy()
del props['credentials']['user']
self.rsrc_defn = self.rsrc_defn.freeze(properties=props)
ds = data_source.DataSource('data-source', self.rsrc_defn, self.stack)
ex = self.assertRaises(exception.StackValidationFailed, ds.validate)
error_msg = ('Property error: resources.data-source.properties.'
'credentials: Property user not assigned')
self.assertEqual(error_msg, str(ex)) | # http://www.apache.org/licenses/LICENSE-2.0 | random_line_split |
server.js | const fs = require('fs')
const path = require('path')
const LRU = require('lru-cache')
const express = require('express')
const favicon = require('serve-favicon')
const compression = require('compression')
const resolve = file => path.resolve(__dirname, file)
const { createBundleRenderer } = require('vue-server-renderer')
const isProd = process.env.NODE_ENV === 'production'
const useMicroCache = process.env.MICRO_CACHE !== 'false'
const serverInfo =
`express/${require('express/package.json').version} ` +
`vue-server-renderer/${require('vue-server-renderer/package.json').version}`
const app = express()
const template = fs.readFileSync(resolve('./src/index.html'), 'utf-8');
function createRenderer (bundle, options) {
// https://github.com/vuejs/vue/blob/dev/packages/vue-server-renderer/README.md#why-use-bundlerenderer
return createBundleRenderer(bundle, Object.assign(options, {
template,
// for component caching
cache: LRU({
max: 1000,
maxAge: 1000 * 60 * 15
}),
// this is only needed when vue-server-renderer is npm-linked
basedir: resolve('./dist'),
// recommended for performance
runInNewContext: false
}))
}
let renderer
let readyPromise
if (isProd) {
// In production: create server renderer using built server bundle.
// The server bundle is generated by vue-ssr-webpack-plugin.
const bundle = require('./dist/vue-ssr-server-bundle.json')
// The client manifest is optional, but it allows the renderer
// to automatically infer preload/prefetch links and directly add <script>
// tags for any async chunks used during render, avoiding waterfall requests.
const clientManifest = require('./dist/vue-ssr-client-manifest.json')
renderer = createRenderer(bundle, {
clientManifest
})
} else {
// In development: setup the dev server with watch and hot-reload,
// and create a new renderer on bundle / index template update.
readyPromise = require('./build/setup-dev-server')(app, (bundle, options) => {
renderer = createRenderer(bundle, options)
})
}
const serve = (path, cache) => express.static(resolve(path), {
maxAge: cache && isProd ? 1000 * 60 * 60 * 24 * 30 : 0
})
app.use(compression({ threshold: 0 }))
//app.use(favicon('./public/logo-48.png'))
app.use('/dist', serve('./dist', true))
app.use('/public', serve('./public', true)) | // 1-second microcache.
// https://www.nginx.com/blog/benefits-of-microcaching-nginx/
const microCache = LRU({
max: 100,
maxAge: 1000
})
// since this app has no user-specific content, every page is micro-cacheable.
// if your app involves user-specific content, you need to implement custom
// logic to determine whether a request is cacheable based on its url and
// headers.
const isCacheable = req => useMicroCache
function render (req, res) {
const s = Date.now()
res.setHeader("Content-Type", "text/html")
res.setHeader("Server", serverInfo)
const handleError = err => {
if (err.url) {
res.redirect(err.url)
} else if (err.code === 404) {
res.status(404).end('404 | Page Not Found')
} else {
// Render Error Page or Redirect
res.status(500).end('500 | Internal Server Error')
console.error(`error during render : ${req.url}`)
console.error(err.stack)
}
}
const cacheable = isCacheable(req)
if (cacheable) {
const hit = microCache.get(req.url)
if (hit) {
if (!isProd) {
console.log(`cache hit!`)
}
return res.end(hit)
}
}
const context = {
title: '交易虎_手机游戏交易平台_手游交易_帐号交易_游戏币交易_装备交易_道具交易_jiaoyihu', // default title
url: req.url
}
renderer.renderToString(context, (err, html) => {
debugger;
if (err) {
return handleError(err)
}
res.end(html)
if (cacheable) {
microCache.set(req.url, html)
}
if (!isProd) {
console.log(`whole request: ${Date.now() - s}ms`)
}
})
}
app.get('*', isProd ? render : (req, res) => {
readyPromise.then(() => render(req, res))
})
const port = process.env.PORT || 80;
app.listen(port, () => {
console.log(`server started at localhost:${port}`)
}) | app.use('/manifest.json', serve('./manifest.json', true))
app.use('/service-worker.js', serve('./dist/service-worker.js'))
| random_line_split |
server.js | const fs = require('fs')
const path = require('path')
const LRU = require('lru-cache')
const express = require('express')
const favicon = require('serve-favicon')
const compression = require('compression')
const resolve = file => path.resolve(__dirname, file)
const { createBundleRenderer } = require('vue-server-renderer')
const isProd = process.env.NODE_ENV === 'production'
const useMicroCache = process.env.MICRO_CACHE !== 'false'
const serverInfo =
`express/${require('express/package.json').version} ` +
`vue-server-renderer/${require('vue-server-renderer/package.json').version}`
const app = express()
const template = fs.readFileSync(resolve('./src/index.html'), 'utf-8');
function createRenderer (bundle, options) |
let renderer
let readyPromise
if (isProd) {
// In production: create server renderer using built server bundle.
// The server bundle is generated by vue-ssr-webpack-plugin.
const bundle = require('./dist/vue-ssr-server-bundle.json')
// The client manifest is optional, but it allows the renderer
// to automatically infer preload/prefetch links and directly add <script>
// tags for any async chunks used during render, avoiding waterfall requests.
const clientManifest = require('./dist/vue-ssr-client-manifest.json')
renderer = createRenderer(bundle, {
clientManifest
})
} else {
// In development: setup the dev server with watch and hot-reload,
// and create a new renderer on bundle / index template update.
readyPromise = require('./build/setup-dev-server')(app, (bundle, options) => {
renderer = createRenderer(bundle, options)
})
}
const serve = (path, cache) => express.static(resolve(path), {
maxAge: cache && isProd ? 1000 * 60 * 60 * 24 * 30 : 0
})
app.use(compression({ threshold: 0 }))
//app.use(favicon('./public/logo-48.png'))
app.use('/dist', serve('./dist', true))
app.use('/public', serve('./public', true))
app.use('/manifest.json', serve('./manifest.json', true))
app.use('/service-worker.js', serve('./dist/service-worker.js'))
// 1-second microcache.
// https://www.nginx.com/blog/benefits-of-microcaching-nginx/
const microCache = LRU({
max: 100,
maxAge: 1000
})
// since this app has no user-specific content, every page is micro-cacheable.
// if your app involves user-specific content, you need to implement custom
// logic to determine whether a request is cacheable based on its url and
// headers.
const isCacheable = req => useMicroCache
function render (req, res) {
const s = Date.now()
res.setHeader("Content-Type", "text/html")
res.setHeader("Server", serverInfo)
const handleError = err => {
if (err.url) {
res.redirect(err.url)
} else if (err.code === 404) {
res.status(404).end('404 | Page Not Found')
} else {
// Render Error Page or Redirect
res.status(500).end('500 | Internal Server Error')
console.error(`error during render : ${req.url}`)
console.error(err.stack)
}
}
const cacheable = isCacheable(req)
if (cacheable) {
const hit = microCache.get(req.url)
if (hit) {
if (!isProd) {
console.log(`cache hit!`)
}
return res.end(hit)
}
}
const context = {
title: '交易虎_手机游戏交易平台_手游交易_帐号交易_游戏币交易_装备交易_道具交易_jiaoyihu', // default title
url: req.url
}
renderer.renderToString(context, (err, html) => {
debugger;
if (err) {
return handleError(err)
}
res.end(html)
if (cacheable) {
microCache.set(req.url, html)
}
if (!isProd) {
console.log(`whole request: ${Date.now() - s}ms`)
}
})
}
app.get('*', isProd ? render : (req, res) => {
readyPromise.then(() => render(req, res))
})
const port = process.env.PORT || 80;
app.listen(port, () => {
console.log(`server started at localhost:${port}`)
})
| {
// https://github.com/vuejs/vue/blob/dev/packages/vue-server-renderer/README.md#why-use-bundlerenderer
return createBundleRenderer(bundle, Object.assign(options, {
template,
// for component caching
cache: LRU({
max: 1000,
maxAge: 1000 * 60 * 15
}),
// this is only needed when vue-server-renderer is npm-linked
basedir: resolve('./dist'),
// recommended for performance
runInNewContext: false
}))
} | identifier_body |
server.js | const fs = require('fs')
const path = require('path')
const LRU = require('lru-cache')
const express = require('express')
const favicon = require('serve-favicon')
const compression = require('compression')
const resolve = file => path.resolve(__dirname, file)
const { createBundleRenderer } = require('vue-server-renderer')
const isProd = process.env.NODE_ENV === 'production'
const useMicroCache = process.env.MICRO_CACHE !== 'false'
const serverInfo =
`express/${require('express/package.json').version} ` +
`vue-server-renderer/${require('vue-server-renderer/package.json').version}`
const app = express()
const template = fs.readFileSync(resolve('./src/index.html'), 'utf-8');
function createRenderer (bundle, options) {
// https://github.com/vuejs/vue/blob/dev/packages/vue-server-renderer/README.md#why-use-bundlerenderer
return createBundleRenderer(bundle, Object.assign(options, {
template,
// for component caching
cache: LRU({
max: 1000,
maxAge: 1000 * 60 * 15
}),
// this is only needed when vue-server-renderer is npm-linked
basedir: resolve('./dist'),
// recommended for performance
runInNewContext: false
}))
}
let renderer
let readyPromise
if (isProd) {
// In production: create server renderer using built server bundle.
// The server bundle is generated by vue-ssr-webpack-plugin.
const bundle = require('./dist/vue-ssr-server-bundle.json')
// The client manifest is optional, but it allows the renderer
// to automatically infer preload/prefetch links and directly add <script>
// tags for any async chunks used during render, avoiding waterfall requests.
const clientManifest = require('./dist/vue-ssr-client-manifest.json')
renderer = createRenderer(bundle, {
clientManifest
})
} else {
// In development: setup the dev server with watch and hot-reload,
// and create a new renderer on bundle / index template update.
readyPromise = require('./build/setup-dev-server')(app, (bundle, options) => {
renderer = createRenderer(bundle, options)
})
}
const serve = (path, cache) => express.static(resolve(path), {
maxAge: cache && isProd ? 1000 * 60 * 60 * 24 * 30 : 0
})
app.use(compression({ threshold: 0 }))
//app.use(favicon('./public/logo-48.png'))
app.use('/dist', serve('./dist', true))
app.use('/public', serve('./public', true))
app.use('/manifest.json', serve('./manifest.json', true))
app.use('/service-worker.js', serve('./dist/service-worker.js'))
// 1-second microcache.
// https://www.nginx.com/blog/benefits-of-microcaching-nginx/
const microCache = LRU({
max: 100,
maxAge: 1000
})
// since this app has no user-specific content, every page is micro-cacheable.
// if your app involves user-specific content, you need to implement custom
// logic to determine whether a request is cacheable based on its url and
// headers.
const isCacheable = req => useMicroCache
function | (req, res) {
const s = Date.now()
res.setHeader("Content-Type", "text/html")
res.setHeader("Server", serverInfo)
const handleError = err => {
if (err.url) {
res.redirect(err.url)
} else if (err.code === 404) {
res.status(404).end('404 | Page Not Found')
} else {
// Render Error Page or Redirect
res.status(500).end('500 | Internal Server Error')
console.error(`error during render : ${req.url}`)
console.error(err.stack)
}
}
const cacheable = isCacheable(req)
if (cacheable) {
const hit = microCache.get(req.url)
if (hit) {
if (!isProd) {
console.log(`cache hit!`)
}
return res.end(hit)
}
}
const context = {
title: '交易虎_手机游戏交易平台_手游交易_帐号交易_游戏币交易_装备交易_道具交易_jiaoyihu', // default title
url: req.url
}
renderer.renderToString(context, (err, html) => {
debugger;
if (err) {
return handleError(err)
}
res.end(html)
if (cacheable) {
microCache.set(req.url, html)
}
if (!isProd) {
console.log(`whole request: ${Date.now() - s}ms`)
}
})
}
app.get('*', isProd ? render : (req, res) => {
readyPromise.then(() => render(req, res))
})
const port = process.env.PORT || 80;
app.listen(port, () => {
console.log(`server started at localhost:${port}`)
})
| render | identifier_name |
setext_header.rs | use parser::span::parse_spans;
use parser::Block;
use parser::Block::Header;
use regex::Regex;
pub fn parse_setext_header(lines: &[&str]) -> Option<(Block, usize)> {
lazy_static! {
static ref HORIZONTAL_RULE_1: Regex = Regex::new(r"^===+$").unwrap();
static ref HORIZONTAL_RULE_2: Regex = Regex::new(r"^---+$").unwrap();
}
if lines.len() > 1 && !lines[0].is_empty() {
if HORIZONTAL_RULE_1.is_match(lines[1]) {
return Some((Header(parse_spans(lines[0]), 1), 2));
} else if HORIZONTAL_RULE_2.is_match(lines[1]) {
return Some((Header(parse_spans(lines[0]), 2), 2));
}
}
None
}
#[cfg(test)]
mod test {
use super::parse_setext_header;
use parser::Block::Header;
use parser::Span::Text;
#[test]
fn | () {
assert_eq!(
parse_setext_header(&vec!["Test", "=========="]).unwrap(),
(Header(vec![Text("Test".to_owned())], 1), 2)
);
assert_eq!(
parse_setext_header(&vec!["Test", "----------"]).unwrap(),
(Header(vec![Text("Test".to_owned())], 2), 2)
);
assert_eq!(
parse_setext_header(&vec!["This is a test", "==="]).unwrap(),
(Header(vec![Text("This is a test".to_owned())], 1), 2)
);
assert_eq!(
parse_setext_header(&vec!["This is a test", "---"]).unwrap(),
(Header(vec![Text("This is a test".to_owned())], 2), 2)
);
}
}
| finds_atx_header | identifier_name |
setext_header.rs | use parser::span::parse_spans;
use parser::Block;
use parser::Block::Header;
use regex::Regex;
pub fn parse_setext_header(lines: &[&str]) -> Option<(Block, usize)> |
#[cfg(test)]
mod test {
use super::parse_setext_header;
use parser::Block::Header;
use parser::Span::Text;
#[test]
fn finds_atx_header() {
assert_eq!(
parse_setext_header(&vec!["Test", "=========="]).unwrap(),
(Header(vec![Text("Test".to_owned())], 1), 2)
);
assert_eq!(
parse_setext_header(&vec!["Test", "----------"]).unwrap(),
(Header(vec![Text("Test".to_owned())], 2), 2)
);
assert_eq!(
parse_setext_header(&vec!["This is a test", "==="]).unwrap(),
(Header(vec![Text("This is a test".to_owned())], 1), 2)
);
assert_eq!(
parse_setext_header(&vec!["This is a test", "---"]).unwrap(),
(Header(vec![Text("This is a test".to_owned())], 2), 2)
);
}
}
| {
lazy_static! {
static ref HORIZONTAL_RULE_1: Regex = Regex::new(r"^===+$").unwrap();
static ref HORIZONTAL_RULE_2: Regex = Regex::new(r"^---+$").unwrap();
}
if lines.len() > 1 && !lines[0].is_empty() {
if HORIZONTAL_RULE_1.is_match(lines[1]) {
return Some((Header(parse_spans(lines[0]), 1), 2));
} else if HORIZONTAL_RULE_2.is_match(lines[1]) {
return Some((Header(parse_spans(lines[0]), 2), 2));
}
}
None
} | identifier_body |
setext_header.rs | use parser::span::parse_spans;
use parser::Block;
use parser::Block::Header;
use regex::Regex;
pub fn parse_setext_header(lines: &[&str]) -> Option<(Block, usize)> {
lazy_static! {
static ref HORIZONTAL_RULE_1: Regex = Regex::new(r"^===+$").unwrap();
static ref HORIZONTAL_RULE_2: Regex = Regex::new(r"^---+$").unwrap();
}
if lines.len() > 1 && !lines[0].is_empty() {
if HORIZONTAL_RULE_1.is_match(lines[1]) {
return Some((Header(parse_spans(lines[0]), 1), 2));
} else if HORIZONTAL_RULE_2.is_match(lines[1]) {
return Some((Header(parse_spans(lines[0]), 2), 2));
}
}
None
}
#[cfg(test)]
mod test {
use super::parse_setext_header;
use parser::Block::Header;
use parser::Span::Text;
#[test]
fn finds_atx_header() {
assert_eq!(
parse_setext_header(&vec!["Test", "=========="]).unwrap(),
(Header(vec![Text("Test".to_owned())], 1), 2)
);
assert_eq!(
parse_setext_header(&vec!["Test", "----------"]).unwrap(),
(Header(vec![Text("Test".to_owned())], 2), 2) |
assert_eq!(
parse_setext_header(&vec!["This is a test", "==="]).unwrap(),
(Header(vec![Text("This is a test".to_owned())], 1), 2)
);
assert_eq!(
parse_setext_header(&vec!["This is a test", "---"]).unwrap(),
(Header(vec![Text("This is a test".to_owned())], 2), 2)
);
}
} | ); | random_line_split |
setext_header.rs | use parser::span::parse_spans;
use parser::Block;
use parser::Block::Header;
use regex::Regex;
pub fn parse_setext_header(lines: &[&str]) -> Option<(Block, usize)> {
lazy_static! {
static ref HORIZONTAL_RULE_1: Regex = Regex::new(r"^===+$").unwrap();
static ref HORIZONTAL_RULE_2: Regex = Regex::new(r"^---+$").unwrap();
}
if lines.len() > 1 && !lines[0].is_empty() {
if HORIZONTAL_RULE_1.is_match(lines[1]) | else if HORIZONTAL_RULE_2.is_match(lines[1]) {
return Some((Header(parse_spans(lines[0]), 2), 2));
}
}
None
}
#[cfg(test)]
mod test {
use super::parse_setext_header;
use parser::Block::Header;
use parser::Span::Text;
#[test]
fn finds_atx_header() {
assert_eq!(
parse_setext_header(&vec!["Test", "=========="]).unwrap(),
(Header(vec![Text("Test".to_owned())], 1), 2)
);
assert_eq!(
parse_setext_header(&vec!["Test", "----------"]).unwrap(),
(Header(vec![Text("Test".to_owned())], 2), 2)
);
assert_eq!(
parse_setext_header(&vec!["This is a test", "==="]).unwrap(),
(Header(vec![Text("This is a test".to_owned())], 1), 2)
);
assert_eq!(
parse_setext_header(&vec!["This is a test", "---"]).unwrap(),
(Header(vec![Text("This is a test".to_owned())], 2), 2)
);
}
}
| {
return Some((Header(parse_spans(lines[0]), 1), 2));
} | conditional_block |
vec-matching-legal-tail-element-borrow.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub fn main() | {
let x = &[1i, 2, 3, 4, 5];
let x: &[int] = &[1, 2, 3, 4, 5];
if !x.is_empty() {
let el = match x {
[1, ..ref tail] => &tail[0],
_ => unreachable!()
};
println!("{}", *el);
}
} | identifier_body |
|
vec-matching-legal-tail-element-borrow.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub fn | () {
let x = &[1i, 2, 3, 4, 5];
let x: &[int] = &[1, 2, 3, 4, 5];
if !x.is_empty() {
let el = match x {
[1, ..ref tail] => &tail[0],
_ => unreachable!()
};
println!("{}", *el);
}
}
| main | identifier_name |
vec-matching-legal-tail-element-borrow.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub fn main() {
let x = &[1i, 2, 3, 4, 5];
let x: &[int] = &[1, 2, 3, 4, 5];
if !x.is_empty() {
let el = match x {
[1, ..ref tail] => &tail[0],
_ => unreachable!()
};
println!("{}", *el); | }
} | random_line_split |
|
app.js | const Koa = require('koa')
const app = new Koa()
const views = require('koa-views')
const json = require('koa-json')
const onerror = require('koa-onerror')
const bodyparser = require('koa-bodyparser')
const logger = require('koa-logger')
const index = require('./routes/index')
const users = require('./routes/users') | // api代理
const proxy = require('koa-proxies')
const httpsProxyAgent = require('https-proxy-agent')
app.use(proxy('/api', {
target: 'http://172.16.8.197:8081',
changeOrigin: true,
logs: true,
rewrite: path => path.replace(/^\/api/g, '')
// agent: new httpsProxyAgent('http://172.16.8.197:8081'),
// rewrite: path => path.replace(/^\/api(\/|\/\w+)?$/, '')
}))
// Connect to the database
const db = require('mysql');
// var connection = db.createConnection({
// host: '172.16.8.191:3306',
// // host: '172.16.8.191:3306/pudong',
// user: 'root',
// password: 'ipudong',
// database: 'ceshi'
// })
var connection = db.createConnection(config.mysql)
connection.connect(function(err) {
if (err) {
console.error('error connecting: ' + err.stack);
return;
}
console.log('connected as id ' + connection.threadId);
});
global.connection = connection;
global.sf = require('./boot.js');
// error handler
onerror(app)
// middlewares
app.use(bodyparser({
enableTypes:['json', 'form', 'text']
}))
app.use(json())
app.use(logger())
// Render pages with server-side routing
// app.use(require('koa-static')(__dirname + '/public'))
// app.use(views(__dirname + '/views', {
// extension: 'pug'
// }))
// Render pages separately with front-end routing
// app.use(require('koa-static')(__dirname + '/dist'))
// app.use(views(__dirname + '/dist'))
// Use front-end routing with separate front and back ends; Node acts as the back end and proxies access to the other back-end APIs
app.use(require('koa-static')(__dirname + '/fronts'))
app.use(views(__dirname + '/fronts'))
// logger
app.use(async (ctx, next) => {
const start = new Date()
await next()
const ms = new Date() - start
console.log(`${ctx.method} ${ctx.url} - ${ms}ms`)
})
// routes
app.use(index.routes(), index.allowedMethods())
app.use(users.routes(), users.allowedMethods())
module.exports = app |
const config = require('./config.js')
| random_line_split |
app.js | const Koa = require('koa')
const app = new Koa()
const views = require('koa-views')
const json = require('koa-json')
const onerror = require('koa-onerror')
const bodyparser = require('koa-bodyparser')
const logger = require('koa-logger')
const index = require('./routes/index')
const users = require('./routes/users')
const config = require('./config.js')
// API proxy
const proxy = require('koa-proxies')
const httpsProxyAgent = require('https-proxy-agent')
app.use(proxy('/api', {
target: 'http://172.16.8.197:8081',
changeOrigin: true,
logs: true,
rewrite: path => path.replace(/^\/api/g, '')
// agent: new httpsProxyAgent('http://172.16.8.197:8081'),
// rewrite: path => path.replace(/^\/api(\/|\/\w+)?$/, '')
}))
// Connect to the database
const db = require('mysql');
// var connection = db.createConnection({
// host: '172.16.8.191:3306',
// // host: '172.16.8.191:3306/pudong',
// user: 'root',
// password: 'ipudong',
// database: 'ceshi'
// })
var connection = db.createConnection(config.mysql)
connection.connect(function(err) {
if (err) {
console. | ('connected as id ' + connection.threadId);
});
global.connection = connection;
global.sf = require('./boot.js');
// error handler
onerror(app)
// middlewares
app.use(bodyparser({
enableTypes:['json', 'form', 'text']
}))
app.use(json())
app.use(logger())
// Render pages with server-side routing
// app.use(require('koa-static')(__dirname + '/public'))
// app.use(views(__dirname + '/views', {
// extension: 'pug'
// }))
// Render pages separately with front-end routing
// app.use(require('koa-static')(__dirname + '/dist'))
// app.use(views(__dirname + '/dist'))
// Use front-end routing with separate front and back ends; Node acts as the back end and proxies access to the other back-end APIs
app.use(require('koa-static')(__dirname + '/fronts'))
app.use(views(__dirname + '/fronts'))
// logger
app.use(async (ctx, next) => {
const start = new Date()
await next()
const ms = new Date() - start
console.log(`${ctx.method} ${ctx.url} - ${ms}ms`)
})
// routes
app.use(index.routes(), index.allowedMethods())
app.use(users.routes(), users.allowedMethods())
module.exports = app
| error('error connecting: ' + err.stack);
return;
}
console.log | conditional_block |
test_lib_trees.py |
from synapse.tests.common import *
import synapse.lib.trees as s_trees
class TreeTest(SynTest):
def test_lib_tree_interval(self):
| names = [ival[1].get('name') for ival in itree.get(80)]
self.eq(names, ['bar', 'baz'])
# test an exact overlap on max
names = [ival[1].get('name') for ival in itree.get(100)]
self.eq(names, ['bar', 'baz'])
self.eq(itree.get(-31), [])
self.eq(itree.get(101), [])
self.eq(itree.get(0xffffffff), [])
| ivals = (
((-30, 50), {'name': 'foo'}),
((30, 100), {'name': 'bar'}),
((80, 100), {'name': 'baz'}),
)
itree = s_trees.IntervalTree(ivals)
#import pprint
#pprint.pprint(itree.root)
# test a multi-level overlap
names = [ival[1].get('name') for ival in itree.get(32)]
self.eq(names, ['foo', 'bar'])
# 90 ends up as a center in the tree...
names = [ival[1].get('name') for ival in itree.get(90)]
self.eq(names, ['bar', 'baz'])
# test an exact overlap on min | identifier_body |
test_lib_trees.py |
from synapse.tests.common import *
import synapse.lib.trees as s_trees
class TreeTest(SynTest):
def | (self):
ivals = (
((-30, 50), {'name': 'foo'}),
((30, 100), {'name': 'bar'}),
((80, 100), {'name': 'baz'}),
)
itree = s_trees.IntervalTree(ivals)
#import pprint
#pprint.pprint(itree.root)
# test a multi-level overlap
names = [ival[1].get('name') for ival in itree.get(32)]
self.eq(names, ['foo', 'bar'])
# 90 ends up as a center in the tree...
names = [ival[1].get('name') for ival in itree.get(90)]
self.eq(names, ['bar', 'baz'])
# test an exact overlap on min
names = [ival[1].get('name') for ival in itree.get(80)]
self.eq(names, ['bar', 'baz'])
# test an exact overlap on max
names = [ival[1].get('name') for ival in itree.get(100)]
self.eq(names, ['bar', 'baz'])
self.eq(itree.get(-31), [])
self.eq(itree.get(101), [])
self.eq(itree.get(0xffffffff), [])
| test_lib_tree_interval | identifier_name |
test_lib_trees.py | from synapse.tests.common import *
import synapse.lib.trees as s_trees
class TreeTest(SynTest):
def test_lib_tree_interval(self):
ivals = (
((-30, 50), {'name': 'foo'}),
((30, 100), {'name': 'bar'}),
((80, 100), {'name': 'baz'}),
)
itree = s_trees.IntervalTree(ivals)
#import pprint
#pprint.pprint(itree.root)
# test a multi-level overlap
names = [ival[1].get('name') for ival in itree.get(32)]
self.eq(names, ['foo', 'bar'])
# 90 ends up as a center in the tree...
names = [ival[1].get('name') for ival in itree.get(90)]
self.eq(names, ['bar', 'baz'])
# test an exact overlap on min
names = [ival[1].get('name') for ival in itree.get(80)]
self.eq(names, ['bar', 'baz'])
# test an exact overlap on max
names = [ival[1].get('name') for ival in itree.get(100)]
self.eq(names, ['bar', 'baz'])
self.eq(itree.get(-31), []) | self.eq(itree.get(101), [])
self.eq(itree.get(0xffffffff), []) | random_line_split |
|
hangouts.py | """
Hangouts notification service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.hangouts/
"""
import logging
import voluptuous as vol
from homeassistant.components.notify import (ATTR_TARGET, PLATFORM_SCHEMA,
BaseNotificationService,
ATTR_MESSAGE, ATTR_DATA)
from homeassistant.components.hangouts.const \
import (DOMAIN, SERVICE_SEND_MESSAGE, TARGETS_SCHEMA,
CONF_DEFAULT_CONVERSATIONS)
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = [DOMAIN]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_DEFAULT_CONVERSATIONS): [TARGETS_SCHEMA]
})
def get_service(hass, config, discovery_info=None):
"""Get the Hangouts notification service."""
return HangoutsNotificationService(config.get(CONF_DEFAULT_CONVERSATIONS))
class HangoutsNotificationService(BaseNotificationService):
"""Send Notifications to Hangouts conversations."""
def __init__(self, default_conversations):
"""Set up the notification service."""
self._default_conversations = default_conversations
def send_message(self, message="", **kwargs):
"""Send the message to the Google Hangouts server."""
target_conversations = None
if ATTR_TARGET in kwargs:
target_conversations = [] |
messages = []
if 'title' in kwargs:
messages.append({'text': kwargs['title'], 'is_bold': True})
messages.append({'text': message, 'parse_str': True})
service_data = {
ATTR_TARGET: target_conversations,
ATTR_MESSAGE: messages,
}
if kwargs[ATTR_DATA]:
service_data[ATTR_DATA] = kwargs[ATTR_DATA]
return self.hass.services.call(
DOMAIN, SERVICE_SEND_MESSAGE, service_data=service_data) | for target in kwargs.get(ATTR_TARGET):
target_conversations.append({'id': target})
else:
target_conversations = self._default_conversations | random_line_split |
hangouts.py | """
Hangouts notification service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.hangouts/
"""
import logging
import voluptuous as vol
from homeassistant.components.notify import (ATTR_TARGET, PLATFORM_SCHEMA,
BaseNotificationService,
ATTR_MESSAGE, ATTR_DATA)
from homeassistant.components.hangouts.const \
import (DOMAIN, SERVICE_SEND_MESSAGE, TARGETS_SCHEMA,
CONF_DEFAULT_CONVERSATIONS)
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = [DOMAIN]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_DEFAULT_CONVERSATIONS): [TARGETS_SCHEMA]
})
def | (hass, config, discovery_info=None):
"""Get the Hangouts notification service."""
return HangoutsNotificationService(config.get(CONF_DEFAULT_CONVERSATIONS))
class HangoutsNotificationService(BaseNotificationService):
"""Send Notifications to Hangouts conversations."""
def __init__(self, default_conversations):
"""Set up the notification service."""
self._default_conversations = default_conversations
def send_message(self, message="", **kwargs):
"""Send the message to the Google Hangouts server."""
target_conversations = None
if ATTR_TARGET in kwargs:
target_conversations = []
for target in kwargs.get(ATTR_TARGET):
target_conversations.append({'id': target})
else:
target_conversations = self._default_conversations
messages = []
if 'title' in kwargs:
messages.append({'text': kwargs['title'], 'is_bold': True})
messages.append({'text': message, 'parse_str': True})
service_data = {
ATTR_TARGET: target_conversations,
ATTR_MESSAGE: messages,
}
if kwargs[ATTR_DATA]:
service_data[ATTR_DATA] = kwargs[ATTR_DATA]
return self.hass.services.call(
DOMAIN, SERVICE_SEND_MESSAGE, service_data=service_data)
| get_service | identifier_name |
hangouts.py | """
Hangouts notification service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.hangouts/
"""
import logging
import voluptuous as vol
from homeassistant.components.notify import (ATTR_TARGET, PLATFORM_SCHEMA,
BaseNotificationService,
ATTR_MESSAGE, ATTR_DATA)
from homeassistant.components.hangouts.const \
import (DOMAIN, SERVICE_SEND_MESSAGE, TARGETS_SCHEMA,
CONF_DEFAULT_CONVERSATIONS)
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = [DOMAIN]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_DEFAULT_CONVERSATIONS): [TARGETS_SCHEMA]
})
def get_service(hass, config, discovery_info=None):
"""Get the Hangouts notification service."""
return HangoutsNotificationService(config.get(CONF_DEFAULT_CONVERSATIONS))
class HangoutsNotificationService(BaseNotificationService):
"""Send Notifications to Hangouts conversations."""
def __init__(self, default_conversations):
"""Set up the notification service."""
self._default_conversations = default_conversations
def send_message(self, message="", **kwargs):
|
return self.hass.services.call(
DOMAIN, SERVICE_SEND_MESSAGE, service_data=service_data)
| """Send the message to the Google Hangouts server."""
target_conversations = None
if ATTR_TARGET in kwargs:
target_conversations = []
for target in kwargs.get(ATTR_TARGET):
target_conversations.append({'id': target})
else:
target_conversations = self._default_conversations
messages = []
if 'title' in kwargs:
messages.append({'text': kwargs['title'], 'is_bold': True})
messages.append({'text': message, 'parse_str': True})
service_data = {
ATTR_TARGET: target_conversations,
ATTR_MESSAGE: messages,
}
if kwargs[ATTR_DATA]:
service_data[ATTR_DATA] = kwargs[ATTR_DATA] | identifier_body |
hangouts.py | """
Hangouts notification service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.hangouts/
"""
import logging
import voluptuous as vol
from homeassistant.components.notify import (ATTR_TARGET, PLATFORM_SCHEMA,
BaseNotificationService,
ATTR_MESSAGE, ATTR_DATA)
from homeassistant.components.hangouts.const \
import (DOMAIN, SERVICE_SEND_MESSAGE, TARGETS_SCHEMA,
CONF_DEFAULT_CONVERSATIONS)
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = [DOMAIN]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_DEFAULT_CONVERSATIONS): [TARGETS_SCHEMA]
})
def get_service(hass, config, discovery_info=None):
"""Get the Hangouts notification service."""
return HangoutsNotificationService(config.get(CONF_DEFAULT_CONVERSATIONS))
class HangoutsNotificationService(BaseNotificationService):
"""Send Notifications to Hangouts conversations."""
def __init__(self, default_conversations):
"""Set up the notification service."""
self._default_conversations = default_conversations
def send_message(self, message="", **kwargs):
"""Send the message to the Google Hangouts server."""
target_conversations = None
if ATTR_TARGET in kwargs:
target_conversations = []
for target in kwargs.get(ATTR_TARGET):
|
else:
target_conversations = self._default_conversations
messages = []
if 'title' in kwargs:
messages.append({'text': kwargs['title'], 'is_bold': True})
messages.append({'text': message, 'parse_str': True})
service_data = {
ATTR_TARGET: target_conversations,
ATTR_MESSAGE: messages,
}
if kwargs[ATTR_DATA]:
service_data[ATTR_DATA] = kwargs[ATTR_DATA]
return self.hass.services.call(
DOMAIN, SERVICE_SEND_MESSAGE, service_data=service_data)
| target_conversations.append({'id': target}) | conditional_block |
list_companies.py | #!/usr/bin/env python
# Copyright 2019 Google, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from googleapiclient.discovery import build
from googleapiclient.errors import Error
client_service = build('jobs', 'v3')
project_id = 'projects/' + os.environ['GOOGLE_CLOUD_PROJECT']
try:
# Execute a query to list all companies. Only query parameter in this case
# is the parent project.
response = client_service.projects().companies().list(
parent=project_id).execute()
if response.get('companies') is not None:
print('Companies:')
for company in response.get('companies'):
print('%s: %s, %s' % (company.get('displayName'),
company.get('name'), company.get('size')))
else:
|
except Error as e:
print('Got exception while listing companies')
raise e
| print('No companies') | conditional_block |
list_companies.py | #!/usr/bin/env python
# Copyright 2019 Google, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from googleapiclient.discovery import build
from googleapiclient.errors import Error
client_service = build('jobs', 'v3')
project_id = 'projects/' + os.environ['GOOGLE_CLOUD_PROJECT']
try:
# Execute a query to list all companies. Only query parameter in this case
# is the parent project.
response = client_service.projects().companies().list(
parent=project_id).execute()
if response.get('companies') is not None:
print('Companies:')
for company in response.get('companies'):
print('%s: %s, %s' % (company.get('displayName'),
company.get('name'), company.get('size')))
else:
print('No companies')
except Error as e:
print('Got exception while listing companies')
raise e | #
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, | random_line_split |
health-check.ts | /*
* This file is part of ndb-core.
*
* ndb-core is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* ndb-core is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with ndb-core. If not, see <http://www.gnu.org/licenses/>.
*/
import { Entity } from "../../../core/entity/model/entity";
import { DatabaseEntity } from "../../../core/entity/database-entity.decorator";
import { DatabaseField } from "../../../core/entity/database-field.decorator";
import { WarningLevel } from "../../../core/entity/model/warning-level";
/**
* Model Class for the Health Checks that are taken for a Child.
 * It stores the Child's ID as a string, the height in cm and the weight in kg as numbers, and the date of the check
*/
@DatabaseEntity("HealthCheck")
export class HealthCheck extends Entity {
@DatabaseField() child: string;
@DatabaseField({ label: $localize`:Label for date of a health check:Date` })
date: Date;
/** height measurement in cm **/
@DatabaseField({
label: $localize`:Label for height in cm of a health check:Height [cm]`,
viewComponent: "DisplayUnit",
additional: "cm",
})
height: number;
/** weight measurement in kg **/
@DatabaseField({
label: $localize`:Label for weight in kg of a health check:Weight [kg]`,
viewComponent: "DisplayUnit",
additional: "kg",
})
weight: number;
get bmi(): number {
return this.weight / ((this.height / 100) * (this.height / 100)); | } else if (this.bmi >= 18 && this.bmi <= 25) {
return WarningLevel.OK;
} else {
return WarningLevel.WARNING;
}
}
} | }
getWarningLevel(): WarningLevel {
if (this.bmi <= 16 || this.bmi >= 30) {
return WarningLevel.URGENT; | random_line_split |
health-check.ts | /*
* This file is part of ndb-core.
*
* ndb-core is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* ndb-core is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with ndb-core. If not, see <http://www.gnu.org/licenses/>.
*/
import { Entity } from "../../../core/entity/model/entity";
import { DatabaseEntity } from "../../../core/entity/database-entity.decorator";
import { DatabaseField } from "../../../core/entity/database-field.decorator";
import { WarningLevel } from "../../../core/entity/model/warning-level";
/**
* Model Class for the Health Checks that are taken for a Child.
 * It stores the Child's ID as a string, the height in cm and the weight in kg as numbers, and the date of the check
*/
@DatabaseEntity("HealthCheck")
export class HealthCheck extends Entity {
@DatabaseField() child: string;
@DatabaseField({ label: $localize`:Label for date of a health check:Date` })
date: Date;
/** height measurement in cm **/
@DatabaseField({
label: $localize`:Label for height in cm of a health check:Height [cm]`,
viewComponent: "DisplayUnit",
additional: "cm",
})
height: number;
/** weight measurement in kg **/
@DatabaseField({
label: $localize`:Label for weight in kg of a health check:Weight [kg]`,
viewComponent: "DisplayUnit",
additional: "kg",
})
weight: number;
get bmi(): number {
return this.weight / ((this.height / 100) * (this.height / 100));
}
| (): WarningLevel {
if (this.bmi <= 16 || this.bmi >= 30) {
return WarningLevel.URGENT;
} else if (this.bmi >= 18 && this.bmi <= 25) {
return WarningLevel.OK;
} else {
return WarningLevel.WARNING;
}
}
}
| getWarningLevel | identifier_name |
health-check.ts | /*
* This file is part of ndb-core.
*
* ndb-core is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* ndb-core is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with ndb-core. If not, see <http://www.gnu.org/licenses/>.
*/
import { Entity } from "../../../core/entity/model/entity";
import { DatabaseEntity } from "../../../core/entity/database-entity.decorator";
import { DatabaseField } from "../../../core/entity/database-field.decorator";
import { WarningLevel } from "../../../core/entity/model/warning-level";
/**
* Model Class for the Health Checks that are taken for a Child.
 * It stores the Child's ID as a string, the height in cm and the weight in kg as numbers, and the date of the check
*/
@DatabaseEntity("HealthCheck")
export class HealthCheck extends Entity {
@DatabaseField() child: string;
@DatabaseField({ label: $localize`:Label for date of a health check:Date` })
date: Date;
/** height measurement in cm **/
@DatabaseField({
label: $localize`:Label for height in cm of a health check:Height [cm]`,
viewComponent: "DisplayUnit",
additional: "cm",
})
height: number;
/** weight measurement in kg **/
@DatabaseField({
label: $localize`:Label for weight in kg of a health check:Weight [kg]`,
viewComponent: "DisplayUnit",
additional: "kg",
})
weight: number;
get bmi(): number {
return this.weight / ((this.height / 100) * (this.height / 100));
}
getWarningLevel(): WarningLevel {
if (this.bmi <= 16 || this.bmi >= 30) {
return WarningLevel.URGENT;
} else if (this.bmi >= 18 && this.bmi <= 25) | else {
return WarningLevel.WARNING;
}
}
}
| {
return WarningLevel.OK;
} | conditional_block |
health-check.ts | /*
* This file is part of ndb-core.
*
* ndb-core is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* ndb-core is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with ndb-core. If not, see <http://www.gnu.org/licenses/>.
*/
import { Entity } from "../../../core/entity/model/entity";
import { DatabaseEntity } from "../../../core/entity/database-entity.decorator";
import { DatabaseField } from "../../../core/entity/database-field.decorator";
import { WarningLevel } from "../../../core/entity/model/warning-level";
/**
* Model Class for the Health Checks that are taken for a Child.
 * It stores the Child's ID as a string, the height in cm and the weight in kg as numbers, and the date of the check
*/
@DatabaseEntity("HealthCheck")
export class HealthCheck extends Entity {
@DatabaseField() child: string;
@DatabaseField({ label: $localize`:Label for date of a health check:Date` })
date: Date;
/** height measurement in cm **/
@DatabaseField({
label: $localize`:Label for height in cm of a health check:Height [cm]`,
viewComponent: "DisplayUnit",
additional: "cm",
})
height: number;
/** weight measurement in kg **/
@DatabaseField({
label: $localize`:Label for weight in kg of a health check:Weight [kg]`,
viewComponent: "DisplayUnit",
additional: "kg",
})
weight: number;
get bmi(): number |
getWarningLevel(): WarningLevel {
if (this.bmi <= 16 || this.bmi >= 30) {
return WarningLevel.URGENT;
} else if (this.bmi >= 18 && this.bmi <= 25) {
return WarningLevel.OK;
} else {
return WarningLevel.WARNING;
}
}
}
| {
return this.weight / ((this.height / 100) * (this.height / 100));
} | identifier_body |
graph.py | """
Unit tests for nyx.panel.graph.
"""
import datetime
import unittest
import stem.control
import nyx.curses
import nyx.panel.graph
import test
from test import require_curses
from mock import patch
EXPECTED_BLANK_GRAPH = """
Download:
0 b
0 b
5s 10 15
""".rstrip()
EXPECTED_ACCOUNTING = """
Accounting (awake) Time to reset: 01:02
37.7 Kb / 842.0 Kb 16.0 Kb / 74.1 Kb
""".strip()
EXPECTED_GRAPH = """
Download:
5 Kb *
*
2 Kb ** *
* ****
0 b *********
5s 10 15
""".rstrip()
class TestGraphPanel(unittest.TestCase):
def test_x_axis_labels(self):
test_inputs = {
0: {},
7: {},
10: {5: '25s'},
15: {5: '25s', 10: '50'},
20: {5: '25s', 10: '50', 15: '1m'},
25: {5: '25s', 10: '50', 15: '1m', 20: '1.6'},
45: {5: '25s', 10: '50', 15: '1m', 20: '1.6', 25: '2.0', 30: '2.5', 35: '2.9', 40: '3.3'},
80: {10: '50s', 20: '1m', 30: '2.5', 40: '3.3', 50: '4.1', 60: '5.0', 70: '5.8'}, # spaced more since wide
}
for width, expected in test_inputs.items():
self.assertEqual(expected, nyx.panel.graph._x_axis_labels(nyx.panel.graph.Interval.FIVE_SECONDS, width))
test_inputs = {
nyx.panel.graph.Interval.EACH_SECOND: {
10: '10s', 20: '20', 30: '30', 40: '40', 50: '50', 60: '1m', 70: '1.1'
}, nyx.panel.graph.Interval.FIVE_SECONDS: {
10: '50s', 20: '1m', 30: '2.5', 40: '3.3', 50: '4.1', 60: '5.0', 70: '5.8'
}, nyx.panel.graph.Interval.THIRTY_SECONDS: {
10: '5m', 20: '10', 30: '15', 40: '20', 50: '25', 60: '30', 70: '35'
}, nyx.panel.graph.Interval.MINUTELY: {
10: '10m', 20: '20', 30: '30', 40: '40', 50: '50', 60: '1h', 70: '1.1'
}, nyx.panel.graph.Interval.FIFTEEN_MINUTE: {
10: '2h', 20: '5', 30: '7', 40: '10', 50: '12', 60: '15', 70: '17'
}, nyx.panel.graph.Interval.THIRTY_MINUTE: {
10: '5h', 20: '10', 30: '15', 40: '20', 50: '1d', 60: '1.2', 70: '1.4'
}, nyx.panel.graph.Interval.HOURLY: {
10: '10h', 20: '20', 30: '1d', 40: '1.6', 50: '2.0', 60: '2.5', 70: '2.9'
}, nyx.panel.graph.Interval.DAILY: {
10: '10d', 20: '20', 30: '30', 40: '40', 50: '50', 60: '60', 70: '70'
},
}
for interval, expected in test_inputs.items():
self.assertEqual(expected, nyx.panel.graph._x_axis_labels(interval, 80))
def test_y_axis_labels(self):
data = nyx.panel.graph.ConnectionStats()
# check with both even and odd height since that determines an offset in the middle
self.assertEqual({2: '10', 4: '7', 6: '5', 9: '2', 11: '0'}, nyx.panel.graph._y_axis_labels(12, data.primary, 0, 10))
self.assertEqual({2: '10', 4: '6', 6: '3', 8: '0'}, nyx.panel.graph._y_axis_labels(9, data.primary, 0, 10))
# check where the min and max are the same
self.assertEqual({2: '0', 11: '0'}, nyx.panel.graph._y_axis_labels(12, data.primary, 0, 0))
@require_curses
@patch('nyx.panel.graph.tor_controller')
def test_draw_subgraph_blank(self, tor_controller_mock):
tor_controller_mock().get_info.return_value = None
data = nyx.panel.graph.BandwidthStats()
rendered = test.render(nyx.panel.graph._draw_subgraph, data.primary, 0, 30, 7, nyx.panel.graph.Bounds.LOCAL_MAX, nyx.panel.graph.Interval.EACH_SECOND, nyx.curses.Color.CYAN, '*')
self.assertEqual(EXPECTED_BLANK_GRAPH, rendered.content)
@require_curses
@patch('nyx.panel.graph.tor_controller')
def test_draw_subgraph(self, tor_controller_mock):
tor_controller_mock().get_info.return_value = '543,543 421,421 551,551 710,710 200,200 175,175 188,188 250,250 377,377'
data = nyx.panel.graph.BandwidthStats()
rendered = test.render(nyx.panel.graph._draw_subgraph, data.primary, 0, 30, 7, nyx.panel.graph.Bounds.LOCAL_MAX, nyx.panel.graph.Interval.EACH_SECOND, nyx.curses.Color.CYAN, '*')
self.assertEqual(EXPECTED_GRAPH, rendered.content)
@require_curses
@patch('nyx.panel.graph.tor_controller')
def test_draw_accounting_stats(self, tor_controller_mock):
tor_controller_mock().is_alive.return_value = True
accounting_stat = stem.control.AccountingStats(
1410723598.276578,
'awake',
datetime.datetime(2014, 9, 14, 19, 41),
62,
4837, 102944, 107781,
2050, 7440, 9490,
)
rendered = test.render(nyx.panel.graph._draw_accounting_stats, 0, accounting_stat)
self.assertEqual(EXPECTED_ACCOUNTING, rendered.content)
| tor_controller_mock().is_alive.return_value = False
rendered = test.render(nyx.panel.graph._draw_accounting_stats, 0, None)
self.assertEqual('Accounting: Connection Closed...', rendered.content) | @require_curses
@patch('nyx.panel.graph.tor_controller')
def test_draw_accounting_stats_disconnected(self, tor_controller_mock): | random_line_split |
graph.py | """
Unit tests for nyx.panel.graph.
"""
import datetime
import unittest
import stem.control
import nyx.curses
import nyx.panel.graph
import test
from test import require_curses
from mock import patch
EXPECTED_BLANK_GRAPH = """
Download:
0 b
0 b
5s 10 15
""".rstrip()
EXPECTED_ACCOUNTING = """
Accounting (awake) Time to reset: 01:02
37.7 Kb / 842.0 Kb 16.0 Kb / 74.1 Kb
""".strip()
EXPECTED_GRAPH = """
Download:
5 Kb *
*
2 Kb ** *
* ****
0 b *********
5s 10 15
""".rstrip()
class TestGraphPanel(unittest.TestCase):
def test_x_axis_labels(self):
test_inputs = {
0: {},
7: {},
10: {5: '25s'},
15: {5: '25s', 10: '50'},
20: {5: '25s', 10: '50', 15: '1m'},
25: {5: '25s', 10: '50', 15: '1m', 20: '1.6'},
45: {5: '25s', 10: '50', 15: '1m', 20: '1.6', 25: '2.0', 30: '2.5', 35: '2.9', 40: '3.3'},
80: {10: '50s', 20: '1m', 30: '2.5', 40: '3.3', 50: '4.1', 60: '5.0', 70: '5.8'}, # spaced more since wide
}
for width, expected in test_inputs.items():
self.assertEqual(expected, nyx.panel.graph._x_axis_labels(nyx.panel.graph.Interval.FIVE_SECONDS, width))
test_inputs = {
nyx.panel.graph.Interval.EACH_SECOND: {
10: '10s', 20: '20', 30: '30', 40: '40', 50: '50', 60: '1m', 70: '1.1'
}, nyx.panel.graph.Interval.FIVE_SECONDS: {
10: '50s', 20: '1m', 30: '2.5', 40: '3.3', 50: '4.1', 60: '5.0', 70: '5.8'
}, nyx.panel.graph.Interval.THIRTY_SECONDS: {
10: '5m', 20: '10', 30: '15', 40: '20', 50: '25', 60: '30', 70: '35'
}, nyx.panel.graph.Interval.MINUTELY: {
10: '10m', 20: '20', 30: '30', 40: '40', 50: '50', 60: '1h', 70: '1.1'
}, nyx.panel.graph.Interval.FIFTEEN_MINUTE: {
10: '2h', 20: '5', 30: '7', 40: '10', 50: '12', 60: '15', 70: '17'
}, nyx.panel.graph.Interval.THIRTY_MINUTE: {
10: '5h', 20: '10', 30: '15', 40: '20', 50: '1d', 60: '1.2', 70: '1.4'
}, nyx.panel.graph.Interval.HOURLY: {
10: '10h', 20: '20', 30: '1d', 40: '1.6', 50: '2.0', 60: '2.5', 70: '2.9'
}, nyx.panel.graph.Interval.DAILY: {
10: '10d', 20: '20', 30: '30', 40: '40', 50: '50', 60: '60', 70: '70'
},
}
for interval, expected in test_inputs.items():
self.assertEqual(expected, nyx.panel.graph._x_axis_labels(interval, 80))
def test_y_axis_labels(self):
data = nyx.panel.graph.ConnectionStats()
# check with both even and odd height since that determines an offset in the middle
self.assertEqual({2: '10', 4: '7', 6: '5', 9: '2', 11: '0'}, nyx.panel.graph._y_axis_labels(12, data.primary, 0, 10))
self.assertEqual({2: '10', 4: '6', 6: '3', 8: '0'}, nyx.panel.graph._y_axis_labels(9, data.primary, 0, 10))
# check where the min and max are the same
self.assertEqual({2: '0', 11: '0'}, nyx.panel.graph._y_axis_labels(12, data.primary, 0, 0))
@require_curses
@patch('nyx.panel.graph.tor_controller')
def test_draw_subgraph_blank(self, tor_controller_mock):
tor_controller_mock().get_info.return_value = None
data = nyx.panel.graph.BandwidthStats()
rendered = test.render(nyx.panel.graph._draw_subgraph, data.primary, 0, 30, 7, nyx.panel.graph.Bounds.LOCAL_MAX, nyx.panel.graph.Interval.EACH_SECOND, nyx.curses.Color.CYAN, '*')
self.assertEqual(EXPECTED_BLANK_GRAPH, rendered.content)
@require_curses
@patch('nyx.panel.graph.tor_controller')
def test_draw_subgraph(self, tor_controller_mock):
|
@require_curses
@patch('nyx.panel.graph.tor_controller')
def test_draw_accounting_stats(self, tor_controller_mock):
tor_controller_mock().is_alive.return_value = True
accounting_stat = stem.control.AccountingStats(
1410723598.276578,
'awake',
datetime.datetime(2014, 9, 14, 19, 41),
62,
4837, 102944, 107781,
2050, 7440, 9490,
)
rendered = test.render(nyx.panel.graph._draw_accounting_stats, 0, accounting_stat)
self.assertEqual(EXPECTED_ACCOUNTING, rendered.content)
@require_curses
@patch('nyx.panel.graph.tor_controller')
def test_draw_accounting_stats_disconnected(self, tor_controller_mock):
tor_controller_mock().is_alive.return_value = False
rendered = test.render(nyx.panel.graph._draw_accounting_stats, 0, None)
self.assertEqual('Accounting: Connection Closed...', rendered.content)
| tor_controller_mock().get_info.return_value = '543,543 421,421 551,551 710,710 200,200 175,175 188,188 250,250 377,377'
data = nyx.panel.graph.BandwidthStats()
rendered = test.render(nyx.panel.graph._draw_subgraph, data.primary, 0, 30, 7, nyx.panel.graph.Bounds.LOCAL_MAX, nyx.panel.graph.Interval.EACH_SECOND, nyx.curses.Color.CYAN, '*')
self.assertEqual(EXPECTED_GRAPH, rendered.content) | identifier_body |
graph.py | """
Unit tests for nyx.panel.graph.
"""
import datetime
import unittest
import stem.control
import nyx.curses
import nyx.panel.graph
import test
from test import require_curses
from mock import patch
EXPECTED_BLANK_GRAPH = """
Download:
0 b
0 b
5s 10 15
""".rstrip()
EXPECTED_ACCOUNTING = """
Accounting (awake) Time to reset: 01:02
37.7 Kb / 842.0 Kb 16.0 Kb / 74.1 Kb
""".strip()
EXPECTED_GRAPH = """
Download:
5 Kb *
*
2 Kb ** *
* ****
0 b *********
5s 10 15
""".rstrip()
class TestGraphPanel(unittest.TestCase):
def test_x_axis_labels(self):
test_inputs = {
0: {},
7: {},
10: {5: '25s'},
15: {5: '25s', 10: '50'},
20: {5: '25s', 10: '50', 15: '1m'},
25: {5: '25s', 10: '50', 15: '1m', 20: '1.6'},
45: {5: '25s', 10: '50', 15: '1m', 20: '1.6', 25: '2.0', 30: '2.5', 35: '2.9', 40: '3.3'},
80: {10: '50s', 20: '1m', 30: '2.5', 40: '3.3', 50: '4.1', 60: '5.0', 70: '5.8'}, # spaced more since wide
}
for width, expected in test_inputs.items():
self.assertEqual(expected, nyx.panel.graph._x_axis_labels(nyx.panel.graph.Interval.FIVE_SECONDS, width))
test_inputs = {
nyx.panel.graph.Interval.EACH_SECOND: {
10: '10s', 20: '20', 30: '30', 40: '40', 50: '50', 60: '1m', 70: '1.1'
}, nyx.panel.graph.Interval.FIVE_SECONDS: {
10: '50s', 20: '1m', 30: '2.5', 40: '3.3', 50: '4.1', 60: '5.0', 70: '5.8'
}, nyx.panel.graph.Interval.THIRTY_SECONDS: {
10: '5m', 20: '10', 30: '15', 40: '20', 50: '25', 60: '30', 70: '35'
}, nyx.panel.graph.Interval.MINUTELY: {
10: '10m', 20: '20', 30: '30', 40: '40', 50: '50', 60: '1h', 70: '1.1'
}, nyx.panel.graph.Interval.FIFTEEN_MINUTE: {
10: '2h', 20: '5', 30: '7', 40: '10', 50: '12', 60: '15', 70: '17'
}, nyx.panel.graph.Interval.THIRTY_MINUTE: {
10: '5h', 20: '10', 30: '15', 40: '20', 50: '1d', 60: '1.2', 70: '1.4'
}, nyx.panel.graph.Interval.HOURLY: {
10: '10h', 20: '20', 30: '1d', 40: '1.6', 50: '2.0', 60: '2.5', 70: '2.9'
}, nyx.panel.graph.Interval.DAILY: {
10: '10d', 20: '20', 30: '30', 40: '40', 50: '50', 60: '60', 70: '70'
},
}
for interval, expected in test_inputs.items():
|
def test_y_axis_labels(self):
data = nyx.panel.graph.ConnectionStats()
# check with both even and odd height since that determines an offset in the middle
self.assertEqual({2: '10', 4: '7', 6: '5', 9: '2', 11: '0'}, nyx.panel.graph._y_axis_labels(12, data.primary, 0, 10))
self.assertEqual({2: '10', 4: '6', 6: '3', 8: '0'}, nyx.panel.graph._y_axis_labels(9, data.primary, 0, 10))
# check where the min and max are the same
self.assertEqual({2: '0', 11: '0'}, nyx.panel.graph._y_axis_labels(12, data.primary, 0, 0))
@require_curses
@patch('nyx.panel.graph.tor_controller')
def test_draw_subgraph_blank(self, tor_controller_mock):
tor_controller_mock().get_info.return_value = None
data = nyx.panel.graph.BandwidthStats()
rendered = test.render(nyx.panel.graph._draw_subgraph, data.primary, 0, 30, 7, nyx.panel.graph.Bounds.LOCAL_MAX, nyx.panel.graph.Interval.EACH_SECOND, nyx.curses.Color.CYAN, '*')
self.assertEqual(EXPECTED_BLANK_GRAPH, rendered.content)
@require_curses
@patch('nyx.panel.graph.tor_controller')
def test_draw_subgraph(self, tor_controller_mock):
tor_controller_mock().get_info.return_value = '543,543 421,421 551,551 710,710 200,200 175,175 188,188 250,250 377,377'
data = nyx.panel.graph.BandwidthStats()
rendered = test.render(nyx.panel.graph._draw_subgraph, data.primary, 0, 30, 7, nyx.panel.graph.Bounds.LOCAL_MAX, nyx.panel.graph.Interval.EACH_SECOND, nyx.curses.Color.CYAN, '*')
self.assertEqual(EXPECTED_GRAPH, rendered.content)
@require_curses
@patch('nyx.panel.graph.tor_controller')
def test_draw_accounting_stats(self, tor_controller_mock):
tor_controller_mock().is_alive.return_value = True
accounting_stat = stem.control.AccountingStats(
1410723598.276578,
'awake',
datetime.datetime(2014, 9, 14, 19, 41),
62,
4837, 102944, 107781,
2050, 7440, 9490,
)
rendered = test.render(nyx.panel.graph._draw_accounting_stats, 0, accounting_stat)
self.assertEqual(EXPECTED_ACCOUNTING, rendered.content)
@require_curses
@patch('nyx.panel.graph.tor_controller')
def test_draw_accounting_stats_disconnected(self, tor_controller_mock):
tor_controller_mock().is_alive.return_value = False
rendered = test.render(nyx.panel.graph._draw_accounting_stats, 0, None)
self.assertEqual('Accounting: Connection Closed...', rendered.content)
| self.assertEqual(expected, nyx.panel.graph._x_axis_labels(interval, 80)) | conditional_block |
graph.py | """
Unit tests for nyx.panel.graph.
"""
import datetime
import unittest
import stem.control
import nyx.curses
import nyx.panel.graph
import test
from test import require_curses
from mock import patch
EXPECTED_BLANK_GRAPH = """
Download:
0 b
0 b
5s 10 15
""".rstrip()
EXPECTED_ACCOUNTING = """
Accounting (awake) Time to reset: 01:02
37.7 Kb / 842.0 Kb 16.0 Kb / 74.1 Kb
""".strip()
EXPECTED_GRAPH = """
Download:
5 Kb *
*
2 Kb ** *
* ****
0 b *********
5s 10 15
""".rstrip()
class | (unittest.TestCase):
def test_x_axis_labels(self):
test_inputs = {
0: {},
7: {},
10: {5: '25s'},
15: {5: '25s', 10: '50'},
20: {5: '25s', 10: '50', 15: '1m'},
25: {5: '25s', 10: '50', 15: '1m', 20: '1.6'},
45: {5: '25s', 10: '50', 15: '1m', 20: '1.6', 25: '2.0', 30: '2.5', 35: '2.9', 40: '3.3'},
80: {10: '50s', 20: '1m', 30: '2.5', 40: '3.3', 50: '4.1', 60: '5.0', 70: '5.8'}, # spaced more since wide
}
for width, expected in test_inputs.items():
self.assertEqual(expected, nyx.panel.graph._x_axis_labels(nyx.panel.graph.Interval.FIVE_SECONDS, width))
test_inputs = {
nyx.panel.graph.Interval.EACH_SECOND: {
10: '10s', 20: '20', 30: '30', 40: '40', 50: '50', 60: '1m', 70: '1.1'
}, nyx.panel.graph.Interval.FIVE_SECONDS: {
10: '50s', 20: '1m', 30: '2.5', 40: '3.3', 50: '4.1', 60: '5.0', 70: '5.8'
}, nyx.panel.graph.Interval.THIRTY_SECONDS: {
10: '5m', 20: '10', 30: '15', 40: '20', 50: '25', 60: '30', 70: '35'
}, nyx.panel.graph.Interval.MINUTELY: {
10: '10m', 20: '20', 30: '30', 40: '40', 50: '50', 60: '1h', 70: '1.1'
}, nyx.panel.graph.Interval.FIFTEEN_MINUTE: {
10: '2h', 20: '5', 30: '7', 40: '10', 50: '12', 60: '15', 70: '17'
}, nyx.panel.graph.Interval.THIRTY_MINUTE: {
10: '5h', 20: '10', 30: '15', 40: '20', 50: '1d', 60: '1.2', 70: '1.4'
}, nyx.panel.graph.Interval.HOURLY: {
10: '10h', 20: '20', 30: '1d', 40: '1.6', 50: '2.0', 60: '2.5', 70: '2.9'
}, nyx.panel.graph.Interval.DAILY: {
10: '10d', 20: '20', 30: '30', 40: '40', 50: '50', 60: '60', 70: '70'
},
}
for interval, expected in test_inputs.items():
self.assertEqual(expected, nyx.panel.graph._x_axis_labels(interval, 80))
def test_y_axis_labels(self):
data = nyx.panel.graph.ConnectionStats()
# check with both even and odd height since that determines an offset in the middle
self.assertEqual({2: '10', 4: '7', 6: '5', 9: '2', 11: '0'}, nyx.panel.graph._y_axis_labels(12, data.primary, 0, 10))
self.assertEqual({2: '10', 4: '6', 6: '3', 8: '0'}, nyx.panel.graph._y_axis_labels(9, data.primary, 0, 10))
# check where the min and max are the same
self.assertEqual({2: '0', 11: '0'}, nyx.panel.graph._y_axis_labels(12, data.primary, 0, 0))
@require_curses
@patch('nyx.panel.graph.tor_controller')
def test_draw_subgraph_blank(self, tor_controller_mock):
tor_controller_mock().get_info.return_value = None
data = nyx.panel.graph.BandwidthStats()
rendered = test.render(nyx.panel.graph._draw_subgraph, data.primary, 0, 30, 7, nyx.panel.graph.Bounds.LOCAL_MAX, nyx.panel.graph.Interval.EACH_SECOND, nyx.curses.Color.CYAN, '*')
self.assertEqual(EXPECTED_BLANK_GRAPH, rendered.content)
@require_curses
@patch('nyx.panel.graph.tor_controller')
def test_draw_subgraph(self, tor_controller_mock):
tor_controller_mock().get_info.return_value = '543,543 421,421 551,551 710,710 200,200 175,175 188,188 250,250 377,377'
data = nyx.panel.graph.BandwidthStats()
rendered = test.render(nyx.panel.graph._draw_subgraph, data.primary, 0, 30, 7, nyx.panel.graph.Bounds.LOCAL_MAX, nyx.panel.graph.Interval.EACH_SECOND, nyx.curses.Color.CYAN, '*')
self.assertEqual(EXPECTED_GRAPH, rendered.content)
@require_curses
@patch('nyx.panel.graph.tor_controller')
def test_draw_accounting_stats(self, tor_controller_mock):
tor_controller_mock().is_alive.return_value = True
accounting_stat = stem.control.AccountingStats(
1410723598.276578,
'awake',
datetime.datetime(2014, 9, 14, 19, 41),
62,
4837, 102944, 107781,
2050, 7440, 9490,
)
rendered = test.render(nyx.panel.graph._draw_accounting_stats, 0, accounting_stat)
self.assertEqual(EXPECTED_ACCOUNTING, rendered.content)
@require_curses
@patch('nyx.panel.graph.tor_controller')
def test_draw_accounting_stats_disconnected(self, tor_controller_mock):
tor_controller_mock().is_alive.return_value = False
rendered = test.render(nyx.panel.graph._draw_accounting_stats, 0, None)
self.assertEqual('Accounting: Connection Closed...', rendered.content)
| TestGraphPanel | identifier_name |
pageContainer.tsx | import * as React from 'react';
import * as toastr from 'toastr';
import {hashHistory} from 'react-router';
import {routeConstants} from '../../common/constants/routeConstants';
import {loginAPI} from '../../rest-api/login/loginAPI';
import {LoginCredentials} from '../../models/loginCredentials';
import {UserProfile} from '../../models/userProfile';
import {LoginPage} from './page';
interface State {
loginCredentials: LoginCredentials;
}
export class LoginPageContainer extends React.Component <{}, State> {
constructor() {
super();
this.state = { | // Other way to assign new object to loginCredentials to avoid mutation is:
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/assign
/*
var newLoginCredentiasl = Object.assign({}, this.state.loginCredentials, {
[fieldName]: value,
});
*/
// We are use a JavaScript proposal named object spread operator
// https://github.com/sebmarkbage/ecmascript-rest-spread
// http://stackoverflow.com/questions/32925460/spread-operator-vs-object-assign
private updateLoginInfo(fieldName: string, value: string) {
this.setState({
loginCredentials: {
...this.state.loginCredentials,
[fieldName]: value,
}
});
}
private loginRequest(loginCredentials: LoginCredentials) {
toastr.remove();
loginAPI.login(loginCredentials)
.then((userProfile: UserProfile) => {
toastr.success(`Success login ${userProfile.fullname}`);
hashHistory.push(routeConstants.training.list);
})
.catch((error) => {
toastr.error(error);
});
}
public render() {
return (
<LoginPage
loginCredentials={this.state.loginCredentials}
updateLoginInfo={this.updateLoginInfo.bind(this)}
loginRequest={this.loginRequest.bind(this)}
/>
);
}
} | loginCredentials: new LoginCredentials(),
};
}
| random_line_split |
pageContainer.tsx | import * as React from 'react';
import * as toastr from 'toastr';
import {hashHistory} from 'react-router';
import {routeConstants} from '../../common/constants/routeConstants';
import {loginAPI} from '../../rest-api/login/loginAPI';
import {LoginCredentials} from '../../models/loginCredentials';
import {UserProfile} from '../../models/userProfile';
import {LoginPage} from './page';
interface State {
loginCredentials: LoginCredentials;
}
export class LoginPageContainer extends React.Component <{}, State> {
constructor() {
super();
this.state = {
loginCredentials: new LoginCredentials(),
};
}
  // Another way to assign a new object to loginCredentials and avoid mutation is:
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/assign
/*
var newLoginCredentiasl = Object.assign({}, this.state.loginCredentials, {
[fieldName]: value,
});
*/
  // We use a JavaScript proposal named the object spread operator
// https://github.com/sebmarkbage/ecmascript-rest-spread
// http://stackoverflow.com/questions/32925460/spread-operator-vs-object-assign
private updateLoginInfo(fieldName: string, value: string) {
this.setState({
loginCredentials: {
...this.state.loginCredentials,
[fieldName]: value,
}
});
}
private loginRequest(loginCredentials: LoginCredentials) {
toastr.remove();
loginAPI.login(loginCredentials)
.then((userProfile: UserProfile) => {
toastr.success(`Success login ${userProfile.fullname}`);
hashHistory.push(routeConstants.training.list);
})
.catch((error) => {
toastr.error(error);
});
}
public | () {
return (
<LoginPage
loginCredentials={this.state.loginCredentials}
updateLoginInfo={this.updateLoginInfo.bind(this)}
loginRequest={this.loginRequest.bind(this)}
/>
);
}
}
| render | identifier_name |
pageContainer.tsx | import * as React from 'react';
import * as toastr from 'toastr';
import {hashHistory} from 'react-router';
import {routeConstants} from '../../common/constants/routeConstants';
import {loginAPI} from '../../rest-api/login/loginAPI';
import {LoginCredentials} from '../../models/loginCredentials';
import {UserProfile} from '../../models/userProfile';
import {LoginPage} from './page';
interface State {
loginCredentials: LoginCredentials;
}
export class LoginPageContainer extends React.Component <{}, State> {
constructor() |
  // Another way to assign a new object to loginCredentials and avoid mutation is:
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/assign
/*
var newLoginCredentiasl = Object.assign({}, this.state.loginCredentials, {
[fieldName]: value,
});
*/
  // We use a JavaScript proposal named the object spread operator
// https://github.com/sebmarkbage/ecmascript-rest-spread
// http://stackoverflow.com/questions/32925460/spread-operator-vs-object-assign
private updateLoginInfo(fieldName: string, value: string) {
this.setState({
loginCredentials: {
...this.state.loginCredentials,
[fieldName]: value,
}
});
}
private loginRequest(loginCredentials: LoginCredentials) {
toastr.remove();
loginAPI.login(loginCredentials)
.then((userProfile: UserProfile) => {
toastr.success(`Success login ${userProfile.fullname}`);
hashHistory.push(routeConstants.training.list);
})
.catch((error) => {
toastr.error(error);
});
}
public render() {
return (
<LoginPage
loginCredentials={this.state.loginCredentials}
updateLoginInfo={this.updateLoginInfo.bind(this)}
loginRequest={this.loginRequest.bind(this)}
/>
);
}
}
| {
super();
this.state = {
loginCredentials: new LoginCredentials(),
};
} | identifier_body |
config.py | #! /usr/bin/env python
# coding:utf-8
# Directory containing the record files
# By default, place it under zones/ in the directory
# where this script file is located
zone_dir = "testzones"
# Directory where the generated HTML is placed
html_dir = "build"
# Directory holding information about the records
record_info_dir = "testzones" | a_record_filenames = [
"example.jp.zone",
]
# PTR レコードのゾーンファイル名そのネットワーク
ptr_record_filename_networks = [
('192.168.0.rev', '192.168.0.0/24'),
]
# レコード情報を納めたファイル
record_info_filenames = [
'192.168.0.info',
] |
# A レコードのゾーンファイル名 | random_line_split |
config-test.ts | /* tslint:disable:no-sync-functions */
import { expect } from 'chai'
import { Repository } from '../../../src/models/repository'
import {
getConfigValue,
getGlobalConfigPath,
getGlobalConfigValue,
setGlobalConfigValue,
} from '../../../src/lib/git'
import { GitProcess } from 'dugite'
import { setupFixtureRepository } from '../../fixture-helper'
describe('git/config', () => {
let repository: Repository | null = null
beforeEach(() => {
const testRepoPath = setupFixtureRepository('test-repo')
repository = new Repository(testRepoPath, -1, null, false)
})
describe('config', () => {
it('looks up config values', async () => {
const bare = await getConfigValue(repository!, 'core.bare')
expect(bare).to.equal('false')
})
it('returns null for undefined values', async () => {
const value = await getConfigValue(
repository!,
'core.the-meaning-of-life'
)
expect(value).to.equal(null)
})
})
describe('getGlobalConfigPath', () => {
it('gets the config path', async () => {
const path = await getGlobalConfigPath()
expect(path).not.to.equal(null)
expect(path!.length).to.be.greaterThan(0)
})
})
describe('setGlobalConfigValue', () => {
const key = 'foo.bar'
beforeEach(async () => {
await GitProcess.exec(
['config', '--add', '--global', key, 'first'],
__dirname
)
await GitProcess.exec(
['config', '--add', '--global', key, 'second'],
__dirname
)
})
it('will replace all entries for a global value', async () => {
await setGlobalConfigValue(key, 'the correct value')
const value = await getGlobalConfigValue(key)
expect(value).to.equal('the correct value')
})
afterEach(async () => {
await GitProcess.exec(
['config', '--unset-all', '--global', key],
__dirname | })
}) | )
}) | random_line_split |
metrix++.py | #
# Metrix++, Copyright 2009-2013, Metrix++ Project
# Link: http://metrixplusplus.sourceforge.net
#
# This file is a part of Metrix++ Tool.
#
# Metrix++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Metrix++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Metrix++. If not, see <http://www.gnu.org/licenses/>.
#
if __name__ == '__main__':
| metrixpp.start() | import metrixpp
| random_line_split |
metrix++.py | #
# Metrix++, Copyright 2009-2013, Metrix++ Project
# Link: http://metrixplusplus.sourceforge.net
#
# This file is a part of Metrix++ Tool.
#
# Metrix++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Metrix++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Metrix++. If not, see <http://www.gnu.org/licenses/>.
#
if __name__ == '__main__':
| import metrixpp
metrixpp.start() | conditional_block |
|
test_geocoding.py | # -*- coding: utf-8 -*-
import pytest
from gmaps import errors
from gmaps import Geocoding
from .testutils import retry
geocoding = Geocoding(sensor=False)
@retry
def test_geocode():
results = geocoding.geocode(u"Wrocław, Hubska")
assert results
assert len(results) > 0
@retry
def test_geocode_override_sensor():
results = geocoding.geocode(u"Wrocław, Hubska", sensor=True)
assert results
assert len(results) > 0
@retry
def test_geocode_components_filters():
"""Test if querying with same route but different component filtering
returns different locations"""
    # both of these cities have this street
results1 = geocoding.geocode(u"Łubinowa",
components={"locality": "Wrocław"})
results2 = geocoding.geocode(u"Łubinowa",
components={"locality": "Warszawa"})
assert results1[0]['geometry']['location'] != results2[0]['geometry'][
'location']
@retry
def test_geocode_components_without_address():
"""Test if querying explicitely set components returns same location like
with string address"""
components = {"route": "Łubinowa", "locality": "Wrocław"}
address = ",".join(components.values())
|
@retry
def test_geocode_no_results_exception():
components = {"administrative_area": "TZ", "country": "FR"}
with pytest.raises(errors.NoResults):
geocoding.geocode(components)
@retry
def test_geocode_language():
results = geocoding.geocode(u"Wrocław, Hubska", language='pl')
assert 'Polska' in results[0]['formatted_address']
@retry
def test_geocode_region():
results = geocoding.geocode("Toledo", region="us")
assert 'USA' in results[0]['formatted_address']
results = geocoding.geocode("Toledo", region="es")
assert 'Spain' in results[0]['formatted_address']
@retry
def test_geocode_bounds():
results1 = geocoding.geocode("Winnetka", bounds=(
(42.1282269, -87.71095989999999), (42.0886089, -87.7708363)))
results2 = geocoding.geocode("Winnetka", bounds=(
(34.172684, -118.604794), (34.236144, -118.500938)))
assert results1[0]['formatted_address'] != results2[0]['formatted_address']
@retry
def test_reverse():
results = geocoding.reverse(lat=51.213, lon=21.213)
assert results
assert len(results) > 0
assert results[0]['formatted_address']
@retry
def test_reverse_override_sensor():
results = geocoding.reverse(lat=51.213, lon=21.213, sensor=True)
assert results
assert len(results) > 0
@retry
def test_reverse_language():
results = geocoding.reverse(lat=51.213, lon=21.213, language='pl')
assert results
# the given lat/lon is a position somewhere in Poland, so test that
# 'Polska' appears in the formatted_address of the first result
assert 'Polska' in results[0]['formatted_address']
@pytest.mark.xfail
@retry
def test_exception_when_sensor_bad():
with pytest.raises(errors.GmapException):
geocoding.reverse(lat=51.213, lon=21.213, sensor="foo")
with pytest.raises(errors.GmapException):
geocoding.geocode(u"Wrocław, Hubska", sensor="foo") | results_with_address = geocoding.geocode(components=components)
results_without_address = geocoding.geocode(address)
assert results_with_address[0]['geometry']['location'] == \
results_without_address[0]['geometry']['location']
| random_line_split |
test_geocoding.py | # -*- coding: utf-8 -*-
import pytest
from gmaps import errors
from gmaps import Geocoding
from .testutils import retry
geocoding = Geocoding(sensor=False)
@retry
def test_geocode():
results = geocoding.geocode(u"Wrocław, Hubska")
assert results
assert len(results) > 0
@retry
def test_geocode_override_sensor():
results = geocoding.geocode(u"Wrocław, Hubska", sensor=True)
assert results
assert len(results) > 0
@retry
def test_geocode_components_filters():
"""Test if querying with same route but different component filtering
returns different locations"""
# both these cities has this street
results1 = geocoding.geocode(u"Łubinowa",
components={"locality": "Wrocław"})
results2 = geocoding.geocode(u"Łubinowa",
components={"locality": "Warszawa"})
assert results1[0]['geometry']['location'] != results2[0]['geometry'][
'location']
@retry
def test_geocode_components_without_address():
"""Test if querying explicitely set components returns same location like
with string address"""
components = {"route": "Łubinowa", "locality": "Wrocław"}
address = ",".join(components.values())
results_with_address = geocoding.geocode(components=components)
results_without_address = geocoding.geocode(address)
assert results_with_address[0]['geometry']['location'] == \
results_without_address[0]['geometry']['location']
@retry
def test_geocode_no_results_exception():
components = {"administrative_area": "TZ", "country": "FR"}
with pytest.raises(errors.NoResults):
geocoding.geocode(components)
@retry
def test_geocode_language():
results = geocoding.geocode(u"Wrocław, Hubska", language='pl')
assert 'Polska' in results[0]['formatted_address']
@retry
def test_geocode_region():
results = geocoding.geocode("Toledo", region="us")
assert 'USA' in results[0]['formatted_address']
results = geocoding.geocode("Toledo", region="es")
assert 'Spain' in results[0]['formatted_address']
@retry
def test_geocode_bounds():
results1 = geocoding.geocode("Winnetka", bounds=(
(42.1282269, -87.71095989999999), (42.0886089, -87.7708363)))
results2 = geocoding.geocode("Winnetka", bounds=(
(34.172684, -118.604794), (34.236144, -118.500938)))
assert results1[0]['formatted_address'] != results2[0]['formatted_address']
@retry
def test_reverse():
results = geocoding.reverse(lat=51.213, lon=21.213)
assert results
assert len(results) > 0
assert results[0]['formatted_address']
@retry
def test_rev | results = geocoding.reverse(lat=51.213, lon=21.213, sensor=True)
assert results
assert len(results) > 0
@retry
def test_reverse_language():
results = geocoding.reverse(lat=51.213, lon=21.213, language='pl')
assert results
# the given lat/lon is a position somewhere in Poland, so test that
# 'Polska' appears in the formatted_address of the first result
assert 'Polska' in results[0]['formatted_address']
@pytest.mark.xfail
@retry
def test_exception_when_sensor_bad():
with pytest.raises(errors.GmapException):
geocoding.reverse(lat=51.213, lon=21.213, sensor="foo")
with pytest.raises(errors.GmapException):
geocoding.geocode(u"Wrocław, Hubska", sensor="foo")
| erse_override_sensor():
| identifier_name |
test_geocoding.py | # -*- coding: utf-8 -*-
import pytest
from gmaps import errors
from gmaps import Geocoding
from .testutils import retry
geocoding = Geocoding(sensor=False)
@retry
def test_geocode():
results = geocoding.geocode(u"Wrocław, Hubska")
assert results
assert len(results) > 0
@retry
def test_geocode_override_sensor():
results = geocoding.geocode(u"Wrocław, Hubska", sensor=True)
assert results
assert len(results) > 0
@retry
def test_geocode_components_filters():
"""Test if querying with same route but different component filtering
returns different locations"""
# both these cities has this street
results1 = geocoding.geocode(u"Łubinowa",
components={"locality": "Wrocław"})
results2 = geocoding.geocode(u"Łubinowa",
components={"locality": "Warszawa"})
assert results1[0]['geometry']['location'] != results2[0]['geometry'][
'location']
@retry
def test_geocode_components_without_address():
"""Test if querying explicitely set components returns same location like
with string address"""
components = {"route": "Łubinowa", "locality": "Wrocław"}
address = ",".join(components.values())
results_with_address = geocoding.geocode(components=components)
results_without_address = geocoding.geocode(address)
assert results_with_address[0]['geometry']['location'] == \
results_without_address[0]['geometry']['location']
@retry
def test_geocode_no_results_exception():
components = {"administrative_area": "TZ", "country": "FR"}
with pytest.raises(errors.NoResults):
geocoding.geocode(components)
@retry
def test_geocode_language():
results = geocoding.geocode(u"Wrocław, Hubska", language='pl')
assert 'Polska' in results[0]['formatted_address']
@retry
def test_geocode_region():
results | y
def test_geocode_bounds():
results1 = geocoding.geocode("Winnetka", bounds=(
(42.1282269, -87.71095989999999), (42.0886089, -87.7708363)))
results2 = geocoding.geocode("Winnetka", bounds=(
(34.172684, -118.604794), (34.236144, -118.500938)))
assert results1[0]['formatted_address'] != results2[0]['formatted_address']
@retry
def test_reverse():
results = geocoding.reverse(lat=51.213, lon=21.213)
assert results
assert len(results) > 0
assert results[0]['formatted_address']
@retry
def test_reverse_override_sensor():
results = geocoding.reverse(lat=51.213, lon=21.213, sensor=True)
assert results
assert len(results) > 0
@retry
def test_reverse_language():
results = geocoding.reverse(lat=51.213, lon=21.213, language='pl')
assert results
# the given lat/lon is a position somewhere in Poland, so test that
# 'Polska' appears in the formatted_address of the first result
assert 'Polska' in results[0]['formatted_address']
@pytest.mark.xfail
@retry
def test_exception_when_sensor_bad():
with pytest.raises(errors.GmapException):
geocoding.reverse(lat=51.213, lon=21.213, sensor="foo")
with pytest.raises(errors.GmapException):
geocoding.geocode(u"Wrocław, Hubska", sensor="foo")
| = geocoding.geocode("Toledo", region="us")
assert 'USA' in results[0]['formatted_address']
results = geocoding.geocode("Toledo", region="es")
assert 'Spain' in results[0]['formatted_address']
@retr | identifier_body |
bigquerydatatransfer_v1_generated_data_transfer_service_get_data_source_sync.py | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetDataSource
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-bigquery-datatransfer
# [START bigquerydatatransfer_v1_generated_DataTransferService_GetDataSource_sync]
from google.cloud import bigquery_datatransfer_v1
def sample_get_data_source():
# Create a client
|
# [END bigquerydatatransfer_v1_generated_DataTransferService_GetDataSource_sync]
| client = bigquery_datatransfer_v1.DataTransferServiceClient()
# Initialize request argument(s)
request = bigquery_datatransfer_v1.GetDataSourceRequest(
name="name_value",
)
# Make the request
response = client.get_data_source(request=request)
# Handle the response
print(response) | identifier_body |
bigquerydatatransfer_v1_generated_data_transfer_service_get_data_source_sync.py | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetDataSource
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-bigquery-datatransfer
# [START bigquerydatatransfer_v1_generated_DataTransferService_GetDataSource_sync]
from google.cloud import bigquery_datatransfer_v1
def | ():
# Create a client
client = bigquery_datatransfer_v1.DataTransferServiceClient()
# Initialize request argument(s)
request = bigquery_datatransfer_v1.GetDataSourceRequest(
name="name_value",
)
# Make the request
response = client.get_data_source(request=request)
# Handle the response
print(response)
# [END bigquerydatatransfer_v1_generated_DataTransferService_GetDataSource_sync]
| sample_get_data_source | identifier_name |
bigquerydatatransfer_v1_generated_data_transfer_service_get_data_source_sync.py | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetDataSource
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-bigquery-datatransfer
# [START bigquerydatatransfer_v1_generated_DataTransferService_GetDataSource_sync]
from google.cloud import bigquery_datatransfer_v1
def sample_get_data_source():
# Create a client
client = bigquery_datatransfer_v1.DataTransferServiceClient()
# Initialize request argument(s)
request = bigquery_datatransfer_v1.GetDataSourceRequest(
name="name_value", | # Handle the response
print(response)
# [END bigquerydatatransfer_v1_generated_DataTransferService_GetDataSource_sync] | )
# Make the request
response = client.get_data_source(request=request)
| random_line_split |
common.py | from datetime import date
class YearInfo(object):
def __init__(self, year, months_ok, months_na):
self.year = year
self.months = set(range(1, 13))
self.months_ok = set(months_ok)
self.months_na = set(months_na)
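# months_er: months that are neither paid (ok) nor not-applicable (na);
# for the current year, months that have not happened yet are removed below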
self.months_er = self.months - (self.months_ok | self.months_na)
today = date.today()
if self.year == today.year:
self.months_er -= set(range(today.month, 13))
def __unicode__(self):
return u'%s' % self.year
def missing(self):
return len(self.months_er) != 0
def payments_by_month(payments_list):
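# Collect every (year, month) covered by a payment, then build one YearInfo
# per year from the user's join date up to the current year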
monthly_data = set()
if not payments_list:
return []
for payment in payments_list:
for m in payment.formonths():
monthly_data.add(m)
since_year = payment.user.date_joined.year
since_month = payment.user.date_joined.month
years = set(range(since_year, date.today().year+1))
out = []
for y in years:
ok = map(lambda x: x[1],
filter(lambda x: x[0] == y, monthly_data))
na = []
if y == since_year:
na = range(1, since_month)
yi = YearInfo(y, ok, na)
out.append(yi)
return out
def no_missing_payments(payments_list):
plist = payments_by_month(payments_list)
for year in plist:
|
return True
def missing_months(payments_list):
plist = payments_by_month(payments_list)
missing = []
for yi in plist:
if yi.missing():
for month in yi.months_er:
missing.append((yi.year, month))
return missing
| if year.missing():
return False | conditional_block |
common.py | from datetime import date
class | (object):
def __init__(self, year, months_ok, months_na):
self.year = year
self.months = set(range(1, 13))
self.months_ok = set(months_ok)
self.months_na = set(months_na)
self.months_er = self.months - (self.months_ok | self.months_na)
today = date.today()
if self.year == today.year:
self.months_er -= set(range(today.month, 13))
def __unicode__(self):
return u'%s' % self.year
def missing(self):
return len(self.months_er) != 0
def payments_by_month(payments_list):
monthly_data = set()
if not payments_list:
return []
for payment in payments_list:
for m in payment.formonths():
monthly_data.add(m)
since_year = payment.user.date_joined.year
since_month = payment.user.date_joined.month
years = set(range(since_year, date.today().year+1))
out = []
for y in years:
ok = map(lambda x: x[1],
filter(lambda x: x[0] == y, monthly_data))
na = []
if y == since_year:
na = range(1, since_month)
yi = YearInfo(y, ok, na)
out.append(yi)
return out
def no_missing_payments(payments_list):
plist = payments_by_month(payments_list)
for year in plist:
if year.missing():
return False
return True
def missing_months(payments_list):
plist = payments_by_month(payments_list)
missing = []
for yi in plist:
if yi.missing():
for month in yi.months_er:
missing.append((yi.year, month))
return missing
| YearInfo | identifier_name |
common.py | from datetime import date | self.months = set(range(1, 13))
self.months_ok = set(months_ok)
self.months_na = set(months_na)
self.months_er = self.months - (self.months_ok | self.months_na)
today = date.today()
if self.year == today.year:
self.months_er -= set(range(today.month, 13))
def __unicode__(self):
return u'%s' % self.year
def missing(self):
return len(self.months_er) != 0
def payments_by_month(payments_list):
monthly_data = set()
if not payments_list:
return []
for payment in payments_list:
for m in payment.formonths():
monthly_data.add(m)
since_year = payment.user.date_joined.year
since_month = payment.user.date_joined.month
years = set(range(since_year, date.today().year+1))
out = []
for y in years:
ok = map(lambda x: x[1],
filter(lambda x: x[0] == y, monthly_data))
na = []
if y == since_year:
na = range(1, since_month)
yi = YearInfo(y, ok, na)
out.append(yi)
return out
def no_missing_payments(payments_list):
plist = payments_by_month(payments_list)
for year in plist:
if year.missing():
return False
return True
def missing_months(payments_list):
plist = payments_by_month(payments_list)
missing = []
for yi in plist:
if yi.missing():
for month in yi.months_er:
missing.append((yi.year, month))
return missing |
class YearInfo(object):
def __init__(self, year, months_ok, months_na):
self.year = year | random_line_split |
common.py | from datetime import date
class YearInfo(object):
def __init__(self, year, months_ok, months_na):
self.year = year
self.months = set(range(1, 13))
self.months_ok = set(months_ok)
self.months_na = set(months_na)
self.months_er = self.months - (self.months_ok | self.months_na)
today = date.today()
if self.year == today.year:
self.months_er -= set(range(today.month, 13))
def __unicode__(self):
return u'%s' % self.year
def missing(self):
|
def payments_by_month(payments_list):
monthly_data = set()
if not payments_list:
return []
for payment in payments_list:
for m in payment.formonths():
monthly_data.add(m)
since_year = payment.user.date_joined.year
since_month = payment.user.date_joined.month
years = set(range(since_year, date.today().year+1))
out = []
for y in years:
ok = map(lambda x: x[1],
filter(lambda x: x[0] == y, monthly_data))
na = []
if y == since_year:
na = range(1, since_month)
yi = YearInfo(y, ok, na)
out.append(yi)
return out
def no_missing_payments(payments_list):
plist = payments_by_month(payments_list)
for year in plist:
if year.missing():
return False
return True
def missing_months(payments_list):
plist = payments_by_month(payments_list)
missing = []
for yi in plist:
if yi.missing():
for month in yi.months_er:
missing.append((yi.year, month))
return missing
| return len(self.months_er) != 0 | identifier_body |
exercise.py | #!/usr/bin/python
from __future__ import unicode_literals
import os
def wait():
raw_input('\nPress Enter to continue...\n\n')
os.system(['clear', 'cls'][os.name == 'nt'])
# Create a class to handle items in a wallet
class BaseWalletHandler(object):
def __init__(self):
self.items = {
'Driver\'s License': False,
'Credit Card': False,
'Cash': False,
'Change': False,
'Insurance Card': False,
'ICE Info': False,
'Pictures': False,
}
def add_item(self, item):
if item in self.items.keys():
self.items[item] = True
def remove_item(self, item):
if item in self.items.keys():
self.items[item] = False
def show_items(self):
for key, value in self.items.items():
if value is True:
print key
# Can this be refactored further to clean it up?
class WalletHandler(BaseWalletHandler):
def __init__(self):
super(WalletHandler, self).__init__()
|
def exercise():
wallet_handler = BaseWalletHandler()
wallet_handler.add_item('Driver\'s License')
wallet_handler.add_item('ICE Info')
wallet_handler.add_item('Credit Card')
wallet_handler.add_item('Business Card')
wallet_handler.show_items()
wait()
wallet_handler = WalletHandler()
wallet_handler.add_item('Driver\'s License')
wallet_handler.add_item('ICE Info')
wallet_handler.add_item('Credit Card')
wallet_handler.add_item('Business Card')
wallet_handler.show_items()
wait()
if __name__=='__main__':
exercise() | def add_item(self, item):
super(WalletHandler, self).add_item(item)
if item not in self.items.keys():
self.items[item] = True | random_line_split |
exercise.py | #!/usr/bin/python
from __future__ import unicode_literals
import os
def wait():
raw_input('\nPress Enter to continue...\n\n')
os.system(['clear', 'cls'][os.name == 'nt'])
# Create a class to handle items in a wallet
class BaseWalletHandler(object):
def __init__(self):
self.items = {
'Driver\'s License': False,
'Credit Card': False,
'Cash': False,
'Change': False,
'Insurance Card': False,
'ICE Info': False,
'Pictures': False,
}
def add_item(self, item):
if item in self.items.keys():
self.items[item] = True
def remove_item(self, item):
if item in self.items.keys():
self.items[item] = False
def show_items(self):
for key, value in self.items.items():
if value is True:
|
# Can this be refactored further to clean it up?
class WalletHandler(BaseWalletHandler):
def __init__(self):
super(WalletHandler, self).__init__()
def add_item(self, item):
super(WalletHandler, self).add_item(item)
if item not in self.items.keys():
self.items[item] = True
def exercise():
wallet_handler = BaseWalletHandler()
wallet_handler.add_item('Driver\'s License')
wallet_handler.add_item('ICE Info')
wallet_handler.add_item('Credit Card')
wallet_handler.add_item('Business Card')
wallet_handler.show_items()
wait()
wallet_handler = WalletHandler()
wallet_handler.add_item('Driver\'s License')
wallet_handler.add_item('ICE Info')
wallet_handler.add_item('Credit Card')
wallet_handler.add_item('Business Card')
wallet_handler.show_items()
wait()
if __name__=='__main__':
exercise()
| print key | conditional_block |
exercise.py | #!/usr/bin/python
from __future__ import unicode_literals
import os
def wait():
raw_input('\nPress Enter to continue...\n\n')
os.system(['clear', 'cls'][os.name == 'nt'])
# Create a class to handle items in a wallet
class BaseWalletHandler(object):
def __init__(self):
self.items = {
'Driver\'s License': False,
'Credit Card': False,
'Cash': False,
'Change': False,
'Insurance Card': False,
'ICE Info': False,
'Pictures': False,
}
def add_item(self, item):
if item in self.items.keys():
self.items[item] = True
def remove_item(self, item):
if item in self.items.keys():
self.items[item] = False
def show_items(self):
|
# Can this be refactored further to clean it up?
class WalletHandler(BaseWalletHandler):
def __init__(self):
super(WalletHandler, self).__init__()
def add_item(self, item):
super(WalletHandler, self).add_item(item)
if item not in self.items.keys():
self.items[item] = True
def exercise():
wallet_handler = BaseWalletHandler()
wallet_handler.add_item('Driver\'s License')
wallet_handler.add_item('ICE Info')
wallet_handler.add_item('Credit Card')
wallet_handler.add_item('Business Card')
wallet_handler.show_items()
wait()
wallet_handler = WalletHandler()
wallet_handler.add_item('Driver\'s License')
wallet_handler.add_item('ICE Info')
wallet_handler.add_item('Credit Card')
wallet_handler.add_item('Business Card')
wallet_handler.show_items()
wait()
if __name__=='__main__':
exercise()
| for key, value in self.items.items():
if value is True:
print key | identifier_body |
exercise.py | #!/usr/bin/python
from __future__ import unicode_literals
import os
def wait():
raw_input('\nPress Enter to continue...\n\n')
os.system(['clear', 'cls'][os.name == 'nt'])
# Create a class to handle items in a wallet
class BaseWalletHandler(object):
def __init__(self):
self.items = {
'Driver\'s License': False,
'Credit Card': False,
'Cash': False,
'Change': False,
'Insurance Card': False,
'ICE Info': False,
'Pictures': False,
}
def | (self, item):
if item in self.items.keys():
self.items[item] = True
def remove_item(self, item):
if item in self.items.keys():
self.items[item] = False
def show_items(self):
for key, value in self.items.items():
if value is True:
print key
# Can this be refactored further to clean it up?
class WalletHandler(BaseWalletHandler):
def __init__(self):
super(WalletHandler, self).__init__()
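# Unlike the base handler, the override below also accepts items outside the predefined set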
def add_item(self, item):
super(WalletHandler, self).add_item(item)
if item not in self.items.keys():
self.items[item] = True
def exercise():
wallet_handler = BaseWalletHandler()
wallet_handler.add_item('Driver\'s License')
wallet_handler.add_item('ICE Info')
wallet_handler.add_item('Credit Card')
wallet_handler.add_item('Business Card')
wallet_handler.show_items()
wait()
wallet_handler = WalletHandler()
wallet_handler.add_item('Driver\'s License')
wallet_handler.add_item('ICE Info')
wallet_handler.add_item('Credit Card')
wallet_handler.add_item('Business Card')
wallet_handler.show_items()
wait()
if __name__=='__main__':
exercise()
| add_item | identifier_name |
hellojs.d.ts | // Type definitions for hello.js 0.2.3
// Project: http://adodson.com/hello.js/
// Definitions by: Pavel Zika <https://github.com/PavelPZ>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
interface HelloJSLoginOptions {
redirect_uri?: string;
display?: string;
scope?: string;
response_type?: string;
force?: boolean;
oauth_proxy?: string;
timeout?: number;
default_service?: string;
}
interface HelloJSLogoutOptions {
force?: boolean;
}
interface HelloJSEvent {
on(event: string, callback: (auth: HelloJSEventArgument) => void): HelloJSStatic;
off(event: string, callback: (auth: HelloJSEventArgument) => void): HelloJSStatic;
findEvents(event: string, callback: (name: string, index: number) => void): void;
emit(event: string, data: any): HelloJSStatic;
emitAfter(): HelloJSStatic;
success(callback: (json?: any) => void): HelloJSStatic;
error(callback: (json?: any) => void): HelloJSStatic;
complete(callback: (json?: any) => void): HelloJSStatic;
}
interface HelloJSEventArgument {
network: string;
authResponse?: any;
}
interface HelloJSStatic extends HelloJSEvent {
init(serviceAppIds: { [id: string]: string; }, options?: HelloJSLoginOptions): void;
login(network: string, options?: HelloJSLoginOptions, callback?: () => void): void;
logout(network: string, options?: HelloJSLogoutOptions, callback?: () => void): void;
getAuthResponse(network: string): any;
service(network: string): HelloJSServiceDef;
settings: HelloJSLoginOptions;
(network: string): HelloJSStaticNamed;
init(servicesDef: { [id: string]: HelloJSServiceDef; }): void;
}
interface HelloJSStaticNamed {
login(option?: HelloJSLoginOptions, callback?: () => void): void;
logout(callback?: () => void): void;
getAuthResponse(): any;
api(path?: string, method?: string, data?: any, callback?: (json?: any) => void): HelloJSStatic;
}
interface HelloJSOAuthDef {
version: number;
auth: string;
request: string;
token: string;
}
interface HelloJSServiceDef {
name: string;
oauth: HelloJSOAuthDef;
scope?: { [id: string]: string; };
scope_delim?: string;
autorefresh?: boolean;
base?: string;
root?: string; | xhr?: (par: any) => void;
jsonp?: (par: any) => void;
form?: (par: any) => void;
api?: (...par: any[]) => void;
}
declare var hello: HelloJSStatic; | get?: { [id: string]: any; };
post?: { [id: string]: any; };
del?: { [id: string]: string; };
put?: { [id: string]: any; };
wrap?: { [id: string]: (par: any) => void; }; | random_line_split |
unittest_cst.py | """
@file
@brief Helpers to compress constant used in unit tests.
"""
import base64
import json
import lzma
import pprint
def compress_cst(data, length=70, as_text=False):
"""
Transforms a huge constant into a sequence of compressed binary strings.
:param data: data
:param length: line length
:param as_text: returns the results as text
:return: results
.. runpython::
:showcode:
from pyquickhelper.pycode.unittest_cst import compress_cst
data = {'values': [0.5, 6.9]}
print(compress_cst(data))
"""
js = json.dumps(data)
data_js = js.encode("utf-8")
data_out = lzma.compress(data_js)
data64 = base64.b64encode(data_out)
bufs = []
pos = 0
while pos < len(data64):
if pos + length < len(data64):
bufs.append(data64[pos:pos + length])
pos += length
else:
|
if as_text:
return pprint.pformat(bufs) # pragma: no cover
return bufs
def decompress_cst(data):
"""
Transforms a huge constant produced by function @see fn compress_cst
into the original value.
:param data: data
:return: results
.. runpython::
:showcode:
from pyquickhelper.pycode.unittest_cst import compress_cst, decompress_cst
data = {'values': [0.5, 6.9]}
cp = compress_cst(data)
back = decompress_cst(cp)
print(back)
"""
if isinstance(data, list):
data = b"".join(data)
data64 = base64.b64decode(data)
data_in = lzma.decompress(data64)
dec = data_in.decode('utf-8')
return json.loads(dec)
| bufs.append(data64[pos:])
pos = len(data64) | conditional_block |
unittest_cst.py | """
@file
@brief Helpers to compress constant used in unit tests.
"""
import base64
import json
import lzma
import pprint
def compress_cst(data, length=70, as_text=False):
"""
Transforms a huge constant into a sequence of compressed binary strings.
:param data: data
:param length: line length
:param as_text: returns the results as text
:return: results
.. runpython::
:showcode:
from pyquickhelper.pycode.unittest_cst import compress_cst
data = {'values': [0.5, 6.9]}
print(compress_cst(data))
"""
js = json.dumps(data)
data_js = js.encode("utf-8")
data_out = lzma.compress(data_js)
data64 = base64.b64encode(data_out)
bufs = []
pos = 0
while pos < len(data64):
if pos + length < len(data64): | pos += length
else:
bufs.append(data64[pos:])
pos = len(data64)
if as_text:
return pprint.pformat(bufs) # pragma: no cover
return bufs
def decompress_cst(data):
"""
Transforms a huge constant produced by function @see fn compress_cst
into the original value.
:param data: data
:return: results
.. runpython::
:showcode:
from pyquickhelper.pycode.unittest_cst import compress_cst, decompress_cst
data = {'values': [0.5, 6.9]}
cp = compress_cst(data)
back = decompress_cst(cp)
print(back)
"""
if isinstance(data, list):
data = b"".join(data)
data64 = base64.b64decode(data)
data_in = lzma.decompress(data64)
dec = data_in.decode('utf-8')
return json.loads(dec) | bufs.append(data64[pos:pos + length]) | random_line_split |
unittest_cst.py | """
@file
@brief Helpers to compress constant used in unit tests.
"""
import base64
import json
import lzma
import pprint
def compress_cst(data, length=70, as_text=False):
"""
Transforms a huge constant into a sequence of compressed binary strings.
:param data: data
:param length: line length
:param as_text: returns the results as text
:return: results
.. runpython::
:showcode:
from pyquickhelper.pycode.unittest_cst import compress_cst
data = {'values': [0.5, 6.9]}
print(compress_cst(data))
"""
js = json.dumps(data)
data_js = js.encode("utf-8")
data_out = lzma.compress(data_js)
data64 = base64.b64encode(data_out)
bufs = []
pos = 0
while pos < len(data64):
if pos + length < len(data64):
bufs.append(data64[pos:pos + length])
pos += length
else:
bufs.append(data64[pos:])
pos = len(data64)
if as_text:
return pprint.pformat(bufs) # pragma: no cover
return bufs
def | (data):
"""
Transforms a huge constant produced by function @see fn compress_cst
into the original value.
:param data: data
:return: results
.. runpython::
:showcode:
from pyquickhelper.pycode.unittest_cst import compress_cst, decompress_cst
data = {'values': [0.5, 6.9]}
cp = compress_cst(data)
back = decompress_cst(cp)
print(back)
"""
if isinstance(data, list):
data = b"".join(data)
data64 = base64.b64decode(data)
data_in = lzma.decompress(data64)
dec = data_in.decode('utf-8')
return json.loads(dec)
| decompress_cst | identifier_name |
unittest_cst.py | """
@file
@brief Helpers to compress constant used in unit tests.
"""
import base64
import json
import lzma
import pprint
def compress_cst(data, length=70, as_text=False):
"""
Transforms a huge constant into a sequence of compressed binary strings.
:param data: data
:param length: line length
:param as_text: returns the results as text
:return: results
.. runpython::
:showcode:
from pyquickhelper.pycode.unittest_cst import compress_cst
data = {'values': [0.5, 6.9]}
print(compress_cst(data))
"""
js = json.dumps(data)
data_js = js.encode("utf-8")
data_out = lzma.compress(data_js)
data64 = base64.b64encode(data_out)
bufs = []
pos = 0
while pos < len(data64):
if pos + length < len(data64):
bufs.append(data64[pos:pos + length])
pos += length
else:
bufs.append(data64[pos:])
pos = len(data64)
if as_text:
return pprint.pformat(bufs) # pragma: no cover
return bufs
def decompress_cst(data):
| data = b"".join(data)
data64 = base64.b64decode(data)
data_in = lzma.decompress(data64)
dec = data_in.decode('utf-8')
return json.loads(dec)
| """
Transforms a huge constant produced by function @see fn compress_cst
into the original value.
:param data: data
:return: results
.. runpython::
:showcode:
from pyquickhelper.pycode.unittest_cst import compress_cst, decompress_cst
data = {'values': [0.5, 6.9]}
cp = compress_cst(data)
back = decompress_cst(cp)
print(back)
"""
if isinstance(data, list): | identifier_body |