file_name
large_stringlengths 4
140
| prefix
large_stringlengths 0
12.1k
| suffix
large_stringlengths 0
12k
| middle
large_stringlengths 0
7.51k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
movemant.rs | extern crate ggez;
extern crate rand;
use rand::Rng;
use ggez::graphics::{Point};
use ::MainState;
#[derive(Clone)]
pub struct Rabbit{
pub x:i32,
pub y:i32,
pub point:Point,
}
pub fn | (tself:&mut MainState){
let mut next_rabbits = Vec::new();
for obj in tself.rabbits_hash.iter(){
let cor_prev = obj.1;
let rab = rabbit_run(cor_prev);
let mut cor_next = rab.0;
cor_next.point = match tself.map_hash.get(&(cor_next.x, cor_next.y)) {
Some(n) => *n,
None => Point{x:150.0,y:150.0},
};
//println!("Rabbit({},{},{}) go {}, prev:{},{}; next:{},{}",(obj.0).0,(obj.0).1,(obj.0).2, rab.1, cor_prev.x, cor_prev.y, cor_next.x, cor_next.y);
//println!("next:{},{}", cor_next.x, cor_next.y);
//println!("next point:{},{}", &next_point.x, &next_point.y);
next_rabbits.push( ( *obj.0, cor_next ) );
};
for rabbit in next_rabbits{
tself.rabbits_hash.remove( &rabbit.0 );
tself.rabbits_hash.insert( ((rabbit.0).0, (rabbit.0).1, (rabbit.0).2), rabbit.1 );
}
}
pub fn rabbit_run (cor_prev:& Rabbit)-> (Rabbit, &str){
let not_run = Rabbit{
x:cor_prev.x,
y:cor_prev.y,
point: cor_prev.point,
};
let left = Rabbit{
x: (cor_prev.x - 1),
y: cor_prev.y,
point: cor_prev.point,
};
let right = Rabbit{
x: (cor_prev.x + 1),
y: cor_prev.y,
point: cor_prev.point,
};
let up_left = Rabbit{
x: (cor_prev.x - 1),
y: (cor_prev.y - 1),
point: cor_prev.point,
};
let down_right = Rabbit{
x: (cor_prev.x + 1),
y: (cor_prev.y + 1),
point: cor_prev.point,
};
let false_random = rand::thread_rng().gen_range(0, 5);
let variants = [
(not_run, "not_run"),
(left, "left") ,
(right, "right"),
(up_left, "up_left"),
(down_right, "down_right"),
];
let lets_go = variants[false_random].clone();
/*if (lets_go.0.x > 4) || (lets_go.0.y < -4){
return variants[0].clone();
}*/
return lets_go
} | rabbits_run | identifier_name |
movemant.rs | extern crate ggez;
extern crate rand;
use rand::Rng;
use ggez::graphics::{Point};
use ::MainState;
#[derive(Clone)]
pub struct Rabbit{
pub x:i32,
pub y:i32,
pub point:Point,
}
pub fn rabbits_run (tself:&mut MainState){
let mut next_rabbits = Vec::new();
for obj in tself.rabbits_hash.iter(){
let cor_prev = obj.1;
let rab = rabbit_run(cor_prev);
let mut cor_next = rab.0;
cor_next.point = match tself.map_hash.get(&(cor_next.x, cor_next.y)) {
Some(n) => *n,
None => Point{x:150.0,y:150.0},
};
//println!("Rabbit({},{},{}) go {}, prev:{},{}; next:{},{}",(obj.0).0,(obj.0).1,(obj.0).2, rab.1, cor_prev.x, cor_prev.y, cor_next.x, cor_next.y);
//println!("next:{},{}", cor_next.x, cor_next.y);
//println!("next point:{},{}", &next_point.x, &next_point.y);
next_rabbits.push( ( *obj.0, cor_next ) );
};
for rabbit in next_rabbits{
tself.rabbits_hash.remove( &rabbit.0 );
tself.rabbits_hash.insert( ((rabbit.0).0, (rabbit.0).1, (rabbit.0).2), rabbit.1 );
}
}
pub fn rabbit_run (cor_prev:& Rabbit)-> (Rabbit, &str){
let not_run = Rabbit{
x:cor_prev.x,
y:cor_prev.y,
point: cor_prev.point,
};
let left = Rabbit{
x: (cor_prev.x - 1),
y: cor_prev.y, | point: cor_prev.point,
};
let right = Rabbit{
x: (cor_prev.x + 1),
y: cor_prev.y,
point: cor_prev.point,
};
let up_left = Rabbit{
x: (cor_prev.x - 1),
y: (cor_prev.y - 1),
point: cor_prev.point,
};
let down_right = Rabbit{
x: (cor_prev.x + 1),
y: (cor_prev.y + 1),
point: cor_prev.point,
};
let false_random = rand::thread_rng().gen_range(0, 5);
let variants = [
(not_run, "not_run"),
(left, "left") ,
(right, "right"),
(up_left, "up_left"),
(down_right, "down_right"),
];
let lets_go = variants[false_random].clone();
/*if (lets_go.0.x > 4) || (lets_go.0.y < -4){
return variants[0].clone();
}*/
return lets_go
} | random_line_split |
|
index.js | window.onload = function() {
var prod_img = document.getElementById("prod_img");
// Set an interval o keep rotating the image every 256 milliseconds
setInterval(function(){
prod_img.src = getImageSrc();
},1000);
var imgIndex = 1;
function | () {
imgIndex = (imgIndex + 1) > 5? 1 : imgIndex + 1;
return imgIndex + ".jpeg";
}
var home = document.getElementById("pic");
var details = document.getElementById("inf");
var review = document.getElementById("rev");
document.getElementById("home").onclick = function() {
home.style.display = "block";
details.style.display = "none";
review.style.display = "none";
}
document.getElementById("details").onclick = function() {
home.style.display = "none";
details.style.display = "block";
review.style.display = "none";
}
document.getElementById("review").onclick = function() {
home.style.display = "none";
details.style.display = "none";
review.style.display = "block";
}
} | getImageSrc | identifier_name |
index.js | window.onload = function() {
var prod_img = document.getElementById("prod_img");
// Set an interval o keep rotating the image every 256 milliseconds
setInterval(function(){
prod_img.src = getImageSrc();
},1000);
var imgIndex = 1;
function getImageSrc() |
var home = document.getElementById("pic");
var details = document.getElementById("inf");
var review = document.getElementById("rev");
document.getElementById("home").onclick = function() {
home.style.display = "block";
details.style.display = "none";
review.style.display = "none";
}
document.getElementById("details").onclick = function() {
home.style.display = "none";
details.style.display = "block";
review.style.display = "none";
}
document.getElementById("review").onclick = function() {
home.style.display = "none";
details.style.display = "none";
review.style.display = "block";
}
} | {
imgIndex = (imgIndex + 1) > 5? 1 : imgIndex + 1;
return imgIndex + ".jpeg";
} | identifier_body |
index.js | window.onload = function() {
var prod_img = document.getElementById("prod_img");
// Set an interval o keep rotating the image every 256 milliseconds
setInterval(function(){
prod_img.src = getImageSrc();
},1000);
var imgIndex = 1;
function getImageSrc() {
imgIndex = (imgIndex + 1) > 5? 1 : imgIndex + 1;
return imgIndex + ".jpeg";
}
| document.getElementById("home").onclick = function() {
home.style.display = "block";
details.style.display = "none";
review.style.display = "none";
}
document.getElementById("details").onclick = function() {
home.style.display = "none";
details.style.display = "block";
review.style.display = "none";
}
document.getElementById("review").onclick = function() {
home.style.display = "none";
details.style.display = "none";
review.style.display = "block";
}
} | var home = document.getElementById("pic");
var details = document.getElementById("inf");
var review = document.getElementById("rev");
| random_line_split |
perf_context.rs | // Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum PerfLevel {
Uninitialized,
Disable,
EnableCount,
EnableTimeExceptForMutex,
EnableTimeAndCPUTimeExceptForMutex,
EnableTime,
OutOfBounds,
}
/// Extensions for measuring engine performance.
///
/// A PerfContext is created with a specific measurement level,
/// and a 'kind' which represents wich tikv subsystem measurements are being
/// collected for.
///
/// In rocks, `PerfContext` uses global state, and does not require
/// access through an engine. Thus perf data is not per-engine.
/// This doesn't seem like a reasonable assumption for engines generally,
/// so this abstraction follows the existing pattern in this crate and
/// requires `PerfContext` to be accessed through the engine.
pub trait PerfContextExt {
type PerfContext: PerfContext;
fn get_perf_context(&self, level: PerfLevel, kind: PerfContextKind) -> Self::PerfContext;
}
/// The raftstore subsystem the PerfContext is being created for.
///
/// This is a leaky abstraction that supports the encapsulation of metrics
/// reporting by the two raftstore subsystems that use `report_metrics`.
#[derive(Eq, PartialEq, Copy, Clone)]
pub enum | {
RaftstoreApply,
RaftstoreStore,
}
/// Reports metrics to prometheus
///
/// For alternate engines, it is reasonable to make `start_observe`
/// and `report_metrics` no-ops.
pub trait PerfContext: Send {
/// Reinitializes statistics and the perf level
fn start_observe(&mut self);
/// Reports the current collected metrics to prometheus
fn report_metrics(&mut self);
}
| PerfContextKind | identifier_name |
perf_context.rs | // Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum PerfLevel {
Uninitialized,
Disable,
EnableCount,
EnableTimeExceptForMutex,
EnableTimeAndCPUTimeExceptForMutex,
EnableTime,
OutOfBounds,
}
/// Extensions for measuring engine performance.
///
/// A PerfContext is created with a specific measurement level,
/// and a 'kind' which represents wich tikv subsystem measurements are being
/// collected for.
///
/// In rocks, `PerfContext` uses global state, and does not require
/// access through an engine. Thus perf data is not per-engine.
/// This doesn't seem like a reasonable assumption for engines generally,
/// so this abstraction follows the existing pattern in this crate and
/// requires `PerfContext` to be accessed through the engine.
pub trait PerfContextExt {
type PerfContext: PerfContext;
| }
/// The raftstore subsystem the PerfContext is being created for.
///
/// This is a leaky abstraction that supports the encapsulation of metrics
/// reporting by the two raftstore subsystems that use `report_metrics`.
#[derive(Eq, PartialEq, Copy, Clone)]
pub enum PerfContextKind {
RaftstoreApply,
RaftstoreStore,
}
/// Reports metrics to prometheus
///
/// For alternate engines, it is reasonable to make `start_observe`
/// and `report_metrics` no-ops.
pub trait PerfContext: Send {
/// Reinitializes statistics and the perf level
fn start_observe(&mut self);
/// Reports the current collected metrics to prometheus
fn report_metrics(&mut self);
} | fn get_perf_context(&self, level: PerfLevel, kind: PerfContextKind) -> Self::PerfContext; | random_line_split |
CommandHandler.js | /**
* CommandHandler: for handling script commands.
* @TODO: Flesh this out!
**/
var _ = require('underscore');
var $ = require('jquery');
var Radio = require('backbone.radio');
class | {
constructor(opts) {
this.channel = opts.channel;
this.broadcastChannel = this.channel;
this.plugins = opts.plugins;
this.widgetRegistry = opts.widgetRegistry;
// @TODO: later, should probably decouple theatre from commandhandler.
this.theatreChannel = opts.theatreChannel;
}
handle(cmd) {
// Dispatch by parsing command.
var cmdParts = cmd.cmd.split(':');
if (cmdParts[0] == 'widget') {
var action = cmdParts[1];
if (action == 'create') {
var classParts = cmd.widgetClass.split(':');
var widgetClass = this.plugins[classParts[0]].widgets[classParts[1]];
var widgetChannel = Radio.channel(cmd.widgetId);
var mergedWidgetOpts = _.extend({
broadcastChannel: this.broadcastChannel, channel: widgetChannel
}, cmd.widgetOpts);
var widget = new widgetClass(mergedWidgetOpts);
// Wire widget command triggers, for things like
// click behavior.
_.each(cmd.cmdTriggers, (cmdTrigger, action) => {
widget.$el.on(action, () => {
this.handle(cmdTrigger);
});
});
this.widgetRegistry.registerWidget({id: cmd.widgetId, widget: widget});
if (cmd.regionId) {
var regionParts = cmd.regionId.split(':');
var regionWidgetId = regionParts[0];
var widgetChannel = this.widgetRegistry.getWidget(regionWidgetId).channel;
var region = widgetChannel.request('region:get', {id: regionParts[1]});
region.show(widget);
}
} else if (action == 'request') {
var widgetChannel = this.widgetRegistry.getWidget(cmd.widgetId).channel;
return widgetChannel.request(cmd.req, cmd.opts);
}
} else if (cmdParts[0] == 'service'){
var serviceParts = cmd.serviceId.split(':');
return this.plugins[serviceParts[0]].services[serviceParts[1]](cmd.opts);
} else if (cmdParts[0] == 'region'){
return this.channel.request(cmd.cmd, cmd.opts);
} else if(cmdParts[0] == 'batch') {
var promises = [];
for (var i=0; i < cmd.cmds.length; i++) {
promises.push(this.handle(cmd.cmds[i]));
}
return $.when.apply($, promises);
} else if(cmdParts[0] == 'theatre') {
// @TODO: decouple theatre later, so that
// we don't need to keep direct reference here.
cmdParts.shift();
var theatreCmd = cmdParts.join(':');
return this.theatreChannel.request(theatreCmd, cmd.opts);
} else if(cmdParts[0] == 'debug') {
console.log('debug', cmd);
}
}
}
module.exports = CommandHandler;
| CommandHandler | identifier_name |
CommandHandler.js | /**
* CommandHandler: for handling script commands. |
var _ = require('underscore');
var $ = require('jquery');
var Radio = require('backbone.radio');
class CommandHandler {
constructor(opts) {
this.channel = opts.channel;
this.broadcastChannel = this.channel;
this.plugins = opts.plugins;
this.widgetRegistry = opts.widgetRegistry;
// @TODO: later, should probably decouple theatre from commandhandler.
this.theatreChannel = opts.theatreChannel;
}
handle(cmd) {
// Dispatch by parsing command.
var cmdParts = cmd.cmd.split(':');
if (cmdParts[0] == 'widget') {
var action = cmdParts[1];
if (action == 'create') {
var classParts = cmd.widgetClass.split(':');
var widgetClass = this.plugins[classParts[0]].widgets[classParts[1]];
var widgetChannel = Radio.channel(cmd.widgetId);
var mergedWidgetOpts = _.extend({
broadcastChannel: this.broadcastChannel, channel: widgetChannel
}, cmd.widgetOpts);
var widget = new widgetClass(mergedWidgetOpts);
// Wire widget command triggers, for things like
// click behavior.
_.each(cmd.cmdTriggers, (cmdTrigger, action) => {
widget.$el.on(action, () => {
this.handle(cmdTrigger);
});
});
this.widgetRegistry.registerWidget({id: cmd.widgetId, widget: widget});
if (cmd.regionId) {
var regionParts = cmd.regionId.split(':');
var regionWidgetId = regionParts[0];
var widgetChannel = this.widgetRegistry.getWidget(regionWidgetId).channel;
var region = widgetChannel.request('region:get', {id: regionParts[1]});
region.show(widget);
}
} else if (action == 'request') {
var widgetChannel = this.widgetRegistry.getWidget(cmd.widgetId).channel;
return widgetChannel.request(cmd.req, cmd.opts);
}
} else if (cmdParts[0] == 'service'){
var serviceParts = cmd.serviceId.split(':');
return this.plugins[serviceParts[0]].services[serviceParts[1]](cmd.opts);
} else if (cmdParts[0] == 'region'){
return this.channel.request(cmd.cmd, cmd.opts);
} else if(cmdParts[0] == 'batch') {
var promises = [];
for (var i=0; i < cmd.cmds.length; i++) {
promises.push(this.handle(cmd.cmds[i]));
}
return $.when.apply($, promises);
} else if(cmdParts[0] == 'theatre') {
// @TODO: decouple theatre later, so that
// we don't need to keep direct reference here.
cmdParts.shift();
var theatreCmd = cmdParts.join(':');
return this.theatreChannel.request(theatreCmd, cmd.opts);
} else if(cmdParts[0] == 'debug') {
console.log('debug', cmd);
}
}
}
module.exports = CommandHandler; | * @TODO: Flesh this out!
**/ | random_line_split |
setting_util.py | #!/usr/bin/env python
import glob
import inspect
import os
import keyring
import getpass
import sys
import signal
from i3pystatus import Module, SettingsBase
from i3pystatus.core import ClassFinder
from collections import defaultdict, OrderedDict
def signal_handler(signal, frame):
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
def get_int_in_range(prompt, _range):
while True:
|
modules = [os.path.basename(m.replace('.py', ''))
for m in glob.glob(os.path.join(os.path.dirname(__file__), "i3pystatus", "*.py"))
if not os.path.basename(m).startswith('_')]
protected_settings = SettingsBase._SettingsBase__PROTECTED_SETTINGS
class_finder = ClassFinder(Module)
credential_modules = defaultdict(dict)
for module_name in modules:
try:
module = class_finder.get_module(module_name)
clazz = class_finder.get_class(module)
members = [m[0] for m in inspect.getmembers(clazz) if not m[0].startswith('_')]
if any([hasattr(clazz, setting) for setting in protected_settings]):
credential_modules[clazz.__name__]['credentials'] = list(set(protected_settings) & set(members))
credential_modules[clazz.__name__]['key'] = "%s.%s" % (clazz.__module__, clazz.__name__)
elif hasattr(clazz, 'required'):
protected = []
required = getattr(clazz, 'required')
for setting in protected_settings:
if setting in required:
protected.append(setting)
if protected:
credential_modules[clazz.__name__]['credentials'] = protected
credential_modules[clazz.__name__]['key'] = "%s.%s" % (clazz.__module__, clazz.__name__)
except ImportError:
continue
choices = [k for k in credential_modules.keys()]
for idx, module in enumerate(choices, start=1):
print("%s - %s" % (idx, module))
index = get_int_in_range("Choose module:\n> ", range(1, len(choices) + 1))
module_name = choices[index - 1]
module = credential_modules[module_name]
for idx, setting in enumerate(module['credentials'], start=1):
print("%s - %s" % (idx, setting))
choices = module['credentials']
index = get_int_in_range("Choose setting for %s:\n> " % module_name, range(1, len(choices) + 1))
setting = choices[index - 1]
answer = getpass.getpass("Enter value for %s:\n> " % setting)
answer2 = getpass.getpass("Re-enter value\n> ")
if answer == answer2:
key = "%s.%s" % (module['key'], setting)
keyring.set_password(key, getpass.getuser(), answer)
print("%s set!" % setting)
else:
print("Values don't match - nothing set.")
| answer = input(prompt)
try:
n = int(answer.strip())
if n in _range:
return n
else:
print("Value out of range!")
except ValueError:
print("Invalid input!") | conditional_block |
setting_util.py | #!/usr/bin/env python
import glob
import inspect
import os
import keyring
import getpass
import sys
import signal
from i3pystatus import Module, SettingsBase
from i3pystatus.core import ClassFinder
from collections import defaultdict, OrderedDict
def signal_handler(signal, frame):
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
def get_int_in_range(prompt, _range):
|
modules = [os.path.basename(m.replace('.py', ''))
for m in glob.glob(os.path.join(os.path.dirname(__file__), "i3pystatus", "*.py"))
if not os.path.basename(m).startswith('_')]
protected_settings = SettingsBase._SettingsBase__PROTECTED_SETTINGS
class_finder = ClassFinder(Module)
credential_modules = defaultdict(dict)
for module_name in modules:
try:
module = class_finder.get_module(module_name)
clazz = class_finder.get_class(module)
members = [m[0] for m in inspect.getmembers(clazz) if not m[0].startswith('_')]
if any([hasattr(clazz, setting) for setting in protected_settings]):
credential_modules[clazz.__name__]['credentials'] = list(set(protected_settings) & set(members))
credential_modules[clazz.__name__]['key'] = "%s.%s" % (clazz.__module__, clazz.__name__)
elif hasattr(clazz, 'required'):
protected = []
required = getattr(clazz, 'required')
for setting in protected_settings:
if setting in required:
protected.append(setting)
if protected:
credential_modules[clazz.__name__]['credentials'] = protected
credential_modules[clazz.__name__]['key'] = "%s.%s" % (clazz.__module__, clazz.__name__)
except ImportError:
continue
choices = [k for k in credential_modules.keys()]
for idx, module in enumerate(choices, start=1):
print("%s - %s" % (idx, module))
index = get_int_in_range("Choose module:\n> ", range(1, len(choices) + 1))
module_name = choices[index - 1]
module = credential_modules[module_name]
for idx, setting in enumerate(module['credentials'], start=1):
print("%s - %s" % (idx, setting))
choices = module['credentials']
index = get_int_in_range("Choose setting for %s:\n> " % module_name, range(1, len(choices) + 1))
setting = choices[index - 1]
answer = getpass.getpass("Enter value for %s:\n> " % setting)
answer2 = getpass.getpass("Re-enter value\n> ")
if answer == answer2:
key = "%s.%s" % (module['key'], setting)
keyring.set_password(key, getpass.getuser(), answer)
print("%s set!" % setting)
else:
print("Values don't match - nothing set.")
| while True:
answer = input(prompt)
try:
n = int(answer.strip())
if n in _range:
return n
else:
print("Value out of range!")
except ValueError:
print("Invalid input!") | identifier_body |
setting_util.py | #!/usr/bin/env python
import glob
import inspect
import os
import keyring
import getpass
import sys
import signal
from i3pystatus import Module, SettingsBase
from i3pystatus.core import ClassFinder
from collections import defaultdict, OrderedDict
def signal_handler(signal, frame):
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
def get_int_in_range(prompt, _range):
while True: | n = int(answer.strip())
if n in _range:
return n
else:
print("Value out of range!")
except ValueError:
print("Invalid input!")
modules = [os.path.basename(m.replace('.py', ''))
for m in glob.glob(os.path.join(os.path.dirname(__file__), "i3pystatus", "*.py"))
if not os.path.basename(m).startswith('_')]
protected_settings = SettingsBase._SettingsBase__PROTECTED_SETTINGS
class_finder = ClassFinder(Module)
credential_modules = defaultdict(dict)
for module_name in modules:
try:
module = class_finder.get_module(module_name)
clazz = class_finder.get_class(module)
members = [m[0] for m in inspect.getmembers(clazz) if not m[0].startswith('_')]
if any([hasattr(clazz, setting) for setting in protected_settings]):
credential_modules[clazz.__name__]['credentials'] = list(set(protected_settings) & set(members))
credential_modules[clazz.__name__]['key'] = "%s.%s" % (clazz.__module__, clazz.__name__)
elif hasattr(clazz, 'required'):
protected = []
required = getattr(clazz, 'required')
for setting in protected_settings:
if setting in required:
protected.append(setting)
if protected:
credential_modules[clazz.__name__]['credentials'] = protected
credential_modules[clazz.__name__]['key'] = "%s.%s" % (clazz.__module__, clazz.__name__)
except ImportError:
continue
choices = [k for k in credential_modules.keys()]
for idx, module in enumerate(choices, start=1):
print("%s - %s" % (idx, module))
index = get_int_in_range("Choose module:\n> ", range(1, len(choices) + 1))
module_name = choices[index - 1]
module = credential_modules[module_name]
for idx, setting in enumerate(module['credentials'], start=1):
print("%s - %s" % (idx, setting))
choices = module['credentials']
index = get_int_in_range("Choose setting for %s:\n> " % module_name, range(1, len(choices) + 1))
setting = choices[index - 1]
answer = getpass.getpass("Enter value for %s:\n> " % setting)
answer2 = getpass.getpass("Re-enter value\n> ")
if answer == answer2:
key = "%s.%s" % (module['key'], setting)
keyring.set_password(key, getpass.getuser(), answer)
print("%s set!" % setting)
else:
print("Values don't match - nothing set.") | answer = input(prompt)
try: | random_line_split |
setting_util.py | #!/usr/bin/env python
import glob
import inspect
import os
import keyring
import getpass
import sys
import signal
from i3pystatus import Module, SettingsBase
from i3pystatus.core import ClassFinder
from collections import defaultdict, OrderedDict
def | (signal, frame):
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
def get_int_in_range(prompt, _range):
while True:
answer = input(prompt)
try:
n = int(answer.strip())
if n in _range:
return n
else:
print("Value out of range!")
except ValueError:
print("Invalid input!")
modules = [os.path.basename(m.replace('.py', ''))
for m in glob.glob(os.path.join(os.path.dirname(__file__), "i3pystatus", "*.py"))
if not os.path.basename(m).startswith('_')]
protected_settings = SettingsBase._SettingsBase__PROTECTED_SETTINGS
class_finder = ClassFinder(Module)
credential_modules = defaultdict(dict)
for module_name in modules:
try:
module = class_finder.get_module(module_name)
clazz = class_finder.get_class(module)
members = [m[0] for m in inspect.getmembers(clazz) if not m[0].startswith('_')]
if any([hasattr(clazz, setting) for setting in protected_settings]):
credential_modules[clazz.__name__]['credentials'] = list(set(protected_settings) & set(members))
credential_modules[clazz.__name__]['key'] = "%s.%s" % (clazz.__module__, clazz.__name__)
elif hasattr(clazz, 'required'):
protected = []
required = getattr(clazz, 'required')
for setting in protected_settings:
if setting in required:
protected.append(setting)
if protected:
credential_modules[clazz.__name__]['credentials'] = protected
credential_modules[clazz.__name__]['key'] = "%s.%s" % (clazz.__module__, clazz.__name__)
except ImportError:
continue
choices = [k for k in credential_modules.keys()]
for idx, module in enumerate(choices, start=1):
print("%s - %s" % (idx, module))
index = get_int_in_range("Choose module:\n> ", range(1, len(choices) + 1))
module_name = choices[index - 1]
module = credential_modules[module_name]
for idx, setting in enumerate(module['credentials'], start=1):
print("%s - %s" % (idx, setting))
choices = module['credentials']
index = get_int_in_range("Choose setting for %s:\n> " % module_name, range(1, len(choices) + 1))
setting = choices[index - 1]
answer = getpass.getpass("Enter value for %s:\n> " % setting)
answer2 = getpass.getpass("Re-enter value\n> ")
if answer == answer2:
key = "%s.%s" % (module['key'], setting)
keyring.set_password(key, getpass.getuser(), answer)
print("%s set!" % setting)
else:
print("Values don't match - nothing set.")
| signal_handler | identifier_name |
page.component.ts | /**
* @license
* Copyright Akveo. All Rights Reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*/
import { Component, Inject, NgZone, OnDestroy, OnInit, ViewChild, AfterContentChecked } from '@angular/core';
import { Meta, Title } from '@angular/platform-browser';
import { ActivatedRoute, Router } from '@angular/router';
import { filter, map, publishReplay, refCount, tap, takeWhile } from 'rxjs/operators';
import { NB_WINDOW } from '@nebular/theme';
import { NgdTabbedBlockComponent } from '../../blocks/components/tabbed-block/tabbed-block.component';
import { NgdStructureService } from '../../@theme/services';
@Component({
selector: 'ngd-page',
templateUrl: './page.component.html',
styleUrls: ['./page.component.scss'],
})
export class NgdPageComponent implements OnInit, AfterContentChecked, OnDestroy {
currentItem;
private alive = true;
currentTabName: string = '';
@ViewChild(NgdTabbedBlockComponent, { static: false }) tabbedBlock: NgdTabbedBlockComponent;
constructor(@Inject(NB_WINDOW) private window,
private ngZone: NgZone,
private router: Router,
private activatedRoute: ActivatedRoute,
private structureService: NgdStructureService,
private titleService: Title,
private metaTagsService: Meta) {
}
get showSettings() {
return this.currentItem && this.currentItem.children
.some((item) => ['markdown', 'component', 'tabbed'].includes(item.block));
}
ngOnInit() {
this.handlePageNavigation();
this.window.history.scrollRestoration = 'manual';
}
ngAfterContentChecked() {
const currentTabName = this.getCurrentTabName();
if (this.currentTabName !== currentTabName) {
Promise.resolve().then(() => this.currentTabName = currentTabName);
}
}
ngOnDestroy() {
this.alive = false;
}
| () {
this.activatedRoute.params
.pipe(
takeWhile(() => this.alive),
filter((params: any) => params.subPage),
map((params: any) => {
const slag = `${params.page}_${params.subPage}`;
return this.structureService.findPageBySlag(this.structureService.getPreparedStructure(), slag);
}),
filter(item => item),
tap((item: any) => {
this.titleService.setTitle(`UI Kitten - ${item.title}`);
this.metaTagsService.updateTag({ name: 'description', content: item.description });
this.metaTagsService.updateTag({ name: 'keywords', content: item.keywords })
}),
publishReplay(),
refCount(),
)
.subscribe((item) => {
this.currentItem = item;
});
}
protected getCurrentTabName(): string {
if (this.tabbedBlock && this.tabbedBlock.currentTab) {
return this.tabbedBlock.currentTab.tab;
}
return '';
}
}
| handlePageNavigation | identifier_name |
page.component.ts | /**
* @license
* Copyright Akveo. All Rights Reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*/
import { Component, Inject, NgZone, OnDestroy, OnInit, ViewChild, AfterContentChecked } from '@angular/core';
import { Meta, Title } from '@angular/platform-browser';
import { ActivatedRoute, Router } from '@angular/router';
import { filter, map, publishReplay, refCount, tap, takeWhile } from 'rxjs/operators';
import { NB_WINDOW } from '@nebular/theme';
import { NgdTabbedBlockComponent } from '../../blocks/components/tabbed-block/tabbed-block.component';
import { NgdStructureService } from '../../@theme/services';
@Component({
selector: 'ngd-page',
templateUrl: './page.component.html',
styleUrls: ['./page.component.scss'],
})
export class NgdPageComponent implements OnInit, AfterContentChecked, OnDestroy {
currentItem;
private alive = true;
currentTabName: string = '';
@ViewChild(NgdTabbedBlockComponent, { static: false }) tabbedBlock: NgdTabbedBlockComponent;
constructor(@Inject(NB_WINDOW) private window,
private ngZone: NgZone,
private router: Router,
private activatedRoute: ActivatedRoute,
private structureService: NgdStructureService,
private titleService: Title,
private metaTagsService: Meta) {
}
get showSettings() {
return this.currentItem && this.currentItem.children
.some((item) => ['markdown', 'component', 'tabbed'].includes(item.block));
}
ngOnInit() {
this.handlePageNavigation();
this.window.history.scrollRestoration = 'manual';
}
ngAfterContentChecked() {
const currentTabName = this.getCurrentTabName();
if (this.currentTabName !== currentTabName) {
Promise.resolve().then(() => this.currentTabName = currentTabName);
}
}
ngOnDestroy() {
this.alive = false;
}
handlePageNavigation() {
this.activatedRoute.params
.pipe(
takeWhile(() => this.alive),
filter((params: any) => params.subPage),
map((params: any) => {
const slag = `${params.page}_${params.subPage}`;
return this.structureService.findPageBySlag(this.structureService.getPreparedStructure(), slag);
}),
filter(item => item),
tap((item: any) => {
this.titleService.setTitle(`UI Kitten - ${item.title}`);
this.metaTagsService.updateTag({ name: 'description', content: item.description });
this.metaTagsService.updateTag({ name: 'keywords', content: item.keywords })
}),
publishReplay(),
refCount(),
)
.subscribe((item) => {
this.currentItem = item;
});
}
protected getCurrentTabName(): string {
if (this.tabbedBlock && this.tabbedBlock.currentTab) |
return '';
}
}
| {
return this.tabbedBlock.currentTab.tab;
} | conditional_block |
page.component.ts | /**
* @license
* Copyright Akveo. All Rights Reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*/
import { Component, Inject, NgZone, OnDestroy, OnInit, ViewChild, AfterContentChecked } from '@angular/core'; | import { NB_WINDOW } from '@nebular/theme';
import { NgdTabbedBlockComponent } from '../../blocks/components/tabbed-block/tabbed-block.component';
import { NgdStructureService } from '../../@theme/services';
@Component({
selector: 'ngd-page',
templateUrl: './page.component.html',
styleUrls: ['./page.component.scss'],
})
export class NgdPageComponent implements OnInit, AfterContentChecked, OnDestroy {
currentItem;
private alive = true;
currentTabName: string = '';
@ViewChild(NgdTabbedBlockComponent, { static: false }) tabbedBlock: NgdTabbedBlockComponent;
constructor(@Inject(NB_WINDOW) private window,
private ngZone: NgZone,
private router: Router,
private activatedRoute: ActivatedRoute,
private structureService: NgdStructureService,
private titleService: Title,
private metaTagsService: Meta) {
}
get showSettings() {
return this.currentItem && this.currentItem.children
.some((item) => ['markdown', 'component', 'tabbed'].includes(item.block));
}
ngOnInit() {
this.handlePageNavigation();
this.window.history.scrollRestoration = 'manual';
}
ngAfterContentChecked() {
const currentTabName = this.getCurrentTabName();
if (this.currentTabName !== currentTabName) {
Promise.resolve().then(() => this.currentTabName = currentTabName);
}
}
ngOnDestroy() {
this.alive = false;
}
handlePageNavigation() {
this.activatedRoute.params
.pipe(
takeWhile(() => this.alive),
filter((params: any) => params.subPage),
map((params: any) => {
const slag = `${params.page}_${params.subPage}`;
return this.structureService.findPageBySlag(this.structureService.getPreparedStructure(), slag);
}),
filter(item => item),
tap((item: any) => {
this.titleService.setTitle(`UI Kitten - ${item.title}`);
this.metaTagsService.updateTag({ name: 'description', content: item.description });
this.metaTagsService.updateTag({ name: 'keywords', content: item.keywords })
}),
publishReplay(),
refCount(),
)
.subscribe((item) => {
this.currentItem = item;
});
}
protected getCurrentTabName(): string {
if (this.tabbedBlock && this.tabbedBlock.currentTab) {
return this.tabbedBlock.currentTab.tab;
}
return '';
}
} | import { Meta, Title } from '@angular/platform-browser';
import { ActivatedRoute, Router } from '@angular/router';
import { filter, map, publishReplay, refCount, tap, takeWhile } from 'rxjs/operators'; | random_line_split |
page.component.ts | /**
* @license
* Copyright Akveo. All Rights Reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*/
import { Component, Inject, NgZone, OnDestroy, OnInit, ViewChild, AfterContentChecked } from '@angular/core';
import { Meta, Title } from '@angular/platform-browser';
import { ActivatedRoute, Router } from '@angular/router';
import { filter, map, publishReplay, refCount, tap, takeWhile } from 'rxjs/operators';
import { NB_WINDOW } from '@nebular/theme';
import { NgdTabbedBlockComponent } from '../../blocks/components/tabbed-block/tabbed-block.component';
import { NgdStructureService } from '../../@theme/services';
@Component({
selector: 'ngd-page',
templateUrl: './page.component.html',
styleUrls: ['./page.component.scss'],
})
export class NgdPageComponent implements OnInit, AfterContentChecked, OnDestroy {
currentItem;
private alive = true;
currentTabName: string = '';
@ViewChild(NgdTabbedBlockComponent, { static: false }) tabbedBlock: NgdTabbedBlockComponent;
constructor(@Inject(NB_WINDOW) private window,
private ngZone: NgZone,
private router: Router,
private activatedRoute: ActivatedRoute,
private structureService: NgdStructureService,
private titleService: Title,
private metaTagsService: Meta) {
}
get showSettings() {
return this.currentItem && this.currentItem.children
.some((item) => ['markdown', 'component', 'tabbed'].includes(item.block));
}
ngOnInit() |
ngAfterContentChecked() {
const currentTabName = this.getCurrentTabName();
if (this.currentTabName !== currentTabName) {
Promise.resolve().then(() => this.currentTabName = currentTabName);
}
}
ngOnDestroy() {
this.alive = false;
}
handlePageNavigation() {
this.activatedRoute.params
.pipe(
takeWhile(() => this.alive),
filter((params: any) => params.subPage),
map((params: any) => {
const slag = `${params.page}_${params.subPage}`;
return this.structureService.findPageBySlag(this.structureService.getPreparedStructure(), slag);
}),
filter(item => item),
tap((item: any) => {
this.titleService.setTitle(`UI Kitten - ${item.title}`);
this.metaTagsService.updateTag({ name: 'description', content: item.description });
this.metaTagsService.updateTag({ name: 'keywords', content: item.keywords })
}),
publishReplay(),
refCount(),
)
.subscribe((item) => {
this.currentItem = item;
});
}
protected getCurrentTabName(): string {
if (this.tabbedBlock && this.tabbedBlock.currentTab) {
return this.tabbedBlock.currentTab.tab;
}
return '';
}
}
| {
this.handlePageNavigation();
this.window.history.scrollRestoration = 'manual';
} | identifier_body |
conf.py | # -*- coding: utf-8 -*-
#
# RedPipe documentation build configuration file, created by
# sphinx-quickstart on Wed Apr 19 13:22:45 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
import os
import sys
from os import path
ROOTDIR = path.abspath(os.path.dirname(os.path.dirname(__file__)))
sys.path.insert(0, ROOTDIR)
import redpipe # noqa
extensions = [
'alabaster',
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'RedPipe'
copyright = u'2017, John Loehrer'
author = u'John Loehrer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = redpipe.__version__
# The full version, including alpha/beta/rc tags.
release = redpipe.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'logo': 'redpipe-logo.gif',
'github_banner': True,
'github_user': '72squared',
'github_repo': 'redpipe',
'travis_button': True,
'analytics_id': 'UA-98626018-1',
}
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
]
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'RedPipedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
| #
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'RedPipe.tex', u'%s Documentation' % project,
u'John Loehrer', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, project, u'%s Documentation' % project,
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, project, u'%s Documentation' % project,
author, project, 'making redis pipelines easy in python',
'Miscellaneous'),
]
suppress_warnings = ['image.nonlocal_uri'] | # Latex figure (float) alignment | random_line_split |
day3.rs | use std::io::{self, BufRead};
use std::fmt;
static DEFAULT: i32 = 0;
static MULTIPLE: i32 = -1;
pub struct Entry {
num: i32,
index_x: i32,
index_y: i32,
width: i32,
height: i32
}
impl fmt::Display for Entry {
fn fmt(&self,fmt: &mut fmt::Formatter) -> fmt::Result {
let str_num = "num=".to_string()+&self.num.to_string();
let str_index_x = "index_x=".to_string() + &self.index_x.to_string();
let str_index_y = "index_y=".to_string() + &self.index_y.to_string();
let str_w = "w=".to_string() + &self.width.to_string();
let str_h = "h=".to_string() + &self.height.to_string();
let parsed = str_num + " " + str_index_x.as_str() + " " + str_index_y.as_str() + " " + str_w.as_str() + " " + str_h.as_str();
fmt.write_str(parsed.as_str())?;
Ok(())
}
}
fn paint_matrix(matrix: &mut [[i32; 10]; 10]) {
for (i, row) in matrix.iter_mut().enumerate() {
for (y, col) in row.iter_mut().enumerate() {
if *col == 0 as i32 {
print!(".");
} else if *col == -1 as i32 {
print!("X");
} else {
print!("{}", col);
}
}
println!();
}
} | if current_value == DEFAULT {
matrix[x as usize][y as usize] = entry.num;
//matrix[x as usize][y as usize] = "1";
} else {
if current_value != MULTIPLE {
matrix[x as usize][y as usize] = MULTIPLE;
}
}
}
}
}
fn code3() {
use regex::Regex;
let re = Regex::new(r"^#(\d) @ (\d),(\d): (\d)x(\d)").unwrap();
let mut vec: Vec<String> = Vec::new();
let stdin = io::stdin();
for line in stdin.lock().lines() {
let str_line = line.unwrap().trim().to_string();
if str_line == "0" {
println!("exit!");
break;
}
vec.push(str_line);
}
// check input
let mut entries: Vec<Entry> = Vec::new();
for line in vec {
println!("{:?}", re.is_match(line.as_str()));
for cap in re.captures_iter(line.as_str()) {
let num: i32 = cap[1].parse::<i32>().unwrap();
let index_x: i32 = cap[2].parse::<i32>().unwrap();
let index_y: i32 = cap[3].parse::<i32>().unwrap();
let width: i32 = cap[4].parse::<i32>().unwrap();
let height: i32 = cap[5].parse::<i32>().unwrap();
let entry = Entry{num, index_x, index_y, width, height};
entries.push(entry);
}
}
let mut matrix = [[DEFAULT; 10];10];
for mut ent in entries {
println!("{}",ent);
fill_matrix(&mut matrix, &mut ent);
}
paint_matrix(&mut matrix);
} |
fn fill_matrix(matrix: &mut [[i32; 10]; 10], entry: &mut Entry) {
for x in entry.index_x..entry.width{
for y in entry.index_y..entry.height {
let current_value = matrix[x as usize][y as usize]; | random_line_split |
day3.rs | use std::io::{self, BufRead};
use std::fmt;
static DEFAULT: i32 = 0;
static MULTIPLE: i32 = -1;
pub struct | {
num: i32,
index_x: i32,
index_y: i32,
width: i32,
height: i32
}
impl fmt::Display for Entry {
fn fmt(&self,fmt: &mut fmt::Formatter) -> fmt::Result {
let str_num = "num=".to_string()+&self.num.to_string();
let str_index_x = "index_x=".to_string() + &self.index_x.to_string();
let str_index_y = "index_y=".to_string() + &self.index_y.to_string();
let str_w = "w=".to_string() + &self.width.to_string();
let str_h = "h=".to_string() + &self.height.to_string();
let parsed = str_num + " " + str_index_x.as_str() + " " + str_index_y.as_str() + " " + str_w.as_str() + " " + str_h.as_str();
fmt.write_str(parsed.as_str())?;
Ok(())
}
}
fn paint_matrix(matrix: &mut [[i32; 10]; 10]) {
for (i, row) in matrix.iter_mut().enumerate() {
for (y, col) in row.iter_mut().enumerate() {
if *col == 0 as i32 {
print!(".");
} else if *col == -1 as i32 {
print!("X");
} else {
print!("{}", col);
}
}
println!();
}
}
fn fill_matrix(matrix: &mut [[i32; 10]; 10], entry: &mut Entry) {
for x in entry.index_x..entry.width{
for y in entry.index_y..entry.height {
let current_value = matrix[x as usize][y as usize];
if current_value == DEFAULT {
matrix[x as usize][y as usize] = entry.num;
//matrix[x as usize][y as usize] = "1";
} else {
if current_value != MULTIPLE {
matrix[x as usize][y as usize] = MULTIPLE;
}
}
}
}
}
fn code3() {
use regex::Regex;
let re = Regex::new(r"^#(\d) @ (\d),(\d): (\d)x(\d)").unwrap();
let mut vec: Vec<String> = Vec::new();
let stdin = io::stdin();
for line in stdin.lock().lines() {
let str_line = line.unwrap().trim().to_string();
if str_line == "0" {
println!("exit!");
break;
}
vec.push(str_line);
}
// check input
let mut entries: Vec<Entry> = Vec::new();
for line in vec {
println!("{:?}", re.is_match(line.as_str()));
for cap in re.captures_iter(line.as_str()) {
let num: i32 = cap[1].parse::<i32>().unwrap();
let index_x: i32 = cap[2].parse::<i32>().unwrap();
let index_y: i32 = cap[3].parse::<i32>().unwrap();
let width: i32 = cap[4].parse::<i32>().unwrap();
let height: i32 = cap[5].parse::<i32>().unwrap();
let entry = Entry{num, index_x, index_y, width, height};
entries.push(entry);
}
}
let mut matrix = [[DEFAULT; 10];10];
for mut ent in entries {
println!("{}",ent);
fill_matrix(&mut matrix, &mut ent);
}
paint_matrix(&mut matrix);
}
| Entry | identifier_name |
day3.rs | use std::io::{self, BufRead};
use std::fmt;
static DEFAULT: i32 = 0;
static MULTIPLE: i32 = -1;
pub struct Entry {
num: i32,
index_x: i32,
index_y: i32,
width: i32,
height: i32
}
impl fmt::Display for Entry {
fn fmt(&self,fmt: &mut fmt::Formatter) -> fmt::Result {
let str_num = "num=".to_string()+&self.num.to_string();
let str_index_x = "index_x=".to_string() + &self.index_x.to_string();
let str_index_y = "index_y=".to_string() + &self.index_y.to_string();
let str_w = "w=".to_string() + &self.width.to_string();
let str_h = "h=".to_string() + &self.height.to_string();
let parsed = str_num + " " + str_index_x.as_str() + " " + str_index_y.as_str() + " " + str_w.as_str() + " " + str_h.as_str();
fmt.write_str(parsed.as_str())?;
Ok(())
}
}
fn paint_matrix(matrix: &mut [[i32; 10]; 10]) {
for (i, row) in matrix.iter_mut().enumerate() {
for (y, col) in row.iter_mut().enumerate() {
if *col == 0 as i32 | else if *col == -1 as i32 {
print!("X");
} else {
print!("{}", col);
}
}
println!();
}
}
fn fill_matrix(matrix: &mut [[i32; 10]; 10], entry: &mut Entry) {
for x in entry.index_x..entry.width{
for y in entry.index_y..entry.height {
let current_value = matrix[x as usize][y as usize];
if current_value == DEFAULT {
matrix[x as usize][y as usize] = entry.num;
//matrix[x as usize][y as usize] = "1";
} else {
if current_value != MULTIPLE {
matrix[x as usize][y as usize] = MULTIPLE;
}
}
}
}
}
fn code3() {
use regex::Regex;
let re = Regex::new(r"^#(\d) @ (\d),(\d): (\d)x(\d)").unwrap();
let mut vec: Vec<String> = Vec::new();
let stdin = io::stdin();
for line in stdin.lock().lines() {
let str_line = line.unwrap().trim().to_string();
if str_line == "0" {
println!("exit!");
break;
}
vec.push(str_line);
}
// check input
let mut entries: Vec<Entry> = Vec::new();
for line in vec {
println!("{:?}", re.is_match(line.as_str()));
for cap in re.captures_iter(line.as_str()) {
let num: i32 = cap[1].parse::<i32>().unwrap();
let index_x: i32 = cap[2].parse::<i32>().unwrap();
let index_y: i32 = cap[3].parse::<i32>().unwrap();
let width: i32 = cap[4].parse::<i32>().unwrap();
let height: i32 = cap[5].parse::<i32>().unwrap();
let entry = Entry{num, index_x, index_y, width, height};
entries.push(entry);
}
}
let mut matrix = [[DEFAULT; 10];10];
for mut ent in entries {
println!("{}",ent);
fill_matrix(&mut matrix, &mut ent);
}
paint_matrix(&mut matrix);
}
| {
print!(".");
} | conditional_block |
day3.rs | use std::io::{self, BufRead};
use std::fmt;
static DEFAULT: i32 = 0;
static MULTIPLE: i32 = -1;
pub struct Entry {
num: i32,
index_x: i32,
index_y: i32,
width: i32,
height: i32
}
impl fmt::Display for Entry {
fn fmt(&self,fmt: &mut fmt::Formatter) -> fmt::Result |
}
fn paint_matrix(matrix: &mut [[i32; 10]; 10]) {
for (i, row) in matrix.iter_mut().enumerate() {
for (y, col) in row.iter_mut().enumerate() {
if *col == 0 as i32 {
print!(".");
} else if *col == -1 as i32 {
print!("X");
} else {
print!("{}", col);
}
}
println!();
}
}
fn fill_matrix(matrix: &mut [[i32; 10]; 10], entry: &mut Entry) {
for x in entry.index_x..entry.width{
for y in entry.index_y..entry.height {
let current_value = matrix[x as usize][y as usize];
if current_value == DEFAULT {
matrix[x as usize][y as usize] = entry.num;
//matrix[x as usize][y as usize] = "1";
} else {
if current_value != MULTIPLE {
matrix[x as usize][y as usize] = MULTIPLE;
}
}
}
}
}
fn code3() {
use regex::Regex;
let re = Regex::new(r"^#(\d) @ (\d),(\d): (\d)x(\d)").unwrap();
let mut vec: Vec<String> = Vec::new();
let stdin = io::stdin();
for line in stdin.lock().lines() {
let str_line = line.unwrap().trim().to_string();
if str_line == "0" {
println!("exit!");
break;
}
vec.push(str_line);
}
// check input
let mut entries: Vec<Entry> = Vec::new();
for line in vec {
println!("{:?}", re.is_match(line.as_str()));
for cap in re.captures_iter(line.as_str()) {
let num: i32 = cap[1].parse::<i32>().unwrap();
let index_x: i32 = cap[2].parse::<i32>().unwrap();
let index_y: i32 = cap[3].parse::<i32>().unwrap();
let width: i32 = cap[4].parse::<i32>().unwrap();
let height: i32 = cap[5].parse::<i32>().unwrap();
let entry = Entry{num, index_x, index_y, width, height};
entries.push(entry);
}
}
let mut matrix = [[DEFAULT; 10];10];
for mut ent in entries {
println!("{}",ent);
fill_matrix(&mut matrix, &mut ent);
}
paint_matrix(&mut matrix);
}
| {
let str_num = "num=".to_string()+&self.num.to_string();
let str_index_x = "index_x=".to_string() + &self.index_x.to_string();
let str_index_y = "index_y=".to_string() + &self.index_y.to_string();
let str_w = "w=".to_string() + &self.width.to_string();
let str_h = "h=".to_string() + &self.height.to_string();
let parsed = str_num + " " + str_index_x.as_str() + " " + str_index_y.as_str() + " " + str_w.as_str() + " " + str_h.as_str();
fmt.write_str(parsed.as_str())?;
Ok(())
} | identifier_body |
functional_tests.py | import pytest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
@pytest.fixture(scope='function')
def browser(request):
browser_ = webdriver.Firefox()
def | ():
browser_.quit()
request.addfinalizer(fin)
return browser_
def test_can_show_a_relevant_code_snippet(browser):
# Jan visits the site
browser.get('http://localhost:8000')
# He notices the title and header reference the site name
site_name = 'Scout'
assert site_name in browser.title
header_text = browser.find_element_by_tag_name('h1').text
assert site_name in header_text
# He is invited to search for code snippets
expected_search_prompt = 'Enter some code-related keywords'
search_box = browser.find_element_by_id('id_search_box')
actual_search_prompt = search_box.get_attribute('placeholder')
assert actual_search_prompt == expected_search_prompt
# He searches "python yield"
search_box.send_keys('python yield')
search_box.send_keys(Keys.ENTER)
# The page updates, and now the page shows a code snippet
# that uses the dummy variables "mylist" and "mygenerator"
# (the highest-voted python page on StackOverflow.com is
# /questions/231767/what-does-the-yield-keyword-do-in-python)
snippets = browser.find_elements_by_tag_name('code')
assert any(['mylist' in snippet.text and 'mygenerator' in snippet.text
for snippet in snippets])
| fin | identifier_name |
functional_tests.py | import pytest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
@pytest.fixture(scope='function')
def browser(request):
browser_ = webdriver.Firefox()
def fin():
browser_.quit()
request.addfinalizer(fin)
return browser_
def test_can_show_a_relevant_code_snippet(browser):
# Jan visits the site
browser.get('http://localhost:8000')
# He notices the title and header reference the site name
site_name = 'Scout'
assert site_name in browser.title
header_text = browser.find_element_by_tag_name('h1').text
assert site_name in header_text
# He is invited to search for code snippets
expected_search_prompt = 'Enter some code-related keywords'
search_box = browser.find_element_by_id('id_search_box')
actual_search_prompt = search_box.get_attribute('placeholder')
assert actual_search_prompt == expected_search_prompt
# He searches "python yield"
search_box.send_keys('python yield')
search_box.send_keys(Keys.ENTER)
# The page updates, and now the page shows a code snippet | # (the highest-voted python page on StackOverflow.com is
# /questions/231767/what-does-the-yield-keyword-do-in-python)
snippets = browser.find_elements_by_tag_name('code')
assert any(['mylist' in snippet.text and 'mygenerator' in snippet.text
for snippet in snippets]) | # that uses the dummy variables "mylist" and "mygenerator" | random_line_split |
functional_tests.py | import pytest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
@pytest.fixture(scope='function')
def browser(request):
|
def test_can_show_a_relevant_code_snippet(browser):
# Jan visits the site
browser.get('http://localhost:8000')
# He notices the title and header reference the site name
site_name = 'Scout'
assert site_name in browser.title
header_text = browser.find_element_by_tag_name('h1').text
assert site_name in header_text
# He is invited to search for code snippets
expected_search_prompt = 'Enter some code-related keywords'
search_box = browser.find_element_by_id('id_search_box')
actual_search_prompt = search_box.get_attribute('placeholder')
assert actual_search_prompt == expected_search_prompt
# He searches "python yield"
search_box.send_keys('python yield')
search_box.send_keys(Keys.ENTER)
# The page updates, and now the page shows a code snippet
# that uses the dummy variables "mylist" and "mygenerator"
# (the highest-voted python page on StackOverflow.com is
# /questions/231767/what-does-the-yield-keyword-do-in-python)
snippets = browser.find_elements_by_tag_name('code')
assert any(['mylist' in snippet.text and 'mygenerator' in snippet.text
for snippet in snippets])
| browser_ = webdriver.Firefox()
def fin():
browser_.quit()
request.addfinalizer(fin)
return browser_ | identifier_body |
kraken_v5.py | import logging
from pajbot.apiwrappers.response_cache import ListSerializer
from pajbot.apiwrappers.twitch.base import BaseTwitchAPI
from pajbot.models.emote import Emote
log = logging.getLogger(__name__)
class TwitchKrakenV5API(BaseTwitchAPI):
authorization_header_prefix = "OAuth"
def __init__(self, client_credentials, redis):
super().__init__(base_url="https://api.twitch.tv/kraken/", redis=redis)
self.session.headers["Accept"] = "application/vnd.twitchtv.v5+json"
self.client_credentials = client_credentials
@property
def default_authorization(self):
return self.client_credentials
def get_stream_status(self, user_id):
data = self.get(["streams", user_id])
def rest_data_offline():
return {
"viewers": -1,
"game": None,
"title": None,
"created_at": None,
"followers": -1,
"views": -1,
"broadcast_id": None,
}
def rest_data_online():
|
online = "stream" in data and data["stream"] is not None
def rest_data():
nonlocal online
if online:
return rest_data_online()
else:
return rest_data_offline()
return {"online": online, **rest_data()}
def set_game(self, user_id, game, authorization):
self.put(["channels", user_id], json={"channel": {"game": game}}, authorization=authorization)
def set_title(self, user_id, title, authorization):
self.put(["channels", user_id], json={"channel": {"status": title}}, authorization=authorization)
def get_vod_videos(self, channel_name):
return self.get(["channels", channel_name, "videos"], {"broadcast_type": "archive"})
def fetch_global_emotes(self):
# circular import prevention
from pajbot.managers.emote import EmoteManager
resp = self.get("/chat/emoticon_images", params={"emotesets": "0"})
return [EmoteManager.twitch_emote(str(data["id"]), data["code"]) for data in resp["emoticon_sets"]["0"]]
def get_global_emotes(self, force_fetch=False):
return self.cache.cache_fetch_fn(
redis_key="api:twitch:kraken:v5:global-emotes",
fetch_fn=lambda: self.fetch_global_emotes(),
serializer=ListSerializer(Emote),
expiry=60 * 60,
force_fetch=force_fetch,
)
| stream = data["stream"]
return {
"viewers": stream["viewers"],
"game": stream["game"],
"title": stream["channel"]["status"],
"created_at": stream["created_at"],
"followers": stream["channel"]["followers"],
"views": stream["channel"]["views"],
"broadcast_id": stream["_id"],
} | identifier_body |
kraken_v5.py | import logging
from pajbot.apiwrappers.response_cache import ListSerializer
from pajbot.apiwrappers.twitch.base import BaseTwitchAPI
from pajbot.models.emote import Emote
log = logging.getLogger(__name__)
class TwitchKrakenV5API(BaseTwitchAPI):
authorization_header_prefix = "OAuth"
def __init__(self, client_credentials, redis):
super().__init__(base_url="https://api.twitch.tv/kraken/", redis=redis)
self.session.headers["Accept"] = "application/vnd.twitchtv.v5+json"
self.client_credentials = client_credentials
@property | def default_authorization(self):
return self.client_credentials
def get_stream_status(self, user_id):
data = self.get(["streams", user_id])
def rest_data_offline():
return {
"viewers": -1,
"game": None,
"title": None,
"created_at": None,
"followers": -1,
"views": -1,
"broadcast_id": None,
}
def rest_data_online():
stream = data["stream"]
return {
"viewers": stream["viewers"],
"game": stream["game"],
"title": stream["channel"]["status"],
"created_at": stream["created_at"],
"followers": stream["channel"]["followers"],
"views": stream["channel"]["views"],
"broadcast_id": stream["_id"],
}
online = "stream" in data and data["stream"] is not None
def rest_data():
nonlocal online
if online:
return rest_data_online()
else:
return rest_data_offline()
return {"online": online, **rest_data()}
def set_game(self, user_id, game, authorization):
self.put(["channels", user_id], json={"channel": {"game": game}}, authorization=authorization)
def set_title(self, user_id, title, authorization):
self.put(["channels", user_id], json={"channel": {"status": title}}, authorization=authorization)
def get_vod_videos(self, channel_name):
return self.get(["channels", channel_name, "videos"], {"broadcast_type": "archive"})
def fetch_global_emotes(self):
# circular import prevention
from pajbot.managers.emote import EmoteManager
resp = self.get("/chat/emoticon_images", params={"emotesets": "0"})
return [EmoteManager.twitch_emote(str(data["id"]), data["code"]) for data in resp["emoticon_sets"]["0"]]
def get_global_emotes(self, force_fetch=False):
return self.cache.cache_fetch_fn(
redis_key="api:twitch:kraken:v5:global-emotes",
fetch_fn=lambda: self.fetch_global_emotes(),
serializer=ListSerializer(Emote),
expiry=60 * 60,
force_fetch=force_fetch,
) | random_line_split |
|
kraken_v5.py | import logging
from pajbot.apiwrappers.response_cache import ListSerializer
from pajbot.apiwrappers.twitch.base import BaseTwitchAPI
from pajbot.models.emote import Emote
log = logging.getLogger(__name__)
class TwitchKrakenV5API(BaseTwitchAPI):
authorization_header_prefix = "OAuth"
def __init__(self, client_credentials, redis):
super().__init__(base_url="https://api.twitch.tv/kraken/", redis=redis)
self.session.headers["Accept"] = "application/vnd.twitchtv.v5+json"
self.client_credentials = client_credentials
@property
def default_authorization(self):
return self.client_credentials
def get_stream_status(self, user_id):
data = self.get(["streams", user_id])
def rest_data_offline():
return {
"viewers": -1,
"game": None,
"title": None,
"created_at": None,
"followers": -1,
"views": -1,
"broadcast_id": None,
}
def rest_data_online():
stream = data["stream"]
return {
"viewers": stream["viewers"],
"game": stream["game"],
"title": stream["channel"]["status"],
"created_at": stream["created_at"],
"followers": stream["channel"]["followers"],
"views": stream["channel"]["views"],
"broadcast_id": stream["_id"],
}
online = "stream" in data and data["stream"] is not None
def rest_data():
nonlocal online
if online:
return rest_data_online()
else:
return rest_data_offline()
return {"online": online, **rest_data()}
def set_game(self, user_id, game, authorization):
self.put(["channels", user_id], json={"channel": {"game": game}}, authorization=authorization)
def set_title(self, user_id, title, authorization):
self.put(["channels", user_id], json={"channel": {"status": title}}, authorization=authorization)
def | (self, channel_name):
return self.get(["channels", channel_name, "videos"], {"broadcast_type": "archive"})
def fetch_global_emotes(self):
# circular import prevention
from pajbot.managers.emote import EmoteManager
resp = self.get("/chat/emoticon_images", params={"emotesets": "0"})
return [EmoteManager.twitch_emote(str(data["id"]), data["code"]) for data in resp["emoticon_sets"]["0"]]
def get_global_emotes(self, force_fetch=False):
return self.cache.cache_fetch_fn(
redis_key="api:twitch:kraken:v5:global-emotes",
fetch_fn=lambda: self.fetch_global_emotes(),
serializer=ListSerializer(Emote),
expiry=60 * 60,
force_fetch=force_fetch,
)
| get_vod_videos | identifier_name |
kraken_v5.py | import logging
from pajbot.apiwrappers.response_cache import ListSerializer
from pajbot.apiwrappers.twitch.base import BaseTwitchAPI
from pajbot.models.emote import Emote
log = logging.getLogger(__name__)
class TwitchKrakenV5API(BaseTwitchAPI):
authorization_header_prefix = "OAuth"
def __init__(self, client_credentials, redis):
super().__init__(base_url="https://api.twitch.tv/kraken/", redis=redis)
self.session.headers["Accept"] = "application/vnd.twitchtv.v5+json"
self.client_credentials = client_credentials
@property
def default_authorization(self):
return self.client_credentials
def get_stream_status(self, user_id):
data = self.get(["streams", user_id])
def rest_data_offline():
return {
"viewers": -1,
"game": None,
"title": None,
"created_at": None,
"followers": -1,
"views": -1,
"broadcast_id": None,
}
def rest_data_online():
stream = data["stream"]
return {
"viewers": stream["viewers"],
"game": stream["game"],
"title": stream["channel"]["status"],
"created_at": stream["created_at"],
"followers": stream["channel"]["followers"],
"views": stream["channel"]["views"],
"broadcast_id": stream["_id"],
}
online = "stream" in data and data["stream"] is not None
def rest_data():
nonlocal online
if online:
return rest_data_online()
else:
|
return {"online": online, **rest_data()}
def set_game(self, user_id, game, authorization):
self.put(["channels", user_id], json={"channel": {"game": game}}, authorization=authorization)
def set_title(self, user_id, title, authorization):
self.put(["channels", user_id], json={"channel": {"status": title}}, authorization=authorization)
def get_vod_videos(self, channel_name):
return self.get(["channels", channel_name, "videos"], {"broadcast_type": "archive"})
def fetch_global_emotes(self):
# circular import prevention
from pajbot.managers.emote import EmoteManager
resp = self.get("/chat/emoticon_images", params={"emotesets": "0"})
return [EmoteManager.twitch_emote(str(data["id"]), data["code"]) for data in resp["emoticon_sets"]["0"]]
def get_global_emotes(self, force_fetch=False):
return self.cache.cache_fetch_fn(
redis_key="api:twitch:kraken:v5:global-emotes",
fetch_fn=lambda: self.fetch_global_emotes(),
serializer=ListSerializer(Emote),
expiry=60 * 60,
force_fetch=force_fetch,
)
| return rest_data_offline() | conditional_block |
mqueue.rs | use {Error, Result, from_ffi};
use errno::Errno;
use libc::{c_int, c_long, c_char, size_t, mode_t, strlen};
use std::ffi::CString;
use sys::stat::Mode;
pub use self::consts::*;
pub type MQd = c_int;
#[cfg(target_os = "linux")]
mod consts {
use libc::c_int;
bitflags!(
flags MQ_OFlag: c_int {
const O_RDONLY = 0o00000000,
const O_WRONLY = 0o00000001,
const O_RDWR = 0o00000002,
const O_CREAT = 0o00000100,
const O_EXCL = 0o00000200,
const O_NONBLOCK = 0o00004000,
const O_CLOEXEC = 0o02000000,
}
);
bitflags!(
flags FdFlag: c_int {
const FD_CLOEXEC = 1
}
);
}
mod ffi {
use libc::{c_char, size_t, ssize_t, c_uint, c_int};
use super::MQd;
use super::MqAttr;
extern "C" {
pub fn mq_open(name: *const c_char, oflag: c_int, ...) -> MQd;
pub fn mq_close (mqd: MQd) -> c_int;
pub fn mq_receive (mqd: MQd, msg_ptr: *const c_char, msg_len: size_t, msq_prio: *const c_uint) -> ssize_t;
pub fn mq_send (mqd: MQd, msg_ptr: *const c_char, msg_len: size_t, msq_prio: c_uint) -> c_int;
pub fn mq_getattr(mqd: MQd, attr: *mut MqAttr) -> c_int;
}
}
#[repr(C)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct MqAttr {
pub mq_flags: c_long,
pub mq_maxmsg: c_long,
pub mq_msgsize: c_long,
pub mq_curmsgs: c_long,
pad: [c_long; 4]
}
impl MqAttr {
pub fn new(mq_flags: c_long, mq_maxmsg: c_long, mq_msgsize: c_long, mq_curmsgs: c_long) -> MqAttr {
MqAttr { mq_flags: mq_flags, mq_maxmsg: mq_maxmsg, mq_msgsize: mq_msgsize, mq_curmsgs: mq_curmsgs, pad: [0; 4] }
}
}
#[inline]
pub fn mq_open(name: &CString, oflag: MQ_OFlag, mode: Mode, attr: &MqAttr) -> Result<MQd> {
let res = unsafe { ffi::mq_open(name.as_ptr(), oflag.bits(), mode.bits() as mode_t, attr as *const MqAttr) };
if res < 0 {
return Err(Error::Sys(Errno::last()));
}
Ok(res)
}
pub fn mq_close(mqdes: MQd) -> Result<()> {
let res = unsafe { ffi::mq_close(mqdes) };
from_ffi(res)
}
pub fn mq_receive(mqdes: MQd, message: &mut [u8], msq_prio: u32) -> Result<usize> {
let len = message.len() as size_t;
let res = unsafe { ffi::mq_receive(mqdes, message.as_mut_ptr() as *mut c_char, len, &msq_prio) };
if res < 0 {
return Err(Error::Sys(Errno::last()));
}
Ok(res as usize)
}
pub fn mq_send(mqdes: MQd, message: &CString, msq_prio: u32) -> Result<usize> |
pub fn mq_getattr(mqd: MQd) -> Result<MqAttr> {
let mut attr = MqAttr::new(0, 0, 0, 0);
let res = unsafe { ffi::mq_getattr(mqd, &mut attr) };
if res < 0 {
return Err(Error::Sys(Errno::last()));
}
Ok(attr)
}
| {
let len = unsafe { strlen(message.as_ptr()) as size_t };
let res = unsafe { ffi::mq_send(mqdes, message.as_ptr(), len, msq_prio) };
if res < 0 {
return Err(Error::Sys(Errno::last()));
}
Ok(res as usize)
} | identifier_body |
mqueue.rs | use {Error, Result, from_ffi};
use errno::Errno;
use libc::{c_int, c_long, c_char, size_t, mode_t, strlen};
use std::ffi::CString;
use sys::stat::Mode;
pub use self::consts::*;
pub type MQd = c_int;
#[cfg(target_os = "linux")]
mod consts {
use libc::c_int;
bitflags!(
flags MQ_OFlag: c_int {
const O_RDONLY = 0o00000000,
const O_WRONLY = 0o00000001,
const O_RDWR = 0o00000002,
const O_CREAT = 0o00000100,
const O_EXCL = 0o00000200,
const O_NONBLOCK = 0o00004000,
const O_CLOEXEC = 0o02000000,
}
);
bitflags!(
flags FdFlag: c_int {
const FD_CLOEXEC = 1
}
);
}
mod ffi {
use libc::{c_char, size_t, ssize_t, c_uint, c_int};
use super::MQd;
use super::MqAttr;
extern "C" {
pub fn mq_open(name: *const c_char, oflag: c_int, ...) -> MQd;
pub fn mq_close (mqd: MQd) -> c_int;
pub fn mq_receive (mqd: MQd, msg_ptr: *const c_char, msg_len: size_t, msq_prio: *const c_uint) -> ssize_t;
pub fn mq_send (mqd: MQd, msg_ptr: *const c_char, msg_len: size_t, msq_prio: c_uint) -> c_int;
pub fn mq_getattr(mqd: MQd, attr: *mut MqAttr) -> c_int;
}
}
#[repr(C)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct MqAttr {
pub mq_flags: c_long,
pub mq_maxmsg: c_long,
pub mq_msgsize: c_long,
pub mq_curmsgs: c_long,
pad: [c_long; 4]
}
impl MqAttr {
pub fn new(mq_flags: c_long, mq_maxmsg: c_long, mq_msgsize: c_long, mq_curmsgs: c_long) -> MqAttr {
MqAttr { mq_flags: mq_flags, mq_maxmsg: mq_maxmsg, mq_msgsize: mq_msgsize, mq_curmsgs: mq_curmsgs, pad: [0; 4] }
}
}
#[inline]
pub fn mq_open(name: &CString, oflag: MQ_OFlag, mode: Mode, attr: &MqAttr) -> Result<MQd> {
let res = unsafe { ffi::mq_open(name.as_ptr(), oflag.bits(), mode.bits() as mode_t, attr as *const MqAttr) };
if res < 0 { | }
Ok(res)
}
pub fn mq_close(mqdes: MQd) -> Result<()> {
let res = unsafe { ffi::mq_close(mqdes) };
from_ffi(res)
}
pub fn mq_receive(mqdes: MQd, message: &mut [u8], msq_prio: u32) -> Result<usize> {
let len = message.len() as size_t;
let res = unsafe { ffi::mq_receive(mqdes, message.as_mut_ptr() as *mut c_char, len, &msq_prio) };
if res < 0 {
return Err(Error::Sys(Errno::last()));
}
Ok(res as usize)
}
pub fn mq_send(mqdes: MQd, message: &CString, msq_prio: u32) -> Result<usize> {
let len = unsafe { strlen(message.as_ptr()) as size_t };
let res = unsafe { ffi::mq_send(mqdes, message.as_ptr(), len, msq_prio) };
if res < 0 {
return Err(Error::Sys(Errno::last()));
}
Ok(res as usize)
}
pub fn mq_getattr(mqd: MQd) -> Result<MqAttr> {
let mut attr = MqAttr::new(0, 0, 0, 0);
let res = unsafe { ffi::mq_getattr(mqd, &mut attr) };
if res < 0 {
return Err(Error::Sys(Errno::last()));
}
Ok(attr)
} | return Err(Error::Sys(Errno::last())); | random_line_split |
mqueue.rs | use {Error, Result, from_ffi};
use errno::Errno;
use libc::{c_int, c_long, c_char, size_t, mode_t, strlen};
use std::ffi::CString;
use sys::stat::Mode;
pub use self::consts::*;
pub type MQd = c_int;
#[cfg(target_os = "linux")]
mod consts {
use libc::c_int;
bitflags!(
flags MQ_OFlag: c_int {
const O_RDONLY = 0o00000000,
const O_WRONLY = 0o00000001,
const O_RDWR = 0o00000002,
const O_CREAT = 0o00000100,
const O_EXCL = 0o00000200,
const O_NONBLOCK = 0o00004000,
const O_CLOEXEC = 0o02000000,
}
);
bitflags!(
flags FdFlag: c_int {
const FD_CLOEXEC = 1
}
);
}
mod ffi {
use libc::{c_char, size_t, ssize_t, c_uint, c_int};
use super::MQd;
use super::MqAttr;
extern "C" {
pub fn mq_open(name: *const c_char, oflag: c_int, ...) -> MQd;
pub fn mq_close (mqd: MQd) -> c_int;
pub fn mq_receive (mqd: MQd, msg_ptr: *const c_char, msg_len: size_t, msq_prio: *const c_uint) -> ssize_t;
pub fn mq_send (mqd: MQd, msg_ptr: *const c_char, msg_len: size_t, msq_prio: c_uint) -> c_int;
pub fn mq_getattr(mqd: MQd, attr: *mut MqAttr) -> c_int;
}
}
#[repr(C)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct MqAttr {
pub mq_flags: c_long,
pub mq_maxmsg: c_long,
pub mq_msgsize: c_long,
pub mq_curmsgs: c_long,
pad: [c_long; 4]
}
impl MqAttr {
pub fn new(mq_flags: c_long, mq_maxmsg: c_long, mq_msgsize: c_long, mq_curmsgs: c_long) -> MqAttr {
MqAttr { mq_flags: mq_flags, mq_maxmsg: mq_maxmsg, mq_msgsize: mq_msgsize, mq_curmsgs: mq_curmsgs, pad: [0; 4] }
}
}
#[inline]
pub fn mq_open(name: &CString, oflag: MQ_OFlag, mode: Mode, attr: &MqAttr) -> Result<MQd> {
let res = unsafe { ffi::mq_open(name.as_ptr(), oflag.bits(), mode.bits() as mode_t, attr as *const MqAttr) };
if res < 0 {
return Err(Error::Sys(Errno::last()));
}
Ok(res)
}
pub fn mq_close(mqdes: MQd) -> Result<()> {
let res = unsafe { ffi::mq_close(mqdes) };
from_ffi(res)
}
pub fn mq_receive(mqdes: MQd, message: &mut [u8], msq_prio: u32) -> Result<usize> {
let len = message.len() as size_t;
let res = unsafe { ffi::mq_receive(mqdes, message.as_mut_ptr() as *mut c_char, len, &msq_prio) };
if res < 0 {
return Err(Error::Sys(Errno::last()));
}
Ok(res as usize)
}
pub fn | (mqdes: MQd, message: &CString, msq_prio: u32) -> Result<usize> {
let len = unsafe { strlen(message.as_ptr()) as size_t };
let res = unsafe { ffi::mq_send(mqdes, message.as_ptr(), len, msq_prio) };
if res < 0 {
return Err(Error::Sys(Errno::last()));
}
Ok(res as usize)
}
pub fn mq_getattr(mqd: MQd) -> Result<MqAttr> {
let mut attr = MqAttr::new(0, 0, 0, 0);
let res = unsafe { ffi::mq_getattr(mqd, &mut attr) };
if res < 0 {
return Err(Error::Sys(Errno::last()));
}
Ok(attr)
}
| mq_send | identifier_name |
mqueue.rs | use {Error, Result, from_ffi};
use errno::Errno;
use libc::{c_int, c_long, c_char, size_t, mode_t, strlen};
use std::ffi::CString;
use sys::stat::Mode;
pub use self::consts::*;
pub type MQd = c_int;
#[cfg(target_os = "linux")]
mod consts {
use libc::c_int;
bitflags!(
flags MQ_OFlag: c_int {
const O_RDONLY = 0o00000000,
const O_WRONLY = 0o00000001,
const O_RDWR = 0o00000002,
const O_CREAT = 0o00000100,
const O_EXCL = 0o00000200,
const O_NONBLOCK = 0o00004000,
const O_CLOEXEC = 0o02000000,
}
);
bitflags!(
flags FdFlag: c_int {
const FD_CLOEXEC = 1
}
);
}
mod ffi {
use libc::{c_char, size_t, ssize_t, c_uint, c_int};
use super::MQd;
use super::MqAttr;
extern "C" {
pub fn mq_open(name: *const c_char, oflag: c_int, ...) -> MQd;
pub fn mq_close (mqd: MQd) -> c_int;
pub fn mq_receive (mqd: MQd, msg_ptr: *const c_char, msg_len: size_t, msq_prio: *const c_uint) -> ssize_t;
pub fn mq_send (mqd: MQd, msg_ptr: *const c_char, msg_len: size_t, msq_prio: c_uint) -> c_int;
pub fn mq_getattr(mqd: MQd, attr: *mut MqAttr) -> c_int;
}
}
#[repr(C)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct MqAttr {
pub mq_flags: c_long,
pub mq_maxmsg: c_long,
pub mq_msgsize: c_long,
pub mq_curmsgs: c_long,
pad: [c_long; 4]
}
impl MqAttr {
pub fn new(mq_flags: c_long, mq_maxmsg: c_long, mq_msgsize: c_long, mq_curmsgs: c_long) -> MqAttr {
MqAttr { mq_flags: mq_flags, mq_maxmsg: mq_maxmsg, mq_msgsize: mq_msgsize, mq_curmsgs: mq_curmsgs, pad: [0; 4] }
}
}
#[inline]
pub fn mq_open(name: &CString, oflag: MQ_OFlag, mode: Mode, attr: &MqAttr) -> Result<MQd> {
let res = unsafe { ffi::mq_open(name.as_ptr(), oflag.bits(), mode.bits() as mode_t, attr as *const MqAttr) };
if res < 0 {
return Err(Error::Sys(Errno::last()));
}
Ok(res)
}
pub fn mq_close(mqdes: MQd) -> Result<()> {
let res = unsafe { ffi::mq_close(mqdes) };
from_ffi(res)
}
pub fn mq_receive(mqdes: MQd, message: &mut [u8], msq_prio: u32) -> Result<usize> {
let len = message.len() as size_t;
let res = unsafe { ffi::mq_receive(mqdes, message.as_mut_ptr() as *mut c_char, len, &msq_prio) };
if res < 0 |
Ok(res as usize)
}
pub fn mq_send(mqdes: MQd, message: &CString, msq_prio: u32) -> Result<usize> {
let len = unsafe { strlen(message.as_ptr()) as size_t };
let res = unsafe { ffi::mq_send(mqdes, message.as_ptr(), len, msq_prio) };
if res < 0 {
return Err(Error::Sys(Errno::last()));
}
Ok(res as usize)
}
pub fn mq_getattr(mqd: MQd) -> Result<MqAttr> {
let mut attr = MqAttr::new(0, 0, 0, 0);
let res = unsafe { ffi::mq_getattr(mqd, &mut attr) };
if res < 0 {
return Err(Error::Sys(Errno::last()));
}
Ok(attr)
}
| {
return Err(Error::Sys(Errno::last()));
} | conditional_block |
19180cf98af6_nsx_gw_devices.py | # Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""nsx_gw_devices
Revision ID: 19180cf98af6
Revises: 117643811bca
Create Date: 2014-02-26 02:46:26.151741
"""
# revision identifiers, used by Alembic.
revision = '19180cf98af6'
down_revision = '117643811bca'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin',
'neutron.plugins.vmware.plugin.NsxPlugin',
'neutron.plugins.vmware.plugin.NsxServicePlugin'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.create_table(
'networkgatewaydevicereferences',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('network_gateway_id', sa.String(length=36), nullable=True),
sa.Column('interface_name', sa.String(length=64), nullable=True),
sa.ForeignKeyConstraint(['network_gateway_id'], ['networkgateways.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id', 'network_gateway_id', 'interface_name'),
mysql_engine='InnoDB')
# Copy data from networkgatewaydevices into networkgatewaydevicereference
op.execute("INSERT INTO networkgatewaydevicereferences SELECT "
"id, network_gateway_id, interface_name FROM "
"networkgatewaydevices")
# drop networkgatewaydevices
op.drop_table('networkgatewaydevices')
op.create_table(
'networkgatewaydevices',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('nsx_id', sa.String(length=36), nullable=True),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('connector_type', sa.String(length=10), nullable=True),
sa.Column('connector_ip', sa.String(length=64), nullable=True),
sa.Column('status', sa.String(length=16), nullable=True),
sa.PrimaryKeyConstraint('id'),
mysql_engine='InnoDB')
# Create a networkgatewaydevice for each existing reference.
# For existing references nsx_id == neutron_id
# Do not fill conenctor info as they would be unknown
op.execute("INSERT INTO networkgatewaydevices (id, nsx_id) SELECT "
"id, id as nsx_id FROM networkgatewaydevicereferences")
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
|
op.drop_table('networkgatewaydevices')
# Re-create previous version of networkgatewaydevices table
op.create_table(
'networkgatewaydevices',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('network_gateway_id', sa.String(length=36), nullable=True),
sa.Column('interface_name', sa.String(length=64), nullable=True),
sa.ForeignKeyConstraint(['network_gateway_id'], ['networkgateways.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
mysql_engine='InnoDB')
# Copy from networkgatewaydevicereferences to networkgatewaydevices
op.execute("INSERT INTO networkgatewaydevices SELECT "
"id, network_gateway_id, interface_name FROM "
"networkgatewaydevicereferences")
# Dropt networkgatewaydevicereferences
op.drop_table('networkgatewaydevicereferences')
| return | conditional_block |
19180cf98af6_nsx_gw_devices.py | # Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""nsx_gw_devices
Revision ID: 19180cf98af6
Revises: 117643811bca
Create Date: 2014-02-26 02:46:26.151741
"""
# revision identifiers, used by Alembic.
revision = '19180cf98af6'
down_revision = '117643811bca'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin',
'neutron.plugins.vmware.plugin.NsxPlugin',
'neutron.plugins.vmware.plugin.NsxServicePlugin'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def | (active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.create_table(
'networkgatewaydevicereferences',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('network_gateway_id', sa.String(length=36), nullable=True),
sa.Column('interface_name', sa.String(length=64), nullable=True),
sa.ForeignKeyConstraint(['network_gateway_id'], ['networkgateways.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id', 'network_gateway_id', 'interface_name'),
mysql_engine='InnoDB')
# Copy data from networkgatewaydevices into networkgatewaydevicereference
op.execute("INSERT INTO networkgatewaydevicereferences SELECT "
"id, network_gateway_id, interface_name FROM "
"networkgatewaydevices")
# drop networkgatewaydevices
op.drop_table('networkgatewaydevices')
op.create_table(
'networkgatewaydevices',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('nsx_id', sa.String(length=36), nullable=True),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('connector_type', sa.String(length=10), nullable=True),
sa.Column('connector_ip', sa.String(length=64), nullable=True),
sa.Column('status', sa.String(length=16), nullable=True),
sa.PrimaryKeyConstraint('id'),
mysql_engine='InnoDB')
# Create a networkgatewaydevice for each existing reference.
# For existing references nsx_id == neutron_id
# Do not fill conenctor info as they would be unknown
op.execute("INSERT INTO networkgatewaydevices (id, nsx_id) SELECT "
"id, id as nsx_id FROM networkgatewaydevicereferences")
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.drop_table('networkgatewaydevices')
# Re-create previous version of networkgatewaydevices table
op.create_table(
'networkgatewaydevices',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('network_gateway_id', sa.String(length=36), nullable=True),
sa.Column('interface_name', sa.String(length=64), nullable=True),
sa.ForeignKeyConstraint(['network_gateway_id'], ['networkgateways.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
mysql_engine='InnoDB')
# Copy from networkgatewaydevicereferences to networkgatewaydevices
op.execute("INSERT INTO networkgatewaydevices SELECT "
"id, network_gateway_id, interface_name FROM "
"networkgatewaydevicereferences")
# Dropt networkgatewaydevicereferences
op.drop_table('networkgatewaydevicereferences')
| upgrade | identifier_name |
19180cf98af6_nsx_gw_devices.py | # Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""nsx_gw_devices
Revision ID: 19180cf98af6
Revises: 117643811bca
Create Date: 2014-02-26 02:46:26.151741
"""
# revision identifiers, used by Alembic.
revision = '19180cf98af6'
down_revision = '117643811bca'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin',
'neutron.plugins.vmware.plugin.NsxPlugin',
'neutron.plugins.vmware.plugin.NsxServicePlugin'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.create_table(
'networkgatewaydevicereferences',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('network_gateway_id', sa.String(length=36), nullable=True),
sa.Column('interface_name', sa.String(length=64), nullable=True),
sa.ForeignKeyConstraint(['network_gateway_id'], ['networkgateways.id'],
ondelete='CASCADE'), | "id, network_gateway_id, interface_name FROM "
"networkgatewaydevices")
# drop networkgatewaydevices
op.drop_table('networkgatewaydevices')
op.create_table(
'networkgatewaydevices',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('nsx_id', sa.String(length=36), nullable=True),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('connector_type', sa.String(length=10), nullable=True),
sa.Column('connector_ip', sa.String(length=64), nullable=True),
sa.Column('status', sa.String(length=16), nullable=True),
sa.PrimaryKeyConstraint('id'),
mysql_engine='InnoDB')
# Create a networkgatewaydevice for each existing reference.
# For existing references nsx_id == neutron_id
# Do not fill conenctor info as they would be unknown
op.execute("INSERT INTO networkgatewaydevices (id, nsx_id) SELECT "
"id, id as nsx_id FROM networkgatewaydevicereferences")
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.drop_table('networkgatewaydevices')
# Re-create previous version of networkgatewaydevices table
op.create_table(
'networkgatewaydevices',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('network_gateway_id', sa.String(length=36), nullable=True),
sa.Column('interface_name', sa.String(length=64), nullable=True),
sa.ForeignKeyConstraint(['network_gateway_id'], ['networkgateways.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
mysql_engine='InnoDB')
# Copy from networkgatewaydevicereferences to networkgatewaydevices
op.execute("INSERT INTO networkgatewaydevices SELECT "
"id, network_gateway_id, interface_name FROM "
"networkgatewaydevicereferences")
# Dropt networkgatewaydevicereferences
op.drop_table('networkgatewaydevicereferences') | sa.PrimaryKeyConstraint('id', 'network_gateway_id', 'interface_name'),
mysql_engine='InnoDB')
# Copy data from networkgatewaydevices into networkgatewaydevicereference
op.execute("INSERT INTO networkgatewaydevicereferences SELECT " | random_line_split |
19180cf98af6_nsx_gw_devices.py | # Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""nsx_gw_devices
Revision ID: 19180cf98af6
Revises: 117643811bca
Create Date: 2014-02-26 02:46:26.151741
"""
# revision identifiers, used by Alembic.
revision = '19180cf98af6'
down_revision = '117643811bca'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin',
'neutron.plugins.vmware.plugin.NsxPlugin',
'neutron.plugins.vmware.plugin.NsxServicePlugin'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
| sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('nsx_id', sa.String(length=36), nullable=True),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('connector_type', sa.String(length=10), nullable=True),
sa.Column('connector_ip', sa.String(length=64), nullable=True),
sa.Column('status', sa.String(length=16), nullable=True),
sa.PrimaryKeyConstraint('id'),
mysql_engine='InnoDB')
# Create a networkgatewaydevice for each existing reference.
# For existing references nsx_id == neutron_id
# Do not fill conenctor info as they would be unknown
op.execute("INSERT INTO networkgatewaydevices (id, nsx_id) SELECT "
"id, id as nsx_id FROM networkgatewaydevicereferences")
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.drop_table('networkgatewaydevices')
# Re-create previous version of networkgatewaydevices table
op.create_table(
'networkgatewaydevices',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('network_gateway_id', sa.String(length=36), nullable=True),
sa.Column('interface_name', sa.String(length=64), nullable=True),
sa.ForeignKeyConstraint(['network_gateway_id'], ['networkgateways.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
mysql_engine='InnoDB')
# Copy from networkgatewaydevicereferences to networkgatewaydevices
op.execute("INSERT INTO networkgatewaydevices SELECT "
"id, network_gateway_id, interface_name FROM "
"networkgatewaydevicereferences")
# Dropt networkgatewaydevicereferences
op.drop_table('networkgatewaydevicereferences')
| if not migration.should_run(active_plugins, migration_for_plugins):
return
op.create_table(
'networkgatewaydevicereferences',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('network_gateway_id', sa.String(length=36), nullable=True),
sa.Column('interface_name', sa.String(length=64), nullable=True),
sa.ForeignKeyConstraint(['network_gateway_id'], ['networkgateways.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id', 'network_gateway_id', 'interface_name'),
mysql_engine='InnoDB')
# Copy data from networkgatewaydevices into networkgatewaydevicereference
op.execute("INSERT INTO networkgatewaydevicereferences SELECT "
"id, network_gateway_id, interface_name FROM "
"networkgatewaydevices")
# drop networkgatewaydevices
op.drop_table('networkgatewaydevices')
op.create_table(
'networkgatewaydevices', | identifier_body |
log_entry.rs | // Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details. | // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Log entry type definition.
use std::ops::Deref;
use util::{H256, Address, Bytes, HeapSizeOf, Hashable};
use util::bloom::Bloomable;
use rlp::*;
use basic_types::LogBloom;
use header::BlockNumber;
use ethjson;
/// A record of execution for a `LOG` operation.
#[derive(Default, Debug, Clone, PartialEq, Eq, Binary)]
pub struct LogEntry {
/// The address of the contract executing at the point of the `LOG` operation.
pub address: Address,
/// The topics associated with the `LOG` operation.
pub topics: Vec<H256>,
/// The data associated with the `LOG` operation.
pub data: Bytes,
}
impl Encodable for LogEntry {
fn rlp_append(&self, s: &mut RlpStream) {
s.begin_list(3);
s.append(&self.address);
s.append(&self.topics);
s.append(&self.data);
}
}
impl Decodable for LogEntry {
fn decode<D>(decoder: &D) -> Result<Self, DecoderError> where D: Decoder {
let d = decoder.as_rlp();
let entry = LogEntry {
address: try!(d.val_at(0)),
topics: try!(d.val_at(1)),
data: try!(d.val_at(2)),
};
Ok(entry)
}
}
impl HeapSizeOf for LogEntry {
fn heap_size_of_children(&self) -> usize {
self.topics.heap_size_of_children() + self.data.heap_size_of_children()
}
}
impl LogEntry {
/// Calculates the bloom of this log entry.
pub fn bloom(&self) -> LogBloom {
self.topics.iter().fold(LogBloom::from_bloomed(&self.address.sha3()), |b, t| b.with_bloomed(&t.sha3()))
}
}
impl From<ethjson::state::Log> for LogEntry {
fn from(l: ethjson::state::Log) -> Self {
LogEntry {
address: l.address.into(),
topics: l.topics.into_iter().map(Into::into).collect(),
data: l.data.into(),
}
}
}
/// Log localized in a blockchain.
#[derive(Default, Debug, PartialEq, Clone, Binary)]
pub struct LocalizedLogEntry {
/// Plain log entry.
pub entry: LogEntry,
/// Block in which this log was created.
pub block_hash: H256,
/// Block number.
pub block_number: BlockNumber,
/// Hash of transaction in which this log was created.
pub transaction_hash: H256,
/// Index of transaction within block.
pub transaction_index: usize,
/// Log position in the block.
pub log_index: usize,
}
impl Deref for LocalizedLogEntry {
type Target = LogEntry;
fn deref(&self) -> &Self::Target {
&self.entry
}
}
#[cfg(test)]
mod tests {
use util::*;
use super::LogEntry;
#[test]
fn test_empty_log_bloom() {
let bloom = H2048::from_str("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap();
let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap();
let log = LogEntry {
address: address,
topics: vec![],
data: vec![]
};
assert_eq!(log.bloom(), bloom);
}
} | random_line_split |
|
log_entry.rs | // Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Log entry type definition.
use std::ops::Deref;
use util::{H256, Address, Bytes, HeapSizeOf, Hashable};
use util::bloom::Bloomable;
use rlp::*;
use basic_types::LogBloom;
use header::BlockNumber;
use ethjson;
/// A record of execution for a `LOG` operation.
#[derive(Default, Debug, Clone, PartialEq, Eq, Binary)]
pub struct LogEntry {
/// The address of the contract executing at the point of the `LOG` operation.
pub address: Address,
/// The topics associated with the `LOG` operation.
pub topics: Vec<H256>,
/// The data associated with the `LOG` operation.
pub data: Bytes,
}
impl Encodable for LogEntry {
fn rlp_append(&self, s: &mut RlpStream) {
s.begin_list(3);
s.append(&self.address);
s.append(&self.topics);
s.append(&self.data);
}
}
impl Decodable for LogEntry {
fn decode<D>(decoder: &D) -> Result<Self, DecoderError> where D: Decoder {
let d = decoder.as_rlp();
let entry = LogEntry {
address: try!(d.val_at(0)),
topics: try!(d.val_at(1)),
data: try!(d.val_at(2)),
};
Ok(entry)
}
}
impl HeapSizeOf for LogEntry {
fn heap_size_of_children(&self) -> usize {
self.topics.heap_size_of_children() + self.data.heap_size_of_children()
}
}
impl LogEntry {
/// Calculates the bloom of this log entry.
pub fn bloom(&self) -> LogBloom {
self.topics.iter().fold(LogBloom::from_bloomed(&self.address.sha3()), |b, t| b.with_bloomed(&t.sha3()))
}
}
impl From<ethjson::state::Log> for LogEntry {
fn from(l: ethjson::state::Log) -> Self {
LogEntry {
address: l.address.into(),
topics: l.topics.into_iter().map(Into::into).collect(),
data: l.data.into(),
}
}
}
/// Log localized in a blockchain.
#[derive(Default, Debug, PartialEq, Clone, Binary)]
pub struct LocalizedLogEntry {
/// Plain log entry.
pub entry: LogEntry,
/// Block in which this log was created.
pub block_hash: H256,
/// Block number.
pub block_number: BlockNumber,
/// Hash of transaction in which this log was created.
pub transaction_hash: H256,
/// Index of transaction within block.
pub transaction_index: usize,
/// Log position in the block.
pub log_index: usize,
}
impl Deref for LocalizedLogEntry {
type Target = LogEntry;
fn deref(&self) -> &Self::Target {
&self.entry
}
}
#[cfg(test)]
mod tests {
use util::*;
use super::LogEntry;
#[test]
fn | () {
let bloom = H2048::from_str("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap();
let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap();
let log = LogEntry {
address: address,
topics: vec![],
data: vec![]
};
assert_eq!(log.bloom(), bloom);
}
}
| test_empty_log_bloom | identifier_name |
log_entry.rs | // Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Log entry type definition.
use std::ops::Deref;
use util::{H256, Address, Bytes, HeapSizeOf, Hashable};
use util::bloom::Bloomable;
use rlp::*;
use basic_types::LogBloom;
use header::BlockNumber;
use ethjson;
/// A record of execution for a `LOG` operation.
#[derive(Default, Debug, Clone, PartialEq, Eq, Binary)]
pub struct LogEntry {
/// The address of the contract executing at the point of the `LOG` operation.
pub address: Address,
/// The topics associated with the `LOG` operation.
pub topics: Vec<H256>,
/// The data associated with the `LOG` operation.
pub data: Bytes,
}
impl Encodable for LogEntry {
fn rlp_append(&self, s: &mut RlpStream) {
s.begin_list(3);
s.append(&self.address);
s.append(&self.topics);
s.append(&self.data);
}
}
impl Decodable for LogEntry {
fn decode<D>(decoder: &D) -> Result<Self, DecoderError> where D: Decoder {
let d = decoder.as_rlp();
let entry = LogEntry {
address: try!(d.val_at(0)),
topics: try!(d.val_at(1)),
data: try!(d.val_at(2)),
};
Ok(entry)
}
}
impl HeapSizeOf for LogEntry {
fn heap_size_of_children(&self) -> usize {
self.topics.heap_size_of_children() + self.data.heap_size_of_children()
}
}
impl LogEntry {
/// Calculates the bloom of this log entry.
pub fn bloom(&self) -> LogBloom {
self.topics.iter().fold(LogBloom::from_bloomed(&self.address.sha3()), |b, t| b.with_bloomed(&t.sha3()))
}
}
impl From<ethjson::state::Log> for LogEntry {
fn from(l: ethjson::state::Log) -> Self {
LogEntry {
address: l.address.into(),
topics: l.topics.into_iter().map(Into::into).collect(),
data: l.data.into(),
}
}
}
/// Log localized in a blockchain.
#[derive(Default, Debug, PartialEq, Clone, Binary)]
pub struct LocalizedLogEntry {
/// Plain log entry.
pub entry: LogEntry,
/// Block in which this log was created.
pub block_hash: H256,
/// Block number.
pub block_number: BlockNumber,
/// Hash of transaction in which this log was created.
pub transaction_hash: H256,
/// Index of transaction within block.
pub transaction_index: usize,
/// Log position in the block.
pub log_index: usize,
}
impl Deref for LocalizedLogEntry {
type Target = LogEntry;
fn deref(&self) -> &Self::Target {
&self.entry
}
}
#[cfg(test)]
mod tests {
use util::*;
use super::LogEntry;
#[test]
fn test_empty_log_bloom() |
}
| {
let bloom = H2048::from_str("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap();
let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap();
let log = LogEntry {
address: address,
topics: vec![],
data: vec![]
};
assert_eq!(log.bloom(), bloom);
} | identifier_body |
dhash.py | __author__ = 'anicca'
# core
import math
import sys
from itertools import izip
# 3rd party
from PIL import Image, ImageChops
import argh
def dhash(image, hash_size=8):
# Grayscale and shrink the image in one step.
image = image.convert('L').resize(
(hash_size + 1, hash_size),
Image.ANTIALIAS,
)
pixels = list(image.getdata())
# Compare adjacent pixels.
difference = []
for row in range(hash_size):
for col in range(hash_size):
pixel_left = image.getpixel((col, row))
pixel_right = image.getpixel((col + 1, row))
difference.append(pixel_left > pixel_right)
# Convert the binary array to a hexadecimal string.
decimal_value = 0
hex_string = []
for index, value in enumerate(difference):
if value:
decimal_value += 2 ** (index % 8)
if (index % 8) == 7:
hex_string.append(hex(decimal_value)[2:].rjust(2, '0'))
decimal_value = 0
return ''.join(hex_string)
def rosetta(image1, image2):
|
def rmsdiff_2011(im1, im2):
"Calculate the root-mean-square difference between two images"
im1 = Image.open(im1)
im2 = Image.open(im2)
diff = ImageChops.difference(im1, im2)
h = diff.histogram()
sq = (value * (idx ** 2) for idx, value in enumerate(h))
sum_of_squares = sum(sq)
rms = math.sqrt(sum_of_squares / float(im1.size[0] * im1.size[1]))
return rms
def main(image_filename1, image_filename2, dhash=False, rosetta=False, rmsdiff=False):
pass
if __name__ == '__main__':
argh.dispatch_command(main)
| i1 = Image.open(image1)
i2 = Image.open(image2)
assert i1.mode == i2.mode, "Different kinds of images."
print i1.size, i2.size
assert i1.size == i2.size, "Different sizes."
pairs = izip(i1.getdata(), i2.getdata())
if len(i1.getbands()) == 1:
# for gray-scale jpegs
dif = sum(abs(p1 - p2) for p1, p2 in pairs)
else:
dif = sum(abs(c1 - c2) for p1, p2 in pairs for c1, c2 in zip(p1, p2))
ncomponents = i1.size[0] * i1.size[1] * 3
retval = (dif / 255.0 * 100) / ncomponents
return retval | identifier_body |
dhash.py | __author__ = 'anicca'
# core
import math
import sys
from itertools import izip
# 3rd party
from PIL import Image, ImageChops
import argh
def dhash(image, hash_size=8):
# Grayscale and shrink the image in one step.
image = image.convert('L').resize(
(hash_size + 1, hash_size),
Image.ANTIALIAS,
)
pixels = list(image.getdata())
# Compare adjacent pixels.
difference = []
for row in range(hash_size):
for col in range(hash_size):
pixel_left = image.getpixel((col, row))
pixel_right = image.getpixel((col + 1, row))
difference.append(pixel_left > pixel_right)
# Convert the binary array to a hexadecimal string.
decimal_value = 0
hex_string = []
for index, value in enumerate(difference):
if value:
decimal_value += 2 ** (index % 8)
if (index % 8) == 7:
hex_string.append(hex(decimal_value)[2:].rjust(2, '0'))
decimal_value = 0
return ''.join(hex_string)
def rosetta(image1, image2):
i1 = Image.open(image1)
i2 = Image.open(image2)
assert i1.mode == i2.mode, "Different kinds of images."
print i1.size, i2.size
assert i1.size == i2.size, "Different sizes."
pairs = izip(i1.getdata(), i2.getdata())
if len(i1.getbands()) == 1:
# for gray-scale jpegs
|
else:
dif = sum(abs(c1 - c2) for p1, p2 in pairs for c1, c2 in zip(p1, p2))
ncomponents = i1.size[0] * i1.size[1] * 3
retval = (dif / 255.0 * 100) / ncomponents
return retval
def rmsdiff_2011(im1, im2):
"Calculate the root-mean-square difference between two images"
im1 = Image.open(im1)
im2 = Image.open(im2)
diff = ImageChops.difference(im1, im2)
h = diff.histogram()
sq = (value * (idx ** 2) for idx, value in enumerate(h))
sum_of_squares = sum(sq)
rms = math.sqrt(sum_of_squares / float(im1.size[0] * im1.size[1]))
return rms
def main(image_filename1, image_filename2, dhash=False, rosetta=False, rmsdiff=False):
pass
if __name__ == '__main__':
argh.dispatch_command(main)
| dif = sum(abs(p1 - p2) for p1, p2 in pairs) | conditional_block |
dhash.py | __author__ = 'anicca'
# core
import math
import sys
from itertools import izip
# 3rd party
from PIL import Image, ImageChops
import argh
def dhash(image, hash_size=8):
# Grayscale and shrink the image in one step.
image = image.convert('L').resize(
(hash_size + 1, hash_size),
Image.ANTIALIAS,
)
pixels = list(image.getdata())
# Compare adjacent pixels.
difference = []
for row in range(hash_size):
for col in range(hash_size):
pixel_left = image.getpixel((col, row))
pixel_right = image.getpixel((col + 1, row))
difference.append(pixel_left > pixel_right)
# Convert the binary array to a hexadecimal string.
decimal_value = 0
hex_string = []
for index, value in enumerate(difference):
if value:
decimal_value += 2 ** (index % 8)
if (index % 8) == 7:
hex_string.append(hex(decimal_value)[2:].rjust(2, '0'))
decimal_value = 0
return ''.join(hex_string)
def rosetta(image1, image2):
i1 = Image.open(image1)
i2 = Image.open(image2)
assert i1.mode == i2.mode, "Different kinds of images."
print i1.size, i2.size
assert i1.size == i2.size, "Different sizes."
pairs = izip(i1.getdata(), i2.getdata())
if len(i1.getbands()) == 1:
# for gray-scale jpegs
dif = sum(abs(p1 - p2) for p1, p2 in pairs)
else: | return retval
def rmsdiff_2011(im1, im2):
"Calculate the root-mean-square difference between two images"
im1 = Image.open(im1)
im2 = Image.open(im2)
diff = ImageChops.difference(im1, im2)
h = diff.histogram()
sq = (value * (idx ** 2) for idx, value in enumerate(h))
sum_of_squares = sum(sq)
rms = math.sqrt(sum_of_squares / float(im1.size[0] * im1.size[1]))
return rms
def main(image_filename1, image_filename2, dhash=False, rosetta=False, rmsdiff=False):
pass
if __name__ == '__main__':
argh.dispatch_command(main) | dif = sum(abs(c1 - c2) for p1, p2 in pairs for c1, c2 in zip(p1, p2))
ncomponents = i1.size[0] * i1.size[1] * 3
retval = (dif / 255.0 * 100) / ncomponents | random_line_split |
dhash.py | __author__ = 'anicca'
# core
import math
import sys
from itertools import izip
# 3rd party
from PIL import Image, ImageChops
import argh
def dhash(image, hash_size=8):
# Grayscale and shrink the image in one step.
image = image.convert('L').resize(
(hash_size + 1, hash_size),
Image.ANTIALIAS,
)
pixels = list(image.getdata())
# Compare adjacent pixels.
difference = []
for row in range(hash_size):
for col in range(hash_size):
pixel_left = image.getpixel((col, row))
pixel_right = image.getpixel((col + 1, row))
difference.append(pixel_left > pixel_right)
# Convert the binary array to a hexadecimal string.
decimal_value = 0
hex_string = []
for index, value in enumerate(difference):
if value:
decimal_value += 2 ** (index % 8)
if (index % 8) == 7:
hex_string.append(hex(decimal_value)[2:].rjust(2, '0'))
decimal_value = 0
return ''.join(hex_string)
def rosetta(image1, image2):
i1 = Image.open(image1)
i2 = Image.open(image2)
assert i1.mode == i2.mode, "Different kinds of images."
print i1.size, i2.size
assert i1.size == i2.size, "Different sizes."
pairs = izip(i1.getdata(), i2.getdata())
if len(i1.getbands()) == 1:
# for gray-scale jpegs
dif = sum(abs(p1 - p2) for p1, p2 in pairs)
else:
dif = sum(abs(c1 - c2) for p1, p2 in pairs for c1, c2 in zip(p1, p2))
ncomponents = i1.size[0] * i1.size[1] * 3
retval = (dif / 255.0 * 100) / ncomponents
return retval
def rmsdiff_2011(im1, im2):
"Calculate the root-mean-square difference between two images"
im1 = Image.open(im1)
im2 = Image.open(im2)
diff = ImageChops.difference(im1, im2)
h = diff.histogram()
sq = (value * (idx ** 2) for idx, value in enumerate(h))
sum_of_squares = sum(sq)
rms = math.sqrt(sum_of_squares / float(im1.size[0] * im1.size[1]))
return rms
def | (image_filename1, image_filename2, dhash=False, rosetta=False, rmsdiff=False):
pass
if __name__ == '__main__':
argh.dispatch_command(main)
| main | identifier_name |
grants.py | # -*- coding: utf-8 -*-
from scrapy.spider import Spider
from scrapy.selector import Selector
from kgrants.items import KgrantsItem
from scrapy.http import Request
import time
class GrantsSpider(Spider):
name = "grants"
allowed_domains = ["www.knightfoundation.org"]
pages = 1
base_url = 'http://www.knightfoundation.org'
start_url_str = 'http://www.knightfoundation.org/grants/?sort=title&page=%s'
def __init__(self, pages=None, *args, **kwargs):
super(GrantsSpider, self).__init__(*args, **kwargs)
if pages is not None:
self.pages = pages
self.start_urls = [ self.start_url_str % str(page) for page in xrange(1,int(self.pages)+1)]
def parse(self, response):
|
def parse_project(self,response):
hxs = Selector(response)
grants = response.meta['grants']
details = hxs.xpath('//section[@id="grant_info"]')
fields = hxs.xpath('//dt')
values = hxs.xpath('//dd')
self.log('field: <%s>' % fields.extract())
for item in details:
grants['fiscal_agent'] = ''.join(item.xpath('header/h2/text()').extract()).strip()
count = 0
for field in fields:
normalized_field = ''.join(field.xpath('text()').extract()).strip().lower().replace(' ','_')
self.log('field: <%s>' % normalized_field)
try:
grants[normalized_field] = values.xpath('text()').extract()[count]
except:
if normalized_field == 'community':
grants[normalized_field] = values.xpath('a/text()').extract()[1]
elif normalized_field == 'focus_area':
grants[normalized_field] = values.xpath('a/text()').extract()[0]
count += 1
grants['grantee_contact_email'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/@href').extract()).replace('mailto:','').strip()
grants['grantee_contact_name'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/text()').extract()).strip()
grants['grantee_contact_location'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="location"]/text()').extract()).strip()
grants['grantee_contact_facebook'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="facebook"]/a/@href').extract()).strip()
grants['grantee_contact_twitter'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="twitter"]/a/@href').extract()
grants['grantee_contact_website'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="website"]/a/@href').extract()
if 'grant_period' in grants:
grant_period = grants['grant_period'].split(' to ')
grants['grant_period_start'] = grant_period[0]
grants['grant_period_end'] = grant_period[1]
yield grants
| hxs = Selector(response)
projects = hxs.xpath('//article')
for project in projects:
time.sleep(2)
project_url = self.base_url + ''.join(project.xpath('a/@href').extract())
grants = KgrantsItem()
grants['page'] = project_url
grants['project'] = ''.join(project.xpath('a/div/header/h1/text()').extract()).strip()
grants['description'] = ''.join(project.xpath('p/text()').extract()).strip()
yield Request(grants['page'],
callback = self.parse_project,
meta={'grants':grants}) | identifier_body |
grants.py | # -*- coding: utf-8 -*-
from scrapy.spider import Spider
from scrapy.selector import Selector
from kgrants.items import KgrantsItem
from scrapy.http import Request
import time
class GrantsSpider(Spider):
name = "grants"
allowed_domains = ["www.knightfoundation.org"]
pages = 1
base_url = 'http://www.knightfoundation.org' | super(GrantsSpider, self).__init__(*args, **kwargs)
if pages is not None:
self.pages = pages
self.start_urls = [ self.start_url_str % str(page) for page in xrange(1,int(self.pages)+1)]
def parse(self, response):
hxs = Selector(response)
projects = hxs.xpath('//article')
for project in projects:
time.sleep(2)
project_url = self.base_url + ''.join(project.xpath('a/@href').extract())
grants = KgrantsItem()
grants['page'] = project_url
grants['project'] = ''.join(project.xpath('a/div/header/h1/text()').extract()).strip()
grants['description'] = ''.join(project.xpath('p/text()').extract()).strip()
yield Request(grants['page'],
callback = self.parse_project,
meta={'grants':grants})
def parse_project(self,response):
hxs = Selector(response)
grants = response.meta['grants']
details = hxs.xpath('//section[@id="grant_info"]')
fields = hxs.xpath('//dt')
values = hxs.xpath('//dd')
self.log('field: <%s>' % fields.extract())
for item in details:
grants['fiscal_agent'] = ''.join(item.xpath('header/h2/text()').extract()).strip()
count = 0
for field in fields:
normalized_field = ''.join(field.xpath('text()').extract()).strip().lower().replace(' ','_')
self.log('field: <%s>' % normalized_field)
try:
grants[normalized_field] = values.xpath('text()').extract()[count]
except:
if normalized_field == 'community':
grants[normalized_field] = values.xpath('a/text()').extract()[1]
elif normalized_field == 'focus_area':
grants[normalized_field] = values.xpath('a/text()').extract()[0]
count += 1
grants['grantee_contact_email'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/@href').extract()).replace('mailto:','').strip()
grants['grantee_contact_name'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/text()').extract()).strip()
grants['grantee_contact_location'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="location"]/text()').extract()).strip()
grants['grantee_contact_facebook'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="facebook"]/a/@href').extract()).strip()
grants['grantee_contact_twitter'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="twitter"]/a/@href').extract()
grants['grantee_contact_website'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="website"]/a/@href').extract()
if 'grant_period' in grants:
grant_period = grants['grant_period'].split(' to ')
grants['grant_period_start'] = grant_period[0]
grants['grant_period_end'] = grant_period[1]
yield grants | start_url_str = 'http://www.knightfoundation.org/grants/?sort=title&page=%s'
def __init__(self, pages=None, *args, **kwargs): | random_line_split |
grants.py | # -*- coding: utf-8 -*-
from scrapy.spider import Spider
from scrapy.selector import Selector
from kgrants.items import KgrantsItem
from scrapy.http import Request
import time
class GrantsSpider(Spider):
name = "grants"
allowed_domains = ["www.knightfoundation.org"]
pages = 1
base_url = 'http://www.knightfoundation.org'
start_url_str = 'http://www.knightfoundation.org/grants/?sort=title&page=%s'
def __init__(self, pages=None, *args, **kwargs):
super(GrantsSpider, self).__init__(*args, **kwargs)
if pages is not None:
self.pages = pages
self.start_urls = [ self.start_url_str % str(page) for page in xrange(1,int(self.pages)+1)]
def parse(self, response):
hxs = Selector(response)
projects = hxs.xpath('//article')
for project in projects:
time.sleep(2)
project_url = self.base_url + ''.join(project.xpath('a/@href').extract())
grants = KgrantsItem()
grants['page'] = project_url
grants['project'] = ''.join(project.xpath('a/div/header/h1/text()').extract()).strip()
grants['description'] = ''.join(project.xpath('p/text()').extract()).strip()
yield Request(grants['page'],
callback = self.parse_project,
meta={'grants':grants})
def parse_project(self,response):
hxs = Selector(response)
grants = response.meta['grants']
details = hxs.xpath('//section[@id="grant_info"]')
fields = hxs.xpath('//dt')
values = hxs.xpath('//dd')
self.log('field: <%s>' % fields.extract())
for item in details:
grants['fiscal_agent'] = ''.join(item.xpath('header/h2/text()').extract()).strip()
count = 0
for field in fields:
normalized_field = ''.join(field.xpath('text()').extract()).strip().lower().replace(' ','_')
self.log('field: <%s>' % normalized_field)
try:
grants[normalized_field] = values.xpath('text()').extract()[count]
except:
if normalized_field == 'community':
grants[normalized_field] = values.xpath('a/text()').extract()[1]
elif normalized_field == 'focus_area':
|
count += 1
grants['grantee_contact_email'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/@href').extract()).replace('mailto:','').strip()
grants['grantee_contact_name'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/text()').extract()).strip()
grants['grantee_contact_location'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="location"]/text()').extract()).strip()
grants['grantee_contact_facebook'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="facebook"]/a/@href').extract()).strip()
grants['grantee_contact_twitter'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="twitter"]/a/@href').extract()
grants['grantee_contact_website'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="website"]/a/@href').extract()
if 'grant_period' in grants:
grant_period = grants['grant_period'].split(' to ')
grants['grant_period_start'] = grant_period[0]
grants['grant_period_end'] = grant_period[1]
yield grants
| grants[normalized_field] = values.xpath('a/text()').extract()[0] | conditional_block |
grants.py | # -*- coding: utf-8 -*-
from scrapy.spider import Spider
from scrapy.selector import Selector
from kgrants.items import KgrantsItem
from scrapy.http import Request
import time
class GrantsSpider(Spider):
name = "grants"
allowed_domains = ["www.knightfoundation.org"]
pages = 1
base_url = 'http://www.knightfoundation.org'
start_url_str = 'http://www.knightfoundation.org/grants/?sort=title&page=%s'
def __init__(self, pages=None, *args, **kwargs):
super(GrantsSpider, self).__init__(*args, **kwargs)
if pages is not None:
self.pages = pages
self.start_urls = [ self.start_url_str % str(page) for page in xrange(1,int(self.pages)+1)]
def | (self, response):
hxs = Selector(response)
projects = hxs.xpath('//article')
for project in projects:
time.sleep(2)
project_url = self.base_url + ''.join(project.xpath('a/@href').extract())
grants = KgrantsItem()
grants['page'] = project_url
grants['project'] = ''.join(project.xpath('a/div/header/h1/text()').extract()).strip()
grants['description'] = ''.join(project.xpath('p/text()').extract()).strip()
yield Request(grants['page'],
callback = self.parse_project,
meta={'grants':grants})
def parse_project(self,response):
hxs = Selector(response)
grants = response.meta['grants']
details = hxs.xpath('//section[@id="grant_info"]')
fields = hxs.xpath('//dt')
values = hxs.xpath('//dd')
self.log('field: <%s>' % fields.extract())
for item in details:
grants['fiscal_agent'] = ''.join(item.xpath('header/h2/text()').extract()).strip()
count = 0
for field in fields:
normalized_field = ''.join(field.xpath('text()').extract()).strip().lower().replace(' ','_')
self.log('field: <%s>' % normalized_field)
try:
grants[normalized_field] = values.xpath('text()').extract()[count]
except:
if normalized_field == 'community':
grants[normalized_field] = values.xpath('a/text()').extract()[1]
elif normalized_field == 'focus_area':
grants[normalized_field] = values.xpath('a/text()').extract()[0]
count += 1
grants['grantee_contact_email'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/@href').extract()).replace('mailto:','').strip()
grants['grantee_contact_name'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/text()').extract()).strip()
grants['grantee_contact_location'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="location"]/text()').extract()).strip()
grants['grantee_contact_facebook'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="facebook"]/a/@href').extract()).strip()
grants['grantee_contact_twitter'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="twitter"]/a/@href').extract()
grants['grantee_contact_website'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="website"]/a/@href').extract()
if 'grant_period' in grants:
grant_period = grants['grant_period'].split(' to ')
grants['grant_period_start'] = grant_period[0]
grants['grant_period_end'] = grant_period[1]
yield grants
| parse | identifier_name |
preprocess.py | #!/usr/bin/python
import re, csv, sys
from urlparse import urlparse
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.text import TextCollection
#process command line arguments
if len(sys.argv) < 2:
print "ERROR: arg1: must specify the input file"
print " arg2: specify -t to generate test ARFF"
sys.exit(1)
test = False
if len(sys.argv) > 2:
test = (sys.argv[2] == '-t')
# initialize some variables
stoplist = stopwords.words('english')
stoplist.extend(['.', ',', ':', '?', '!' ';', '"', "'", '-', '--', '(', ')', '/', '\\',
'[', ']', '{', '}', '|', '+', '*', '^'])
emots_pos = [':)', ':D', ':-)', ':-D', '=)', '=D', ':]', ':-]', '=]', 'X)', 'XD', 'X]',
'X-)', 'X-D', 'X-]', 'C:', ';)', ';D', ';]', ';-)', ';-D', ';-]', '<3',
':P', ':-P', '=P', 'XP', 'X-P', ':o)', ':3', ':>', '8)', ':^)', '8-D', '8D',
'=3', 'B^D', '\\o/', '<:', '(:', '(-:', '(=', '[:', '[-:', '[=', '(X', '[X',
'(-X', '[-X', ':\')', ':\'-)', ':\']', ':\'-]', '=\')', '=\']', ';^)',
'>:P', ':-b', ':b']
emots_pos = [emot.lower() for emot in emots_pos]
emots_neg = [':(', ':[', ':-(', ':-[', 'D:', '=(', '=[', 'D=', 'DX', ':C', '</3',
'>:[', ':-c', ':-<', ':<', '>:', ':{', ':\'-(', ':\'(', ':\'[', '=\'(',
'=\'[', 'D;', 'D\':', 'D:<', 'D8', 'D-\':', '):', ']:', ')-:', ']-:',
')=', ']=', ']:<', '>-:']
emots_neg = [emot.lower() for emot in emots_neg]
gaz_pos = []
gaz_neg = []
tweets = []
sentiments = []
emots_count = []
punct_count = []
gaz_count = []
words = [] #will contain all non-stop words that occur >1 times
words1 = [] #will contain all non-stop words that occur 1 time
# generate the gazetteers
gaz_file = open('positive-words.txt', 'r')
for line in gaz_file:
line = line.strip()
if line != '' and line[0] != ';':
gaz_pos.append(line)
gaz_file.close()
gaz_file = open('negative-words.txt', 'r')
for line in gaz_file:
line = line.strip()
if line != '' and line[0] != ';':
gaz_neg.append(line)
gaz_file.close()
# print some information
print 'Number of positive emoticons: ' + str(len(emots_pos))
print 'Number of negative emoticons: ' + str(len(emots_neg))
print '\nNumber of positive gazetteer words: ' + str(len(gaz_pos))
print 'Number of negative gazetteer words: ' + str(len(gaz_neg))
# extract all tweets and words (IN TRAINING)
words_file = []
if not test:
words_file = open('words-list.txt', 'w') # COMMENT OUT FOR TESTING
tweet_file = open(sys.argv[1], 'rb')
reader = csv.reader(tweet_file, delimiter=',', quotechar='"', escapechar='\\', quoting=csv.QUOTE_ALL)
for line in reader:
# save tweet data
tweet = line[4].lower()
sent = line[1]
# REMOVE THIS SECTION FOR TESTING
if not test:
if sent == 'positive':
sent = 'POS'
elif sent == 'negative':
sent = 'NEG'
else:
sent = 'OTHER'
sentiments.append(sent)
# standardize URLs
w = tweet.split()
for i in range(len(w)):
r = urlparse(w[i])
if r[0] != '' and r[1] != '':
w[i] = 'URL'
tweet = ' '.join(w)
tweets.append(tweet)
# count emoticons
count_pos = 0
for emot in emots_pos:
count_pos += tweet.count(emot)
count_neg = 0
for emot in emots_neg:
count_neg += tweet.count(emot)
emots_count.append( (count_pos, count_neg) )
# count punctuation
punct_count.append( (tweet.count('?'), tweet.count('!')) )
| count_pos = 0
for gw in gaz_pos:
count_pos += tweet.count(gw)
count_neg = 0
for gw in gaz_neg:
count_neg += tweet.count(gw)
gaz_count.append( (count_pos, count_neg) )
# USE THIS SECTION FOR TRAINING
# extract only words used >1 times, and ignore stopwords
if not test :
tweet_sents = sent_tokenize(tweet)
for sent in tweet_sents:
sw = word_tokenize(sent)
for word in sw:
if word not in stoplist:
if word not in words:
if word in words1:
words.append(word)
words_file.write(word + '\n')
else:
words1.append(word)
tweet_file.close()
if not test:
words_file.close() # COMMENT OUT FOR TESTING
# USE THIS SECTION FOR TESTING
# extract all words (IN TESTING)
if test:
wfile = open('words-list.txt', 'r')
for line in wfile:
words.append(line.strip())
wfile.close()
# print some more information
print '\nNumber of tweets: ' + str(len(tweets))
print 'Number of words occuring >1 time: ' + str(len(words))
print 'Number of words occuring 1 time: ' + str(len(words1))
# create .arff file for Weka
texts = TextCollection(tweets)
arff = open('tweets_sentiment.arff', "w")
wc = 0
# header
arff.write("@relation sentiment_analysis\n\n")
arff.write("@attribute numPosEmots numeric\n")
arff.write("@attribute numNegEmots numeric\n")
arff.write("@attribute numQuest numeric\n")
arff.write("@attribute numExclam numeric\n")
arff.write("@attribute numPosGaz numeric\n")
arff.write("@attribute numNegGaz numeric\n")
for word in words:
arff.write("@attribute word_")
sub_w = re.subn('[^a-zA-Z]', 'X', word)
arff.write(sub_w[0])
if sub_w[1] > 0:
arff.write('_' + str(wc))
wc += 1
arff.write(" numeric\n")
arff.write("@attribute class {POS, NEG, OTHER}\n\n")
arff.write("@data\n")
# data
for i in xrange(len(tweets)):
arff.write(str(emots_count[i][0]) + ',' + str(emots_count[i][1]) + ',')
arff.write(str(punct_count[i][0]) + ',' + str(punct_count[i][1]) + ',')
arff.write(str(gaz_count[i][0]) + ',' + str(gaz_count[i][1]) + ',')
for j in xrange(len(words)): #loop through unigrams
arff.write(str(texts.tf_idf(words[j], tweets[i])) + ',')
arff.write(sentiments[i] + '\n')
arff.close()
print '\nFinished pre-processing! The ARFF file for Weka has been created.' | # count gazetteer words
| random_line_split |
preprocess.py | #!/usr/bin/python
import re, csv, sys
from urlparse import urlparse
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.text import TextCollection
#process command line arguments
if len(sys.argv) < 2:
print "ERROR: arg1: must specify the input file"
print " arg2: specify -t to generate test ARFF"
sys.exit(1)
test = False
if len(sys.argv) > 2:
test = (sys.argv[2] == '-t')
# initialize some variables
stoplist = stopwords.words('english')
stoplist.extend(['.', ',', ':', '?', '!' ';', '"', "'", '-', '--', '(', ')', '/', '\\',
'[', ']', '{', '}', '|', '+', '*', '^'])
emots_pos = [':)', ':D', ':-)', ':-D', '=)', '=D', ':]', ':-]', '=]', 'X)', 'XD', 'X]',
'X-)', 'X-D', 'X-]', 'C:', ';)', ';D', ';]', ';-)', ';-D', ';-]', '<3',
':P', ':-P', '=P', 'XP', 'X-P', ':o)', ':3', ':>', '8)', ':^)', '8-D', '8D',
'=3', 'B^D', '\\o/', '<:', '(:', '(-:', '(=', '[:', '[-:', '[=', '(X', '[X',
'(-X', '[-X', ':\')', ':\'-)', ':\']', ':\'-]', '=\')', '=\']', ';^)',
'>:P', ':-b', ':b']
emots_pos = [emot.lower() for emot in emots_pos]
emots_neg = [':(', ':[', ':-(', ':-[', 'D:', '=(', '=[', 'D=', 'DX', ':C', '</3',
'>:[', ':-c', ':-<', ':<', '>:', ':{', ':\'-(', ':\'(', ':\'[', '=\'(',
'=\'[', 'D;', 'D\':', 'D:<', 'D8', 'D-\':', '):', ']:', ')-:', ']-:',
')=', ']=', ']:<', '>-:']
emots_neg = [emot.lower() for emot in emots_neg]
gaz_pos = []
gaz_neg = []
tweets = []
sentiments = []
emots_count = []
punct_count = []
gaz_count = []
words = [] #will contain all non-stop words that occur >1 times
words1 = [] #will contain all non-stop words that occur 1 time
# generate the gazetteers
gaz_file = open('positive-words.txt', 'r')
for line in gaz_file:
line = line.strip()
if line != '' and line[0] != ';':
gaz_pos.append(line)
gaz_file.close()
gaz_file = open('negative-words.txt', 'r')
for line in gaz_file:
line = line.strip()
if line != '' and line[0] != ';':
gaz_neg.append(line)
gaz_file.close()
# print some information
print 'Number of positive emoticons: ' + str(len(emots_pos))
print 'Number of negative emoticons: ' + str(len(emots_neg))
print '\nNumber of positive gazetteer words: ' + str(len(gaz_pos))
print 'Number of negative gazetteer words: ' + str(len(gaz_neg))
# extract all tweets and words (IN TRAINING)
words_file = []
if not test:
words_file = open('words-list.txt', 'w') # COMMENT OUT FOR TESTING
tweet_file = open(sys.argv[1], 'rb')
reader = csv.reader(tweet_file, delimiter=',', quotechar='"', escapechar='\\', quoting=csv.QUOTE_ALL)
for line in reader:
# save tweet data
tweet = line[4].lower()
sent = line[1]
# REMOVE THIS SECTION FOR TESTING
if not test:
if sent == 'positive':
|
elif sent == 'negative':
sent = 'NEG'
else:
sent = 'OTHER'
sentiments.append(sent)
# standardize URLs
w = tweet.split()
for i in range(len(w)):
r = urlparse(w[i])
if r[0] != '' and r[1] != '':
w[i] = 'URL'
tweet = ' '.join(w)
tweets.append(tweet)
# count emoticons
count_pos = 0
for emot in emots_pos:
count_pos += tweet.count(emot)
count_neg = 0
for emot in emots_neg:
count_neg += tweet.count(emot)
emots_count.append( (count_pos, count_neg) )
# count punctuation
punct_count.append( (tweet.count('?'), tweet.count('!')) )
# count gazetteer words
count_pos = 0
for gw in gaz_pos:
count_pos += tweet.count(gw)
count_neg = 0
for gw in gaz_neg:
count_neg += tweet.count(gw)
gaz_count.append( (count_pos, count_neg) )
# USE THIS SECTION FOR TRAINING
# extract only words used >1 times, and ignore stopwords
if not test :
tweet_sents = sent_tokenize(tweet)
for sent in tweet_sents:
sw = word_tokenize(sent)
for word in sw:
if word not in stoplist:
if word not in words:
if word in words1:
words.append(word)
words_file.write(word + '\n')
else:
words1.append(word)
tweet_file.close()
if not test:
words_file.close() # COMMENT OUT FOR TESTING
# USE THIS SECTION FOR TESTING
# extract all words (IN TESTING)
if test:
wfile = open('words-list.txt', 'r')
for line in wfile:
words.append(line.strip())
wfile.close()
# print some more information
print '\nNumber of tweets: ' + str(len(tweets))
print 'Number of words occuring >1 time: ' + str(len(words))
print 'Number of words occuring 1 time: ' + str(len(words1))
# create .arff file for Weka
texts = TextCollection(tweets)
arff = open('tweets_sentiment.arff', "w")
wc = 0
# header
arff.write("@relation sentiment_analysis\n\n")
arff.write("@attribute numPosEmots numeric\n")
arff.write("@attribute numNegEmots numeric\n")
arff.write("@attribute numQuest numeric\n")
arff.write("@attribute numExclam numeric\n")
arff.write("@attribute numPosGaz numeric\n")
arff.write("@attribute numNegGaz numeric\n")
for word in words:
arff.write("@attribute word_")
sub_w = re.subn('[^a-zA-Z]', 'X', word)
arff.write(sub_w[0])
if sub_w[1] > 0:
arff.write('_' + str(wc))
wc += 1
arff.write(" numeric\n")
arff.write("@attribute class {POS, NEG, OTHER}\n\n")
arff.write("@data\n")
# data
for i in xrange(len(tweets)):
arff.write(str(emots_count[i][0]) + ',' + str(emots_count[i][1]) + ',')
arff.write(str(punct_count[i][0]) + ',' + str(punct_count[i][1]) + ',')
arff.write(str(gaz_count[i][0]) + ',' + str(gaz_count[i][1]) + ',')
for j in xrange(len(words)): #loop through unigrams
arff.write(str(texts.tf_idf(words[j], tweets[i])) + ',')
arff.write(sentiments[i] + '\n')
arff.close()
print '\nFinished pre-processing! The ARFF file for Weka has been created.' | sent = 'POS' | conditional_block |
common.js | /* |
var lang = {
anon: 'Anonymous',
search: 'Search',
show: 'Show',
hide: 'Hide',
report: 'Report',
focus: 'Focus',
expand: 'Expand',
last: 'Last',
see_all: 'See all',
bottom: 'Bottom',
expand_images: 'Expand Images',
live: 'live',
catalog: 'Catalog',
return: 'Return',
top: 'Top',
reply: 'Reply',
newThread: 'New thread',
locked_to_bottom: 'Locked to bottom',
you: '(You)',
done: 'Done',
send: 'Send',
// Time-related
week: ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'],
year: ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec'],
just_now: 'just now',
unit_minute: 'minute',
unit_hour: 'hour',
unit_day: 'day',
unit_month: 'month',
unit_year: 'year',
// Moderation language map
mod: {
clearSelection: ['Clear', 'Clear selected posts'],
spoilerImages: ['Spoiler', 'Spoiler selected post images'],
deleteImages: ['Del Img', 'Delete selected post images'],
deletePosts: ['Del Post', 'Delete selected posts'],
lockThread: ['Lock', 'Lock selected threads'],
toggleMnemonics: ['Mnemonics', 'Toggle mnemonic display'],
sendNotification: [
'Notification',
'Send notifaction message to all clients'
],
dispatchFun: ['Fun', 'Execute arbitrary JavaScript on all clients'],
renderPanel: ['Panel', 'Toggle administrator panel display'],
modLog: ['Log', 'Show moderation log'],
placeholders: {
msg: 'Message'
},
// Correspond to websocket calls in common/index.js
7: 'Image spoilered',
8: 'Image deleted',
9: 'Post deleted',
// Formatting function for moderation messages
formatLog: function (act) {
return lang.mod[act.kind] + ' by ' + act.ident;
}
},
// Format functions
pluralize: function(n, noun) {
// For words ending with 'y' and not a vovel before that
if (n != 1
&& noun.slice(-1) == 'y'
&& ['a', 'e', 'i', 'o', 'u'].indexOf(noun.slice(-2, -1)
.toLowerCase()) < 0) {
noun = noun.slice(0, -1) + 'ie';
}
return n + ' ' + noun + (n == 1 ? '' : 's');
},
capitalize: function(word) {
return word[0].toUpperCase() + word.slice(1);
},
// 56 minutes ago
ago: function(time, unit) {
return lang.pluralize(time, unit) + ' ago';
},
// 47 replies and 21 images omitted
abbrev_msg: function(omit, img_omit, url) {
var html = lang.pluralize(omit, 'reply');
if (img_omit)
html += ' and ' + lang.pluralize(img_omit, 'image');
html += ' omitted';
if (url) {
html += ' <span class="act"><a href="' + url + '" class="history">'
+ lang.see_all + '</a></span>';
}
return html;
}
};
module.exports = lang; | * Shared by the server and client
*/ | random_line_split |
common.js | /*
* Shared by the server and client
*/
var lang = {
anon: 'Anonymous',
search: 'Search',
show: 'Show',
hide: 'Hide',
report: 'Report',
focus: 'Focus',
expand: 'Expand',
last: 'Last',
see_all: 'See all',
bottom: 'Bottom',
expand_images: 'Expand Images',
live: 'live',
catalog: 'Catalog',
return: 'Return',
top: 'Top',
reply: 'Reply',
newThread: 'New thread',
locked_to_bottom: 'Locked to bottom',
you: '(You)',
done: 'Done',
send: 'Send',
// Time-related
week: ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'],
year: ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec'],
just_now: 'just now',
unit_minute: 'minute',
unit_hour: 'hour',
unit_day: 'day',
unit_month: 'month',
unit_year: 'year',
// Moderation language map
mod: {
clearSelection: ['Clear', 'Clear selected posts'],
spoilerImages: ['Spoiler', 'Spoiler selected post images'],
deleteImages: ['Del Img', 'Delete selected post images'],
deletePosts: ['Del Post', 'Delete selected posts'],
lockThread: ['Lock', 'Lock selected threads'],
toggleMnemonics: ['Mnemonics', 'Toggle mnemonic display'],
sendNotification: [
'Notification',
'Send notifaction message to all clients'
],
dispatchFun: ['Fun', 'Execute arbitrary JavaScript on all clients'],
renderPanel: ['Panel', 'Toggle administrator panel display'],
modLog: ['Log', 'Show moderation log'],
placeholders: {
msg: 'Message'
},
// Correspond to websocket calls in common/index.js
7: 'Image spoilered',
8: 'Image deleted',
9: 'Post deleted',
// Formatting function for moderation messages
formatLog: function (act) {
return lang.mod[act.kind] + ' by ' + act.ident;
}
},
// Format functions
pluralize: function(n, noun) {
// For words ending with 'y' and not a vovel before that
if (n != 1
&& noun.slice(-1) == 'y'
&& ['a', 'e', 'i', 'o', 'u'].indexOf(noun.slice(-2, -1)
.toLowerCase()) < 0) {
noun = noun.slice(0, -1) + 'ie';
}
return n + ' ' + noun + (n == 1 ? '' : 's');
},
capitalize: function(word) {
return word[0].toUpperCase() + word.slice(1);
},
// 56 minutes ago
ago: function(time, unit) {
return lang.pluralize(time, unit) + ' ago';
},
// 47 replies and 21 images omitted
abbrev_msg: function(omit, img_omit, url) {
var html = lang.pluralize(omit, 'reply');
if (img_omit)
html += ' and ' + lang.pluralize(img_omit, 'image');
html += ' omitted';
if (url) |
return html;
}
};
module.exports = lang;
| {
html += ' <span class="act"><a href="' + url + '" class="history">'
+ lang.see_all + '</a></span>';
} | conditional_block |
filterPanel.styles.ts | /**
* @copyright 2009-2020 Vanilla Forums Inc.
* @license GPL-2.0-only
*/
import { styleFactory } from "@library/styles/styleUtils";
import { useThemeCache } from "@library/styles/themeCache";
import { styleUnit } from "@library/styles/styleUnit";
import { Mixins } from "@library/styles/Mixins";
import { globalVariables } from "@library/styles/globalStyleVars";
import { SectionTypes } from "@library/layout/types/interface.layoutTypes";
export const filterPanelClasses = useThemeCache((mediaQueries) => {
const globalVars = globalVariables();
const style = styleFactory("filterPanel");
const header = style(
"header",
{
marginBottom: styleUnit(globalVars.gutter.size * 1.5),
...{
"&&": {
border: 0,
...Mixins.padding({
horizontal: 0,
bottom: 0,
}),
},
},
},
mediaQueries({
[SectionTypes.TWO_COLUMNS]: {
oneColumnDown: {
...Mixins.absolute.srOnly(),
},
},
}),
);
const body = style("body", {
...{
"&&": {
...Mixins.padding({
horizontal: 0,
}),
},
},
});
const footer = style("body", {
...{
"&&": {
border: 0,
marginTop: styleUnit(globalVars.gutter.size),
...Mixins.padding({
horizontal: 0,
}), | ...{
"&&": {
...Mixins.font({
...globalVars.fontSizeAndWeightVars("large", "bold"),
}),
},
},
});
return {
header,
body,
footer,
title,
};
}); | },
},
});
const title = style("title", { | random_line_split |
test_pep277.py | filenames = [
'1_abc',
u'2_ascii',
u'3_Gr\xfc\xdf-Gott',
u'4_\u0393\u03b5\u03b9\u03ac-\u03c3\u03b1\u03c2',
u'5_\u0417\u0434\u0440\u0430\u0432\u0441\u0442\u0432\u0443\u0439\u0442\u0435',
u'6_\u306b\u307d\u3093',
u'7_\u05d4\u05e9\u05e7\u05e6\u05e5\u05e1',
u'8_\u66e8\u66e9\u66eb',
u'9_\u66e8\u05e9\u3093\u0434\u0393\xdf',
# Specific code points: fn, NFC(fn) and NFKC(fn) all differents
u'10_\u1fee\u1ffd',
]
# Mac OS X decomposes Unicode names, using Normal Form D.
# http://developer.apple.com/mac/library/qa/qa2001/qa1173.html
# "However, most volume formats do not follow the exact specification for
# these normal forms. For example, HFS Plus uses a variant of Normal Form D
# in which U+2000 through U+2FFF, U+F900 through U+FAFF, and U+2F800 through
# U+2FAFF are not decomposed."
if sys.platform != 'darwin':
filenames.extend([
# Specific code points: NFC(fn), NFD(fn), NFKC(fn) and NFKD(fn) all differents
u'11_\u0385\u03d3\u03d4',
u'12_\u00a8\u0301\u03d2\u0301\u03d2\u0308', # == NFD(u'\u0385\u03d3\u03d4')
u'13_\u0020\u0308\u0301\u038e\u03ab', # == NFKC(u'\u0385\u03d3\u03d4')
u'14_\u1e9b\u1fc1\u1fcd\u1fce\u1fcf\u1fdd\u1fde\u1fdf\u1fed',
# Specific code points: fn, NFC(fn) and NFKC(fn) all differents
u'15_\u1fee\u1ffd\ufad1',
u'16_\u2000\u2000\u2000A',
u'17_\u2001\u2001\u2001A',
u'18_\u2003\u2003\u2003A', # == NFC(u'\u2001\u2001\u2001A')
u'19_\u0020\u0020\u0020A', # u'\u0020' == u' ' == NFKC(u'\u2000') ==
# NFKC(u'\u2001') == NFKC(u'\u2003')
])
# Is it Unicode-friendly?
if not os.path.supports_unicode_filenames:
fsencoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
try:
for name in filenames:
name.encode(fsencoding)
except UnicodeEncodeError:
raise unittest.SkipTest("only NT+ and systems with "
"Unicode-friendly filesystem encoding")
# Destroy directory dirname and all files under it, to one level.
def deltree(dirname):
# Don't hide legitimate errors: if one of these suckers exists, it's
# an error if we can't remove it.
if os.path.exists(dirname):
# must pass unicode to os.listdir() so we get back unicode results.
for fname in os.listdir(unicode(dirname)):
os.unlink(os.path.join(dirname, fname))
os.rmdir(dirname)
class UnicodeFileTests(unittest.TestCase):
files = set(filenames)
normal_form = None
def setUp(self):
try:
os.mkdir(test_support.TESTFN)
except OSError:
pass
files = set()
for name in self.files:
name = os.path.join(test_support.TESTFN, self.norm(name))
with open(name, 'w') as f:
f.write((name+'\n').encode("utf-8"))
os.stat(name)
files.add(name)
self.files = files
def tearDown(self):
deltree(test_support.TESTFN)
def norm(self, s):
if self.normal_form and isinstance(s, unicode):
return normalize(self.normal_form, s)
return s
def _apply_failure(self, fn, filename, expected_exception,
check_fn_in_exception = True):
with self.assertRaises(expected_exception) as c:
fn(filename)
exc_filename = c.exception.filename
# the "filename" exception attribute may be encoded
if isinstance(exc_filename, str):
filename = filename.encode(sys.getfilesystemencoding())
if check_fn_in_exception:
self.assertEqual(exc_filename, filename, "Function '%s(%r) failed "
"with bad filename in the exception: %r" %
(fn.__name__, filename, exc_filename))
def test_failures(self):
# Pass non-existing Unicode filenames all over the place.
for name in self.files:
name = "not_" + name
self._apply_failure(open, name, IOError)
self._apply_failure(os.stat, name, OSError)
self._apply_failure(os.chdir, name, OSError)
self._apply_failure(os.rmdir, name, OSError)
self._apply_failure(os.remove, name, OSError)
# listdir may append a wildcard to the filename, so dont check
self._apply_failure(os.listdir, name, OSError, False)
def test_open(self):
for name in self.files:
f = open(name, 'w')
f.write((name+'\n').encode("utf-8"))
f.close()
os.stat(name)
# Skip the test on darwin, because darwin does normalize the filename to
# NFD (a variant of Unicode NFD form). Normalize the filename to NFC, NFKC,
# NFKD in Python is useless, because darwin will normalize it later and so
# open(), os.stat(), etc. don't raise any exception.
@unittest.skipIf(sys.platform == 'darwin', 'irrelevant test on Mac OS X')
def test_normalize(self):
files = set(f for f in self.files if isinstance(f, unicode))
others = set()
for nf in set(['NFC', 'NFD', 'NFKC', 'NFKD']):
others |= set(normalize(nf, file) for file in files)
others -= files
for name in others:
self._apply_failure(open, name, IOError)
self._apply_failure(os.stat, name, OSError)
self._apply_failure(os.chdir, name, OSError)
self._apply_failure(os.rmdir, name, OSError)
self._apply_failure(os.remove, name, OSError)
# listdir may append a wildcard to the filename, so dont check
self._apply_failure(os.listdir, name, OSError, False)
# Skip the test on darwin, because darwin uses a normalization different
# than Python NFD normalization: filenames are different even if we use
# Python NFD normalization.
@unittest.skipIf(sys.platform == 'darwin', 'irrelevant test on Mac OS X')
def | (self):
sf0 = set(self.files)
f1 = os.listdir(test_support.TESTFN)
f2 = os.listdir(unicode(test_support.TESTFN,
sys.getfilesystemencoding()))
sf2 = set(os.path.join(unicode(test_support.TESTFN), f) for f in f2)
self.assertEqual(sf0, sf2)
self.assertEqual(len(f1), len(f2))
def test_rename(self):
for name in self.files:
os.rename(name, "tmp")
os.rename("tmp", name)
def test_directory(self):
dirname = os.path.join(test_support.TESTFN,
u'Gr\xfc\xdf-\u66e8\u66e9\u66eb')
filename = u'\xdf-\u66e8\u66e9\u66eb'
oldwd = os.getcwd()
os.mkdir(dirname)
os.chdir(dirname)
try:
with open(filename, 'w') as f:
f.write((filename + '\n').encode("utf-8"))
os.access(filename,os.R_OK)
os.remove(filename)
finally:
os.chdir(oldwd)
os.rmdir(dirname)
class UnicodeNFCFileTests(UnicodeFileTests):
normal_form = 'NFC'
class | test_listdir | identifier_name |
test_pep277.py | filenames = [
'1_abc',
u'2_ascii',
u'3_Gr\xfc\xdf-Gott',
u'4_\u0393\u03b5\u03b9\u03ac-\u03c3\u03b1\u03c2',
u'5_\u0417\u0434\u0440\u0430\u0432\u0441\u0442\u0432\u0443\u0439\u0442\u0435',
u'6_\u306b\u307d\u3093',
u'7_\u05d4\u05e9\u05e7\u05e6\u05e5\u05e1',
u'8_\u66e8\u66e9\u66eb',
u'9_\u66e8\u05e9\u3093\u0434\u0393\xdf',
# Specific code points: fn, NFC(fn) and NFKC(fn) all differents
u'10_\u1fee\u1ffd',
]
# Mac OS X decomposes Unicode names, using Normal Form D.
# http://developer.apple.com/mac/library/qa/qa2001/qa1173.html
# "However, most volume formats do not follow the exact specification for
# these normal forms. For example, HFS Plus uses a variant of Normal Form D
# in which U+2000 through U+2FFF, U+F900 through U+FAFF, and U+2F800 through
# U+2FAFF are not decomposed."
if sys.platform != 'darwin':
filenames.extend([
# Specific code points: NFC(fn), NFD(fn), NFKC(fn) and NFKD(fn) all differents
u'11_\u0385\u03d3\u03d4',
u'12_\u00a8\u0301\u03d2\u0301\u03d2\u0308', # == NFD(u'\u0385\u03d3\u03d4')
u'13_\u0020\u0308\u0301\u038e\u03ab', # == NFKC(u'\u0385\u03d3\u03d4')
u'14_\u1e9b\u1fc1\u1fcd\u1fce\u1fcf\u1fdd\u1fde\u1fdf\u1fed',
# Specific code points: fn, NFC(fn) and NFKC(fn) all differents
u'15_\u1fee\u1ffd\ufad1',
u'16_\u2000\u2000\u2000A',
u'17_\u2001\u2001\u2001A',
u'18_\u2003\u2003\u2003A', # == NFC(u'\u2001\u2001\u2001A')
u'19_\u0020\u0020\u0020A', # u'\u0020' == u' ' == NFKC(u'\u2000') ==
# NFKC(u'\u2001') == NFKC(u'\u2003')
])
# Is it Unicode-friendly?
if not os.path.supports_unicode_filenames:
fsencoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
try:
for name in filenames:
name.encode(fsencoding)
except UnicodeEncodeError:
raise unittest.SkipTest("only NT+ and systems with "
"Unicode-friendly filesystem encoding")
# Destroy directory dirname and all files under it, to one level.
def deltree(dirname):
# Don't hide legitimate errors: if one of these suckers exists, it's
# an error if we can't remove it.
if os.path.exists(dirname):
# must pass unicode to os.listdir() so we get back unicode results.
for fname in os.listdir(unicode(dirname)):
os.unlink(os.path.join(dirname, fname))
os.rmdir(dirname)
class UnicodeFileTests(unittest.TestCase):
files = set(filenames)
normal_form = None
def setUp(self):
try:
os.mkdir(test_support.TESTFN)
except OSError:
pass
files = set()
for name in self.files:
name = os.path.join(test_support.TESTFN, self.norm(name))
with open(name, 'w') as f:
f.write((name+'\n').encode("utf-8"))
os.stat(name)
files.add(name)
self.files = files
def tearDown(self):
deltree(test_support.TESTFN)
def norm(self, s):
if self.normal_form and isinstance(s, unicode):
return normalize(self.normal_form, s)
return s
def _apply_failure(self, fn, filename, expected_exception,
check_fn_in_exception = True):
|
def test_failures(self):
# Pass non-existing Unicode filenames all over the place.
for name in self.files:
name = "not_" + name
self._apply_failure(open, name, IOError)
self._apply_failure(os.stat, name, OSError)
self._apply_failure(os.chdir, name, OSError)
self._apply_failure(os.rmdir, name, OSError)
self._apply_failure(os.remove, name, OSError)
# listdir may append a wildcard to the filename, so dont check
self._apply_failure(os.listdir, name, OSError, False)
def test_open(self):
for name in self.files:
f = open(name, 'w')
f.write((name+'\n').encode("utf-8"))
f.close()
os.stat(name)
# Skip the test on darwin, because darwin does normalize the filename to
# NFD (a variant of Unicode NFD form). Normalize the filename to NFC, NFKC,
# NFKD in Python is useless, because darwin will normalize it later and so
# open(), os.stat(), etc. don't raise any exception.
@unittest.skipIf(sys.platform == 'darwin', 'irrelevant test on Mac OS X')
def test_normalize(self):
files = set(f for f in self.files if isinstance(f, unicode))
others = set()
for nf in set(['NFC', 'NFD', 'NFKC', 'NFKD']):
others |= set(normalize(nf, file) for file in files)
others -= files
for name in others:
self._apply_failure(open, name, IOError)
self._apply_failure(os.stat, name, OSError)
self._apply_failure(os.chdir, name, OSError)
self._apply_failure(os.rmdir, name, OSError)
self._apply_failure(os.remove, name, OSError)
# listdir may append a wildcard to the filename, so dont check
self._apply_failure(os.listdir, name, OSError, False)
# Skip the test on darwin, because darwin uses a normalization different
# than Python NFD normalization: filenames are different even if we use
# Python NFD normalization.
@unittest.skipIf(sys.platform == 'darwin', 'irrelevant test on Mac OS X')
def test_listdir(self):
sf0 = set(self.files)
f1 = os.listdir(test_support.TESTFN)
f2 = os.listdir(unicode(test_support.TESTFN,
sys.getfilesystemencoding()))
sf2 = set(os.path.join(unicode(test_support.TESTFN), f) for f in f2)
self.assertEqual(sf0, sf2)
self.assertEqual(len(f1), len(f2))
def test_rename(self):
for name in self.files:
os.rename(name, "tmp")
os.rename("tmp", name)
def test_directory(self):
dirname = os.path.join(test_support.TESTFN,
u'Gr\xfc\xdf-\u66e8\u66e9\u66eb')
filename = u'\xdf-\u66e8\u66e9\u66eb'
oldwd = os.getcwd()
os.mkdir(dirname)
os.chdir(dirname)
try:
with open(filename, 'w') as f:
f.write((filename + '\n').encode("utf-8"))
os.access(filename,os.R_OK)
os.remove(filename)
finally:
os.chdir(oldwd)
os.rmdir(dirname)
class UnicodeNFCFileTests(UnicodeFileTests):
normal_form = 'NFC'
class | with self.assertRaises(expected_exception) as c:
fn(filename)
exc_filename = c.exception.filename
# the "filename" exception attribute may be encoded
if isinstance(exc_filename, str):
filename = filename.encode(sys.getfilesystemencoding())
if check_fn_in_exception:
self.assertEqual(exc_filename, filename, "Function '%s(%r) failed "
"with bad filename in the exception: %r" %
(fn.__name__, filename, exc_filename)) | identifier_body |
test_pep277.py | filenames = [
'1_abc',
u'2_ascii',
u'3_Gr\xfc\xdf-Gott',
u'4_\u0393\u03b5\u03b9\u03ac-\u03c3\u03b1\u03c2',
u'5_\u0417\u0434\u0440\u0430\u0432\u0441\u0442\u0432\u0443\u0439\u0442\u0435',
u'6_\u306b\u307d\u3093',
u'7_\u05d4\u05e9\u05e7\u05e6\u05e5\u05e1',
u'8_\u66e8\u66e9\u66eb',
u'9_\u66e8\u05e9\u3093\u0434\u0393\xdf',
# Specific code points: fn, NFC(fn) and NFKC(fn) all differents
u'10_\u1fee\u1ffd',
]
# Mac OS X decomposes Unicode names, using Normal Form D.
# http://developer.apple.com/mac/library/qa/qa2001/qa1173.html
# "However, most volume formats do not follow the exact specification for
# these normal forms. For example, HFS Plus uses a variant of Normal Form D
# in which U+2000 through U+2FFF, U+F900 through U+FAFF, and U+2F800 through
# U+2FAFF are not decomposed."
if sys.platform != 'darwin':
filenames.extend([
# Specific code points: NFC(fn), NFD(fn), NFKC(fn) and NFKD(fn) all differents
u'11_\u0385\u03d3\u03d4',
u'12_\u00a8\u0301\u03d2\u0301\u03d2\u0308', # == NFD(u'\u0385\u03d3\u03d4')
u'13_\u0020\u0308\u0301\u038e\u03ab', # == NFKC(u'\u0385\u03d3\u03d4')
u'14_\u1e9b\u1fc1\u1fcd\u1fce\u1fcf\u1fdd\u1fde\u1fdf\u1fed',
# Specific code points: fn, NFC(fn) and NFKC(fn) all differents
u'15_\u1fee\u1ffd\ufad1',
u'16_\u2000\u2000\u2000A',
u'17_\u2001\u2001\u2001A',
u'18_\u2003\u2003\u2003A', # == NFC(u'\u2001\u2001\u2001A')
u'19_\u0020\u0020\u0020A', # u'\u0020' == u' ' == NFKC(u'\u2000') ==
# NFKC(u'\u2001') == NFKC(u'\u2003')
])
# Is it Unicode-friendly?
if not os.path.supports_unicode_filenames:
fsencoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
try:
for name in filenames:
name.encode(fsencoding)
except UnicodeEncodeError:
raise unittest.SkipTest("only NT+ and systems with "
"Unicode-friendly filesystem encoding")
# Destroy directory dirname and all files under it, to one level.
def deltree(dirname):
# Don't hide legitimate errors: if one of these suckers exists, it's
# an error if we can't remove it.
if os.path.exists(dirname):
# must pass unicode to os.listdir() so we get back unicode results.
for fname in os.listdir(unicode(dirname)):
|
os.rmdir(dirname)
class UnicodeFileTests(unittest.TestCase):
files = set(filenames)
normal_form = None
def setUp(self):
try:
os.mkdir(test_support.TESTFN)
except OSError:
pass
files = set()
for name in self.files:
name = os.path.join(test_support.TESTFN, self.norm(name))
with open(name, 'w') as f:
f.write((name+'\n').encode("utf-8"))
os.stat(name)
files.add(name)
self.files = files
def tearDown(self):
deltree(test_support.TESTFN)
def norm(self, s):
if self.normal_form and isinstance(s, unicode):
return normalize(self.normal_form, s)
return s
def _apply_failure(self, fn, filename, expected_exception,
check_fn_in_exception = True):
with self.assertRaises(expected_exception) as c:
fn(filename)
exc_filename = c.exception.filename
# the "filename" exception attribute may be encoded
if isinstance(exc_filename, str):
filename = filename.encode(sys.getfilesystemencoding())
if check_fn_in_exception:
self.assertEqual(exc_filename, filename, "Function '%s(%r) failed "
"with bad filename in the exception: %r" %
(fn.__name__, filename, exc_filename))
def test_failures(self):
# Pass non-existing Unicode filenames all over the place.
for name in self.files:
name = "not_" + name
self._apply_failure(open, name, IOError)
self._apply_failure(os.stat, name, OSError)
self._apply_failure(os.chdir, name, OSError)
self._apply_failure(os.rmdir, name, OSError)
self._apply_failure(os.remove, name, OSError)
# listdir may append a wildcard to the filename, so dont check
self._apply_failure(os.listdir, name, OSError, False)
def test_open(self):
for name in self.files:
f = open(name, 'w')
f.write((name+'\n').encode("utf-8"))
f.close()
os.stat(name)
# Skip the test on darwin, because darwin does normalize the filename to
# NFD (a variant of Unicode NFD form). Normalize the filename to NFC, NFKC,
# NFKD in Python is useless, because darwin will normalize it later and so
# open(), os.stat(), etc. don't raise any exception.
@unittest.skipIf(sys.platform == 'darwin', 'irrelevant test on Mac OS X')
def test_normalize(self):
files = set(f for f in self.files if isinstance(f, unicode))
others = set()
for nf in set(['NFC', 'NFD', 'NFKC', 'NFKD']):
others |= set(normalize(nf, file) for file in files)
others -= files
for name in others:
self._apply_failure(open, name, IOError)
self._apply_failure(os.stat, name, OSError)
self._apply_failure(os.chdir, name, OSError)
self._apply_failure(os.rmdir, name, OSError)
self._apply_failure(os.remove, name, OSError)
# listdir may append a wildcard to the filename, so dont check
self._apply_failure(os.listdir, name, OSError, False)
# Skip the test on darwin, because darwin uses a normalization different
# than Python NFD normalization: filenames are different even if we use
# Python NFD normalization.
@unittest.skipIf(sys.platform == 'darwin', 'irrelevant test on Mac OS X')
def test_listdir(self):
sf0 = set(self.files)
f1 = os.listdir(test_support.TESTFN)
f2 = os.listdir(unicode(test_support.TESTFN,
sys.getfilesystemencoding()))
sf2 = set(os.path.join(unicode(test_support.TESTFN), f) for f in f2)
self.assertEqual(sf0, sf2)
self.assertEqual(len(f1), len(f2))
def test_rename(self):
for name in self.files:
os.rename(name, "tmp")
os.rename("tmp", name)
def test_directory(self):
dirname = os.path.join(test_support.TESTFN,
u'Gr\xfc\xdf-\u66e8\u66e9\u66eb')
filename = u'\xdf-\u66e8\u66e9\u66eb'
oldwd = os.getcwd()
os.mkdir(dirname)
os.chdir(dirname)
try:
with open(filename, 'w') as f:
f.write((filename + '\n').encode("utf-8"))
os.access(filename,os.R_OK)
os.remove(filename)
finally:
os.chdir(oldwd)
os.rmdir(dirname)
class UnicodeNFCFileTests(UnicodeFileTests):
normal_form = 'NFC'
class | os.unlink(os.path.join(dirname, fname)) | conditional_block |
test_pep277.py | enames = [
'1_abc',
u'2_ascii',
u'3_Gr\xfc\xdf-Gott',
u'4_\u0393\u03b5\u03b9\u03ac-\u03c3\u03b1\u03c2',
u'5_\u0417\u0434\u0440\u0430\u0432\u0441\u0442\u0432\u0443\u0439\u0442\u0435',
u'6_\u306b\u307d\u3093',
u'7_\u05d4\u05e9\u05e7\u05e6\u05e5\u05e1',
u'8_\u66e8\u66e9\u66eb',
u'9_\u66e8\u05e9\u3093\u0434\u0393\xdf',
# Specific code points: fn, NFC(fn) and NFKC(fn) all differents
u'10_\u1fee\u1ffd',
]
# Mac OS X decomposes Unicode names, using Normal Form D.
# http://developer.apple.com/mac/library/qa/qa2001/qa1173.html
# "However, most volume formats do not follow the exact specification for
# these normal forms. For example, HFS Plus uses a variant of Normal Form D
# in which U+2000 through U+2FFF, U+F900 through U+FAFF, and U+2F800 through
# U+2FAFF are not decomposed."
if sys.platform != 'darwin':
filenames.extend([
# Specific code points: NFC(fn), NFD(fn), NFKC(fn) and NFKD(fn) all differents
u'11_\u0385\u03d3\u03d4',
u'12_\u00a8\u0301\u03d2\u0301\u03d2\u0308', # == NFD(u'\u0385\u03d3\u03d4')
u'13_\u0020\u0308\u0301\u038e\u03ab', # == NFKC(u'\u0385\u03d3\u03d4')
u'14_\u1e9b\u1fc1\u1fcd\u1fce\u1fcf\u1fdd\u1fde\u1fdf\u1fed',
# Specific code points: fn, NFC(fn) and NFKC(fn) all differents
u'15_\u1fee\u1ffd\ufad1',
u'16_\u2000\u2000\u2000A',
u'17_\u2001\u2001\u2001A',
u'18_\u2003\u2003\u2003A', # == NFC(u'\u2001\u2001\u2001A')
u'19_\u0020\u0020\u0020A', # u'\u0020' == u' ' == NFKC(u'\u2000') ==
# NFKC(u'\u2001') == NFKC(u'\u2003')
| if not os.path.supports_unicode_filenames:
fsencoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
try:
for name in filenames:
name.encode(fsencoding)
except UnicodeEncodeError:
raise unittest.SkipTest("only NT+ and systems with "
"Unicode-friendly filesystem encoding")
# Destroy directory dirname and all files under it, to one level.
def deltree(dirname):
# Don't hide legitimate errors: if one of these suckers exists, it's
# an error if we can't remove it.
if os.path.exists(dirname):
# must pass unicode to os.listdir() so we get back unicode results.
for fname in os.listdir(unicode(dirname)):
os.unlink(os.path.join(dirname, fname))
os.rmdir(dirname)
class UnicodeFileTests(unittest.TestCase):
files = set(filenames)
normal_form = None
def setUp(self):
try:
os.mkdir(test_support.TESTFN)
except OSError:
pass
files = set()
for name in self.files:
name = os.path.join(test_support.TESTFN, self.norm(name))
with open(name, 'w') as f:
f.write((name+'\n').encode("utf-8"))
os.stat(name)
files.add(name)
self.files = files
def tearDown(self):
deltree(test_support.TESTFN)
def norm(self, s):
if self.normal_form and isinstance(s, unicode):
return normalize(self.normal_form, s)
return s
def _apply_failure(self, fn, filename, expected_exception,
check_fn_in_exception = True):
with self.assertRaises(expected_exception) as c:
fn(filename)
exc_filename = c.exception.filename
# the "filename" exception attribute may be encoded
if isinstance(exc_filename, str):
filename = filename.encode(sys.getfilesystemencoding())
if check_fn_in_exception:
self.assertEqual(exc_filename, filename, "Function '%s(%r) failed "
"with bad filename in the exception: %r" %
(fn.__name__, filename, exc_filename))
def test_failures(self):
# Pass non-existing Unicode filenames all over the place.
for name in self.files:
name = "not_" + name
self._apply_failure(open, name, IOError)
self._apply_failure(os.stat, name, OSError)
self._apply_failure(os.chdir, name, OSError)
self._apply_failure(os.rmdir, name, OSError)
self._apply_failure(os.remove, name, OSError)
# listdir may append a wildcard to the filename, so dont check
self._apply_failure(os.listdir, name, OSError, False)
def test_open(self):
for name in self.files:
f = open(name, 'w')
f.write((name+'\n').encode("utf-8"))
f.close()
os.stat(name)
# Skip the test on darwin, because darwin does normalize the filename to
# NFD (a variant of Unicode NFD form). Normalize the filename to NFC, NFKC,
# NFKD in Python is useless, because darwin will normalize it later and so
# open(), os.stat(), etc. don't raise any exception.
@unittest.skipIf(sys.platform == 'darwin', 'irrelevant test on Mac OS X')
def test_normalize(self):
files = set(f for f in self.files if isinstance(f, unicode))
others = set()
for nf in set(['NFC', 'NFD', 'NFKC', 'NFKD']):
others |= set(normalize(nf, file) for file in files)
others -= files
for name in others:
self._apply_failure(open, name, IOError)
self._apply_failure(os.stat, name, OSError)
self._apply_failure(os.chdir, name, OSError)
self._apply_failure(os.rmdir, name, OSError)
self._apply_failure(os.remove, name, OSError)
# listdir may append a wildcard to the filename, so dont check
self._apply_failure(os.listdir, name, OSError, False)
# Skip the test on darwin, because darwin uses a normalization different
# than Python NFD normalization: filenames are different even if we use
# Python NFD normalization.
@unittest.skipIf(sys.platform == 'darwin', 'irrelevant test on Mac OS X')
def test_listdir(self):
sf0 = set(self.files)
f1 = os.listdir(test_support.TESTFN)
f2 = os.listdir(unicode(test_support.TESTFN,
sys.getfilesystemencoding()))
sf2 = set(os.path.join(unicode(test_support.TESTFN), f) for f in f2)
self.assertEqual(sf0, sf2)
self.assertEqual(len(f1), len(f2))
def test_rename(self):
for name in self.files:
os.rename(name, "tmp")
os.rename("tmp", name)
def test_directory(self):
dirname = os.path.join(test_support.TESTFN,
u'Gr\xfc\xdf-\u66e8\u66e9\u66eb')
filename = u'\xdf-\u66e8\u66e9\u66eb'
oldwd = os.getcwd()
os.mkdir(dirname)
os.chdir(dirname)
try:
with open(filename, 'w') as f:
f.write((filename + '\n').encode("utf-8"))
os.access(filename,os.R_OK)
os.remove(filename)
finally:
os.chdir(oldwd)
os.rmdir(dirname)
class UnicodeNFCFileTests(UnicodeFileTests):
normal_form = 'NFC'
class | ])
# Is it Unicode-friendly?
| random_line_split |
vDialing.py | # vDial-up client
# Copyright (C) 2015 - 2017 Nathaniel Olsen
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from time import sleep
import socket
import libs.vDialupcore as core
from multiprocessing import Process
import sys
import struct
def MD5SUM_mismatch(vNumber_to_connect, sock):
print("*Warning: The server's MD5SUM does not match with the one listed on file, Do you wish to continue? (Y/N)")
if vNumber_to_connect == core.RegServ_vNumber:
MD5SUM_on_file = core.RegServ_MD5SUM
else:
pass # Right now, there is no way to retrieve a server's md5sum until I implement md5sum retriving in RegServ.
print("MD5SUM on file: %s" % (MD5SUM_on_file))
print("MD5SUM according to server: %s" % (received.split()[1]))
print("")
choice = input("Enter choice (Y/N): ")
if choice == 'Y' or choice == 'y':
init(sock, vNumber_to_connect)
if choice == 'N' or choice == 'n':
sys.exit() # Exit for now.
class main():
def send_msg(sock, msg):
# Prefix each message with a 4-byte length (network byte order)
msg = struct.pack('>I', len(msg)) + str.encode(msg)
sock.sendall(msg)
def recv_msg(sock):
# Read message length and unpack it into an integer
raw_msglen = main.recvall(sock, 4)
if not raw_msglen:
return None
msglen = struct.unpack('>I', str.encode(raw_msglen))[0]
return main.recvall(sock, msglen)
def recvall(sock, n):
# Helper function to recv n bytes or return None if EOF is hit
data = ''
while len(data) < n:
packet = (sock.recv(n - len(data)).decode('utf-8'))
if not packet:
return None
data += packet
return data
def servping(sock):
while 1:
|
def vdialing(vNumber_to_connect, vNumber_IP):
if core.config['use_ipv6_when_possible']:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("vDialing %s..." % (vNumber_to_connect))
if core.config['vDial-up Settings']['vNumber'] == "000000000":
core.dialupnoise()
try:
sock.connect((vNumber_IP, 5000))
except ConnectionRefusedError:
print("Error: Connection Refused.")
sys.exit()
main.send_msg(sock, "INITPING")
if main.recv_msg(sock) == "PONG":
print("Connected.")
#Process(target=main.servping, args=[sock]).start() # The ability to check if a server connection is still alive is coming soon.
main.send_msg(sock, "MD5SUMCHECK")
if main.recv_msg(sock).split()[0] == "MD5SUM:":
if main.recv_msg(sock).split()[1] == core.RegServ_MD5SUM:
print("MD5SUM verification was succeeded.")
else:
MD5SUM_mismatch(vNumber_to_connect, sock)
else:
print("Error: Unable to retrieve MD5SUM.")
main.init(sock, vNumber_to_connect)
else:
print("Error: Server did not properly respond to INITPING, disconnecting.")
else:
Process(target=core.dialupnoise()).start()
sock.connect((vNumber_IP, 5000))
main.send_msg(sock, "INITPING")
if main.recv_msg(sock) == "PONG":
print("Connected to Registation Server!")
main.send_msg(sock, "MD5SUMCHECK")
if main.recv_msg(sock).split()[0] == "MD5SUM:":
if main.recv_msg(sock).split()[1] == core.RegServ_MD5SUM:
print("MD5SUM verification was succeeded.")
else:
MD5SUM_mismatch(vNumber_to_connect, sock)
else:
print("Error: Unable to retrieve MD5SUM.")
else:
print("Error: Server did not properly respond to INITPING, disconnecting.")
def init(sock, vNumber_to_connect):
main.send_msg(sock, "VNUMBER: {}".format(core.config['vDial-up Settings']['vNumber']))
if core.config['vDial-up Settings']['vNumber'] == "000000000":
main.send_msg(sock, "CLIENTREGISTER")
if main.recv_msg(sock).split()[0] == "CONFIG:":
if main.recv_msg(sock).split()[1] == "vNumber":
core.config['vDial-up Settings']['vNumber'] = main.recv_msg(sock).split()[2]
core.saveconfig()
if main.recv_msg(sock).split()[1] == "Key":
core.config['vDial-up Settings']['Key'] = main.recv_msg(sock).split()[2]
core.saveconfig()
if main.recv_msg(sock).split()[0] == "TOCLIENT:":
print(" ".join(main.recv_msg(sock).split()[2:]))
else:
main.send_msg(sock, "KEY: {}".format(core.config['vDial-up Settings']['Key']))
main.send_msg(sock, "INIT")
| sleep(20)
sock.sendall(bytes("SERVPING" + "\n", "utf-8"))
if main.listen_for_data(sock) == "PONG":
break
else:
print("Disconnected: Connection timeout.") | conditional_block |
vDialing.py | # vDial-up client
# Copyright (C) 2015 - 2017 Nathaniel Olsen
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from time import sleep
import socket
import libs.vDialupcore as core
from multiprocessing import Process
import sys
import struct
def MD5SUM_mismatch(vNumber_to_connect, sock):
print("*Warning: The server's MD5SUM does not match with the one listed on file, Do you wish to continue? (Y/N)")
if vNumber_to_connect == core.RegServ_vNumber:
MD5SUM_on_file = core.RegServ_MD5SUM
else:
pass # Right now, there is no way to retrieve a server's md5sum until I implement md5sum retriving in RegServ.
print("MD5SUM on file: %s" % (MD5SUM_on_file))
print("MD5SUM according to server: %s" % (received.split()[1]))
print("")
choice = input("Enter choice (Y/N): ")
if choice == 'Y' or choice == 'y':
init(sock, vNumber_to_connect)
if choice == 'N' or choice == 'n':
sys.exit() # Exit for now.
class | ():
def send_msg(sock, msg):
# Prefix each message with a 4-byte length (network byte order)
msg = struct.pack('>I', len(msg)) + str.encode(msg)
sock.sendall(msg)
def recv_msg(sock):
# Read message length and unpack it into an integer
raw_msglen = main.recvall(sock, 4)
if not raw_msglen:
return None
msglen = struct.unpack('>I', str.encode(raw_msglen))[0]
return main.recvall(sock, msglen)
def recvall(sock, n):
# Helper function to recv n bytes or return None if EOF is hit
data = ''
while len(data) < n:
packet = (sock.recv(n - len(data)).decode('utf-8'))
if not packet:
return None
data += packet
return data
def servping(sock):
while 1:
sleep(20)
sock.sendall(bytes("SERVPING" + "\n", "utf-8"))
if main.listen_for_data(sock) == "PONG":
break
else:
print("Disconnected: Connection timeout.")
def vdialing(vNumber_to_connect, vNumber_IP):
if core.config['use_ipv6_when_possible']:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("vDialing %s..." % (vNumber_to_connect))
if core.config['vDial-up Settings']['vNumber'] == "000000000":
core.dialupnoise()
try:
sock.connect((vNumber_IP, 5000))
except ConnectionRefusedError:
print("Error: Connection Refused.")
sys.exit()
main.send_msg(sock, "INITPING")
if main.recv_msg(sock) == "PONG":
print("Connected.")
#Process(target=main.servping, args=[sock]).start() # The ability to check if a server connection is still alive is coming soon.
main.send_msg(sock, "MD5SUMCHECK")
if main.recv_msg(sock).split()[0] == "MD5SUM:":
if main.recv_msg(sock).split()[1] == core.RegServ_MD5SUM:
print("MD5SUM verification was succeeded.")
else:
MD5SUM_mismatch(vNumber_to_connect, sock)
else:
print("Error: Unable to retrieve MD5SUM.")
main.init(sock, vNumber_to_connect)
else:
print("Error: Server did not properly respond to INITPING, disconnecting.")
else:
Process(target=core.dialupnoise()).start()
sock.connect((vNumber_IP, 5000))
main.send_msg(sock, "INITPING")
if main.recv_msg(sock) == "PONG":
print("Connected to Registation Server!")
main.send_msg(sock, "MD5SUMCHECK")
if main.recv_msg(sock).split()[0] == "MD5SUM:":
if main.recv_msg(sock).split()[1] == core.RegServ_MD5SUM:
print("MD5SUM verification was succeeded.")
else:
MD5SUM_mismatch(vNumber_to_connect, sock)
else:
print("Error: Unable to retrieve MD5SUM.")
else:
print("Error: Server did not properly respond to INITPING, disconnecting.")
def init(sock, vNumber_to_connect):
main.send_msg(sock, "VNUMBER: {}".format(core.config['vDial-up Settings']['vNumber']))
if core.config['vDial-up Settings']['vNumber'] == "000000000":
main.send_msg(sock, "CLIENTREGISTER")
if main.recv_msg(sock).split()[0] == "CONFIG:":
if main.recv_msg(sock).split()[1] == "vNumber":
core.config['vDial-up Settings']['vNumber'] = main.recv_msg(sock).split()[2]
core.saveconfig()
if main.recv_msg(sock).split()[1] == "Key":
core.config['vDial-up Settings']['Key'] = main.recv_msg(sock).split()[2]
core.saveconfig()
if main.recv_msg(sock).split()[0] == "TOCLIENT:":
print(" ".join(main.recv_msg(sock).split()[2:]))
else:
main.send_msg(sock, "KEY: {}".format(core.config['vDial-up Settings']['Key']))
main.send_msg(sock, "INIT")
| main | identifier_name |
vDialing.py | # vDial-up client
# Copyright (C) 2015 - 2017 Nathaniel Olsen
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from time import sleep
import socket
import libs.vDialupcore as core | from multiprocessing import Process
import sys
import struct
def MD5SUM_mismatch(vNumber_to_connect, sock):
print("*Warning: The server's MD5SUM does not match with the one listed on file, Do you wish to continue? (Y/N)")
if vNumber_to_connect == core.RegServ_vNumber:
MD5SUM_on_file = core.RegServ_MD5SUM
else:
pass # Right now, there is no way to retrieve a server's md5sum until I implement md5sum retriving in RegServ.
print("MD5SUM on file: %s" % (MD5SUM_on_file))
print("MD5SUM according to server: %s" % (received.split()[1]))
print("")
choice = input("Enter choice (Y/N): ")
if choice == 'Y' or choice == 'y':
init(sock, vNumber_to_connect)
if choice == 'N' or choice == 'n':
sys.exit() # Exit for now.
class main():
def send_msg(sock, msg):
# Prefix each message with a 4-byte length (network byte order)
msg = struct.pack('>I', len(msg)) + str.encode(msg)
sock.sendall(msg)
def recv_msg(sock):
# Read message length and unpack it into an integer
raw_msglen = main.recvall(sock, 4)
if not raw_msglen:
return None
msglen = struct.unpack('>I', str.encode(raw_msglen))[0]
return main.recvall(sock, msglen)
def recvall(sock, n):
# Helper function to recv n bytes or return None if EOF is hit
data = ''
while len(data) < n:
packet = (sock.recv(n - len(data)).decode('utf-8'))
if not packet:
return None
data += packet
return data
def servping(sock):
while 1:
sleep(20)
sock.sendall(bytes("SERVPING" + "\n", "utf-8"))
if main.listen_for_data(sock) == "PONG":
break
else:
print("Disconnected: Connection timeout.")
def vdialing(vNumber_to_connect, vNumber_IP):
if core.config['use_ipv6_when_possible']:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("vDialing %s..." % (vNumber_to_connect))
if core.config['vDial-up Settings']['vNumber'] == "000000000":
core.dialupnoise()
try:
sock.connect((vNumber_IP, 5000))
except ConnectionRefusedError:
print("Error: Connection Refused.")
sys.exit()
main.send_msg(sock, "INITPING")
if main.recv_msg(sock) == "PONG":
print("Connected.")
#Process(target=main.servping, args=[sock]).start() # The ability to check if a server connection is still alive is coming soon.
main.send_msg(sock, "MD5SUMCHECK")
if main.recv_msg(sock).split()[0] == "MD5SUM:":
if main.recv_msg(sock).split()[1] == core.RegServ_MD5SUM:
print("MD5SUM verification was succeeded.")
else:
MD5SUM_mismatch(vNumber_to_connect, sock)
else:
print("Error: Unable to retrieve MD5SUM.")
main.init(sock, vNumber_to_connect)
else:
print("Error: Server did not properly respond to INITPING, disconnecting.")
else:
Process(target=core.dialupnoise()).start()
sock.connect((vNumber_IP, 5000))
main.send_msg(sock, "INITPING")
if main.recv_msg(sock) == "PONG":
print("Connected to Registation Server!")
main.send_msg(sock, "MD5SUMCHECK")
if main.recv_msg(sock).split()[0] == "MD5SUM:":
if main.recv_msg(sock).split()[1] == core.RegServ_MD5SUM:
print("MD5SUM verification was succeeded.")
else:
MD5SUM_mismatch(vNumber_to_connect, sock)
else:
print("Error: Unable to retrieve MD5SUM.")
else:
print("Error: Server did not properly respond to INITPING, disconnecting.")
def init(sock, vNumber_to_connect):
main.send_msg(sock, "VNUMBER: {}".format(core.config['vDial-up Settings']['vNumber']))
if core.config['vDial-up Settings']['vNumber'] == "000000000":
main.send_msg(sock, "CLIENTREGISTER")
if main.recv_msg(sock).split()[0] == "CONFIG:":
if main.recv_msg(sock).split()[1] == "vNumber":
core.config['vDial-up Settings']['vNumber'] = main.recv_msg(sock).split()[2]
core.saveconfig()
if main.recv_msg(sock).split()[1] == "Key":
core.config['vDial-up Settings']['Key'] = main.recv_msg(sock).split()[2]
core.saveconfig()
if main.recv_msg(sock).split()[0] == "TOCLIENT:":
print(" ".join(main.recv_msg(sock).split()[2:]))
else:
main.send_msg(sock, "KEY: {}".format(core.config['vDial-up Settings']['Key']))
main.send_msg(sock, "INIT") | random_line_split |
|
vDialing.py | # vDial-up client
# Copyright (C) 2015 - 2017 Nathaniel Olsen
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from time import sleep
import socket
import libs.vDialupcore as core
from multiprocessing import Process
import sys
import struct
def MD5SUM_mismatch(vNumber_to_connect, sock):
print("*Warning: The server's MD5SUM does not match with the one listed on file, Do you wish to continue? (Y/N)")
if vNumber_to_connect == core.RegServ_vNumber:
MD5SUM_on_file = core.RegServ_MD5SUM
else:
pass # Right now, there is no way to retrieve a server's md5sum until I implement md5sum retriving in RegServ.
print("MD5SUM on file: %s" % (MD5SUM_on_file))
print("MD5SUM according to server: %s" % (received.split()[1]))
print("")
choice = input("Enter choice (Y/N): ")
if choice == 'Y' or choice == 'y':
init(sock, vNumber_to_connect)
if choice == 'N' or choice == 'n':
sys.exit() # Exit for now.
class main():
def send_msg(sock, msg):
# Prefix each message with a 4-byte length (network byte order)
msg = struct.pack('>I', len(msg)) + str.encode(msg)
sock.sendall(msg)
def recv_msg(sock):
# Read message length and unpack it into an integer
raw_msglen = main.recvall(sock, 4)
if not raw_msglen:
return None
msglen = struct.unpack('>I', str.encode(raw_msglen))[0]
return main.recvall(sock, msglen)
def recvall(sock, n):
# Helper function to recv n bytes or return None if EOF is hit
|
def servping(sock):
while 1:
sleep(20)
sock.sendall(bytes("SERVPING" + "\n", "utf-8"))
if main.listen_for_data(sock) == "PONG":
break
else:
print("Disconnected: Connection timeout.")
def vdialing(vNumber_to_connect, vNumber_IP):
if core.config['use_ipv6_when_possible']:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("vDialing %s..." % (vNumber_to_connect))
if core.config['vDial-up Settings']['vNumber'] == "000000000":
core.dialupnoise()
try:
sock.connect((vNumber_IP, 5000))
except ConnectionRefusedError:
print("Error: Connection Refused.")
sys.exit()
main.send_msg(sock, "INITPING")
if main.recv_msg(sock) == "PONG":
print("Connected.")
#Process(target=main.servping, args=[sock]).start() # The ability to check if a server connection is still alive is coming soon.
main.send_msg(sock, "MD5SUMCHECK")
if main.recv_msg(sock).split()[0] == "MD5SUM:":
if main.recv_msg(sock).split()[1] == core.RegServ_MD5SUM:
print("MD5SUM verification was succeeded.")
else:
MD5SUM_mismatch(vNumber_to_connect, sock)
else:
print("Error: Unable to retrieve MD5SUM.")
main.init(sock, vNumber_to_connect)
else:
print("Error: Server did not properly respond to INITPING, disconnecting.")
else:
Process(target=core.dialupnoise()).start()
sock.connect((vNumber_IP, 5000))
main.send_msg(sock, "INITPING")
if main.recv_msg(sock) == "PONG":
print("Connected to Registation Server!")
main.send_msg(sock, "MD5SUMCHECK")
if main.recv_msg(sock).split()[0] == "MD5SUM:":
if main.recv_msg(sock).split()[1] == core.RegServ_MD5SUM:
print("MD5SUM verification was succeeded.")
else:
MD5SUM_mismatch(vNumber_to_connect, sock)
else:
print("Error: Unable to retrieve MD5SUM.")
else:
print("Error: Server did not properly respond to INITPING, disconnecting.")
def init(sock, vNumber_to_connect):
main.send_msg(sock, "VNUMBER: {}".format(core.config['vDial-up Settings']['vNumber']))
if core.config['vDial-up Settings']['vNumber'] == "000000000":
main.send_msg(sock, "CLIENTREGISTER")
if main.recv_msg(sock).split()[0] == "CONFIG:":
if main.recv_msg(sock).split()[1] == "vNumber":
core.config['vDial-up Settings']['vNumber'] = main.recv_msg(sock).split()[2]
core.saveconfig()
if main.recv_msg(sock).split()[1] == "Key":
core.config['vDial-up Settings']['Key'] = main.recv_msg(sock).split()[2]
core.saveconfig()
if main.recv_msg(sock).split()[0] == "TOCLIENT:":
print(" ".join(main.recv_msg(sock).split()[2:]))
else:
main.send_msg(sock, "KEY: {}".format(core.config['vDial-up Settings']['Key']))
main.send_msg(sock, "INIT")
| data = ''
while len(data) < n:
packet = (sock.recv(n - len(data)).decode('utf-8'))
if not packet:
return None
data += packet
return data | identifier_body |
che-stack.factory.ts | /*
* Copyright (c) 2015-2016 Codenvy, S.A.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Codenvy, S.A. - initial API and implementation
*/
'use strict';
/**
* This class is handling the stacks retrieval
* It sets to the array of stacks
* @author Florent Benoit
* @author Ann Shumilova
*/
export class CheStack {
/**
* Default constructor that is using resource
* @ngInject for Dependency injection
*/
constructor($resource) {
// keep resource
this.$resource = $resource;
// stacks per id
this.stacksById = {};
// stacks
this.stacks = [];
// remote call
this.remoteStackAPI = this.$resource('/api/stack', {}, {
getStacks: {method: 'GET', url: '/api/stack?maxItems=50', isArray: true}, //TODO 50 items is temp solution while paging is not added
getStack: {method: 'GET', url: '/api/stack/:stackId'},
updateStack: {method: 'PUT', url: '/api/stack/:stackId'},
createStack: {method: 'POST', url: '/api/stack'},
deleteStack: {method: 'DELETE', url: '/api/stack/:stackId'}
});
}
/**
* Fetch the stacks
*/
fetchStacks() {
let promise = this.remoteStackAPI.getStacks().$promise;
let updatedPromise = promise.then((stacks) => {
// reset global list
this.stacks.length = 0;
for (var member in this.stacksById) {
delete this.stacksById[member];
}
stacks.forEach((stack) => {
// get attributes
var stackId = stack.id;
// add element on the list
this.stacksById[stackId] = stack;
this.stacks.push(stack);
});
});
return updatedPromise;
}
/**
* Gets all stacks
* @returns {Array}
*/
getStacks() {
return this.stacks;
}
/**
* The stacks per id
* @returns {*}
*/
getStackById(id) {
return this.stacksById[id];
}
/**
* Creates new stack.
* @param stack data for new stack
* @returns {$promise|*|T.$promise}
*/
createStack(stack) { | return this.remoteStackAPI.createStack({}, stack).$promise;
}
/**
* Fetch pointed stack.
* @param stackId stack's id
* @returns {$promise|*|T.$promise}
*/
fetchStack(stackId) {
return this.remoteStackAPI.getStack({stackId: stackId}).$promise;
}
/**
* Update pointed stack.
* @param stackId stack's id
* @returns {$promise|*|T.$promise}
*/
updateStack(stackId, stack) {
return this.remoteStackAPI.updateStack({stackId: stackId}, stack).$promise;
}
/**
* Delete pointed stack.
* @param stackId stack's id
* @returns {$promise|*|T.$promise}
*/
deleteStack(stackId) {
return this.remoteStackAPI.deleteStack({stackId: stackId}).$promise;
}
} | random_line_split |
|
che-stack.factory.ts | /*
* Copyright (c) 2015-2016 Codenvy, S.A.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Codenvy, S.A. - initial API and implementation
*/
'use strict';
/**
* This class is handling the stacks retrieval
* It sets to the array of stacks
* @author Florent Benoit
* @author Ann Shumilova
*/
export class | {
/**
* Default constructor that is using resource
* @ngInject for Dependency injection
*/
constructor($resource) {
// keep resource
this.$resource = $resource;
// stacks per id
this.stacksById = {};
// stacks
this.stacks = [];
// remote call
this.remoteStackAPI = this.$resource('/api/stack', {}, {
getStacks: {method: 'GET', url: '/api/stack?maxItems=50', isArray: true}, //TODO 50 items is temp solution while paging is not added
getStack: {method: 'GET', url: '/api/stack/:stackId'},
updateStack: {method: 'PUT', url: '/api/stack/:stackId'},
createStack: {method: 'POST', url: '/api/stack'},
deleteStack: {method: 'DELETE', url: '/api/stack/:stackId'}
});
}
/**
* Fetch the stacks
*/
fetchStacks() {
let promise = this.remoteStackAPI.getStacks().$promise;
let updatedPromise = promise.then((stacks) => {
// reset global list
this.stacks.length = 0;
for (var member in this.stacksById) {
delete this.stacksById[member];
}
stacks.forEach((stack) => {
// get attributes
var stackId = stack.id;
// add element on the list
this.stacksById[stackId] = stack;
this.stacks.push(stack);
});
});
return updatedPromise;
}
/**
* Gets all stacks
* @returns {Array}
*/
getStacks() {
return this.stacks;
}
/**
* The stacks per id
* @returns {*}
*/
getStackById(id) {
return this.stacksById[id];
}
/**
* Creates new stack.
* @param stack data for new stack
* @returns {$promise|*|T.$promise}
*/
createStack(stack) {
return this.remoteStackAPI.createStack({}, stack).$promise;
}
/**
* Fetch pointed stack.
* @param stackId stack's id
* @returns {$promise|*|T.$promise}
*/
fetchStack(stackId) {
return this.remoteStackAPI.getStack({stackId: stackId}).$promise;
}
/**
* Update pointed stack.
* @param stackId stack's id
* @returns {$promise|*|T.$promise}
*/
updateStack(stackId, stack) {
return this.remoteStackAPI.updateStack({stackId: stackId}, stack).$promise;
}
/**
* Delete pointed stack.
* @param stackId stack's id
* @returns {$promise|*|T.$promise}
*/
deleteStack(stackId) {
return this.remoteStackAPI.deleteStack({stackId: stackId}).$promise;
}
}
| CheStack | identifier_name |
prism.py |
import zipfile as zf
import numpy as np
import pandas as pd
from unitconversion import *
prismGrid_shp = r'G:\archive\datasets\PRISM\shp\prismGrid_p.shp'
prismGrid_pts = r'G:\archive\datasets\PRISM\shp\prismGrid_p.txt'
prismProj = r'G:\archive\datasets\PRISM\shp\PRISM_ppt_bil.prj'
ncol = 1405
nrow = 621
max_grid_id = ncol * nrow
def getMonthlyPrecipData(year, month, mask=None, conversion=None):
# print 'Getting data for', year, month
bil = r'/vsizip/G:\archive\datasets\PRISM\monthly\ppt\{0}\PRISM_ppt_stable_4kmM2_{0}_all_bil.zip\PRISM_ppt_stable_4kmM2_{0}{1:0>2d}_bil.bil'.format(year, month)
b = BilFile(bil, mask=mask)
data = b.data
if conversion is not None:
data *= conversion
# b.save_to_esri_grid('ppt_{}'.format(year), conversion_factor=mm_to_in)
return data
def getAnnualPrecipData(year, mask=None, conversion=None):
# print 'Getting data for year', year
bil = r'/vsizip/G:\archive\datasets\PRISM\monthly\ppt\{0}\PRISM_ppt_stable_4kmM2_{0}_all_bil.zip\PRISM_ppt_stable_4kmM2_{0}_bil.bil'.format(year)
b = BilFile(bil, mask=mask)
data = b.data
if conversion is not None:
data *= conversion
# b.save_to_esri_grid('ppt_{}'.format(year), conversion_factor=mm_to_in)
return data
def getGridIdFromRowCol(row, col):
"""
Determines the PRISM grid id based on a row, col input.
"""
assert 1 <= row <= nrow, 'Valid row numbers are bewteen 1 and {}.'.format(nrow)
assert 1 <= col <= ncol, 'Valid col numbers are bewteen 1 and {}.'.format(ncol)
grid_id = ((row-1)*ncol)+col
return grid_id
def getRowColFromGridId(grid_id):
"""
Determines the row, col based on a PRISM grid id.
"""
assert 1 <= grid_id <= max_grid_id, 'Valid Grid IDs are bewteen 1 and {}, inclusively.'.format(max_grid_id)
q, r = divmod(grid_id, ncol)
return q+1, r
def writeGridPointsToTxt(prismGrid_shp=prismGrid_shp, out_file=prismGrid_pts):
"""
Writes the PRISM grid id, row, and col for each feature in the PRISM grid shapefile.
"""
import arcpy
data = []
rowends = range(ncol, max_grid_id+1, ncol)
with arcpy.da.SearchCursor(prismGrid_shp, ['grid_code', 'row', 'col']) as cur:
rowdata = []
for rec in cur:
rowdata.append(rec[0])
if rec[2] in rowends:
data.append(rowdata)
rowdata = []
a = np.array(data, dtype=np.int)
np.savetxt(out_file, a)
def getGridPointsFromTxt(prismGrid_pts=prismGrid_pts):
"""
Returns an array of the PRISM grid id, row, and col for each feature in the PRISM grid shapefile.
"""
a = np.genfromtxt(prismGrid_pts, dtype=np.int, usemask=True)
return a
def makeGridMask(grid_pnts, grid_codes=None):
"""
Makes a mask with the same shape as the PRISM grid.
'grid_codes' is a list containing the grid id's of those cells to INCLUDE in your analysis.
"""
mask = np.ones((nrow, ncol), dtype=bool)
for row in range(mask.shape[0]):
mask[row] = np.in1d(grid_pnts[row], grid_codes, invert=True)
return mask
def downloadPrismFtpData(parm, output_dir=os.getcwd(), timestep='monthly', years=None, server='prism.oregonstate.edu'):
"""
Downloads ESRI BIL (.hdr) files from the PRISM FTP site.
'parm' is the parameter of interest: 'ppt', precipitation; 'tmax', temperature, max' 'tmin', temperature, min /
'tmean', temperature, mean
'timestep' is either 'monthly' or 'daily'. This string is used to direct the function to the right set of remote folders.
'years' is a list of the years for which data is desired.
"""
from ftplib import FTP
def handleDownload(block):
file.write(block)
# print ".\n"
# Play some defense
assert parm in ['ppt', 'tmax', 'tmean', 'tmin'], "'parm' must be one of: ['ppt', 'tmax', 'tmean', 'tmin']"
assert timestep in ['daily', 'monthly'], "'timestep' must be one of: ['daily', 'monthly']"
assert years is not None, 'Please enter a year for which data will be fetched.'
if isinstance(years, int):
years = list(years)
ftp = FTP(server)
print 'Logging into', server
ftp.login()
# Wrap everything in a try clause so we close the FTP connection gracefully
try:
for year in years:
dir = 'monthly'
if timestep == 'daily':
dir = timestep
dir_string = '{}/{}/{}'.format(dir, parm, year)
remote_files = []
ftp.dir(dir_string, remote_files.append)
for f_string in remote_files:
f = f_string.rsplit(' ')[-1]
if not '_all_bil' in f:
continue
print 'Downloading', f
if not os.path.isdir(os.path.join(output_dir, str(year))):
os.makedirs(os.path.join(output_dir, str(year)))
local_f = os.path.join(output_dir, str(year), f)
with open(local_f, 'wb') as file:
f_path = '{}/{}'.format(dir_string, f)
ftp.retrbinary('RETR ' + f_path, handleDownload)
except Exception as e:
print e
finally:
print('Closing the connection.')
ftp.close()
return
class BilFile(object):
"""
This class returns a BilFile object using GDAL to read the array data. Data units are in millimeters.
"""
def __init__(self, bil_file, mask=None):
self.bil_file = bil_file
self.hdr_file = bil_file[:-3]+'hdr'
gdal.GetDriverByName('EHdr').Register()
self.get_array(mask=mask)
self.originX = self.geotransform[0]
self.originY = self.geotransform[3]
self.pixelWidth = self.geotransform[1]
self.pixelHeight = self.geotransform[5]
def get_array(self, mask=None):
self.data = None
img = gdal.Open(self.bil_file, gdalconst.GA_ReadOnly)
band = img.GetRasterBand(1)
self.nodatavalue = band.GetNoDataValue()
self.data = band.ReadAsArray()
self.data = np.ma.masked_where(self.data==self.nodatavalue, self.data)
if mask is not None:
self.data = np.ma.masked_where(mask==True, self.data)
self.ncol = img.RasterXSize
self.nrow = img.RasterYSize
self.geotransform = img.GetGeoTransform()
def save_to_esri_grid(self, out_grid, conversion_factor=None, proj=None):
import arcpy
arcpy.env.overwriteOutput = True
arcpy.env.workspace = os.getcwd()
arcpy.CheckOutExtension('Spatial')
arcpy.env.outputCoordinateSystem = prismProj
if proj is not None:
arcpy.env.outputCoordinateSystem = proj
df = np.ma.filled(self.data, self.nodatavalue)
llx = self.originX
lly = self.originY - (self.nrow * -1 * self.pixelHeight)
point = arcpy.Point(llx, lly)
r = arcpy.NumPyArrayToRaster(df, lower_left_corner=point, x_cell_size=self.pixelWidth,
y_cell_size=-1*self.pixelHeight, value_to_nodata=self.nodatavalue)
if conversion_factor is not None:
r *= conversion_factor
r.save(out_grid)
def __extract_bil_from_zip(self, parent_zip):
|
def __clean_up(self):
try:
os.remove(os.path.join(self.pth, self.bil_file))
os.remove(os.path.join(self.pth, self.hdr_file))
except:
| with zf.ZipFile(parent_zip, 'r') as myzip:
if self.bil_file in myzip.namelist():
myzip.extract(self.bil_file, self.pth)
myzip.extract(self.hdr_file, self.pth)
return | identifier_body |
prism.py |
import zipfile as zf
import numpy as np
import pandas as pd
from unitconversion import *
prismGrid_shp = r'G:\archive\datasets\PRISM\shp\prismGrid_p.shp'
prismGrid_pts = r'G:\archive\datasets\PRISM\shp\prismGrid_p.txt'
prismProj = r'G:\archive\datasets\PRISM\shp\PRISM_ppt_bil.prj'
ncol = 1405
nrow = 621
max_grid_id = ncol * nrow
def getMonthlyPrecipData(year, month, mask=None, conversion=None):
# print 'Getting data for', year, month
bil = r'/vsizip/G:\archive\datasets\PRISM\monthly\ppt\{0}\PRISM_ppt_stable_4kmM2_{0}_all_bil.zip\PRISM_ppt_stable_4kmM2_{0}{1:0>2d}_bil.bil'.format(year, month)
b = BilFile(bil, mask=mask)
data = b.data
if conversion is not None:
data *= conversion
# b.save_to_esri_grid('ppt_{}'.format(year), conversion_factor=mm_to_in)
return data
def getAnnualPrecipData(year, mask=None, conversion=None):
# print 'Getting data for year', year
bil = r'/vsizip/G:\archive\datasets\PRISM\monthly\ppt\{0}\PRISM_ppt_stable_4kmM2_{0}_all_bil.zip\PRISM_ppt_stable_4kmM2_{0}_bil.bil'.format(year)
b = BilFile(bil, mask=mask)
data = b.data
if conversion is not None:
data *= conversion
# b.save_to_esri_grid('ppt_{}'.format(year), conversion_factor=mm_to_in)
return data
def getGridIdFromRowCol(row, col):
"""
Determines the PRISM grid id based on a row, col input.
"""
assert 1 <= row <= nrow, 'Valid row numbers are bewteen 1 and {}.'.format(nrow)
assert 1 <= col <= ncol, 'Valid col numbers are bewteen 1 and {}.'.format(ncol)
grid_id = ((row-1)*ncol)+col
return grid_id
def getRowColFromGridId(grid_id):
"""
Determines the row, col based on a PRISM grid id.
"""
assert 1 <= grid_id <= max_grid_id, 'Valid Grid IDs are bewteen 1 and {}, inclusively.'.format(max_grid_id)
q, r = divmod(grid_id, ncol)
return q+1, r
| """
Writes the PRISM grid id, row, and col for each feature in the PRISM grid shapefile.
"""
import arcpy
data = []
rowends = range(ncol, max_grid_id+1, ncol)
with arcpy.da.SearchCursor(prismGrid_shp, ['grid_code', 'row', 'col']) as cur:
rowdata = []
for rec in cur:
rowdata.append(rec[0])
if rec[2] in rowends:
data.append(rowdata)
rowdata = []
a = np.array(data, dtype=np.int)
np.savetxt(out_file, a)
def getGridPointsFromTxt(prismGrid_pts=prismGrid_pts):
"""
Returns an array of the PRISM grid id, row, and col for each feature in the PRISM grid shapefile.
"""
a = np.genfromtxt(prismGrid_pts, dtype=np.int, usemask=True)
return a
def makeGridMask(grid_pnts, grid_codes=None):
"""
Makes a mask with the same shape as the PRISM grid.
'grid_codes' is a list containing the grid id's of those cells to INCLUDE in your analysis.
"""
mask = np.ones((nrow, ncol), dtype=bool)
for row in range(mask.shape[0]):
mask[row] = np.in1d(grid_pnts[row], grid_codes, invert=True)
return mask
def downloadPrismFtpData(parm, output_dir=os.getcwd(), timestep='monthly', years=None, server='prism.oregonstate.edu'):
"""
Downloads ESRI BIL (.hdr) files from the PRISM FTP site.
'parm' is the parameter of interest: 'ppt', precipitation; 'tmax', temperature, max' 'tmin', temperature, min /
'tmean', temperature, mean
'timestep' is either 'monthly' or 'daily'. This string is used to direct the function to the right set of remote folders.
'years' is a list of the years for which data is desired.
"""
from ftplib import FTP
def handleDownload(block):
file.write(block)
# print ".\n"
# Play some defense
assert parm in ['ppt', 'tmax', 'tmean', 'tmin'], "'parm' must be one of: ['ppt', 'tmax', 'tmean', 'tmin']"
assert timestep in ['daily', 'monthly'], "'timestep' must be one of: ['daily', 'monthly']"
assert years is not None, 'Please enter a year for which data will be fetched.'
if isinstance(years, int):
years = list(years)
ftp = FTP(server)
print 'Logging into', server
ftp.login()
# Wrap everything in a try clause so we close the FTP connection gracefully
try:
for year in years:
dir = 'monthly'
if timestep == 'daily':
dir = timestep
dir_string = '{}/{}/{}'.format(dir, parm, year)
remote_files = []
ftp.dir(dir_string, remote_files.append)
for f_string in remote_files:
f = f_string.rsplit(' ')[-1]
if not '_all_bil' in f:
continue
print 'Downloading', f
if not os.path.isdir(os.path.join(output_dir, str(year))):
os.makedirs(os.path.join(output_dir, str(year)))
local_f = os.path.join(output_dir, str(year), f)
with open(local_f, 'wb') as file:
f_path = '{}/{}'.format(dir_string, f)
ftp.retrbinary('RETR ' + f_path, handleDownload)
except Exception as e:
print e
finally:
print('Closing the connection.')
ftp.close()
return
class BilFile(object):
"""
This class returns a BilFile object using GDAL to read the array data. Data units are in millimeters.
"""
def __init__(self, bil_file, mask=None):
self.bil_file = bil_file
self.hdr_file = bil_file[:-3]+'hdr'
gdal.GetDriverByName('EHdr').Register()
self.get_array(mask=mask)
self.originX = self.geotransform[0]
self.originY = self.geotransform[3]
self.pixelWidth = self.geotransform[1]
self.pixelHeight = self.geotransform[5]
def get_array(self, mask=None):
self.data = None
img = gdal.Open(self.bil_file, gdalconst.GA_ReadOnly)
band = img.GetRasterBand(1)
self.nodatavalue = band.GetNoDataValue()
self.data = band.ReadAsArray()
self.data = np.ma.masked_where(self.data==self.nodatavalue, self.data)
if mask is not None:
self.data = np.ma.masked_where(mask==True, self.data)
self.ncol = img.RasterXSize
self.nrow = img.RasterYSize
self.geotransform = img.GetGeoTransform()
def save_to_esri_grid(self, out_grid, conversion_factor=None, proj=None):
import arcpy
arcpy.env.overwriteOutput = True
arcpy.env.workspace = os.getcwd()
arcpy.CheckOutExtension('Spatial')
arcpy.env.outputCoordinateSystem = prismProj
if proj is not None:
arcpy.env.outputCoordinateSystem = proj
df = np.ma.filled(self.data, self.nodatavalue)
llx = self.originX
lly = self.originY - (self.nrow * -1 * self.pixelHeight)
point = arcpy.Point(llx, lly)
r = arcpy.NumPyArrayToRaster(df, lower_left_corner=point, x_cell_size=self.pixelWidth,
y_cell_size=-1*self.pixelHeight, value_to_nodata=self.nodatavalue)
if conversion_factor is not None:
r *= conversion_factor
r.save(out_grid)
def __extract_bil_from_zip(self, parent_zip):
with zf.ZipFile(parent_zip, 'r') as myzip:
if self.bil_file in myzip.namelist():
myzip.extract(self.bil_file, self.pth)
myzip.extract(self.hdr_file, self.pth)
return
def __clean_up(self):
try:
os.remove(os.path.join(self.pth, self.bil_file))
os.remove(os.path.join(self.pth, self.hdr_file))
except |
def writeGridPointsToTxt(prismGrid_shp=prismGrid_shp, out_file=prismGrid_pts): | random_line_split |
prism.py |
import zipfile as zf
import numpy as np
import pandas as pd
from unitconversion import *
prismGrid_shp = r'G:\archive\datasets\PRISM\shp\prismGrid_p.shp'
prismGrid_pts = r'G:\archive\datasets\PRISM\shp\prismGrid_p.txt'
prismProj = r'G:\archive\datasets\PRISM\shp\PRISM_ppt_bil.prj'
ncol = 1405
nrow = 621
max_grid_id = ncol * nrow
def getMonthlyPrecipData(year, month, mask=None, conversion=None):
# print 'Getting data for', year, month
bil = r'/vsizip/G:\archive\datasets\PRISM\monthly\ppt\{0}\PRISM_ppt_stable_4kmM2_{0}_all_bil.zip\PRISM_ppt_stable_4kmM2_{0}{1:0>2d}_bil.bil'.format(year, month)
b = BilFile(bil, mask=mask)
data = b.data
if conversion is not None:
data *= conversion
# b.save_to_esri_grid('ppt_{}'.format(year), conversion_factor=mm_to_in)
return data
def getAnnualPrecipData(year, mask=None, conversion=None):
# print 'Getting data for year', year
bil = r'/vsizip/G:\archive\datasets\PRISM\monthly\ppt\{0}\PRISM_ppt_stable_4kmM2_{0}_all_bil.zip\PRISM_ppt_stable_4kmM2_{0}_bil.bil'.format(year)
b = BilFile(bil, mask=mask)
data = b.data
if conversion is not None:
data *= conversion
# b.save_to_esri_grid('ppt_{}'.format(year), conversion_factor=mm_to_in)
return data
def getGridIdFromRowCol(row, col):
"""
Determines the PRISM grid id based on a row, col input.
"""
assert 1 <= row <= nrow, 'Valid row numbers are bewteen 1 and {}.'.format(nrow)
assert 1 <= col <= ncol, 'Valid col numbers are bewteen 1 and {}.'.format(ncol)
grid_id = ((row-1)*ncol)+col
return grid_id
def | (grid_id):
"""
Determines the row, col based on a PRISM grid id.
"""
assert 1 <= grid_id <= max_grid_id, 'Valid Grid IDs are bewteen 1 and {}, inclusively.'.format(max_grid_id)
q, r = divmod(grid_id, ncol)
return q+1, r
def writeGridPointsToTxt(prismGrid_shp=prismGrid_shp, out_file=prismGrid_pts):
"""
Writes the PRISM grid id, row, and col for each feature in the PRISM grid shapefile.
"""
import arcpy
data = []
rowends = range(ncol, max_grid_id+1, ncol)
with arcpy.da.SearchCursor(prismGrid_shp, ['grid_code', 'row', 'col']) as cur:
rowdata = []
for rec in cur:
rowdata.append(rec[0])
if rec[2] in rowends:
data.append(rowdata)
rowdata = []
a = np.array(data, dtype=np.int)
np.savetxt(out_file, a)
def getGridPointsFromTxt(prismGrid_pts=prismGrid_pts):
"""
Returns an array of the PRISM grid id, row, and col for each feature in the PRISM grid shapefile.
"""
a = np.genfromtxt(prismGrid_pts, dtype=np.int, usemask=True)
return a
def makeGridMask(grid_pnts, grid_codes=None):
"""
Makes a mask with the same shape as the PRISM grid.
'grid_codes' is a list containing the grid id's of those cells to INCLUDE in your analysis.
"""
mask = np.ones((nrow, ncol), dtype=bool)
for row in range(mask.shape[0]):
mask[row] = np.in1d(grid_pnts[row], grid_codes, invert=True)
return mask
def downloadPrismFtpData(parm, output_dir=os.getcwd(), timestep='monthly', years=None, server='prism.oregonstate.edu'):
"""
Downloads ESRI BIL (.hdr) files from the PRISM FTP site.
'parm' is the parameter of interest: 'ppt', precipitation; 'tmax', temperature, max' 'tmin', temperature, min /
'tmean', temperature, mean
'timestep' is either 'monthly' or 'daily'. This string is used to direct the function to the right set of remote folders.
'years' is a list of the years for which data is desired.
"""
from ftplib import FTP
def handleDownload(block):
file.write(block)
# print ".\n"
# Play some defense
assert parm in ['ppt', 'tmax', 'tmean', 'tmin'], "'parm' must be one of: ['ppt', 'tmax', 'tmean', 'tmin']"
assert timestep in ['daily', 'monthly'], "'timestep' must be one of: ['daily', 'monthly']"
assert years is not None, 'Please enter a year for which data will be fetched.'
if isinstance(years, int):
years = list(years)
ftp = FTP(server)
print 'Logging into', server
ftp.login()
# Wrap everything in a try clause so we close the FTP connection gracefully
try:
for year in years:
dir = 'monthly'
if timestep == 'daily':
dir = timestep
dir_string = '{}/{}/{}'.format(dir, parm, year)
remote_files = []
ftp.dir(dir_string, remote_files.append)
for f_string in remote_files:
f = f_string.rsplit(' ')[-1]
if not '_all_bil' in f:
continue
print 'Downloading', f
if not os.path.isdir(os.path.join(output_dir, str(year))):
os.makedirs(os.path.join(output_dir, str(year)))
local_f = os.path.join(output_dir, str(year), f)
with open(local_f, 'wb') as file:
f_path = '{}/{}'.format(dir_string, f)
ftp.retrbinary('RETR ' + f_path, handleDownload)
except Exception as e:
print e
finally:
print('Closing the connection.')
ftp.close()
return
class BilFile(object):
"""
This class returns a BilFile object using GDAL to read the array data. Data units are in millimeters.
"""
def __init__(self, bil_file, mask=None):
self.bil_file = bil_file
self.hdr_file = bil_file[:-3]+'hdr'
gdal.GetDriverByName('EHdr').Register()
self.get_array(mask=mask)
self.originX = self.geotransform[0]
self.originY = self.geotransform[3]
self.pixelWidth = self.geotransform[1]
self.pixelHeight = self.geotransform[5]
def get_array(self, mask=None):
self.data = None
img = gdal.Open(self.bil_file, gdalconst.GA_ReadOnly)
band = img.GetRasterBand(1)
self.nodatavalue = band.GetNoDataValue()
self.data = band.ReadAsArray()
self.data = np.ma.masked_where(self.data==self.nodatavalue, self.data)
if mask is not None:
self.data = np.ma.masked_where(mask==True, self.data)
self.ncol = img.RasterXSize
self.nrow = img.RasterYSize
self.geotransform = img.GetGeoTransform()
def save_to_esri_grid(self, out_grid, conversion_factor=None, proj=None):
import arcpy
arcpy.env.overwriteOutput = True
arcpy.env.workspace = os.getcwd()
arcpy.CheckOutExtension('Spatial')
arcpy.env.outputCoordinateSystem = prismProj
if proj is not None:
arcpy.env.outputCoordinateSystem = proj
df = np.ma.filled(self.data, self.nodatavalue)
llx = self.originX
lly = self.originY - (self.nrow * -1 * self.pixelHeight)
point = arcpy.Point(llx, lly)
r = arcpy.NumPyArrayToRaster(df, lower_left_corner=point, x_cell_size=self.pixelWidth,
y_cell_size=-1*self.pixelHeight, value_to_nodata=self.nodatavalue)
if conversion_factor is not None:
r *= conversion_factor
r.save(out_grid)
def __extract_bil_from_zip(self, parent_zip):
with zf.ZipFile(parent_zip, 'r') as myzip:
if self.bil_file in myzip.namelist():
myzip.extract(self.bil_file, self.pth)
myzip.extract(self.hdr_file, self.pth)
return
def __clean_up(self):
try:
os.remove(os.path.join(self.pth, self.bil_file))
os.remove(os.path.join(self.pth, self.hdr_file))
except:
| getRowColFromGridId | identifier_name |
prism.py | import zipfile as zf
import numpy as np
import pandas as pd
from unitconversion import *
prismGrid_shp = r'G:\archive\datasets\PRISM\shp\prismGrid_p.shp'
prismGrid_pts = r'G:\archive\datasets\PRISM\shp\prismGrid_p.txt'
prismProj = r'G:\archive\datasets\PRISM\shp\PRISM_ppt_bil.prj'
ncol = 1405
nrow = 621
max_grid_id = ncol * nrow
def getMonthlyPrecipData(year, month, mask=None, conversion=None):
# print 'Getting data for', year, month
bil = r'/vsizip/G:\archive\datasets\PRISM\monthly\ppt\{0}\PRISM_ppt_stable_4kmM2_{0}_all_bil.zip\PRISM_ppt_stable_4kmM2_{0}{1:0>2d}_bil.bil'.format(year, month)
b = BilFile(bil, mask=mask)
data = b.data
if conversion is not None:
data *= conversion
# b.save_to_esri_grid('ppt_{}'.format(year), conversion_factor=mm_to_in)
return data
def getAnnualPrecipData(year, mask=None, conversion=None):
# print 'Getting data for year', year
bil = r'/vsizip/G:\archive\datasets\PRISM\monthly\ppt\{0}\PRISM_ppt_stable_4kmM2_{0}_all_bil.zip\PRISM_ppt_stable_4kmM2_{0}_bil.bil'.format(year)
b = BilFile(bil, mask=mask)
data = b.data
if conversion is not None:
data *= conversion
# b.save_to_esri_grid('ppt_{}'.format(year), conversion_factor=mm_to_in)
return data
def getGridIdFromRowCol(row, col):
"""
Determines the PRISM grid id based on a row, col input.
"""
assert 1 <= row <= nrow, 'Valid row numbers are bewteen 1 and {}.'.format(nrow)
assert 1 <= col <= ncol, 'Valid col numbers are bewteen 1 and {}.'.format(ncol)
grid_id = ((row-1)*ncol)+col
return grid_id
def getRowColFromGridId(grid_id):
"""
Determines the row, col based on a PRISM grid id.
"""
assert 1 <= grid_id <= max_grid_id, 'Valid Grid IDs are bewteen 1 and {}, inclusively.'.format(max_grid_id)
q, r = divmod(grid_id, ncol)
return q+1, r
def writeGridPointsToTxt(prismGrid_shp=prismGrid_shp, out_file=prismGrid_pts):
"""
Writes the PRISM grid id, row, and col for each feature in the PRISM grid shapefile.
"""
import arcpy
data = []
rowends = range(ncol, max_grid_id+1, ncol)
with arcpy.da.SearchCursor(prismGrid_shp, ['grid_code', 'row', 'col']) as cur:
rowdata = []
for rec in cur:
rowdata.append(rec[0])
if rec[2] in rowends:
data.append(rowdata)
rowdata = []
a = np.array(data, dtype=np.int)
np.savetxt(out_file, a)
def getGridPointsFromTxt(prismGrid_pts=prismGrid_pts):
"""
Returns an array of the PRISM grid id, row, and col for each feature in the PRISM grid shapefile.
"""
a = np.genfromtxt(prismGrid_pts, dtype=np.int, usemask=True)
return a
def makeGridMask(grid_pnts, grid_codes=None):
"""
Makes a mask with the same shape as the PRISM grid.
'grid_codes' is a list containing the grid id's of those cells to INCLUDE in your analysis.
"""
mask = np.ones((nrow, ncol), dtype=bool)
for row in range(mask.shape[0]):
mask[row] = np.in1d(grid_pnts[row], grid_codes, invert=True)
return mask
def downloadPrismFtpData(parm, output_dir=os.getcwd(), timestep='monthly', years=None, server='prism.oregonstate.edu'):
"""
Downloads ESRI BIL (.hdr) files from the PRISM FTP site.
'parm' is the parameter of interest: 'ppt', precipitation; 'tmax', temperature, max' 'tmin', temperature, min /
'tmean', temperature, mean
'timestep' is either 'monthly' or 'daily'. This string is used to direct the function to the right set of remote folders.
'years' is a list of the years for which data is desired.
"""
from ftplib import FTP
def handleDownload(block):
file.write(block)
# print ".\n"
# Play some defense
assert parm in ['ppt', 'tmax', 'tmean', 'tmin'], "'parm' must be one of: ['ppt', 'tmax', 'tmean', 'tmin']"
assert timestep in ['daily', 'monthly'], "'timestep' must be one of: ['daily', 'monthly']"
assert years is not None, 'Please enter a year for which data will be fetched.'
if isinstance(years, int):
years = list(years)
ftp = FTP(server)
print 'Logging into', server
ftp.login()
# Wrap everything in a try clause so we close the FTP connection gracefully
try:
for year in years:
dir = 'monthly'
if timestep == 'daily':
dir = timestep
dir_string = '{}/{}/{}'.format(dir, parm, year)
remote_files = []
ftp.dir(dir_string, remote_files.append)
for f_string in remote_files:
f = f_string.rsplit(' ')[-1]
if not '_all_bil' in f:
continue
print 'Downloading', f
if not os.path.isdir(os.path.join(output_dir, str(year))):
|
local_f = os.path.join(output_dir, str(year), f)
with open(local_f, 'wb') as file:
f_path = '{}/{}'.format(dir_string, f)
ftp.retrbinary('RETR ' + f_path, handleDownload)
except Exception as e:
print e
finally:
print('Closing the connection.')
ftp.close()
return
class BilFile(object):
"""
This class returns a BilFile object using GDAL to read the array data. Data units are in millimeters.
"""
def __init__(self, bil_file, mask=None):
self.bil_file = bil_file
self.hdr_file = bil_file[:-3]+'hdr'
gdal.GetDriverByName('EHdr').Register()
self.get_array(mask=mask)
self.originX = self.geotransform[0]
self.originY = self.geotransform[3]
self.pixelWidth = self.geotransform[1]
self.pixelHeight = self.geotransform[5]
def get_array(self, mask=None):
self.data = None
img = gdal.Open(self.bil_file, gdalconst.GA_ReadOnly)
band = img.GetRasterBand(1)
self.nodatavalue = band.GetNoDataValue()
self.data = band.ReadAsArray()
self.data = np.ma.masked_where(self.data==self.nodatavalue, self.data)
if mask is not None:
self.data = np.ma.masked_where(mask==True, self.data)
self.ncol = img.RasterXSize
self.nrow = img.RasterYSize
self.geotransform = img.GetGeoTransform()
def save_to_esri_grid(self, out_grid, conversion_factor=None, proj=None):
import arcpy
arcpy.env.overwriteOutput = True
arcpy.env.workspace = os.getcwd()
arcpy.CheckOutExtension('Spatial')
arcpy.env.outputCoordinateSystem = prismProj
if proj is not None:
arcpy.env.outputCoordinateSystem = proj
df = np.ma.filled(self.data, self.nodatavalue)
llx = self.originX
lly = self.originY - (self.nrow * -1 * self.pixelHeight)
point = arcpy.Point(llx, lly)
r = arcpy.NumPyArrayToRaster(df, lower_left_corner=point, x_cell_size=self.pixelWidth,
y_cell_size=-1*self.pixelHeight, value_to_nodata=self.nodatavalue)
if conversion_factor is not None:
r *= conversion_factor
r.save(out_grid)
def __extract_bil_from_zip(self, parent_zip):
with zf.ZipFile(parent_zip, 'r') as myzip:
if self.bil_file in myzip.namelist():
myzip.extract(self.bil_file, self.pth)
myzip.extract(self.hdr_file, self.pth)
return
def __clean_up(self):
try:
os.remove(os.path.join(self.pth, self.bil_file))
os.remove(os.path.join(self.pth, self.hdr_file))
except:
| os.makedirs(os.path.join(output_dir, str(year))) | conditional_block |
meta.ts | n_ast';
import {createI18nMessageFactory} from '../../../i18n/i18n_parser';
import * as html from '../../../ml_parser/ast';
import {DEFAULT_INTERPOLATION_CONFIG, InterpolationConfig} from '../../../ml_parser/interpolation_config';
import * as o from '../../../output/output_ast';
import {I18N_ATTR, I18N_ATTR_PREFIX, hasI18nAttrs, icuFromI18nMessage} from './util';
export type I18nMeta = {
id?: string,
customId?: string,
legacyId?: string,
description?: string,
meaning?: string
};
function setI18nRefs(html: html.Node & {i18n?: i18n.AST}, i18n: i18n.Node) {
html.i18n = i18n;
}
/**
* This visitor walks over HTML parse tree and converts information stored in
* i18n-related attributes ("i18n" and "i18n-*") into i18n meta object that is
* stored with other element's and attribute's information.
*/
export class I18nMetaVisitor implements html.Visitor {
// i18n message generation factory
private _createI18nMessage = createI18nMessageFactory(this.interpolationConfig);
constructor(
private interpolationConfig: InterpolationConfig = DEFAULT_INTERPOLATION_CONFIG,
private keepI18nAttrs: boolean = false, private i18nLegacyMessageIdFormat: string = '') {}
private _generateI18nMessage(
nodes: html.Node[], meta: string|i18n.AST = '',
visitNodeFn?: (html: html.Node, i18n: i18n.Node) => void): i18n.Message {
const parsed: I18nMeta =
typeof meta === 'string' ? parseI18nMeta(meta) : metaFromI18nMessage(meta as i18n.Message);
const message = this._createI18nMessage(
nodes, parsed.meaning || '', parsed.description || '', parsed.customId || '', visitNodeFn);
if (!message.id) {
// generate (or restore) message id if not specified in template
message.id = typeof meta !== 'string' && (meta as i18n.Message).id || decimalDigest(message);
}
if (this.i18nLegacyMessageIdFormat === 'xlf') {
message.legacyId = computeDigest(message);
} else if (
this.i18nLegacyMessageIdFormat === 'xlf2' || this.i18nLegacyMessageIdFormat === 'xmb') {
message.legacyId = computeDecimalDigest(message);
} else if (typeof meta !== 'string') {
// This occurs if we are doing the 2nd pass after whitespace removal
// In that case we want to reuse the legacy message generated in the 1st pass
// See `parseTemplate()` in `packages/compiler/src/render3/view/template.ts`
message.legacyId = (meta as i18n.Message).legacyId;
}
return message;
}
| (element: html.Element, context: any): any {
if (hasI18nAttrs(element)) {
const attrs: html.Attribute[] = [];
const attrsMeta: {[key: string]: string} = {};
for (const attr of element.attrs) {
if (attr.name === I18N_ATTR) {
// root 'i18n' node attribute
const i18n = element.i18n || attr.value;
const message = this._generateI18nMessage(element.children, i18n, setI18nRefs);
// do not assign empty i18n meta
if (message.nodes.length) {
element.i18n = message;
}
} else if (attr.name.startsWith(I18N_ATTR_PREFIX)) {
// 'i18n-*' attributes
const key = attr.name.slice(I18N_ATTR_PREFIX.length);
attrsMeta[key] = attr.value;
} else {
// non-i18n attributes
attrs.push(attr);
}
}
// set i18n meta for attributes
if (Object.keys(attrsMeta).length) {
for (const attr of attrs) {
const meta = attrsMeta[attr.name];
// do not create translation for empty attributes
if (meta !== undefined && attr.value) {
attr.i18n = this._generateI18nMessage([attr], attr.i18n || meta);
}
}
}
if (!this.keepI18nAttrs) {
// update element's attributes,
// keeping only non-i18n related ones
element.attrs = attrs;
}
}
html.visitAll(this, element.children);
return element;
}
visitExpansion(expansion: html.Expansion, context: any): any {
let message;
const meta = expansion.i18n;
if (meta instanceof i18n.IcuPlaceholder) {
// set ICU placeholder name (e.g. "ICU_1"),
// generated while processing root element contents,
// so we can reference it when we output translation
const name = meta.name;
message = this._generateI18nMessage([expansion], meta);
const icu = icuFromI18nMessage(message);
icu.name = name;
} else {
// when ICU is a root level translation
message = this._generateI18nMessage([expansion], meta);
}
expansion.i18n = message;
return expansion;
}
visitText(text: html.Text, context: any): any { return text; }
visitAttribute(attribute: html.Attribute, context: any): any { return attribute; }
visitComment(comment: html.Comment, context: any): any { return comment; }
visitExpansionCase(expansionCase: html.ExpansionCase, context: any): any { return expansionCase; }
}
export function metaFromI18nMessage(message: i18n.Message, id: string | null = null): I18nMeta {
return {
id: typeof id === 'string' ? id : message.id || '',
customId: message.customId,
legacyId: message.legacyId,
meaning: message.meaning || '',
description: message.description || ''
};
}
/** I18n separators for metadata **/
const I18N_MEANING_SEPARATOR = '|';
const I18N_ID_SEPARATOR = '@@';
/**
* Parses i18n metas like:
* - "@@id",
* - "description[@@id]",
* - "meaning|description[@@id]"
* and returns an object with parsed output.
*
* @param meta String that represents i18n meta
* @returns Object with id, meaning and description fields
*/
export function parseI18nMeta(meta?: string): I18nMeta {
let customId: string|undefined;
let meaning: string|undefined;
let description: string|undefined;
if (meta) {
const idIndex = meta.indexOf(I18N_ID_SEPARATOR);
const descIndex = meta.indexOf(I18N_MEANING_SEPARATOR);
let meaningAndDesc: string;
[meaningAndDesc, customId] =
(idIndex > -1) ? [meta.slice(0, idIndex), meta.slice(idIndex + 2)] : [meta, ''];
[meaning, description] = (descIndex > -1) ?
[meaningAndDesc.slice(0, descIndex), meaningAndDesc.slice(descIndex + 1)] :
['', meaningAndDesc];
}
return {customId, meaning, description};
}
/**
* Serialize the given `meta` and `messagePart` a string that can be used in a `$localize`
* tagged string. The format of the metadata is the same as that parsed by `parseI18nMeta()`.
*
* @param meta The metadata to serialize
* @param messagePart The first part of the tagged string
*/
export function serializeI18nHead(meta: I18nMeta, messagePart: string): string {
let metaBlock = meta.description || '';
if (meta.meaning) {
metaBlock = `${meta.meaning}|${metaBlock}`;
}
if (meta.customId || meta.legacyId) {
metaBlock = `${metaBlock}@@${meta.customId || meta.legacyId}`;
}
if (metaBlock === '') {
// There is no metaBlock, so we must ensure that any starting colon is escaped.
return escapeStartingColon(messagePart);
} else {
return `:${escapeColons(metaBlock)}:${messagePart}`;
}
}
/**
* Serialize the given `placeholderName` and `messagePart` into strings that can be used in a
* `$localize` tagged string.
*
* @param placeholderName The placeholder name to serialize
* @param messagePart The following message string after this placeholder
*/
export function serializeI18nTemplatePart(placeholderName: string, messagePart: string): string {
if (placeholderName === '') {
// There is no placeholder name block, so we must ensure that any starting colon is escaped.
return escapeStartingColon(messagePart);
} else {
return `:${placeholderName}:${messagePart | visitElement | identifier_name |
meta.ts | import {DEFAULT_INTERPOLATION_CONFIG, InterpolationConfig} from '../../../ml_parser/interpolation_config';
import * as o from '../../../output/output_ast';
import {I18N_ATTR, I18N_ATTR_PREFIX, hasI18nAttrs, icuFromI18nMessage} from './util';
export type I18nMeta = {
id?: string,
customId?: string,
legacyId?: string,
description?: string,
meaning?: string
};
function setI18nRefs(html: html.Node & {i18n?: i18n.AST}, i18n: i18n.Node) {
html.i18n = i18n;
}
/**
* This visitor walks over HTML parse tree and converts information stored in
* i18n-related attributes ("i18n" and "i18n-*") into i18n meta object that is
* stored with other element's and attribute's information.
*/
export class I18nMetaVisitor implements html.Visitor {
// i18n message generation factory
private _createI18nMessage = createI18nMessageFactory(this.interpolationConfig);
constructor(
private interpolationConfig: InterpolationConfig = DEFAULT_INTERPOLATION_CONFIG,
private keepI18nAttrs: boolean = false, private i18nLegacyMessageIdFormat: string = '') {}
private _generateI18nMessage(
nodes: html.Node[], meta: string|i18n.AST = '',
visitNodeFn?: (html: html.Node, i18n: i18n.Node) => void): i18n.Message {
const parsed: I18nMeta =
typeof meta === 'string' ? parseI18nMeta(meta) : metaFromI18nMessage(meta as i18n.Message);
const message = this._createI18nMessage(
nodes, parsed.meaning || '', parsed.description || '', parsed.customId || '', visitNodeFn);
if (!message.id) {
// generate (or restore) message id if not specified in template
message.id = typeof meta !== 'string' && (meta as i18n.Message).id || decimalDigest(message);
}
if (this.i18nLegacyMessageIdFormat === 'xlf') {
message.legacyId = computeDigest(message);
} else if (
this.i18nLegacyMessageIdFormat === 'xlf2' || this.i18nLegacyMessageIdFormat === 'xmb') {
message.legacyId = computeDecimalDigest(message);
} else if (typeof meta !== 'string') {
// This occurs if we are doing the 2nd pass after whitespace removal
// In that case we want to reuse the legacy message generated in the 1st pass
// See `parseTemplate()` in `packages/compiler/src/render3/view/template.ts`
message.legacyId = (meta as i18n.Message).legacyId;
}
return message;
}
visitElement(element: html.Element, context: any): any {
if (hasI18nAttrs(element)) {
const attrs: html.Attribute[] = [];
const attrsMeta: {[key: string]: string} = {};
for (const attr of element.attrs) {
if (attr.name === I18N_ATTR) {
// root 'i18n' node attribute
const i18n = element.i18n || attr.value;
const message = this._generateI18nMessage(element.children, i18n, setI18nRefs);
// do not assign empty i18n meta
if (message.nodes.length) {
element.i18n = message;
}
} else if (attr.name.startsWith(I18N_ATTR_PREFIX)) {
// 'i18n-*' attributes
const key = attr.name.slice(I18N_ATTR_PREFIX.length);
attrsMeta[key] = attr.value;
} else {
// non-i18n attributes
attrs.push(attr);
}
}
// set i18n meta for attributes
if (Object.keys(attrsMeta).length) {
for (const attr of attrs) {
const meta = attrsMeta[attr.name];
// do not create translation for empty attributes
if (meta !== undefined && attr.value) {
attr.i18n = this._generateI18nMessage([attr], attr.i18n || meta);
}
}
}
if (!this.keepI18nAttrs) {
// update element's attributes,
// keeping only non-i18n related ones
element.attrs = attrs;
}
}
html.visitAll(this, element.children);
return element;
}
visitExpansion(expansion: html.Expansion, context: any): any {
let message;
const meta = expansion.i18n;
if (meta instanceof i18n.IcuPlaceholder) {
// set ICU placeholder name (e.g. "ICU_1"),
// generated while processing root element contents,
// so we can reference it when we output translation
const name = meta.name;
message = this._generateI18nMessage([expansion], meta);
const icu = icuFromI18nMessage(message);
icu.name = name;
} else {
// when ICU is a root level translation
message = this._generateI18nMessage([expansion], meta);
}
expansion.i18n = message;
return expansion;
}
visitText(text: html.Text, context: any): any { return text; }
visitAttribute(attribute: html.Attribute, context: any): any { return attribute; }
visitComment(comment: html.Comment, context: any): any { return comment; }
visitExpansionCase(expansionCase: html.ExpansionCase, context: any): any { return expansionCase; }
}
export function metaFromI18nMessage(message: i18n.Message, id: string | null = null): I18nMeta {
return {
id: typeof id === 'string' ? id : message.id || '',
customId: message.customId,
legacyId: message.legacyId,
meaning: message.meaning || '',
description: message.description || ''
};
}
/** I18n separators for metadata **/
const I18N_MEANING_SEPARATOR = '|';
const I18N_ID_SEPARATOR = '@@';
/**
* Parses i18n metas like:
* - "@@id",
* - "description[@@id]",
* - "meaning|description[@@id]"
* and returns an object with parsed output.
*
* @param meta String that represents i18n meta
* @returns Object with id, meaning and description fields
*/
export function parseI18nMeta(meta?: string): I18nMeta {
let customId: string|undefined;
let meaning: string|undefined;
let description: string|undefined;
if (meta) {
const idIndex = meta.indexOf(I18N_ID_SEPARATOR);
const descIndex = meta.indexOf(I18N_MEANING_SEPARATOR);
let meaningAndDesc: string;
[meaningAndDesc, customId] =
(idIndex > -1) ? [meta.slice(0, idIndex), meta.slice(idIndex + 2)] : [meta, ''];
[meaning, description] = (descIndex > -1) ?
[meaningAndDesc.slice(0, descIndex), meaningAndDesc.slice(descIndex + 1)] :
['', meaningAndDesc];
}
return {customId, meaning, description};
}
/**
* Serialize the given `meta` and `messagePart` a string that can be used in a `$localize`
* tagged string. The format of the metadata is the same as that parsed by `parseI18nMeta()`.
*
* @param meta The metadata to serialize
* @param messagePart The first part of the tagged string
*/
export function serializeI18nHead(meta: I18nMeta, messagePart: string): string {
let metaBlock = meta.description || '';
if (meta.meaning) {
metaBlock = `${meta.meaning}|${metaBlock}`;
}
if (meta.customId || meta.legacyId) {
metaBlock = `${metaBlock}@@${meta.customId || meta.legacyId}`;
}
if (metaBlock === '') {
// There is no metaBlock, so we must ensure that any starting colon is escaped.
return escapeStartingColon(messagePart);
} else {
return `:${escapeColons(metaBlock)}:${messagePart}`;
}
}
/**
* Serialize the given `placeholderName` and `messagePart` into strings that can be used in a
* `$localize` tagged string.
*
* @param placeholderName The placeholder name to serialize
* @param messagePart The following message string after this placeholder
*/
export function serializeI18nTemplatePart(placeholderName: string, messagePart: string): string {
if (placeholderName === '') {
// There is no placeholder name block, so we must ensure that any starting colon is escaped.
return escapeStartingColon(messagePart);
} else {
return `:${placeholderName}:${messagePart}`;
}
}
// Converts i18n meta information for a message (id, description, meaning) | // to a JsDoc statement formatted as expected by the Closure compiler. | random_line_split |
|
meta.ts | _ast';
import {createI18nMessageFactory} from '../../../i18n/i18n_parser';
import * as html from '../../../ml_parser/ast';
import {DEFAULT_INTERPOLATION_CONFIG, InterpolationConfig} from '../../../ml_parser/interpolation_config';
import * as o from '../../../output/output_ast';
import {I18N_ATTR, I18N_ATTR_PREFIX, hasI18nAttrs, icuFromI18nMessage} from './util';
export type I18nMeta = {
id?: string,
customId?: string,
legacyId?: string,
description?: string,
meaning?: string
};
function setI18nRefs(html: html.Node & {i18n?: i18n.AST}, i18n: i18n.Node) {
html.i18n = i18n;
}
/**
* This visitor walks over HTML parse tree and converts information stored in
* i18n-related attributes ("i18n" and "i18n-*") into i18n meta object that is
* stored with other element's and attribute's information.
*/
export class I18nMetaVisitor implements html.Visitor {
// i18n message generation factory
private _createI18nMessage = createI18nMessageFactory(this.interpolationConfig);
constructor(
private interpolationConfig: InterpolationConfig = DEFAULT_INTERPOLATION_CONFIG,
private keepI18nAttrs: boolean = false, private i18nLegacyMessageIdFormat: string = '') {}
private _generateI18nMessage(
nodes: html.Node[], meta: string|i18n.AST = '',
visitNodeFn?: (html: html.Node, i18n: i18n.Node) => void): i18n.Message {
const parsed: I18nMeta =
typeof meta === 'string' ? parseI18nMeta(meta) : metaFromI18nMessage(meta as i18n.Message);
const message = this._createI18nMessage(
nodes, parsed.meaning || '', parsed.description || '', parsed.customId || '', visitNodeFn);
if (!message.id) {
// generate (or restore) message id if not specified in template
message.id = typeof meta !== 'string' && (meta as i18n.Message).id || decimalDigest(message);
}
if (this.i18nLegacyMessageIdFormat === 'xlf') {
message.legacyId = computeDigest(message);
} else if (
this.i18nLegacyMessageIdFormat === 'xlf2' || this.i18nLegacyMessageIdFormat === 'xmb') {
message.legacyId = computeDecimalDigest(message);
} else if (typeof meta !== 'string') {
// This occurs if we are doing the 2nd pass after whitespace removal
// In that case we want to reuse the legacy message generated in the 1st pass
// See `parseTemplate()` in `packages/compiler/src/render3/view/template.ts`
message.legacyId = (meta as i18n.Message).legacyId;
}
return message;
}
visitElement(element: html.Element, context: any): any {
if (hasI18nAttrs(element)) {
const attrs: html.Attribute[] = [];
const attrsMeta: {[key: string]: string} = {};
for (const attr of element.attrs) {
if (attr.name === I18N_ATTR) {
// root 'i18n' node attribute
const i18n = element.i18n || attr.value;
const message = this._generateI18nMessage(element.children, i18n, setI18nRefs);
// do not assign empty i18n meta
if (message.nodes.length) {
element.i18n = message;
}
} else if (attr.name.startsWith(I18N_ATTR_PREFIX)) {
// 'i18n-*' attributes
const key = attr.name.slice(I18N_ATTR_PREFIX.length);
attrsMeta[key] = attr.value;
} else {
// non-i18n attributes
attrs.push(attr);
}
}
// set i18n meta for attributes
if (Object.keys(attrsMeta).length) {
for (const attr of attrs) {
const meta = attrsMeta[attr.name];
// do not create translation for empty attributes
if (meta !== undefined && attr.value) {
attr.i18n = this._generateI18nMessage([attr], attr.i18n || meta);
}
}
}
if (!this.keepI18nAttrs) {
// update element's attributes,
// keeping only non-i18n related ones
element.attrs = attrs;
}
}
html.visitAll(this, element.children);
return element;
}
visitExpansion(expansion: html.Expansion, context: any): any {
let message;
const meta = expansion.i18n;
if (meta instanceof i18n.IcuPlaceholder) {
// set ICU placeholder name (e.g. "ICU_1"),
// generated while processing root element contents,
// so we can reference it when we output translation
const name = meta.name;
message = this._generateI18nMessage([expansion], meta);
const icu = icuFromI18nMessage(message);
icu.name = name;
} else {
// when ICU is a root level translation
message = this._generateI18nMessage([expansion], meta);
}
expansion.i18n = message;
return expansion;
}
visitText(text: html.Text, context: any): any { return text; }
visitAttribute(attribute: html.Attribute, context: any): any { return attribute; }
visitComment(comment: html.Comment, context: any): any { return comment; }
visitExpansionCase(expansionCase: html.ExpansionCase, context: any): any { return expansionCase; }
}
export function metaFromI18nMessage(message: i18n.Message, id: string | null = null): I18nMeta {
return {
id: typeof id === 'string' ? id : message.id || '',
customId: message.customId,
legacyId: message.legacyId,
meaning: message.meaning || '',
description: message.description || ''
};
}
/** I18n separators for metadata **/
const I18N_MEANING_SEPARATOR = '|';
const I18N_ID_SEPARATOR = '@@';
/**
* Parses i18n metas like:
* - "@@id",
* - "description[@@id]",
* - "meaning|description[@@id]"
* and returns an object with parsed output.
*
* @param meta String that represents i18n meta
* @returns Object with id, meaning and description fields
*/
export function parseI18nMeta(meta?: string): I18nMeta {
let customId: string|undefined;
let meaning: string|undefined;
let description: string|undefined;
if (meta) {
const idIndex = meta.indexOf(I18N_ID_SEPARATOR);
const descIndex = meta.indexOf(I18N_MEANING_SEPARATOR);
let meaningAndDesc: string;
[meaningAndDesc, customId] =
(idIndex > -1) ? [meta.slice(0, idIndex), meta.slice(idIndex + 2)] : [meta, ''];
[meaning, description] = (descIndex > -1) ?
[meaningAndDesc.slice(0, descIndex), meaningAndDesc.slice(descIndex + 1)] :
['', meaningAndDesc];
}
return {customId, meaning, description};
}
/**
* Serialize the given `meta` and `messagePart` a string that can be used in a `$localize`
* tagged string. The format of the metadata is the same as that parsed by `parseI18nMeta()`.
*
* @param meta The metadata to serialize
* @param messagePart The first part of the tagged string
*/
export function serializeI18nHead(meta: I18nMeta, messagePart: string): string |
/**
* Serialize the given `placeholderName` and `messagePart` into strings that can be used in a
* `$localize` tagged string.
*
* @param placeholderName The placeholder name to serialize
* @param messagePart The following message string after this placeholder
*/
export function serializeI18nTemplatePart(placeholderName: string, messagePart: string): string {
if (placeholderName === '') {
// There is no placeholder name block, so we must ensure that any starting colon is escaped.
return escapeStartingColon(messagePart);
} else {
return `:${placeholderName}:${message | {
let metaBlock = meta.description || '';
if (meta.meaning) {
metaBlock = `${meta.meaning}|${metaBlock}`;
}
if (meta.customId || meta.legacyId) {
metaBlock = `${metaBlock}@@${meta.customId || meta.legacyId}`;
}
if (metaBlock === '') {
// There is no metaBlock, so we must ensure that any starting colon is escaped.
return escapeStartingColon(messagePart);
} else {
return `:${escapeColons(metaBlock)}:${messagePart}`;
}
} | identifier_body |
meta.ts | n_ast';
import {createI18nMessageFactory} from '../../../i18n/i18n_parser';
import * as html from '../../../ml_parser/ast';
import {DEFAULT_INTERPOLATION_CONFIG, InterpolationConfig} from '../../../ml_parser/interpolation_config';
import * as o from '../../../output/output_ast';
import {I18N_ATTR, I18N_ATTR_PREFIX, hasI18nAttrs, icuFromI18nMessage} from './util';
export type I18nMeta = {
id?: string,
customId?: string,
legacyId?: string,
description?: string,
meaning?: string
};
function setI18nRefs(html: html.Node & {i18n?: i18n.AST}, i18n: i18n.Node) {
html.i18n = i18n;
}
/**
* This visitor walks over HTML parse tree and converts information stored in
* i18n-related attributes ("i18n" and "i18n-*") into i18n meta object that is
* stored with other element's and attribute's information.
*/
export class I18nMetaVisitor implements html.Visitor {
// i18n message generation factory
private _createI18nMessage = createI18nMessageFactory(this.interpolationConfig);
constructor(
private interpolationConfig: InterpolationConfig = DEFAULT_INTERPOLATION_CONFIG,
private keepI18nAttrs: boolean = false, private i18nLegacyMessageIdFormat: string = '') {}
private _generateI18nMessage(
nodes: html.Node[], meta: string|i18n.AST = '',
visitNodeFn?: (html: html.Node, i18n: i18n.Node) => void): i18n.Message {
const parsed: I18nMeta =
typeof meta === 'string' ? parseI18nMeta(meta) : metaFromI18nMessage(meta as i18n.Message);
const message = this._createI18nMessage(
nodes, parsed.meaning || '', parsed.description || '', parsed.customId || '', visitNodeFn);
if (!message.id) {
// generate (or restore) message id if not specified in template
message.id = typeof meta !== 'string' && (meta as i18n.Message).id || decimalDigest(message);
}
if (this.i18nLegacyMessageIdFormat === 'xlf') {
message.legacyId = computeDigest(message);
} else if (
this.i18nLegacyMessageIdFormat === 'xlf2' || this.i18nLegacyMessageIdFormat === 'xmb') {
message.legacyId = computeDecimalDigest(message);
} else if (typeof meta !== 'string') {
// This occurs if we are doing the 2nd pass after whitespace removal
// In that case we want to reuse the legacy message generated in the 1st pass
// See `parseTemplate()` in `packages/compiler/src/render3/view/template.ts`
message.legacyId = (meta as i18n.Message).legacyId;
}
return message;
}
visitElement(element: html.Element, context: any): any {
if (hasI18nAttrs(element)) {
const attrs: html.Attribute[] = [];
const attrsMeta: {[key: string]: string} = {};
for (const attr of element.attrs) {
if (attr.name === I18N_ATTR) {
// root 'i18n' node attribute
const i18n = element.i18n || attr.value;
const message = this._generateI18nMessage(element.children, i18n, setI18nRefs);
// do not assign empty i18n meta
if (message.nodes.length) {
element.i18n = message;
}
} else if (attr.name.startsWith(I18N_ATTR_PREFIX)) {
// 'i18n-*' attributes
const key = attr.name.slice(I18N_ATTR_PREFIX.length);
attrsMeta[key] = attr.value;
} else {
// non-i18n attributes
attrs.push(attr);
}
}
// set i18n meta for attributes
if (Object.keys(attrsMeta).length) {
for (const attr of attrs) {
const meta = attrsMeta[attr.name];
// do not create translation for empty attributes
if (meta !== undefined && attr.value) {
attr.i18n = this._generateI18nMessage([attr], attr.i18n || meta);
}
}
}
if (!this.keepI18nAttrs) {
// update element's attributes,
// keeping only non-i18n related ones
element.attrs = attrs;
}
}
html.visitAll(this, element.children);
return element;
}
visitExpansion(expansion: html.Expansion, context: any): any {
let message;
const meta = expansion.i18n;
if (meta instanceof i18n.IcuPlaceholder) {
// set ICU placeholder name (e.g. "ICU_1"),
// generated while processing root element contents,
// so we can reference it when we output translation
const name = meta.name;
message = this._generateI18nMessage([expansion], meta);
const icu = icuFromI18nMessage(message);
icu.name = name;
} else {
// when ICU is a root level translation
message = this._generateI18nMessage([expansion], meta);
}
expansion.i18n = message;
return expansion;
}
visitText(text: html.Text, context: any): any { return text; }
visitAttribute(attribute: html.Attribute, context: any): any { return attribute; }
visitComment(comment: html.Comment, context: any): any { return comment; }
visitExpansionCase(expansionCase: html.ExpansionCase, context: any): any { return expansionCase; }
}
export function metaFromI18nMessage(message: i18n.Message, id: string | null = null): I18nMeta {
return {
id: typeof id === 'string' ? id : message.id || '',
customId: message.customId,
legacyId: message.legacyId,
meaning: message.meaning || '',
description: message.description || ''
};
}
/** I18n separators for metadata **/
const I18N_MEANING_SEPARATOR = '|';
const I18N_ID_SEPARATOR = '@@';
/**
* Parses i18n metas like:
* - "@@id",
* - "description[@@id]",
* - "meaning|description[@@id]"
* and returns an object with parsed output.
*
* @param meta String that represents i18n meta
* @returns Object with id, meaning and description fields
*/
export function parseI18nMeta(meta?: string): I18nMeta {
let customId: string|undefined;
let meaning: string|undefined;
let description: string|undefined;
if (meta) {
const idIndex = meta.indexOf(I18N_ID_SEPARATOR);
const descIndex = meta.indexOf(I18N_MEANING_SEPARATOR);
let meaningAndDesc: string;
[meaningAndDesc, customId] =
(idIndex > -1) ? [meta.slice(0, idIndex), meta.slice(idIndex + 2)] : [meta, ''];
[meaning, description] = (descIndex > -1) ?
[meaningAndDesc.slice(0, descIndex), meaningAndDesc.slice(descIndex + 1)] :
['', meaningAndDesc];
}
return {customId, meaning, description};
}
/**
* Serialize the given `meta` and `messagePart` a string that can be used in a `$localize`
* tagged string. The format of the metadata is the same as that parsed by `parseI18nMeta()`.
*
* @param meta The metadata to serialize
* @param messagePart The first part of the tagged string
*/
export function serializeI18nHead(meta: I18nMeta, messagePart: string): string {
let metaBlock = meta.description || '';
if (meta.meaning) {
metaBlock = `${meta.meaning}|${metaBlock}`;
}
if (meta.customId || meta.legacyId) {
metaBlock = `${metaBlock}@@${meta.customId || meta.legacyId}`;
}
if (metaBlock === '') {
// There is no metaBlock, so we must ensure that any starting colon is escaped.
return escapeStartingColon(messagePart);
} else {
return `:${escapeColons(metaBlock)}:${messagePart}`;
}
}
/**
* Serialize the given `placeholderName` and `messagePart` into strings that can be used in a
* `$localize` tagged string.
*
* @param placeholderName The placeholder name to serialize
* @param messagePart The following message string after this placeholder
*/
export function serializeI18nTemplatePart(placeholderName: string, messagePart: string): string {
if (placeholderName === '') | else {
return `:${placeholderName}:${message | {
// There is no placeholder name block, so we must ensure that any starting colon is escaped.
return escapeStartingColon(messagePart);
} | conditional_block |
server-events-once.ts | // from https://github.com/hapijs/hapi/blob/master/API.md#-servereventsoncecriteria-listener
import { Request, ResponseToolkit, Server, ServerRoute } from "hapi";
const serverRoute: ServerRoute = {
path: '/',
method: 'GET',
handler(request, h) |
};
declare module 'hapi' {
interface ServerEvents {
once(event: 'test1', listener: (update: string) => void): this;
once(event: 'test2', listener: (...updates: string[]) => void): this;
}
}
const server = new Server({
port: 8000,
});
server.route(serverRoute);
server.event('test1');
server.event('test2');
server.events.once('test1', update => { console.log(update); });
server.events.once('test2', (...args) => { console.log(args); });
server.events.emit('test1', 'hello-1');
server.events.emit('test2', 'hello-2'); // Ignored
server.start();
console.log('Server started at: ' + server.info.uri);
| {
return 'oks: ' + request.path;
} | identifier_body |
server-events-once.ts | // from https://github.com/hapijs/hapi/blob/master/API.md#-servereventsoncecriteria-listener
import { Request, ResponseToolkit, Server, ServerRoute } from "hapi";
const serverRoute: ServerRoute = {
path: '/',
method: 'GET',
| (request, h) {
return 'oks: ' + request.path;
}
};
declare module 'hapi' {
interface ServerEvents {
once(event: 'test1', listener: (update: string) => void): this;
once(event: 'test2', listener: (...updates: string[]) => void): this;
}
}
const server = new Server({
port: 8000,
});
server.route(serverRoute);
server.event('test1');
server.event('test2');
server.events.once('test1', update => { console.log(update); });
server.events.once('test2', (...args) => { console.log(args); });
server.events.emit('test1', 'hello-1');
server.events.emit('test2', 'hello-2'); // Ignored
server.start();
console.log('Server started at: ' + server.info.uri);
| handler | identifier_name |
server-events-once.ts | // from https://github.com/hapijs/hapi/blob/master/API.md#-servereventsoncecriteria-listener
import { Request, ResponseToolkit, Server, ServerRoute } from "hapi"; | path: '/',
method: 'GET',
handler(request, h) {
return 'oks: ' + request.path;
}
};
declare module 'hapi' {
interface ServerEvents {
once(event: 'test1', listener: (update: string) => void): this;
once(event: 'test2', listener: (...updates: string[]) => void): this;
}
}
const server = new Server({
port: 8000,
});
server.route(serverRoute);
server.event('test1');
server.event('test2');
server.events.once('test1', update => { console.log(update); });
server.events.once('test2', (...args) => { console.log(args); });
server.events.emit('test1', 'hello-1');
server.events.emit('test2', 'hello-2'); // Ignored
server.start();
console.log('Server started at: ' + server.info.uri); |
const serverRoute: ServerRoute = { | random_line_split |
main.rs | use std::thread;
use std::sync::{Mutex, Arc};
struct Table {
forks: Vec<Mutex<()>>
}
struct Philosopher {
name: String,
left_index: usize,
right_index: usize
}
impl Philosopher {
fn new(name: &str, left_index: usize, right_index: usize) -> Philosopher {
Philosopher {
name: name.to_string(),
left_index: left_index,
right_index: right_index
}
}
fn eat(&self, table: &Table) |
}
fn main() {
let table = Arc::new(Table {
forks: vec![
Mutex::new(()),
Mutex::new(()),
Mutex::new(()),
Mutex::new(()),
Mutex::new(())
]
});
let philosophers = vec![
Philosopher::new("Philosopher 1", 0, 1),
Philosopher::new("Philosopher 2", 1, 2),
Philosopher::new("Philosopher 3", 2, 3),
Philosopher::new("Philosopher 4", 3, 4),
Philosopher::new("Philosopher 5", 0, 4)
];
let thread_handles: Vec<_> = philosophers.into_iter().map(|p| {
let table = table.clone();
thread::spawn(move || {
p.eat(&table);
})
}).collect();
for handle in thread_handles {
handle.join().unwrap();
}
} | {
let _left = table.forks[self.left_index].lock().unwrap();
let _right = table.forks[self.right_index].lock().unwrap();
println!("{} started eating", self.name);
thread::sleep_ms(1000);
println!("{} is done eating.", self.name);
} | identifier_body |
main.rs | use std::thread;
use std::sync::{Mutex, Arc};
struct Table {
forks: Vec<Mutex<()>>
}
struct | {
name: String,
left_index: usize,
right_index: usize
}
impl Philosopher {
fn new(name: &str, left_index: usize, right_index: usize) -> Philosopher {
Philosopher {
name: name.to_string(),
left_index: left_index,
right_index: right_index
}
}
fn eat(&self, table: &Table) {
let _left = table.forks[self.left_index].lock().unwrap();
let _right = table.forks[self.right_index].lock().unwrap();
println!("{} started eating", self.name);
thread::sleep_ms(1000);
println!("{} is done eating.", self.name);
}
}
fn main() {
let table = Arc::new(Table {
forks: vec![
Mutex::new(()),
Mutex::new(()),
Mutex::new(()),
Mutex::new(()),
Mutex::new(())
]
});
let philosophers = vec![
Philosopher::new("Philosopher 1", 0, 1),
Philosopher::new("Philosopher 2", 1, 2),
Philosopher::new("Philosopher 3", 2, 3),
Philosopher::new("Philosopher 4", 3, 4),
Philosopher::new("Philosopher 5", 0, 4)
];
let thread_handles: Vec<_> = philosophers.into_iter().map(|p| {
let table = table.clone();
thread::spawn(move || {
p.eat(&table);
})
}).collect();
for handle in thread_handles {
handle.join().unwrap();
}
} | Philosopher | identifier_name |
main.rs | use std::thread;
use std::sync::{Mutex, Arc};
struct Table {
forks: Vec<Mutex<()>>
}
struct Philosopher {
name: String,
left_index: usize,
right_index: usize
}
impl Philosopher {
fn new(name: &str, left_index: usize, right_index: usize) -> Philosopher {
Philosopher {
name: name.to_string(),
left_index: left_index,
right_index: right_index
}
}
fn eat(&self, table: &Table) {
let _left = table.forks[self.left_index].lock().unwrap();
let _right = table.forks[self.right_index].lock().unwrap();
println!("{} started eating", self.name);
thread::sleep_ms(1000);
println!("{} is done eating.", self.name);
} | fn main() {
let table = Arc::new(Table {
forks: vec![
Mutex::new(()),
Mutex::new(()),
Mutex::new(()),
Mutex::new(()),
Mutex::new(())
]
});
let philosophers = vec![
Philosopher::new("Philosopher 1", 0, 1),
Philosopher::new("Philosopher 2", 1, 2),
Philosopher::new("Philosopher 3", 2, 3),
Philosopher::new("Philosopher 4", 3, 4),
Philosopher::new("Philosopher 5", 0, 4)
];
let thread_handles: Vec<_> = philosophers.into_iter().map(|p| {
let table = table.clone();
thread::spawn(move || {
p.eat(&table);
})
}).collect();
for handle in thread_handles {
handle.join().unwrap();
}
} | }
| random_line_split |
promote.rs | // Copyright (c) 2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Promote a package to a specified channel.
//!
//! # Examples
//!
//! ```bash
//! $ hab pkg promote acme/redis/2.0.7/2112010203120101 stable
//! ```
//!//! This will promote the acme package specified to the stable channel.
//!
//! Notes:
//! The package should already have been uploaded to Builder.
//! If the specified channel does not exist, it will be created.
//!
use common::ui::{Status, UIWriter, UI};
use depot_client::{self, Client};
use hcore::package::PackageIdent;
use hyper::status::StatusCode;
use {PRODUCT, VERSION};
use error::{Error, Result};
/// Promote a package to the specified channel.
///
/// # Failures
///
/// * Fails if it cannot find the specified package in Builder
pub fn start(
ui: &mut UI,
url: &str,
ident: &PackageIdent,
channel: &str,
token: &str,
) -> Result<()> | return Err(Error::from(e));
}
}
ui.status(Status::Promoted, ident)?;
Ok(())
}
| {
let depot_client = Client::new(url, PRODUCT, VERSION, None)?;
ui.begin(format!("Promoting {} to channel '{}'", ident, channel))?;
if channel != "stable" && channel != "unstable" {
match depot_client.create_channel(&ident.origin, channel, token) {
Ok(_) => (),
Err(depot_client::Error::APIError(StatusCode::Conflict, _)) => (),
Err(e) => {
println!("Failed to create '{}' channel: {:?}", channel, e);
return Err(Error::from(e));
}
};
}
match depot_client.promote_package(ident, channel, token) {
Ok(_) => (),
Err(e) => {
println!("Failed to promote '{}': {:?}", ident, e); | identifier_body |
promote.rs | // Copyright (c) 2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Promote a package to a specified channel.
//!
//! # Examples
//!
//! ```bash
//! $ hab pkg promote acme/redis/2.0.7/2112010203120101 stable
//! ```
//!//! This will promote the acme package specified to the stable channel.
//!
//! Notes:
//! The package should already have been uploaded to Builder.
//! If the specified channel does not exist, it will be created.
//!
use common::ui::{Status, UIWriter, UI};
use depot_client::{self, Client};
use hcore::package::PackageIdent;
use hyper::status::StatusCode;
use {PRODUCT, VERSION};
use error::{Error, Result};
/// Promote a package to the specified channel.
///
/// # Failures
///
/// * Fails if it cannot find the specified package in Builder
pub fn | (
ui: &mut UI,
url: &str,
ident: &PackageIdent,
channel: &str,
token: &str,
) -> Result<()> {
let depot_client = Client::new(url, PRODUCT, VERSION, None)?;
ui.begin(format!("Promoting {} to channel '{}'", ident, channel))?;
if channel != "stable" && channel != "unstable" {
match depot_client.create_channel(&ident.origin, channel, token) {
Ok(_) => (),
Err(depot_client::Error::APIError(StatusCode::Conflict, _)) => (),
Err(e) => {
println!("Failed to create '{}' channel: {:?}", channel, e);
return Err(Error::from(e));
}
};
}
match depot_client.promote_package(ident, channel, token) {
Ok(_) => (),
Err(e) => {
println!("Failed to promote '{}': {:?}", ident, e);
return Err(Error::from(e));
}
}
ui.status(Status::Promoted, ident)?;
Ok(())
}
| start | identifier_name |
promote.rs | // Copyright (c) 2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Promote a package to a specified channel.
//!
//! # Examples | //! ```
//!//! This will promote the acme package specified to the stable channel.
//!
//! Notes:
//! The package should already have been uploaded to Builder.
//! If the specified channel does not exist, it will be created.
//!
use common::ui::{Status, UIWriter, UI};
use depot_client::{self, Client};
use hcore::package::PackageIdent;
use hyper::status::StatusCode;
use {PRODUCT, VERSION};
use error::{Error, Result};
/// Promote a package to the specified channel.
///
/// # Failures
///
/// * Fails if it cannot find the specified package in Builder
pub fn start(
ui: &mut UI,
url: &str,
ident: &PackageIdent,
channel: &str,
token: &str,
) -> Result<()> {
let depot_client = Client::new(url, PRODUCT, VERSION, None)?;
ui.begin(format!("Promoting {} to channel '{}'", ident, channel))?;
if channel != "stable" && channel != "unstable" {
match depot_client.create_channel(&ident.origin, channel, token) {
Ok(_) => (),
Err(depot_client::Error::APIError(StatusCode::Conflict, _)) => (),
Err(e) => {
println!("Failed to create '{}' channel: {:?}", channel, e);
return Err(Error::from(e));
}
};
}
match depot_client.promote_package(ident, channel, token) {
Ok(_) => (),
Err(e) => {
println!("Failed to promote '{}': {:?}", ident, e);
return Err(Error::from(e));
}
}
ui.status(Status::Promoted, ident)?;
Ok(())
} | //!
//! ```bash
//! $ hab pkg promote acme/redis/2.0.7/2112010203120101 stable | random_line_split |
skyway-tests.ts | const peerByOption: Peer = new Peer({
key: 'peerKey',
debug: 3
});
peerByOption.listAllPeers((items) => {
for (let item in items) {
console.log(decodeURI(items[item]));
}
});
const peerByIdAndOption: Peer = new Peer('peerid', {
key: 'peerKey',
debug: 3
});
let id = peerByOption.id;
let connections = peerByOption.connections;
peerByOption.disconnect();
peerByOption.destroy();
let connection = peerByOption.connect("id", {
label: 'chat',
serialization: 'none',
metadata: { message: 'hi i want to chat with you!' }
});
let call = peerByOption.call('callto-id', (window as any).localStream);
let openHandler = () => console.log("open");
peerByOption.on("open", openHandler); | peerByOption.on("call", (media) => console.log("call"));
peerByOption.on("close", () => console.log("close"));
peerByOption.on("disconnected", () => console.log("disconnected"));
peerByOption.on("error", (err) => console.log(err)); | peerByOption.on("connection", (c) => console.log("connection")); | random_line_split |
expandFile.spec.ts | /// <reference path="../../typings/tsd.d.ts" />
import mocha = require('mocha')
import path = require('path')
import async = require('async')
import {readFile} from 'fs'
import {expect} from 'chai'
import {expandFile} from '../../lib/expand'
describe('expandFile', () => {
it('should stream file with expanded content', done => {
var expandedStream = expandFile(path.join(__dirname, '../testFiles/test1.txt'))
expandedStream.on('data', (data: Buffer) => {
var line = data.toString()
expect(line.match(/\n/g) || []).length.to.be.at.most(1)
})
expandedStream.on('end', done);
})
it('should restitute big files without tabs', done => {
var output = []
var filePath = path.join(__dirname, '../testFiles/lipsum.txt')
var expandedStream = expandFile(filePath)
expandedStream.on('data', (data: Buffer) => {
var line = data.toString()
output.push(line)
})
var result
var original
async.parallel([
next => {
expandedStream.on('end', () => next(null)) | next => {
readFile(filePath, (err, data: Buffer) => {
original = data.toString()
result = output.join('')
next(null)
})
}
], () => {
expect(result).to.equal(original)
done()
});
})
}) | }, | random_line_split |
train.py | import numpy as np
from model import GAN, discriminator_pixel, discriminator_image, discriminator_patch1, discriminator_patch2, generator, discriminator_dummy
import utils
import os
from PIL import Image
import argparse
from keras import backend as K
# arrange arguments
parser=argparse.ArgumentParser()
parser.add_argument(
'--ratio_gan2seg',
type=int,
help="ratio of gan loss to seg loss",
required=True
)
parser.add_argument(
'--gpu_index',
type=str,
help="gpu index",
required=True
)
parser.add_argument(
'--discriminator',
type=str,
help="type of discriminator",
required=True
)
parser.add_argument(
'--batch_size',
type=int,
help="batch size",
required=True
)
parser.add_argument(
'--dataset',
type=str,
help="dataset name",
required=True
)
FLAGS,_= parser.parse_known_args()
# training settings
os.environ['CUDA_VISIBLE_DEVICES']=FLAGS.gpu_index
n_rounds=10
batch_size=FLAGS.batch_size
n_filters_d=32
n_filters_g=32
val_ratio=0.05
init_lr=2e-4
schedules={'lr_decay':{}, # learning rate and step have the same decay schedule (not necessarily the values)
'step_decay':{}}
alpha_recip=1./FLAGS.ratio_gan2seg if FLAGS.ratio_gan2seg>0 else 0
rounds_for_evaluation=range(n_rounds)
# set dataset
dataset=FLAGS.dataset
img_size= (640,640) if dataset=='DRIVE' else (720,720) # (h,w) [original img size => DRIVE : (584, 565), STARE : (605,700) ]
img_out_dir="{}/segmentation_results_{}_{}".format(FLAGS.dataset,FLAGS.discriminator,FLAGS.ratio_gan2seg)
model_out_dir="{}/model_{}_{}".format(FLAGS.dataset,FLAGS.discriminator,FLAGS.ratio_gan2seg)
auc_out_dir="{}/auc_{}_{}".format(FLAGS.dataset,FLAGS.discriminator,FLAGS.ratio_gan2seg)
train_dir="../data/{}/training/".format(dataset)
test_dir="../data/{}/test/".format(dataset)
if not os.path.isdir(img_out_dir):
os.makedirs(img_out_dir)
if not os.path.isdir(model_out_dir):
os.makedirs(model_out_dir)
if not os.path.isdir(auc_out_dir):
os.makedirs(auc_out_dir)
# set training and validation dataset
train_imgs, train_vessels =utils.get_imgs(train_dir, augmentation=True, img_size=img_size, dataset=dataset)
train_vessels=np.expand_dims(train_vessels, axis=3)
n_all_imgs=train_imgs.shape[0]
n_train_imgs=int((1-val_ratio)*n_all_imgs)
train_indices=np.random.choice(n_all_imgs,n_train_imgs,replace=False)
train_batch_fetcher=utils.TrainBatchFetcher(train_imgs[train_indices,...], train_vessels[train_indices,...], batch_size)
val_imgs, val_vessels=train_imgs[np.delete(range(n_all_imgs),train_indices),...], train_vessels[np.delete(range(n_all_imgs),train_indices),...]
# set test dataset
test_imgs, test_vessels, test_masks=utils.get_imgs(test_dir, augmentation=False, img_size=img_size, dataset=dataset, mask=True)
# create networks
g = generator(img_size, n_filters_g)
if FLAGS.discriminator=='pixel':
d, d_out_shape = discriminator_pixel(img_size, n_filters_d,init_lr)
elif FLAGS.discriminator=='patch1':
d, d_out_shape = discriminator_patch1(img_size, n_filters_d,init_lr)
elif FLAGS.discriminator=='patch2':
d, d_out_shape = discriminator_patch2(img_size, n_filters_d,init_lr)
elif FLAGS.discriminator=='image':
|
else:
d, d_out_shape = discriminator_dummy(img_size, n_filters_d,init_lr)
gan=GAN(g,d,img_size, n_filters_g, n_filters_d,alpha_recip, init_lr)
g.summary()
d.summary()
gan.summary()
# start training
scheduler=utils.Scheduler(n_train_imgs//batch_size, n_train_imgs//batch_size, schedules, init_lr) if alpha_recip>0 else utils.Scheduler(0, n_train_imgs//batch_size, schedules, init_lr)
print "training {} images :".format(n_train_imgs)
for n_round in range(n_rounds):
# train D
utils.make_trainable(d, True)
for i in range(scheduler.get_dsteps()):
real_imgs, real_vessels = next(train_batch_fetcher)
d_x_batch, d_y_batch = utils.input2discriminator(real_imgs, real_vessels, g.predict(real_imgs,batch_size=batch_size), d_out_shape)
d.train_on_batch(d_x_batch, d_y_batch)
# train G (freeze discriminator)
utils.make_trainable(d, False)
for i in range(scheduler.get_gsteps()):
real_imgs, real_vessels = next(train_batch_fetcher)
g_x_batch, g_y_batch=utils.input2gan(real_imgs, real_vessels, d_out_shape)
gan.train_on_batch(g_x_batch, g_y_batch)
# evaluate on validation set
if n_round in rounds_for_evaluation:
# D
d_x_test, d_y_test=utils.input2discriminator(val_imgs, val_vessels, g.predict(val_imgs,batch_size=batch_size), d_out_shape)
loss, acc=d.evaluate(d_x_test,d_y_test, batch_size=batch_size, verbose=0)
utils.print_metrics(n_round+1, loss=loss, acc=acc, type='D')
# G
gan_x_test, gan_y_test=utils.input2gan(val_imgs, val_vessels, d_out_shape)
loss,acc=gan.evaluate(gan_x_test,gan_y_test, batch_size=batch_size, verbose=0)
utils.print_metrics(n_round+1, acc=acc, loss=loss, type='GAN')
# save the model and weights with the best validation loss
with open(os.path.join(model_out_dir,"g_{}_{}_{}.json".format(n_round,FLAGS.discriminator,FLAGS.ratio_gan2seg)),'w') as f:
f.write(g.to_json())
g.save_weights(os.path.join(model_out_dir,"g_{}_{}_{}.h5".format(n_round,FLAGS.discriminator,FLAGS.ratio_gan2seg)))
# update step sizes, learning rates
scheduler.update_steps(n_round)
K.set_value(d.optimizer.lr, scheduler.get_lr())
K.set_value(gan.optimizer.lr, scheduler.get_lr())
# evaluate on test images
if n_round in rounds_for_evaluation:
generated=g.predict(test_imgs,batch_size=batch_size)
generated=np.squeeze(generated, axis=3)
vessels_in_mask, generated_in_mask = utils.pixel_values_in_mask(test_vessels, generated , test_masks)
auc_roc=utils.AUC_ROC(vessels_in_mask,generated_in_mask,os.path.join(auc_out_dir,"auc_roc_{}.npy".format(n_round)))
auc_pr=utils.AUC_PR(vessels_in_mask, generated_in_mask,os.path.join(auc_out_dir,"auc_pr_{}.npy".format(n_round)))
utils.print_metrics(n_round+1, auc_pr=auc_pr, auc_roc=auc_roc, type='TESTING')
# print test images
segmented_vessel=utils.remain_in_mask(generated, test_masks)
for index in range(segmented_vessel.shape[0]):
Image.fromarray((segmented_vessel[index,:,:]*255).astype(np.uint8)).save(os.path.join(img_out_dir,str(n_round)+"_{:02}_segmented.png".format(index+1)))
| d, d_out_shape = discriminator_image(img_size, n_filters_d,init_lr) | conditional_block |
train.py | import numpy as np
from model import GAN, discriminator_pixel, discriminator_image, discriminator_patch1, discriminator_patch2, generator, discriminator_dummy
import utils
import os
from PIL import Image
import argparse
from keras import backend as K
# arrange arguments
parser=argparse.ArgumentParser()
parser.add_argument(
'--ratio_gan2seg',
type=int,
help="ratio of gan loss to seg loss", | )
parser.add_argument(
'--gpu_index',
type=str,
help="gpu index",
required=True
)
parser.add_argument(
'--discriminator',
type=str,
help="type of discriminator",
required=True
)
parser.add_argument(
'--batch_size',
type=int,
help="batch size",
required=True
)
parser.add_argument(
'--dataset',
type=str,
help="dataset name",
required=True
)
FLAGS,_= parser.parse_known_args()
# training settings
os.environ['CUDA_VISIBLE_DEVICES']=FLAGS.gpu_index
n_rounds=10
batch_size=FLAGS.batch_size
n_filters_d=32
n_filters_g=32
val_ratio=0.05
init_lr=2e-4
schedules={'lr_decay':{}, # learning rate and step have the same decay schedule (not necessarily the values)
'step_decay':{}}
alpha_recip=1./FLAGS.ratio_gan2seg if FLAGS.ratio_gan2seg>0 else 0
rounds_for_evaluation=range(n_rounds)
# set dataset
dataset=FLAGS.dataset
img_size= (640,640) if dataset=='DRIVE' else (720,720) # (h,w) [original img size => DRIVE : (584, 565), STARE : (605,700) ]
img_out_dir="{}/segmentation_results_{}_{}".format(FLAGS.dataset,FLAGS.discriminator,FLAGS.ratio_gan2seg)
model_out_dir="{}/model_{}_{}".format(FLAGS.dataset,FLAGS.discriminator,FLAGS.ratio_gan2seg)
auc_out_dir="{}/auc_{}_{}".format(FLAGS.dataset,FLAGS.discriminator,FLAGS.ratio_gan2seg)
train_dir="../data/{}/training/".format(dataset)
test_dir="../data/{}/test/".format(dataset)
if not os.path.isdir(img_out_dir):
os.makedirs(img_out_dir)
if not os.path.isdir(model_out_dir):
os.makedirs(model_out_dir)
if not os.path.isdir(auc_out_dir):
os.makedirs(auc_out_dir)
# set training and validation dataset
train_imgs, train_vessels =utils.get_imgs(train_dir, augmentation=True, img_size=img_size, dataset=dataset)
train_vessels=np.expand_dims(train_vessels, axis=3)
n_all_imgs=train_imgs.shape[0]
n_train_imgs=int((1-val_ratio)*n_all_imgs)
train_indices=np.random.choice(n_all_imgs,n_train_imgs,replace=False)
train_batch_fetcher=utils.TrainBatchFetcher(train_imgs[train_indices,...], train_vessels[train_indices,...], batch_size)
val_imgs, val_vessels=train_imgs[np.delete(range(n_all_imgs),train_indices),...], train_vessels[np.delete(range(n_all_imgs),train_indices),...]
# set test dataset
test_imgs, test_vessels, test_masks=utils.get_imgs(test_dir, augmentation=False, img_size=img_size, dataset=dataset, mask=True)
# create networks
g = generator(img_size, n_filters_g)
if FLAGS.discriminator=='pixel':
d, d_out_shape = discriminator_pixel(img_size, n_filters_d,init_lr)
elif FLAGS.discriminator=='patch1':
d, d_out_shape = discriminator_patch1(img_size, n_filters_d,init_lr)
elif FLAGS.discriminator=='patch2':
d, d_out_shape = discriminator_patch2(img_size, n_filters_d,init_lr)
elif FLAGS.discriminator=='image':
d, d_out_shape = discriminator_image(img_size, n_filters_d,init_lr)
else:
d, d_out_shape = discriminator_dummy(img_size, n_filters_d,init_lr)
gan=GAN(g,d,img_size, n_filters_g, n_filters_d,alpha_recip, init_lr)
g.summary()
d.summary()
gan.summary()
# start training
scheduler=utils.Scheduler(n_train_imgs//batch_size, n_train_imgs//batch_size, schedules, init_lr) if alpha_recip>0 else utils.Scheduler(0, n_train_imgs//batch_size, schedules, init_lr)
print "training {} images :".format(n_train_imgs)
for n_round in range(n_rounds):
# train D
utils.make_trainable(d, True)
for i in range(scheduler.get_dsteps()):
real_imgs, real_vessels = next(train_batch_fetcher)
d_x_batch, d_y_batch = utils.input2discriminator(real_imgs, real_vessels, g.predict(real_imgs,batch_size=batch_size), d_out_shape)
d.train_on_batch(d_x_batch, d_y_batch)
# train G (freeze discriminator)
utils.make_trainable(d, False)
for i in range(scheduler.get_gsteps()):
real_imgs, real_vessels = next(train_batch_fetcher)
g_x_batch, g_y_batch=utils.input2gan(real_imgs, real_vessels, d_out_shape)
gan.train_on_batch(g_x_batch, g_y_batch)
# evaluate on validation set
if n_round in rounds_for_evaluation:
# D
d_x_test, d_y_test=utils.input2discriminator(val_imgs, val_vessels, g.predict(val_imgs,batch_size=batch_size), d_out_shape)
loss, acc=d.evaluate(d_x_test,d_y_test, batch_size=batch_size, verbose=0)
utils.print_metrics(n_round+1, loss=loss, acc=acc, type='D')
# G
gan_x_test, gan_y_test=utils.input2gan(val_imgs, val_vessels, d_out_shape)
loss,acc=gan.evaluate(gan_x_test,gan_y_test, batch_size=batch_size, verbose=0)
utils.print_metrics(n_round+1, acc=acc, loss=loss, type='GAN')
# save the model and weights with the best validation loss
with open(os.path.join(model_out_dir,"g_{}_{}_{}.json".format(n_round,FLAGS.discriminator,FLAGS.ratio_gan2seg)),'w') as f:
f.write(g.to_json())
g.save_weights(os.path.join(model_out_dir,"g_{}_{}_{}.h5".format(n_round,FLAGS.discriminator,FLAGS.ratio_gan2seg)))
# update step sizes, learning rates
scheduler.update_steps(n_round)
K.set_value(d.optimizer.lr, scheduler.get_lr())
K.set_value(gan.optimizer.lr, scheduler.get_lr())
# evaluate on test images
if n_round in rounds_for_evaluation:
generated=g.predict(test_imgs,batch_size=batch_size)
generated=np.squeeze(generated, axis=3)
vessels_in_mask, generated_in_mask = utils.pixel_values_in_mask(test_vessels, generated , test_masks)
auc_roc=utils.AUC_ROC(vessels_in_mask,generated_in_mask,os.path.join(auc_out_dir,"auc_roc_{}.npy".format(n_round)))
auc_pr=utils.AUC_PR(vessels_in_mask, generated_in_mask,os.path.join(auc_out_dir,"auc_pr_{}.npy".format(n_round)))
utils.print_metrics(n_round+1, auc_pr=auc_pr, auc_roc=auc_roc, type='TESTING')
# print test images
segmented_vessel=utils.remain_in_mask(generated, test_masks)
for index in range(segmented_vessel.shape[0]):
Image.fromarray((segmented_vessel[index,:,:]*255).astype(np.uint8)).save(os.path.join(img_out_dir,str(n_round)+"_{:02}_segmented.png".format(index+1))) | required=True | random_line_split |
baidu.js | var mongoose = require('mongoose');
var Post = require('../models/Post').Post;
var config = require('../config');
var http = require('http');
var site = "";
var token = "";
var options = {
host: 'data.zz.baidu.com',
path: '/urls?site=' + site + '&token=' + token,
method: 'POST',
headers: {
'Accept': '*/*', | var callback = function (res) {
var buffers = [];
var nread = 0;
res.on('data', function (chunk) {
buffers.push(chunk);
nread += chunk.length;
});
res.on('end', function () {
console.log(buffers);
});
}
var req = http.request(options, callback);
mongoose.connect(config.db.production);
var db = mongoose.connection;
db.once('open', function (callback) {
Post.find({}, {pid: 1})
.exec(function (err, posts) {
var urls = posts.map(function (post) {
return 'http://' + config.site.url + '/post/' + post.pid;
});
var data = urls.join('\n');
console.log(data,urls.length);
req.write(data);
req.end();
});
}); | 'Connection': 'Keep-Alive',
'User-Agent': 'curl/7.12.1 '
}
};
| random_line_split |
search-test.ts | import { assert } from 'chai';
import * as figc from 'figc';
import { createClient } from '../lib/solr';
import { dataOk } from './utils/sassert'; | const client = createClient(config.client);
[config.client.path, config.client.core].join('/').replace(/\/$/, '');
describe('Client', function () {
describe('#search("q=*:*")', function () {
it('should find all documents', async function () {
const data = await client.search('q=*:*');
dataOk(data);
assert.deepEqual({ q: '*:*', wt: 'json' }, data.responseHeader.params);
});
});
describe('#search(query)', function () {
it('should find documents describe in the `query` instance of `Query`', async function () {
const query = client.query().q({
title_t: 'test',
});
const data = await client.search(query);
dataOk(data);
});
});
}); |
const config = figc(__dirname + '/config.json'); | random_line_split |
ga_event_viewer.user.js | // ==UserScript==
// @name GAイベントの引数をコンソール出力
// @namespace https://github.com/hosoyama-mediba/userscript
// @version 0.5
// @description GAのイベントパラメータをコンソールにデバッグ出力します
// @author Terunobu Hosoyama <[email protected]>
// @match http://*/*
// @match https://*/*
// @require https://ajax.googleapis.com/ajax/libs/jquery/2.1.3/jquery.min.js
// @grant none
// @noframes
// ==/UserScript==
| console.log('GAイベントの引数をデバッグ出力します');
var gaOrig = window.ga;
window.ga = function(...args) {
console.log('ga:', ...args);
gaOrig.apply(window, arguments);
};
clearInterval(gaTimer);
};
gaTimer = setInterval(function() {
if (typeof window.ga !== 'undefined') {
window.ga(gaReadyCallback);
}
}, 500);
`);
$('body').append(script);
})(jQuery.noConflict(true)); | (function($) {
var script = $('<script/>').text(`
var gaTimer;
var gaReadyCallback = function() { | random_line_split |
setting.js | // ----------------------------------------------------------------------------
// Module initialization
var Config = require("config").config;
var utils = require("utils");
var validators = require("validators");
// ----------------------------------------------------------------------------
// Setting class.
function Setting() {
$.title_label.text_id = this.args.title_id;
$.title_label.text = Alloy.Globals.L(this.args.title_id);
// This will trigger UI update. Ugly solution I know.
$.setting.top = this.args.top || 0;
if (typeof this.args.width !== 'undefined') {
$.setting.width = this.args.width;
}
// Listen to the "SettingChanges" event. It simply updates the string
// representation of the property that the view shows.
this.addSettingsChangedHandler(this.updateValue);
}
// Inherits from Controller...
Setting.prototype = new (require("controller"))(
arguments[0], [$.title_label]
);
// Read the actual value of the property that this setting is responsible for
Setting.prototype.updateValue = function() {
$.setting_value.text =
Alloy.Globals.L(Config.getProperty(this.args.propertyName).stringValue());
};
Setting.prototype.handleClick = function (initial, use, validator) {
var self = this;
var arg = {
useValue: function(value) {
if (eval("validators." + validator + "(value)")) {
use(self.args.propertyName, value); | },
value: initial,
validator: validator
};
utils.openWindowWithBottomClicksDisabled(this.args.controllerName, arg);
};
Setting.prototype.clickHandler = function() {
var initial = Config.getProperty(this.args.propertyName).get();
var validator = typeof this.args.validator !== 'undefined' ?
this.args.validator : "ok";
function use(n, v) {
Config.getProperty(n).set(v);
}
this.handleClick(initial, use, validator);
};
// ----------------------------------------------------------------------------
// Create the object representing this particular setting
var setting = new Setting();
// Handling button click event
function onClick(e) {
setting.clickHandler();
} | self.updateValue();
} else {
alert(Alloy.Globals.L("illegal_value"));
} | random_line_split |
setting.js | // ----------------------------------------------------------------------------
// Module initialization
var Config = require("config").config;
var utils = require("utils");
var validators = require("validators");
// ----------------------------------------------------------------------------
// Setting class.
function Setting() {
$.title_label.text_id = this.args.title_id;
$.title_label.text = Alloy.Globals.L(this.args.title_id);
// This will trigger UI update. Ugly solution I know.
$.setting.top = this.args.top || 0;
if (typeof this.args.width !== 'undefined') {
$.setting.width = this.args.width;
}
// Listen to the "SettingChanges" event. It simply updates the string
// representation of the property that the view shows.
this.addSettingsChangedHandler(this.updateValue);
}
// Inherits from Controller...
Setting.prototype = new (require("controller"))(
arguments[0], [$.title_label]
);
// Read the actual value of the property that this setting is responsible for
Setting.prototype.updateValue = function() {
$.setting_value.text =
Alloy.Globals.L(Config.getProperty(this.args.propertyName).stringValue());
};
Setting.prototype.handleClick = function (initial, use, validator) {
var self = this;
var arg = {
useValue: function(value) {
if (eval("validators." + validator + "(value)")) {
use(self.args.propertyName, value);
self.updateValue();
} else {
alert(Alloy.Globals.L("illegal_value"));
}
},
value: initial,
validator: validator
};
utils.openWindowWithBottomClicksDisabled(this.args.controllerName, arg);
};
Setting.prototype.clickHandler = function() {
var initial = Config.getProperty(this.args.propertyName).get();
var validator = typeof this.args.validator !== 'undefined' ?
this.args.validator : "ok";
function use(n, v) {
Config.getProperty(n).set(v);
}
this.handleClick(initial, use, validator);
};
// ----------------------------------------------------------------------------
// Create the object representing this particular setting
var setting = new Setting();
// Handling button click event
function | (e) {
setting.clickHandler();
}
| onClick | identifier_name |
setting.js | // ----------------------------------------------------------------------------
// Module initialization
var Config = require("config").config;
var utils = require("utils");
var validators = require("validators");
// ----------------------------------------------------------------------------
// Setting class.
function Setting() |
// Inherits from Controller...
Setting.prototype = new (require("controller"))(
arguments[0], [$.title_label]
);
// Read the actual value of the property that this setting is responsible for
Setting.prototype.updateValue = function() {
$.setting_value.text =
Alloy.Globals.L(Config.getProperty(this.args.propertyName).stringValue());
};
Setting.prototype.handleClick = function (initial, use, validator) {
var self = this;
var arg = {
useValue: function(value) {
if (eval("validators." + validator + "(value)")) {
use(self.args.propertyName, value);
self.updateValue();
} else {
alert(Alloy.Globals.L("illegal_value"));
}
},
value: initial,
validator: validator
};
utils.openWindowWithBottomClicksDisabled(this.args.controllerName, arg);
};
Setting.prototype.clickHandler = function() {
var initial = Config.getProperty(this.args.propertyName).get();
var validator = typeof this.args.validator !== 'undefined' ?
this.args.validator : "ok";
function use(n, v) {
Config.getProperty(n).set(v);
}
this.handleClick(initial, use, validator);
};
// ----------------------------------------------------------------------------
// Create the object representing this particular setting
var setting = new Setting();
// Handling button click event
function onClick(e) {
setting.clickHandler();
}
| {
$.title_label.text_id = this.args.title_id;
$.title_label.text = Alloy.Globals.L(this.args.title_id);
// This will trigger UI update. Ugly solution I know.
$.setting.top = this.args.top || 0;
if (typeof this.args.width !== 'undefined') {
$.setting.width = this.args.width;
}
// Listen to the "SettingChanges" event. It simply updates the string
// representation of the property that the view shows.
this.addSettingsChangedHandler(this.updateValue);
} | identifier_body |
setting.js | // ----------------------------------------------------------------------------
// Module initialization
var Config = require("config").config;
var utils = require("utils");
var validators = require("validators");
// ----------------------------------------------------------------------------
// Setting class.
function Setting() {
$.title_label.text_id = this.args.title_id;
$.title_label.text = Alloy.Globals.L(this.args.title_id);
// This will trigger UI update. Ugly solution I know.
$.setting.top = this.args.top || 0;
if (typeof this.args.width !== 'undefined') |
// Listen to the "SettingChanges" event. It simply updates the string
// representation of the property that the view shows.
this.addSettingsChangedHandler(this.updateValue);
}
// Inherits from Controller...
Setting.prototype = new (require("controller"))(
arguments[0], [$.title_label]
);
// Read the actual value of the property that this setting is responsible for
Setting.prototype.updateValue = function() {
$.setting_value.text =
Alloy.Globals.L(Config.getProperty(this.args.propertyName).stringValue());
};
Setting.prototype.handleClick = function (initial, use, validator) {
var self = this;
var arg = {
useValue: function(value) {
if (eval("validators." + validator + "(value)")) {
use(self.args.propertyName, value);
self.updateValue();
} else {
alert(Alloy.Globals.L("illegal_value"));
}
},
value: initial,
validator: validator
};
utils.openWindowWithBottomClicksDisabled(this.args.controllerName, arg);
};
Setting.prototype.clickHandler = function() {
var initial = Config.getProperty(this.args.propertyName).get();
var validator = typeof this.args.validator !== 'undefined' ?
this.args.validator : "ok";
function use(n, v) {
Config.getProperty(n).set(v);
}
this.handleClick(initial, use, validator);
};
// ----------------------------------------------------------------------------
// Create the object representing this particular setting
var setting = new Setting();
// Handling button click event
function onClick(e) {
setting.clickHandler();
}
| {
$.setting.width = this.args.width;
} | conditional_block |
settings.py | """
Django settings for todo project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-b9xx8+eul3#8q&c@tv^5e!u66j=a6@377$y^b2q!0a%vj+!ny'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
DJANGO_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
THIRD_PARTY_APPS = []
CUSTOM_APPS = [
'tasks.apps.TasksConfig',
]
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + CUSTOM_APPS
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'todo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['todo/templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'todo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
}, | 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/' | { | random_line_split |
forms.py | from django import forms
from .models import Question, Answer, Categories, Customuser
from django.contrib import auth
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
class add_Question_Form(forms.ModelForm): # just a regular form
question_text = forms.CharField(label=_("question_text"),
widget=forms.Textarea({'cols': '40', 'rows': '5'}))
class Meta:
model = Question
fields = ['question_text', 'upload',
'category1','category2',
'category3','category4']
def clean_text(self):
if question_text == "":
raise forms.ValidationError(
"Need a question",)
else:
return True
def save(self,commit=True):
question = super(add_Question_Form, self).save(commit=False)
question.question_text = self.cleaned_data["question_text"]
if commit:
question.save()
return question
class add_Answer_Form(forms.ModelForm):
class Meta:
model = Answer
fields = ['answer_text']
def clean_text(self):
return self.cleaned_data.get('answer_text')
class UserCreationForm(forms.ModelForm):
"""
A form that creates a user, with no privileges, from the given username and
password.
"""
error_messages = {
'password_mismatch': _("The two password fields didn't match."),
}
password1 = forms.CharField(label=_("Password"),
widget=forms.PasswordInput)
password2 = forms.CharField(label=_("Password confirmation"),
widget=forms.PasswordInput,
help_text=_("Enter the same password as before, for verification."))
# User's username field and our own 2 fields pass1 and pass2 are used. Later
# we shall set the User's password by user.set_password.
class Meta:
model = Customuser
fields = ("username","email","first_name","department")
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
self.instance.username = self.cleaned_data.get('username')
# To remove invalid passwords like short words, number only cases
auth.password_validation.validate_password(self.cleaned_data.get('password2'), self.instance)
return password2
def save(self, commit=True):
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password2"])
if commit:
user.save()
return user
class AuthenticationForm(forms.Form):
"""
Base class for authenticating users. Extend this to get a form that accepts
username/password logins.
"""
username = forms.CharField( max_length=254,
widget=forms.TextInput( attrs={'autofocus': ''}),
)
password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
error_messages = {
'invalid_login': _("Please enter a correct username and password. "
"Note that both fields may be case-sensitive."),
'inactive': _("This account is inactive."),
}
def | (self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username and password:
self.user_cache = auth.authenticate(username=username,
password=password)
if self.user_cache is None:
raise forms.ValidationError(
self.error_messages['invalid_login'],
code='invalid_login',
)
else:
return self.cleaned_data
class UserForm(forms.ModelForm):
class Meta:
model = Customuser
fields = ('categories',)
class CustomuserAdminForm(forms.ModelForm):
class Meta:
model = Customuser
fields = ("username","email","first_name","last_name",
'department','groups','is_active','is_staff','is_superuser')
# fields = ['username','password','verify,'first_name','last_name','email','batch',]
################### Django classes ##########################
| clean | identifier_name |
forms.py | from django import forms
from .models import Question, Answer, Categories, Customuser
from django.contrib import auth
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
class add_Question_Form(forms.ModelForm): # just a regular form
question_text = forms.CharField(label=_("question_text"),
widget=forms.Textarea({'cols': '40', 'rows': '5'}))
class Meta:
model = Question
fields = ['question_text', 'upload',
'category1','category2',
'category3','category4']
def clean_text(self):
|
def save(self,commit=True):
question = super(add_Question_Form, self).save(commit=False)
question.question_text = self.cleaned_data["question_text"]
if commit:
question.save()
return question
class add_Answer_Form(forms.ModelForm):
class Meta:
model = Answer
fields = ['answer_text']
def clean_text(self):
return self.cleaned_data.get('answer_text')
class UserCreationForm(forms.ModelForm):
"""
A form that creates a user, with no privileges, from the given username and
password.
"""
error_messages = {
'password_mismatch': _("The two password fields didn't match."),
}
password1 = forms.CharField(label=_("Password"),
widget=forms.PasswordInput)
password2 = forms.CharField(label=_("Password confirmation"),
widget=forms.PasswordInput,
help_text=_("Enter the same password as before, for verification."))
# User's username field and our own 2 fields pass1 and pass2 are used. Later
# we shall set the User's password by user.set_password.
class Meta:
model = Customuser
fields = ("username","email","first_name","department")
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
self.instance.username = self.cleaned_data.get('username')
# To remove invalid passwords like short words, number only cases
auth.password_validation.validate_password(self.cleaned_data.get('password2'), self.instance)
return password2
def save(self, commit=True):
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password2"])
if commit:
user.save()
return user
class AuthenticationForm(forms.Form):
"""
Base class for authenticating users. Extend this to get a form that accepts
username/password logins.
"""
username = forms.CharField( max_length=254,
widget=forms.TextInput( attrs={'autofocus': ''}),
)
password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
error_messages = {
'invalid_login': _("Please enter a correct username and password. "
"Note that both fields may be case-sensitive."),
'inactive': _("This account is inactive."),
}
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username and password:
self.user_cache = auth.authenticate(username=username,
password=password)
if self.user_cache is None:
raise forms.ValidationError(
self.error_messages['invalid_login'],
code='invalid_login',
)
else:
return self.cleaned_data
class UserForm(forms.ModelForm):
class Meta:
model = Customuser
fields = ('categories',)
class CustomuserAdminForm(forms.ModelForm):
class Meta:
model = Customuser
fields = ("username","email","first_name","last_name",
'department','groups','is_active','is_staff','is_superuser')
# fields = ['username','password','verify,'first_name','last_name','email','batch',]
################### Django classes ##########################
| if question_text == "":
raise forms.ValidationError(
"Need a question",)
else:
return True | identifier_body |
forms.py | from django import forms
from .models import Question, Answer, Categories, Customuser
from django.contrib import auth
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
class add_Question_Form(forms.ModelForm): # just a regular form
question_text = forms.CharField(label=_("question_text"),
widget=forms.Textarea({'cols': '40', 'rows': '5'}))
class Meta:
model = Question
fields = ['question_text', 'upload',
'category1','category2',
'category3','category4']
def clean_text(self):
if question_text == "":
|
else:
return True
def save(self,commit=True):
question = super(add_Question_Form, self).save(commit=False)
question.question_text = self.cleaned_data["question_text"]
if commit:
question.save()
return question
class add_Answer_Form(forms.ModelForm):
class Meta:
model = Answer
fields = ['answer_text']
def clean_text(self):
return self.cleaned_data.get('answer_text')
class UserCreationForm(forms.ModelForm):
"""
A form that creates a user, with no privileges, from the given username and
password.
"""
error_messages = {
'password_mismatch': _("The two password fields didn't match."),
}
password1 = forms.CharField(label=_("Password"),
widget=forms.PasswordInput)
password2 = forms.CharField(label=_("Password confirmation"),
widget=forms.PasswordInput,
help_text=_("Enter the same password as before, for verification."))
# User's username field and our own 2 fields pass1 and pass2 are used. Later
# we shall set the User's password by user.set_password.
class Meta:
model = Customuser
fields = ("username","email","first_name","department")
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
self.instance.username = self.cleaned_data.get('username')
# To remove invalid passwords like short words, number only cases
auth.password_validation.validate_password(self.cleaned_data.get('password2'), self.instance)
return password2
def save(self, commit=True):
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password2"])
if commit:
user.save()
return user
class AuthenticationForm(forms.Form):
"""
Base class for authenticating users. Extend this to get a form that accepts
username/password logins.
"""
username = forms.CharField( max_length=254,
widget=forms.TextInput( attrs={'autofocus': ''}),
)
password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
error_messages = {
'invalid_login': _("Please enter a correct username and password. "
"Note that both fields may be case-sensitive."),
'inactive': _("This account is inactive."),
}
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username and password:
self.user_cache = auth.authenticate(username=username,
password=password)
if self.user_cache is None:
raise forms.ValidationError(
self.error_messages['invalid_login'],
code='invalid_login',
)
else:
return self.cleaned_data
class UserForm(forms.ModelForm):
class Meta:
model = Customuser
fields = ('categories',)
class CustomuserAdminForm(forms.ModelForm):
class Meta:
model = Customuser
fields = ("username","email","first_name","last_name",
'department','groups','is_active','is_staff','is_superuser')
# fields = ['username','password','verify,'first_name','last_name','email','batch',]
################### Django classes ##########################
| raise forms.ValidationError(
"Need a question",) | conditional_block |
forms.py | from django import forms | class add_Question_Form(forms.ModelForm): # just a regular form
question_text = forms.CharField(label=_("question_text"),
widget=forms.Textarea({'cols': '40', 'rows': '5'}))
class Meta:
model = Question
fields = ['question_text', 'upload',
'category1','category2',
'category3','category4']
def clean_text(self):
if question_text == "":
raise forms.ValidationError(
"Need a question",)
else:
return True
def save(self,commit=True):
question = super(add_Question_Form, self).save(commit=False)
question.question_text = self.cleaned_data["question_text"]
if commit:
question.save()
return question
class add_Answer_Form(forms.ModelForm):
class Meta:
model = Answer
fields = ['answer_text']
def clean_text(self):
return self.cleaned_data.get('answer_text')
class UserCreationForm(forms.ModelForm):
"""
A form that creates a user, with no privileges, from the given username and
password.
"""
error_messages = {
'password_mismatch': _("The two password fields didn't match."),
}
password1 = forms.CharField(label=_("Password"),
widget=forms.PasswordInput)
password2 = forms.CharField(label=_("Password confirmation"),
widget=forms.PasswordInput,
help_text=_("Enter the same password as before, for verification."))
# User's username field and our own 2 fields pass1 and pass2 are used. Later
# we shall set the User's password by user.set_password.
class Meta:
model = Customuser
fields = ("username","email","first_name","department")
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
self.instance.username = self.cleaned_data.get('username')
# To remove invalid passwords like short words, number only cases
auth.password_validation.validate_password(self.cleaned_data.get('password2'), self.instance)
return password2
def save(self, commit=True):
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password2"])
if commit:
user.save()
return user
class AuthenticationForm(forms.Form):
"""
Base class for authenticating users. Extend this to get a form that accepts
username/password logins.
"""
username = forms.CharField( max_length=254,
widget=forms.TextInput( attrs={'autofocus': ''}),
)
password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
error_messages = {
'invalid_login': _("Please enter a correct username and password. "
"Note that both fields may be case-sensitive."),
'inactive': _("This account is inactive."),
}
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username and password:
self.user_cache = auth.authenticate(username=username,
password=password)
if self.user_cache is None:
raise forms.ValidationError(
self.error_messages['invalid_login'],
code='invalid_login',
)
else:
return self.cleaned_data
class UserForm(forms.ModelForm):
class Meta:
model = Customuser
fields = ('categories',)
class CustomuserAdminForm(forms.ModelForm):
class Meta:
model = Customuser
fields = ("username","email","first_name","last_name",
'department','groups','is_active','is_staff','is_superuser')
# fields = ['username','password','verify,'first_name','last_name','email','batch',]
################### Django classes ########################## | from .models import Question, Answer, Categories, Customuser
from django.contrib import auth
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
| random_line_split |
NSEC.py | # Copyright (C) 2004-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import absolute_import
import cStringIO
from . import exception as dns_exception
from . import rdata as dns_rdata
from . import rdatatype as dns_rdatatype
from . import name as dns_name
class NSEC(dns_rdata.Rdata):
"""NSEC record
@ivar next: the next name
@type next: dns_name.Name object
@ivar windows: the windowed bitmap list
@type windows: list of (window number, string) tuples"""
__slots__ = ['next', 'windows']
def __init__(self, rdclass, rdtype, next, windows):
super(NSEC, self).__init__(rdclass, rdtype)
self.next = next
self.windows = windows
def to_text(self, origin=None, relativize=True, **kw):
next = self.next.choose_relativity(origin, relativize)
text = ''
for (window, bitmap) in self.windows:
bits = []
for i in xrange(0, len(bitmap)):
byte = ord(bitmap[i])
for j in xrange(0, 8):
if byte & (0x80 >> j):
bits.append(dns_rdatatype.to_text(window * 256 + \
i * 8 + j))
text += (' ' + ' '.join(bits))
return '%s%s' % (next, text)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
next = tok.get_name()
next = next.choose_relativity(origin, relativize)
rdtypes = []
while 1:
token = tok.get().unescape()
if token.is_eol_or_eof():
break
nrdtype = dns_rdatatype.from_text(token.value)
if nrdtype == 0:
raise dns_exception.SyntaxError("NSEC with bit 0")
if nrdtype > 65535:
raise dns_exception.SyntaxError("NSEC with bit > 65535")
rdtypes.append(nrdtype)
rdtypes.sort()
window = 0
octets = 0
prior_rdtype = 0
bitmap = ['\0'] * 32
windows = []
for nrdtype in rdtypes:
if nrdtype == prior_rdtype:
continue
prior_rdtype = nrdtype
new_window = nrdtype // 256
if new_window != window:
windows.append((window, ''.join(bitmap[0:octets])))
bitmap = ['\0'] * 32
window = new_window
offset = nrdtype % 256
byte = offset / 8
bit = offset % 8
octets = byte + 1
bitmap[byte] = chr(ord(bitmap[byte]) | (0x80 >> bit))
windows.append((window, ''.join(bitmap[0:octets])))
return cls(rdclass, rdtype, next, windows)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
self.next.to_wire(file, None, origin)
for (window, bitmap) in self.windows:
file.write(chr(window))
file.write(chr(len(bitmap)))
file.write(bitmap)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
(next, cused) = dns_name.from_wire(wire[: current + rdlen], current)
current += cused
rdlen -= cused
windows = []
while rdlen > 0:
if rdlen < 3:
raise dns_exception.FormError("NSEC too short")
window = ord(wire[current])
octets = ord(wire[current + 1])
if octets == 0 or octets > 32:
raise dns_exception.FormError("bad NSEC octets")
current += 2
rdlen -= 2
if rdlen < octets:
raise dns_exception.FormError("bad NSEC bitmap length")
bitmap = wire[current : current + octets]
current += octets
rdlen -= octets
windows.append((window, bitmap))
if not origin is None:
next = next.relativize(origin)
return cls(rdclass, rdtype, next, windows)
from_wire = classmethod(from_wire)
def choose_relativity(self, origin = None, relativize = True):
self.next = self.next.choose_relativity(origin, relativize)
def _cmp(self, other):
| v = cmp(self.next, other.next)
if v == 0:
b1 = cStringIO.StringIO()
for (window, bitmap) in self.windows:
b1.write(chr(window))
b1.write(chr(len(bitmap)))
b1.write(bitmap)
b2 = cStringIO.StringIO()
for (window, bitmap) in other.windows:
b2.write(chr(window))
b2.write(chr(len(bitmap)))
b2.write(bitmap)
v = cmp(b1.getvalue(), b2.getvalue())
return v | identifier_body |
|
NSEC.py | # Copyright (C) 2004-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import absolute_import
import cStringIO
from . import exception as dns_exception
from . import rdata as dns_rdata
from . import rdatatype as dns_rdatatype
from . import name as dns_name
class NSEC(dns_rdata.Rdata):
"""NSEC record
@ivar next: the next name
@type next: dns_name.Name object
@ivar windows: the windowed bitmap list
@type windows: list of (window number, string) tuples"""
__slots__ = ['next', 'windows']
def __init__(self, rdclass, rdtype, next, windows):
super(NSEC, self).__init__(rdclass, rdtype)
self.next = next
self.windows = windows
def to_text(self, origin=None, relativize=True, **kw):
next = self.next.choose_relativity(origin, relativize)
text = ''
for (window, bitmap) in self.windows:
bits = []
for i in xrange(0, len(bitmap)):
byte = ord(bitmap[i])
for j in xrange(0, 8):
if byte & (0x80 >> j):
bits.append(dns_rdatatype.to_text(window * 256 + \
i * 8 + j))
text += (' ' + ' '.join(bits))
return '%s%s' % (next, text)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
next = tok.get_name()
next = next.choose_relativity(origin, relativize)
rdtypes = []
while 1:
token = tok.get().unescape()
if token.is_eol_or_eof():
break
nrdtype = dns_rdatatype.from_text(token.value)
if nrdtype == 0:
raise dns_exception.SyntaxError("NSEC with bit 0")
if nrdtype > 65535:
raise dns_exception.SyntaxError("NSEC with bit > 65535")
rdtypes.append(nrdtype)
rdtypes.sort()
window = 0
octets = 0
prior_rdtype = 0
bitmap = ['\0'] * 32
windows = []
for nrdtype in rdtypes:
if nrdtype == prior_rdtype:
continue
prior_rdtype = nrdtype
new_window = nrdtype // 256
if new_window != window:
windows.append((window, ''.join(bitmap[0:octets])))
bitmap = ['\0'] * 32
window = new_window
offset = nrdtype % 256
byte = offset / 8
bit = offset % 8
octets = byte + 1
bitmap[byte] = chr(ord(bitmap[byte]) | (0x80 >> bit))
windows.append((window, ''.join(bitmap[0:octets])))
return cls(rdclass, rdtype, next, windows)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
self.next.to_wire(file, None, origin)
for (window, bitmap) in self.windows:
file.write(chr(window))
file.write(chr(len(bitmap)))
file.write(bitmap)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
(next, cused) = dns_name.from_wire(wire[: current + rdlen], current)
current += cused
rdlen -= cused
windows = []
while rdlen > 0:
if rdlen < 3:
raise dns_exception.FormError("NSEC too short")
window = ord(wire[current])
octets = ord(wire[current + 1])
if octets == 0 or octets > 32:
raise dns_exception.FormError("bad NSEC octets")
current += 2
rdlen -= 2
if rdlen < octets:
raise dns_exception.FormError("bad NSEC bitmap length")
bitmap = wire[current : current + octets]
current += octets
rdlen -= octets
windows.append((window, bitmap))
if not origin is None:
|
return cls(rdclass, rdtype, next, windows)
from_wire = classmethod(from_wire)
def choose_relativity(self, origin = None, relativize = True):
self.next = self.next.choose_relativity(origin, relativize)
def _cmp(self, other):
v = cmp(self.next, other.next)
if v == 0:
b1 = cStringIO.StringIO()
for (window, bitmap) in self.windows:
b1.write(chr(window))
b1.write(chr(len(bitmap)))
b1.write(bitmap)
b2 = cStringIO.StringIO()
for (window, bitmap) in other.windows:
b2.write(chr(window))
b2.write(chr(len(bitmap)))
b2.write(bitmap)
v = cmp(b1.getvalue(), b2.getvalue())
return v
| next = next.relativize(origin) | conditional_block |
NSEC.py | # Copyright (C) 2004-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import absolute_import
import cStringIO
from . import exception as dns_exception
from . import rdata as dns_rdata
from . import rdatatype as dns_rdatatype
from . import name as dns_name
class NSEC(dns_rdata.Rdata):
"""NSEC record
@ivar next: the next name
@type next: dns_name.Name object
@ivar windows: the windowed bitmap list
@type windows: list of (window number, string) tuples"""
__slots__ = ['next', 'windows']
def __init__(self, rdclass, rdtype, next, windows):
super(NSEC, self).__init__(rdclass, rdtype)
self.next = next
self.windows = windows
def to_text(self, origin=None, relativize=True, **kw):
next = self.next.choose_relativity(origin, relativize)
text = ''
for (window, bitmap) in self.windows:
bits = []
for i in xrange(0, len(bitmap)):
byte = ord(bitmap[i])
for j in xrange(0, 8):
if byte & (0x80 >> j):
bits.append(dns_rdatatype.to_text(window * 256 + \
i * 8 + j))
text += (' ' + ' '.join(bits))
return '%s%s' % (next, text)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
next = tok.get_name()
next = next.choose_relativity(origin, relativize)
rdtypes = []
while 1:
token = tok.get().unescape()
if token.is_eol_or_eof():
break
nrdtype = dns_rdatatype.from_text(token.value)
if nrdtype == 0:
raise dns_exception.SyntaxError("NSEC with bit 0")
if nrdtype > 65535:
raise dns_exception.SyntaxError("NSEC with bit > 65535")
rdtypes.append(nrdtype)
rdtypes.sort()
window = 0
octets = 0
prior_rdtype = 0
bitmap = ['\0'] * 32
windows = []
for nrdtype in rdtypes:
if nrdtype == prior_rdtype:
continue
prior_rdtype = nrdtype
new_window = nrdtype // 256
if new_window != window:
windows.append((window, ''.join(bitmap[0:octets])))
bitmap = ['\0'] * 32
window = new_window
offset = nrdtype % 256
byte = offset / 8
bit = offset % 8
octets = byte + 1
bitmap[byte] = chr(ord(bitmap[byte]) | (0x80 >> bit))
windows.append((window, ''.join(bitmap[0:octets])))
return cls(rdclass, rdtype, next, windows)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
self.next.to_wire(file, None, origin)
for (window, bitmap) in self.windows:
file.write(chr(window))
file.write(chr(len(bitmap)))
file.write(bitmap)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
(next, cused) = dns_name.from_wire(wire[: current + rdlen], current)
current += cused
rdlen -= cused
windows = []
while rdlen > 0:
if rdlen < 3:
raise dns_exception.FormError("NSEC too short")
window = ord(wire[current])
octets = ord(wire[current + 1])
if octets == 0 or octets > 32:
raise dns_exception.FormError("bad NSEC octets")
current += 2
rdlen -= 2
if rdlen < octets:
raise dns_exception.FormError("bad NSEC bitmap length")
bitmap = wire[current : current + octets]
current += octets
rdlen -= octets
windows.append((window, bitmap))
if not origin is None:
next = next.relativize(origin)
return cls(rdclass, rdtype, next, windows)
from_wire = classmethod(from_wire)
def choose_relativity(self, origin = None, relativize = True):
self.next = self.next.choose_relativity(origin, relativize)
def _cmp(self, other):
v = cmp(self.next, other.next)
if v == 0:
b1 = cStringIO.StringIO()
for (window, bitmap) in self.windows:
b1.write(chr(window))
b1.write(chr(len(bitmap)))
b1.write(bitmap) | b2.write(chr(window))
b2.write(chr(len(bitmap)))
b2.write(bitmap)
v = cmp(b1.getvalue(), b2.getvalue())
return v | b2 = cStringIO.StringIO()
for (window, bitmap) in other.windows: | random_line_split |
NSEC.py | # Copyright (C) 2004-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import absolute_import
import cStringIO
from . import exception as dns_exception
from . import rdata as dns_rdata
from . import rdatatype as dns_rdatatype
from . import name as dns_name
class NSEC(dns_rdata.Rdata):
"""NSEC record
@ivar next: the next name
@type next: dns_name.Name object
@ivar windows: the windowed bitmap list
@type windows: list of (window number, string) tuples"""
__slots__ = ['next', 'windows']
def __init__(self, rdclass, rdtype, next, windows):
super(NSEC, self).__init__(rdclass, rdtype)
self.next = next
self.windows = windows
def | (self, origin=None, relativize=True, **kw):
next = self.next.choose_relativity(origin, relativize)
text = ''
for (window, bitmap) in self.windows:
bits = []
for i in xrange(0, len(bitmap)):
byte = ord(bitmap[i])
for j in xrange(0, 8):
if byte & (0x80 >> j):
bits.append(dns_rdatatype.to_text(window * 256 + \
i * 8 + j))
text += (' ' + ' '.join(bits))
return '%s%s' % (next, text)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
next = tok.get_name()
next = next.choose_relativity(origin, relativize)
rdtypes = []
while 1:
token = tok.get().unescape()
if token.is_eol_or_eof():
break
nrdtype = dns_rdatatype.from_text(token.value)
if nrdtype == 0:
raise dns_exception.SyntaxError("NSEC with bit 0")
if nrdtype > 65535:
raise dns_exception.SyntaxError("NSEC with bit > 65535")
rdtypes.append(nrdtype)
rdtypes.sort()
window = 0
octets = 0
prior_rdtype = 0
bitmap = ['\0'] * 32
windows = []
for nrdtype in rdtypes:
if nrdtype == prior_rdtype:
continue
prior_rdtype = nrdtype
new_window = nrdtype // 256
if new_window != window:
windows.append((window, ''.join(bitmap[0:octets])))
bitmap = ['\0'] * 32
window = new_window
offset = nrdtype % 256
byte = offset / 8
bit = offset % 8
octets = byte + 1
bitmap[byte] = chr(ord(bitmap[byte]) | (0x80 >> bit))
windows.append((window, ''.join(bitmap[0:octets])))
return cls(rdclass, rdtype, next, windows)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
self.next.to_wire(file, None, origin)
for (window, bitmap) in self.windows:
file.write(chr(window))
file.write(chr(len(bitmap)))
file.write(bitmap)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
(next, cused) = dns_name.from_wire(wire[: current + rdlen], current)
current += cused
rdlen -= cused
windows = []
while rdlen > 0:
if rdlen < 3:
raise dns_exception.FormError("NSEC too short")
window = ord(wire[current])
octets = ord(wire[current + 1])
if octets == 0 or octets > 32:
raise dns_exception.FormError("bad NSEC octets")
current += 2
rdlen -= 2
if rdlen < octets:
raise dns_exception.FormError("bad NSEC bitmap length")
bitmap = wire[current : current + octets]
current += octets
rdlen -= octets
windows.append((window, bitmap))
if not origin is None:
next = next.relativize(origin)
return cls(rdclass, rdtype, next, windows)
from_wire = classmethod(from_wire)
def choose_relativity(self, origin = None, relativize = True):
self.next = self.next.choose_relativity(origin, relativize)
def _cmp(self, other):
v = cmp(self.next, other.next)
if v == 0:
b1 = cStringIO.StringIO()
for (window, bitmap) in self.windows:
b1.write(chr(window))
b1.write(chr(len(bitmap)))
b1.write(bitmap)
b2 = cStringIO.StringIO()
for (window, bitmap) in other.windows:
b2.write(chr(window))
b2.write(chr(len(bitmap)))
b2.write(bitmap)
v = cmp(b1.getvalue(), b2.getvalue())
return v
| to_text | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.