| file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-12.1k) | suffix (large_string, lengths 0-12k) | middle (large_string, lengths 0-7.51k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
info.py | " + print_variants.__doc__),
("--no-versions", "do not " + print_versions.__doc__),
("--phases", print_phases.__doc__),
("--tags", print_tags.__doc__),
("--tests", print_tests.__doc__),
("--virtuals", print_virtuals.__doc__),
]
for opt, help_comment in options:
subparser.add_argument(opt, action="store_true", help=help_comment)
arguments.add_common_arguments(subparser, ["package"])
def section_title(s):
return header_color + s + plain_format
def version(s):
return spack.spec.version_color + s + plain_format
def variant(s):
return spack.spec.enabled_variant_color + s + plain_format
class VariantFormatter(object):
def __init__(self, variants):
self.variants = variants
self.headers = ("Name [Default]", "When", "Allowed values", "Description")
# Formats
fmt_name = "{0} [{1}]"
# Initialize column widths with the length of the
# corresponding headers, as they cannot be shorter
# than that
self.column_widths = [len(x) for x in self.headers]
# Expand columns based on max line lengths
for k, e in variants.items():
v, w = e
candidate_max_widths = (
len(fmt_name.format(k, self.default(v))), # Name [Default]
len(str(w)),
len(v.allowed_values), # Allowed values
len(v.description), # Description
)
self.column_widths = (
max(self.column_widths[0], candidate_max_widths[0]),
max(self.column_widths[1], candidate_max_widths[1]),
max(self.column_widths[2], candidate_max_widths[2]),
max(self.column_widths[3], candidate_max_widths[3]),
)
# Don't let name or possible values be less than max widths
_, cols = tty.terminal_size()
max_name = min(self.column_widths[0], 30)
max_when = min(self.column_widths[1], 30)
max_vals = min(self.column_widths[2], 20)
# allow the description column to extend as wide as the terminal.
max_description = min(
self.column_widths[3],
# min width 70 cols, 14 cols of margins and column spacing
max(cols, 70) - max_name - max_vals - 14,
)
self.column_widths = (max_name, max_when, max_vals, max_description)
# Compute the format
self.fmt = "%%-%ss%%-%ss%%-%ss%%s" % (
self.column_widths[0] + 4,
self.column_widths[1] + 4,
self.column_widths[2] + 4,
)
def default(self, v):
s = "on" if v.default is True else "off"
if not isinstance(v.default, bool):
s = v.default | return s
@property
def lines(self):
if not self.variants:
yield " None"
else:
yield " " + self.fmt % self.headers
underline = tuple([w * "=" for w in self.column_widths])
yield " " + self.fmt % underline
yield ""
for k, e in sorted(self.variants.items()):
v, w = e
name = textwrap.wrap(
"{0} [{1}]".format(k, self.default(v)), width=self.column_widths[0]
)
if all(spec == spack.spec.Spec() for spec in w):
w = "--"
when = textwrap.wrap(str(w), width=self.column_widths[1])
allowed = v.allowed_values.replace("True, False", "on, off")
allowed = textwrap.wrap(allowed, width=self.column_widths[2])
description = []
for d_line in v.description.split("\n"):
description += textwrap.wrap(d_line, width=self.column_widths[3])
for t in zip_longest(name, when, allowed, description, fillvalue=""):
yield " " + self.fmt % t
def print_dependencies(pkg):
"""output build, link, and run package dependencies"""
for deptype in ("build", "link", "run"):
color.cprint("")
color.cprint(section_title("%s Dependencies:" % deptype.capitalize()))
deps = sorted(pkg.dependencies_of_type(deptype))
if deps:
colify(deps, indent=4)
else:
color.cprint(" None")
def print_detectable(pkg):
"""output information on external detection"""
color.cprint("")
color.cprint(section_title("Externally Detectable: "))
# If the package has an 'executables' or 'libraries' field, it
# can detect an installation
if hasattr(pkg, "executables") or hasattr(pkg, "libraries"):
find_attributes = []
if hasattr(pkg, "determine_version"):
find_attributes.append("version")
if hasattr(pkg, "determine_variants"):
find_attributes.append("variants")
# If the package does not define 'determine_version' nor
# 'determine_variants', then it must use some custom detection
# mechanism. In this case, just inform the user it's detectable somehow.
color.cprint(
" True{0}".format(
" (" + ", ".join(find_attributes) + ")" if find_attributes else ""
)
)
else:
color.cprint(" False")
def print_maintainers(pkg):
"""output package maintainers"""
if len(pkg.maintainers) > 0:
mnt = " ".join(["@@" + m for m in pkg.maintainers])
color.cprint("")
color.cprint(section_title("Maintainers: ") + mnt)
def print_phases(pkg):
"""output installation phases"""
if hasattr(pkg, "phases") and pkg.phases:
color.cprint("")
color.cprint(section_title("Installation Phases:"))
phase_str = ""
for phase in pkg.phases:
phase_str += " {0}".format(phase)
color.cprint(phase_str)
def print_tags(pkg):
"""output package tags"""
color.cprint("")
color.cprint(section_title("Tags: "))
if hasattr(pkg, "tags"):
tags = sorted(pkg.tags)
colify(tags, indent=4)
else:
color.cprint(" None")
def print_tests(pkg):
"""output relevant build-time and stand-alone tests"""
# Some built-in base packages (e.g., Autotools) define callbacks (e.g.,
# 'check') that are inherited by descendant packages. These callbacks may not
# result in build-time testing if the package's build does not implement the
# expected functionality (e.g., 'check' or 'test' targets).
#
# So the presence of a callback in Spack does not necessarily correspond
# to the actual presence of build-time tests for a package.
for callbacks, phase in [
(pkg.build_time_test_callbacks, "Build"),
(pkg.install_time_test_callbacks, "Install"),
]:
color.cprint("")
color.cprint(section_title("Available {0} Phase Test Methods:".format(phase)))
names = []
if callbacks:
for name in callbacks:
if getattr(pkg, name, False):
names.append(name)
if names:
colify(sorted(names), indent=4)
else:
color.cprint(" None")
# PackageBase defines an empty install/smoke test but we want to know
# if it has been overridden and, therefore, assumed to be implemented.
color.cprint("")
color.cprint(section_title("Stand-Alone/Smoke Test Methods:"))
names = []
pkg_cls = pkg if inspect.isclass(pkg) else pkg.__class__
if has_test_method(pkg_cls):
pkg_base = spack.package_base.PackageBase
test_pkgs = [
str(cls.test)
for cls in inspect.getmro(pkg_cls)
if issubclass(cls, pkg_base) and cls.test != pkg_base.test
]
test_pkgs = list(set(test_pkgs))
names.extend([(test.split()[1]).lower() for test in test_pkgs])
# TODO Refactor START
# Use code from package_base.py's test_process IF this functionality is
# accepted.
v_names = list(set([vspec.name for vspec in pkg.virtuals_provided]))
# hack for compilers that are not dependencies (yet)
# TODO: this all eventually goes away
c_names = ("gcc", "intel", "intel-parallel-studio", "pgi")
if pkg.name in c_names:
v_names.extend(["c", "cxx", "fortran"])
if pkg.spec.satisfies("llvm+clang"):
v_names.extend(["c", "cxx"])
# TODO Refactor END
v_specs = [spack.spec.Spec(v_name) for v_name in v_names]
for v_spec in v_specs:
try:
pkg_cls = spack.repo.path.get_pkg_class(v_spec.name)
if has_test_method(pkg_cls):
names.append("{0}.test".format(pkg_cls.name.lower()))
except spack.repo.UnknownPackageError:
pass
if names:
colify(sorted | random_line_split |
|
log_file.rs | (&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "CRL Sweeper File: {}", self.file_path.as_path().display())
}
}
#[cfg(target_os="linux")]
fn open_synchronous_fd(path: &CString) -> libc::c_int {
const O_DIRECT: libc::c_int = 0x4000;
const O_DSYNC: libc::c_int = 0x1000; // O_DSYNC on Linux (octal 0o010000)
unsafe {
libc::open(path.as_ptr(), libc::O_CREAT | libc::O_RDWR | O_DIRECT | O_DSYNC)
}
}
#[cfg(target_os="macos")]
fn open_synchronous_fd(path: &CString) -> libc::c_int {
const F_NOCACHE: libc::c_int = 0x30;
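// macOS has no O_DIRECT/O_DSYNC, so the buffer cache is bypassed per descriptor
// with fcntl(F_NOCACHE) once the file has been opened.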
unsafe {
let mut fd = libc::open(path.as_ptr(), libc::O_CREAT | libc::O_RDWR);
if fd > 0 {
if libc::fchmod(fd, 0o644) < 0 {
fd = -1;
}
}
if fd > 0 {
if libc::fcntl(fd, F_NOCACHE, 1) < 0 {
fd = -1;
}
}
fd
}
}
#[cfg(not(any(target_os="linux", target_os="macos")))]
fn open_synchronous_fd(path: &CString) -> libc::c_int {
unsafe {
libc::open(path.as_ptr(), libc::O_CREAT | libc::O_RDWR)
}
}
impl LogFile {
fn new(
directory: &Path,
file_id: FileId,
max_file_size: usize) -> Result<(LogFile, Option<(LogEntrySerialNumber, usize)>)> {
let f = format!("{}", file_id.0);
let p = directory.join(f);
let fp = p.as_path();
let fd = open_synchronous_fd(&CString::new(fp.as_os_str().as_bytes()).unwrap());
if fd < 0 {
error!("Failed to open CRL file {}", fp.display());
return Err(Error::last_os_error());
}
let mut size = seek(fd, 0, libc::SEEK_END)?;
if size < (16 + STATIC_ENTRY_SIZE as usize) {
// Initialize
seek(fd, 0, libc::SEEK_SET)?;
unsafe {
libc::ftruncate(fd, 0);
}
let u = uuid::Uuid::new_v4();
write_bytes(fd, &u.as_bytes()[..])?;
}
let file_uuid = pread_uuid(fd, 0)?;
size = seek(fd, 0, libc::SEEK_END)?;
let last = find_last_valid_entry(fd, size, &file_uuid)?;
let lf = LogFile{
file_path: Box::new(p),
file_id,
fd,
len: size as usize,
max_size: max_file_size,
file_uuid
};
Ok((lf, last))
}
fn read(&self, offset: usize, nbytes: usize) -> Result<Data> {
let mut v = Vec::<u8>::with_capacity(nbytes);
if nbytes > 0 {
v.resize(nbytes, 0);
pread_bytes(self.fd, &mut v[..], offset)?;
}
Ok(Data::new(v))
}
pub(super) fn write(&mut self, data: &Vec<ArcDataSlice>) -> Result<()> {
let wsize: usize = data.iter().map(|d| d.len()).sum();
unsafe {
let iov: Vec<libc::iovec> = data.iter().map( |d| {
let p: *const u8 = &d.as_bytes()[0];
libc::iovec {
iov_base: p as *mut libc::c_void,
iov_len: d.len()
}
}).collect();
loop {
if libc::writev(self.fd, &iov[0], data.len() as libc::c_int) >= 0 {
break;
} else {
let err = Error::last_os_error();
match err.kind() {
ErrorKind::Interrupted => (),
_ => return {
warn!("Unexpected error occurred during CRL file write: {}", err);
Err(err)
}
}
}
}
if !( cfg!(target_os="linux") || cfg!(target_os="macos") ) {
libc::fsync(self.fd);
}
}
self.len += wsize;
Ok(())
}
pub(super) fn recycle(&mut self) -> Result<()> {
info!("Recycling {}", self);
seek(self.fd, 0, libc::SEEK_SET)?;
unsafe {
libc::ftruncate(self.fd, 0);
}
self.file_uuid = uuid::Uuid::new_v4();
self.len = 16;
write_bytes(self.fd, &self.file_uuid.as_bytes()[..])?;
Ok(())
}
}
fn pread_bytes(fd: libc::c_int, s: &mut [u8], offset: usize) -> Result<()> {
if s.len() == 0 {
Ok(())
} else {
let p: *mut u8 = &mut s[0];
unsafe {
if libc::pread(fd, p as *mut libc::c_void, s.len(), offset as libc::off_t) < 0 {
Err(Error::last_os_error())
} else {
Ok(())
}
}
}
}
fn pread_uuid(fd: libc::c_int, offset: usize) -> Result<uuid::Uuid> {
let mut buf: [u8; 16] = [0; 16];
pread_bytes(fd, &mut buf[..], offset)?;
Ok(uuid::Uuid::from_bytes(buf))
}
fn write_bytes(fd: libc::c_int, s: &[u8]) -> Result<()> {
let p: *const u8 = &s[0];
unsafe {
if libc::write(fd, p as *const libc::c_void, s.len()) < 0 {
return Err(Error::last_os_error());
}
libc::fsync(fd);
}
Ok(())
}
fn seek(fd: libc::c_int, offset: i64, whence: libc::c_int) -> Result<usize> {
unsafe {
let sz = libc::lseek(fd, offset, whence);
if sz < 0 {
Err(Error::last_os_error())
} else {
Ok(sz as usize)
}
}
}
fn find_last_valid_entry(
fd: libc::c_int,
file_size: usize,
file_uuid: &uuid::Uuid) -> Result<Option<(LogEntrySerialNumber, usize)>> {
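// Scan backward from the end of the file in 4096-byte blocks: a block whose
// trailing 16 bytes equal the file UUID ends with a complete entry, and that
// entry's serial number is read from the start of its static footer.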
let mut offset = file_size - (file_size % 4096);
let mut last = None;
while offset > 32 && last.is_none() {
let test_uuid = pread_uuid(fd, offset - 16)?;
if test_uuid == *file_uuid {
let entry_offset = offset - STATIC_ENTRY_SIZE as usize;
let mut serial_bytes: [u8; 8] = [0; 8];
pread_bytes(fd, &mut serial_bytes[..], entry_offset)?;
let serial = u64::from_le_bytes(serial_bytes);
last = Some((LogEntrySerialNumber(serial), entry_offset));
break;
}
offset -= 4096;
}
//println!("LAST: {:?} file size {} offset {}", last, file_size, (file_size - (file_size % 4096)));
Ok(last)
}
pub(super) fn recover(
crl_directory: &Path,
max_file_size: usize,
num_streams: usize) -> Result<RecoveredCrlState> {
let mut raw_files = Vec::<(LogFile, Option<(LogEntrySerialNumber, usize)>)>::new();
for i in 0 .. num_streams * 3 {
let f = LogFile::new(crl_directory, FileId(i as u16), max_file_size)?;
raw_files.push(f);
}
let mut last: Option<(FileId, LogEntrySerialNumber, usize)> = None;
for t in &raw_files {
if let Some((serial, offset)) = &t.1 {
if let Some((_, cur_serial, _)) = &last {
if serial > cur_serial {
last = Some((t.0.file_id, *serial, *offset));
}
} else {
last = Some((t.0.file_id, *serial, *offset))
}
}
}
let mut files: Vec<(LogFile, Option<LogEntrySerialNumber>)> = Vec::new();
for t in raw_files {
files.push((t.0, t.1.map(|x| x.0)));
}
let mut tx: Vec<RecoveredTx> = Vec::new();
let mut alloc: Vec<RecoveredAlloc> = Vec::new();
let mut last_entry_serial = LogEntrySerialNumber(0);
let mut last_entry_location = FileLocation {
file_id: FileId(0),
offset: 0,
length: 0
};
if let Some((last_file_id, last_serial, last_offset)) = last {
last_entry_serial = last_serial;
last_entry_location = FileLocation {
file_id: last_file_id,
offset: last_offset as | fmt | identifier_name |
|
log_file.rs |
impl Drop for LogFile {
fn drop(&mut self) {
unsafe {
libc::close(self.fd);
}
}
}
impl fmt::Display for LogFile {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "CRL Sweeper File: {}", self.file_path.as_path().display())
}
}
#[cfg(target_os="linux")]
fn open_synchronous_fd(path: &CString) -> libc::c_int {
const O_DIRECT: libc::c_int = 0x4000;
const O_DSYNC: libc::c_int = 0x1000; // O_DSYNC on Linux (octal 0o010000)
unsafe {
libc::open(path.as_ptr(), libc::O_CREAT | libc::O_RDWR | O_DIRECT | O_DSYNC)
}
}
#[cfg(target_os="macos")]
fn open_synchronous_fd(path: &CString) -> libc::c_int {
const F_NOCACHE: libc::c_int = 0x30;
unsafe {
let mut fd = libc::open(path.as_ptr(), libc::O_CREAT | libc::O_RDWR);
if fd > 0 {
if libc::fchmod(fd, 0o644) < 0 {
fd = -1;
}
}
if fd > 0 {
if libc::fcntl(fd, F_NOCACHE, 1) < 0 {
fd = -1;
}
}
fd
}
}
#[cfg(not(any(target_os="linux", target_os="macos")))]
fn open_synchronous_fd(path: &CString) -> libc::c_int {
unsafe {
libc::open(path.as_ptr(), libc::O_CREAT | libc::O_RDWR)
}
}
impl LogFile {
fn new(
directory: &Path,
file_id: FileId,
max_file_size: usize) -> Result<(LogFile, Option<(LogEntrySerialNumber, usize)>)> {
let f = format!("{}", file_id.0);
let p = directory.join(f);
let fp = p.as_path();
let fd = open_synchronous_fd(&CString::new(fp.as_os_str().as_bytes()).unwrap());
if fd < 0 {
error!("Failed to open CRL file {}", fp.display());
return Err(Error::last_os_error());
}
let mut size = seek(fd, 0, libc::SEEK_END)?;
if size < (16 + STATIC_ENTRY_SIZE as usize) {
// Initialize
seek(fd, 0, libc::SEEK_SET)?;
unsafe {
libc::ftruncate(fd, 0);
}
let u = uuid::Uuid::new_v4();
write_bytes(fd, &u.as_bytes()[..])?;
}
let file_uuid = pread_uuid(fd, 0)?;
size = seek(fd, 0, libc::SEEK_END)?;
let last = find_last_valid_entry(fd, size, &file_uuid)?;
let lf = LogFile{
file_path: Box::new(p),
file_id,
fd,
len: size as usize,
max_size: max_file_size,
file_uuid
};
Ok((lf, last))
}
fn read(&self, offset: usize, nbytes: usize) -> Result<Data> {
let mut v = Vec::<u8>::with_capacity(nbytes);
if nbytes > 0 {
v.resize(nbytes, 0);
pread_bytes(self.fd, &mut v[..], offset)?;
}
Ok(Data::new(v))
}
pub(super) fn write(&mut self, data: &Vec<ArcDataSlice>) -> Result<()> {
let wsize: usize = data.iter().map(|d| d.len()).sum();
unsafe {
let iov: Vec<libc::iovec> = data.iter().map( |d| {
let p: *const u8 = &d.as_bytes()[0];
libc::iovec {
iov_base: p as *mut libc::c_void,
iov_len: d.len()
}
}).collect();
loop {
if libc::writev(self.fd, &iov[0], data.len() as libc::c_int) >= 0 {
break;
} else {
let err = Error::last_os_error();
match err.kind() {
ErrorKind::Interrupted => (),
_ => return {
warn!("Unexpected error occurred during CRL file write: {}", err);
Err(err)
}
}
}
}
if !( cfg!(target_os="linux") || cfg!(target_os="macos") ) {
libc::fsync(self.fd);
}
}
self.len += wsize;
Ok(())
}
pub(super) fn recycle(&mut self) -> Result<()> {
info!("Recycling {}", self);
seek(self.fd, 0, libc::SEEK_SET)?;
unsafe {
libc::ftruncate(self.fd, 0);
}
self.file_uuid = uuid::Uuid::new_v4();
self.len = 16;
write_bytes(self.fd, &self.file_uuid.as_bytes()[..])?;
Ok(())
}
}
fn pread_bytes(fd: libc::c_int, s: &mut [u8], offset: usize) -> Result<()> {
if s.len() == 0 {
Ok(())
} else {
let p: *mut u8 = &mut s[0];
unsafe {
if libc::pread(fd, p as *mut libc::c_void, s.len(), offset as libc::off_t) < 0 {
Err(Error::last_os_error())
} else {
Ok(())
}
}
}
}
fn pread_uuid(fd: libc::c_int, offset: usize) -> Result<uuid::Uuid> {
let mut buf: [u8; 16] = [0; 16];
pread_bytes(fd, &mut buf[..], offset)?;
Ok(uuid::Uuid::from_bytes(buf))
}
fn write_bytes(fd: libc::c_int, s: &[u8]) -> Result<()> {
let p: *const u8 = &s[0];
unsafe {
if libc::write(fd, p as *const libc::c_void, s.len()) < 0 {
return Err(Error::last_os_error());
}
libc::fsync(fd);
}
Ok(())
}
fn seek(fd: libc::c_int, offset: i64, whence: libc::c_int) -> Result<usize> {
unsafe {
let sz = libc::lseek(fd, offset, whence);
if sz < 0 {
Err(Error::last_os_error())
} else {
Ok(sz as usize)
}
}
}
fn find_last_valid_entry(
fd: libc::c_int,
file_size: usize,
file_uuid: &uuid::Uuid) -> Result<Option<(LogEntrySerialNumber, usize)>> {
let mut offset = file_size - (file_size % 4096);
let mut last = None;
while offset > 32 && last.is_none() {
let test_uuid = pread_uuid(fd, offset - 16)?;
if test_uuid == *file_uuid {
let entry_offset = offset - STATIC_ENTRY_SIZE as usize;
let mut serial_bytes: [u8; 8] = [0; 8];
pread_bytes(fd, &mut serial_bytes[..], entry_offset)?;
let serial = u64::from_le_bytes(serial_bytes);
last = Some((LogEntrySerialNumber(serial), entry_offset));
break;
}
offset -= 4096;
}
//println!("LAST: {:?} file size {} offset {}", last, file_size, (file_size - (file_size % 4096)));
Ok(last)
}
pub(super) fn recover(
crl_directory: &Path,
max_file_size: usize,
num_streams: usize) -> Result<RecoveredCrlState> {
let mut raw_files = Vec::<(LogFile, Option<(LogEntrySerialNumber, usize)>)>::new();
for i in 0 .. num_streams * 3 {
let f = LogFile::new(crl_directory, FileId(i as u16), max_file_size)?;
raw_files.push(f);
}
let mut last: Option<(FileId, LogEntrySerialNumber, usize)> = None;
for t in &raw_files {
if let Some((serial, offset)) = &t.1 {
if let Some((_, cur_serial, _)) = &last {
if serial > cur_serial {
last = Some((t.0.file_id, *serial, *offset));
}
} else {
last = Some((t.0.file_id, *serial, *offset))
}
}
}
let mut files: Vec<(LogFile, Option<LogEntrySerialNumber>)> = Vec::new();
for t in raw_files {
files.push((t.0, t.1.map(|x| x.0)));
}
let mut tx: Vec<RecoveredTx> = Vec::new();
let mut alloc: Vec<RecoveredAlloc> = Vec::new();
let mut last_entry_serial = LogEntrySerialNumber(0);
let mut last_entry_location = FileLocation {
| fd: libc::c_int,
pub len: usize,
pub max_size: usize,
pub file_uuid: uuid::Uuid
} | random_line_split |
|
log_file.rs | if size < (16 + STATIC_ENTRY_SIZE as usize) {
// Initialize
seek(fd, 0, libc::SEEK_SET)?;
unsafe {
libc::ftruncate(fd, 0);
}
let u = uuid::Uuid::new_v4();
write_bytes(fd, &u.as_bytes()[..])?;
}
let file_uuid = pread_uuid(fd, 0)?;
size = seek(fd, 0, libc::SEEK_END)?;
let last = find_last_valid_entry(fd, size, &file_uuid)?;
let lf = LogFile{
file_path: Box::new(p),
file_id,
fd,
len: size as usize,
max_size: max_file_size,
file_uuid
};
Ok((lf, last))
}
fn read(&self, offset: usize, nbytes: usize) -> Result<Data> {
let mut v = Vec::<u8>::with_capacity(nbytes);
if nbytes > 0 {
v.resize(nbytes, 0);
pread_bytes(self.fd, &mut v[..], offset)?;
}
Ok(Data::new(v))
}
pub(super) fn write(&mut self, data: &Vec<ArcDataSlice>) -> Result<()> {
let wsize: usize = data.iter().map(|d| d.len()).sum();
unsafe {
let iov: Vec<libc::iovec> = data.iter().map( |d| {
let p: *const u8 = &d.as_bytes()[0];
libc::iovec {
iov_base: p as *mut libc::c_void,
iov_len: d.len()
}
}).collect();
loop {
if libc::writev(self.fd, &iov[0], data.len() as libc::c_int) >= 0 {
break;
} else {
let err = Error::last_os_error();
match err.kind() {
ErrorKind::Interrupted => (),
_ => return {
warn!("Unexpected error occurred during CRL file write: {}", err);
Err(err)
}
}
}
}
if !( cfg!(target_os="linux") || cfg!(target_os="macos") ) {
libc::fsync(self.fd);
}
}
self.len += wsize;
Ok(())
}
pub(super) fn recycle(&mut self) -> Result<()> {
info!("Recycling {}", self);
seek(self.fd, 0, libc::SEEK_SET)?;
unsafe {
libc::ftruncate(self.fd, 0);
}
self.file_uuid = uuid::Uuid::new_v4();
self.len = 16;
write_bytes(self.fd, &self.file_uuid.as_bytes()[..])?;
Ok(())
}
}
fn pread_bytes(fd: libc::c_int, s: &mut [u8], offset: usize) -> Result<()> {
if s.len() == 0 {
Ok(())
} else {
let p: *mut u8 = &mut s[0];
unsafe {
if libc::pread(fd, p as *mut libc::c_void, s.len(), offset as libc::off_t) < 0 {
Err(Error::last_os_error())
} else {
Ok(())
}
}
}
}
fn pread_uuid(fd: libc::c_int, offset: usize) -> Result<uuid::Uuid> {
let mut buf: [u8; 16] = [0; 16];
pread_bytes(fd, &mut buf[..], offset)?;
Ok(uuid::Uuid::from_bytes(buf))
}
fn write_bytes(fd: libc::c_int, s: &[u8]) -> Result<()> {
let p: *const u8 = &s[0];
unsafe {
if libc::write(fd, p as *const libc::c_void, s.len()) < 0 {
return Err(Error::last_os_error());
}
libc::fsync(fd);
}
Ok(())
}
fn seek(fd: libc::c_int, offset: i64, whence: libc::c_int) -> Result<usize> {
unsafe {
let sz = libc::lseek(fd, offset, whence);
if sz < 0 {
Err(Error::last_os_error())
} else {
Ok(sz as usize)
}
}
}
fn find_last_valid_entry(
fd: libc::c_int,
file_size: usize,
file_uuid: &uuid::Uuid) -> Result<Option<(LogEntrySerialNumber, usize)>> {
let mut offset = file_size - (file_size % 4096);
let mut last = None;
while offset > 32 && last.is_none() {
let test_uuid = pread_uuid(fd, offset - 16)?;
if test_uuid == *file_uuid {
let entry_offset = offset - STATIC_ENTRY_SIZE as usize;
let mut serial_bytes: [u8; 8] = [0; 8];
pread_bytes(fd, &mut serial_bytes[..], entry_offset)?;
let serial = u64::from_le_bytes(serial_bytes);
last = Some((LogEntrySerialNumber(serial), entry_offset));
break;
}
offset -= 4096;
}
//println!("LAST: {:?} file size {} offset {}", last, file_size, (file_size - (file_size % 4096)));
Ok(last)
}
pub(super) fn recover(
crl_directory: &Path,
max_file_size: usize,
num_streams: usize) -> Result<RecoveredCrlState> {
let mut raw_files = Vec::<(LogFile, Option<(LogEntrySerialNumber, usize)>)>::new();
for i in 0 .. num_streams * 3 {
let f = LogFile::new(crl_directory, FileId(i as u16), max_file_size)?;
raw_files.push(f);
}
let mut last: Option<(FileId, LogEntrySerialNumber, usize)> = None;
for t in &raw_files {
if let Some((serial, offset)) = &t.1 {
if let Some((_, cur_serial, _)) = &last {
if serial > cur_serial {
last = Some((t.0.file_id, *serial, *offset));
}
} else {
last = Some((t.0.file_id, *serial, *offset))
}
}
}
let mut files: Vec<(LogFile, Option<LogEntrySerialNumber>)> = Vec::new();
for t in raw_files {
files.push((t.0, t.1.map(|x| x.0)));
}
let mut tx: Vec<RecoveredTx> = Vec::new();
let mut alloc: Vec<RecoveredAlloc> = Vec::new();
let mut last_entry_serial = LogEntrySerialNumber(0);
let mut last_entry_location = FileLocation {
file_id: FileId(0),
offset: 0,
length: 0
};
if let Some((last_file_id, last_serial, last_offset)) = last {
last_entry_serial = last_serial;
last_entry_location = FileLocation {
file_id: last_file_id,
offset: last_offset as u64,
length: STATIC_ENTRY_SIZE as u32
};
let mut transactions: HashMap<TxId, RecoveringTx> = HashMap::new();
let mut allocations: HashMap<TxId, RecoveringAlloc> = HashMap::new();
let mut deleted_tx: HashSet<TxId> = HashSet::new();
let mut deleted_alloc: HashSet<TxId> = HashSet::new();
let mut file_id = last_file_id;
let mut entry_serial = last_serial;
let mut entry_block_offset = last_offset;
let earliest_serial_needed = {
let mut d = files[last_file_id.0 as usize].0.read(last_offset, STATIC_ENTRY_SIZE as usize)?;
let entry = encoding::decode_entry(&mut d)?;
LogEntrySerialNumber(entry.earliest_needed)
};
while entry_serial >= earliest_serial_needed {
let file = &files[file_id.0 as usize].0;
let mut d = file.read(entry_block_offset, STATIC_ENTRY_SIZE as usize)?;
let mut entry = encoding::decode_entry(&mut d)?;
entry_serial = entry.serial;
//println!("Reading Entry {:?} entry_block_offset {} entry offset {}", entry_serial, entry_block_offset, entry.entry_offset);
let entry_data_size = entry_block_offset - entry.entry_offset as usize;
let mut entry_data = file.read(entry.entry_offset as usize, entry_data_size)?;
encoding::load_entry_data(&mut entry_data, &mut entry, entry_serial)?;
for txid in &entry.tx_deletions {
deleted_tx.insert(*txid);
}
for txid in &entry.alloc_deletions {
deleted_alloc.insert(*txid);
}
for rtx in entry.transactions {
if ! deleted_tx.contains(&rtx.id) && ! transactions.contains_key(&rtx.id) {
transactions.insert(rtx.id, rtx);
}
}
for ra in entry.allocations {
if ! deleted_alloc.contains(&ra.id) && ! allocations.contains_key(&ra.id) {
allocations.insert(ra.id, ra);
}
}
if entry.previous_entry_location.offset < 16 | {
break; // Cannot have an offset of 0 (first 16 bytes of the file are the UUID)
} | conditional_block |
|
log_file.rs | }
fd
}
}
#[cfg(not(any(target_os="linux", target_os="macos")))]
fn open_synchronous_fd(path: &CString) -> libc::c_int {
unsafe {
libc::open(path.as_ptr(), libc::O_CREAT | libc::O_RDWR)
}
}
impl LogFile {
fn new(
directory: &Path,
file_id: FileId,
max_file_size: usize) -> Result<(LogFile, Option<(LogEntrySerialNumber, usize)>)> {
let f = format!("{}", file_id.0);
let p = directory.join(f);
let fp = p.as_path();
let fd = open_synchronous_fd(&CString::new(fp.as_os_str().as_bytes()).unwrap());
if fd < 0 {
error!("Failed to open CRL file {}", fp.display());
return Err(Error::last_os_error());
}
let mut size = seek(fd, 0, libc::SEEK_END)?;
if size < (16 + STATIC_ENTRY_SIZE as usize) {
// Initialize
seek(fd, 0, libc::SEEK_SET)?;
unsafe {
libc::ftruncate(fd, 0);
}
let u = uuid::Uuid::new_v4();
write_bytes(fd, &u.as_bytes()[..])?;
}
let file_uuid = pread_uuid(fd, 0)?;
size = seek(fd, 0, libc::SEEK_END)?;
let last = find_last_valid_entry(fd, size, &file_uuid)?;
let lf = LogFile{
file_path: Box::new(p),
file_id,
fd,
len: size as usize,
max_size: max_file_size,
file_uuid
};
Ok((lf, last))
}
fn read(&self, offset: usize, nbytes: usize) -> Result<Data> {
let mut v = Vec::<u8>::with_capacity(nbytes);
if nbytes > 0 {
v.resize(nbytes, 0);
pread_bytes(self.fd, &mut v[..], offset)?;
}
Ok(Data::new(v))
}
pub(super) fn write(&mut self, data: &Vec<ArcDataSlice>) -> Result<()> {
let wsize: usize = data.iter().map(|d| d.len()).sum();
unsafe {
let iov: Vec<libc::iovec> = data.iter().map( |d| {
let p: *const u8 = &d.as_bytes()[0];
libc::iovec {
iov_base: p as *mut libc::c_void,
iov_len: d.len()
}
}).collect();
loop {
if libc::writev(self.fd, &iov[0], data.len() as libc::c_int) >= 0 {
break;
} else {
let err = Error::last_os_error();
match err.kind() {
ErrorKind::Interrupted => (),
_ => return {
warn!("Unexpected error occurred during CRL file write: {}", err);
Err(err)
}
}
}
}
if !( cfg!(target_os="linux") || cfg!(target_os="macos") ) {
libc::fsync(self.fd);
}
}
self.len += wsize;
Ok(())
}
pub(super) fn recycle(&mut self) -> Result<()> {
info!("Recycling {}", self);
seek(self.fd, 0, libc::SEEK_SET)?;
unsafe {
libc::ftruncate(self.fd, 0);
}
self.file_uuid = uuid::Uuid::new_v4();
self.len = 16;
write_bytes(self.fd, &self.file_uuid.as_bytes()[..])?;
Ok(())
}
}
fn pread_bytes(fd: libc::c_int, s: &mut [u8], offset: usize) -> Result<()> {
if s.len() == 0 {
Ok(())
} else {
let p: *mut u8 = &mut s[0];
unsafe {
if libc::pread(fd, p as *mut libc::c_void, s.len(), offset as libc::off_t) < 0 {
Err(Error::last_os_error())
} else {
Ok(())
}
}
}
}
fn pread_uuid(fd: libc::c_int, offset: usize) -> Result<uuid::Uuid> {
let mut buf: [u8; 16] = [0; 16];
pread_bytes(fd, &mut buf[..], offset)?;
Ok(uuid::Uuid::from_bytes(buf))
}
fn write_bytes(fd: libc::c_int, s: &[u8]) -> Result<()> |
fn seek(fd: libc::c_int, offset: i64, whence: libc::c_int) -> Result<usize> {
unsafe {
let sz = libc::lseek(fd, offset, whence);
if sz < 0 {
Err(Error::last_os_error())
} else {
Ok(sz as usize)
}
}
}
fn find_last_valid_entry(
fd: libc::c_int,
file_size: usize,
file_uuid: &uuid::Uuid) -> Result<Option<(LogEntrySerialNumber, usize)>> {
let mut offset = file_size - (file_size % 4096);
let mut last = None;
while offset > 32 && last.is_none() {
let test_uuid = pread_uuid(fd, offset - 16)?;
if test_uuid == *file_uuid {
let entry_offset = offset - STATIC_ENTRY_SIZE as usize;
let mut serial_bytes: [u8; 8] = [0; 8];
pread_bytes(fd, &mut serial_bytes[..], entry_offset)?;
let serial = u64::from_le_bytes(serial_bytes);
last = Some((LogEntrySerialNumber(serial), entry_offset));
break;
}
offset -= 4096;
}
//println!("LAST: {:?} file size {} offset {}", last, file_size, (file_size - (file_size % 4096)));
Ok(last)
}
pub(super) fn recover(
crl_directory: &Path,
max_file_size: usize,
num_streams: usize) -> Result<RecoveredCrlState> {
let mut raw_files = Vec::<(LogFile, Option<(LogEntrySerialNumber, usize)>)>::new();
for i in 0 .. num_streams * 3 {
let f = LogFile::new(crl_directory, FileId(i as u16), max_file_size)?;
raw_files.push(f);
}
let mut last: Option<(FileId, LogEntrySerialNumber, usize)> = None;
for t in &raw_files {
if let Some((serial, offset)) = &t.1 {
if let Some((_, cur_serial, _)) = &last {
if serial > cur_serial {
last = Some((t.0.file_id, *serial, *offset));
}
} else {
last = Some((t.0.file_id, *serial, *offset))
}
}
}
let mut files: Vec<(LogFile, Option<LogEntrySerialNumber>)> = Vec::new();
for t in raw_files {
files.push((t.0, t.1.map(|x| x.0)));
}
let mut tx: Vec<RecoveredTx> = Vec::new();
let mut alloc: Vec<RecoveredAlloc> = Vec::new();
let mut last_entry_serial = LogEntrySerialNumber(0);
let mut last_entry_location = FileLocation {
file_id: FileId(0),
offset: 0,
length: 0
};
if let Some((last_file_id, last_serial, last_offset)) = last {
last_entry_serial = last_serial;
last_entry_location = FileLocation {
file_id: last_file_id,
offset: last_offset as u64,
length: STATIC_ENTRY_SIZE as u32
};
let mut transactions: HashMap<TxId, RecoveringTx> = HashMap::new();
let mut allocations: HashMap<TxId, RecoveringAlloc> = HashMap::new();
let mut deleted_tx: HashSet<TxId> = HashSet::new();
let mut deleted_alloc: HashSet<TxId> = HashSet::new();
let mut file_id = last_file_id;
let mut entry_serial = last_serial;
let mut entry_block_offset = last_offset;
let earliest_serial_needed = {
let mut d = files[last_file_id.0 as usize].0.read(last_offset, STATIC_ENTRY_SIZE as usize)?;
let entry = encoding::decode_entry(&mut d)?;
LogEntrySerialNumber(entry.earliest_needed)
};
while entry_serial >= earliest_serial_needed {
let file = &files[file_id.0 as usize].0;
let mut d = file.read(entry_block_offset, STATIC_ENTRY_SIZE as usize)?;
let mut entry = encoding::decode_entry(&mut d)?;
entry_serial = entry.serial;
//println!("Reading Entry {:?} entry_block_offset {} entry offset {}", entry_serial, entry_block_offset, entry.entry_offset);
let entry_data_size = entry_block | {
let p: *const u8 = &s[0];
unsafe {
if libc::write(fd, p as *const libc::c_void, s.len()) < 0 {
return Err(Error::last_os_error());
}
libc::fsync(fd);
}
Ok(())
} | identifier_body |
sketch.js | p = new Particle(random() * width, random() * height);
p.px += random() * 2 - 1;
p.py += random() * 2 - 1;
particles.push(p);
}
constrainPoints();
}
function draw() {
background(125);
updateParticles();
for (let i = 0; i < STEPS; i++) {
updateConstraints();
for (let body1 of bodies) {
body1.calculateBBox();
for (let body2 of bodies) {
if (body1 === body2)
continue;
if (physics.detectCollision(body1, body2))
physics.processCollision();
}
}
constrainPoints();
}
buildGrid();
if (pointDragging) {
if (currP) {
currP.x = mouseX;
currP.y = mouseY;
} else {
currP = getParticleAt(mouseX, mouseY);
}
} else {
currP = null;
}
stroke(100);
for (let x = 0; x < grid_w; x++) {
line(x * GRID_SIZE, 0, x * GRID_SIZE, height);
}
for (let y = 0; y < grid_h; y++) {
line(0, y * GRID_SIZE, width, y * GRID_SIZE);
}
if (drawFill) {
for (let i = 0; i < bodies.length; i++) {
let body = bodies[i];
fill((i * 10) % 255, (i * 5) % 255, (254 - i * 5) % 255);
beginShape();
for (let point of body.vertices) {
vertex(point.x, point.y);
}
endShape();
}
}
// Draw the constraints
stroke(0);
for (let i = 0; i < constraints.length; i++) {
let c = constraints[i];
line(c.p1.x, c.p1.y, c.p2.x, c.p2.y);
}
noStroke();
// Draw the points
if (drawPoints) {
fill(255, 255, 0);
for (let i = 0; i < particles.length; i++) {
rect(particles[i].x - SIZE_D2, particles[i].y - SIZE_D2, SIZE, SIZE);
}
}
if (showDebugText) {
fill(255);
text('Particles: ' + particles.length + ' | Constraints: ' + constraints.length, 12, 12);
text('Gravity: ' + gravity.x + ', ' + gravity.y, 12, 24);
text('FPS: ' + frameRate(), 12, 38);
text('Delta: ' + deltaTime, 12, 50);
text('Dragging: ' + pointDragging, 12, 64);
}
}
function mousePressed() {
if (!mouseInsideSketch ||
mouseX < 0 || mouseX >= width ||
mouseY < 0 || mouseY >= height)
return;
if (toolType == TTYPE_DRAG) {
pointDragging = true;
} else if (toolType == TTYPE_TRIANGLE) {
createTriangle(mouseX, mouseY, 25 + random(100));
} else if (toolType == TTYPE_SQUARE) {
createBox(mouseX, mouseY, 25 + random(100));
}
if (isPaused)
redraw();
// let p = new Particle(mouseX, mouseY);
// p.px += random() * 2 - 1;
// p.py += random() * 2 - 1;
// constraints.push(new Constraint(particles[particles.length - 1], p, random() * 10 + 10));
// particles.push(p);
}
function mouseDragged() {
if (!mouseInsideSketch ||
mouseX < 0 || mouseX >= width ||
mouseY < 0 || mouseY >= height)
return;
if (toolType == TTYPE_DRAG) {
pointDragging = true;
}
}
function mouseReleased() {
mouseInsideSketch = true;
pointDragging = false;
}
function windowResized() {
resizeCanvas(windowWidth, windowHeight);
buildGrid();
}
function buildGrid() {
grid = [];
grid_w = Math.ceil(width / GRID_SIZE);
grid_h = Math.ceil(height / GRID_SIZE);
for (let i = 0; i < grid_w * grid_h; i++)
grid.push([]);
for (let i = 0; i < particles.length; i++) {
let cx = floor(particles[i].x / GRID_SIZE);
let cy = floor(particles[i].y / GRID_SIZE);
if (cx < 0 || cx >= grid_w || cy < 0 || cy >= grid_h)
continue;
grid[cx + cy * grid_w].push(particles[i]);
}
}
function getParticleAt(x, y) {
let cx = floor(x / GRID_SIZE);
let cy = floor(y / GRID_SIZE);
for (let x0 = cx - 1; x0 < cx + 1; x0++) {
for (let y0 = cy - 1; y0 < cy + 1; y0++) {
if (x0 < 0 || x0 >= grid_w || y0 < 0 || y0 >= grid_h)
continue;
let cell = grid[x0 + y0 * grid_w];
for (let i = 0; i < cell.length; i++) {
let pDistX = (cell[i].x - x);
let pDistY = (cell[i].y - y);
if (pDistX * pDistX + pDistY * pDistY < dragDist)
return cell[i];
}
}
}
return null;
}
function updateParticles() {
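// Position Verlet integration: each particle's velocity is implicit in the
// difference between its current and previous positions, and gravity is
// applied as a constant per-step acceleration.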
for (let i = 0; i < particles.length; i++) {
let p = particles[i];
let old_x = p.x;
let old_y = p.y;
if (p.invmass > 0) {
p.x += gravity.x;
p.y += gravity.y;
p.x += (p.x - p.px);
p.y += (p.y - p.py);
}
p.px = old_x;
p.py = old_y;
}
}
function | () {
let constToBeRemoved = [];
for (let i = 0; i < constraints.length; i++) {
let c = constraints[i];
if (!c.p1 || !c.p2)
continue;
let dx = c.p1.x - c.p2.x;
let dy = c.p1.y - c.p2.y;
if (dx == 0 && dy == 0) {
dx += Math.random() * 0.1;
dy += Math.random() * 0.1;
}
// let d = Math.sqrt((dx * dx) + (dy * dy));
// if (!c.pushing && d < c.l)
// continue;
// if (canTear) {
// let tearStr = c.l * tearMult;
// if (d > tearStr) {
// constraints[i] = constraints[constraints.length - 1];
// i--;
// constraints.pop();
// continue;
// }
// }
// let percent = ((d - c.l) *
// (c.p1.invmass + c.p2.invmass)) /
// d;
// Squared dist for optimization
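// (using squared lengths avoids a sqrt per constraint, at the cost of only
// approximating the exact length correction)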
let dSq = (dx * dx) + (dy * dy);
if (!c.pushing && dSq < c.lSq)
continue;
if (canTear && c.canTear) {
// let tearStrSq = c.lSq * tearMult;
if (dSq > tearStrSq) {
constraints[i] = constraints[constraints.length - 1];
i--;
constraints.pop();
continue;
}
}
let percent = ((dSq - c.lSq) *
(c.p1.invmass + c.p2.invmass)) /
dSq;
dx *= percent;
dy *= percent;
c.p1.x -= dx * c.p1.invmass;
c.p1.y -= dy * c.p1.invmass;
c.p2.x += dx * c.p2.invmass;
c.p2.y += dy * c.p2.invmass;
}
}
function constrainPoints() {
for (let i = 0; i < particles.length; i++) {
let p = particles[i];
if (p.x < SIZE) {
p.x = SIZE;
} else if (p.x >= width - SIZE) {
p.x = width - SIZE;
}
if (p.y < SIZE) {
p.y = SIZE;
} else if (p.y >= height - SIZE) {
p.x -= (p.y - height + SIZE) * (p.x - p.px) * this.physics.friction;
p | updateConstraints | identifier_name |
sketch.js | Points();
}
function draw() {
background(125);
updateParticles();
for (let i = 0; i < STEPS; i++) {
updateConstraints();
for (let body1 of bodies) {
body1.calculateBBox();
for (let body2 of bodies) {
if (body1 === body2)
continue;
if (physics.detectCollision(body1, body2))
physics.processCollision();
}
}
constrainPoints();
}
buildGrid();
if (pointDragging) {
if (currP) {
currP.x = mouseX;
currP.y = mouseY;
} else {
currP = getParticleAt(mouseX, mouseY);
}
} else {
currP = null;
}
stroke(100);
for (let x = 0; x < grid_w; x++) {
line(x * GRID_SIZE, 0, x * GRID_SIZE, height);
}
for (let y = 0; y < grid_h; y++) {
line(0, y * GRID_SIZE, width, y * GRID_SIZE);
}
if (drawFill) {
for (let i = 0; i < bodies.length; i++) {
let body = bodies[i];
fill((i * 10) % 255, (i * 5) % 255, (254 - i * 5) % 255);
beginShape();
for (let point of body.vertices) {
vertex(point.x, point.y);
}
endShape();
}
}
// Draw the constraints
stroke(0);
for (let i = 0; i < constraints.length; i++) {
let c = constraints[i];
line(c.p1.x, c.p1.y, c.p2.x, c.p2.y);
}
noStroke();
// Draw the points
if (drawPoints) {
fill(255, 255, 0);
for (let i = 0; i < particles.length; i++) {
rect(particles[i].x - SIZE_D2, particles[i].y - SIZE_D2, SIZE, SIZE);
}
}
if (showDebugText) {
fill(255);
text('Particles: ' + particles.length + ' | Constraints: ' + constraints.length, 12, 12);
text('Gravity: ' + gravity.x + ', ' + gravity.y, 12, 24);
text('FPS: ' + frameRate(), 12, 38);
text('Delta: ' + deltaTime, 12, 50);
text('Dragging: ' + pointDragging, 12, 64);
}
}
function mousePressed() {
if (!mouseInsideSketch ||
mouseX < 0 || mouseX >= width ||
mouseY < 0 || mouseY >= height)
return;
if (toolType == TTYPE_DRAG) {
pointDragging = true;
} else if (toolType == TTYPE_TRIANGLE) {
createTriangle(mouseX, mouseY, 25 + random(100));
} else if (toolType == TTYPE_SQUARE) {
createBox(mouseX, mouseY, 25 + random(100));
}
if (isPaused)
redraw();
// let p = new Particle(mouseX, mouseY);
// p.px += random() * 2 - 1;
// p.py += random() * 2 - 1;
// constraints.push(new Constraint(particles[particles.length - 1], p, random() * 10 + 10));
// particles.push(p);
}
function mouseDragged() {
if (!mouseInsideSketch ||
mouseX < 0 || mouseX >= width ||
mouseY < 0 || mouseY >= height)
return;
if (toolType == TTYPE_DRAG) {
pointDragging = true;
}
}
function mouseReleased() {
mouseInsideSketch = true;
pointDragging = false;
}
function windowResized() {
resizeCanvas(windowWidth, windowHeight);
buildGrid();
}
function buildGrid() {
grid = [];
grid_w = Math.ceil(width / GRID_SIZE);
grid_h = Math.ceil(height / GRID_SIZE);
for (let i = 0; i < grid_w * grid_h; i++)
grid.push([]);
for (let i = 0; i < particles.length; i++) {
let cx = floor(particles[i].x / GRID_SIZE);
let cy = floor(particles[i].y / GRID_SIZE);
if (cx < 0 || cx >= grid_w || cy < 0 || cy >= grid_h)
continue;
grid[cx + cy * grid_w].push(particles[i]);
}
}
function getParticleAt(x, y) {
let cx = floor(x / GRID_SIZE);
let cy = floor(y / GRID_SIZE);
for (let x0 = cx - 1; x0 < cx + 1; x0++) {
for (let y0 = cy - 1; y0 < cy + 1; y0++) {
if (x0 < 0 || x0 >= grid_w || y0 < 0 || y0 >= grid_h)
continue;
let cell = grid[x0 + y0 * grid_w];
for (let i = 0; i < cell.length; i++) {
let pDistX = (cell[i].x - x);
let pDistY = (cell[i].y - y);
if (pDistX * pDistX + pDistY * pDistY < dragDist)
return cell[i];
}
}
}
return null;
}
function updateParticles() {
for (let i = 0; i < particles.length; i++) {
let p = particles[i];
let old_x = p.x;
let old_y = p.y;
if (p.invmass > 0) {
p.x += gravity.x;
p.y += gravity.y;
p.x += (p.x - p.px);
p.y += (p.y - p.py);
}
p.px = old_x;
p.py = old_y;
}
}
function updateConstraints() {
let constToBeRemoved = [];
for (let i = 0; i < constraints.length; i++) {
let c = constraints[i];
if (!c.p1 || !c.p2)
continue;
let dx = c.p1.x - c.p2.x;
let dy = c.p1.y - c.p2.y;
if (dx == 0 && dy == 0) {
dx += Math.random() * 0.1;
dy += Math.random() * 0.1;
}
// let d = Math.sqrt((dx * dx) + (dy * dy));
// if (!c.pushing && d < c.l)
// continue;
// if (canTear) {
// let tearStr = c.l * tearMult;
// if (d > tearStr) {
// constraints[i] = constraints[constraints.length - 1];
// i--;
// constraints.pop();
// continue;
// }
// }
// let percent = ((d - c.l) *
// (c.p1.invmass + c.p2.invmass)) /
// d;
// Squared dist for optimization
let dSq = (dx * dx) + (dy * dy);
if (!c.pushing && dSq < c.lSq)
continue;
if (canTear && c.canTear) {
// let tearStrSq = c.lSq * tearMult;
if (dSq > tearStrSq) {
constraints[i] = constraints[constraints.length - 1];
i--;
constraints.pop();
continue;
}
}
let percent = ((dSq - c.lSq) *
(c.p1.invmass + c.p2.invmass)) /
dSq;
dx *= percent;
dy *= percent;
c.p1.x -= dx * c.p1.invmass;
c.p1.y -= dy * c.p1.invmass;
c.p2.x += dx * c.p2.invmass;
c.p2.y += dy * c.p2.invmass;
}
}
function constrainPoints() {
for (let i = 0; i < particles.length; i++) {
let p = particles[i];
if (p.x < SIZE) {
p.x = SIZE;
} else if (p.x >= width - SIZE) {
p.x = width - SIZE;
}
if (p.y < SIZE) {
p.y = SIZE;
} else if (p.y >= height - SIZE) {
p.x -= (p.y - height + SIZE) * (p.x - p.px) * this.physics.friction;
p.y = height - SIZE;
}
}
}
function Particle(x, y) | {
this.x = x;
this.y = y;
this.px = x;
this.py = y;
this.invmass = 0.3;
} | identifier_body |
|
sketch.js | random() * 2 - 1;
p.py += random() * 2 - 1;
particles.push(p);
}
constrainPoints();
}
function draw() {
background(125);
updateParticles();
for (let i = 0; i < STEPS; i++) {
updateConstraints();
for (let body1 of bodies) {
body1.calculateBBox();
for (let body2 of bodies) {
if (body1 === body2)
continue;
if (physics.detectCollision(body1, body2))
physics.processCollision();
}
}
constrainPoints();
}
buildGrid();
if (pointDragging) {
if (currP) {
currP.x = mouseX;
currP.y = mouseY;
} else {
currP = getParticleAt(mouseX, mouseY);
}
} else {
currP = null;
}
stroke(100);
for (let x = 0; x < grid_w; x++) {
line(x * GRID_SIZE, 0, x * GRID_SIZE, height);
}
for (let y = 0; y < grid_h; y++) {
line(0, y * GRID_SIZE, width, y * GRID_SIZE);
}
if (drawFill) {
for (let i = 0; i < bodies.length; i++) {
let body = bodies[i];
fill((i * 10) % 255, (i * 5) % 255, (254 - i * 5) % 255);
beginShape();
for (let point of body.vertices) {
vertex(point.x, point.y);
}
endShape();
}
}
// Draw the constraints
stroke(0);
for (let i = 0; i < constraints.length; i++) {
let c = constraints[i];
line(c.p1.x, c.p1.y, c.p2.x, c.p2.y);
}
noStroke();
// Draw the points
if (drawPoints) {
fill(255, 255, 0);
for (let i = 0; i < particles.length; i++) {
rect(particles[i].x - SIZE_D2, particles[i].y - SIZE_D2, SIZE, SIZE);
}
}
if (showDebugText) {
fill(255);
text('Particles: ' + particles.length + ' | Constraints: ' + constraints.length, 12, 12);
text('Gravity: ' + gravity.x + ', ' + gravity.y, 12, 24);
text('FPS: ' + frameRate(), 12, 38);
text('Delta: ' + deltaTime, 12, 50);
text('Dragging: ' + pointDragging, 12, 64);
}
}
function mousePressed() {
if (!mouseInsideSketch ||
mouseX < 0 || mouseX >= width ||
mouseY < 0 || mouseY >= height)
return;
if (toolType == TTYPE_DRAG) {
pointDragging = true;
} else if (toolType == TTYPE_TRIANGLE) {
createTriangle(mouseX, mouseY, 25 + random(100));
} else if (toolType == TTYPE_SQUARE) {
createBox(mouseX, mouseY, 25 + random(100));
}
if (isPaused)
redraw();
// let p = new Particle(mouseX, mouseY);
// p.px += random() * 2 - 1;
// p.py += random() * 2 - 1;
// constraints.push(new Constraint(particles[particles.length - 1], p, random() * 10 + 10));
// particles.push(p);
}
function mouseDragged() {
if (!mouseInsideSketch ||
mouseX < 0 || mouseX >= width ||
mouseY < 0 || mouseY >= height)
return;
if (toolType == TTYPE_DRAG) {
pointDragging = true;
}
}
function mouseReleased() {
mouseInsideSketch = true;
pointDragging = false;
}
function windowResized() {
resizeCanvas(windowWidth, windowHeight);
buildGrid();
}
function buildGrid() {
grid = [];
grid_w = Math.ceil(width / GRID_SIZE);
grid_h = Math.ceil(height / GRID_SIZE);
for (let i = 0; i < grid_w * grid_h; i++)
grid.push([]);
for (let i = 0; i < particles.length; i++) {
let cx = floor(particles[i].x / GRID_SIZE);
let cy = floor(particles[i].y / GRID_SIZE);
if (cx < 0 || cx >= grid_w || cy < 0 || cy >= grid_h)
continue;
grid[cx + cy * grid_w].push(particles[i]);
}
}
function getParticleAt(x, y) {
let cx = floor(x / GRID_SIZE);
let cy = floor(y / GRID_SIZE);
for (let x0 = cx - 1; x0 < cx + 1; x0++) {
for (let y0 = cy - 1; y0 < cy + 1; y0++) {
if (x0 < 0 || x0 >= grid_w || y0 < 0 || y0 >= grid_h)
continue;
let cell = grid[x0 + y0 * grid_w];
for (let i = 0; i < cell.length; i++) {
let pDistX = (cell[i].x - x);
let pDistY = (cell[i].y - y);
if (pDistX * pDistX + pDistY * pDistY < dragDist)
return cell[i];
}
}
}
return null;
}
function updateParticles() {
for (let i = 0; i < particles.length; i++) {
let p = particles[i];
let old_x = p.x;
let old_y = p.y;
if (p.invmass > 0) {
p.x += gravity.x;
p.y += gravity.y;
p.x += (p.x - p.px);
p.y += (p.y - p.py);
}
p.px = old_x;
p.py = old_y;
}
}
function updateConstraints() {
let constToBeRemoved = [];
for (let i = 0; i < constraints.length; i++) {
let c = constraints[i];
if (!c.p1 || !c.p2)
continue;
let dx = c.p1.x - c.p2.x;
let dy = c.p1.y - c.p2.y;
if (dx == 0 && dy == 0) {
dx += Math.random() * 0.1;
dy += Math.random() * 0.1;
}
// let d = Math.sqrt((dx * dx) + (dy * dy));
// if (!c.pushing && d < c.l)
// continue;
// if (canTear) {
// let tearStr = c.l * tearMult;
// if (d > tearStr) {
// constraints[i] = constraints[constraints.length - 1];
// i--;
// constraints.pop();
// continue;
// }
// }
// let percent = ((d - c.l) *
// (c.p1.invmass + c.p2.invmass)) /
// d;
// Squared dist for optimization
let dSq = (dx * dx) + (dy * dy);
if (!c.pushing && dSq < c.lSq)
continue;
if (canTear && c.canTear) {
// let tearStrSq = c.lSq * tearMult;
if (dSq > tearStrSq) {
constraints[i] = constraints[constraints.length - 1];
i--;
constraints.pop();
continue;
}
}
let percent = ((dSq - c.lSq) *
(c.p1.invmass + c.p2.invmass)) /
dSq;
dx *= percent;
dy *= percent;
c.p1.x -= dx * c.p1.invmass;
c.p1.y -= dy * c.p1.invmass;
c.p2.x += dx * c.p2.invmass;
c.p2.y += dy * c.p2.invmass;
}
}
function constrainPoints() {
for (let i = 0; i < particles.length; i++) {
let p = particles[i];
if (p.x < SIZE) {
p.x = SIZE;
} else if (p.x >= width - SIZE) {
p.x = width - SIZE;
}
if (p.y < SIZE) {
p.y = SIZE;
} else if (p.y >= height - SIZE) {
p.x -= (p.y - height + SIZE) * (p.x - p.px) * this.physics.friction;
p.y = height - SIZE;
}
}
}
| function Particle(x, y) { | random_line_split |
|
sketch.js | = new Particle(random() * width, random() * height);
p.px += random() * 2 - 1;
p.py += random() * 2 - 1;
particles.push(p);
}
constrainPoints();
}
function draw() {
background(125);
updateParticles();
for (let i = 0; i < STEPS; i++) {
updateConstraints();
for (let body1 of bodies) {
body1.calculateBBox();
for (let body2 of bodies) {
if (body1 === body2)
continue;
if (physics.detectCollision(body1, body2))
physics.processCollision();
}
}
constrainPoints();
}
buildGrid();
if (pointDragging) {
if (currP) {
currP.x = mouseX;
currP.y = mouseY;
} else {
currP = getParticleAt(mouseX, mouseY);
}
} else {
currP = null;
}
stroke(100);
for (let x = 0; x < grid_w; x++) {
line(x * GRID_SIZE, 0, x * GRID_SIZE, height);
}
for (let y = 0; y < grid_h; y++) {
line(0, y * GRID_SIZE, width, y * GRID_SIZE);
}
if (drawFill) {
for (let i = 0; i < bodies.length; i++) {
let body = bodies[i];
fill((i * 10) % 255, (i * 5) % 255, (254 - i * 5) % 255);
beginShape();
for (let point of body.vertices) {
vertex(point.x, point.y);
}
endShape();
}
}
// Draw the constraints
stroke(0);
for (let i = 0; i < constraints.length; i++) {
let c = constraints[i];
line(c.p1.x, c.p1.y, c.p2.x, c.p2.y);
}
noStroke();
// Draw the points
if (drawPoints) |
if (showDebugText) {
fill(255);
text('Particles: ' + particles.length + ' | Constraints: ' + constraints.length, 12, 12);
text('Gravity: ' + gravity.x + ', ' + gravity.y, 12, 24);
text('FPS: ' + frameRate(), 12, 38);
text('Delta: ' + deltaTime, 12, 50);
text('Dragging: ' + pointDragging, 12, 64);
}
}
function mousePressed() {
if (!mouseInsideSketch ||
mouseX < 0 || mouseX >= width ||
mouseY < 0 || mouseY >= height)
return;
if (toolType == TTYPE_DRAG) {
pointDragging = true;
} else if (toolType == TTYPE_TRIANGLE) {
createTriangle(mouseX, mouseY, 25 + random(100));
} else if (toolType == TTYPE_SQUARE) {
createBox(mouseX, mouseY, 25 + random(100));
}
if (isPaused)
redraw();
// let p = new Particle(mouseX, mouseY);
// p.px += random() * 2 - 1;
// p.py += random() * 2 - 1;
// constraints.push(new Constraint(particles[particles.length - 1], p, random() * 10 + 10));
// particles.push(p);
}
function mouseDragged() {
if (!mouseInsideSketch ||
mouseX < 0 || mouseX >= width ||
mouseY < 0 || mouseY >= height)
return;
if (toolType == TTYPE_DRAG) {
pointDragging = true;
}
}
function mouseReleased() {
mouseInsideSketch = true;
pointDragging = false;
}
function windowResized() {
resizeCanvas(windowWidth, windowHeight);
buildGrid();
}
function buildGrid() {
grid = [];
grid_w = Math.ceil(width / GRID_SIZE);
grid_h = Math.ceil(height / GRID_SIZE);
for (let i = 0; i < grid_w * grid_h; i++)
grid.push([]);
for (let i = 0; i < particles.length; i++) {
let cx = floor(particles[i].x / GRID_SIZE);
let cy = floor(particles[i].y / GRID_SIZE);
if (cx < 0 || cx >= grid_w || cy < 0 || cy >= grid_h)
continue;
grid[cx + cy * grid_w].push(particles[i]);
}
}
function getParticleAt(x, y) {
let cx = floor(x / GRID_SIZE);
let cy = floor(y / GRID_SIZE);
for (let x0 = cx - 1; x0 < cx + 1; x0++) {
for (let y0 = cy - 1; y0 < cy + 1; y0++) {
if (x0 < 0 || x0 >= grid_w || y0 < 0 || y0 >= grid_h)
continue;
let cell = grid[x0 + y0 * grid_w];
for (let i = 0; i < cell.length; i++) {
let pDistX = (cell[i].x - x);
let pDistY = (cell[i].y - y);
if (pDistX * pDistX + pDistY * pDistY < dragDist)
return cell[i];
}
}
}
return null;
}
function updateParticles() {
for (let i = 0; i < particles.length; i++) {
let p = particles[i];
let old_x = p.x;
let old_y = p.y;
if (p.invmass > 0) {
p.x += gravity.x;
p.y += gravity.y;
p.x += (p.x - p.px);
p.y += (p.y - p.py);
}
p.px = old_x;
p.py = old_y;
}
}
function updateConstraints() {
let constToBeRemoved = [];
for (let i = 0; i < constraints.length; i++) {
let c = constraints[i];
if (!c.p1 || !c.p2)
continue;
let dx = c.p1.x - c.p2.x;
let dy = c.p1.y - c.p2.y;
if (dx == 0 && dy == 0) {
dx += Math.random() * 0.1;
dy += Math.random() * 0.1;
}
// let d = Math.sqrt((dx * dx) + (dy * dy));
// if (!c.pushing && d < c.l)
// continue;
// if (canTear) {
// let tearStr = c.l * tearMult;
// if (d > tearStr) {
// constraints[i] = constraints[constraints.length - 1];
// i--;
// constraints.pop();
// continue;
// }
// }
// let percent = ((d - c.l) *
// (c.p1.invmass + c.p2.invmass)) /
// d;
// Squared dist for optimization
let dSq = (dx * dx) + (dy * dy);
if (!c.pushing && dSq < c.lSq)
continue;
if (canTear && c.canTear) {
// let tearStrSq = c.lSq * tearMult;
if (dSq > tearStrSq) {
constraints[i] = constraints[constraints.length - 1];
i--;
constraints.pop();
continue;
}
}
let percent = ((dSq - c.lSq) *
(c.p1.invmass + c.p2.invmass)) /
dSq;
dx *= percent;
dy *= percent;
c.p1.x -= dx * c.p1.invmass;
c.p1.y -= dy * c.p1.invmass;
c.p2.x += dx * c.p2.invmass;
c.p2.y += dy * c.p2.invmass;
}
}
function constrainPoints() {
for (let i = 0; i < particles.length; i++) {
let p = particles[i];
if (p.x < SIZE) {
p.x = SIZE;
} else if (p.x >= width - SIZE) {
p.x = width - SIZE;
}
if (p.y < SIZE) {
p.y = SIZE;
} else if (p.y >= height - SIZE) {
p.x -= (p.y - height + SIZE) * (p.x - p.px) * this.physics.friction;
p | {
fill(255, 255, 0);
for (let i = 0; i < particles.length; i++) {
rect(particles[i].x - SIZE_D2, particles[i].y - SIZE_D2, SIZE, SIZE);
}
} | conditional_block |
annualSearchPage.js | 服务器的参数为:pageSize,pageNumber
queryParams:queryParams,
singleSelect: false,
pageSize: basePage.pageSize,
pageList: basePage.pageList,
search: false, // hide the search box
showColumns: false, // hide the column-chooser dropdown (for selecting visible columns)
sidePagination: "server", // server-side pagination
clickToSelect: true, // enable selecting a row by clicking it
columns: [
{
field: 'Number',
title: '行号',
width:'1px',
formatter: function (value, row, index) {
return index+1;
},
width:50
},
{
field:'id',
visible:false
},{
field: 'userName',
title: '公司名称',
valign:'middle'
},{
field: 'fileName',
title: '文件标题',
valign:'middle'
},
{
field: 'year',
title: '年份',
valign:'middle'
},{
field: 'uploadTime',
title: '上传时间',
valign:'middle'
},{
field: 'resume',
title: '内容简述',
valign:'middle'
},{
field: 'remarks',
title: '备注',
valign:'middle'
},{
field: 'status',
title: '是否提交',
valign:'middle',
formatter:function(value,data,index){
if(data.status==0){
return '未提交';
}else{
return '已提交';
}
}
},{
field: '',
title: '操作',
valign:'middle',
formatter:function(value,data,index){
var str = '<button style="margin-top: 5px;" onclick="searchRegulations(\''+data.id+'\');" type="button" class="btn btn-info btn-xs" style="margin-right:10px;"><span class="glyphicon glyphicon-search"></span>查看详情</button>';
return str;
}
}
],
onLoadSuccess:function(){},
onLoadError: function () {
}
});
}
function queryParams(params){
var temp = {
pageSize:params.pageSize,
pageNumber:params.pageNumber,
fileName:$('#headNameId').val(),
year:$('#headYearId').val(),
userCode:$('#searchUserName').val()
};
$("#headNameId").val('');
$('#headYearId').val('');
return temp;
}
// add a notice/announcement
function addInfo(){
window.location.href="<%=basePath%>regulatory/toAdd.do"
}
// delete
function delInfo(){
rootPath = getRootPath();
var id = getSelectionsStr();
if(id==''){
bootbox.alert('请选择要编辑的行!');
return;
}else{
if(id.indexOf(',')!=-1){
bootbox.alert('只能选中一行');
return;
}
}
$.commonReq({
url : rootPath + "/annual/selectById.do",
async : true,
data : {"id":id},
success : function(obj) {
debugger;
if(obj.data.status==1){
bootbox.alert('该文件已提交审核,无法对其操作!');
return;
}
$.commonReq({
url : rootPath + "/annual/deleteRegs.do",
async : true,
data : {"ids":id},
success : function(parame) {
bootbox.alert( "删除成功!");
window.location.href=rootPath+'/annual/toAnnualReviewPage.do';
},
error : function(parame) {
bootbox.alert('服务器请求失败!');
}
});
}
});
}
// edit a notice/announcement
function updateInfo(){
debugger;
$('#saveUpdateBtn').show();
$('#urlDiv2').hide();
$('#urlDiv').show();
var rootPath = getRootPath();
var localhostPath = getRootPath1();
var id = getSelectionsStr()+"";
if(id==''){
bootbox.alert('请选择要编辑的行!');
return;
}else{
if(id.indexOf(',')!=-1){
bootbox.alert('只能选中一行');
return;
}
}
$("#formUpdatesInfo").find("input").val("");
$('#myModalAdd').modal('hide');
$("#uploadFileName").html('');
$("#uploadFileId").val('');
$("#uploadFileId").val(id);
$.commonReq({
url : rootPath + "/annual/selectById.do",
async : true,
data : {"id":id},
success : function(parame) {
if(parame.data.status==1){
bootbox.alert('该文件已提交审核,无法对其操作!');
return;
}
debugger;
var obj = parame.data;
$("#updateId").val(obj.id);
$("#updateFileName").val(obj.fileName);
$("#updateYearId").val(obj.year);
$("#updateResume").val(obj.resume);
$("#updateRemarks").val(obj.remarks);
if(obj.fileUrl==''){
$("#downloadHref").hide();
}else{
$("#uploadFileInput").val(obj.fileUrl);
$("#downloadHref").attr("href", localhostPath+"/filePath/"+obj.fileUrl );
}
if(obj.fileUrl != null && obj.fileUrl.trim() !=''){
$("#uploadFileId").val(obj.fileUrl);
var nameAry = obj.fileUrl.split('|');
for(var i=0;i<nameAry.length;i++){
if(nameAry[i].trim()!=''){
var idAry=nameAry[i].split('^');
var id = idAry[0];
var fileName = idAry[1];
var beforeFileName = $("#uploadFileName").html();
var afterFileName = beforeFileName+" "
+'<span id='+id+'>'+fileName+'</span>'
+'<a onclick="deleteFile('+id+');" style="color:#ff000096;"> 删除</a>';
$("#uploadFileName").html(afterFileName);
}
}
}
$('#myModalUpdate').modal('show');
},
error : function(parame) {
bootbox.alert('服务器请求失败!');
}
});
}
function saveUpdateInfo(){
debugger;
var flag = true;
// validate the title
var name=$('#updateFileName').val();
if(checkNullAndEmpty(name)){
bootbox.alert("环评文件标题不能为空!");
flag=false;
return;
}
// validate the year
var year = $("#updateYearId").val();
if(checkNullAndEmpty(year)){
bootbox.alert("年份不能为空!");
flag=false;
return;
}
var rootPath = getRootPath();
if(flag){
$.commonReq({
url : rootPath + "/annual/updateAnnualRevice.do",
async : true,
data:$("#formUpdateInfo").serialize(),
success : function(data) {
$('#myModalAdd').modal('hide');
$('#myModalUpdate').modal('hide');
$('#dt').bootstrapTable('refresh', {url:rootPath + '/annual/findAll.do'});
bootbox.alert("修改成功!");
},
error:function(xhr,status,e){
// handler for a failed server response
bootbox.alert('服务器请求失败!');
}
});
}
}
function searchRegulations(id){
debugger;
var localhostPath = getRootPath1();
var rootPath = getRootPath();
$("#formUpdatesInfo").find("input").val("");
$("#submitBtn").show();
$('#myModalUpdate').modal('hide');
$("#searchUploadFileName").html('');
$("#uploadFileId").val('');
$('#urlDiv').hide();
$.commonReq({
url : rootPath + '/annual/selectById.do',
data : {
"id" : id
},
success : function(data) {
debugger;
var obj = data.data;
$("#searchId").val(obj.id);
$("#submitBtn").hide();
| $("#searchFileName").val(obj.fileName);
$("#searchYearId").val(obj.year);
$("#searchResume").val(obj.resume);
$("#searchRemarks").val(obj.remarks);
var afterName = '';
if(obj.fileUrl !='' && obj.fileUrl != null){
var nameAry = obj.fileUrl.split('|');
for(var i=0;i<nameAry.length;i++){
if(nameAry[i].trim() != ''){
var idAry=nameAry[i].split('^');
var fileName = idAry[1];
var beforeFileList = $("#searchUploadFileName").html();
var url =localhostPath+'/filePath/'+nameAry[i];
var afterFileList =beforeFileList+' '+ '<a href='+url+' target=_blank>'+fileName+'</a>';
$("#searchUploadFileName").html(afterFileList);
}
}
//$('#urlDiv2').show();
}
| identifier_body |
|
annualSearchPage.js | }
}
$.commonReq({
url : rootPath + "/annual/selectById.do",
async : true,
data : {"id":id},
success : function(obj) {
debugger;
if(obj.data.status==1){
bootbox.alert('该文件已提交审核,无法对其操作!');
return;
}
$.commonReq({
url : rootPath + "/annual/deleteRegs.do",
async : true,
data : {"ids":id},
success : function(parame) {
bootbox.alert( "删除成功!");
window.location.href=rootPath+'/annual/toAnnualReviewPage.do';
},
error : function(parame) {
bootbox.alert('服务器请求失败!');
}
});
}
});
}
//修改通知公告
function updateInfo(){
debugger;
$('#saveUpdateBtn').show();
$('#urlDiv2').hide();
$('#urlDiv').show();
var rootPath = getRootPath();
var localhostPath = getRootPath1();
var id = getSelectionsStr()+"";
if(id==''){
bootbox.alert('请选择要编辑的行!');
return;
}else{
if(id.indexOf(',')!=-1){
bootbox.alert('只能选中一行');
return;
}
}
$("#formUpdatesInfo").find("input").val("");
$('#myModalAdd').modal('hide');
$("#uploadFileName").html('');
$("#uploadFileId").val('');
$("#uploadFileId").val(id);
$.commonReq({
url : rootPath + "/annual/selectById.do",
async : true,
data : {"id":id},
success : function(parame) {
if(parame.data.status==1){
bootbox.alert('该文件已提交审核,无法对其操作!');
return;
}
debugger;
var obj = parame.data;
$("#updateId").val(obj.id);
$("#updateFileName").val(obj.fileName);
$("#updateYearId").val(obj.year);
$("#updateResume").val(obj.resume);
$("#updateRemarks").val(obj.remarks);
if(obj.fileUrl==''){
$("#downloadHref").hide();
}else{
$("#uploadFileInput").val(obj.fileUrl);
$("#downloadHref").attr("href", localhostPath+"/filePath/"+obj.fileUrl );
}
if(obj.fileUrl != null && obj.fileUrl.trim() !=''){
$("#uploadFileId").val(obj.fileUrl);
var nameAry = obj.fileUrl.split('|');
for(var i=0;i<nameAry.length;i++){
if(nameAry[i].trim()!=''){
var idAry=nameAry[i].split('^');
var id = idAry[0];
var fileName = idAry[1];
var beforeFileName = $("#uploadFileName").html();
var afterFileName = beforeFileName+" "
+'<span id='+id+'>'+fileName+'</span>'
+'<a onclick="deleteFile('+id+');" style="color:#ff000096;"> 删除</a>';
$("#uploadFileName").html(afterFileName);
}
}
}
$('#myModalUpdate').modal('show');
},
error : function(parame) {
bootbox.alert('服务器请求失败!');
}
});
}
function saveUpdateInfo(){
debugger;
var flag = true;
//检查标题
var name=$('#updateFileName').val();
if(checkNullAndEmpty(name)){
bootbox.alert("环评文件标题不能为空!");
flag=false;
return;
}
//检查年份
var year = $("#updateYearId").val();
if(checkNullAndEmpty(year)){
bootbox.alert("年份不能为空!");
flag=false;
return;
}
var rootPath = getRootPath();
if(flag){
$.commonReq({
url : rootPath + "/annual/updateAnnualRevice.do",
async : true,
data:$("#formUpdateInfo").serialize(),
success : function(data) {
$('#myModalAdd').modal('hide');
$('#myModalUpdate').modal('hide');
$('#dt').bootstrapTable('refresh', {url:rootPath + '/annual/findAll.do'});
bootbox.alert("修改成功!");
},
error:function(xhr,status,e){
//服务器响应失败时的处理函数
bootbox.alert('服务器请求失败!');
}
});
}
}
function searchRegulations(id){
debugger;
var localhostPath = getRootPath1();
var rootPath = getRootPath();
$("#formUpdatesInfo").find("input").val("");
$("#submitBtn").show();
$('#myModalUpdate').modal('hide');
$("#searchUploadFileName").html('');
$("#uploadFileId").val('');
$('#urlDiv').hide();
$.commonReq({
url : rootPath + '/annual/selectById.do',
data : {
"id" : id
},
success : function(data) {
debugger;
var obj = data.data;
$("#searchId").val(obj.id);
$("#submitBtn").hide();
$("#searchFileName").val(obj.fileName);
$("#searchYearId").val(obj.year);
$("#searchResume").val(obj.resume);
$("#searchRemarks").val(obj.remarks);
var afterName = '';
if(obj.fileUrl !='' && obj.fileUrl != null){
var nameAry = obj.fileUrl.split('|');
for(var i=0;i<nameAry.length;i++){
if(nameAry[i].trim() != ''){
var idAry=nameAry[i].split('^');
var fileName = idAry[1];
var beforeFileList = $("#searchUploadFileName").html();
var url =localhostPath+'/filePath/'+nameAry[i];
var afterFileList =beforeFileList+' '+ '<a href='+url+' target=_blank>'+fileName+'</a>';
$("#searchUploadFileName").html(afterFileList);
}
}
//$('#urlDiv2').show();
}
$('#myModalSearch').modal('show');
}
});
}
//提交
function submitInfo(id){
debugger;
var rootPath = getRootPath();
if(id=='' || id==undefined){
id = $("#searchId").val();
}
if(id==""){
bootbox.alert('请选择你要提交的数据!');
}else{
bootbox.confirm("提交后您将无法对该文件进行修改或删除操作,确定要提交该数据吗?",function(result){
if(result){
$.commonReq({
url : rootPath + "/annual/sumbitAnnualReview.do",
async : true,
data:{"id":id},
success : function(data) {
$('#myModalAdd').modal('hide');
$('#myModalUpdate').modal('hide');
$('#dt').bootstrapTable('refresh', {url:rootPath + '/annual/findAll.do'});
bootbox.alert("提交成功!");
$('#myModalSearch').modal('hide');
},
error:function(xhr,status,e){
//服务器响应失败时的处理函数
bootbox.alert('服务器请求失败!');
}
});
}
});
}
}
/*获取选中的值*/
function getSelectionsStr(){
var rows = $('#dt').bootstrapTable('getSelections');
var str="";
if(rows!=null){
for(var i=0;i<rows.length;i++){
str+=(rows[i].id+"")+",";
}
str=str.substring(0,str.lastIndexOf(','));
}
return str;
}
/*验证是否为空*/
function checkNullAndEmpty(value){
if(value==null || value.trim()==''){
return true;
}else{
return false;
}
}
//判断字符串是否为数字
function checkNumber(value){
var re = /^[0-9]+.?[0-9]*$/;
if(null==value||''==value) {
return false;
}else if(!re.test(value)){
return true;
}else{
return false;
}
}
/*时间格式转化*/
function dataFormat(value){
if(value!=null){
var date = new Date(value);
var seperator1 = "-";
//年
var year = date.getFullYear();
//月
var month = date.getMonth() + 1;
//日
var strDate = date.getDate();
if (month >= 1 && month <= 9) {
month = "0" + month;
}
if (strDate >= 0 && strDate <= 9) {
strDate = "0" + strDate;
}
var currentdate = year + seperator1 + month + seperator1 + strDate;
return currentdate;
}else{ | return "";
} | random_line_split |
|
annualSearchPage.js | box.alert('只能选中一行');
return;
}
}
$.commonReq({
url : rootPath + "/annual/selectById.do",
async : true,
data : {"id":id},
success : function(obj) {
debugger;
if(obj.data.status==1){
bootbox.alert('该文件已提交审核,无法对其操作!');
return;
}
$.commonReq({
url : rootPath + "/annual/deleteRegs.do",
async : true,
data : {"ids":id},
success : function(parame) {
bootbox.alert( "删除成功!");
window.location.href=rootPath+'/annual/toAnnualReviewPage.do';
},
error : function(parame) {
bootbox.alert('服务器请求失败!');
}
});
}
});
}
//修改通知公告
function updateInfo(){
debugger;
$('#saveUpdateBtn').show();
$('#urlDiv2').hide();
$('#urlDiv').show();
var rootPath = getRootPath();
var localhostPath = getRootPath1();
var id = getSelectionsStr()+"";
if(id==''){
bootbox.alert('请选择要编辑的行!');
return;
}else{
if(id.indexOf(',')!=-1){
bootbox.alert('只能选中一行');
return;
}
}
$("#formUpdatesInfo").find("input").val("");
$('#myModalAdd').modal('hide');
$("#uploadFileName").html('');
$("#uploadFileId").val('');
$("#uploadFileId").val(id);
$.commonReq({
url : rootPath + "/annual/selectById.do",
async : true,
data : {"id":id},
success : function(parame) {
if(parame.data.status==1){
bootbox.alert('该文件已提交审核,无法对其操作!');
return;
}
debugger;
var obj = parame.data;
$("#updateId").val(obj.id);
$("#updateFileName").val(obj.fileName);
$("#updateYearId").val(obj.year);
$("#updateResume").val(obj.resume);
$("#updateRemarks").val(obj.remarks);
if(obj.fileUrl==''){
$("#downloadHref").hide();
}else{
$("#uploadFileInput").val(obj.fileUrl);
$("#downloadHref").attr("href", localhostPath+"/filePath/"+obj.fileUrl );
}
if(obj.fileUrl != null && obj.fileUrl.trim() !=''){
$("#uploadFileId").val(obj.fileUrl);
var nameAry = obj.fileUrl.split('|');
for(var i=0;i<nameAry.length;i++){
if(nameAry[i].trim()!=''){
var idAry=nameAry[i].split('^');
var id = idAry[0];
var fileName = idAry[1];
var beforeFileName = $("#uploadFileName").html();
var afterFileName = beforeFileName+" "
+'<span id='+id+'>'+fileName+'</span>'
+'<a onclick="deleteFile('+id+');" style="color:#ff000096;"> 删除</a>';
$("#uploadFileName").html(afterFileName);
}
}
}
$('#myModalUpdate').modal('show');
},
error : function(parame) {
bootbox.alert('服务器请求失败!');
}
});
}
function saveUpdateInfo(){
debugger;
var flag = true;
//检查标题
var name=$('#updateFileName').val();
if(checkNullAndEmpty(name)){
bootbox.alert("环评文件标题不能为空!");
flag=false;
return;
}
//检查年份
var year = $("#updateYearId").val();
if(checkNullAndEmpty(year)){
bootbox.alert("年份不能为空!");
flag=false;
return;
}
var rootPath = getRootPath();
if(flag){
$.commonReq({
url : rootPath + "/annual/updateAnnualRevice.do",
async : true,
data:$("#formUpdateInfo").serialize(),
success : function(data) {
$('#myModalAdd').modal('hide');
$('#myModalUpdate').modal('hide');
$('#dt').bootstrapTable('refresh', {url:rootPath + '/annual/findAll.do'});
bootbox.alert("修改成功!");
},
error:function(xhr,status,e){
//服务器响应失败时的处理函数
bootbox.alert('服务器请求失败!');
}
});
}
}
function searchRegulations(id){
debugger;
var localhostPath = getRootPath1();
var rootPath = getRootPath();
$("#formUpdatesInfo").find("input").val("");
$("#submitBtn").show();
$('#myModalUpdate').modal('hide');
$("#searchUploadFileName").html('');
$("#uploadFileId").val('');
$('#urlDiv').hide();
$.commonReq({
url : rootPath + '/annual/selectById.do',
data : {
"id" : id
},
success : function(data) {
debugger;
var obj = data.data;
$("#searchId").val(obj.id);
$("#submitBtn").hide();
$("#searchFileName").val(obj.fileName);
$("#searchYearId").val(obj.year);
$("#searchResume").val(obj.resume);
$("#searchRemarks").val(obj.remarks);
var afterName = '';
if(obj.fileUrl !='' && obj.fileUrl != null){
var nameAry = obj.fileUrl.split('|');
for(var i=0;i<nameAry.length;i++){
if(nameAry[i].trim() != ''){
var idAry=nameAry[i].split('^');
var fileName = idAry[1];
var beforeFileList = $("#searchUploadFileName").html();
var url =localhostPath+'/filePath/'+nameAry[i];
var afterFileList =beforeFileList+' '+ '<a href='+url+' target=_blank>'+fileName+'</a>';
$("#searchUploadFileName").html(afterFileList);
}
}
//$('#urlDiv2').show();
}
$('#myModalSearch').modal('show');
}
});
}
//提交
function submitInfo(id){
debugger;
var rootPath = getRootPath();
if(id=='' || id==undefined){
id = $("#searchId").val();
}
if(id==""){
bootbox.alert('请选择你要提交的数据!');
}else{
bootbox.confirm("提交后您将无法对该文件进行修改或删除操作,确定要提交该数据吗?",function(result){
if(result){
$.commonReq({
url : rootPath + "/annual/sumbitAnnualReview.do",
async : true,
data:{"id":id},
success : function(data) {
$('#myModalAdd').modal('hide');
$('#myModalUpdate').modal('hide');
$('#dt').bootstrapTable('refresh', {url:rootPath + '/annual/findAll.do'});
bootbox.alert("提交成功!");
$('#myModalSearch').modal('hide');
},
error:function(xhr,status,e){
//服务器响应失败时的处理函数
bootbox.alert('服务器请求失败!');
}
});
}
});
}
}
/*获取选中的值*/
function getSelectionsStr(){
var rows = $('#dt').bootstrapTable('getSelections');
var str="";
if(rows!=null){
for(var i=0;i<rows.length;i++){
str+=(rows[i].id+"")+",";
}
str=str.substring(0,str.lastIndexOf(','));
}
return str;
}
/*验证是否为空*/
function checkNullAndEmpty(value){
if(value==null || value.trim()==''){
return true;
}else{
return false;
}
}
//判断字符串是否为数字
function checkNumber(value){
var re = /^[0-9]+.?[0-9]*$/;
if(null==value||''==value) {
return false;
}else if(!re.test(value)){
return true;
}else{
return false;
}
}
/*时间格式转化*/
function dataFormat(value){
if(value!=null){
var date = new Date(value);
var seperator1 = "-";
//年
var year = date.getFullYear();
//月
var month = date.getMonth() + 1;
//日
var strDate = date.getDate();
if (month >= 1 && month <= 9) {
month = "0" + month;
}
if (strDate >= 0 && strDate <= 9) {
strDate = "0" + strDate;
}
var currentdate = year + seperator1 + month + seperator1 + strDate;
| re | identifier_name |
|
annualSearchPage.js | striped: true, //是否显示行间隔色
cache: false, //是否使用缓存,默认为true,所以一般情况下需要设置一下这个属性(*)
pagination: true,
queryParamsType:'', //默认值为 'limit' ,在默认情况下 传给服务端的参数为:offset,limit,sort
// 设置为 '' 在这种情况下传给服务器的参数为:pageSize,pageNumber
queryParams:queryParams,
singleSelect: false,
pageSize: basePage.pageSize,
pageList: basePage.pageList,
search: false, //不显示 搜索框
showColumns: false, //不显示下拉框(选择显示的列)
sidePagination: "server", //服务端请求
clickToSelect: true, //是否启用点击选中行
columns: [
{
field: 'Number',
title: '行号',
width:'1px',
formatter: function (value, row, index) {
return index+1;
},
width:50
},
{
field:'id',
visible:false
},{
field: 'userName',
title: '公司名称',
valign:'middle'
},{
field: 'fileName',
title: '文件标题',
valign:'middle'
},
{
field: 'year',
title: '年份',
valign:'middle'
},{
field: 'uploadTime',
title: '上传时间',
valign:'middle'
},{
field: 'resume',
title: '内容简述',
valign:'middle'
},{
field: 'remarks',
title: '备注',
valign:'middle'
},{
field: 'status',
title: '是否提交',
valign:'middle',
formatter:function(value,data,index){
if(data.status==0){
return '未提交';
}else{
return '已提交';
}
}
},{
field: '',
title: '操作',
valign:'middle',
formatter:function(value,data,index){
var str = '<button style="margin-top: 5px;" onclick="searchRegulations(\''+data.id+'\');" type="button" class="btn btn-info btn-xs" style="margin-right:10px;"><span class="glyphicon glyphicon-search"></span>查看详情</button>';
return str;
}
}
],
onLoadSuccess:function(){},
onLoadError: function () {
}
});
}
function queryParams(params){
var temp = {
pageSize:params.pageSize,
pageNumber:params.pageNumber,
fileName:$('#headNameId').val(),
year:$('#headYearId').val(),
userCode:$('#searchUserName').val()
};
$("#headNameId").val('');
$('#headYearId').val('');
return temp;
}
//增加通知公告
function addInfo(){
window.location.href="<%=basePath%>regulatory/toAdd.do"
}
//删除
function delInfo(){
rootPath = getRootPath();
var id = getSelectionsStr();
if(id==''){
bootbox.alert('请选择要编辑的行!');
return;
}else{
if(id.indexOf(',')!=-1){
bootbox.alert('只能选中一行');
return;
}
}
$.commonReq({
url : rootPath + "/annual/selectById.do",
async : true,
data : {"id":id},
success : function(obj) {
debugger;
if(obj.data.status==1){
bootbox.alert('该文件已提交审核,无法对其操作!');
return;
}
$.commonReq({
url : rootPath + "/annual/deleteRegs.do",
async : true,
data : {"ids":id},
success : function(parame) {
bootbox.alert( "删除成功!");
window.location.href=rootPath+'/annual/toAnnualReviewPage.do';
},
error : function(parame) {
bootbox.alert('服务器请求失败!');
}
});
}
});
}
//修改通知公告
function updateInfo(){
debugger;
$('#saveUpdateBtn').show();
$('#urlDiv2').hide();
$('#urlDiv').show();
var rootPath = getRootPath();
var localhostPath = getRootPath1();
var id = getSelectionsStr()+"";
if(id==''){
bootbox.alert('请选择要编辑的行!');
return;
}else{
if(id.indexOf(',')!=-1){
bootbox.alert('只能选中一行');
return;
}
}
$("#formUpdatesInfo").find("input").val("");
$('#myModalAdd').modal('hide');
$("#uploadFileName").html('');
$("#uploadFileId").val('');
$("#uploadFileId").val(id);
$.commonReq({
url : rootPath + "/annual/selectById.do",
async : true,
data : {"id":id},
success : function(parame) {
if(parame.data.status==1){
bootbox.alert('该文件已提交审核,无法对其操作!');
return;
}
debugger;
var obj = parame.data;
$("#updateId").val(obj.id);
$("#updateFileName").val(obj.fileName);
$("#updateYearId").val(obj.year);
$("#updateResume").val(obj.resume);
$("#updateRemarks").val(obj.remarks);
if(obj.fileUrl==''){
$("#downloadHref").hide();
}else{
$("#uploadFileInput").val(obj.fileUrl);
$("#downloadHref").attr("href", localhostPath+"/filePath/"+obj.fileUrl );
}
if(obj.fileUrl != null && obj.fileUrl.trim() !=''){
$("#uploadFileId").val(obj.fileUrl);
var nameAry = obj.fileUrl.split('|');
for(var i=0;i<nameAry.length;i++){
if(nameAry[i].trim()!=''){
var idAry=nameAry[i].split('^');
var id = idAry[0];
var fileName = idAry[1];
var beforeFileName = $("#uploadFileName").html();
var afterFileName = beforeFileName+" "
+'<span id='+id+'>'+fileName+'</span>'
+'<a onclick="deleteFile('+id+');" style="color:#ff000096;"> 删除</a>';
$("#uploadFileName").html(afterFileName);
}
}
}
$('#myModalUpdate').modal('show');
},
error : function(parame) {
bootbox.alert('服务器请求失败!');
}
});
}
function saveUpdateInfo(){
debugger;
var flag = true;
//检查标题
var name=$('#updateFileName').val();
if(checkNullAndEmpty(name)){
bootbox.alert("环评文件标题不能为空!");
flag=false;
return;
}
//检查年份
var year = $("#updateYearId").val();
if(checkNullAndEmpty(year)){
bootbox.alert("年份不能为空!");
flag=false;
return;
}
var rootPath = getRootPath();
if(flag){
$.commonReq({
url : rootPath + "/annual/updateAnnualRevice.do",
async : true,
data:$("#formUpdateInfo").serialize(),
success : function(data) {
$('#myModalAdd').modal('hide');
$('#myModalUpdate').modal('hide');
$('#dt').bootstrapTable('refresh', {url:rootPath + '/annual/findAll.do'});
| //服务器响应失败时的处理函数
bootbox.alert('服务器请求失败!');
}
});
}
}
function searchRegulations(id){
debugger;
var localhostPath = getRootPath1();
var rootPath = getRootPath();
$("#formUpdatesInfo").find("input").val("");
$("#submitBtn").show();
$('#myModalUpdate').modal('hide');
$("#searchUploadFileName").html('');
$("#uploadFileId").val('');
$('#urlDiv').hide();
$.commonReq({
url : rootPath + '/annual/selectById.do',
data : {
"id" : id
},
success : function(data) {
debugger;
var obj = data.data;
$("#searchId").val(obj.id);
$("#submitBtn").hide();
$("#searchFileName").val(obj.fileName);
$("#searchYearId").val(obj.year);
$("#searchResume").val(obj.resume);
$("#searchRemarks").val(obj.remarks);
var afterName = '';
if(obj.fileUrl !='' && obj.fileUrl != null){
var nameAry = obj.fileUrl.split('|');
for(var i=0;i<nameAry.length;i++){
if(nameAry[i].trim() != ''){
var idAry=nameAry[i].split('^');
var fileName = idAry[1];
var beforeFileList = $("#searchUploadFileName").html();
| bootbox.alert("修改成功!");
},
error:function(xhr,status,e){
| conditional_block |
symm_icon.rs | 0., -12., 1., 0., 3., 0.7], [-1.86, 2., 0., 1., 0.1, 4., 1.2],
[-2.34, 2., 0.2, 0.1, 0., 5., 1.2], [2.6, -2., 0., 0.5, 0., 5., 1.3],
[-2.5, 5., -1.9, 1., 0.188, 5., 1.], [2.409, -2.5, 0., 0.9, 0., 23., 1.2],
[2.409, -2.5, -0.2, 0.81, 0., 24., 1.2], [-2.05, 3., -16.79, 1., 0., 9., 1.],
[-2.32, 2.32, 0., 0.75, 0., 5., 1.2], [2.5, -2.5, 0., 0.9, 0., 3., 1.3],
[1.5, -1., 0.1, -0.805, 0., 3., 1.4],
];
const MAX_XY : f32 = 1e5;
const DEFAULT_SPEED : u32 = 100;
const MAX_COLORS : u32 = 2111;
const COLOR_SPEED : u32 = 3071;
pub struct SymmetricIcons {
lambda : f32,
alpha : f32,
beta : f32,
gamma : f32,
omega : f32,
symmetry : u32,
scale : f32,
w : usize,
h : usize,
color_set : u32,
iter : u32,
speed : u32,
apcx : f32,
apcy : f32,
rad : f32,
color_list: Vec<u32>,
icon : Array2D<u32>,
image : Array2D<u32>,
x : f32,
y : f32,
k : u32,
}
impl SymmetricIcons {
pub fn new(w : usize, h : usize, color_set : u32) -> Self {
let mut s = Self {
lambda : 0.0,
alpha : 0.0,
beta : 0.0,
gamma : 0.0,
omega : 0.0,
symmetry : 0,
scale : 0.0,
w : w,
h : h,
color_set : color_set,
iter : 0,
speed : DEFAULT_SPEED,
apcx : 0.0,
apcy : 0.0,
rad : 0.0,
color_list : vec![],
icon : Array2D::filled_with(0_u32, w, h),
image : Array2D::filled_with(0_u32, w, h),
x : 0.0,
y : 0.0,
k : 0,
};
s.set_preset(0);
s
}
pub fn set_size(&mut self, w : usize, h : usize) {
self.w = w;
self.h = h;
self.image = Array2D::filled_with(0_u32, w, h);
self.icon = Array2D::filled_with(0_u32, w, h);
self.iter = 0;
self.color_list = vec![];
self.reset();
}
pub fn set_preset(&mut self, i : usize) {
let p = PRESETS[i % PRESETS.len()];
self.lambda = p[0];
self.alpha = p[1];
self.beta = p[2];
self.gamma = p[3];
self.omega = p[4];
self.symmetry = p[5] as u32;
self.scale = if p[6] == 0. {1.} else {p[6]};
self.reset();
}
pub fn set_parameters(&mut self, lambda : f32, alpha: f32, beta : f32, gamma : f32, omega : f32, symmetry : f32, scale : f32) |
fn make_color(r : u32, g : u32, b : u32) -> u32 { (b << 16) | (g << 8) | r | 0xff00_0000 }
fn make_colora(a : u32, r : u32, g : u32, b : u32) -> u32 { (a << 24) | (b << 16) | (g << 8) | r }
fn get_rainbow(x : u32, y : u32) -> u32 {
match x {
0 => Self::make_color(0, y, 255),
1 => Self::make_color(0, 255, 255 - y),
2 => Self::make_color(y, 255, 0),
3 => Self::make_color(255, 255 - y, 0),
4 => Self::make_color(255, 0, y),
5 => Self::make_color(255 - y, 0, 255),
_ => Self::make_color(0,0,0), // black
}
}
fn set_colors(&mut self, param_int : u32) {
let mut colors = vec![0_u32; (MAX_COLORS+1) as usize];
match param_int {
0 => {
for i in 0..64 { colors[i] = Self::make_color(0, 0, 4 * i as u32) }
for i in 0..256 {
let local_color = Self::make_color(255, i, 255);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
1 => {
for i in 0..64 { colors[i] = Self::make_color(0, 4 * i as u32, 4 * i as u32) }
for i in 0..256 {
let local_color = Self::make_color(i, i, 255);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
2 => {
for i in 0..64 { colors[i] = Self::make_color(0, 4 * i as u32, 0) }
for i in 0..256 {
let local_color = Self::make_color(i, 255, 255);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
3 => {
for i in 0..64 { colors[i] = Self::make_color(4 * i as u32, 4 * i as u32, 0) }
for i in 0..256 {
let local_color = Self::make_color(i, 255, i);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
4 => {
for i in 0..64 { colors[i] = Self::make_color(4 * i as u32, 0, 0) }
for i in 0..256 {
let local_color = Self::make_color(255, 255, i);
for j in 0..3 { colors[(1344 + j + 3 * i) as | {
self.lambda = lambda;
self.alpha = alpha;
self.beta = beta;
self.gamma = gamma;
self.omega = omega;
self.symmetry = if symmetry < 1. { 1 } else { symmetry as u32 };
self.scale = if scale == 0. {1.} else { scale };
self.reset();
} | identifier_body |
symm_icon.rs | 10., -12., 1., 0., 3., 0.7], [-1.86, 2., 0., 1., 0.1, 4., 1.2],
[-2.34, 2., 0.2, 0.1, 0., 5., 1.2], [2.6, -2., 0., 0.5, 0., 5., 1.3],
[-2.5, 5., -1.9, 1., 0.188, 5., 1.], [2.409, -2.5, 0., 0.9, 0., 23., 1.2],
[2.409, -2.5, -0.2, 0.81, 0., 24., 1.2], [-2.05, 3., -16.79, 1., 0., 9., 1.],
[-2.32, 2.32, 0., 0.75, 0., 5., 1.2], [2.5, -2.5, 0., 0.9, 0., 3., 1.3],
[1.5, -1., 0.1, -0.805, 0., 3., 1.4],
];
const MAX_XY : f32 = 1e5;
const DEFAULT_SPEED : u32 = 100;
const MAX_COLORS : u32 = 2111;
const COLOR_SPEED : u32 = 3071;
pub struct SymmetricIcons {
lambda : f32,
alpha : f32,
beta : f32,
gamma : f32,
omega : f32,
symmetry : u32,
scale : f32,
w : usize,
h : usize,
color_set : u32,
iter : u32,
speed : u32,
apcx : f32,
apcy : f32,
rad : f32,
color_list: Vec<u32>,
icon : Array2D<u32>,
image : Array2D<u32>,
x : f32,
y : f32,
k : u32,
}
impl SymmetricIcons {
pub fn new(w : usize, h : usize, color_set : u32) -> Self {
let mut s = Self {
lambda : 0.0,
alpha : 0.0,
beta : 0.0,
gamma : 0.0,
omega : 0.0,
symmetry : 0,
scale : 0.0,
w : w,
h : h,
color_set : color_set,
iter : 0,
speed : DEFAULT_SPEED,
apcx : 0.0,
apcy : 0.0,
rad : 0.0,
color_list : vec![],
icon : Array2D::filled_with(0_u32, w, h),
image : Array2D::filled_with(0_u32, w, h),
x : 0.0,
y : 0.0,
k : 0,
};
s.set_preset(0);
s
}
pub fn set_size(&mut self, w : usize, h : usize) {
self.w = w;
self.h = h; | self.iter = 0;
self.color_list = vec![];
self.reset();
}
pub fn set_preset(&mut self, i : usize) {
let p = PRESETS[i % PRESETS.len()];
self.lambda = p[0];
self.alpha = p[1];
self.beta = p[2];
self.gamma = p[3];
self.omega = p[4];
self.symmetry = p[5] as u32;
self.scale = if p[6] == 0. {1.} else {p[6]};
self.reset();
}
pub fn set_parameters(&mut self, lambda : f32, alpha: f32, beta : f32, gamma : f32, omega : f32, symmetry : f32, scale : f32) {
self.lambda = lambda;
self.alpha = alpha;
self.beta = beta;
self.gamma = gamma;
self.omega = omega;
self.symmetry = if symmetry < 1. { 1 } else { symmetry as u32 };
self.scale = if scale == 0. {1.} else { scale };
self.reset();
}
fn make_color(r : u32, g : u32, b : u32) -> u32 { (b << 16) | (g << 8) | r | 0xff00_0000 }
fn make_colora(a : u32, r : u32, g : u32, b : u32) -> u32 { (a << 24) | (b << 16) | (g << 8) | r }
fn get_rainbow(x : u32, y : u32) -> u32 {
match x {
0 => Self::make_color(0, y, 255),
1 => Self::make_color(0, 255, 255 - y),
2 => Self::make_color(y, 255, 0),
3 => Self::make_color(255, 255 - y, 0),
4 => Self::make_color(255, 0, y),
5 => Self::make_color(255 - y, 0, 255),
_ => Self::make_color(0,0,0), // black
}
}
fn set_colors(&mut self, param_int : u32) {
let mut colors = vec![0_u32; (MAX_COLORS+1) as usize];
match param_int {
0 => {
for i in 0..64 { colors[i] = Self::make_color(0, 0, 4 * i as u32) }
for i in 0..256 {
let local_color = Self::make_color(255, i, 255);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
1 => {
for i in 0..64 { colors[i] = Self::make_color(0, 4 * i as u32, 4 * i as u32) }
for i in 0..256 {
let local_color = Self::make_color(i, i, 255);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
2 => {
for i in 0..64 { colors[i] = Self::make_color(0, 4 * i as u32, 0) }
for i in 0..256 {
let local_color = Self::make_color(i, 255, 255);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
3 => {
for i in 0..64 { colors[i] = Self::make_color(4 * i as u32, 4 * i as u32, 0) }
for i in 0..256 {
let local_color = Self::make_color(i, 255, i);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
4 => {
for i in 0..64 { colors[i] = Self::make_color(4 * i as u32, 0, 0) }
for i in 0..256 {
let local_color = Self::make_color(255, 255, i);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize | self.image = Array2D::filled_with(0_u32, w, h);
self.icon = Array2D::filled_with(0_u32, w, h); | random_line_split |
symm_icon.rs | f32, alpha: f32, beta : f32, gamma : f32, omega : f32, symmetry : f32, scale : f32) {
self.lambda = lambda;
self.alpha = alpha;
self.beta = beta;
self.gamma = gamma;
self.omega = omega;
self.symmetry = if symmetry < 1. { 1 } else { symmetry as u32 };
self.scale = if scale == 0. {1.} else { scale };
self.reset();
}
fn make_color(r : u32, g : u32, b : u32) -> u32 { (b << 16) | (g << 8) | r | 0xff00_0000 }
fn make_colora(a : u32, r : u32, g : u32, b : u32) -> u32 { (a << 24) | (b << 16) | (g << 8) | r }
fn get_rainbow(x : u32, y : u32) -> u32 {
match x {
0 => Self::make_color(0, y, 255),
1 => Self::make_color(0, 255, 255 - y),
2 => Self::make_color(y, 255, 0),
3 => Self::make_color(255, 255 - y, 0),
4 => Self::make_color(255, 0, y),
5 => Self::make_color(255 - y, 0, 255),
_ => Self::make_color(0,0,0), // black
}
}
fn set_colors(&mut self, param_int : u32) {
let mut colors = vec![0_u32; (MAX_COLORS+1) as usize];
match param_int {
0 => {
for i in 0..64 { colors[i] = Self::make_color(0, 0, 4 * i as u32) }
for i in 0..256 {
let local_color = Self::make_color(255, i, 255);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
1 => {
for i in 0..64 { colors[i] = Self::make_color(0, 4 * i as u32, 4 * i as u32) }
for i in 0..256 {
let local_color = Self::make_color(i, i, 255);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
2 => {
for i in 0..64 { colors[i] = Self::make_color(0, 4 * i as u32, 0) }
for i in 0..256 {
let local_color = Self::make_color(i, 255, 255);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
3 => {
for i in 0..64 { colors[i] = Self::make_color(4 * i as u32, 4 * i as u32, 0) }
for i in 0..256 {
let local_color = Self::make_color(i, 255, i);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
4 => {
for i in 0..64 { colors[i] = Self::make_color(4 * i as u32, 0, 0) }
for i in 0..256 {
let local_color = Self::make_color(255, 255, i);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
5 => {
for i in 0..64 { colors[i] = Self::make_color(4 * i as u32, 0, 4 * i as u32) }
for i in 0..256 {
let local_color = Self::make_color(255, i, i);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
6 => for i in 0..256 { colors[(i + 64)] = Self::make_colora(255, 255 - i as u32, 255 - i as u32, 255) },
7 => for i in 0..256 { colors[(i + 64)] = Self::make_color(255 - i as u32, 255, 255) },
8 => for i in 0..256 { colors[(i + 64)] = Self::make_color(255 - i as u32, 255, 255 - i as u32) },
9 => for i in 0..256 { colors[(i + 64)] = Self::make_color(255, 255, 255 - i as u32) },
10 => for i in 0..256 { colors[(i + 64)] = Self::make_color(255, 255 - i as u32, 255 - i as u32)} ,
11 => for i in 0..256 { colors[(i + 64)] = Self::make_color(255, 255 - i as u32, 255)},
_ => ()
}
if param_int > 5 {
for i in 0..64 { colors[i] = Self::make_color(4 * i as u32, 4 * i as u32, 4 * i as u32) }
for j in 0..5 {
for i in 0..256 {
colors[(320 + j * 256 + i)] = Self::get_rainbow((param_int + j as u32) % 6, i as u32)
}
}
for i in 0..256 {
let local_color = Self::get_rainbow((param_int - 1) % 6, i);
colors[(1600 + 2 * i as usize)] = local_color;
colors[(1601 + 2 * i as usize)] = local_color;
}
} else { // <= 5
for j in 0..5 {
for i in 0..256 {
colors[64 + j * 256 + i] = Self::get_rainbow((param_int + j as u32) % 6, i as u32);
}
}
}
self.color_list = colors
}
fn reset(&mut self) {
self.speed = DEFAULT_SPEED;
self.apcx = self.w as f32 / 2.;
self.apcy = self.h as f32 / 2.;
self.rad = if self.apcx > self.apcy {self.apcy} else {self.apcx};
self.k = 0;
self.x = 0.01;
self.y = 0.003;
self.iter = 0;
self.icon = Array2D::filled_with(0_u32, self.w, self.h);
self.image = Array2D::filled_with(0_u32, self.w, self.h);
self.set_colors(self.color_set);
for m in 0..self.w {
for n in 0..self.h {
let color = self.get_color(self.icon[(m, n)]);
self.set_point_color(m, n, color);
}
}
}
fn set_point_color(&mut self, x : usize, y : usize, color : u32) {
self.image[(x, y)] = color;
}
fn get_color(&mut self, col : u32) -> u32 {
let col = col & 0x00ffffff;
if col * self.speed > MAX_COLORS {
while (col * self.speed > COLOR_SPEED) && (self.speed > 3) { self.speed-=1 }
self.color_list[MAX_COLORS as usize]
} else | {
self.color_list[(col * self.speed) as usize]
} | conditional_block |
|
symm_icon.rs | 0.0,
apcy : 0.0,
rad : 0.0,
color_list : vec![],
icon : Array2D::filled_with(0_u32, w, h),
image : Array2D::filled_with(0_u32, w, h),
x : 0.0,
y : 0.0,
k : 0,
};
s.set_preset(0);
s
}
pub fn set_size(&mut self, w : usize, h : usize) {
self.w = w;
self.h = h;
self.image = Array2D::filled_with(0_u32, w, h);
self.icon = Array2D::filled_with(0_u32, w, h);
self.iter = 0;
self.color_list = vec![];
self.reset();
}
pub fn set_preset(&mut self, i : usize) {
let p = PRESETS[i % PRESETS.len()];
self.lambda = p[0];
self.alpha = p[1];
self.beta = p[2];
self.gamma = p[3];
self.omega = p[4];
self.symmetry = p[5] as u32;
self.scale = if p[6] == 0. {1.} else {p[6]};
self.reset();
}
pub fn set_parameters(&mut self, lambda : f32, alpha: f32, beta : f32, gamma : f32, omega : f32, symmetry : f32, scale : f32) {
self.lambda = lambda;
self.alpha = alpha;
self.beta = beta;
self.gamma = gamma;
self.omega = omega;
self.symmetry = if symmetry < 1. { 1 } else { symmetry as u32 };
self.scale = if scale == 0. {1.} else { scale };
self.reset();
}
fn make_color(r : u32, g : u32, b : u32) -> u32 { (b << 16) | (g << 8) | r | 0xff00_0000 }
fn make_colora(a : u32, r : u32, g : u32, b : u32) -> u32 { (a << 24) | (b << 16) | (g << 8) | r }
fn get_rainbow(x : u32, y : u32) -> u32 {
match x {
0 => Self::make_color(0, y, 255),
1 => Self::make_color(0, 255, 255 - y),
2 => Self::make_color(y, 255, 0),
3 => Self::make_color(255, 255 - y, 0),
4 => Self::make_color(255, 0, y),
5 => Self::make_color(255 - y, 0, 255),
_ => Self::make_color(0,0,0), // black
}
}
fn set_colors(&mut self, param_int : u32) {
let mut colors = vec![0_u32; (MAX_COLORS+1) as usize];
match param_int {
0 => {
for i in 0..64 { colors[i] = Self::make_color(0, 0, 4 * i as u32) }
for i in 0..256 {
let local_color = Self::make_color(255, i, 255);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
1 => {
for i in 0..64 { colors[i] = Self::make_color(0, 4 * i as u32, 4 * i as u32) }
for i in 0..256 {
let local_color = Self::make_color(i, i, 255);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
2 => {
for i in 0..64 { colors[i] = Self::make_color(0, 4 * i as u32, 0) }
for i in 0..256 {
let local_color = Self::make_color(i, 255, 255);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
3 => {
for i in 0..64 { colors[i] = Self::make_color(4 * i as u32, 4 * i as u32, 0) }
for i in 0..256 {
let local_color = Self::make_color(i, 255, i);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
4 => {
for i in 0..64 { colors[i] = Self::make_color(4 * i as u32, 0, 0) }
for i in 0..256 {
let local_color = Self::make_color(255, 255, i);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
5 => {
for i in 0..64 { colors[i] = Self::make_color(4 * i as u32, 0, 4 * i as u32) }
for i in 0..256 {
let local_color = Self::make_color(255, i, i);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
6 => for i in 0..256 { colors[(i + 64)] = Self::make_colora(255, 255 - i as u32, 255 - i as u32, 255) },
7 => for i in 0..256 { colors[(i + 64)] = Self::make_color(255 - i as u32, 255, 255) },
8 => for i in 0..256 { colors[(i + 64)] = Self::make_color(255 - i as u32, 255, 255 - i as u32) },
9 => for i in 0..256 { colors[(i + 64)] = Self::make_color(255, 255, 255 - i as u32) },
10 => for i in 0..256 { colors[(i + 64)] = Self::make_color(255, 255 - i as u32, 255 - i as u32)} ,
11 => for i in 0..256 { colors[(i + 64)] = Self::make_color(255, 255 - i as u32, 255)},
_ => ()
}
if param_int > 5 {
for i in 0..64 { colors[i] = Self::make_color(4 * i as u32, 4 * i as u32, 4 * i as u32) }
for j in 0..5 {
for i in 0..256 {
colors[(320 + j * 256 + i)] = Self::get_rainbow((param_int + j as u32) % 6, i as u32)
}
}
for i in 0..256 {
let local_color = Self::get_rainbow((param_int - 1) % 6, i);
colors[(1600 + 2 * i as usize)] = local_color;
colors[(1601 + 2 * i as usize)] = local_color;
}
} else { // <= 5
for j in 0..5 {
for i in 0..256 {
colors[64 + j * 256 + i] = Self::get_rainbow((param_int + j as u32) % 6, i as u32);
}
}
}
self.color_list = colors
}
fn | reset | identifier_name |
|
component.rs | Function>,
}
#[derive(Serialize, Deserialize)]
pub(crate) struct ComponentArtifacts {
info: CompiledComponentInfo,
types: ComponentTypes,
static_modules: PrimaryMap<StaticModuleIndex, CompiledModuleInfo>,
}
impl Component {
/// Compiles a new WebAssembly component from the in-memory wasm image
/// provided.
//
// FIXME: need to write more docs here.
#[cfg(any(feature = "cranelift", feature = "winch"))]
#[cfg_attr(nightlydoc, doc(cfg(any(feature = "cranelift", feature = "winch"))))]
pub fn new(engine: &Engine, bytes: impl AsRef<[u8]>) -> Result<Component> {
let bytes = bytes.as_ref();
#[cfg(feature = "wat")]
let bytes = wat::parse_bytes(bytes)?;
Component::from_binary(engine, &bytes)
}
/// Compiles a new WebAssembly component from a wasm file on disk pointed to
/// by `file`.
//
// FIXME: need to write more docs here.
#[cfg(any(feature = "cranelift", feature = "winch"))]
#[cfg_attr(nightlydoc, doc(cfg(any(feature = "cranelift", feature = "winch"))))]
pub fn from_file(engine: &Engine, file: impl AsRef<Path>) -> Result<Component> {
match Self::new(
engine,
&fs::read(&file).with_context(|| "failed to read input file")?,
) {
Ok(m) => Ok(m),
Err(e) => {
cfg_if::cfg_if! {
if #[cfg(feature = "wat")] {
let mut e = e.downcast::<wat::Error>()?;
e.set_path(file);
bail!(e)
} else {
Err(e)
}
}
}
}
}
/// Compiles a new WebAssembly component from the in-memory wasm image
/// provided.
//
// FIXME: need to write more docs here.
#[cfg(any(feature = "cranelift", feature = "winch"))]
#[cfg_attr(nightlydoc, doc(cfg(any(feature = "cranelift", feature = "winch"))))]
pub fn from_binary(engine: &Engine, binary: &[u8]) -> Result<Component> {
engine
.check_compatible_with_native_host()
.context("compilation settings are not compatible with the native host")?;
let (mmap, artifacts) = Component::build_artifacts(engine, binary)?;
let mut code_memory = CodeMemory::new(mmap)?;
code_memory.publish()?;
Component::from_parts(engine, Arc::new(code_memory), Some(artifacts))
}
/// Same as [`Module::deserialize`], but for components.
///
/// Note that the file referenced here must contain contents previously
/// produced by [`Engine::precompile_component`] or
/// [`Component::serialize`].
///
/// For more information see the [`Module::deserialize`] method.
///
/// [`Module::deserialize`]: crate::Module::deserialize
pub unsafe fn deserialize(engine: &Engine, bytes: impl AsRef<[u8]>) -> Result<Component> {
let code = engine.load_code_bytes(bytes.as_ref(), ObjectKind::Component)?;
Component::from_parts(engine, code, None)
}
/// Same as [`Module::deserialize_file`], but for components.
///
/// For more information see the [`Component::deserialize`] and
/// [`Module::deserialize_file`] methods.
///
/// [`Module::deserialize_file`]: crate::Module::deserialize_file
pub unsafe fn | (engine: &Engine, path: impl AsRef<Path>) -> Result<Component> {
let code = engine.load_code_file(path.as_ref(), ObjectKind::Component)?;
Component::from_parts(engine, code, None)
}
/// Performs the compilation phase for a component, translating and
/// validating the provided wasm binary to machine code.
///
/// This method will compile all nested core wasm binaries in addition to
/// any necessary extra functions required for operation with components.
/// The output artifact here is the serialized object file contained within
/// an owned mmap along with metadata about the compilation itself.
#[cfg(any(feature = "cranelift", feature = "winch"))]
pub(crate) fn build_artifacts(
engine: &Engine,
binary: &[u8],
) -> Result<(MmapVec, ComponentArtifacts)> {
use crate::compiler::CompileInputs;
let tunables = &engine.config().tunables;
let compiler = engine.compiler();
let scope = ScopeVec::new();
let mut validator =
wasmparser::Validator::new_with_features(engine.config().features.clone());
let mut types = Default::default();
let (component, mut module_translations) =
Translator::new(tunables, &mut validator, &mut types, &scope)
.translate(binary)
.context("failed to parse WebAssembly module")?;
let types = types.finish();
let compile_inputs = CompileInputs::for_component(
&types,
&component,
module_translations.iter_mut().map(|(i, translation)| {
let functions = mem::take(&mut translation.function_body_inputs);
(i, &*translation, functions)
}),
);
let unlinked_compile_outputs = compile_inputs.compile(&engine)?;
let (compiled_funcs, function_indices) = unlinked_compile_outputs.pre_link();
let mut object = compiler.object(ObjectKind::Component)?;
engine.append_compiler_info(&mut object);
engine.append_bti(&mut object);
let (mut object, compilation_artifacts) = function_indices.link_and_append_code(
object,
&engine.config().tunables,
compiler,
compiled_funcs,
module_translations,
)?;
let info = CompiledComponentInfo {
component: component.component,
trampolines: compilation_artifacts.trampolines,
resource_drop_wasm_to_native_trampoline: compilation_artifacts
.resource_drop_wasm_to_native_trampoline,
};
let artifacts = ComponentArtifacts {
info,
types,
static_modules: compilation_artifacts.modules,
};
object.serialize_info(&artifacts);
let mmap = object.finish()?;
Ok((mmap, artifacts))
}
/// Final assembly step for a component from its in-memory representation.
///
/// If the `artifacts` are specified as `None` here then they will be
/// deserialized from `code_memory`.
fn from_parts(
engine: &Engine,
code_memory: Arc<CodeMemory>,
artifacts: Option<ComponentArtifacts>,
) -> Result<Component> {
let ComponentArtifacts {
info,
types,
static_modules,
} = match artifacts {
Some(artifacts) => artifacts,
None => bincode::deserialize(code_memory.wasmtime_info())?,
};
// Validate that the component can be used with the current instance
// allocator.
engine.allocator().validate_component(
&info.component,
&VMComponentOffsets::new(HostPtr, &info.component),
&|module_index| &static_modules[module_index].module,
)?;
// Create a signature registration with the `Engine` for all trampolines
// and core wasm types found within this component, both for the
// component and for all included core wasm modules.
let signatures =
SignatureCollection::new_for_module(engine.signatures(), types.module_types());
// Assemble the `CodeObject` artifact which is shared by all core wasm
// modules as well as the final component.
let types = Arc::new(types);
let code = Arc::new(CodeObject::new(code_memory, signatures, types.into()));
// Convert all information about static core wasm modules into actual
// `Module` instances by converting each `CompiledModuleInfo`, the
// `types` type information, and the code memory to a runtime object.
let static_modules = static_modules
.into_iter()
.map(|(_, info)| Module::from_parts_raw(engine, code.clone(), info, false))
.collect::<Result<_>>()?;
Ok(Component {
inner: Arc::new(ComponentInner {
static_modules,
code,
info,
}),
})
}
pub(crate) fn env_component(&self) -> &wasmtime_environ::component::Component {
&self.inner.info.component
}
pub(crate) fn static_module(&self, idx: StaticModuleIndex) -> &Module {
&self.inner.static_modules[idx]
}
pub(crate) fn types(&self) -> &Arc<ComponentTypes> {
self.inner.component_types()
}
pub(crate) fn signatures(&self) -> &SignatureCollection {
self.inner.code.signatures()
}
pub(crate) fn text(&self) -> &[u8] {
self.inner.code.code_memory().text()
}
pub(crate) fn trampoline_ptrs(&self, index: TrampolineIndex) -> AllCallFuncPointers {
let AllCallFunc {
wasm_call,
array_call,
native_call,
} = &self.inner.info.trampolines[index];
AllCallFuncPointers {
wasm_call: self.func(wasm_call).cast(),
array_call: unsafe {
mem::transmute::<NonNull<VMFunctionBody>, VMArrayCallFunction>(
self.func(array_call),
)
},
native_call: self.func(native_call).cast(),
}
}
fn func(&self, loc: &Function | deserialize_file | identifier_name |
component.rs | Function>,
}
#[derive(Serialize, Deserialize)]
pub(crate) struct ComponentArtifacts {
info: CompiledComponentInfo,
types: ComponentTypes,
static_modules: PrimaryMap<StaticModuleIndex, CompiledModuleInfo>,
}
impl Component {
/// Compiles a new WebAssembly component from the in-memory wasm image
/// provided.
//
// FIXME: need to write more docs here.
#[cfg(any(feature = "cranelift", feature = "winch"))]
#[cfg_attr(nightlydoc, doc(cfg(any(feature = "cranelift", feature = "winch"))))]
pub fn new(engine: &Engine, bytes: impl AsRef<[u8]>) -> Result<Component> {
let bytes = bytes.as_ref();
#[cfg(feature = "wat")]
let bytes = wat::parse_bytes(bytes)?;
Component::from_binary(engine, &bytes)
}
/// Compiles a new WebAssembly component from a wasm file on disk pointed to
/// by `file`.
//
// FIXME: need to write more docs here.
#[cfg(any(feature = "cranelift", feature = "winch"))]
#[cfg_attr(nightlydoc, doc(cfg(any(feature = "cranelift", feature = "winch"))))]
pub fn from_file(engine: &Engine, file: impl AsRef<Path>) -> Result<Component> |
/// Compiles a new WebAssembly component from the in-memory wasm image
/// provided.
//
// FIXME: need to write more docs here.
#[cfg(any(feature = "cranelift", feature = "winch"))]
#[cfg_attr(nightlydoc, doc(cfg(any(feature = "cranelift", feature = "winch"))))]
pub fn from_binary(engine: &Engine, binary: &[u8]) -> Result<Component> {
engine
.check_compatible_with_native_host()
.context("compilation settings are not compatible with the native host")?;
let (mmap, artifacts) = Component::build_artifacts(engine, binary)?;
let mut code_memory = CodeMemory::new(mmap)?;
code_memory.publish()?;
Component::from_parts(engine, Arc::new(code_memory), Some(artifacts))
}
/// Same as [`Module::deserialize`], but for components.
///
/// Note that the file referenced here must contain contents previously
/// produced by [`Engine::precompile_component`] or
/// [`Component::serialize`].
///
/// For more information see the [`Module::deserialize`] method.
///
/// [`Module::deserialize`]: crate::Module::deserialize
pub unsafe fn deserialize(engine: &Engine, bytes: impl AsRef<[u8]>) -> Result<Component> {
let code = engine.load_code_bytes(bytes.as_ref(), ObjectKind::Component)?;
Component::from_parts(engine, code, None)
}
/// Same as [`Module::deserialize_file`], but for components.
///
/// For more information see the [`Component::deserialize`] and
/// [`Module::deserialize_file`] methods.
///
/// [`Module::deserialize_file`]: crate::Module::deserialize_file
pub unsafe fn deserialize_file(engine: &Engine, path: impl AsRef<Path>) -> Result<Component> {
let code = engine.load_code_file(path.as_ref(), ObjectKind::Component)?;
Component::from_parts(engine, code, None)
}
/// Performs the compilation phase for a component, translating and
/// validating the provided wasm binary to machine code.
///
/// This method will compile all nested core wasm binaries in addition to
/// any necessary extra functions required for operation with components.
/// The output artifact here is the serialized object file contained within
/// an owned mmap along with metadata about the compilation itself.
#[cfg(any(feature = "cranelift", feature = "winch"))]
pub(crate) fn build_artifacts(
engine: &Engine,
binary: &[u8],
) -> Result<(MmapVec, ComponentArtifacts)> {
use crate::compiler::CompileInputs;
let tunables = &engine.config().tunables;
let compiler = engine.compiler();
let scope = ScopeVec::new();
let mut validator =
wasmparser::Validator::new_with_features(engine.config().features.clone());
let mut types = Default::default();
let (component, mut module_translations) =
Translator::new(tunables, &mut validator, &mut types, &scope)
.translate(binary)
.context("failed to parse WebAssembly module")?;
let types = types.finish();
let compile_inputs = CompileInputs::for_component(
&types,
&component,
module_translations.iter_mut().map(|(i, translation)| {
let functions = mem::take(&mut translation.function_body_inputs);
(i, &*translation, functions)
}),
);
let unlinked_compile_outputs = compile_inputs.compile(&engine)?;
let (compiled_funcs, function_indices) = unlinked_compile_outputs.pre_link();
let mut object = compiler.object(ObjectKind::Component)?;
engine.append_compiler_info(&mut object);
engine.append_bti(&mut object);
let (mut object, compilation_artifacts) = function_indices.link_and_append_code(
object,
&engine.config().tunables,
compiler,
compiled_funcs,
module_translations,
)?;
let info = CompiledComponentInfo {
component: component.component,
trampolines: compilation_artifacts.trampolines,
resource_drop_wasm_to_native_trampoline: compilation_artifacts
.resource_drop_wasm_to_native_trampoline,
};
let artifacts = ComponentArtifacts {
info,
types,
static_modules: compilation_artifacts.modules,
};
object.serialize_info(&artifacts);
let mmap = object.finish()?;
Ok((mmap, artifacts))
}
/// Final assembly step for a component from its in-memory representation.
///
/// If the `artifacts` are specified as `None` here then they will be
/// deserialized from `code_memory`.
fn from_parts(
engine: &Engine,
code_memory: Arc<CodeMemory>,
artifacts: Option<ComponentArtifacts>,
) -> Result<Component> {
let ComponentArtifacts {
info,
types,
static_modules,
} = match artifacts {
Some(artifacts) => artifacts,
None => bincode::deserialize(code_memory.wasmtime_info())?,
};
// Validate that the component can be used with the current instance
// allocator.
engine.allocator().validate_component(
&info.component,
&VMComponentOffsets::new(HostPtr, &info.component),
&|module_index| &static_modules[module_index].module,
)?;
// Create a signature registration with the `Engine` for all trampolines
// and core wasm types found within this component, both for the
// component and for all included core wasm modules.
let signatures =
SignatureCollection::new_for_module(engine.signatures(), types.module_types());
// Assemble the `CodeObject` artifact which is shared by all core wasm
// modules as well as the final component.
let types = Arc::new(types);
let code = Arc::new(CodeObject::new(code_memory, signatures, types.into()));
// Convert all information about static core wasm modules into actual
// `Module` instances by converting each `CompiledModuleInfo`, the
// `types` type information, and the code memory to a runtime object.
let static_modules = static_modules
.into_iter()
.map(|(_, info)| Module::from_parts_raw(engine, code.clone(), info, false))
.collect::<Result<_>>()?;
Ok(Component {
inner: Arc::new(ComponentInner {
static_modules,
code,
info,
}),
})
}
pub(crate) fn env_component(&self) -> &wasmtime_environ::component::Component {
&self.inner.info.component
}
pub(crate) fn static_module(&self, idx: StaticModuleIndex) -> &Module {
&self.inner.static_modules[idx]
}
pub(crate) fn types(&self) -> &Arc<ComponentTypes> {
self.inner.component_types()
}
pub(crate) fn signatures(&self) -> &SignatureCollection {
self.inner.code.signatures()
}
pub(crate) fn text(&self) -> &[u8] {
self.inner.code.code_memory().text()
}
pub(crate) fn trampoline_ptrs(&self, index: TrampolineIndex) -> AllCallFuncPointers {
let AllCallFunc {
wasm_call,
array_call,
native_call,
} = &self.inner.info.trampolines[index];
AllCallFuncPointers {
wasm_call: self.func(wasm_call).cast(),
array_call: unsafe {
mem::transmute::<NonNull<VMFunctionBody>, VMArrayCallFunction>(
self.func(array_call),
)
},
native_call: self.func(native_call).cast(),
}
}
fn func(&self, loc: & | {
match Self::new(
engine,
&fs::read(&file).with_context(|| "failed to read input file")?,
) {
Ok(m) => Ok(m),
Err(e) => {
cfg_if::cfg_if! {
if #[cfg(feature = "wat")] {
let mut e = e.downcast::<wat::Error>()?;
e.set_path(file);
bail!(e)
} else {
Err(e)
}
}
}
}
} | identifier_body |
component.rs | /// Compiles a new WebAssembly component from a wasm file on disk pointed to
/// by `file`.
//
// FIXME: need to write more docs here.
#[cfg(any(feature = "cranelift", feature = "winch"))]
#[cfg_attr(nightlydoc, doc(cfg(any(feature = "cranelift", feature = "winch"))))]
pub fn from_file(engine: &Engine, file: impl AsRef<Path>) -> Result<Component> {
match Self::new(
engine,
&fs::read(&file).with_context(|| "failed to read input file")?,
) {
Ok(m) => Ok(m),
Err(e) => {
cfg_if::cfg_if! {
if #[cfg(feature = "wat")] {
let mut e = e.downcast::<wat::Error>()?;
e.set_path(file);
bail!(e)
} else {
Err(e)
}
}
}
}
}
/// Compiles a new WebAssembly component from the in-memory wasm image
/// provided.
//
// FIXME: need to write more docs here.
#[cfg(any(feature = "cranelift", feature = "winch"))]
#[cfg_attr(nightlydoc, doc(cfg(any(feature = "cranelift", feature = "winch"))))]
pub fn from_binary(engine: &Engine, binary: &[u8]) -> Result<Component> {
engine
.check_compatible_with_native_host()
.context("compilation settings are not compatible with the native host")?;
let (mmap, artifacts) = Component::build_artifacts(engine, binary)?;
let mut code_memory = CodeMemory::new(mmap)?;
code_memory.publish()?;
Component::from_parts(engine, Arc::new(code_memory), Some(artifacts))
}
/// Same as [`Module::deserialize`], but for components.
///
/// Note that the file referenced here must contain contents previously
/// produced by [`Engine::precompile_component`] or
/// [`Component::serialize`].
///
/// For more information see the [`Module::deserialize`] method.
///
/// [`Module::deserialize`]: crate::Module::deserialize
pub unsafe fn deserialize(engine: &Engine, bytes: impl AsRef<[u8]>) -> Result<Component> {
let code = engine.load_code_bytes(bytes.as_ref(), ObjectKind::Component)?;
Component::from_parts(engine, code, None)
}
/// Same as [`Module::deserialize_file`], but for components.
///
/// For more information see the [`Component::deserialize`] and
/// [`Module::deserialize_file`] methods.
///
/// [`Module::deserialize_file`]: crate::Module::deserialize_file
pub unsafe fn deserialize_file(engine: &Engine, path: impl AsRef<Path>) -> Result<Component> {
let code = engine.load_code_file(path.as_ref(), ObjectKind::Component)?;
Component::from_parts(engine, code, None)
}
/// Performs the compilation phase for a component, translating and
/// validating the provided wasm binary to machine code.
///
/// This method will compile all nested core wasm binaries in addition to
/// any necessary extra functions required for operation with components.
/// The output artifact here is the serialized object file contained within
/// an owned mmap along with metadata about the compilation itself.
#[cfg(any(feature = "cranelift", feature = "winch"))]
pub(crate) fn build_artifacts(
engine: &Engine,
binary: &[u8],
) -> Result<(MmapVec, ComponentArtifacts)> {
use crate::compiler::CompileInputs;
let tunables = &engine.config().tunables;
let compiler = engine.compiler();
let scope = ScopeVec::new();
let mut validator =
wasmparser::Validator::new_with_features(engine.config().features.clone());
let mut types = Default::default();
let (component, mut module_translations) =
Translator::new(tunables, &mut validator, &mut types, &scope)
.translate(binary)
.context("failed to parse WebAssembly module")?;
let types = types.finish();
let compile_inputs = CompileInputs::for_component(
&types,
&component,
module_translations.iter_mut().map(|(i, translation)| {
let functions = mem::take(&mut translation.function_body_inputs);
(i, &*translation, functions)
}),
);
let unlinked_compile_outputs = compile_inputs.compile(&engine)?;
let (compiled_funcs, function_indices) = unlinked_compile_outputs.pre_link();
let mut object = compiler.object(ObjectKind::Component)?;
engine.append_compiler_info(&mut object);
engine.append_bti(&mut object);
let (mut object, compilation_artifacts) = function_indices.link_and_append_code(
object,
&engine.config().tunables,
compiler,
compiled_funcs,
module_translations,
)?;
let info = CompiledComponentInfo {
component: component.component,
trampolines: compilation_artifacts.trampolines,
resource_drop_wasm_to_native_trampoline: compilation_artifacts
.resource_drop_wasm_to_native_trampoline,
};
let artifacts = ComponentArtifacts {
info,
types,
static_modules: compilation_artifacts.modules,
};
object.serialize_info(&artifacts);
let mmap = object.finish()?;
Ok((mmap, artifacts))
}
/// Final assembly step for a component from its in-memory representation.
///
/// If the `artifacts` are specified as `None` here then they will be
/// deserialized from `code_memory`.
fn from_parts(
engine: &Engine,
code_memory: Arc<CodeMemory>,
artifacts: Option<ComponentArtifacts>,
) -> Result<Component> {
let ComponentArtifacts {
info,
types,
static_modules,
} = match artifacts {
Some(artifacts) => artifacts,
None => bincode::deserialize(code_memory.wasmtime_info())?,
};
// Validate that the component can be used with the current instance
// allocator.
engine.allocator().validate_component(
&info.component,
&VMComponentOffsets::new(HostPtr, &info.component),
&|module_index| &static_modules[module_index].module,
)?;
// Create a signature registration with the `Engine` for all trampolines
// and core wasm types found within this component, both for the
// component and for all included core wasm modules.
let signatures =
SignatureCollection::new_for_module(engine.signatures(), types.module_types());
// Assemble the `CodeObject` artifact which is shared by all core wasm
// modules as well as the final component.
let types = Arc::new(types);
let code = Arc::new(CodeObject::new(code_memory, signatures, types.into()));
// Convert all information about static core wasm modules into actual
// `Module` instances by converting each `CompiledModuleInfo`, the
// `types` type information, and the code memory to a runtime object.
let static_modules = static_modules
.into_iter()
.map(|(_, info)| Module::from_parts_raw(engine, code.clone(), info, false))
.collect::<Result<_>>()?;
Ok(Component {
inner: Arc::new(ComponentInner {
static_modules,
code,
info,
}),
})
}
pub(crate) fn env_component(&self) -> &wasmtime_environ::component::Component {
&self.inner.info.component
}
pub(crate) fn static_module(&self, idx: StaticModuleIndex) -> &Module {
&self.inner.static_modules[idx]
}
pub(crate) fn types(&self) -> &Arc<ComponentTypes> {
self.inner.component_types()
}
pub(crate) fn signatures(&self) -> &SignatureCollection {
self.inner.code.signatures()
}
pub(crate) fn text(&self) -> &[u8] {
self.inner.code.code_memory().text()
}
pub(crate) fn trampoline_ptrs(&self, index: TrampolineIndex) -> AllCallFuncPointers {
let AllCallFunc {
wasm_call,
array_call,
native_call,
} = &self.inner.info.trampolines[index];
AllCallFuncPointers {
wasm_call: self.func(wasm_call).cast(),
array_call: unsafe {
mem::transmute::<NonNull<VMFunctionBody>, VMArrayCallFunction>(
self.func(array_call),
)
},
native_call: self.func(native_call).cast(),
}
}
fn func(&self, loc: &FunctionLoc) -> NonNull<VMFunctionBody> {
let text = self.text();
let trampoline = &text[loc.start as usize..][..loc.length as usize];
NonNull::new(trampoline.as_ptr() as *mut VMFunctionBody).unwrap()
}
pub(crate) fn code_object(&self) -> &Arc<CodeObject> {
&self.inner.code
}
/// Same as [`Module::serialize`], except for a component.
///
/// Note that the artifact produced here must be passed to
/// [`Component::deserialize`] and is not compatible for use with
/// [`Module`].
///
/// [`Module::serialize`]: crate::Module::serialize
/// [`Module`]: crate::Module | pub fn serialize(&self) -> Result<Vec<u8>> {
Ok(self.code_object().code_memory().mmap().to_vec()) | random_line_split |
|
utils.ts | Id}" aria-live="assertive" aria-atomic="true" style="position: absolute; top: 10px; right: 10px;z-index:1051;opacity:1">
<div class="toast-header">
<img src="/img/logo.png" class="rounded mr-2" style="height: 16px">
<strong class="mr-auto">${title}</strong>
<button type="button" class="ml-2 mb-1 close no-outline" data-dismiss="toast" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="toast-body">
${msg}
</div>
</div></div>`)
$(`#${dialogId}`).toast('show')
},
closeDialog (dialogId) {
$(`#${dialogId}`).modal('hide')
$(`#${dialogId}`).remove()
delete window[dialogId + 'okCb']
},
loading (content, dialogId = '') {
if (!dialogId) dialogId = this.uuid(8, 16, 'dlg')
const loadingCb = function () {
$(`#${dialogId}`).modal('hide')
}
window[dialogId + 'okCb'] = loadingCb
$('body').append(`
<div class="modal no-user-select" tabindex="-1" data-backdrop="static" role="dialog" id="${dialogId}">
<div class="modal-dialog modal-sm modal-dialog-centered" role="document">
<div class="modal-content">
<div class="modal-body text-center">
<div class="text-center m-3 text-white text-muted">${content}</div>
<div class="progress">
<div class="progress-bar progress-bar-striped progress-bar-animated" role="progressbar"
aria-valuenow="100" aria-valuemin="0" aria-valuemax="100" style="width: 100%"></div>
</div>
</div>
</div>
</div>
</div>`)
$(`#${dialogId}`).modal('show')
return dialogId
},
openDialog (title, content, okText, cancelText = '', okCb: any = null, dialogId = '') {
if (!dialogId) dialogId = this.uuid(8, 16, 'dlg')
if (!okCb) {
okCb = function () {
$(`#${dialogId}`).modal('hide')
$(`#${dialogId}`).remove()
}
}
window[dialogId + 'okCb'] = okCb
$('body').append(`
<div class="modal" tabindex="-1" role="dialog" id="${dialogId}">
<div class="modal-dialog modal-dialog-centered" role="document">
<div class="modal-content">
<div class="modal-header no-border">
<h5 class="modal-title ${title ? '' : 'd-none'}">${title}</h5>
<button type="button" class="close" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body">
${content}
</div>
<div class="modal-footer no-border">
<button type="button" class="btn btn-secondary ${cancelText ? '' : 'd-none'}" data-dismiss="modal">${cancelText}</button>
<button type="button" class="btn btn-primary" onclick="${dialogId}okCb('${dialogId}')">${okText}</button>
</div>
</div>
</div>
</div>`)
$(`#${dialogId}`).modal('show')
return dialogId
},
get (url, data = {}, cb) {
$.ajax({
headers: {
token: this.getJwt()
},
url: url,
data: data,
crossDomain: true,
success: (data) => cb(data),
dataType: 'json'
})
},
post (url, data = {}, files: Record<string, any>, cb) {
const fd: FormData = new FormData()
for (const key in data) {
fd.append(key, data[key])
}
for (const file in files) {
fd.append(file, files[file])
}
$.ajax({
headers: {
token: this.getJwt()
},
method: 'post',
processData: false,
contentType: false,
url: url,
data: fd,
crossDomain: true,
success: (data) => cb(data),
error: (data) => cb(data),
dataType: 'json' | objs.forEach(obj => {
if (obj) {
Object.keys(obj).forEach(key => {
const val = obj[key]
if (this.isPlainObject(val)) {
// recurse into nested plain objects
if (this.isPlainObject(result[key])) {
result[key] = this.deepMerge(result[key], val)
} else {
result[key] = this.deepMerge(val)
}
} else {
result[key] = val
}
})
}
})
// console.log(result)
return result
},
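// Illustrative usage sketch (added for clarity, not part of the original source; the enclosing
// helper object is assumed to be exported as `utils`): plain objects are merged recursively,
// while any other value from a later argument simply overwrites the earlier one, e.g.
//   utils.deepMerge({ a: { x: 1 } }, { a: { y: 2 }, b: 3 })
//   // -> { a: { x: 1, y: 2 }, b: 3 }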
isPlainObject (val) {
return toString.call(val) === '[object Object]'
},
/**
* Toggle the display of a Popper pop-up menu
*
* @param vueObject Vue instance; it must have a notifyDismissAllMenu method
* @param openWhere
* @param openForm
* @param trigger
* @param placement
* @param offset
* @param dismissAllMenu
*/
togglePopper: function (vueObject, openWhere, openForm, trigger, placement: Placement = 'bottom-end', offset = [0, 10], dismissAllMenu = true) {
const oldState = vueObject[trigger]
if (dismissAllMenu) vueObject.notifyDismissAllMenu({ trigger })
vueObject[trigger] = !oldState
if (!vueObject[trigger]) return
vueObject.$nextTick(function () {
const el = this.$refs[openForm].$el || this.$refs[openForm] // if the ref is a component, we need its inner el
const popper = createPopper(openWhere, el, {
placement,
modifiers: [
{
name: 'offset',
options: {
offset: offset
}
}
]
})
})
},
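// Hypothetical call sketch (ref/flag names are assumptions, not from the original source):
//   this.togglePopper(this, event.currentTarget, 'menuPopup', 'menuVisible')
// This asks other menus to dismiss, flips the `menuVisible` flag on the Vue instance and,
// when the flag becomes true, positions the `menuPopup` ref next to the clicked element via
// Popper with the given placement and offset.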
/**
* Get the ids of the pages currently visible on screen
* @return Array<string>
*/
getPageIdInScreen: function () {
const clientWidth = document.body.clientWidth
const clientHeight = document.body.clientHeight
const pageInScreen: Array<string> = []
$('.editor').each(function (idx, el) {
const rect = el.getBoundingClientRect()
if (rect.bottom <= 83 /* scrolled out of the workspace area */ || rect.right <= 0 || rect.left >= clientWidth || rect.top >= clientHeight) {
return
}
const pageid = $(el).attr('data-page-ref') as string
pageInScreen.push(pageid)
})
return pageInScreen
},
/**
* Check whether the given element lies completely inside the specified page
* @return boolean
*/
elementIsInPage (el: HTMLElement | SVGElement, page: HTMLElement) {
if (!el || !page) return false
const elRect: DOMRect = el.getBoundingClientRect()
const pageRect: DOMRect = page.getBoundingClientRect()
if (elRect.left >= pageRect.left && elRect.right <= pageRect.right && elRect.top >= pageRect.top && elRect.bottom <= pageRect.bottom) {
return true
}
return false
},
/**
* Get the id of the on-screen page closest to the screen centre, i.e. the page whose centre has the smallest distance to the screen centre; if one of those pages is selected, return the selected one
*/
getPageIdInScreenCenter: function () {
const clientWidth = document.body.clientWidth
const clientHeight = document.body.clientHeight
const screenCenterX = clientWidth / 2
const screenCenterY = clientHeight / 2
// console.log(`screen center: ${screenCenterX}x${screenCenterY}`)
const pageInScreen = {}
let selected = ''
let selectedPageDist = 0
$('.editor').each(function (idx, el) {
const rect = el.getBoundingClientRect()
if (rect.bottom <= 83 /* scrolled out of the workspace area */ || rect.right <= 0 || rect.left >= clientWidth || rect.top >= clientHeight) {
return
}
const centerY = rect.height / 2 + rect.top
const centerX = rect.width / 2 + rect.left
const dist = Math.sqrt(Math.pow(centerY - screenCenterY, 2) + Math.pow(centerX - screenCenterX, 2))
const pageid = $(el).attr('data-page-ref') as string
pageInScreen[dist] = pageid
if ($(el).hasClass('selected')) {
selectedPageDist = dist
selected = pageid
}
})
// console.log(pageInScreen)
const dists = Object.keys(pageIn | })
},
deepMerge (...objs) {
const result = Object.create(null) | random_line_split |
utils.ts | torage.setItem('jwt', jwt)
},
getJwt () {
return window.sessionStorage.getItem('jwt')
},
saveDesign (api, design, cb: any = null) {
const jwt = this.getJwt()
if (!design.pages || design.pages.length === 0) return
// console.log(design)
const files = {}
let fileCount = 0
const promises: any = []
for (const pageindex in design.pages) {
const page = design.pages[pageindex]
const node = $(`#${page.id} .scaled-content`).get(0)
if (!node) continue
promises.push(domtoimage.toBlob(node))
}
new Promise((resolve) => {
if (promises.length === 0) {
resolve()
return
}
for (let pageindex = 0; pageindex < promises.length; pageindex++) {
promises[pageindex].then(blob => {
files[`preview_url[${pageindex}]`] = new File([blob], `preview-${pageindex}.png`)
fileCount++
if (fileCount >= design.pages.length) {
resolve()
}
}).catch(err => {
console.error('domtoimage oops, something went wrong!', err)
fileCount++
if (fileCount >= design.pages.length) {
resolve()
}
})
}
}).then((blobs) => {
// console.log(files)
this.post(api + 'design/save.json', { meta: JSON.stringify(design) }, files, (rst) => {
if (cb) {
cb(rst)
}
if (!rst || !rst.success) {
this.toast('保存失败', rst.msg || '自动保存失败')
}
}, 'json')
})
},
toast (title, msg) {
const dialogId = this.uuid(8, 16, 'tst')
$('body').append(`<div class=" d-flex justify-content-center align-items-center">
<div class="toast" role="alert" data-delay="3000" id="${dialogId}" aria-live="assertive" aria-atomic="true" style="position: absolute; top: 10px; right: 10px;z-index:1051;opacity:1">
<div class="toast-header">
<img src="/img/logo.png" class="rounded mr-2" style="height: 16px">
<strong class="mr-auto">${title}</strong>
<button type="button" class="ml-2 mb-1 close no-outline" data-dismiss="toast" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="toast-body">
${msg}
</div>
</div></div>`)
$(`#${dialogId}`).toast('show')
},
closeDialog (dialogId) {
$(`#${dialogId}`).modal('hide')
$(`#${dialogId}`).remove()
delete window[dialogId + 'okCb']
},
loading (content, dialogId = '') {
if (!dialogId) dialogId = this.uuid(8, 16, 'dlg')
const loadingCb = function () {
$(`#${dialogId}`).modal('hide')
}
window[dialogId + 'okCb'] = loadingCb
$('body').append(`
<div class="modal no-user-select" tabindex="-1" data-backdrop="static" role="dialog" id="${dialogId}">
<div class="modal-dialog modal-sm modal-dialog-centered" role="document">
<div class="modal-content">
<div class="modal-body text-center">
<div class="text-center m-3 text-white text-muted">${content}</div>
<div class="progress">
<div class="progress-bar progress-bar-striped progress-bar-animated" role="progressbar"
aria-valuenow="100" aria-valuemin="0" aria-valuemax="100" style="width: 100%"></div>
</div>
</div>
</div>
</div>
</div>`)
$(`#${dialogId}`).modal('show')
return dialogId
},
openDialog (title, content, okText, cancelText = '', okCb: any = null, dialogId = '') {
if (!dialogId) dialogId = this.uuid(8, 16, 'dlg')
if (!okCb) {
okCb = function () {
$(`#${dialogId}`).modal('hide')
$(`#${dialogId}`).remove()
}
}
window[dialogId + 'okCb'] = okCb
$('body').append(`
<div class="modal" tabindex="-1" role="dialog" id="${dialogId}">
<div class="modal-dialog modal-dialog-centered" role="document">
<div class="modal-content">
<div class="modal-header no-border">
<h5 class="modal-title ${title ? '' : 'd-none'}">${title}</h5>
<button type="button" class="close" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body">
${content}
</div>
<div class="modal-footer no-border">
<button type="button" class="btn btn-secondary ${cancelText ? '' : 'd-none'}" data-dismiss="modal">${cancelText}</button>
<button type="button" class="btn btn-primary" onclick="${dialogId}okCb('${dialogId}')">${okText}</button>
</div>
</div>
</div>
</div>`)
$(`#${dialogId}`).modal('show')
return dialogId
},
get (url, data = {}, cb) {
$.ajax({
headers: {
token: this.getJwt()
},
url: url,
data: data,
crossDomain: true,
success: (data) => cb(data),
dataType: 'json'
})
},
post (url, data = {}, files: Record<string, any>, cb) {
const fd: FormData = new FormData()
for (const key in data) {
fd.append(key, data[key])
}
for (const file in files) {
fd.append(file, files[file])
}
$.ajax({
headers: {
token: this.getJwt()
},
method: 'post',
processData: false,
contentType: false,
url: url,
data: fd,
crossDomain: true,
success: (data) => cb(data),
error: (data) => cb(data),
dataType: 'json'
})
},
deepMerge (...objs) {
const result = Object.create(null)
objs.forEach(obj => {
if (obj) {
Object.keys(obj).forEach(key => {
const val = obj[key]
if (this.isPlainObject(val)) {
// recurse into nested plain objects
if (this.isPlainObject(result[key])) {
result[key] = this.deepMerge(result[key], val)
} else {
result[key] = this.deepMerge(val)
}
} else {
result[key] = val
}
})
}
})
// console.log(result)
return result
},
isPlainObject (val) {
return toString.call(val) === '[object Object]'
},
/**
* Toggle the display of a Popper pop-up menu
*
* @param vueObject Vue instance; it must have a notifyDismissAllMenu method
* @param openWhere
* @param openForm
* @param trigger
* @param placement
* @param offset
* @param dismissAllMenu
*/
togglePopper: function (vueObject, openWhere, openForm, trigger, placement: Placement = 'bottom-end', offset = [0, 10], dismissAllMenu = true) {
const oldState = vueObject[trigger]
if (dismissAllMenu) vueObject.notifyDismissAllMenu({ trigger })
vueObject[trigger] = !oldState
if (!vueObject[trigger]) return
vueObject.$nextTick(function () {
const el = this.$refs[openForm].$el || this.$refs[openForm] // if the ref is a component, we need its inner el
const popper = createPopper(openWhere, el, {
placement,
modifiers: [
{
name: 'offset',
options: {
offset: offset
}
}
]
})
})
},
/**
* Get the ids of the pages currently visible on screen
* @return Array<string>
*/
getPageIdInScreen: function () {
const clientWidth = document.body.clientWidth
const clientHeight = document.body.clientHeight
const pageInScreen: Array<string> = []
$('.editor').each(function (idx, el) {
const rect = el.getBoundingClientRect()
if (rect.bottom <= 83 /* scrolled out of the workspace area */ || rect.right <= 0 || rect.left >= clientWidth || rect.top >= clientHeight) {
return
}
const pageid = $(el).attr('data-page-ref') as string
pageInScreen.push(pageid)
})
return pageInScreen
},
/**
* � | essionS | identifier_name |
|
utils.ts | }" aria-live="assertive" aria-atomic="true" style="position: absolute; top: 10px; right: 10px;z-index:1051;opacity:1">
<div class="toast-header">
<img src="/img/logo.png" class="rounded mr-2" style="height: 16px">
<strong class="mr-auto">${title}</strong>
<button type="button" class="ml-2 mb-1 close no-outline" data-dismiss="toast" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="toast-body">
${msg}
</div>
</div></div>`)
$(`#${dialogId}`).toast('show')
},
closeDialog (dialogId) {
$(`#${dialogId}`).modal('hide')
$(`#${dialogId}`).remove()
delete window[dialogId + 'okCb']
},
loading (content, dialogId = '') {
if (!dialogId) dialogId = this.uuid(8, 16, 'dlg')
const loadingCb = function () {
$(`#${dialogId}`).modal('hide')
}
window[dialogId + 'okCb'] = loadingCb
$('body').append(`
<div class="modal no-user-select" tabindex="-1" data-backdrop="static" role="dialog" id="${dialogId}">
<div class="modal-dialog modal-sm modal-dialog-centered" role="document">
<div class="modal-content">
<div class="modal-body text-center">
<div class="text-center m-3 text-white text-muted">${content}</div>
<div class="progress">
<div class="progress-bar progress-bar-striped progress-bar-animated" role="progressbar"
aria-valuenow="100" aria-valuemin="0" aria-valuemax="100" style="width: 100%"></div>
</div>
</div>
</div>
</div>
</div>`)
$(`#${dialogId}`).modal('show')
return dialogId
},
openDialog (title, content, okText, cancelText = '', okCb: any = null, dialogId = '') {
if (!dialogId) dialogId = this.uuid(8, 16, 'dlg')
if (!okCb) {
okCb = function () {
$(`#${dialogId}`).modal('hide')
$(`#${dialogId}`).remove()
}
}
window[dialogId + 'okCb'] = okCb
$('body').append(`
<div class="modal" tabindex="-1" role="dialog" id="${dialogId}">
<div class="modal-dialog modal-dialog-centered" role="document">
<div class="modal-content">
<div class="modal-header no-border">
<h5 class="modal-title ${title ? '' : 'd-none'}">${title}</h5>
<button type="button" class="close" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body">
${content}
</div>
<div class="modal-footer no-border">
<button type="button" class="btn btn-secondary ${cancelText ? '' : 'd-none'}" data-dismiss="modal">${cancelText}</button>
<button type="button" class="btn btn-primary" onclick="${dialogId}okCb('${dialogId}')">${okText}</button>
</div>
</div>
</div>
</div>`)
$(`#${dialogId}`).modal('show')
return dialogId
},
get (url, data = {}, cb) {
$.ajax({
headers: {
token: this.getJwt()
},
url: url,
data: data,
crossDomain: true,
success: (data) => cb(data),
dataType: 'json'
})
},
post (url, data = {}, files: Record<string, any>, cb) {
const fd: FormData = new FormData()
fo | },
deepMerge (...objs) {
const result = Object.create(null)
objs.forEach(obj => {
if (obj) {
Object.keys(obj).forEach(key => {
const val = obj[key]
if (this.isPlainObject(val)) {
// recurse into nested plain objects
if (this.isPlainObject(result[key])) {
result[key] = this.deepMerge(result[key], val)
} else {
result[key] = this.deepMerge(val)
}
} else {
result[key] = val
}
})
}
})
// console.log(result)
return result
},
isPlainObject (val) {
return toString.call(val) === '[object Object]'
},
/**
* Toggle the display of a Popper pop-up menu
*
* @param vueObject Vue instance; it must have a notifyDismissAllMenu method
* @param openWhere
* @param openForm
* @param trigger
* @param placement
* @param offset
* @param dismissAllMenu
*/
togglePopper: function (vueObject, openWhere, openForm, trigger, placement: Placement = 'bottom-end', offset = [0, 10], dismissAllMenu = true) {
const oldState = vueObject[trigger]
if (dismissAllMenu) vueObject.notifyDismissAllMenu({ trigger })
vueObject[trigger] = !oldState
if (!vueObject[trigger]) return
vueObject.$nextTick(function () {
const el = this.$refs[openForm].$el || this.$refs[openForm] // if the ref is a component, we need its inner el
const popper = createPopper(openWhere, el, {
placement,
modifiers: [
{
name: 'offset',
options: {
offset: offset
}
}
]
})
})
},
/**
* Get the ids of the pages currently visible on screen
* @return Array<string>
*/
getPageIdInScreen: function () {
const clientWidth = document.body.clientWidth
const clientHeight = document.body.clientHeight
const pageInScreen: Array<string> = []
$('.editor').each(function (idx, el) {
const rect = el.getBoundingClientRect()
if (rect.bottom <= 83 /* scrolled out of the workspace area */ || rect.right <= 0 || rect.left >= clientWidth || rect.top >= clientHeight) {
return
}
const pageid = $(el).attr('data-page-ref') as string
pageInScreen.push(pageid)
})
return pageInScreen
},
/**
* Check whether the given element lies completely inside the specified page
* @return boolean
*/
elementIsInPage (el: HTMLElement | SVGElement, page: HTMLElement) {
if (!el || !page) return false
const elRect: DOMRect = el.getBoundingClientRect()
const pageRect: DOMRect = page.getBoundingClientRect()
if (elRect.left >= pageRect.left && elRect.right <= pageRect.right && elRect.top >= pageRect.top && elRect.bottom <= pageRect.bottom) {
return true
}
return false
},
/**
* Get the id of the on-screen page closest to the screen centre, i.e. the page whose centre has the smallest distance to the screen centre; if one of those pages is selected, return the selected one
*/
getPageIdInScreenCenter: function () {
const clientWidth = document.body.clientWidth
const clientHeight = document.body.clientHeight
const screenCenterX = clientWidth / 2
const screenCenterY = clientHeight / 2
// console.log(`screen center: ${screenCenterX}x${screenCenterY}`)
const pageInScreen = {}
let selected = ''
let selectedPageDist = 0
$('.editor').each(function (idx, el) {
const rect = el.getBoundingClientRect()
if (rect.bottom <= 83 /* scrolled out of the workspace area */ || rect.right <= 0 || rect.left >= clientWidth || rect.top >= clientHeight) {
return
}
const centerY = rect.height / 2 + rect.top
const centerX = rect.width / 2 + rect.left
const dist = Math.sqrt(Math.pow(centerY - screenCenterY, 2) + Math.pow(centerX - screenCenterX, 2))
const pageid = $(el).attr('data-page-ref') as string
pageInScreen[dist] = pageid
if ($(el).hasClass('selected')) {
selectedPageDist = dist
selected = pageid
}
})
// console.log(pageInScreen)
const dists = Object.keys | r (const key in data) {
fd.append(key, data[key])
}
for (const file in files) {
fd.append(file, files[file])
}
$.ajax({
headers: {
token: this.getJwt()
},
method: 'post',
processData: false,
contentType: false,
url: url,
data: fd,
crossDomain: true,
success: (data) => cb(data),
error: (data) => cb(data),
dataType: 'json'
}) | identifier_body |
utils.ts | }" aria-live="assertive" aria-atomic="true" style="position: absolute; top: 10px; right: 10px;z-index:1051;opacity:1">
<div class="toast-header">
<img src="/img/logo.png" class="rounded mr-2" style="height: 16px">
<strong class="mr-auto">${title}</strong>
<button type="button" class="ml-2 mb-1 close no-outline" data-dismiss="toast" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="toast-body">
${msg}
</div>
</div></div>`)
$(`#${dialogId}`).toast('show')
},
closeDialog (dialogId) {
$(`#${dialogId}`).modal('hide')
$(`#${dialogId}`).remove()
delete window[dialogId + 'okCb']
},
loading (content, dialogId = '') {
if (!dialogId) dialogId = this.uuid(8, 16, 'dlg')
const loadingCb = function () {
$(`#${dialogId}`).modal('hide')
}
window[dialogId + 'okCb'] = loadingCb
$('body').append(`
<div class="modal no-user-select" tabindex="-1" data-backdrop="static" role="dialog" id="${dialogId}">
<div class="modal-dialog modal-sm modal-dialog-centered" role="document">
<div class="modal-content">
<div class="modal-body text-center">
<div class="text-center m-3 text-white text-muted">${content}</div>
<div class="progress">
<div class="progress-bar progress-bar-striped progress-bar-animated" role="progressbar"
aria-valuenow="100" aria-valuemin="0" aria-valuemax="100" style="width: 100%"></div>
</div>
</div>
</div>
</div>
</div>`)
$(`#${dialogId}`).modal('show')
return dialogId
},
openDialog (title, content, okText, cancelText = '', okCb: any = null, dialogId = '') {
if (!dialogId) dialogId = this.uuid(8, 16, 'dlg')
if (!okCb) {
okCb = function () {
$(`#${dialo | dy').append(`
<div class="modal" tabindex="-1" role="dialog" id="${dialogId}">
<div class="modal-dialog modal-dialog-centered" role="document">
<div class="modal-content">
<div class="modal-header no-border">
<h5 class="modal-title ${title ? '' : 'd-none'}">${title}</h5>
<button type="button" class="close" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body">
${content}
</div>
<div class="modal-footer no-border">
<button type="button" class="btn btn-secondary ${cancelText ? '' : 'd-none'}" data-dismiss="modal">${cancelText}</button>
<button type="button" class="btn btn-primary" onclick="${dialogId}okCb('${dialogId}')">${okText}</button>
</div>
</div>
</div>
</div>`)
$(`#${dialogId}`).modal('show')
return dialogId
},
get (url, data = {}, cb) {
$.ajax({
headers: {
token: this.getJwt()
},
url: url,
data: data,
crossDomain: true,
success: (data) => cb(data),
dataType: 'json'
})
},
post (url, data = {}, files: Record<string, any>, cb) {
const fd: FormData = new FormData()
for (const key in data) {
fd.append(key, data[key])
}
for (const file in files) {
fd.append(file, files[file])
}
$.ajax({
headers: {
token: this.getJwt()
},
method: 'post',
processData: false,
contentType: false,
url: url,
data: fd,
crossDomain: true,
success: (data) => cb(data),
error: (data) => cb(data),
dataType: 'json'
})
},
deepMerge (...objs) {
const result = Object.create(null)
objs.forEach(obj => {
if (obj) {
Object.keys(obj).forEach(key => {
const val = obj[key]
if (this.isPlainObject(val)) {
// recurse into nested plain objects
if (this.isPlainObject(result[key])) {
result[key] = this.deepMerge(result[key], val)
} else {
result[key] = this.deepMerge(val)
}
} else {
result[key] = val
}
})
}
})
// console.log(result)
return result
},
isPlainObject (val) {
return toString.call(val) === '[object Object]'
},
/**
* Toggle the display of a Popper pop-up menu
*
* @param vueObject Vue instance; it must have a notifyDismissAllMenu method
* @param openWhere
* @param openForm
* @param trigger
* @param placement
* @param offset
* @param dismissAllMenu
*/
togglePopper: function (vueObject, openWhere, openForm, trigger, placement: Placement = 'bottom-end', offset = [0, 10], dismissAllMenu = true) {
const oldState = vueObject[trigger]
if (dismissAllMenu) vueObject.notifyDismissAllMenu({ trigger })
vueObject[trigger] = !oldState
if (!vueObject[trigger]) return
vueObject.$nextTick(function () {
const el = this.$refs[openForm].$el || this.$refs[openForm] // if the ref is a component, we need its inner el
const popper = createPopper(openWhere, el, {
placement,
modifiers: [
{
name: 'offset',
options: {
offset: offset
}
}
]
})
})
},
/**
* Get the ids of the pages currently visible on screen
* @return Array<string>
*/
getPageIdInScreen: function () {
const clientWidth = document.body.clientWidth
const clientHeight = document.body.clientHeight
const pageInScreen: Array<string> = []
$('.editor').each(function (idx, el) {
const rect = el.getBoundingClientRect()
if (rect.bottom <= 83 /* scrolled out of the workspace area */ || rect.right <= 0 || rect.left >= clientWidth || rect.top >= clientHeight) {
return
}
const pageid = $(el).attr('data-page-ref') as string
pageInScreen.push(pageid)
})
return pageInScreen
},
/**
* Check whether the given element lies completely inside the specified page
* @return boolean
*/
elementIsInPage (el: HTMLElement | SVGElement, page: HTMLElement) {
if (!el || !page) return false
const elRect: DOMRect = el.getBoundingClientRect()
const pageRect: DOMRect = page.getBoundingClientRect()
if (elRect.left >= pageRect.left && elRect.right <= pageRect.right && elRect.top >= pageRect.top && elRect.bottom <= pageRect.bottom) {
return true
}
return false
},
/**
* Get the id of the on-screen page closest to the screen centre, i.e. the page whose centre has the smallest distance to the screen centre; if one of those pages is selected, return the selected one
*/
getPageIdInScreenCenter: function () {
const clientWidth = document.body.clientWidth
const clientHeight = document.body.clientHeight
const screenCenterX = clientWidth / 2
const screenCenterY = clientHeight / 2
// console.log(`screen center: ${screenCenterX}x${screenCenterY}`)
const pageInScreen = {}
let selected = ''
let selectedPageDist = 0
$('.editor').each(function (idx, el) {
const rect = el.getBoundingClientRect()
if (rect.bottom <= 83 /* scrolled out of the workspace area */ || rect.right <= 0 || rect.left >= clientWidth || rect.top >= clientHeight) {
return
}
const centerY = rect.height / 2 + rect.top
const centerX = rect.width / 2 + rect.left
const dist = Math.sqrt(Math.pow(centerY - screenCenterY, 2) + Math.pow(centerX - screenCenterX, 2))
const pageid = $(el).attr('data-page-ref') as string
pageInScreen[dist] = pageid
if ($(el).hasClass('selected')) {
selectedPageDist = dist
selected = pageid
}
})
// console.log(pageInScreen)
const dists = Object | gId}`).modal('hide')
$(`#${dialogId}`).remove()
}
}
window[dialogId + 'okCb'] = okCb
$('bo | conditional_block |
kerdenSOM.py | stackdata['path']['path']
uppath = os.path.abspath(os.path.join(path, ".."))
self.params['rundir'] = os.path.join(uppath, self.params['runname'])
#======================
def insertKerDenSOM(self, binned=None):
### Preliminary data
projectid = apProject.getProjectIdFromAlignStackId(self.params['alignstackid'])
alignstackdata = appiondata.ApAlignStackData.direct_query(self.params['alignstackid'])
numclass = self.params['xdim']*self.params['ydim']
pathdata = appiondata.ApPathData(path=os.path.abspath(self.params['rundir']))
### KerDen SOM Params object
kerdenq = appiondata.ApKerDenSOMParamsData()
kerdenq['mask_diam'] = 2.0*self.params['maskrad']
kerdenq['x_dimension'] = self.params['xdim']
kerdenq['y_dimension'] = self.params['ydim']
kerdenq['convergence'] = self.params['converge']
kerdenq['run_seconds'] = time.time()-self.t0
### Align Analysis Run object
analysisq = appiondata.ApAlignAnalysisRunData()
analysisq['runname'] = self.params['runname']
analysisq['path'] = pathdata
analysisq['description'] = self.params['description']
analysisq['alignstack'] = alignstackdata
analysisq['hidden'] = False
### linked through cluster not analysis
#analysisq['kerdenparams'] = kerdenq
### Clustering Run object
clusterrunq = appiondata.ApClusteringRunData()
clusterrunq['runname'] = self.params['runname']
clusterrunq['description'] = self.params['description']
# what if we binned the aligned stack to get the new one
if binned is None:
boxsize = alignstackdata['boxsize']
pixelsize = alignstackdata['pixelsize']
else:
boxsize = alignstackdata['boxsize'] / binned
pixelsize = alignstackdata['pixelsize'] * binned
clusterrunq['boxsize'] = boxsize
clusterrunq['pixelsize'] = pixelsize
clusterrunq['num_particles'] = self.params['numpart']
clusterrunq['alignstack'] = alignstackdata
clusterrunq['analysisrun'] = analysisq
clusterrunq['kerdenparams'] = kerdenq
### Clustering Stack object
clusterstackq = appiondata.ApClusteringStackData()
clusterstackq['avg_imagicfile'] = "kerdenstack"+self.timestamp+".hed"
clusterstackq['num_classes'] = numclass
clusterstackq['clusterrun'] = clusterrunq
clusterstackq['path'] = pathdata
clusterstackq['hidden'] = False
imagicfile = os.path.join(self.params['rundir'], clusterstackq['avg_imagicfile'])
if not os.path.isfile(imagicfile):
apDisplay.printError("could not find average stack file: "+imagicfile)
### looping over clusters
apDisplay.printColor("Inserting particle classification data, please wait", "cyan")
for i in range(numclass):
classnum = i+1
classroot = "%s.%d"% (self.timestamp, classnum-1)
classdocfile = os.path.join(self.params['rundir'], classroot)
partlist = self.readClassDocFile(classdocfile)
### Clustering Particle object
clusterrefq = appiondata.ApClusteringReferenceData()
clusterrefq['refnum'] = classnum
clusterrefq['avg_mrcfile'] = classroot+".mrc"
clusterrefq['clusterrun'] = clusterrunq
clusterrefq['path'] = pathdata
clusterrefq['num_particles'] = len(partlist)
clusterrefq['ssnr_resolution'] = self.cluster_resolution[i]
### looping over particles
sys.stderr.write(".")
for partnum in partlist:
alignpartdata = self.getAlignParticleData(partnum, alignstackdata)
### Clustering Particle objects
clusterpartq = appiondata.ApClusteringParticleData()
clusterpartq['clusterstack'] = clusterstackq
clusterpartq['alignparticle'] = alignpartdata
clusterpartq['partnum'] = partnum
clusterpartq['refnum'] = classnum
clusterpartq['clusterreference'] = clusterrefq
### finally we can insert parameters
if self.params['commit'] is True:
clusterpartq.insert()
#=====================
def getAlignParticleData(self, partnum, alignstackdata):
alignpartq = appiondata.ApAlignParticleData()
alignpartq['alignstack'] = alignstackdata
alignpartq['partnum'] = partnum
alignparts = alignpartq.query(results=1)
return alignparts[0]
#=====================
def readClassDocFile(self, docfile):
if not os.path.isfile(docfile):
return []
partlist = []
f = open(docfile, 'r')
for line in f:
sline = line.strip()
if re.match("[0-9]+", sline):
# numbers start at zero
partnum = int(sline)+1
partlist.append(partnum)
f.close()
if not partlist:
return []
partlist.sort()
return partlist
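		# Illustrative example (added for clarity, not part of the original source): a class doc
		# file containing the lines "0", "5" and "12" would yield partlist == [1, 6, 13], since the
		# doc file numbers particles from zero while Appion particle numbers start at one.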
#======================
def runKerdenSOM(self, indata):
"""
From http://xmipp.cnb.csic.es/twiki/bin/view/Xmipp/KerDenSOM
KerDenSOM stands for "Kernel Probability Density Estimator Self-Organizing Map".
It maps a set of high dimensional input vectors into a two-dimensional grid.
"""
apDisplay.printMsg("Running KerDen SOM")
outstamp = os.path.join(self.params['rundir'], self.timestamp)
kerdencmd = ( "xmipp_classify_kerdensom -verb 1 -i %s -o %s -xdim %d -ydim %d -saveclusters "%
(indata, outstamp, self.params['xdim'], self.params['ydim'])
)
### convergence criteria
if self.params['converge'] == "fast":
kerdencmd += " -eps 1e-5 "
elif self.params['converge'] == "slow":
kerdencmd += " -eps 1e-9 "
else:
kerdencmd += " -eps 1e-7 "
apDisplay.printColor(kerdencmd, "cyan")
proc = subprocess.Popen(kerdencmd, shell=True)
proc.wait()
time.sleep(1)
return
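		# Example of the command assembled above (illustrative values; actual paths depend on the run):
		# with xdim=4, ydim=4 and converge='fast' this would execute roughly
		#   xmipp_classify_kerdensom -verb 1 -i <indata> -o <rundir>/<timestamp> \
		#       -xdim 4 -ydim 4 -saveclusters -eps 1e-5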
#======================
def fileId(self, fname):
ext = os.path.splitext(fname)[1]
num = int(ext[1:])
return num
#======================
def sortFile(self, a, b):
if self.fileId(a) > self.fileId(b):
return 1
return -1
#======================
def createMontageByEMAN(self):
self.cluster_resolution = []
apDisplay.printMsg("Converting files")
### create crappy files
emancmd = ( "proc2d "+self.instack+" crap.mrc first=0 last=0 mask=1" )
apEMAN.executeEmanCmd(emancmd, showcmd=False, verbose=False)
emancmd = ( "proc2d crap.mrc crap.png" )
apEMAN.executeEmanCmd(emancmd, showcmd=False, verbose=False)
files = glob.glob(self.timestamp+".[0-9]*")
files.sort(self.sortFile)
montagecmd = ("montage -geometry +4+4 -tile %dx%d "%(self.params['xdim'], self.params['ydim']))
stackname = "kerdenstack"+self.timestamp+".hed"
count = 0
numclass = self.params['xdim']*self.params['ydim']
i = 0
for listname in files:
i += 1
apDisplay.printMsg("%d of %d classes"%(i,len(files)))
#listname = self.timestamp+str(i)
if not os.path.isfile(listname) or apFile.fileSize(listname) < 1:
### create a ghost particle
emancmd = ( "proc2d crap.mrc "+stackname+" " )
sys.stderr.write("skipping "+listname+"\n")
apEMAN.executeEmanCmd(emancmd, showcmd=False, verbose=False) | emancmd = ("proc2d %s %s list=%s average"%
| ### create png
shutil.copy("crap.png", listname+".png")
else:
### average particles | random_line_split |
kerdenSOM.py | data['path']['path']
uppath = os.path.abspath(os.path.join(path, ".."))
self.params['rundir'] = os.path.join(uppath, self.params['runname'])
#======================
def insertKerDenSOM(self, binned=None):
### Preliminary data
projectid = apProject.getProjectIdFromAlignStackId(self.params['alignstackid'])
alignstackdata = appiondata.ApAlignStackData.direct_query(self.params['alignstackid'])
numclass = self.params['xdim']*self.params['ydim']
pathdata = appiondata.ApPathData(path=os.path.abspath(self.params['rundir']))
### KerDen SOM Params object
kerdenq = appiondata.ApKerDenSOMParamsData()
kerdenq['mask_diam'] = 2.0*self.params['maskrad']
kerdenq['x_dimension'] = self.params['xdim']
kerdenq['y_dimension'] = self.params['ydim']
kerdenq['convergence'] = self.params['converge']
kerdenq['run_seconds'] = time.time()-self.t0
### Align Analysis Run object
analysisq = appiondata.ApAlignAnalysisRunData()
analysisq['runname'] = self.params['runname']
analysisq['path'] = pathdata
analysisq['description'] = self.params['description']
analysisq['alignstack'] = alignstackdata
analysisq['hidden'] = False
### linked through cluster not analysis
#analysisq['kerdenparams'] = kerdenq
### Clustering Run object
clusterrunq = appiondata.ApClusteringRunData()
clusterrunq['runname'] = self.params['runname']
clusterrunq['description'] = self.params['description']
# what if we binned the aligned stack to get the new one
if binned is None:
boxsize = alignstackdata['boxsize']
pixelsize = alignstackdata['pixelsize']
else:
boxsize = alignstackdata['boxsize'] / binned
pixelsize = alignstackdata['pixelsize'] * binned
clusterrunq['boxsize'] = boxsize
clusterrunq['pixelsize'] = pixelsize
clusterrunq['num_particles'] = self.params['numpart']
clusterrunq['alignstack'] = alignstackdata
clusterrunq['analysisrun'] = analysisq
clusterrunq['kerdenparams'] = kerdenq
### Clustering Stack object
clusterstackq = appiondata.ApClusteringStackData()
clusterstackq['avg_imagicfile'] = "kerdenstack"+self.timestamp+".hed"
clusterstackq['num_classes'] = numclass
clusterstackq['clusterrun'] = clusterrunq
clusterstackq['path'] = pathdata
clusterstackq['hidden'] = False
imagicfile = os.path.join(self.params['rundir'], clusterstackq['avg_imagicfile'])
if not os.path.isfile(imagicfile):
apDisplay.printError("could not find average stack file: "+imagicfile)
### looping over clusters
apDisplay.printColor("Inserting particle classification data, please wait", "cyan")
for i in range(numclass):
classnum = i+1
classroot = "%s.%d"% (self.timestamp, classnum-1)
classdocfile = os.path.join(self.params['rundir'], classroot)
partlist = self.readClassDocFile(classdocfile)
### Clustering Particle object
clusterrefq = appiondata.ApClusteringReferenceData()
clusterrefq['refnum'] = classnum
clusterrefq['avg_mrcfile'] = classroot+".mrc"
clusterrefq['clusterrun'] = clusterrunq
clusterrefq['path'] = pathdata
clusterrefq['num_particles'] = len(partlist)
clusterrefq['ssnr_resolution'] = self.cluster_resolution[i]
### looping over particles
sys.stderr.write(".")
for partnum in partlist:
alignpartdata = self.getAlignParticleData(partnum, alignstackdata)
### Clustering Particle objects
clusterpartq = appiondata.ApClusteringParticleData()
clusterpartq['clusterstack'] = clusterstackq
clusterpartq['alignparticle'] = alignpartdata
clusterpartq['partnum'] = partnum
clusterpartq['refnum'] = classnum
clusterpartq['clusterreference'] = clusterrefq
### finally we can insert parameters
if self.params['commit'] is True:
clusterpartq.insert()
#=====================
def getAlignParticleData(self, partnum, alignstackdata):
alignpartq = appiondata.ApAlignParticleData()
alignpartq['alignstack'] = alignstackdata
alignpartq['partnum'] = partnum
alignparts = alignpartq.query(results=1)
return alignparts[0]
#=====================
def readClassDocFile(self, docfile):
if not os.path.isfile(docfile):
return []
partlist = []
f = open(docfile, 'r')
for line in f:
sline = line.strip()
if re.match("[0-9]+", sline):
# numbers start at zero
|
f.close()
if not partlist:
return []
partlist.sort()
return partlist
#======================
def runKerdenSOM(self, indata):
"""
From http://xmipp.cnb.csic.es/twiki/bin/view/Xmipp/KerDenSOM
KerDenSOM stands for "Kernel Probability Density Estimator Self-Organizing Map".
It maps a set of high dimensional input vectors into a two-dimensional grid.
"""
apDisplay.printMsg("Running KerDen SOM")
outstamp = os.path.join(self.params['rundir'], self.timestamp)
kerdencmd = ( "xmipp_classify_kerdensom -verb 1 -i %s -o %s -xdim %d -ydim %d -saveclusters "%
(indata, outstamp, self.params['xdim'], self.params['ydim'])
)
### convergence criteria
if self.params['converge'] == "fast":
kerdencmd += " -eps 1e-5 "
elif self.params['converge'] == "slow":
kerdencmd += " -eps 1e-9 "
else:
kerdencmd += " -eps 1e-7 "
apDisplay.printColor(kerdencmd, "cyan")
proc = subprocess.Popen(kerdencmd, shell=True)
proc.wait()
time.sleep(1)
return
#======================
def fileId(self, fname):
ext = os.path.splitext(fname)[1]
num = int(ext[1:])
return num
#======================
def sortFile(self, a, b):
if self.fileId(a) > self.fileId(b):
return 1
return -1
#======================
def createMontageByEMAN(self):
self.cluster_resolution = []
apDisplay.printMsg("Converting files")
### create crappy files
emancmd = ( "proc2d "+self.instack+" crap.mrc first=0 last=0 mask=1" )
apEMAN.executeEmanCmd(emancmd, showcmd=False, verbose=False)
emancmd = ( "proc2d crap.mrc crap.png" )
apEMAN.executeEmanCmd(emancmd, showcmd=False, verbose=False)
files = glob.glob(self.timestamp+".[0-9]*")
files.sort(self.sortFile)
montagecmd = ("montage -geometry +4+4 -tile %dx%d "%(self.params['xdim'], self.params['ydim']))
stackname = "kerdenstack"+self.timestamp+".hed"
count = 0
numclass = self.params['xdim']*self.params['ydim']
i = 0
for listname in files:
i += 1
apDisplay.printMsg("%d of %d classes"%(i,len(files)))
#listname = self.timestamp+str(i)
if not os.path.isfile(listname) or apFile.fileSize(listname) < 1:
### create a ghost particle
emancmd = ( "proc2d crap.mrc "+stackname+" " )
sys.stderr.write("skipping "+listname+"\n")
apEMAN.executeEmanCmd(emancmd, showcmd=False, verbose=False)
### create png
shutil.copy("crap.png", listname+".png")
else:
### average particles
emancmd = ("proc2d %s %s list=%s average"%
| partnum = int(sline)+1
partlist.append(partnum) | conditional_block |
kerdenSOM.py |
#======================
def insertKerDenSOM(self, binned=None):
### Preliminary data
projectid = apProject.getProjectIdFromAlignStackId(self.params['alignstackid'])
alignstackdata = appiondata.ApAlignStackData.direct_query(self.params['alignstackid'])
numclass = self.params['xdim']*self.params['ydim']
pathdata = appiondata.ApPathData(path=os.path.abspath(self.params['rundir']))
### KerDen SOM Params object
kerdenq = appiondata.ApKerDenSOMParamsData()
kerdenq['mask_diam'] = 2.0*self.params['maskrad']
kerdenq['x_dimension'] = self.params['xdim']
kerdenq['y_dimension'] = self.params['ydim']
kerdenq['convergence'] = self.params['converge']
kerdenq['run_seconds'] = time.time()-self.t0
### Align Analysis Run object
analysisq = appiondata.ApAlignAnalysisRunData()
analysisq['runname'] = self.params['runname']
analysisq['path'] = pathdata
analysisq['description'] = self.params['description']
analysisq['alignstack'] = alignstackdata
analysisq['hidden'] = False
### linked through cluster not analysis
#analysisq['kerdenparams'] = kerdenq
### Clustering Run object
clusterrunq = appiondata.ApClusteringRunData()
clusterrunq['runname'] = self.params['runname']
clusterrunq['description'] = self.params['description']
# what if we binned the aligned stack to get the new one
if binned is None:
boxsize = alignstackdata['boxsize']
pixelsize = alignstackdata['pixelsize']
else:
boxsize = alignstackdata['boxsize'] / binned
pixelsize = alignstackdata['pixelsize'] * binned
clusterrunq['boxsize'] = boxsize
clusterrunq['pixelsize'] = pixelsize
clusterrunq['num_particles'] = self.params['numpart']
clusterrunq['alignstack'] = alignstackdata
clusterrunq['analysisrun'] = analysisq
clusterrunq['kerdenparams'] = kerdenq
### Clustering Stack object
clusterstackq = appiondata.ApClusteringStackData()
clusterstackq['avg_imagicfile'] = "kerdenstack"+self.timestamp+".hed"
clusterstackq['num_classes'] = numclass
clusterstackq['clusterrun'] = clusterrunq
clusterstackq['path'] = pathdata
clusterstackq['hidden'] = False
imagicfile = os.path.join(self.params['rundir'], clusterstackq['avg_imagicfile'])
if not os.path.isfile(imagicfile):
apDisplay.printError("could not find average stack file: "+imagicfile)
### looping over clusters
apDisplay.printColor("Inserting particle classification data, please wait", "cyan")
for i in range(numclass):
classnum = i+1
classroot = "%s.%d"% (self.timestamp, classnum-1)
classdocfile = os.path.join(self.params['rundir'], classroot)
partlist = self.readClassDocFile(classdocfile)
### Clustering Particle object
clusterrefq = appiondata.ApClusteringReferenceData()
clusterrefq['refnum'] = classnum
clusterrefq['avg_mrcfile'] = classroot+".mrc"
clusterrefq['clusterrun'] = clusterrunq
clusterrefq['path'] = pathdata
clusterrefq['num_particles'] = len(partlist)
clusterrefq['ssnr_resolution'] = self.cluster_resolution[i]
### looping over particles
sys.stderr.write(".")
for partnum in partlist:
alignpartdata = self.getAlignParticleData(partnum, alignstackdata)
### Clustering Particle objects
clusterpartq = appiondata.ApClusteringParticleData()
clusterpartq['clusterstack'] = clusterstackq
clusterpartq['alignparticle'] = alignpartdata
clusterpartq['partnum'] = partnum
clusterpartq['refnum'] = classnum
clusterpartq['clusterreference'] = clusterrefq
### finally we can insert parameters
if self.params['commit'] is True:
clusterpartq.insert()
#=====================
def getAlignParticleData(self, partnum, alignstackdata):
alignpartq = appiondata.ApAlignParticleData()
alignpartq['alignstack'] = alignstackdata
alignpartq['partnum'] = partnum
alignparts = alignpartq.query(results=1)
return alignparts[0]
#=====================
def readClassDocFile(self, docfile):
if not os.path.isfile(docfile):
return []
partlist = []
f = open(docfile, 'r')
for line in f:
sline = line.strip()
if re.match("[0-9]+", sline):
# numbers start at zero
partnum = int(sline)+1
partlist.append(partnum)
f.close()
if not partlist:
return []
partlist.sort()
return partlist
#======================
def runKerdenSOM(self, indata):
"""
From http://xmipp.cnb.csic.es/twiki/bin/view/Xmipp/KerDenSOM
KerDenSOM stands for "Kernel Probability Density Estimator Self-Organizing Map".
It maps a set of high dimensional input vectors into a two-dimensional grid.
"""
apDisplay.printMsg("Running KerDen SOM")
outstamp = os.path.join(self.params['rundir'], self.timestamp)
kerdencmd = ( "xmipp_classify_kerdensom -verb 1 -i %s -o %s -xdim %d -ydim %d -saveclusters "%
(indata, outstamp, self.params['xdim'], self.params['ydim'])
)
### convergence criteria
if self.params['converge'] == "fast":
kerdencmd += " -eps 1e-5 "
elif self.params['converge'] == "slow":
kerdencmd += " -eps 1e-9 "
else:
kerdencmd += " -eps 1e-7 "
apDisplay.printColor(kerdencmd, "cyan")
proc = subprocess.Popen(kerdencmd, shell=True)
proc.wait()
time.sleep(1)
return
#======================
def fileId(self, fname):
ext = os.path.splitext(fname)[1]
num = int(ext[1:])
return num
#======================
def sortFile(self, a, b):
if self.fileId(a) > self.fileId(b):
return 1
return -1
#======================
def createMontageByEMAN(self):
self.cluster_resolution = []
apDisplay.printMsg("Converting files")
### create crappy files
emancmd = ( "proc2d "+self.instack+" crap.mrc first=0 last=0 mask=1" )
apEMAN.executeEmanCmd(emancmd, showcmd=False, verbose=False)
emancmd = ( "proc2d crap.mrc crap.png" )
apEMAN.executeEmanCmd(emancmd, showcmd=False, verbose=False)
files = glob.glob(self.timestamp+".[0-9]*")
files.sort(self.sortFile)
montagecmd = ("montage -geometry +4+4 -tile %dx%d "%(self.params['xdim'], self.params['ydim']))
stackname = "kerdenstack"+self.timestamp+".hed"
count = 0
numclass = self.params['xdim']*self.params['ydim']
i = 0
for listname in files:
i += 1
apDisplay.printMsg("%d of %d classes"%(i,len(files)))
#listname = self.timestamp+str(i)
if not os.path.isfile(listname) or apFile.fileSize(listname) < 1:
### create a ghost particle
emancmd = ( "proc2d crap.mrc "+stackname+" " )
sys.stderr.write("skipping "+listname+"\n")
apEMAN.executeEmanCmd(emancmd, showcmd=False, verbose=False)
### create png
shutil.copy("crap.png", listname+".png")
| self.alignstackdata = appiondata.ApAlignStackData.direct_query(self.params['alignstackid'])
path = self.alignstackdata['path']['path']
uppath = os.path.abspath(os.path.join(path, ".."))
self.params['rundir'] = os.path.join(uppath, self.params['runname']) | identifier_body |
|
kerdenSOM.py | data['path']['path']
uppath = os.path.abspath(os.path.join(path, ".."))
self.params['rundir'] = os.path.join(uppath, self.params['runname'])
#======================
def insertKerDenSOM(self, binned=None):
### Preliminary data
projectid = apProject.getProjectIdFromAlignStackId(self.params['alignstackid'])
alignstackdata = appiondata.ApAlignStackData.direct_query(self.params['alignstackid'])
numclass = self.params['xdim']*self.params['ydim']
pathdata = appiondata.ApPathData(path=os.path.abspath(self.params['rundir']))
### KerDen SOM Params object
kerdenq = appiondata.ApKerDenSOMParamsData()
kerdenq['mask_diam'] = 2.0*self.params['maskrad']
kerdenq['x_dimension'] = self.params['xdim']
kerdenq['y_dimension'] = self.params['ydim']
kerdenq['convergence'] = self.params['converge']
kerdenq['run_seconds'] = time.time()-self.t0
### Align Analysis Run object
analysisq = appiondata.ApAlignAnalysisRunData()
analysisq['runname'] = self.params['runname']
analysisq['path'] = pathdata
analysisq['description'] = self.params['description']
analysisq['alignstack'] = alignstackdata
analysisq['hidden'] = False
### linked through cluster not analysis
#analysisq['kerdenparams'] = kerdenq
### Clustering Run object
clusterrunq = appiondata.ApClusteringRunData()
clusterrunq['runname'] = self.params['runname']
clusterrunq['description'] = self.params['description']
# what if we binned the aligned stack to get the new one
if binned is None:
boxsize = alignstackdata['boxsize']
pixelsize = alignstackdata['pixelsize']
else:
boxsize = alignstackdata['boxsize'] / binned
pixelsize = alignstackdata['pixelsize'] * binned
clusterrunq['boxsize'] = boxsize
clusterrunq['pixelsize'] = pixelsize
clusterrunq['num_particles'] = self.params['numpart']
clusterrunq['alignstack'] = alignstackdata
clusterrunq['analysisrun'] = analysisq
clusterrunq['kerdenparams'] = kerdenq
### Clustering Stack object
clusterstackq = appiondata.ApClusteringStackData()
clusterstackq['avg_imagicfile'] = "kerdenstack"+self.timestamp+".hed"
clusterstackq['num_classes'] = numclass
clusterstackq['clusterrun'] = clusterrunq
clusterstackq['path'] = pathdata
clusterstackq['hidden'] = False
imagicfile = os.path.join(self.params['rundir'], clusterstackq['avg_imagicfile'])
if not os.path.isfile(imagicfile):
apDisplay.printError("could not find average stack file: "+imagicfile)
### looping over clusters
apDisplay.printColor("Inserting particle classification data, please wait", "cyan")
for i in range(numclass):
classnum = i+1
classroot = "%s.%d"% (self.timestamp, classnum-1)
classdocfile = os.path.join(self.params['rundir'], classroot)
partlist = self.readClassDocFile(classdocfile)
### Clustering Particle object
clusterrefq = appiondata.ApClusteringReferenceData()
clusterrefq['refnum'] = classnum
clusterrefq['avg_mrcfile'] = classroot+".mrc"
clusterrefq['clusterrun'] = clusterrunq
clusterrefq['path'] = pathdata
clusterrefq['num_particles'] = len(partlist)
clusterrefq['ssnr_resolution'] = self.cluster_resolution[i]
### looping over particles
sys.stderr.write(".")
for partnum in partlist:
alignpartdata = self.getAlignParticleData(partnum, alignstackdata)
### Clustering Particle objects
clusterpartq = appiondata.ApClusteringParticleData()
clusterpartq['clusterstack'] = clusterstackq
clusterpartq['alignparticle'] = alignpartdata
clusterpartq['partnum'] = partnum
clusterpartq['refnum'] = classnum
clusterpartq['clusterreference'] = clusterrefq
### finally we can insert parameters
if self.params['commit'] is True:
clusterpartq.insert()
#=====================
def getAlignParticleData(self, partnum, alignstackdata):
alignpartq = appiondata.ApAlignParticleData()
alignpartq['alignstack'] = alignstackdata
alignpartq['partnum'] = partnum
alignparts = alignpartq.query(results=1)
return alignparts[0]
#=====================
def readClassDocFile(self, docfile):
if not os.path.isfile(docfile):
return []
partlist = []
f = open(docfile, 'r')
for line in f:
sline = line.strip()
if re.match("[0-9]+", sline):
# numbers start at zero
partnum = int(sline)+1
partlist.append(partnum)
f.close()
if not partlist:
return []
partlist.sort()
return partlist
#======================
def runKerdenSOM(self, indata):
"""
From http://xmipp.cnb.csic.es/twiki/bin/view/Xmipp/KerDenSOM
KerDenSOM stands for "Kernel Probability Density Estimator Self-Organizing Map".
It maps a set of high dimensional input vectors into a two-dimensional grid.
"""
apDisplay.printMsg("Running KerDen SOM")
outstamp = os.path.join(self.params['rundir'], self.timestamp)
kerdencmd = ( "xmipp_classify_kerdensom -verb 1 -i %s -o %s -xdim %d -ydim %d -saveclusters "%
(indata, outstamp, self.params['xdim'], self.params['ydim'])
)
### convergence criteria
if self.params['converge'] == "fast":
kerdencmd += " -eps 1e-5 "
elif self.params['converge'] == "slow":
kerdencmd += " -eps 1e-9 "
else:
kerdencmd += " -eps 1e-7 "
apDisplay.printColor(kerdencmd, "cyan")
proc = subprocess.Popen(kerdencmd, shell=True)
proc.wait()
time.sleep(1)
return
#======================
def fileId(self, fname):
ext = os.path.splitext(fname)[1]
num = int(ext[1:])
return num
#======================
def sortFile(self, a, b):
if self.fileId(a) > self.fileId(b):
return 1
return -1
#======================
def | (self):
self.cluster_resolution = []
apDisplay.printMsg("Converting files")
### create crappy files
emancmd = ( "proc2d "+self.instack+" crap.mrc first=0 last=0 mask=1" )
apEMAN.executeEmanCmd(emancmd, showcmd=False, verbose=False)
emancmd = ( "proc2d crap.mrc crap.png" )
apEMAN.executeEmanCmd(emancmd, showcmd=False, verbose=False)
files = glob.glob(self.timestamp+".[0-9]*")
files.sort(self.sortFile)
montagecmd = ("montage -geometry +4+4 -tile %dx%d "%(self.params['xdim'], self.params['ydim']))
stackname = "kerdenstack"+self.timestamp+".hed"
count = 0
numclass = self.params['xdim']*self.params['ydim']
i = 0
for listname in files:
i += 1
apDisplay.printMsg("%d of %d classes"%(i,len(files)))
#listname = self.timestamp+str(i)
if not os.path.isfile(listname) or apFile.fileSize(listname) < 1:
### create a ghost particle
emancmd = ( "proc2d crap.mrc "+stackname+" " )
sys.stderr.write("skipping "+listname+"\n")
apEMAN.executeEmanCmd(emancmd, showcmd=False, verbose=False)
### create png
shutil.copy("crap.png", listname+".png")
else:
### average particles
emancmd = ("proc2d %s %s list=%s average"%
| createMontageByEMAN | identifier_name |
tarjetas.js | andeTarjeta').css("width"));
*/
}
catch (e){
console.log(e.message);
}
if (tarjetaActual.favorita == 1) {
$('#btnCambiarTarjetaFavorita').addClass("ui-btn-favorito");
/* TODO
update the card in the database
*/
}
else {
$('#btnCambiarTarjetaFavorita').removeClass("ui-btn-favorito");
}
// Load the page with the card
if (cambiarPagina){
$.mobile.changePage($('#PaginaDetalleTarjeta'));
}
PararEvento(event);
}
function ReversoTarjeta(event){
//console.log("Entra en ReversoTarjeta");
try{
// Remove the old classes
$('#lblTituloTarjetaReverso').removeClass();
for(i=1;i<=15;i++){
$('#PaginaReversoTarjeta').removeClass('fondo'+i);
}
// Add the new classes
$('#imgGrandeTarjetaReverso').attr('src', tarjetaActual.foto);
$('#lblTituloTarjetaReverso').html(tarjetaActual.titulo2).addClass('fuente-' + tarjetaActual.fuente);
$('#PaginaReversoTarjeta').addClass('fondo'+tarjetaActual.fondo);
//console.log("Clases de reverso tarjeta "+PaginaReversoTarjeta.className);
// Switch to the reverse-side page
$.mobile.changePage($('#PaginaReversoTarjeta'), {transition: 'flip',changeHash:'false'});
ReproducirSonidoEstatico();
PararEvento(event);
}
catch (e){
console.log(e.message);
}
}
/**
* ActualizarTarjeta. Updates the data of the current card with the data passed to the function
*
*@param tarjeta Data of the updated card
*/
function ActualizarTarjeta(event, tarjeta){
var listaTemp = [];
var datosAntiguos;
$.each(listaTarjetas, function(i, item) {
if (tarjeta.id == item.id) {
datosAntiguos = item;
listaTemp.push(tarjeta);
}
else {
listaTemp.push(item);
}
});
listaTarjetas = listaTemp;
// Update in the database
var sql = "UPDATE Tarjetas SET titulo1='" + $.trim(tarjeta.titulo1) +
"', titulo2='" + $.trim(tarjeta.titulo2) +
"', fondo='" + $.trim(tarjeta.fondo) +
"', foto='" + $.trim(tarjeta.foto) +
"', sonido='" + $.trim(tarjeta.sonido) +
"', favorita=" + tarjeta.favorita +
", anchoFoto=" + tarjeta.anchoFoto +
", altoFoto=" + tarjeta.altoFoto +
", fuente='" + tarjeta.fuente +
"', idiomaA='" +idiomaSecundario.toLowerCase() +
"', idiomaDe='" +idiomaPrincipal.toLowerCase() +
"' WHERE id=" + tarjeta.id;
console.log("Actualizamos una tarjeta--> "+sql);
bwBD.transaction(function(tx){
tx.executeSql(sql);
}, errorBD);
// Remove the classes from the old elements (background, font, font size)
for(i=1;i<=15;i++){
$('#PaginaDetalleTarjeta').removeClass('fondo'+i);
$('#PaginaReversoTarjeta').removeClass('fondo'+i);
}
//console.log("Clases de detalle tarjeta "+PaginaDetalleTarjeta.className);
//console.log("Clases de reverso tarjeta "+PaginaReversoTarjeta.className);
$('#lblTituloTarjeta').removeClass();
$('#lblTituloTarjetaReverso').removeClass();
// Update the current card's data with the new data from the updated card
CargarTarjeta(event, tarjeta.id, false);
}
/**
* ComprobarEliminarTarjeta. Checks whether the user has selected deletion of the current card.
*/
function ComprobarEliminarTarjeta(event){
if (parseInt(event) == 1){
EliminaTarjetaActual(event)
}
}
/**
* EliminaTarjetaActual. Deletes the currently selected card, both from the card list and from the database.
*/
function EliminaTarjetaActual(event){
var listaTemp = [];
try{
// Remove the card from the current list
$.each(listaTarjetas, function(i, item){
if (item.id != tarjetaActual.id) {
listaTemp.push(item);
}
});
listaTarjetas = listaTemp;
// Delete the card from the DB
var sql = "delete from Tarjetas where id=" + tarjetaActual.id;
bwBD.transaction(function(tx){
tx.executeSql(sql);
}, errorBD);
// ... refresh the list of cards ...
RepresentarListaTarjetas(categoriaActual);
tarjetaActual = null;
// ... and refresh the list of categories
RepresentarCategorias();
// Load the page with the cards of the current category
history.back();
}
catch (e){
console.log("Error en EliminarTarjetaActual: " + e.message);
}
}
function EliminarTarjetasPorCategoria(categoria){
var listaTemp = [];
// Remove the cards from the current list
$.each(listaTarjetas, function(i, item){
if (item.categoria != categoria) {
listaTemp.push(item);
}
});
listaTarjetas = listaTemp;
var sql = "delete from Tarjetas where categoria=" + categoria;
bwBD.transaction(function(tx){
tx.executeSql(sql);
}, errorBD);
tarjetaActual = null;
// ... and refresh the list of categories
RepresentarCategorias();
// Load the page with the cards of the current category
$.mobile.changePage($('#PaginaCategorias'));
}
/**
* EliminarListaTarjetas. Completely empties the list of cards.
*/
function EliminarListaTarjetas(){
listaTarjetas = [];
}
function LimpiarTraduccion(){
$('#pnlResultadoTraduccion').removeClass("in");
}
/*
* Obtains an access token for the Microsoft Translate server through the web service
*/
function getAccessToken(){
// Edited --> Pedro
var urlObtenerAccesToken = 'http://www.bubblewords.info/WSTraducciones/GetAccessToken.asmx/getToken';
$.ajax({
url: urlObtenerAccesToken + '?callback=?',
type: "GET",
dataType: 'jsonp',
success: function(data){
console.log("AccessToken Recibido");
accessToken=data[0];
estadoServidor=true;
//navigator.notification.confirm("Hemos obtenido el token de acceso: "+accessToken)
if (intervaloSinConexion){
clearInterval(intervaloSinConexion);
intervaloSinConexion=undefined;
console.warn("El servidor esta disponible, cambiamos o establecemos el intervalo a 9 minutos");
intervaloNormal = setInterval(getAccessToken, 9 * 60 * 1000);
}
hayConexion=true;
},
timeout:5000,
error: function(x, t, m) {
console.log("AccessToken No recibido");
if (hayConexion ==true){
/*
*En caso de que se tenga conexion de red, pero no sea accesible el servicio web que nos devuelve el token de acceso
*solicitamos un token de acceso cada 30 segundos, hasta que el servidor responda, en cuyo caso se para el intervalo
*sin conexión y comienza el intervalo normal de 9 minutos
*/
if (intervaloNormal){
clearInterval(intervaloNormal);
intervaloNormal=undefined;
console.warn("El servidor no esta disponible, cambiamos el intervalo a 30 segundos");
intervaloSinConexion = setInterval(getAccessToken, 30 * 1000);
}
if(t==="timeout") {
if (estadoServidor==true){
navigator.notification.confirm(res_servidor_no_disponible,'',res_titulo_servidor_no_disponible,res_Aceptar);
}
estadoServidor=false;
} else {
if (estadoServidor==true){
navigator.notification.confirm(res_servidor_no_disponible+" Er | ror: "+t,'',res_titulo_servidor_no_disponible,res_Aceptar);
}
estadoServidor=false;
}
}
}
});
}
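// Editor's note (not part of the original source): getAccessToken() implements a simple
// polling fallback. While the token web service answers, the token is refreshed every
// 9 minutes (intervaloNormal); after a failure the request is retried every 30 seconds
// (intervaloSinConexion) until the server responds again. A minimal standalone sketch of
// the same pattern, with hypothetical names (pedirToken is assumed, not part of the app):
//
//   var intervaloOk, intervaloReintento;
//   function refrescarToken() {
//       pedirToken().then(function () {
//           if (intervaloReintento) { clearInterval(intervaloReintento); intervaloReintento = undefined; }
//           if (!intervaloOk) { intervaloOk = setInterval(refrescarToken, 9 * 60 * 1000); }
//       }, function () {
//           if (intervaloOk) { clearInterval(intervaloOk); intervaloOk = undefined; }
//           if (!intervaloReintento) { intervaloReintento = setInterval(refrescarToken, 30 * 1000); }
//       });
//   }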
/*
* Obtie | conditional_block |
|
tarjetas.js | break;
}
}
else {
ancho = anchoTablet;
}
var columna =1;
$.each(listaTarjetas, function(i, item) {
console.log("Comprobamos esta tarjeta para añadirla a la categoría ("+categoria.id+"): "+item.id+" con la categoria: "+item.categoria);
if ( ( (favoritas) && (item.favorita == 1) ) || ( (!favoritas) && (item.categoria == categoria.id) ) ) {
// Maquetación de la tabla que llevará cada una de las imágenes relacionadas con la tarjeta
if(columna ==1){
texto+="<tr>";
}
texto += "<td><div class='contenedorImg'><a href=\'javascript:;\' onClick=\'CargarTarjeta(event," + item.id + ", true)\' onTouchStart=\'(event," + item.id +
")\'><div class= 'divImgTarjeta conSombra'><img id=\'img" + item.id + "\' src=\'img/imagen_no_disponible_230.jpg\' />" + "</div></a></div></td>";
columna++;
if (columna ==4){
texto+="</tr>";
columna=1;
}
listaImagenesACargar.push(item);
contador += 1;
}
});
// Actualización del grid de imágenes
//console.log("Este es el texto: " + texto);
$('#lblListaTarjetas').html(texto);
//ObtenerTarjetasPorCategoria(categoria.id);
if (tarjetasPorCategoria.length <=2){
var altoPag =parseFloat($('#PaginaDetalleCategoria').height());
var altoImagen = altoPag-(altoPag*0.4);
//navigator.notification.alert("El alto de la pagina es de: "+altoPag+"+px el alto de la imagen es: "+altoImagen+"px");
$('.contenedorImg img').css('max-height',altoImagen.toString()+"px");
}
// Una vez que se haya cargado la lista de imágenes, hay que cargar sus rutas
$.each(listaImagenesACargar, function(i, item){
CargarFoto("img" + item.id, item.foto);
});
}
/**
* CargarFoto. Intenta cargar la foto pasada como parámetro. Si lo consigue, la redimensiona para que se muestre
* correctamente en la lista de tarjetas. Si no consigue cargarla, deja la imagen que esta en el identificador correspondiente.
*
* @param identificador id de la imagen donde cargará la foto
* @param rutaFoto ruta en el dispositivo donde se encuentra la foto
* @param anchoFoto ancho en pixels de la foto original
* @param altoFoto alto en pixels de la foto original
*/
function CargarFoto(identificador, rutaFoto){
if (activarPhoneGap) {
// Solamente se comprueba si exista la fotografía en el caso de que está activado el PhoneGap.
if ($.trim(rutaFoto).length > 0) {
// Se comprueba que no es la imagen por defecto
if (rutaFoto.indexOf('img/imagen_no_disponible') >= 0) {
//console.log("La foto a cargar es la de por defecto");
}
else {
window.resolveLocalFileSystemURI(rutaFoto, function(fileEntry){
$("#" + identificador).attr("src", rutaFoto).on('load', function(){
/*
if (anchoFoto < altoFoto){
switch(tipoDispositivo){
case "iPhone3":
alto = ((altoFoto * anchoiPhone3) / anchoFoto).toFixed(0);
ancho = anchoiPhone3;
break;
case "iPhone4":
alto = ((altoFoto * anchoiPhone4) / anchoFoto).toFixed(0);
ancho = anchoiPhone4;
break;
case "tablet":
alto = ((altoFoto * anchoTablet) / anchoFoto).toFixed(0);
ancho = anchoTablet;
break;
}
// En el caso de que la altura sea mayor que el ancho, hay que desplazar la imagen para que quede centrada
// en altura
//$("#" + identificador).css("position", "relative").css("top", "-" + ((alto - ancho) / 2).toFixed(0).toString() + "px");
}
else {
switch(tipoDispositivo){
case "iPhone3":
ancho = ((anchoFoto * anchoiPhone3) / altoFoto).toFixed(0);
alto = anchoiPhone3;
break;
case "iPhone4":
ancho = ((anchoFoto * anchoiPhone4) / altoFoto).toFixed(0);
alto = anchoiPhone4;
break;
case "tablet":
ancho = ((anchoFoto * anchoTablet) / altoFoto).toFixed(0);
alto = anchoTablet;
break;
}
// En el caso de que la anchura sea mayor que la altura, hay que desplazar la imagen para que quede
// centrada en anchura
$('#' + identificador).css("position", "relative").css("left", "-" + ((ancho - alto)/2).toFixed(0).toString() + "px");
}
$('#' + identificador).attr("width", ancho);
$('#' + identificador).attr("height", alto);
*/
//console.log("Ancho: " + anchoFoto + ", alto: " + altoFoto);
});
}, function(error){
console.log("Ha fallado la carga del archivo " + rutaFoto);
});
}
}
}
}
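// Editor's note (not part of the original source): window.resolveLocalFileSystemURI comes from
// the PhoneGap/Cordova File plugin; newer plugin versions expose it as
// window.resolveLocalFileSystemURL instead, so the spelling above only works on older releases.
// The success callback receives a FileEntry, which CargarFoto ignores because it only needs to
// know that the file exists before assigning the src attribute.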
/**
* NuevaTarjeta. Inserta una nueva tarjeta con los datos pasados como parámetros.
*
* @param categoria identificador de la categoría a la que pertenece la tarjeta
* @param titulo1 título 1 de la tarjeta
* @param titulo2 título 2 de la tarjeta
* @param fondo nombre de la imagen del fondo de la tarjeta
* @param foto nombre de la imagen principal de la tarjeta
* @param sonido nombre del sonido de la tarjeta
* @param ancho ancho en pixels de la foto
* @param alto alto en pixels de la imagen
* @param fuente tipografía asociada a la tarjeta
* @param tamanioFuente tamaño en pixels de la fuente utilizada
*/
function NuevaTarjeta(categoria, titulo1, titulo2, fondo, foto, sonido, ancho, alto, fuente){
var maxId = 0;
//console.log("llego a NuevaTarjeta");
try{
// obtención del último identificador utilizado
$.each(listaTarjetas, function(i, item){
if (item.id > maxId) {
maxId = item.id;
}
});
// Inserción de la tarjeta en la lista de tarjetas actuales (para la categoría actual) ...
listaTarjetas.push({
'id': (maxId+1),
'categoria': categoria,
'titulo1': titulo1,
'titulo2': titulo2,
'fondo': fondo,
'foto': foto,
'sonido': sonido,
'favorita': 0,
'anchoFoto': ancho,
'altoFoto': alto,
'fuente':fuente,
'idiomaA':idiomaSecundario.toLowerCase(),
'idiomaDe':idiomaPrincipal.toLowerCase()
});
// ... e inserción de la tarjeta en la base de datos
var sql = "insert into Tarjetas(id, categoria, titulo1, titulo2, fondo, foto, sonido, favorita, anchoFoto, altoFoto, fuente, idiomaA, idiomaDe ) values(" +
(maxId+1) + "," + categoria + ",\'" + titulo1 + "\',\'" + titulo2 + "\',\'" + fondo + "\',\'" + foto + "\',\'" + sonido + "\',0," + ancho +
"," + alto + ",'" + fuente + "','"+idiomaSecundario.toLowerCase()+"','"+idiomaPrincipal.toLowerCase()+"')";
console.log | lListaTarjetas').html("");
var texto = "";
var letra = "";
var contador = 0;
var listaImagenesACargar = [];
if (favoritas)
$('#h1NombreCategoria').html(res_Favoritos)
mostrarFavoritas = favoritas;
if (activarPhoneGap){
switch(tipoDispositivo){
case "iPhone3":
ancho = anchoiPhone3;
break;
case "iPhone4":
ancho = anchoiPhone4;
break;
case "tablet":
ancho = anchoTablet; | identifier_body |
|
tarjetas.js | }
}
else {
ancho = anchoTablet;
}
var columna =1;
$.each(listaTarjetas, function(i, item) {
console.log("Comprobamos esta tarjeta para añadirla a la categoría ("+categoria.id+"): "+item.id+" con la categoria: "+item.categoria);
if ( ( (favoritas) && (item.favorita == 1) ) || ( (!favoritas) && (item.categoria == categoria.id) ) ) {
// Maquetación de la tabla que llevará cada una de las imágenes relacionadas con la tarjeta
if(columna ==1){
texto+="<tr>";
}
texto += "<td><div class='contenedorImg'><a href=\'javascript:;\' onClick=\'CargarTarjeta(event," + item.id + ", true)\' onTouchStart=\'(event," + item.id +
")\'><div class= 'divImgTarjeta conSombra'><img id=\'img" + item.id + "\' src=\'img/imagen_no_disponible_230.jpg\' />" + "</div></a></div></td>";
columna++;
if (columna ==4){
texto+="</tr>";
columna=1;
}
listaImagenesACargar.push(item);
contador += 1;
}
});
// Actualización del grid de imágenes
//console.log("Este es el texto: " + texto);
$('#lblListaTarjetas').html(texto);
//ObtenerTarjetasPorCategoria(categoria.id);
if (tarjetasPorCategoria.length <=2){
var altoPag =parseFloat($('#PaginaDetalleCategoria').height());
var altoImagen = altoPag-(altoPag*0.4);
//navigator.notification.alert("El alto de la pagina es de: "+altoPag+"+px el alto de la imagen es: "+altoImagen+"px");
$('.contenedorImg img').css('max-height',altoImagen.toString()+"px");
}
// Una vez que se haya cargado la lista de imágenes, hay que cargar sus rutas
$.each(listaImagenesACargar, function(i, item){
CargarFoto("img" + item.id, item.foto);
});
}
/**
* CargarFoto. Intenta cargar la foto pasada como parámetro. Si lo consigue, la redimensiona para que se muestre
* correctamente en la lista de tarjetas. Si no consigue cargarla, deja la imagen que esta en el identificador correspondiente.
*
* @param identificador id de la imagen donde cargará la foto
* @param rutaFoto ruta en el dispositivo donde se encuentra la foto
* @param anchoFoto ancho en pixels de la foto original
* @param altoFoto alto en pixels de la foto original
*/
function CargarFoto(identificador, rutaFoto){
if (activarPhoneGap) {
// Solamente se comprueba si exista la fotografía en el caso de que está activado el PhoneGap.
if ($.trim(rutaFoto).length > 0) {
// Se comprueba que no es la imagen por defecto
if (rutaFoto.indexOf('img/imagen_no_disponible') >= 0) {
//console.log("La foto a cargar es la de por defecto");
}
else {
window.resolveLocalFileSystemURI(rutaFoto, function(fileEntry){
$("#" + identificador).attr("src", rutaFoto).on('load', function(){
/*
if (anchoFoto < altoFoto){
switch(tipoDispositivo){
case "iPhone3":
alto = ((altoFoto * anchoiPhone3) / anchoFoto).toFixed(0);
ancho = anchoiPhone3;
break;
case "iPhone4":
alto = ((altoFoto * anchoiPhone4) / anchoFoto).toFixed(0);
ancho = anchoiPhone4;
break;
case "tablet":
alto = ((altoFoto * anchoTablet) / anchoFoto).toFixed(0);
ancho = anchoTablet;
break;
}
// En el caso de que la altura sea mayor que el ancho, hay que desplazar la imagen para que quede centrada
// en altura
//$("#" + identificador).css("position", "relative").css("top", "-" + ((alto - ancho) / 2).toFixed(0).toString() + "px");
}
else {
switch(tipoDispositivo){
case "iPhone3":
ancho = ((anchoFoto * anchoiPhone3) / altoFoto).toFixed(0);
alto = anchoiPhone3;
break;
case "iPhone4":
ancho = ((anchoFoto * anchoiPhone4) / altoFoto).toFixed(0);
alto = anchoiPhone4;
break;
case "tablet":
ancho = ((anchoFoto * anchoTablet) / altoFoto).toFixed(0);
alto = anchoTablet;
break;
}
// En el caso de que la anchura sea mayor que la altura, hay que desplazar la imagen para que quede
// centrada en anchura
$('#' + identificador).css("position", "relative").css("left", "-" + ((ancho - alto)/2).toFixed(0).toString() + "px");
}
$('#' + identificador).attr("width", ancho);
$('#' + identificador).attr("height", alto);
*/
//console.log("Ancho: " + anchoFoto + ", alto: " + altoFoto);
});
}, function(error){
console.log("Ha fallado la carga del archivo " + rutaFoto);
});
}
}
}
}
/**
* NuevaTarjeta. Inserta una nueva tarjeta con los datos pasados como parámetros.
*
* @param categoria identificador de la categoría a la que pertenece la tarjeta
* @param titulo1 título 1 de la tarjeta
* @param titulo2 título 2 de la tarjeta
* @param fondo nombre de la imagen del fondo de la tarjeta
* @param foto nombre de la imagen principal de la tarjeta
* @param sonido nombre del sonido de la tarjeta
* @param ancho ancho en pixels de la foto
* @param alto alto en pixels de la imagen
* @param fuente tipografía asociada a la tarjeta
* @param tamanioFuente tamaño en pixels de la fuente utilizada
*/
function NuevaTarjeta(categoria, titulo1, titulo2, fondo, foto, sonido, ancho, alto, fuente){
var maxId = 0;
//console.log("llego a NuevaTarjeta");
try{
// obtención del último identificador utilizado
$.each(listaTarjetas, function(i, item){
if (item.id > maxId) {
maxId = item.id;
}
});
// Inserción de la tarjeta en la lista de tarjetas actuales (para la categoría actual) ...
listaTarjetas.push({
'id': (maxId+1),
'categoria': categoria,
'titulo1': titulo1,
'titulo2': titulo2,
'fondo': fondo,
'foto': foto,
'sonido': sonido,
'favorita': 0,
'anchoFoto': ancho,
'altoFoto': alto,
'fuente':fuente,
'idiomaA':idiomaSecundario.toLowerCase(),
'idiomaDe':idiomaPrincipal.toLowerCase()
});
// ... e inserción de la tarjeta en la base de datos
var sql = "insert into Tarjetas(id, categoria, titulo1, titulo2, fondo, foto, sonido, favorita, anchoFoto, altoFoto, fuente, idiomaA, idiomaDe ) values(" +
(maxId+1) + "," + categoria + ",\'" + titulo1 + "\',\'" + titulo2 + "\',\'" + fondo + "\',\'" + foto + "\',\'" + sonido + "\',0," + ancho +
"," + alto + ",'" + fuente + "','"+idiomaSecundario.toLowerCase()+"','"+idiomaPrincipal.toLowerCase()+"')";
console.log("El fondo es el numero: "+fondo);
console.log("Creamos una nueva tarjeta, SQL: "+sql);
bwBD.transaction(function(tx){
tx.executeSql(sql);
}, errorBD);
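// Editor's sketch (not part of the original source): WebSQL's executeSql also accepts "?"
// placeholders plus an argument array, which avoids hand-building the quoted string above.
// Hedged equivalent of the insert, assuming the same variables are in scope:
//
//   var sqlParam = "insert into Tarjetas(id, categoria, titulo1, titulo2, fondo, foto, sonido," +
//       " favorita, anchoFoto, altoFoto, fuente, idiomaA, idiomaDe)" +
//       " values(?,?,?,?,?,?,?,0,?,?,?,?,?)";
//   bwBD.transaction(function(tx){
//       tx.executeSql(sqlParam, [maxId + 1, categoria, titulo1, titulo2, fondo, foto, sonido,
//           ancho, alto, fuente, idiomaSecundario.toLowerCase(), idiomaPrincipal.toLowerCase()]);
//   }, errorBD);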
//Refrescamos el array con las tarjetas de la categoria actual
//console.log("la categoria es: "+categoria);
//console.log("Llamamos a obtenertarjetasporcategoria");
ObtenerTarjetasPorCategoria(categoria);
//console | case "tablet":
ancho = anchoTablet;
break; | random_line_split |
|
tarjetas.js | a, favoritas){
$('#lblListaTarjetas').html("");
var texto = "";
var letra = "";
var contador = 0;
var listaImagenesACargar = [];
if (favoritas)
$('#h1NombreCategoria').html(res_Favoritos)
mostrarFavoritas = favoritas;
if (activarPhoneGap){
switch(tipoDispositivo){
case "iPhone3":
ancho = anchoiPhone3;
break;
case "iPhone4":
ancho = anchoiPhone4;
break;
case "tablet":
ancho = anchoTablet;
break;
}
}
else {
ancho = anchoTablet;
}
var columna =1;
$.each(listaTarjetas, function(i, item) {
console.log("Comprobamos esta tarjeta para añadirla a la categoría ("+categoria.id+"): "+item.id+" con la categoria: "+item.categoria);
if ( ( (favoritas) && (item.favorita == 1) ) || ( (!favoritas) && (item.categoria == categoria.id) ) ) {
// Maquetación de la tabla que llevará cada una de las imágenes relacionadas con la tarjeta
if(columna ==1){
texto+="<tr>";
}
texto += "<td><div class='contenedorImg'><a href=\'javascript:;\' onClick=\'CargarTarjeta(event," + item.id + ", true)\' onTouchStart=\'(event," + item.id +
")\'><div class= 'divImgTarjeta conSombra'><img id=\'img" + item.id + "\' src=\'img/imagen_no_disponible_230.jpg\' />" + "</div></a></div></td>";
columna++;
if (columna ==4){
texto+="</tr>";
columna=1;
}
listaImagenesACargar.push(item);
contador += 1;
}
});
// Actualización del grid de imágenes
//console.log("Este es el texto: " + texto);
$('#lblListaTarjetas').html(texto);
//ObtenerTarjetasPorCategoria(categoria.id);
if (tarjetasPorCategoria.length <=2){
var altoPag =parseFloat($('#PaginaDetalleCategoria').height());
var altoImagen = altoPag-(altoPag*0.4);
//navigator.notification.alert("El alto de la pagina es de: "+altoPag+"+px el alto de la imagen es: "+altoImagen+"px");
$('.contenedorImg img').css('max-height',altoImagen.toString()+"px");
}
// Una vez que se haya cargado la lista de imágenes, hay que cargar sus rutas
$.each(listaImagenesACargar, function(i, item){
CargarFoto("img" + item.id, item.foto);
});
}
/**
* CargarFoto. Intenta cargar la foto pasada como parámetro. Si lo consigue, la redimensiona para que se muestre
* correctamente en la lista de tarjetas. Si no consigue cargarla, deja la imagen que esta en el identificador correspondiente.
*
* @param identificador id de la imagen donde cargará la foto
* @param rutaFoto ruta en el dispositivo donde se encuentra la foto
* @param anchoFoto ancho en pixels de la foto original
* @param altoFoto alto en pixels de la foto original
*/
function CargarFoto(identificador, rutaFoto){
if (activarPhoneGap) {
// Solamente se comprueba si exista la fotografía en el caso de que está activado el PhoneGap.
if ($.trim(rutaFoto).length > 0) {
// Se comprueba que no es la imagen por defecto
if (rutaFoto.indexOf('img/imagen_no_disponible') >= 0) {
//console.log("La foto a cargar es la de por defecto");
}
else {
window.resolveLocalFileSystemURI(rutaFoto, function(fileEntry){
$("#" + identificador).attr("src", rutaFoto).on('load', function(){
/*
if (anchoFoto < altoFoto){
switch(tipoDispositivo){
case "iPhone3":
alto = ((altoFoto * anchoiPhone3) / anchoFoto).toFixed(0);
ancho = anchoiPhone3;
break;
case "iPhone4":
alto = ((altoFoto * anchoiPhone4) / anchoFoto).toFixed(0);
ancho = anchoiPhone4;
break;
case "tablet":
alto = ((altoFoto * anchoTablet) / anchoFoto).toFixed(0);
ancho = anchoTablet;
break;
}
// En el caso de que la altura sea mayor que el ancho, hay que desplazar la imagen para que quede centrada
// en altura
//$("#" + identificador).css("position", "relative").css("top", "-" + ((alto - ancho) / 2).toFixed(0).toString() + "px");
}
else {
switch(tipoDispositivo){
case "iPhone3":
ancho = ((anchoFoto * anchoiPhone3) / altoFoto).toFixed(0);
alto = anchoiPhone3;
break;
case "iPhone4":
ancho = ((anchoFoto * anchoiPhone4) / altoFoto).toFixed(0);
alto = anchoiPhone4;
break;
case "tablet":
ancho = ((anchoFoto * anchoTablet) / altoFoto).toFixed(0);
alto = anchoTablet;
break;
}
// En el caso de que la anchura sea mayor que la altura, hay que desplazar la imagen para que quede
// centrada en anchura
$('#' + identificador).css("position", "relative").css("left", "-" + ((ancho - alto)/2).toFixed(0).toString() + "px");
}
$('#' + identificador).attr("width", ancho);
$('#' + identificador).attr("height", alto);
*/
//console.log("Ancho: " + anchoFoto + ", alto: " + altoFoto);
});
}, function(error){
console.log("Ha fallado la carga del archivo " + rutaFoto);
});
}
}
}
}
/**
* NuevaTarjeta. Inserta una nueva tarjeta con los datos pasados como parámetros.
*
* @param categoria identificador de la categoría a la que pertenece la tarjeta
* @param titulo1 título 1 de la tarjeta
* @param titulo2 título 2 de la tarjeta
* @param fondo nombre de la imagen del fondo de la tarjeta
* @param foto nombre de la imagen principal de la tarjeta
* @param sonido nombre del sonido de la tarjeta
* @param ancho ancho en pixels de la foto
* @param alto alto en pixels de la imagen
* @param fuente tipografía asociada a la tarjeta
* @param tamanioFuente tamaño en pixels de la fuente utilizada
*/
function NuevaTarjeta(categoria, titulo1, titulo2, fondo, foto, sonido, ancho, alto, fuente){
var maxId = 0;
//console.log("llego a NuevaTarjeta");
try{
// obtención del último identificador utilizado
$.each(listaTarjetas, function(i, item){
if (item.id > maxId) {
maxId = item.id;
}
});
// Inserción de la tarjeta en la lista de tarjetas actuales (para la categoría actual) ...
listaTarjetas.push({
'id': (maxId+1),
'categoria': categoria,
'titulo1': titulo1,
'titulo2': titulo2,
'fondo': fondo,
'foto': foto,
'sonido': sonido,
'favorita': 0,
'anchoFoto': ancho,
'altoFoto': alto,
'fuente':fuente,
'idiomaA':idiomaSecundario.toLowerCase(),
'idiomaDe':idiomaPrincipal.toLowerCase()
});
// ... e inserción de la tarjeta en la base de datos
var sql = "insert into Tarjetas(id, categoria, titulo1, titulo2, fondo, foto, sonido, favorita, anchoFoto, altoFoto, fuente, idiomaA, idiomaDe ) values(" +
(maxId+1) + "," + categoria + ",\'" + titulo1 + "\',\'" + titulo2 + "\',\'" + fondo + "\',\'" + foto + "\',\'" + sonido + "\',0," + ancho +
"," + alto + ",'" + fuente + "','"+idiomaSecundario | arListaTarjetas(categori | identifier_name |
|
annotate.rs | Date,
};
use id3::{
frame::{
Picture,
PictureType,
},
Tag,
Timestamp,
Version,
};
use regex::{
Regex,
};
use crate::{
types::{
AlbumFull,
ClientWithToken,
SimpleError,
Track,
},
utils::{
get_with_retry,
},
whitelist::{
add_whitelist,
},
};
#[derive(Debug)]
pub struct TrackData {
album_name: String,
album_artists: String,
release_date: Option<Timestamp>,
image_url: Option<String>,
track_name: String,
track_number: i32,
track_artists: Option<String>,
expected_duration_ms: i32,
}
impl TrackData {
pub fn release_date_from(
album_full: &AlbumFull,
) -> ParseResult<Timestamp> {
let mut year = -1;
let mut month = None;
let mut day = None;
if album_full.release_date_precision == "year" {
let date = NaiveDate::parse_from_str(
&album_full.release_date[..],
"%Y",
)?;
year = date.year();
}
if album_full.release_date_precision == "month" {
let date = NaiveDate::parse_from_str(
&album_full.release_date[..],
"%Y-%m",
)?;
year = date.year();
month = Some(date.month() as u8);
}
else if album_full.release_date_precision == "day" {
let date = NaiveDate::parse_from_str(
&album_full.release_date[..],
"%Y-%m-%d",
).expect("wat");
year = date.year();
month = Some(date.month() as u8);
day = Some(date.day() as u8);
}
Ok(Timestamp {
year: year,
month: month,
day: day,
hour: None,
minute: None,
second: None,
})
}
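// Editor's note (not part of the original file): the three branches map the API's
// release_date_precision onto progressively fuller timestamps, e.g. (hypothetical inputs):
//   precision "year",  "2019"       -> Timestamp { year: 2019, month: None,    day: None,    .. }
//   precision "month", "2019-07"    -> Timestamp { year: 2019, month: Some(7), day: None,    .. }
//   precision "day",   "2019-07-05" -> Timestamp { year: 2019, month: Some(7), day: Some(5), .. }
// Any other precision string leaves year at -1, since no branch runs.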
pub fn from(
track: Track,
album_full: &AlbumFull,
) -> Self {
let album_artists = album_full.artists.iter().map(|artist| {
artist.name.clone()
}).collect::<Vec<String>>().join(", ");
let track_artists = track.artists.iter().map(|artist| {
artist.name.clone()
}).collect::<Vec<String>>().join(", ");
Self {
album_name: album_full.name.clone(),
album_artists: album_artists.clone(),
release_date: Self::release_date_from(album_full).ok(),
image_url: album_full.images.iter().next().map(|image| {
image.url.clone()
}),
track_name: track.name,
track_number: track.track_number,
track_artists: Some(track_artists).filter(|artists| {
// need clone?
artists != &album_artists
}),
expected_duration_ms: track.duration_ms,
}
}
}
fn get_tracks_files(
abs_path: &Path,
) -> Result<Vec<PathBuf>, SimpleError> {
read_dir(abs_path).map_err(SimpleError::from).and_then(|dir_iter| {
dir_iter.map(|entry| {
entry.map(|entry_ok| {
entry_ok.path()
}).map_err(SimpleError::from)
}).collect::<Result<Vec<PathBuf>, SimpleError>>()
}).map(|mut paths| {
paths.sort();
paths.into_iter().filter(|path| {
path.is_file()
}).collect()
})
}
pub fn | (
album_full: &AlbumFull,
client_with_token: &ClientWithToken,
) -> Result<Vec<TrackData>, SimpleError> {
let mut tracks = Vec::new();
let mut paging = album_full.tracks.clone();
while let Some(next_url) = paging.next {
tracks.append(&mut paging.items);
paging = get_with_retry(
&next_url[..],
client_with_token,
)?;
}
tracks.append(&mut paging.items);
Ok(tracks.into_iter().map(|track| {
TrackData::from(track, album_full)
}).collect())
}
fn norm_track_number(
track_number: i32,
) -> String {
if track_number < 10 {
return format!("0{}", track_number);
}
track_number.to_string()
}
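// Editor's sketch (not part of the original file): a tiny unit test documenting the
// zero-padding behaviour of norm_track_number.
#[cfg(test)]
mod norm_track_number_sketch {
    use super::norm_track_number;

    #[test]
    fn pads_single_digit_track_numbers() {
        assert_eq!(norm_track_number(7), "07");
        assert_eq!(norm_track_number(12), "12");
    }
}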
fn expected_time(
file: &PathBuf,
track_data: &TrackData,
) -> bool {
let actual_duration = mp3_duration::from_path(file.as_path()).expect(
&format!("error measuring {}", file.display())[..],
);
let expected_duration = Duration::from_millis(
track_data.expected_duration_ms as u64,
);
actual_duration.checked_sub(expected_duration).or(
expected_duration.checked_sub(actual_duration)
).and_then(|res| {
res.checked_sub(Duration::from_secs(5))
}).is_none()
}
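// Editor's note (not part of the original file): expected_time() returns true only when the
// measured MP3 duration is within 5 seconds of the duration reported by the API. With
// hypothetical values: actual 3:05 vs expected 3:03 (2 s apart) -> true; actual 3:12 vs
// expected 3:03 (9 s apart) -> false. The checked_sub/or pair computes |actual - expected|
// without risking an underflow panic.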
fn get_image(
image_url: &str,
) -> Result<Vec<u8>, SimpleError> {
reqwest::get(image_url).map_err(SimpleError::from).and_then(|response| {
response.bytes().map(|byte_res| {
byte_res.map_err(SimpleError::from)
}).collect()
})
}
fn add_image(
tags: &mut Tag,
image: &Vec<u8>,
) {
tags.add_picture(Picture {
mime_type: "image/jpeg".to_string(),
picture_type: PictureType::CoverFront,
description: format!(
"Cover for {} by {}",
tags.album().expect("error in writing tags"),
tags.artist().expect("error in writing tags"),
),
data: image.clone(),
});
}
fn annotate_tags(
tags: &mut Tag,
file: &PathBuf,
track_data: TrackData,
album_image: &Vec<u8>,
) -> String {
lazy_static! {
static ref INVALID_FILE_CHRS: Regex = Regex::new(r"[^\w\s.\(\)]+").unwrap();
}
let mut new_name = format!(
"{} {}.mp3",
norm_track_number(track_data.track_number),
track_data.track_name,
);
if !expected_time(file, &track_data) {
new_name = format!(
"{} {} (unexpected duration).mp3",
norm_track_number(track_data.track_number),
track_data.track_name,
);
}
tags.set_album(track_data.album_name);
let album_artists = track_data.album_artists.clone();
track_data.track_artists.map(|artists| {
tags.set_album_artist(album_artists.clone());
tags.set_artist(artists);
}).unwrap_or_else(|| {
tags.set_artist(album_artists);
});
track_data.release_date.map(|date| {
tags.set_date_released(date);
});
tags.set_title(track_data.track_name);
tags.set_track(track_data.track_number as u32);
if !album_image.is_empty() {
add_image(tags, album_image)
}
INVALID_FILE_CHRS.replace_all(&new_name[..], "_").to_string()
}
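// Editor's note (not part of the original file): the returned string is the sanitized file
// name, not the tag data. INVALID_FILE_CHRS collapses every run of characters outside
// [word, whitespace, '.', '(', ')'] into a single '_', so for a hypothetical track 7 named
// "What's Up?" the result would be "07 What_s Up_.mp3" (apostrophe and '?' replaced).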
fn annotate_file(
file: &PathBuf,
track_data: TrackData,
album_image: &Vec<u8>,
rename_file: bool,
) -> Result<(), SimpleError> {
let mut tags = Tag::new();
let new_name = annotate_tags(&mut tags, file, track_data, album_image);
tags.write_to_path(file, Version::Id3v24).map_err(SimpleError::from)
.and_then(|_| {
if rename_file {
return file.as_path().file_name().ok_or(SimpleError {
msg: format!("{} not file?", file.display()),
}).and_then(|file_name| {
if new_name != file_name.to_string_lossy() {
return rename(
file,
file.with_file_name(new_name),
).map_err(SimpleError::from);
}
Ok(())
});
}
return Ok(());
})
}
pub fn annotate(
dir: &PathBuf,
album_full: &AlbumFull,
client_with_token: &ClientWithToken,
) -> Result<(), SimpleError> {
let abs_path = Path::new("/home/banana/music/").join(&dir.as_path());
let mut rename_files = true;
let files = get_tracks_files(&abs_path)?;
let mut data = get_tracks_data(album_full, client_with_token)?;
if files.len() != data.len() {
println!(
"number of files in {} should be {}, not renaming",
dir.display(),
data.len(),
);
rename_files = false;
}
let album_image = album_full.images.iter().next().map(|image| {
image.url.clone()
}).map(|url| {
get_image(&url[..]).unwrap_or_else(|err| {
println!("error getting image for {}: {}", album_full.name, err.msg);
vec![]
})
}).unwrap_or_else(|| {
println!("no image for {}", album_full.name);
vec![]
});
let mut track_counter = data.len() as i32;
data.extend(repeat_with(|| {
let track_data = TrackData {
album_name: album_full.name.clone(),
album_artists: album_full.artists.iter().map(|artist| {
artist.name.clone()
}).collect::<Vec<String>>().join(", "),
release_date: TrackData::release_date_from(album_full).ok(),
image_url: album_full.images.iter().next().map(|image| {
image.url.clone()
}),
track_name | get_tracks_data | identifier_name |
annotate.rs | Date,
};
use id3::{
frame::{
Picture,
PictureType,
},
Tag,
Timestamp,
Version,
};
use regex::{
Regex,
};
use crate::{
types::{
AlbumFull,
ClientWithToken,
SimpleError,
Track,
},
utils::{
get_with_retry,
},
whitelist::{
add_whitelist,
},
};
#[derive(Debug)]
pub struct TrackData {
album_name: String,
album_artists: String,
release_date: Option<Timestamp>,
image_url: Option<String>,
track_name: String,
track_number: i32,
track_artists: Option<String>,
expected_duration_ms: i32,
}
impl TrackData {
pub fn release_date_from(
album_full: &AlbumFull,
) -> ParseResult<Timestamp> {
let mut year = -1;
let mut month = None;
let mut day = None;
if album_full.release_date_precision == "year" {
let date = NaiveDate::parse_from_str(
&album_full.release_date[..],
"%Y",
)?;
year = date.year();
}
if album_full.release_date_precision == "month" {
let date = NaiveDate::parse_from_str(
&album_full.release_date[..],
"%Y-%m",
)?;
year = date.year();
month = Some(date.month() as u8);
}
else if album_full.release_date_precision == "day" {
let date = NaiveDate::parse_from_str(
&album_full.release_date[..],
"%Y-%m-%d",
).expect("wat");
year = date.year();
month = Some(date.month() as u8);
day = Some(date.day() as u8);
}
Ok(Timestamp {
year: year,
month: month,
day: day,
hour: None,
minute: None,
second: None,
})
}
pub fn from(
track: Track,
album_full: &AlbumFull,
) -> Self {
let album_artists = album_full.artists.iter().map(|artist| {
artist.name.clone()
}).collect::<Vec<String>>().join(", ");
let track_artists = track.artists.iter().map(|artist| {
artist.name.clone()
}).collect::<Vec<String>>().join(", ");
Self {
album_name: album_full.name.clone(),
album_artists: album_artists.clone(),
release_date: Self::release_date_from(album_full).ok(),
image_url: album_full.images.iter().next().map(|image| {
image.url.clone()
}),
track_name: track.name,
track_number: track.track_number,
track_artists: Some(track_artists).filter(|artists| {
// need clone?
artists != &album_artists
}),
expected_duration_ms: track.duration_ms,
}
}
}
fn get_tracks_files(
abs_path: &Path,
) -> Result<Vec<PathBuf>, SimpleError> {
read_dir(abs_path).map_err(SimpleError::from).and_then(|dir_iter| {
dir_iter.map(|entry| {
entry.map(|entry_ok| {
entry_ok.path()
}).map_err(SimpleError::from)
}).collect::<Result<Vec<PathBuf>, SimpleError>>()
}).map(|mut paths| {
paths.sort();
paths.into_iter().filter(|path| {
path.is_file()
}).collect()
})
}
pub fn get_tracks_data(
album_full: &AlbumFull,
client_with_token: &ClientWithToken,
) -> Result<Vec<TrackData>, SimpleError> {
let mut tracks = Vec::new();
let mut paging = album_full.tracks.clone();
while let Some(next_url) = paging.next {
tracks.append(&mut paging.items);
paging = get_with_retry(
&next_url[..],
client_with_token,
)?;
}
tracks.append(&mut paging.items);
Ok(tracks.into_iter().map(|track| {
TrackData::from(track, album_full)
}).collect())
}
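// Editor's note (not part of the original file): the while-let loop walks Spotify-style
// pagination: each page's items are appended and the next page is fetched while
// paging.next is Some(url). The extra append after the loop picks up the final page,
// whose next field is None. Every track is then wrapped into TrackData together with
// the album it came from.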
fn norm_track_number(
track_number: i32,
) -> String {
if track_number < 10 {
return format!("0{}", track_number);
}
track_number.to_string()
}
fn expected_time(
file: &PathBuf,
track_data: &TrackData,
) -> bool {
let actual_duration = mp3_duration::from_path(file.as_path()).expect(
&format!("error measuring {}", file.display())[..],
);
let expected_duration = Duration::from_millis(
track_data.expected_duration_ms as u64,
);
actual_duration.checked_sub(expected_duration).or(
expected_duration.checked_sub(actual_duration)
).and_then(|res| {
res.checked_sub(Duration::from_secs(5))
}).is_none()
}
fn get_image(
image_url: &str,
) -> Result<Vec<u8>, SimpleError> {
reqwest::get(image_url).map_err(SimpleError::from).and_then(|response| {
response.bytes().map(|byte_res| {
byte_res.map_err(SimpleError::from)
}).collect()
})
}
fn add_image(
tags: &mut Tag,
image: &Vec<u8>,
) {
tags.add_picture(Picture {
mime_type: "image/jpeg".to_string(),
picture_type: PictureType::CoverFront,
description: format!(
"Cover for {} by {}", | }
fn annotate_tags(
tags: &mut Tag,
file: &PathBuf,
track_data: TrackData,
album_image: &Vec<u8>,
) -> String {
lazy_static! {
static ref INVALID_FILE_CHRS: Regex = Regex::new(r"[^\w\s.\(\)]+").unwrap();
}
let mut new_name = format!(
"{} {}.mp3",
norm_track_number(track_data.track_number),
track_data.track_name,
);
if !expected_time(file, &track_data) {
new_name = format!(
"{} {} (unexpected duration).mp3",
norm_track_number(track_data.track_number),
track_data.track_name,
);
}
tags.set_album(track_data.album_name);
let album_artists = track_data.album_artists.clone();
track_data.track_artists.map(|artists| {
tags.set_album_artist(album_artists.clone());
tags.set_artist(artists);
}).unwrap_or_else(|| {
tags.set_artist(album_artists);
});
track_data.release_date.map(|date| {
tags.set_date_released(date);
});
tags.set_title(track_data.track_name);
tags.set_track(track_data.track_number as u32);
if !album_image.is_empty() {
add_image(tags, album_image)
}
INVALID_FILE_CHRS.replace_all(&new_name[..], "_").to_string()
}
fn annotate_file(
file: &PathBuf,
track_data: TrackData,
album_image: &Vec<u8>,
rename_file: bool,
) -> Result<(), SimpleError> {
let mut tags = Tag::new();
let new_name = annotate_tags(&mut tags, file, track_data, album_image);
tags.write_to_path(file, Version::Id3v24).map_err(SimpleError::from)
.and_then(|_| {
if rename_file {
return file.as_path().file_name().ok_or(SimpleError {
msg: format!("{} not file?", file.display()),
}).and_then(|file_name| {
if new_name != file_name.to_string_lossy() {
return rename(
file,
file.with_file_name(new_name),
).map_err(SimpleError::from);
}
Ok(())
});
}
return Ok(());
})
}
pub fn annotate(
dir: &PathBuf,
album_full: &AlbumFull,
client_with_token: &ClientWithToken,
) -> Result<(), SimpleError> {
let abs_path = Path::new("/home/banana/music/").join(&dir.as_path());
let mut rename_files = true;
let files = get_tracks_files(&abs_path)?;
let mut data = get_tracks_data(album_full, client_with_token)?;
if files.len() != data.len() {
println!(
"number of files in {} should be {}, not renaming",
dir.display(),
data.len(),
);
rename_files = false;
}
let album_image = album_full.images.iter().next().map(|image| {
image.url.clone()
}).map(|url| {
get_image(&url[..]).unwrap_or_else(|err| {
println!("error getting image for {}: {}", album_full.name, err.msg);
vec![]
})
}).unwrap_or_else(|| {
println!("no image for {}", album_full.name);
vec![]
});
let mut track_counter = data.len() as i32;
data.extend(repeat_with(|| {
let track_data = TrackData {
album_name: album_full.name.clone(),
album_artists: album_full.artists.iter().map(|artist| {
artist.name.clone()
}).collect::<Vec<String>>().join(", "),
release_date: TrackData::release_date_from(album_full).ok(),
image_url: album_full.images.iter().next().map(|image| {
image.url.clone()
}),
track_name: " | tags.album().expect("error in writing tags"),
tags.artist().expect("error in writing tags"),
),
data: image.clone(),
}); | random_line_split |
annotate.rs | ,
};
use id3::{
frame::{
Picture,
PictureType,
},
Tag,
Timestamp,
Version,
};
use regex::{
Regex,
};
use crate::{
types::{
AlbumFull,
ClientWithToken,
SimpleError,
Track,
},
utils::{
get_with_retry,
},
whitelist::{
add_whitelist,
},
};
#[derive(Debug)]
pub struct TrackData {
album_name: String,
album_artists: String,
release_date: Option<Timestamp>,
image_url: Option<String>,
track_name: String,
track_number: i32,
track_artists: Option<String>,
expected_duration_ms: i32,
}
impl TrackData {
pub fn release_date_from(
album_full: &AlbumFull,
) -> ParseResult<Timestamp> {
let mut year = -1;
let mut month = None;
let mut day = None;
if album_full.release_date_precision == "year" |
if album_full.release_date_precision == "month" {
let date = NaiveDate::parse_from_str(
&album_full.release_date[..],
"%Y-%m",
)?;
year = date.year();
month = Some(date.month() as u8);
}
else if album_full.release_date_precision == "day" {
let date = NaiveDate::parse_from_str(
&album_full.release_date[..],
"%Y-%m-%d",
).expect("wat");
year = date.year();
month = Some(date.month() as u8);
day = Some(date.day() as u8);
}
Ok(Timestamp {
year: year,
month: month,
day: day,
hour: None,
minute: None,
second: None,
})
}
pub fn from(
track: Track,
album_full: &AlbumFull,
) -> Self {
let album_artists = album_full.artists.iter().map(|artist| {
artist.name.clone()
}).collect::<Vec<String>>().join(", ");
let track_artists = track.artists.iter().map(|artist| {
artist.name.clone()
}).collect::<Vec<String>>().join(", ");
Self {
album_name: album_full.name.clone(),
album_artists: album_artists.clone(),
release_date: Self::release_date_from(album_full).ok(),
image_url: album_full.images.iter().next().map(|image| {
image.url.clone()
}),
track_name: track.name,
track_number: track.track_number,
track_artists: Some(track_artists).filter(|artists| {
// need clone?
artists != &album_artists
}),
expected_duration_ms: track.duration_ms,
}
}
}
fn get_tracks_files(
abs_path: &Path,
) -> Result<Vec<PathBuf>, SimpleError> {
read_dir(abs_path).map_err(SimpleError::from).and_then(|dir_iter| {
dir_iter.map(|entry| {
entry.map(|entry_ok| {
entry_ok.path()
}).map_err(SimpleError::from)
}).collect::<Result<Vec<PathBuf>, SimpleError>>()
}).map(|mut paths| {
paths.sort();
paths.into_iter().filter(|path| {
path.is_file()
}).collect()
})
}
pub fn get_tracks_data(
album_full: &AlbumFull,
client_with_token: &ClientWithToken,
) -> Result<Vec<TrackData>, SimpleError> {
let mut tracks = Vec::new();
let mut paging = album_full.tracks.clone();
while let Some(next_url) = paging.next {
tracks.append(&mut paging.items);
paging = get_with_retry(
&next_url[..],
client_with_token,
)?;
}
tracks.append(&mut paging.items);
Ok(tracks.into_iter().map(|track| {
TrackData::from(track, album_full)
}).collect())
}
fn norm_track_number(
track_number: i32,
) -> String {
if track_number < 10 {
return format!("0{}", track_number);
}
track_number.to_string()
}
fn expected_time(
file: &PathBuf,
track_data: &TrackData,
) -> bool {
let actual_duration = mp3_duration::from_path(file.as_path()).expect(
&format!("error measuring {}", file.display())[..],
);
let expected_duration = Duration::from_millis(
track_data.expected_duration_ms as u64,
);
actual_duration.checked_sub(expected_duration).or(
expected_duration.checked_sub(actual_duration)
).and_then(|res| {
res.checked_sub(Duration::from_secs(5))
}).is_none()
}
fn get_image(
image_url: &str,
) -> Result<Vec<u8>, SimpleError> {
reqwest::get(image_url).map_err(SimpleError::from).and_then(|response| {
response.bytes().map(|byte_res| {
byte_res.map_err(SimpleError::from)
}).collect()
})
}
fn add_image(
tags: &mut Tag,
image: &Vec<u8>,
) {
tags.add_picture(Picture {
mime_type: "image/jpeg".to_string(),
picture_type: PictureType::CoverFront,
description: format!(
"Cover for {} by {}",
tags.album().expect("error in writing tags"),
tags.artist().expect("error in writing tags"),
),
data: image.clone(),
});
}
fn annotate_tags(
tags: &mut Tag,
file: &PathBuf,
track_data: TrackData,
album_image: &Vec<u8>,
) -> String {
lazy_static! {
static ref INVALID_FILE_CHRS: Regex = Regex::new(r"[^\w\s.\(\)]+").unwrap();
}
let mut new_name = format!(
"{} {}.mp3",
norm_track_number(track_data.track_number),
track_data.track_name,
);
if !expected_time(file, &track_data) {
new_name = format!(
"{} {} (unexpected duration).mp3",
norm_track_number(track_data.track_number),
track_data.track_name,
);
}
tags.set_album(track_data.album_name);
let album_artists = track_data.album_artists.clone();
track_data.track_artists.map(|artists| {
tags.set_album_artist(album_artists.clone());
tags.set_artist(artists);
}).unwrap_or_else(|| {
tags.set_artist(album_artists);
});
track_data.release_date.map(|date| {
tags.set_date_released(date);
});
tags.set_title(track_data.track_name);
tags.set_track(track_data.track_number as u32);
if !album_image.is_empty() {
add_image(tags, album_image)
}
INVALID_FILE_CHRS.replace_all(&new_name[..], "_").to_string()
}
fn annotate_file(
file: &PathBuf,
track_data: TrackData,
album_image: &Vec<u8>,
rename_file: bool,
) -> Result<(), SimpleError> {
let mut tags = Tag::new();
let new_name = annotate_tags(&mut tags, file, track_data, album_image);
tags.write_to_path(file, Version::Id3v24).map_err(SimpleError::from)
.and_then(|_| {
if rename_file {
return file.as_path().file_name().ok_or(SimpleError {
msg: format!("{} not file?", file.display()),
}).and_then(|file_name| {
if new_name != file_name.to_string_lossy() {
return rename(
file,
file.with_file_name(new_name),
).map_err(SimpleError::from);
}
Ok(())
});
}
return Ok(());
})
}
pub fn annotate(
dir: &PathBuf,
album_full: &AlbumFull,
client_with_token: &ClientWithToken,
) -> Result<(), SimpleError> {
let abs_path = Path::new("/home/banana/music/").join(&dir.as_path());
let mut rename_files = true;
let files = get_tracks_files(&abs_path)?;
let mut data = get_tracks_data(album_full, client_with_token)?;
if files.len() != data.len() {
println!(
"number of files in {} should be {}, not renaming",
dir.display(),
data.len(),
);
rename_files = false;
}
let album_image = album_full.images.iter().next().map(|image| {
image.url.clone()
}).map(|url| {
get_image(&url[..]).unwrap_or_else(|err| {
println!("error getting image for {}: {}", album_full.name, err.msg);
vec![]
})
}).unwrap_or_else(|| {
println!("no image for {}", album_full.name);
vec![]
});
let mut track_counter = data.len() as i32;
data.extend(repeat_with(|| {
let track_data = TrackData {
album_name: album_full.name.clone(),
album_artists: album_full.artists.iter().map(|artist| {
artist.name.clone()
}).collect::<Vec<String>>().join(", "),
release_date: TrackData::release_date_from(album_full).ok(),
image_url: album_full.images.iter().next().map(|image| {
image.url.clone()
}),
track_name | {
let date = NaiveDate::parse_from_str(
&album_full.release_date[..],
"%Y",
)?;
year = date.year();
} | conditional_block |
annotate.rs | ,
};
use id3::{
frame::{
Picture,
PictureType,
},
Tag,
Timestamp,
Version,
};
use regex::{
Regex,
};
use crate::{
types::{
AlbumFull,
ClientWithToken,
SimpleError,
Track,
},
utils::{
get_with_retry,
},
whitelist::{
add_whitelist,
},
};
#[derive(Debug)]
pub struct TrackData {
album_name: String,
album_artists: String,
release_date: Option<Timestamp>,
image_url: Option<String>,
track_name: String,
track_number: i32,
track_artists: Option<String>,
expected_duration_ms: i32,
}
impl TrackData {
pub fn release_date_from(
album_full: &AlbumFull,
) -> ParseResult<Timestamp> {
let mut year = -1;
let mut month = None;
let mut day = None;
if album_full.release_date_precision == "year" {
let date = NaiveDate::parse_from_str(
&album_full.release_date[..],
"%Y",
)?;
year = date.year();
}
if album_full.release_date_precision == "month" {
let date = NaiveDate::parse_from_str(
&album_full.release_date[..],
"%Y-%m",
)?;
year = date.year();
month = Some(date.month() as u8);
}
else if album_full.release_date_precision == "day" {
let date = NaiveDate::parse_from_str(
&album_full.release_date[..],
"%Y-%m-%d",
).expect("wat");
year = date.year();
month = Some(date.month() as u8);
day = Some(date.day() as u8);
}
Ok(Timestamp {
year: year,
month: month,
day: day,
hour: None,
minute: None,
second: None,
})
}
pub fn from(
track: Track,
album_full: &AlbumFull,
) -> Self {
let album_artists = album_full.artists.iter().map(|artist| {
artist.name.clone()
}).collect::<Vec<String>>().join(", ");
let track_artists = track.artists.iter().map(|artist| {
artist.name.clone()
}).collect::<Vec<String>>().join(", ");
Self {
album_name: album_full.name.clone(),
album_artists: album_artists.clone(),
release_date: Self::release_date_from(album_full).ok(),
image_url: album_full.images.iter().next().map(|image| {
image.url.clone()
}),
track_name: track.name,
track_number: track.track_number,
track_artists: Some(track_artists).filter(|artists| {
// need clone?
artists != &album_artists
}),
expected_duration_ms: track.duration_ms,
}
}
}
fn get_tracks_files(
abs_path: &Path,
) -> Result<Vec<PathBuf>, SimpleError> {
read_dir(abs_path).map_err(SimpleError::from).and_then(|dir_iter| {
dir_iter.map(|entry| {
entry.map(|entry_ok| {
entry_ok.path()
}).map_err(SimpleError::from)
}).collect::<Result<Vec<PathBuf>, SimpleError>>()
}).map(|mut paths| {
paths.sort();
paths.into_iter().filter(|path| {
path.is_file()
}).collect()
})
}
pub fn get_tracks_data(
album_full: &AlbumFull,
client_with_token: &ClientWithToken,
) -> Result<Vec<TrackData>, SimpleError> {
let mut tracks = Vec::new();
let mut paging = album_full.tracks.clone();
while let Some(next_url) = paging.next {
tracks.append(&mut paging.items);
paging = get_with_retry(
&next_url[..],
client_with_token,
)?;
}
tracks.append(&mut paging.items);
Ok(tracks.into_iter().map(|track| {
TrackData::from(track, album_full)
}).collect())
}
fn norm_track_number(
track_number: i32,
) -> String |
fn expected_time(
file: &PathBuf,
track_data: &TrackData,
) -> bool {
let actual_duration = mp3_duration::from_path(file.as_path()).expect(
&format!("error measuring {}", file.display())[..],
);
let expected_duration = Duration::from_millis(
track_data.expected_duration_ms as u64,
);
actual_duration.checked_sub(expected_duration).or(
expected_duration.checked_sub(actual_duration)
).and_then(|res| {
res.checked_sub(Duration::from_secs(5))
}).is_none()
}
fn get_image(
image_url: &str,
) -> Result<Vec<u8>, SimpleError> {
reqwest::get(image_url).map_err(SimpleError::from).and_then(|response| {
response.bytes().map(|byte_res| {
byte_res.map_err(SimpleError::from)
}).collect()
})
}
fn add_image(
tags: &mut Tag,
image: &Vec<u8>,
) {
tags.add_picture(Picture {
mime_type: "image/jpeg".to_string(),
picture_type: PictureType::CoverFront,
description: format!(
"Cover for {} by {}",
tags.album().expect("error in writing tags"),
tags.artist().expect("error in writing tags"),
),
data: image.clone(),
});
}
fn annotate_tags(
tags: &mut Tag,
file: &PathBuf,
track_data: TrackData,
album_image: &Vec<u8>,
) -> String {
lazy_static! {
static ref INVALID_FILE_CHRS: Regex = Regex::new(r"[^\w\s.\(\)]+").unwrap();
}
let mut new_name = format!(
"{} {}.mp3",
norm_track_number(track_data.track_number),
track_data.track_name,
);
if !expected_time(file, &track_data) {
new_name = format!(
"{} {} (unexpected duration).mp3",
norm_track_number(track_data.track_number),
track_data.track_name,
);
}
tags.set_album(track_data.album_name);
let album_artists = track_data.album_artists.clone();
track_data.track_artists.map(|artists| {
tags.set_album_artist(album_artists.clone());
tags.set_artist(artists);
}).unwrap_or_else(|| {
tags.set_artist(album_artists);
});
track_data.release_date.map(|date| {
tags.set_date_released(date);
});
tags.set_title(track_data.track_name);
tags.set_track(track_data.track_number as u32);
if !album_image.is_empty() {
add_image(tags, album_image)
}
INVALID_FILE_CHRS.replace_all(&new_name[..], "_").to_string()
}
fn annotate_file(
file: &PathBuf,
track_data: TrackData,
album_image: &Vec<u8>,
rename_file: bool,
) -> Result<(), SimpleError> {
let mut tags = Tag::new();
let new_name = annotate_tags(&mut tags, file, track_data, album_image);
tags.write_to_path(file, Version::Id3v24).map_err(SimpleError::from)
.and_then(|_| {
if rename_file {
return file.as_path().file_name().ok_or(SimpleError {
msg: format!("{} not file?", file.display()),
}).and_then(|file_name| {
if new_name != file_name.to_string_lossy() {
return rename(
file,
file.with_file_name(new_name),
).map_err(SimpleError::from);
}
Ok(())
});
}
return Ok(());
})
}
pub fn annotate(
dir: &PathBuf,
album_full: &AlbumFull,
client_with_token: &ClientWithToken,
) -> Result<(), SimpleError> {
let abs_path = Path::new("/home/banana/music/").join(&dir.as_path());
let mut rename_files = true;
let files = get_tracks_files(&abs_path)?;
let mut data = get_tracks_data(album_full, client_with_token)?;
if files.len() != data.len() {
println!(
"number of files in {} should be {}, not renaming",
dir.display(),
data.len(),
);
rename_files = false;
}
let album_image = album_full.images.iter().next().map(|image| {
image.url.clone()
}).map(|url| {
get_image(&url[..]).unwrap_or_else(|err| {
println!("error getting image for {}: {}", album_full.name, err.msg);
vec![]
})
}).unwrap_or_else(|| {
println!("no image for {}", album_full.name);
vec![]
});
let mut track_counter = data.len() as i32;
data.extend(repeat_with(|| {
let track_data = TrackData {
album_name: album_full.name.clone(),
album_artists: album_full.artists.iter().map(|artist| {
artist.name.clone()
}).collect::<Vec<String>>().join(", "),
release_date: TrackData::release_date_from(album_full).ok(),
image_url: album_full.images.iter().next().map(|image| {
image.url.clone()
}),
track_name | {
if track_number < 10 {
return format!("0{}", track_number);
}
track_number.to_string()
} | identifier_body |
terminal.rs | (unused)]
use super::Color;
pub const TEXT_WHITE: Color = 251;
pub const CYAN: Color = 6;
pub const YELLOW: Color = 3;
pub const RED: Color = 1;
pub const BRIGHT_RED: Color = 9;
pub const BRIGHT_GREEN: Color = 10;
pub const LIGHT_GRAY: Color = 243;
pub const LESS_LIGHT_GRAY: Color = 240;
pub const JUNGLE_GREEN: Color = 112;
pub const ORANGE: Color = 208;
pub const SIGNALING_RED: Color = 196;
}
fn fmt_to_color(fmt: FormatLike) -> Color {
use self::FormatLike::*;
match fmt {
Text => color::TEXT_WHITE,
PrimaryText => color::JUNGLE_GREEN,
Lines => color::LIGHT_GRAY,
SoftWarning => color::ORANGE,
HardWarning => color::SIGNALING_RED,
Error => color::RED,
ExplicitOk => color::BRIGHT_GREEN,
Hidden => color::LESS_LIGHT_GRAY
}
}
#[derive(Debug)]
pub struct Terminal {
column_count: usize,
text_segments: SmallVec<[SmallVec<[TextSegment; 2]>; 2]>,
error_segments: Vec<(&'static str, String)>,
terminfo: Database,
}
impl TerminalPlugin for Terminal {
fn new(column_count: usize) -> Self {
let terminfo = Database::from_env().unwrap();
Terminal {
column_count,
text_segments: Default::default(),
error_segments: Default::default(),
terminfo
}
}
fn add_text_segment(&mut self, text: &str, fmt_args: FormatLike) {
self.text_segments.push(smallvec![TextSegment::new(text, fmt_args)]);
}
fn add_error_segment(&mut self, scope: &'static str, msg: &str) {
self.error_segments.push((scope, msg.into()));
}
fn extend_previous_segment(&mut self, text: &str, fmt_args: FormatLike) {
{
if let Some(last) = self.text_segments.last_mut() {
last.push(TextSegment::new(text, fmt_args));
return;
}
}
self.add_text_segment(text, fmt_args);
}
fn flush_to_stdout(&self, prompt_ending: &str) {
//TODO split into multiple functions
// - one for outputting text segments
// - one for outputting error segments
let layout = self.calculate_layout();
let stdout = io::stdout();
let mut term = self.writer(stdout.lock());
self.render_text_segments(&mut term, layout);
self.render_error_segments(&mut term);
term.fmt(FormatLike::Lines);
write!(term, "{}{}", CORNER_NE, prompt_ending).unwrap();
term.reset_fmt();
term.flush().unwrap();
}
}
impl Terminal {
fn render_text_segments<W>(&self, term: &mut TermWriter<W>, layout: Vec<LineLayout>)
where W: Write
{
let mut first = true;
for LineLayout { segments, join_padding, rem_padding } in layout {
term.fmt(FormatLike::Lines);
if first {
first = false;
write!(term, "{}", CORNER_SE).unwrap();
} else {
write!(term, "{}", CORNER_NSE).unwrap();
}
for segment_group in &self.text_segments[segments] {
for segment in segment_group {
term.fmt(FormatLike::Lines);
write!(term, "{}", TEXT_START).unwrap();
term.fmt(segment.fmt);
write!(term, "{}", &segment.text).unwrap();
term.fmt(FormatLike::Lines);
write!(term, "{}", TEXT_END).unwrap();
}
for _ in 0..join_padding {
write!(term, "{}", LINE).unwrap();
}
}
for _ in 0..rem_padding {
write!(term, "{}", LINE).unwrap();
}
write!(term, "\n").unwrap();
}
}
fn render_error_segments<W>(&self, term: &mut TermWriter<W>)
where W: Write
{
for (scope, text) in self.error_segments.iter() {
term.fmt(FormatLike::Lines);
write!(term, "{}", CORNER_NSE).unwrap();
term.fmt(FormatLike::Error);
let mut text = text.trim();
write!(term, "{} {}: ", ERR_START, scope).unwrap();
let bulk_len = 1 + ERR_START.len() + 1 + scope.len() + 2;
let mut rem_len = self.column_count.checked_sub(bulk_len).unwrap_or(0);
loop {
if text.len() <= rem_len {
term.fmt(FormatLike::Error);
write!(term, "{}", text).unwrap();
break;
} else {
//find split point and split text
let split_idx = find_viable_split_idx(text, rem_len);
let (line_text, new_text) = text.split_at(split_idx);
text = new_text.trim_start();
rem_len = self.column_count - 3;
term.fmt(FormatLike::Error);
write!(term, "{text}", text=line_text.trim_end()).unwrap();
term.fmt(FormatLike::Lines);
write!(term, "\n{sep}", sep=CORNER_NS).unwrap();
for _ in 0..ERR_START.len()+1 {
write!(term, " ").unwrap();
}
}
}
write!(term, "\n").unwrap();
}
}
}
fn find_viable_split_idx(text: &str, max_len: usize) -> usize {
let mut last_split_idx = 0;
let mut last_char_idx = 0;
for (idx, ch) in text.char_indices() {
if idx + ch.len_utf8() > max_len {
break;
}
last_char_idx = idx;
if !(ch.is_alphanumeric() || ch == '.' || ch=='!' || ch==':' || ch=='?') {
last | t_split_idx == 0 {
last_char_idx
} else {
last_split_idx
}
}
impl Terminal {
fn writer<W>(&self, out: W) -> TermWriter<W>
where W: Write
{
TermWriter {
terminal: self,
out
}
}
fn calculate_layout(&self) -> Vec<LineLayout> {
// -1 as it starts with a `╠` or similar
let init_rem_space = self.column_count - 1;
let mut lines = Vec::new();
let mut text_segments = self.text_segments.iter().peekable();
let mut idx_offset = 0;
while let Some(line) = calc_next_line_layout(&mut text_segments, init_rem_space, idx_offset) {
idx_offset = line.segments.end;
lines.push(line)
}
lines
}
}
fn calc_next_line_layout<'a>(
iter: &mut Peekable<impl Iterator<Item=impl IntoIterator<Item=&'a TextSegment>+Copy>>,
init_rem_space: usize,
idx_offset: usize
) -> Option<LineLayout> {
let first_seg =
match iter.next() {
Some(seg) => seg,
None => {return None;}
};
let first_item = idx_offset;
let mut after_last_item = idx_offset + 1;
let first_len = calc_min_segment_group_len(first_seg);
if first_len >= init_rem_space {
let segments = first_item..after_last_item;
return Some(LineLayout {
segments,
join_padding: 0,
rem_padding: 0
});
}
let mut rem_space = init_rem_space - first_len;
while let Some(segment_group_iter) = iter.peek().map(|i| *i) {
let min_len = calc_min_segment_group_len(segment_group_iter);
if rem_space > min_len {
rem_space -= min_len;
after_last_item += 1;
iter.next();
} else {
let segments = first_item..after_last_item;
let (join_padding, rem_padding) = calc_padding(first_item, after_last_item, rem_space);
return Some(LineLayout { segments, join_padding, rem_padding })
}
}
let segments = first_item..after_last_item;
let (join_padding, rem_padding) = calc_padding(first_item, after_last_item, rem_space);
Some(LineLayout { segments, join_padding, rem_padding })
}
fn calc_padding(
first_item: usize,
after_last_item: usize,
rem_space: usize
) -> (usize, usize) {
let nr_items = after_last_item - first_item;
let join_padding = rem_space / nr_items;
let join_padding = min(join_padding, config::MAX_JOIN_PADDING);
let rem_padding = rem_space - (join_padding * nr_items);
(join_padding, rem_padding)
}
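// Editor's note (not part of the original file): worked example with hypothetical numbers.
// Three segment groups (first_item = 0, after_last_item = 3) and 10 leftover columns give
// join_padding = 10 / 3 = 3 and rem_padding = 10 - 3 * 3 = 1, assuming
// config::MAX_JOIN_PADDING is at least 3; if the cap is smaller, join_padding is clamped
// to it and the remainder flows into rem_padding.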
fn calc_min_segment_group_len<'a>(group: impl IntoIterator<Item=&'a TextSegment>) -> usize {
// +2 as in TEXT_START(char) + TEXT_END(char)
group.into_iter().map(|seg| seg.pre_calculated_length + 2).sum()
}
struct LineLayout {
segments: Range<usize>,
join_padding: usize,
rem_padding: usize
}
| _split_idx = idx;
}
}
if las | conditional_block |
terminal.rs | text_segments: Default::default(),
error_segments: Default::default(),
terminfo
}
}
fn add_text_segment(&mut self, text: &str, fmt_args: FormatLike) {
self.text_segments.push(smallvec![TextSegment::new(text, fmt_args)]);
}
fn add_error_segment(&mut self, scope: &'static str, msg: &str) {
self.error_segments.push((scope, msg.into()));
}
fn extend_previous_segment(&mut self, text: &str, fmt_args: FormatLike) {
{
if let Some(last) = self.text_segments.last_mut() {
last.push(TextSegment::new(text, fmt_args));
return;
}
}
self.add_text_segment(text, fmt_args);
}
fn flush_to_stdout(&self, prompt_ending: &str) {
//TODO split into multiple functions
// - one for outputting text segments
// - one for outputting error segments
let layout = self.calculate_layout();
let stdout = io::stdout();
let mut term = self.writer(stdout.lock());
self.render_text_segments(&mut term, layout);
self.render_error_segments(&mut term);
term.fmt(FormatLike::Lines);
write!(term, "{}{}", CORNER_NE, prompt_ending).unwrap();
term.reset_fmt();
term.flush().unwrap();
}
}
impl Terminal {
fn render_text_segments<W>(&self, term: &mut TermWriter<W>, layout: Vec<LineLayout>)
where W: Write
{
let mut first = true;
for LineLayout { segments, join_padding, rem_padding } in layout {
term.fmt(FormatLike::Lines);
if first {
first = false;
write!(term, "{}", CORNER_SE).unwrap();
} else {
write!(term, "{}", CORNER_NSE).unwrap();
}
for segment_group in &self.text_segments[segments] {
for segment in segment_group {
term.fmt(FormatLike::Lines);
write!(term, "{}", TEXT_START).unwrap();
term.fmt(segment.fmt);
write!(term, "{}", &segment.text).unwrap();
term.fmt(FormatLike::Lines);
write!(term, "{}", TEXT_END).unwrap();
}
for _ in 0..join_padding {
write!(term, "{}", LINE).unwrap();
}
}
for _ in 0..rem_padding {
write!(term, "{}", LINE).unwrap();
}
write!(term, "\n").unwrap();
}
}
fn render_error_segments<W>(&self, term: &mut TermWriter<W>)
where W: Write
{
for (scope, text) in self.error_segments.iter() {
term.fmt(FormatLike::Lines);
write!(term, "{}", CORNER_NSE).unwrap();
term.fmt(FormatLike::Error);
let mut text = text.trim();
write!(term, "{} {}: ", ERR_START, scope).unwrap();
let bulk_len = 1 + ERR_START.len() + 1 + scope.len() + 2;
let mut rem_len = self.column_count.checked_sub(bulk_len).unwrap_or(0);
loop {
if text.len() <= rem_len {
term.fmt(FormatLike::Error);
write!(term, "{}", text).unwrap();
break;
} else {
//find split point and split text
let split_idx = find_viable_split_idx(text, rem_len);
let (line_text, new_text) = text.split_at(split_idx);
text = new_text.trim_start();
rem_len = self.column_count - 3;
term.fmt(FormatLike::Error);
write!(term, "{text}", text=line_text.trim_end()).unwrap();
term.fmt(FormatLike::Lines);
write!(term, "\n{sep}", sep=CORNER_NS).unwrap();
for _ in 0..ERR_START.len()+1 {
write!(term, " ").unwrap();
}
}
}
write!(term, "\n").unwrap();
}
}
}
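// Returns a byte index at which `text` can be split so that the first part fits
// into `max_len` bytes, preferring the last non-word character (e.g. whitespace)
// and falling back to the last character boundary that still fits.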
fn find_viable_split_idx(text: &str, max_len: usize) -> usize {
let mut last_split_idx = 0;
let mut last_char_idx = 0;
for (idx, ch) in text.char_indices() {
if idx + ch.len_utf8() > max_len {
break;
}
last_char_idx = idx;
if !(ch.is_alphanumeric() || ch == '.' || ch=='!' || ch==':' || ch=='?') {
last_split_idx = idx;
}
}
if last_split_idx == 0 {
last_char_idx
} else {
last_split_idx
}
}
impl Terminal {
fn writer<W>(&self, out: W) -> TermWriter<W>
where W: Write
{
TermWriter {
terminal: self,
out
}
}
fn calculate_layout(&self) -> Vec<LineLayout> {
// -1 as it starts with a `╠` or similar
let init_rem_space = self.column_count - 1;
let mut lines = Vec::new();
let mut text_segments = self.text_segments.iter().peekable();
let mut idx_offset = 0;
while let Some(line) = calc_next_line_layout(&mut text_segments, init_rem_space, idx_offset) {
idx_offset = line.segments.end;
lines.push(line)
}
lines
}
}
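// Greedily packs whole segment groups onto one line: the first group is always
// taken, further groups are added while they still fit, and the leftover width
// is turned into join/trailing padding.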
fn calc_next_line_layout<'a>(
iter: &mut Peekable<impl Iterator<Item=impl IntoIterator<Item=&'a TextSegment>+Copy>>,
init_rem_space: usize,
idx_offset: usize
) -> Option<LineLayout> {
let first_seg =
match iter.next() {
Some(seg) => seg,
None => {return None;}
};
let first_item = idx_offset;
let mut after_last_item = idx_offset + 1;
let first_len = calc_min_segment_group_len(first_seg);
if first_len >= init_rem_space {
let segments = first_item..after_last_item;
return Some(LineLayout {
segments,
join_padding: 0,
rem_padding: 0
});
}
let mut rem_space = init_rem_space - first_len;
while let Some(segment_group_iter) = iter.peek().map(|i| *i) {
let min_len = calc_min_segment_group_len(segment_group_iter);
if rem_space > min_len {
rem_space -= min_len;
after_last_item += 1;
iter.next();
} else {
let segments = first_item..after_last_item;
let (join_padding, rem_padding) = calc_padding(first_item, after_last_item, rem_space);
return Some(LineLayout { segments, join_padding, rem_padding })
}
}
let segments = first_item..after_last_item;
let (join_padding, rem_padding) = calc_padding(first_item, after_last_item, rem_space);
Some(LineLayout { segments, join_padding, rem_padding })
}
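// Splits the leftover width into per-join padding (capped at
// config::MAX_JOIN_PADDING) plus whatever remains as trailing padding.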
fn calc_padding(
first_item: usize,
after_last_item: usize,
rem_space: usize
) -> (usize, usize) {
let nr_items = after_last_item - first_item;
let join_padding = rem_space / nr_items;
let join_padding = min(join_padding, config::MAX_JOIN_PADDING);
let rem_padding = rem_space - (join_padding * nr_items);
(join_padding, rem_padding)
}
fn calc_min_segment_group_len<'a>(group: impl IntoIterator<Item=&'a TextSegment>) -> usize {
// +2 as in TEXT_START(char) + TEXT_END(char)
group.into_iter().map(|seg| seg.pre_calculated_length + 2).sum()
}
struct LineLayout {
segments: Range<usize>,
join_padding: usize,
rem_padding: usize
}
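// Couples an output sink with the terminal's terminfo database so colored spans
// can be wrapped in the appropriate escape sequences.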
struct TermWriter<'a, W: Write+'a> {
terminal: &'a Terminal,
out: W
}
impl<'a, W: 'a> TermWriter<'a, W>
where W: Write
{
fn fmt(&mut self, fmt: FormatLike) {
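// \x01/\x02 bracket the escape sequence as non-printing so that line editors
// such as readline do not count it toward the prompt width (editorial note).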
write!(&mut self.out, "\x01").unwrap();
let color = fmt_to_color(fmt);
if let Some(cap) = self.terminal.terminfo.get::<cap::SetAForeground>() {
expand!(&mut self.out, cap.as_ref(); color).unwrap();
}
write!(&mut self.out, "\x02").unwrap();
}
fn reset_fmt(&mut self) {
write!(&mut self.out, "\x01").unwrap();
let terminfo = &self.terminal.terminfo;
if let Some(cap) = terminfo.get::<cap::ExitAttributeMode>() {
expand!(&mut self.out, cap.as_ref();).unwrap();
} else if let Some(cap) = terminfo.get::<cap::SetAttributes>() {
expand!(&mut self.out, cap.as_ref(); 0).unwrap();
} else if let Some(cap) = terminfo.get::<cap::OrigPair>() {
expand!(&mut self.out, cap.as_ref();).unwrap()
}
write!(&mut self.out, "\x02").unwrap();
}
}
impl<'a, W: 'a> Write for TermWriter<'a, W>
where W: Write
{
fn flush(&mut self) -> | Resul | identifier_name |
|
terminal.rs | (unused)]
use super::Color;
pub const TEXT_WHITE: Color = 251;
pub const CYAN: Color = 6;
pub const YELLOW: Color = 3;
pub const RED: Color = 1;
pub const BRIGHT_RED: Color = 9;
pub const BRIGHT_GREEN: Color = 10;
pub const LIGHT_GRAY: Color = 243;
pub const LESS_LIGHT_GRAY: Color = 240;
pub const JUNGLE_GREEN: Color = 112;
pub const ORANGE: Color = 208;
pub const SIGNALING_RED: Color = 196;
}
fn fmt_to_color(fmt: FormatLike) -> Color {
use self::Fo | )]
pub struct Terminal {
column_count: usize,
text_segments: SmallVec<[SmallVec<[TextSegment; 2]>; 2]>,
error_segments: Vec<(&'static str, String)>,
terminfo: Database,
}
impl TerminalPlugin for Terminal {
fn new(column_count: usize) -> Self {
let terminfo = Database::from_env().unwrap();
Terminal {
column_count,
text_segments: Default::default(),
error_segments: Default::default(),
terminfo
}
}
fn add_text_segment(&mut self, text: &str, fmt_args: FormatLike) {
self.text_segments.push(smallvec![TextSegment::new(text, fmt_args)]);
}
fn add_error_segment(&mut self, scope: &'static str, msg: &str) {
self.error_segments.push((scope, msg.into()));
}
fn extend_previous_segment(&mut self, text: &str, fmt_args: FormatLike) {
{
if let Some(last) = self.text_segments.last_mut() {
last.push(TextSegment::new(text, fmt_args));
return;
}
}
self.add_text_segment(text, fmt_args);
}
fn flush_to_stdout(&self, prompt_ending: &str) {
//TODO split into multiple functions
// - one for outputting text segments
// - one for outputting error segments
let layout = self.calculate_layout();
let stdout = io::stdout();
let mut term = self.writer(stdout.lock());
self.render_text_segments(&mut term, layout);
self.render_error_segments(&mut term);
term.fmt(FormatLike::Lines);
write!(term, "{}{}", CORNER_NE, prompt_ending).unwrap();
term.reset_fmt();
term.flush().unwrap();
}
}
impl Terminal {
fn render_text_segments<W>(&self, term: &mut TermWriter<W>, layout: Vec<LineLayout>)
where W: Write
{
let mut first = true;
for LineLayout { segments, join_padding, rem_padding } in layout {
term.fmt(FormatLike::Lines);
if first {
first = false;
write!(term, "{}", CORNER_SE).unwrap();
} else {
write!(term, "{}", CORNER_NSE).unwrap();
}
for segment_group in &self.text_segments[segments] {
for segment in segment_group {
term.fmt(FormatLike::Lines);
write!(term, "{}", TEXT_START).unwrap();
term.fmt(segment.fmt);
write!(term, "{}", &segment.text).unwrap();
term.fmt(FormatLike::Lines);
write!(term, "{}", TEXT_END).unwrap();
}
for _ in 0..join_padding {
write!(term, "{}", LINE).unwrap();
}
}
for _ in 0..rem_padding {
write!(term, "{}", LINE).unwrap();
}
write!(term, "\n").unwrap();
}
}
fn render_error_segments<W>(&self, term: &mut TermWriter<W>)
where W: Write
{
for (scope, text) in self.error_segments.iter() {
term.fmt(FormatLike::Lines);
write!(term, "{}", CORNER_NSE).unwrap();
term.fmt(FormatLike::Error);
let mut text = text.trim();
write!(term, "{} {}: ", ERR_START, scope).unwrap();
let bulk_len = 1 + ERR_START.len() + 1 + scope.len() + 2;
let mut rem_len = self.column_count.checked_sub(bulk_len).unwrap_or(0);
loop {
if text.len() <= rem_len {
term.fmt(FormatLike::Error);
write!(term, "{}", text).unwrap();
break;
} else {
//find split point and split text
let split_idx = find_viable_split_idx(text, rem_len);
let (line_text, new_text) = text.split_at(split_idx);
text = new_text.trim_start();
rem_len = self.column_count - 3;
term.fmt(FormatLike::Error);
write!(term, "{text}", text=line_text.trim_end()).unwrap();
term.fmt(FormatLike::Lines);
write!(term, "\n{sep}", sep=CORNER_NS).unwrap();
for _ in 0..ERR_START.len()+1 {
write!(term, " ").unwrap();
}
}
}
write!(term, "\n").unwrap();
}
}
}
fn find_viable_split_idx(text: &str, max_len: usize) -> usize {
let mut last_split_idx = 0;
let mut last_char_idx = 0;
for (idx, ch) in text.char_indices() {
if idx + ch.len_utf8() > max_len {
break;
}
last_char_idx = idx;
if !(ch.is_alphanumeric() || ch == '.' || ch=='!' || ch==':' || ch=='?') {
last_split_idx = idx;
}
}
if last_split_idx == 0 {
last_char_idx
} else {
last_split_idx
}
}
impl Terminal {
fn writer<W>(&self, out: W) -> TermWriter<W>
where W: Write
{
TermWriter {
terminal: self,
out
}
}
fn calculate_layout(&self) -> Vec<LineLayout> {
// -1 as it starts with a `╠` or similar
let init_rem_space = self.column_count - 1;
let mut lines = Vec::new();
let mut text_segments = self.text_segments.iter().peekable();
let mut idx_offset = 0;
while let Some(line) = calc_next_line_layout(&mut text_segments, init_rem_space, idx_offset) {
idx_offset = line.segments.end;
lines.push(line)
}
lines
}
}
fn calc_next_line_layout<'a>(
iter: &mut Peekable<impl Iterator<Item=impl IntoIterator<Item=&'a TextSegment>+Copy>>,
init_rem_space: usize,
idx_offset: usize
) -> Option<LineLayout> {
let first_seg =
match iter.next() {
Some(seg) => seg,
None => {return None;}
};
let first_item = idx_offset;
let mut after_last_item = idx_offset + 1;
let first_len = calc_min_segment_group_len(first_seg);
if first_len >= init_rem_space {
let segments = first_item..after_last_item;
return Some(LineLayout {
segments,
join_padding: 0,
rem_padding: 0
});
}
let mut rem_space = init_rem_space - first_len;
while let Some(segment_group_iter) = iter.peek().map(|i| *i) {
let min_len = calc_min_segment_group_len(segment_group_iter);
if rem_space > min_len {
rem_space -= min_len;
after_last_item += 1;
iter.next();
} else {
let segments = first_item..after_last_item;
let (join_padding, rem_padding) = calc_padding(first_item, after_last_item, rem_space);
return Some(LineLayout { segments, join_padding, rem_padding })
}
}
let segments = first_item..after_last_item;
let (join_padding, rem_padding) = calc_padding(first_item, after_last_item, rem_space);
Some(LineLayout { segments, join_padding, rem_padding })
}
fn calc_padding(
first_item: usize,
after_last_item: usize,
rem_space: usize
) -> (usize, usize) {
let nr_items = after_last_item - first_item;
let join_padding = rem_space / nr_items;
let join_padding = min(join_padding, config::MAX_JOIN_PADDING);
let rem_padding = rem_space - (join_padding * nr_items);
(join_padding, rem_padding)
}
fn calc_min_segment_group_len<'a>(group: impl IntoIterator<Item=&'a TextSegment>) -> usize {
// +2 as in TEXT_START(char) + TEXT_END(char)
group.into_iter().map(|seg| seg.pre_calculated_length + 2).sum()
}
struct LineLayout {
segments: Range<usize>,
join_padding: usize,
rem_padding: usize
| rmatLike::*;
match fmt {
Text => color::TEXT_WHITE,
PrimaryText => color::JUNGLE_GREEN,
Lines => color::LIGHT_GRAY,
SoftWarning => color::ORANGE,
HardWarning => color::SIGNALING_RED,
Error => color::RED,
ExplicitOk => color::BRIGHT_GREEN,
Hidden => color::LESS_LIGHT_GRAY
}
}
#[derive(Debug | identifier_body |
terminal.rs | (unused)]
use super::Color;
pub const TEXT_WHITE: Color = 251;
pub const CYAN: Color = 6;
pub const YELLOW: Color = 3;
pub const RED: Color = 1;
pub const BRIGHT_RED: Color = 9;
pub const BRIGHT_GREEN: Color = 10;
pub const LIGHT_GRAY: Color = 243;
pub const LESS_LIGHT_GRAY: Color = 240;
pub const JUNGLE_GREEN: Color = 112;
pub const ORANGE: Color = 208;
pub const SIGNALING_RED: Color = 196;
}
fn fmt_to_color(fmt: FormatLike) -> Color {
use self::FormatLike::*;
match fmt {
Text => color::TEXT_WHITE,
PrimaryText => color::JUNGLE_GREEN,
Lines => color::LIGHT_GRAY,
SoftWarning => color::ORANGE,
HardWarning => color::SIGNALING_RED,
Error => color::RED,
ExplicitOk => color::BRIGHT_GREEN,
Hidden => color::LESS_LIGHT_GRAY
}
}
#[derive(Debug)]
pub struct Terminal {
column_count: usize,
text_segments: SmallVec<[SmallVec<[TextSegment; 2]>; 2]>,
error_segments: Vec<(&'static str, String)>,
terminfo: Database,
}
impl TerminalPlugin for Terminal {
fn new(column_count: usize) -> Self {
let terminfo = Database::from_env().unwrap();
Terminal {
column_count,
text_segments: Default::default(),
error_segments: Default::default(),
terminfo
}
}
fn add_text_segment(&mut self, text: &str, fmt_args: FormatLike) {
self.text_segments.push(smallvec![TextSegment::new(text, fmt_args)]);
}
fn add_error_segment(&mut self, scope: &'static str, msg: &str) {
self.error_segments.push((scope, msg.into()));
}
fn extend_previous_segment(&mut self, text: &str, fmt_args: FormatLike) {
{
if let Some(last) = self.text_segments.last_mut() {
last.push(TextSegment::new(text, fmt_args));
return; | self.add_text_segment(text, fmt_args);
}
fn flush_to_stdout(&self, prompt_ending: &str) {
//TODO split into multiple functions
// - one for outputting text segments
// - one for outputting error segments
let layout = self.calculate_layout();
let stdout = io::stdout();
let mut term = self.writer(stdout.lock());
self.render_text_segments(&mut term, layout);
self.render_error_segments(&mut term);
term.fmt(FormatLike::Lines);
write!(term, "{}{}", CORNER_NE, prompt_ending).unwrap();
term.reset_fmt();
term.flush().unwrap();
}
}
impl Terminal {
fn render_text_segments<W>(&self, term: &mut TermWriter<W>, layout: Vec<LineLayout>)
where W: Write
{
let mut first = true;
for LineLayout { segments, join_padding, rem_padding } in layout {
term.fmt(FormatLike::Lines);
if first {
first = false;
write!(term, "{}", CORNER_SE).unwrap();
} else {
write!(term, "{}", CORNER_NSE).unwrap();
}
for segment_group in &self.text_segments[segments] {
for segment in segment_group {
term.fmt(FormatLike::Lines);
write!(term, "{}", TEXT_START).unwrap();
term.fmt(segment.fmt);
write!(term, "{}", &segment.text).unwrap();
term.fmt(FormatLike::Lines);
write!(term, "{}", TEXT_END).unwrap();
}
for _ in 0..join_padding {
write!(term, "{}", LINE).unwrap();
}
}
for _ in 0..rem_padding {
write!(term, "{}", LINE).unwrap();
}
write!(term, "\n").unwrap();
}
}
fn render_error_segments<W>(&self, term: &mut TermWriter<W>)
where W: Write
{
for (scope, text) in self.error_segments.iter() {
term.fmt(FormatLike::Lines);
write!(term, "{}", CORNER_NSE).unwrap();
term.fmt(FormatLike::Error);
let mut text = text.trim();
write!(term, "{} {}: ", ERR_START, scope).unwrap();
let bulk_len = 1 + ERR_START.len() + 1 + scope.len() + 2;
let mut rem_len = self.column_count.checked_sub(bulk_len).unwrap_or(0);
loop {
if text.len() <= rem_len {
term.fmt(FormatLike::Error);
write!(term, "{}", text).unwrap();
break;
} else {
//find split point and split text
let split_idx = find_viable_split_idx(text, rem_len);
let (line_text, new_text) = text.split_at(split_idx);
text = new_text.trim_start();
rem_len = self.column_count - 3;
term.fmt(FormatLike::Error);
write!(term, "{text}", text=line_text.trim_end()).unwrap();
term.fmt(FormatLike::Lines);
write!(term, "\n{sep}", sep=CORNER_NS).unwrap();
for _ in 0..ERR_START.len()+1 {
write!(term, " ").unwrap();
}
}
}
write!(term, "\n").unwrap();
}
}
}
fn find_viable_split_idx(text: &str, max_len: usize) -> usize {
let mut last_split_idx = 0;
let mut last_char_idx = 0;
for (idx, ch) in text.char_indices() {
if idx + ch.len_utf8() > max_len {
break;
}
last_char_idx = idx;
if !(ch.is_alphanumeric() || ch == '.' || ch=='!' || ch==':' || ch=='?') {
last_split_idx = idx;
}
}
if last_split_idx == 0 {
last_char_idx
} else {
last_split_idx
}
}
impl Terminal {
fn writer<W>(&self, out: W) -> TermWriter<W>
where W: Write
{
TermWriter {
terminal: self,
out
}
}
fn calculate_layout(&self) -> Vec<LineLayout> {
// -1 as it starts with a `╠` or similar
let init_rem_space = self.column_count - 1;
let mut lines = Vec::new();
let mut text_segments = self.text_segments.iter().peekable();
let mut idx_offset = 0;
while let Some(line) = calc_next_line_layout(&mut text_segments, init_rem_space, idx_offset) {
idx_offset = line.segments.end;
lines.push(line)
}
lines
}
}
fn calc_next_line_layout<'a>(
iter: &mut Peekable<impl Iterator<Item=impl IntoIterator<Item=&'a TextSegment>+Copy>>,
init_rem_space: usize,
idx_offset: usize
) -> Option<LineLayout> {
let first_seg =
match iter.next() {
Some(seg) => seg,
None => {return None;}
};
let first_item = idx_offset;
let mut after_last_item = idx_offset + 1;
let first_len = calc_min_segment_group_len(first_seg);
if first_len >= init_rem_space {
let segments = first_item..after_last_item;
return Some(LineLayout {
segments,
join_padding: 0,
rem_padding: 0
});
}
let mut rem_space = init_rem_space - first_len;
while let Some(segment_group_iter) = iter.peek().map(|i| *i) {
let min_len = calc_min_segment_group_len(segment_group_iter);
if rem_space > min_len {
rem_space -= min_len;
after_last_item += 1;
iter.next();
} else {
let segments = first_item..after_last_item;
let (join_padding, rem_padding) = calc_padding(first_item, after_last_item, rem_space);
return Some(LineLayout { segments, join_padding, rem_padding })
}
}
let segments = first_item..after_last_item;
let (join_padding, rem_padding) = calc_padding(first_item, after_last_item, rem_space);
Some(LineLayout { segments, join_padding, rem_padding })
}
fn calc_padding(
first_item: usize,
after_last_item: usize,
rem_space: usize
) -> (usize, usize) {
let nr_items = after_last_item - first_item;
let join_padding = rem_space / nr_items;
let join_padding = min(join_padding, config::MAX_JOIN_PADDING);
let rem_padding = rem_space - (join_padding * nr_items);
(join_padding, rem_padding)
}
fn calc_min_segment_group_len<'a>(group: impl IntoIterator<Item=&'a TextSegment>) -> usize {
// +2 as in TEXT_START(char) + TEXT_END(char)
group.into_iter().map(|seg| seg.pre_calculated_length + 2).sum()
}
struct LineLayout {
segments: Range<usize>,
join_padding: usize,
rem_padding: usize
}
struct | }
} | random_line_split |
mock.rs | Fixed,
FromGenericPair,
};
use currencies::BasicCurrencyAdapter;
use frame_support::traits::GenesisBuild;
use frame_support::weights::Weight;
use frame_support::{construct_runtime, parameter_types};
use frame_system;
use hex_literal::hex;
use permissions::Scope;
use sp_core::H256;
use sp_runtime::testing::Header;
use sp_runtime::traits::{BlakeTwo256, IdentityLookup, Zero};
use sp_runtime::{AccountId32, Perbill};
pub type AccountId = AccountId32;
pub type BlockNumber = u64;
pub type Amount = i128;
pub type AssetId = common::AssetId32<common::PredefinedAssetId>;
pub type TechAccountId = common::TechAccountId<AccountId, TechAssetId, DEXId>;
type TechAssetId = common::TechAssetId<common::PredefinedAssetId>;
type DEXId = common::DEXId;
type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Runtime>;
type Block = frame_system::mocking::MockBlock<Runtime>;
pub fn alice() -> AccountId {
AccountId32::from([1u8; 32])
}
pub fn fees_account_a() -> AccountId {
AccountId32::from([2u8; 32])
}
pub fn fees_account_b() -> AccountId {
AccountId32::from([3u8; 32])
}
pub fn liquidity_provider_a() -> AccountId {
AccountId32::from([4u8; 32])
}
pub fn liquidity_provider_b() -> AccountId |
pub fn liquidity_provider_c() -> AccountId {
AccountId32::from([6u8; 32])
}
pub const DEX_A_ID: DEXId = common::DEXId::Polkaswap;
parameter_types! {
pub GetBaseAssetId: AssetId = common::XOR.into();
pub GetIncentiveAssetId: AssetId = common::PSWAP.into();
pub const PoolTokenAId: AssetId = common::AssetId32::from_bytes(hex!("0211110000000000000000000000000000000000000000000000000000000000"));
pub const PoolTokenBId: AssetId = common::AssetId32::from_bytes(hex!("0222220000000000000000000000000000000000000000000000000000000000"));
pub const BlockHashCount: u64 = 250;
pub const MaximumBlockWeight: Weight = 1024;
pub const MaximumBlockLength: u32 = 2 * 1024;
pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75);
pub const GetDefaultFee: u16 = 30;
pub const GetDefaultProtocolFee: u16 = 0;
pub GetPswapDistributionTechAccountId: TechAccountId = {
let tech_account_id = TechAccountId::from_generic_pair(
crate::TECH_ACCOUNT_PREFIX.to_vec(),
crate::TECH_ACCOUNT_MAIN.to_vec(),
);
tech_account_id
};
pub GetPswapDistributionAccountId: AccountId = {
let tech_account_id = GetPswapDistributionTechAccountId::get();
let account_id =
technical::Module::<Runtime>::tech_account_id_to_account_id(&tech_account_id)
.expect("Failed to get ordinary account id for technical account id.");
account_id
};
pub const GetDefaultSubscriptionFrequency: BlockNumber = 10;
pub const GetBurnUpdateFrequency: BlockNumber = 3;
pub const ExistentialDeposit: u128 = 0;
pub const TransferFee: u128 = 0;
pub const CreationFee: u128 = 0;
pub const TransactionByteFee: u128 = 1;
pub GetFee: Fixed = fixed_from_basis_points(30u16);
pub GetParliamentAccountId: AccountId = AccountId32::from([7u8; 32]);
}
construct_runtime! {
pub enum Runtime where
Block = Block,
NodeBlock = Block,
UncheckedExtrinsic = UncheckedExtrinsic,
{
System: frame_system::{Module, Call, Config, Storage, Event<T>},
PswapDistribution: pswap_distribution::{Module, Call, Config<T>, Storage, Event<T>},
Tokens: tokens::{Module, Call, Config<T>, Storage, Event<T>},
Permissions: permissions::{Module, Call, Config<T>, Storage, Event<T>},
Currencies: currencies::{Module, Call, Storage, Event<T>},
Assets: assets::{Module, Call, Config<T>, Storage, Event<T>},
Balances: pallet_balances::{Module, Call, Config<T>, Storage, Event<T>},
Technical: technical::{Module, Call, Storage, Event<T>},
DexManager: dex_manager::{Module, Call, Storage},
}
}
impl frame_system::Config for Runtime {
type BaseCallFilter = ();
type BlockWeights = ();
type BlockLength = ();
type Origin = Origin;
type Call = Call;
type Index = u64;
type BlockNumber = u64;
type Hash = H256;
type Hashing = BlakeTwo256;
type AccountId = AccountId;
type Lookup = IdentityLookup<Self::AccountId>;
type Header = Header;
type Event = Event;
type BlockHashCount = BlockHashCount;
type DbWeight = ();
type Version = ();
type AccountData = pallet_balances::AccountData<Balance>;
type OnNewAccount = ();
type OnKilledAccount = ();
type SystemWeightInfo = ();
type PalletInfo = PalletInfo;
type SS58Prefix = ();
}
impl Config for Runtime {
type Event = Event;
type GetIncentiveAssetId = GetIncentiveAssetId;
type LiquidityProxy = ();
type CompatBalance = Balance;
type GetDefaultSubscriptionFrequency = GetDefaultSubscriptionFrequency;
type GetBurnUpdateFrequency = GetBurnUpdateFrequency;
type GetTechnicalAccountId = GetPswapDistributionAccountId;
type EnsureDEXManager = DexManager;
type OnPswapBurnedAggregator = ();
type WeightInfo = ();
type GetParliamentAccountId = GetParliamentAccountId;
}
impl tokens::Config for Runtime {
type Event = Event;
type Balance = Balance;
type Amount = Amount;
type CurrencyId = <Runtime as assets::Config>::AssetId;
type WeightInfo = ();
type ExistentialDeposits = ExistentialDeposits;
type OnDust = ();
}
impl permissions::Config for Runtime {
type Event = Event;
}
impl currencies::Config for Runtime {
type Event = Event;
type MultiCurrency = Tokens;
type NativeCurrency =
BasicCurrencyAdapter<Runtime, pallet_balances::Module<Runtime>, Amount, BlockNumber>;
type GetNativeCurrencyId = <Runtime as assets::Config>::GetBaseAssetId;
type WeightInfo = ();
}
impl assets::Config for Runtime {
type Event = Event;
type ExtraAccountId = [u8; 32];
type ExtraAssetRecordArg =
common::AssetIdExtraAssetRecordArg<common::DEXId, common::LiquiditySourceType, [u8; 32]>;
type AssetId = AssetId;
type GetBaseAssetId = GetBaseAssetId;
type Currency = currencies::Module<Runtime>;
type WeightInfo = ();
}
impl common::Config for Runtime {
type DEXId = DEXId;
type LstId = common::LiquiditySourceType;
}
impl pallet_balances::Config for Runtime {
type Balance = Balance;
type Event = Event;
type DustRemoval = ();
type ExistentialDeposit = ExistentialDeposit;
type AccountStore = System;
type WeightInfo = ();
type MaxLocks = ();
}
impl technical::Config for Runtime {
type Event = Event;
type TechAssetId = TechAssetId;
type TechAccountId = TechAccountId;
type Trigger = ();
type Condition = ();
type SwapAction = ();
type WeightInfo = ();
}
impl dex_manager::Config for Runtime {}
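// Test-externalities builder: collects the initial balances, assets, permissions
// and fee-account subscriptions that are later assimilated into genesis storage.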
pub struct ExtBuilder {
endowed_accounts: Vec<(AccountId, AssetId, Balance)>,
endowed_assets: Vec<(
AssetId,
AccountId,
AssetSymbol,
AssetName,
BalancePrecision,
Balance,
bool,
)>,
initial_permission_owners: Vec<(u32, Scope, Vec<AccountId>)>,
initial_permissions: Vec<(AccountId, Scope, Vec<u32>)>,
subscribed_accounts: Vec<(AccountId, (DEXId, AssetId, BlockNumber, BlockNumber))>,
burn_info: (Fixed, Fixed, Fixed),
}
impl ExtBuilder {
pub fn uninitialized() -> Self {
Self {
endowed_accounts: Vec::new(),
endowed_assets: vec![(
PoolTokenAId::get(),
alice(),
| {
AccountId32::from([5u8; 32])
} | identifier_body |
mock.rs | AccountId = AccountId32;
pub type BlockNumber = u64;
pub type Amount = i128;
pub type AssetId = common::AssetId32<common::PredefinedAssetId>;
pub type TechAccountId = common::TechAccountId<AccountId, TechAssetId, DEXId>;
type TechAssetId = common::TechAssetId<common::PredefinedAssetId>;
type DEXId = common::DEXId;
type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Runtime>;
type Block = frame_system::mocking::MockBlock<Runtime>;
pub fn alice() -> AccountId {
AccountId32::from([1u8; 32])
}
pub fn fees_account_a() -> AccountId {
AccountId32::from([2u8; 32])
}
pub fn fees_account_b() -> AccountId {
AccountId32::from([3u8; 32])
}
pub fn liquidity_provider_a() -> AccountId {
AccountId32::from([4u8; 32])
}
pub fn liquidity_provider_b() -> AccountId {
AccountId32::from([5u8; 32])
}
pub fn liquidity_provider_c() -> AccountId {
AccountId32::from([6u8; 32])
}
pub const DEX_A_ID: DEXId = common::DEXId::Polkaswap;
parameter_types! {
pub GetBaseAssetId: AssetId = common::XOR.into();
pub GetIncentiveAssetId: AssetId = common::PSWAP.into();
pub const PoolTokenAId: AssetId = common::AssetId32::from_bytes(hex!("0211110000000000000000000000000000000000000000000000000000000000"));
pub const PoolTokenBId: AssetId = common::AssetId32::from_bytes(hex!("0222220000000000000000000000000000000000000000000000000000000000"));
pub const BlockHashCount: u64 = 250;
pub const MaximumBlockWeight: Weight = 1024;
pub const MaximumBlockLength: u32 = 2 * 1024;
pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75);
pub const GetDefaultFee: u16 = 30;
pub const GetDefaultProtocolFee: u16 = 0;
pub GetPswapDistributionTechAccountId: TechAccountId = {
let tech_account_id = TechAccountId::from_generic_pair(
crate::TECH_ACCOUNT_PREFIX.to_vec(),
crate::TECH_ACCOUNT_MAIN.to_vec(),
);
tech_account_id
};
pub GetPswapDistributionAccountId: AccountId = {
let tech_account_id = GetPswapDistributionTechAccountId::get();
let account_id =
technical::Module::<Runtime>::tech_account_id_to_account_id(&tech_account_id)
.expect("Failed to get ordinary account id for technical account id.");
account_id
};
pub const GetDefaultSubscriptionFrequency: BlockNumber = 10;
pub const GetBurnUpdateFrequency: BlockNumber = 3;
pub const ExistentialDeposit: u128 = 0;
pub const TransferFee: u128 = 0;
pub const CreationFee: u128 = 0;
pub const TransactionByteFee: u128 = 1;
pub GetFee: Fixed = fixed_from_basis_points(30u16);
pub GetParliamentAccountId: AccountId = AccountId32::from([7u8; 32]);
}
construct_runtime! {
pub enum Runtime where
Block = Block,
NodeBlock = Block,
UncheckedExtrinsic = UncheckedExtrinsic,
{
System: frame_system::{Module, Call, Config, Storage, Event<T>},
PswapDistribution: pswap_distribution::{Module, Call, Config<T>, Storage, Event<T>},
Tokens: tokens::{Module, Call, Config<T>, Storage, Event<T>},
Permissions: permissions::{Module, Call, Config<T>, Storage, Event<T>},
Currencies: currencies::{Module, Call, Storage, Event<T>},
Assets: assets::{Module, Call, Config<T>, Storage, Event<T>},
Balances: pallet_balances::{Module, Call, Config<T>, Storage, Event<T>},
Technical: technical::{Module, Call, Storage, Event<T>},
DexManager: dex_manager::{Module, Call, Storage},
}
}
impl frame_system::Config for Runtime {
type BaseCallFilter = ();
type BlockWeights = ();
type BlockLength = ();
type Origin = Origin;
type Call = Call;
type Index = u64;
type BlockNumber = u64;
type Hash = H256;
type Hashing = BlakeTwo256;
type AccountId = AccountId;
type Lookup = IdentityLookup<Self::AccountId>;
type Header = Header;
type Event = Event;
type BlockHashCount = BlockHashCount;
type DbWeight = ();
type Version = ();
type AccountData = pallet_balances::AccountData<Balance>;
type OnNewAccount = ();
type OnKilledAccount = ();
type SystemWeightInfo = ();
type PalletInfo = PalletInfo;
type SS58Prefix = ();
}
impl Config for Runtime {
type Event = Event;
type GetIncentiveAssetId = GetIncentiveAssetId;
type LiquidityProxy = ();
type CompatBalance = Balance;
type GetDefaultSubscriptionFrequency = GetDefaultSubscriptionFrequency;
type GetBurnUpdateFrequency = GetBurnUpdateFrequency;
type GetTechnicalAccountId = GetPswapDistributionAccountId;
type EnsureDEXManager = DexManager;
type OnPswapBurnedAggregator = ();
type WeightInfo = ();
type GetParliamentAccountId = GetParliamentAccountId;
}
impl tokens::Config for Runtime {
type Event = Event;
type Balance = Balance;
type Amount = Amount;
type CurrencyId = <Runtime as assets::Config>::AssetId;
type WeightInfo = ();
type ExistentialDeposits = ExistentialDeposits;
type OnDust = ();
}
impl permissions::Config for Runtime {
type Event = Event;
}
impl currencies::Config for Runtime {
type Event = Event;
type MultiCurrency = Tokens;
type NativeCurrency =
BasicCurrencyAdapter<Runtime, pallet_balances::Module<Runtime>, Amount, BlockNumber>;
type GetNativeCurrencyId = <Runtime as assets::Config>::GetBaseAssetId;
type WeightInfo = ();
}
impl assets::Config for Runtime {
type Event = Event;
type ExtraAccountId = [u8; 32];
type ExtraAssetRecordArg =
common::AssetIdExtraAssetRecordArg<common::DEXId, common::LiquiditySourceType, [u8; 32]>;
type AssetId = AssetId;
type GetBaseAssetId = GetBaseAssetId;
type Currency = currencies::Module<Runtime>;
type WeightInfo = ();
}
impl common::Config for Runtime {
type DEXId = DEXId;
type LstId = common::LiquiditySourceType;
}
impl pallet_balances::Config for Runtime {
type Balance = Balance;
type Event = Event;
type DustRemoval = ();
type ExistentialDeposit = ExistentialDeposit;
type AccountStore = System;
type WeightInfo = ();
type MaxLocks = ();
}
impl technical::Config for Runtime {
type Event = Event;
type TechAssetId = TechAssetId;
type TechAccountId = TechAccountId;
type Trigger = ();
type Condition = ();
type SwapAction = ();
type WeightInfo = ();
}
impl dex_manager::Config for Runtime {}
pub struct ExtBuilder {
endowed_accounts: Vec<(AccountId, AssetId, Balance)>,
endowed_assets: Vec<(
AssetId,
AccountId,
AssetSymbol,
AssetName,
BalancePrecision,
Balance,
bool,
)>,
initial_permission_owners: Vec<(u32, Scope, Vec<AccountId>)>,
initial_permissions: Vec<(AccountId, Scope, Vec<u32>)>,
subscribed_accounts: Vec<(AccountId, (DEXId, AssetId, BlockNumber, BlockNumber))>,
burn_info: (Fixed, Fixed, Fixed),
}
impl ExtBuilder {
pub fn uninitialized() -> Self {
Self {
endowed_accounts: Vec::new(),
endowed_assets: vec![(
PoolTokenAId::get(),
alice(),
AssetSymbol(b"POOL".to_vec()),
AssetName(b"Pool Token".to_vec()),
18,
Balance::from(0u32),
true,
)],
initial_permission_owners: Vec::new(),
initial_permissions: Vec::new(),
subscribed_accounts: Vec::new(),
burn_info: (fixed!(0), fixed!(0.10), fixed!(0.30)),
}
}
}
impl ExtBuilder {
pub fn | with_accounts | identifier_name |
|
mock.rs | : BlockNumber = 10;
pub const GetBurnUpdateFrequency: BlockNumber = 3;
pub const ExistentialDeposit: u128 = 0;
pub const TransferFee: u128 = 0;
pub const CreationFee: u128 = 0;
pub const TransactionByteFee: u128 = 1;
pub GetFee: Fixed = fixed_from_basis_points(30u16);
pub GetParliamentAccountId: AccountId = AccountId32::from([7u8; 32]);
}
construct_runtime! {
pub enum Runtime where
Block = Block,
NodeBlock = Block,
UncheckedExtrinsic = UncheckedExtrinsic,
{
System: frame_system::{Module, Call, Config, Storage, Event<T>},
PswapDistribution: pswap_distribution::{Module, Call, Config<T>, Storage, Event<T>},
Tokens: tokens::{Module, Call, Config<T>, Storage, Event<T>},
Permissions: permissions::{Module, Call, Config<T>, Storage, Event<T>},
Currencies: currencies::{Module, Call, Storage, Event<T>},
Assets: assets::{Module, Call, Config<T>, Storage, Event<T>},
Balances: pallet_balances::{Module, Call, Config<T>, Storage, Event<T>},
Technical: technical::{Module, Call, Storage, Event<T>},
DexManager: dex_manager::{Module, Call, Storage},
}
}
impl frame_system::Config for Runtime {
type BaseCallFilter = ();
type BlockWeights = ();
type BlockLength = ();
type Origin = Origin;
type Call = Call;
type Index = u64;
type BlockNumber = u64;
type Hash = H256;
type Hashing = BlakeTwo256;
type AccountId = AccountId;
type Lookup = IdentityLookup<Self::AccountId>;
type Header = Header;
type Event = Event;
type BlockHashCount = BlockHashCount;
type DbWeight = ();
type Version = ();
type AccountData = pallet_balances::AccountData<Balance>;
type OnNewAccount = ();
type OnKilledAccount = ();
type SystemWeightInfo = ();
type PalletInfo = PalletInfo;
type SS58Prefix = ();
}
impl Config for Runtime {
type Event = Event;
type GetIncentiveAssetId = GetIncentiveAssetId;
type LiquidityProxy = ();
type CompatBalance = Balance;
type GetDefaultSubscriptionFrequency = GetDefaultSubscriptionFrequency;
type GetBurnUpdateFrequency = GetBurnUpdateFrequency;
type GetTechnicalAccountId = GetPswapDistributionAccountId;
type EnsureDEXManager = DexManager;
type OnPswapBurnedAggregator = ();
type WeightInfo = ();
type GetParliamentAccountId = GetParliamentAccountId;
}
impl tokens::Config for Runtime {
type Event = Event;
type Balance = Balance;
type Amount = Amount;
type CurrencyId = <Runtime as assets::Config>::AssetId;
type WeightInfo = ();
type ExistentialDeposits = ExistentialDeposits;
type OnDust = ();
}
impl permissions::Config for Runtime {
type Event = Event;
}
impl currencies::Config for Runtime {
type Event = Event;
type MultiCurrency = Tokens;
type NativeCurrency =
BasicCurrencyAdapter<Runtime, pallet_balances::Module<Runtime>, Amount, BlockNumber>;
type GetNativeCurrencyId = <Runtime as assets::Config>::GetBaseAssetId;
type WeightInfo = ();
}
impl assets::Config for Runtime {
type Event = Event;
type ExtraAccountId = [u8; 32];
type ExtraAssetRecordArg =
common::AssetIdExtraAssetRecordArg<common::DEXId, common::LiquiditySourceType, [u8; 32]>;
type AssetId = AssetId;
type GetBaseAssetId = GetBaseAssetId;
type Currency = currencies::Module<Runtime>;
type WeightInfo = ();
}
impl common::Config for Runtime {
type DEXId = DEXId;
type LstId = common::LiquiditySourceType;
}
impl pallet_balances::Config for Runtime {
type Balance = Balance;
type Event = Event;
type DustRemoval = ();
type ExistentialDeposit = ExistentialDeposit;
type AccountStore = System;
type WeightInfo = ();
type MaxLocks = ();
}
impl technical::Config for Runtime {
type Event = Event;
type TechAssetId = TechAssetId;
type TechAccountId = TechAccountId;
type Trigger = ();
type Condition = ();
type SwapAction = ();
type WeightInfo = ();
}
impl dex_manager::Config for Runtime {}
pub struct ExtBuilder {
endowed_accounts: Vec<(AccountId, AssetId, Balance)>,
endowed_assets: Vec<(
AssetId,
AccountId,
AssetSymbol,
AssetName,
BalancePrecision,
Balance,
bool,
)>,
initial_permission_owners: Vec<(u32, Scope, Vec<AccountId>)>,
initial_permissions: Vec<(AccountId, Scope, Vec<u32>)>,
subscribed_accounts: Vec<(AccountId, (DEXId, AssetId, BlockNumber, BlockNumber))>,
burn_info: (Fixed, Fixed, Fixed),
}
impl ExtBuilder {
pub fn uninitialized() -> Self {
Self {
endowed_accounts: Vec::new(),
endowed_assets: vec![(
PoolTokenAId::get(),
alice(),
AssetSymbol(b"POOL".to_vec()),
AssetName(b"Pool Token".to_vec()),
18,
Balance::from(0u32),
true,
)],
initial_permission_owners: Vec::new(),
initial_permissions: Vec::new(),
subscribed_accounts: Vec::new(),
burn_info: (fixed!(0), fixed!(0.10), fixed!(0.30)),
}
}
}
impl ExtBuilder {
pub fn with_accounts(accounts: Vec<(AccountId, AssetId, Balance)>) -> Self {
let permissioned_account_id = GetPswapDistributionAccountId::get();
Self {
endowed_accounts: accounts,
endowed_assets: vec![
(
common::XOR.into(),
alice(),
AssetSymbol(b"XOR".to_vec()),
AssetName(b"SORA".to_vec()),
18,
Balance::zero(),
true,
),
(
common::PSWAP.into(),
alice(),
AssetSymbol(b"PSWAP".to_vec()),
AssetName(b"Polkaswap".to_vec()),
10,
Balance::zero(),
true,
),
(
PoolTokenAId::get(),
alice(),
AssetSymbol(b"POOLA".to_vec()),
AssetName(b"Pool A".to_vec()),
18,
Balance::zero(),
true,
),
(
PoolTokenBId::get(),
alice(),
AssetSymbol(b"POOLB".to_vec()),
AssetName(b"Pool B".to_vec()),
18,
Balance::zero(),
true,
),
],
initial_permission_owners: vec![],
initial_permissions: vec![(
permissioned_account_id,
Scope::Unlimited,
vec![permissions::MINT, permissions::BURN],
)],
subscribed_accounts: vec![
(fees_account_a(), (DEX_A_ID, PoolTokenAId::get(), 5, 0)),
(fees_account_b(), (DEX_A_ID, PoolTokenBId::get(), 7, 0)),
],
burn_info: (fixed!(0.1), fixed!(0.10), fixed!(0.40)),
}
}
}
impl Default for ExtBuilder {
fn default() -> Self {
ExtBuilder::with_accounts(vec![
(fees_account_a(), common::XOR.into(), balance!(1)),
(fees_account_a(), common::PSWAP.into(), balance!(6)),
(liquidity_provider_a(), PoolTokenAId::get(), balance!(3)),
(liquidity_provider_b(), PoolTokenAId::get(), balance!(2)),
(liquidity_provider_c(), PoolTokenAId::get(), balance!(1)),
(liquidity_provider_a(), PoolTokenBId::get(), balance!(10)),
(liquidity_provider_b(), PoolTokenBId::get(), balance!(10)),
(liquidity_provider_c(), PoolTokenBId::get(), balance!(10)),
])
}
}
impl ExtBuilder {
pub fn build(self) -> sp_io::TestExternalities {
let mut t = SystemConfig::default().build_storage::<Runtime>().unwrap();
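// Give every referenced account a zero native balance exactly once: sort by
// account id and dedup so the balances genesis config accepts the list.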
let mut vec = self
.endowed_accounts
.iter()
.map(|(acc, ..)| (acc.clone(), 0))
.chain(vec![
(alice(), 0),
(fees_account_a(), 0),
(fees_account_b(), 0),
(GetPswapDistributionAccountId::get(), 0),
(GetParliamentAccountId::get(), 0),
])
.collect::<Vec<_>>();
vec.sort_by_key(|x| x.0.clone());
vec.dedup_by(|x, y| x.0 == y.0);
BalancesConfig { balances: vec }
.assimilate_storage(&mut t)
.unwrap();
PermissionsConfig {
initial_permissions: self.initial_permissions, | random_line_split |
||
list_view_items.rs | texts` is empty, or if the number of texts is greater than
/// the number of columns.
pub fn add<S: AsRef<str>>(&self,
texts: &[S], icon_index: Option<u32>) -> WinResult<u32>
{
if texts.is_empty() {
panic!("No texts passed when adding a ListView item.");
}
let mut lvi = LVITEM::default();
lvi.mask = co::LVIF::TEXT | co::LVIF::IMAGE;
lvi.iItem = 0x0fff_ffff; // insert as the last one
lvi.iImage = match icon_index {
Some(idx) => idx as _,
None => -1,
};
let mut wtext = WString::from_str(texts[0].as_ref());
lvi.set_pszText(Some(&mut wtext));
let new_idx = self.hwnd().SendMessage(lvm::InsertItem { lvitem: &lvi })?;
for (idx, text) in texts.iter().skip(1).enumerate() {
self.set_text(new_idx, idx as u32 + 1, text.as_ref())?;
}
Ok(new_idx)
}
/// Retrieves the total number of items by sending an
/// [`LVM_GETITEMCOUNT`](crate::msg::lvm::GetItemCount) message.
pub fn count(&self) -> u32 {
self.hwnd().SendMessage(lvm::GetItemCount {})
}
/// Deletes the items at the given indexes by sending an
/// [`LVM_DELETEITEM`](crate::msg::lvm::DeleteItem) message.
///
/// The indexes are iterated backwards, so the last item will be deleted
/// first.
pub fn delete(&self, item_indexes: &[u32]) -> WinResult<()> {
for idx in item_indexes.iter().rev() {
self.hwnd().SendMessage(lvm::DeleteItem {
index: *idx,
})?;
}
Ok(())
}
/// Deletes all items by sending an
/// [`LVM_DELETEALLITEMS`](crate::msg::lvm::DeleteAllItems) message.
pub fn delete_all(&self) -> WinResult<()> {
self.hwnd().SendMessage(lvm::DeleteAllItems {})
}
/// Deletes the selected items by sending
/// [`LVM_DELETEITEM`](crate::msg::lvm::DeleteItem) messages.
pub fn delete_selected(&self) -> WinResult<()> {
loop {
match self.hwnd().SendMessage(lvm::GetNextItem {
initial_index: None,
relationship: co::LVNI::SELECTED,
}) {
Some(index) => self.hwnd().SendMessage(lvm::DeleteItem { index })?,
None => break,
};
}
Ok(())
}
/// Scrolls the list by sending an
/// [`LVM_ENSUREVISIBLE`](crate::msg::lvm::EnsureVisible) message so that an
/// item is visible in the list.
pub fn ensure_visible(&self, item_index: u32) -> WinResult<()> {
self.hwnd().SendMessage(lvm::EnsureVisible {
index: item_index,
entirely_visible: true,
})
}
/// Searches for an item with the given text, case-insensitive, by sending
/// an [`LVM_FINDITEM`](crate::msg::lvm::FindItem) message.
pub fn find(&self, text: &str) -> Option<u32> {
let mut buf = WString::from_str(text);
let mut lvfi = LVFINDINFO::default();
lvfi.flags = co::LVFI::STRING;
lvfi.set_psz(Some(&mut buf));
self.hwnd().SendMessage(lvm::FindItem {
start_index: None,
lvfindinfo: &mut lvfi,
})
}
/// Retrieves the index of the focused item by sending an
/// [`LVM_GETNEXTITEM`](crate::msg::lvm::GetNextItem) message.
pub fn focused(&self) -> Option<u32> {
self.hwnd().SendMessage(lvm::GetNextItem {
initial_index: None,
relationship: co::LVNI::FOCUSED,
})
}
/// Retrieves the item at the specified position by sending an
/// [`LVM_HITTEST`](crate::msg::lvm::HitTest) message.
pub fn hit_test(&self, info: &mut LVHITTESTINFO) -> Option<u32> {
self.hwnd().SendMessage(lvm::HitTest { info })
}
/// Tells if the item is the focused one by sending an
/// [`LVM_GETITEMSTATE`](crate::msg::lvm::GetItemState) message.
pub fn is_focused(&self, item_index: u32) -> bool {
self.hwnd().SendMessage(lvm::GetItemState {
index: item_index,
mask: co::LVIS::FOCUSED,
}).has(co::LVIS::FOCUSED)
}
/// Tells if the item is selected by sending an
/// [`LVM_GETITEMSTATE`](crate::msg::lvm::GetItemState) message.
pub fn is_selected(&self, item_index: u32) -> bool {
self.hwnd().SendMessage(lvm::GetItemState {
index: item_index,
mask: co::LVIS::SELECTED,
}).has(co::LVIS::SELECTED)
}
/// Tells if the item is currently visible by sending an
/// [`LVM_ISITEMVISIBLE`](crate::msg::lvm::IsItemVisible) message.
pub fn is_visible(&self, item_index: u32) -> bool {
self.hwnd().SendMessage(lvm::IsItemVisible { index: item_index })
}
/// Retrieves the actual index of the unique ID by sending an
/// [`LVM_MAPIDTOINDEX`](crate::msg::lvm::MapIdToIndex) message.
pub fn map_id_to_index(&self, item_id: u32) -> Option<u32> {
self.hwnd().SendMessage(lvm::MapIdToIndex { id: item_id })
}
/// Retrieves a unique ID for the given index by sending an
/// [`LVM_MAPINDEXTOID`](crate::msg::lvm::MapIndexToId) message.
pub fn map_index_to_id(&self, item_index: u32) -> Option<u32> {
self.hwnd().SendMessage(lvm::MapIndexToId { index: item_index })
}
/// Retrieves the bound rectangle of item by sending an
/// [`LVM_GETITEMRECT`](crate::msg::lvm::GetItemRect) message.
pub fn rect(&self, item_index: u32, portion: co::LVIR) -> WinResult<RECT> {
let mut rc = RECT::default();
self.hwnd().SendMessage(lvm::GetItemRect {
index: item_index,
rect: &mut rc,
portion,
})?;
Ok(rc)
}
/// Retrieves the indexes of the selected items by sending
/// [`LVM_GETNEXTITEM`](crate::msg::lvm::GetNextItem) messages.
pub fn selected(&self) -> Vec<u32> {
let mut items = Vec::with_capacity(self.selected_count() as _);
let mut idx = None;
loop {
idx = match self.hwnd().SendMessage(lvm::GetNextItem {
initial_index: idx,
relationship: co::LVNI::SELECTED,
}) {
Some(idx) => {
items.push(idx);
Some(idx)
},
None => break,
};
}
items
}
/// Retrieves the number of selected items by sending an
/// [`LVM_GETSELECTEDCOUNT`](crate::msg::lvm::GetSelectedCount) message.
pub fn selected_count(&self) -> u32 {
self.hwnd().SendMessage(lvm::GetSelectedCount {})
}
/// Sets the focused item by sending an
/// [`LVM_SETITEMSTATE`](crate::msg::lvm::SetItemState) message.
pub fn set_focused(&self, item_index: u32) -> WinResult<()> {
let mut lvi = LVITEM::default();
lvi.stateMask = co::LVIS::FOCUSED;
lvi.state = co::LVIS::FOCUSED;
self.hwnd().SendMessage(lvm::SetItemState {
index: Some(item_index),
lvitem: &lvi,
})
}
/// Sets or removes the selection from the given item indexes by sending
/// [`LVM_SETITEMSTATE`](crate::msg::lvm::SetItemState) messages.
pub fn | (&self,
set: bool, item_indexes: &[u32]) -> WinResult<()>
{
let mut lvi = LVITEM::default();
lvi.stateMask = co::LVIS::SELECTED;
if set { lvi.state = co::LVIS::SELECTED; }
for idx in item_indexes.iter() {
self.hwnd().SendMessage(lvm::SetItemState {
index: Some(*idx),
lvitem: &lvi,
})?;
| set_selected | identifier_name |
list_view_items.rs | { index })?,
None => break,
};
}
Ok(())
}
/// Scrolls the list by sending an
/// [`LVM_ENSUREVISIBLE`](crate::msg::lvm::EnsureVisible) message so that an
/// item is visible in the list.
pub fn ensure_visible(&self, item_index: u32) -> WinResult<()> {
self.hwnd().SendMessage(lvm::EnsureVisible {
index: item_index,
entirely_visible: true,
})
}
/// Searches for an item with the given text, case-insensitive, by sending
/// an [`LVM_FINDITEM`](crate::msg::lvm::FindItem) message.
pub fn find(&self, text: &str) -> Option<u32> {
let mut buf = WString::from_str(text);
let mut lvfi = LVFINDINFO::default();
lvfi.flags = co::LVFI::STRING;
lvfi.set_psz(Some(&mut buf));
self.hwnd().SendMessage(lvm::FindItem {
start_index: None,
lvfindinfo: &mut lvfi,
})
}
/// Retrieves the index of the focused item by sending an
/// [`LVM_GETNEXTITEM`](crate::msg::lvm::GetNextItem) message.
pub fn focused(&self) -> Option<u32> {
self.hwnd().SendMessage(lvm::GetNextItem {
initial_index: None,
relationship: co::LVNI::FOCUSED,
})
}
/// Retrieves the item at the specified position by sending an
/// [`LVM_HITTEST`](crate::msg::lvm::HitTest) message.
pub fn hit_test(&self, info: &mut LVHITTESTINFO) -> Option<u32> {
self.hwnd().SendMessage(lvm::HitTest { info })
}
/// Tells if the item is the focused one by sending an
/// [`LVM_GETITEMSTATE`](crate::msg::lvm::GetItemState) message.
pub fn is_focused(&self, item_index: u32) -> bool {
self.hwnd().SendMessage(lvm::GetItemState {
index: item_index,
mask: co::LVIS::FOCUSED,
}).has(co::LVIS::FOCUSED)
}
/// Tells if the item is selected by sending an
/// [`LVM_GETITEMSTATE`](crate::msg::lvm::GetItemState) message.
pub fn is_selected(&self, item_index: u32) -> bool {
self.hwnd().SendMessage(lvm::GetItemState {
index: item_index,
mask: co::LVIS::SELECTED,
}).has(co::LVIS::SELECTED)
}
/// Tells if the item is currently visible by sending an
/// [`LVM_ISITEMVISIBLE`](crate::msg::lvm::IsItemVisible) message.
pub fn is_visible(&self, item_index: u32) -> bool {
self.hwnd().SendMessage(lvm::IsItemVisible { index: item_index })
}
/// Retrieves the actual index of the unique ID by sending an
/// [`LVM_MAPIDTOINDEX`](crate::msg::lvm::MapIdToIndex) message.
pub fn map_id_to_index(&self, item_id: u32) -> Option<u32> {
self.hwnd().SendMessage(lvm::MapIdToIndex { id: item_id })
}
/// Retrieves a unique ID for the given index by sending an
/// [`LVM_MAPINDEXTOID`](crate::msg::lvm::MapIndexToId) message.
pub fn map_index_to_id(&self, item_index: u32) -> Option<u32> {
self.hwnd().SendMessage(lvm::MapIndexToId { index: item_index })
}
/// Retrieves the bound rectangle of item by sending an
/// [`LVM_GETITEMRECT`](crate::msg::lvm::GetItemRect) message.
pub fn rect(&self, item_index: u32, portion: co::LVIR) -> WinResult<RECT> {
let mut rc = RECT::default();
self.hwnd().SendMessage(lvm::GetItemRect {
index: item_index,
rect: &mut rc,
portion,
})?;
Ok(rc)
}
/// Retrieves the indexes of the selected items by sending
/// [`LVM_GETNEXTITEM`](crate::msg::lvm::GetNextItem) messages.
pub fn selected(&self) -> Vec<u32> {
let mut items = Vec::with_capacity(self.selected_count() as _);
let mut idx = None;
loop {
idx = match self.hwnd().SendMessage(lvm::GetNextItem {
initial_index: idx,
relationship: co::LVNI::SELECTED,
}) {
Some(idx) => {
items.push(idx);
Some(idx)
},
None => break,
};
}
items
}
/// Retrieves the number of selected items by sending an
/// [`LVM_GETSELECTEDCOUNT`](crate::msg::lvm::GetSelectedCount) message.
pub fn selected_count(&self) -> u32 {
self.hwnd().SendMessage(lvm::GetSelectedCount {})
}
/// Sets the focused item by sending an
/// [`LVM_SETITEMSTATE`](crate::msg::lvm::SetItemState) message.
pub fn set_focused(&self, item_index: u32) -> WinResult<()> {
let mut lvi = LVITEM::default();
lvi.stateMask = co::LVIS::FOCUSED;
lvi.state = co::LVIS::FOCUSED;
self.hwnd().SendMessage(lvm::SetItemState {
index: Some(item_index),
lvitem: &lvi,
})
}
/// Sets or removes the selection from the given item indexes by sending
/// [`LVM_SETITEMSTATE`](crate::msg::lvm::SetItemState) messages.
pub fn set_selected(&self,
set: bool, item_indexes: &[u32]) -> WinResult<()>
{
let mut lvi = LVITEM::default();
lvi.stateMask = co::LVIS::SELECTED;
if set { lvi.state = co::LVIS::SELECTED; }
for idx in item_indexes.iter() {
self.hwnd().SendMessage(lvm::SetItemState {
index: Some(*idx),
lvitem: &lvi,
})?;
}
Ok(())
}
/// Sets or removes the selection for all items by sending an
/// [`LVM_SETITEMSTATE`](crate::msg::lvm::SetItemState) message.
pub fn set_selected_all(&self, set: bool) -> WinResult<()> {
let mut lvi = LVITEM::default();
lvi.stateMask = co::LVIS::SELECTED;
if set { lvi.state = co::LVIS::SELECTED; }
self.hwnd().SendMessage(lvm::SetItemState {
index: None,
lvitem: &lvi,
})
}
/// Sets the text of an item under a column by sending an
/// [`LVM_SETITEMTEXT`](crate::msg::lvm::SetItemText) message.
pub fn set_text(&self,
item_index: u32, column_index: u32, text: &str) -> WinResult<()>
{
let mut lvi = LVITEM::default();
lvi.iSubItem = column_index as _;
let mut wtext = WString::from_str(text);
lvi.set_pszText(Some(&mut wtext));
self.hwnd().SendMessage(lvm::SetItemText {
index: item_index,
lvitem: &lvi,
})
}
/// Retrieves the text of an item under a column by sending an
/// [`LVM_GETITEMTEXT`](crate::msg::lvm::GetItemText) message.
///
/// The passed buffer will be automatically allocated.
///
/// This method can be more performant than
/// [`text_str`](crate::gui::ListViewItems::text_str) because the buffer can be
/// reused, avoiding multiple allocations. However, it has the inconvenient
/// of the manual conversion from [`WString`](crate::WString) to `String`.
///
/// # Examples
///
/// ```rust,ignore
/// use winsafe::{gui, WString};
///
/// let my_list: gui::ListView; // initialized somewhere
///
/// let mut buf = WString::default();
/// my_list.items().text(0, 2, &mut buf); // 1st item, 3rd column
///
/// println!("Text: {}", buf.to_string());
/// ```
pub fn text(&self, item_index: u32, column_index: u32, buf: &mut WString) {
Self::text_retrieve(self.hwnd(), item_index, column_index, buf)
}
pub(in crate::gui::native_controls) fn text_retrieve(
hwnd: HWND, item_index: u32, column_index: u32, buf: &mut WString)
| {
| random_line_split |
|
index.js | '),
document.getElementById('step-4-part-1-puzzlespot'),
document.getElementById('step-4-part-2-puzzlepiece-container'),
document.getElementById('step-4-part-2-puzzlespot')
]);
draggables.on('drag', function(el, source) {
// Hide the "drag to start" when the BEGIN puzzle piece is dragged
if (source.id === landing_page_puzzlepiece_container) {
document.getElementById(drag_to_start_story_div).style.opacity = 0;
}
});
draggables.on('cancel', function (el, container, source) {
// Show the "drag to start" when the BEGIN puzzle piece is dragged and dropped outside a dragula container
if (source.id === landing_page_puzzlepiece_container) {
document.getElementById(drag_to_start_story_div).style.opacity = 1;
}
});
draggables.on('drop', function (el, source, target, sibling) {
// console.log("element", el); // The draggable puzzle piece
// console.log("source", source); // The missing puzzle piece div is the source for some reason
// console.log("target", target); // The container holding the puzzle piece is the target for some reason
// Transition to the next page
const isLandingPagePuzzlePiece = source.classList.contains('landing-page-puzzlepiece');
if (isLandingPagePuzzlePiece ||
source.classList.contains('puzzle-piece') ||
source.classList.contains('puzzle-piece-big') &&
source.classList.contains('missing')
) {
// Go to the next page and disable the next button on the following page
setTimeout(() => {
transitionToNextPage();
target.parentElement.querySelector('.next-button')?.removeAttribute('disabled');
}, 400);
// Hide the puzzle piece and show the BEGIN button instead once the user navigates past the landing page
setTimeout(() => {
if (isLandingPagePuzzlePiece) {
document.getElementById('landing-page-nextback-container').style.opacity = 1;
document.getElementById('landing-page-puzzle-grid').style.display = 'none';
}
}, 1500);
}
});
/**
* Give a visual hint to the user by animating puzzle pieces when the user is
* hovering over a missing puzzle piece element.
*/
const puzzlePuzzleEls = Array.from(document.querySelectorAll('.puzzle-piece, .puzzle-piece-big'));
const missingPuzzleEls = Array.from(document.querySelectorAll('.missing'));
for (const missingPuzzleEl of missingPuzzleEls) {
missingPuzzleEl.addEventListener('mouseenter', () => {
for (const puzzleEl of puzzlePuzzleEls) {
if (!puzzleEl.classList.contains('missing')) {
puzzleEl.style.animationDuration = '2s';
puzzleEl.style.animationName = 'pulse';
}
}
});
missingPuzzleEl.addEventListener('mouseleave', () => {
for (const puzzleEl of puzzlePuzzleEls) {
if (!puzzleEl.classList.contains('missing')) {
puzzleEl.style.animationDuration = '6s';
puzzleEl.style.animationName = 'bounce';
}
}
});
}
/**
* Control transitions between pages. The current page is encoded in the URL as an id
* e.g. https://esse-dev.github.io/a-web-monetization-story#page-0
*/
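// e.g. transitionToPage(3) below rewrites the hash to '#page-3'; showing the
// page is then handled by the 'popstate' listener calling showPage(3).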
const pageEls = document.getElementsByClassName('page-container');
let currentPageNum = -1;
let pageElIdCounter = 0;
for (const pageEl of pageEls) {
pageEl.id = `page-${pageElIdCounter}`;
pageElIdCounter++;
}
// The 'popstate' event is triggered when the user navigates to a new URL within the current website.
// For instance, this happens when the user presses the browser back button.
window.addEventListener('popstate', showPageInURL);
// Once website is loaded show current page (to prevent images and fonts from showing up late)
document.fonts.ready.then(showPageInURL);
// Page was getting scrolled halfway between pages when resizing, transitionToPageInURL should
// handle scrolling back to the proper position once the resize happens.
window.addEventListener('resize', () => showPage(currentPageNum));
function showPageInURL() {
// Get the page number encoded in the URL. If there is no page in the URL, default to 0.
const pageInUrl = parseInt(window.location.hash.replace('#page-', '')) || 0;
if (pageInUrl !== currentPageNum) {
const isGoingToPreviousPage = pageInUrl === currentPageNum - 1;
showPage(pageInUrl, isGoingToPreviousPage);
}
}
function transitionToPage(nextPageNum, reverseAnimation = false) {
const currentPageEl = pageEls[currentPageNum];
let delay = 0;
// Get all animated elements in the current page element.
const animatedEls =
currentPageEl.querySelectorAll('.animate-in, .animate-out');
const animatedOutEls =
currentPageEl.querySelectorAll('.animate-out');
const animatedInEls =
currentPageEl.querySelectorAll('.animate-in');
// Hide all animated elements in the current page.
// setTimeout is used so .animate-in elements are hidden AFTER transitioning to the next page.
setTimeout(() => {
for (const animatedEl of Array.from(animatedEls).reverse()) {
const elIsAnimatingOut =
(animatedEl.classList.contains('animate-out') && !reverseAnimation) ||
(animatedEl.classList.contains('animate-in') && reverseAnimation);
if (!elIsAnimatingOut) {
animatedEl.style.transitionDuration = '0s';
animatedEl.style.transitionDelay = '0s';
setTimeout(() => {
animatedEl.style.opacity = 0;
}, 800);
}
if (elIsAnimatingOut) {
animatedEl.style.transitionDuration = '0.2s';
animatedEl.style.transitionDelay = `${delay}s`;
animatedEl.style.opacity = 0;
delay += 0.1;
}
}
}, 10);
// Once all elements in the current page are hidden, show the next page.
const isPageAnimatingOut = (animatedOutEls.length > 0 && !reverseAnimation) ||
(animatedInEls.length > 0 && reverseAnimation);
const totalPageAnimateOutTime = delay*100 + 200;
setTimeout(() => {
window.location.href = '#page-' + nextPageNum;
// Showing the next page is handled by the popstate listener
}, isPageAnimatingOut ? totalPageAnimateOutTime + 400 : 20);
}
const navDotEls = Array.from(document.getElementsByClassName('nav-dot'));
for (let i = 0; i < navDotEls.length; i++) {
const navDotEl = navDotEls[i];
navDotEl.addEventListener('click', () => {
transitionToPage(i, true);
});
}
const MAX_PAGE_NUM = navDotEls.length - 1;
function transitionToNextPage() {
if (currentPageNum < MAX_PAGE_NUM) {
transitionToPage(currentPageNum + 1);
}
}
function | () {
if (currentPageNum > 0) {
transitionToPage(currentPageNum - 1, true);
}
}
// showPage is used by transitionToPage and transitionToPageInURL
// not recommended to be called manually!
function showPage(nextPageNum, reverseAnimation = false) {
currentPageNum = nextPageNum;
const nextPageEl = pageEls[nextPageNum];
nextPageEl.scrollIntoView();
let delay = 0;
const animatedEls = nextPageEl.querySelectorAll('.animate-in, .animate-out');
for (const animatedEl of animatedEls) {
const elIsAnimatingIn =
(animatedEl.classList.contains('animate-in') && !reverseAnimation) ||
(animatedEl.classList.contains('animate-out') && reverseAnimation);
if (!elIsAnimatingIn) {
animatedEl.style.transitionDuration = '0s';
animatedEl.style.transitionDelay = '0s';
}
if (elIsAnimatingIn) {
animatedEl.style.transitionDuration = '0.2s';
animatedEl.style.transitionDelay = `${delay}s`;
}
animatedEl.style.opacity = 1;
delay += 0.1;
}
const navEl = document.getElementsByClassName('nav-dot-container')[0];
// Hide the navigation element on the landing page and the thank you page
if (currentPageNum === 0 || currentPageNum === MAX_PAGE_NUM) {
navEl.style.opacity = 0;
} else {
navEl.style.opacity = 1;
}
const navDogEl = document.getElementById('nav-dog');
const navDotWidth = navEl.offsetWidth / navDotEls.length;
const navDogElOffset = 19; // higher number = move further left
navDogEl.style.left = (navDotWidth/2 + navDotWidth*currentPageNum - navDogElOffset) + 'px';
let navDotCounter = 0;
for (const navDotEl of navDotEls) {
if (navDotCounter <= currentPageNum) {
navDotEl.removeAttribute('disabled');
}
navDotCounter++;
}
if (!nextPageEl.querySelector('.page-light-background')) {
document.getElementById('footer').classList.add('dark-footer');
document.getElementById('footer').classList.remove('light-footer');
} else {
document.getElementById('footer').classList.add('light-footer');
document.getElementById('footer').classList.remove('dark | transitionToPreviousPage | identifier_name |
index.js | '),
document.getElementById('step-4-part-1-puzzlespot'),
document.getElementById('step-4-part-2-puzzlepiece-container'),
document.getElementById('step-4-part-2-puzzlespot')
]);
draggables.on('drag', function(el, source) {
// Hide the "drag to start" when the BEGIN puzzle piece is dragged
if (source.id === landing_page_puzzlepiece_container) {
document.getElementById(drag_to_start_story_div).style.opacity = 0;
}
});
draggables.on('cancel', function (el, container, source) {
// Show the "drag to start" when the BEGIN puzzle piece is dragged and dropped outside a dragula container
if (source.id === landing_page_puzzlepiece_container) {
document.getElementById(drag_to_start_story_div).style.opacity = 1;
}
});
draggables.on('drop', function (el, source, target, sibling) {
// console.log("element", el); // The draggable puzzle piece
// console.log("source", source); // The missing puzzle piece div is the source for some reason
// console.log("target", target); // The container holding the puzzle piece is the target for some reason
// Transition to the next page
const isLandingPagePuzzlePiece = source.classList.contains('landing-page-puzzlepiece');
if (isLandingPagePuzzlePiece ||
source.classList.contains('puzzle-piece') ||
source.classList.contains('puzzle-piece-big') &&
source.classList.contains('missing')
) {
// Go to the next page and disable the next button on the following page
setTimeout(() => {
transitionToNextPage();
target.parentElement.querySelector('.next-button')?.removeAttribute('disabled');
}, 400);
// Hide the puzzle piece and show the BEGIN button instead once the user navigates past the landing page
setTimeout(() => {
if (isLandingPagePuzzlePiece) {
document.getElementById('landing-page-nextback-container').style.opacity = 1;
document.getElementById('landing-page-puzzle-grid').style.display = 'none';
}
}, 1500);
}
});
/**
* Give a visual hint to the user by animating puzzle pieces when the user is
* hovering over a missing puzzle piece element.
*/
const puzzlePuzzleEls = Array.from(document.querySelectorAll('.puzzle-piece, .puzzle-piece-big'));
const missingPuzzleEls = Array.from(document.querySelectorAll('.missing'));
for (const missingPuzzleEl of missingPuzzleEls) {
missingPuzzleEl.addEventListener('mouseenter', () => {
for (const puzzleEl of puzzlePuzzleEls) {
if (!puzzleEl.classList.contains('missing')) {
puzzleEl.style.animationDuration = '2s';
puzzleEl.style.animationName = 'pulse';
}
}
});
missingPuzzleEl.addEventListener('mouseleave', () => {
for (const puzzleEl of puzzlePuzzleEls) {
if (!puzzleEl.classList.contains('missing')) {
puzzleEl.style.animationDuration = '6s';
puzzleEl.style.animationName = 'bounce';
}
}
});
}
/**
* Control transitions between pages. The current page is encoded in the URL as an id
* e.g. https://esse-dev.github.io/a-web-monetization-story#page-0
*/
const pageEls = document.getElementsByClassName('page-container');
let currentPageNum = -1;
| pageEl.id = `page-${pageElIdCounter}`;
pageElIdCounter++;
}
// The 'popstate' event is triggered when the user navigates toa new URL within the current website.
// For instance, this happens when the user presses the browser back button.
window.addEventListener('popstate', showPageInURL);
// Once website is loaded show current page (to prevent images and fonts from showing up late)
document.fonts.ready.then(showPageInURL);
// Page was getting scrolled halfway between pages when resizing, transitionToPageInURL should
// handle scrolling back to the proper position once the resize happens.
window.addEventListener('resize', () => showPage(currentPageNum));
function showPageInURL() {
// Get the page number encoded in the URL. If there is no page in the URL, default to 0.
const pageInUrl = parseInt(window.location.hash.replace('#page-', '')) || 0;
if (pageInUrl !== currentPageNum) {
const isGoingToPreviousPage = pageInUrl === currentPageNum - 1;
showPage(pageInUrl, isGoingToPreviousPage);
}
}
function transitionToPage(nextPageNum, reverseAnimation = false) {
const currentPageEl = pageEls[currentPageNum];
let delay = 0;
// Get all animated elements in the current page element.
const animatedEls =
currentPageEl.querySelectorAll('.animate-in, .animate-out');
const animatedOutEls =
currentPageEl.querySelectorAll('.animate-out');
const animatedInEls =
currentPageEl.querySelectorAll('.animate-in');
// Hide all animated elements in the current page.
// setTimeout is used so .animate-in elements are hidden AFTER transitioning to the next page.
setTimeout(() => {
for (const animatedEl of Array.from(animatedEls).reverse()) {
const elIsAnimatingOut =
(animatedEl.classList.contains('animate-out') && !reverseAnimation) ||
(animatedEl.classList.contains('animate-in') && reverseAnimation);
if (!elIsAnimatingOut) {
animatedEl.style.transitionDuration = '0s';
animatedEl.style.transitionDelay = '0s';
setTimeout(() => {
animatedEl.style.opacity = 0;
}, 800);
}
if (elIsAnimatingOut) {
animatedEl.style.transitionDuration = '0.2s';
animatedEl.style.transitionDelay = `${delay}s`;
animatedEl.style.opacity = 0;
delay += 0.1;
}
}
}, 10);
// Once all elements in the current page are hidden, show the next page.
const isPageAnimatingOut = (animatedOutEls.length > 0 && !reverseAnimation) ||
(animatedInEls.length > 0 && reverseAnimation);
const totalPageAnimateOutTime = delay*100 + 200;
setTimeout(() => {
window.location.href = '#page-' + nextPageNum;
// Showing the next page is handled by the popstate listener
}, isPageAnimatingOut ? totalPageAnimateOutTime + 400 : 20);
}
const navDotEls = Array.from(document.getElementsByClassName('nav-dot'));
for (let i = 0; i < navDotEls.length; i++) {
const navDotEl = navDotEls[i];
navDotEl.addEventListener('click', () => {
transitionToPage(i, true);
});
}
const MAX_PAGE_NUM = navDotEls.length - 1;
function transitionToNextPage() {
if (currentPageNum < MAX_PAGE_NUM) {
transitionToPage(currentPageNum + 1);
}
}
function transitionToPreviousPage() {
if (currentPageNum > 0) {
transitionToPage(currentPageNum - 1, true);
}
}
// showPage is used by transitionToPage and transitionToPageInURL
// not recommended to be called manually!
function showPage(nextPageNum, reverseAnimation = false) {
currentPageNum = nextPageNum;
const nextPageEl = pageEls[nextPageNum];
nextPageEl.scrollIntoView();
let delay = 0;
const animatedEls = nextPageEl.querySelectorAll('.animate-in, .animate-out');
for (const animatedEl of animatedEls) {
const elIsAnimatingIn =
(animatedEl.classList.contains('animate-in') && !reverseAnimation) ||
(animatedEl.classList.contains('animate-out') && reverseAnimation);
if (!elIsAnimatingIn) {
animatedEl.style.transitionDuration = '0s';
animatedEl.style.transitionDelay = '0s';
}
if (elIsAnimatingIn) {
animatedEl.style.transitionDuration = '0.2s';
animatedEl.style.transitionDelay = `${delay}s`;
}
animatedEl.style.opacity = 1;
delay += 0.1;
}
const navEl = document.getElementsByClassName('nav-dot-container')[0];
// Hide the navigation element on the landing page and the thank you page
if (currentPageNum === 0 || currentPageNum === MAX_PAGE_NUM) {
navEl.style.opacity = 0;
} else {
navEl.style.opacity = 1;
}
const navDogEl = document.getElementById('nav-dog');
const navDotWidth = navEl.offsetWidth / navDotEls.length;
const navDogElOffset = 19; // higher number = move further left
navDogEl.style.left = (navDotWidth/2 + navDotWidth*currentPageNum - navDogElOffset) + 'px';
let navDotCounter = 0;
for (const navDotEl of navDotEls) {
if (navDotCounter <= currentPageNum) {
navDotEl.removeAttribute('disabled');
}
navDotCounter++;
}
if (!nextPageEl.querySelector('.page-light-background')) {
document.getElementById('footer').classList.add('dark-footer');
document.getElementById('footer').classList.remove('light-footer');
} else {
document.getElementById('footer').classList.add('light-footer');
document.getElementById('footer').classList.remove('dark-footer | let pageElIdCounter = 0;
for (const pageEl of pageEls) { | random_line_split |
index.js | '),
document.getElementById('step-4-part-1-puzzlespot'),
document.getElementById('step-4-part-2-puzzlepiece-container'),
document.getElementById('step-4-part-2-puzzlespot')
]);
draggables.on('drag', function(el, source) {
// Hide the "drag to start" when the BEGIN puzzle piece is dragged
if (source.id === landing_page_puzzlepiece_container) {
document.getElementById(drag_to_start_story_div).style.opacity = 0;
}
});
draggables.on('cancel', function (el, container, source) {
// Show the "drag to start" when the BEGIN puzzle piece is dragged and dropped outside a dragula container
if (source.id === landing_page_puzzlepiece_container) {
document.getElementById(drag_to_start_story_div).style.opacity = 1;
}
});
draggables.on('drop', function (el, source, target, sibling) {
// console.log("element", el); // The draggable puzzle piece
// console.log("source", source); // The missing puzzle piece div is the source for some reason
// console.log("target", target); // The container holding the puzzle piece is the target for some reason
// Transition to the next page
const isLandingPagePuzzlePiece = source.classList.contains('landing-page-puzzlepiece');
if (isLandingPagePuzzlePiece ||
source.classList.contains('puzzle-piece') ||
source.classList.contains('puzzle-piece-big') &&
source.classList.contains('missing')
) {
// Go to the next page and disable the next button on the following page
setTimeout(() => {
transitionToNextPage();
target.parentElement.querySelector('.next-button')?.removeAttribute('disabled');
}, 400);
// Hide the puzzle piece and show the BEGIN button instead once the user navigates past the landing page
setTimeout(() => {
if (isLandingPagePuzzlePiece) {
document.getElementById('landing-page-nextback-container').style.opacity = 1;
document.getElementById('landing-page-puzzle-grid').style.display = 'none';
}
}, 1500);
}
});
/**
* Give a visual hint to the user by animating puzzle pieces when the user is
* hovering over a missing puzzle piece element.
*/
const puzzlePuzzleEls = Array.from(document.querySelectorAll('.puzzle-piece, .puzzle-piece-big'));
const missingPuzzleEls = Array.from(document.querySelectorAll('.missing'));
for (const missingPuzzleEl of missingPuzzleEls) {
missingPuzzleEl.addEventListener('mouseenter', () => {
for (const puzzleEl of puzzlePuzzleEls) {
if (!puzzleEl.classList.contains('missing')) {
puzzleEl.style.animationDuration = '2s';
puzzleEl.style.animationName = 'pulse';
}
}
});
missingPuzzleEl.addEventListener('mouseleave', () => {
for (const puzzleEl of puzzlePuzzleEls) {
if (!puzzleEl.classList.contains('missing')) {
puzzleEl.style.animationDuration = '6s';
puzzleEl.style.animationName = 'bounce';
}
}
});
}
/**
* Control transitions between pages. The current page is encoded in the URL as an id
* e.g. https://esse-dev.github.io/a-web-monetization-story#page-0
*/
const pageEls = document.getElementsByClassName('page-container');
let currentPageNum = -1;
let pageElIdCounter = 0;
for (const pageEl of pageEls) {
pageEl.id = `page-${pageElIdCounter}`;
pageElIdCounter++;
}
// The 'popstate' event is triggered when the user navigates toa new URL within the current website.
// For instance, this happens when the user presses the browser back button.
window.addEventListener('popstate', showPageInURL);
// Once website is loaded show current page (to prevent images and fonts from showing up late)
document.fonts.ready.then(showPageInURL);
// Page was getting scrolled halfway between pages when resizing, transitionToPageInURL should
// handle scrolling back to the proper position once the resize happens.
window.addEventListener('resize', () => showPage(currentPageNum));
function showPageInURL() {
// Get the page number encoded in the URL. If there is no page in the URL, default to 0.
const pageInUrl = parseInt(window.location.hash.replace('#page-', '')) || 0;
if (pageInUrl !== currentPageNum) {
const isGoingToPreviousPage = pageInUrl === currentPageNum - 1;
showPage(pageInUrl, isGoingToPreviousPage);
}
}
function transitionToPage(nextPageNum, reverseAnimation = false) {
const currentPageEl = pageEls[currentPageNum];
let delay = 0;
// Get all animated elements in the current page element.
const animatedEls =
currentPageEl.querySelectorAll('.animate-in, .animate-out');
const animatedOutEls =
currentPageEl.querySelectorAll('.animate-out');
const animatedInEls =
currentPageEl.querySelectorAll('.animate-in');
// Hide all animated elements in the current page.
// setTimeout is used so .animate-in elements are hidden AFTER transitioning to the next page.
setTimeout(() => {
for (const animatedEl of Array.from(animatedEls).reverse()) {
const elIsAnimatingOut =
(animatedEl.classList.contains('animate-out') && !reverseAnimation) ||
(animatedEl.classList.contains('animate-in') && reverseAnimation);
if (!elIsAnimatingOut) {
animatedEl.style.transitionDuration = '0s';
animatedEl.style.transitionDelay = '0s';
setTimeout(() => {
animatedEl.style.opacity = 0;
}, 800);
}
if (elIsAnimatingOut) {
animatedEl.style.transitionDuration = '0.2s';
animatedEl.style.transitionDelay = `${delay}s`;
animatedEl.style.opacity = 0;
delay += 0.1;
}
}
}, 10);
// Once all elements in the current page are hidden, show the next page.
const isPageAnimatingOut = (animatedOutEls.length > 0 && !reverseAnimation) ||
(animatedInEls.length > 0 && reverseAnimation);
const totalPageAnimateOutTime = delay*100 + 200;
setTimeout(() => {
window.location.href = '#page-' + nextPageNum;
// Showing the next page is handled by the popstate listener
}, isPageAnimatingOut ? totalPageAnimateOutTime + 400 : 20);
}
const navDotEls = Array.from(document.getElementsByClassName('nav-dot'));
for (let i = 0; i < navDotEls.length; i++) {
const navDotEl = navDotEls[i];
navDotEl.addEventListener('click', () => {
transitionToPage(i, true);
});
}
const MAX_PAGE_NUM = navDotEls.length - 1;
function transitionToNextPage() {
if (currentPageNum < MAX_PAGE_NUM) {
transitionToPage(currentPageNum + 1);
}
}
function transitionToPreviousPage() {
if (currentPageNum > 0) {
transitionToPage(currentPageNum - 1, true);
}
}
// showPage is used by transitionToPage and transitionToPageInURL
// not recommended to be called manually!
function showPage(nextPageNum, reverseAnimation = false) | animatedEl.style.transitionDelay = `${delay}s`;
}
animatedEl.style.opacity = 1;
delay += 0.1;
}
const navEl = document.getElementsByClassName('nav-dot-container')[0];
// Hide the navigation element on the landing page and the thank you page
if (currentPageNum === 0 || currentPageNum === MAX_PAGE_NUM) {
navEl.style.opacity = 0;
} else {
navEl.style.opacity = 1;
}
const navDogEl = document.getElementById('nav-dog');
const navDotWidth = navEl.offsetWidth / navDotEls.length;
const navDogElOffset = 19; // higher number = move further left
navDogEl.style.left = (navDotWidth/2 + navDotWidth*currentPageNum - navDogElOffset) + 'px';
let navDotCounter = 0;
for (const navDotEl of navDotEls) {
if (navDotCounter <= currentPageNum) {
navDotEl.removeAttribute('disabled');
}
navDotCounter++;
}
if (!nextPageEl.querySelector('.page-light-background')) {
document.getElementById('footer').classList.add('dark-footer');
document.getElementById('footer').classList.remove('light-footer');
} else {
document.getElementById('footer').classList.add('light-footer');
document.getElementById('footer').classList.remove('dark | {
currentPageNum = nextPageNum;
const nextPageEl = pageEls[nextPageNum];
nextPageEl.scrollIntoView();
let delay = 0;
const animatedEls = nextPageEl.querySelectorAll('.animate-in, .animate-out');
for (const animatedEl of animatedEls) {
const elIsAnimatingIn =
(animatedEl.classList.contains('animate-in') && !reverseAnimation) ||
(animatedEl.classList.contains('animate-out') && reverseAnimation);
if (!elIsAnimatingIn) {
animatedEl.style.transitionDuration = '0s';
animatedEl.style.transitionDelay = '0s';
}
if (elIsAnimatingIn) {
animatedEl.style.transitionDuration = '0.2s'; | identifier_body |
index.js | '),
document.getElementById('step-4-part-1-puzzlespot'),
document.getElementById('step-4-part-2-puzzlepiece-container'),
document.getElementById('step-4-part-2-puzzlespot')
]);
draggables.on('drag', function(el, source) {
// Hide the "drag to start" when the BEGIN puzzle piece is dragged
if (source.id === landing_page_puzzlepiece_container) {
document.getElementById(drag_to_start_story_div).style.opacity = 0;
}
});
draggables.on('cancel', function (el, container, source) {
// Show the "drag to start" when the BEGIN puzzle piece is dragged and dropped outside a dragula container
if (source.id === landing_page_puzzlepiece_container) {
document.getElementById(drag_to_start_story_div).style.opacity = 1;
}
});
draggables.on('drop', function (el, source, target, sibling) {
// console.log("element", el); // The draggable puzzle piece
// console.log("source", source); // The missing puzzle piece div is the source for some reason
// console.log("target", target); // The container holding the puzzle piece is the target for some reason
// Transition to the next page
const isLandingPagePuzzlePiece = source.classList.contains('landing-page-puzzlepiece');
if (isLandingPagePuzzlePiece ||
source.classList.contains('puzzle-piece') ||
source.classList.contains('puzzle-piece-big') &&
source.classList.contains('missing')
) {
// Go to the next page and disable the next button on the following page
setTimeout(() => {
transitionToNextPage();
target.parentElement.querySelector('.next-button')?.removeAttribute('disabled');
}, 400);
// Hide the puzzle piece and show the BEGIN button instead once the user navigates past the landing page
setTimeout(() => {
if (isLandingPagePuzzlePiece) {
document.getElementById('landing-page-nextback-container').style.opacity = 1;
document.getElementById('landing-page-puzzle-grid').style.display = 'none';
}
}, 1500);
}
});
/**
* Give a visual hint to the user by animating puzzle pieces when the user is
* hovering over a missing puzzle piece element.
*/
const puzzlePuzzleEls = Array.from(document.querySelectorAll('.puzzle-piece, .puzzle-piece-big'));
const missingPuzzleEls = Array.from(document.querySelectorAll('.missing'));
for (const missingPuzzleEl of missingPuzzleEls) {
missingPuzzleEl.addEventListener('mouseenter', () => {
for (const puzzleEl of puzzlePuzzleEls) {
if (!puzzleEl.classList.contains('missing')) {
puzzleEl.style.animationDuration = '2s';
puzzleEl.style.animationName = 'pulse';
}
}
});
missingPuzzleEl.addEventListener('mouseleave', () => {
for (const puzzleEl of puzzlePuzzleEls) {
if (!puzzleEl.classList.contains('missing')) {
puzzleEl.style.animationDuration = '6s';
puzzleEl.style.animationName = 'bounce';
}
}
});
}
/**
* Control transitions between pages. The current page is encoded in the URL as an id
* e.g. https://esse-dev.github.io/a-web-monetization-story#page-0
*/
const pageEls = document.getElementsByClassName('page-container');
let currentPageNum = -1;
let pageElIdCounter = 0;
for (const pageEl of pageEls) {
pageEl.id = `page-${pageElIdCounter}`;
pageElIdCounter++;
}
// The 'popstate' event is triggered when the user navigates toa new URL within the current website.
// For instance, this happens when the user presses the browser back button.
window.addEventListener('popstate', showPageInURL);
// Once website is loaded show current page (to prevent images and fonts from showing up late)
document.fonts.ready.then(showPageInURL);
// Page was getting scrolled halfway between pages when resizing, transitionToPageInURL should
// handle scrolling back to the proper position once the resize happens.
window.addEventListener('resize', () => showPage(currentPageNum));
function showPageInURL() {
// Get the page number encoded in the URL. If there is no page in the URL, default to 0.
const pageInUrl = parseInt(window.location.hash.replace('#page-', '')) || 0;
if (pageInUrl !== currentPageNum) {
const isGoingToPreviousPage = pageInUrl === currentPageNum - 1;
showPage(pageInUrl, isGoingToPreviousPage);
}
}
function transitionToPage(nextPageNum, reverseAnimation = false) {
const currentPageEl = pageEls[currentPageNum];
let delay = 0;
// Get all animated elements in the current page element.
const animatedEls =
currentPageEl.querySelectorAll('.animate-in, .animate-out');
const animatedOutEls =
currentPageEl.querySelectorAll('.animate-out');
const animatedInEls =
currentPageEl.querySelectorAll('.animate-in');
// Hide all animated elements in the current page.
// setTimeout is used so .animate-in elements are hidden AFTER transitioning to the next page.
setTimeout(() => {
for (const animatedEl of Array.from(animatedEls).reverse()) {
const elIsAnimatingOut =
(animatedEl.classList.contains('animate-out') && !reverseAnimation) ||
(animatedEl.classList.contains('animate-in') && reverseAnimation);
if (!elIsAnimatingOut) {
animatedEl.style.transitionDuration = '0s';
animatedEl.style.transitionDelay = '0s';
setTimeout(() => {
animatedEl.style.opacity = 0;
}, 800);
}
if (elIsAnimatingOut) {
animatedEl.style.transitionDuration = '0.2s';
animatedEl.style.transitionDelay = `${delay}s`;
animatedEl.style.opacity = 0;
delay += 0.1;
}
}
}, 10);
// Once all elements in the current page are hidden, show the next page.
const isPageAnimatingOut = (animatedOutEls.length > 0 && !reverseAnimation) ||
(animatedInEls.length > 0 && reverseAnimation);
const totalPageAnimateOutTime = delay*100 + 200;
setTimeout(() => {
window.location.href = '#page-' + nextPageNum;
// Showing the next page is handled by the popstate listener
}, isPageAnimatingOut ? totalPageAnimateOutTime + 400 : 20);
}
const navDotEls = Array.from(document.getElementsByClassName('nav-dot'));
for (let i = 0; i < navDotEls.length; i++) {
const navDotEl = navDotEls[i];
navDotEl.addEventListener('click', () => {
transitionToPage(i, true);
});
}
const MAX_PAGE_NUM = navDotEls.length - 1;
function transitionToNextPage() {
if (currentPageNum < MAX_PAGE_NUM) {
transitionToPage(currentPageNum + 1);
}
}
function transitionToPreviousPage() {
if (currentPageNum > 0) {
transitionToPage(currentPageNum - 1, true);
}
}
// showPage is used by transitionToPage and transitionToPageInURL
// not recommended to be called manually!
function showPage(nextPageNum, reverseAnimation = false) {
currentPageNum = nextPageNum;
const nextPageEl = pageEls[nextPageNum];
nextPageEl.scrollIntoView();
let delay = 0;
const animatedEls = nextPageEl.querySelectorAll('.animate-in, .animate-out');
for (const animatedEl of animatedEls) {
const elIsAnimatingIn =
(animatedEl.classList.contains('animate-in') && !reverseAnimation) ||
(animatedEl.classList.contains('animate-out') && reverseAnimation);
if (!elIsAnimatingIn) |
if (elIsAnimatingIn) {
animatedEl.style.transitionDuration = '0.2s';
animatedEl.style.transitionDelay = `${delay}s`;
}
animatedEl.style.opacity = 1;
delay += 0.1;
}
const navEl = document.getElementsByClassName('nav-dot-container')[0];
// Hide the navigation element on the landing page and the thank you page
if (currentPageNum === 0 || currentPageNum === MAX_PAGE_NUM) {
navEl.style.opacity = 0;
} else {
navEl.style.opacity = 1;
}
const navDogEl = document.getElementById('nav-dog');
const navDotWidth = navEl.offsetWidth / navDotEls.length;
const navDogElOffset = 19; // higher number = move further left
navDogEl.style.left = (navDotWidth/2 + navDotWidth*currentPageNum - navDogElOffset) + 'px';
let navDotCounter = 0;
for (const navDotEl of navDotEls) {
if (navDotCounter <= currentPageNum) {
navDotEl.removeAttribute('disabled');
}
navDotCounter++;
}
if (!nextPageEl.querySelector('.page-light-background')) {
document.getElementById('footer').classList.add('dark-footer');
document.getElementById('footer').classList.remove('light-footer');
} else {
document.getElementById('footer').classList.add('light-footer');
document.getElementById('footer').classList.remove(' | {
animatedEl.style.transitionDuration = '0s';
animatedEl.style.transitionDelay = '0s';
} | conditional_block |
lib.rs | *custom_block = MaybeUninit::new(CustomBlockOcamlRep(ops, Rc::clone(&self.0)));
block.build()
}
}
impl<T: CamlSerialize> FromOcamlRep for Custom<T> {
fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> {
let rc = rc_from_value::<T>(value)?;
let rc = Rc::clone(rc);
Ok(Custom::new(rc))
}
}
/// Helper function to fetch a reference to the `Rc` from the OCaml representation
/// of a custom block.
fn rc_from_value<'a, T: CamlSerialize>(value: Value<'a>) -> Result<&'a Rc<T>, FromError> {
let block = from::expect_block(value)?;
from::expect_block_tag(block, CUSTOM_TAG)?;
from::expect_block_size(block, CUSTOM_BLOCK_SIZE_IN_WORDS)?;
// We still don't know whether this block is in fact a
// CustomBlockOcamlRep<T>--it may be a CustomBlockOcamlRep<U>, or some
// other custom block which happens to be the same size. We can verify
// that the block is actually a CustomBlockOcamlRep<T> by checking that
// it points to the correct CustomOperations struct.
let ops = <T as CamlSerialize>::operations();
if !std::ptr::eq(ops, block[0].to_bits() as *const CustomOperations) {
return Err(FromError::UnexpectedCustomOps {
expected: ops as *const _ as usize,
actual: block[0].to_bits(),
});
}
let value_ptr = value.to_bits() as *const CustomBlockOcamlRep<T>;
// Safety: `value_ptr` is guaranteed to be aligned to
// `align_of::<Value>()`, and our use of `expect_block_size` guarantees
// that the pointer is valid for reads of `CUSTOM_BLOCK_SIZE_IN_WORDS *
// `size_of::<Value>()` bytes. Since the first field points to the right
// operations struct, we either have a valid `CustomBlockOCamlRep<T>`
// (i.e., constructed above in our `ToOcamlRep` implementation) or
// someone went out of their way to construct an invalid one. Assume
// it's valid and read in the `CustomBlockOcamlRep<T>`.
let custom_block = unsafe { value_ptr.as_ref().unwrap() };
Ok(&custom_block.1)
}
/// Trait that allows OCaml serialization and deserialization.
///
/// If you want to support serialization/deserialization, you
/// **MUST** call `CamlSerialize::register()` when starting up
/// the program.
///
/// This will register your type in the OCaml runtime, allowing
/// deserialization.
///
/// Rust does not support different instantiations of the default
/// implementation for different implementors of trait types. Therefore,
/// you must implement `type_identifier`, `operations` and `register`
/// manually when implementing this trait for a type. You can use
/// the `caml_serialize_default_impls!()` to do that automatically:
///
/// ```
/// impl CamlSerialize for MyType {
/// caml_serialize_default_impls!();
/// }
/// ```
pub trait CamlSerialize: Sized {
/// Get the type name.
fn type_identifier() -> &'static CStr;
/// Get the type's custom operations struct.
///
/// Always has to return the same reference! If not, the
/// OCaml-to-Rust conversion will fail.
///
/// The returned structure is not intended to be used by
/// a programmer. Using it directly by e.g. injecting it
/// into OCaml custom blocks is dangerous and can cause
/// undefined behavior. Don't do it!
fn operations() -> &'static CustomOperations;
/// Register the type with the OCaml system.
///
/// # Safety
///
/// Must not be called from multiple threads.
///
/// This function interacts with the OCaml runtime, which is not thread-safe.
/// If any other threads are attempting to interact with the OCaml runtime
/// or its custom operations table (e.g., by invoking this function, or by
/// executing OCaml code using custom blocks) when this function is invoked,
/// undefined behavior will result.
///
/// # Examples
///
/// ```
/// use ocamlrep_custom::CamlSerialize;
/// use ocamlrep_ocamlpool::ocaml_ffi;
///
/// struct IntBox(isize);
///
/// impl CamlSerialize for IntBox {
/// caml_serialize_default_impls!();
/// fn serialize(&self) -> Vec<u8> { ... }
/// fn deserialize(buffer: &[u8]) -> Self { ... }
/// }
///
/// ocaml_ffi! {
/// fn register_custom_types() {
/// // Once `register_custom_types` has been invoked from OCaml, IntBox
/// // can be serialized and deserialized from OCaml using the Marshal
/// // module.
/// //
/// // Safety: this will be called from OCaml, as such nothing else will
/// // be interacting with the OCaml runtime.
/// unsafe { IntBox::register() };
/// }
/// }
/// ```
unsafe fn register();
/// Convert a value to an array of bytes.
///
/// The default implementation panics.
fn serialize(&self) -> Vec<u8> {
panic!(
"serialization not implemented for {:?}",
Self::type_identifier()
)
}
/// Deserialize a value form an array of bytes.
///
/// The default implementation panics.
fn deserialize(_data: &[u8]) -> Self {
panic!(
"deserialization not implemented for {:?}",
Self::type_identifier()
)
}
}
#[macro_export]
macro_rules! caml_serialize_default_impls {
() => {
fn type_identifier() -> &'static std::ffi::CStr {
static ONCE: std::sync::Once = std::sync::Once::new();
static mut TYPE_NAME: Option<std::ffi::CString> = None;
ONCE.call_once(|| {
// Safety:
// - We've gated initialization, so it's thread safe.
// - We only set the constant once.
unsafe {
TYPE_NAME = Some($crate::type_identifier_helper::<Self>());
}
});
// Safety:
// - By now the constant has been initialized, and once initialized
// it is never changes.
// - Concurrent reads are OK.
unsafe { TYPE_NAME.as_ref().unwrap() }
}
fn operations() -> &'static $crate::CustomOperations {
static ONCE: std::sync::Once = std::sync::Once::new();
static mut OPS_STRUCT: Option<$crate::CustomOperations> = None;
ONCE.call_once(|| {
// Safety:
// - We've gated initialization, so it's thread safe.
// - We only set the constant once.
unsafe {
OPS_STRUCT = Some($crate::operations_helper::<Self>());
}
});
// Safety:
// - By now the constant has been initialized, and once initialized
// it is never changes.
// - Concurrent reads are OK.
unsafe { OPS_STRUCT.as_ref().unwrap() }
}
unsafe fn register() {
static mut IS_REGISTERED: bool = false;
// Safety: Can only be called in a single-threaded context!
if IS_REGISTERED {
return;
}
IS_REGISTERED = true;
let ops = Self::operations();
$crate::register_helper::<Self>(ops)
}
};
}
/// Helper used for the `caml_serialize_default_impls` macro
pub fn type_identifier_helper<T>() -> CString {
let name = format!("ocamlrep.custom.{}", std::any::type_name::<T>());
std::ffi::CString::new(name).unwrap()
}
/// Helper used for the `caml_serialize_default_impls` macro
pub fn operations_helper<T: CamlSerialize>() -> CustomOperations {
let type_identifier = <T as CamlSerialize>::type_identifier();
let mut ops = CustomOperations::new(type_identifier);
ops.finalize = Some(drop_value::<T>);
ops.serialize = Some(serialize_value::<T>);
ops.deserialize = Some(deserialize_value::<T>);
ops
}
/// Helper used for the `caml_serialize_default_impls` macro
///
/// Should not be used directly. Interacts with the OCaml runtime and is
/// thus unsafe to call in a multi-threaded context.
pub unsafe fn register_helper<T>(ops: &'static CustomOperations) {
// Safety: operations struct has a static lifetime, it will live forever!
caml_register_custom_operations(ops as *const CustomOperations);
}
/// Helper function used by `operations_helper`. Returns a finalizer for custom
/// blocks containing an `Rc<T>`.
extern "C" fn drop_value<T: CamlSerialize>(value: usize) {
let _: usize = catch_unwind(|| { | // Safety: We trust here that CustomOperations structs containing this
// `drop_value` instance will only ever be referenced by custom blocks
// matching the layout of `CustomBlockOcamlRep`. If that's so, then this
// function should only be invoked by the OCaml runtime on a pointer to | random_line_split |
|
lib.rs | for us to
// interpret `block_ptr` as a `&mut CustomBlockOcamlRep`.
let block_ptr = block_ptr as *mut MaybeUninit<CustomBlockOcamlRep<T>>;
let custom_block = unsafe { block_ptr.as_mut().unwrap() };
// Write the address of the operations struct to the first word, and the
// pointer to the value to the second word.
*custom_block = MaybeUninit::new(CustomBlockOcamlRep(ops, Rc::clone(&self.0)));
block.build()
}
}
impl<T: CamlSerialize> FromOcamlRep for Custom<T> {
fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> {
let rc = rc_from_value::<T>(value)?;
let rc = Rc::clone(rc);
Ok(Custom::new(rc))
}
}
/// Helper function to fetch a reference to the `Rc` from the OCaml representation
/// of a custom block.
fn rc_from_value<'a, T: CamlSerialize>(value: Value<'a>) -> Result<&'a Rc<T>, FromError> {
let block = from::expect_block(value)?;
from::expect_block_tag(block, CUSTOM_TAG)?;
from::expect_block_size(block, CUSTOM_BLOCK_SIZE_IN_WORDS)?;
// We still don't know whether this block is in fact a
// CustomBlockOcamlRep<T>--it may be a CustomBlockOcamlRep<U>, or some
// other custom block which happens to be the same size. We can verify
// that the block is actually a CustomBlockOcamlRep<T> by checking that
// it points to the correct CustomOperations struct.
let ops = <T as CamlSerialize>::operations();
if !std::ptr::eq(ops, block[0].to_bits() as *const CustomOperations) {
return Err(FromError::UnexpectedCustomOps {
expected: ops as *const _ as usize,
actual: block[0].to_bits(),
});
}
let value_ptr = value.to_bits() as *const CustomBlockOcamlRep<T>;
// Safety: `value_ptr` is guaranteed to be aligned to
// `align_of::<Value>()`, and our use of `expect_block_size` guarantees
// that the pointer is valid for reads of `CUSTOM_BLOCK_SIZE_IN_WORDS *
// `size_of::<Value>()` bytes. Since the first field points to the right
// operations struct, we either have a valid `CustomBlockOCamlRep<T>`
// (i.e., constructed above in our `ToOcamlRep` implementation) or
// someone went out of their way to construct an invalid one. Assume
// it's valid and read in the `CustomBlockOcamlRep<T>`.
let custom_block = unsafe { value_ptr.as_ref().unwrap() };
Ok(&custom_block.1)
}
/// Trait that allows OCaml serialization and deserialization.
///
/// If you want to support serialization/deserialization, you
/// **MUST** call `CamlSerialize::register()` when starting up
/// the program.
///
/// This will register your type in the OCaml runtime, allowing
/// deserialization.
///
/// Rust does not support different instantiations of the default
/// implementation for different implementors of trait types. Therefore,
/// you must implement `type_identifier`, `operations` and `register`
/// manually when implementing this trait for a type. You can use
/// the `caml_serialize_default_impls!()` to do that automatically:
///
/// ```
/// impl CamlSerialize for MyType {
/// caml_serialize_default_impls!();
/// }
/// ```
pub trait CamlSerialize: Sized {
/// Get the type name.
fn type_identifier() -> &'static CStr;
/// Get the type's custom operations struct.
///
/// Always has to return the same reference! If not, the
/// OCaml-to-Rust conversion will fail.
///
/// The returned structure is not intended to be used by
/// a programmer. Using it directly by e.g. injecting it
/// into OCaml custom blocks is dangerous and can cause
/// undefined behavior. Don't do it!
fn operations() -> &'static CustomOperations;
/// Register the type with the OCaml system.
///
/// # Safety
///
/// Must not be called from multiple threads.
///
/// This function interacts with the OCaml runtime, which is not thread-safe.
/// If any other threads are attempting to interact with the OCaml runtime
/// or its custom operations table (e.g., by invoking this function, or by
/// executing OCaml code using custom blocks) when this function is invoked,
/// undefined behavior will result.
///
/// # Examples
///
/// ```
/// use ocamlrep_custom::CamlSerialize;
/// use ocamlrep_ocamlpool::ocaml_ffi;
///
/// struct IntBox(isize);
///
/// impl CamlSerialize for IntBox {
/// caml_serialize_default_impls!();
/// fn serialize(&self) -> Vec<u8> { ... }
/// fn deserialize(buffer: &[u8]) -> Self { ... }
/// }
///
/// ocaml_ffi! {
/// fn register_custom_types() {
/// // Once `register_custom_types` has been invoked from OCaml, IntBox
/// // can be serialized and deserialized from OCaml using the Marshal
/// // module.
/// //
/// // Safety: this will be called from OCaml, as such nothing else will
/// // be interacting with the OCaml runtime.
/// unsafe { IntBox::register() };
/// }
/// }
/// ```
unsafe fn register();
/// Convert a value to an array of bytes.
///
/// The default implementation panics.
fn serialize(&self) -> Vec<u8> {
panic!(
"serialization not implemented for {:?}",
Self::type_identifier()
)
}
/// Deserialize a value form an array of bytes.
///
/// The default implementation panics.
fn deserialize(_data: &[u8]) -> Self {
panic!(
"deserialization not implemented for {:?}",
Self::type_identifier()
)
}
}
#[macro_export]
macro_rules! caml_serialize_default_impls {
() => {
fn type_identifier() -> &'static std::ffi::CStr {
static ONCE: std::sync::Once = std::sync::Once::new();
static mut TYPE_NAME: Option<std::ffi::CString> = None;
ONCE.call_once(|| {
// Safety:
// - We've gated initialization, so it's thread safe.
// - We only set the constant once.
unsafe {
TYPE_NAME = Some($crate::type_identifier_helper::<Self>());
}
});
// Safety:
// - By now the constant has been initialized, and once initialized
// it is never changes.
// - Concurrent reads are OK.
unsafe { TYPE_NAME.as_ref().unwrap() }
}
fn operations() -> &'static $crate::CustomOperations {
static ONCE: std::sync::Once = std::sync::Once::new();
static mut OPS_STRUCT: Option<$crate::CustomOperations> = None;
ONCE.call_once(|| {
// Safety:
// - We've gated initialization, so it's thread safe.
// - We only set the constant once.
unsafe {
OPS_STRUCT = Some($crate::operations_helper::<Self>());
}
});
// Safety:
// - By now the constant has been initialized, and once initialized
// it is never changes.
// - Concurrent reads are OK.
unsafe { OPS_STRUCT.as_ref().unwrap() }
}
unsafe fn register() {
static mut IS_REGISTERED: bool = false;
// Safety: Can only be called in a single-threaded context!
if IS_REGISTERED {
return;
}
IS_REGISTERED = true;
let ops = Self::operations();
$crate::register_helper::<Self>(ops)
}
};
}
/// Helper used for the `caml_serialize_default_impls` macro
pub fn type_identifier_helper<T>() -> CString {
let name = format!("ocamlrep.custom.{}", std::any::type_name::<T>());
std::ffi::CString::new(name).unwrap()
}
/// Helper used for the `caml_serialize_default_impls` macro
pub fn operations_helper<T: CamlSerialize>() -> CustomOperations {
let type_identifier = <T as CamlSerialize>::type_identifier();
let mut ops = CustomOperations::new(type_identifier);
ops.finalize = Some(drop_value::<T>);
ops.serialize = Some(serialize_value::<T>);
ops.deserialize = Some(deserialize_value::<T>);
ops
}
/// Helper used for the `caml_serialize_default_impls` macro
///
/// Should not be used directly. Interacts with the OCaml runtime and is
/// thus unsafe to call in a multi-threaded context.
pub unsafe fn register_helper<T>(ops: &'static CustomOperations) {
// Safety: operations struct has a static lifetime, it will live forever!
caml_register_custom_operations(ops as *const CustomOperations);
}
/// Helper function used by `operations_helper`. Returns a finalizer for custom
/// blocks containing an `Rc<T>`.
extern "C" fn | drop_value | identifier_name |
|
lib.rs | }
/// }
/// ```
unsafe fn register();
/// Convert a value to an array of bytes.
///
/// The default implementation panics.
fn serialize(&self) -> Vec<u8> {
panic!(
"serialization not implemented for {:?}",
Self::type_identifier()
)
}
/// Deserialize a value form an array of bytes.
///
/// The default implementation panics.
fn deserialize(_data: &[u8]) -> Self {
panic!(
"deserialization not implemented for {:?}",
Self::type_identifier()
)
}
}
#[macro_export]
macro_rules! caml_serialize_default_impls {
() => {
fn type_identifier() -> &'static std::ffi::CStr {
static ONCE: std::sync::Once = std::sync::Once::new();
static mut TYPE_NAME: Option<std::ffi::CString> = None;
ONCE.call_once(|| {
// Safety:
// - We've gated initialization, so it's thread safe.
// - We only set the constant once.
unsafe {
TYPE_NAME = Some($crate::type_identifier_helper::<Self>());
}
});
// Safety:
// - By now the constant has been initialized, and once initialized
// it is never changes.
// - Concurrent reads are OK.
unsafe { TYPE_NAME.as_ref().unwrap() }
}
fn operations() -> &'static $crate::CustomOperations {
static ONCE: std::sync::Once = std::sync::Once::new();
static mut OPS_STRUCT: Option<$crate::CustomOperations> = None;
ONCE.call_once(|| {
// Safety:
// - We've gated initialization, so it's thread safe.
// - We only set the constant once.
unsafe {
OPS_STRUCT = Some($crate::operations_helper::<Self>());
}
});
// Safety:
// - By now the constant has been initialized, and once initialized
// it is never changes.
// - Concurrent reads are OK.
unsafe { OPS_STRUCT.as_ref().unwrap() }
}
unsafe fn register() {
static mut IS_REGISTERED: bool = false;
// Safety: Can only be called in a single-threaded context!
if IS_REGISTERED {
return;
}
IS_REGISTERED = true;
let ops = Self::operations();
$crate::register_helper::<Self>(ops)
}
};
}
/// Helper used for the `caml_serialize_default_impls` macro
pub fn type_identifier_helper<T>() -> CString {
let name = format!("ocamlrep.custom.{}", std::any::type_name::<T>());
std::ffi::CString::new(name).unwrap()
}
/// Helper used for the `caml_serialize_default_impls` macro
pub fn operations_helper<T: CamlSerialize>() -> CustomOperations {
let type_identifier = <T as CamlSerialize>::type_identifier();
let mut ops = CustomOperations::new(type_identifier);
ops.finalize = Some(drop_value::<T>);
ops.serialize = Some(serialize_value::<T>);
ops.deserialize = Some(deserialize_value::<T>);
ops
}
/// Helper used for the `caml_serialize_default_impls` macro
///
/// Should not be used directly. Interacts with the OCaml runtime and is
/// thus unsafe to call in a multi-threaded context.
pub unsafe fn register_helper<T>(ops: &'static CustomOperations) {
// Safety: operations struct has a static lifetime, it will live forever!
caml_register_custom_operations(ops as *const CustomOperations);
}
/// Helper function used by `operations_helper`. Returns a finalizer for custom
/// blocks containing an `Rc<T>`.
extern "C" fn drop_value<T: CamlSerialize>(value: usize) {
let _: usize = catch_unwind(|| {
// Safety: We trust here that CustomOperations structs containing this
// `drop_value` instance will only ever be referenced by custom blocks
// matching the layout of `CustomBlockOcamlRep`. If that's so, then this
// function should only be invoked by the OCaml runtime on a pointer to
// a CustomBlockOcamlRep<T> created by T::to_ocamlrep. Such a pointer
// would be aligned and valid.
let custom_block_ptr = value as *mut CustomBlockOcamlRep<T>;
let custom_block = unsafe { custom_block_ptr.as_mut().unwrap() };
// The `Rc` will be dropped here, and its reference count will decrease
// by one (possibly freeing the referenced value).
// Safety: Since the OCaml runtime will only invoke the finalizer for a
// value which will never again be used, it is safe to use
// `drop_in_place` (i.e., our finalizer will only be invoked once, so we
// won't cause a double-drop).
unsafe {
std::ptr::drop_in_place(&mut custom_block.1);
}
0
});
}
/// Helper function for serialization. Interacts with the OCaml runtime, so must
/// only be invoked by the OCaml runtime when serializing a custom block.
extern "C" fn serialize_value<T: CamlSerialize>(
value: usize,
bsize_32: *mut usize,
bsize_64: *mut usize,
) {
let _: usize = catch_unwind(|| {
// Safety: Only called by the OCaml runtime (we don't expose a means of
// invoking this function from Rust), which provides some OCaml
// CUSTOM_TAG block as the value.
let value = unsafe { Value::from_bits(value) };
// Only called by the OCaml runtime, when serializing
// a Custom-object managed by the OCaml GC.
let rc = rc_from_value::<T>(value).unwrap();
let bytes: Vec<u8> = rc.serialize();
let bytes_ptr = bytes.as_ptr();
// Safety: As above, we don't expose a means of invoking this function
// from Rust--it can only be invoked by the OCaml runtime while
// serializing a value. It is safe to invoke OCaml serialization
// functions in this context.
unsafe {
let len = bytes.len();
caml_serialize_int_8(len.try_into().unwrap());
caml_serialize_block_1(bytes_ptr, len);
// The size taken up in the data-part of the custom block.
*bsize_32 = std::mem::size_of::<u32>();
*bsize_64 = std::mem::size_of::<u64>();
}
0
});
}
/// Helper function for deserialization. Interacts with the OCaml runtime, so must
/// only be invoked by the OCaml runtime when serializing a custom block.
extern "C" fn deserialize_value<T: CamlSerialize>(data_ptr: *mut c_void) -> usize {
catch_unwind(|| {
// Get the serialized bytes from the input channel.
let bytes = unsafe {
// Safety: We don't expose a means of invoking this function from
// Rust--`deserialize_value` can only be invoked by the OCaml
// runtime while deserializing a custom block value. It is safe to
// invoke OCaml deserialization functions in this context.
let len: usize = caml_deserialize_sint_8().try_into().unwrap();
let mut buf: Vec<u8> = Vec::with_capacity(len);
// Safety: len <= capacity. The elements aren't initialized at this
// time, but we trust that caml_deserialize_block_1 will fill `len`
// bytes of the buffer.
#[allow(clippy::uninit_vec)]
buf.set_len(len);
// Safety: As above, `deserialize_value` can only be invoked by the
// OCaml runtime during custom block deserialization.
caml_deserialize_block_1(buf.as_mut_ptr(), len);
buf
};
// Actually deserialize those bytes into a T.
let val: T = CamlSerialize::deserialize(&bytes);
// Safety: The OCaml runtime will give us a data buffer which is
// usize-aligned and valid for reads and writes of bsize_32 or bsize_64
// (as provided by `serialize_value`, above) bytes (depending on system
// architecture). This is sufficient for `Rc<T>` (which has the size and
// alignment of usize).
let data_ptr = data_ptr as *mut MaybeUninit<Rc<T>>;
let data = unsafe { data_ptr.as_mut().unwrap() };
*data = MaybeUninit::new(Rc::new(val));
// Return the size of the value we wrote to our output pointer. The
// OCaml runtime will verify that it matches the expected
// bsize_32/bsize_64 written by the serializer.
std::mem::size_of_val(data)
})
}
#[cfg(test)]
mod test {
use std::mem::*;
use super::*;
#[test]
fn custom_block_ocamlrep_size() {
assert_eq!(
size_of::<CustomBlockOcamlRep<u8>>(),
2 * size_of::<Value<'_>>()
);
}
#[test]
fn custom_block_ocamlrep_align() | {
assert_eq!(
align_of::<CustomBlockOcamlRep<u8>>(),
align_of::<Value<'_>>()
);
} | identifier_body |
|
lib.rs | 0.get() - 1);
/// counter
/// }
///
/// fn counter_read(counter: Custom<Counter>) -> isize {
/// counter.0.get()
/// }
/// }
/// ```
///
/// From OCaml:
///
/// ```ocaml
/// type counter; (* abstract type *)
///
/// external counter_new : unit -> counter = "counter_new"
/// external counter_inc: counter -> unit = "counter_inc"
/// external counter_read : counter -> isize = "counter_read"
///
/// let () =
/// let cnt = counter_new () in (* will be dropped on GC finalization *)
/// assert (counter_read cnt == 0);
/// counter_inc cnt;
/// assert (counter_read cnt == 1)
/// ```
pub struct Custom<T: CamlSerialize>(Rc<T>);
impl<T: CamlSerialize> Custom<T> {
/// Create a new `ToCustom` wrapper by taking ownership of the value.
pub fn from(x: T) -> Self {
Self::new(Rc::new(x))
}
/// Create a new `ToCustom` directly from an `Rc`'d value.
pub fn new(x: Rc<T>) -> Self {
Self(x)
}
/// Get a reference to the inner `Rc`
pub fn inner(&self) -> &Rc<T> {
&self.0
}
}
impl<T: CamlSerialize> Deref for Custom<T> {
type Target = T;
fn deref(&self) -> &T {
self.0.deref()
}
}
/// A custom block has two words: a pointer to the CustomOperations struct,
/// and a pointer the the value. Our values are ref-counted, but an Rc pointer
/// is just pointer-sized.
#[repr(C)]
struct CustomBlockOcamlRep<T>(&'static CustomOperations, Rc<T>);
const CUSTOM_BLOCK_SIZE_IN_BYTES: usize = std::mem::size_of::<CustomBlockOcamlRep<()>>();
const CUSTOM_BLOCK_SIZE_IN_WORDS: usize =
CUSTOM_BLOCK_SIZE_IN_BYTES / std::mem::size_of::<Value<'_>>();
impl<T: CamlSerialize> ToOcamlRep for Custom<T> {
fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> {
let ops: &'static CustomOperations = <T as CamlSerialize>::operations();
let mut block = alloc.block_with_size_and_tag(CUSTOM_BLOCK_SIZE_IN_WORDS, CUSTOM_TAG);
// Safety: we don't call any method on `alloc` after this method.
let block_ptr: *mut Value<'_> = unsafe { alloc.block_ptr_mut(&mut block) };
// Safety: `alloc` guarantees that the `block_ptr` returned by
// `block_ptr_mut` is aligend to `align_of::<Value>()` and valid
// for reads and writes of `CUSTOM_BLOCK_SIZE_IN_WORDS *
// size_of::<Value>()` bytes. Since `CustomBlockOcamlRep` has size
// `CUSTOM_BLOCK_SIZE_IN_WORDS * size_of::<Value>()`, its
// alignment is equal to `align_of::<Value>()`, and no other
// reference to our newly-allocated block can exist, it's safe for us to
// interpret `block_ptr` as a `&mut CustomBlockOcamlRep`.
let block_ptr = block_ptr as *mut MaybeUninit<CustomBlockOcamlRep<T>>;
let custom_block = unsafe { block_ptr.as_mut().unwrap() };
// Write the address of the operations struct to the first word, and the
// pointer to the value to the second word.
*custom_block = MaybeUninit::new(CustomBlockOcamlRep(ops, Rc::clone(&self.0)));
block.build()
}
}
impl<T: CamlSerialize> FromOcamlRep for Custom<T> {
fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> {
let rc = rc_from_value::<T>(value)?;
let rc = Rc::clone(rc);
Ok(Custom::new(rc))
}
}
/// Helper function to fetch a reference to the `Rc` from the OCaml representation
/// of a custom block.
fn rc_from_value<'a, T: CamlSerialize>(value: Value<'a>) -> Result<&'a Rc<T>, FromError> {
let block = from::expect_block(value)?;
from::expect_block_tag(block, CUSTOM_TAG)?;
from::expect_block_size(block, CUSTOM_BLOCK_SIZE_IN_WORDS)?;
// We still don't know whether this block is in fact a
// CustomBlockOcamlRep<T>--it may be a CustomBlockOcamlRep<U>, or some
// other custom block which happens to be the same size. We can verify
// that the block is actually a CustomBlockOcamlRep<T> by checking that
// it points to the correct CustomOperations struct.
let ops = <T as CamlSerialize>::operations();
if !std::ptr::eq(ops, block[0].to_bits() as *const CustomOperations) |
let value_ptr = value.to_bits() as *const CustomBlockOcamlRep<T>;
// Safety: `value_ptr` is guaranteed to be aligned to
// `align_of::<Value>()`, and our use of `expect_block_size` guarantees
// that the pointer is valid for reads of `CUSTOM_BLOCK_SIZE_IN_WORDS *
// `size_of::<Value>()` bytes. Since the first field points to the right
// operations struct, we either have a valid `CustomBlockOCamlRep<T>`
// (i.e., constructed above in our `ToOcamlRep` implementation) or
// someone went out of their way to construct an invalid one. Assume
// it's valid and read in the `CustomBlockOcamlRep<T>`.
let custom_block = unsafe { value_ptr.as_ref().unwrap() };
Ok(&custom_block.1)
}
/// Trait that allows OCaml serialization and deserialization.
///
/// If you want to support serialization/deserialization, you
/// **MUST** call `CamlSerialize::register()` when starting up
/// the program.
///
/// This will register your type in the OCaml runtime, allowing
/// deserialization.
///
/// Rust does not support different instantiations of the default
/// implementation for different implementors of trait types. Therefore,
/// you must implement `type_identifier`, `operations` and `register`
/// manually when implementing this trait for a type. You can use
/// the `caml_serialize_default_impls!()` to do that automatically:
///
/// ```
/// impl CamlSerialize for MyType {
/// caml_serialize_default_impls!();
/// }
/// ```
pub trait CamlSerialize: Sized {
/// Get the type name.
fn type_identifier() -> &'static CStr;
/// Get the type's custom operations struct.
///
/// Always has to return the same reference! If not, the
/// OCaml-to-Rust conversion will fail.
///
/// The returned structure is not intended to be used by
/// a programmer. Using it directly by e.g. injecting it
/// into OCaml custom blocks is dangerous and can cause
/// undefined behavior. Don't do it!
fn operations() -> &'static CustomOperations;
/// Register the type with the OCaml system.
///
/// # Safety
///
/// Must not be called from multiple threads.
///
/// This function interacts with the OCaml runtime, which is not thread-safe.
/// If any other threads are attempting to interact with the OCaml runtime
/// or its custom operations table (e.g., by invoking this function, or by
/// executing OCaml code using custom blocks) when this function is invoked,
/// undefined behavior will result.
///
/// # Examples
///
/// ```
/// use ocamlrep_custom::CamlSerialize;
/// use ocamlrep_ocamlpool::ocaml_ffi;
///
/// struct IntBox(isize);
///
/// impl CamlSerialize for IntBox {
/// caml_serialize_default_impls!();
/// fn serialize(&self) -> Vec<u8> { ... }
/// fn deserialize(buffer: &[u8]) -> Self { ... }
/// }
///
/// ocaml_ffi! {
/// fn register_custom_types() {
/// // Once `register_custom_types` has been invoked from OCaml, IntBox
/// // can be serialized and deserialized from OCaml using the Marshal
/// // module.
/// //
/// // Safety: this will be called from OCaml, as such nothing else will
/// // be interacting with the OCaml runtime.
/// unsafe { IntBox::register() };
/// }
/// }
/// ```
unsafe fn register();
/// Convert a value to an array of bytes.
///
/// The default implementation panics.
fn serialize(&self) -> Vec<u8> {
panic!(
"serialization not implemented for {:?}",
Self::type_identifier()
)
}
/// Deserialize a value form an array of bytes.
///
/// The default implementation panics.
fn deserialize(_data: &[u8 | {
return Err(FromError::UnexpectedCustomOps {
expected: ops as *const _ as usize,
actual: block[0].to_bits(),
});
} | conditional_block |
run.py | )
gc.queue_research(bc.UnitType.Worker)
gc.queue_research(bc.UnitType.Mage)
gc.queue_research(bc.UnitType.Mage)
gc.queue_research(bc.UnitType.Mage)
gc.queue_research(bc.UnitType.Healer)
gc.queue_research(bc.UnitType.Mage)
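#The helpers below assume module-level state set up earlier in this file (not
#shown here): the GameController `gc`, a `directions` list (e.g. list(bc.Direction)),
#the team constants `my_team`/`enemy_team`, the enemy spawn location, and the
#unit counters such as num_workers, num_rangers and num_healers.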
#method to move any unit
def move(unit):
#API returns any possible moves in list form
possible_directions = list(bc.Direction)
choices = []
#find only the moves that are valid moves
for direct in possible_directions:
if gc.can_move(unit.id, direct):
choices.append(direct)
#if not choices:
# gc.disintegrate_unit(unit.id)
# return
if choices:
dir = random.choice(choices)
#if unit can move and is ready to move, randomly move them to a new position
if gc.is_move_ready(unit.id) and gc.can_move(unit.id, dir):
gc.move_robot(unit.id, dir)
#Try to approach a given target destination. (Note: NOT unit)
def approach(unit, location, destination):
global approach_dir
#Find the difference in unit position and reduce it to a simple coordinate pair
#for use with the approach_dir dictionary.
x_diff = destination.x - location.x
y_diff = destination.y - location.y
x_move = x_diff
y_move = y_diff
#if there is an x_diff/y_diff, reduce it to a movement in one direction.
if x_diff != 0:
x_move = x_diff/abs(x_diff)
if y_diff != 0:
y_move = y_diff/abs(y_diff)
    #if there are no moves to make, exit.
if (x_move,y_move) == (0,0):
return
#if we can move in an optimal direction, move that direction.
dir = approach_dir[(x_move,y_move)]
if gc.is_move_ready(unit.id) and gc.can_move(unit.id,dir):
gc.move_robot(unit.id, dir)
return
    #if we can't move in the optimal direction, try a similar direction
    if x_move == 0:
        x_move = random.choice([-1,1])
    elif y_move == 0:
        y_move = random.choice([-1,1])
    else:
        #keep the axis with the larger remaining distance and drop the other
        if abs(x_diff) > abs(y_diff):
            y_move = 0
        else:
            x_move = 0
dir = approach_dir[(x_move,y_move)]
if gc.is_move_ready(unit.id) and gc.can_move(unit.id,dir):
gc.move_robot(unit.id, dir)
return
#if nothing else works, move randomly
move(unit)
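#The approach() helper relies on approach_dir, defined earlier in this file.
#It is assumed to map the reduced (x, y) step to a bc.Direction, roughly like
#this sketch (names taken from the bc API; exact contents may differ):
#    approach_dir = {
#        (0, 1): bc.Direction.North,    (1, 1): bc.Direction.Northeast,
#        (1, 0): bc.Direction.East,     (1, -1): bc.Direction.Southeast,
#        (0, -1): bc.Direction.South,   (-1, -1): bc.Direction.Southwest,
#        (-1, 0): bc.Direction.West,    (-1, 1): bc.Direction.Northwest,
#    }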
#logic for worker units
def workerWork(worker):
global num_workers, total_number_factories, escape, full_vision, fight
#if there is a worker deficit and we have the resources to replicate,
#find a valid direction to do so.
if num_workers < 7 and gc.karbonite() >= 60:
for dir in directions:
if gc.can_replicate(worker.id, dir):
gc.replicate(worker.id, dir)
return #once an action is performed, that worker is done
nearby = gc.sense_nearby_units_by_team(worker.location.map_location(), worker.vision_range, enemy_team)
if nearby:
fight = True
full_vision.extend(nearby)
#build on any existing nearby blueprints, or repair damaged structures
nearby = gc.sense_nearby_units(worker.location.map_location(), 2)
for other in nearby:
if gc.can_build(worker.id, other.id):
gc.build(worker.id, other.id)
return
elif other.health < other.max_health and gc.can_repair(worker.id, other.id):
gc.repair(worker.id, other.id)
return
#build factories until game reaches round 150, then focus on making units
if gc.karbonite() > bc.UnitType.Factory.blueprint_cost() and gc.round() < 150:
for dir in directions:
if gc.can_blueprint(worker.id, bc.UnitType.Factory, dir):
gc.blueprint(worker.id, bc.UnitType.Factory, dir)
return
if gc.karbonite() > bc.UnitType.Rocket.blueprint_cost() and gc.round() > 550:
for dir in directions:
if gc.can_blueprint(worker.id, bc.UnitType.Rocket, dir):
gc.blueprint(worker.id, bc.UnitType.Rocket, dir)
return
#find a direction to harvest
for dir in directions:
if gc.can_harvest(worker.id, dir):
gc.harvest(worker.id, dir)
return
#if this part of the code is reached, then the only thing left to do is move
move(worker)
#factoryProduce takes a factory, first tries to ungarrison any available units,
#then attempts to produce rangers and healers at a 4:1 ratio
def factoryProduce(factory):
global num_healers, num_rangers, release_units, fight
garrison = factory.structure_garrison()
if num_rangers + num_healers > 15 or fight:
release_units = True
#If a unit is garrisoned, release them in an available spot.
if len(garrison) > 0 and release_units:
for dir in directions:
if gc.can_unload(factory.id, dir):
gc.unload(factory.id, dir)
if gc.round() > 650:
return
#If the factory is available to produce another unit. If we have enough
#healers, produce rangers.
if gc.can_produce_robot(factory.id, bc.UnitType.Ranger):
if num_rangers < num_healers * 4:
gc.produce_robot(factory.id, bc.UnitType.Ranger)
else:
gc.produce_robot(factory.id, bc.UnitType.Healer)
return
#Healer_heal finds units near the healer and attempts to heal them
def Healer_heal(unit):
global enemy_spawn, my_team, full_vision
location = unit.location
#find nearby units on team
nearby = gc.sense_nearby_units_by_team(location.map_location(), unit.attack_range(), my_team)
#if can heal, heal
heal = False
if gc.is_heal_ready(unit.id):
lowest_health = unit
for other in nearby:
if other.health < lowest_health.health and other.health < other.max_health:
lowest_health = other
heal = True
if gc.can_heal(unit.id, lowest_health.id) and heal:
gc.heal(unit.id, lowest_health.id)
return
#if no heal targets, walk towards the action
if full_vision:
approach(unit, unit.location.map_location(),full_vision[0].location.map_location())
else:
approach(unit, unit.location.map_location(),enemy_spawn)
#Healer_overcharge finds a nearby unit and restores their ability charge.
def Healer_overcharge(unit):
global my_team
#if we can't overcharge, exit
if not gc.is_overcharge_ready(unit.id):
return
#cannot overcharge if not at research level 3
if bc.ResearchInfo().get_level(bc.UnitType.Healer) < 3:
return
#find our location
location = unit.location
#get all possible targets around, and choose one to heal
possible_targets = gc.sense_nearby_units_by_team(location.map_location(), unit.ability_range(), my_team)
for other in possible_targets:
if gc.can_heal(unit.id, other.id):
gc.heal(unit.id, other.id)
return
#Mars Info Finding and Rocket variables
marsMap = gc.starting_map(bc.Planet.Mars)
marsHeight = marsMap.height
marsWidth = marsMap.width
#add to this variable as rockets are built
safe_locations = []
#method to find a safe location on Mars to land using known Mars info from the API
def | ():
global safe_locations
component_num = 0
for i in range(marsHeight):
for j in range(marsWidth):
if (i, j) not in safe_locations:
temp_loc = bc.MapLocation(bc.Planet.Mars, i, j)
try:
if marsMap.is_passable_terrain_at(temp_loc):
safe_locations.append((i, j)) #this stores the locations that are safe to use later
component_num += 1
except Exception as e:
print(i, j)
print('Error:', e)
#traceback.print_exc()
#now choose a safe location to launch to per rocket
def findRocketLand(rocket):
global safe_locations
#not sure what range to use
temp_range= 5
for t in range(temp_range):
return_value = random.choice(safe_locations) #calls locations from above method
if (t < temp_range -1):
continue
return bc.MapLocation(bc.Planet.Mars, return_value[0], return_value[1])
#returns the map location to land on
#method to launch the rocket
def launch(unit):
garrison = unit.structure_garrison()
free_loc = findRocketLand(unit)
if gc.can_launch_rocket(unit.id, free_loc):
#if can launch, launch
gc.launch_rocket(unit.id, free_loc)
#method to | find_locations_Mars | identifier_name |
run.py | )
gc.queue_research(bc.UnitType.Worker)
gc.queue_research(bc.UnitType.Mage)
gc.queue_research(bc.UnitType.Mage)
gc.queue_research(bc.UnitType.Mage)
gc.queue_research(bc.UnitType.Healer)
gc.queue_research(bc.UnitType.Mage)
#method to move any unit
def move(unit):
#API returns any possible moves in list form
possible_directions = list(bc.Direction)
choices = []
#find only the moves that are valid moves
for direct in possible_directions:
if gc.can_move(unit.id, direct):
choices.append(direct)
#if not choices:
# gc.disintegrate_unit(unit.id)
# return
if choices:
dir = random.choice(choices)
#if unit can move and is ready to move, randomly move them to a new position
if gc.is_move_ready(unit.id) and gc.can_move(unit.id, dir):
gc.move_robot(unit.id, dir)
#Try to approach a given target destination. (Note: NOT unit)
def approach(unit, location, destination):
global approach_dir
#Find the difference in unit position and reduce it to a simple coordinate pair
#for use with the approach_dir dictionary.
x_diff = destination.x - location.x
y_diff = destination.y - location.y
x_move = x_diff
y_move = y_diff
#if there is an x_diff/y_diff, reduce it to a movement in one direction.
if x_diff != 0:
x_move = x_diff/abs(x_diff)
if y_diff != 0:
y_move = y_diff/abs(y_diff)
#if there are no moves to make, exit.
if (x_move,y_move) == (0,0):
return
#if we can move in an optimal direction, move that direction.
dir = approach_dir[(x_move,y_move)]
if gc.is_move_ready(unit.id) and gc.can_move(unit.id,dir):
gc.move_robot(unit.id, dir)
return
#if we can't move in the optimal direction, try moving in a similar direction
if x_move == 0:
x_move = random.choice([-1,1])
elif y_move == 0:
y_move = random.choice([-1,1])
else:
if x_diff > y_diff:
y_move = 0
else:
x_move = 0
dir = approach_dir[(x_move,y_move)]
if gc.is_move_ready(unit.id) and gc.can_move(unit.id,dir):
gc.move_robot(unit.id, dir)
return
#if nothing else works, move randomly
move(unit)
#logic for worker units
def workerWork(worker):
global num_workers, total_number_factories, escape, full_vision, fight
#if there is a worker deficit and we have the resources to replicate,
#find a valid direction to do so.
if num_workers < 7 and gc.karbonite() >= 60:
for dir in directions:
if gc.can_replicate(worker.id, dir):
gc.replicate(worker.id, dir)
return #once an action is performed, that worker is done
nearby = gc.sense_nearby_units_by_team(worker.location.map_location(), worker.vision_range, enemy_team)
if nearby:
fight = True
full_vision.extend(nearby)
#build on any existing nearby blueprints, or repair damaged structures
nearby = gc.sense_nearby_units(worker.location.map_location(), 2)
for other in nearby:
if gc.can_build(worker.id, other.id):
gc.build(worker.id, other.id)
return
elif other.health < other.max_health and gc.can_repair(worker.id, other.id):
gc.repair(worker.id, other.id)
return
#build factories until game reaches round 150, then focus on making units
if gc.karbonite() > bc.UnitType.Factory.blueprint_cost() and gc.round() < 150:
for dir in directions:
if gc.can_blueprint(worker.id, bc.UnitType.Factory, dir):
gc.blueprint(worker.id, bc.UnitType.Factory, dir)
return
if gc.karbonite() > bc.UnitType.Rocket.blueprint_cost() and gc.round() > 550:
for dir in directions:
if gc.can_blueprint(worker.id, bc.UnitType.Rocket, dir):
gc.blueprint(worker.id, bc.UnitType.Rocket, dir)
return
#find a direction to harvest
for dir in directions:
if gc.can_harvest(worker.id, dir):
gc.harvest(worker.id, dir)
return
#if this part of the code is reached, then the only thing left to do is move
move(worker)
#factoryProduce takes a factory, first tries to ungarrison any available units,
#then attempts to produce rangers and healers at a 4:1 ratio
def factoryProduce(factory):
| return
#Healer_heal finds units near the healer and attempts to heal them
def Healer_heal(unit):
global enemy_spawn, my_team, full_vision
location = unit.location
#find nearby units on team
nearby = gc.sense_nearby_units_by_team(location.map_location(), unit.attack_range(), my_team)
#if can heal, heal
heal = False
if gc.is_heal_ready(unit.id):
lowest_health = unit
for other in nearby:
if other.health < lowest_health.health and other.health < other.max_health:
lowest_health = other
heal = True
if gc.can_heal(unit.id, lowest_health.id) and heal:
gc.heal(unit.id, lowest_health.id)
return
#if no heal targets, walk towards the action
if full_vision:
approach(unit, unit.location.map_location(),full_vision[0].location.map_location())
else:
approach(unit, unit.location.map_location(),enemy_spawn)
#Healer_overcharge finds a nearby unit and restores their ability charge.
def Healer_overcharge(unit):
global my_team
#if we can't overcharge, exit
if not gc.is_overcharge_ready(unit.id):
return
#cannot overcharge if not at research level 3
if bc.ResearchInfo().get_level(bc.UnitType.Healer) < 3:
return
#find our location
location = unit.location
#get all possible targets around, and choose one to heal
possible_targets = gc.sense_nearby_units_by_team(location.map_location(), unit.ability_range(), my_team)
for other in possible_targets:
if gc.can_heal(unit.id, other.id):
gc.heal(unit.id, other.id)
return
#Mars Info Finding and Rocket variables
marsMap = gc.starting_map(bc.Planet.Mars)
marsHeight = marsMap.height
marsWidth = marsMap.width
#add to this variable as rockets are built
safe_locations = []
#method to find a safe location on Mars to land using known Mars info from the API
def find_locations_Mars():
global safe_locations
component_num = 0
for i in range(marsHeight):
for j in range(marsWidth):
if (i, j) not in safe_locations:
temp_loc = bc.MapLocation(bc.Planet.Mars, i, j)
try:
if marsMap.is_passable_terrain_at(temp_loc):
safe_locations.append((i, j)) #this stores the locations that are safe to use later
component_num += 1
except Exception as e:
print(i, j)
print('Error:', e)
#traceback.print_exc()
#now choose a safe location to launch to per rocket
def findRocketLand(rocket):
global safe_locations
#not sure what range to use
temp_range= 5
for t in range(temp_range):
return_value = random.choice(safe_locations) #calls locations from above method
if (t < temp_range -1):
continue
return bc.MapLocation(bc.Planet.Mars, return_value[0], return_value[1])
#returns the map location to land on
#method to launch the rocket
def launch(unit):
garrison = unit.structure_garrison()
free_loc = findRocketLand(unit)
if gc.can_launch_rocket(unit.id, free_loc):
#if can launch, launch
gc.launch_rocket(unit.id, free_loc)
#method to unload | global num_healers, num_rangers, release_units, fight
garrison = factory.structure_garrison()
if num_rangers + num_healers > 15 or fight:
release_units = True
#If a unit is garrisoned, release them in an available spot.
if len(garrison) > 0 and release_units:
for dir in directions:
if gc.can_unload(factory.id, dir):
gc.unload(factory.id, dir)
if gc.round() > 650:
return
#If the factory is available to produce another unit. If we have enough
#healers, produce rangers.
if gc.can_produce_robot(factory.id, bc.UnitType.Ranger):
if num_rangers < num_healers * 4:
gc.produce_robot(factory.id, bc.UnitType.Ranger)
else:
gc.produce_robot(factory.id, bc.UnitType.Healer) | identifier_body |
run.py | #find nearby units on team
nearby = gc.sense_nearby_units_by_team(location.map_location(), unit.attack_range(), my_team)
#if can heal, heal
heal = False
if gc.is_heal_ready(unit.id):
lowest_health = unit
for other in nearby:
if other.health < lowest_health.health and other.health < other.max_health:
lowest_health = other
heal = True
if gc.can_heal(unit.id, lowest_health.id) and heal:
gc.heal(unit.id, lowest_health.id)
return
#if no heal targets, walk towards the action
if full_vision:
approach(unit, unit.location.map_location(),full_vision[0].location.map_location())
else:
approach(unit, unit.location.map_location(),enemy_spawn)
#Healer_overcharge finds a nearby unit and restores their ability charge.
def Healer_overcharge(unit):
global my_team
#if we can't overcharge, exit
if not gc.is_overcharge_ready(unit.id):
return
#cannot overcharge if not at research level 3
if bc.ResearchInfo().get_level(bc.UnitType.Healer) < 3:
return
#find our location
location = unit.location
#get all possible targets around, and choose one to heal
possible_targets = gc.sense_nearby_units_by_team(location.map_location(), unit.ability_range(), my_team)
for other in possible_targets:
if gc.can_heal(unit.id, other.id):
gc.heal(unit.id, other.id)
return
#Mars Info Finding and Rocket variables
marsMap = gc.starting_map(bc.Planet.Mars)
marsHeight = marsMap.height
marsWidth = marsMap.width
#add to this variable as rockets are built
safe_locations = []
#method to find a safe location on Mars to land using known Mars info from the API
def find_locations_Mars():
global safe_locations
component_num = 0
for i in range(marsHeight):
for j in range(marsWidth):
if (i, j) not in safe_locations:
temp_loc = bc.MapLocation(bc.Planet.Mars, i, j)
try:
if marsMap.is_passable_terrain_at(temp_loc):
safe_locations.append((i, j)) #this stores the locations that are safe to use later
component_num += 1
except Exception as e:
print(i, j)
print('Error:', e)
#traceback.print_exc()
#now choose a safe location to launch to per rocket
def findRocketLand(rocket):
global safe_locations
#not sure what range to use
temp_range= 5
for t in range(temp_range):
return_value = random.choice(safe_locations) #calls locations from above method
if (t < temp_range -1):
continue
return bc.MapLocation(bc.Planet.Mars, return_value[0], return_value[1])
#returns the map location to land on
#method to launch the rocket
def launch(unit):
garrison = unit.structure_garrison()
free_loc = findRocketLand(unit)
if gc.can_launch_rocket(unit.id, free_loc):
#if can launch, launch
gc.launch_rocket(unit.id, free_loc)
#method to unload and garrison the rocket once built
def unloadRocket(rocket):
garrison = rocket.structure_garrison()
if len(garrison) > 0:
for d in directions:
if gc.can_unload(rocket.id, d):
gc.unload(rocket.id, d)
find_locations_Mars()
#method to move the units towards the rockets
def moveUnitToRocket(unit,nearby):
if not gc.is_move_ready(unit.id):
return
#if ready to move
#get a location of the unit
location = unit.location.map_location()
#use directions from above
best = directions[0]
#set a distance
closest_distance = 100000
#for each of nearby
for x in nearby:
if gc.can_load(x.id, unit.id):
gc.load(x.id,unit.id)
return
next_location = x.location.map_location()
#now the distance is from that location to the next one found
current_distance = location.distance_squared_to(next_location)
#if closer than the set closest distance, go there
if current_distance < closest_distance:
closest_distance = current_distance
best = location.direction_to(next_location)
#moving the units based off current location and if they can move
range_index = 8
for i in range(8):
if directions[i] == best:
range_index = i
break
for i in range(4):
temp_index = (range_index + i + 9)%9
if gc.can_move(unit.id, directions[temp_index]):
gc.move_robot(unit.id, directions[temp_index])
return
temp_index = (range_index - i + 9)%9
if gc.can_move(unit.id, directions[temp_index]):
gc.move_robot(unit.id, directions[temp_index])
return
#rangerAttack takes a unit and the nearby enemies and attempts an attack.
def rangerAttack(unit, nearby):
global priority_rangers
best_target = 0
targets = [] #list of targets from least valuable to most
#we find the best unit to attack from the priority_rangers dictionary
#and attempt to attack the best unit.
for enemy in nearby:
#if enemy is too close, back away
if gc.is_move_ready(unit.id):
x_diff = unit.location.map_location().x - enemy.location.map_location().x
y_diff = unit.location.map_location().y - enemy.location.map_location().y
#backing away is done by reversing location and destination in approach function
if (x_diff * x_diff) + (y_diff * y_diff) < 20:
approach(unit,enemy.location.map_location(),unit.location.map_location())
if priority_rangers[enemy.unit_type] > best_target:
best_target = priority_rangers[enemy.unit_type]
targets.append(enemy)
#if we can attack, and something is nearby to attack, do so.
if gc.is_attack_ready(unit.id):
for i in range(len(targets)-1,-1,-1):
if gc.can_attack(unit.id, targets[i].id):
gc.attack(unit.id, targets[i].id)
return
if gc.is_move_ready(unit.id):
approach(unit,unit.location.map_location(),targets[-1].location.map_location())
#rangerLogic handles movement when no enemies are nearby, and attack orders.
def rangerLogic(unit):
global enemy_spawn, enemy_team, escape, full_vision
#Make sure only rangers get ranger orders.
if unit.unit_type != bc.UnitType.Ranger:
return
location = unit.location
#if its time to escape, try to run to a rocket
if escape and unit.location.map_location().planet == bc.Planet.Earth:
nearby = gc.sense_nearby_units_by_type(location.map_location(), unit.vision_range, bc.UnitType.Rocket)
if nearby:
moveUnitToRocket(unit,nearby)
return
#sense enemies that are nearby, and then attack them
nearby = gc.sense_nearby_units_by_team(location.map_location(), unit.vision_range, enemy_team)
if nearby:
full_vision.extend(nearby)
rangerAttack(unit, nearby)
#if no one is nearby then approach the enemy, if no enemies are seen by anyone, approach enemy spawn
if not nearby and gc.is_move_ready(unit.id):
if full_vision:
approach(unit, unit.location.map_location(),full_vision[0].location.map_location())
else:
approach(unit, unit.location.map_location(),enemy_spawn)
while True:
# We only support Python 3, which means brackets around print()
print('pyround:', gc.round(), 'time left:', gc.get_time_left_ms(), 'ms')
# count how much of each unit we have at the beginning of each turn
num_workers = 0
num_knights = 0
num_healers = 0
num_rangers = 0
num_mages = 0
total_number_factories = 0
total_number_rockets = 0
for unit in gc.my_units():
if unit.unit_type == bc.UnitType.Worker:
num_workers += 1
if unit.unit_type == bc.UnitType.Knight:
num_knights += 1
if unit.unit_type == bc.UnitType.Healer:
num_healers += 1
if unit.unit_type == bc.UnitType.Ranger:
num_rangers += 1
if unit.unit_type == bc.UnitType.Mage:
num_mages += 1
if unit.unit_type == bc.UnitType.Factory:
total_number_factories += 1
if unit.unit_type == bc.UnitType.Rocket:
total_number_rockets += 1
# shared unit vision
full_vision = []
try:
# walk through our units:
for unit in gc.my_units():
location = unit.location
if unit.unit_type == bc.UnitType.Rocket:
escape = True
if unit.location.map_location().planet == bc.Planet.Mars:
| unloadRocket(unit) | conditional_block |
|
run.py | priority_healers = {
bc.UnitType.Worker : 4,
bc.UnitType.Knight : 3,
bc.UnitType.Healer : 2,
bc.UnitType.Ranger : 1,
bc.UnitType.Mage : 2
}
#a directions dictionary used to approach
approach_dir = {
(0,1) : bc.Direction.North,
(1,1) : bc.Direction.Northeast,
(1,0) : bc.Direction.East,
(1,-1) : bc.Direction.Southeast,
(0,-1) : bc.Direction.South,
(-1,-1) : bc.Direction.Southwest,
(-1,0) : bc.Direction.West,
(-1,1) : bc.Direction.Northwest,
}
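#e.g. in approach(), a destination two tiles north and one tile east of the unit reduces to (1,1) -> Direction.Northeast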
#sets the my_team and enemy_team variables to know who to attack or help
enemy_team = bc.Team.Red
if my_team == bc.Team.Red:
enemy_team = bc.Team.Blue
#find the start map and original units at start of game
start_map = gc.starting_map(bc.Planet.Earth)
init_units = start_map.initial_units
for i in range(init_units.__len__()):
if init_units.__getitem__(i).team == enemy_team:
enemy_spawn = init_units.__getitem__(i).location.map_location()
#flag for sending units into battle, flipped when an army has begun amassing
release_units = False
#flag for sending units to the rockets for escape
escape = False
fight = False
print("pystarted")
random.seed(datetime.now())
#Research order
gc.queue_research(bc.UnitType.Worker)
gc.queue_research(bc.UnitType.Ranger)
gc.queue_research(bc.UnitType.Healer)
gc.queue_research(bc.UnitType.Healer)
gc.queue_research(bc.UnitType.Worker)
gc.queue_research(bc.UnitType.Worker)
gc.queue_research(bc.UnitType.Rocket)
gc.queue_research(bc.UnitType.Ranger)
gc.queue_research(bc.UnitType.Ranger)
gc.queue_research(bc.UnitType.Rocket)
gc.queue_research(bc.UnitType.Rocket)
gc.queue_research(bc.UnitType.Worker)
gc.queue_research(bc.UnitType.Mage)
gc.queue_research(bc.UnitType.Mage)
gc.queue_research(bc.UnitType.Mage)
gc.queue_research(bc.UnitType.Healer)
gc.queue_research(bc.UnitType.Mage)
#method to move any unit
def move(unit):
#API returns any possible moves in list form
possible_directions = list(bc.Direction)
choices = []
#find only the moves that are valid moves
for direct in possible_directions:
if gc.can_move(unit.id, direct):
choices.append(direct)
#if not choices:
# gc.disintegrate_unit(unit.id)
# return
if choices:
dir = random.choice(choices)
#if unit can move and is ready to move, randomly move them to a new position
if gc.is_move_ready(unit.id) and gc.can_move(unit.id, dir):
gc.move_robot(unit.id, dir)
#Try to approach a given target destination. (Note: NOT unit)
def approach(unit, location, destination):
global approach_dir
#Find the difference in unit position and reduce it to a simple coordinate pair
#for use with the approach_dir dictionary.
x_diff = destination.x - location.x
y_diff = destination.y - location.y
x_move = x_diff
y_move = y_diff
#if there is an x_diff/y_diff, reduce it to a movement in one direction.
if x_diff != 0:
x_move = x_diff/abs(x_diff)
if y_diff != 0:
y_move = y_diff/abs(y_diff)
#if there are no moves to make, exit.
if (x_move,y_move) == (0,0):
return
#if we can move in an optimal direction, move that direction.
dir = approach_dir[(x_move,y_move)]
if gc.is_move_ready(unit.id) and gc.can_move(unit.id,dir):
gc.move_robot(unit.id, dir)
return
#if we can't move in the optimal direction, try moving in a similar direction
if x_move == 0:
x_move = random.choice([-1,1])
elif y_move == 0:
y_move = random.choice([-1,1])
else:
if x_diff > y_diff:
y_move = 0
else:
x_move = 0
dir = approach_dir[(x_move,y_move)]
if gc.is_move_ready(unit.id) and gc.can_move(unit.id,dir):
gc.move_robot(unit.id, dir)
return
#if nothing else works, move randomly
move(unit)
#logic for worker units
def workerWork(worker):
global num_workers, total_number_factories, escape, full_vision, fight
#if there is a worker deficit and we have the resources to replicate,
#find a valid direction to do so.
if num_workers < 7 and gc.karbonite() >= 60:
for dir in directions:
if gc.can_replicate(worker.id, dir):
gc.replicate(worker.id, dir)
return #once an action is performed, that worker is done
nearby = gc.sense_nearby_units_by_team(worker.location.map_location(), worker.vision_range, enemy_team)
if nearby:
fight = True
full_vision.extend(nearby)
#build on any existing nearby blueprints, or repair damaged structures
nearby = gc.sense_nearby_units(worker.location.map_location(), 2)
for other in nearby:
if gc.can_build(worker.id, other.id):
gc.build(worker.id, other.id)
return
elif other.health < other.max_health and gc.can_repair(worker.id, other.id):
gc.repair(worker.id, other.id)
return
#build factories until game reaches round 150, then focus on making units
if gc.karbonite() > bc.UnitType.Factory.blueprint_cost() and gc.round() < 150:
for dir in directions:
if gc.can_blueprint(worker.id, bc.UnitType.Factory, dir):
gc.blueprint(worker.id, bc.UnitType.Factory, dir)
return
if gc.karbonite() > bc.UnitType.Rocket.blueprint_cost() and gc.round() > 550:
for dir in directions:
if gc.can_blueprint(worker.id, bc.UnitType.Rocket, dir):
gc.blueprint(worker.id, bc.UnitType.Rocket, dir)
return
#find a direction to harvest
for dir in directions:
if gc.can_harvest(worker.id, dir):
gc.harvest(worker.id, dir)
return
#if this part of the code is reached, then the only thing left to do is move
move(worker)
#factoryProduce takes a factory, first tries to ungarrison any available units,
#then attempts to produce rangers and healers at a 4:1 ratio
def factoryProduce(factory):
global num_healers, num_rangers, release_units, fight
garrison = factory.structure_garrison()
if num_rangers + num_healers > 15 or fight:
release_units = True
#If a unit is garrisoned, release them in an available spot.
if len(garrison) > 0 and release_units:
for dir in directions:
if gc.can_unload(factory.id, dir):
gc.unload(factory.id, dir)
if gc.round() > 650:
return
#If the factory is available to produce another unit. If we have enough
#healers, produce rangers.
if gc.can_produce_robot(factory.id, bc.UnitType.Ranger):
if num_rangers < num_healers * 4:
gc.produce_robot(factory.id, bc.UnitType.Ranger)
else:
gc.produce_robot(factory.id, bc.UnitType.Healer)
return
#Healer_heal finds units near the healer and attempts to heal them
def Healer_heal(unit):
global enemy_spawn, my_team, full_vision
location = unit.location
#find nearby units on team
nearby = gc.sense_nearby_units_by_team(location.map_location(), unit.attack_range(), my_team)
#if can heal, heal
heal = False
if gc.is_heal_ready(unit.id):
lowest_health = unit
for other in nearby:
if other.health < lowest_health.health and other.health < other.max_health:
lowest_health = other
heal = True
if gc.can_heal(unit.id, lowest_health.id) and heal:
gc.heal(unit.id, lowest_health.id)
return
#if no heal targets, walk towards the action
if full_vision:
approach(unit, unit.location.map_location(),full_vision[0].location.map_location())
else:
approach(unit, unit.location.map_location(),enemy_spawn)
#Healer_overcharge finds a nearby unit and restores their ability charge.
def Healer_overcharge(unit):
global my_team
#if we can't overcharge, exit
if not gc.is_overcharge_ready(unit.id):
return
#cannot overcharge if not at research level 3
if bc.ResearchInfo().get_level(bc.UnitType.Healer) < 3:
return
#find our location
location = unit.location
| random_line_split |
||
test.rs | allow_dead_code_item);
ast::Item {
id,
ident,
attrs: attrs.into_iter()
.filter(|attr| {
!attr.check_name("main") && !attr.check_name("start")
})
.chain(iter::once(allow_dead_code))
.collect(),
node,
vis,
span,
tokens,
}
}),
EntryPointType::None |
EntryPointType::OtherMain => item,
};
smallvec![item]
}
fn visit_mac(&mut self, _mac: &mut ast::Mac) {
// Do nothing.
}
}
/// Creates an item (specifically a module) that "pub use"s the tests passed in.
/// Each tested submodule will contain a similar reexport module that we will export
/// under the name of the original module. That is, `submod::__test_reexports` is
/// reexported like so `pub use submod::__test_reexports as submod`.
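/// As a rough sketch, a module containing tests `a` and `b` and a tested submodule `sub`
/// ends up with a hidden module along the lines of
/// `pub mod __test_reexports { pub use super::a; pub use super::b; pub use super::sub::__test_reexports as sub; }`.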
fn mk_reexport_mod(cx: &mut TestCtxt<'_>,
parent: ast::NodeId,
tests: Vec<Ident>,
tested_submods: Vec<(Ident, Ident)>)
-> (P<ast::Item>, Ident) {
let super_ = Ident::from_str("super");
let items = tests.into_iter().map(|r| {
cx.ext_cx.item_use_simple(DUMMY_SP, dummy_spanned(ast::VisibilityKind::Public),
cx.ext_cx.path(DUMMY_SP, vec![super_, r]))
}).chain(tested_submods.into_iter().map(|(r, sym)| {
let path = cx.ext_cx.path(DUMMY_SP, vec![super_, r, sym]);
cx.ext_cx.item_use_simple_(DUMMY_SP, dummy_spanned(ast::VisibilityKind::Public),
Some(r), path)
})).collect();
let reexport_mod = ast::Mod {
inline: true,
inner: DUMMY_SP,
items,
};
let sym = Ident::with_empty_ctxt(Symbol::gensym("__test_reexports"));
let parent = if parent == ast::DUMMY_NODE_ID { ast::CRATE_NODE_ID } else { parent };
cx.ext_cx.current_expansion.mark = cx.ext_cx.resolver.get_module_scope(parent);
let it = cx.ext_cx.monotonic_expander().flat_map_item(P(ast::Item {
ident: sym,
attrs: Vec::new(),
id: ast::DUMMY_NODE_ID,
node: ast::ItemKind::Mod(reexport_mod),
vis: dummy_spanned(ast::VisibilityKind::Public),
span: DUMMY_SP,
tokens: None,
})).pop().unwrap();
(it, sym)
}
/// Crawl over the crate, inserting test reexports and the test main function
fn generate_test_harness(sess: &ParseSess,
resolver: &mut dyn Resolver,
reexport_test_harness_main: Option<Symbol>,
krate: &mut ast::Crate,
sd: &errors::Handler,
features: &Features,
test_runner: Option<ast::Path>) {
// Remove the entry points
let mut cleaner = EntryPointCleaner { depth: 0 };
cleaner.visit_crate(krate);
let mark = Mark::fresh(Mark::root());
let mut econfig = ExpansionConfig::default("test".to_string());
econfig.features = Some(features);
let cx = TestCtxt {
span_diagnostic: sd,
ext_cx: ExtCtxt::new(sess, econfig, resolver),
path: Vec::new(),
test_cases: Vec::new(),
reexport_test_harness_main,
// N.B., doesn't consider the value of `--crate-name` passed on the command line.
is_libtest: attr::find_crate_name(&krate.attrs).map(|s| s == "test").unwrap_or(false),
toplevel_reexport: None,
ctxt: SyntaxContext::empty().apply_mark(mark),
features,
test_runner
};
mark.set_expn_info(ExpnInfo {
call_site: DUMMY_SP,
def_site: None,
format: MacroAttribute(Symbol::intern("test_case")),
allow_internal_unstable: Some(vec![
Symbol::intern("main"),
Symbol::intern("test"),
Symbol::intern("rustc_attrs"),
].into()),
allow_internal_unsafe: false,
local_inner_macros: false,
edition: hygiene::default_edition(),
});
TestHarnessGenerator {
cx,
tests: Vec::new(),
tested_submods: Vec::new(),
}.visit_crate(krate);
}
/// Craft a span that will be ignored by the stability lint's
/// call to source_map's `is_internal` check.
/// The expanded code calls some unstable functions in the test crate.
fn ignored_span(cx: &TestCtxt<'_>, sp: Span) -> Span {
sp.with_ctxt(cx.ctxt)
}
enum HasTestSignature {
Yes,
No(BadTestSignature),
}
#[derive(PartialEq)]
enum BadTestSignature {
NotEvenAFunction,
WrongTypeSignature,
NoArgumentsAllowed,
ShouldPanicOnlyWithNoArgs,
}
/// Creates a function item for use as the main function of a test build.
/// This function will call the `test_runner` as specified by the crate attribute
fn mk_main(cx: &mut TestCtxt<'_>) -> P<ast::Item> {
// Writing this out by hand with 'ignored_span':
// pub fn main() {
// #![main]
// test::test_main_static(::std::os::args().as_slice(), &[..tests]);
// }
let sp = ignored_span(cx, DUMMY_SP);
let ecx = &cx.ext_cx;
let test_id = ecx.ident_of("test").gensym();
// test::test_main_static(...)
let mut test_runner = cx.test_runner.clone().unwrap_or(
ecx.path(sp, vec![
test_id, ecx.ident_of("test_main_static")
]));
test_runner.span = sp;
let test_main_path_expr = ecx.expr_path(test_runner);
let call_test_main = ecx.expr_call(sp, test_main_path_expr,
vec![mk_tests_slice(cx)]);
let call_test_main = ecx.stmt_expr(call_test_main);
// #![main]
let main_meta = ecx.meta_word(sp, Symbol::intern("main"));
let main_attr = ecx.attribute(sp, main_meta);
// extern crate test as test_gensym
let test_extern_stmt = ecx.stmt_item(sp, ecx.item(sp,
test_id,
vec![],
ast::ItemKind::ExternCrate(Some(Symbol::intern("test")))
));
// pub fn main() { ... }
let main_ret_ty = ecx.ty(sp, ast::TyKind::Tup(vec![]));
// If no test runner is provided we need to import the test crate
let main_body = if cx.test_runner.is_none() {
ecx.block(sp, vec![test_extern_stmt, call_test_main])
} else {
ecx.block(sp, vec![call_test_main])
};
let main = ast::ItemKind::Fn(ecx.fn_decl(vec![], ast::FunctionRetTy::Ty(main_ret_ty)),
ast::FnHeader::default(),
ast::Generics::default(),
main_body);
// Honor the reexport_test_harness_main attribute
let main_id = Ident::new(
cx.reexport_test_harness_main.unwrap_or(Symbol::gensym("main")),
sp);
P(ast::Item {
ident: main_id,
attrs: vec![main_attr],
id: ast::DUMMY_NODE_ID,
node: main,
vis: dummy_spanned(ast::VisibilityKind::Public),
span: sp,
tokens: None,
})
}
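/// Joins a sequence of idents into a single `::`-separated path string.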
fn path_name_i(idents: &[Ident]) -> String {
let mut path_name = "".to_string();
let mut idents_iter = idents.iter().peekable();
while let Some(ident) = idents_iter.next() {
path_name.push_str(&ident.as_str());
if idents_iter.peek().is_some() {
path_name.push_str("::")
}
}
path_name
}
/// Creates a slice containing every test like so:
/// &[path::to::test1, path::to::test2]
fn mk_tests_slice(cx: &TestCtxt<'_>) -> P<ast::Expr> {
debug!("building test vector from {} tests", cx.test_cases.len());
let ref ecx = cx.ext_cx;
ecx.expr_vec_slice(DUMMY_SP,
cx.test_cases.iter().map(|test| {
ecx.expr_addr_of(test.span,
ecx.expr_path(ecx.path(test.span, visible_path(cx, &test.path))))
}).collect())
}
/// Creates a path from the top-level __test module to the test via __test_reexports
fn visible_path(cx: &TestCtxt<'_>, path: &[Ident]) -> Vec<Ident>{
let mut visible_path = vec![];
match cx.toplevel_reexport {
Some(id) => visible_path.push(id),
None => {
cx.span_diagnostic.bug("expected to find top-level re-export name, but found None");
}
}
visible_path.extend_from_slice(path);
visible_path
}
fn is_test_case(i: &ast::Item) -> bool | {
attr::contains_name(&i.attrs, "rustc_test_marker")
} | identifier_body |
|
test.rs | some_name;`. This needs to be
// unconditional, so that the attribute is still marked as used in
// non-test builds.
let reexport_test_harness_main =
attr::first_attr_value_str_by_name(&krate.attrs,
"reexport_test_harness_main");
// Do this here so that the test_runner crate attribute gets marked as used
// even in non-test builds
let test_runner = get_test_runner(span_diagnostic, &krate);
if should_test {
generate_test_harness(sess, resolver, reexport_test_harness_main,
krate, span_diagnostic, features, test_runner)
}
}
struct TestHarnessGenerator<'a> {
cx: TestCtxt<'a>,
tests: Vec<Ident>,
// submodule name, gensym'd identifier for re-exports
tested_submods: Vec<(Ident, Ident)>,
}
impl<'a> MutVisitor for TestHarnessGenerator<'a> {
fn visit_crate(&mut self, c: &mut ast::Crate) {
noop_visit_crate(c, self);
// Create a main function to run our tests
let test_main = {
let unresolved = mk_main(&mut self.cx);
self.cx.ext_cx.monotonic_expander().flat_map_item(unresolved).pop().unwrap()
};
c.module.items.push(test_main);
}
fn flat_map_item(&mut self, i: P<ast::Item>) -> SmallVec<[P<ast::Item>; 1]> {
let ident = i.ident;
if ident.name != keywords::Invalid.name() {
self.cx.path.push(ident);
}
debug!("current path: {}", path_name_i(&self.cx.path));
let mut item = i.into_inner();
if is_test_case(&item) {
debug!("this is a test item");
let test = Test {
span: item.span,
path: self.cx.path.clone(),
};
self.cx.test_cases.push(test);
self.tests.push(item.ident);
}
// We don't want to recurse into anything other than mods, since
// mods or tests inside of functions will break things
if let ast::ItemKind::Mod(mut module) = item.node {
let tests = mem::replace(&mut self.tests, Vec::new());
let tested_submods = mem::replace(&mut self.tested_submods, Vec::new());
noop_visit_mod(&mut module, self);
let tests = mem::replace(&mut self.tests, tests);
let tested_submods = mem::replace(&mut self.tested_submods, tested_submods);
if !tests.is_empty() || !tested_submods.is_empty() {
let (it, sym) = mk_reexport_mod(&mut self.cx, item.id, tests, tested_submods);
module.items.push(it);
if !self.cx.path.is_empty() {
self.tested_submods.push((self.cx.path[self.cx.path.len()-1], sym));
} else {
debug!("pushing nothing, sym: {:?}", sym);
self.cx.toplevel_reexport = Some(sym);
}
}
item.node = ast::ItemKind::Mod(module);
}
if ident.name != keywords::Invalid.name() {
self.cx.path.pop();
}
smallvec![P(item)]
}
fn visit_mac(&mut self, _mac: &mut ast::Mac) {
// Do nothing.
}
}
/// A folder used to remove any entry points (like fn main) because the harness
/// generator will provide its own
struct EntryPointCleaner {
// Current depth in the ast
depth: usize,
} | fn flat_map_item(&mut self, i: P<ast::Item>) -> SmallVec<[P<ast::Item>; 1]> {
self.depth += 1;
let item = noop_flat_map_item(i, self).expect_one("noop did something");
self.depth -= 1;
// Remove any #[main] or #[start] from the AST so it doesn't
// clash with the one we're going to add, but mark it as
// #[allow(dead_code)] to avoid printing warnings.
let item = match entry::entry_point_type(&item, self.depth) {
EntryPointType::MainNamed |
EntryPointType::MainAttr |
EntryPointType::Start =>
item.map(|ast::Item {id, ident, attrs, node, vis, span, tokens}| {
let allow_ident = Ident::from_str("allow");
let dc_nested = attr::mk_nested_word_item(Ident::from_str("dead_code"));
let allow_dead_code_item = attr::mk_list_item(DUMMY_SP, allow_ident,
vec![dc_nested]);
let allow_dead_code = attr::mk_attr_outer(DUMMY_SP,
attr::mk_attr_id(),
allow_dead_code_item);
ast::Item {
id,
ident,
attrs: attrs.into_iter()
.filter(|attr| {
!attr.check_name("main") && !attr.check_name("start")
})
.chain(iter::once(allow_dead_code))
.collect(),
node,
vis,
span,
tokens,
}
}),
EntryPointType::None |
EntryPointType::OtherMain => item,
};
smallvec![item]
}
fn visit_mac(&mut self, _mac: &mut ast::Mac) {
// Do nothing.
}
}
/// Creates an item (specifically a module) that "pub use"s the tests passed in.
/// Each tested submodule will contain a similar reexport module that we will export
/// under the name of the original module. That is, `submod::__test_reexports` is
/// reexported like so `pub use submod::__test_reexports as submod`.
fn mk_reexport_mod(cx: &mut TestCtxt<'_>,
parent: ast::NodeId,
tests: Vec<Ident>,
tested_submods: Vec<(Ident, Ident)>)
-> (P<ast::Item>, Ident) {
let super_ = Ident::from_str("super");
let items = tests.into_iter().map(|r| {
cx.ext_cx.item_use_simple(DUMMY_SP, dummy_spanned(ast::VisibilityKind::Public),
cx.ext_cx.path(DUMMY_SP, vec![super_, r]))
}).chain(tested_submods.into_iter().map(|(r, sym)| {
let path = cx.ext_cx.path(DUMMY_SP, vec![super_, r, sym]);
cx.ext_cx.item_use_simple_(DUMMY_SP, dummy_spanned(ast::VisibilityKind::Public),
Some(r), path)
})).collect();
let reexport_mod = ast::Mod {
inline: true,
inner: DUMMY_SP,
items,
};
let sym = Ident::with_empty_ctxt(Symbol::gensym("__test_reexports"));
let parent = if parent == ast::DUMMY_NODE_ID { ast::CRATE_NODE_ID } else { parent };
cx.ext_cx.current_expansion.mark = cx.ext_cx.resolver.get_module_scope(parent);
let it = cx.ext_cx.monotonic_expander().flat_map_item(P(ast::Item {
ident: sym,
attrs: Vec::new(),
id: ast::DUMMY_NODE_ID,
node: ast::ItemKind::Mod(reexport_mod),
vis: dummy_spanned(ast::VisibilityKind::Public),
span: DUMMY_SP,
tokens: None,
})).pop().unwrap();
(it, sym)
}
/// Crawl over the crate, inserting test reexports and the test main function
fn generate_test_harness(sess: &ParseSess,
resolver: &mut dyn Resolver,
reexport_test_harness_main: Option<Symbol>,
krate: &mut ast::Crate,
sd: &errors::Handler,
features: &Features,
test_runner: Option<ast::Path>) {
// Remove the entry points
let mut cleaner = EntryPointCleaner { depth: 0 };
cleaner.visit_crate(krate);
let mark = Mark::fresh(Mark::root());
let mut econfig = ExpansionConfig::default("test".to_string());
econfig.features = Some(features);
let cx = TestCtxt {
span_diagnostic: sd,
ext_cx: ExtCtxt::new(sess, econfig, resolver),
path: Vec::new(),
test_cases: Vec::new(),
reexport_test_harness_main,
// N.B., doesn't consider the value of `--crate-name` passed on the command line.
is_libtest: attr::find_crate_name(&krate.attrs).map(|s| s == "test").unwrap_or(false),
toplevel_reexport: None,
ctxt: SyntaxContext::empty().apply_mark(mark),
features,
test_runner
};
mark.set_expn_info(ExpnInfo {
call_site: DUMMY_SP,
def_site: None,
format: MacroAttribute(Symbol::intern("test_case")),
allow_internal_unstable: Some(vec![
Symbol::intern("main"),
Symbol::intern("test"),
Symbol::intern("rustc_attrs"),
].into()),
allow_internal_unsafe: false,
local_inner_macros: false,
edition: hygiene::default_edition(),
});
TestHarnessGenerator {
cx,
tests: Vec::new(),
tested_submods: Vec::new(),
}.visit_crate(krate);
}
/// Craft a span that will be |
impl MutVisitor for EntryPointCleaner { | random_line_split |
test.rs | some_name;`. This needs to be
// unconditional, so that the attribute is still marked as used in
// non-test builds.
let reexport_test_harness_main =
attr::first_attr_value_str_by_name(&krate.attrs,
"reexport_test_harness_main");
// Do this here so that the test_runner crate attribute gets marked as used
// even in non-test builds
let test_runner = get_test_runner(span_diagnostic, &krate);
if should_test {
generate_test_harness(sess, resolver, reexport_test_harness_main,
krate, span_diagnostic, features, test_runner)
}
}
struct TestHarnessGenerator<'a> {
cx: TestCtxt<'a>,
tests: Vec<Ident>,
// submodule name, gensym'd identifier for re-exports
tested_submods: Vec<(Ident, Ident)>,
}
impl<'a> MutVisitor for TestHarnessGenerator<'a> {
fn visit_crate(&mut self, c: &mut ast::Crate) {
noop_visit_crate(c, self);
// Create a main function to run our tests
let test_main = {
let unresolved = mk_main(&mut self.cx);
self.cx.ext_cx.monotonic_expander().flat_map_item(unresolved).pop().unwrap()
};
c.module.items.push(test_main);
}
fn | (&mut self, i: P<ast::Item>) -> SmallVec<[P<ast::Item>; 1]> {
let ident = i.ident;
if ident.name != keywords::Invalid.name() {
self.cx.path.push(ident);
}
debug!("current path: {}", path_name_i(&self.cx.path));
let mut item = i.into_inner();
if is_test_case(&item) {
debug!("this is a test item");
let test = Test {
span: item.span,
path: self.cx.path.clone(),
};
self.cx.test_cases.push(test);
self.tests.push(item.ident);
}
// We don't want to recurse into anything other than mods, since
// mods or tests inside of functions will break things
if let ast::ItemKind::Mod(mut module) = item.node {
let tests = mem::replace(&mut self.tests, Vec::new());
let tested_submods = mem::replace(&mut self.tested_submods, Vec::new());
noop_visit_mod(&mut module, self);
let tests = mem::replace(&mut self.tests, tests);
let tested_submods = mem::replace(&mut self.tested_submods, tested_submods);
if !tests.is_empty() || !tested_submods.is_empty() {
let (it, sym) = mk_reexport_mod(&mut self.cx, item.id, tests, tested_submods);
module.items.push(it);
if !self.cx.path.is_empty() {
self.tested_submods.push((self.cx.path[self.cx.path.len()-1], sym));
} else {
debug!("pushing nothing, sym: {:?}", sym);
self.cx.toplevel_reexport = Some(sym);
}
}
item.node = ast::ItemKind::Mod(module);
}
if ident.name != keywords::Invalid.name() {
self.cx.path.pop();
}
smallvec![P(item)]
}
fn visit_mac(&mut self, _mac: &mut ast::Mac) {
// Do nothing.
}
}
/// A folder used to remove any entry points (like fn main) because the harness
/// generator will provide its own
struct EntryPointCleaner {
// Current depth in the ast
depth: usize,
}
impl MutVisitor for EntryPointCleaner {
fn flat_map_item(&mut self, i: P<ast::Item>) -> SmallVec<[P<ast::Item>; 1]> {
self.depth += 1;
let item = noop_flat_map_item(i, self).expect_one("noop did something");
self.depth -= 1;
// Remove any #[main] or #[start] from the AST so it doesn't
// clash with the one we're going to add, but mark it as
// #[allow(dead_code)] to avoid printing warnings.
let item = match entry::entry_point_type(&item, self.depth) {
EntryPointType::MainNamed |
EntryPointType::MainAttr |
EntryPointType::Start =>
item.map(|ast::Item {id, ident, attrs, node, vis, span, tokens}| {
let allow_ident = Ident::from_str("allow");
let dc_nested = attr::mk_nested_word_item(Ident::from_str("dead_code"));
let allow_dead_code_item = attr::mk_list_item(DUMMY_SP, allow_ident,
vec![dc_nested]);
let allow_dead_code = attr::mk_attr_outer(DUMMY_SP,
attr::mk_attr_id(),
allow_dead_code_item);
ast::Item {
id,
ident,
attrs: attrs.into_iter()
.filter(|attr| {
!attr.check_name("main") && !attr.check_name("start")
})
.chain(iter::once(allow_dead_code))
.collect(),
node,
vis,
span,
tokens,
}
}),
EntryPointType::None |
EntryPointType::OtherMain => item,
};
smallvec![item]
}
fn visit_mac(&mut self, _mac: &mut ast::Mac) {
// Do nothing.
}
}
/// Creates an item (specifically a module) that "pub use"s the tests passed in.
/// Each tested submodule will contain a similar reexport module that we will export
/// under the name of the original module. That is, `submod::__test_reexports` is
/// reexported like so `pub use submod::__test_reexports as submod`.
fn mk_reexport_mod(cx: &mut TestCtxt<'_>,
parent: ast::NodeId,
tests: Vec<Ident>,
tested_submods: Vec<(Ident, Ident)>)
-> (P<ast::Item>, Ident) {
let super_ = Ident::from_str("super");
let items = tests.into_iter().map(|r| {
cx.ext_cx.item_use_simple(DUMMY_SP, dummy_spanned(ast::VisibilityKind::Public),
cx.ext_cx.path(DUMMY_SP, vec![super_, r]))
}).chain(tested_submods.into_iter().map(|(r, sym)| {
let path = cx.ext_cx.path(DUMMY_SP, vec![super_, r, sym]);
cx.ext_cx.item_use_simple_(DUMMY_SP, dummy_spanned(ast::VisibilityKind::Public),
Some(r), path)
})).collect();
let reexport_mod = ast::Mod {
inline: true,
inner: DUMMY_SP,
items,
};
let sym = Ident::with_empty_ctxt(Symbol::gensym("__test_reexports"));
let parent = if parent == ast::DUMMY_NODE_ID { ast::CRATE_NODE_ID } else { parent };
cx.ext_cx.current_expansion.mark = cx.ext_cx.resolver.get_module_scope(parent);
let it = cx.ext_cx.monotonic_expander().flat_map_item(P(ast::Item {
ident: sym,
attrs: Vec::new(),
id: ast::DUMMY_NODE_ID,
node: ast::ItemKind::Mod(reexport_mod),
vis: dummy_spanned(ast::VisibilityKind::Public),
span: DUMMY_SP,
tokens: None,
})).pop().unwrap();
(it, sym)
}
/// Crawl over the crate, inserting test reexports and the test main function
fn generate_test_harness(sess: &ParseSess,
resolver: &mut dyn Resolver,
reexport_test_harness_main: Option<Symbol>,
krate: &mut ast::Crate,
sd: &errors::Handler,
features: &Features,
test_runner: Option<ast::Path>) {
// Remove the entry points
let mut cleaner = EntryPointCleaner { depth: 0 };
cleaner.visit_crate(krate);
let mark = Mark::fresh(Mark::root());
let mut econfig = ExpansionConfig::default("test".to_string());
econfig.features = Some(features);
let cx = TestCtxt {
span_diagnostic: sd,
ext_cx: ExtCtxt::new(sess, econfig, resolver),
path: Vec::new(),
test_cases: Vec::new(),
reexport_test_harness_main,
// N.B., doesn't consider the value of `--crate-name` passed on the command line.
is_libtest: attr::find_crate_name(&krate.attrs).map(|s| s == "test").unwrap_or(false),
toplevel_reexport: None,
ctxt: SyntaxContext::empty().apply_mark(mark),
features,
test_runner
};
mark.set_expn_info(ExpnInfo {
call_site: DUMMY_SP,
def_site: None,
format: MacroAttribute(Symbol::intern("test_case")),
allow_internal_unstable: Some(vec![
Symbol::intern("main"),
Symbol::intern("test"),
Symbol::intern("rustc_attrs"),
].into()),
allow_internal_unsafe: false,
local_inner_macros: false,
edition: hygiene::default_edition(),
});
TestHarnessGenerator {
cx,
tests: Vec::new(),
tested_submods: Vec::new(),
}.visit_crate(krate);
}
/// Craft a span that will be | flat_map_item | identifier_name |
healthCheck.js | maxBlockTime) maxBlockTime = block.timestamp
if (block.transactions.length) {
for (const tx of block.transactions) {
// If transaction is from audius account, determine success or fail status
if (RELAY_HEALTH_ACCOUNTS.has(tx.from)) {
const txHash = tx.hash
const resp = await web3.eth.getTransactionReceipt(txHash)
txCounter++
// tx failed
if (!resp.status) {
const senderAddress = await redis.hget('txHashToSenderAddress', txHash)
if (senderAddress) {
if (!failureTxs[senderAddress]) failureTxs[senderAddress] = [txHash]
else failureTxs[senderAddress].push(txHash)
} else {
failureTxs['unknown'] = (failureTxs['unknown'] || []).concat(txHash)
}
}
}
}
}
}
let isError = false
// delete old entries from set in redis
const epochOneHourAgo = Math.floor(Date.now() / 1000) - 3600
await redis.zremrangebyscore('relayTxAttempts', '-inf', epochOneHourAgo)
await redis.zremrangebyscore('relayTxFailures', '-inf', epochOneHourAgo)
await redis.zremrangebyscore('relayTxSuccesses', '-inf', epochOneHourAgo)
// check whether any relay attempts were recorded in the time window covered by the scanned blocks
const attemptedTxsInRedis = await redis.zrangebyscore('relayTxAttempts', minBlockTime, maxBlockTime)
const successfulTxsInRedis = await redis.zrangebyscore('relayTxSuccesses', minBlockTime, maxBlockTime)
const failureTxsInRedis = await redis.zrangebyscore('relayTxFailures', minBlockTime, maxBlockTime)
if (txCounter < minTransactions) isError = true
const serverResponse = {
blockchain: {
numberOfTransactions: txCounter,
minTransactions,
numberOfFailedTransactions: flatten(Object.values(failureTxs)).length,
failedTransactionHashes: failureTxs,
startBlock: startBlockNumber,
endBlock: endBlockNumber
},
redis: {
attemptedTxsCount: attemptedTxsInRedis.length,
successfulTxsCount: successfulTxsInRedis.length,
failureTxsCount: failureTxsInRedis.length
},
healthCheckComputeTime: Date.now() - start
}
if (isVerbose) {
serverResponse.redis = {
...serverResponse.redis,
attemptedTxsInRedis,
successfulTxsInRedis,
failureTxsInRedis
}
}
if (isError) return errorResponseServerError(serverResponse)
else return successResponse(serverResponse)
}))
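// Basic liveness check: verifies database connectivity and reports the currently selected discovery provider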
app.get('/health_check', handleResponse(async (req, res) => {
// for now we just check db connectivity
await sequelize.query('SELECT 1', { type: sequelize.QueryTypes.SELECT })
// get connected discprov via libs
const audiusLibsInstance = req.app.get('audiusLibs')
return successResponse({ 'healthy': true, 'git': process.env.GIT_SHA, selectedDiscoveryProvider: audiusLibsInstance.discoveryProvider.discoveryProviderEndpoint })
}))
app.get('/balance_check', handleResponse(async (req, res) => {
let { minimumBalance, minimumRelayerBalance } = req.query
minimumBalance = parseFloat(minimumBalance || config.get('minimumBalance'))
minimumRelayerBalance = parseFloat(minimumRelayerBalance || config.get('minimumRelayerBalance'))
let belowMinimumBalances = []
let balances = []
// run fundRelayerIfEmpty so it'll auto top off any accounts below the threshold
try {
await fundRelayerIfEmpty()
} catch (err) {
req.logger.error(`Failed to fund relayer with error: ${err}`)
}
balances = await Promise.all(
[...RELAY_HEALTH_ACCOUNTS].map(async account => {
let balance = parseFloat(Web3.utils.fromWei(await getRelayerFunds(account), 'ether'))
if (balance < minimumBalance) {
belowMinimumBalances.push({ account, balance })
}
return { account, balance }
})
)
const relayerPublicKey = config.get('relayerPublicKey')
const relayerBalance = parseFloat(Web3.utils.fromWei(await getRelayerFunds(relayerPublicKey), 'ether'))
const relayerAboveMinimum = relayerBalance >= minimumRelayerBalance
// no accounts below minimum balance
if (!belowMinimumBalances.length && relayerAboveMinimum) {
return successResponse({
'above_balance_minimum': true,
'minimum_balance': minimumBalance,
'balances': balances,
'relayer': {
'wallet': relayerPublicKey,
'balance': relayerBalance,
'above_balance_minimum': relayerAboveMinimum
}
})
} else {
return errorResponseServerError({
'above_balance_minimum': false,
'minimum_balance': minimumBalance,
'balances': balances,
'below_minimum_balance': belowMinimumBalances,
'relayer': {
'wallet': relayerPublicKey,
'balance': relayerBalance,
'above_balance_minimum': relayerAboveMinimum
}
})
}
}))
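// Reports ETH balances for the relay accounts and the funder wallet, flagging any that fall below the configured minimums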
app.get('/eth_balance_check', handleResponse(async (req, res) => {
let { minimumBalance, minimumFunderBalance } = req.query
minimumBalance = parseFloat(minimumBalance || config.get('ethMinimumBalance'))
minimumFunderBalance = parseFloat(minimumFunderBalance || config.get('ethMinimumFunderBalance'))
let funderAddress = config.get('ethFunderAddress')
let funderBalance = parseFloat(Web3.utils.fromWei(await getEthRelayerFunds(funderAddress), 'ether'))
let funderAboveMinimum = funderBalance >= minimumFunderBalance
let belowMinimumBalances = []
const balances = await Promise.all(
[...ETH_RELAY_HEALTH_ACCOUNTS].map(async account => {
let balance = parseFloat(Web3.utils.fromWei(await getEthRelayerFunds(account), 'ether'))
if (balance < minimumBalance) {
belowMinimumBalances.push({ account, balance })
}
return { account, balance }
})
)
let balanceResponse = {
'minimum_balance': minimumBalance,
'balances': balances,
'funder': {
'wallet': funderAddress,
'balance': funderBalance,
'above_balance_minimum': funderAboveMinimum
}
}
// no accounts below minimum balance
if (!belowMinimumBalances.length && funderAboveMinimum) {
return successResponse({
'above_balance_minimum': true,
...balanceResponse
})
} else {
return errorResponseServerError({
'above_balance_minimum': false,
'below_minimum_balance': belowMinimumBalances,
...balanceResponse
})
}
}))
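// Reports the Solana fee payer balance (in SOL and lamports) and whether it is above the configured minimum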
app.get('/sol_balance_check', handleResponse(async (req, res) => {
const minimumBalance = parseFloat(req.query.minimumBalance || config.get('solMinimumBalance'))
const solanaFeePayerWallet = config.get('solanaFeePayerWallet')
let solanaFeePayerPublicKey = null
let balance = 0
if (solanaFeePayerWallet) {
solanaFeePayerPublicKey = (new solanaWeb3.Account(solanaFeePayerWallet)).publicKey
balance = await solanaConnection.getBalance(solanaFeePayerPublicKey)
}
const sol = Math.floor(balance / (10 ** 9))
const lamports = balance % (10 ** 9)
if (balance > minimumBalance) {
return successResponse({
above_balance_minimum: true,
balance: { sol, lamports },
wallet: solanaFeePayerPublicKey ? solanaFeePayerPublicKey.toBase58() : null
})
}
return errorResponseServerError({
above_balance_minimum: false,
balance: { sol, lamports },
wallet: solanaFeePayerPublicKey ? solanaFeePayerPublicKey.toBase58() : null
})
}))
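// Checks notification indexing health: compares the highest processed blocknumber against the discovery provider and reads the last-success timestamps of the notification jobs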
app.get('/notification_check', handleResponse(async (req, res) => {
let { maxBlockDifference, maxDrift } = req.query
maxBlockDifference = maxBlockDifference || 100
let highestBlockNumber = await models.NotificationAction.max('blocknumber')
if (!highestBlockNumber) {
highestBlockNumber = config.get('notificationStartBlock')
}
let redis = req.app.get('redis')
let maxFromRedis = await redis.get('maxBlockNumber')
if (maxFromRedis) {
highestBlockNumber = parseInt(maxFromRedis)
}
// Get job success timestamps
const notificationJobLastSuccess = await redis.get(NOTIFICATION_JOB_LAST_SUCCESS_KEY)
const notificationEmailsJobLastSuccess = await redis.get(NOTIFICATION_EMAILS_JOB_LAST_SUCCESS_KEY)
const notificationAnnouncementsJobLastSuccess = await redis.get(NOTIFICATION_ANNOUNCEMENTS_JOB_LAST_SUCCESS_KEY)
const { discoveryProvider } = audiusLibsWrapper.getAudiusLibs()
let body = (await axios({
method: 'get',
url: `${discoveryProvider.discoveryProviderEndpoint}/health_check`
})).data
let discProvDbHighestBlock = body.data['db']['number'] | random_line_split |
||
utils.py | _path, obj, protocol=2):
"""
For python 3 compatibility, use protocol 2
"""
if not file_path.endswith('.pkl'):
file_path += '.pkl'
with open(file_path, 'wb') as opdwf:
pk.dump(obj, opdwf, protocol=protocol)
def unpickle(file_path):
with open(file_path, 'rb') as opdrf:
data = pk.load(opdrf)
return data
# Load data: one feature array per time scale; targets are shared across scales, so the first scale's targets are returned
def load_data_multiscale(data_dir, scale_list):
X_tr_list = list()
y_tr_list = list()
X_te_list = list()
y_te_list = list()
X_va_list = list()
y_va_list = list()
for ii, scale in enumerate(scale_list):
feat_tr_fp = os.path.join(data_dir, scale, 'feat.tr.npy')
target_tr_fp = os.path.join(data_dir, scale, 'target.tr.npy')
feat_va_fp = os.path.join(data_dir, scale, 'feat.va.npy')
target_va_fp = os.path.join(data_dir, scale, 'target.va.npy')
feat_te_fp = os.path.join(data_dir, scale, 'feat.te.npy')
target_te_fp = os.path.join(data_dir, scale, 'target.te.npy')
X_tr = np.load(feat_tr_fp)
y_tr = np.load(target_tr_fp)
X_va = np.load(feat_va_fp)
y_va = np.load(target_va_fp)
X_te = np.load(feat_te_fp)
y_te = np.load(target_te_fp)
# append
X_tr_list.append(X_tr)
y_tr_list.append(y_tr)
X_te_list.append(X_te)
y_te_list.append(y_te)
X_va_list.append(X_va)
y_va_list.append(y_va)
y_tr = y_tr_list[0]
y_va = y_va_list[0]
y_te = y_te_list[0]
return X_tr_list, y_tr, X_va_list, y_va, X_te_list, y_te
def load_data_multiscale_te(data_dir, scale_list):
X_te_list = list()
y_te_list = list()
for ii, scale in enumerate(scale_list):
feat_te_fp = os.path.join(data_dir, scale, 'feat.te.npy')
target_te_fp = os.path.join(data_dir, scale, 'target.te.npy')
X_te = np.load(feat_te_fp)
y_te = np.load(target_te_fp)
# append
X_te_list.append(X_te)
y_te_list.append(y_te)
y_te = y_te_list[0]
return X_te_list, y_te
def load_data_multiscale_va(data_dir, scale_list):
X_va_list = list()
y_va_list = list()
for ii, scale in enumerate(scale_list):
feat_va_fp = os.path.join(data_dir, scale, 'feat.va.npy')
target_va_fp = os.path.join(data_dir, scale, 'target.va.npy')
X_va = np.load(feat_va_fp)
y_va = np.load(target_va_fp)
# append
X_va_list.append(X_va)
y_va_list.append(y_va)
y_va = y_va_list[0]
return X_va_list, y_va
# Recursively convert string to int in a list
def to_int(data_list):
return [to_int(term)
if type(term) == list else int(term) for term in data_list]
# Iterate inputs
def iterate_minibatches_multiscale(inputs_list, targets,
batchsize, shuffle=False):
if type(targets) == np.ndarray:
n = len(targets)
k = targets.shape[-1]
for inputs in inputs_list:
assert len(inputs) == n
if shuffle:
|
for start_idx in range(0, n - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield [inputs[excerpt] for inputs in inputs_list], \
targets[excerpt].reshape((-1, k))
def iterate_minibatches_multiscale_feat(inputs_list, batchsize, shuffle=False):
n = len(inputs_list[0])
for inputs in inputs_list:
assert len(inputs) == n
if shuffle:
indices = np.arange(n)
np.random.shuffle(indices)
for start_idx in range(0, n - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield [inputs[excerpt] for inputs in inputs_list]
# Functions used in train for recording and printing
def check_best_loss(best_val_loss, val_loss):
if val_loss < best_val_loss:
best_val_loss = val_loss
best_val_updated = True
else:
best_val_updated = False
return best_val_loss, best_val_updated
def print_in_train(epoch, n_epochs,
mean_tr_loss, mean_va_loss,
best_va_epoch, best_va_loss):
print("Epoch {} of {}.".format(epoch, n_epochs))
print(" training loss: {:.6f}".format(mean_tr_loss))
print(" validation loss: {:.6f}".format(mean_va_loss))
print(" best va (epoch, loss):({}, {:.6f})".format(
best_va_epoch, best_va_loss
))
print(" ")
# Multiple input sources
def train_multiscale(
X_tr_list, y_tr, X_va_list, y_va,
network,
train_func, va_func,
n_epochs, batch_size, lr_var, param_fp=None):
print("Starting training...")
best_va_epoch = 0
best_va_loss = np.inf
for epoch in range(1, n_epochs+1):
train_loss = 0
train_batches = 0
# Training
for batch_ in iterate_minibatches_multiscale(X_tr_list, y_tr,
batch_size,
shuffle=True):
inputs_list, targets = batch_
temp = inputs_list+[targets]
train_loss_one = train_func(*temp)
train_loss += train_loss_one
train_batches += 1
mean_tr_loss = train_loss/train_batches
# Validation
pre_list, mean_va_loss = validate_multiscale(X_va_list, y_va,
va_func)
# Check best loss
best_va_loss, best_va_updated = check_best_loss(
best_va_loss, mean_va_loss)
if best_va_updated:
best_va_epoch = epoch
if param_fp is not None:
save_model(param_fp, network)
# Print the results for this epoch:
print_in_train(epoch, n_epochs,
mean_tr_loss, mean_va_loss,
best_va_epoch, best_va_loss)
def validate_multiscale(X_list, y, val_func):
val_loss = 0
val_batches = 0
pre_list = []
for batch in iterate_minibatches_multiscale(X_list, y, 1, shuffle=False):
inputs_list, targets = batch
temp = inputs_list+[targets]
pre, loss = val_func(*temp)
val_loss += loss
val_batches += 1
pre_list.append(pre)
mean_val_loss = val_loss / val_batches
return pre_list, mean_val_loss
def predict_multiscale(X_list, pr_func):
pre_list = []
for inputs_list in iterate_minibatches_multiscale_feat(
X_list, 1, shuffle=False):
pre = pr_func(*inputs_list)
pre_list.append(pre)
return pre_list
# Save/load
def save_model(fp, network):
np.savez(fp, *lasagne.layers.get_all_param_values(network))
def load_model(fp, network):
with np.load(fp) as f:
param_values = [f['arr_%d' % i] for i in range(len(f.files))]
lasagne.layers.set_all_param_values(network, param_values)
# Get thresholds
def f1_one(y_target, y_predicted):
'''
y_target, y_predicted:
1D binary array
'''
return f1_score(y_target, y_predicted, average='binary')
def f1(Y_target, Y_predicted):
'''
Y_target, Y_predicted:
n x k 2D binary array, where n is the number of data and
k is the number of tags
'''
scores = [f1_one(y_target, y_predicted)
for y_target, y_predicted in zip(Y_target.T, Y_predicted.T)]
scores = np.array(scores)
return scores
def get_measure(arg):
threshold, prediction, target, step_size, lower_b, measure_func = arg
pred_binary = ((prediction-threshold) > 0).astype(int)
measures = measure_func(target, pred_binary)
return measures
def get_thresholds(pred, target, search_range, step_size, measure_func=f1,
n_processes=20):
'''
pred: np.array
prediction from a model
n x k 2D array, where n is the number of data and
k is the number of tags
target: np.array
groundtruth
n x k 2D binary array, where n is the number of | indices = np.arange(n)
np.random.shuffle(indices) | conditional_block |
utils.py | (file_path, obj, protocol=2):
"""
For python 3 compatibility, use protocol 2
"""
if not file_path.endswith('.pkl'):
file_path += '.pkl'
with open(file_path, 'wb') as opdwf:
pk.dump(obj, opdwf, protocol=protocol)
def unpickle(file_path):
with open(file_path, 'rb') as opdrf:
data = pk.load(opdrf)
return data
# Load data
def load_data_multiscale(data_dir, scale_list):
X_tr_list = list()
y_tr_list = list()
X_te_list = list()
y_te_list = list()
X_va_list = list()
y_va_list = list()
for ii, scale in enumerate(scale_list):
feat_tr_fp = os.path.join(data_dir, scale, 'feat.tr.npy')
target_tr_fp = os.path.join(data_dir, scale, 'target.tr.npy')
feat_va_fp = os.path.join(data_dir, scale, 'feat.va.npy')
target_va_fp = os.path.join(data_dir, scale, 'target.va.npy')
feat_te_fp = os.path.join(data_dir, scale, 'feat.te.npy')
target_te_fp = os.path.join(data_dir, scale, 'target.te.npy')
X_tr = np.load(feat_tr_fp)
y_tr = np.load(target_tr_fp)
X_va = np.load(feat_va_fp)
y_va = np.load(target_va_fp)
X_te = np.load(feat_te_fp)
y_te = np.load(target_te_fp)
# append
X_tr_list.append(X_tr)
y_tr_list.append(y_tr)
X_te_list.append(X_te)
y_te_list.append(y_te)
X_va_list.append(X_va)
y_va_list.append(y_va)
y_tr = y_tr_list[0]
y_va = y_va_list[0]
y_te = y_te_list[0]
return X_tr_list, y_tr, X_va_list, y_va, X_te_list, y_te
def load_data_multiscale_te(data_dir, scale_list):
X_te_list = list()
y_te_list = list()
for ii, scale in enumerate(scale_list):
feat_te_fp = os.path.join(data_dir, scale, 'feat.te.npy')
target_te_fp = os.path.join(data_dir, scale, 'target.te.npy')
X_te = np.load(feat_te_fp)
y_te = np.load(target_te_fp)
# append
X_te_list.append(X_te)
y_te_list.append(y_te)
y_te = y_te_list[0]
return X_te_list, y_te
def load_data_multiscale_va(data_dir, scale_list):
X_va_list = list()
y_va_list = list()
for ii, scale in enumerate(scale_list):
feat_va_fp = os.path.join(data_dir, scale, 'feat.va.npy')
target_va_fp = os.path.join(data_dir, scale, 'target.va.npy')
X_va = np.load(feat_va_fp)
y_va = np.load(target_va_fp)
# append
X_va_list.append(X_va)
y_va_list.append(y_va)
y_va = y_va_list[0]
return X_va_list, y_va
# Recursively convert string to int in a list
def to_int(data_list):
return [to_int(term)
if type(term) == list else int(term) for term in data_list]
# Iterate inputs
def iterate_minibatches_multiscale(inputs_list, targets,
batchsize, shuffle=False):
if type(targets) == np.ndarray:
n = len(targets)
k = targets.shape[-1]
for inputs in inputs_list:
assert len(inputs) == n
if shuffle:
indices = np.arange(n)
np.random.shuffle(indices)
for start_idx in range(0, n - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield [inputs[excerpt] for inputs in inputs_list], \
targets[excerpt].reshape((-1, k))
def iterate_minibatches_multiscale_feat(inputs_list, batchsize, shuffle=False):
n = len(inputs_list[0])
for inputs in inputs_list:
assert len(inputs) == n
if shuffle:
indices = np.arange(n)
np.random.shuffle(indices)
for start_idx in range(0, n - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield [inputs[excerpt] for inputs in inputs_list]
# Functions used in train for recording and printing
def check_best_loss(best_val_loss, val_loss):
if val_loss < best_val_loss:
best_val_loss = val_loss
best_val_updated = True
else:
best_val_updated = False
return best_val_loss, best_val_updated
def print_in_train(epoch, n_epochs,
mean_tr_loss, mean_va_loss,
best_va_epoch, best_va_loss):
print("Epoch {} of {}.".format(epoch, n_epochs))
print(" training loss: {:.6f}".format(mean_tr_loss))
print(" validation loss: {:.6f}".format(mean_va_loss))
print(" best va (epoch, loss):({}, {:.6f})".format(
best_va_epoch, best_va_loss
))
print(" ")
# Multiple input sources
def train_multiscale(
X_tr_list, y_tr, X_va_list, y_va,
network,
train_func, va_func,
n_epochs, batch_size, lr_var, param_fp=None):
print("Starting training...")
best_va_epoch = 0
best_va_loss = np.inf
for epoch in range(1, n_epochs+1):
train_loss = 0
train_batches = 0
# Training
for batch_ in iterate_minibatches_multiscale(X_tr_list, y_tr,
batch_size,
shuffle=True):
inputs_list, targets = batch_
temp = inputs_list+[targets]
train_loss_one = train_func(*temp)
train_loss += train_loss_one
train_batches += 1
mean_tr_loss = train_loss/train_batches
# Validation
pre_list, mean_va_loss = validate_multiscale(X_va_list, y_va,
va_func)
# Check best loss
best_va_loss, best_va_updated = check_best_loss(
best_va_loss, mean_va_loss)
if best_va_updated:
best_va_epoch = epoch
if param_fp is not None:
save_model(param_fp, network)
# Print the results for this epoch:
print_in_train(epoch, n_epochs,
mean_tr_loss, mean_va_loss,
best_va_epoch, best_va_loss)
def validate_multiscale(X_list, y, val_func):
val_loss = 0
val_batches = 0
pre_list = []
for batch in iterate_minibatches_multiscale(X_list, y, 1, shuffle=False):
inputs_list, targets = batch
temp = inputs_list+[targets]
pre, loss = val_func(*temp)
val_loss += loss
val_batches += 1
pre_list.append(pre)
mean_val_loss = val_loss / val_batches
return pre_list, mean_val_loss
def predict_multiscale(X_list, pr_func):
pre_list = []
for inputs_list in iterate_minibatches_multiscale_feat(
X_list, 1, shuffle=False):
pre = pr_func(*inputs_list)
pre_list.append(pre)
return pre_list
# Save/load
def save_model(fp, network):
np.savez(fp, *lasagne.layers.get_all_param_values(network))
def load_model(fp, network):
with np.load(fp) as f:
param_values = [f['arr_%d' % i] for i in range(len(f.files))]
lasagne.layers.set_all_param_values(network, param_values)
# Get thresholds
def f1_one(y_target, y_predicted):
'''
y_target, y_predicted:
1D binary array
'''
return f1_score(y_target, y_predicted, average='binary')
def f1(Y_target, Y_predicted):
'''
Y_target, Y_predicted:
n x k 2D binary array, where n is the number of data and
k is the number of tags
'''
scores = [f1_one(y_target, y_predicted)
for y_target, y_predicted in zip(Y_target.T, Y_predicted.T)]
scores = np.array(scores)
return scores
def get_measure(arg):
threshold, prediction, target, step_size, lower_b, measure_func = arg |
measures = measure_func(target, pred_binary)
return measures
def get_thresholds(pred, target, search_range, step_size, measure_func=f1,
n_processes=20):
'''
pred: np.array
prediction from a model
n x k 2D array, where n is the number of data and
k is the number of tags
target: np.array
groundtruth
n x k 2D binary array, where n is the number of | pred_binary = ((prediction-threshold) > 0).astype(int) | random_line_split |
utils.py | _path, obj, protocol=2):
"""
For python 3 compatibility, use protocol 2
"""
if not file_path.endswith('.pkl'):
file_path += '.pkl'
with open(file_path, 'wb') as opdwf:
pk.dump(obj, opdwf, protocol=protocol)
def unpickle(file_path):
with open(file_path, 'rb') as opdrf:
data = pk.load(opdrf)
return data
# Load data
def load_data_multiscale(data_dir, scale_list):
X_tr_list = list()
y_tr_list = list()
X_te_list = list()
y_te_list = list()
X_va_list = list()
y_va_list = list()
for ii, scale in enumerate(scale_list):
feat_tr_fp = os.path.join(data_dir, scale, 'feat.tr.npy')
target_tr_fp = os.path.join(data_dir, scale, 'target.tr.npy')
feat_va_fp = os.path.join(data_dir, scale, 'feat.va.npy')
target_va_fp = os.path.join(data_dir, scale, 'target.va.npy')
feat_te_fp = os.path.join(data_dir, scale, 'feat.te.npy')
target_te_fp = os.path.join(data_dir, scale, 'target.te.npy')
X_tr = np.load(feat_tr_fp)
y_tr = np.load(target_tr_fp)
X_va = np.load(feat_va_fp)
y_va = np.load(target_va_fp)
X_te = np.load(feat_te_fp)
y_te = np.load(target_te_fp)
# append
X_tr_list.append(X_tr)
y_tr_list.append(y_tr)
X_te_list.append(X_te)
y_te_list.append(y_te)
X_va_list.append(X_va)
y_va_list.append(y_va)
y_tr = y_tr_list[0]
y_va = y_va_list[0]
y_te = y_te_list[0]
return X_tr_list, y_tr, X_va_list, y_va, X_te_list, y_te
def load_data_multiscale_te(data_dir, scale_list):
X_te_list = list()
y_te_list = list()
for ii, scale in enumerate(scale_list):
feat_te_fp = os.path.join(data_dir, scale, 'feat.te.npy')
target_te_fp = os.path.join(data_dir, scale, 'target.te.npy')
X_te = np.load(feat_te_fp)
y_te = np.load(target_te_fp)
# append
X_te_list.append(X_te)
y_te_list.append(y_te)
y_te = y_te_list[0]
return X_te_list, y_te
def load_data_multiscale_va(data_dir, scale_list):
X_va_list = list()
y_va_list = list()
for ii, scale in enumerate(scale_list):
feat_va_fp = os.path.join(data_dir, scale, 'feat.va.npy')
target_va_fp = os.path.join(data_dir, scale, 'target.va.npy')
X_va = np.load(feat_va_fp)
y_va = np.load(target_va_fp)
# append
X_va_list.append(X_va)
y_va_list.append(y_va)
y_va = y_va_list[0]
return X_va_list, y_va
# Recursively convert string to int in a list
def to_int(data_list):
return [to_int(term)
if type(term) == list else int(term) for term in data_list]
# Iterate inputs
def iterate_minibatches_multiscale(inputs_list, targets,
batchsize, shuffle=False):
if type(targets) == np.ndarray:
n = len(targets)
k = targets.shape[-1]
for inputs in inputs_list:
assert len(inputs) == n
if shuffle:
indices = np.arange(n)
np.random.shuffle(indices)
for start_idx in range(0, n - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield [inputs[excerpt] for inputs in inputs_list], \
targets[excerpt].reshape((-1, k))
def iterate_minibatches_multiscale_feat(inputs_list, batchsize, shuffle=False):
n = len(inputs_list[0])
for inputs in inputs_list:
assert len(inputs) == n
if shuffle:
indices = np.arange(n)
np.random.shuffle(indices)
for start_idx in range(0, n - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield [inputs[excerpt] for inputs in inputs_list]
# Functions used in train for recording and printing
def check_best_loss(best_val_loss, val_loss):
if val_loss < best_val_loss:
best_val_loss = val_loss
best_val_updated = True
else:
best_val_updated = False
return best_val_loss, best_val_updated
def print_in_train(epoch, n_epochs,
mean_tr_loss, mean_va_loss,
best_va_epoch, best_va_loss):
print("Epoch {} of {}.".format(epoch, n_epochs))
print(" training loss: {:.6f}".format(mean_tr_loss))
print(" validation loss: {:.6f}".format(mean_va_loss))
print(" best va (epoch, loss):({}, {:.6f})".format(
best_va_epoch, best_va_loss
))
print(" ")
# Multiple input sources
def train_multiscale(
X_tr_list, y_tr, X_va_list, y_va,
network,
train_func, va_func,
n_epochs, batch_size, lr_var, param_fp=None):
print("Starting training...")
best_va_epoch = 0
best_va_loss = np.inf
for epoch in range(1, n_epochs+1):
train_loss = 0
train_batches = 0
# Training
for batch_ in iterate_minibatches_multiscale(X_tr_list, y_tr,
batch_size,
shuffle=True):
inputs_list, targets = batch_
temp = inputs_list+[targets]
train_loss_one = train_func(*temp)
train_loss += train_loss_one
train_batches += 1
mean_tr_loss = train_loss/train_batches
# Validation
pre_list, mean_va_loss = validate_multiscale(X_va_list, y_va,
va_func)
# Check best loss
best_va_loss, best_va_updated = check_best_loss(
best_va_loss, mean_va_loss)
if best_va_updated:
best_va_epoch = epoch
if param_fp is not None:
save_model(param_fp, network)
# Print the results for this epoch:
print_in_train(epoch, n_epochs,
mean_tr_loss, mean_va_loss,
best_va_epoch, best_va_loss)
def validate_multiscale(X_list, y, val_func):
val_loss = 0
val_batches = 0
pre_list = []
for batch in iterate_minibatches_multiscale(X_list, y, 1, shuffle=False):
inputs_list, targets = batch
temp = inputs_list+[targets]
pre, loss = val_func(*temp)
val_loss += loss
val_batches += 1
pre_list.append(pre)
mean_val_loss = val_loss / val_batches
return pre_list, mean_val_loss
def predict_multiscale(X_list, pr_func):
pre_list = []
for inputs_list in iterate_minibatches_multiscale_feat(
X_list, 1, shuffle=False):
pre = pr_func(*inputs_list)
pre_list.append(pre)
return pre_list
# Save/load
def save_model(fp, network):
|
def load_model(fp, network):
with np.load(fp) as f:
param_values = [f['arr_%d' % i] for i in range(len(f.files))]
lasagne.layers.set_all_param_values(network, param_values)
# Get thresholds
def f1_one(y_target, y_predicted):
'''
y_target, y_predicted:
1D binary array
'''
return f1_score(y_target, y_predicted, average='binary')
def f1(Y_target, Y_predicted):
'''
Y_target, Y_predicted:
n x k 2D binary array, where n is the number of data and
k is the number of tags
'''
scores = [f1_one(y_target, y_predicted)
for y_target, y_predicted in zip(Y_target.T, Y_predicted.T)]
scores = np.array(scores)
return scores
def get_measure(arg):
threshold, prediction, target, step_size, lower_b, measure_func = arg
pred_binary = ((prediction-threshold) > 0).astype(int)
measures = measure_func(target, pred_binary)
return measures
def get_thresholds(pred, target, search_range, step_size, measure_func=f1,
n_processes=20):
'''
pred: np.array
prediction from a model
n x k 2D array, where n is the number of data and
k is the number of tags
target: np.array
groundtruth
n x k 2D binary array, where n is the number of | np.savez(fp, *lasagne.layers.get_all_param_values(network)) | identifier_body |
utils.py | _te = np.load(target_te_fp)
# append
X_tr_list.append(X_tr)
y_tr_list.append(y_tr)
X_te_list.append(X_te)
y_te_list.append(y_te)
X_va_list.append(X_va)
y_va_list.append(y_va)
y_tr = y_tr_list[0]
y_va = y_va_list[0]
y_te = y_te_list[0]
return X_tr_list, y_tr, X_va_list, y_va, X_te_list, y_te
def load_data_multiscale_te(data_dir, scale_list):
X_te_list = list()
y_te_list = list()
for ii, scale in enumerate(scale_list):
feat_te_fp = os.path.join(data_dir, scale, 'feat.te.npy')
target_te_fp = os.path.join(data_dir, scale, 'target.te.npy')
X_te = np.load(feat_te_fp)
y_te = np.load(target_te_fp)
# append
X_te_list.append(X_te)
y_te_list.append(y_te)
y_te = y_te_list[0]
return X_te_list, y_te
def load_data_multiscale_va(data_dir, scale_list):
X_va_list = list()
y_va_list = list()
for ii, scale in enumerate(scale_list):
feat_va_fp = os.path.join(data_dir, scale, 'feat.va.npy')
target_va_fp = os.path.join(data_dir, scale, 'target.va.npy')
X_va = np.load(feat_va_fp)
y_va = np.load(target_va_fp)
# append
X_va_list.append(X_va)
y_va_list.append(y_va)
y_va = y_va_list[0]
return X_va_list, y_va
# Recursively convert string to int in a list
def to_int(data_list):
return [to_int(term)
if type(term) == list else int(term) for term in data_list]
# Iterate inputs
def iterate_minibatches_multiscale(inputs_list, targets,
batchsize, shuffle=False):
if type(targets) == np.ndarray:
n = len(targets)
k = targets.shape[-1]
for inputs in inputs_list:
assert len(inputs) == n
if shuffle:
indices = np.arange(n)
np.random.shuffle(indices)
for start_idx in range(0, n - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield [inputs[excerpt] for inputs in inputs_list], \
targets[excerpt].reshape((-1, k))
def iterate_minibatches_multiscale_feat(inputs_list, batchsize, shuffle=False):
n = len(inputs_list[0])
for inputs in inputs_list:
assert len(inputs) == n
if shuffle:
indices = np.arange(n)
np.random.shuffle(indices)
for start_idx in range(0, n - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield [inputs[excerpt] for inputs in inputs_list]
# Functions used in train for recording and printing
def check_best_loss(best_val_loss, val_loss):
if val_loss < best_val_loss:
best_val_loss = val_loss
best_val_updated = True
else:
best_val_updated = False
return best_val_loss, best_val_updated
def print_in_train(epoch, n_epochs,
mean_tr_loss, mean_va_loss,
best_va_epoch, best_va_loss):
print("Epoch {} of {}.".format(epoch, n_epochs))
print(" training loss: {:.6f}".format(mean_tr_loss))
print(" validation loss: {:.6f}".format(mean_va_loss))
print(" best va (epoch, loss):({}, {:.6f})".format(
best_va_epoch, best_va_loss
))
print(" ")
# Multiple input sources
def train_multiscale(
X_tr_list, y_tr, X_va_list, y_va,
network,
train_func, va_func,
n_epochs, batch_size, lr_var, param_fp=None):
print("Starting training...")
best_va_epoch = 0
best_va_loss = np.inf
for epoch in range(1, n_epochs+1):
train_loss = 0
train_batches = 0
# Training
for batch_ in iterate_minibatches_multiscale(X_tr_list, y_tr,
batch_size,
shuffle=True):
inputs_list, targets = batch_
temp = inputs_list+[targets]
train_loss_one = train_func(*temp)
train_loss += train_loss_one
train_batches += 1
mean_tr_loss = train_loss/train_batches
# Validation
pre_list, mean_va_loss = validate_multiscale(X_va_list, y_va,
va_func)
# Check best loss
best_va_loss, best_va_updated = check_best_loss(
best_va_loss, mean_va_loss)
if best_va_updated:
best_va_epoch = epoch
if param_fp is not None:
save_model(param_fp, network)
# Print the results for this epoch:
print_in_train(epoch, n_epochs,
mean_tr_loss, mean_va_loss,
best_va_epoch, best_va_loss)
def validate_multiscale(X_list, y, val_func):
val_loss = 0
val_batches = 0
pre_list = []
for batch in iterate_minibatches_multiscale(X_list, y, 1, shuffle=False):
inputs_list, targets = batch
temp = inputs_list+[targets]
pre, loss = val_func(*temp)
val_loss += loss
val_batches += 1
pre_list.append(pre)
mean_val_loss = val_loss / val_batches
return pre_list, mean_val_loss
def predict_multiscale(X_list, pr_func):
pre_list = []
for inputs_list in iterate_minibatches_multiscale_feat(
X_list, 1, shuffle=False):
pre = pr_func(*inputs_list)
pre_list.append(pre)
return pre_list
# Save/load
def save_model(fp, network):
np.savez(fp, *lasagne.layers.get_all_param_values(network))
def load_model(fp, network):
with np.load(fp) as f:
param_values = [f['arr_%d' % i] for i in range(len(f.files))]
lasagne.layers.set_all_param_values(network, param_values)
# Get thresholds
def f1_one(y_target, y_predicted):
'''
y_target, y_predicted:
1D binary array
'''
return f1_score(y_target, y_predicted, average='binary')
def f1(Y_target, Y_predicted):
'''
Y_target, Y_predicted:
n x k 2D binary array, where n is the number of data and
k is the number of tags
'''
scores = [f1_one(y_target, y_predicted)
for y_target, y_predicted in zip(Y_target.T, Y_predicted.T)]
scores = np.array(scores)
return scores
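# A minimal usage sketch of the n x k convention documented above, assuming the
# `np` and `f1` names defined in this module; the example arrays are invented.
def _example_f1_usage():
    Y_target = np.array([[1, 0], [1, 1], [0, 1]])       # 3 samples, 2 tags
    Y_predicted = np.array([[1, 0], [0, 1], [0, 1]])
    # per-tag binary F1: tag 0 recovers one of two positives (~0.67), tag 1 is perfect (1.0)
    return f1(Y_target, Y_predicted)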
def get_measure(arg):
threshold, prediction, target, step_size, lower_b, measure_func = arg
pred_binary = ((prediction-threshold) > 0).astype(int)
measures = measure_func(target, pred_binary)
return measures
def get_thresholds(pred, target, search_range, step_size, measure_func=f1,
n_processes=20):
'''
pred: np.array
prediction from a model
n x k 2D array, where n is the number of data and
k is the number of tags
target: np.array
groundtruth
n x k 2D binary array, where n is the number of data and
k is the number of tags
search_range: tuple
the range for searching the thresholds
(a, b), where a is the lower bound and b is the upper bound
step_size: float
        searching the thresholds in (a, a+step_size, a+2*step_size, ..., ...)
measure_func: function or str
        function defined in the beginning of this file
'''
lower_b, upper_b = search_range
assert(upper_b > lower_b)
if measure_func == 'f1':
measure_func = f1
n_tags = target.shape[1]
diff = upper_b-lower_b
n_steps = int(np.floor(diff/step_size))
threshold_list = [lower_b+ii*step_size for ii in range(n_steps+1)]
arg_list = []
for th in threshold_list:
arg_list.append(
(th, pred, target, step_size, lower_b, measure_func))
pool = Pool(processes=n_processes)
all_measures = np.array(pool.map(get_measure, arg_list))
pool.close()
# print(all_measures.shape)
best_idx_list = np.argmax(all_measures, axis=0)
best_thresholds = lower_b+best_idx_list*step_size
best_measures = all_measures[best_idx_list, [ii for ii in range(n_tags)]]
# print(n_tags, len(best_idx_list))
return best_thresholds, best_measures
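# A minimal sketch of how get_thresholds() could be called, assuming `pred` holds
# per-tag scores in [0, 1] and `target` the matching n x k binary ground truth.
# The search range, step size and process count are illustrative placeholders.
def _example_threshold_search(pred, target):
    best_thresholds, best_f1 = get_thresholds(
        pred, target, search_range=(0.0, 1.0), step_size=0.05,
        measure_func=f1, n_processes=4)
    # apply the per-tag thresholds (best_thresholds broadcasts over the n rows)
    pred_binary = (pred > best_thresholds).astype(int)
    return pred_binary, best_f1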
# Upscale array
def | shift | identifier_name |
|
pl.locales.ts | silniejszą i bardziej zabawną społeczność.',
},
uHostSection: {
heading: 'uHost, uHost AI-Assistant, and AI-Host',
content:
'Trzy główne tryby turniejowe dają Ci pełną swobodę w prowadzeniu gier.',
hostTypes: [
{
heading: 'uHost',
imageAlt: 'uHost',
content:
'Wybierz z szablonu turnieju lub dostosuj własny. Opublikuj je, a Tourney utworzy wszystkie kanały i DM role, które chcesz.',
imageUrl: 'https://cdn.game.tv/images/meet-tourney/uHost.png',
},
{
heading: 'uHost AI-Assistant',
imageAlt: 'uHost AI-Assistant',
content:
'Tourney pomaga moderować turniej i zapewnia wskazówki dla każdego gracza pomiędzy meczami i wiele więcej.',
imageUrl: 'https://cdn.game.tv/images/meet-tourney/uHost-assistant.png',
},
{
heading: 'AI-Host',
imageAlt: 'aiHost',
content:
'Tourney wybierze grę, czas, format i przeprowadzi turniej całkowicie samodzielnie od początku do końca.',
imageUrl: 'https://cdn.game.tv/images/meet-tourney/ai-Host.png',
},
],
templateSection: {
imageUrl: 'https://cdn.game.tv/images/meet-tourney/templates.png',
imageAlt: 'Templates',
heading: 'Szablony',
content:
'Tourney zawiera dziesiątki wstępnie skonfigurowanych szablonów turniejów, dzięki czemu możesz je opublikować i przejść dalej.',
},
messagingSection: {
imageAlt: 'DMs and Messaging',
imageUrl: 'https://cdn.game.tv/images/meet-tourney/dms.png',
heading: 'Wiadomości',
content:
'Tourney może wysyłać wiadomości dotyczące poszczególnych ról dla gry lub dowolnej roli, którą powiadomisz o nowym turnieju. Cała organizacja odbywa się na nowym kanale, który tworzy Tourney, a wszystkie zaproszenia do lobby i kojarzenie są wysyłane za pośrednictwem PW.',
},
},
perksSection: {
heading: 'Zalety', | imageUrl:
'https://cdn.game.tv/images/meet-tourney/perk-tournaments.png',
imageAlt: 'Nagradzane Poziomy Turniejowe',
},
{
content:
            'Streamujesz swoje turnieje? Idealnie, mamy dla Ciebie przygotowany plugin OBS.',
imageUrl: 'https://cdn.game.tv/images/meet-tourney/perk-obs.png',
imageAlt: 'Wewnętrzny plugin OBS dla streamowania',
},
{
content:
'Chcesz uruchomić ligę turniejową na swoim serwerze? Bingo, my też to mamy!',
imageUrl: 'https://cdn.game.tv/images/meet-tourney/perk-league.png',
imageAlt: 'Organizuj Ligi!',
},
],
},
graphsSection: {
heading: 'Role',
content:
'Im więcej turniejów prowadzisz z Tourney, tym więcej korzyści odblokujesz. Twoja rola pojawia się w tabeli liderów społeczności Game.tv Discord, a każdy zestaw profitów otrzymasz po przejściu do następnej roli.',
graphContent: {
previousTitle: 'Poprzedni',
nextTitle: 'Następny',
perksTitle: 'Profity',
forTitle: 'DLA',
graphList: [
{
type: 'bronze',
imageCaption: 'Brąz',
imageAlt: 'Bronze',
imageUrl: 'https://cdn.game.tv/images/meet-tourney/tier-bronze.png',
forDuration: 'Od 1 do 6 Turniejów tygodniowo',
perks: [
'Unikalne Emotikony Tourney',
'Profile i odznaki',
'Odblokuj AI Tourney',
],
},
{
type: 'silver',
imageCaption: 'Srebro',
imageAlt: 'Silver',
imageUrl: 'https://cdn.game.tv/images/meet-tourney/tier-silver.png',
forDuration: '7 Turniejów tygodniowo',
perks: [
'2 Nitro boosty dla twojego serwera Discord',
'Odblokuj ligi',
],
},
{
type: 'gold',
imageCaption: 'Złoto',
imageAlt: 'Gold',
imageUrl: 'https://cdn.game.tv/images/meet-tourney/tier-gold.png',
forDuration:
'Poprowadź ligę z 300 lub więcej unikalnych uczestników/sezonów ',
perks: ['Gwarantowany sponsoring nagród ligowych'],
},
],
},
},
tourneyEmotesSection: {
heading: 'Emotikony Tourney',
content:
'Odblokowany w brązie otrzymujesz 42 niesamowitych emotikonów w wysokiej rozdzielczości, które możesz wykorzystać w swojej społeczności. ',
},
profilesSection: {
items: [
{
imageUrl: 'https://cdn.game.tv/images/meet-tourney/profiles.png',
heading: 'Profile',
imageAlt: 'Profiles',
content:
'Odblokowane w brązie, gracze w twoich turniejach automatycznie otrzymują profile, które mogą dostosować. Każdy profil pokazuje rozegrane gry, rekord wygranych / przegranych oraz ocenę gracza. Oceny graczy pomagają w dobieraniu graczy i awansowaniu ligi.',
},
{
imageUrl: 'https://cdn.game.tv/images/meet-tourney/badges.png',
heading: 'Odznaki',
imageAlt: 'Badges',
content:
'Wygrywaj gry i zdobywaj odznaki, aby pochwalić się swoimi umiejętnościami. Wraz ze wzrostem ELO lub wygranymi w turniejach i nagrodach zdobywasz ekskluzywne odznaki w swoim profilu, które czasami zawierają super tajne i ekskluzywne dodatkowe korzyści.',
reverse: true,
},
],
},
tourneyAiSection: {
heading: 'AI Tourney',
content1:
'Po trafieniu w Brąz odblokujesz AI-Host. Włączenie AI-Host powoduje, że Tourney może automatycznie uruchamiać turnieje w Twojej społeczności. Pamiętaj, że tylko turnieje uHost liczą się do twojego postępu. Turnieje AI-Host nie liczą się (ale są świetne i może je prowadzić wraz z uHost).',
content2: '',
},
leaguesSection: {
items: [
{
imageUrl: 'https://cdn.game.tv/images/meet-tourney/league.png',
heading: 'Ligi',
imageAlt: 'Leagues',
content:
'Odblokowane złotem, Tourney może prowadzić całą ligę na twoim serwerze. Gracze na twoim serwerze będą mieli okazję konkurować na równoległym systemie poziomów i zdobyć miesięczny sponsoring, jeśli znajdą się w TOP-8 krajowych rankingów.',
},
],
},
getBoostedSection: {
heading: 'Zdobądź premię',
content:
'Zdobądź srebro, a my damy Twojemu serwerowi Discord nie jeden, ale dwa doładowania, które zapewnią ci te słodkie przywileje poziomu 1. Tak długo, jak co tydzień organizujesz siedem lub więcej turniejów (od poniedziałku do niedzieli), będziemy nadal ulepszać Twój serwer. Jeśli spadniesz poniżej siedmiu turniejów co tydzień, możemy usunąć twoje wzmocnienia, dopóki nie wrócisz do siedmiu lub więcej. Dodatkowo odblokowujesz możliwość tworzenia lig biegowych dla swojej społeczności za pomocą AI-Host',
imgDesktopUrl: 'https://cdn.game.tv | content: 'Tourney nie byłby kompletny bez mnóstwa dodatków.',
perksList: [
{
content:
'Prowadzisz mnóstwo turniejów? Świetnie, mamy dla Ciebie system poziomów, który Cię wynagrodzi.', | random_line_split |
build_cgd_dataset.py | import median_filter
# progress bars https://github.com/tqdm/tqdm
# import tqdm without enforcing it as a dependency
try:
from tqdm import tqdm
except ImportError:
def tqdm(*args, **kwargs):
if args:
return args[0]
return kwargs.get('iterable', None)
from tensorflow.python.platform import flags
from tensorflow.python.platform import gfile
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.keras.utils import get_file
from tensorflow.python.keras._impl.keras.utils.data_utils import _hash_file
import keras
from keras import backend as K
flags.DEFINE_string('data_dir',
os.path.join(os.path.expanduser("~"),
'.keras', 'datasets', 'cornell_grasping'),
"""Path to dataset in TFRecord format
(aka Example protobufs) and feature csv files.""")
flags.DEFINE_string('grasp_dataset', 'all', 'TODO(ahundt): integrate with brainrobotdata or allow subsets to be specified')
flags.DEFINE_boolean('grasp_download', True,
"""Download the grasp_dataset to data_dir if it is not already present.""")
FLAGS = flags.FLAGS
def mkdir_p(path):
"""Create the specified path on the filesystem like the `mkdir -p` command
Creates one or more filesystem directory levels as needed,
and does not return an error if the directory already exists.
"""
# http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def is_sequence(arg):
"""Returns true if arg is a list or another Python Sequence, and false otherwise.
source: https://stackoverflow.com/a/17148334/99379
"""
return (not hasattr(arg, "strip") and
hasattr(arg, "__getitem__") or
hasattr(arg, "__iter__"))
class GraspDataset(object):
"""Cornell Grasping Dataset - about 5GB total size
    http://pr.cs.cornell.edu/grasping/rect_data/data.php
Downloads to `~/.keras/datasets/cornell_grasping` by default.
# Arguments
data_dir: Path to dataset in TFRecord format
(aka Example protobufs) and feature csv files.
`~/.keras/datasets/grasping` by default.
dataset: 'all' to load all the data.
download: True to actually download the dataset, also see FLAGS.
"""
def __init__(self, data_dir=None, dataset=None, download=None, verbose=0):
if data_dir is None:
data_dir = FLAGS.data_dir
self.data_dir = data_dir
if dataset is None:
dataset = FLAGS.grasp_dataset
self.dataset = dataset
if download is None:
download = FLAGS.grasp_download
if download:
self.download(data_dir, dataset)
self.verbose = verbose
def download(self, data_dir=None, dataset='all'):
'''Cornell Grasping Dataset - about 5GB total size
        http://pr.cs.cornell.edu/grasping/rect_data/data.php
Downloads to `~/.keras/datasets/cornell_grasping` by default.
Includes grasp_listing.txt with all files in all datasets;
the feature csv files which specify the dataset size,
the features (data channels), and the number of grasps;
and the tfrecord files which actually contain all the data.
If `grasp_listing_hashed.txt` is present, an additional
        hashing step will be completed to verify dataset integrity.
`grasp_listing_hashed.txt` will be generated automatically when
downloading with `dataset='all'`.
# Arguments
dataset: The name of the dataset to download, downloads all by default
with the '' parameter, 102 will download the 102 feature dataset
found in grasp_listing.txt.
# Returns
list of paths to the downloaded files
'''
dataset = self._update_dataset_param(dataset)
if data_dir is None:
if self.data_dir is None:
data_dir = FLAGS.data_dir
else:
data_dir = self.data_dir
mkdir_p(data_dir)
print('Downloading datasets to: ', data_dir)
url_prefix = ''
# If a hashed version of the listing is available,
# download the dataset and verify hashes to prevent data corruption.
listing_hash = os.path.join(data_dir, 'grasp_listing_hash.txt')
if os.path.isfile(listing_hash):
files_and_hashes = np.genfromtxt(listing_hash, dtype='str', delimiter=' ')
files = [get_file(fpath.split('/')[-1], url_prefix + fpath, cache_subdir=data_dir, file_hash=hash_str, extract=True)
for fpath, hash_str in tqdm(files_and_hashes)
if '_' + str(dataset) in fpath]
else:
# If a hashed version of the listing is not available,
# simply download the dataset normally.
listing_url = 'https://raw.githubusercontent.com/ahundt/robot-grasp-detection/master/grasp_listing.txt'
grasp_listing_path = get_file('grasp_listing.txt', listing_url, cache_subdir=data_dir)
grasp_files = np.genfromtxt(grasp_listing_path, dtype=str)
files = [get_file(fpath.split('/')[-1], url_prefix + fpath, cache_subdir=data_dir, extract=True)
for fpath in tqdm(grasp_files)
if '_' + dataset in fpath]
# If all files are downloaded, generate a hashed listing.
        if dataset == 'all' or dataset == '':
print('Hashing all dataset files to prevent corruption...')
hashes = []
for i, f in enumerate(tqdm(files)):
hashes.append(_hash_file(f))
file_hash_np = np.column_stack([grasp_files, hashes])
with open(listing_hash, 'wb') as hash_file:
np.savetxt(hash_file, file_hash_np, fmt='%s', delimiter=' ', header='file_path sha256')
print('Hashing complete, {} contains each url plus hash, and will be used to verify the '
'dataset during future calls to download().'.format(listing_hash))
return files
def _update_dataset_param(self, dataset):
"""Internal function to configure which subset of the datasets is being used.
Helps to choose a reasonable default action based on previous user parameters.
"""
if dataset is None and self.dataset is None:
return []
        if dataset == 'all':
dataset = ''
if dataset is None and self.dataset is not None:
dataset = self.dataset
return dataset
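# A minimal sketch of fetching the Cornell grasping files described in the class
# docstring above, assuming the GraspDataset defined here; the keyword values are
# illustrative, not required.
def _example_download():
    gd = GraspDataset(data_dir=None, dataset='all', download=False)
    # verifies sha256 hashes when grasp_listing_hash.txt is already present
    return gd.download(dataset='all')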
class ImageCoder(object):
def __init__(self):
self._sess = tf.Session()
self._decode_png_data = tf.placeholder(dtype=tf.string)
self._decode_png = tf.image.decode_png(self._decode_png_data, channels=3)
def decode_png(self, image_data):
return self._sess.run(self._decode_png,
feed_dict={self._decode_png_data: image_data})
def _process_image(filename, coder):
# Decode the image
    with open(filename, 'rb') as f:
image_data = f.read()
image = coder.decode_png(image_data)
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_bboxes(name):
'''Create a list with the coordinates of the grasping rectangles. Every
element is either x or y of a vertex.'''
with open(name, 'r') as f:
bboxes = list(map(
lambda coordinate: float(coordinate), f.read().strip().split()))
return bboxes
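# The flat list above alternates x and y per vertex, so 8 consecutive floats make
# up one 4-vertex grasp rectangle; a hypothetical helper to regroup them could be:
def _example_group_rectangles(bboxes):
    return np.array(bboxes, dtype=np.float32).reshape(-1, 4, 2)  # (n_rectangles, 4 vertices, x/y)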
def _int64_feature(v):
if not isinstance(v, list):
v = [v]
return tf.train.Feature(int64_list=tf.train.Int64List(value=v))
def _floats_feature(v):
if not isinstance(v, list):
v = [v]
return tf.train.Feature(float_list=tf.train.FloatList(value=v))
def _bytes_feature(v):
|
def _convert_to_example(filename, bboxes, image_buffer, height, width):
# Build an Example proto for an example
example = tf.train.Example(features=tf.train.Features(feature={
'image/filename': _bytes_feature(filename),
'image/encoded': _bytes_feature(image_buffer),
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'bboxes': _floats_feature(bboxes)}))
return example
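# A minimal sketch of reading these records back with the TF 1.x API used in this
# file; the feature keys mirror the ones above, while the decoding choices
# (e.g. VarLenFeature for the flat bbox list) are assumptions.
def _example_parse(serialized_example):
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image/filename': tf.FixedLenFeature([], tf.string),
            'image/encoded': tf.FixedLenFeature([], tf.string),
            'image/height': tf.FixedLenFeature([], tf.int64),
            'image/width': tf.FixedLenFeature([], tf.int64),
            'bboxes': tf.VarLenFeature(tf.float32),
        })
    image = tf.image.decode_png(features['image/encoded'], channels=3)
    bboxes = tf.sparse_tensor_to_dense(features['bboxes'])
    return image, bboxes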
def main():
gd = GraspDataset()
if FLAGS.grasp_download:
gd.download(dataset=FLAGS.grasp_dataset)
train_file = os.path.join(FLAGS.data_dir, 'train-cgd')
validation_file = os.path.join(FLAGS.data_dir, 'validation-cgd')
print(train_file)
print(validation_file)
writer_train = tf.python_io.TFRecordWriter(train_file)
writer_validation = tf.python_io.TFRecordWriter(validation_file)
# Creating a list with all the image paths
folders = range(1,11)
folders = ['0'+str(i) if i<10 | return tf.train.Feature(bytes_list=tf.train.BytesList(value=[v])) | identifier_body |
build_cgd_dataset.py | import median_filter
# progress bars https://github.com/tqdm/tqdm
# import tqdm without enforcing it as a dependency
try:
from tqdm import tqdm
except ImportError:
def tqdm(*args, **kwargs):
if args:
return args[0]
return kwargs.get('iterable', None)
from tensorflow.python.platform import flags
from tensorflow.python.platform import gfile
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.keras.utils import get_file
from tensorflow.python.keras._impl.keras.utils.data_utils import _hash_file
import keras
from keras import backend as K
flags.DEFINE_string('data_dir',
os.path.join(os.path.expanduser("~"),
'.keras', 'datasets', 'cornell_grasping'),
"""Path to dataset in TFRecord format
(aka Example protobufs) and feature csv files.""")
flags.DEFINE_string('grasp_dataset', 'all', 'TODO(ahundt): integrate with brainrobotdata or allow subsets to be specified')
flags.DEFINE_boolean('grasp_download', True,
"""Download the grasp_dataset to data_dir if it is not already present.""")
FLAGS = flags.FLAGS
def mkdir_p(path):
"""Create the specified path on the filesystem like the `mkdir -p` command
Creates one or more filesystem directory levels as needed,
and does not return an error if the directory already exists.
"""
# http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def is_sequence(arg):
"""Returns true if arg is a list or another Python Sequence, and false otherwise.
source: https://stackoverflow.com/a/17148334/99379
"""
return (not hasattr(arg, "strip") and
hasattr(arg, "__getitem__") or
hasattr(arg, "__iter__"))
class GraspDataset(object):
"""Cornell Grasping Dataset - about 5GB total size
    http://pr.cs.cornell.edu/grasping/rect_data/data.php
Downloads to `~/.keras/datasets/cornell_grasping` by default.
# Arguments
data_dir: Path to dataset in TFRecord format
(aka Example protobufs) and feature csv files.
`~/.keras/datasets/grasping` by default.
dataset: 'all' to load all the data.
download: True to actually download the dataset, also see FLAGS.
"""
def __init__(self, data_dir=None, dataset=None, download=None, verbose=0):
if data_dir is None:
data_dir = FLAGS.data_dir
self.data_dir = data_dir
if dataset is None:
dataset = FLAGS.grasp_dataset
self.dataset = dataset
if download is None:
download = FLAGS.grasp_download
if download:
self.download(data_dir, dataset)
self.verbose = verbose
def | (self, data_dir=None, dataset='all'):
'''Cornell Grasping Dataset - about 5GB total size
        http://pr.cs.cornell.edu/grasping/rect_data/data.php
Downloads to `~/.keras/datasets/cornell_grasping` by default.
Includes grasp_listing.txt with all files in all datasets;
the feature csv files which specify the dataset size,
the features (data channels), and the number of grasps;
and the tfrecord files which actually contain all the data.
If `grasp_listing_hashed.txt` is present, an additional
        hashing step will be completed to verify dataset integrity.
`grasp_listing_hashed.txt` will be generated automatically when
downloading with `dataset='all'`.
# Arguments
dataset: The name of the dataset to download, downloads all by default
with the '' parameter, 102 will download the 102 feature dataset
found in grasp_listing.txt.
# Returns
list of paths to the downloaded files
'''
dataset = self._update_dataset_param(dataset)
if data_dir is None:
if self.data_dir is None:
data_dir = FLAGS.data_dir
else:
data_dir = self.data_dir
mkdir_p(data_dir)
print('Downloading datasets to: ', data_dir)
url_prefix = ''
# If a hashed version of the listing is available,
# download the dataset and verify hashes to prevent data corruption.
listing_hash = os.path.join(data_dir, 'grasp_listing_hash.txt')
if os.path.isfile(listing_hash):
files_and_hashes = np.genfromtxt(listing_hash, dtype='str', delimiter=' ')
files = [get_file(fpath.split('/')[-1], url_prefix + fpath, cache_subdir=data_dir, file_hash=hash_str, extract=True)
for fpath, hash_str in tqdm(files_and_hashes)
if '_' + str(dataset) in fpath]
else:
# If a hashed version of the listing is not available,
# simply download the dataset normally.
listing_url = 'https://raw.githubusercontent.com/ahundt/robot-grasp-detection/master/grasp_listing.txt'
grasp_listing_path = get_file('grasp_listing.txt', listing_url, cache_subdir=data_dir)
grasp_files = np.genfromtxt(grasp_listing_path, dtype=str)
files = [get_file(fpath.split('/')[-1], url_prefix + fpath, cache_subdir=data_dir, extract=True)
for fpath in tqdm(grasp_files)
if '_' + dataset in fpath]
# If all files are downloaded, generate a hashed listing.
        if dataset == 'all' or dataset == '':
print('Hashing all dataset files to prevent corruption...')
hashes = []
for i, f in enumerate(tqdm(files)):
hashes.append(_hash_file(f))
file_hash_np = np.column_stack([grasp_files, hashes])
with open(listing_hash, 'wb') as hash_file:
np.savetxt(hash_file, file_hash_np, fmt='%s', delimiter=' ', header='file_path sha256')
print('Hashing complete, {} contains each url plus hash, and will be used to verify the '
'dataset during future calls to download().'.format(listing_hash))
return files
def _update_dataset_param(self, dataset):
"""Internal function to configure which subset of the datasets is being used.
Helps to choose a reasonable default action based on previous user parameters.
"""
if dataset is None and self.dataset is None:
return []
        if dataset == 'all':
dataset = ''
if dataset is None and self.dataset is not None:
dataset = self.dataset
return dataset
class ImageCoder(object):
def __init__(self):
self._sess = tf.Session()
self._decode_png_data = tf.placeholder(dtype=tf.string)
self._decode_png = tf.image.decode_png(self._decode_png_data, channels=3)
def decode_png(self, image_data):
return self._sess.run(self._decode_png,
feed_dict={self._decode_png_data: image_data})
def _process_image(filename, coder):
# Decode the image
    with open(filename, 'rb') as f:
image_data = f.read()
image = coder.decode_png(image_data)
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_bboxes(name):
'''Create a list with the coordinates of the grasping rectangles. Every
element is either x or y of a vertex.'''
with open(name, 'r') as f:
bboxes = list(map(
lambda coordinate: float(coordinate), f.read().strip().split()))
return bboxes
def _int64_feature(v):
if not isinstance(v, list):
v = [v]
return tf.train.Feature(int64_list=tf.train.Int64List(value=v))
def _floats_feature(v):
if not isinstance(v, list):
v = [v]
return tf.train.Feature(float_list=tf.train.FloatList(value=v))
def _bytes_feature(v):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[v]))
def _convert_to_example(filename, bboxes, image_buffer, height, width):
# Build an Example proto for an example
example = tf.train.Example(features=tf.train.Features(feature={
'image/filename': _bytes_feature(filename),
'image/encoded': _bytes_feature(image_buffer),
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'bboxes': _floats_feature(bboxes)}))
return example
def main():
gd = GraspDataset()
if FLAGS.grasp_download:
gd.download(dataset=FLAGS.grasp_dataset)
train_file = os.path.join(FLAGS.data_dir, 'train-cgd')
validation_file = os.path.join(FLAGS.data_dir, 'validation-cgd')
print(train_file)
print(validation_file)
writer_train = tf.python_io.TFRecordWriter(train_file)
writer_validation = tf.python_io.TFRecordWriter(validation_file)
# Creating a list with all the image paths
folders = range(1,11)
folders = ['0'+str(i) if i<10 | download | identifier_name |
build_cgd_dataset.py | import median_filter
# progress bars https://github.com/tqdm/tqdm
# import tqdm without enforcing it as a dependency
try:
from tqdm import tqdm
except ImportError:
def tqdm(*args, **kwargs):
if args:
return args[0]
return kwargs.get('iterable', None)
from tensorflow.python.platform import flags
from tensorflow.python.platform import gfile
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.keras.utils import get_file
from tensorflow.python.keras._impl.keras.utils.data_utils import _hash_file
import keras
from keras import backend as K
flags.DEFINE_string('data_dir',
os.path.join(os.path.expanduser("~"),
'.keras', 'datasets', 'cornell_grasping'),
"""Path to dataset in TFRecord format
(aka Example protobufs) and feature csv files.""")
flags.DEFINE_string('grasp_dataset', 'all', 'TODO(ahundt): integrate with brainrobotdata or allow subsets to be specified')
flags.DEFINE_boolean('grasp_download', True,
"""Download the grasp_dataset to data_dir if it is not already present.""")
FLAGS = flags.FLAGS
def mkdir_p(path):
"""Create the specified path on the filesystem like the `mkdir -p` command
Creates one or more filesystem directory levels as needed,
and does not return an error if the directory already exists.
"""
# http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def is_sequence(arg):
"""Returns true if arg is a list or another Python Sequence, and false otherwise.
source: https://stackoverflow.com/a/17148334/99379
"""
return (not hasattr(arg, "strip") and
hasattr(arg, "__getitem__") or
hasattr(arg, "__iter__"))
class GraspDataset(object):
"""Cornell Grasping Dataset - about 5GB total size
    http://pr.cs.cornell.edu/grasping/rect_data/data.php
Downloads to `~/.keras/datasets/cornell_grasping` by default.
# Arguments
data_dir: Path to dataset in TFRecord format
(aka Example protobufs) and feature csv files.
`~/.keras/datasets/grasping` by default.
dataset: 'all' to load all the data.
download: True to actually download the dataset, also see FLAGS.
"""
def __init__(self, data_dir=None, dataset=None, download=None, verbose=0):
if data_dir is None:
data_dir = FLAGS.data_dir
self.data_dir = data_dir
if dataset is None:
dataset = FLAGS.grasp_dataset
self.dataset = dataset
if download is None:
download = FLAGS.grasp_download
if download:
self.download(data_dir, dataset)
self.verbose = verbose
def download(self, data_dir=None, dataset='all'):
'''Cornell Grasping Dataset - about 5GB total size
        http://pr.cs.cornell.edu/grasping/rect_data/data.php
Downloads to `~/.keras/datasets/cornell_grasping` by default.
Includes grasp_listing.txt with all files in all datasets;
the feature csv files which specify the dataset size,
the features (data channels), and the number of grasps;
and the tfrecord files which actually contain all the data.
If `grasp_listing_hashed.txt` is present, an additional
        hashing step will be completed to verify dataset integrity.
`grasp_listing_hashed.txt` will be generated automatically when
downloading with `dataset='all'`.
# Arguments
dataset: The name of the dataset to download, downloads all by default
with the '' parameter, 102 will download the 102 feature dataset
found in grasp_listing.txt.
# Returns
list of paths to the downloaded files
'''
dataset = self._update_dataset_param(dataset)
if data_dir is None:
if self.data_dir is None:
data_dir = FLAGS.data_dir
else:
data_dir = self.data_dir
mkdir_p(data_dir)
print('Downloading datasets to: ', data_dir)
url_prefix = ''
# If a hashed version of the listing is available,
# download the dataset and verify hashes to prevent data corruption.
listing_hash = os.path.join(data_dir, 'grasp_listing_hash.txt')
if os.path.isfile(listing_hash):
files_and_hashes = np.genfromtxt(listing_hash, dtype='str', delimiter=' ')
files = [get_file(fpath.split('/')[-1], url_prefix + fpath, cache_subdir=data_dir, file_hash=hash_str, extract=True)
for fpath, hash_str in tqdm(files_and_hashes)
if '_' + str(dataset) in fpath]
else:
# If a hashed version of the listing is not available,
# simply download the dataset normally.
listing_url = 'https://raw.githubusercontent.com/ahundt/robot-grasp-detection/master/grasp_listing.txt'
grasp_listing_path = get_file('grasp_listing.txt', listing_url, cache_subdir=data_dir)
grasp_files = np.genfromtxt(grasp_listing_path, dtype=str)
files = [get_file(fpath.split('/')[-1], url_prefix + fpath, cache_subdir=data_dir, extract=True)
for fpath in tqdm(grasp_files)
if '_' + dataset in fpath]
# If all files are downloaded, generate a hashed listing.
        if dataset == 'all' or dataset == '':
print('Hashing all dataset files to prevent corruption...')
hashes = []
for i, f in enumerate(tqdm(files)):
hashes.append(_hash_file(f))
file_hash_np = np.column_stack([grasp_files, hashes])
with open(listing_hash, 'wb') as hash_file:
np.savetxt(hash_file, file_hash_np, fmt='%s', delimiter=' ', header='file_path sha256')
print('Hashing complete, {} contains each url plus hash, and will be used to verify the '
'dataset during future calls to download().'.format(listing_hash))
return files
def _update_dataset_param(self, dataset):
"""Internal function to configure which subset of the datasets is being used.
Helps to choose a reasonable default action based on previous user parameters.
"""
if dataset is None and self.dataset is None:
return []
        if dataset == 'all':
dataset = ''
if dataset is None and self.dataset is not None:
dataset = self.dataset
return dataset
class ImageCoder(object):
def __init__(self): | return self._sess.run(self._decode_png,
feed_dict={self._decode_png_data: image_data})
def _process_image(filename, coder):
# Decode the image
    with open(filename, 'rb') as f:
image_data = f.read()
image = coder.decode_png(image_data)
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_bboxes(name):
'''Create a list with the coordinates of the grasping rectangles. Every
element is either x or y of a vertex.'''
with open(name, 'r') as f:
bboxes = list(map(
lambda coordinate: float(coordinate), f.read().strip().split()))
return bboxes
def _int64_feature(v):
if not isinstance(v, list):
v = [v]
return tf.train.Feature(int64_list=tf.train.Int64List(value=v))
def _floats_feature(v):
if not isinstance(v, list):
v = [v]
return tf.train.Feature(float_list=tf.train.FloatList(value=v))
def _bytes_feature(v):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[v]))
def _convert_to_example(filename, bboxes, image_buffer, height, width):
# Build an Example proto for an example
example = tf.train.Example(features=tf.train.Features(feature={
'image/filename': _bytes_feature(filename),
'image/encoded': _bytes_feature(image_buffer),
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'bboxes': _floats_feature(bboxes)}))
return example
def main():
gd = GraspDataset()
if FLAGS.grasp_download:
gd.download(dataset=FLAGS.grasp_dataset)
train_file = os.path.join(FLAGS.data_dir, 'train-cgd')
validation_file = os.path.join(FLAGS.data_dir, 'validation-cgd')
print(train_file)
print(validation_file)
writer_train = tf.python_io.TFRecordWriter(train_file)
writer_validation = tf.python_io.TFRecordWriter(validation_file)
# Creating a list with all the image paths
folders = range(1,11)
folders = ['0'+str(i) if i<10 else | self._sess = tf.Session()
self._decode_png_data = tf.placeholder(dtype=tf.string)
self._decode_png = tf.image.decode_png(self._decode_png_data, channels=3)
def decode_png(self, image_data): | random_line_split |
build_cgd_dataset.py | import median_filter
# progress bars https://github.com/tqdm/tqdm
# import tqdm without enforcing it as a dependency
try:
from tqdm import tqdm
except ImportError:
def tqdm(*args, **kwargs):
if args:
return args[0]
return kwargs.get('iterable', None)
from tensorflow.python.platform import flags
from tensorflow.python.platform import gfile
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.keras.utils import get_file
from tensorflow.python.keras._impl.keras.utils.data_utils import _hash_file
import keras
from keras import backend as K
flags.DEFINE_string('data_dir',
os.path.join(os.path.expanduser("~"),
'.keras', 'datasets', 'cornell_grasping'),
"""Path to dataset in TFRecord format
(aka Example protobufs) and feature csv files.""")
flags.DEFINE_string('grasp_dataset', 'all', 'TODO(ahundt): integrate with brainrobotdata or allow subsets to be specified')
flags.DEFINE_boolean('grasp_download', True,
"""Download the grasp_dataset to data_dir if it is not already present.""")
FLAGS = flags.FLAGS
def mkdir_p(path):
"""Create the specified path on the filesystem like the `mkdir -p` command
Creates one or more filesystem directory levels as needed,
and does not return an error if the directory already exists.
"""
# http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def is_sequence(arg):
"""Returns true if arg is a list or another Python Sequence, and false otherwise.
source: https://stackoverflow.com/a/17148334/99379
"""
return (not hasattr(arg, "strip") and
hasattr(arg, "__getitem__") or
hasattr(arg, "__iter__"))
class GraspDataset(object):
"""Cornell Grasping Dataset - about 5GB total size
    http://pr.cs.cornell.edu/grasping/rect_data/data.php
Downloads to `~/.keras/datasets/cornell_grasping` by default.
# Arguments
data_dir: Path to dataset in TFRecord format
(aka Example protobufs) and feature csv files.
`~/.keras/datasets/grasping` by default.
dataset: 'all' to load all the data.
download: True to actually download the dataset, also see FLAGS.
"""
def __init__(self, data_dir=None, dataset=None, download=None, verbose=0):
if data_dir is None:
data_dir = FLAGS.data_dir
self.data_dir = data_dir
if dataset is None:
dataset = FLAGS.grasp_dataset
self.dataset = dataset
if download is None:
download = FLAGS.grasp_download
if download:
self.download(data_dir, dataset)
self.verbose = verbose
def download(self, data_dir=None, dataset='all'):
'''Cornell Grasping Dataset - about 5GB total size
        http://pr.cs.cornell.edu/grasping/rect_data/data.php
Downloads to `~/.keras/datasets/cornell_grasping` by default.
Includes grasp_listing.txt with all files in all datasets;
the feature csv files which specify the dataset size,
the features (data channels), and the number of grasps;
and the tfrecord files which actually contain all the data.
If `grasp_listing_hashed.txt` is present, an additional
        hashing step will be completed to verify dataset integrity.
`grasp_listing_hashed.txt` will be generated automatically when
downloading with `dataset='all'`.
# Arguments
dataset: The name of the dataset to download. Downloads all by default
(the '' parameter); '102' will download the 102 feature dataset
found in grasp_listing.txt.
# Returns
list of paths to the downloaded files
'''
dataset = self._update_dataset_param(dataset)
if data_dir is None:
if self.data_dir is None:
data_dir = FLAGS.data_dir
else:
data_dir = self.data_dir
mkdir_p(data_dir)
print('Downloading datasets to: ', data_dir)
url_prefix = ''
# If a hashed version of the listing is available,
# download the dataset and verify hashes to prevent data corruption.
listing_hash = os.path.join(data_dir, 'grasp_listing_hash.txt')
if os.path.isfile(listing_hash):
files_and_hashes = np.genfromtxt(listing_hash, dtype='str', delimiter=' ')
files = [get_file(fpath.split('/')[-1], url_prefix + fpath, cache_subdir=data_dir, file_hash=hash_str, extract=True)
for fpath, hash_str in tqdm(files_and_hashes)
if '_' + str(dataset) in fpath]
else:
# If a hashed version of the listing is not available,
# simply download the dataset normally.
listing_url = 'https://raw.githubusercontent.com/ahundt/robot-grasp-detection/master/grasp_listing.txt'
grasp_listing_path = get_file('grasp_listing.txt', listing_url, cache_subdir=data_dir)
grasp_files = np.genfromtxt(grasp_listing_path, dtype=str)
files = [get_file(fpath.split('/')[-1], url_prefix + fpath, cache_subdir=data_dir, extract=True)
for fpath in tqdm(grasp_files)
if '_' + dataset in fpath]
# If all files are downloaded, generate a hashed listing.
if dataset == 'all' or dataset == '':
print('Hashing all dataset files to prevent corruption...')
hashes = []
for i, f in enumerate(tqdm(files)):
|
file_hash_np = np.column_stack([grasp_files, hashes])
with open(listing_hash, 'wb') as hash_file:
np.savetxt(hash_file, file_hash_np, fmt='%s', delimiter=' ', header='file_path sha256')
print('Hashing complete, {} contains each url plus hash, and will be used to verify the '
'dataset during future calls to download().'.format(listing_hash))
return files
def _update_dataset_param(self, dataset):
"""Internal function to configure which subset of the datasets is being used.
Helps to choose a reasonable default action based on previous user parameters.
"""
if dataset is None and self.dataset is None:
return []
if dataset == 'all':
dataset = ''
if dataset is None and self.dataset is not None:
dataset = self.dataset
return dataset
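# --- Added sketch (not part of the original script) ---
# Typical use of GraspDataset, mirroring main() below: constructing it honours
# FLAGS.data_dir / FLAGS.grasp_dataset / FLAGS.grasp_download, and download()
# can also be called directly for a named subset. '102' is the subset name
# mentioned in the docstring above; other names come from grasp_listing.txt.
def _example_grasp_dataset():
    gd = GraspDataset()                 # may download, depending on FLAGS
    files = gd.download(dataset='102')  # only files whose path contains '_102'
    return files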
class ImageCoder(object):
def __init__(self):
self._sess = tf.Session()
self._decode_png_data = tf.placeholder(dtype=tf.string)
self._decode_png = tf.image.decode_png(self._decode_png_data, channels=3)
def decode_png(self, image_data):
return self._sess.run(self._decode_png,
feed_dict={self._decode_png_data: image_data})
def _process_image(filename, coder):
# Decode the image
with open(filename, 'rb') as f:
image_data = f.read()
image = coder.decode_png(image_data)
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_bboxes(name):
'''Create a list with the coordinates of the grasping rectangles. Every
element is either x or y of a vertex.'''
with open(name, 'r') as f:
bboxes = list(map(
lambda coordinate: float(coordinate), f.read().strip().split()))
return bboxes
def _int64_feature(v):
if not isinstance(v, list):
v = [v]
return tf.train.Feature(int64_list=tf.train.Int64List(value=v))
def _floats_feature(v):
if not isinstance(v, list):
v = [v]
return tf.train.Feature(float_list=tf.train.FloatList(value=v))
def _bytes_feature(v):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[v]))
def _convert_to_example(filename, bboxes, image_buffer, height, width):
# Build an Example proto for an example
example = tf.train.Example(features=tf.train.Features(feature={
'image/filename': _bytes_feature(filename),
'image/encoded': _bytes_feature(image_buffer),
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'bboxes': _floats_feature(bboxes)}))
return example
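# --- Added sketch (not part of the original script) ---
# Shows how the helpers above combine for one image/label pair: read the PNG
# bytes, parse the grasp-rectangle text file, build the Example proto and
# append it to an open TFRecordWriter. Both file names are hypothetical.
def _example_write_one_record(writer, coder):
    png_path = 'pcd0100r.png'      # hypothetical image file
    bbox_path = 'pcd0100cpos.txt'  # hypothetical grasp rectangle file
    image_buffer, height, width = _process_image(png_path, coder)
    bboxes = _process_bboxes(bbox_path)
    example = _convert_to_example(png_path, bboxes, image_buffer, height, width)
    writer.write(example.SerializeToString())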
def main():
gd = GraspDataset()
if FLAGS.grasp_download:
gd.download(dataset=FLAGS.grasp_dataset)
train_file = os.path.join(FLAGS.data_dir, 'train-cgd')
validation_file = os.path.join(FLAGS.data_dir, 'validation-cgd')
print(train_file)
print(validation_file)
writer_train = tf.python_io.TFRecordWriter(train_file)
writer_validation = tf.python_io.TFRecordWriter(validation_file)
# Creating a list with all the image paths
folders = range(1,11)
folders = ['0'+str(i) if i<1 | hashes.append(_hash_file(f)) | conditional_block |
AdobeFontLabUtils.py | 32all Python module from Mark Hammond. This can be found at:"
print " http://www.python.net/crew/mhammond/win32/Downloads.html"
print "or http://sourceforge.net/, and search for 'Python Windows Extensions."
else:
import Carbon.Evt
import Carbon.Events
modifiers = Carbon.Evt.GetCurrentKeyModifiers()
if modifiers & Carbon.Events.shiftKey:
notPressed = 0
return notPressed
def checkAltKeyPress():
notPressed = 1
if os.name == "nt":
try:
import win32api
import win32con
keyState = win32api.GetAsyncKeyState(win32con.VK_MENU) # VK_MENU is the Alt key
if keyState < 0:
notPressed = 0
except ImportError:
print "Note: to be able to set options for this script, you must install"
print "win32all Python module from Mark Hammond. This can be found at:"
print " http://www.python.net/crew/mhammond/win32/Downloads.html"
print "or http://sourceforge.net/, and search for 'Python Windows Extensions."
else:
import Carbon.Evt
import Carbon.Events
modifiers = Carbon.Evt.GetCurrentKeyModifiers()
if modifiers & Carbon.Events.optionKey:
notPressed = 0
return notPressed
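# --- Added sketch (not part of the original module) ---
# The key-press helpers are typically polled once at macro start-up so a
# FontLab user can hold a modifier key while launching the script to toggle
# behaviour. The option shown here is purely illustrative.
def _example_key_toggle():
    promptForOptions = not checkControlKeyPress()  # helper returns 0 when the key is held
    if promptForOptions:
        print "Control key held down - showing the options dialog."
    return promptForOptions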
def GetSharedDataPath():
sdPath = ""
for path in sys.path:
if not re.search(r"FDK/Tools", path):
continue
m = re.search(kSharedDataName, path)
if not m:
continue
sdPath = path[:m.end()]
if not sdPath:
print "Error. The path to ",kSharedDataName," is not in the sys.path list."
elif not os.path.exists(sdPath):
print "Error.", sdPath,"does not exist."
sdPath = ""
return sdPath
# fontDirPath is an absolute path to the font dir, supplied by FontLab
# fontPSName is used to get the top family directory from the font library DB file.
# so as to look back up the family tree for the GOASDB.
def GetGOADBPath(fontDirPath, fontPSName):
goadbPath = ""
dirPath = fontDirPath
trys = 3 # look first in the font's dir, then up to two levels up.
while trys:
goadbPath = os.path.join(dirPath, kGlyphOrderAndAliasDBName)
if (goadbPath and os.path.exists(goadbPath)):
break
dirPath = os.path.dirname(dirPath)
trys -= 1
if (goadbPath and os.path.exists(goadbPath)):
return goadbPath
# default to the global FDK GOADB.
goadbPath = ""
sharedDataDir = GetSharedDataPath()
if sharedDataDir:
goadbPath = os.path.join(sharedDataDir, kGlyphOrderAndAliasDBName )
if (goadbPath and os.path.exists(goadbPath)):
return goadbPath
print "Error. Could not find", kGlyphOrderAndAliasDBName,", even in FDK Shared Data Dir."
goadbPath = ""
return goadbPath
def SplitGOADBEntries(line):
global goadbIndex
entry = string.split(line)
if (len(entry) < 2) or (len(entry) > 3):
print "Error in GOADB: bad entry - too many or two few columns <" + line + ">"
entry = None
if len(entry) == 3:
if entry[2][0] != "u":
print "Error in GOADB: 3rd column must be a uni or u Unicode name <" + line + ">"
entry = None
if len(entry) == 2:
entry.append("")
# Add GOADB index value
if entry:
entry.append(goadbIndex)
goadbIndex = goadbIndex + 1
return entry
########################################################
# Misc utilities
########################################################
def RemoveComment(line):
try:
index = string.index(line, "#")
line = line[:index]
except:
pass
return line
#return list of lines with comments and blank lines removed.
def CleanLines(lineList):
lineList = map(lambda line: RemoveComment(line) , lineList)
lineList = filter(lambda line: string.strip(line), lineList)
return lineList
#split out lines from a stream of file data.
def SplitLines(data):
lineList = re.findall(r"([^\r\n]+)[\r\n]", data)
return lineList
def LoadGOADB(filePath):
""" Read a glyph alias file for makeOTF into a dict."""
global goadbIndex
finalNameDict = {}
productionNameDict = {}
goadbIndex = 0
gfile = open(filePath,"rb")
data = gfile.read()
gfile.close()
glyphEntryList = SplitLines(data)
glyphEntryList = CleanLines(glyphEntryList)
glyphEntryList = map(SplitGOADBEntries, glyphEntryList)
glyphEntryList = filter(lambda entry: entry, glyphEntryList) # drop out any entry == None
for entry in glyphEntryList:
finalNameDict[entry[0]] = [ entry[1], entry[2], entry[3] ]
if productionNameDict.has_key(entry[1]):
print "Error in GOADB: more than one final name for a production name!"
print "\tfinal name 1:", productionNameDict[entry[1]], "Final name 2:", entry[0], "Production name:", entry[1]
print "\tUsing Final name 2."
productionNameDict[entry[1]] = [ entry[0], entry[2], entry[3] ]
return finalNameDict, productionNameDict
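# --- Added sketch (not part of the original module) ---
# A GlyphOrderAndAliasDB line is "<final name> <production name> [<uni/u name>]",
# with '#' starting a comment, e.g. (illustrative entries only):
#   .notdef  .notdef
#   Euro     uni20AC  uni20AC
# LoadGOADB() maps both directions and records the entry order, so the call
# below would give finalNames['Euro'] == ['uni20AC', 'uni20AC', <order index>].
def _example_load_goadb():
    finalNames, productionNames = LoadGOADB('GlyphOrderAndAliasDB')  # hypothetical path
    return finalNames, productionNames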
kDefaultReportExtension = "log"
kDefaultLogSubdirectory = "logs"
kDefaultVersionDigits = 3
kWriteBoth = 3
kWriteStdOut = 1
kWriteFile = 2
class Reporter:
""" Logging class to let me echo output to both/either screen and a log file.
Makes log files with same base name as font file, and special extension.
Default extension is supplied, can be overridden.
Tries to put log file in subdirectory under font file home directory."""
def __init__(self, fileOrPath, extension = kDefaultReportExtension):
self.file = None
self.fileName = None
self.state = kWriteBoth
if type(fileOrPath) == type(" "):
# try to find or make log directory for report file.
dir,name = os.path.split(fileOrPath)
logDir = os.path.join(dir, kDefaultLogSubdirectory)
if not os.path.exists(logDir):
try:
os.mkdir(logDir)
except IOError:
print "Failed to make log file subdir:", logDir
return
if os.path.exists(logDir):
fileOrPath = os.path.join(logDir, name)
basePath, fileExt = os.path.splitext(fileOrPath)
self.fileName = self.makeSafeReportName(basePath, extension)
try:
self.file = open(self.fileName, "wt")
except IOError:
print "Failed to open file", self.fileName
return
else:
self.fileName = None
self.file = fileOrPath
return
def makeSafeReportName(self, baseFilePath, extension):
global kDefaultVersionDigits
""" make a report file name with a number 1 greater than any
existing report file name with the same extension. We know the
baseFilePath exists, as it comes from an open font file. We will
not worry about 32 char name limits -> Mac OS X and Windows 2000
only.
"""
n = 1
dir, file = os.path.split(baseFilePath)
numPattern = re.compile(file + "." + extension + r"v0*(\d+)$")
fileList = os.listdir(dir)
for file in fileList:
match = numPattern.match(file)
if match:
num = match.group(1)
num = eval(num)
if num >= n:
n = num + 1
if n > (10**kDefaultVersionDigits - 1):
kDefaultVersionDigits = kDefaultVersionDigits +1
filePath = baseFilePath + "." + extension + "v" + str(n).zfill(kDefaultVersionDigits)
return filePath
def write(*args):
self = args[0]
text = []
for arg in args[1:]:
try:
text.append(str(arg))
except:
text.append(repr(arg))
text = " ".join(text)
if (self.state == kWriteBoth):
print text
if (self.file != sys.stdout):
self.file.write(text + os.linesep)
elif (self.state == kWriteFile):
self.file.write(text + os.linesep)
elif (self.state == kWriteStdOut):
print text
def set_state(self, state):
self.state = state
def close(self):
if self.file and (self.file != sys.stdout):
self.file.close()
if self.fileName:
print "Log saved to ", self.fileName
def read(*args): # added to make this class look more like a file.
pass
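# --- Added sketch (not part of the original module) ---
# Reporter behaves like a writable file: give it the font's path and it logs to
# logs/<fontname>.logv001 (the version number auto-increments), echoing to
# stdout and/or the file according to set_state(). The path is hypothetical.
def _example_reporter():
    rep = Reporter("/fonts/MyFamily/MyFont.vfb")  # hypothetical font path
    rep.write("Checking glyph names...")          # goes to stdout and the log
    rep.set_state(kWriteFile)                     # log file only from here on
    rep.write("Done.")
    rep.close()                                   # reports where the log was saved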
def | getLatestReport | identifier_name |
|
AdobeFontLabUtils.py | /Tools/osx" % (home)
os.environ["PATH"] = paths + fdkPath
if os.name == "nt":
p = os.popen("for %%i in (%s) do @echo. %%~$PATH:i" % (toolName))
log = p.read()
p.close()
log = log.strip()
if log:
toolPath = log
else:
p = os.popen("which %s" % (toolName))
log = p.read()
p.close()
log = log.strip()
if log:
toolPath = log
if not toolPath:
print """
The script cannot run the command-line program '%s'. Please make sure the AFDKO is installed, and the system environment variable PATH
contains the path to the FDK sub-directory containing '%s'.""" % (toolName, toolName)
return toolPath # get rid of new-line
def checkControlKeyPress():
notPressed = 1
if os.name == "nt":
try:
import win32api
import win32con
keyState = win32api.GetAsyncKeyState(win32con.VK_CONTROL)
if keyState < 0:
notPressed = 0
except ImportError:
print "Note: to be able to set options for this script, you must install"
print "win32all Python module from Mark Hammond. This can be found at:"
print " http://www.python.net/crew/mhammond/win32/Downloads.html"
print "or http://sourceforge.net/, and search for 'Python Windows Extensions."
else:
import Carbon.Evt
import Carbon.Events
modifiers = Carbon.Evt.GetCurrentKeyModifiers()
if modifiers & Carbon.Events.controlKey:
notPressed = 0
return notPressed
def checkShiftKeyPress():
notPressed = 1
if os.name == "nt":
try:
import win32api
import win32con
keyState = win32api.GetAsyncKeyState(win32con.VK_SHIFT)
if keyState < 0:
notPressed = 0
except ImportError:
print "Note: to be able to set options for this script, you must install"
print "win32all Python module from Mark Hammond. This can be found at:"
print " http://www.python.net/crew/mhammond/win32/Downloads.html"
print "or http://sourceforge.net/, and search for 'Python Windows Extensions."
else:
import Carbon.Evt
import Carbon.Events
modifiers = Carbon.Evt.GetCurrentKeyModifiers()
if modifiers & Carbon.Events.shiftKey:
notPressed = 0
return notPressed
def checkAltKeyPress():
notPressed = 1
if os.name == "nt":
try:
import win32api
import win32con
keyState = win32api.GetAsyncKeyState(win32con.VK_MENU) # VK_MENU is the Alt key
if keyState < 0:
notPressed = 0
except ImportError:
print "Note: to be able to set options for this script, you must install"
print "win32all Python module from Mark Hammond. This can be found at:"
print " http://www.python.net/crew/mhammond/win32/Downloads.html"
print "or http://sourceforge.net/, and search for 'Python Windows Extensions."
else:
import Carbon.Evt
import Carbon.Events
modifiers = Carbon.Evt.GetCurrentKeyModifiers()
if modifiers & Carbon.Events.optionKey:
notPressed = 0
return notPressed
def GetSharedDataPath():
sdPath = ""
for path in sys.path:
if not re.search(r"FDK/Tools", path):
continue
m = re.search(kSharedDataName, path)
if not m:
continue
sdPath = path[:m.end()]
if not sdPath:
print "Error. The path to ",kSharedDataName," is not in the sys.path list."
elif not os.path.exists(sdPath):
print "Error.", sdPath,"does not exist."
sdPath = ""
return sdPath
# fontDirPath is an absolute path to the font dir, supplied by FontLab
# fontPSName is used to get the top family directory from the font library DB file.
# so as to look back up the family tree for the GOASDB.
def GetGOADBPath(fontDirPath, fontPSName):
goadbPath = ""
dirPath = fontDirPath
trys = 3 # look first in the font's dir, then up to two levels up.
while trys:
goadbPath = os.path.join(dirPath, kGlyphOrderAndAliasDBName)
if (goadbPath and os.path.exists(goadbPath)):
break
dirPath = os.path.dirname(dirPath)
trys -= 1
if (goadbPath and os.path.exists(goadbPath)):
return goadbPath
# default to the global FDK GOADB.
goadbPath = ""
sharedDataDir = GetSharedDataPath()
if sharedDataDir:
goadbPath = os.path.join(sharedDataDir, kGlyphOrderAndAliasDBName )
if (goadbPath and os.path.exists(goadbPath)):
return goadbPath
print "Error. Could not find", kGlyphOrderAndAliasDBName,", even in FDK Shared Data Dir."
goadbPath = ""
return goadbPath
def SplitGOADBEntries(line):
global goadbIndex
entry = string.split(line)
if (len(entry) < 2) or (len(entry) > 3):
print "Error in GOADB: bad entry - too many or two few columns <" + line + ">"
entry = None
if len(entry) == 3:
if entry[2][0] != "u":
print "Error in GOADB: 3rd column must be a uni or u Unicode name <" + line + ">"
entry = None
if len(entry) == 2:
entry.append("")
# Add GOADB index value
if entry:
entry.append(goadbIndex)
goadbIndex = goadbIndex + 1
return entry
########################################################
# Misc utilities
########################################################
def RemoveComment(line):
try:
index = string.index(line, "#")
line = line[:index]
except:
pass
return line
#return list of lines with comments and blank lines removed.
def CleanLines(lineList):
lineList = map(lambda line: RemoveComment(line) , lineList)
lineList = filter(lambda line: string.strip(line), lineList)
return lineList
#split out lines from a stream of file data.
def SplitLines(data):
|
def LoadGOADB(filePath):
""" Read a glyph alias file for makeOTF into a dict."""
global goadbIndex
finalNameDict = {}
productionNameDict = {}
goadbIndex = 0
gfile = open(filePath,"rb")
data = gfile.read()
gfile.close()
glyphEntryList = SplitLines(data)
glyphEntryList = CleanLines(glyphEntryList)
glyphEntryList = map(SplitGOADBEntries, glyphEntryList)
glyphEntryList = filter(lambda entry: entry, glyphEntryList) # drop out any entry == None
for entry in glyphEntryList:
finalNameDict[entry[0]] = [ entry[1], entry[2], entry[3] ]
if productionNameDict.has_key(entry[1]):
print "Error in GOADB: more than one final name for a production name!"
print "\tfinal name 1:", productionNameDict[entry[1]], "Final name 2:", entry[0], "Production name:", entry[1]
print "\tUsing Final name 2."
productionNameDict[entry[1]] = [ entry[0], entry[2], entry[3] ]
return finalNameDict, productionNameDict
kDefaultReportExtension = "log"
kDefaultLogSubdirectory = "logs"
kDefaultVersionDigits = 3
kWriteBoth = 3
kWriteStdOut = 1
kWriteFile = 2
class Reporter:
""" Logging class to let me echo output to both/either screen and a log file.
Makes log files with same base name as font file, and special extension.
Default extension is supplied, can be overridden.
Tries to put log file in subdirectory under font file home directory."""
def __init__(self, fileOrPath, extension = kDefaultReportExtension):
self.file = None
self.fileName = None
self.state = kWriteBoth
if type(fileOrPath) == type(" "):
# try to find or make log directory for report file.
dir,name = os.path.split(fileOrPath)
logDir = os.path.join(dir, kDefaultLogSubdirectory)
if not os.path.exists(logDir):
try:
os.mkdir(logDir)
except IOError:
print "Failed to make log file subdir:", logDir
return
if os.path.exists(logDir):
fileOrPath = os.path.join(logDir, name)
basePath, fileExt = os.path.splitext(fileOrPath)
self.fileName = self.makeSafeReportName(basePath, extension)
try:
self.file = | lineList = re.findall(r"([^\r\n]+)[\r\n]", data)
return lineList | identifier_body |
AdobeFontLabUtils.py | installed, and the system environment variable PATH
contains the path to the FDK sub-directory containing '%s'.""" % (toolName, toolName)
return toolPath # get rid of new-line
def checkControlKeyPress():
notPressed = 1
if os.name == "nt":
try:
import win32api
import win32con
keyState = win32api.GetAsyncKeyState(win32con.VK_CONTROL)
if keyState < 0:
notPressed = 0
except ImportError:
print "Note: to be able to set options for this script, you must install"
print "win32all Python module from Mark Hammond. This can be found at:"
print " http://www.python.net/crew/mhammond/win32/Downloads.html"
print "or http://sourceforge.net/, and search for 'Python Windows Extensions."
else:
import Carbon.Evt
import Carbon.Events
modifiers = Carbon.Evt.GetCurrentKeyModifiers()
if modifiers & Carbon.Events.controlKey:
notPressed = 0
return notPressed
def checkShiftKeyPress():
notPressed = 1
if os.name == "nt":
try:
import win32api
import win32con
keyState = win32api.GetAsyncKeyState(win32con.VK_SHIFT)
if keyState < 0:
notPressed = 0
except ImportError:
print "Note: to be able to set options for this script, you must install"
print "win32all Python module from Mark Hammond. This can be found at:"
print " http://www.python.net/crew/mhammond/win32/Downloads.html"
print "or http://sourceforge.net/, and search for 'Python Windows Extensions."
else:
import Carbon.Evt
import Carbon.Events
modifiers = Carbon.Evt.GetCurrentKeyModifiers()
if modifiers & Carbon.Events.shiftKey:
notPressed = 0
return notPressed
def checkAltKeyPress():
notPressed = 1
if os.name == "nt":
try:
import win32api
import win32con
keyState = win32api.GetAsyncKeyState(win32con.VK_MENU) # VK_MENU is the Alt key
if keyState < 0:
notPressed = 0
except ImportError:
print "Note: to be able to set options for this script, you must install"
print "win32all Python module from Mark Hammond. This can be found at:"
print " http://www.python.net/crew/mhammond/win32/Downloads.html"
print "or http://sourceforge.net/, and search for 'Python Windows Extensions."
else:
import Carbon.Evt
import Carbon.Events
modifiers = Carbon.Evt.GetCurrentKeyModifiers()
if modifiers & Carbon.Events.optionKey:
notPressed = 0
return notPressed
def GetSharedDataPath():
sdPath = ""
for path in sys.path:
if not re.search(r"FDK/Tools", path):
continue
m = re.search(kSharedDataName, path)
if not m:
continue
sdPath = path[:m.end()]
if not sdPath:
print "Error. The path to ",kSharedDataName," is not in the sys.path list."
elif not os.path.exists(sdPath):
print "Error.", sdPath,"does not exist."
sdPath = ""
return sdPath
# fontDirPath is an absolute path to the font dir, supplied by FontLab
# fontPSName is used to get the top family directory from the font library DB file.
# so as to look back up the family tree for the GOASDB.
def GetGOADBPath(fontDirPath, fontPSName):
goadbPath = ""
dirPath = fontDirPath
trys = 3 # look first in the font's dir, then up to two levels up.
while trys:
goadbPath = os.path.join(dirPath, kGlyphOrderAndAliasDBName)
if (goadbPath and os.path.exists(goadbPath)):
break
dirPath = os.path.dirname(dirPath)
trys -= 1
if (goadbPath and os.path.exists(goadbPath)):
return goadbPath
# default to the global FDK GOADB.
goadbPath = ""
sharedDataDir = GetSharedDataPath()
if sharedDataDir:
goadbPath = os.path.join(sharedDataDir, kGlyphOrderAndAliasDBName )
if (goadbPath and os.path.exists(goadbPath)):
return goadbPath
print "Error. Could not find", kGlyphOrderAndAliasDBName,", even in FDK Shared Data Dir."
goadbPath = ""
return goadbPath
def SplitGOADBEntries(line):
global goadbIndex
entry = string.split(line)
if (len(entry) < 2) or (len(entry) > 3):
print "Error in GOADB: bad entry - too many or two few columns <" + line + ">"
entry = None
if len(entry) == 3:
if entry[2][0] != "u":
print "Error in GOADB: 3rd column must be a uni or u Unicode name <" + line + ">"
entry = None
if len(entry) == 2:
entry.append("")
# Add GOADB index value
if entry:
entry.append(goadbIndex)
goadbIndex = goadbIndex + 1
return entry
########################################################
# Misc utilities
########################################################
def RemoveComment(line):
try:
index = string.index(line, "#")
line = line[:index]
except:
pass
return line
#return list of lines with comments and blank lines removed.
def CleanLines(lineList):
lineList = map(lambda line: RemoveComment(line) , lineList)
lineList = filter(lambda line: string.strip(line), lineList)
return lineList
#split out lines from a stream of file data.
def SplitLines(data):
lineList = re.findall(r"([^\r\n]+)[\r\n]", data)
return lineList
def LoadGOADB(filePath):
""" Read a glyph alias file for makeOTF into a dict."""
global goadbIndex
finalNameDict = {}
productionNameDict = {}
goadbIndex = 0
gfile = open(filePath,"rb")
data = gfile.read()
gfile.close()
glyphEntryList = SplitLines(data)
glyphEntryList = CleanLines(glyphEntryList)
glyphEntryList = map(SplitGOADBEntries, glyphEntryList)
glyphEntryList = filter(lambda entry: entry, glyphEntryList) # drop out any entry == None
for entry in glyphEntryList:
finalNameDict[entry[0]] = [ entry[1], entry[2], entry[3] ]
if productionNameDict.has_key(entry[1]):
print "Error in GOADB: more than one final name for a production name!"
print "\tfinal name 1:", productionNameDict[entry[1]], "Final name 2:", entry[0], "Production name:", entry[1]
print "\tUsing Final name 2."
productionNameDict[entry[1]] = [ entry[0], entry[2], entry[3] ]
return finalNameDict, productionNameDict
kDefaultReportExtension = "log"
kDefaultLogSubdirectory = "logs"
kDefaultVersionDigits = 3
kWriteBoth = 3
kWriteStdOut = 1
kWriteFile = 2
class Reporter:
""" Logging class to let me echo output to both/either screen and a log file.
Makes log files with same base name as font file, and special extension.
Default extension is supplied, can be overridden.
Trys to put log file in subdirectory under font file home directory."""
def __init__(self, fileOrPath, extension = kDefaultReportExtension):
self.file = None
self.fileName = None
self.state = kWriteBoth
if type(fileOrPath) == type(" "):
# try to find or make log directory for report file.
dir,name = os.path.split(fileOrPath)
logDir = os.path.join(dir, kDefaultLogSubdirectory)
if not os.path.exists(logDir):
try:
os.mkdir(logDir)
except IOError:
print "Failed to make log file subdir:", logDir
return
if os.path.exists(logDir):
fileOrPath = os.path.join(logDir, name)
basePath, fileExt = os.path.splitext(fileOrPath)
self.fileName = self.makeSafeReportName(basePath, extension)
try:
self.file = open(self.fileName, "wt")
except IOError:
print "Failed to open file", self.fileName
return
else:
self.fileName = None
self.file = fileOrPath
return
def makeSafeReportName(self, baseFilePath, extension):
global kDefaultVersionDigits
""" make a report file name with a number 1 greater than any
existing report file name with the same extension. We know the
baseFilePath exists, as it comes from an open font file. We will
not worry about 32 char name limits -> Mac OS X and Windows 2000
only.
"""
n = 1 | dir, file = os.path.split(baseFilePath) | random_line_split |
|
AdobeFontLabUtils.py | /Tools/osx" % (home)
os.environ["PATH"] = paths + fdkPath
if os.name == "nt":
p = os.popen("for %%i in (%s) do @echo. %%~$PATH:i" % (toolName))
log = p.read()
p.close()
log = log.strip()
if log:
toolPath = log
else:
p = os.popen("which %s" % (toolName))
log = p.read()
p.close()
log = log.strip()
if log:
toolPath = log
if not toolPath:
print """
The script cannot run the command-line program '%s'. Please make sure the AFDKO is installed, and the system environment variable PATH
contains the path to the FDK sub-directory containing '%s'.""" % (toolName, toolName)
return toolPath # get rid of new-line
def checkControlKeyPress():
notPressed = 1
if os.name == "nt":
try:
import win32api
import win32con
keyState = win32api.GetAsyncKeyState(win32con.VK_CONTROL)
if keyState < 0:
notPressed = 0
except ImportError:
print "Note: to be able to set options for this script, you must install"
print "win32all Python module from Mark Hammond. This can be found at:"
print " http://www.python.net/crew/mhammond/win32/Downloads.html"
print "or http://sourceforge.net/, and search for 'Python Windows Extensions."
else:
import Carbon.Evt
import Carbon.Events
modifiers = Carbon.Evt.GetCurrentKeyModifiers()
if modifiers & Carbon.Events.controlKey:
notPressed = 0
return notPressed
def checkShiftKeyPress():
notPressed = 1
if os.name == "nt":
try:
import win32api
import win32con
keyState = win32api.GetAsyncKeyState(win32con.VK_SHIFT)
if keyState < 0:
notPressed = 0
except ImportError:
print "Note: to be able to set options for this script, you must install"
print "win32all Python module from Mark Hammond. This can be found at:"
print " http://www.python.net/crew/mhammond/win32/Downloads.html"
print "or http://sourceforge.net/, and search for 'Python Windows Extensions."
else:
import Carbon.Evt
import Carbon.Events
modifiers = Carbon.Evt.GetCurrentKeyModifiers()
if modifiers & Carbon.Events.shiftKey:
notPressed = 0
return notPressed
def checkAltKeyPress():
notPressed = 1
if os.name == "nt":
try:
import win32api
import win32con
keyState = win32api.GetAsyncKeyState(win32con.VK_MENU) # VK_MENU is the Alt key
if keyState < 0:
notPressed = 0
except ImportError:
print "Note: to be able to set options for this script, you must install"
print "win32all Python module from Mark Hammond. This can be found at:"
print " http://www.python.net/crew/mhammond/win32/Downloads.html"
print "or http://sourceforge.net/, and search for 'Python Windows Extensions."
else:
import Carbon.Evt
import Carbon.Events
modifiers = Carbon.Evt.GetCurrentKeyModifiers()
if modifiers & Carbon.Events.optionKey:
notPressed = 0
return notPressed
def GetSharedDataPath():
sdPath = ""
for path in sys.path:
if not re.search(r"FDK/Tools", path):
continue
m = re.search(kSharedDataName, path)
if not m:
continue
sdPath = path[:m.end()]
if not sdPath:
print "Error. The path to ",kSharedDataName," is not in the sys.path list."
elif not os.path.exists(sdPath):
print "Error.", sdPath,"does not exist."
sdPath = ""
return sdPath
# fontDirPath is an absolute path to the font dir, supplied by FontLab
# fontPSName is used to get the top family directory from the font library DB file.
# so as to look back up the family tree for the GOASDB.
def GetGOADBPath(fontDirPath, fontPSName):
goadbPath = ""
dirPath = fontDirPath
trys = 3 # look first in the font's dir, then up to two levels up.
while trys:
goadbPath = os.path.join(dirPath, kGlyphOrderAndAliasDBName)
if (goadbPath and os.path.exists(goadbPath)):
break
dirPath = os.path.dirname(dirPath)
trys -= 1
if (goadbPath and os.path.exists(goadbPath)):
return goadbPath
# default to the global FDK GOADB.
goadbPath = ""
sharedDataDir = GetSharedDataPath()
if sharedDataDir:
goadbPath = os.path.join(sharedDataDir, kGlyphOrderAndAliasDBName )
if (goadbPath and os.path.exists(goadbPath)):
return goadbPath
print "Error. Could not find", kGlyphOrderAndAliasDBName,", even in FDK Shared Data Dir."
goadbPath = ""
return goadbPath
def SplitGOADBEntries(line):
global goadbIndex
entry = string.split(line)
if (len(entry) < 2) or (len(entry) > 3):
print "Error in GOADB: bad entry - too many or two few columns <" + line + ">"
entry = None
if len(entry) == 3:
if entry[2][0] != "u":
print "Error in GOADB: 3rd column must be a uni or u Unicode name <" + line + ">"
entry = None
if len(entry) == 2:
|
# Add GOADB index value
if entry:
entry.append(goadbIndex)
goadbIndex = goadbIndex + 1
return entry
########################################################
# Misc utilities
########################################################
def RemoveComment(line):
try:
index = string.index(line, "#")
line = line[:index]
except:
pass
return line
#return list of lines with comments and blank lines removed.
def CleanLines(lineList):
lineList = map(lambda line: RemoveComment(line) , lineList)
lineList = filter(lambda line: string.strip(line), lineList)
return lineList
#split out lines from a stream of file data.
def SplitLines(data):
lineList = re.findall(r"([^\r\n]+)[\r\n]", data)
return lineList
def LoadGOADB(filePath):
""" Read a glyph alias file for makeOTF into a dict."""
global goadbIndex
finalNameDict = {}
productionNameDict = {}
goadbIndex = 0
gfile = open(filePath,"rb")
data = gfile.read()
gfile.close()
glyphEntryList = SplitLines(data)
glyphEntryList = CleanLines(glyphEntryList)
glyphEntryList = map(SplitGOADBEntries, glyphEntryList)
glyphEntryList = filter(lambda entry: entry, glyphEntryList) # drop out any entry == None
for entry in glyphEntryList:
finalNameDict[entry[0]] = [ entry[1], entry[2], entry[3] ]
if productionNameDict.has_key(entry[1]):
print "Error in GOADB: more than one final name for a production name!"
print "\tfinal name 1:", productionNameDict[entry[1]], "Final name 2:", entry[0], "Production name:", entry[1]
print "\tUsing Final name 2."
productionNameDict[entry[1]] = [ entry[0], entry[2], entry[3] ]
return finalNameDict, productionNameDict
kDefaultReportExtension = "log"
kDefaultLogSubdirectory = "logs"
kDefaultVersionDigits = 3
kWriteBoth = 3
kWriteStdOut = 1
kWriteFile = 2
class Reporter:
""" Logging class to let me echo output to both/either screen and a log file.
Makes log files with same base name as font file, and special extension.
Default extension is supplied, can be overridden.
Trys to put log file in subdirectory under font file home directory."""
def __init__(self, fileOrPath, extension = kDefaultReportExtension):
self.file = None
self.fileName = None
self.state = kWriteBoth
if type(fileOrPath) == type(" "):
# try to find or make log directory for report file.
dir,name = os.path.split(fileOrPath)
logDir = os.path.join(dir, kDefaultLogSubdirectory)
if not os.path.exists(logDir):
try:
os.mkdir(logDir)
except IOError:
print "Failed to make log file subdir:", logDir
return
if os.path.exists(logDir):
fileOrPath = os.path.join(logDir, name)
basePath, fileExt = os.path.splitext(fileOrPath)
self.fileName = self.makeSafeReportName(basePath, extension)
try:
self.file = open | entry.append("") | conditional_block |
gap_stats.py | _desired
if __name__ == "__main__":
usage = """
___________
Description:
Command line utility for analyzing gaps in a fasta file. One file can be analyzed, or up to 3 can be compared.
Use this tool to compare a genome assembly pre and post gap filling with tools such as PBJelly.
_____
Usage:
python gap_stats.py [options] <sequence1.fasta> <sequence2.fasta> <sequence3.fasta>
OPTIONS:
-m Save a matplotlib gap length histogram in current working directory.
* Requires matplotlib to be installed *
-p Write a plain text file of all gap lengths in current working directory for
use as input into other statistical analysis software.
-b Make a gap bed file for each input fasta.
-h Print help message.
"""
def parse_args(args_list):
"""
Given all command line arguments, make a dictionary containing all
of the flags, and all of the fasta files.
If the command line arguments either request help or raises an error,
that will be done here. If this function returns, it can be assumed that
the command line statement is ready for further analysis.
:param args_list: List of command line arguments (sys.argv)
:return: Dictionary specifying all flags and all fasta files.
"""
# If no arguments specified, print usage statement with no error.
if len(args_list) == 1:
sys.exit(usage)
# Make all flags upper case to avoid case sensitivity.
flags = [i.upper() for i in args_list if i.startswith('-')]
# See if help is desired. If so, print usage with no error.
if help_desired(flags):
sys.exit(usage)
# Retrieve fasta files. At least one, up to 3 is needed.
fastas = [
i for i in args_list if
i.endswith('.fasta') or
i.endswith('.fa') or
i.endswith('.fna') or
i.endswith('.fas')
]
# Make sure that at least one fasta file was found.
if not fastas:
print usage
raise ValueError('No fasta files found.')
# Make sure that no more than 3 fasta files have been selected.
if len(fastas) > 3:
print usage
raise ValueError(
'A maximum of 3 fasta files can be compared at once. You entered %r fasta files.' % len(fastas)
)
return {
'flags': flags,
'fastas': fastas
}
def write_gap_stats(info):
"""
Use info obtained in get_gap_info to write a summary stats csv file.
:param info: Dictionary where
key = fasta file
value = ordered dictionary containing all gap info from get_gap_info
"""
with open('gap_stats.txt', 'w') as out_file:
# Get each category from each fasta file. One row for each.
all_percent_N = [str(100*(float(info[i]['total_N'])/info[i]['total_nucleotides'])) for i in info.keys()]
all_total_gaps = [str(info[i]['total_gaps']) for i in info.keys()]
all_total_gaps_over_100 = [str(info[i]['total_gaps_over_100']) for i in info.keys()]
all_longest_gap = [str(max(info[i]['all_gap_lengths'])) for i in info.keys()]
all_medians = [str(calculate_median(info[i]['all_gap_lengths'])) for i in info.keys()]
files = [ntpath.basename(f) for f in info.keys()]
# Write rows out to csv file.
# First, write out the header (gap metrics).
out_file.write('%s\t%s\t%s\t%s\t%s\t%s\n' % ('file_name', '%N', 'Total Gaps', 'Total Gaps Longer Than 100bp', 'Longest Gap', 'Median Gap Length'))
# Write results for each file.
for i in range(len(files)):
out_file.write('%s\t%s\t%s\t%s\t%s\t%s\n' % (files[i], all_percent_N[i], all_total_gaps[i], all_total_gaps_over_100[i], all_longest_gap[i], all_medians[i]))
def write_bed_file(bed_dict, out_file_name):
"""
From a dictionary storing bed file info, write output file in bed format.
:param bed_dict: Dict where keys = fasta headers, and values = coordinates of each gap in that sequence.
:param out_file_name: Name for output bed file.
"""
with open(os.getcwd() + '/' + ntpath.basename(out_file_name), 'w') as out_file:
for header in bed_dict.keys():
for coordinates in bed_dict[header]:
out_file.write(
'%s\t%r\t%r\n' %(header[1:], coordinates[0], coordinates[1])
)
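# --- Added sketch (not part of the original script) ---
# Gap coordinates come straight from re match start()/end(), so each BED row is
# a 0-based, half-open interval, which is what the BED format expects. The
# header and coordinates below are illustrative only.
def _example_write_bed_file():
    bed = collections.OrderedDict()
    bed['>scaffold_1'] = [(100, 250), (4000, 4050)]  # two hypothetical gaps
    write_bed_file(bed, 'example_gaps.bed')
    # example_gaps.bed would then contain (tab-separated):
    #   scaffold_1  100   250
    #   scaffold_1  4000  4050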
def write_hist_img_file(lengths, labels):
| plt.legend()
plt.title('Gap Length Histogram')
plt.xlabel('Gap Length (b)')
plt.ylabel('Frequency')
plt.savefig(os.getcwd() + '/gap_stats_hist.pdf')
def write_hist_text_file(lengths, labels):
"""
Write a plain text file to current working directory.
1 ordered column of all histogram lengths.
This is for input into statistical analysis software such as R.
:param lengths: List of Lists of all gap lengths for each fasta.
:param labels: Labels to be used in the histogram image file.
"""
for lengths_list, label in zip(lengths, labels):
hist_file_name = label[:label.rfind('.')] + '.all_lengths.txt'
with open(os.getcwd() + '/' + ntpath.basename(hist_file_name), 'w') as out_file:
out_file.write(ntpath.basename(label) + '\n')
for length in sorted(lengths_list):
out_file.write(str(length) + '\n')
def get_gap_info(in_file):
"""
Given a fasta file, find out some information regarding its global gap content.
:param in_file: Fasta or multi-fasta with sequences for gap analysis.
:return: dictionary with total_N, total_nucleotides, total_gaps, total_gaps_over_100, all_gap_lengths and bed_gaps
"""
# Initialize values to be computed.
total_N = 0
total_nucleotides = 0
total_gaps = 0
total_gaps_over_100 = 0
all_gap_lengths = []
# Use a dictionary to store bed coordinates.
# key = fasta header
# Value = list of tuples corresponding to genomic coordinates.
bed_gaps = collections.OrderedDict()
# Iterate through each sequence in the fasta,
# and get gap info from each.
sequences = SeqReader(in_file)
for header, sequence in sequences.parse_fasta():
gap_sequence = GapSequence(sequence)
# Get total number of 'N' characters for this sequence.
total_N += gap_sequence.count_Ns()
# Get total number of nucleotides for this sequence.
total_nucleotides += len(sequence)
for gap in gap_sequence.get_gaps():
# Increment total number of gaps
total_gaps += 1
if len(gap) > 100:
total_gaps_over_100 += 1
# Save this gap length to master list.
all_gap_lengths.append(len(gap))
# Now fill in bed file data structure.
all_coordinates = [(m.start(0), m.end(0)) for m in gap_sequence.get_gap_coords()]
if all_coordinates:
bed_gaps[header] = all_coordinates
return {
'total_N': total_N,
'total_nucleotides': total_nucleotides,
'total_gaps': total_gaps,
'total_gaps_over_100': total_gaps_over_100,
'all_gap_lengths': all_gap_lengths,
'bed_gaps': bed_gaps
}
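# --- Added sketch (not part of the original script) ---
# Consuming get_gap_info() directly for a single assembly, using the same
# fields the summary writer relies on. The file name is hypothetical and at
# least one gap is assumed to be present.
def _example_single_fasta_summary():
    info = get_gap_info('assembly.fasta')
    percent_n = 100 * float(info['total_N']) / info['total_nucleotides']
    print 'N%%: %.2f, gaps: %d, longest gap: %d bp' % (
        percent_n, info['total_gaps'], max(info['all_gap_lengths']))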
# Parse the command line arguments.
arg_dict = parse_args(sys.argv)
# Get gap info for each fasta.
all_files_info = collections.OrderedDict()
for fasta in arg_dict['fastas']:
log(' ---- Analyzing gaps for %s' % fasta)
all_files_info[fasta] = get_gap_info(fasta)
# Write csv file with basic gap stats.
write_gap_stats(all_files_info)
# Check if bed file is desired.
# Save to current working directory if so.
if '-B' in arg_dict['flags']:
log(' ---- Writing bed file(s).')
for f in all_files_info.keys():
file | """
Save a matplotlib length histogram image to current working directory.
:param lengths: List of Lists of all gap lengths for each fasta.
:param labels: Labels to be used in the histogram image file.
"""
import matplotlib.pyplot as plt
# Find the max and min values for plotting.
max_length = max(max(i) for i in lengths)
min_length = min(min(i) for i in lengths)
bin_size = int(0.025*max_length)
# Make histogram
colors = ['r', 'g', 'b']
plt.hist(
lengths,
bins=range(min_length, max_length+bin_size, bin_size),
color=colors[:len(lengths)],
label=[ntpath.basename(l) for l in labels]
) | identifier_body |
gap_stats.py | ired
if __name__ == "__main__":
usage = """
___________
Description:
Command line utility for analyzing gaps in a fasta file. One file can be analyzed, or up to 3 can be compared.
Use this tool to compare a genome assembly pre and post gap filling with tools such as PBJelly.
_____
Usage:
python gap_stats.py [options] <sequence1.fasta> <sequence2.fasta> <sequence3.fasta>
OPTIONS:
-m Save a matplotlib gap length histogram in current working directory.
* Requires matplotlib to be installed *
-p Write a plain text file of all gap lengths in current working directory for
use as input into other statistical analysis software.
-b Make a gap bed file for each input fasta.
-h Print help message.
"""
def parse_args(args_list):
"""
Given all command line arguments, make a dictionary containing all
of the flags, and all of the fasta files.
If the command line arguments either request help or raises an error,
that will be done here. If this function returns, it can be assumed that
the command line statement is ready for further analysis.
:param args_list: List of command line arguments (sys.argv)
:return: Dictionary specifying all flags and all fasta files.
"""
# If no arguments specified, print usage statement with no error.
if len(args_list) == 1:
sys.exit(usage)
# Make all flags upper case to avoid case sensitivity.
flags = [i.upper() for i in args_list if i.startswith('-')]
# See if help is desired. If so, print usage with no error.
if help_desired(flags):
sys.exit(usage)
# Retrieve fasta files. At least one, up to 3 is needed.
fastas = [
i for i in args_list if
i.endswith('.fasta') or
i.endswith('.fa') or
i.endswith('.fna') or
i.endswith('.fas')
]
# Make sure that at least one fasta file was found.
if not fastas:
print usage
raise ValueError('No fasta files found.')
# Make sure that no more than 3 fasta files have been selected.
if len(fastas) > 3:
print usage
raise ValueError(
'A maximum of 3 fasta files can be compared at once. You entered %r fasta files.' % len(fastas)
)
return {
'flags': flags,
'fastas': fastas
}
def write_gap_stats(info):
"""
Use info obtained in get_gap_info to write a summary stats csv file.
:param info: Dictionary where
key = fasta file
value = ordered dictionary containing all gap info from get_gap_info
"""
with open('gap_stats.txt', 'w') as out_file:
# Get each category from each fasta file. One row for each.
all_percent_N = [str(100*(float(info[i]['total_N'])/info[i]['total_nucleotides'])) for i in info.keys()]
all_total_gaps = [str(info[i]['total_gaps']) for i in info.keys()]
all_total_gaps_over_100 = [str(info[i]['total_gaps_over_100']) for i in info.keys()]
all_longest_gap = [str(max(info[i]['all_gap_lengths'])) for i in info.keys()]
all_medians = [str(calculate_median(info[i]['all_gap_lengths'])) for i in info.keys()]
files = [ntpath.basename(f) for f in info.keys()]
# Write rows out to csv file.
# First, write out the header (gap metrics).
out_file.write('%s\t%s\t%s\t%s\t%s\t%s\n' % ('file_name', '%N', 'Total Gaps', 'Total Gaps Longer Than 100bp', 'Longest Gap', 'Median Gap Length'))
# Write results for each file.
for i in range(len(files)):
out_file.write('%s\t%s\t%s\t%s\t%s\t%s\n' % (files[i], all_percent_N[i], all_total_gaps[i], all_total_gaps_over_100[i], all_longest_gap[i], all_medians[i]))
def write_bed_file(bed_dict, out_file_name):
"""
From a dictionary storing bed file info, write output file in bed format.
:param bed_dict: Dict where keys = fasta headers, and values = coordinates of each gap in that sequence.
:param out_file_name: Name for output bed file.
"""
with open(os.getcwd() + '/' + ntpath.basename(out_file_name), 'w') as out_file:
for header in bed_dict.keys():
|
def write_hist_img_file(lengths, labels):
"""
Save a matplotlib length histogram image to current working directory.
:param lengths: List of Lists of all gap lengths for each fasta.
:param labels: Labels to be used in the histogram image file.
"""
import matplotlib.pyplot as plt
# Find the max and min values for plotting.
max_length = max(max(i) for i in lengths)
min_length = min(min(i) for i in lengths)
bin_size = int(0.025*max_length)
# Make histogram
colors = ['r', 'g', 'b']
plt.hist(
lengths,
bins=range(min_length, max_length+bin_size, bin_size),
color=colors[:len(lengths)],
label=[ntpath.basename(l) for l in labels]
)
plt.legend()
plt.title('Gap Length Histogram')
plt.xlabel('Gap Length (b)')
plt.ylabel('Frequency')
plt.savefig(os.getcwd() + '/gap_stats_hist.pdf')
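# --- Added note (not part of the original script) ---
# bin_size above is 2.5% of the longest gap, so if every gap is shorter than
# 40 bp it truncates to 0 and range() would raise ValueError. A defensive
# variant, should that case matter, is sketched here.
def _example_safe_bin_size(lengths):
    max_length = max(max(i) for i in lengths)
    return max(1, int(0.025 * max_length))  # never let the bin width reach zero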
def write_hist_text_file(lengths, labels):
"""
Write a plain text file to current working directory.
1 ordered column of all histogram lengths.
This is for input into statistical analysis software such as R.
:param lengths: List of Lists of all gap lengths for each fasta.
:param labels: Labels to be used in the histogram image file.
"""
for lengths_list, label in zip(lengths, labels):
hist_file_name = label[:label.rfind('.')] + '.all_lengths.txt'
with open(os.getcwd() + '/' + ntpath.basename(hist_file_name), 'w') as out_file:
out_file.write(ntpath.basename(label) + '\n')
for length in sorted(lengths_list):
out_file.write(str(length) + '\n')
def get_gap_info(in_file):
"""
Given a fasta file, find out some information regarding its global gap content.
:param in_file: Fasta or multi-fasta with sequences for gap analysis.
:return: dictionary with total_N, total_nucleotides, total_gaps, total_gaps_over_100, all_gap_lengths and bed_gaps
"""
# Initialize values to be computed.
total_N = 0
total_nucleotides = 0
total_gaps = 0
total_gaps_over_100 = 0
all_gap_lengths = []
# Use a dictionary to store bed coordinates.
# key = fasta header
# Value = list of tuples corresponding to genomic coordinates.
bed_gaps = collections.OrderedDict()
# Iterate through each sequence in the fasta,
# and get gap info from each.
sequences = SeqReader(in_file)
for header, sequence in sequences.parse_fasta():
gap_sequence = GapSequence(sequence)
# Get total number of 'N' characters for this sequence.
total_N += gap_sequence.count_Ns()
# Get total number of nucleotides for this sequence.
total_nucleotides += len(sequence)
for gap in gap_sequence.get_gaps():
# Increment total number of gaps
total_gaps += 1
if len(gap) > 100:
total_gaps_over_100 += 1
# Save this gap length to master list.
all_gap_lengths.append(len(gap))
# Now fill in bed file data structure.
all_coordinates = [(m.start(0), m.end(0)) for m in gap_sequence.get_gap_coords()]
if all_coordinates:
bed_gaps[header] = all_coordinates
return {
'total_N': total_N,
'total_nucleotides': total_nucleotides,
'total_gaps': total_gaps,
'total_gaps_over_100': total_gaps_over_100,
'all_gap_lengths': all_gap_lengths,
'bed_gaps': bed_gaps
}
# Parse the command line arguments.
arg_dict = parse_args(sys.argv)
# Get gap info for each fasta.
all_files_info = collections.OrderedDict()
for fasta in arg_dict['fastas']:
log(' ---- Analyzing gaps for %s' % fasta)
all_files_info[fasta] = get_gap_info(fasta)
# Write csv file with basic gap stats.
write_gap_stats(all_files_info)
# Check if bed file is desired.
# Save to current working directory if so.
if '-B' in arg_dict['flags']:
log(' ---- Writing bed file(s).')
for f in all_files_info.keys():
file | for coordinates in bed_dict[header]:
out_file.write(
'%s\t%r\t%r\n' %(header[1:], coordinates[0], coordinates[1])
) | conditional_block |
gap_stats.py | ired
if __name__ == "__main__":
usage = """
___________
Description:
Command line utility for analyzing gaps in a fasta file. One file can be analyzed, or up to 3 can be compared.
Use this tool to compare a genome assembly pre and post gap filling with tools such as PBJelly.
_____
Usage:
python gap_stats.py [options] <sequence1.fasta> <sequence2.fasta> <sequence3.fasta>
OPTIONS:
-m Save a matplotlib gap length histogram in current working directory.
* Requires matplotlib to be installed *
-p Write a plain text file of all gap lengths in current working directory for
use as input into other statistical analysis software.
-b Make a gap bed file for each input fasta.
-h Print help message.
"""
def parse_args(args_list):
"""
Given all command line arguments, make a dictionary containing all
of the flags, and all of the fasta files.
If the command line arguments either request help or raises an error,
that will be done here. If this function returns, it can be assumed that
the command line statement is ready for further analysis.
:param args_list: List of command line arguments (sys.argv)
:return: Dictionary specifying all flags and all fasta files.
"""
# If no arguments specified, print usage statement with no error.
if len(args_list) == 1:
sys.exit(usage)
# Make all flags upper case to avoid case sensitivity.
flags = [i.upper() for i in args_list if i.startswith('-')]
# See if help is desired. If so, print usage with no error.
if help_desired(flags):
sys.exit(usage)
# Retrieve fasta files. At least one, up to 3 is needed.
fastas = [
i for i in args_list if
i.endswith('.fasta') or
i.endswith('.fa') or
i.endswith('.fna') or
i.endswith('.fas')
]
# Make sure that at least one fasta file was found.
if not fastas:
print usage
raise ValueError('No fasta files found.')
# Make sure that no more than 3 fasta files have been selected.
if len(fastas) > 3:
print usage
raise ValueError(
'A maximum of 3 fasta files can be compared at once. You entered %r fasta files.' % len(fastas)
)
return {
'flags': flags,
'fastas': fastas
}
def | (info):
"""
Use info obtained in get_gap_info to write a summary stats csv file.
:param info: Dictionary where
key = fasta file
value = ordered dictionary containing all gap info from get_gap_info
"""
with open('gap_stats.txt', 'w') as out_file:
# Get each category from each fasta file. One row for each.
all_percent_N = [str(100*(float(info[i]['total_N'])/info[i]['total_nucleotides'])) for i in info.keys()]
all_total_gaps = [str(info[i]['total_gaps']) for i in info.keys()]
all_total_gaps_over_100 = [str(info[i]['total_gaps_over_100']) for i in info.keys()]
all_longest_gap = [str(max(info[i]['all_gap_lengths'])) for i in info.keys()]
all_medians = [str(calculate_median(info[i]['all_gap_lengths'])) for i in info.keys()]
files = [ntpath.basename(f) for f in info.keys()]
# Write rows out to csv file.
# First, write out the header (gap metrics).
out_file.write('%s\t%s\t%s\t%s\t%s\t%s\n' % ('file_name', '%N', 'Total Gaps', 'Total Gaps Longer Than 100bp', 'Longest Gap', 'Median Gap Length'))
# Write results for each file.
for i in range(len(files)):
out_file.write('%s\t%s\t%s\t%s\t%s\t%s\n' % (files[i], all_percent_N[i], all_total_gaps[i], all_total_gaps_over_100[i], all_longest_gap[i], all_medians[i]))
def write_bed_file(bed_dict, out_file_name):
"""
From a dictionary storing bed file info, write output file in bed format.
:param bed_dict: Dict where keys = fasta headers, and values = coordinates of each gap in that sequence.
:param out_file_name: Name for output bed file.
"""
with open(os.getcwd() + '/' + ntpath.basename(out_file_name), 'w') as out_file:
for header in bed_dict.keys():
for coordinates in bed_dict[header]:
out_file.write(
'%s\t%r\t%r\n' %(header[1:], coordinates[0], coordinates[1])
)
def write_hist_img_file(lengths, labels):
"""
Save a matplotlib length histogram image to current working directory.
:param lengths: List of Lists of all gap lengths for each fasta.
:param labels: Labels to be used in the histogram image file.
"""
import matplotlib.pyplot as plt
# Find the max and min values for plotting.
max_length = max(max(i) for i in lengths)
min_length = min(min(i) for i in lengths)
bin_size = int(0.025*max_length)
# Make histogram
colors = ['r', 'g', 'b']
plt.hist(
lengths,
bins=range(min_length, max_length+bin_size, bin_size),
color=colors[:len(lengths)],
label=[ntpath.basename(l) for l in labels]
)
plt.legend()
plt.title('Gap Length Histogram')
plt.xlabel('Gap Length (b)')
plt.ylabel('Frequency')
plt.savefig(os.getcwd() + '/gap_stats_hist.pdf')
def write_hist_text_file(lengths, labels):
"""
Write a plain text file to current working directory.
1 ordered column of all histogram lengths.
This is for input into statistical analysis software such as R.
:param lengths: List of Lists of all gap lengths for each fasta.
:param labels: Labels to be used in the histogram image file.
"""
for lengths_list, label in zip(lengths, labels):
hist_file_name = label[:label.rfind('.')] + '.all_lengths.txt'
with open(os.getcwd() + '/' + ntpath.basename(hist_file_name), 'w') as out_file:
out_file.write(ntpath.basename(label) + '\n')
for length in sorted(lengths_list):
out_file.write(str(length) + '\n')
def get_gap_info(in_file):
"""
Given a fasta file, find out some information regarding its global gap content.
:param in_file: Fasta or multi-fasta with sequences for gap analysis.
:return: dictionary with total_N, total_nucleotides, total_gaps, total_gaps_over_100, all_gap_lengths and bed_gaps
"""
# Initialize values to be computed.
total_N = 0
total_nucleotides = 0
total_gaps = 0
total_gaps_over_100 = 0
all_gap_lengths = []
# Use a dictionary to store bed coordinates.
# key = fasta header
# Value = list of tuples corresponding to genomic coordinates.
bed_gaps = collections.OrderedDict()
# Iterate through each sequence in the fasta,
# and get gap info from each.
sequences = SeqReader(in_file)
for header, sequence in sequences.parse_fasta():
gap_sequence = GapSequence(sequence)
# Get total number of 'N' characters for this sequence.
total_N += gap_sequence.count_Ns()
# Get total number of nucleotides for this sequence.
total_nucleotides += len(sequence)
for gap in gap_sequence.get_gaps():
# Increment total number of gaps
total_gaps += 1
if len(gap) > 100:
total_gaps_over_100 += 1
# Save this gap length to master list.
all_gap_lengths.append(len(gap))
# Now fill in bed file data structure.
all_coordinates = [(m.start(0), m.end(0)) for m in gap_sequence.get_gap_coords()]
if all_coordinates:
bed_gaps[header] = all_coordinates
return {
'total_N': total_N,
'total_nucleotides': total_nucleotides,
'total_gaps': total_gaps,
'total_gaps_over_100': total_gaps_over_100,
'all_gap_lengths': all_gap_lengths,
'bed_gaps': bed_gaps
}
# Parse the command line arguments.
arg_dict = parse_args(sys.argv)
# Get gap info for each fasta.
all_files_info = collections.OrderedDict()
for fasta in arg_dict['fastas']:
log(' ---- Analyzing gaps for %s' % fasta)
all_files_info[fasta] = get_gap_info(fasta)
# Write csv file with basic gap stats.
write_gap_stats(all_files_info)
# Check if bed file is desired.
# Save to current working directory if so.
if '-B' in arg_dict['flags']:
log(' ---- Writing bed file(s).')
for f in all_files_info.keys():
file | write_gap_stats | identifier_name |
gap_stats.py | _desired
if __name__ == "__main__":
usage = """
___________
Description:
Command line utility for analyzing gaps in a fasta file. One file can be analyzed, or up to 3 can be compared.
Use this tool to compare a genome assembly pre and post gap filling with tools such as PBJelly.
_____
Usage:
python gap_stats.py [options] <sequence1.fasta> <sequence2.fasta> <sequence3.fasta>
OPTIONS:
-m Save a matplotlib gap length histogram in current working directory.
* Requires matplotlib to be installed *
-p Write a plain text file of all gap lengths in current working directory for
use as input into other statistical analysis software.
-b Make a gap bed file for each input fasta.
-h Print help message.
"""
def parse_args(args_list):
"""
Given all command line arguments, make a dictionary containing all
of the flags, and all of the fasta files.
If the command line arguments either request help or raises an error,
that will be done here. If this function returns, it can be assumed that
the command line statement is ready for further analysis.
:param args_list: List of command line arguments (sys.argv)
:return: Dictionary specifying all flags and all fasta files.
"""
# If no arguments specified, print usage statement with no error.
if len(args_list) == 1:
sys.exit(usage)
# Make all flags upper case to avoid case sensitivity.
flags = [i.upper() for i in args_list if i.startswith('-')]
# See if help is desired. If so, print usage with no error.
if help_desired(flags):
sys.exit(usage)
# Retrieve fasta files. At least one, up to 3 is needed.
fastas = [
i for i in args_list if
i.endswith('.fasta') or
i.endswith('.fa') or
i.endswith('.fna') or
i.endswith('.fas')
]
# Make sure that at least one fasta file was found.
if not fastas:
print usage
raise ValueError('No fasta files found.')
# Make sure that no more than 3 fasta files have been selected.
if len(fastas) > 3:
print usage
raise ValueError(
'A maximum of 3 fasta files can be compared at once. You entered %r fasta files.' % len(fastas)
)
return {
'flags': flags,
'fastas': fastas
}
def write_gap_stats(info):
"""
Use info obtained in get_gap_info to write a summary stats csv file.
:param info: Dictionary where
key = fasta file
value = ordered dictionary containing all gap info from get_gap_info
"""
with open('gap_stats.txt', 'w') as out_file:
# Get each category from each fasta file. One row for each.
all_percent_N = [str(100*(float(info[i]['total_N'])/info[i]['total_nucleotides'])) for i in info.keys()]
all_total_gaps = [str(info[i]['total_gaps']) for i in info.keys()]
all_total_gaps_over_100 = [str(info[i]['total_gaps_over_100']) for i in info.keys()]
all_longest_gap = [str(max(info[i]['all_gap_lengths'])) for i in info.keys()]
all_medians = [str(calculate_median(info[i]['all_gap_lengths'])) for i in info.keys()]
files = [ntpath.basename(f) for f in info.keys()]
# Write rows out to csv file.
# First, write out the header (gap metrics).
out_file.write('%s\t%s\t%s\t%s\t%s\t%s\n' % ('file_name', '%N', 'Total Gaps', 'Total Gaps Longer Than 100bp', 'Longest Gap', 'Median Gap Length'))
# Write results for each file.
for i in range(len(files)):
out_file.write('%s\t%s\t%s\t%s\t%s\t%s\n' % (files[i], all_percent_N[i], all_total_gaps[i], all_total_gaps_over_100[i], all_longest_gap[i], all_medians[i]))
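# Illustrative sketch (toy helper, not called by the script): the tab-separated
# layout produced by write_gap_stats, with invented values.
def _demo_stats_row():
    header = '\t'.join(['file_name', '%N', 'Total Gaps', 'Total Gaps Longer Than 100bp',
                        'Longest Gap', 'Median Gap Length'])
    row = '\t'.join(['draft.fasta', '1.25', '42', '17', '5000', '350'])
    return header + '\n' + row + '\n'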
def write_bed_file(bed_dict, out_file_name):
"""
From a dictionary storing bed file info, write output file in bed format.
:param bed_dict: Dict where keys = fasta headers, and values = coordinates of each gap in that sequence.
:param out_file_name: Name for output bed file.
"""
with open(os.getcwd() + '/' + ntpath.basename(out_file_name), 'w') as out_file:
for header in bed_dict.keys():
for coordinates in bed_dict[header]:
out_file.write(
'%s\t%r\t%r\n' %(header[1:], coordinates[0], coordinates[1])
)
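# Illustrative sketch (toy helper, not called by the script): one BED record per
# gap, i.e. sequence name (fasta header without '>'), 0-based start, exclusive end.
# The coordinates are invented.
def _demo_bed_line():
    header = '>scaffold_1'
    start, end = 1200, 1450  # as returned by match.start()/match.end()
    return '%s\t%r\t%r\n' % (header[1:], start, end)  # 'scaffold_1\t1200\t1450\n'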
def write_hist_img_file(lengths, labels):
"""
Save a matplotlib length histogram image to current working directory.
:param lengths: List of Lists of all gap lengths for each fasta.
:param labels: Labels to be used in the histogram image file.
"""
import matplotlib.pyplot as plt
# Find the max and min values for plotting.
max_length = max(max(i) for i in lengths)
min_length = min(min(i) for i in lengths)
bin_size = max(1, int(0.025*max_length))  # guard against a zero bin width when the longest gap is short
# Make histogram
colors = ['r', 'g', 'b']
plt.hist(
lengths,
bins=range(min_length, max_length+bin_size, bin_size),
color=colors[:len(lengths)],
label=[ntpath.basename(l) for l in labels]
)
plt.legend()
plt.title('Gap Length Histogram')
plt.xlabel('Gap Length (b)')
plt.ylabel('Frequency')
plt.savefig(os.getcwd() + '/gap_stats_hist.pdf')
def write_hist_text_file(lengths, labels):
"""
Write a plain text file to current working directory.
1 ordered column of all histogram lengths.
This is for input into statistical analysis software such as R. | with open(os.getcwd() + '/' + ntpath.basename(hist_file_name), 'w') as out_file:
out_file.write(ntpath.basename(label) + '\n')
for length in sorted(lengths_list):
out_file.write(str(length) + '\n')
def get_gap_info(in_file):
"""
Given a fasta file, find out some information regarding its global gap content.
:param in_file: Fasta or multi-fasta with sequences for gap analysis.
:return: dictionary with total_N, total_nucleotides, total_gaps and all_gap_lengths
"""
# Initialize values to be computed.
total_N = 0
total_nucleotides = 0
total_gaps = 0
total_gaps_over_100 = 0
all_gap_lengths = []
# Use a dictionary to store bed coordinates.
# key = fasta header
# Value = list of tuples corresponding to genomic coordinates.
bed_gaps = collections.OrderedDict()
# Iterate through each sequence in the fasta,
# and get gap info from each.
sequences = SeqReader(in_file)
for header, sequence in sequences.parse_fasta():
gap_sequence = GapSequence(sequence)
# Get total number of 'N' characters for this sequence.
total_N += gap_sequence.count_Ns()
# Get total number of nucleotides for this sequence.
total_nucleotides += len(sequence)
for gap in gap_sequence.get_gaps():
# Increment total number of gaps
total_gaps += 1
if len(gap) > 100:
total_gaps_over_100 += 1
# Save this gap length to master list.
all_gap_lengths.append(len(gap))
# Now fill in bed file data structure.
all_coordinates = [(m.start(0), m.end(0)) for m in gap_sequence.get_gap_coords()]
if all_coordinates:
bed_gaps[header] = all_coordinates
return {
'total_N': total_N,
'total_nucleotides': total_nucleotides,
'total_gaps': total_gaps,
'total_gaps_over_100': total_gaps_over_100,
'all_gap_lengths': all_gap_lengths,
'bed_gaps': bed_gaps
}
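# Illustrative sketch: GapSequence is defined elsewhere in this project; the toy
# function below (not called anywhere) only shows the underlying idea of locating
# runs of 'N' characters and reporting their coordinates with a regular expression.
def _demo_find_gaps(sequence='ACGTNNNNACGTNNACGT'):
    import re
    gaps = [(m.start(0), m.end(0)) for m in re.finditer('[Nn]+', sequence)]
    # For the toy sequence above: [(4, 8), (12, 14)]
    return gaps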
# Parse the command line arguments.
arg_dict = parse_args(sys.argv)
# Get gap info for each fasta.
all_files_info = collections.OrderedDict()
for fasta in arg_dict['fastas']:
log(' ---- Analyzing gaps for %s' % fasta)
all_files_info[fasta] = get_gap_info(fasta)
# Write tab-delimited stats file (gap_stats.txt) with basic gap stats.
write_gap_stats(all_files_info)
# Check if bed file is desired.
# Save to current working directory if so.
if '-B' in arg_dict['flags']:
log(' ---- Writing bed file(s).')
for f in all_files_info.keys():
file | :param lengths: List of Lists of all gap lengths for each fasta.
:param labels: Labels (fasta file names) used to name and head the output text files.
"""
for lengths_list, label in zip(lengths, labels):
hist_file_name = label[:label.rfind('.')] + '.all_lengths.txt' | random_line_split |
wk11_main.py |
def schoolLife():
survey = [["highly satisfactoty", "satisfaction", "dissatisfaction" ,"highly unsatisfactory"],
[13.1, 37.1, 8.7, 1.5],
[10.6, 34.6, 13.4, 1.9],
[27.1, 40.0, 2.9, 1.5],
[16.2, 37.8, 6.8, 0.8],
[11.4, 29.8, 14.8, 4.9],
[12.2, 26.5, 14.9, 4.4],
[13.5, 29.7, 11.1, 2.4],
[13.7, 37.6, 4.1, 1.2]]
grade=survey[1:8]
sSum=0
dsSum=0
for i in range(len(grade)):
sSum = sSum + grade[i][0] + grade[i][1]
dsSum = dsSum + grade[i][2] + grade[i][3]
sAvg=sSum/len(grade)
dsAvg=dsSum/len(grade)
print "Average of (highly) Satisfaction:", sAvg
print "Average of (highly) unsatisfactory :", dsAvg
def speech():
Bush=["Vice President Cheney, Mr. Chief Justice, President Carter, President Bush, President Clinton, reverend clergy, distinguished guests, fellow citizens:",
"On this day, prescribed by law and marked by ceremony, we celebrate the durable wisdom of our Constitution, and recall the deep commitments that unite our country.",
"I am grateful for the honor of this hour, mindful of the consequential times in which we live, and determined to fulfill the oath that I have sworn and you have witnessed.",
"At this second gathering, our duties are defined not by the words I use, but by the history we have seen together. For a half century, America defended our own freedom by standing watch on distant borders.",
"After the shipwreck of communism came years of relative quiet, years of repose, years of sabbatical —and then there came a day of fire.",
"We have seen our vulnerability —and we have seen its deepest source. For as long as whole regions of the world simmer in resentment and tyranny —prone to ideologies that feed hatred and excuse murder ",
"— violence will gather, and multiply in destructive power, and cross the most defended borders, and raise a mortal threat. There is only one force of history that can break the reign of hatred and resentment,",
"and expose the pretensions of tyrants, and reward the hopes of the decent and tolerant, and that is the force of human freedom.",
"We are led, by events and common sense, to one conclusion: The survival of liberty in our land increasingly depends on the success of liberty in other lands. The best hope for peace in our world is the expansion of",
"freedom in all the world.",
"America's vital interests and our deepest beliefs are now one. From the day of our Founding, we have proclaimed that every man and woman on this earth has rights, and dignity, and matchless value, because they bear",
" the image of the Maker of Heaven and earth. Across the generations we have proclaimed the imperative of self-government, because no one is fit to be a master, and no one deserves to be a slave. Advancing these ideals",
"is the mission that created our Nation. It is the honorable achievement of our fathers. Now it is the urgent requirement of our nation's security, and the calling of our time.",
"So it is the policy of the United States to seek and support the growth of democratic movements and institutions in every nation and culture, with the ultimate goal of ending tyranny in our world.",
"This is not primarily the task of arms, though we will defend ourselves and our friends by force of arms when necessary. Freedom, by its nature, must be chosen, and defended by citizens, and sustained by the rule of ",
" law and the protection of minorities. And when the soul of a nation finally speaks, the institutions that arise may reflect customs and traditions very different from our own. America will not impose our own style of",
" government on the unwilling. Our goal instead is to help others find their own voice, attain their own freedom, and make their own way.",
"The great objective of ending tyranny is the concentrated work of generations. The difficulty of the task is no excuse for avoiding it. America's influence is not unlimited, but fortunately for the oppressed, America's",
" influence is considerable, and we will use it confidently in freedom's cause.",
"My most solemn duty is to protect this nation and its people against further attacks and emerging threats. Some have unwisely chosen to test America's resolve, and have found it firm.",
"We will persistently clarify the choice before every ruler and every nation: The moral choice between oppression, which is always wrong, and freedom, which is eternally right. America will not ",
" pretend that jailed dissidents prefer their chains, or that women welcome humiliation and servitude, or that any human being aspires to live at the mercy of bullies.",
"We will encourage reform in other governments by making clear that success in our relations will require the decent treatment of their own people. America's belief in human dignity will guide our policies, ",
" yet rights must be more than the grudging concessions of dictators; they are secured by free dissent and the participation of the governed. In the long run, there is no justice without freedom, ",
" and there can be no human rights without human liberty.",
"Some, I know, have questioned the global appeal of liberty —though this time in history, four decades defined by the swiftest advance of freedom ever seen, ",
" is an odd time for doubt. Americans, of all people, should never be surprised by the power of our ideals. Eventually, the call of freedom comes to every mind and every soul. ",
" We do not accept the existence of permanent tyranny because we do not accept the possibility of permanent slavery. Liberty will come to those who love it.",
"Today, America speaks anew to the peoples of the world:",
"All who live in tyranny and hopelessness can know: the United States will not ignore your oppression, or excuse your oppressors. When you stand for your liberty, we will stand with you.",
"Democratic reformers facing repression, prison, or exile can know: America sees you for who you are: the future leaders of your free country.",
"The rulers of outlaw regimes can know that we still believe as Abraham Lincoln did: Those who deny freedom to others deserve it not for themselves; and, under the rule of a just God, cannot long retain it.",
"The leaders of governments with long habits of control need to know: To serve your people you must learn to trust them. Start on this journey of progress and justice, and America will walk at your side.",
"And all the allies of the United States can know: we honor your friendship, we rely on your counsel, and we depend on your help. Division among free nations is a primary goal of freedom's enemies. ",
" The concerted effort of free nations to promote democracy is a prelude to our enemies' defeat.",
"Today, I also speak anew to my fellow citizens:",
"From all of you, I have asked patience in the hard task of securing America, which you have granted in good measure. Our country has accepted obligations that are difficult to fulfill,",
" and would be dishonorable to abandon. Yet because we have acted in the great liberating tradition of this nation, tens of millions have achieved their freedom. And as hope kindles hope, ",
" millions more will find it. By our efforts, we have lit a fire as well —a fire in the minds of men. It warms those who feel its power, it burns those who fight its progress, and one day this untamed ",
" fire of freedom will reach the darkest corners of our world.",
"A few Americans have accepted the hardest duties in this cause —in the quiet work of intelligence and diplomacy ... the idealistic work of helping raise up free governments ... ",
" the dangerous and necessary work of fighting our enemies. Some have shown their devotion to our country in deaths that honored their whole lives —and we will always honor their names and their sacrifice.",
"All Americans have witnessed this idealism, and some for the first time. I ask our youngest citizens to believe the evidence of your eyes. You have seen duty and allegiance in the determined faces of our soldiers. ",
" You have seen that life is fragile, and evil is real, and courage triumphs. Make the choice to serve in a cause larger than your wants, larger than yourself —and in your days you will add not just to ",
" the wealth of our country, but to its character.",
"America has need of idealism and courage, because we have essential work at home —the unfinished work of American freedom. In a world moving toward liberty, ",
" we are determined to show the meaning and promise of liberty.",
"In America's ideal of freedom, citizens find the dignity and | t1.right(180)
t1.write("On the line") | conditional_block |
|
wk11_main.py |
def turnright():
t1.right(45)
def turnleft():
t1.left(45)
def keyup():
t1.fd(100)
def turnback():
t1.right(180)
def mousegoto(x,y):
t1.setpos(x,y)
feedback()
def keybye():
wn.bye()
def addkeys():
wn.onkey(turnright,"Right")
wn.onkey(turnleft,"Left")
wn.onkey(keyup,"Up")
wn.onkey(turnback,"Down")
wn.onkey(keybye,"q")
def addmouse():
wn.onclick(mousegoto)
def feedback():
if t1.xcor() > 300 or t1.xcor() < -300:
t1.right(180)
t1.write("On the line")
if t1.ycor() > 300 or t1.ycor() < -300:
t1.right(180)
t1.write("On the line")
def schoolLife():
survey = [["highly satisfactoty", "satisfaction", "dissatisfaction" ,"highly unsatisfactory"],
[13.1, 37.1, 8.7, 1.5],
[10.6, 34.6, 13.4, 1.9],
[27.1, 40.0, 2.9, 1.5],
[16.2, 37.8, 6.8, 0.8],
[11.4, 29.8, 14.8, 4.9],
[12.2, 26.5, 14.9, 4.4],
[13.5, 29.7, 11.1, 2.4],
[13.7, 37.6, 4.1, 1.2]]
grade=survey[1:8]
sSum=0
dsSum=0
for i in range(len(grade)):
sSum = sSum + grade[i][0] + grade[i][1]
dsSum = dsSum + grade[i][2] + grade[i][3]
sAvg=sSum/len(grade)
dsAvg=dsSum/len(grade)
print "Average of (highly) Satisfaction:", sAvg
print "Average of (highly) unsatisfactory :", dsAvg
def speech():
Bush=["Vice President Cheney, Mr. Chief Justice, President Carter, President Bush, President Clinton, reverend clergy, distinguished guests, fellow citizens:",
"On this day, prescribed by law and marked by ceremony, we celebrate the durable wisdom of our Constitution, and recall the deep commitments that unite our country.",
"I am grateful for the honor of this hour, mindful of the consequential times in which we live, and determined to fulfill the oath that I have sworn and you have witnessed.",
"At this second gathering, our duties are defined not by the words I use, but by the history we have seen together. For a half century, America defended our own freedom by standing watch on distant borders.",
"After the shipwreck of communism came years of relative quiet, years of repose, years of sabbatical —and then there came a day of fire.",
"We have seen our vulnerability —and we have seen its deepest source. For as long as whole regions of the world simmer in resentment and tyranny —prone to ideologies that feed hatred and excuse murder ",
"— violence will gather, and multiply in destructive power, and cross the most defended borders, and raise a mortal threat. There is only one force of history that can break the reign of hatred and resentment,",
"and expose the pretensions of tyrants, and reward the hopes of the decent and tolerant, and that is the force of human freedom.",
"We are led, by events and common sense, to one conclusion: The survival of liberty in our land increasingly depends on the success of liberty in other lands. The best hope for peace in our world is the expansion of",
"freedom in all the world.",
"America's vital interests and our deepest beliefs are now one. From the day of our Founding, we have proclaimed that every man and woman on this earth has rights, and dignity, and matchless value, because they bear",
" the image of the Maker of Heaven and earth. Across the generations we have proclaimed the imperative of self-government, because no one is fit to be a master, and no one deserves to be a slave. Advancing these ideals",
"is the mission that created our Nation. It is the honorable achievement of our fathers. Now it is the urgent requirement of our nation's security, and the calling of our time.",
"So it is the policy of the United States to seek and support the growth of democratic movements and institutions in every nation and culture, with the ultimate goal of ending tyranny in our world.",
"This is not primarily the task of arms, though we will defend ourselves and our friends by force of arms when necessary. Freedom, by its nature, must be chosen, and defended by citizens, and sustained by the rule of ",
" law and the protection of minorities. And when the soul of a nation finally speaks, the institutions that arise may reflect customs and traditions very different from our own. America will not impose our own style of",
" government on the unwilling. Our goal instead is to help others find their own voice, attain their own freedom, and make their own way.",
"The great objective of ending tyranny is the concentrated work of generations. The difficulty of the task is no excuse for avoiding it. America's influence is not unlimited, but fortunately for the oppressed, America's",
" influence is considerable, and we will use it confidently in freedom's cause.",
"My most solemn duty is to protect this nation and its people against further attacks and emerging threats. Some have unwisely chosen to test America's resolve, and have found it firm.",
"We will persistently clarify the choice before every ruler and every nation: The moral choice between oppression, which is always wrong, and freedom, which is eternally right. America will not ",
" pretend that jailed dissidents prefer their chains, or that women welcome humiliation and servitude, or that any human being aspires to live at the mercy of bullies.",
"We will encourage reform in other governments by making clear that success in our relations will require the decent treatment of their own people. America's belief in human dignity will guide our policies, ",
" yet rights must be more than the grudging concessions of dictators; they are secured by free dissent and the participation of the governed. In the long run, there is no justice without freedom, ",
" and there can be no human rights without human liberty.",
"Some, I know, have questioned the global appeal of liberty —though this time in history, four decades defined by the swiftest advance of freedom ever seen, ",
" is an odd time for doubt. Americans, of all people, should never be surprised by the power of our ideals. Eventually, the call of freedom comes to every mind and every soul. ",
" We do not accept the existence of permanent tyranny because we do not accept the possibility of permanent slavery. Liberty will come to those who love it.",
"Today, America speaks anew to the peoples of the world:",
"All who live in tyranny and hopelessness can know: the United States will not ignore your oppression, or excuse your oppressors. When you stand for your liberty, we will stand with you.",
"Democratic reformers facing repression, prison, or exile can know: America sees you for who you are: the future leaders of your free country.",
"The rulers of outlaw regimes can know that we still believe as Abraham Lincoln did: Those who deny freedom to others deserve it not for themselves; and, under the rule of a just God, cannot long retain it.",
"The leaders of governments with long habits of control need to know: To serve your people you must learn to trust them. Start on this journey of progress and justice, and America will walk at your side.",
"And all the allies of the United States can know: we honor your friendship, we rely on your counsel, and we depend on your help. Division among free nations is a primary goal of freedom's enemies. ",
" The concerted effort of free nations to promote democracy is a prelude to our enemies' defeat.",
"Today, I also speak anew to my fellow citizens:",
"From all of you, I have asked patience in the hard task of securing America, which you have granted in good measure. Our country has accepted obligations that are difficult to fulfill,",
" and would be dishonorable to abandon. Yet because we have acted in the great liberating tradition of this nation, tens of | ring = turtle.Turtle()
ring.penup()
ring.setpos(-300,300)
ring.pendown()
ring.pensize(3)
#-300,300 -> 300,300 -> 300,-300 -> -300,-300
for side in range(4):
ring.fd(600)
ring.right(90)
ring.write(ring.pos())
ring.hideturtle() | identifier_body |
|
wk11_main.py | arms when necessary. Freedom, by its nature, must be chosen, and defended by citizens, and sustained by the rule of ",
" law and the protection of minorities. And when the soul of a nation finally speaks, the institutions that arise may reflect customs and traditions very different from our own. America will not impose our own style of",
" government on the unwilling. Our goal instead is to help others find their own voice, attain their own freedom, and make their own way.",
"The great objective of ending tyranny is the concentrated work of generations. The difficulty of the task is no excuse for avoiding it. America's influence is not unlimited, but fortunately for the oppressed, America's",
" influence is considerable, and we will use it confidently in freedom's cause.",
"My most solemn duty is to protect this nation and its people against further attacks and emerging threats. Some have unwisely chosen to test America's resolve, and have found it firm.",
"We will persistently clarify the choice before every ruler and every nation: The moral choice between oppression, which is always wrong, and freedom, which is eternally right. America will not ",
" pretend that jailed dissidents prefer their chains, or that women welcome humiliation and servitude, or that any human being aspires to live at the mercy of bullies.",
"We will encourage reform in other governments by making clear that success in our relations will require the decent treatment of their own people. America's belief in human dignity will guide our policies, ",
" yet rights must be more than the grudging concessions of dictators; they are secured by free dissent and the participation of the governed. In the long run, there is no justice without freedom, ",
" and there can be no human rights without human liberty.",
"Some, I know, have questioned the global appeal of liberty —though this time in history, four decades defined by the swiftest advance of freedom ever seen, ",
" is an odd time for doubt. Americans, of all people, should never be surprised by the power of our ideals. Eventually, the call of freedom comes to every mind and every soul. ",
" We do not accept the existence of permanent tyranny because we do not accept the possibility of permanent slavery. Liberty will come to those who love it.",
"Today, America speaks anew to the peoples of the world:",
"All who live in tyranny and hopelessness can know: the United States will not ignore your oppression, or excuse your oppressors. When you stand for your liberty, we will stand with you.",
"Democratic reformers facing repression, prison, or exile can know: America sees you for who you are: the future leaders of your free country.",
"The rulers of outlaw regimes can know that we still believe as Abraham Lincoln did: Those who deny freedom to others deserve it not for themselves; and, under the rule of a just God, cannot long retain it.",
"The leaders of governments with long habits of control need to know: To serve your people you must learn to trust them. Start on this journey of progress and justice, and America will walk at your side.",
"And all the allies of the United States can know: we honor your friendship, we rely on your counsel, and we depend on your help. Division among free nations is a primary goal of freedom's enemies. ",
" The concerted effort of free nations to promote democracy is a prelude to our enemies' defeat.",
"Today, I also speak anew to my fellow citizens:",
"From all of you, I have asked patience in the hard task of securing America, which you have granted in good measure. Our country has accepted obligations that are difficult to fulfill,",
" and would be dishonorable to abandon. Yet because we have acted in the great liberating tradition of this nation, tens of millions have achieved their freedom. And as hope kindles hope, ",
" millions more will find it. By our efforts, we have lit a fire as well —a fire in the minds of men. It warms those who feel its power, it burns those who fight its progress, and one day this untamed ",
" fire of freedom will reach the darkest corners of our world.",
"A few Americans have accepted the hardest duties in this cause —in the quiet work of intelligence and diplomacy ... the idealistic work of helping raise up free governments ... ",
" the dangerous and necessary work of fighting our enemies. Some have shown their devotion to our country in deaths that honored their whole lives —and we will always honor their names and their sacrifice.",
"All Americans have witnessed this idealism, and some for the first time. I ask our youngest citizens to believe the evidence of your eyes. You have seen duty and allegiance in the determined faces of our soldiers. ",
" You have seen that life is fragile, and evil is real, and courage triumphs. Make the choice to serve in a cause larger than your wants, larger than yourself —and in your days you will add not just to ",
" the wealth of our country, but to its character.",
"America has need of idealism and courage, because we have essential work at home —the unfinished work of American freedom. In a world moving toward liberty, ",
" we are determined to show the meaning and promise of liberty.",
"In America's ideal of freedom, citizens find the dignity and security of economic independence, instead of laboring on the edge of subsistence. ",
" This is the broader definition of liberty that motivated the Homestead Act, the Social Security Act, and the G.I. Bill of Rights.",
" And now we will extend this vision by reforming great institutions to serve the needs of our time. To give every American a stake in the promise and future of our country,",
" we will bring the highest standards to our schools, and build an ownership society. We will widen the ownership of homes and businesses, retirement savings and health insurance —preparing ",
" our people for the challenges of life in a free society. By making every citizen an agent of his or her own destiny, we will give our fellow Americans greater freedom from want and fear, ",
| "In America's ideal of freedom, the public interest depends on private character —on integrity, and tolerance toward others, and the rule of conscience in our own lives. ",
" Self-government relies, in the end, on the governing of the self. That edifice of character is built in families, supported by communities with standards, and sustained in our national ",
" life by the truths of Sinai, the Sermon on the Mount, the words of the Koran, and the varied faiths of our people. Americans move forward in every generation by reaffirming all that is ",
" good and true that came before —ideals of justice and conduct that are the same yesterday, today, and forever.",
"In America's ideal of freedom, the exercise of rights is ennobled by service, and mercy, and a heart for the weak. Liberty for all does not mean independence from one another. Our nation relies",
" on men and women who look after a neighbor and surround the lost with love. Americans, at our best, value the life we see in one another, and must always remember that even the unwanted have worth.",
" And our country must abandon all the habits of racism, because we cannot carry the message of freedom and the baggage of bigotry at the same time.",
"From the perspective of a single day, including this day of dedication, the issues and questions before our country are many. From the viewpoint of centuries, the questions that come to us are narrowed and few.",
" Did our generation advance the cause of freedom? And did our character bring credit to that cause?",
"These questions that judge us also unite us, because Americans of every party and background, Americans by choice and by birth, are bound to one another in the cause of freedom. We have known divisions, ",
" which must be healed to move forward in great purposes —and I will strive in good faith to heal them. Yet those divisions do not define America. We felt the unity and fellowship of our nation when freedom ",
" came under attack, and our response came like a single hand over a single heart. And we can feel that same unity and pride whenever America acts for good, and the victims of disaster are given hope, ",
" and the unjust encounter justice, and the captives are set free.",
"We go forward with complete confidence in the eventual triumph of freedom. Not because history runs on the wheels of inevitability; it is human choices that move events. Not because we consider ourselves ",
" a chosen nation; God moves and chooses as He wills. We have confidence because freedom is the permanent hope of mankind, the hunger in dark places, the longing of the soul. When our Founders declared ",
" a new order of the ages; when soldiers died in wave upon wave for a union based on liberty; when citizens marched in peaceful outrage under the banner Freedom Now —they were acting on an ancient hope that ",
" is meant to be fulfilled. History has an ebb and flow of justice, but history also has a visible direction, set by liberty and the Author of Liberty.",
"When the Declaration of Independence was first read in public and the Liberty Bell was sounded in celebration, a witness said, It rang as if it meant something. In our time it means something still. ",
" America, in this young century, proclaims liberty throughout all the world, and to all the inhabitants thereof. Renewed in our strength — tested | " and make our society more prosperous and just and equal.",
| random_line_split |
wk11_main.py | ():
survey = [["highly satisfactoty", "satisfaction", "dissatisfaction" ,"highly unsatisfactory"],
[13.1, 37.1, 8.7, 1.5],
[10.6, 34.6, 13.4, 1.9],
[27.1, 40.0, 2.9, 1.5],
[16.2, 37.8, 6.8, 0.8],
[11.4, 29.8, 14.8, 4.9],
[12.2, 26.5, 14.9, 4.4],
[13.5, 29.7, 11.1, 2.4],
[13.7, 37.6, 4.1, 1.2]]
grade=survey[1:8]
sSum=0
dsSum=0
for i in range(len(grade)):
sSum = sSum + grade[i][0] + grade[i][1]
dsSum = dsSum + grade[i][2] + grade[i][3]
sAvg=sSum/len(grade)
dsAvg=dsSum/len(grade)
print "Average of (highly) Satisfaction:", sAvg
print "Average of (highly) unsatisfactory :", dsAvg
def speech():
Bush=["Vice President Cheney, Mr. Chief Justice, President Carter, President Bush, President Clinton, reverend clergy, distinguished guests, fellow citizens:",
"On this day, prescribed by law and marked by ceremony, we celebrate the durable wisdom of our Constitution, and recall the deep commitments that unite our country.",
"I am grateful for the honor of this hour, mindful of the consequential times in which we live, and determined to fulfill the oath that I have sworn and you have witnessed.",
"At this second gathering, our duties are defined not by the words I use, but by the history we have seen together. For a half century, America defended our own freedom by standing watch on distant borders.",
"After the shipwreck of communism came years of relative quiet, years of repose, years of sabbatical —and then there came a day of fire.",
"We have seen our vulnerability —and we have seen its deepest source. For as long as whole regions of the world simmer in resentment and tyranny —prone to ideologies that feed hatred and excuse murder ",
"— violence will gather, and multiply in destructive power, and cross the most defended borders, and raise a mortal threat. There is only one force of history that can break the reign of hatred and resentment,",
"and expose the pretensions of tyrants, and reward the hopes of the decent and tolerant, and that is the force of human freedom.",
"We are led, by events and common sense, to one conclusion: The survival of liberty in our land increasingly depends on the success of liberty in other lands. The best hope for peace in our world is the expansion of",
"freedom in all the world.",
"America's vital interests and our deepest beliefs are now one. From the day of our Founding, we have proclaimed that every man and woman on this earth has rights, and dignity, and matchless value, because they bear",
" the image of the Maker of Heaven and earth. Across the generations we have proclaimed the imperative of self-government, because no one is fit to be a master, and no one deserves to be a slave. Advancing these ideals",
"is the mission that created our Nation. It is the honorable achievement of our fathers. Now it is the urgent requirement of our nation's security, and the calling of our time.",
"So it is the policy of the United States to seek and support the growth of democratic movements and institutions in every nation and culture, with the ultimate goal of ending tyranny in our world.",
"This is not primarily the task of arms, though we will defend ourselves and our friends by force of arms when necessary. Freedom, by its nature, must be chosen, and defended by citizens, and sustained by the rule of ",
" law and the protection of minorities. And when the soul of a nation finally speaks, the institutions that arise may reflect customs and traditions very different from our own. America will not impose our own style of",
" government on the unwilling. Our goal instead is to help others find their own voice, attain their own freedom, and make their own way.",
"The great objective of ending tyranny is the concentrated work of generations. The difficulty of the task is no excuse for avoiding it. America's influence is not unlimited, but fortunately for the oppressed, America's",
" influence is considerable, and we will use it confidently in freedom's cause.",
"My most solemn duty is to protect this nation and its people against further attacks and emerging threats. Some have unwisely chosen to test America's resolve, and have found it firm.",
"We will persistently clarify the choice before every ruler and every nation: The moral choice between oppression, which is always wrong, and freedom, which is eternally right. America will not ",
" pretend that jailed dissidents prefer their chains, or that women welcome humiliation and servitude, or that any human being aspires to live at the mercy of bullies.",
"We will encourage reform in other governments by making clear that success in our relations will require the decent treatment of their own people. America's belief in human dignity will guide our policies, ",
" yet rights must be more than the grudging concessions of dictators; they are secured by free dissent and the participation of the governed. In the long run, there is no justice without freedom, ",
" and there can be no human rights without human liberty.",
"Some, I know, have questioned the global appeal of liberty —though this time in history, four decades defined by the swiftest advance of freedom ever seen, ",
" is an odd time for doubt. Americans, of all people, should never be surprised by the power of our ideals. Eventually, the call of freedom comes to every mind and every soul. ",
" We do not accept the existence of permanent tyranny because we do not accept the possibility of permanent slavery. Liberty will come to those who love it.",
"Today, America speaks anew to the peoples of the world:",
"All who live in tyranny and hopelessness can know: the United States will not ignore your oppression, or excuse your oppressors. When you stand for your liberty, we will stand with you.",
"Democratic reformers facing repression, prison, or exile can know: America sees you for who you are: the future leaders of your free country.",
"The rulers of outlaw regimes can know that we still believe as Abraham Lincoln did: Those who deny freedom to others deserve it not for themselves; and, under the rule of a just God, cannot long retain it.",
"The leaders of governments with long habits of control need to know: To serve your people you must learn to trust them. Start on this journey of progress and justice, and America will walk at your side.",
"And all the allies of the United States can know: we honor your friendship, we rely on your counsel, and we depend on your help. Division among free nations is a primary goal of freedom's enemies. ",
" The concerted effort of free nations to promote democracy is a prelude to our enemies' defeat.",
"Today, I also speak anew to my fellow citizens:",
"From all of you, I have asked patience in the hard task of securing America, which you have granted in good measure. Our country has accepted obligations that are difficult to fulfill,",
" and would be dishonorable to abandon. Yet because we have acted in the great liberating tradition of this nation, tens of millions have achieved their freedom. And as hope kindles hope, ",
" millions more will find it. By our efforts, we have lit a fire as well —a fire in the minds of men. It warms those who feel its power, it burns those who fight its progress, and one day this untamed ",
" fire of freedom will reach the darkest corners of our world.",
"A few Americans have accepted the hardest duties in this cause —in the quiet work of intelligence and diplomacy ... the idealistic work of helping raise up free governments ... ",
" the dangerous and necessary work of fighting our enemies. Some have shown their devotion to our country in deaths that honored their whole lives —and we will always honor their names and their sacrifice.",
"All Americans have witnessed this idealism, and some for the first time. I ask our youngest citizens to believe the evidence of your eyes. You have seen duty and allegiance in the determined faces of our soldiers. ",
" You have seen that life is fragile, and evil is real, and courage triumphs. Make the choice to serve in a cause larger than your wants, larger than yourself —and in your days you will add not just to ",
" the wealth of our country, but to its character.",
"America has need of idealism and courage, because we have essential work at home —the unfinished work of American freedom. In a world moving toward liberty, ",
" we are determined to show the meaning and promise of liberty.",
"In America's ideal of freedom, citizens find the dignity and security of economic independence, instead of laboring on the edge of subsistence. ",
" | schoolLife | identifier_name |
|
GAN_word_embedding.py |
print('done.')
lines=np.array(map(int, lines[:-1].split(',')))
# if desired the titles are shuffled
if shuffle:
endoftitles = [x for x in range(len(lines)) if lines[x] == dictionary.get('<eol>')]
startoftitles = [0] + list(np.add(endoftitles[:-1], 1))
idx = np.random.permutation(len(endoftitles))
endoftitles=[endoftitles[x] for x in idx]
startoftitles = [startoftitles[x] for x in idx]
lines = [lines[range(startoftitles[x], endoftitles[x] + 1)] for x in range(len(endoftitles))]
lines = np.hstack(lines)
# the function returns a vector containing all dictionary indices of all titles
return lines
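# Illustrative sketch (toy helper, never called): the shuffle above in miniature.
# A flat index vector is cut into titles at the <eol> index (0 here), the titles
# are permuted, and the result is re-flattened. All numbers are invented.
def _demo_shuffle_titles(eol=0):
    import numpy as np
    flat = np.array([5, 7, eol, 9, eol, 3, 4, 8, eol])
    ends = [i for i in range(len(flat)) if flat[i] == eol]
    starts = [0] + list(np.add(ends[:-1], 1))
    titles = [flat[starts[i]:ends[i] + 1] for i in range(len(ends))]
    order = np.random.permutation(len(titles))
    return np.hstack([titles[i] for i in order])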
# load and arrange the word embedding file
def get_embeddedwords(namefile='word2vec.model'):
print('loading ' + namefile + '...')
with open(namefile, 'r') as f:
ss = f.readline().split()
# each line is split and the respective human readable word and the vector embedding vector is extracted
n_vocab, n_units = int(ss[0]), int(ss[1])
word2index = {}
index2word = {}
w = np.empty((n_vocab, n_units), dtype=np.float32)
# the embedding matrix is created by sorting all word vectors according to the dictionary index
# the resulting matrix is of size NumIndices x 200
# note that splitting the line removes the whitespace token; it is re-added here
# and assigned an all-zero vector in w
for i, line in enumerate(f):
ss = line.split()
if len(ss)<201:
ss = [' ']+ss
word = ss[0]
word2index[word] = i
index2word[i] = word
w[i] = np.array([float(s) for s in ss[1:]], dtype=np.float32)
w[word2index[' ']]=np.zeros((1, 200))
print('done.')
# word2index transforms a dictionary index to a human readable word
# index2word transforms a human readable word to a dictionary index
return word2index,index2word,w
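# Illustrative sketch (toy helper, never called): minimal example of the plain-text
# word2vec format parsed above. The first line holds vocabulary size and
# dimensionality, every later line holds a word and its vector; values are invented
# and only 3 dimensions are used instead of 200.
def _demo_parse_word2vec_lines():
    import numpy as np
    lines = ['2 3', 'cat 0.1 0.2 0.3', 'dog 0.4 0.5 0.6']
    n_vocab, n_units = (int(x) for x in lines[0].split())
    vectors = np.empty((n_vocab, n_units), dtype=np.float32)
    vocab = {}
    for i, line in enumerate(lines[1:]):
        parts = line.split()
        vocab[parts[0]] = i
        vectors[i] = np.array([float(s) for s in parts[1:]], dtype=np.float32)
    return vocab, vectors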
# this function is used to obtain the maximum number of words across all titles by finding <eol> statements
def get_max_words_over_titles(titles_raw,dictionary):
endoftitles = [x for x in range(len(titles_raw)) if titles_raw[x] == dictionary.get('<eol>')]
startoftitles = [0] + list(np.add(endoftitles[:-1], 1))
max_title_length_in_batch = max(np.abs(np.subtract(startoftitles, endoftitles))) + 1
return max_title_length_in_batch
# this function creates batch data used to train the network
def createtitlebatch(titles_raw,dictionary,skipbatches=0,numtitles=10,testpart=0.05):
# skip_ is used to select parts of the title vector
# skip_ = 10 given numtitles = 80 would mean that the first 800 titles in the vector are skipped and
# all following operations are performed on data that comes after that
skip_=numtitles*skipbatches
endoftitles = [x for x in range(len(titles_raw)) if titles_raw[x] == dictionary.get('<eol>')]
startoftitles = [0] + list(np.add(endoftitles[:-1], 1))
max_title_length_in_batch=max(np.abs(np.subtract(startoftitles,endoftitles)))+1
max_skip=len(endoftitles)
if max_skip<(numtitles+skip_):
print('maximum requested number of titles is '+ str(numtitles+skip_)+'; dataset only has ' + str(max_skip) + ' titles')
print('maximum number of batches at ' + str(numtitles) + ' titles per batch: ' + str(max_skip/numtitles-1))
else:
title_matrix=[]
# extraction of the data from w given the amount of titles selected
for n in range(skip_,numtitles+skip_):
title_num=n
title_vec=titles_raw[range(startoftitles[title_num],endoftitles[title_num]+1)]
title_matrix.append([w[x-1] for x in title_vec])
# shuffling the selected batch
randidx=np.random.permutation(len(title_matrix))
idx_train=randidx[:-int(np.floor(len(randidx)*(testpart)))]
idx_test=randidx[int(np.floor(len(randidx)*(1-testpart))):]
train = [title_matrix[x] for x in idx_train]
test = [title_matrix[x] for x in idx_test]
train = [np.concatenate((x,np.zeros((max_title_length_in_batch-len(x),200))),0) for x in train]
test = [np.concatenate((x, np.zeros((max_title_length_in_batch - len(x), 200))), 0) for x in test]
# train and test are returned in a shape optimized for the use with convolutional networks
# the respective shape is numExamples x channel x numWords x ndimEmbedding
return np.asarray(train).astype('float32').reshape((len(train),1,max_title_length_in_batch,200)),np.asarray(test).astype('float32').reshape((len(test),1,max_title_length_in_batch,200))  # examples x channel x words x embedding dim for both splits
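# Illustrative sketch (toy helper, never called): the padding step above in
# miniature. Titles with different word counts are padded with all-zero word
# vectors to a common length; dimensions are toy-sized (3 instead of 200).
def _demo_zero_pad(max_words=4, ndim=3):
    import numpy as np
    titles = [np.ones((2, ndim)), np.ones((4, ndim))]  # a 2-word and a 4-word title
    padded = [np.concatenate((t, np.zeros((max_words - len(t), ndim))), 0) for t in titles]
    return np.asarray(padded, dtype='float32')  # shape (2, 4, 3)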
# this function can be used to transform a title matrix (words x ndim) back into a human readable title
# the argmin() L2 norm between each word vector of the title and all word vectors in w serves as the
# index for the respective word in the dictionary
def vec2title(vec_,w,index2word):
dict_trans=w#(w-w.min())/(w-w.min()).max()
title_recon=''
for i in range(len(vec_)):
word_ = vec_.data[i]
word_ = np.tile(word_,(len(w),1))
dist_=np.sqrt(np.sum((dict_trans-word_)**2,1))
title_recon=title_recon+index2word[dist_.argmin()]+' '
return title_recon
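# Illustrative sketch (toy helper, never called): the decoding idea of vec2title on
# a toy 2-dimensional "embedding matrix". Each generated vector is mapped to the
# word with the smallest L2 distance; all values are invented.
def _demo_nearest_word():
    import numpy as np
    toy_w = np.array([[1.0, 0.0], [0.0, 1.0]], dtype=np.float32)
    toy_index2word = {0: 'science', 1: 'fiction'}
    query = np.array([0.9, 0.2], dtype=np.float32)
    dist = np.sqrt(np.sum((toy_w - np.tile(query, (len(toy_w), 1))) ** 2, 1))
    return toy_index2word[int(dist.argmin())]  # -> 'science'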
# classifier to compute loss based on softmax cross entropy and accuracy
class Classifier(link.Chain):
compute_accuracy = True
def __init__(self, predictor,
lossfun=softmax_cross_entropy.softmax_cross_entropy,
accfun=accuracy.accuracy):
super(Classifier, self).__init__()
self.lossfun = lossfun
self.accfun = accfun
self.y = None
self.loss = None
self.accuracy = None
with self.init_scope():
self.predictor = predictor
def __call__(self, *args):
assert len(args) >= 2
x = args[:-1]
t = args[-1]
self.y = None
self.loss = None
self.accuracy = None
self.y = self.predictor(*x)
self.loss = self.lossfun(self.y, t)
reporter.report({'loss': self.loss}, self)
if self.compute_accuracy:
self.accuracy = self.accfun(self.y, t)
reporter.report({'accuracy': self.accuracy}, self)
return self.loss
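# Illustrative sketch (toy helper, never called): what the softmax cross-entropy
# loss used by this classifier computes, written out in plain numpy for one toy
# example with 2 classes and true label 1.
def _demo_softmax_cross_entropy():
    import numpy as np
    logits = np.array([2.0, 0.5])
    label = 1
    probs = np.exp(logits - logits.max())
    probs /= probs.sum()
    return -np.log(probs[label])  # grows as the true class gets less probability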
# Convolutional neural network to do the discrimination
# respective architecture choices are explained in the report
class MLPConv(chainer.Chain):
def __init__(self,words_per_title):
super(MLPConv, self).__init__()
with self.init_scope():
self.words_per_title = words_per_title
self.conv = L.Convolution2D(in_channels=1, out_channels=1, ksize=3)
self.l2 = L.Linear(None, 2)
def __call__(self, x):
x2 = F.relu(self.conv(F.reshape(x,(x.data.shape[0], 1,self.words_per_title ,200))))
x3 = F.max_pooling_2d(x2, 3)
y = F.sigmoid(self.l2(F.dropout(x3,0.2)))
return y
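# Illustrative sketch (toy helper, never called): the downsampling idea behind
# F.max_pooling_2d in miniature, reducing non-overlapping 3x3 windows to their
# maximum. Chainer's exact edge handling is not reproduced; toy data only.
def _demo_max_pool(k=3):
    import numpy as np
    x = np.arange(36, dtype=np.float32).reshape(6, 6)
    return x.reshape(6 // k, k, 6 // k, k).max(axis=(1, 3))  # shape (2, 2)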
# Deconvolutional neural network to do the generation
# respective architecture choices are explained in the report
class generator(chainer.Chain):
def __init__(self, words_per_title):
super(generator, self).__init__()
with self.init_scope():
self.words_per_title = words_per_title
self.l1 = L.Linear(None, words_per_title*200) # linear input layer
self.l2 = L.Deconvolution2D(in_channels=1, out_channels=1, ksize=3) # applying deconvolution
self.l3 = L.Linear(None, words_per_title * 200) # linear input layer
def __call__(self, x):
h = F.relu(self.l1(x)) # rectified activation function
h = F.reshape(h, (x.data.shape[0], 1,self.words_per_title,200))
h = F.relu(self.l2(h))
return F.reshape(self.l3(h),(x.data.shape[ | lines = lines + line.replace(',0', '').replace('\n', '').replace('"', '') + ',' + str(
dictionary.get('<eol>')) + ',' | conditional_block |
|
GAN_word_embedding.py | = [0] + list(np.add(endoftitles[:-1], 1))
idx = np.random.permutation(len(endoftitles))
endoftitles=[endoftitles[x] for x in idx]
startoftitles = [startoftitles[x] for x in idx]
lines = [lines[range(startoftitles[x], endoftitles[x] + 1)] for x in range(len(endoftitles))]
lines = np.hstack(lines)
# the function returns a vector containing all dictionary indices of all titles
return lines
# load and arrange the word embedding file
def get_embeddedwords(namefile='word2vec.model'):
print('loading ' + namefile + '...')
with open(namefile, 'r') as f:
ss = f.readline().split()
# each line is split and the respective human readable word and the vector embedding vector is extracted
n_vocab, n_units = int(ss[0]), int(ss[1])
word2index = {}
index2word = {}
w = np.empty((n_vocab, n_units), dtype=np.float32)
# the embedding matrix is created by sorting all word vectors according to the dictionary index
# the resulting matrix is of size NumIndices x 200
# note that splitting the line removes the whitespace token; it is re-added here
# and assigned an all-zero vector in w
for i, line in enumerate(f):
ss = line.split()
if len(ss)<201:
ss = [' ']+ss
word = ss[0]
word2index[word] = i
index2word[i] = word
w[i] = np.array([float(s) for s in ss[1:]], dtype=np.float32)
w[word2index[' ']]=np.zeros((1, 200))
print('done.')
# word2index transforms a dictionary index to a human readable word
# index2word transforms a human readable word to a dictionary index
return word2index,index2word,w
# this function is used to obtain the maximum number of words across all titles by finding <eol> statements
def get_max_words_over_titles(titles_raw,dictionary):
endoftitles = [x for x in range(len(titles_raw)) if titles_raw[x] == dictionary.get('<eol>')]
startoftitles = [0] + list(np.add(endoftitles[:-1], 1))
max_title_length_in_batch = max(np.abs(np.subtract(startoftitles, endoftitles))) + 1
return max_title_length_in_batch
# this function creates batch data used to train the network
def createtitlebatch(titles_raw,dictionary,skipbatches=0,numtitles=10,testpart=0.05):
# skip_ is used to select parts of the title vector
# skip_ = 10 given numtitles = 80 would mean that the first 800 titles in the vector are skipped and
# all following operations are performed on data that comes after that
skip_=numtitles*skipbatches
endoftitles = [x for x in range(len(titles_raw)) if titles_raw[x] == dictionary.get('<eol>')]
startoftitles = [0] + list(np.add(endoftitles[:-1], 1))
max_title_length_in_batch=max(np.abs(np.subtract(startoftitles,endoftitles)))+1
max_skip=len(endoftitles)
if max_skip<(numtitles+skip_):
print('maximum requested number of titles is '+ str(numtitles+skip_)+'; dataset only has ' + str(max_skip) + ' titles')
print('maximum number of batches at ' + str(numtitles) + ' titles per batch: ' + str(max_skip/numtitles-1))
else:
title_matrix=[]
# extraction of the data from w given the amount of titles selected
for n in range(skip_,numtitles+skip_):
title_num=n
title_vec=titles_raw[range(startoftitles[title_num],endoftitles[title_num]+1)]
title_matrix.append([w[x-1] for x in title_vec])
# shuffling the selected batch
randidx=np.random.permutation(len(title_matrix))
idx_train=randidx[:-int(np.floor(len(randidx)*(testpart)))]
idx_test=randidx[int(np.floor(len(randidx)*(1-testpart))):]
train = [title_matrix[x] for x in idx_train]
test = [title_matrix[x] for x in idx_test]
train = [np.concatenate((x,np.zeros((max_title_length_in_batch-len(x),200))),0) for x in train]
test = [np.concatenate((x, np.zeros((max_title_length_in_batch - len(x), 200))), 0) for x in test]
# train and test are returned in a shape optimized for the use with convolutional networks
# the respective shape is numExamples x channel x numWords x ndimEmbedding
return np.asarray(train).astype('float32').reshape((len(train),1,max_title_length_in_batch,200)),np.asarray(test).astype('float32').reshape((len(test),1,max_title_length_in_batch,200))  # examples x channel x words x embedding dim for both splits
# this function can be used to transform a title matrix (words x ndim) back into a human readable title
# the argmin() L2 norm between each word vector of the title and all word vectors in w serves as the
# index for the respective word in the dictionary
def vec2title(vec_,w,index2word):
|
# classifier to compute loss based on softmax cross entropy and accuracy
class Classifier(link.Chain):
compute_accuracy = True
def __init__(self, predictor,
lossfun=softmax_cross_entropy.softmax_cross_entropy,
accfun=accuracy.accuracy):
super(Classifier, self).__init__()
self.lossfun = lossfun
self.accfun = accfun
self.y = None
self.loss = None
self.accuracy = None
with self.init_scope():
self.predictor = predictor
def __call__(self, *args):
assert len(args) >= 2
x = args[:-1]
t = args[-1]
self.y = None
self.loss = None
self.accuracy = None
self.y = self.predictor(*x)
self.loss = self.lossfun(self.y, t)
reporter.report({'loss': self.loss}, self)
if self.compute_accuracy:
self.accuracy = self.accfun(self.y, t)
reporter.report({'accuracy': self.accuracy}, self)
return self.loss
# Convolutional neural network to do the discrimination
# respective architecture choices are explained in the report
class MLPConv(chainer.Chain):
def __init__(self,words_per_title):
super(MLPConv, self).__init__()
with self.init_scope():
self.words_per_title = words_per_title
self.conv = L.Convolution2D(in_channels=1, out_channels=1, ksize=3)
self.l2 = L.Linear(None, 2)
def __call__(self, x):
x2 = F.relu(self.conv(F.reshape(x,(x.data.shape[0], 1,self.words_per_title ,200))))
x3 = F.max_pooling_2d(x2, 3)
y = F.sigmoid(self.l2(F.dropout(x3,0.2)))
return y
# Deconvolutional neural network to do the generation
# respective architecture choices are explained in the report
class generator(chainer.Chain):
def __init__(self, words_per_title):
super(generator, self).__init__()
with self.init_scope():
self.words_per_title = words_per_title
self.l1 = L.Linear(None, words_per_title*200) # linear input layer
self.l2 = L.Deconvolution2D(in_channels=1, out_channels=1, ksize=3) # applying deconvolution
self.l3 = L.Linear(None, words_per_title * 200) # linear input layer
def __call__(self, x):
h = F.relu(self.l1(x)) # rectified activation function
h = F.reshape(h, (x.data.shape[0], 1,self.words_per_title,200))
h = F.relu(self.l2(h))
return F.reshape(self.l3(h),(x.data.shape[0], 1, self.words_per_title, 200))
# loading the respective data
word2index,index2word,w=get_embeddedwords()
dictionary=get_dictionary('dictionary.txt')
titles_high_raw=get_titles('titlesDict_high.txt',dictionary,shuffle=1)
# get maximum number of words in all titles
words_per_title = get_max_words_over_titles(titles_high_raw,dictionary)
# setup networks
dis = MLPConv(words_per_title)
gen = | dict_trans=w#(w-w.min())/(w-w.min()).max()
title_recon=''
for i in range(len(vec_)):
word_ = vec_.data[i]
word_ = np.tile(word_,(len(w),1))
dist_=np.sqrt(np.sum((dict_trans-word_)**2,1))
title_recon=title_recon+index2word[dist_.argmin()]+' '
return title_recon | identifier_body |
GAN_word_embedding.py | itles = [0] + list(np.add(endoftitles[:-1], 1))
idx = np.random.permutation(len(endoftitles))
endoftitles=[endoftitles[x] for x in idx]
startoftitles = [startoftitles[x] for x in idx]
lines = [lines[range(startoftitles[x], endoftitles[x] + 1)] for x in range(len(endoftitles))]
lines = np.hstack(lines)
# the function returns a vector containing all dictionary indices of all titles
return lines
# load and arrange the word embedding file
def get_embeddedwords(namefile='word2vec.model'):
print('loading ' + namefile + '...')
with open(namefile, 'r') as f:
ss = f.readline().split()
# each line is split and the respective human readable word and the vector embedding vector is extracted
n_vocab, n_units = int(ss[0]), int(ss[1])
word2index = {}
index2word = {}
w = np.empty((n_vocab, n_units), dtype=np.float32)
# the embedding matrix is created by sorting all word vectors according to the dictionary index
# the resulting matrix is of size NumIndices x 200
# note that splitting the line removes the whitespace token; it is re-added here
# and assigned an all-zero vector in w
for i, line in enumerate(f):
ss = line.split()
if len(ss)<201:
ss = [' ']+ss
word = ss[0]
word2index[word] = i
index2word[i] = word
w[i] = np.array([float(s) for s in ss[1:]], dtype=np.float32)
w[word2index[' ']]=np.zeros((1, 200))
print('done.')
# word2index transforms a dictionary index to a human readable word
# index2word transforms a human readable word to a dictionary index
return word2index,index2word,w
# this function is used to obtain the maximum number of words across all titles by finding <eol> statements
def get_max_words_over_titles(titles_raw,dictionary):
endoftitles = [x for x in range(len(titles_raw)) if titles_raw[x] == dictionary.get('<eol>')]
startoftitles = [0] + list(np.add(endoftitles[:-1], 1))
max_title_length_in_batch = max(np.abs(np.subtract(startoftitles, endoftitles))) + 1
return max_title_length_in_batch
# this function creates batch data used to train the network
def createtitlebatch(titles_raw,dictionary,skipbatches=0,numtitles=10,testpart=0.05):
# skip_ is used to select parts of the title vector
# skip_ = 10 given numtitles = 80 would mean that the first 800 titles in the vector are skipped and
# all following operations are performed on data that comes after that
skip_=numtitles*skipbatches
endoftitles = [x for x in range(len(titles_raw)) if titles_raw[x] == dictionary.get('<eol>')]
startoftitles = [0] + list(np.add(endoftitles[:-1], 1))
max_title_length_in_batch=max(np.abs(np.subtract(startoftitles,endoftitles)))+1
max_skip=len(endoftitles)
if max_skip<(numtitles+skip_):
print('maximum requested number of titles is '+ str(numtitles+skip_)+'; dataset only has ' + str(max_skip) + ' titles')
print('maximum number of batches at ' + str(numtitles) + ' titles per batch: ' + str(max_skip/numtitles-1))
else:
title_matrix=[]
# extraction of the data from w given the amount of titles selected
for n in range(skip_,numtitles+skip_):
title_num=n
title_vec=titles_raw[range(startoftitles[title_num],endoftitles[title_num]+1)]
title_matrix.append([w[x-1] for x in title_vec])
# shuffling the selected batch
randidx=np.random.permutation(len(title_matrix))
idx_train=randidx[:-int(np.floor(len(randidx)*(testpart)))]
idx_test=randidx[int(np.floor(len(randidx)*(1-testpart))):]
train = [title_matrix[x] for x in idx_train]
test = [title_matrix[x] for x in idx_test]
train = [np.concatenate((x,np.zeros((max_title_length_in_batch-len(x),200))),0) for x in train]
test = [np.concatenate((x, np.zeros((max_title_length_in_batch - len(x), 200))), 0) for x in test]
# train and test are returned in a shape optimized for the use with convolutional networks
# the respective shape is numExamples x channel x numWords x ndimEmbedding
return np.asarray(train).astype('float32').reshape((len(train),1,max_title_length_in_batch,200)),np.asarray(test).astype('float32').reshape((len(test),1,max_title_length_in_batch,200))  # examples x channel x words x embedding dim for both splits
# this function can be used to transform a title matrix (words x ndim) back into a human readable title
# the argmin() of the L2 norm between each word vector of the title and all word vectors in w serves as the
# index for the respective word in the dictionary
def vec2title(vec_,w,index2word):
dict_trans=w#(w-w.min())/(w-w.min()).max()
title_recon=''
for i in range(len(vec_)):
word_ = vec_.data[i]
word_ = np.tile(word_,(len(w),1))
dist_=np.sqrt(np.sum((dict_trans-word_)**2,1))
title_recon=title_recon+index2word[dist_.argmin()]+' '
return title_recon
# classifier to compute loss based on softmax cross entropy and accuracy
class Classifier(link.Chain):
compute_accuracy = True
def __init__(self, predictor,
lossfun=softmax_cross_entropy.softmax_cross_entropy,
accfun=accuracy.accuracy):
super(Classifier, self).__init__()
self.lossfun = lossfun
self.accfun = accfun |
with self.init_scope():
self.predictor = predictor
def __call__(self, *args):
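        # the last positional argument is the target label; everything before it is passed to the predictor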
assert len(args) >= 2
x = args[:-1]
t = args[-1]
self.y = None
self.loss = None
self.accuracy = None
self.y = self.predictor(*x)
self.loss = self.lossfun(self.y, t)
reporter.report({'loss': self.loss}, self)
if self.compute_accuracy:
self.accuracy = self.accfun(self.y, t)
reporter.report({'accuracy': self.accuracy}, self)
return self.loss
# Convolutional neural network used as the discriminator
# respective architecture choices are explained in the report
class MLPConv(chainer.Chain):
def __init__(self,words_per_title):
super(MLPConv, self).__init__()
with self.init_scope():
self.words_per_title = words_per_title
self.conv = L.Convolution2D(in_channels=1, out_channels=1, ksize=3)
self.l2 = L.Linear(None, 2)
def __call__(self, x):
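        # forward pass: reshape the batch to (batch, 1, words_per_title, 200), apply the 3x3 convolution
        # with ReLU, max-pool, then map the dropout-regularized features to two class scores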
x2 = F.relu(self.conv(F.reshape(x,(x.data.shape[0], 1,self.words_per_title ,200))))
x3 = F.max_pooling_2d(x2, 3)
y = F.sigmoid(self.l2(F.dropout(x3,0.2)))
return y
# Deconvolutional neural network used as the generator
# respective architecture choices are explained in the report
class generator(chainer.Chain):
def __init__(self, words_per_title):
super(generator, self).__init__()
with self.init_scope():
self.words_per_title = words_per_title
self.l1 = L.Linear(None, words_per_title*200) # linear input layer
self.l2 = L.Deconvolution2D(in_channels=1, out_channels=1, ksize=3) # applying deconvolution
self.l3 = L.Linear(None, words_per_title * 200) # linear input layer
def __call__(self, x):
h = F.relu(self.l1(x)) # rectified activation function
h = F.reshape(h, (x.data.shape[0], 1,self.words_per_title,200))
h = F.relu(self.l2(h))
return F.reshape(self.l3(h),(x.data.shape[0], 1, self.words_per_title, 200))
# loading the respective data
word2index,index2word,w=get_embeddedwords()
dictionary=get_dictionary('dictionary.txt')
titles_high_raw=get_titles('titlesDict_high.txt',dictionary,shuffle=1)
# get maximum number of words in all titles
words_per_title = get_max_words_over_titles(titles_high_raw,dictionary)
# setup networks
dis = MLPConv(words_per_title)
gen = | self.y = None
self.loss = None
self.accuracy = None | random_line_split |
GAN_word_embedding.py | = [0] + list(np.add(endoftitles[:-1], 1))
idx = np.random.permutation(len(endoftitles))
endoftitles=[endoftitles[x] for x in idx]
startoftitles = [startoftitles[x] for x in idx]
lines = [lines[range(startoftitles[x], endoftitles[x] + 1)] for x in range(len(endoftitles))]
lines = np.hstack(lines)
# the function returns a vector containing all dictionary indices of all titles
return lines
# load and arrange the word embedding file
def get_embeddedwords(namefile='word2vec.model'):
print('loading ' + namefile + '...')
with open(namefile, 'r') as f:
ss = f.readline().split()
    # each line is split and the respective human-readable word and its embedding vector are extracted
n_vocab, n_units = int(ss[0]), int(ss[1])
word2index = {}
index2word = {}
w = np.empty((n_vocab, n_units), dtype=np.float32)
# the embedding matrix is created by sorting all word vectors according to the dictionary index
# the resulting matrix is of size NumIndices x 200
    # note that splitting removes the whitespace token; it is therefore added back and a vector of
    # zeros is used for it within w
for i, line in enumerate(f):
ss = line.split()
if len(ss)<201:
ss = [' ']+ss
word = ss[0]
word2index[word] = i
index2word[i] = word
w[i] = np.array([float(s) for s in ss[1:]], dtype=np.float32)
w[word2index[' ']]=np.zeros((1, 200))
print('done.')
    # word2index maps a human readable word to its dictionary index
    # index2word maps a dictionary index back to the human readable word
return word2index,index2word,w
# this function is used to obtain the maximum number of words across all titles by finding <eol> statements
def get_max_words_over_titles(titles_raw,dictionary):
endoftitles = [x for x in range(len(titles_raw)) if titles_raw[x] == dictionary.get('<eol>')]
startoftitles = [0] + list(np.add(endoftitles[:-1], 1))
max_title_length_in_batch = max(np.abs(np.subtract(startoftitles, endoftitles))) + 1
return max_title_length_in_batch
# this function creates batch data used to train the network
def createtitlebatch(titles_raw,dictionary,skipbatches=0,numtitles=10,testpart=0.05):
# skip_ is used to select parts of the title vector
# skip_ = 10 given numtitles = 80 would mean that the first 800 titles in the vector are skipped and
# all following operations are performed on data that comes after that
skip_=numtitles*skipbatches
endoftitles = [x for x in range(len(titles_raw)) if titles_raw[x] == dictionary.get('<eol>')]
startoftitles = [0] + list(np.add(endoftitles[:-1], 1))
max_title_length_in_batch=max(np.abs(np.subtract(startoftitles,endoftitles)))+1
max_skip=len(endoftitles)
if max_skip<(numtitles+skip_):
print('maximum requested number of titles is '+ str(numtitles+skip_)+'; dataset only has ' + str(max_skip) + ' titles')
print('maximum number of batches at ' + str(numtitles) + ' titles per batch: ' + str(max_skip/numtitles-1))
else:
title_matrix=[]
# extraction of the data from w given the amount of titles selected
for n in range(skip_,numtitles+skip_):
title_num=n
title_vec=titles_raw[range(startoftitles[title_num],endoftitles[title_num]+1)]
title_matrix.append([w[x-1] for x in title_vec])
# shuffling the selected batch
randidx=np.random.permutation(len(title_matrix))
idx_train=randidx[:-int(np.floor(len(randidx)*(testpart)))]
idx_test=randidx[int(np.floor(len(randidx)*(1-testpart))):]
train = [title_matrix[x] for x in idx_train]
test = [title_matrix[x] for x in idx_test]
train = [np.concatenate((x,np.zeros((max_title_length_in_batch-len(x),200))),0) for x in train]
test = [np.concatenate((x, np.zeros((max_title_length_in_batch - len(x), 200))), 0) for x in test]
# train and test are returned in a shape optimized for the use with convolutional networks
# the respective shape is numExamples x channel x numWords x ndimEmbedding
        return np.asarray(train).astype('float32').reshape((len(train),1,max_title_length_in_batch,200)),np.asarray(test).astype('float32').reshape((len(test),1,max_title_length_in_batch,200))
# this function can be used to transform a title matrix (words x ndim) back into a human readable title
# the argmin() of the L2 norm between each word vector of the title and all word vectors in w serves as the
# index for the respective word in the dictionary
def vec2title(vec_,w,index2word):
dict_trans=w#(w-w.min())/(w-w.min()).max()
title_recon=''
for i in range(len(vec_)):
word_ = vec_.data[i]
word_ = np.tile(word_,(len(w),1))
dist_=np.sqrt(np.sum((dict_trans-word_)**2,1))
title_recon=title_recon+index2word[dist_.argmin()]+' '
return title_recon
# classifier to compute loss based on softmax cross entropy and accuracy
class | (link.Chain):
compute_accuracy = True
def __init__(self, predictor,
lossfun=softmax_cross_entropy.softmax_cross_entropy,
accfun=accuracy.accuracy):
super(Classifier, self).__init__()
self.lossfun = lossfun
self.accfun = accfun
self.y = None
self.loss = None
self.accuracy = None
with self.init_scope():
self.predictor = predictor
def __call__(self, *args):
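        # the last positional argument is the target label; everything before it is passed to the predictor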
assert len(args) >= 2
x = args[:-1]
t = args[-1]
self.y = None
self.loss = None
self.accuracy = None
self.y = self.predictor(*x)
self.loss = self.lossfun(self.y, t)
reporter.report({'loss': self.loss}, self)
if self.compute_accuracy:
self.accuracy = self.accfun(self.y, t)
reporter.report({'accuracy': self.accuracy}, self)
return self.loss
# Convolutional neural network used as the discriminator
# respective architecture choices are explained in the report
class MLPConv(chainer.Chain):
def __init__(self,words_per_title):
super(MLPConv, self).__init__()
with self.init_scope():
self.words_per_title = words_per_title
self.conv = L.Convolution2D(in_channels=1, out_channels=1, ksize=3)
self.l2 = L.Linear(None, 2)
def __call__(self, x):
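        # forward pass: reshape the batch to (batch, 1, words_per_title, 200), apply the 3x3 convolution
        # with ReLU, max-pool, then map the dropout-regularized features to two class scores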
x2 = F.relu(self.conv(F.reshape(x,(x.data.shape[0], 1,self.words_per_title ,200))))
x3 = F.max_pooling_2d(x2, 3)
y = F.sigmoid(self.l2(F.dropout(x3,0.2)))
return y
# Deconvolutional neural network used as the generator
# respective architecture choices are explained in the report
class generator(chainer.Chain):
def __init__(self, words_per_title):
super(generator, self).__init__()
with self.init_scope():
self.words_per_title = words_per_title
self.l1 = L.Linear(None, words_per_title*200) # linear input layer
self.l2 = L.Deconvolution2D(in_channels=1, out_channels=1, ksize=3) # applying deconvolution
self.l3 = L.Linear(None, words_per_title * 200) # linear input layer
def __call__(self, x):
h = F.relu(self.l1(x)) # rectified activation function
h = F.reshape(h, (x.data.shape[0], 1,self.words_per_title,200))
h = F.relu(self.l2(h))
return F.reshape(self.l3(h),(x.data.shape[0], 1, self.words_per_title, 200))
# loading the respective data
word2index,index2word,w=get_embeddedwords()
dictionary=get_dictionary('dictionary.txt')
titles_high_raw=get_titles('titlesDict_high.txt',dictionary,shuffle=1)
# get maximum number of words in all titles
words_per_title = get_max_words_over_titles(titles_high_raw,dictionary)
# setup networks
dis = MLPConv(words_per_title)
gen | Classifier | identifier_name |
jobs.py | _ms
logger = logging.getLogger('arq.jobs')
Serializer = Callable[[Dict[str, Any]], bytes]
Deserializer = Callable[[bytes], Dict[str, Any]]
class ResultNotFound(RuntimeError):
pass
class JobStatus(str, Enum):
"""
Enum of job statuses.
"""
#: job is in the queue, time it should be run not yet reached
deferred = 'deferred'
#: job is in the queue, time it should run has been reached
queued = 'queued'
#: job is in progress
in_progress = 'in_progress'
#: job is complete, result is available
complete = 'complete'
#: job not found in any way
not_found = 'not_found'
@dataclass
class JobDef:
function: str
args: Tuple[Any, ...]
kwargs: Dict[str, Any]
job_try: int
enqueue_time: datetime
score: Optional[int]
def __post_init__(self) -> None:
if isinstance(self.score, float):
self.score = int(self.score)
@dataclass
class JobResult(JobDef):
success: bool
result: Any
start_time: datetime
finish_time: datetime
queue_name: str
job_id: Optional[str] = None
class Job:
"""
    Holds data and a reference to a job.
"""
__slots__ = 'job_id', '_redis', '_queue_name', '_deserializer'
def __init__(
self,
job_id: str,
redis: 'Redis[bytes]',
_queue_name: str = default_queue_name,
_deserializer: Optional[Deserializer] = None,
):
self.job_id = job_id
self._redis = redis
self._queue_name = _queue_name
self._deserializer = _deserializer
async def result(
self, timeout: Optional[float] = None, *, poll_delay: float = 0.5, pole_delay: float = None
) -> Any:
"""
Get the result of the job or, if the job raised an exception, reraise it.
This function waits for the result if it's not yet available and the job is
present in the queue. Otherwise ``ResultNotFound`` is raised.
        :param timeout: maximum time to wait for the job result before raising ``TimeoutError``, will wait forever on None
:param poll_delay: how often to poll redis for the job result
:param pole_delay: deprecated, use poll_delay instead
"""
if pole_delay is not None:
warnings.warn(
'"pole_delay" is deprecated, use the correct spelling "poll_delay" instead', DeprecationWarning
)
poll_delay = pole_delay
async for delay in poll(poll_delay):
async with self._redis.pipeline(transaction=True) as tr:
tr.get(result_key_prefix + self.job_id) # type: ignore[unused-coroutine]
tr.zscore(self._queue_name, self.job_id) # type: ignore[unused-coroutine]
v, s = await tr.execute()
if v:
info = deserialize_result(v, deserializer=self._deserializer)
if info.success:
return info.result
elif isinstance(info.result, (Exception, asyncio.CancelledError)):
raise info.result
else:
raise SerializationError(info.result)
elif s is None:
raise ResultNotFound(
'Not waiting for job result because the job is not in queue. '
'Is the worker function configured to keep result?'
)
if timeout is not None and delay > timeout:
raise asyncio.TimeoutError()
async def info(self) -> Optional[JobDef]:
"""
All information on a job, including its result if it's available, does not wait for the result.
"""
info: Optional[JobDef] = await self.result_info()
if not info:
v = await self._redis.get(job_key_prefix + self.job_id)
if v:
info = deserialize_job(v, deserializer=self._deserializer)
if info:
s = await self._redis.zscore(self._queue_name, self.job_id)
info.score = None if s is None else int(s)
return info
async def result_info(self) -> Optional[JobResult]:
"""
Information about the job result if available, does not wait for the result. Does not raise an exception
even if the job raised one.
"""
v = await self._redis.get(result_key_prefix + self.job_id)
if v:
return deserialize_result(v, deserializer=self._deserializer)
else:
return None
async def status(self) -> JobStatus:
"""
Status of the job.
"""
async with self._redis.pipeline(transaction=True) as tr:
tr.exists(result_key_prefix + self.job_id) # type: ignore[unused-coroutine]
tr.exists(in_progress_key_prefix + self.job_id) # type: ignore[unused-coroutine]
tr.zscore(self._queue_name, self.job_id) # type: ignore[unused-coroutine]
is_complete, is_in_progress, score = await tr.execute()
if is_complete:
return JobStatus.complete
elif is_in_progress:
return JobStatus.in_progress
elif score:
return JobStatus.deferred if score > timestamp_ms() else JobStatus.queued
else:
return JobStatus.not_found
async def abort(self, *, timeout: Optional[float] = None, poll_delay: float = 0.5) -> bool:
"""
Abort the job.
:param timeout: maximum time to wait for the job result before raising ``TimeoutError``,
will wait forever on None
:param poll_delay: how often to poll redis for the job result
:return: True if the job aborted properly, False otherwise
"""
job_info = await self.info()
if job_info and job_info.score and job_info.score > timestamp_ms():
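            # the job is still deferred; re-queue it with an immediate score so the worker picks it up and can observe the abort request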
async with self._redis.pipeline(transaction=True) as tr:
tr.zrem(self._queue_name, self.job_id) # type: ignore[unused-coroutine]
tr.zadd(self._queue_name, {self.job_id: 1}) # type: ignore[unused-coroutine]
await tr.execute()
await self._redis.zadd(abort_jobs_ss, {self.job_id: timestamp_ms()})
try:
await self.result(timeout=timeout, poll_delay=poll_delay)
except asyncio.CancelledError:
return True
except ResultNotFound:
# We do not know if the job was cancelled or not
return False
else:
return False
def __repr__(self) -> str:
return f'<arq job {self.job_id}>'
class SerializationError(RuntimeError):
pass
class DeserializationError(SerializationError):
pass
def serialize_job(
function_name: str,
args: Tuple[Any, ...],
kwargs: Dict[str, Any],
job_try: Optional[int],
enqueue_time_ms: int,
*,
serializer: Optional[Serializer] = None,
) -> bytes:
data = {'t': job_try, 'f': function_name, 'a': args, 'k': kwargs, 'et': enqueue_time_ms}
if serializer is None:
serializer = pickle.dumps
try:
return serializer(data)
except Exception as e:
raise SerializationError(f'unable to serialize job "{function_name}"') from e
def serialize_result(
function: str,
args: Tuple[Any, ...],
kwargs: Dict[str, Any],
job_try: int,
enqueue_time_ms: int,
success: bool,
result: Any,
start_ms: int,
finished_ms: int,
ref: str,
queue_name: str,
*,
serializer: Optional[Serializer] = None,
) -> Optional[bytes]:
data = {
't': job_try,
'f': function,
'a': args,
'k': kwargs,
'et': enqueue_time_ms,
's': success,
'r': result,
'st': start_ms,
'ft': finished_ms,
'q': queue_name,
}
if serializer is None:
serializer = pickle.dumps
try:
return serializer(data)
except Exception:
logger.warning('error serializing result of %s', ref, exc_info=True)
# use string in case serialization fails again
data.update(r='unable to serialize result', s=False)
try:
return serializer(data)
except Exception:
logger.critical('error serializing result of %s even after replacing result', ref, exc_info=True)
return None
def deserialize_job(r: bytes, *, deserializer: Optional[Deserializer] = None) -> JobDef:
if deserializer is None:
deserializer = pickle.loads
try:
d = deserializer(r)
return JobDef(
function=d['f'],
args=d['a'],
kwargs=d['k'],
job_try=d['t'],
enqueue_time=ms_to_datetime(d['et']),
score=None,
)
except Exception as e:
raise DeserializationError('unable to deserialize job') from e
def | (
r: bytes, *, deserializer: Optional[Deserializer] = None
) -> Tuple[str, Tuple[Any, ...], Dict[str, Any], int, int]:
if deserializer is None:
deserializer = pickle.loads
try:
d = deserializer(r)
| deserialize_job_raw | identifier_name |
jobs.py | _ms
logger = logging.getLogger('arq.jobs')
Serializer = Callable[[Dict[str, Any]], bytes]
Deserializer = Callable[[bytes], Dict[str, Any]]
class ResultNotFound(RuntimeError):
pass
class JobStatus(str, Enum):
"""
Enum of job statuses.
"""
#: job is in the queue, time it should be run not yet reached
deferred = 'deferred'
#: job is in the queue, time it should run has been reached
queued = 'queued'
#: job is in progress
in_progress = 'in_progress'
#: job is complete, result is available
complete = 'complete'
#: job not found in any way
not_found = 'not_found'
@dataclass
class JobDef:
function: str
args: Tuple[Any, ...]
kwargs: Dict[str, Any]
job_try: int
enqueue_time: datetime
score: Optional[int]
def __post_init__(self) -> None:
if isinstance(self.score, float):
self.score = int(self.score)
@dataclass
class JobResult(JobDef):
success: bool
result: Any
start_time: datetime
finish_time: datetime
queue_name: str
job_id: Optional[str] = None
class Job:
"""
    Holds data and a reference to a job.
"""
__slots__ = 'job_id', '_redis', '_queue_name', '_deserializer'
def __init__(
self,
job_id: str,
redis: 'Redis[bytes]',
_queue_name: str = default_queue_name,
_deserializer: Optional[Deserializer] = None,
):
self.job_id = job_id
self._redis = redis
self._queue_name = _queue_name
self._deserializer = _deserializer
async def result(
self, timeout: Optional[float] = None, *, poll_delay: float = 0.5, pole_delay: float = None
) -> Any:
"""
Get the result of the job or, if the job raised an exception, reraise it.
This function waits for the result if it's not yet available and the job is
present in the queue. Otherwise ``ResultNotFound`` is raised.
        :param timeout: maximum time to wait for the job result before raising ``TimeoutError``, will wait forever on None
:param poll_delay: how often to poll redis for the job result
:param pole_delay: deprecated, use poll_delay instead
"""
if pole_delay is not None:
warnings.warn(
'"pole_delay" is deprecated, use the correct spelling "poll_delay" instead', DeprecationWarning
)
poll_delay = pole_delay
async for delay in poll(poll_delay):
async with self._redis.pipeline(transaction=True) as tr:
tr.get(result_key_prefix + self.job_id) # type: ignore[unused-coroutine]
tr.zscore(self._queue_name, self.job_id) # type: ignore[unused-coroutine]
v, s = await tr.execute()
if v:
info = deserialize_result(v, deserializer=self._deserializer)
if info.success:
return info.result
elif isinstance(info.result, (Exception, asyncio.CancelledError)):
raise info.result
else:
raise SerializationError(info.result)
elif s is None:
raise ResultNotFound(
'Not waiting for job result because the job is not in queue. '
'Is the worker function configured to keep result?'
)
if timeout is not None and delay > timeout:
raise asyncio.TimeoutError()
async def info(self) -> Optional[JobDef]:
"""
All information on a job, including its result if it's available, does not wait for the result.
"""
info: Optional[JobDef] = await self.result_info()
if not info:
v = await self._redis.get(job_key_prefix + self.job_id)
if v:
info = deserialize_job(v, deserializer=self._deserializer)
if info:
s = await self._redis.zscore(self._queue_name, self.job_id)
info.score = None if s is None else int(s)
return info
async def result_info(self) -> Optional[JobResult]:
"""
Information about the job result if available, does not wait for the result. Does not raise an exception
even if the job raised one.
"""
v = await self._redis.get(result_key_prefix + self.job_id)
if v:
return deserialize_result(v, deserializer=self._deserializer)
else:
return None
async def status(self) -> JobStatus:
|
async def abort(self, *, timeout: Optional[float] = None, poll_delay: float = 0.5) -> bool:
"""
Abort the job.
:param timeout: maximum time to wait for the job result before raising ``TimeoutError``,
will wait forever on None
:param poll_delay: how often to poll redis for the job result
:return: True if the job aborted properly, False otherwise
"""
job_info = await self.info()
if job_info and job_info.score and job_info.score > timestamp_ms():
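            # the job is still deferred; re-queue it with an immediate score so the worker picks it up and can observe the abort request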
async with self._redis.pipeline(transaction=True) as tr:
tr.zrem(self._queue_name, self.job_id) # type: ignore[unused-coroutine]
tr.zadd(self._queue_name, {self.job_id: 1}) # type: ignore[unused-coroutine]
await tr.execute()
await self._redis.zadd(abort_jobs_ss, {self.job_id: timestamp_ms()})
try:
await self.result(timeout=timeout, poll_delay=poll_delay)
except asyncio.CancelledError:
return True
except ResultNotFound:
# We do not know if the job was cancelled or not
return False
else:
return False
def __repr__(self) -> str:
return f'<arq job {self.job_id}>'
class SerializationError(RuntimeError):
pass
class DeserializationError(SerializationError):
pass
def serialize_job(
function_name: str,
args: Tuple[Any, ...],
kwargs: Dict[str, Any],
job_try: Optional[int],
enqueue_time_ms: int,
*,
serializer: Optional[Serializer] = None,
) -> bytes:
data = {'t': job_try, 'f': function_name, 'a': args, 'k': kwargs, 'et': enqueue_time_ms}
if serializer is None:
serializer = pickle.dumps
try:
return serializer(data)
except Exception as e:
raise SerializationError(f'unable to serialize job "{function_name}"') from e
def serialize_result(
function: str,
args: Tuple[Any, ...],
kwargs: Dict[str, Any],
job_try: int,
enqueue_time_ms: int,
success: bool,
result: Any,
start_ms: int,
finished_ms: int,
ref: str,
queue_name: str,
*,
serializer: Optional[Serializer] = None,
) -> Optional[bytes]:
data = {
't': job_try,
'f': function,
'a': args,
'k': kwargs,
'et': enqueue_time_ms,
's': success,
'r': result,
'st': start_ms,
'ft': finished_ms,
'q': queue_name,
}
if serializer is None:
serializer = pickle.dumps
try:
return serializer(data)
except Exception:
logger.warning('error serializing result of %s', ref, exc_info=True)
# use string in case serialization fails again
data.update(r='unable to serialize result', s=False)
try:
return serializer(data)
except Exception:
logger.critical('error serializing result of %s even after replacing result', ref, exc_info=True)
return None
def deserialize_job(r: bytes, *, deserializer: Optional[Deserializer] = None) -> JobDef:
if deserializer is None:
deserializer = pickle.loads
try:
d = deserializer(r)
return JobDef(
function=d['f'],
args=d['a'],
kwargs=d['k'],
job_try=d['t'],
enqueue_time=ms_to_datetime(d['et']),
score=None,
)
except Exception as e:
raise DeserializationError('unable to deserialize job') from e
def deserialize_job_raw(
r: bytes, *, deserializer: Optional[Deserializer] = None
) -> Tuple[str, Tuple[Any, ...], Dict[str, Any], int, int]:
if deserializer is None:
deserializer = pickle.loads
try:
d = deserializer(r)
return | """
Status of the job.
"""
async with self._redis.pipeline(transaction=True) as tr:
tr.exists(result_key_prefix + self.job_id) # type: ignore[unused-coroutine]
tr.exists(in_progress_key_prefix + self.job_id) # type: ignore[unused-coroutine]
tr.zscore(self._queue_name, self.job_id) # type: ignore[unused-coroutine]
is_complete, is_in_progress, score = await tr.execute()
if is_complete:
return JobStatus.complete
elif is_in_progress:
return JobStatus.in_progress
elif score:
return JobStatus.deferred if score > timestamp_ms() else JobStatus.queued
else:
return JobStatus.not_found | identifier_body |
jobs.py | _ms
logger = logging.getLogger('arq.jobs')
Serializer = Callable[[Dict[str, Any]], bytes]
Deserializer = Callable[[bytes], Dict[str, Any]]
class ResultNotFound(RuntimeError):
pass
class JobStatus(str, Enum):
"""
Enum of job statuses.
"""
#: job is in the queue, time it should be run not yet reached
deferred = 'deferred'
#: job is in the queue, time it should run has been reached
queued = 'queued'
#: job is in progress
in_progress = 'in_progress'
#: job is complete, result is available
complete = 'complete'
#: job not found in any way
not_found = 'not_found'
@dataclass
class JobDef:
function: str
args: Tuple[Any, ...]
kwargs: Dict[str, Any]
job_try: int
enqueue_time: datetime
score: Optional[int]
def __post_init__(self) -> None:
if isinstance(self.score, float):
self.score = int(self.score)
@dataclass
class JobResult(JobDef):
success: bool
result: Any
start_time: datetime
finish_time: datetime
queue_name: str
job_id: Optional[str] = None
class Job:
"""
    Holds data and a reference to a job.
"""
__slots__ = 'job_id', '_redis', '_queue_name', '_deserializer'
def __init__(
self,
job_id: str, | ):
self.job_id = job_id
self._redis = redis
self._queue_name = _queue_name
self._deserializer = _deserializer
async def result(
self, timeout: Optional[float] = None, *, poll_delay: float = 0.5, pole_delay: float = None
) -> Any:
"""
Get the result of the job or, if the job raised an exception, reraise it.
This function waits for the result if it's not yet available and the job is
present in the queue. Otherwise ``ResultNotFound`` is raised.
        :param timeout: maximum time to wait for the job result before raising ``TimeoutError``, will wait forever on None
:param poll_delay: how often to poll redis for the job result
:param pole_delay: deprecated, use poll_delay instead
"""
if pole_delay is not None:
warnings.warn(
'"pole_delay" is deprecated, use the correct spelling "poll_delay" instead', DeprecationWarning
)
poll_delay = pole_delay
async for delay in poll(poll_delay):
async with self._redis.pipeline(transaction=True) as tr:
tr.get(result_key_prefix + self.job_id) # type: ignore[unused-coroutine]
tr.zscore(self._queue_name, self.job_id) # type: ignore[unused-coroutine]
v, s = await tr.execute()
if v:
info = deserialize_result(v, deserializer=self._deserializer)
if info.success:
return info.result
elif isinstance(info.result, (Exception, asyncio.CancelledError)):
raise info.result
else:
raise SerializationError(info.result)
elif s is None:
raise ResultNotFound(
'Not waiting for job result because the job is not in queue. '
'Is the worker function configured to keep result?'
)
if timeout is not None and delay > timeout:
raise asyncio.TimeoutError()
async def info(self) -> Optional[JobDef]:
"""
All information on a job, including its result if it's available, does not wait for the result.
"""
info: Optional[JobDef] = await self.result_info()
if not info:
v = await self._redis.get(job_key_prefix + self.job_id)
if v:
info = deserialize_job(v, deserializer=self._deserializer)
if info:
s = await self._redis.zscore(self._queue_name, self.job_id)
info.score = None if s is None else int(s)
return info
async def result_info(self) -> Optional[JobResult]:
"""
Information about the job result if available, does not wait for the result. Does not raise an exception
even if the job raised one.
"""
v = await self._redis.get(result_key_prefix + self.job_id)
if v:
return deserialize_result(v, deserializer=self._deserializer)
else:
return None
async def status(self) -> JobStatus:
"""
Status of the job.
"""
async with self._redis.pipeline(transaction=True) as tr:
tr.exists(result_key_prefix + self.job_id) # type: ignore[unused-coroutine]
tr.exists(in_progress_key_prefix + self.job_id) # type: ignore[unused-coroutine]
tr.zscore(self._queue_name, self.job_id) # type: ignore[unused-coroutine]
is_complete, is_in_progress, score = await tr.execute()
if is_complete:
return JobStatus.complete
elif is_in_progress:
return JobStatus.in_progress
elif score:
return JobStatus.deferred if score > timestamp_ms() else JobStatus.queued
else:
return JobStatus.not_found
async def abort(self, *, timeout: Optional[float] = None, poll_delay: float = 0.5) -> bool:
"""
Abort the job.
:param timeout: maximum time to wait for the job result before raising ``TimeoutError``,
will wait forever on None
:param poll_delay: how often to poll redis for the job result
:return: True if the job aborted properly, False otherwise
"""
job_info = await self.info()
if job_info and job_info.score and job_info.score > timestamp_ms():
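            # the job is still deferred; re-queue it with an immediate score so the worker picks it up and can observe the abort request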
async with self._redis.pipeline(transaction=True) as tr:
tr.zrem(self._queue_name, self.job_id) # type: ignore[unused-coroutine]
tr.zadd(self._queue_name, {self.job_id: 1}) # type: ignore[unused-coroutine]
await tr.execute()
await self._redis.zadd(abort_jobs_ss, {self.job_id: timestamp_ms()})
try:
await self.result(timeout=timeout, poll_delay=poll_delay)
except asyncio.CancelledError:
return True
except ResultNotFound:
# We do not know if the job was cancelled or not
return False
else:
return False
def __repr__(self) -> str:
return f'<arq job {self.job_id}>'
class SerializationError(RuntimeError):
pass
class DeserializationError(SerializationError):
pass
def serialize_job(
function_name: str,
args: Tuple[Any, ...],
kwargs: Dict[str, Any],
job_try: Optional[int],
enqueue_time_ms: int,
*,
serializer: Optional[Serializer] = None,
) -> bytes:
data = {'t': job_try, 'f': function_name, 'a': args, 'k': kwargs, 'et': enqueue_time_ms}
if serializer is None:
serializer = pickle.dumps
try:
return serializer(data)
except Exception as e:
raise SerializationError(f'unable to serialize job "{function_name}"') from e
def serialize_result(
function: str,
args: Tuple[Any, ...],
kwargs: Dict[str, Any],
job_try: int,
enqueue_time_ms: int,
success: bool,
result: Any,
start_ms: int,
finished_ms: int,
ref: str,
queue_name: str,
*,
serializer: Optional[Serializer] = None,
) -> Optional[bytes]:
data = {
't': job_try,
'f': function,
'a': args,
'k': kwargs,
'et': enqueue_time_ms,
's': success,
'r': result,
'st': start_ms,
'ft': finished_ms,
'q': queue_name,
}
if serializer is None:
serializer = pickle.dumps
try:
return serializer(data)
except Exception:
logger.warning('error serializing result of %s', ref, exc_info=True)
# use string in case serialization fails again
data.update(r='unable to serialize result', s=False)
try:
return serializer(data)
except Exception:
logger.critical('error serializing result of %s even after replacing result', ref, exc_info=True)
return None
def deserialize_job(r: bytes, *, deserializer: Optional[Deserializer] = None) -> JobDef:
if deserializer is None:
deserializer = pickle.loads
try:
d = deserializer(r)
return JobDef(
function=d['f'],
args=d['a'],
kwargs=d['k'],
job_try=d['t'],
enqueue_time=ms_to_datetime(d['et']),
score=None,
)
except Exception as e:
raise DeserializationError('unable to deserialize job') from e
def deserialize_job_raw(
r: bytes, *, deserializer: Optional[Deserializer] = None
) -> Tuple[str, Tuple[Any, ...], Dict[str, Any], int, int]:
if deserializer is None:
deserializer = pickle.loads
try:
d = deserializer(r)
return | redis: 'Redis[bytes]',
_queue_name: str = default_queue_name,
_deserializer: Optional[Deserializer] = None, | random_line_split |
jobs.py | _ms
logger = logging.getLogger('arq.jobs')
Serializer = Callable[[Dict[str, Any]], bytes]
Deserializer = Callable[[bytes], Dict[str, Any]]
class ResultNotFound(RuntimeError):
pass
class JobStatus(str, Enum):
"""
Enum of job statuses.
"""
#: job is in the queue, time it should be run not yet reached
deferred = 'deferred'
#: job is in the queue, time it should run has been reached
queued = 'queued'
#: job is in progress
in_progress = 'in_progress'
#: job is complete, result is available
complete = 'complete'
#: job not found in any way
not_found = 'not_found'
@dataclass
class JobDef:
function: str
args: Tuple[Any, ...]
kwargs: Dict[str, Any]
job_try: int
enqueue_time: datetime
score: Optional[int]
def __post_init__(self) -> None:
if isinstance(self.score, float):
self.score = int(self.score)
@dataclass
class JobResult(JobDef):
success: bool
result: Any
start_time: datetime
finish_time: datetime
queue_name: str
job_id: Optional[str] = None
class Job:
"""
    Holds data and a reference to a job.
"""
__slots__ = 'job_id', '_redis', '_queue_name', '_deserializer'
def __init__(
self,
job_id: str,
redis: 'Redis[bytes]',
_queue_name: str = default_queue_name,
_deserializer: Optional[Deserializer] = None,
):
self.job_id = job_id
self._redis = redis
self._queue_name = _queue_name
self._deserializer = _deserializer
async def result(
self, timeout: Optional[float] = None, *, poll_delay: float = 0.5, pole_delay: float = None
) -> Any:
"""
Get the result of the job or, if the job raised an exception, reraise it.
This function waits for the result if it's not yet available and the job is
present in the queue. Otherwise ``ResultNotFound`` is raised.
        :param timeout: maximum time to wait for the job result before raising ``TimeoutError``, will wait forever on None
:param poll_delay: how often to poll redis for the job result
:param pole_delay: deprecated, use poll_delay instead
"""
if pole_delay is not None:
warnings.warn(
'"pole_delay" is deprecated, use the correct spelling "poll_delay" instead', DeprecationWarning
)
poll_delay = pole_delay
async for delay in poll(poll_delay):
async with self._redis.pipeline(transaction=True) as tr:
tr.get(result_key_prefix + self.job_id) # type: ignore[unused-coroutine]
tr.zscore(self._queue_name, self.job_id) # type: ignore[unused-coroutine]
v, s = await tr.execute()
if v:
|
elif s is None:
raise ResultNotFound(
'Not waiting for job result because the job is not in queue. '
'Is the worker function configured to keep result?'
)
if timeout is not None and delay > timeout:
raise asyncio.TimeoutError()
async def info(self) -> Optional[JobDef]:
"""
All information on a job, including its result if it's available, does not wait for the result.
"""
info: Optional[JobDef] = await self.result_info()
if not info:
v = await self._redis.get(job_key_prefix + self.job_id)
if v:
info = deserialize_job(v, deserializer=self._deserializer)
if info:
s = await self._redis.zscore(self._queue_name, self.job_id)
info.score = None if s is None else int(s)
return info
async def result_info(self) -> Optional[JobResult]:
"""
Information about the job result if available, does not wait for the result. Does not raise an exception
even if the job raised one.
"""
v = await self._redis.get(result_key_prefix + self.job_id)
if v:
return deserialize_result(v, deserializer=self._deserializer)
else:
return None
async def status(self) -> JobStatus:
"""
Status of the job.
"""
async with self._redis.pipeline(transaction=True) as tr:
tr.exists(result_key_prefix + self.job_id) # type: ignore[unused-coroutine]
tr.exists(in_progress_key_prefix + self.job_id) # type: ignore[unused-coroutine]
tr.zscore(self._queue_name, self.job_id) # type: ignore[unused-coroutine]
is_complete, is_in_progress, score = await tr.execute()
if is_complete:
return JobStatus.complete
elif is_in_progress:
return JobStatus.in_progress
elif score:
return JobStatus.deferred if score > timestamp_ms() else JobStatus.queued
else:
return JobStatus.not_found
async def abort(self, *, timeout: Optional[float] = None, poll_delay: float = 0.5) -> bool:
"""
Abort the job.
:param timeout: maximum time to wait for the job result before raising ``TimeoutError``,
will wait forever on None
:param poll_delay: how often to poll redis for the job result
:return: True if the job aborted properly, False otherwise
"""
job_info = await self.info()
if job_info and job_info.score and job_info.score > timestamp_ms():
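            # the job is still deferred; re-queue it with an immediate score so the worker picks it up and can observe the abort request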
async with self._redis.pipeline(transaction=True) as tr:
tr.zrem(self._queue_name, self.job_id) # type: ignore[unused-coroutine]
tr.zadd(self._queue_name, {self.job_id: 1}) # type: ignore[unused-coroutine]
await tr.execute()
await self._redis.zadd(abort_jobs_ss, {self.job_id: timestamp_ms()})
try:
await self.result(timeout=timeout, poll_delay=poll_delay)
except asyncio.CancelledError:
return True
except ResultNotFound:
# We do not know if the job was cancelled or not
return False
else:
return False
def __repr__(self) -> str:
return f'<arq job {self.job_id}>'
class SerializationError(RuntimeError):
pass
class DeserializationError(SerializationError):
pass
def serialize_job(
function_name: str,
args: Tuple[Any, ...],
kwargs: Dict[str, Any],
job_try: Optional[int],
enqueue_time_ms: int,
*,
serializer: Optional[Serializer] = None,
) -> bytes:
data = {'t': job_try, 'f': function_name, 'a': args, 'k': kwargs, 'et': enqueue_time_ms}
if serializer is None:
serializer = pickle.dumps
try:
return serializer(data)
except Exception as e:
raise SerializationError(f'unable to serialize job "{function_name}"') from e
def serialize_result(
function: str,
args: Tuple[Any, ...],
kwargs: Dict[str, Any],
job_try: int,
enqueue_time_ms: int,
success: bool,
result: Any,
start_ms: int,
finished_ms: int,
ref: str,
queue_name: str,
*,
serializer: Optional[Serializer] = None,
) -> Optional[bytes]:
data = {
't': job_try,
'f': function,
'a': args,
'k': kwargs,
'et': enqueue_time_ms,
's': success,
'r': result,
'st': start_ms,
'ft': finished_ms,
'q': queue_name,
}
if serializer is None:
serializer = pickle.dumps
try:
return serializer(data)
except Exception:
logger.warning('error serializing result of %s', ref, exc_info=True)
# use string in case serialization fails again
data.update(r='unable to serialize result', s=False)
try:
return serializer(data)
except Exception:
logger.critical('error serializing result of %s even after replacing result', ref, exc_info=True)
return None
def deserialize_job(r: bytes, *, deserializer: Optional[Deserializer] = None) -> JobDef:
if deserializer is None:
deserializer = pickle.loads
try:
d = deserializer(r)
return JobDef(
function=d['f'],
args=d['a'],
kwargs=d['k'],
job_try=d['t'],
enqueue_time=ms_to_datetime(d['et']),
score=None,
)
except Exception as e:
raise DeserializationError('unable to deserialize job') from e
def deserialize_job_raw(
r: bytes, *, deserializer: Optional[Deserializer] = None
) -> Tuple[str, Tuple[Any, ...], Dict[str, Any], int, int]:
if deserializer is None:
deserializer = pickle.loads
try:
d = deserializer(r)
| info = deserialize_result(v, deserializer=self._deserializer)
if info.success:
return info.result
elif isinstance(info.result, (Exception, asyncio.CancelledError)):
raise info.result
else:
raise SerializationError(info.result) | conditional_block |
machineconfig.go | fd = "/etc/crio/crio.conf.d"
crioRuntimesConfig = "99-runtimes.conf"
// OCIHooksConfigDir is the default directory for the OCI hooks
OCIHooksConfigDir = "/etc/containers/oci/hooks.d"
// OCIHooksConfig file contains the low latency hooks configuration
OCIHooksConfig = "99-low-latency-hooks.json"
ociTemplateRPSMask = "RPSMask"
udevRulesDir = "/etc/udev/rules.d"
udevRpsRules = "99-netdev-rps.rules"
// scripts
hugepagesAllocation = "hugepages-allocation"
ociHooks = "low-latency-hooks"
setRPSMask = "set-rps-mask"
)
const (
systemdSectionUnit = "Unit"
systemdSectionService = "Service"
systemdSectionInstall = "Install"
systemdDescription = "Description"
systemdBefore = "Before"
systemdEnvironment = "Environment"
systemdType = "Type"
systemdRemainAfterExit = "RemainAfterExit"
systemdExecStart = "ExecStart"
systemdWantedBy = "WantedBy"
)
const (
systemdServiceKubelet = "kubelet.service"
systemdServiceTypeOneshot = "oneshot"
systemdTargetMultiUser = "multi-user.target"
systemdTrue = "true"
)
const (
environmentHugepagesSize = "HUGEPAGES_SIZE"
environmentHugepagesCount = "HUGEPAGES_COUNT"
environmentNUMANode = "NUMA_NODE"
)
const (
templateReservedCpus = "ReservedCpus"
)
// New returns new machine configuration object for performance sensitive workloads
func New(profile *performancev2.PerformanceProfile) (*machineconfigv1.MachineConfig, error) | if err != nil {
return nil, err
}
mc.Spec.Config = runtime.RawExtension{Raw: rawIgnition}
enableRTKernel := profile.Spec.RealTimeKernel != nil &&
profile.Spec.RealTimeKernel.Enabled != nil &&
*profile.Spec.RealTimeKernel.Enabled
if enableRTKernel {
mc.Spec.KernelType = MCKernelRT
} else {
mc.Spec.KernelType = MCKernelDefault
}
return mc, nil
}
// GetMachineConfigName generates machine config name from the performance profile
func GetMachineConfigName(profile *performancev2.PerformanceProfile) string {
name := components.GetComponentName(profile.Name, components.ComponentNamePrefix)
return fmt.Sprintf("50-%s", name)
}
func getIgnitionConfig(profile *performancev2.PerformanceProfile) (*igntypes.Config, error) {
ignitionConfig := &igntypes.Config{
Ignition: igntypes.Ignition{
Version: defaultIgnitionVersion,
},
Storage: igntypes.Storage{
Files: []igntypes.File{},
},
}
// add script files under the node /usr/local/bin directory
mode := 0700
for _, script := range []string{hugepagesAllocation, ociHooks, setRPSMask} {
dst := GetBashScriptPath(script)
content, err := assets.Scripts.ReadFile(fmt.Sprintf("scripts/%s.sh", script))
if err != nil {
return nil, err
}
AddContent(ignitionConfig, content, dst, &mode)
}
// add crio config snippet under the node /etc/crio/crio.conf.d/ directory
crioConfdRuntimesMode := 0644
crioConfigSnippetContent, err := renderCrioConfigSnippet(profile, filepath.Join("configs", crioRuntimesConfig))
if err != nil {
return nil, err
}
crioConfSnippetDst := filepath.Join(crioConfd, crioRuntimesConfig)
AddContent(ignitionConfig, crioConfigSnippetContent, crioConfSnippetDst, &crioConfdRuntimesMode)
// add crio hooks config under the node cri-o hook directory
crioHooksConfigsMode := 0644
ociHooksConfigContent, err := GetOCIHooksConfigContent(OCIHooksConfig, profile)
if err != nil {
return nil, err
}
ociHookConfigDst := filepath.Join(OCIHooksConfigDir, OCIHooksConfig)
AddContent(ignitionConfig, ociHooksConfigContent, ociHookConfigDst, &crioHooksConfigsMode)
// add rps udev rule
rpsRulesMode := 0644
rpsRulesContent, err := assets.Configs.ReadFile(filepath.Join("configs", udevRpsRules))
if err != nil {
return nil, err
}
rpsRulesDst := filepath.Join(udevRulesDir, udevRpsRules)
AddContent(ignitionConfig, rpsRulesContent, rpsRulesDst, &rpsRulesMode)
if profile.Spec.HugePages != nil {
for _, page := range profile.Spec.HugePages.Pages {
// we already allocated non NUMA specific hugepages via kernel arguments
if page.Node == nil {
continue
}
hugepagesSize, err := GetHugepagesSizeKilobytes(page.Size)
if err != nil {
return nil, err
}
hugepagesService, err := GetSystemdContent(GetHugepagesAllocationUnitOptions(
hugepagesSize,
page.Count,
*page.Node,
))
if err != nil {
return nil, err
}
ignitionConfig.Systemd.Units = append(ignitionConfig.Systemd.Units, igntypes.Unit{
Contents: &hugepagesService,
Enabled: pointer.BoolPtr(true),
Name: GetSystemdService(fmt.Sprintf("%s-%skB-NUMA%d", hugepagesAllocation, hugepagesSize, *page.Node)),
})
}
}
if profile.Spec.CPU != nil && profile.Spec.CPU.Reserved != nil {
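		// derive the RPS CPU mask from the reserved CPU set so receive packet steering stays on the reserved (housekeeping) CPUs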
rpsMask, err := components.CPUListToMaskList(string(*profile.Spec.CPU.Reserved))
if err != nil {
return nil, err
}
rpsService, err := GetSystemdContent(getRPSUnitOptions(rpsMask))
if err != nil {
return nil, err
}
ignitionConfig.Systemd.Units = append(ignitionConfig.Systemd.Units, igntypes.Unit{
Contents: &rpsService,
Name: GetSystemdService("update-rps@"),
})
}
return ignitionConfig, nil
}
//GetBashScriptPath returns the script path containing the directory and the script name
func GetBashScriptPath(scriptName string) string {
return fmt.Sprintf("%s/%s.sh", bashScriptsDir, scriptName)
}
func getSystemdEnvironment(key string, value string) string {
return fmt.Sprintf("%s=%s", key, value)
}
//GetSystemdService returns the service name in systemd
func GetSystemdService(serviceName string) string {
return fmt.Sprintf("%s.service", serviceName)
}
//GetSystemdContent gets the systemd content from a list of unit options
func GetSystemdContent(options []*unit.UnitOption) (string, error) {
outReader := unit.Serialize(options)
outBytes, err := ioutil.ReadAll(outReader)
if err != nil {
return "", err
}
return string(outBytes), nil
}
// GetOCIHooksConfigContent reads and returns the content of the OCI hook file
func GetOCIHooksConfigContent(configFile string, profile *performancev2.PerformanceProfile) ([]byte, error) {
ociHookConfigTemplate, err := template.ParseFS(assets.Configs, filepath.Join("configs", configFile))
if err != nil {
return nil, err
}
rpsMask := "0" // RPS disabled
if profile.Spec.CPU != nil && profile.Spec.CPU.Reserved != nil {
rpsMask, err = components.CPUListToMaskList(string(*profile.Spec.CPU.Reserved))
if err != nil {
return nil, err
}
}
outContent := &bytes.Buffer{}
templateArgs := map[string]string{ociTemplateRPSMask: rpsMask}
if err := ociHookConfigTemplate.Execute(outContent, templateArgs); err != nil {
return nil, err
}
return outContent.Bytes(), nil
}
// GetHugepagesSizeKilobytes returns the hugepages size in kilobytes
func GetHugepagesSizeKilobytes(hugepagesSize performancev2.HugePageSize) (string, error) {
switch hugepagesSize {
case "1G":
return "1048576", nil
case "2M":
return "2 | {
name := GetMachineConfigName(profile)
mc := &machineconfigv1.MachineConfig{
TypeMeta: metav1.TypeMeta{
APIVersion: machineconfigv1.GroupVersion.String(),
Kind: "MachineConfig",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: profilecomponent.GetMachineConfigLabel(profile),
},
Spec: machineconfigv1.MachineConfigSpec{},
}
ignitionConfig, err := getIgnitionConfig(profile)
if err != nil {
return nil, err
}
rawIgnition, err := json.Marshal(ignitionConfig) | identifier_body |
machineconfig.go | OCIHooksConfig file contains the low latency hooks configuration
OCIHooksConfig = "99-low-latency-hooks.json"
ociTemplateRPSMask = "RPSMask"
udevRulesDir = "/etc/udev/rules.d"
udevRpsRules = "99-netdev-rps.rules"
// scripts
hugepagesAllocation = "hugepages-allocation"
ociHooks = "low-latency-hooks"
setRPSMask = "set-rps-mask"
)
const (
systemdSectionUnit = "Unit"
systemdSectionService = "Service"
systemdSectionInstall = "Install"
systemdDescription = "Description"
systemdBefore = "Before"
systemdEnvironment = "Environment"
systemdType = "Type"
systemdRemainAfterExit = "RemainAfterExit"
systemdExecStart = "ExecStart"
systemdWantedBy = "WantedBy"
)
const (
systemdServiceKubelet = "kubelet.service"
systemdServiceTypeOneshot = "oneshot"
systemdTargetMultiUser = "multi-user.target"
systemdTrue = "true"
)
const (
environmentHugepagesSize = "HUGEPAGES_SIZE"
environmentHugepagesCount = "HUGEPAGES_COUNT"
environmentNUMANode = "NUMA_NODE"
)
const (
templateReservedCpus = "ReservedCpus"
)
// New returns new machine configuration object for performance sensitive workloads
func New(profile *performancev2.PerformanceProfile) (*machineconfigv1.MachineConfig, error) {
name := GetMachineConfigName(profile)
mc := &machineconfigv1.MachineConfig{
TypeMeta: metav1.TypeMeta{
APIVersion: machineconfigv1.GroupVersion.String(),
Kind: "MachineConfig",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: profilecomponent.GetMachineConfigLabel(profile),
},
Spec: machineconfigv1.MachineConfigSpec{},
}
ignitionConfig, err := getIgnitionConfig(profile)
if err != nil {
return nil, err
}
rawIgnition, err := json.Marshal(ignitionConfig)
if err != nil {
return nil, err
}
mc.Spec.Config = runtime.RawExtension{Raw: rawIgnition}
enableRTKernel := profile.Spec.RealTimeKernel != nil &&
profile.Spec.RealTimeKernel.Enabled != nil &&
*profile.Spec.RealTimeKernel.Enabled
if enableRTKernel {
mc.Spec.KernelType = MCKernelRT
} else {
mc.Spec.KernelType = MCKernelDefault
}
return mc, nil
}
// GetMachineConfigName generates machine config name from the performance profile
func GetMachineConfigName(profile *performancev2.PerformanceProfile) string {
name := components.GetComponentName(profile.Name, components.ComponentNamePrefix)
return fmt.Sprintf("50-%s", name)
}
func getIgnitionConfig(profile *performancev2.PerformanceProfile) (*igntypes.Config, error) {
ignitionConfig := &igntypes.Config{
Ignition: igntypes.Ignition{
Version: defaultIgnitionVersion,
},
Storage: igntypes.Storage{
Files: []igntypes.File{},
},
}
// add script files under the node /usr/local/bin directory
mode := 0700
for _, script := range []string{hugepagesAllocation, ociHooks, setRPSMask} {
dst := GetBashScriptPath(script)
content, err := assets.Scripts.ReadFile(fmt.Sprintf("scripts/%s.sh", script))
if err != nil {
return nil, err
}
AddContent(ignitionConfig, content, dst, &mode)
}
// add crio config snippet under the node /etc/crio/crio.conf.d/ directory
crioConfdRuntimesMode := 0644
crioConfigSnippetContent, err := renderCrioConfigSnippet(profile, filepath.Join("configs", crioRuntimesConfig))
if err != nil {
return nil, err
}
crioConfSnippetDst := filepath.Join(crioConfd, crioRuntimesConfig)
AddContent(ignitionConfig, crioConfigSnippetContent, crioConfSnippetDst, &crioConfdRuntimesMode)
// add crio hooks config under the node cri-o hook directory
crioHooksConfigsMode := 0644
ociHooksConfigContent, err := GetOCIHooksConfigContent(OCIHooksConfig, profile)
if err != nil {
return nil, err
}
ociHookConfigDst := filepath.Join(OCIHooksConfigDir, OCIHooksConfig)
AddContent(ignitionConfig, ociHooksConfigContent, ociHookConfigDst, &crioHooksConfigsMode)
// add rps udev rule
rpsRulesMode := 0644
rpsRulesContent, err := assets.Configs.ReadFile(filepath.Join("configs", udevRpsRules))
if err != nil {
return nil, err
}
rpsRulesDst := filepath.Join(udevRulesDir, udevRpsRules)
AddContent(ignitionConfig, rpsRulesContent, rpsRulesDst, &rpsRulesMode)
if profile.Spec.HugePages != nil {
for _, page := range profile.Spec.HugePages.Pages {
// we already allocated non NUMA specific hugepages via kernel arguments
if page.Node == nil {
continue
}
hugepagesSize, err := GetHugepagesSizeKilobytes(page.Size)
if err != nil {
return nil, err
}
hugepagesService, err := GetSystemdContent(GetHugepagesAllocationUnitOptions(
hugepagesSize,
page.Count,
*page.Node,
))
if err != nil {
return nil, err
}
ignitionConfig.Systemd.Units = append(ignitionConfig.Systemd.Units, igntypes.Unit{
Contents: &hugepagesService,
Enabled: pointer.BoolPtr(true),
Name: GetSystemdService(fmt.Sprintf("%s-%skB-NUMA%d", hugepagesAllocation, hugepagesSize, *page.Node)),
})
}
}
if profile.Spec.CPU != nil && profile.Spec.CPU.Reserved != nil {
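		// derive the RPS CPU mask from the reserved CPU set so receive packet steering stays on the reserved (housekeeping) CPUs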
rpsMask, err := components.CPUListToMaskList(string(*profile.Spec.CPU.Reserved))
if err != nil {
return nil, err
}
rpsService, err := GetSystemdContent(getRPSUnitOptions(rpsMask))
if err != nil {
return nil, err
}
ignitionConfig.Systemd.Units = append(ignitionConfig.Systemd.Units, igntypes.Unit{
Contents: &rpsService,
Name: GetSystemdService("update-rps@"),
})
}
return ignitionConfig, nil
}
//GetBashScriptPath returns the script path containing the directory and the script name
func GetBashScriptPath(scriptName string) string {
return fmt.Sprintf("%s/%s.sh", bashScriptsDir, scriptName)
}
func getSystemdEnvironment(key string, value string) string {
return fmt.Sprintf("%s=%s", key, value)
}
//GetSystemdService returns the service name in systemd
func GetSystemdService(serviceName string) string {
return fmt.Sprintf("%s.service", serviceName)
}
//GetSystemdContent gets the systemd content from a list of unit options
func GetSystemdContent(options []*unit.UnitOption) (string, error) {
outReader := unit.Serialize(options)
outBytes, err := ioutil.ReadAll(outReader)
if err != nil {
return "", err
}
return string(outBytes), nil
}
// GetOCIHooksConfigContent reads and returns the content of the OCI hook file
func GetOCIHooksConfigContent(configFile string, profile *performancev2.PerformanceProfile) ([]byte, error) {
ociHookConfigTemplate, err := template.ParseFS(assets.Configs, filepath.Join("configs", configFile))
if err != nil {
return nil, err
}
rpsMask := "0" // RPS disabled
if profile.Spec.CPU != nil && profile.Spec.CPU.Reserved != nil {
rpsMask, err = components.CPUListToMaskList(string(*profile.Spec.CPU.Reserved))
if err != nil {
return nil, err
}
}
outContent := &bytes.Buffer{}
templateArgs := map[string]string{ociTemplateRPSMask: rpsMask}
if err := ociHookConfigTemplate.Execute(outContent, templateArgs); err != nil {
return nil, err
}
return outContent.Bytes(), nil
}
// GetHugepagesSizeKilobytes returns the hugepages size in kilobytes
func GetHugepagesSizeKilobytes(hugepagesSize performancev2.HugePageSize) (string, error) {
switch hugepagesSize {
case "1G":
return "1048576", nil
case "2M":
return "2048", nil
default:
return "", fmt.Errorf("can not convert size %q to kilobytes", hugepagesSize)
}
}
//GetHugepagesAllocationUnitOptions returns a list of unit options based on the hugepage settings
func | GetHugepagesAllocationUnitOptions | identifier_name |
|
machineconfig.go | = "/etc/crio/crio.conf.d"
crioRuntimesConfig = "99-runtimes.conf"
// OCIHooksConfigDir is the default directory for the OCI hooks
OCIHooksConfigDir = "/etc/containers/oci/hooks.d"
// OCIHooksConfig file contains the low latency hooks configuration
OCIHooksConfig = "99-low-latency-hooks.json"
ociTemplateRPSMask = "RPSMask"
udevRulesDir = "/etc/udev/rules.d"
udevRpsRules = "99-netdev-rps.rules"
// scripts
hugepagesAllocation = "hugepages-allocation"
ociHooks = "low-latency-hooks"
setRPSMask = "set-rps-mask"
)
const (
systemdSectionUnit = "Unit"
systemdSectionService = "Service"
systemdSectionInstall = "Install"
systemdDescription = "Description"
systemdBefore = "Before"
systemdEnvironment = "Environment"
systemdType = "Type"
systemdRemainAfterExit = "RemainAfterExit"
systemdExecStart = "ExecStart"
systemdWantedBy = "WantedBy"
)
const (
systemdServiceKubelet = "kubelet.service"
systemdServiceTypeOneshot = "oneshot"
systemdTargetMultiUser = "multi-user.target"
systemdTrue = "true"
)
const (
environmentHugepagesSize = "HUGEPAGES_SIZE"
environmentHugepagesCount = "HUGEPAGES_COUNT"
environmentNUMANode = "NUMA_NODE"
)
const (
templateReservedCpus = "ReservedCpus"
)
// New returns new machine configuration object for performance sensitive workloads
func New(profile *performancev2.PerformanceProfile) (*machineconfigv1.MachineConfig, error) {
name := GetMachineConfigName(profile)
mc := &machineconfigv1.MachineConfig{
TypeMeta: metav1.TypeMeta{
APIVersion: machineconfigv1.GroupVersion.String(),
Kind: "MachineConfig",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: profilecomponent.GetMachineConfigLabel(profile),
},
Spec: machineconfigv1.MachineConfigSpec{},
}
ignitionConfig, err := getIgnitionConfig(profile)
if err != nil {
return nil, err
}
rawIgnition, err := json.Marshal(ignitionConfig)
if err != nil {
return nil, err
}
mc.Spec.Config = runtime.RawExtension{Raw: rawIgnition}
enableRTKernel := profile.Spec.RealTimeKernel != nil &&
profile.Spec.RealTimeKernel.Enabled != nil &&
*profile.Spec.RealTimeKernel.Enabled
if enableRTKernel {
mc.Spec.KernelType = MCKernelRT
} else {
mc.Spec.KernelType = MCKernelDefault
}
return mc, nil
}
// GetMachineConfigName generates machine config name from the performance profile
func GetMachineConfigName(profile *performancev2.PerformanceProfile) string {
name := components.GetComponentName(profile.Name, components.ComponentNamePrefix)
return fmt.Sprintf("50-%s", name)
}
func getIgnitionConfig(profile *performancev2.PerformanceProfile) (*igntypes.Config, error) {
ignitionConfig := &igntypes.Config{
Ignition: igntypes.Ignition{
Version: defaultIgnitionVersion,
},
Storage: igntypes.Storage{
Files: []igntypes.File{},
},
}
// add script files under the node /usr/local/bin directory
mode := 0700
for _, script := range []string{hugepagesAllocation, ociHooks, setRPSMask} {
dst := GetBashScriptPath(script)
content, err := assets.Scripts.ReadFile(fmt.Sprintf("scripts/%s.sh", script))
if err != nil {
return nil, err
}
AddContent(ignitionConfig, content, dst, &mode)
}
// add crio config snippet under the node /etc/crio/crio.conf.d/ directory
crioConfdRuntimesMode := 0644
crioConfigSnippetContent, err := renderCrioConfigSnippet(profile, filepath.Join("configs", crioRuntimesConfig))
if err != nil {
return nil, err
}
crioConfSnippetDst := filepath.Join(crioConfd, crioRuntimesConfig)
AddContent(ignitionConfig, crioConfigSnippetContent, crioConfSnippetDst, &crioConfdRuntimesMode)
// add crio hooks config under the node cri-o hook directory
crioHooksConfigsMode := 0644
ociHooksConfigContent, err := GetOCIHooksConfigContent(OCIHooksConfig, profile)
if err != nil {
return nil, err
}
ociHookConfigDst := filepath.Join(OCIHooksConfigDir, OCIHooksConfig)
AddContent(ignitionConfig, ociHooksConfigContent, ociHookConfigDst, &crioHooksConfigsMode)
// add rps udev rule
rpsRulesMode := 0644
rpsRulesContent, err := assets.Configs.ReadFile(filepath.Join("configs", udevRpsRules))
if err != nil {
return nil, err
}
rpsRulesDst := filepath.Join(udevRulesDir, udevRpsRules)
AddContent(ignitionConfig, rpsRulesContent, rpsRulesDst, &rpsRulesMode)
if profile.Spec.HugePages != nil {
for _, page := range profile.Spec.HugePages.Pages {
// we already allocated non NUMA specific hugepages via kernel arguments
if page.Node == nil {
continue
}
hugepagesSize, err := GetHugepagesSizeKilobytes(page.Size)
if err != nil {
return nil, err
}
hugepagesService, err := GetSystemdContent(GetHugepagesAllocationUnitOptions(
hugepagesSize,
page.Count,
*page.Node,
))
if err != nil {
return nil, err
}
ignitionConfig.Systemd.Units = append(ignitionConfig.Systemd.Units, igntypes.Unit{
Contents: &hugepagesService,
Enabled: pointer.BoolPtr(true),
Name: GetSystemdService(fmt.Sprintf("%s-%skB-NUMA%d", hugepagesAllocation, hugepagesSize, *page.Node)),
})
}
}
if profile.Spec.CPU != nil && profile.Spec.CPU.Reserved != nil {
rpsMask, err := components.CPUListToMaskList(string(*profile.Spec.CPU.Reserved))
if err != nil {
return nil, err
}
rpsService, err := GetSystemdContent(getRPSUnitOptions(rpsMask))
if err != nil {
return nil, err
}
ignitionConfig.Systemd.Units = append(ignitionConfig.Systemd.Units, igntypes.Unit{
Contents: &rpsService,
Name: GetSystemdService("update-rps@"),
})
}
return ignitionConfig, nil
}
// GetBashScriptPath returns the script path containing the directory and the script name
func GetBashScriptPath(scriptName string) string {
return fmt.Sprintf("%s/%s.sh", bashScriptsDir, scriptName)
}
func getSystemdEnvironment(key string, value string) string {
return fmt.Sprintf("%s=%s", key, value)
}
//GetSystemdService returns the service name in systemd
func GetSystemdService(serviceName string) string {
return fmt.Sprintf("%s.service", serviceName)
}
// GetSystemdContent gets the systemd unit content from a list of unit options
func GetSystemdContent(options []*unit.UnitOption) (string, error) {
outReader := unit.Serialize(options)
outBytes, err := ioutil.ReadAll(outReader)
if err != nil {
return "", err
}
return string(outBytes), nil
}
// GetOCIHooksConfigContent reads and returns the content of the OCI hook file
func GetOCIHooksConfigContent(configFile string, profile *performancev2.PerformanceProfile) ([]byte, error) {
ociHookConfigTemplate, err := template.ParseFS(assets.Configs, filepath.Join("configs", configFile))
if err != nil |
rpsMask := "0" // RPS disabled
if profile.Spec.CPU != nil && profile.Spec.CPU.Reserved != nil {
rpsMask, err = components.CPUListToMaskList(string(*profile.Spec.CPU.Reserved))
if err != nil {
return nil, err
}
}
outContent := &bytes.Buffer{}
templateArgs := map[string]string{ociTemplateRPSMask: rpsMask}
if err := ociHookConfigTemplate.Execute(outContent, templateArgs); err != nil {
return nil, err
}
return outContent.Bytes(), nil
}
// GetHugepagesSizeKilobytes returns the hugepages size in kilobytes
func GetHugepagesSizeKilobytes(hugepagesSize performancev2.HugePageSize) (string, error) {
switch hugepagesSize {
case "1G":
return "1048576", nil
case "2M":
return "2 | {
return nil, err
} | conditional_block |
machineconfig.go | itionVersion,
},
Storage: igntypes.Storage{
Files: []igntypes.File{},
},
}
// add script files under the node /usr/local/bin directory
mode := 0700
for _, script := range []string{hugepagesAllocation, ociHooks, setRPSMask} {
dst := GetBashScriptPath(script)
content, err := assets.Scripts.ReadFile(fmt.Sprintf("scripts/%s.sh", script))
if err != nil {
return nil, err
}
AddContent(ignitionConfig, content, dst, &mode)
}
// add crio config snippet under the node /etc/crio/crio.conf.d/ directory
crioConfdRuntimesMode := 0644
crioConfigSnippetContent, err := renderCrioConfigSnippet(profile, filepath.Join("configs", crioRuntimesConfig))
if err != nil {
return nil, err
}
crioConfSnippetDst := filepath.Join(crioConfd, crioRuntimesConfig)
AddContent(ignitionConfig, crioConfigSnippetContent, crioConfSnippetDst, &crioConfdRuntimesMode)
// add crio hooks config under the node cri-o hook directory
crioHooksConfigsMode := 0644
ociHooksConfigContent, err := GetOCIHooksConfigContent(OCIHooksConfig, profile)
if err != nil {
return nil, err
}
ociHookConfigDst := filepath.Join(OCIHooksConfigDir, OCIHooksConfig)
AddContent(ignitionConfig, ociHooksConfigContent, ociHookConfigDst, &crioHooksConfigsMode)
// add rps udev rule
rpsRulesMode := 0644
rpsRulesContent, err := assets.Configs.ReadFile(filepath.Join("configs", udevRpsRules))
if err != nil {
return nil, err
}
rpsRulesDst := filepath.Join(udevRulesDir, udevRpsRules)
AddContent(ignitionConfig, rpsRulesContent, rpsRulesDst, &rpsRulesMode)
if profile.Spec.HugePages != nil {
for _, page := range profile.Spec.HugePages.Pages {
// we already allocated non NUMA specific hugepages via kernel arguments
if page.Node == nil {
continue
}
hugepagesSize, err := GetHugepagesSizeKilobytes(page.Size)
if err != nil {
return nil, err
}
hugepagesService, err := GetSystemdContent(GetHugepagesAllocationUnitOptions(
hugepagesSize,
page.Count,
*page.Node,
))
if err != nil {
return nil, err
}
ignitionConfig.Systemd.Units = append(ignitionConfig.Systemd.Units, igntypes.Unit{
Contents: &hugepagesService,
Enabled: pointer.BoolPtr(true),
Name: GetSystemdService(fmt.Sprintf("%s-%skB-NUMA%d", hugepagesAllocation, hugepagesSize, *page.Node)),
})
}
}
if profile.Spec.CPU != nil && profile.Spec.CPU.Reserved != nil {
rpsMask, err := components.CPUListToMaskList(string(*profile.Spec.CPU.Reserved))
if err != nil {
return nil, err
}
rpsService, err := GetSystemdContent(getRPSUnitOptions(rpsMask))
if err != nil {
return nil, err
}
ignitionConfig.Systemd.Units = append(ignitionConfig.Systemd.Units, igntypes.Unit{
Contents: &rpsService,
Name: GetSystemdService("update-rps@"),
})
}
return ignitionConfig, nil
}
// GetBashScriptPath returns the script path containing the directory and the script name
func GetBashScriptPath(scriptName string) string {
return fmt.Sprintf("%s/%s.sh", bashScriptsDir, scriptName)
}
func getSystemdEnvironment(key string, value string) string {
return fmt.Sprintf("%s=%s", key, value)
}
//GetSystemdService returns the service name in systemd
func GetSystemdService(serviceName string) string {
return fmt.Sprintf("%s.service", serviceName)
}
// GetSystemdContent gets the systemd unit content from a list of unit options
func GetSystemdContent(options []*unit.UnitOption) (string, error) {
outReader := unit.Serialize(options)
outBytes, err := ioutil.ReadAll(outReader)
if err != nil {
return "", err
}
return string(outBytes), nil
}
// GetOCIHooksConfigContent reads and returns the content of the OCI hook file
func GetOCIHooksConfigContent(configFile string, profile *performancev2.PerformanceProfile) ([]byte, error) {
ociHookConfigTemplate, err := template.ParseFS(assets.Configs, filepath.Join("configs", configFile))
if err != nil {
return nil, err
}
rpsMask := "0" // RPS disabled
if profile.Spec.CPU != nil && profile.Spec.CPU.Reserved != nil {
rpsMask, err = components.CPUListToMaskList(string(*profile.Spec.CPU.Reserved))
if err != nil {
return nil, err
}
}
outContent := &bytes.Buffer{}
templateArgs := map[string]string{ociTemplateRPSMask: rpsMask}
if err := ociHookConfigTemplate.Execute(outContent, templateArgs); err != nil {
return nil, err
}
return outContent.Bytes(), nil
}
// GetHugepagesSizeKilobytes returns the hugepages size in kilobytes
func GetHugepagesSizeKilobytes(hugepagesSize performancev2.HugePageSize) (string, error) {
switch hugepagesSize {
case "1G":
return "1048576", nil
case "2M":
return "2048", nil
default:
return "", fmt.Errorf("can not convert size %q to kilobytes", hugepagesSize)
}
}
// GetHugepagesAllocationUnitOptions returns a list of unit options based on the hugepage settings
func GetHugepagesAllocationUnitOptions(hugepagesSize string, hugepagesCount int32, numaNode int32) []*unit.UnitOption {
return []*unit.UnitOption{
// [Unit]
// Description
unit.NewUnitOption(systemdSectionUnit, systemdDescription, fmt.Sprintf("Hugepages-%skB allocation on the node %d", hugepagesSize, numaNode)),
// Before
unit.NewUnitOption(systemdSectionUnit, systemdBefore, systemdServiceKubelet),
// [Service]
// Environment
unit.NewUnitOption(systemdSectionService, systemdEnvironment, getSystemdEnvironment(environmentHugepagesCount, fmt.Sprint(hugepagesCount))),
unit.NewUnitOption(systemdSectionService, systemdEnvironment, getSystemdEnvironment(environmentHugepagesSize, hugepagesSize)),
unit.NewUnitOption(systemdSectionService, systemdEnvironment, getSystemdEnvironment(environmentNUMANode, fmt.Sprint(numaNode))),
// Type
unit.NewUnitOption(systemdSectionService, systemdType, systemdServiceTypeOneshot),
// RemainAfterExit
unit.NewUnitOption(systemdSectionService, systemdRemainAfterExit, systemdTrue),
// ExecStart
unit.NewUnitOption(systemdSectionService, systemdExecStart, GetBashScriptPath(hugepagesAllocation)),
// [Install]
// WantedBy
unit.NewUnitOption(systemdSectionInstall, systemdWantedBy, systemdTargetMultiUser),
}
}
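// Illustrative aside (not part of the original source): passing the options above
// through GetSystemdContent for, e.g., 4 x 1G pages on NUMA node 0
// (hugepagesSize "1048576", hugepagesCount 4, numaNode 0) yields a unit roughly
// like the following. The ExecStart path assumes bashScriptsDir is /usr/local/bin,
// matching the "/usr/local/bin" comment in getIgnitionConfig.
//
//   [Unit]
//   Description=Hugepages-1048576kB allocation on the node 0
//   Before=kubelet.service
//
//   [Service]
//   Environment=HUGEPAGES_COUNT=4
//   Environment=HUGEPAGES_SIZE=1048576
//   Environment=NUMA_NODE=0
//   Type=oneshot
//   RemainAfterExit=true
//   ExecStart=/usr/local/bin/hugepages-allocation.sh
//
//   [Install]
//   WantedBy=multi-user.target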
func getRPSUnitOptions(rpsMask string) []*unit.UnitOption {
cmd := fmt.Sprintf("%s %%i %s", GetBashScriptPath(setRPSMask), rpsMask)
return []*unit.UnitOption{
// [Unit]
// Description
unit.NewUnitOption(systemdSectionUnit, systemdDescription, "Sets network devices RPS mask"),
// [Service]
// Type
unit.NewUnitOption(systemdSectionService, systemdType, systemdServiceTypeOneshot),
// ExecStart
unit.NewUnitOption(systemdSectionService, systemdExecStart, cmd),
}
}
//AddContent appends more content to the ignition configuration
func AddContent(ignitionConfig *igntypes.Config, content []byte, dst string, mode *int) {
contentBase64 := base64.StdEncoding.EncodeToString(content)
ignitionConfig.Storage.Files = append(ignitionConfig.Storage.Files, igntypes.File{
Node: igntypes.Node{
Path: dst,
},
FileEmbedded1: igntypes.FileEmbedded1{
Contents: igntypes.Resource{
Source: pointer.StringPtr(fmt.Sprintf("%s,%s", defaultIgnitionContentSource, contentBase64)),
},
Mode: mode,
},
})
}
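// Illustrative aside (not part of the original source): AddContent embeds the file
// as a base64 data URL. Assuming defaultIgnitionContentSource is a "data:...;base64"
// prefix (it is defined elsewhere in this package), a call such as
//
//   mode := 0644
//   AddContent(cfg, []byte("hello"), "/etc/example.conf", &mode)
//
// appends a storage entry with Path "/etc/example.conf", Mode 0644 and a Source
// ending in "aGVsbG8=", the base64 encoding of "hello".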
func renderCrioConfigSnippet(profile *performancev2.PerformanceProfile, src string) ([]byte, error) {
templateArgs := make(map[string]string)
if profile.Spec.CPU.Reserved != nil {
templateArgs[templateReservedCpus] = string(*profile.Spec.CPU.Reserved)
}
profileTemplate, err := template.ParseFS(assets.Configs, src)
if err != nil {
return nil, err | }
| random_line_split |
|
main.go | return cli.WithContext(func(ctx context.Context, g *errgroup.Group) error {
return run(ctx, g, &opts, args)
})
},
}
// AddCommand adds the 'run' command to cmd.
func AddCommand(c *cobra.Command) {
c.AddCommand(cmd)
fs := cmd.Flags()
fs.SortFlags = false
fs.StringSliceVarP(&opts.Range, "range", "r", nil, "set range `from-to`")
fs.StringVar(&opts.RangeFormat, "range-format", "%d", "set `format` for range")
fs.StringVarP(&opts.Filename, "file", "f", "", "read values from `filename`")
fs.StringVar(&opts.Logfile, "logfile", "", "write copy of printed messages to `filename`.log")
fs.StringVar(&opts.Logdir, "logdir", os.Getenv("MONSOON_LOG_DIR"), "automatically log all output to files in `dir`")
fs.IntVarP(&opts.Threads, "threads", "t", 5, "make as many as `n` parallel requests")
fs.IntVar(&opts.BufferSize, "buffer-size", 100000, "set number of buffered items to `n`")
fs.IntVar(&opts.Skip, "skip", 0, "skip the first `n` requests")
fs.IntVar(&opts.Limit, "limit", 0, "only run `n` requests, then exit")
fs.Float64Var(&opts.RequestsPerSecond, "requests-per-second", 0, "do at most `n` requests per second (e.g. 0.5)")
// add all options to define a request
opts.Request = request.New("")
request.AddFlags(opts.Request, fs)
fs.IntVar(&opts.FollowRedirect, "follow-redirect", 0, "follow `n` redirects")
fs.StringSliceVar(&opts.HideStatusCodes, "hide-status", nil, "hide responses with this status `code,[code-code],[-code],[...]`")
fs.StringSliceVar(&opts.ShowStatusCodes, "show-status", nil, "show only responses with this status `code,[code-code],[code-],[...]`")
fs.StringSliceVar(&opts.HideHeaderSize, "hide-header-size", nil, "hide responses with this header size (`size,from-to,from-,-to`)")
fs.StringSliceVar(&opts.HideBodySize, "hide-body-size", nil, "hide responses with this body size (`size,from-to,from-,-to`)")
fs.StringArrayVar(&opts.HidePattern, "hide-pattern", nil, "hide responses containing `regex` in response header or body (can be specified multiple times)")
fs.StringArrayVar(&opts.ShowPattern, "show-pattern", nil, "show only responses containing `regex` in response header or body (can be specified multiple times)")
fs.StringArrayVar(&opts.Extract, "extract", nil, "extract `regex` from response body (can be specified multiple times)")
fs.StringArrayVar(&opts.ExtractPipe, "extract-pipe", nil, "pipe response body to `cmd` to extract data (can be specified multiple times)")
fs.IntVar(&opts.MaxBodySize, "max-body-size", 5, "read at most `n` MiB from a returned response body (used for extracting data from the body)")
}
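// Illustrative aside (not part of the original source): a hypothetical invocation
// wiring several of the flags above together:
//
//   monsoon fuzz --range 0-999 --range-format "%03d" \
//       --threads 10 --hide-status 404 https://example.com/item-FUZZ
//
// The FUZZ placeholder substitution is handled by the request package and is
// assumed here; it is not defined in this file.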
// logfilePath returns the prefix for the logfiles, if any.
func logfilePath(opts *Options, inputURL string) (prefix string, err error) {
if opts.Logdir != "" && opts.Logfile == "" {
url, err := url.Parse(inputURL)
if err != nil {
return "", err
}
ts := time.Now().Format("20060102_150405")
fn := fmt.Sprintf("monsoon_%s_%s", url.Host, ts)
p := filepath.Join(opts.Logdir, fn)
return p, nil
}
return opts.Logfile, nil
}
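// Illustrative aside (not part of the original source): with --logdir /tmp/logs
// (or MONSOON_LOG_DIR) set and no explicit --logfile, a target of
// https://example.com/ yields a prefix like
// "/tmp/logs/monsoon_example.com_20230102_150405"; setupTerminal later appends
// the ".log" extension when creating the file.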
func setupProducer(ctx context.Context, g *errgroup.Group, opts *Options, ch chan<- string, count chan<- int) error {
switch {
case len(opts.Range) > 0:
var ranges []producer.Range
for _, r := range opts.Range {
rng, err := producer.ParseRange(r)
if err != nil {
return err
}
ranges = append(ranges, rng)
}
g.Go(func() error {
return producer.Ranges(ctx, ranges, opts.RangeFormat, ch, count)
})
return nil
case opts.Filename == "-":
g.Go(func() error {
return producer.Reader(ctx, os.Stdin, ch, count)
})
return nil
case opts.Filename != "":
file, err := os.Open(opts.Filename)
if err != nil {
return err
}
g.Go(func() error {
return producer.Reader(ctx, file, ch, count)
})
return nil
default:
return errors.New("neither file nor range specified, nothing to do")
}
}
func setupTerminal(ctx context.Context, g *errgroup.Group, maxFrameRate uint, logfilePrefix string) (term cli.Terminal, cleanup func(), err error) {
ctx, cancel := context.WithCancel(context.Background())
statusTerm := termstatus.New(os.Stdout, os.Stderr, false)
if maxFrameRate != 0 {
statusTerm.MaxFrameRate = maxFrameRate
}
term = statusTerm
if logfilePrefix != "" {
fmt.Printf(reporter.Bold("Logfile:")+" %s.log\n", logfilePrefix)
logfile, err := os.Create(logfilePrefix + ".log")
if err != nil {
return nil, cancel, err
}
fmt.Fprintln(logfile, shell.Join(os.Args))
// write copies of messages to logfile
term = &cli.LogTerminal{
Terminal: statusTerm,
Writer: logfile,
}
}
// make sure error messages logged via the log package are printed nicely
w := cli.NewStdioWrapper(term)
log.SetOutput(w.Stderr())
g.Go(func() error {
term.Run(ctx)
return nil
})
return term, cancel, nil
}
func setupResponseFilters(opts *Options) ([]response.Filter, error) {
var filters []response.Filter
filter, err := response.NewFilterStatusCode(opts.HideStatusCodes, opts.ShowStatusCodes)
if err != nil {
return nil, err
}
filters = append(filters, filter)
if len(opts.HideHeaderSize) > 0 || len(opts.HideBodySize) > 0 {
f, err := response.NewFilterSize(opts.HideHeaderSize, opts.HideBodySize)
if err != nil {
return nil, err
}
filters = append(filters, f)
}
if len(opts.hidePattern) > 0 {
filters = append(filters, response.FilterRejectPattern{Pattern: opts.hidePattern})
}
if len(opts.showPattern) > 0 {
filters = append(filters, response.FilterAcceptPattern{Pattern: opts.showPattern})
}
return filters, nil
}
func setupValueFilters(ctx context.Context, opts *Options, valueCh <-chan string, countCh <-chan int) (<-chan string, <-chan int) {
if opts.Skip > 0 {
f := &producer.FilterSkip{Skip: opts.Skip}
countCh = f.Count(ctx, countCh)
valueCh = f.Select(ctx, valueCh)
}
if opts.Limit > 0 {
f := &producer.FilterLimit{Max: opts.Limit}
countCh = f.Count(ctx, countCh)
valueCh = f.Select(ctx, valueCh)
}
return valueCh, countCh
}
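// Illustrative aside (not part of the original source): the skip and limit filters
// compose, so with --skip 10 --limit 5 the first ten generated values are discarded
// and only the next five are passed on to the runners.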
func startRunners(ctx context.Context, opts *Options, in <-chan string) (<-chan response.Response, error) {
out := make(chan response.Response)
var wg sync.WaitGroup
transport, err := response.NewTransport(opts.Request.Insecure, opts.Request.TLSClientKeyCertFile,
opts.Request.DisableHTTP2, opts.Threads)
if err != nil {
return nil, err
}
for i := 0; i < opts.Threads; i++ {
runner := response.NewRunner(transport, opts.Request, in, out)
runner.MaxBodySize = opts.MaxBodySize * 1024 * 1024
runner.Extract = opts.extract
runner.Client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
if len(via) <= opts.FollowRedirect {
return nil
}
return http.ErrUseLastResponse
}
wg.Add(1)
go func() {
runner.Run(ctx)
wg.Done()
}()
}
go func() {
// wait until the runners are done, then close the output channel
wg.Wait()
close(out)
}()
return out, nil
}
func run(ctx context.Context, g *errgroup.Group, opts *Options, args []string) error | {
// make sure the options and arguments are valid
if len(args) == 0 {
return errors.New("last argument needs to be the URL")
}
if len(args) > 1 {
return errors.New("more than one target URL specified")
}
err := opts.valid()
if err != nil {
return err
}
inputURL := args[0]
opts.Request.URL = inputURL
// setup logging and the terminal
logfilePrefix, err := logfilePath(opts, inputURL) | identifier_body |
|
main.go | }
return res, nil
}
func splitShell(cmds []string) ([][]string, error) {
var data [][]string
for _, cmd := range cmds {
args, err := shell.Split(cmd)
if err != nil {
return nil, err
}
if len(args) < 1 {
return nil, fmt.Errorf("invalid command: %q", cmd)
}
data = append(data, args)
}
return data, nil
}
// valid validates the options and returns an error if something is invalid.
func (opts *Options) valid() (err error) {
if opts.Threads <= 0 {
return errors.New("invalid number of threads")
}
if len(opts.Range) > 0 && opts.Filename != "" {
return errors.New("only one source allowed but both range and filename specified")
}
if len(opts.Range) == 0 && opts.Filename == "" {
return errors.New("neither file nor range specified, nothing to do")
}
opts.extract, err = compileRegexps(opts.Extract)
if err != nil {
return err
}
opts.extractPipe, err = splitShell(opts.ExtractPipe)
if err != nil {
return err
}
opts.hidePattern, err = compileRegexps(opts.HidePattern)
if err != nil {
return err
}
opts.showPattern, err = compileRegexps(opts.ShowPattern)
if err != nil {
return err
}
return nil
}
var cmd = &cobra.Command{
Use: "fuzz [options] URL",
DisableFlagsInUseLine: true,
Short: helpShort,
Long: helpLong,
Example: helpExamples,
RunE: func(cmd *cobra.Command, args []string) error {
return cli.WithContext(func(ctx context.Context, g *errgroup.Group) error {
return run(ctx, g, &opts, args)
})
},
}
// AddCommand adds the 'run' command to cmd.
func AddCommand(c *cobra.Command) {
c.AddCommand(cmd)
fs := cmd.Flags()
fs.SortFlags = false
fs.StringSliceVarP(&opts.Range, "range", "r", nil, "set range `from-to`")
fs.StringVar(&opts.RangeFormat, "range-format", "%d", "set `format` for range")
fs.StringVarP(&opts.Filename, "file", "f", "", "read values from `filename`")
fs.StringVar(&opts.Logfile, "logfile", "", "write copy of printed messages to `filename`.log")
fs.StringVar(&opts.Logdir, "logdir", os.Getenv("MONSOON_LOG_DIR"), "automatically log all output to files in `dir`")
fs.IntVarP(&opts.Threads, "threads", "t", 5, "make as many as `n` parallel requests")
fs.IntVar(&opts.BufferSize, "buffer-size", 100000, "set number of buffered items to `n`")
fs.IntVar(&opts.Skip, "skip", 0, "skip the first `n` requests")
fs.IntVar(&opts.Limit, "limit", 0, "only run `n` requests, then exit")
fs.Float64Var(&opts.RequestsPerSecond, "requests-per-second", 0, "do at most `n` requests per second (e.g. 0.5)")
// add all options to define a request
opts.Request = request.New("")
request.AddFlags(opts.Request, fs)
fs.IntVar(&opts.FollowRedirect, "follow-redirect", 0, "follow `n` redirects")
fs.StringSliceVar(&opts.HideStatusCodes, "hide-status", nil, "hide responses with this status `code,[code-code],[-code],[...]`")
fs.StringSliceVar(&opts.ShowStatusCodes, "show-status", nil, "show only responses with this status `code,[code-code],[code-],[...]`")
fs.StringSliceVar(&opts.HideHeaderSize, "hide-header-size", nil, "hide responses with this header size (`size,from-to,from-,-to`)")
fs.StringSliceVar(&opts.HideBodySize, "hide-body-size", nil, "hide responses with this body size (`size,from-to,from-,-to`)")
fs.StringArrayVar(&opts.HidePattern, "hide-pattern", nil, "hide responses containing `regex` in response header or body (can be specified multiple times)")
fs.StringArrayVar(&opts.ShowPattern, "show-pattern", nil, "show only responses containing `regex` in response header or body (can be specified multiple times)")
fs.StringArrayVar(&opts.Extract, "extract", nil, "extract `regex` from response body (can be specified multiple times)")
fs.StringArrayVar(&opts.ExtractPipe, "extract-pipe", nil, "pipe response body to `cmd` to extract data (can be specified multiple times)")
fs.IntVar(&opts.MaxBodySize, "max-body-size", 5, "read at most `n` MiB from a returned response body (used for extracting data from the body)")
}
// logfilePath returns the prefix for the logfiles, if any.
func logfilePath(opts *Options, inputURL string) (prefix string, err error) {
if opts.Logdir != "" && opts.Logfile == "" {
url, err := url.Parse(inputURL)
if err != nil {
return "", err
}
ts := time.Now().Format("20060102_150405")
fn := fmt.Sprintf("monsoon_%s_%s", url.Host, ts)
p := filepath.Join(opts.Logdir, fn)
return p, nil
}
return opts.Logfile, nil
}
func setupProducer(ctx context.Context, g *errgroup.Group, opts *Options, ch chan<- string, count chan<- int) error {
switch {
case len(opts.Range) > 0:
var ranges []producer.Range
for _, r := range opts.Range {
rng, err := producer.ParseRange(r)
if err != nil {
return err
}
ranges = append(ranges, rng)
}
g.Go(func() error {
return producer.Ranges(ctx, ranges, opts.RangeFormat, ch, count)
})
return nil
case opts.Filename == "-":
g.Go(func() error {
return producer.Reader(ctx, os.Stdin, ch, count)
})
return nil
case opts.Filename != "":
file, err := os.Open(opts.Filename)
if err != nil {
return err
}
g.Go(func() error {
return producer.Reader(ctx, file, ch, count)
})
return nil
default:
return errors.New("neither file nor range specified, nothing to do")
}
}
func setupTerminal(ctx context.Context, g *errgroup.Group, maxFrameRate uint, logfilePrefix string) (term cli.Terminal, cleanup func(), err error) {
ctx, cancel := context.WithCancel(context.Background())
statusTerm := termstatus.New(os.Stdout, os.Stderr, false)
if maxFrameRate != 0 {
statusTerm.MaxFrameRate = maxFrameRate
}
term = statusTerm
if logfilePrefix != "" {
fmt.Printf(reporter.Bold("Logfile:")+" %s.log\n", logfilePrefix)
logfile, err := os.Create(logfilePrefix + ".log")
if err != nil {
return nil, cancel, err
}
fmt.Fprintln(logfile, shell.Join(os.Args)) | }
}
// make sure error messages logged via the log package are printed nicely
w := cli.NewStdioWrapper(term)
log.SetOutput(w.Stderr())
g.Go(func() error {
term.Run(ctx)
return nil
})
return term, cancel, nil
}
func setupResponseFilters(opts *Options) ([]response.Filter, error) {
var filters []response.Filter
filter, err := response.NewFilterStatusCode(opts.HideStatusCodes, opts.ShowStatusCodes)
if err != nil {
return nil, err
}
filters = append(filters, filter)
if len(opts.HideHeaderSize) > 0 || len(opts.HideBodySize) > 0 {
f, err := response.NewFilterSize(opts.HideHeaderSize, opts.HideBodySize)
if err != nil {
return nil, err
}
filters = append(filters, f)
}
if len(opts.hidePattern) > 0 {
filters = append(filters, response.FilterRejectPattern{Pattern: opts.hidePattern})
}
if len(opts.showPattern) > 0 {
filters = append(filters, response.FilterAcceptPattern{Pattern: opts.showPattern})
}
return filters, nil
}
func setupValueFilters(ctx context.Context, opts *Options, valueCh <-chan string, countCh <-chan int) (<-chan string, <-chan int) {
if opts.Skip > 0 {
f := &producer.FilterSkip{Skip: opts.Skip}
countCh = f.Count(ctx, countCh)
valueCh = f.Select(ctx, valueCh)
}
if opts.Limit > 0 {
f := &producer.FilterLimit{Max: opts.Limit}
countCh = f.Count(ctx, countCh)
valueCh = f.Select(ctx, valueCh)
}
return valueCh, countCh
}
func startRunners(ctx context.Context, opts *Options, in <-chan string |
// write copies of messages to logfile
term = &cli.LogTerminal{
Terminal: statusTerm,
Writer: logfile, | random_line_split |
main.go | Pipe)
if err != nil {
return err
}
opts.hidePattern, err = compileRegexps(opts.HidePattern)
if err != nil {
return err
}
opts.showPattern, err = compileRegexps(opts.ShowPattern)
if err != nil {
return err
}
return nil
}
var cmd = &cobra.Command{
Use: "fuzz [options] URL",
DisableFlagsInUseLine: true,
Short: helpShort,
Long: helpLong,
Example: helpExamples,
RunE: func(cmd *cobra.Command, args []string) error {
return cli.WithContext(func(ctx context.Context, g *errgroup.Group) error {
return run(ctx, g, &opts, args)
})
},
}
// AddCommand adds the 'run' command to cmd.
func AddCommand(c *cobra.Command) {
c.AddCommand(cmd)
fs := cmd.Flags()
fs.SortFlags = false
fs.StringSliceVarP(&opts.Range, "range", "r", nil, "set range `from-to`")
fs.StringVar(&opts.RangeFormat, "range-format", "%d", "set `format` for range")
fs.StringVarP(&opts.Filename, "file", "f", "", "read values from `filename`")
fs.StringVar(&opts.Logfile, "logfile", "", "write copy of printed messages to `filename`.log")
fs.StringVar(&opts.Logdir, "logdir", os.Getenv("MONSOON_LOG_DIR"), "automatically log all output to files in `dir`")
fs.IntVarP(&opts.Threads, "threads", "t", 5, "make as many as `n` parallel requests")
fs.IntVar(&opts.BufferSize, "buffer-size", 100000, "set number of buffered items to `n`")
fs.IntVar(&opts.Skip, "skip", 0, "skip the first `n` requests")
fs.IntVar(&opts.Limit, "limit", 0, "only run `n` requests, then exit")
fs.Float64Var(&opts.RequestsPerSecond, "requests-per-second", 0, "do at most `n` requests per second (e.g. 0.5)")
// add all options to define a request
opts.Request = request.New("")
request.AddFlags(opts.Request, fs)
fs.IntVar(&opts.FollowRedirect, "follow-redirect", 0, "follow `n` redirects")
fs.StringSliceVar(&opts.HideStatusCodes, "hide-status", nil, "hide responses with this status `code,[code-code],[-code],[...]`")
fs.StringSliceVar(&opts.ShowStatusCodes, "show-status", nil, "show only responses with this status `code,[code-code],[code-],[...]`")
fs.StringSliceVar(&opts.HideHeaderSize, "hide-header-size", nil, "hide responses with this header size (`size,from-to,from-,-to`)")
fs.StringSliceVar(&opts.HideBodySize, "hide-body-size", nil, "hide responses with this body size (`size,from-to,from-,-to`)")
fs.StringArrayVar(&opts.HidePattern, "hide-pattern", nil, "hide responses containing `regex` in response header or body (can be specified multiple times)")
fs.StringArrayVar(&opts.ShowPattern, "show-pattern", nil, "show only responses containing `regex` in response header or body (can be specified multiple times)")
fs.StringArrayVar(&opts.Extract, "extract", nil, "extract `regex` from response body (can be specified multiple times)")
fs.StringArrayVar(&opts.ExtractPipe, "extract-pipe", nil, "pipe response body to `cmd` to extract data (can be specified multiple times)")
fs.IntVar(&opts.MaxBodySize, "max-body-size", 5, "read at most `n` MiB from a returned response body (used for extracting data from the body)")
}
// logfilePath returns the prefix for the logfiles, if any.
func logfilePath(opts *Options, inputURL string) (prefix string, err error) {
if opts.Logdir != "" && opts.Logfile == "" {
url, err := url.Parse(inputURL)
if err != nil {
return "", err
}
ts := time.Now().Format("20060102_150405")
fn := fmt.Sprintf("monsoon_%s_%s", url.Host, ts)
p := filepath.Join(opts.Logdir, fn)
return p, nil
}
return opts.Logfile, nil
}
func setupProducer(ctx context.Context, g *errgroup.Group, opts *Options, ch chan<- string, count chan<- int) error {
switch {
case len(opts.Range) > 0:
var ranges []producer.Range
for _, r := range opts.Range {
rng, err := producer.ParseRange(r)
if err != nil {
return err
}
ranges = append(ranges, rng)
}
g.Go(func() error {
return producer.Ranges(ctx, ranges, opts.RangeFormat, ch, count)
})
return nil
case opts.Filename == "-":
g.Go(func() error {
return producer.Reader(ctx, os.Stdin, ch, count)
})
return nil
case opts.Filename != "":
file, err := os.Open(opts.Filename)
if err != nil {
return err
}
g.Go(func() error {
return producer.Reader(ctx, file, ch, count)
})
return nil
default:
return errors.New("neither file nor range specified, nothing to do")
}
}
func setupTerminal(ctx context.Context, g *errgroup.Group, maxFrameRate uint, logfilePrefix string) (term cli.Terminal, cleanup func(), err error) {
ctx, cancel := context.WithCancel(context.Background())
statusTerm := termstatus.New(os.Stdout, os.Stderr, false)
if maxFrameRate != 0 {
statusTerm.MaxFrameRate = maxFrameRate
}
term = statusTerm
if logfilePrefix != "" {
fmt.Printf(reporter.Bold("Logfile:")+" %s.log\n", logfilePrefix)
logfile, err := os.Create(logfilePrefix + ".log")
if err != nil {
return nil, cancel, err
}
fmt.Fprintln(logfile, shell.Join(os.Args))
// write copies of messages to logfile
term = &cli.LogTerminal{
Terminal: statusTerm,
Writer: logfile,
}
}
// make sure error messages logged via the log package are printed nicely
w := cli.NewStdioWrapper(term)
log.SetOutput(w.Stderr())
g.Go(func() error {
term.Run(ctx)
return nil
})
return term, cancel, nil
}
func setupResponseFilters(opts *Options) ([]response.Filter, error) {
var filters []response.Filter
filter, err := response.NewFilterStatusCode(opts.HideStatusCodes, opts.ShowStatusCodes)
if err != nil {
return nil, err
}
filters = append(filters, filter)
if len(opts.HideHeaderSize) > 0 || len(opts.HideBodySize) > 0 {
f, err := response.NewFilterSize(opts.HideHeaderSize, opts.HideBodySize)
if err != nil {
return nil, err
}
filters = append(filters, f)
}
if len(opts.hidePattern) > 0 {
filters = append(filters, response.FilterRejectPattern{Pattern: opts.hidePattern})
}
if len(opts.showPattern) > 0 {
filters = append(filters, response.FilterAcceptPattern{Pattern: opts.showPattern})
}
return filters, nil
}
func setupValueFilters(ctx context.Context, opts *Options, valueCh <-chan string, countCh <-chan int) (<-chan string, <-chan int) {
if opts.Skip > 0 {
f := &producer.FilterSkip{Skip: opts.Skip}
countCh = f.Count(ctx, countCh)
valueCh = f.Select(ctx, valueCh)
}
if opts.Limit > 0 {
f := &producer.FilterLimit{Max: opts.Limit}
countCh = f.Count(ctx, countCh)
valueCh = f.Select(ctx, valueCh)
}
return valueCh, countCh
}
func startRunners(ctx context.Context, opts *Options, in <-chan string) (<-chan response.Response, error) {
out := make(chan response.Response)
var wg sync.WaitGroup
transport, err := response.NewTransport(opts.Request.Insecure, opts.Request.TLSClientKeyCertFile,
opts.Request.DisableHTTP2, opts.Threads)
if err != nil {
return nil, err
}
for i := 0; i < opts.Threads; i++ {
runner := response.NewRunner(transport, opts.Request, in, out)
runner.MaxBodySize = opts.MaxBodySize * 1024 * 1024
runner.Extract = opts.extract
runner.Client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
if len(via) <= opts.FollowRedirect {
return nil
}
return http.ErrUseLastResponse
}
wg.Add(1)
go func() {
runner.Run(ctx)
wg.Done()
}()
}
go func() {
// wait until the runners are done, then close the output channel
wg.Wait()
close(out)
}()
return out, nil
}
func | run | identifier_name |
|
main.go | errgroup.Group) error {
return run(ctx, g, &opts, args)
})
},
}
// AddCommand adds the 'run' command to cmd.
func AddCommand(c *cobra.Command) {
c.AddCommand(cmd)
fs := cmd.Flags()
fs.SortFlags = false
fs.StringSliceVarP(&opts.Range, "range", "r", nil, "set range `from-to`")
fs.StringVar(&opts.RangeFormat, "range-format", "%d", "set `format` for range")
fs.StringVarP(&opts.Filename, "file", "f", "", "read values from `filename`")
fs.StringVar(&opts.Logfile, "logfile", "", "write copy of printed messages to `filename`.log")
fs.StringVar(&opts.Logdir, "logdir", os.Getenv("MONSOON_LOG_DIR"), "automatically log all output to files in `dir`")
fs.IntVarP(&opts.Threads, "threads", "t", 5, "make as many as `n` parallel requests")
fs.IntVar(&opts.BufferSize, "buffer-size", 100000, "set number of buffered items to `n`")
fs.IntVar(&opts.Skip, "skip", 0, "skip the first `n` requests")
fs.IntVar(&opts.Limit, "limit", 0, "only run `n` requests, then exit")
fs.Float64Var(&opts.RequestsPerSecond, "requests-per-second", 0, "do at most `n` requests per second (e.g. 0.5)")
// add all options to define a request
opts.Request = request.New("")
request.AddFlags(opts.Request, fs)
fs.IntVar(&opts.FollowRedirect, "follow-redirect", 0, "follow `n` redirects")
fs.StringSliceVar(&opts.HideStatusCodes, "hide-status", nil, "hide responses with this status `code,[code-code],[-code],[...]`")
fs.StringSliceVar(&opts.ShowStatusCodes, "show-status", nil, "show only responses with this status `code,[code-code],[code-],[...]`")
fs.StringSliceVar(&opts.HideHeaderSize, "hide-header-size", nil, "hide responses with this header size (`size,from-to,from-,-to`)")
fs.StringSliceVar(&opts.HideBodySize, "hide-body-size", nil, "hide responses with this body size (`size,from-to,from-,-to`)")
fs.StringArrayVar(&opts.HidePattern, "hide-pattern", nil, "hide responses containing `regex` in response header or body (can be specified multiple times)")
fs.StringArrayVar(&opts.ShowPattern, "show-pattern", nil, "show only responses containing `regex` in response header or body (can be specified multiple times)")
fs.StringArrayVar(&opts.Extract, "extract", nil, "extract `regex` from response body (can be specified multiple times)")
fs.StringArrayVar(&opts.ExtractPipe, "extract-pipe", nil, "pipe response body to `cmd` to extract data (can be specified multiple times)")
fs.IntVar(&opts.MaxBodySize, "max-body-size", 5, "read at most `n` MiB from a returned response body (used for extracting data from the body)")
}
// logfilePath returns the prefix for the logfiles, if any.
func logfilePath(opts *Options, inputURL string) (prefix string, err error) {
if opts.Logdir != "" && opts.Logfile == "" {
url, err := url.Parse(inputURL)
if err != nil {
return "", err
}
ts := time.Now().Format("20060102_150405")
fn := fmt.Sprintf("monsoon_%s_%s", url.Host, ts)
p := filepath.Join(opts.Logdir, fn)
return p, nil
}
return opts.Logfile, nil
}
func setupProducer(ctx context.Context, g *errgroup.Group, opts *Options, ch chan<- string, count chan<- int) error {
switch {
case len(opts.Range) > 0:
var ranges []producer.Range
for _, r := range opts.Range {
rng, err := producer.ParseRange(r)
if err != nil {
return err
}
ranges = append(ranges, rng)
}
g.Go(func() error {
return producer.Ranges(ctx, ranges, opts.RangeFormat, ch, count)
})
return nil
case opts.Filename == "-":
g.Go(func() error {
return producer.Reader(ctx, os.Stdin, ch, count)
})
return nil
case opts.Filename != "":
file, err := os.Open(opts.Filename)
if err != nil {
return err
}
g.Go(func() error {
return producer.Reader(ctx, file, ch, count)
})
return nil
default:
return errors.New("neither file nor range specified, nothing to do")
}
}
func setupTerminal(ctx context.Context, g *errgroup.Group, maxFrameRate uint, logfilePrefix string) (term cli.Terminal, cleanup func(), err error) {
ctx, cancel := context.WithCancel(context.Background())
statusTerm := termstatus.New(os.Stdout, os.Stderr, false)
if maxFrameRate != 0 {
statusTerm.MaxFrameRate = maxFrameRate
}
term = statusTerm
if logfilePrefix != "" {
fmt.Printf(reporter.Bold("Logfile:")+" %s.log\n", logfilePrefix)
logfile, err := os.Create(logfilePrefix + ".log")
if err != nil {
return nil, cancel, err
}
fmt.Fprintln(logfile, shell.Join(os.Args))
// write copies of messages to logfile
term = &cli.LogTerminal{
Terminal: statusTerm,
Writer: logfile,
}
}
// make sure error messages logged via the log package are printed nicely
w := cli.NewStdioWrapper(term)
log.SetOutput(w.Stderr())
g.Go(func() error {
term.Run(ctx)
return nil
})
return term, cancel, nil
}
func setupResponseFilters(opts *Options) ([]response.Filter, error) {
var filters []response.Filter
filter, err := response.NewFilterStatusCode(opts.HideStatusCodes, opts.ShowStatusCodes)
if err != nil {
return nil, err
}
filters = append(filters, filter)
if len(opts.HideHeaderSize) > 0 || len(opts.HideBodySize) > 0 {
f, err := response.NewFilterSize(opts.HideHeaderSize, opts.HideBodySize)
if err != nil {
return nil, err
}
filters = append(filters, f)
}
if len(opts.hidePattern) > 0 {
filters = append(filters, response.FilterRejectPattern{Pattern: opts.hidePattern})
}
if len(opts.showPattern) > 0 {
filters = append(filters, response.FilterAcceptPattern{Pattern: opts.showPattern})
}
return filters, nil
}
func setupValueFilters(ctx context.Context, opts *Options, valueCh <-chan string, countCh <-chan int) (<-chan string, <-chan int) {
if opts.Skip > 0 {
f := &producer.FilterSkip{Skip: opts.Skip}
countCh = f.Count(ctx, countCh)
valueCh = f.Select(ctx, valueCh)
}
if opts.Limit > 0 {
f := &producer.FilterLimit{Max: opts.Limit}
countCh = f.Count(ctx, countCh)
valueCh = f.Select(ctx, valueCh)
}
return valueCh, countCh
}
func startRunners(ctx context.Context, opts *Options, in <-chan string) (<-chan response.Response, error) {
out := make(chan response.Response)
var wg sync.WaitGroup
transport, err := response.NewTransport(opts.Request.Insecure, opts.Request.TLSClientKeyCertFile,
opts.Request.DisableHTTP2, opts.Threads)
if err != nil {
return nil, err
}
for i := 0; i < opts.Threads; i++ {
runner := response.NewRunner(transport, opts.Request, in, out)
runner.MaxBodySize = opts.MaxBodySize * 1024 * 1024
runner.Extract = opts.extract
runner.Client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
if len(via) <= opts.FollowRedirect {
return nil
}
return http.ErrUseLastResponse
}
wg.Add(1)
go func() {
runner.Run(ctx)
wg.Done()
}()
}
go func() {
// wait until the runners are done, then close the output channel
wg.Wait()
close(out)
}()
return out, nil
}
func run(ctx context.Context, g *errgroup.Group, opts *Options, args []string) error {
// make sure the options and arguments are valid
if len(args) == 0 {
return errors.New("last argument needs to be the URL")
}
if len(args) > 1 {
return errors.New("more than one target URL specified")
}
err := opts.valid()
if err != nil {
return err
}
inputURL := args[0]
opts.Request.URL = inputURL
// setup logging and the terminal
logfilePrefix, err := logfilePath(opts, inputURL)
if err != nil | {
return err
} | conditional_block |
|
resp.go | return "Integer"
case '$':
return "BulkString"
case '*':
return "Array"
case 'R':
return "RDB"
}
}
// Value represents the data of a valid RESP type.
type Value struct {
Typ Type
IntegerV int
Str []byte
ArrayV []Value
Null bool
RDB bool
Size int
}
func (v Value) ReplInfo() (runID string, offset int64) {
if v.Type() != Rdb {
return
}
buf := bytes.Split(v.Str, []byte(" "))
if len(buf) < 3 {
return
}
_offset, err := strconv.ParseInt(string(buf[2]), 10, 64)
if err != nil {
return
}
return string(buf[1]), _offset | switch v.Typ {
default:
n, _ := strconv.ParseInt(v.String(), 10, 64)
return int(n)
case ':':
return v.IntegerV
}
}
// String converts Value to a string.
func (v Value) String() string {
if v.Typ == '$' {
return string(v.Str)
}
switch v.Typ {
case '+', '-':
return string(v.Str)
case ':':
return strconv.FormatInt(int64(v.IntegerV), 10)
case '*':
buf := bytes.NewBuffer(nil)
concatArray(buf, v.ArrayV...)
return strings.TrimSuffix(buf.String(), " ")
case '\r':
return "\r\n"
}
return ""
}
func concatArray(wr io.Writer, vs ...Value) {
for i := range vs {
_, err := wr.Write([]byte(vs[i].String()))
if err != nil {
panic(err)
}
_, err = wr.Write([]byte("\r\n"))
if err != nil {
panic(err)
}
concatArray(wr, vs[i].Array()...)
}
}
// Bytes converts the Value to a byte array. An empty string is converted to a non-nil empty byte array.
// If it's a RESP Null value, nil is returned.
func (v Value) Bytes() []byte {
switch v.Typ {
default:
return []byte(v.String())
case '$', '+', '-':
return v.Str
}
}
// Float converts Value to a float64. If Value cannot be converted
// Zero is returned.
func (v Value) Float() float64 {
switch v.Typ {
default:
f, _ := strconv.ParseFloat(v.String(), 64)
return f
case ':':
return float64(v.IntegerV)
}
}
// IsNull indicates whether or not the base value is null.
func (v Value) IsNull() bool {
return v.Null
}
// Bool converts Value to a bool. If Value cannot be converted, false is returned.
func (v Value) Bool() bool {
return v.Integer() != 0
}
// Error converts the Value to an error. If Value is not an error, nil is returned.
func (v Value) Error() error {
switch v.Typ {
case '-':
return errors.New(string(v.Str))
}
return nil
}
// Array converts the Value to an array.
// If Value is not an array or when it is a RESP Null value, nil is returned.
func (v Value) Array() []Value {
if v.Typ == '*' && !v.Null {
return v.ArrayV
}
return nil
}
// Type returns the underlying RESP type.
// The following types represent valid RESP values.
func (v Value) Type() Type {
return v.Typ
}
func marshalSimpleRESP(typ Type, b []byte) ([]byte, error) {
bb := make([]byte, 3+len(b))
bb[0] = byte(typ)
copy(bb[1:], b)
bb[1+len(b)+0] = '\r'
bb[1+len(b)+1] = '\n'
return bb, nil
}
func marshalBulkRESP(v Value) ([]byte, error) {
if v.Null {
return []byte("$-1\r\n"), nil
}
szb := []byte(strconv.FormatInt(int64(len(v.Str)), 10))
bb := make([]byte, 5+len(szb)+len(v.Str))
bb[0] = '$'
copy(bb[1:], szb)
bb[1+len(szb)+0] = '\r'
bb[1+len(szb)+1] = '\n'
copy(bb[1+len(szb)+2:], v.Str)
bb[1+len(szb)+2+len(v.Str)+0] = '\r'
bb[1+len(szb)+2+len(v.Str)+1] = '\n'
return bb, nil
}
func marshalArrayRESP(v Value) ([]byte, error) {
if v.Null {
return []byte("*-1\r\n"), nil
}
szb := []byte(strconv.FormatInt(int64(len(v.ArrayV)), 10))
var buf bytes.Buffer
buf.Grow(3 + len(szb) + 16*len(v.ArrayV)) // prime the buffer
buf.WriteByte('*')
buf.Write(szb)
buf.WriteByte('\r')
buf.WriteByte('\n')
for i := 0; i < len(v.ArrayV); i++ {
data, err := v.ArrayV[i].MarshalRESP()
if err != nil {
return nil, err
}
buf.Write(data)
}
return buf.Bytes(), nil
}
func marshalAnyRESP(v Value) ([]byte, error) {
switch v.Typ {
default:
if v.Typ == 0 && v.Null {
return []byte("$-1\r\n"), nil
}
return nil, errors.New("unknown resp type encountered")
case '-', '+':
return marshalSimpleRESP(v.Typ, v.Str)
case ':':
return marshalSimpleRESP(v.Typ, []byte(strconv.FormatInt(int64(v.IntegerV), 10)))
case '$':
return marshalBulkRESP(v)
case '*':
return marshalArrayRESP(v)
}
}
// Equals compares one value to another value.
func (v Value) Equals(value Value) bool {
data1, err := v.MarshalRESP()
if err != nil {
return false
}
data2, err := value.MarshalRESP()
if err != nil {
return false
}
return string(data1) == string(data2)
}
// MarshalRESP returns the original serialized byte representation of Value.
// For more information on this format please see http://redis.io/topics/protocol.
func (v Value) MarshalRESP() ([]byte, error) {
return marshalAnyRESP(v)
}
var NilValue = Value{Null: true}
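// Illustrative sketch (not part of the original source): wire encodings produced by
// MarshalRESP for a few values, using only constructors defined in this file; the
// byte strings follow the RESP format linked above.
func exampleMarshalRESP() {
	b, _ := StringValue("hello").MarshalRESP()
	fmt.Printf("%q\n", b) // "$5\r\nhello\r\n"
	b, _ = IntegerValue(42).MarshalRESP()
	fmt.Printf("%q\n", b) // ":42\r\n"
	b, _ = NullValue().MarshalRESP()
	fmt.Printf("%q\n", b) // "$-1\r\n"
	b, _ = Value{Typ: '*', ArrayV: []Value{StringValue("GET"), StringValue("key")}}.MarshalRESP()
	fmt.Printf("%q\n", b) // "*2\r\n$3\r\nGET\r\n$3\r\nkey\r\n"
}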
type ErrProtocol struct{ Msg string }
func (err ErrProtocol) Error() string {
return "Protocol error: " + err.Msg
}
// AnyValue returns a RESP value from an interface.
// This function infers the types. Arrays are not allowed.
func AnyValue(v interface{}) Value {
switch v := v.(type) {
default:
return StringValue(fmt.Sprintf("%v", v))
case nil:
return NullValue()
case int:
return IntegerValue(int(v))
case uint:
return IntegerValue(int(v))
case int8:
return IntegerValue(int(v))
case uint8:
return IntegerValue(int(v))
case int16:
return IntegerValue(int(v))
case uint16:
return IntegerValue(int(v))
case int32:
return IntegerValue(int(v))
case uint32:
return IntegerValue(int(v))
case int64:
return IntegerValue(int(v))
case uint64:
return IntegerValue(int(v))
case bool:
return BoolValue(v)
case float32:
return FloatValue(float64(v))
case float64:
return FloatValue(float64(v))
case []byte:
return BytesValue(v)
case string:
return StringValue(v)
}
}
// SimpleStringValue returns a RESP simple string. A simple string has no new lines. The carriage return and new line characters are replaced with spaces.
func SimpleStringValue(s string) Value { return Value{Typ: '+', Str: []byte(formSingleLine(s))} }
// BytesValue returns a RESP bulk string. A bulk string can represent any data.
func BytesValue(b []byte) Value { return Value{Typ: '$', Str: b} }
// StringValue returns a RESP bulk string. A bulk string can represent any data.
func StringValue(s string) Value { return Value{Typ: '$', Str: []byte(s)} }
// NullValue returns a RESP null bulk string.
func NullValue() Value { return Value{Typ: '$', Null: true} }
// ErrorValue returns a RESP error.
func ErrorValue(err error) Value {
if err == nil {
return Value{Typ: '-'}
}
return Value{Typ: '-', Str: []byte(err.Error())}
}
// IntegerValue returns a RESP integer.
func IntegerValue(i int) Value { return Value{Typ: ':', IntegerV: i} }
// BoolValue returns a RESP integer representation of a bool.
func BoolValue(t bool) Value {
if t {
return Value{Typ: ':', IntegerV: 1}
}
return Value{Typ: ':', IntegerV: 0}
}
// FloatValue returns a RESP bulk string representation of a float.
func FloatValue(f float64 | }
// Integer converts Value to an int. If Value cannot be converted, Zero is returned.
func (v Value) Integer() int { | random_line_split |
resp.go | return "Integer"
case '$':
return "BulkString"
case '*':
return "Array"
case 'R':
return "RDB"
}
}
// Value represents the data of a valid RESP type.
type Value struct {
Typ Type
IntegerV int
Str []byte
ArrayV []Value
Null bool
RDB bool
Size int
}
func (v Value) ReplInfo() (runID string, offset int64) {
if v.Type() != Rdb {
return
}
buf := bytes.Split(v.Str, []byte(" "))
if len(buf) < 3 {
return
}
_offset, err := strconv.ParseInt(string(buf[2]), 10, 64)
if err != nil {
return
}
return string(buf[1]), _offset
}
// Integer converts Value to an int. If Value cannot be converted, Zero is returned.
func (v Value) Integer() int {
switch v.Typ {
default:
n, _ := strconv.ParseInt(v.String(), 10, 64)
return int(n)
case ':':
return v.IntegerV
}
}
// String converts Value to a string.
func (v Value) String() string {
if v.Typ == '$' {
return string(v.Str)
}
switch v.Typ {
case '+', '-':
return string(v.Str)
case ':':
return strconv.FormatInt(int64(v.IntegerV), 10)
case '*':
buf := bytes.NewBuffer(nil)
concatArray(buf, v.ArrayV...)
return strings.TrimSuffix(buf.String(), " ")
case '\r':
return "\r\n"
}
return ""
}
func concatArray(wr io.Writer, vs ...Value) {
for i := range vs {
_, err := wr.Write([]byte(vs[i].String()))
if err != nil {
panic(err)
}
_, err = wr.Write([]byte("\r\n"))
if err != nil {
panic(err)
}
concatArray(wr, vs[i].Array()...)
}
}
// Bytes converts the Value to a byte array. An empty string is converted to a non-nil empty byte array.
// If it's a RESP Null value, nil is returned.
func (v Value) Bytes() []byte {
switch v.Typ {
default:
return []byte(v.String())
case '$', '+', '-':
return v.Str
}
}
// Float converts Value to a float64. If Value cannot be converted
// Zero is returned.
func (v Value) Float() float64 {
switch v.Typ {
default:
f, _ := strconv.ParseFloat(v.String(), 64)
return f
case ':':
return float64(v.IntegerV)
}
}
// IsNull indicates whether or not the base value is null.
func (v Value) IsNull() bool {
return v.Null
}
// Bool converts Value to a bool. If Value cannot be converted, false is returned.
func (v Value) Bool() bool {
return v.Integer() != 0
}
// Error converts the Value to an error. If Value is not an error, nil is returned.
func (v Value) Error() error {
switch v.Typ {
case '-':
return errors.New(string(v.Str))
}
return nil
}
// Array converts the Value to an array.
// If Value is not an array or when it is a RESP Null value, nil is returned.
func (v Value) Array() []Value {
if v.Typ == '*' && !v.Null {
return v.ArrayV
}
return nil
}
// Type returns the underlying RESP type.
// The following types represent valid RESP values.
func (v Value) Type() Type {
return v.Typ
}
func marshalSimpleRESP(typ Type, b []byte) ([]byte, error) {
bb := make([]byte, 3+len(b))
bb[0] = byte(typ)
copy(bb[1:], b)
bb[1+len(b)+0] = '\r'
bb[1+len(b)+1] = '\n'
return bb, nil
}
func marshalBulkRESP(v Value) ([]byte, error) {
if v.Null {
return []byte("$-1\r\n"), nil
}
szb := []byte(strconv.FormatInt(int64(len(v.Str)), 10))
bb := make([]byte, 5+len(szb)+len(v.Str))
bb[0] = '$'
copy(bb[1:], szb)
bb[1+len(szb)+0] = '\r'
bb[1+len(szb)+1] = '\n'
copy(bb[1+len(szb)+2:], v.Str)
bb[1+len(szb)+2+len(v.Str)+0] = '\r'
bb[1+len(szb)+2+len(v.Str)+1] = '\n'
return bb, nil
}
func marshalArrayRESP(v Value) ([]byte, error) {
if v.Null {
return []byte("*-1\r\n"), nil
}
szb := []byte(strconv.FormatInt(int64(len(v.ArrayV)), 10))
var buf bytes.Buffer
buf.Grow(3 + len(szb) + 16*len(v.ArrayV)) // prime the buffer
buf.WriteByte('*')
buf.Write(szb)
buf.WriteByte('\r')
buf.WriteByte('\n')
for i := 0; i < len(v.ArrayV); i++ {
data, err := v.ArrayV[i].MarshalRESP()
if err != nil {
return nil, err
}
buf.Write(data)
}
return buf.Bytes(), nil
}
func marshalAnyRESP(v Value) ([]byte, error) {
switch v.Typ {
default:
if v.Typ == 0 && v.Null {
return []byte("$-1\r\n"), nil
}
return nil, errors.New("unknown resp type encountered")
case '-', '+':
return marshalSimpleRESP(v.Typ, v.Str)
case ':':
return marshalSimpleRESP(v.Typ, []byte(strconv.FormatInt(int64(v.IntegerV), 10)))
case '$':
return marshalBulkRESP(v)
case '*':
return marshalArrayRESP(v)
}
}
// Equals compares one value to another value.
func (v Value) Equals(value Value) bool {
data1, err := v.MarshalRESP()
if err != nil {
return false
}
data2, err := value.MarshalRESP()
if err != nil {
return false
}
return string(data1) == string(data2)
}
// MarshalRESP returns the original serialized byte representation of Value.
// For more information on this format please see http://redis.io/topics/protocol.
func (v Value) MarshalRESP() ([]byte, error) {
return marshalAnyRESP(v)
}
var NilValue = Value{Null: true}
type ErrProtocol struct{ Msg string }
func (err ErrProtocol) Error() string {
return "Protocol error: " + err.Msg
}
// AnyValue returns a RESP value from an interface.
// This function infers the types. Arrays are not allowed.
func AnyValue(v interface{}) Value {
switch v := v.(type) {
default:
return StringValue(fmt.Sprintf("%v", v))
case nil:
return NullValue()
case int:
return IntegerValue(int(v))
case uint:
return IntegerValue(int(v))
case int8:
return IntegerValue(int(v))
case uint8:
return IntegerValue(int(v))
case int16:
return IntegerValue(int(v))
case uint16:
return IntegerValue(int(v))
case int32:
return IntegerValue(int(v))
case uint32:
return IntegerValue(int(v))
case int64:
return IntegerValue(int(v))
case uint64:
return IntegerValue(int(v))
case bool:
return BoolValue(v)
case float32:
return FloatValue(float64(v))
case float64:
return FloatValue(float64(v))
case []byte:
return BytesValue(v)
case string:
return StringValue(v)
}
}
// SimpleStringValue returns a RESP simple string. A simple string has no new lines. The carriage return and new line characters are replaced with spaces.
func SimpleStringValue(s string) Value { return Value{Typ: '+', Str: []byte(formSingleLine(s))} }
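// Illustrative sketch (not part of the original source): AnyValue infers the RESP
// type from a plain Go value, so the conversions above can be exercised without
// choosing a constructor explicitly.
func exampleAnyValue() {
	fmt.Println(AnyValue(42).Integer())  // 42 (stored as a RESP integer)
	fmt.Println(AnyValue("7").Integer()) // 7 (bulk string, parsed on demand)
	fmt.Println(AnyValue(true).Bool())   // true (stored as integer 1)
	fmt.Println(AnyValue(nil).IsNull())  // true (RESP null bulk string)
}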
// BytesValue returns a RESP bulk string. A bulk string can represent any data.
func BytesValue(b []byte) Value { return Value{Typ: '$', Str: b} }
// StringValue returns a RESP bulk string. A bulk string can represent any data.
func StringValue(s string) Value { return Value{Typ: '$', Str: []byte(s)} }
// NullValue returns a RESP null bulk string.
func NullValue() Value { return Value{Typ: '$', Null: true} }
// ErrorValue returns a RESP error.
func ErrorValue(err error) Value {
if err == nil {
return Value{Typ: '-'}
}
return Value{Typ: '-', Str: []byte(err.Error())}
}
// IntegerValue returns a RESP integer.
func IntegerValue(i int) Value { return Value{Typ: ':', IntegerV: i} }
// BoolValue returns a RESP integer representation of a bool.
func BoolValue(t bool) Value {
if t {
return Value{Typ: ':', IntegerV: 1}
}
return Value{Typ: ':', IntegerV: 0}
}
// FloatValue returns a RESP bulk string representation of a float.
func | (f float | FloatValue | identifier_name |