/** returns whether the solving process will be / was stopped before proving optimality;
* if the solving process was stopped, stores the reason as status in stat
*/
SCIP_Bool SCIPsolveIsStopped(
SCIP_SET* set,
SCIP_STAT* stat,
SCIP_Bool checknodelimits
)
{
assert(set != NULL);
assert(stat != NULL);
SCIPstatIncrement(stat, set, nisstoppedcalls);
if( set->stage >= SCIP_STAGE_SOLVING && SCIPsetIsLE(set, SCIPgetUpperbound(set->scip), SCIPgetLowerbound(set->scip)) )
return TRUE;
if( set->limitchanged )
{
stat->status = SCIP_STATUS_UNKNOWN;
set->limitchanged = FALSE;
}
if( SCIPinterrupted() || stat->userinterrupt )
{
stat->status = SCIP_STATUS_USERINTERRUPT;
stat->userinterrupt = FALSE;
if( set->misc_catchctrlc )
{
SCIPresetInterrupted();
}
}
else if( SCIPterminated() )
{
stat->status = SCIP_STATUS_TERMINATE;
return TRUE;
}
else if( set->istimelimitfinite )
{
if( stat->nclockskipsleft <= 0 || stat->nisstoppedcalls < NINITCALLS )
{
SCIP_Real currtime = SCIPclockGetTime(stat->solvingtime);
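/* Clock-skip heuristic: avgisstoppedfreq estimates the wall-clock time per
 * call of this function; if the time remaining until the limit, scaled by
 * SAFETYFACTOR, covers fewer than MAXNCLOCKSKIPS calls, clock skipping is
 * disabled so the time limit cannot be overshot between two checks.
 */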
if( set->time_rareclockcheck && stat->nisstoppedcalls >= NINITCALLS )
{
SCIP_Real avgisstoppedfreq;
int nclockskips = MAXNCLOCKSKIPS;
avgisstoppedfreq = currtime / stat->nisstoppedcalls;
if( (SAFETYFACTOR * (set->limit_time - currtime) / (avgisstoppedfreq + 1e-6)) < nclockskips )
nclockskips = 0;
stat->nclockskipsleft = nclockskips;
}
else
stat->nclockskipsleft = 0;
if( currtime >= set->limit_time )
{
stat->status = SCIP_STATUS_TIMELIMIT;
return TRUE;
}
}
else if( SCIPclockGetLastTime(stat->solvingtime) >= set->limit_time )
{
stat->status = SCIP_STATUS_TIMELIMIT;
return TRUE;
}
else
--stat->nclockskipsleft;
}
if( SCIPgetConcurrentMemTotal(set->scip) >= set->limit_memory*1048576.0 - stat->externmemestim * (1.0 + SCIPgetNConcurrentSolvers(set->scip)) )
stat->status = SCIP_STATUS_MEMLIMIT;
else if( SCIPgetNLimSolsFound(set->scip) > 0
&& (SCIPsetIsLT(set, SCIPgetGap(set->scip), set->limit_gap)
|| SCIPsetIsLT(set, SCIPgetUpperbound(set->scip) - SCIPgetLowerbound(set->scip), set->limit_absgap)) )
stat->status = SCIP_STATUS_GAPLIMIT;
else if( set->limit_solutions >= 0 && set->stage >= SCIP_STAGE_PRESOLVED
&& SCIPgetNLimSolsFound(set->scip) >= set->limit_solutions )
stat->status = SCIP_STATUS_SOLLIMIT;
else if( set->limit_bestsol >= 0 && set->stage >= SCIP_STAGE_PRESOLVED
&& SCIPgetNBestSolsFound(set->scip) >= set->limit_bestsol )
stat->status = SCIP_STATUS_BESTSOLLIMIT;
else if( checknodelimits && set->limit_nodes >= 0 && stat->nnodes >= set->limit_nodes )
stat->status = SCIP_STATUS_NODELIMIT;
else if( checknodelimits && set->limit_totalnodes >= 0 && stat->ntotalnodes >= set->limit_totalnodes )
stat->status = SCIP_STATUS_TOTALNODELIMIT;
else if( checknodelimits && set->limit_stallnodes >= 0 && stat->nnodes >= stat->bestsolnode + set->limit_stallnodes )
stat->status = SCIP_STATUS_STALLNODELIMIT;
if( !checknodelimits )
return SCIPsyncstoreSolveIsStopped(SCIPgetSyncstore(set->scip)) || (stat->status != SCIP_STATUS_UNKNOWN && stat->status != SCIP_STATUS_NODELIMIT && stat->status != SCIP_STATUS_TOTALNODELIMIT && stat->status != SCIP_STATUS_STALLNODELIMIT);
else
return SCIPsyncstoreSolveIsStopped(SCIPgetSyncstore(set->scip)) || (stat->status != SCIP_STATUS_UNKNOWN);
}
Effects of muramyl dipeptide and lead acetate on carbon clearance and endotoxin-induced mortality in mice. Muramyl dipeptide (MDP) is a nonspecific immune adjuvant thought to affect the macrophage. MDP had been used safely without immunosuppressive or toxic side effects in our laboratory and others. Endotoxin, or lipopolysaccharide (LPS), is thought to be responsible for many of the systemic toxic effects of gram-negative infection. Lead acetate potentiates the lethal effects of endotoxin, an effect attributed to increased hepatotoxicity involving both hepatocytes and Kupffer macrophages. This study was undertaken to examine a putative mechanism of action of MDP involving the reticuloendothelial system. Endotoxin was given intraperitoneally to susceptible mice that were pretreated with MDP, lead acetate, or both, and to unmodified controls. Lead acetate significantly enhanced lethality due to LPS, but pretreatment with MDP did not alter mortality. Carbon clearance was measured in mice treated with MDP, lead, or both. There was no difference in the phagocytic index of control mice and those mice treated with lead acetate at various times prior to the injection. Carbon clearance increased significantly in mice pretreated with MDP but was unaltered by the addition of lead acetate. We conclude that if hyperphagocytosis of endotoxin occurs in MDP-pretreated mice, it does not cause additional mortality. Muramyl dipeptide appeared to be a safe reticuloendothelial stimulant that did not enhance the toxicity of lead or LPS in this experimental model.
Sirtuin 3 is a Novel Regulator of Mitochondrial Ceramide Metabolism in Stroke. Experimental evidence supports the role of mitochondrial ceramide accumulation as a cause of mitochondrial dysfunction and brain injury after stroke. Herein, we report that SIRT3 regulates mitochondrial ceramide biosynthesis via deacetylation of ceramide synthase (CerS) 1, 2 and 6. Reciprocal immunoprecipitation experiments revealed that CerS1, CerS2 and CerS6, but not CerS4, are associated with SIRT3 in cerebral mitochondria. Furthermore, CerS1, 2 and 6 are hyperacetylated in the mitochondria of SIRT3-null mice, and SIRT3 directly deacetylates the ceramide synthases in a NAD+-dependent manner that increases enzyme activity. Investigation of the role of SIRT3 in the mitochondrial response to brain ischemia/reperfusion (IR) showed that SIRT3-mediated deacetylation of ceramide synthases increased enzyme activity and ceramide accumulation after IR. Functional studies demonstrated that the absence of SIRT3 rescued the IR-induced blockade of the electron-transport chain at the level of Complex III, and decreased ROS generation and protein carbonyls in mitochondria. Importantly, Sirt3 gene ablation protected the brain from IR injury. These data support the hypothesis that IR activates SIRT3, resulting in the deacetylation of ceramide synthases and the elevation of ceramide, which could inhibit Complex III, leading to increased ROS generation and brain injury. The results of these studies highlight a novel mechanism of SIRT3 involvement in modulating mitochondrial ceramide biosynthesis and suggest a critical role of SIRT3 in promoting mitochondrial dysfunction and brain injury after stroke.
Profound endothelial damage predicts impending organ failure and death in sepsis. Endothelial damage contributes to organ failure and mortality in sepsis, but the extent of the contribution remains poorly quantified. Here, we examine the association between biomarkers of superficial and profound endothelial damage (syndecan-1 and soluble thrombomodulin [sTM], respectively), organ failure, and death in sepsis. Data from a clinical trial including critically ill patients predominantly suffering from sepsis (ClinicalTrials.gov: NCT00271752) were studied. Syndecan-1 and sTM levels at the time of study enrollment were determined. The predictive ability of biomarker levels on death and organ failures during follow-up was assessed in Cox models adjusted for potential confounders, including key organ dysfunction measures assessed at enrollment. Of the 1,103 included patients, 418 died. sTM levels at the time of enrollment independently predicted risk of death in adjusted models (hazard ratio [HR] 2.2 [95% confidence interval: 1.2-4.0], p=0.02). Conversely, syndecan-1 levels failed to predict death (adjusted HR 1.0, p=0.67). sTM but not syndecan-1 levels at enrollment predicted risk of multiple organ failure during follow-up (HR 3.5, p=0.005 and HR 2.0, p=0.1321, respectively). Profound damage to the endothelium independently predicts risk of multiple organ failure and death in septic patients. Our findings also suggest that the detrimental effect of profound endothelial damage on risk of death operates via mechanisms other than causing organ failures per se. Therefore, damage to the endothelium appears centrally involved in the pathogenesis of death in sepsis and could be a target for intervention.
// Copyright 2020 Jij Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @file hash.hpp
* @author <NAME>
* @brief Hash function for std::pair
* @version 1.0.0
* @date 2020-03-13
*
* @copyright Copyright (c) Jij Inc. 2020
*
*/
#ifndef HASH_HPP__
#define HASH_HPP__
#include <utility>
#include <cstddef>
#include <functional>
namespace cimod
{
/**
* @brief Hash function for std::unordered_map
*
*/
struct pair_hash {
template <class T1, class T2>
std::size_t operator() (const std::pair<T1, T2>& p) const {
// Combine the element hashes in the style of boost::hash_combine;
// 0x9e3779b9 is the 32-bit golden-ratio constant that spreads the bits.
std::size_t lhs = std::hash<T1>()(p.first), rhs = std::hash<T2>()(p.second);
return lhs^(rhs+0x9e3779b9+(lhs<<6)+(lhs>>2));
}
};
}
#endif
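// Usage sketch (illustrative; not part of the original file): pair_hash is
// passed as the third template argument of std::unordered_map so that a
// std::pair can serve as the key type.
#include <string>
#include <unordered_map>

int pair_hash_example() {
    std::unordered_map<std::pair<int, int>, std::string, cimod::pair_hash> m;
    m[{0, 1}] = "edge(0,1)";              // the pair key is hashed by pair_hash
    return m.count({0, 1}) == 1 ? 0 : 1;  // 0 on success
}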
/*
* Copyright (c) 2020 Bitdefender
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef _QUEUE_H_
#define _QUEUE_H_
#include "introtypes.h"
typedef struct _QUEUE
{
union
{
struct _QUEUE *Next;
struct _QUEUE *Head;
};
} QUEUE_ENTRY, *PQUEUE_ENTRY, QUEUE_HEAD, *PQUEUE_HEAD;
__forceinline void
QueueInitialize(
_Out_ QUEUE_HEAD *QueueHead
)
{
QueueHead->Next = QueueHead;
}
__forceinline BOOLEAN
QueueIsEmpty(
_In_ QUEUE_HEAD *QueueHead
)
{
return (BOOLEAN)(QueueHead->Next == QueueHead);
}
__forceinline QUEUE_ENTRY *
QueueRemove(
_In_ QUEUE_HEAD *QueueHead
)
{
QUEUE_ENTRY *head = QueueHead->Head;
QueueHead->Head = head->Next;
return head;
}
__forceinline void
QueueInsert(
_In_ QUEUE_HEAD *QueueHead,
_In_ QUEUE_ENTRY *Element
)
{
QUEUE_ENTRY *oldHead = QueueHead->Head;
QueueHead->Head = Element;
Element->Next = oldHead;
}
#endif // _QUEUE_H_
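// Usage sketch (illustrative; not part of the original header): despite the
// name, insertion and removal both act on the head, so the structure behaves
// as a LIFO stack over intrusively linked entries. With the link as the first
// member, the QUEUE_ENTRY pointer can be cast back to the containing item.
typedef struct _ITEM
{
    QUEUE_ENTRY Link;   // must stay the first member for the cast below
    int Value;
} ITEM;

static void QueueExample(void)
{
    QUEUE_HEAD head;
    ITEM a, b;

    a.Value = 1;
    b.Value = 2;

    QueueInitialize(&head);
    QueueInsert(&head, &a.Link);
    QueueInsert(&head, &b.Link);

    while (!QueueIsEmpty(&head))
    {
        ITEM *item = (ITEM *)QueueRemove(&head);    // pops b (2), then a (1)
        (void)item->Value;
    }
}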
Geology, stratigraphy and palaeoenvironmental evolution of the Stephanorhinus kirchbergensis-bearing Quaternary palaeolake(s) of Gorzów Wielkopolski (NW Poland, Central Europe). The sedimentary succession exposed in the Gorzów Wielkopolski area includes Eemian Interglacial (MIS 5e) or Early Weichselian (MIS 5d-e) deposits. The sedimentary sequence has been the object of intense interdisciplinary study, which has resulted in the identification of at least two palaeolake horizons. Both yielded fossil remains of large mammals, alongside pollen and plant macrofossils. All these proxies have been used to reconstruct the environmental conditions prevailing at the time of deposition, as well as to define the geological context and the biochronological position of the fauna. Optically stimulated luminescence dating of the glaciofluvial layers of the GS3 succession to 123.6 ± 10.1 ka (below the lower palaeolake) and 72.0 ± 5.2 ka (above the upper palaeolake) indicates that the site formed during the Middle-Late Pleistocene (MIS 6-MIS 5). Radiocarbon dating of the lacustrine organic matter revealed a tight cluster of Middle Pleniglacial (MIS 3) ages in the range of ~41-32 ka cal BP (Hengelo-Denekamp Interstadials). Holocene organic layers have also been found, with 14C ages within a range of 4330-4280 cal BP (Neolithic). Pollen and plant macrofossil records, together with sedimentological and geochemical data, confirm the dating to the Eemian Interglacial.
#pragma once
//---------------------------------------------------------------------------
#include "parser/ParseTreeNode.hpp"
#include "ast/SymbolTable.hpp"
#include "ast/ASTNode.hpp"
#include <vector>
#include <unordered_map>
#include <memory>
//---------------------------------------------------------------------------
namespace pljit {
//---------------------------------------------------------------------------
/// (The class name SemanticAnalyzer is not prescribed by the task.)
/// The main task of this class is semantic analysis:
/// - to traverse the existing parse tree provided as input by the parser and lexer.
/// - to find and report semantic errors in the program, such as the use of an undeclared identifier.
/// Such errors cannot be caught by enforcing the grammar alone, without a symbol table, which is why the parser will not find them.
/// - to generate an AST as an output.
class SemanticAnalyzer {
public:
/// The publicly exposed function to traverse the parse tree and to generate an AST.
/// This function is implemented by calling other private functions that take a node from the parse tree,
/// recursively call each other with the child nodes, and return the generated AST nodes.
std::unique_ptr<FunctionAST> AnalyzeParseTree(std::unique_ptr<NonTerminalParseTreeNode> parse_tree_root);
/// Get the symbol table.
const SymbolTable& GetSymbolTable() const;
private:
/// A symbol table keeps track of all identifiers.
SymbolTable symbol_table;
/// Analyzes a statement node of the parse tree and generates the corresponding AST node.
/// @return unique pointer to an AssignmentStatementAST or ReturnStatementAST; nullptr on failure (use of an undeclared identifier or assignment to a constant).
std::unique_ptr<StatementAST> AnalyzeStatement(NonTerminalParseTreeNode* node);
/// Analyzes an additive expression of the parse tree and generates the corresponding AST node.
std::unique_ptr<ExpressionAST> AnalyzeAdditiveExpression(NonTerminalParseTreeNode* node);
/// Analyzes a multiplicative expression of the parse tree and generates the corresponding AST node.
std::unique_ptr<ExpressionAST> AnalyzeMultiplicativeExpression(NonTerminalParseTreeNode* node);
/// Analyzes declarations (parameter declarations and variable declarations) of the parse tree and registers them in the symbol table.
/// @return True on success, false on failure (duplicate identifier name found).
bool AnalyzeDeclarations(const NonTerminalParseTreeNode* const node, Symbol::Type type);
/// Analyzes constant declarations of the parse tree and registers them in the symbol table.
/// @return True on success, false on failure (duplicate identifier name found).
bool AnalyzeConstDeclarations(const NonTerminalParseTreeNode* const node);
};
//---------------------------------------------------------------------------
} //namespace pljit
//---------------------------------------------------------------------------
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for data_augmenter."""
from lingvo import compat as tf
from lingvo.core import test_utils
from lingvo.tasks.mt import data_augmenter
import numpy as np
FLAGS = tf.flags.FLAGS
class MassLayerTest(test_utils.TestCase):
def _MassParams(self):
p = data_augmenter.MASS.Params()
p.mask_id = 3
p.mask_ratio = 0.5
p.mask_minlen = 0
p.span_len = 3
p.random_start_prob = 0.8
p.keep_prob = 0
p.rand_prob = 0
p.mask_prob = 1
p.mask_target = True
p.vocab_size = 64
p.first_unreserved_id = 4
p.name = "mass_layer"
return p
def testMassLayer(self):
with self.session(use_gpu=False) as sess:
batch_size = 3
seq_len = 10
p = self._MassParams()
mass_layer = data_augmenter.MASS(p)
seq_ids = tf.fill([batch_size, seq_len], 4)
weights = tf.ones([batch_size, seq_len])
actual_seq_len = tf.fill([batch_size], 10)
mass_out = mass_layer.Mask(seq_ids, weights, actual_seq_len)
(src_ids, tgt_ids, tgt_labels, tgt_weights) = sess.run([
mass_out.src.ids, mass_out.tgt.ids, mass_out.tgt.labels,
mass_out.tgt.weights
])
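# With mask_ratio=0.5 over seq_len=10, exactly five positions per row are
# replaced by mask_id (3) in both the source and target id streams.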
self.assertAllEqual(np.sum(src_ids == 3, axis=1), [5, 5, 5])
self.assertAllEqual(np.sum(tgt_ids == 3, axis=1), [5, 5, 5])
self.assertAllEqual(tgt_labels,
4 * np.ones([batch_size, seq_len], dtype=np.int32))
self.assertAllEqual(np.sum(tgt_weights, axis=1), [5., 5., 5.])
if __name__ == "__main__":
tf.test.main()
from collections import OrderedDict
import threading
try:
from Queue import Queue as ThreadQueue
except ImportError:
from queue import Queue as ThreadQueue
try:
import cPickle as pickle
except ImportError:
import pickle
from glycan_profiling.chromatogram_tree import Unmodified
from glycan_profiling.structure import (
CachingGlycopeptideParser,
SequenceReversingCachingGlycopeptideParser,
FragmentCachingGlycopeptide,
DecoyFragmentCachingGlycopeptide)
from glycan_profiling.structure.structure_loader import GlycanAwareGlycopeptideFragmentCachingContext
from ..spectrum_evaluation import TandemClusterEvaluatorBase, DEFAULT_WORKLOAD_MAX
from ..process_dispatcher import SpectrumIdentificationWorkerBase
from ..oxonium_ions import gscore_scanner
class ParserClosure(object):
def __init__(self, parser_type, sequence_cls):
self.parser_type = parser_type
self.sequence_cls = sequence_cls
def __call__(self):
return self.parser_type(sequence_cls=self.sequence_cls)
class GlycopeptideSpectrumGroupEvaluatorMixin(object):
__slots__ = ()
def create_evaluation_context(self, subgroup):
return GlycanAwareGlycopeptideFragmentCachingContext()
def construct_cache_subgroups(self, work_order):
record_by_id = {}
subgroups = []
for key, order in work_order['work_orders'].items():
record = order[0]
record_by_id[record.id] = record
structure = self.parser(record)
for group in subgroups:
# This works when the localized modification is a core (N-Glycosylation, O-Glycosylation), but
# will break down if there is a fully defined glycan (composition or structure). Those will need
# to be handled differently, especially considering ExD-type dissociation where they will actually
# matter.
if structure.modified_sequence_equality(group[0]):
group.append(structure)
break
else:
subgroups.append([structure])
subgroups = [
sorted([record_by_id[structure.id]
for structure in subgroup], key=lambda x: x.id.structure_type)
if len(subgroup) > 1 else [record_by_id[structure.id] for structure in subgroup]
for subgroup in subgroups
]
return subgroups
class GlycopeptideIdentificationWorker(GlycopeptideSpectrumGroupEvaluatorMixin, SpectrumIdentificationWorkerBase):
process_name = 'glycopeptide-identification-worker'
def __init__(self, input_queue, output_queue, producer_done_event, consumer_done_event,
scorer_type, evaluation_args, spectrum_map, mass_shift_map, log_handler,
parser_type, solution_packer, cache_seeds=None):
if cache_seeds is None:
cache_seeds = {}
SpectrumIdentificationWorkerBase.__init__(
self, input_queue, output_queue, producer_done_event, consumer_done_event,
scorer_type, evaluation_args, spectrum_map, mass_shift_map,
log_handler=log_handler, solution_packer=solution_packer)
self.parser = parser_type()
self.cache_seeds = cache_seeds
def evaluate(self, scan, structure, evaluation_context=None, *args, **kwargs):
target = self.parser(structure)
if evaluation_context is not None:
evaluation_context(target)
matcher = self.scorer_type.evaluate(scan, target, *args, **kwargs)
return matcher
def before_task(self):
if self.cache_seeds is None:
return
cache_seeds = self.cache_seeds
if isinstance(cache_seeds, (str, bytes)):
cache_seeds = pickle.loads(cache_seeds)
oxonium_cache_seed = cache_seeds.get('oxonium_ion_cache')
if oxonium_cache_seed is not None:
oxonium_cache_seed = pickle.loads(oxonium_cache_seed)
from glycan_profiling.structure.structure_loader import oxonium_ion_cache
oxonium_ion_cache.update(oxonium_cache_seed)
class PeptideMassFilteringDatabaseSearchMixin(object):
def find_precursor_candidates(self, scan, error_tolerance=1e-5, probing_range=0,
mass_shift=None):
if mass_shift is None:
mass_shift = Unmodified
peptide_filter = None
hits = []
intact_mass = scan.precursor_information.extracted_neutral_mass
if self.peptide_mass_filter:
peptide_filter = self.peptide_mass_filter.build_peptide_filter(
scan, self.peptide_mass_filter.product_error_tolerance, mass_shift=mass_shift)
for i in range(probing_range + 1):
query_mass = intact_mass - (i * self.neutron_offset) - mass_shift.mass
unfiltered_matches = self.search_database_for_precursors(query_mass, error_tolerance)
if self.peptide_mass_filter:
hits.extend(map(self._mark_hit, [match for match in unfiltered_matches if peptide_filter(
match.peptide_mass - (i * self.neutron_offset))]))
else:
hits.extend(map(self._mark_hit, unfiltered_matches))
return hits
class GlycopeptideMatcher(GlycopeptideSpectrumGroupEvaluatorMixin, PeptideMassFilteringDatabaseSearchMixin, TandemClusterEvaluatorBase):
def __init__(self, tandem_cluster, scorer_type, structure_database, parser_type=None,
n_processes=5, ipc_manager=None, probing_range_for_missing_precursors=3,
mass_shifts=None, batch_size=DEFAULT_WORKLOAD_MAX, peptide_mass_filter=None,
trust_precursor_fits=True, cache_seeds=None, sequence_type=None):
if parser_type is None:
parser_type = self._default_parser_type()
if sequence_type is None:
sequence_type = self._default_sequence_type()
super(GlycopeptideMatcher, self).__init__(
tandem_cluster, scorer_type, structure_database, verbose=False, n_processes=n_processes,
ipc_manager=ipc_manager,
probing_range_for_missing_precursors=probing_range_for_missing_precursors,
mass_shifts=mass_shifts, batch_size=batch_size, trust_precursor_fits=trust_precursor_fits)
self.peptide_mass_filter = peptide_mass_filter
self.parser_type = parser_type
self.sequence_type = sequence_type
self.parser = None
self.reset_parser()
self.cache_seeds = cache_seeds
def _default_sequence_type(self):
return FragmentCachingGlycopeptide
def _default_parser_type(self):
return CachingGlycopeptideParser
def reset_parser(self):
self.parser = self.parser_type(sequence_cls=self.sequence_type)
def evaluate(self, scan, structure, *args, **kwargs):
target = self.parser(structure)
matcher = self.scorer_type.evaluate(scan, target, *args, **kwargs)
return matcher
def _transform_matched_collection(self, solution_set_collection):
cache = {}
for solution_set in solution_set_collection:
for sm in solution_set:
target = sm.target
if target.id in cache:
sm.target = cache[target.id]
else:
sm.target = cache[target.id] = self.parser(target)
return solution_set_collection
@property
def _worker_specification(self):
return GlycopeptideIdentificationWorker, {
"parser_type": ParserClosure(self.parser_type, self.sequence_type),
"cache_seeds": self.cache_seeds
}
class SequenceReversingDecoyGlycopeptideMatcher(GlycopeptideMatcher):
def _default_parser_type(self):
return SequenceReversingCachingGlycopeptideParser
class GlycanFragmentPermutingDecoyGlycopeptideMatcher(GlycopeptideMatcher):
def _default_sequence_type(self):
return DecoyFragmentCachingGlycopeptide
class SequenceReversingGlycanFragmentPermutingGlycopeptideMatcher(GlycopeptideMatcher):
def _default_sequence_type(self):
return DecoyFragmentCachingGlycopeptide
def _default_parser_type(self):
return SequenceReversingCachingGlycopeptideParser
class TargetDecoyInterleavingGlycopeptideMatcher(GlycopeptideSpectrumGroupEvaluatorMixin, PeptideMassFilteringDatabaseSearchMixin, TandemClusterEvaluatorBase):
'''Searches a single database against all spectra, where targets are
database matches, and decoys are the reverse of the individual target
glycopeptides.
A spectrum has a best target match and a best decoy match tracked
separately.
This means that targets and decoys share the same glycan composition and
peptide backbone mass, and ergo share stub glycopeptides. This may not produce
"random" enough decoy matches.
'''
def __init__(self, tandem_cluster, scorer_type, structure_database, minimum_oxonium_ratio=0.05,
n_processes=5, ipc_manager=None, probing_range_for_missing_precursors=3,
mass_shifts=None, batch_size=DEFAULT_WORKLOAD_MAX, peptide_mass_filter=None,
trust_precursor_fits=True, cache_seeds=None, permute_decoy_glycans=False):
super(TargetDecoyInterleavingGlycopeptideMatcher, self).__init__(
tandem_cluster, scorer_type, structure_database, verbose=False,
n_processes=n_processes, ipc_manager=ipc_manager,
probing_range_for_missing_precursors=probing_range_for_missing_precursors,
mass_shifts=mass_shifts, batch_size=batch_size,
trust_precursor_fits=trust_precursor_fits)
self.tandem_cluster = tandem_cluster
self.scorer_type = scorer_type
self.structure_database = structure_database
self.minimum_oxonium_ratio = minimum_oxonium_ratio
self.peptide_mass_filter = peptide_mass_filter
self.permute_decoy_glycans = permute_decoy_glycans
self.target_evaluator = GlycopeptideMatcher(
[], self.scorer_type, self.structure_database, n_processes=n_processes,
ipc_manager=ipc_manager,
probing_range_for_missing_precursors=probing_range_for_missing_precursors,
mass_shifts=mass_shifts, peptide_mass_filter=peptide_mass_filter,
trust_precursor_fits=trust_precursor_fits, cache_seeds=cache_seeds)
decoy_matcher_type = SequenceReversingDecoyGlycopeptideMatcher
if self.permute_decoy_glycans:
decoy_matcher_type = SequenceReversingGlycanFragmentPermutingGlycopeptideMatcher
self.decoy_evaluator = decoy_matcher_type(
[], self.scorer_type, self.structure_database, n_processes=n_processes,
ipc_manager=ipc_manager,
probing_range_for_missing_precursors=probing_range_for_missing_precursors,
mass_shifts=mass_shifts, peptide_mass_filter=peptide_mass_filter,
trust_precursor_fits=trust_precursor_fits, cache_seeds=cache_seeds)
def filter_for_oxonium_ions(self, error_tolerance=1e-5):
keep = []
for scan in self.tandem_cluster:
minimum_mass = 0
if scan.acquisition_information:
try:
scan_windows = scan.acquisition_information[0]
window = scan_windows[0]
minimum_mass = window.lower
except IndexError:
pass
try:
ratio = gscore_scanner(
peak_list=scan.deconvoluted_peak_set, error_tolerance=error_tolerance,
minimum_mass=minimum_mass)
except Exception:
self.error("An error occurred while calculating the G-score for \"%s\"" % scan.id)
ratio = 0
scan.oxonium_score = ratio
if ratio >= self.minimum_oxonium_ratio:
keep.append(scan)
else:
self.debug("... Skipping scan %s with G-score %f" % (scan.id, ratio))
self.tandem_cluster = keep
def score_one(self, scan, precursor_error_tolerance=1e-5, *args, **kwargs):
target_result = self.target_evaluator.score_one(scan, precursor_error_tolerance, *args, **kwargs)
decoy_result = self.decoy_evaluator.score_one(scan, precursor_error_tolerance, *args, **kwargs)
return target_result, decoy_result
def score_bunch(self, scans, precursor_error_tolerance=1e-5, simplify=True, *args, **kwargs):
# Map scans to target database
workload = self.target_evaluator._map_scans_to_hits(
scans, precursor_error_tolerance)
# Evaluate mapped target hits
target_solutions = []
total_work = workload.total_work_required()
running_total_work = 0
for i, batch in enumerate(workload.batches(self.batch_size)):
self.log("... Batch %d (%d/%d) %0.2f%%" % (
i + 1, running_total_work + batch.batch_size, total_work,
((running_total_work + batch.batch_size) * 100.) / float(total_work)))
target_scan_solution_map = self.target_evaluator._evaluate_hit_groups(
batch, *args, **kwargs)
running_total_work += batch.batch_size
# Aggregate and reduce target solutions
temp = self.target_evaluator._collect_scan_solutions(target_scan_solution_map, batch.scan_map)
if simplify:
temp = [case for case in temp if len(case) > 0]
for case in temp:
try:
case.simplify()
case.select_top()
except IndexError:
self.log("Failed to simplify %r" % (case.scan.id,))
raise
else:
temp = [case for case in temp if len(case) > 0]
target_solutions += temp
# Reuse mapped hits from target database using the decoy evaluator
# since this assumes that the decoys will be direct reversals of
# target sequences. The decoy evaluator will handle the reversals.
decoy_solutions = []
running_total_work = 0
for i, batch in enumerate(workload.batches(self.batch_size)):
self.log("... Batch %d (%d/%d) %0.2f%%" % (
i + 1, running_total_work + batch.batch_size, total_work,
((running_total_work + batch.batch_size) * 100.) / float(total_work)))
decoy_scan_solution_map = self.decoy_evaluator._evaluate_hit_groups(
batch, *args, **kwargs)
# Aggregate and reduce target solutions
temp = self.decoy_evaluator._collect_scan_solutions(decoy_scan_solution_map, batch.scan_map)
if simplify:
temp = [case for case in temp if len(case) > 0]
for case in temp:
try:
case.simplify()
case.select_top()
except IndexError:
self.log("Failed to simplify %r" % (case.scan.id,))
raise
else:
temp = [case for case in temp if len(case) > 0]
decoy_solutions += temp
running_total_work += batch.batch_size
return target_solutions, decoy_solutions
def score_all(self, precursor_error_tolerance=1e-5, simplify=False, *args, **kwargs):
target_out = []
decoy_out = []
self.filter_for_oxonium_ions(**kwargs)
target_out, decoy_out = self.score_bunch(
self.tandem_cluster, precursor_error_tolerance,
simplify=simplify, *args, **kwargs)
if simplify:
for case in target_out:
case.simplify()
case.select_top()
for case in decoy_out:
case.simplify()
case.select_top()
target_out = [x for x in target_out if len(x) > 0]
decoy_out = [x for x in decoy_out if len(x) > 0]
return target_out, decoy_out
class CompetativeTargetDecoyInterleavingGlycopeptideMatcher(TargetDecoyInterleavingGlycopeptideMatcher):
'''A variation of :class:`TargetDecoyInterleavingGlycopeptideMatcher` where
a spectrum can have only one match which is either a target or a decoy.
'''
def score_bunch(self, scans, precursor_error_tolerance=1e-5, simplify=True, *args, **kwargs):
target_solutions, decoy_solutions = super(
CompetativeTargetDecoyInterleavingGlycopeptideMatcher, self).score_bunch(
scans, precursor_error_tolerance, simplify, *args, **kwargs)
target_solutions = OrderedDict([(s.scan.id, s) for s in target_solutions])
decoy_solutions = OrderedDict([(s.scan.id, s) for s in decoy_solutions])
remove_target = []
for key in target_solutions:
try:
if target_solutions[key].score > decoy_solutions[key].score:
decoy_solutions.pop(key)
else:
remove_target.append(key)
except KeyError:
pass
for key in remove_target:
target_solutions.pop(key)
return list(target_solutions.values()), list(decoy_solutions.values())
class ComparisonGlycopeptideMatcher(TargetDecoyInterleavingGlycopeptideMatcher):
'''A variation of :class:`TargetDecoyInterleavingGlycopeptideMatcher` where
targets and decoys are drawn from separate hypotheses, and decoys are taken
verbatim from their database without being reversed.
'''
def __init__(self, tandem_cluster, scorer_type, target_structure_database, decoy_structure_database,
minimum_oxonium_ratio=0.05, n_processes=5, ipc_manager=None,
probing_range_for_missing_precursors=3, mass_shifts=None,
batch_size=DEFAULT_WORKLOAD_MAX, peptide_mass_filter=None,
trust_precursor_fits=True, cache_seeds=None, permute_decoy_glycans=False):
super(ComparisonGlycopeptideMatcher, self).__init__(
tandem_cluster, scorer_type, target_structure_database,
n_processes=n_processes, ipc_manager=ipc_manager,
probing_range_for_missing_precursors=probing_range_for_missing_precursors,
mass_shifts=mass_shifts, batch_size=batch_size, peptide_mass_filter=peptide_mass_filter,
trust_precursor_fits=trust_precursor_fits, cache_seeds=cache_seeds,
permute_decoy_glycans=permute_decoy_glycans)
self.tandem_cluster = tandem_cluster
self.scorer_type = scorer_type
self.target_structure_database = target_structure_database
self.decoy_structure_database = decoy_structure_database
self.minimum_oxonium_ratio = minimum_oxonium_ratio
self.target_evaluator = GlycopeptideMatcher(
[], self.scorer_type, self.target_structure_database,
n_processes=n_processes, ipc_manager=ipc_manager,
probing_range_for_missing_precursors=probing_range_for_missing_precursors,
mass_shifts=mass_shifts, peptide_mass_filter=peptide_mass_filter,
trust_precursor_fits=trust_precursor_fits)
decoy_matcher_type = GlycopeptideMatcher
if self.permute_decoy_glycans:
decoy_matcher_type = GlycanFragmentPermutingDecoyGlycopeptideMatcher
self.decoy_evaluator = decoy_matcher_type(
[], self.scorer_type, self.decoy_structure_database,
n_processes=n_processes, ipc_manager=ipc_manager,
probing_range_for_missing_precursors=probing_range_for_missing_precursors,
mass_shifts=mass_shifts, peptide_mass_filter=peptide_mass_filter,
trust_precursor_fits=trust_precursor_fits)
def score_bunch(self, scans, precursor_error_tolerance=1e-5, simplify=True, *args, **kwargs):
# Map scans to target database
self.log("... Querying Targets")
waiting_task_results = ThreadQueue()
def decoy_query_task():
self.log("... Querying Decoys")
workload = self.decoy_evaluator._map_scans_to_hits(
scans, precursor_error_tolerance)
waiting_task_results.put(workload)
workload = self.target_evaluator._map_scans_to_hits(
scans, precursor_error_tolerance)
# Execute the potentially disk-heavy task in the background while
# evaluating the target spectrum matches.
decoy_query_thread = threading.Thread(target=decoy_query_task)
decoy_query_thread.start()
# Evaluate mapped target hits
target_solutions = []
workload_graph = workload.compute_workloads()
total_work = workload.total_work_required(workload_graph)
running_total_work = 0
for i, batch in enumerate(workload.batches(self.batch_size)):
running_total_work += batch.batch_size
self.log("... Batch %d (%d/%d) %0.2f%%" % (
i + 1, running_total_work, total_work,
(100. * running_total_work) / total_work))
target_scan_solution_map = self.target_evaluator._evaluate_hit_groups(
batch, *args, **kwargs)
# Aggregate and reduce target solutions
temp = self.target_evaluator._collect_scan_solutions(
target_scan_solution_map, batch.scan_map)
temp = [case for case in temp if len(case) > 0]
if simplify:
for case in temp:
try:
case.simplify()
case.select_top()
except IndexError:
self.log("Failed to simplify %r" % (case.scan.id,))
raise
target_solutions.extend(temp)
self.debug("... Waiting For Decoy Mapping")
decoy_query_thread.join()
# workload = self.decoy_evaluator._map_scans_to_hits(
# scans, precursor_error_tolerance)
workload = waiting_task_results.get()
# Evaluate mapped target hits
decoy_solutions = []
workload_graph = workload.compute_workloads()
total_work = workload.total_work_required(workload_graph)
running_total_work = 0
for i, batch in enumerate(workload.batches(self.batch_size)):
running_total_work += batch.batch_size
self.log("... Batch %d (%d/%d) %0.2f%%" % (
i + 1, running_total_work, total_work,
(100. * running_total_work) / total_work))
decoy_scan_solution_map = self.decoy_evaluator._evaluate_hit_groups(
batch, *args, **kwargs)
# Aggregate and reduce decoy solutions
temp = self.decoy_evaluator._collect_scan_solutions(decoy_scan_solution_map, batch.scan_map)
temp = [case for case in temp if len(case) > 0]
if simplify:
for case in temp:
try:
case.simplify()
case.select_top()
except IndexError:
self.log("Failed to simplify %r" % (case.scan.id,))
raise
decoy_solutions.extend(temp)
return target_solutions, decoy_solutions
$NetBSD: patch-Opcodes_wavegde.h,v 1.2 2019/11/02 22:25:46 mrg Exp $
Use native len_t on SunOS.
--- Opcodes/wavegde.h.orig 2019-07-12 14:54:19.000000000 -0700
+++ Opcodes/wavegde.h 2019-10-29 13:45:37.120974231 -0700
@@ -43,7 +43,9 @@
#endif
/* TYPEDEFS */
+#ifndef __sun
typedef int64_t len_t; /* length type */
+#endif
/* CLASS DEFINITIONS */
/*
This simple code demonstrates pthread mutexes and condition variables
(ported from a Win32 CreateEvent / WaitForMultipleObjects event example).
This code computes sum of Mercator's series.
Mercator's series expression for ln( x + 1 ) function is:
ln( x + 1 ) = x - x^2/2 + x^3/3 - x^4/4 + x^5/5 - ...
for -1 < x <= 1.
*/
#include <stdio.h>
#include <conio.h>
#include <windows.h>
#include <pthread.h>
#define NUMTHREADS 4
#define SERIES_MEMBER_COUNT 10000
pthread_t masterThreadHandle;
pthread_t threadHandles[NUMTHREADS + 1];
pthread_mutex_t Mutex;
pthread_cond_t eventHandle;
int ThreadsComplete = 0;
double* sums;
double x = 1.0;
double res = 0.0;
double getMember(int n, double x)
{
double numerator = 1;
for( int i=0; i<n; i++ )
numerator = numerator*x;
if ( n % 2 == 0 )
return ( - numerator / n );
else
return numerator/n;
}
void* threadProc(void* par)
{
int threadIndex = *((int *)par);
printf("Thread id = %d\n",threadIndex);
sums[threadIndex] = 0;
for(int i=threadIndex; i<SERIES_MEMBER_COUNT;i+=NUMTHREADS)
sums[threadIndex] += getMember(i+1, x);
// Update the counter and signal under the same mutex the master waits on,
// so the final wakeup cannot be lost.
pthread_mutex_lock(&Mutex);
ThreadsComplete++;
if( ThreadsComplete >= NUMTHREADS )
{
pthread_cond_signal(&eventHandle);
}
pthread_mutex_unlock(&Mutex);
delete par;
return 0;
}
void* masterThreadProc(void* par)
{
//for( int i=0; i < NUMTHREADS; i++ )
//{
// pthread_join(threadHandles[i], NULL);
//}
pthread_mutex_lock(&Mutex);
// Wait in a loop: this handles spurious wakeups and the case where all
// workers finished (and signalled) before the master started waiting.
while( ThreadsComplete < NUMTHREADS )
pthread_cond_wait(&eventHandle, &Mutex);
pthread_mutex_unlock(&Mutex);
// WaitForMultipleObjects(NUMTHREADS, eventHandles, TRUE,INFINITE);
res = 0;
for( int i = 0; i < NUMTHREADS; i++ )
res += sums[i];
return 0;
}
int main()
{
sums = new double[NUMTHREADS];
int iErr(0);
pthread_mutex_init(&Mutex, NULL);
// PTHREAD_COND_INITIALIZER is only valid as an initializer at definition;
// initialize the condition variable dynamically instead.
pthread_cond_init(&eventHandle, NULL);
for( int i = 0; i < NUMTHREADS; i++ )
{
int * threadIdPtr = new int;
*threadIdPtr = i;
iErr = pthread_create(&threadHandles[i], NULL, threadProc, threadIdPtr);
if( iErr )
{
printf("ERROR: Return code from pthread_create() is %d\n", iErr);
}
//eventHandles[i] = CreateEvent(NULL, TRUE, FALSE, NULL);
}
iErr = pthread_create(&threadHandles[NUMTHREADS], NULL, masterThreadProc, NULL);
printf("Count of ln(1 + x) Mercator's series members is %d\n",SERIES_MEMBER_COUNT);
printf("Argument value is %f\n", (double)x);
pthread_join(threadHandles[NUMTHREADS], NULL);
delete[] sums;   // array form matches new double[NUMTHREADS]
// For x = 1 the series converges to ln(2) ~= 0.693147.
printf("Result is %f\n", (double)res);
printf("Press any key ... ");
_getch();
return 0;
}
// Copyright 2021 The casbin Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package discuzx
import "fmt"
type Forum struct {
Fid int
Fup int
Type string
Name string
Status int
Displayorder int
Threads int
Forums []*Forum `xorm:"-"`
}
func getForums() []*Forum {
forums := []*Forum{}
err := adapter.Engine.Table("pre_forum_forum").Find(&forums)
if err != nil {
panic(err)
}
return forums
}
func getForum(id int) *Forum {
forum := Forum{Fid: id}
existed, err := adapter.Engine.Table("pre_forum_forum").Get(&forum)
if err != nil {
panic(err)
}
if existed {
return &forum
} else {
return nil
}
}
func getForumMap() map[int]*Forum {
forums := getForums()
m := map[int]*Forum{}
for _, forum := range forums {
m[forum.Fid] = forum
}
return m
}
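// getStructuredForums builds a forest from the flat forum table: forums of
// type "group" become roots, every other forum is attached to its parent via
// Fup, and names occurring more than once are disambiguated by prefixing the
// parent group's name.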
func getStructuredForums() []*Forum {
res := []*Forum{}
forumMap := getForumMap()
for _, forum := range forumMap {
if forum.Type == "group" {
res = append(res, forum)
} else {
parentForum := forumMap[forum.Fup]
parentForum.Forums = append(parentForum.Forums, forum)
}
}
forumNameCountMap := map[string]int{}
for _, forum := range forumMap {
if forum.Type == "group" {
forumNameCountMap[forum.Name] = 0
continue
}
if v, ok := forumNameCountMap[forum.Name]; ok {
forumNameCountMap[forum.Name] = v + 1
} else {
forumNameCountMap[forum.Name] = 1
}
}
for _, forum := range forumMap {
if forumNameCountMap[forum.Name] > 1 {
parentForum := forumMap[forum.Fup]
forum.Name = fmt.Sprintf("%s-%s", parentForum.Name, forum.Name)
}
}
return res
}
/**
* Created by michalsvacha on 22.04.17.
 */
import java.util.ArrayList;
import java.util.List;

public class TestCase {
private List<City> cities = new ArrayList<City>();
private int[][] distances;
private List<Delivery> deliveries = new ArrayList<Delivery>();
public TestCase(int cityCount) {
this.distances = new int[cityCount][cityCount];
}
public void addCity(City city) {
this.cities.add(city);
}
public void setDistance(int x, int y, int distance) {
this.distances[x][y] = distance;
}
public void addDelivery(Delivery delivery) {
this.deliveries.add(delivery);
}
@Override
public String toString() {
return "\nTestCase{" +
"cities=" + cities + "\n" +
", distances=\n" + this.prettyDistances() +
", deliveries=" + deliveries +
"}\n";
}
public String prettyDistances() {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < this.distances.length; i++) {
for (int j = 0; j < this.distances.length; j++) {
sb.append(this.distances[i][j]).append(" ");
}
sb.append("\n");
}
return sb.toString();
}
private double shortestTime(int from, int to) {
boolean[] visited = new boolean[this.cities.size()];
visited[from] = true;
return visit(from, to, visited, 0, 0, 0, Double.MAX_VALUE);
}
private double visit(int city, int destination, boolean[] visited, double currentTime, int remainingHorseDistance,
int horseSpeed, double bestTime) {
City currentCity = this.cities.get(city);
if (city == destination) {
return currentTime < bestTime ? currentTime : bestTime;
} else if (currentTime > bestTime) {
return bestTime;
}
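// For each unvisited neighbour, explore two options: (1) switch to a fresh
// horse from the current city, and (2) if the horse ridden in has enough
// range left, keep riding it at its original speed. Branches whose running
// time already exceeds bestTime are pruned above.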
for (int i = 0; i < this.distances.length; i++) {
if (i != city && this.distances[city][i] != -1 && !visited[i]) {
double distanceToCity = (double) this.distances[city][i];
double currentCityHorseTime = distanceToCity / (double) currentCity.getAverageHorseSpeed();
int currentCityRemainingHorseDistance = currentCity.getMaxHorseDistance() - (int) distanceToCity;
double previousCityHorseTime = distanceToCity / (double) horseSpeed;
visited[i] = true;
double subTime = visit(i, destination, visited, currentTime + currentCityHorseTime,
currentCityRemainingHorseDistance, currentCity.getAverageHorseSpeed(), bestTime);
if (subTime < bestTime) {
bestTime = subTime;
}
visited[i] = false;
if (remainingHorseDistance >= distanceToCity) {
visited[i] = true;
subTime = visit(i, destination, visited, currentTime + previousCityHorseTime,
remainingHorseDistance - (int) distanceToCity, horseSpeed, bestTime);
if (subTime < bestTime) {
bestTime = subTime;
}
visited[i] = false;
}
}
}
return bestTime;
}
public String deliveryTimes() {
StringBuilder sb = new StringBuilder();
for (Delivery delivery : this.deliveries) {
double shortestTime = this.shortestTime(delivery.getStart() - 1, delivery.getEnd() - 1);
sb.append(shortestTime).append(" ");
}
return sb.toString();
}
}
// Code generated from Test.G4 by ANTLR 4.7.1. DO NOT EDIT.
package parser // Test
import "github.com/antlr/antlr4/runtime/Go/antlr"
type BaseTestVisitor struct {
*antlr.BaseParseTreeVisitor
}
func (v *BaseTestVisitor) VisitExpression(ctx *ExpressionContext) interface{} {
return v.VisitChildren(ctx)
}
import React from "react";
import { Dimensions, Text, View } from "react-native";
import { StatusBar } from "expo-status-bar";
import { styles } from "./OnboardingStyles";
import { nhs } from "../../globals";
import SizedImage from "../../components/SizedImage";
import Button from "../../components/Button";
const hello = require("../../../assets/illustrations/hello.png");
interface WelcomeProps {
installCallback: () => void,
verifyCallback: () => void,
staffCallback: () => void
}
class Welcome extends React.Component<WelcomeProps> {
render() {
return (
<View style={styles.container}>
<StatusBar translucent={true} style="light" />
<SizedImage
source={nhs}
width={100}
onPress={this.props.staffCallback} />
<SizedImage
source={hello}
width={Dimensions.get("window").width - 128}
style={{ marginTop: 48 }} />
<View style={styles.text}>
<Text style={styles.title}>Welcome to the Vaccert app!</Text>
<Text style={styles.body}>Let's start getting your Vaccert set up.</Text>
</View>
<Button text="Install a Vaccert" onPress={this.props.installCallback} />
<Button text="Verify a Vaccert" onPress={this.props.verifyCallback} style={{ marginTop: 18 }} />
</View>
)
}
}
export default Welcome;
The Limited Usefulness of Social Media and Digital Trace Data for Urban Social Research. Drawing on the literature on social science methodology, this paper argues that the potential research contributions of studies using social media data are limited. These data sources are conceptualized as unobtrusive measures, a longstanding category of information for social research. Although useful in some respects, digital unobtrusive measures are limited by their content poverty, focus on espoused theory, and positivist assumptions about social reality. To conclude, the paper describes the limitations of all forms of social research, and the need for mixed methods and action research.
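# Uncommented competitive-programming snippet (problem statement inferred from
# the code, so treat this note as an assumption): emit names so that a window
# of k consecutive names contains a duplicate exactly when its answer is "NO".
# The first k-1 names are fresh; each "YES" appends a fresh name, each "NO"
# repeats the name k-1 positions back, duplicating only inside that window.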
count = 0
def new_name():
global count
if count < 26:
r = 'A' + chr(ord('a') + count)
else:
r = 'B' + chr(ord('a') + count - 26)
count += 1
return r
n, k = map(int, input().split())
s = [s_i == 'YES' for s_i in input().split()]
names = [new_name() for i in range(k - 1)]
for s_i in s:
if s_i:
names.append(new_name())
else:
names.append(names[-(k-1)])
print(' '.join(names))
/* eslint-disable @typescript-eslint/no-namespace, @typescript-eslint/no-empty-interface */
import * as Openapize from "@sigodenjs/openapize";
import * as express from "express";
import * as createDebug from "debug";
import { ServiceGroup, SrvContext, SrvConfig, STOP_KEY } from "@sigodenjs/dee-srv";
import { createSrvs, ServiceOptionMap } from "@sigodenjs/dee-srv-create";
import { Server } from "http";
import { Request, Response, NextFunction, RequestHandler, Express, ErrorRequestHandler } from "express";
export { SecurityError, ValidationError } from "@sigodenjs/openapize";
export { Request, Response, NextFunction, RequestHandler, Express, ErrorRequestHandler, ServiceGroup };
const debugDee = createDebug("dee");
declare module "express" {
interface Request {
openapi: Openapize.API;
srvs: ServiceGroup;
}
}
const DEFAULT_HOST = "localhost";
const DEFAULT_PORT = 3000;
export interface HandlerFuncMap extends Openapize.HandlerFuncMap { }
export type AsyncRequestHandler = (req: Request, res: Response, next: NextFunction) => Promise<any>;
// options to init dee app
export interface Options {
// general config
config: Config;
// options to init openapize service
openapize?: Openapize.Options | Openapize.Options[];
// hook to run before bind route handlers
beforeRoute?: RouteHooks;
// hook to run after bind route handlers
afterRoute?: RouteHooks;
// error handler
errorHandler?: ErrorRequestHandler;
// run when app is ready
ready?: (app: App) => void;
// options to init external services
services?: ServiceOptionMap;
}
export interface App {
srvs: ServiceGroup;
express: Express;
start: () => Promise<Server>;
stop: () => Promise<void>;
}
export interface Service { }
export interface Config extends SrvConfig {
// namespace of service
ns: string;
// name of app
name: string;
// listenning host
host?: string;
// listenning port
port?: number;
// whether production mode
prod?: boolean;
[k: string]: any;
}
export type RouteHooks = (srvs: ServiceGroup, app: Express) => void | RequestHandler[];
function useMiddlewares(srvs: ServiceGroup, app: Express, hooks: RouteHooks) {
if (typeof hooks === "function") {
hooks(srvs, app);
return;
}
for (const mid of Array<RequestHandler>(hooks)) {
app.use(mid);
}
}
export async function init(options: Options): Promise<App> {
debugDee("init");
const app = express();
const srvContext: SrvContext = { config: options.config, srvs: { $config: options.config as any } };
await createSrvs(srvContext, options.services);
const srvs = srvContext.srvs;
app.use((req: Request, res, next) => {
req.srvs = srvs;
next();
});
if (options.beforeRoute) {
debugDee("beforeRoute");
useMiddlewares(srvs, app, options.beforeRoute);
}
debugDee("openize");
if (Array.isArray(options.openapize)) {
for (const openapizeOptions of options.openapize) {
await Openapize.openapize(app, openapizeOptions);
}
} else if (options.openapize) {
await Openapize.openapize(app, options.openapize);
}
if (options.afterRoute) {
debugDee("afterRoute");
useMiddlewares(srvs, app, options.afterRoute);
}
const start = () => {
const port = options.config.port || DEFAULT_PORT;
const host = options.config.host || DEFAULT_HOST;
return new Promise<Server>(resolve => {
if (options.errorHandler) {
app.use(options.errorHandler);
}
debugDee(`listen ${host}:${port}`);
const server = app.listen(port, host, () => {
resolve(server);
});
});
};
const stop = async () => {
const errs = [];
await Promise.all(Object.keys(srvs).map(async key => {
if (srvs[key][STOP_KEY]) {
try {
await srvs[key][STOP_KEY]();
} catch (err) {
errs.push({ err, key });
}
}
}));
if (errs.length > 0) {
throw new DeeStopError(errs);
}
};
const deeApp = { srvs, express: app, start, stop };
if (options.ready) {
debugDee("ready");
options.ready(deeApp);
}
return deeApp;
}
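// Adapts an async (promise-returning) handler to Express: a rejected promise
// is forwarded to the error-handling chain via next() instead of being lost.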
export function resolveAsynRequestHandler(fn: AsyncRequestHandler): RequestHandler {
return (req: Request, res: Response, next: NextFunction) => {
const fnReturn = fn(req, res, next);
Promise.resolve(fnReturn).catch(next);
};
}
export interface ErrItem {
key: string;
err: any;
}
export class DeeStopError extends Error {
public readonly errs: ErrItem[]
constructor(errs: ErrItem[]) {
super("dee cannot stop");
this.errs = errs;
this.name = "DeeStopError";
}
}
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
# See the LICENSE file in the project root for more information.
from dataclasses import dataclass
from pathlib import Path
from typing import cast, Mapping, Optional, Sequence, Type
from .analysis.diffable import get_diffables
from .analysis.process_trace import ProcessedTraces
from .analysis.report import (
print_diff_score_summary,
get_run_metrics_for_diff,
show_diff_from_diffables,
)
from .analysis.types import RunMetrics, SampleKind, SAMPLE_KIND_DOC
from .commonlib.bench_file import (
AllocType,
BenchFile,
BenchOptions,
Benchmark,
CoreclrSpecifier,
parse_bench_file,
GCPerfSimArgs,
MAX_ITERATIONS_FOR_ANALYZE_DOC,
MemoryLoadOptions,
TestConfigContainer,
Config,
Vary,
)
from .commonlib.get_built import get_corerun_path_from_core_root
from .commonlib.collection_util import combine_mappings
from .commonlib.command import Command, CommandKind, CommandsMapping, run_command_worker
from .commonlib.document import handle_doc, OutputOptions
from .commonlib.frozen_dict import FrozenDict
from .commonlib.host_info import read_this_machines_host_info
from .commonlib.option import option_or
from .commonlib.parse_and_serialize import load_yaml, write_yaml_file
from .commonlib.score_spec import ScoreElement, ScoreSpec
from .commonlib.type_utils import argument, with_slots
from .commonlib.util import ensure_empty_dir
from .exec.run_tests import run, RunArgs
SuiteCommand = str
@with_slots
@dataclass(frozen=True)
class SuiteFile:
bench_files: Sequence[Path]
command_groups: Mapping[str, Sequence[SuiteCommand]]
@with_slots
@dataclass(frozen=True)
class SuiteCreateArgs:
path: Path = argument(name_optional=True, doc="Path to directory to write the suite to")
coreclrs: Sequence[Path] = argument(
doc="""
One of:
* A path to a '.yaml' file whose content is suitable to be the 'coreclrs' section of a benchfile.
* A list of core_root directories.
"""
)
proc_count: Optional[int] = argument(
default=None, doc="This is used for both thread and heap count."
)
overwrite: bool = argument(
default=False,
doc="If true, the suite directory will be deleted before creating a new suite.",
)
def suite_create(args: SuiteCreateArgs) -> None:
coreclrs: Mapping[str, CoreclrSpecifier] = _parse_coreclrs(args.coreclrs)
options = BenchOptions(default_iteration_count=2, default_max_seconds=300)
proc_count = option_or(args.proc_count, _get_default_proc_count())
tests: Mapping[str, BenchFile] = {
"normal_workstation": _create_scenario_normal_workstation(coreclrs, options),
"normal_server": _create_scenario_normal_server(coreclrs, options, proc_count),
"high_memory": _create_scenario_high_memory_load(coreclrs, options, proc_count),
# TODO: use a low proc_count here?
"low_memory_container": _create_scenario_low_memory_container(
coreclrs, options, proc_count
),
}
if args.path.exists():
assert args.overwrite
ensure_empty_dir(args.path)
for test_name, bench_file in tests.items():
write_yaml_file(args.path / f"{test_name}.yaml", bench_file, overwrite=True)
command_groups: Mapping[str, Sequence[SuiteCommand]] = {
# "diff": [
# f"diff $suite/{test_name}.yaml --vary coreclr --txt $suite/{test_name}.diff.txt"
# for test_name in tests.keys()
# ]
}
suite_file = SuiteFile(
bench_files=[Path(f"{test_name}.yaml") for test_name in tests],
command_groups=command_groups,
)
write_yaml_file(args.path / "suite.yaml", suite_file, overwrite=True)
def _get_default_proc_count() -> int:
n_processors = read_this_machines_host_info().n_logical_processors
if n_processors <= 2:
return n_processors
elif n_processors <= 4:
return n_processors - 1
else:
return n_processors - 2
SUITE_PATH_DOC = "Path to a 'suite.yaml' generated by `suite-create`."
@with_slots
@dataclass(frozen=True)
class SuiteRunArgs:
suite_path: Path = argument(name_optional=True, doc=SUITE_PATH_DOC)
overwrite: bool = argument(
default=False,
doc="""
This is like the '--overwrite' argument to the normal 'run' command.
""",
)
skip_where_exists: bool = argument(
default=False,
doc="This is like the '--skip-where-exists' argument to the normal 'run' command.",
)
def suite_run(args: SuiteRunArgs) -> None:
suite = load_yaml(SuiteFile, args.suite_path)
for file in suite.bench_files:
bench_file_path = args.suite_path.parent / file
print(f"\n=== {bench_file_path} ===\n")
run(
RunArgs(
bench_file_path=bench_file_path,
overwrite=args.overwrite,
skip_where_exists=args.skip_where_exists,
)
)
@with_slots
@dataclass(frozen=True)
class SuiteRunCommandArgs:
suite_path: Path = argument(name_optional=True, doc=SUITE_PATH_DOC)
command_name: str = argument(doc="Name of a command specified in 'suite.yaml'.")
def suite_run_command(args: SuiteRunCommandArgs) -> None:
from .all_commands import ALL_COMMANDS # pylint:disable=import-outside-toplevel
suite = load_yaml(SuiteFile, args.suite_path)
commands = suite.command_groups[args.command_name]
for command in commands:
# TODO: this will fail if there are spaces in path
assert " " not in str(
args.suite_path.parent
), "TODO: Substitution will fail if there are spaces in path"
full_command = command.replace("$suite", str(args.suite_path.parent))
print(f"=== {full_command} ===")
run_command_worker(ALL_COMMANDS, full_command.split())
def _parse_coreclrs(coreclrs: Sequence[Path]) -> Mapping[str, CoreclrSpecifier]:
if len(coreclrs) == 1 and coreclrs[0].name.endswith(".yaml"):
return load_yaml(
cast(Type[Mapping[str, CoreclrSpecifier]], Mapping[str, CoreclrSpecifier]), coreclrs[0]
)
else:
return {chr(ord("a") + i): _parse_coreclr(coreclr) for i, coreclr in enumerate(coreclrs)}
def _parse_coreclr(coreclr: Path) -> CoreclrSpecifier:
if get_corerun_path_from_core_root(coreclr).exists():
return CoreclrSpecifier(core_root=coreclr)
else:
return CoreclrSpecifier(repo_path=coreclr)
def _normal_benchmarks(proc_count: int) -> Mapping[str, Benchmark]:
tagb_factor = 0.5 if proc_count == 1 else 1
return {
"0gb": Benchmark(
arguments=GCPerfSimArgs(tc=proc_count, tagb=300 * tagb_factor, tlgb=0), min_seconds=10
),
"2gb": Benchmark(
arguments=GCPerfSimArgs(tc=proc_count, tagb=300 * tagb_factor, tlgb=2, sohsi=50)
),
# The pinning makes this test a lot slower, so allocate many fewer BG
"2gb_pinning": Benchmark(
arguments=GCPerfSimArgs(
tc=proc_count, tagb=100 * tagb_factor, tlgb=2, sohsi=50, sohpi=50
)
),
# This must allocate 600GB to ensure the test isn't dominated by
# the startup time of allocating the initial 20GB
"20gb": Benchmark(
arguments=GCPerfSimArgs(
tc=proc_count, tagb=600 * tagb_factor, tlgb=20, sohsi=50, allocType=AllocType.simple
)
),
}
TYPICAL_SCORES: Mapping[str, ScoreSpec] = {
"speed": FrozenDict(
{
"FirstToLastGCSeconds": ScoreElement(weight=1),
"PauseDurationMSec_95P": ScoreElement(weight=1),
}
)
}
LOW_MEMORY_SCORES: Mapping[str, ScoreSpec] = combine_mappings(
TYPICAL_SCORES,
{
"space": FrozenDict(
{
# Better to have a bigger size before, means we are using the space better
"Gen2ObjSpaceBeforeMB_Sum_MeanWhereIsBlockingGen2": ScoreElement(weight=-1),
# We want a lower size after (to have collected more)
"Gen2ObjSizeAfterMB_Sum_MeanWhereIsBlockingGen2": ScoreElement(weight=1),
}
)
},
)
def _create_scenario_normal_workstation(
coreclrs: Mapping[str, CoreclrSpecifier], options: BenchOptions
) -> BenchFile:
common_config = Config(complus_gcserver=False, complus_gcconcurrent=False)
return BenchFile(
vary=Vary.coreclr,
coreclrs=coreclrs,
options=options,
common_config=common_config,
benchmarks=_normal_benchmarks(proc_count=1),
scores=TYPICAL_SCORES,
)
def _create_scenario_normal_server(
coreclrs: Mapping[str, CoreclrSpecifier], options: BenchOptions, proc_count: int
) -> BenchFile:
common_config = Config(
complus_gcserver=True, complus_gcconcurrent=False, complus_gcheapcount=proc_count
)
return BenchFile(
vary=Vary.coreclr,
coreclrs=coreclrs,
options=options,
common_config=common_config,
benchmarks=_normal_benchmarks(proc_count),
scores=TYPICAL_SCORES,
)
def _create_scenario_high_memory_load(
coreclrs: Mapping[str, CoreclrSpecifier], options: BenchOptions, proc_count: int
) -> BenchFile:
common_config = Config(
complus_gcserver=True, complus_gcconcurrent=False, complus_gcheapcount=proc_count
)
# TODO: Don't specify a percent, specify an amount remaining in GB
configs: Mapping[str, Config] = {
"80pct": Config(memory_load=MemoryLoadOptions(percent=80)),
"90pct": Config(memory_load=MemoryLoadOptions(percent=90)),
}
benchmarks: Mapping[str, Benchmark] = {
"a": Benchmark(arguments=GCPerfSimArgs(tc=proc_count, tagb=40, tlgb=5, sohsi=30, sohpi=50))
}
return BenchFile(
vary=Vary.coreclr,
coreclrs=coreclrs,
options=options,
common_config=common_config,
configs=configs,
benchmarks=benchmarks,
scores=LOW_MEMORY_SCORES,
)
def _create_scenario_low_memory_container(
coreclrs: Mapping[str, CoreclrSpecifier], options: BenchOptions, proc_count: int
) -> BenchFile:
# In a small container, coreclr should choose a low heap count for itself
common_config = Config(
complus_gcserver=True,
complus_gcconcurrent=True,
complus_gcheapcount=proc_count,
# Remember, coreclr multiplies container size by 0.75 to get hard limit.
container=TestConfigContainer(memory_mb=600),
)
benchmarks: Mapping[str, Benchmark] = {
"tlgb0.2": Benchmark(
arguments=GCPerfSimArgs(tc=proc_count, tagb=80, tlgb=0.2, sohsi=30, sohpi=50)
)
}
return BenchFile(
vary=Vary.coreclr,
coreclrs=coreclrs,
options=options,
common_config=common_config,
benchmarks=benchmarks,
scores=LOW_MEMORY_SCORES,
)
@with_slots
@dataclass(frozen=True)
class SuiteDiffArgs:
path: Path = argument(name_optional=True, doc=SUITE_PATH_DOC)
max_iterations: Optional[int] = argument(default=None, doc=MAX_ITERATIONS_FOR_ANALYZE_DOC)
sample_kind: SampleKind = argument(default=0, doc=SAMPLE_KIND_DOC)
def suite_diff(args: SuiteDiffArgs) -> None:
suite = load_yaml(SuiteFile, args.path)
bench_files = [parse_bench_file(args.path.parent / bf) for bf in suite.bench_files]
for bench_file in bench_files:
print(f"\n=== {bench_file.path} ===\n")
run_metrics: RunMetrics = get_run_metrics_for_diff(
include_summary=True, sort_by_metric=None, run_metrics=()
)
diffables = get_diffables(
traces=ProcessedTraces(),
paths=(bench_file.path,),
run_metrics=run_metrics,
machines_arg=None,
vary=None,
test_where=None,
sample_kind=args.sample_kind,
max_iterations=args.max_iterations,
)
# Print a summary -- write detailed diff to a file
print_diff_score_summary(diffables)
txt = Path(str(bench_file.path) + ".diff.txt")
# Show summary too
doc = show_diff_from_diffables(
diffables,
metrics_as_columns=False,
sort_by_metric=None,
min_difference_pct=0,
sample_kind=0,
include_summary=True,
)
handle_doc(doc, OutputOptions(txt=txt))
SUITE_COMMANDS: CommandsMapping = {
"suite-create": Command(
kind=CommandKind.suite, fn=suite_create, doc="Generate the default test suite."
),
"suite-diff": Command(
kind=CommandKind.suite,
fn=suite_diff,
doc="""
Runs 'diff' on all tests in a suite.
Outputs detailed diffs to files, and a brief summary on stdout.
""",
),
"suite-run": Command(kind=CommandKind.suite, fn=suite_run, doc="Run all tests in a suite."),
"suite-run-command": Command(
hidden=True, kind=CommandKind.suite, fn=suite_run_command, doc="WIP"
),
}
|
Microstructural Analysis of Fractured Orthopedic Implants

In this paper, the fracture behavior of four types of implants with different geometries (pure titanium locking plate, pure titanium femoral implant, Ti-6Al-4V titanium alloy pelvic implant, X2CrNiMo18-14-3 steel femoral implant) was studied in detail. Each implant fractured in the human body. Scanning electron microscopy (SEM) was used to determine the potential cause of implant fracture. It was found that the implant fractures mainly occurred as a consequence of mechanical overloads resulting from repetitive, prohibited, excessive limb loads or from singular, unintended secondary injuries. Among many possible loading types, the implants were subjected to excessive fatigue loads with additional interactions caused by screws that were mounted in their threaded holes. The results of this work lead to the conclusion that the design of orthopedic implants is not fully sufficient to transduce the mechanical loads acting on them, given the increasing weight of treated patients and their much higher physical activity.

Introduction

Stainless steels and titanium alloys are widely used as implant materials in orthopedic surgery due to their good biocompatibility, corrosion resistance and durability. These materials are suitable for the production of biocomponents used in medicine due to their relatively high biotolerance, which means that they are not toxic and do not induce acute or chronic immune reactions in organs and surrounding viable tissues. One of the most relevant in vitro biocompatibility tests verifies the capability of living cells to multiply and migrate on the surface of the implant material. Austenitic chromium-nickel steel doped with molybdenum (X2CrNiMo18-14-3, also known as 316L) belongs to the materials most extensively used in medicine. Nowadays, it is used to manufacture surgical instruments, surgical and cardiologic implants, and joint prostheses. Cr-Ni-Mo steels are relatively cheap but are characterized by relatively low resistance to crevice corrosion. The corrosion rate strongly depends on the properties of the passive layer, formed by a thin oxide film (Cr2O3) containing small amounts of Co and Mo oxides. Their mechanical properties can be tailored during cold working. Chromium-based steels have been widely used for the production of biocomponents.

Titanium and its alloys are well tolerated by viable tissues. They are increasingly used in orthopedics as a material for the production of trauma implants, including plates, screws, intramedullary nails, external fixators and joint prostheses. They are also used to manufacture surgical instruments. Commercially pure titanium (Cp Ti) is believed to be among the most biocompatible metals due to its capability to form a chemically stable and inert oxide layer. The most important factors characterizing titanium and its alloys include low electrical conductivity, high corrosion resistance, and thermodynamic stability.

Materials and Methods

Four different, exemplary implants were chosen from among 18 parts collected during the two years preceding the coronavirus pandemic, to validate the observations independently of the type of implant, its producer and the alloy itself.
The study focused on implants dedicated to the stabilization of long bone fractures: implants that could be used temporarily and possibly removed when the fracture is healed; implants intended for the bones subjected to the highest mechanical loads; and implants representing the materials most often used for orthopedic purposes, i.e., steel, pure titanium and its alloy (Ti-6Al-4V). Thus, four different parts were investigated: a pure titanium locking plate, a pure titanium femoral implant, a Ti-6Al-4V titanium alloy pelvic implant and an X2CrNiMo18-14-3 steel femoral implant.

All implants served for the stabilization of bone fractures and broke during the treatment. After removal, the microstructural analysis was performed using a JEOL JSM-6360 LA scanning electron microscope (JEOL, Tokyo, Japan) with an energy-dispersive spectroscopy (EDS) attachment (Oxford Instruments, Oxford, UK). Samples were obtained from the implant within the fracture zone. The fracture area was analyzed on both sides of the broken implant.

Pure Titanium Plates

The chemical composition of the titanium locking plate is presented in Table 1. The implant's fracture zone was subjected to observations using a scanning electron microscope (Figure 2b). Figure 2a presents a general view of the fracture of the exploited titanium implant, with marked fracture zones characteristic of fatigue damage: (1; Figure 2a) the focus of the crack, initiated around the inclusions, with a characteristically smoothed surface; (2; Figure 2a) the zone of fatigue breakthrough, characterized by a coarse-grained structure; and (3; Figure 2a) the residual area.

Nucleation of cracks was initiated on the side part of the implant and was followed by progressive crack development (Figure 2b) as a consequence of cyclic loading. Further microscopic analysis performed on the fracture zone showed analogous fracture characteristics throughout the sample surface, as well as the same nucleation points and fracture paths. Nucleation of the cracks (4; Figure 2c), as well as their development (5; Figure 2c), can be observed. Crack development led to the fracture of the material, with friction occurring between the surfaces of the crack during crack propagation.
Typical fatigue behavior of the material (Figure 3b), with the direction of crack propagation (6; Figure 2d) as well as visible traces of inclusions (7; Figure 2d), was observed. The mechanism of the implant's fracture points to its mixed nature, due to the presence of both plastic and brittle zones. On the fracture surface, cavities and extrusions (Figure 3a), characteristic of plastic deformation, and cracks at the grain boundaries, typical of brittle fracture, were observed (Figure 3b). Analysis of the chemical composition of the implant material (Figure 3b, point 1) and of the inclusions in the implant's fracture areas (marked with arrows, Figure 3a,b, point 2) using
Figure 5d presents a macroscopic view of the fracture surfaces found near the threaded hole. These fracture surfaces were characterized by high smoothness, which may indicate that longterm friction occurred between them. As the fracture of the implant was found in this particular area, scanning election microscopy was used to investigate the potential cause of failure. The SEM observations were performed on the inner part of the thread. Pure Titanium Femoral Implant The chemical composition of the titanium alloy locking plate is presented in Table 2. Visual inspection performed on the femoral implant surface ( Figure 5) allows to observe numerous local plastic deformation areas of the material around the threaded holes (Figure 5a,b, areas marked with arrows). Although no material losses were found on the screws, it was assumed that they led to permanent deformation of the implant. Figure 5d presents a macroscopic view of the fracture surfaces found near the threaded hole. These fracture surfaces were characterized by high smoothness, which may indicate that longterm friction occurred between them. As the fracture of the implant was found in this particular area, scanning election microscopy was used to investigate the potential cause of failure. The SEM observations were performed on the inner part of the thread. Table 2. Chemical composition of the pure titanium femoral implant. Element Ti Impurities Wt % 99.12 00.88 Figure 6 presents a general view of the crack in the inner thread of a titanium implant. The fracture was initiated in the center part of the thread (Figure 6a) and propagated deeply into the material as shown with the arrow (Figure 6b), leading to complete fracture of the implant, as shown in Figure 7. Furthermore, in the area of the dominant crack ( Figure 6a, marked with the arrow), a 300-m microcrack was observed (Figure 6c,d, marked with the arrow). Based on the observations performed on the fracture surfaces (Figure 8), it was found that, after the implant fracture, the two separated parts interacted with each other. The observed smooth surfaces (Figure 8a,b) are characteristic for friction processes, which result in surface wear of the material. The analysis of the fracture surface and the nature of the fracture allow us to state that the material was damaged due to the interaction of the implant and the screw that fastened the implant and the femur. Pieces of bone tissue were observed in the structure of the fracture, which was confirmed by the analysis of the chemical composition presented in Table 3. (Figure 6b), leading to of the implant, as shown in Figure 7. Furthermore, in the area of the do ure 6a, marked with the arrow), a 300-m microcrack was observed (Fig with the arrow). Based on the observations performed on the fracture s it was found that, after the implant fracture, the two separated parts in other. The observed smooth surfaces (Figure 8 a,b) are characteristic for which result in surface wear of the material. The analysis of the fractu nature of the fracture allow us to state that the material was damaged tion of the implant and the screw that fastened the implant and the fem tissue were observed in the structure of the fracture, which was confirm of the chemical composition presented in Table 3. (Figure 6b), leading of the implant, as shown in Figure 7. Furthermore, in the area of the d ure 6a, marked with the arrow), a 300-m microcrack was observed (F with the arrow). 
Ti-6Al-4V Titanium Alloy

A Ti-6Al-4V titanium alloy femoral gamma nail with a full set of its components (Figure 9) was subjected first to visual inspection and then to microscopic observations. The chemical composition of the Ti-6Al-4V titanium alloy implant is presented in Table 4. Macroscopic examination of the implant surface allowed us to observe numerous local plastic deformations of the threads (Figure 10, marked with arrows) and of the areas around the threaded holes (Figure 11, marked with arrows). These deformations caused the permanent deformation of the telescopic lag screw (part 7 in Figure 9) that was attached to the nail, passing through the hole in the nail's shaft (Figure 10a). Additionally, propagation of a longitudinal crack from the shaft towards the thread was observed on the threaded surface (Figure 10b, marked with arrow). Simultaneously, the threaded part itself was plastically deformed (Figure 10c, marked with arrow), which constitutes damage disabling its further usage. Permanent deformations of the inner areas of the threaded holes and their locking screws are presented and marked with arrows in Figure 11a-d. Significant abrasive wear of the thread was observed, probably due to the interaction of the screw and the thread itself. On the other hand, traces of material wear in the unthreaded hole were observed as a result of long-term friction (Figure 11a,b, marked with arrows) of the telescopic screw of the nail's shaft.
Figure 9. General view of the broken femoral nail's shaft, its proximal and distal parts, and the set of assembling parts: proximal part of the broken nail's shaft; blocking bolt; two self-tapping locking screws (5 and 6); telescopic lag screw; end cap; cap (b).

The areas marked in Figure 12a during visual inspection were subjected to microscopic observations and are presented in Figure 12b-e according to the numbers given. The crack was initiated at the outer part of the hole (on its edge; Figure 12b,d) and propagated deeply into the material, leading to its complete fracture. Additionally, 1.25-mm- and 0.5-mm-long cracks were observed below the main crack (Figure 12c), as well as micropores found on the edge of the fractured implant (Figure 12e). Based on the microscopic observations, it could be concluded that, after the implant's break, both of its separated parts rubbed on one another, producing the characteristic smooth surfaces of parts succumbing to friction. Inspection of the components of the implant and of the surfaces of its fracture allowed us to conclude that the implant broke in consequence of overloads, with the attaching screws transmitting excessive mechanical stresses from the bone to the implant's shaft.

X2CrNiMo18-14-3 Steel Femoral Implant

The steel femoral condylar plate (Table 5) was made of X2CrNiMo18-14-3 (D) steel (316L, PN-ISO 5832-1 Standard).
A macroscopic inspection (Figure 13) allowed us to observe the foci of the fracture in the area of the threaded hole (Figure 14). Smooth fracture surfaces are characteristic of friction processes, indicating that the two separated parts of the implant interacted with each other after the break. Additionally, visible traces of wear were observed on one of the implant's mounting screws, with damage to a 35-mm-long segment of its thread.

A visual analysis (Figure 15a) revealed a characteristic crater formed around the threaded hole, which occurred due to permanent deformation of the material caused by the transmission of excessive forces by the screw mounted in it. Multiple cracks propagating from the crater along the edge of the hole could also be observed (Figure 15b,c). Moreover, numerous scratches were observed inside the non-threaded part of the hole, most likely caused by screwing the bolt into the hole or by the impact of the bolt head on the side surface of the hole (Figure 15d). The edges of the second part of the fractured implant were also characterized by numerous cracks (Figure 15e-g). Analogously, smooth surfaces of the fracture edge, caused by the friction of one part of the broken implant over the other, were observed (Figure 15h).
Microscopic observations of the inner area of the fracture surface revealed dominant cracks in the inner aspect of the thread (Figure 16, marked with arrow). The crack, which initiated at the upper part of the thread, propagated deeply into the structure of the implant up to its outer surface, leading to its fracture (Figure 16a, marked with arrow). In addition, 100-300-µm microcracks could be observed on the threads and around the unthreaded parts of the hole, as marked with arrows in Figure 16c,d. The bolts were subjected to an additional analysis (Figure 17), which showed a significant degree of wear in one of them: the entire thread, about 30 mm long, was significantly damaged. Inspection of the fracture surface and the nature of the fracture of the implant allowed us to state that the material was damaged due to the screw being tightened to the plate with excessive force.

A microstructural analysis of the fractured implants was performed in this paper. It should be mentioned, however, that it is hard to compare the results obtained in this work with the literature, since knowledge on implant fracture and its potential causes has not yet been reported in detail. More importantly, the complex loading states that the implants were subjected to differed between individual patients.

Summary and Concluding Remarks

The paper focused on the analysis of orthopedic trauma implant failures, which led to bone fracture destabilization. Implant fractures complicate orthopedic procedures, further disabling effective interventions. Clinical practice shows that they occur in a limited number of patients, usually those presenting a fracture healing process or those who, due to unintended secondary injuries or in consequence of transgressing postoperative recommendations, succumb to secondary limb traumatization. In those cases, implant fractures and bone break destabilization point to the causative impact of accidental or cumulative mechanical overloads of an implant. Based on our investigations, it can be presumed that implant fractures are caused by:

1. material impurities, where super-hard aluminum oxide acts as a mechanical strain concentrator, serving as the center of cracks propagating through the structure of an implant,
2. the deficient adjustment of implant designs to the stresses acting on them,

3. improper technology of implant production, especially in the case of the drilling and threading of the holes serving as mounting areas for the screws attaching the implant to the bone,

4. mechanical overloads caused by excessive forces acting on the limbs treated with the particular implant.

Nica et al. analyzed the structure and remnants of implants removed due to their breaks and reported that inappropriate surgical technique and structural material flaws are responsible for most incidences of implant failure. They concluded that improving material quality and implementing higher quality standards would improve treatment results. The only explanation for the discrepancy between their observations and ours comes from the fact that they analyzed implants produced outside Europe.

In the materials investigated, implant fractures most probably occurred as a consequence of mechanical overloads resulting from repetitive, prohibitively excessive limb loads or from singular, unintended secondary injuries. Under the first mechanism, the implant was subjected to relatively mild but repetitive loads which, summed up, damaged it. Under the second, it fractured due to forces acting with an amplitude enormously exceeding those the implant could withstand. Based on a finite element analysis of a broken steel condylar plate, Gervais et al. concluded that regular walks alone create mechanical forces sufficient to break the implant. The crack, initiated in the area of stress concentration by repeated bending loads, propagated into deeper parts, resulting in the implant's fracture. An analogous observation was reported by Hou et al., who analyzed the failure mechanisms of screws manufactured by reputable producers. The results clearly indicated that overload plays a much more important role in implant fractures than material flaws themselves.

To the observations given by our predecessors, we can add an additional one, namely that the current design of orthopedic implants is not fully suitable to transduce the mechanical loads acting on them, due to the increasing weight of treated patients and, much more importantly, their physical activity. Moreover, the design of the implants leads to the generation of stress concentrators that serve as initiators of cracks. An analysis of the surfaces of the fractured implants revealed that, regardless of the initial material state and geometry, the fracture was caused by the concentration of stresses in the holes, including their threaded parts, where the crack initiated and propagated within the material. Most probably, the implants were subjected to excessive fatigue loads with additional effects caused by the interaction between the screws and the threaded holes. The very tight connection between the screws and the threaded holes of the implant initiated cracks that led to significant wear between the working surfaces. The wear of the threads of the screws and plates might reduce the rigidity of the connection between the bone and the implant, thus enhancing the temporary loads between them, which promoted the propagation of the implant's cracks. Taking the above observations into account, in order to reduce the risk of implant fracture, it may be necessary to increase the thickness of the implants, especially in the areas with holes. |
import { ScannedProject } from '@snyk/cli-interface/legacy/common';
import { MonitorMeta } from '../types';
import { config as userConfig } from '../user-config';
export const IMAGE_SAVE_PATH_OPT = 'imageSavePath';
export const IMAGE_SAVE_PATH_ENV_VAR = 'SNYK_IMAGE_SAVE_PATH';
export function isContainer(scannedProject: ScannedProject): boolean {
  // Coerce the optional string length to a strict boolean, as declared.
  return !!scannedProject.meta?.imageName?.length;
}
export function getContainerTargetFile(
scannedProject: ScannedProject,
): string | undefined {
return scannedProject.targetFile;
}
export function getContainerName(
scannedProject: ScannedProject,
meta: MonitorMeta,
): string | undefined {
let name = scannedProject.meta?.imageName;
if (meta['project-name']?.length) {
name = meta['project-name'];
}
if (scannedProject.targetFile) {
// for app+os projects the name of project is a mix of the image name
// with the target file (if one exists)
return name + ':' + scannedProject.targetFile;
} else {
return name;
}
}
export function getContainerProjectName(
scannedProject: ScannedProject,
meta: MonitorMeta,
): string | undefined {
let name = scannedProject.meta?.imageName;
if (meta['project-name']?.length) {
name = meta['project-name'];
}
return name;
}
export function getContainerImageSavePath(): string | undefined {
  // Prefer the environment variable over the persisted user-config value.
  return (
process.env[IMAGE_SAVE_PATH_ENV_VAR] ||
userConfig.get(IMAGE_SAVE_PATH_OPT) ||
undefined
);
}
|
The Aam Aadmi Party (AAP) of Delhi Chief Minister Arvind Kejriwal today lashed out at the Congress party and its president Rahul Gandhi for not reaching out to its leader to seek support for BK Hariprasad – the opposition’s nominee for the Rajya Sabha Deputy Chairman post. Speaking to news agency ANI, party MP Sanjay Singh said that the Congress is the ‘biggest obstacle’ in the opposition’s unity. He said that the 3 AAP MPs abstained from voting because the Congress had not sought support from them and that it was not possible for them to vote for a BJP-backed candidate.
“Looking at Congress’ attitude, we have decided to abstain from voting for Rajya Sabha Deputy Chairman, Congress is the biggest obstacle in Opposition’s unity,” he said.
The Aam Aadmi Party has 3 MPs in the Rajya Sabha. The MPs were not present on the floor of the House today at the time of voting to elect a new Deputy Chairman. Yesterday, Sanjay Singh had said that Congress president Rahul Gandhi should speak to AAP’s national convenor Arvind Kejriwal if he wants its 3 MPs to support the candidature of BK Hariprasad. He had also informed that Bihar CM Nitish Kumar had called Kejriwal to seek ‘our support’ for Harivansh Narayan Singh – the BJP-led NDA’s candidate.
“When Nitish Kumar can seek support for his candidate, why not Rahul Gandhi? When a leader can’t ensure his own candidate’s victory, how can he make tall claims of making his party victorious?” he asked.
Earlier this morning, BJP-led NDA’s candidate Harivansh Narayan Singh defeated Congress’ BK Hariprasad to win the Rajya Sabha election for the Deputy Chairman post. While Harivansh got 125 votes, Hariprasad managed to secure only 105 votes. |
Education-Based Intervention to Prevent Catheter-Associated Bloodstream Infection

TO THE EDITOR: In their recent article, Warren and colleagues described a multicenter, education-based intervention to prevent catheter-associated bloodstream infections. We are concerned that the conclusions may not be warranted, given the way the data were presented and analyzed. The authors conceded that the preintervention-postintervention design may be limited because of other, unmeasured factors that may have accounted for the changes in outcome. Use of this type of study is reasonable in circumstances in which randomization is not possible, but the data presentation and analysis were not optimal in this study. Data in each time period were aggregated, which does not allow an assessment of the natural history of the outcomes, particularly in the preintervention period. For example, rates of catheter-related bloodstream infections may have been decreasing anyway because of some other factor, and the intervention may have been coincidental to this decline. Analysis using segmented regression of an interrupted time series, which indicates whether the preintervention and postintervention slopes of the outcome differ, may have provided more information; a schematic example of such a model is sketched after this letter. Graphic representation of the rates or proportions of each outcome per time period (for example, month or quarter), or an analysis for trend, would have allowed the reader to assess the pattern of the outcome before and after the intervention.

Figure 1 in the article by Warren et al. presents data per time period for the postintervention period. The fact that the reduction in catheter-associated bloodstream infections was not significant until later in the postintervention phase may have reflected the time it took to change healthcare workers' behavior, as suggested by the authors, but it may also have reflected a natural fluctuation of rates. A similar breakdown of rates in the preintervention period may have helped to assess this. It is not clear from the Methods section whether any part of the intervention went beyond the initial 3-month period or whether this was merely the time required for its full implementation.
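For illustration only, the following minimal Python sketch shows one way such a segmented (interrupted time series) regression could be set up. The monthly rates, the intervention month and the variable names are invented for the example and are not taken from the article by Warren et al.

import pandas as pd
import statsmodels.formula.api as smf

# Hypothetical monthly infection rates; the intervention is assumed to start at month 12.
months = list(range(24))
rates = [10.2, 9.8, 10.5, 9.9, 10.1, 9.7, 10.0, 9.6, 9.9, 9.5, 9.8, 9.4,
         9.0, 8.6, 8.3, 7.9, 7.6, 7.4, 7.1, 6.8, 6.6, 6.3, 6.1, 5.9]
df = pd.DataFrame({"month": months, "rate": rates})
df["post"] = (df["month"] >= 12).astype(int)         # level change after the intervention
df["time_after"] = (df["month"] - 12).clip(lower=0)  # slope change after the intervention

# rate = b0 + b1*month + b2*post + b3*time_after; the coefficient b3 tests
# whether the pre- and post-intervention slopes differ.
model = smf.ols("rate ~ month + post + time_after", data=df).fit()
print(model.params)

|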
The Role of CD2AP in the Pathogenesis of Alzheimer's Disease

Alzheimer's disease (AD) is the most common neurodegenerative disease, characterized by an irreversible decline in cognition with unclear pathogenesis. Recently, accumulating evidence has revealed that CD2-associated protein (CD2AP), a scaffolding molecule that regulates signal transduction and cytoskeletal molecules, is implicated in AD pathogenesis. Several single nucleotide polymorphisms (SNPs) in the CD2AP gene are associated with higher risk for AD, and mRNA levels of CD2AP are decreased in peripheral lymphocytes of sporadic AD patients. Furthermore, CD2AP loss of function is linked to enhanced Aβ production, Tau-induced neurotoxicity, abnormal modulation of neurite structure and reduced blood-brain barrier integrity. This review summarizes the recent discoveries about the genetics and known functions of CD2AP, as well as the recent evidence concerning the roles of CD2AP in AD pathogenesis; CD2AP may be a promising therapeutic target for AD.

Alzheimer's disease (AD), the most common form of dementia, becomes more prevalent as the population ages. Both genetic and environmental factors contribute to its risk. Amyloid precursor protein (APP), presenilin 1 (PSEN1) and presenilin 2 (PSEN2) are the major causative genes of familial AD (FAD). The apolipoprotein E (APOE) ε4 allele has been consistently recognized to increase susceptibility to sporadic AD (SAD). During the last decades, genome-wide association studies (GWAS) have identified a series of single nucleotide polymorphisms (SNPs) associated with late-onset AD (LOAD). However, the mechanisms that lead to synapse degeneration and neuron death remain elusive. Investigations have identified multiple perturbations of cellular function in AD neurons, such as excessive amyloid β protein (Aβ) deposition and neurofibrillary tangles, impaired mitochondrial function, abnormal calcium metabolism and altered axonal transport. Several SAD risk genes have been studied in both cell and animal models, providing more insight into the cellular mechanisms underlying SAD neuron degeneration. Recently, several studies reported that SNPs in the CD2-associated protein (CD2AP) gene are associated with SAD. Furthermore, emerging evidence has demonstrated that CD2AP loss of function may play an important role in SAD pathogenesis. Here, we review the recent discoveries about the genetics and known functions of CD2AP and summarize the recent evidence concerning the roles of CD2AP in AD pathogenesis.

Figure 1. (A) The CD2AP gene is located on chromosome 6 (6p12.3). (B) The protein has three consecutive SH3 domains at the amino terminus; the middle region is a proline-rich sequence; the carboxy terminus contains a duplex helical structural region with a binding site for the actin cytoskeleton.

Biochemical properties of CD2AP

CD2AP was first identified in 1998 and was named for its ability to bind to CD2 and promote CD2 aggregation, stabilizing the interaction between T cells and antigen-presenting cells. The CD2AP gene is located on chromosome 6 (6p12.3) and contains 18 exons. The encoded protein (CD2AP) is an adaptor protein consisting of 639 amino acid residues with a molecular weight of 80 kDa. The protein has three consecutive SH3 domains at the amino terminus. Studies have shown that SH3 domains can interact with many signal transduction and cytoskeletal molecules. The middle region is a proline-rich sequence.
The carboxy terminus contains a duplex helical structural region and has a binding site for the actin cytoskeleton. The chromosome location of CD2AP and a schematic representation of its structural domains are shown in Figure 1. The protein has been well studied with respect to its role in dynamic actin remodeling and membrane trafficking during endocytosis and cytokinesis. CD2AP is ubiquitously expressed, with higher levels in immune cells, epithelial cells and neurons. Previous studies have shown that CD2AP is necessary for signaling at the slit diaphragm of the kidney. Haploinsufficiency or homozygous mutation of human CD2AP leads to renal disease. Mice lacking CD2AP show a phenotype of congenital nephrotic syndrome, because of decreased integrity of the podocyte foot processes, and die of severe proteinuria at 6-7 weeks. CD2AP has also been shown to associate with several scaffold and focal adhesion proteins, such as F-actin and p130CAS, supporting a critical role for CD2AP in specialized cell contacts. In addition, CD2AP was reported to bind directly to the p53 protein in the cytoplasm. Recently, a study demonstrated that CD2AP in CD4 T cells modulates the differentiation of follicular helper T cells during chronic lymphocytic choriomeningitis virus infection. Another study showed that CD2AP contributes to hepatitis C virus propagation and steatosis by disrupting insulin signaling.

Genetics of the CD2AP gene in AD

In 2011, Hollingworth et al. first reported, in a large staged GWAS of AD, that a specific SNP in CD2AP (rs9349407), located in intron 1, is significantly associated with AD risk in both stages (P1 = 8.0 × 10⁻⁴, OR1 = 1.11; P2 = 8.6 × 10⁻⁹, OR2 = 1.11, respectively) in a Caucasian population. Another study, performed by Naj and colleagues using a 3-stage GWAS design, also observed this result (rs9349407; P = 8.6 × 10⁻⁹). Since then, numerous replication studies have been performed to confirm this result. However, the replication results have been inconsistent: a meta-analysis comprising 6 case-control series of 2634 LOAD patients and 4201 controls in Caucasian populations found no relationship between the CD2AP variant (rs9349407, OR = 0.97, p = 0.56) and LOAD. Similarly, no association was found between rs9349407 or rs9296559 and LOAD risk in a northern Chinese Han population or a Korean population by Tan et al. and Chung et al., respectively. Recently, another meta-analysis of 54,936 subjects from East Asian, American, Canadian and European populations showed a significant association between the CD2AP SNP rs9349407 and AD. The studies performed to assess the association between rs9349407 and AD have been summarized by Chen et al. Recently, our group performed a two-stage study showing that the C allele of rs9296559 increased the risk of SAD (P = 7.69 × 10⁻⁹, OR = 1.77). In addition, rs9349407 in CD2AP, combined with rs11218343 in SORL1, rs17125944 in FERMT2, rs6859 in PVRL2, and rs157580 and rs2075650 in TOMM40, was used to calculate a genetic risk score (GRS) for predicting SAD risk (a schematic computation is sketched below); the area under the receiver operating characteristic curve (AUC) for discriminating cases from controls was 0.58 for the GRS, 0.60 for APOE, and 0.64 for the GRS and APOE combined. The associations between several other SNPs in CD2AP and SAD were also examined in a number of replication studies. The associations between SAD and SNPs in CD2AP are summarized in Table 1.
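As a purely illustrative aside, and not code from any of the cited studies, a weighted GRS of this kind is typically computed as the sum of risk-allele counts multiplied by per-allele log odds ratios, and its discriminative ability is then summarized by the AUC. The following minimal Python sketch uses simulated genotypes, invented weights and simulated case/control labels:

import numpy as np
from sklearn.metrics import roc_auc_score

rng = np.random.default_rng(0)
n = 1000
genotypes = rng.integers(0, 3, size=(n, 6))              # risk-allele counts for six hypothetical SNPs
log_or = np.array([0.10, 0.12, 0.15, 0.08, 0.20, 0.25])  # assumed per-allele log odds ratios

grs = genotypes @ log_or                                 # weighted genetic risk score

# Simulated case/control labels loosely driven by the score, for illustration only.
prob = 1 / (1 + np.exp(-(grs - grs.mean())))
labels = rng.binomial(1, prob)

print("AUC of the GRS:", roc_auc_score(labels, grs))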
Role of CD2AP in amyloidogenesis

CD2AP is expressed in neurons and capillaries in the brain. We previously reported that the gene expression of CD2AP in peripheral blood lymphocytes was decreased in Chinese patients with SAD compared with cognitively normal controls, implying that CD2AP loss of function may be implicated in SAD pathogenesis. Aβ peptides are generated via sequential cleavage of APP by the β-secretase and γ-secretase complexes during the course of its secretory pathway. Several LOAD risk genes (APOE, PICALM, BIN1, SORL1 and PLD3) have been reported to be implicated in Aβ42 production. Liao et al. reported that CD2AP could affect Aβ levels and the Aβ42/Aβ40 ratio in vitro, while its effects on Aβ metabolism in vivo were subtle. As part of adaptor protein complexes, CD2AP loss of function affects the sorting process in the endosome-lysosome pathway. One study reported that glucose transporter 4 (Glut4) trafficking was impaired in podocytes lacking CD2AP. In addition, CD2AP plays a role in maintaining early endosome morphology and the traffic between early and late endosomes. A recent study demonstrated that CD2AP regulates Aβ generation through a neuron-specific polarization of Aβ in dendritic early endosomes. CD2AP could affect APP and BACE1 sorting in early endosomes by distinct mechanisms.

Role of CD2AP in Tau-induced toxicity

Abnormally phosphorylated Tau protein, which forms neurofibrillary tangles in the brain, is one of the hallmarks of AD. In a Drosophila model of AD, susceptibility genes implicated in Tau-mediated mechanisms were screened. The results identified cindr, the fly ortholog of human CD2AP, as a modulator of Tau-mediated neurotoxicity. Furthermore, cindr loss of function (cindr−/−) enhances Tau-induced neuronal loss in the adult fly brain. Significantly reduced survival times were observed in cindr−/− flies. In addition, cindr−/− flies also showed reduced synaptic strength and altered short-term plasticity.

Role of CD2AP in other pathways underlying AD pathogenesis

CD2AP has been extensively studied in kidney podocytes. Lack of CD2AP leads to decreased foot process integrity in mouse podocytes. Previous studies have reported that podocyte major processes share many cell-biological characteristics with neural dendrites. Recently, a study revealed that CD2AP can modulate neurite length, neurite complexity and growth cone filopodia number in neurons, and these effects were in accordance with CD2AP expression levels. CD2AP regulates collateral sprouting and structural plasticity of intact adult axons by coordinating NGF signaling. The blood-brain barrier (BBB) is a continuous endothelial membrane that separates the brain and extracellular fluid from the circulating blood in the central nervous system (CNS). BBB breakdown and vascular degeneration play a critical role in AD pathogenesis. For example, APOE ε4-positive individuals have reduced cerebral blood flow and increased BBB leakiness. By contrast, individuals carrying APOE ε3, a protective factor for AD, show a decreased degree of BBB breakdown. CD2AP is enriched in brain microvascular endothelial cells, an essential component of the BBB. A recent study showed that CD2AP-deficient mice had reduced BBB integrity, suggesting that the cerebrovascular roles of CD2AP could contribute to its effect on AD risk. CD2AP-deficient mice had mild motor and anxiety deficits and were more susceptible to pharmacologically induced seizures; no other obvious behavioral abnormalities were observed. CD2AP and several other AD risk genes (BIN1 and PICALM) have also been predicted to participate in the autophagy pathway.
In CD2AP−/− podocytes, one of the important proteins in autophagy signaling (p62) was upregulated, indicating that the absence of CD2AP induced podocyte injury by affecting autophagy signaling. In addition, downregulation of full-length caspase-1 was observed in podocytes lacking CD2AP. However, the specific role of CD2AP in autophagy signaling is still elusive and needs to be further addressed. Neuroinflammation is one of the pathological hallmarks of AD. Microglia are the main immune cells in the CNS. Accumulating evidence has revealed microglial dysfunction in both AD patients and mouse models. CD2AP is expressed in both neurons and microglia in the brain and is postulated to be involved in immune system regulation. Identifying the role of CD2AP in microglia will help us better understand the pathogenesis of AD. The potential pathways underlying the roles of CD2AP in the pathogenesis of AD are summarized in Figure 2.

Conclusions

CD2AP is an adaptor protein that plays important roles in regulating signal transduction and cytoskeletal molecules. Although the association between CD2AP and higher risk of AD has been well addressed, the mechanisms by which CD2AP is implicated in AD pathogenesis are still unclear. More and more evidence reveals that CD2AP loss of function leads to enhanced Aβ production, Tau-induced neurotoxicity, synapse dysfunction and abnormal neurite structure. Thus, targeting CD2AP may serve as a potential strategy for AD therapy. Elevating the expression of CD2AP in specific brain areas could be a promising treatment. However, the more accurate and detailed mechanisms by which CD2AP contributes to AD pathogenesis should be further explored. |
Evaluation of the Efficacy of Excimer Laser Ablation of Cross-Linked Porcine Cornea

Background: A combination of riboflavin/UVA cross-linking (CXL) and excimer laser ablation is a promising therapy for treating corneal ectasia. The cornea is strengthened by cross-linking, while the irregular astigmatism is reduced by laser ablation. This study aims to compare the efficacy of excimer laser ablation on porcine corneas with and without cross-linking.

Methods and Findings: The porcine cornea was de-epithelialized and treated with 0.1% riboflavin solution for 30 minutes. One half of the cornea was exposed to UVA radiation for another 30 minutes, while the control half of the cornea was protected from the UVA using a metal shield. Phototherapeutic keratectomy (PTK) was then performed on the central cornea. Corneal thickness at 5 paired locations on the horizontal line, ±0.5, ±1.0, ±1.5, ±2.0 and ±2.5 mm from the central spot, was measured using optical coherence tomography prior to and after PTK. The ablation depth was then determined from the corneal thickness. There was a 9% difference (P<0.001) in the overall ablation depth between the CXL-half corneas (158±22 µm) and the control-half corneas (174±26 µm). The ablation depths at all 5 corresponding locations on the CXL-half were significantly smaller (P<0.001).

Conclusion: The efficacy of the laser ablation appears to be lower in cross-linked cornea. Current ablation algorithms may need to be modified for cross-linked corneas.

Introduction

Keratoconus is a bilateral, non-symmetrical, progressive corneal degenerative disorder. There are many approaches available to correct the refractive errors caused by keratoconus, such as spectacles, rigid gas-permeable contact lenses, intracorneal ring segments and photorefractive keratectomy. However, none of the treatments above has shown successful control of the progression, which has led to corneal transplantation in progressive cases. Seiler first introduced corneal collagen cross-linking (CXL), a method to biomechanically stabilize the cornea and stop the progression of keratoconus. This method has raised new hope in treating not only keratoconus and pellucid marginal degeneration but also laser-assisted in situ keratomileusis-induced keratectasia, radial keratotomy-induced keratectasia, keratitis and bullous keratopathy. Even though CXL suppresses the progression, vision does not improve much because of the remaining refractive error. Therefore, the combination with photorefractive keratectomy (PRK) or phototherapeutic keratectomy (PTK) was introduced for optical regularization. There are currently two modes of the combined surgery: one uses laser treatment right before CXL, and the other is based on laser treatment 6-12 months after CXL. The aim of the current study is to assess excimer laser ablation efficacy in CXL-treated cornea.

Preparation

Thirty porcine eyes from the local abattoir (Meat Processing Plant of Wenzhou Shopping Basket Group Co., Ltd., Wenzhou, Zhejiang, China) were conserved in containers at 4 °C immediately after enucleation and were used within 2 to 10 hours post-mortem. To minimize the error caused by stromal hydration, only clear corneas with intact epithelium and no initial edema were selected. To avoid potential error due to individual variations, one half of each cornea was cross-linked and the other half served as a control. Nasal-temporal comparisons were preferred to superior-inferior ones, since porcine corneal thickness (CT) differs least along this line.
All experiments were performed at 22 °C and 60% humidity. Physiological saline solution was injected into the vitreous to maintain a stable and optimized intraocular pressure (Tn), which was estimated by digital palpation, i.e., by gently pressing the index finger against the cornea. Each eye was then mounted in a fixation device with a 9.0 mm suction ring centered on the apex throughout the experiment to avoid eye rotation (Figure 1). The vertical line on the suction ring separated the cornea into a CXL-half and a control-half, while the horizontal line on the suction ring represented the direction along which the CT was measured. The intersection of the two lines was used as the central spot for pachymetry and laser ablation. To avoid any influence of light, all the eyes were stored in a dark room throughout the experiment.

Corneal Collagen Cross-linking (CXL)

After de-epithelialization of the central 9 mm of the cornea, 0.1% riboflavin solution (0.1% riboflavin, 20% dextran-T-500, MEDIO-CROSS 3.0 mL isotonic solution, Kiel, Germany) was applied for 30 minutes. Riboflavin filled the area within the suction ring and formed a thin and intact film throughout the CXL procedure (Figure 1). Prior to irradiation, one half of the cornea was sheltered with a metal shield (without contact) so that only the uncovered half was irradiated (Figure 2), as only the irradiated cornea could be cross-linked. The desired irradiance of the UV lamp was calibrated using a UVA meter. The exposed half of the cornea was then irradiated with UVA for another 30 minutes at an irradiance of 3 mW/cm² (dose 5.4 J/cm², i.e., 3 mW/cm² × 1800 s), 5 cm away from the cornea, using a 370 nm UV lamp (UV-X, IROC AG, Zurich, Switzerland). The riboflavin film was wiped off right before pachymetry.

Pachymetry

Pachymetry was performed prior to and after PTK using optical coherence tomography (OCT) (RTVue-100, Optovue, Inc., Fremont, CA, USA). The horizontal line and the central spot on the suction ring were placed on the OCT's horizontal meridian and measurement center, respectively. Six OCT images were captured consecutively for each eye, and the three best images, with regard to centration in both the vertical and horizontal directions as well as the absence of tilt or rotation, were selected for analysis. CT was measured at 5 pairs of equidistant locations, ±0.5, ±1.0, ±1.5, ±2.0 and ±2.5 mm away from the central spot, using the RTVue's "flap tool" in the CL-line scan (Figure 3). To minimize the influence of stromal dehydration, the time interval of the measurements was standardized.

Phototherapeutic Keratectomy (PTK)

The excimer laser (WaveLight Laser Technology AG, Erlangen, Germany) ablation (400 Hz) was centered on the central spot by projecting the laser's red cross onto the cross-mark of the suction ring. A circular PTK ablation 6.0 mm in diameter and 50 µm in depth was repeated 3 times to reach the intended ablation depth of 150 µm. By this method, both the CXL- and control-halves were ablated simultaneously.

Statistical Analysis

The laser ablation depth was calculated by subtracting the post-PTK CT from the pre-PTK CT and was compared using a paired t-test. Multiple linear regression analysis was used to analyze the association between ablation depth and distance from the central spot. The statistical analysis was performed with the Statistical Package for the Social Sciences (SPSS 17.0 GmbH, Munich, Germany). Statistical significance was defined as p<0.05.
Porcine eye mounted in a fixation device throughout the experiment. A 9.0 mm suction ring with a cross-mark was applied to the eye and centered on the corneal apex to avoid eye rotation. The vertical mark (black arrow) on the suction ring separated the cornea into the CXL half and the control half, while the horizontal line (white arrow) represented the direction along which the CT was measured. The intersection (red spot) of the two lines (dotted lines, invisible) was used as the central spot for pachymetry and laser ablation. In this picture, riboflavin solution (yellow colored) fills the suction ring and forms a thin, intact film over the de-epithelialized cornea throughout the CXL procedure. doi:10.1371/journal.pone.0046232.g001

Results

In all 30 eyes, the CXL halves appeared less reflective than the control halves after CXL, and this property became more obvious after PTK (Figure 4). The average ablation depths at the 5 corresponding locations are shown in Table 1. There was a 9% difference (p<0.001) in overall ablation depth between the CXL halves (158±22 μm) and the control halves (174±26 μm). Moreover, the ablation depths were significantly smaller on the CXL halves than on the control halves at all 5 corresponding locations (Figure 5). The ablation depth was strongly correlated with distance from the central spot in both the CXL group (R = 0.999, p = 0.038) and the control group (R = 0.985, p = 0.015). In each group, the laser ablation depth increased significantly as a function of distance from the central spot, with the peripheral locations ablated more than the central ones. With the central ablation depth set to 150 μm, the best-fit functions were as follows:

The CXL half: Y = 151.414 + 12.558X − 14.697X² + 5X³
The control half: Y = 163.751 − 7.590X + 7.752X²

Y represents the laser ablation depth (μm) and X represents the distance (mm) from the central spot.
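As a quick self-check on these fits (our illustration, not part of the original analysis; the coefficients are copied verbatim from the equations above), evaluating both polynomials at the measured offsets reproduces the center-to-periphery increase and the reported group means of roughly 158 and 174 μm:

# Ablation depth (um) as a function of distance X (mm) from the central spot.
def depth_cxl(x):
    return 151.414 + 12.558 * x - 14.697 * x ** 2 + 5 * x ** 3

def depth_control(x):
    return 163.751 - 7.590 * x + 7.752 * x ** 2

for x in (0.5, 1.0, 1.5, 2.0, 2.5):
    print("%.1f mm: CXL %.1f um, control %.1f um" % (x, depth_cxl(x), depth_control(x)))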
Discussion

Ever since its clinical introduction by Schnitzler in 2000, riboflavin/UVA-induced CXL has proven to be a promising treatment for keratoconus. This photochemical reaction creates new intrafibrillar bonds in the corneal stroma and thereby changes corneal properties, including increasing collagen fiber diameter and enhancing biomechanical rigidity. The distribution of riboflavin and the stiffening effect are similar in porcine and human corneas, and the maximal effect of CXL is limited to the anterior 300 μm. Since the PTK ablation in our study was confined to the anterior 150 μm of the porcine corneas, the data obtained are of reference value. Still, further research is needed before they can be applied clinically to human eyes. Corneal stromal demarcation lines at a depth of 300 μm have been observed two weeks after CXL, presumably due to differences in refractive index and/or reflection properties between corneal layers with and without CXL. In accordance with these findings, we observed an apparent difference in surface reflectivity between the CXL and control halves of the cornea. Kanellopoulos has demonstrated the effects of CXL on corneal ablation in a patient with keratoconus: a more rigid cornea might have an ablation rate different from a normal cornea, so the eye treated with CXL one year earlier was undercorrected by 25% with PRK to obtain the optimal effect.

Therefore, we investigated the ablation rate using porcine corneas in this study and found a significantly smaller ablation depth in the CXL halves compared with the control halves. The interaction of the 193 nm excimer laser with corneal tissue is a photochemical effect: each photon supplies enough energy to directly break a molecular bond, and each pulse effectively removes a certain amount of corneal tissue. If equal energy is required to break CXL-induced bonds and original bonds, we may speculate that breaking the additional bonds in the CXL half demands extra energy, and in total more pulses, than in the control half. As the same amount of laser energy was applied to both sides of the cornea in this study, the CXL-strengthened half was ablated less. The conventional ablation rate of 0.23 to 0.30 μm/mm²/pulse should accordingly be adjusted to 0.21 to 0.27 μm/mm²/pulse.

In this study, the ablation depth increased from the center towards the periphery on both the CXL and control halves. This is probably due to the PTK ablation profile and the different curvature of the porcine cornea compared with the human one. The current PTK ablation profile, designed for the human cornea, delivers more pulses in the mid-periphery in order to compensate for energy loss there. This peripheral compensation is excessive for the porcine cornea because of its flatter curvature. As a result, the periphery is ablated more than the center.

The amount of corneal hydration in the CXL and control halves should be identical. The pre-ocular isotonic riboflavin film played an important role in preventing the de-epithelialized and exposed cornea from dehydrating. In addition, the slight decrease in CT has nothing to do with UVA irradiation. Unlike Kampik's study, in which different eyes served as the CXL group and the control group, each eye in our study was divided into a CXL half and a control half, so that both halves received the same amount of riboflavin solution and maintained identical hydration. A difference in CT between the CXL and control halves is therefore unlikely to have been induced in our study.

A combination of CXL and excimer laser ablation is becoming a popular clinical treatment for corneal ectasia. In this study, the mean ablation depth in CXL porcine cornea was reduced by 9%, which may help to modify the ablation algorithms of excimer treatment for CXL-treated eyes. However, our results reflect an experimental study on porcine eyes, in which the time interval between CXL and laser ablation was much shorter than in a clinical situation, where ablation may occur several months after CXL. More data based on human tissue are necessary.
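As a rough cross-check of that adjustment (our arithmetic, not the authors'), scaling the conventional rate by the observed 9% efficiency loss lands on the quoted range:

# Apply the observed 9% reduction to the conventional ablation-rate bounds.
for rate in (0.23, 0.30):
    print("%.2f -> %.3f um/mm^2/pulse" % (rate, rate * (1 - 0.09)))
# 0.23 -> 0.209 and 0.30 -> 0.273, i.e. roughly the 0.21-0.27 range above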
Focus Home Interactive has announced free DLC for Wargame: European Escalation called Conquest, which contains a new multiplayer mode and seven new maps. The game is also being discounted on Steam.
Numerous new control zones have been added, and the DLC is playable against the AI as well as other players.
The Siege mode, introduced with the last DLC, has had two new battlefields added, while the Destruction and Conquest modes also benefit from the new Mecklenburg map.
The multiplayer menus have also been improved, providing better visibility of players waiting on a ranked game and clearer game-creation options.
Many units have had their stats updated for better balancing as well.
Wargame: European Escalation will also be on sale this weekend on Steam, where you can get it for 50% off. The sale starts today, and runs through Monday, July 9.
Screenshots of the DLC are below. |
package models
import (
"time"
"github.com/vipin23/vmart-api/api_server"
)
type ResourceOperation struct {
BaseModel
Path string `gorm:"size:1024;not null;index" json:"path"`
HTTPMethod string `gorm:"size:10;not null;index" json:"http_method"`
Role []*Role `gorm:"many2many:role_resourcesoperations;"`
Description string `gorm:"size:2048;not null;index" json:"description"`
}
func (r *ResourceOperation) Prepare() {
r.ID = 0
r.CreatedAt = time.Now()
r.UpdatedAt = time.Now()
}
func (u *ResourceOperation) Save() (*ResourceOperation, error) {
var err error
err = api_server.DefaultServer.DB.Debug().Create(&u).Error
if err != nil {
return &ResourceOperation{}, err
}
return u, nil
}
func (r *ResourceOperation) Upsert() (*ResourceOperation, error) {
existing, err := r.FindByMethodAndPath(r.HTTPMethod, r.Path)
if err != nil {
return r.Save()
}
return r.Update(existing.ID)
}
func (r *ResourceOperation) FindByMethodAndPath(method, path string) (*ResourceOperation, error) {
existing := ResourceOperation{}
err := api_server.DefaultServer.DB.Debug().Where("http_method = ? AND path = ?", method, path).First(&existing).Error
if err != nil {
return &ResourceOperation{}, err
}
return &existing, err
}
func (r *ResourceOperation) Update(uid uint64) (*ResourceOperation, error) {
db := api_server.DefaultServer.DB.Debug().Model(&ResourceOperation{}).Where("id = ?", uid).
Take(&ResourceOperation{}).UpdateColumns(
map[string]interface{}{
"description": r.Description,
"updated_at": time.Now(),
},
)
if db.Error != nil {
return &ResourceOperation{}, db.Error
}
// Re-fetch the record so we return the updated resource operation
err := api_server.DefaultServer.DB.Debug().Model(&ResourceOperation{}).Where("id = ?", uid).Take(&r).Error
if err != nil {
return &ResourceOperation{}, err
}
return r, nil
}
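// Usage sketch (illustrative comment only, not part of the original file; it
// assumes api_server.DefaultServer has already been initialized with a live DB):
//
// op := &ResourceOperation{Path: "/products", HTTPMethod: "GET", Description: "List products"}
// op.Prepare()
// if _, err := op.Upsert(); err != nil {
// log.Println("upsert failed:", err)
// }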
|
package middleware
import (
"net/http"
"testing"
"github.com/insionng/vodka"
"github.com/insionng/vodka/test"
"github.com/stretchr/testify/assert"
)
func TestHTTPSRedirect(t *testing.T) {
e := vodka.New()
next := func(c vodka.Context) (err error) {
return c.NoContent(http.StatusOK)
}
req := test.NewRequest(vodka.GET, "http://insionng.com", nil)
res := test.NewResponseRecorder()
c := e.NewContext(req, res)
HTTPSRedirect()(next)(c)
assert.Equal(t, http.StatusMovedPermanently, res.Status())
assert.Equal(t, "https://insionng.com", res.Header().Get(vodka.HeaderLocation))
}
func TestHTTPSWWWRedirect(t *testing.T) {
e := vodka.New()
next := func(c vodka.Context) (err error) {
return c.NoContent(http.StatusOK)
}
req := test.NewRequest(vodka.GET, "http://insionng.com", nil)
res := test.NewResponseRecorder()
c := e.NewContext(req, res)
HTTPSWWWRedirect()(next)(c)
assert.Equal(t, http.StatusMovedPermanently, res.Status())
assert.Equal(t, "https://www.insionng.com", res.Header().Get(vodka.HeaderLocation))
}
func TestWWWRedirect(t *testing.T) {
e := vodka.New()
next := func(c vodka.Context) (err error) {
return c.NoContent(http.StatusOK)
}
req := test.NewRequest(vodka.GET, "http://insionng.com", nil)
res := test.NewResponseRecorder()
c := e.NewContext(req, res)
WWWRedirect()(next)(c)
assert.Equal(t, http.StatusMovedPermanently, res.Status())
assert.Equal(t, "http://www.insionng.com", res.Header().Get(vodka.HeaderLocation))
}
func TestNonWWWRedirect(t *testing.T) {
e := vodka.New()
next := func(c vodka.Context) (err error) {
return c.NoContent(http.StatusOK)
}
req := test.NewRequest(vodka.GET, "http://www.insionng.com", nil)
res := test.NewResponseRecorder()
c := e.NewContext(req, res)
NonWWWRedirect()(next)(c)
assert.Equal(t, http.StatusMovedPermanently, res.Status())
assert.Equal(t, "http://insionng.com", res.Header().Get(vodka.HeaderLocation))
}
|
"Because now you’ve got less space in those oceans because the bottom is moving up," Rep. Mo Brooks said during the science committee hearing.
A rolling stone may gather no moss, but if it tumbles into the ocean, you can expect the seas to rise.
At least that's the theory embraced by Rep. Mo Brooks, R-Ala., during a hearing held Wednesday by the House Committee on Science, Space, and Technology on how technology can be used to address climate change.
While questioning Philip Duffy, president of Woods Hole Research Center, about what, in addition to climate change, might be driving rising sea levels, Brooks suggested erosion might be a factor.
"Every single year that we’re on Earth, you have huge tons of silt deposited by the Mississippi River, by the Amazon River, by the Nile, by every major river system — and for that matter, creek, all the way down to the smallest systems," Brooks said. "And every time you have that soil or rock whatever it is that is deposited into the seas, that forces the sea levels to rise. Because now you’ve got less space in those oceans because the bottom is moving up."
Brooks pointed to the White Cliffs of Dover and to California "where you have the waves crashing against the shorelines" and "you have the cliffs crash into the sea."
"All of that displaces the water which forces it to rise, does it not?" Brooks asked.
"I’m pretty sure that on human time scales, those are minuscule effects," Duffy answered.
An analysis by The Washington Post found that Duffy is correct. The Post found that it would take the equivalent of the top five inches of land from the entire surface area of the United States to cause the oceans to rise the 3.3 millimeters a year currently seen. And those five inches would have to fall into the oceans every year to explain the current rate at which they are rising. |
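The Post's arithmetic is easy to reproduce with round figures. A back-of-the-envelope sketch (our illustration; the ocean and U.S. surface-area values are standard approximations, not numbers from the article):

# How deep a layer of the U.S. would have to erode into the sea each year
# to account for 3.3 mm of annual sea-level rise.
OCEAN_AREA_M2 = 3.61e14  # ~361 million km^2 of ocean surface
US_AREA_M2 = 9.83e12     # ~9.83 million km^2, total U.S. surface area
RISE_M = 3.3e-3          # observed annual rise

volume = OCEAN_AREA_M2 * RISE_M  # added water volume per year, m^3
depth = volume / US_AREA_M2      # equivalent soil depth over the U.S., m
print("%.1f inches per year" % (depth * 39.37))  # ~4.8 inches, i.e. roughly five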
#!/usr/bin/env python
#
# Copyright 2019 OpenGEE Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Abstraction script for retrieving system information so that other code and
executables can have a platform-independent way of retrieving needed info.
New and existing code should be made to pull information from here so that
it is retrieved in a consistent manner with less code changes needed
elsewhere.
"""
import sys
import os
import socket
def GetHostName(useFqdn=False):
    """Returns the host name, and with FQDN if specified."""
    thisHost = "localhost"

    if useFqdn:
        thisHost = socket.getfqdn()

        # If the server is configured correctly getfqdn() works fine, but it might
        # take what's in /etc/hosts first, so double check to see if
        # hostname is more appropriate to avoid using localhost or something else
        if thisHost == "localhost" or thisHost == "127.0.0.1" or thisHost == "127.0.1.1" or "." not in thisHost:
            thisHost = socket.gethostname()
    else:
        # hostname itself could be set to FQDN too, so it could be the same as above
        thisHost = socket.gethostname()

    return thisHost


def Usage(cmd):
    """Show proper usage for this command."""
    print("usage: %s [hostname [-f]]" % cmd)
    sys.exit(1)


def main(args):
    if len(args) < 2:
        Usage(args[0])

    if args[1] == "hostname":
        useFqdn = False
        if len(args) > 2:
            useFqdn = args[2] == "-f"
        print(GetHostName(useFqdn))
    else:
        Usage(args[0])


if __name__ == "__main__":
    main(sys.argv)
|
Q:
Possible internal contamination from soaking or scalding slaughtered chicken in hot water to ease plucking
I understand that a slaughtered chicken may become externally contaminated due to the impurities present in water.
Over the course of a processing day and despite scald tank
temperatures of 50-60C, the water in scald tanks can become
contaminated with salmonellas and campylobacters as a consequence of
involuntary defaecation (Humphrey and Lanning, 1987; Mulder et al.,
1978). The bacterial load of the tank water can also be increased by
microorganisms present on the feathers and skin of the birds (Mulder
et al., 1978; Berndtson et al., 1992; Kotula et al., 1995; Q22-Q28
Q23-Q29) Kotula and colleagues (1995) report more than seven logs
CFU/ml of rinse for both campylobacters and salmonella on some
carcasses immediately before scalding. When a carcass enters the
scald tank, it is not unusual for water to be inhaled into the lungs
(Thomson and Kotula, 1959). If there is contamination present in the
scald tank water and blood is still circulating in the carcass,
bacteria can be transported by the circulatory system into internal
organs and muscles.
Source: http://www.ukmeat.org/FSAMeat/PoultryScalding.htm
I am not talking about contaminations like the above. I am asking about any possible internal contamination.
During scalding, do you think that the water is hot enough,
or the process is long enough, to substantially increase a whole chicken's core temperature, and to actually start some internal body process (metabolism perhaps)? Since evisceration is yet to be done, are there any chances of the organ walls breaking down and leaching filth into the meat? Is it possible for the filth to escape the organs and permeate the meat? We are assuming the organs themselves are neither punctured nor torn.
I may be talking about a very remote possibility, but I really need to know this.
A:
Are you looking to do this for industrial purposes? Or are you just doing this at home? Also, what is your major concern - putrid water getting into the lungs and growing from there? The water should be hot enough to kill most bacteria at boiling after a relatively short exposure (anything from defecation or exposed on the feathers).
As for internal temperature, at a small home scale, I don't think it's that big of a deal. Whenever we slaughtered our chickens we always scalded them to remove the feathers, and never had any issue. Same with the hogs (removing hair).
Anything which may get kicked off internally should be halted/paused relatively quickly by proper sanitation and food prep techniques after plucking. If you are cooking the bird right away, you shouldn't have any issue because it would take a while at room temperature for most bacteria to really start working their mojo. Assuming you slaughter, scald, defeather, clean, and then do whatever butchering is necessary (if any), you're talking about an hour to an hour and a half (generously). Afterwards, it'll go in a freezer or a refrigerator (both of which will slow the metabolism of any bacteria), or a pan (which will kill the bacteria using proper cooking techniques). There simply shouldn't be enough time IMHO. Now, I wouldn't let the bird sit around for a few hours, but that's pretty common sense.
Stayers' Hurdle
History
The inaugural race was run in 1912 at Prestbury Park over 3 miles with £100 (£200 in 1913) prize money to the winner and £10 to the runner-up. It was called "Stayers Selling Hurdle" and was a Weight for Age Selling type of event with the winning horse being sold for £50 after the race. The race was dropped from the festival programme twice during 1928–1929 and in 1939–1945 but in 1946 it replaced the Spa Hurdle which was previously run in 1923 and 1942 over 2 miles. From 1946 to 1967 the Spa Hurdle was run over the same 3 miles until being renamed in 1972 as the Stayers' Hurdle, when it was sponsored by Lloyds Bank. It was backed by Waterford Crystal from 1978 to 1990, and by Bonusprint from 1991 to 2004. The race used to be held on either the Tuesday or the Wednesday of the Festival, but it was moved to the Thursday in 1993.
The title of the race was changed to the World Hurdle when Ladbrokes took over the sponsorship in 2005. Their sponsorship came to an end with the 2015 renewal and the 2016 race was sponsored by Ryanair. In 2017 and 2018 it was sponsored by Sun Bets and the title reverted to the Stayers' Hurdle. Since 2019 the race has been sponsored by Sun Racing. |
Defence ministry official says artillery fire landed in the waters off Yeonpyeong island in apparent training exercise.
North Korea has fired artillery in the direction of a South Korean island in an apparent training exercise, according to South Korean media and a government official.
A South Korean defence ministry official said the shells landed in the waters off Yeonpyeong island on Tuesday but it was not clear whether it was inside the South's territorial waters.
Yonhap news agency said South Korea fired back with its artillery. It said the shelling started at about 2pm local time (0500 GMT).
"Three shots were heard. One shell landed near the Northern Limit Line," Yonhap quoted a military official as saying.
The incident took place near the disputed Northern Limit Line (NLL), the scene of several skirmishes over the past decade.
Fishing boats in the vicinity were called back to port and Yeonpyeong residents were evacuated into emergency shelters, media reports said.
Last November four South Koreans - two civilians and two marines - were killed on the island when the North shelled part of Yeonpyeong.
And in March 2010, a South Korean navy ship, the Cheonan, was ripped in two by an explosion, killing 46 sailors. Pyongyang denied any involvement but international investigators suggested the North bore responsibility for the incident.
The line separating North and South Korea, drawn at the close of the 1950-53 war between the two nations, remains a fierce point of dispute.
North Korea argues that the line should run farther south, but Seoul fears it would endanger fishing around five South Korean islands and hamper access to its port at Incheon.
The November attack marked a new level of hostility along the contested line.
Tensions had eased since the start of the year after the North's renewed calls for dialogue, including the resumption of six-party talks aimed at ending Pyongyang's nuclear arms programme.
The two Koreas, Russia, the US, Japan and China are involved in the on-off disarmament talks. |
<filename>programs/Xserver/fb/fbarc.c
/*
* Id: fbarc.c,v 1.1 1999/11/02 03:54:45 keithp Exp $
*
* Copyright © 1998 <NAME>
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of Keith Packard not be used in
* advertising or publicity pertaining to distribution of the software without
* specific, written prior permission. Keith Packard makes no
* representations about the suitability of this software for any purpose. It
* is provided "as is" without express or implied warranty.
*
* KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
#ifdef HAVE_DIX_CONFIG_H
#include <dix-config.h>
#endif
#include "fb.h"
#include "mizerarc.h"
#include <limits.h>
typedef void (*FbArc) (FbBits *dst,
FbStride dstStride,
int dstBpp,
xArc *arc,
int dx,
int dy,
FbBits and,
FbBits xor);
void
fbPolyArc (DrawablePtr pDrawable,
GCPtr pGC,
int narcs,
xArc *parcs)
{
FbArc arc;
if (pGC->lineWidth == 0)
{
#ifndef FBNOPIXADDR
arc = 0;
if (pGC->lineStyle == LineSolid && pGC->fillStyle == FillSolid)
{
switch (pDrawable->bitsPerPixel)
{
case 8: arc = fbArc8; break;
case 16: arc = fbArc16; break;
#ifdef FB_24BIT
case 24: arc = fbArc24; break;
#endif
case 32: arc = fbArc32; break;
}
}
if (arc)
{
FbGCPrivPtr pPriv = fbGetGCPrivate (pGC);
FbBits *dst;
FbStride dstStride;
int dstBpp;
int dstXoff, dstYoff;
BoxRec box;
int x2, y2;
RegionPtr cclip;
cclip = fbGetCompositeClip (pGC);
fbGetDrawable (pDrawable, dst, dstStride, dstBpp, dstXoff, dstYoff);
while (narcs--)
{
if (miCanZeroArc (parcs))
{
box.x1 = parcs->x + pDrawable->x;
box.y1 = parcs->y + pDrawable->y;
/*
* Because box.x2 and box.y2 get truncated to 16 bits, and the
* RECT_IN_REGION test treats the resulting number as a signed
* integer, the RECT_IN_REGION test alone can go the wrong way.
* This can result in a server crash because the rendering
* routines in this file deal directly with cpu addresses
* of pixels to be stored, and do not clip or otherwise check
* that all such addresses are within their respective pixmaps.
* So we only allow the RECT_IN_REGION test to be used for
* values that can be expressed correctly in a signed short.
*/
x2 = box.x1 + (int)parcs->width + 1;
box.x2 = x2;
y2 = box.y1 + (int)parcs->height + 1;
box.y2 = y2;
if ( (x2 <= SHRT_MAX) && (y2 <= SHRT_MAX) &&
(RECT_IN_REGION(pDrawable->pScreen, cclip, &box) == rgnIN) )
(*arc) (dst, dstStride, dstBpp,
parcs, pDrawable->x + dstXoff, pDrawable->y + dstYoff,
pPriv->and, pPriv->xor);
else
miZeroPolyArc(pDrawable, pGC, 1, parcs);
}
else
miPolyArc(pDrawable, pGC, 1, parcs);
parcs++;
}
fbFinishAccess (pDrawable);
}
else
#endif
miZeroPolyArc (pDrawable, pGC, narcs, parcs);
}
else
miPolyArc (pDrawable, pGC, narcs, parcs);
}
|
1. Field of the Invention
The present invention relates to an image processing apparatus for dividing impositioned image data into a plurality of pages, and to a method of controlling this apparatus.
2. Description of the Related Art
For jobs in which imposition binding has been performed by a DTP (Desktop Publishing) application or utility, there is a function for repetitive job introduction by storage in the memory of an image processing apparatus. This has made it possible to shorten the time it takes to perform imposition binding of image data. However, with regard to jobs in which imposition binding has been performed by a DTP application or utility, there may be a need to obtain an output in which imposition and binding settings have been removed.
Japanese Patent Laid-Open No. 2006-285610 proposes an image processing apparatus in which re-printing is prepared for by storing pre-RIP data in a memory. This makes it unnecessary for the user to re-introduce a non-imposition job from an application and therefore shortens operation time. Further, Japanese Patent No. 3912012 proposes an image dividing apparatus for dividing images from impositioned image data. With the image dividing apparatus described in Japanese Patent No. 3912012, a plurality of pages of page image data are divided based upon an image file, in which a plurality of items of image data have been impositioned, and the job settings.
However, the following problems arise in the prior art mentioned above. For example, with the image processing apparatus described in Japanese Patent Laid-Open No. 2006-285610, it is necessary to store image data in memory beforehand in order to perform re-printing, and the size of the image data stored in memory becomes very large. As a consequence, it is necessary to increase memory storage capacity or to place a limit on the number of documents that can be stored.
On the other hand, with the image dividing apparatus described in Japanese Patent No. 3912012, only post-RIP data is stored in memory and hence there is no particular problem ascribable to memory storage capacity. However, when the number of bound and impositioned pages increases, the problem is that the operation of dividing impositioned images takes time. |
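To make the dividing step concrete, the sketch below (ours, not from either cited reference) shows the core of such a division: computing per-page crop boxes over an N-up impositioned sheet. It assumes a plain cols-by-rows layout with no gutters, creep, or rotation; a real implementation would take these parameters from the job settings.

def split_imposition(sheet_w, sheet_h, cols, rows):
    """Yield (left, top, right, bottom) crop boxes, one per impositioned page."""
    page_w, page_h = sheet_w // cols, sheet_h // rows
    for row in range(rows):
        for col in range(cols):
            yield (col * page_w, row * page_h,
                   (col + 1) * page_w, (row + 1) * page_h)

# Example: a 2-up sheet of 5100x3300 px divides into two 2550x3300 px pages.
print(list(split_imposition(5100, 3300, cols=2, rows=1)))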
The salacious “dossier” that describes ties between Donald J. Trump and the Russian government is back in the news. The Trump administration says new reporting that the Clinton campaign helped to pay for the report is evidence that the entire Russia inquiry is just politics. Also, we have a rare on-the-record conversation with President Trump’s top lawyer, Ty Cobb, about the investigation. Guests: Kenneth P. Vogel, a New York Times reporter based in Washington; Matt Apuzzo, who interviewed Mr. Cobb. For more information on today’s episode, visit nytimes.com/thedaily. |
// NOTE: the package path below is inferred from the
// com.yahoo.zookeeper.server.ServerCnxnIface references in the javadoc;
// project-internal types (ZooKeeperServer, ZooLog, Request, QuorumPeer,
// FollowerHandler, the jute archives and protocol records,
// ByteBufferInputStream, AuthenticationProvider, Watcher, Id,
// KeeperException, OpCode) are assumed to resolve from the same source tree.
package com.yahoo.zookeeper.server;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.nio.ByteBuffer;
import java.nio.channels.CancelledKeyException;
import java.nio.channels.Channel;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Set;
import java.util.concurrent.LinkedBlockingQueue;

/**
* This class handles communication with clients using NIO. There is one per
* client, but only one thread doing the communication.
*/
public class NIOServerCnxn implements Watcher, ServerCnxn {
static public class Factory extends Thread {
ZooKeeperServer zks;
ServerSocketChannel ss;
Selector selector = Selector.open();
int packetsSent;
int packetsReceived;
HashSet<NIOServerCnxn> cnxns = new HashSet<NIOServerCnxn>();
QuorumPeer self;
long avgLatency;
long maxLatency;
long minLatency = 99999999;
int outstandingLimit = 1;
void setStats(long latency, long avg) {
this.avgLatency = avg;
if (latency < minLatency) {
minLatency = latency;
}
if (latency > maxLatency) {
maxLatency = latency;
}
}
public Factory(int port) throws IOException {
super("NIOServerCxn.Factory");
setDaemon(true);
this.ss = ServerSocketChannel.open();
ss.socket().bind(new InetSocketAddress(port));
ss.configureBlocking(false);
ss.register(selector, SelectionKey.OP_ACCEPT);
start();
}
public Factory(int port, QuorumPeer self) throws IOException {
this(port);
this.self = self;
}
public void startup(ZooKeeperServer zks) throws IOException,
InterruptedException {
zks.startup();
setZooKeeperServer(zks);
}
public void setZooKeeperServer(ZooKeeperServer zks) {
this.zks = zks;
if (zks != null) {
this.outstandingLimit = zks.getGlobalOutstandingLimit();
zks.setServerCnxnFactory(this);
} else {
this.outstandingLimit = 1;
}
}
private void addCnxn(NIOServerCnxn cnxn) {
synchronized (cnxns) {
cnxns.add(cnxn);
}
}
public void run() {
while (!ss.socket().isClosed()) {
try {
selector.select(1000);
Set<SelectionKey> selected;
synchronized (this) {
selected = selector.selectedKeys();
}
for (SelectionKey k : selected) {
if ((k.readyOps() & SelectionKey.OP_ACCEPT) != 0) {
SocketChannel sc = ((ServerSocketChannel) k
.channel()).accept();
sc.configureBlocking(false);
SelectionKey sk = sc.register(selector,
SelectionKey.OP_READ);
NIOServerCnxn cnxn = new NIOServerCnxn(zks, sc, sk,
this);
sk.attach(cnxn);
addCnxn(cnxn);
} else if ((k.readyOps() & (SelectionKey.OP_READ | SelectionKey.OP_WRITE)) != 0) {
NIOServerCnxn c = (NIOServerCnxn) k.attachment();
c.doIO(k);
}
}
selected.clear();
} catch (Exception e) {
ZooLog.logException(e);
}
}
ZooLog.logTextTraceMessage("NIOServerCnxn factory exitedloop.",
ZooLog.textTraceMask);
clear();
ZooLog.logError("=====> Goodbye cruel world <======");
// System.exit(0);
}
/**
* clear all the connections in the selector
*
*/
synchronized public void clear() {
selector.wakeup();
synchronized (cnxns) {
// got to clear all the connections that we have in the selector
for (Iterator<NIOServerCnxn> it = cnxns.iterator(); it
.hasNext();) {
NIOServerCnxn cnxn = it.next();
it.remove();
try {
cnxn.close();
} catch (Exception e) {
// Do nothing.
}
}
}
}
public void shutdown() {
try {
ss.close();
clear();
this.interrupt();
this.join();
} catch (Exception e) {
ZooLog.logException(e);
}
if (zks != null) {
zks.shutdown();
}
}
synchronized void closeSession(long sessionId) {
selector.wakeup();
synchronized (cnxns) {
for (Iterator<NIOServerCnxn> it = cnxns.iterator(); it
.hasNext();) {
NIOServerCnxn cnxn = it.next();
if (cnxn.sessionId == sessionId) {
it.remove();
try {
cnxn.close();
} catch (Exception e) {
}
break;
}
}
}
}
}
/**
* The buffer will cause the connection to be close when we do a send.
*/
static final ByteBuffer closeConn = ByteBuffer.allocate(0);
Factory factory;
ZooKeeperServer zk;
private SocketChannel sock;
private SelectionKey sk;
boolean initialized;
ByteBuffer lenBuffer = ByteBuffer.allocate(4);
ByteBuffer incomingBuffer = lenBuffer;
LinkedBlockingQueue<ByteBuffer> outgoingBuffers = new LinkedBlockingQueue<ByteBuffer>();
int sessionTimeout;
int packetsSent;
int packetsReceived;
ArrayList<Id> authInfo = new ArrayList<Id>();
LinkedList<Request> outstanding = new LinkedList<Request>();
void sendBuffer(ByteBuffer bb) {
synchronized (factory) {
try {
sk.selector().wakeup();
// ZooLog.logTextTraceMessage("Add a buffer to outgoingBuffers",
// ZooLog.CLIENT_DATA_PACKET_TRACE_MASK);
// ZooLog.logTextTraceMessage("sk " + sk + " is valid: " +
// sk.isValid(), ZooLog.CLIENT_DATA_PACKET_TRACE_MASK);
outgoingBuffers.add(bb);
if (sk.isValid()) {
sk.interestOps(sk.interestOps() | SelectionKey.OP_WRITE);
}
} catch (RuntimeException e) {
ZooLog.logException(e);
throw e;
}
}
}
void doIO(SelectionKey k) throws InterruptedException {
try {
if (sock == null) {
return;
}
if (k.isReadable()) {
int rc = sock.read(incomingBuffer);
if (rc < 0) {
throw new IOException("Read error");
}
if (incomingBuffer.remaining() == 0) {
incomingBuffer.flip();
if (incomingBuffer == lenBuffer) {
readLength(k);
} else if (!initialized) {
packetsReceived++;
factory.packetsReceived++;
readConnectRequest();
lenBuffer.clear();
incomingBuffer = lenBuffer;
} else {
packetsReceived++;
factory.packetsReceived++;
readRequest();
lenBuffer.clear();
incomingBuffer = lenBuffer;
}
}
}
if (k.isWritable()) {
// ZooLog.logTextTraceMessage("outgoingBuffers.size() = " +
// outgoingBuffers.size(),
// ZooLog.CLIENT_DATA_PACKET_TRACE_MASK);
if (outgoingBuffers.size() > 0) {
// ZooLog.logTextTraceMessage("sk " + k + " is valid: " +
// k.isValid(), ZooLog.CLIENT_DATA_PACKET_TRACE_MASK);
ByteBuffer bbs[] = outgoingBuffers
.toArray(new ByteBuffer[0]);
// Write as much as we can
long i = sock.write(bbs);
ByteBuffer bb;
// Remove the buffers that we have sent
while (outgoingBuffers.size() > 0
&& (bb = outgoingBuffers.peek()).remaining() == 0) {
if (bb == closeConn) {
throw new IOException("closing");
}
if (bb.remaining() > 0) {
break;
}
packetsSent++;
factory.packetsSent++;
outgoingBuffers.remove();
}
// ZooLog.logTextTraceMessage("after send,
// outgoingBuffers.size() = " + outgoingBuffers.size(),
// ZooLog.CLIENT_DATA_PACKET_TRACE_MASK);
}
synchronized (this) {
if (outgoingBuffers.size() == 0) {
if (!initialized
&& (sk.interestOps() & SelectionKey.OP_READ) == 0) {
throw new IOException("Responded to info probe");
}
sk.interestOps(sk.interestOps()
& (~SelectionKey.OP_WRITE));
} else {
sk
.interestOps(sk.interestOps()
| SelectionKey.OP_WRITE);
}
}
}
} catch (CancelledKeyException e) {
close();
} catch (IOException e) {
// ZooLog.logException(e);
close();
}
}
private void readRequest() throws IOException {
// We have the request, now process and setup for next
InputStream bais = new ByteBufferInputStream(incomingBuffer);
BinaryInputArchive bia = BinaryInputArchive.getArchive(bais);
RequestHeader h = new RequestHeader();
h.deserialize(bia, "header");
// Through the magic of byte buffers, txn will not be
// pointing
// to the start of the txn
incomingBuffer = incomingBuffer.slice();
if (h.getType() == OpCode.auth) {
AuthPacket authPacket = new AuthPacket();
ZooKeeperServer.byteBuffer2Record(incomingBuffer, authPacket);
String scheme = authPacket.getScheme();
AuthenticationProvider ap = zk.authenticationProviders.get(scheme);
if (ap == null
|| ap.handleAuthentication(this, authPacket.getAuth()) != KeeperException.Code.Ok) {
if (ap == null)
ZooLog.logError("No authentication provider for scheme: "
+ scheme);
else
ZooLog.logError("Authentication failed for scheme: "
+ scheme);
// send a response...
ReplyHeader rh = new ReplyHeader(h.getXid(), 0,
KeeperException.Code.AuthFailed);
sendResponse(rh, null, null);
// ... and close connection
sendBuffer(NIOServerCnxn.closeConn);
disableRecv();
} else {
ZooLog.logError("Authentication succeeded for scheme: "
+ scheme);
ReplyHeader rh = new ReplyHeader(h.getXid(), 0,
KeeperException.Code.Ok);
sendResponse(rh, null, null);
}
return;
} else {
zk.submitRequest(this, sessionId, h.getType(), h.getXid(),
incomingBuffer, authInfo);
}
if (h.getXid() >= 0) {
synchronized (this) {
outstandingRequests++;
// check throttling
if (zk.getInProcess() > factory.outstandingLimit) {
disableRecv();
// following lines should not be needed since we are already
// reading
// } else {
// enableRecv();
}
}
}
}
public void disableRecv() {
sk.interestOps(sk.interestOps() & (~SelectionKey.OP_READ));
}
public void enableRecv() {
if (sk.isValid()) {
int interest = sk.interestOps();
if ((interest & SelectionKey.OP_READ) == 0) {
sk.interestOps(interest | SelectionKey.OP_READ);
}
}
}
private void readConnectRequest() throws IOException, InterruptedException {
BinaryInputArchive bia = BinaryInputArchive
.getArchive(new ByteBufferInputStream(incomingBuffer));
ConnectRequest connReq = new ConnectRequest();
connReq.deserialize(bia, "connect");
ZooLog.logWarn("Connected to " + sock.socket().getRemoteSocketAddress()
+ " lastZxid " + connReq.getLastZxidSeen());
if (zk == null) {
throw new IOException("ZooKeeperServer not running");
}
if (connReq.getLastZxidSeen() > zk.dataTree.lastProcessedZxid) {
ZooLog.logError("Client has seen "
+ Long.toHexString(connReq.getLastZxidSeen())
+ " our last zxid is "
+ Long.toHexString(zk.dataTree.lastProcessedZxid));
throw new IOException("We are out of date");
}
sessionTimeout = connReq.getTimeOut();
sessionId = connReq.getSessionId();
byte passwd[] = connReq.getPasswd();
if (sessionTimeout < zk.tickTime * 2) {
sessionTimeout = zk.tickTime * 2;
}
if (sessionTimeout > zk.tickTime * 20) {
sessionTimeout = zk.tickTime * 20;
}
// We don't want to receive any packets until we are sure that the
// session is setup
disableRecv();
if (sessionId != 0) {
zk.reopenSession(this, sessionId, passwd, sessionTimeout);
ZooLog.logWarn("Renewing session " + Long.toHexString(sessionId));
} else {
zk.createSession(this, passwd, sessionTimeout);
ZooLog.logWarn("Creating new session "
+ Long.toHexString(sessionId));
}
initialized = true;
}
private void readLength(SelectionKey k) throws IOException {
// Read the length, now get the buffer
int len = lenBuffer.getInt();
if (!initialized) {
// We take advantage of the limited size of the length to look
// for cmds. They are all 4-bytes which fits inside of an int
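// (Illustrative aside, not in the original: a 4-letter command such as "ruok"
// arrives as its ASCII bytes packed big-endian into this length int, i.e.
// ruokCmd == ('r' << 24) | ('u' << 16) | ('o' << 8) | 'k', which is why a
// plain int comparison against the command constants suffices here.)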
if (len == ruokCmd) {
sendBuffer(imok.duplicate());
sendBuffer(NIOServerCnxn.closeConn);
k.interestOps(SelectionKey.OP_WRITE);
return;
} else if (len == killCmd) {
System.exit(0);
} else if (len == getTraceMaskCmd) {
long traceMask = ZooLog.getTextTraceLevel();
ByteBuffer resp = ByteBuffer.allocate(8);
resp.putLong(traceMask);
resp.flip();
sendBuffer(resp);
sendBuffer(NIOServerCnxn.closeConn);
k.interestOps(SelectionKey.OP_WRITE);
return;
} else if (len == setTraceMaskCmd) {
incomingBuffer = ByteBuffer.allocate(8);
int rc = sock.read(incomingBuffer);
if (rc < 0) {
throw new IOException("Read error");
}
System.out.println("rc=" + rc);
incomingBuffer.flip();
long traceMask = incomingBuffer.getLong();
ZooLog.setTextTraceLevel(traceMask);
ByteBuffer resp = ByteBuffer.allocate(8);
resp.putLong(traceMask);
resp.flip();
sendBuffer(resp);
sendBuffer(NIOServerCnxn.closeConn);
k.interestOps(SelectionKey.OP_WRITE);
return;
} else if (len == dumpCmd) {
if (zk == null) {
sendBuffer(ByteBuffer.wrap("ZooKeeper not active \n"
.getBytes()));
} else {
StringBuffer sb = new StringBuffer();
sb.append("SessionTracker dump: \n");
sb.append(zk.sessionTracker.toString()).append("\n");
sb.append("ephemeral nodes dump:\n");
sb.append(zk.dataTree.dumpEphemerals()).append("\n");
sendBuffer(ByteBuffer.wrap(sb.toString().getBytes()));
}
k.interestOps(SelectionKey.OP_WRITE);
return;
} else if (len == reqsCmd) {
StringBuffer sb = new StringBuffer();
sb.append("Requests:\n");
synchronized (outstanding) {
for (Request r : outstanding) {
sb.append(r.toString());
sb.append('\n');
}
}
sendBuffer(ByteBuffer.wrap(sb.toString().getBytes()));
k.interestOps(SelectionKey.OP_WRITE);
return;
} else if (len == statCmd) {
StringBuffer sb = new StringBuffer();
sb.append("Clients:\n");
for (SelectionKey sk : factory.selector.keys()) {
Channel channel = sk.channel();
if (channel instanceof SocketChannel) {
NIOServerCnxn cnxn = (NIOServerCnxn) sk.attachment();
sb.append(" "
+ ((SocketChannel) channel).socket()
.getRemoteSocketAddress() + "["
+ Integer.toHexString(sk.interestOps())
+ "](queued=" + cnxn.outstandingRequests
+ ",recved=" + cnxn.packetsReceived + ",sent="
+ cnxn.packetsSent + ")\n");
}
}
sb.append("\n");
sb.append("Latency min/avg/max: " + factory.minLatency + "/"
+ factory.avgLatency + "/" + factory.maxLatency + "\n");
sb.append("Received: " + factory.packetsReceived + "\n");
sb.append("Sent: " + factory.packetsSent + "\n");
if (zk != null) {
sb.append("Outstanding: " + zk.getInProcess() + "\n");
sb.append("Zxid: "
+ Long.toHexString(zk.dataTree.lastProcessedZxid)
+ "\n");
}
// sb.append("Done: " + ZooKeeperServer.getRequests() + "\n");
if (factory.self == null) {
sb.append("Mode: standalone\n");
} else {
switch (factory.self.state) {
case LOOKING:
sb.append("Mode: leaderelection\n");
break;
case LEADING:
sb.append("Mode: leading\n");
sb.append("Followers:");
for (FollowerHandler fh : factory.self.leader.followers) {
if (fh.s == null) {
continue;
}
sb.append(" ");
sb.append(fh.s.getRemoteSocketAddress());
if (factory.self.leader.forwardingFollowers
.contains(fh)) {
sb.append("*");
}
}
sb.append("\n");
break;
case FOLLOWING:
sb.append("Mode: following\n");
sb.append("Leader: ");
Socket s = factory.self.follower.sock;
if (s == null) {
sb.append("not connected\n");
} else {
sb.append(s.getRemoteSocketAddress() + "\n");
}
}
}
sendBuffer(ByteBuffer.wrap(sb.toString().getBytes()));
k.interestOps(SelectionKey.OP_WRITE);
return;
}
}
if (len < 0 || len > 0xfffff) {
throw new IOException("Len error " + len);
}
if (zk == null) {
throw new IOException("ZooKeeperServer not running");
}
incomingBuffer = ByteBuffer.allocate(len);
}
/**
* The number of requests that have been submitted but not yet responded to.
*/
int outstandingRequests;
/*
* (non-Javadoc)
*
* @see com.yahoo.zookeeper.server.ServerCnxnIface#getSessionTimeout()
*/
public int getSessionTimeout() {
return sessionTimeout;
}
/**
* This is the id that uniquely identifies the session of a client. Once
* this session is no longer active, the ephemeral nodes will go away.
*/
long sessionId;
static long nextSessionId = 1;
public NIOServerCnxn(ZooKeeperServer zk, SocketChannel sock,
SelectionKey sk, Factory factory) throws IOException {
this.zk = zk;
this.sock = sock;
this.sk = sk;
this.factory = factory;
sock.socket().setTcpNoDelay(true);
sock.socket().setSoLinger(true, 2);
InetAddress addr = ((InetSocketAddress) sock.socket()
.getRemoteSocketAddress()).getAddress();
authInfo.add(new Id("ip", addr.getHostAddress()));
authInfo.add(new Id("host", addr.getCanonicalHostName()));
sk.interestOps(SelectionKey.OP_READ);
}
public String toString() {
return "NIOServerCnxn object with sock = " + sock + " and sk = " + sk;
}
boolean closed;
/*
* (non-Javadoc)
*
* @see com.yahoo.zookeeper.server.ServerCnxnIface#close()
*/
public void close() {
if (closed) {
return;
}
closed = true;
synchronized (factory.cnxns) {
factory.cnxns.remove(this);
}
if (zk != null) {
zk.removeCnxn(this);
}
ZooLog.logTextTraceMessage("close NIOServerCnxn: " + sock,
ZooLog.SESSION_TRACE_MASK);
try {
/*
* The following sequence of code is stupid! You would think that
* only sock.close() is needed, but alas, it doesn't work that way.
* If you just do sock.close() there are cases where the socket
* doesn't actually close...
*/
sock.socket().shutdownOutput();
} catch (IOException e) {
// This is a relatively common exception that we can't avoid
}
try {
sock.socket().shutdownInput();
} catch (IOException e) {
}
try {
sock.socket().close();
} catch (IOException e) {
ZooLog.logException(e);
}
try {
sock.close();
// XXX The next line doesn't seem to be needed, but some posts
// to forums suggest that it is needed. Keep in mind if errors in
// this section arise.
// factory.selector.wakeup();
} catch (IOException e) {
ZooLog.logException(e);
}
sock = null;
if (sk != null) {
try {
// need to cancel this selection key from the selector
sk.cancel();
} catch (Exception e) {
}
}
}
private final static byte fourBytes[] = new byte[4];
/*
* (non-Javadoc)
*
* @see com.yahoo.zookeeper.server.ServerCnxnIface#sendResponse(com.yahoo.zookeeper.proto.ReplyHeader,
* com.yahoo.jute.Record, java.lang.String)
*/
synchronized public void sendResponse(ReplyHeader h, Record r, String tag) {
if (closed) {
return;
}
ByteArrayOutputStream baos = new ByteArrayOutputStream();
// Make space for length
BinaryOutputArchive bos = BinaryOutputArchive.getArchive(baos);
try {
baos.write(fourBytes);
bos.writeRecord(h, "header");
if (r != null) {
bos.writeRecord(r, tag);
}
baos.close();
} catch (IOException e) {
ZooLog.logError("Error serializing response");
}
byte b[] = baos.toByteArray();
ByteBuffer bb = ByteBuffer.wrap(b);
bb.putInt(b.length - 4).rewind();
sendBuffer(bb);
if (h.getXid() > 0) {
synchronized (this.factory) {
outstandingRequests--;
// check throttling
if (zk.getInProcess() < factory.outstandingLimit
|| outstandingRequests < 1) {
sk.selector().wakeup();
enableRecv();
}
}
}
}
/*
* (non-Javadoc)
*
* @see com.yahoo.zookeeper.server.ServerCnxnIface#process(com.yahoo.zookeeper.proto.WatcherEvent)
*/
synchronized public void process(WatcherEvent event) {
ReplyHeader h = new ReplyHeader(-1, -1L, 0);
ZooLog.logTextTraceMessage("Deliver event " + event + " to "
+ this.sessionId + " through " + this,
ZooLog.EVENT_DELIVERY_TRACE_MASK);
sendResponse(h, event, "notification");
}
public void finishSessionInit(boolean valid) {
try {
ConnectResponse rsp = new ConnectResponse(0, valid ? sessionTimeout
: 0, valid ? sessionId : 0, // send 0 if session is no
// longer valid
valid ? zk.generatePasswd(sessionId) : new byte[16]);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
BinaryOutputArchive bos = BinaryOutputArchive.getArchive(baos);
bos.writeInt(-1, "len");
rsp.serialize(bos, "connect");
baos.close();
ByteBuffer bb = ByteBuffer.wrap(baos.toByteArray());
bb.putInt(bb.remaining() - 4).rewind();
sendBuffer(bb);
ZooLog.logWarn("Finished init of " + Long.toHexString(sessionId)
+ ": " + valid);
if (!valid) {
sendBuffer(closeConn);
}
// Now that the session is ready we can start receiving packets
synchronized (this.factory) {
sk.selector().wakeup();
enableRecv();
}
} catch (Exception e) {
ZooLog.logException(e);
close();
}
}
/*
* (non-Javadoc)
*
* @see com.yahoo.zookeeper.server.ServerCnxnIface#getSessionId()
*/
public long getSessionId() {
return sessionId;
}
public void setSessionId(long sessionId) {
this.sessionId = sessionId;
}
public ArrayList<Id> getAuthInfo() {
return authInfo;
}
public InetSocketAddress getRemoteAddress() {
return (InetSocketAddress) sock.socket().getRemoteSocketAddress();
}
public void setStats(long latency, long avg) {
factory.setStats(latency, avg);
}
} |
# tributary/streaming/output/ws.py
import asyncio
import aiohttp
import json as JSON
from collections import deque
from aiohttp import web
from .output import Foo
from ..node import Node
from ...base import StreamNone, StreamEnd


class WebSocket(Foo):
    """Connect to websocket and send data

    Args:
        node (Node): input tributary
        url (str): websocket url to connect to
        json (bool): dump data as json
        wrap (bool): wrap result in a list
        field (str): field to index the response by
        response (bool): wait for a reply from the server
        response_timeout (int): seconds to wait for the reply
        binary (bool): send_bytes instead of send_str
    """

    def __init__(
        self,
        node,
        url,
        json=False,
        wrap=False,
        field=None,
        response=False,
        response_timeout=1,
        binary=False,
    ):
        async def _send(
            data,
            url=url,
            json=json,
            wrap=wrap,
            field=field,
            response=response,
            response_timeout=response_timeout,
            binary=binary,
        ):
            if isinstance(data, (StreamNone, StreamEnd)):
                return data

            if wrap:
                data = [data]
            if json:
                data = JSON.dumps(data)

            # default reply, used when no response is requested
            x = "{}"

            session = aiohttp.ClientSession()
            async with session.ws_connect(url) as ws:
                if binary:
                    await ws.send_bytes(data)
                else:
                    await ws.send_str(data)

                if response:
                    msg = await ws.receive(response_timeout)
                    if msg.type == aiohttp.WSMsgType.TEXT:
                        x = msg.data
                    elif msg.type == aiohttp.WSMsgType.CLOSED:
                        x = "{}"
                    elif msg.type == aiohttp.WSMsgType.ERROR:
                        x = "{}"
                    else:
                        x = "{}"

            await session.close()

            if json:
                x = JSON.loads(x)

            if field:
                x = x[field]

            if wrap:
                x = [x]

            return x

        super().__init__(foo=_send, name="WebSocket", inputs=1)
        node >> self


class WebSocketServer(Foo):
    """Host a websocket server and stream in the data

    Args:
        path (str): route on which to host ws server
        json (bool): load http content data as json
        wrap (bool): wrap result in a list
        field (str): field to index result by
        snapshot (bool): maintain history and provide a snapshot on first request
        server (Optional[aiohttp.web.Application]): aiohttp application to install route
        run (Optional[bool]): run the web app right after construction
        host (Optional[str]): if running the web app, which ip to listen on (default 127.0.0.1)
        port (Optional[int]): if running the web app, port to listen on
        request_handler (Optional[callable]): custom handler to process the request from client
        response_handler (Optional[callable]): custom handler to manage the response sent to client
        binary (bool): send_bytes instead of send_str
    """

    def __init__(
        self,
        node,
        path="/",
        json=False,
        wrap=False,
        field=None,
        snapshot=True,
        server=None,
        run=True,
        host="127.0.0.1",
        port=8080,
        request_handler=None,
        response_handler=None,
        binary=False,
    ):
        # instantiate server if not existing
        server = server or web.Application()

        # capture history
        self._history = deque()

        # create queue map for clients
        self._queue_map = {}

        # http server handler
        async def _handler(
            request,
            queue_map=self._queue_map,
            history=self._history,
            request_handler=request_handler,
            response_handler=response_handler,
            binary=binary,
        ):
            ws = web.WebSocketResponse()
            await ws.prepare(request)

            if ws not in queue_map:
                # create a queue for this client
                queue_map[ws] = asyncio.Queue()

                # send history if snapshotting
                for data in history:
                    if response_handler and callable(response_handler):
                        data = await response_handler(request, data)
                    if binary:
                        await ws.send_bytes(data)
                    else:
                        await ws.send_str(data)

            queue = queue_map[ws]

            try:
                while not ws.closed:
                    # put the request into the queue
                    data = await queue.get()

                    # TODO move this?
                    if request_handler and callable(request_handler):
                        data = await request_handler(request)

                    # if custom response handler is given, use that to determine response
                    if response_handler and callable(response_handler):
                        data = await response_handler(request, data)
                        if binary:
                            await ws.send_bytes(data)
                        else:
                            await ws.send_str(data)
                    elif response_handler and isinstance(response_handler, (str, bytes)):
                        if binary:
                            await ws.send_bytes(response_handler)
                        else:
                            await ws.send_str(response_handler)
                    else:
                        # just put an ok with data
                        await ws.send_str(JSON.dumps(data))
            finally:
                # remove from queue
                queue_map.pop(ws)

        # tributary node handler
        async def _req(
            data,
            json=json,
            wrap=wrap,
            field=field,
            snapshot=snapshot,
            queue_map=self._queue_map,
            history=self._history,
        ):
            if json:
                data = JSON.dumps(data)

            if field:
                data = data[field]

            if wrap:
                data = [data]

            # put data in history
            if snapshot:
                history.append(data)

            # put data into queue
            await asyncio.gather(
                *(asyncio.create_task(queue.put(data)) for queue in queue_map.values())
            )

            # TODO expect response from clients?
            return data

        super().__init__(foo=_req, inputs=1)
        self._name = "WebSocketServer"
        node >> self

        # set server attribute so it can be accessed
        self.set("server", server)

        # install get handler
        server.router.add_get(path, _handler)

        # Initialize application to None, might be managed outside
        self.set("app", None)
        self.set("site", None)

        if run:
            # setup runners so that we start the application
            async def _start(self=self, server=server, host=host, port=port):
                # https://docs.aiohttp.org/en/v3.0.1/web_reference.html#running-applications
                runner = web.AppRunner(server)
                self.app = runner
                await runner.setup()
                site = web.TCPSite(runner, host=host, port=port)
                self.site = site
                await site.start()

            async def _shutdown(self=self, server=server, host=host, port=port):
                await self.site.stop()
                await self.app.cleanup()

            self._onstarts = (_start,)
            self._onstops = (_shutdown,)


Node.websocket = WebSocket
Node.websocketServer = WebSocketServer
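# Usage sketch (illustrative only, not part of this module): wire a generator
# input into the WebSocket output and run the graph. It assumes tributary's
# top-level streaming namespace exposes Foo-style input nodes and run(), as in
# its documentation; the ws:// URL is a placeholder.
#
# import tributary.streaming as ts
#
# def ticks():
#     for i in range(5):
#         yield {"tick": i}
#
# source = ts.Foo(ticks)
# out = ts.WebSocket(source, "ws://localhost:8080/", json=True)
# ts.run(out)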
|
You wouldn’t expect a baby to work. Unless that baby is actually a 30-year-old man who just likes wearing diapers and drinking juice from a bottle.
If that’s the case, the federal government is probably going to start asking questions.
That’s what happened to Stanley Thornton Jr. of California after he was featured on a National Geographic show called “Taboo.” Thornton, who sleeps in an adult-sized crib and has a roommate who plays the role of mother, collects disability benefits.
He says he’s unable to work, but Sen. Tom Coburn of Oklahoma isn’t buying that line. He’s calling for an investigation, saying Thornton can work, considering that he built a giant crib, drives himself on errands and determines when he should wear grown-up clothes.
The senator also wants to know why Thornton’s roommate, a former nurse, is collecting social security benefits when she is able to take care of an adult baby.
Thornton isn’t taking this lying down — giant crib or not. He wrote a letter to the Washington Times threatening to take his own life if his social security benefits are canceled:
“You wanna test how damn serious I am about leaving this world, screw with my check that pays for this apartment and food. Try it. See how serious I am. I don’t care,” the California man said. “I have no problem killing myself. Take away the last thing keeping me here, and see what happens. Next time you see me on the news, it will be me in a body bag.”
On the “Taboo” show, Thornton said he likes to be treated like a baby because of “love, affection and safeness.”
Visit seattlepi.com’s home page for more Seattle news. Contact Amy Rolph at [email protected] or on Twitter as @amyrolph and @bigblog. |
import requests
import json
# ---- Static Data ---- {
def get_icons_JSON():
url = 'http://ddragon.leagueoflegends.com/cdn/8.24.1/data/en_US/profileicon.json'
print("Profile Icons:")
print(url)
got = requests.get(url)
data = got.json()
return data
def get_items_JSON():
url = 'http://ddragon.leagueoflegends.com/cdn/8.24.1/data/en_US/item.json'
print("Items Data:")
print(url)
got = requests.get(url)
data = got.json()
return data
def get_sum_spells_JSON():
url = 'http://ddragon.leagueoflegends.com/cdn/8.24.1/data/en_US/summoner.json'
print("Summoner Spells:")
print(url)
got = requests.get(url)
data = got.json()
return data
def get_version_JSON():
url = 'https://ddragon.leagueoflegends.com/api/versions.json'
print("Versions:")
print(url)
got = requests.get(url)
data = got.json()
with open('./JSON_Files/versions.json', 'w') as f:
json.dump(data,f,indent=2)
# latest version
version = data[0]
return data
# ---- Champion Patch 8.17.1 End Points ----
def get_input_champ_JSON(champion):
url = 'http://ddragon.leagueoflegends.com/cdn/8.24.1/data/en_US/champion/' + champion + '.json'
print("Single Champion Data:")
print(url)
got = requests.get(url)
data = got.json()
return data
def get_champs_JSON():
# up to date now
url = 'http://ddragon.leagueoflegends.com/cdn/8.24.1/data/en_US/champion.json'
print("Full Champion List JSON:")
print(url)
got = requests.get(url)
data = got.json()
return data
# ---- Match V4 End Points ----
def get_match_history_JSON(region, account, beginIndex, endIndex, APIKey):
url = 'https://'+region+'.api.riotgames.com/lol/match/v4/matchlists/by-account/'+account+'?endIndex='+str(endIndex)+'&beginIndex='+str(beginIndex)+'&api_key='+APIKey
print("Match History:")
print(url)
got = requests.get(url)
data = got.json()
return data
def get_champ_match_history_JSON(region, account, champion, beginIndex, endIndex, APIKey):
url = 'https://' + region + '.api.riotgames.com/lol/match/v4/matchlists/by-account/' + account + '?champion=' +str(champion)+ '&endIndex=' + str(endIndex) + '&beginIndex='+str(beginIndex)+'&api_key='+APIKey
print("Specific Champion Match History:")
print(url)
got = requests.get(url)
data = got.json()
return data
def get_match_JSON(region, matchID, APIKey):
url = 'https://'+ region + '.api.riotgames.com/lol/match/v4/matches/'+matchID+'?api_key='+APIKey
print("Single Match")
print(url)
got = requests.get(url)
data = got.json()
return data
def get_match_timeline_JSON(region, matchID, APIKey):
url = 'https://'+region+ '.api.riotgames.com/lol/match/v4/timelines/by-match/'+matchID+'?api_key='+APIKey
print("Timeline of Match:")
print(url)
got = requests.get(url)
data = got.json()
return data
# ---- Summoner V4 End Points ----
def get_single_sum_data_JSON(region, summoner, APIKey):
url = "https://" + region + ".api.riotgames.com/lol/summoner/v4/summoners/by-name/" + summoner + "?api_key=" + APIKey
print("Single Summoner:")
# print out url for desired api
print(url)
# retrieve json
got = requests.get(url)
# turns into json format
data = got.json()
return data
def get_sum_by_account_id_JSON(region, accountID, APIKey):
url = 'https://'+region+'.api.riotgames.com/lol/summoner/v4/summoners/by-account/'+accountID+'?api_key='+APIKey
print("Single Account:")
print(url)
got = requests.get(url)
data = got.json()
return data
def get_multi_account_data_JSON(region, accountIDs, APIKey):
count = 0
print("Multiple Accounts:")
# list to store json data
data_List = []
for account in accountIDs:
url = "https://" + region + ".api.riotgames.com/lol/summoner/v4/summoners/by-account/" + account + "?api_key=" + APIKey
# debug console statements
print(account)
print(url)
# retrieves data from API
got = requests.get(url)
# converts to json format
data = got.json()
# adds each json item to list
data_List.append(data)
# saves files cause it's easier to do this in this function than creating a separate list JSON file writer
with open('JSON_Files/' + accountIDs[count] + '.json', 'w') as f:
json.dump(data, f, indent=2)
# increments list
count += 1
return data_List
def get_multi_sum_data_JSON(region, summoners, APIKey):
count = 0
print("Multiple Summoners:")
# list to store json data
data_List = []
for summoner in summoners:
url = "https://" + region + ".api.riotgames.com/lol/summoner/v4/summoners/by-name/" + summoner + "?api_key=" + APIKey
# debug console statements
print(summoner)
print(url)
# retrieves data from API
got = requests.get(url)
# converts to json format
data = got.json()
# adds each json item to list
data_List.append(data)
        # save each response to its own JSON file (no separate list-JSON writer exists yet)
with open('JSON_Files/' + summoners[count] + 'summoners.json', 'w') as f:
json.dump(data, f, indent=2)
        # advance the index counter
count += 1
return data_List
# ---- LoL-Status V3 End Points ----
def get_status_JSON(region, APIKey):
print("Getting LoL Service Status")
url = 'https://' + region + '.api.riotgames.com/lol/status/v3/shard-data?api_key=' + APIKey
print(url)
got = requests.get(url)
data = got.json()
return data
# ---- Spectator Data V4 End Points ----
def get_spectator_JSON(region,summonerID,APIKey):
    url = 'https://'+region+'.api.riotgames.com/lol/spectator/v4/active-games/by-summoner/'+summonerID+'?api_key='+APIKey
print("Getting active spectator data:")
print(url)
got = requests.get(url)
data = got.json()
return data
# ---- Champion Rotations V3 End Points ----
def get_champ_rotation_JSON(region, APIKey):
url = 'https://' + region + '.api.riotgames.com/lol/platform/v3/champion-rotations?api_key=' + APIKey
print("Getting current weeks free champion rotation:")
print(url)
got = requests.get(url)
data = got.json()
return data
# ---- Champion Master V4 End Points ----
def get_champion_mastery_by_summonerid_JSON(region,summonerID,APIKey):
url = 'https://' + region + '.api.riotgames.com/lol/champion-mastery/v4/champion-masteries/by-summoner/' + summonerID + '?api_key=' + APIKey
print("Getting all mastery entries sorted high-low:")
print(url)
got = requests.get(url)
data = got.json()
return data
def get_total_mastery_JSON(region,summonerID,APIKey):
url = 'https://' + region + '.api.riotgames.com/lol/champion-mastery/v4/scores/by-summoner/' + summonerID + '?api_key='+APIKey
print("Getting total mastery:")
print(url)
got =requests.get(url)
data = got.json()
return data
def get_mastery_by_champion_and_summoner_id_JSON(region,summonerID,championID,APIKey):
url = 'https://' + region + '.api.riotgames.com/lol/champion-mastery/v4/champion-masteries/by-summoner/' + summonerID + '/by-champion/'+championID+'?api_key='+ APIKey
print("Getting specific champion mastery for a player:")
print(url)
got = requests.get(url)
data = got.json()
return data
# ---- League & Queue V4 End Points ----
def get_league_data_by_summoner_JSON(region, summonerID, APIKey):
url = "https://" + region + ".api.riotgames.com/lol/league/v4/positions/by-summoner/" + summonerID + "?api_key=" + APIKey
print("Single Summoner League Data:")
print(url)
# retrieve json
got = requests.get(url)
data = got.json()
return data
def get_challenger_JSON(region, queueName,APIKey):
url = 'https://'+region+'.api.riotgames.com/lol/league/v4/challengerleagues/by-queue/'+queueName+'?api_key='+APIKey
print("Challenger League:")
print(url)
got = requests.get(url)
data = got.json()
return data
def get_grandmaster_JSON(region, queueName, APIKey):
url = 'https://'+region+'.api.riotgames.com/lol/league/v4/grandmasterleagues/by-queue/'+queueName+'?api_key='+APIKey
print("Grand-Master League:")
print(url)
got = requests.get(url)
data = got.json()
return data
def get_master_JSON(region, queueName, APIKey):
url = 'https://' + region + '.api.riotgames.com/lol/league/v4/masterleagues/by-queue/' + queueName + '?api_key=' + APIKey
print("Masters League:")
print(url)
got = requests.get(url)
data = got.json()
return data
get_version_JSON()
accounts = ["<KEY>","<KEY>","<KEY>"]
api = "RGAPI-55f82a01-d2ec-4092-93da-6829172a436d"
# get_multi_account_data_JSON("na1",accounts,api)
account = "<KEY>"
print(get_match_history_JSON("na1", account,0,100,api))
print(get_champ_match_history_JSON("na1",account,516,0,100,api))
print(get_single_sum_data_JSON("na1","DrunkenSkarl",api))
# more functions coming... |
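# The comments in get_multi_account_data_JSON / get_multi_sum_data_JSON note that no
# separate list-JSON file writer exists yet. A minimal sketch of such a helper is below;
# the name save_json is illustrative and not part of the original module.
def save_json(data, filename):
    # write any JSON-serializable object to JSON_Files/<filename>.json
    with open('JSON_Files/' + filename + '.json', 'w') as f:
        json.dump(data, f, indent=2)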
Family planning, antenatal and post partum care in multiple sclerosis: a review and update Abstract Multiple sclerosis is more prevalent in women of childbearing age than in any other group. As a result, the impact of multiple sclerosis and its treatment on fertility, planned and unplanned pregnancies, post partum care and breastfeeding presents unique challenges that need to be addressed in everyday clinical practice. Given the increasing number of disease-modifying agents now available in Australia for the treatment of multiple sclerosis, there is a growing need for clinicians to provide their patients with appropriate counselling on family planning. Providing better evidence regarding the relative risks and benefits of continuing therapy before, during and after pregnancy is an important research priority. International pregnancy registries are essential in developing better evidence-based practice guidelines, and neurologists should be encouraged to contribute to these when possible. The management of women with multiple sclerosis, especially when they are taking disease-modifying agents, requires careful assessment of fertility and disease characteristics as well as a multidisciplinary approach to ensure positive outcomes in both mothers and their children.
package com.mtx.javacommon.test;
/**
 * "Jian Zhi Offer" interview question 9: the Fibonacci sequence
* Created by lishaoming on 17/12/13.
*/
public class TestFibonacci {
public static void main(String[] args) {
long time = System.currentTimeMillis();
System.out.println("开始:" + time);
int N = 7;
// System.out.println(method1(N));
System.out.println(method2(N));
long timeEnd = System.currentTimeMillis();
System.out.println("结束:" + timeEnd + ", 耗时:" + (timeEnd - time) + "ms");
}
    // Naive recursion recomputes too many overlapping subproblems; complexity grows exponentially
private static int method1(int n) {
if (n == 0) {
return 0;
}
if (n == 1) {
return 1;
}
return method1(n - 1) + method1(n - 2);
}
private static int method2(int n) {
int[] nums = new int[]{0, 1};
if (n < 2) {
return nums[n];
}
int num2 = nums[0];
int num1 = nums[1];
int num = 0;
for (int i = 2; i <= n; i++) {
num = num1 + num2;
num2 = num1;
num1 = num;
}
return num;
}
}
|
/* contrib/babelfishpg_tsql/src/err_handler.h */
#ifndef ERR_HANDLER_H
#define ERR_HANDLER_H
#include "postgres.h"
extern int CurrentLineNumber; /* Holds the Line No. of the current query being executed. */
bool is_ignorable_error(int pg_error_code);
bool get_tsql_error_code(ErrorData *edata, int *last_error);
bool is_current_batch_aborting_error(int pg_error_code);
bool is_batch_txn_aborting_error(int pg_error_code);
bool ignore_xact_abort_error(int pg_error_code);
bool is_txn_aborting_compilation_error(int sql_error_code);
bool is_xact_abort_txn_compilation_error(int sql_error_code);
/* Macros for tsql error code */
#define SQL_ERROR_129 129
#define SQL_ERROR_132 132
#define SQL_ERROR_133 133
#define SQL_ERROR_134 134
#define SQL_ERROR_135 135
#define SQL_ERROR_136 136
#define SQL_ERROR_141 141
#define SQL_ERROR_142 142
#define SQL_ERROR_153 153
#define SQL_ERROR_180 180
#define SQL_ERROR_217 217
#define SQL_ERROR_219 219
#define SQL_ERROR_220 220
#define SQL_ERROR_232 232
#define SQL_ERROR_266 266
#define SQL_ERROR_289 289
#define SQL_ERROR_293 293
#define SQL_ERROR_306 306
#define SQL_ERROR_346 346
#define SQL_ERROR_352 352
#define SQL_ERROR_477 477
#define SQL_ERROR_487 487
#define SQL_ERROR_506 506
#define SQL_ERROR_512 512
#define SQL_ERROR_515 515
#define SQL_ERROR_517 517
#define SQL_ERROR_545 545
#define SQL_ERROR_547 547
#define SQL_ERROR_550 550
#define SQL_ERROR_556 556
#define SQL_ERROR_574 574
#define SQL_ERROR_628 628
#define SQL_ERROR_1034 1034
#define SQL_ERROR_1049 1049
#define SQL_ERROR_1051 1051
#define SQL_ERROR_1205 1205
#define SQL_ERROR_1505 1505
#define SQL_ERROR_1715 1715
#define SQL_ERROR_1752 1752
#define SQL_ERROR_1765 1765
#define SQL_ERROR_1768 1768
#define SQL_ERROR_1776 1776
#define SQL_ERROR_1778 1778
#define SQL_ERROR_1801 1801
#define SQL_ERROR_1946 1946
#define SQL_ERROR_2627 2627
#define SQL_ERROR_2714 2714
#define SQL_ERROR_2732 2732
#define SQL_ERROR_2747 2747
#define SQL_ERROR_2787 2787
#define SQL_ERROR_3609 3609
#define SQL_ERROR_3616 3616
#define SQL_ERROR_3623 3623
#define SQL_ERROR_3701 3701
#define SQL_ERROR_3723 3723
#define SQL_ERROR_3726 3726
#define SQL_ERROR_3728 3728
#define SQL_ERROR_3729 3729
#define SQL_ERROR_3732 3732
#define SQL_ERROR_3902 3902
#define SQL_ERROR_3903 3903
#define SQL_ERROR_3914 3914
#define SQL_ERROR_3930 3930
#define SQL_ERROR_4514 4514
#define SQL_ERROR_4708 4708
#define SQL_ERROR_4712 4712
#define SQL_ERROR_4901 4901
#define SQL_ERROR_4920 4920
#define SQL_ERROR_6401 6401
#define SQL_ERROR_8003 8003
#define SQL_ERROR_8004 8004
#define SQL_ERROR_8007 8007
#define SQL_ERROR_8009 8009
#define SQL_ERROR_8011 8011
#define SQL_ERROR_8016 8016
#define SQL_ERROR_8018 8018
#define SQL_ERROR_8023 8023
#define SQL_ERROR_8028 8028
#define SQL_ERROR_8029 8029
#define SQL_ERROR_8031 8031
#define SQL_ERROR_8032 8032
#define SQL_ERROR_8037 8037
#define SQL_ERROR_8043 8043
#define SQL_ERROR_8047 8047
#define SQL_ERROR_8050 8050
#define SQL_ERROR_8057 8057
#define SQL_ERROR_8058 8058
#define SQL_ERROR_8106 8106
#define SQL_ERROR_8107 8107
#define SQL_ERROR_8115 8115
#define SQL_ERROR_8134 8134
#define SQL_ERROR_8143 8143
#define SQL_ERROR_8152 8152
#define SQL_ERROR_8159 8159
#define SQL_ERROR_8179 8179
#define SQL_ERROR_9441 9441
#define SQL_ERROR_9451 9451
#define SQL_ERROR_9809 9809
#define SQL_ERROR_10610 10610
#define SQL_ERROR_10727 10727
#define SQL_ERROR_10733 10733
#define SQL_ERROR_10793 10793
#define SQL_ERROR_11555 11555
#define SQL_ERROR_11700 11700
#define SQL_ERROR_11701 11701
#define SQL_ERROR_11702 11702
#define SQL_ERROR_11703 11703
#define SQL_ERROR_11705 11705
#define SQL_ERROR_11706 11706
#define SQL_ERROR_11708 11708
#define SQL_ERROR_11709 11709
#define SQL_ERROR_11717 11717
#define SQL_ERROR_16915 16915
#define SQL_ERROR_16948 16948
#define SQL_ERROR_16950 16950
#define SQL_ERROR_18456 18456
#endif /* ERR_HANDLER_H */
|
Direct Synthesis of Co-doped Graphene on Dielectric Substrates Using Solid Carbon Sources Direct synthesis of high-quality doped graphene on dielectric substrates without transfer is highly desired for simplified device processing in electronic applications. However, graphene synthesis directly on substrates suitable for device applications, though highly demanded, remains unattainable and challenging. Here, a simple and transfer-free synthesis of high-quality doped graphene on the dielectric substrate has been developed using a thin Cu layer as the top catalyst and polycyclic aromatic hydrocarbons as both carbon precursors and doping sources. N-doped and N, F-co-doped graphene have been achieved using TPB and F16CuPc as solid carbon sources, respectively. The growth conditions were systematically optimized and the as-grown doped graphene was well characterized. The growth strategy provides a controllable transfer-free route for high-quality doped graphene synthesis, which will facilitate the practical applications of graphene. Electronic supplementary material The online version of this article (doi:10.1007/s40820-015-0052-6) contains supplementary material, which is available to authorized users. Introduction Graphene, a one-atom-thick layer of carbon with sp2 hybrid orbital bonding and a two-dimensional structure, has attracted intense research interest due to its extraordinary physical and chemical characteristics, such as good mechanical strength, high carrier mobility, excellent electrical conductivity, superior thermal conductivity, and high transmittance. However, the zero band gap of pristine graphene brings some difficulties for its application in the electronic device field. Among all the approaches to synthesize doped graphene, chemical vapor deposition (CVD) is the most popular method to obtain high-quality doped graphene on a large scale by introducing copper or nickel foil as the catalyst and an independent doping source (e.g., NH3 as the N doping source). Recently, carbon sources containing dopant elements have been used to directly grow doped graphene by the CVD method, avoiding post-doping treatment or the use of dopant gases in the growth process. For example, Tour et al. demonstrated a new approach in which large-area, high-quality N-doped graphene with controllable thickness can be grown from different solid carbon sources, such as polymer films or small molecules, deposited on a metal catalyst substrate at 800°C. Liu et al. developed a self-assembly approach that allows the synthesis of single-layer and highly nitrogen-doped graphene domain arrays by self-organization of pyridine molecules on the Cu surface. However, the graphene film obtained by these methods generally requires physical transfer onto the desired substrates for subsequent device processing, which can introduce defects and contamination into the graphene film. Recently, we have developed a new transfer-free approach capable of synthesizing graphene directly on dielectric substrates using polycyclic aromatic hydrocarbons (PAHs) as carbon sources. Significantly, N doping and patterning of graphene can be readily and concurrently achieved by this growth method. In this paper, we systematically investigate the factors that affect the growth quality of the doped graphene and optimize the growth conditions for high-quality doped graphene.
Furthermore, we demonstrate that N, F-co-doped graphene can be synthesized using only 1,2,3,4,8,9,10,11,15,16,17,18,22,23,24,25-hexadecafluorophthalocyanine copper(II) (F16CuPc) as both the solid carbon source and the N and F doping source. Experimental Section The schematic of the growth process of doped graphene directly on SiO2-layered Si (SiO2/Si) without transfer is shown in Fig. 1. First, the SiO2/Si (SiO2: 300 nm thick) substrate was ultrasonically cleaned in acetone, ethanol, and deionized water for 15 min each. Then PAHs with a planar structure (TPB or F16CuPc) were evaporated onto the substrate as solid carbon sources by a thermal evaporation system (Organic Evaporation Coating Machine ZZB-U500SA), followed by deposition of a Cu film layer on the surface of the PAHs as the catalyst by an electron-beam evaporation system (Kurt J. Lesker, PVD750). After annealing in a tube furnace under Ar gas flow at ~1.8 × 10^2 Pa, doped graphene was synthesized between the Cu layer and the substrate. At last, the Cu layer was etched away by Marble's reagent (CuSO4:HCl:H2O = 10 g:50 mL:50 mL), and doped graphene was obtained directly on the SiO2 substrate without any transfer process. The morphology of the doped graphene was characterized by scanning electron microscopy (SEM) (FEI Quanta 200F). Raman spectra were recorded at room temperature using a Jobin-Yvon HR800 Raman microscope with laser excitation at 514 nm. Optical images were obtained using a fluorescence optical microscope (DM4000M). The HR-TEM images were taken by transmission electron microscope (TEM) (Tecnai G2 F20). The surface state and electron structure of the samples were studied by X-ray photoelectron spectroscopy (XPS) measurement (Kratos AXIS UltraDLD ultrahigh vacuum (UHV) surface analysis system), using Al Kα X-rays (1486 eV) as the excitation source. The optical transmittance spectrum and sheet resistance were also measured. Result and Discussion Carbon source is an important factor in graphene synthesis. We found that the planar configuration of PAHs might provide a hexagonal honeycomb skeleton structure for the graphene growth, and the growth mechanism from PAHs may involve a surface-mediated nucleation process of dehydrogenated PAHs catalyzed by Cu rather than a segregation or precipitation process of small carbon species decomposed from the precursors. Therefore, planar PAHs that contain heteroatoms (e.g., nitrogen, boron, fluorine) were chosen as solid carbon sources for doped graphene growth in our work. In addition to the specific structure of the solid carbon sources, there are some other key factors that control the quality of doped graphene, such as the thickness of the solid carbon source, the thickness of the Cu film layer, annealing time, annealing temperature, etc. Hence, in order to achieve high-quality doped graphene, the optimal conditions for doped graphene growth were investigated by rationalizing the above factors. 2,4,6-Triphenylborazine (TPB) with planar configuration was selected as the solid carbon source to evaluate the growth conditions of the doped graphene. The thickness effect of TPB on the quality of graphene was investigated first. The Raman spectra shown in Fig. S1a reveal that the optimum thickness of the TPB layer is 5 nm. When the thickness of TPB is less than 5 nm, the carbon source cannot form a continuous film on the substrate, which could result in the formation of discontinuous graphene.
When the thickness of TPB is greater than 5 nm, the excessive amount of carbon source leads to multilayer graphene or amorphous carbon formation due to the extremely low solubility of C in Cu. Different annealing temperatures were also investigated for the growth of TPB-derived doped graphene. In general, the growth temperature required to synthesize good-quality graphene by the conventional CVD method is 1000-1050°C. Figure S1b shows the Raman spectra of graphene synthesized at different growth temperatures, suggesting that graphene can be obtained above 950°C. Annealing temperatures below 650°C result in the deposition of amorphous carbon, as characterized by the broad D and G bands and a very weak 2D band shown in Fig. S1b. When the annealing temperature was increased to 1050°C, the obtained graphene layer also had a larger D band in the Raman spectra than that grown at 1000°C. This probably arises from the partial evaporation of the thin Cu film at 1050°C. Subsequently, different annealing times were studied. As shown in Fig. S1c, higher quality doped graphene with a lower ID/IG and a higher I2D/IG ratio can be achieved when the annealing time is 60 min. The effect of Cu film thickness on doped graphene growth was investigated as well. When the Cu film thickness is above 100 nm, the graphene film can be obtained. However, when the thickness of the Cu film was decreased below 100 nm, most of the Cu was evaporated during the annealing process at 1000°C, resulting in discontinuous doped graphene. In addition, graphene formed on the top surface of the Cu was observed when a thin Cu layer was used. When the thickness of the Cu film was increased to 1000 nm, relatively high-quality doped graphene was obtained, as indicated by the G/2D ratio (~0.3), D/G ratio (~1.3), and FWHM of the 2D band (~42 cm^-1) in Fig. S1d. Thus the optimal growth conditions for the doped graphene growth from TPB were set at 5 nm TPB as the carbon source, a 1000 nm Cu film on the top surface, and an annealing temperature of 1000°C for 60 min. Figure 2a shows the optical image of the doped graphene grown on the SiO2/Si substrate under the optimal conditions using TPB as the carbon source. The continuous film with almost no contrast indicates that the graphene is distributed uniformly on the dielectric substrate. The corresponding Raman spectrum in Fig. 2b shows a weak D band, revealing that the graphene film is almost defect free; the weak D band may arise from the doping effect. The G/2D ratio is ~0.25 and the 2D peak is sharp and symmetric, indicating that the obtained graphene is monolayer. The monolayer graphene is also confirmed by AFM measurement, as shown in Fig. S2. A small D' band beside the G band confirms that doped graphene has been obtained. Figure S3 shows micro-Raman mapping of the 2D graphene peak, further indicating that the graphene film is distributed uniformly on the substrate. Figure 2c shows the high-resolution XPS scan of N 1s centered at 400.7 eV, further confirming that N-doped graphene was obtained under this optimal condition. All the results demonstrate that a planar PAH precursor containing dopant elements promotes the formation of doped graphene. The atomic concentration of N in the TPB-derived doped graphene is about 1.74% from the data of the XPS survey scan. No B 1s peak was observed for this sample, which is probably owing to the difficulty of B-C bond formation in the graphene film under the present conditions.
In order to achieve co-doping in graphene by this method, 5 nm of F16CuPc was used as the carbon source to prepare N, F-co-doped graphene on the SiO2/Si substrate at 1000°C for 60 min. F16CuPc is also a PAH compound with a planar structure. The Raman spectrum of the product (Fig. 3a) shows a large D peak and a small D' peak, which may be induced by the N and F doping atoms. The G/2D ratio is ~1.5 and the 2D peak is lower and broader than that of single-layer graphene, indicating that the obtained doped graphene film is of 3-4 layers. Figure 3b shows an SEM image of the F16CuPc-derived doped graphene. It can be found that the thin graphene film is homogeneously distributed on the substrate. Moreover, it can be clearly distinguished from the HR-TEM image shown in Fig. 3c that the doped graphene film is of three layers, which is consistent with the Raman analysis. XPS investigation further verifies that N and F co-doping has been achieved in the graphene. XPS spectra of the N, F-co-doped graphene are shown in Fig. 4. Figure 4a shows the full XPS spectrum of the F16CuPc-derived doped graphene on the SiO2/Si substrate. There is no signal of Cu, indicating the clean removal of Cu after etching. Both the nitrogen- and fluorine-related peaks are obviously found in the survey scan, which confirms the successful co-doping of N and F in the graphene film. The atomic concentrations of N and F in the F16CuPc-derived doped graphene are about 2.98 and 0.66%, respectively. The characteristic XPS C 1s core-level spectrum (Fig. 4b) is assigned as sp2 carbon (284.4 eV), confirming the graphitic structure of the as-grown graphene grains. The shoulders around 285.5 and 286.6 eV can be assigned to C-N and C-F bonding, respectively. Figure 4c shows the high-resolution XPS scan of N 1s, suggesting two types of N-C bonding: "graphitic" N centered at 401.1 eV and "pyridine" N centered at 399.2 eV. The ratio of the two types of N indicates that they are mainly bonded to three adjacent carbons, suggesting that the N atoms are uniformly bound to the graphene structure. The high-resolution XPS scan of F 1s shows a single symmetric peak centered at 689.1 eV in Fig. 4d, which is assigned to the C-F covalent bond. Figure 5 shows the result of the optical transmittance measurement for the N, F-co-doped graphene directly grown on quartz under the same conditions as on the SiO2/Si substrate, exhibiting a high optical transmittance of ~93% at 550 nm, even though the doped graphene film is 3-4 layers. The sheet resistance (Rs) obtained from four-point probe measurement is ~2.5 kΩ/sq, revealing that the as-grown N, F-co-doped graphene film is of high conductivity. Conclusions In summary, a facile method for the high-quality synthesis of doped graphene film on the dielectric substrate has been developed. PAHs containing dopant elements with planar configuration were used as both carbon feedstocks and doping sources, with a layer of Cu film as the catalyst. The thicknesses of the Cu layer and PAHs, the annealing time, and the temperature were optimized for high-quality graphene growth. N-doped and N, F-co-doped graphene have been synthesized using TPB and F16CuPc as solid carbon sources, respectively. The properties of the as-grown samples were well studied, and the N, F-co-doped graphene exhibits a high optical transmittance and low sheet resistance.
The present growth strategy provides a controllable transfer-free route for high-quality doped graphene growth, which will facilitate the practical electronic applications of graphene. (Figure 5 caption: the optical transmittance spectrum of the F16CuPc-derived doped graphene on a quartz wafer; the inset shows the sheet resistance measured by four-point probe.)
NASA's Kepler spacecraft hunting for Earth-like planets around other stars has found 706 candidates for potential alien worlds while gazing at more than 156,000 stars packed into a single patch of the sky.
If all 706 of these objects pass the stringent follow-up tests to determine if they are actually planets, and not false alarms, they could nearly triple the current number of known extrasolar planets. They were announced as part of a huge release of data from the mission's first 43 days by NASA's Kepler science team this week.
The Kepler space observatory monitors stars for subtle changes in their brightness, which could indicate the presence of alien planets passing in front of them as seen from Earth. Astronomers will use the newly-released data from Kepler to determine if orbiting planets are responsible for the variation in brightness of several hundred stars.
"This is the most precise, nearly continuous, longest and largest data set of stellar photometry ever," said David Koch, the mission's deputy principal investigator at NASA's Ames Research Center in Moffett Field, Calif., in a statement. "The results will only get better as the duration of the data set grows with time."
To date, astronomers have discovered more than 400 alien planets lurking around stars beyond our solar system. That includes six newfound worlds discovered by a French observatory that were announced earlier this week.
Kepler currently monitors a star field in the constellations Cygnus and Lyra. The stars make up a full range of temperatures, sizes and ages. Many of them are stable, but others pulsate.
Some of the stars show starspots, which are similar to sunspots, and a few even produce flares that are so powerful they would sterilize their nearest planets, should any exist.
In this particular star field, Kepler has identified 706 planetary candidates; data for 306 of these were part of the public data release this week.
The 28 members of the Kepler science team are using ground-based telescopes, the Hubble Space Telescope and the Spitzer Space Telescope to perform follow-up observations on a specific set of 400 objects that were not publicly released to double-check if they are good candidates for alien planets.
Data from these follow-up observations will determine which of the objects of interest can be identified as planets. These findings will subsequently be released to the scientific community in February 2011.
Follow-up observations are necessary in order to distinguish candidates that are actual planets from false alarms, such as binary stars, which are two stars that orbit each other.
"For the most interesting objects, we go through a process of putting the data through a series of sieves," Charles Sobeck, Kepler's deputy project manager, told SPACE.com. "For final candidates that have passed all the tests, we then go to the expensive resources like Hubble and Spitzer."
The size of planetary candidates can also only be approximated until the size of the stars they orbit is determined from additional spectroscopic observations made by ground-based telescopes.
"I look forward to the scientific community analyzing the data and announcing new exoplanet results in the coming months," said Lia LaPiana, Kepler's program executive at NASA Headquarters in Washington, D.C., in a statement.
The Kepler observatory will continue conducting science operations until at least November 2012. It will also continue searching for Earth-like planets, including those that orbit stars in a warm, habitable zone where liquid water could exist on the surface of alien planets.
And, since transits of planets within this habitable zone of solar-like stars occur about once a year and require three transits for verification, it is expected to take at least three years to locate and verify any potential Earth-size planet.
"The Kepler observations will tell us whether there are many stars with planets that could harbor life, or whether we might be alone in our galaxy," said Kepler's science principal investigator William Borucki of NASA's Ames Research Center.
So far, Kepler's observations have produced a wealth of information, and it has surpassed the expectations of its mission scientists, Borucki said.
"We never thought we'd have this much this early, it's absolutely wonderful," Borucki told SPACE.com. "The instruments are working well, but we still have some work to do. We're certainly not finished with this kind of work, and each year, we go to more and more difficult targets. So, people have to be patient."
//
// 7.1.cpp
// c++1x tutorial
//
// created by changkun at shiyanlou.com
//
// Thread support library
#include <iostream>
#include <future>
#include <thread>
void foo() {
std::cout << "hello world" << std::endl;
}
void foo2() {
    // wrap a lambda that returns 7 into a packaged task
    // the template parameter of std::packaged_task is the type of the wrapped function
std::packaged_task<int()> task([](){return 7;});
    // get the future associated with the task
    std::future<int> result = task.get_future();
    // execute the task on a separate thread
    std::thread(std::move(task)).detach();
    std::cout << "Waiting...";
    result.wait();
    // print the result of the execution
    std::cout << "Done!" << std::endl << "Result is " << result.get() << '\n';
}
int main() {
std::thread t(foo);
foo2();
t.join();
return 0;
}
|
package server
import (
"fmt"
"strconv"
"delineate.io/customers/src/config"
_ "delineate.io/customers/src/docs"
"delineate.io/customers/src/logging"
"delineate.io/customers/src/routes"
)
const defaultHost = "localhost"
const defaultPort = 1102
func Start() {
router := routes.NewRouter()
host := config.GetStringOrDefault("server.host", defaultHost)
port := config.GetStringOrDefault("server.port", strconv.Itoa(defaultPort))
address := host + ":" + port
logging.Info(fmt.Sprintf("attempting to start server on '%s'", address))
if err := router.Run(address); err != nil {
logging.Err(err)
}
}
|
Psychogenic non-epileptic seizures: our video-EEG experience Abstract Objective: The aim of our study was to assess the number of psychogenic non-epileptic seizures (PNES) in our patients with a refractory seizure disorder, to determine the typical PNES semiology using video-EEG monitoring and describe other PNES parameters. Methods: We evaluated prospectively 596 patients with pharmacoresistant seizures. All these patients underwent continuous video-EEG monitoring. In consenting patients, we used suggestive seizure provocation. We assessed seizure semiology, interictal EEG, brain MRI, psychiatric co-morbidities, personality profiles, and seizure outcome. Results: In the sample of 596 monitored patients, we detected 111 (19.3%) patients with PNES. Of the 111 patients with PNES, 86.5% had spontaneous and 76.5% had provoked seizures. The five most typical symptoms were: initially closed eyelids (67.6%), rapid tremor (47.7%), asynchronous limb movement (37.8%), preictal pseudosleep (33.3%), and side-to-side head movement (32.4%). Interictal EEG was rated as abnormal in 46.2% and with epileptiform abnormality in 9%. Brain MRI was abnormal in 32 (28.8%) patients. Personality disorders (46.8%), anxiety (39.6%), and depression (12.6%) were the most frequent additional psychiatric co-morbidities. PNES outcome after at least 2 years is reported; 22.5% of patients were seizure-free; one-third had markedly reduced seizure frequency. We have not seen any negative impact of the provocative testing on the seizure outcome. Discussion: Video-EEG monitoring with suggestive seizure provocation supported by clinical psychiatric and psychological evaluation significantly contributes to the correct PNES diagnosis, while interictal EEG and brain MRI are frequently abnormal. Symptoms typical for PNES, as opposed to epileptic seizures, could be distinguished.
// AOP is defined via application-context.xml; the annotations are only for test purposes
@Aspect
public class RoleAuthorizationHandler {
private static final Logger log = Logger.getLogger(RoleAuthorizationHandler.class);
private final ConfigurationManager configurationManager = ConfigurationManager.getConfigurationManager();
@Before("@annotation(permissions)")
public void authorizeRole(JoinPoint joinPoint, PermissionAllowed permissions) {
if (isPermissionAuthenticationNeeded()) {
String methodName = joinPoint.getSignature().toShortString();
HttpServletRequest request = ((BeGenericServlet) joinPoint.getThis()).getServletRequest();
String[] perms = permissions.value();
logAuth(methodName, perms, true, null);
for (String perm : perms) {
if (request.isUserInRole(getFullPermission(perm))) {
logAuth(methodName, perms, false, true);
return;
}
}
logAuth(methodName, perms, false, false);
throw new ByActionStatusComponentException(ActionStatus.AUTH_FAILED);
}
}
private void logAuth(String methodName, String[] perms, boolean beforeAuth, Boolean success) {
if (beforeAuth) {
log.trace("#{} - authorizing before invoking endpoint {}", methodName);
} else {
String status = success ? "SUCCESS" : "FAILED";
log.trace("#{} - authorizing before invoking endpoint {}, Status: {}", methodName, status);
}
}
private String getFullPermission(String role) {
return AafPermission.getEnumByString(role).getFullPermission();
}
    private boolean isPermissionAuthenticationNeeded() {
        return configurationManager.getConfiguration().getAafAuthNeeded() && ThreadLocalsHolder.isExternalRequest();
    }
} |
/**
* This class implements a universal parser, using the parser
* registry to find an appropriate parser for extracting an
* XES model from any given file. May be used as a convenience
* method for applications.
*
* @author Christian W. Guenther ([email protected])
*/
public class XUniversalParser {
/**
* Checks whether the given file can be parsed by any parser.
*/
public boolean canParse(File file) {
for(XParser parser : XParserRegistry.instance().getAvailable()) {
if(parser.canParse(file)) {
return true;
}
}
return false;
}
/**
* Attempts to parse a collection of XES models
* from the given file, using all available parsers.
*/
public Collection<XLog> parse(File file) throws Exception {
Collection<XLog> result = null;
for(XParser parser : XParserRegistry.instance().getAvailable()) {
if(parser.canParse(file)) {
try {
result = parser.parse(file);
return result;
} catch(Exception e) {
// ignore and move on
}
}
}
throw new Exception("No suitable parser could be found!");
}
} |
There is a class of computer systems which execute requested jobs by using a plurality of processors, processor cores, or computing nodes managed as computing resources. For example, a computer system designed for high performance computing (HPC) includes a plurality of computing nodes as the resources for execution of jobs. Also included is a managing node that manages the schedule of jobs executed on the computing nodes. This managing node performs scheduling of jobs so as to use the computing nodes in an efficient way.
The noted computer system executes various kinds of jobs, which may be categorized into serial jobs and parallel jobs. Serial jobs are executed on a single computing resource. Parallel jobs are executed on a plurality of computing resources in a parallel fashion. In the context of parallel job processing, the term “degree of parallelism (DOP)” refers to the number of computing resources used concurrently to execute a parallel job. Different jobs may take different lengths of time to execute. Some jobs finish in a relatively short time (e.g., a few minutes to a few hours), while other jobs consume a relatively long time (e.g., a few days to a few weeks).
In view of the above aspects of jobs, job scheduling is performed for each computing resource on the basis of, for example, the types of jobs (serial or parallel), the degree of parallelism in the case of parallel jobs, and the maximum execution time of each job. One proposed scheduling system is designed to achieve a high usage ratio of at least one central processing unit (CPU). Another proposed system improves the efficiency of job scheduling, taking advantage of checkpoint and restart services. The checkpoint and restart functions enable an ongoing job to stop at a certain checkpoint and restart afterwards from that checkpoint. See, for example, the following documents:
Japanese Laid-open Patent Publication No. 2010-182199
Duell, J.; Hargrove, P.; and Roman, E., “Requirements for Linux Checkpoint/Restart,” Berkeley Lab Technical Report (publication LBNL-49659), May 2002
As mentioned above, jobs may be suspended in the middle of their execution for the purpose of efficient scheduling. While some users can enjoy the advantages of this job suspension, other users may suffer a loss. For example, a typical job scheduler coordinates execution of jobs in such a way that the jobs are executed in the order they are requested. This orderly job execution is no longer the case when the scheduler is allowed to use job suspension. That is, the resulting schedule may stop an earlier-arriving job to execute a later-arriving job, and when this happens, the user who has requested the former job suffers a loss of time. Frequent occurrence of such swapping of execution order would lead to inequality of services to the users.
As can be seen from the above, the use of job suspension in scheduling is expected to improve the usage ratio of computing resources while it could bring some loss to the users. Conventional schedulers, however, suspend jobs without sufficient consideration of a balance between the expected improvement of resource usage and the risk of loss. |
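To make this trade-off concrete, the following toy sketch (illustrative only: the priority rule, class names, and time units are assumptions, not part of the patent text) shows a scheduler that always runs the most urgent ready job for one time quantum and suspends the rest, so a later-arriving but more urgent job can overtake an earlier request:

from dataclasses import dataclass, field
import heapq

@dataclass(order=True)
class Job:
    priority: int                          # lower value = more urgent (assumed rule)
    name: str = field(compare=False)
    remaining: int = field(compare=False)  # remaining run time in ticks

def run(jobs, quantum=1):
    # Toy scheduler: each tick it runs the most urgent ready job for one
    # quantum and implicitly suspends (checkpoints) every other job.
    ready = list(jobs)
    heapq.heapify(ready)
    finished = []
    while ready:
        job = heapq.heappop(ready)       # most urgent job wins the resource
        job.remaining -= quantum         # run one quantum from its checkpoint
        if job.remaining > 0:
            heapq.heappush(ready, job)   # suspended; resumes later
        else:
            finished.append(job.name)
    return finished

# "A" was requested first, but the later, more urgent "B" finishes first.
print(run([Job(2, "A", 2), Job(1, "B", 2)]))   # -> ['B', 'A']

In the example, job "B" is requested after job "A" but carries a higher urgency, so it completes first; this is exactly the swapping of execution order, and the resulting loss of time for the first requester, described above.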
import {entryListComponent} from "./entryList.directive";
import "../overview/overview.module.js";
import {EntryListController} from "./entryList.controller";
import {EntryListService} from "./entryList.service";
angular
.module('xr.entryList', ['xr.overview'])
.service('EntryListService', EntryListService)
.controller('EntryListController', EntryListController)
.component('entryList', entryListComponent);
|
import batou
import batou.component
import batou.lib.file
class ACL(batou.component.Component):
path = ''
# A list of rules for setfacl. Each rule e.g. user::rwx needs to be
# one element of the list.
ruleset = []
def update(self):
proc = self.cmd('setfacl --set-file=- "{}"'.format(self.path),
communicate=False)
outs, errs = proc.communicate(input='\n'.join(self.ruleset))
def verify(self):
proc = self.cmd(
'getfacl -cpE {}'.format(self.path),
communicate=False)
outs, errs = proc.communicate()
if sorted(outs.strip().split('\n')) != sorted(self.ruleset):
raise batou.UpdateNeeded
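
# A minimal usage sketch (hypothetical, not part of this module): inside a
# deployment component's configure(), the ACL might be attached like
#
#     self += ACL(
#         path='/srv/data',  # assumed target path
#         ruleset=['user::rwx', 'group::r-x', 'other::---'])
#
# verify() then compares the installed ACL (getfacl) against the desired
# ruleset and triggers update() only when they differ.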
|
Sylvester Heereman
Sylvester Heereman van Zuydtwyck (born 10 September 1974) is the former Vicar General of the Catholic congregation, the Legion of Christ.
Family
Heereman was born in Bad Neustadt an der Saale, Bavaria, on 10 September 1974.
Seminary and priesthood
In 1994 Heereman entered the Legion's novitiate in Germany. He was ordained on 23 December 2006. From 2007-11 he was the territorial director of Germany, and then Northern and Central Europe.
Vicar General of the Legion and Regnum Christi
On 16 February 2012 Heereman was named to his new post of Vicar General by Cardinal Velasio de Paolis. The Vicar General is second in command after the General Director. He replaced Father Luis Garza, who resigned to become territorial director of North America, and he was named after consulting the members of the Legion of Christ.
As of 15 October 2012, Heereman assumed the responsibility for running the Legion until the convocation of a General Chapter, as the former General Director, Álvaro Corcuera, was on sabbatical for health reasons. In two letters, dated 9–10 October, both Álvaro and Cardinal Velasio De Paolis (the Legion's Papal delegate) announced the sabbatical. |
The myocardial protective effects of puerarin on STZ-induced diabetic rats. To investigate the myocardial protective effects of puerarin on streptozotocin (STZ)-induced diabetic rats and the possible mechanisms involved, 45 Sprague-Dawley male rats were randomly divided into 3 groups: a diabetic group (intraperitoneally injected STZ 65 mg/kg), a puerarin treatment group (intraperitoneally injected STZ 65 mg/kg, and intraperitoneally injected puerarin 100 mg/kg/day for 4 weeks), and a control group (intraperitoneally injected saline 6 ml/kg). Four weeks after the model induction, the myocardial changes were observed by H-E staining and transmission electron microscopy, and the alterations of thrombospondin-1 (TSP-1) protein and mRNA expression in the myocardium were assessed by immunohistochemistry and real-time PCR. The heart function of the three groups' rats was tested by Langendorff isolated heart perfusion. The differences in weight and blood sugar between the diabetic, puerarin treatment and normal groups were significant after 4 weeks (P<0.01). Our results demonstrated that diabetic myocardial ultrastructural changes included myofibrillar disarrangements and mitochondrial disruption. These damages were significantly less severe in the puerarin treatment group compared with the diabetic group. A significant decrease of TSP-1 expression was observed in the puerarin-treated rats' myocardium compared to the diabetic rats (P<0.01). Left ventricular systolic end pressure (LVSEP) and left ventricular developed pressure (LVDP) of the puerarin treatment group were also significantly increased compared to the diabetic group (P<0.01). Altogether, puerarin could improve the left ventricular function of diabetic rats and showed protective effects on the myocardium by decreasing TSP-1 expression in the myocardium of diabetic rats.
/*
* Copyright 1999-2018 Alibaba Group Holding Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.csp.sentinel.eagleeye;
import java.util.concurrent.atomic.AtomicLong;
class TokenBucket {
private final long maxTokens;
private final long intervalMillis;
private volatile long nextUpdate;
private AtomicLong tokens;
public TokenBucket(long maxTokens, long intervalMillis) {
if (maxTokens <= 0) {
throw new IllegalArgumentException("maxTokens should > 0, but given: " + maxTokens);
}
if (intervalMillis < 1000) {
throw new IllegalArgumentException("intervalMillis should be at least 1000, but given: " + intervalMillis);
}
this.maxTokens = maxTokens;
this.intervalMillis = intervalMillis;
this.nextUpdate = System.currentTimeMillis() / 1000 * 1000 + intervalMillis;
this.tokens = new AtomicLong(maxTokens);
}
    public boolean accept(long now) {
        long currTokens;
        if (now > nextUpdate) {
            // a new interval has started: refill the bucket to its maximum
            currTokens = tokens.get();
            if (tokens.compareAndSet(currTokens, maxTokens)) {
                nextUpdate = System.currentTimeMillis() / 1000 * 1000 + intervalMillis;
            }
        }
        // try to take one token; stop retrying once the bucket is empty
        do {
            currTokens = tokens.get();
        } while (currTokens > 0 && !tokens.compareAndSet(currTokens, currTokens - 1));
        return currTokens > 0;
    }
}
|
Mother’s Day is Sunday, May 14, and among the places rolling out special meals for moms are Southern California casinos.
Here are some ideas for brunch and more to say thanks to mom.
84245 Indio Springs Parkway, Indio. 760-342-5000, www.fantasyspringsresort.com.
Fresh Grill Buffet: Made-to-order omelets and egg and cheese-stuffed mini croissants are among the brunch options. Other items include menudo, Maryland shrimp bisque and carved meats including prime rib, ham and turkey. As always, the chocolate fountain is there for dessert. Brunch is 10 a.m.-3 p.m. and dinner is 3-9 p.m. $33.99 for adults and $15.99 for children.
Pom: The special of the day is Lobster Milanese ($44). The restaurant will also offer its regular lunch and dinner menus. Lunch is noon-4 p.m. and dinner starts at 5 p.m. Reservations are recommended.
The Bistro: The resort’s fine dining restaurant will offer a special broiled seafood entree ($50) that includes with Chilean sea bass, jumbo gulf prawn, Cape Cod diver scallop a Maryland crab cake and half of a South African lobster tail. Dinner, which will also feature the regular menu, starts at 5 p.m. and reservations are recommended.
777 Harrah’s Rincon Way, Valley Center. 760-751-3100, www.harrahssocal.com.
The Buffet: There will be breakfast and lunch stations as well as spots for carved meats and international fare. Sunday hours are 9 a.m.-3:30 p.m. and 4:30-9 p.m.
The Café: Specials of the day at the 24-hour spot include New York steak, garlic buttered shrimp, roasted fingerling potatoes, seasoned vegetables, soup or salad and red velvet cake for dessert.
Fiore Steakhouse: A three-course prix fixe menu includes an entree choice of grilled shrimp Étouffée or petit filet mignon. Reservations are recommended. Sunday hours are 5:30-9 p.m.
49500 Seminole Drive, Cabazon. 800-252-4499, www.morongocasinoresort.com.
Give mom the gift of bubbles on Mother’s Day. Morongo will host the Mumm’s Day Brunch in the Grand Ballroom. The buffet will feature unlimited pours of Mumm’s Napa Sparkling Wine as well as a menu that includes oysters, crab claws, prime rib, eggs benedict, brioche French toast and more.
The brunch also includes entertainment from Nick Costa and the George Shelby Orchestra.
The festivities start at 1 p.m. Tickets are $65 for adults, $20 for children ages 4 to 11 and free for kids 3 and younger. Reservations are recommended and can be made by calling 866-328-2018.
11154 Highway 76, Pala. 760-510-5100, www.palacasino.com.
Pala’s buffet Choices will offer more options on Mother’s Day, including steamed-to-order Alaskan King Crab, prime rib, a seafood saute station, sushi and more. That’s in addition to its eight usual stations from 9 a.m.-9 p.m. There will also be a special brunch menu from 9 a.m.- 1 p.m.
The buffet is $46, but there are discounts for Pala Privileges card holders.
45000 Pechanga Parkway, Temecula. 888-732-4264, www.pechanga.com.
Pechanga Buffet: In addition to its regular menu, the buffet will also feature brunch items such as stuffed eggs benedict, huevos rancheros sope and French toast stuffed with strawberries. The regular stations include fresh seafood and more. Brunch runs 9 a.m.-2 p.m. and the all-day menu runs from 9 a.m.-10 p.m. $32.99 for adults; $14.99 for children 3-11; free for kids under 3.
Great Oak Steakhouse: Open from 5-10 p.m. for dinner, the $58 prix fixe menu includes curried carrot bisque or a wedge salad, a porcini mushroom-crusted filet mignon and a dessert of champagne gelée, raspberry and white chocolate mousse. The restaurant’s regular dinner menu will also be available and reservations are recommended.
Paisano’s: Also open from 5-10 p.m., the three-course prix fixe meal is $48 per person and includes a beef carpaccio appetizer, pan-seared sea bass with tomato lobster broth as the main course and dessert of a frozen champagne soufflé with strawberry sauce. The restaurant’s regular dinner menu will also be available and reservations are recommended.
Kelsey’s: A casual dining spot at Pechanga, Kelsey’s specials include a grilled lamb chop appetizer ($9.75), a parmesan-crusted halibut entree ($26) and a chocolate molten lava cake with raspberry coulis for dessert ($5.95). Kelsey’s regular menu will also be available. 11 a.m.-10 p.m.
Umi Sushi & Oyster Bar: Among the day’s specials are a beef stew appetizer ($9), eel, sweet shrimp, salmon and ikura sushi ($18), a seafood trio pasta with scallops, shrimp and crab ($35) and a dessert of pavlova red berries and vanilla jus ($9). Umi is open 5-10 p.m.
Pechanga Café: The 24-hour spot will feature shrimp scampi ($17.95) and strawberry shortcake ($5.95).
Journey’s End: The day’s buffet will include made-to-order omelet and pancake stations, salad and carving stations, a raw bar and more. $48 for adults; $25 for children. Normal Sunday hours are 7 a.m.- 6 p.m.
Vanessa Franko is the Digital Director of Entertainment for the Southern California News Group. The lure of palm trees and covering pop culture brought her to The Press-Enterprise in Riverside in 2006. Vanessa has reported on everything from the Palm Springs International Film Festival to the MLB All-Star Game as a reporter, photographer, videographer and on-camera personality. She's won awards for her coverage of the Coachella Valley Music and Arts Festival and for crime reporting in her home state of Maryland. Vanessa studied multimedia storytelling as a Knight Digital Media Center fellow in Dec. 2011 and has taught college courses in digital journalism. She's seen shows at every major concert venue in Southern California, but most special was when Paul McCartney played the high-desert roadhouse Pappy & Harriet's in Pioneertown for a couple hundred fans in Oct. 2016. Her album collection numbers in the thousands (including a couple hundred on vinyl) and when she isn't hunting for records, she and her husband like to check out the best in Southern California craft beer and watch sports. She also had a cameo in the 1992 Atlanta Braves highlight film, Lightning Strikes Twice! |
Learning clinical reasoning Errors in clinical reasoning continue to account for significant morbidity and mortality, despite evidence-based guidelines and improved technology. Experts in clinical reasoning often use unconscious cognitive processes that they are not aware of unless they explain how they are thinking. Understanding the intuitive and analytical thinking processes provides a guide for instruction. How knowledge is stored is critical to expertise in clinical reasoning. Curricula should be designed so that trainees store knowledge in a way that is clinically relevant. Competence in clinical reasoning is acquired by supervised practice with effective feedback. Clinicians must recognise the common errors in clinical reasoning and how to avoid them. Trainees can learn clinical reasoning effectively in everyday practice if teachers provide guidance on the cognitive processes involved in making diagnostic decisions.
Unilever has appointed Charlotte Carroll as its communications director for the UK and Ireland as it seeks to promote its green credentials.
A key part of her role will include communications for the firm's sustainable living plan, under which it aims to double the size of its business and halve its environmental impact by 2020.
She will oversee communications to internal and external audiences, such as employees, media, the British government, and NGOs. This includes those relating to Unilever's 11 factories and its research and development base in the UK and Ireland, which make 80% of the products it sells in the UK.
Before her appointment, Carroll worked for the company on a freelance basis as change communications director, overseeing a restructuring program that will see the closure of four plants and the loss of 500 jobs by the end of 2013, as well as the creation of a state-of-the-art “green” manufacturing hub in Port Sunlight.
Prior to this, she held internal communications consultancy roles with the BBC and Siemens. She also worked as communications manager at Unilever from 2001 to 2006.
Carroll replaces Helene Bradley-Ritt, who left to spend more time with her family. |
#!/usr/bin/env python
# coding: utf-8
# # Introduction to GeoPandas
# In[1]:
import geopandas as gpd
import matplotlib.pyplot as plt
import warnings
warnings.simplefilter("ignore")
# ## Read Shape Files into GeoPandas
# In[2]:
# read file
data = gpd.read_file(r'../shapefiles/districts.shp')
# In[3]:
# show data
data
# In[4]:
# type of dataframe
type(data)
# In[5]:
# plot the map
data.plot()
# In[6]:
# color
data.plot(color = "red")
# In[7]:
# edge color
data.plot(color = "red", edgecolor='black')
# In[13]:
# cmap: https://matplotlib.org/2.0.2/users/colormaps.html
data.plot(color = "red", edgecolor='black', cmap='jet')
# In[9]:
# color by column
data.plot(color = "red", edgecolor = 'black', cmap= 'jet', column = 'districts')
# In[15]:
# read area of interest
area_of_interest = gpd.read_file(r'../shapefiles/area_of_interest.shp')
# In[16]:
area_of_interest.plot()
# In[17]:
# Plot the figure side by side
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(10,8))
data.plot(ax = ax1, color = "red", edgecolor = 'black', cmap= 'jet', column = 'districts')
area_of_interest.plot(ax = ax2, color='green')
# In[21]:
# Plot the figures stacked vertically
fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(10,8))
data.plot(ax = ax1, color = "red", edgecolor = 'black', cmap= 'jet', column = 'districts')
area_of_interest.plot(ax = ax2, color='green')
plt.axis('off')
plt.show()
# In[35]:
# Plotting multiple layers: area of interest
fig, ax = plt.subplots(figsize=(10,6))
data.plot(ax = ax, color = "red", edgecolor = 'black', cmap= 'jet', column = 'districts')
area_of_interest.plot(ax = ax, color='green')
# In[36]:
# Plotting multiple layers: area of interest
fig, ax = plt.subplots(figsize=(10,6))
data.plot(ax = ax, color = "red", edgecolor = 'black', cmap= 'jet', column = 'districts')
area_of_interest.plot(ax = ax, color='none', edgecolor = 'black')
# In[37]:
atms = gpd.read_file(r"../shapefiles/atms.shp")
# In[42]:
# Plotting multiple layers: area of interest
fig, ax = plt.subplots(figsize=(10,6))
data.plot(ax = ax, color = "red", edgecolor = 'black', cmap= 'jet', column = 'districts')
area_of_interest.plot(ax = ax, color='none', edgecolor = 'black')
atms.plot(ax = ax, color = "black", markersize = 16)
# In[43]:
data.crs
# In[44]:
area_of_interest.crs
# In[45]:
atms.crs
# In[53]:
# Reprojecting: coordinate reference system
districs = data.to_crs(epsg = 32629)
area_of_interest = area_of_interest.to_crs(epsg = 32629)
# In[47]:
districs.crs
# In[49]:
districs.plot(figsize = (10,6))
# In[54]:
# overlays
df = gpd.overlay(districs, area_of_interest, how="intersection")
# In[58]:
df.plot(edgecolor = "red", figsize = (10,8))
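# A possible next step (a sketch, not part of the original notebook; the output
# path below is illustrative): persist the clipped districts to a new shapefile.
# In[ ]:
df.to_file(r'../shapefiles/districts_clipped.shp')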
|
Nursing knowledge of the principles of self-care of heart failure in primary care: a multicentre study. INTRODUCTION Nurses play an important part in the education of patients with HF. To prepare patients with HF for self-care maintenance behaviours, nurses must have knowledge of basic self-care maintenance principles. AIM OF THE STUDY The aim of this study was to determine the degree of knowledge of primary care (PC) nurses of the principles of self-management of HF and the variables associated with it. METHODOLOGY This is an observational, cross-sectional descriptive study, carried out in 2014 in the city of Barcelona (Catalonia). The Nurses' Knowledge of Heart Failure Education Principles questionnaire (NKHFEP) was used to assess the principles of HF self-care education. The instrument's items assess nurses' knowledge of 5 themes: diet, liquids/weight, worsening signs or symptoms, medication, and exercise. Factors related to adequate knowledge were evaluated. RESULTS Of the 216 PC nurses who completed the questionnaire, the average score was 15.6 (SD: 2.2). Only 36 (16.7%) obtained an adequate level of knowledge, defined as a score ≥ 18 points. In multivariate logistic regression, the nurse factors associated with an adequate knowledge of the principles of self-care of HF were having achieved a PhD degree (OR: 36.4, 95% CI: 2.8-468.2, p = 0.006) and previous specific training in HF (OR: 19.8, 95% CI: 1.4-279.3, p = 0.026). CONCLUSIONS The degree of knowledge of PC nurses of the principles of self-care in HF was higher among nurses who had completed a doctorate and among nurses who had received specific training in HF.
import java.util.Scanner;

public class A {
    public static void main(String[] args) {
        Scanner sc = new Scanner(System.in);
        int t = sc.nextInt();
        while (t-- >= 1) {
            int n = sc.nextInt();
            int[] price = new int[n];
            for (int i = 0; i < n; i++) {
                price[i] = sc.nextInt();
            }
            // a day is "bad" if some later day has a strictly lower price
            int badDays = 0;
            int min = price[n - 1];
            for (int i = n - 2; i >= 0; i--) {
                if (price[i] > min) {
                    badDays++;
                }
                min = Math.min(price[i], min);
            }
            System.out.println(badDays);
        }
    }
}
frase = str(input('Type a phrase: ')).strip().upper()
print('The phrase contains {} letter A(s)'.format(frase.count('A')))
print('The first letter A appears at position:', frase.find('A') + 1)
print('The last letter A appears at position:', frase.rfind('A') + 1)
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import paddle.fluid as fluid
from .fl_job import FLCompileTimeJob
class JobGenerator(object):
"""
A JobGenerator is responsible for generating distributed federated
learning configs. Before federated learning job starts, organizations
need to define a deep learning model together to do horizontal federated
learning.
"""
def __init__(self):
# worker num for federated learning
self._worker_num = 0
# startup program
self._startup_prog = None
# inner optimizer
self._optimizer = \
fluid.optimizer.SGD(learning_rate=0.001)
self._feed_names = []
self._target_names = []
def set_optimizer(self, optimizer):
"""
Set optimizer of current job
"""
self._optimizer = optimizer
def set_losses(self, losses):
"""
Set losses of current job
losses can be a list of loss so that we can do
optimization on multiple losses
"""
self._losses = losses
def set_startup_program(self, startup=None):
"""
set startup program for user defined program
"""
        if startup is None:
startup = fluid.default_startup_program()
self._startup_prog = startup
def set_infer_feed_and_target_names(self, feed_names, target_names):
if not isinstance(feed_names, list) or not isinstance(target_names,
list):
raise ValueError(
"input should be list in set_infer_feed_and_target_names")
'''
print(feed_names)
print(target_names)
for item in feed_names:
if type(item) != str:
raise ValueError("item in feed_names should be string")
for item in target_names:
if type(item) != str:
raise ValueError("item in target_names should be string")
'''
self._feed_names = feed_names
self._target_names = target_names
def generate_fl_job(self,
fl_strategy,
server_endpoints=[],
worker_num=1,
output=None):
"""
Generate Federated Learning Job, based on user defined configs
Args:
fl_strategy(FLStrategyBase): federated learning strategy defined by current federated users
server_endpoints(List(str)): endpoints for federated server nodes
worker_num(int): number of training nodes
output(str): output directory of generated fl job
Returns:
None
Examples:
import paddle.fluid as fluid
import paddle_fl as fl
from paddle_fl.core.master.job_generator import JobGenerator
from paddle_fl.core.strategy.fl_strategy_base import FLStrategyFactory
input_x = fluid.layers.data(name="input_x", shape=[10], dtype="float32")
            label = fluid.layers.data(name="label", shape=[1], dtype="int64")
fc0 = fluid.layers.fc(input=input_x, size=2, act='sigmoid')
cost = fluid.layers.cross_entropy(input=fc0, label=label)
loss = fluid.layers.reduce_mean(cost)
job_generator = JobGenerator()
optimizer = fluid.optimizer.SGD(learning_rate=0.1)
job_generator.set_optimizer(optimizer)
job_generator.set_losses([loss])
            server_endpoints = ["127.0.0.1:8181"]
worker_num = 10
            build_strategy = FLStrategyFactory()
build_strategy.fed_avg = True
strategy = build_strategy.create_fl_strategy()
job_output_dir = "fl_job_config"
            job_generator.generate_fl_job(strategy,
                                          server_endpoints=server_endpoints,
                                          worker_num=worker_num,
                                          output=job_output_dir)
"""
local_job = FLCompileTimeJob()
        assert len(self._losses) > 0
        assert self._startup_prog is not None
        assert fl_strategy is not None
        assert output is not None
fl_strategy.minimize(self._optimizer, self._losses)
# strategy can generate startup and main program
# of a single worker and servers
for trainer_id in range(worker_num):
startup_program = self._startup_prog.clone()
main_program = self._losses[0].block.program.clone()
fl_strategy._build_trainer_program_for_job(
trainer_id,
program=main_program,
ps_endpoints=server_endpoints,
trainers=worker_num,
sync_mode=True,
startup_program=startup_program,
job=local_job)
startup_program = self._startup_prog.clone()
main_program = self._losses[0].block.program.clone()
fl_strategy._build_server_programs_for_job(
program=main_program,
ps_endpoints=server_endpoints,
trainers=worker_num,
sync_mode=True,
startup_program=startup_program,
job=local_job)
local_job.set_feed_names(self._feed_names)
local_job.set_target_names(self._target_names)
local_job.set_strategy(fl_strategy)
local_job.save(output)
def generate_fl_job_for_k8s(self,
fl_strategy,
server_pod_endpoints=[],
server_service_endpoints=[],
worker_num=1,
output=None):
local_job = FLCompileTimeJob()
        assert len(self._losses) > 0
        assert self._startup_prog is not None
        assert fl_strategy is not None
        assert output is not None
fl_strategy.minimize(self._optimizer, self._losses)
# strategy can generate startup and main program
# of a single worker and servers
for trainer_id in range(worker_num):
startup_program = self._startup_prog.clone()
main_program = self._losses[0].block.program.clone()
fl_strategy._build_trainer_program_for_job(
trainer_id,
program=main_program,
ps_endpoints=server_service_endpoints,
trainers=worker_num,
sync_mode=True,
startup_program=startup_program,
job=local_job)
startup_program = self._startup_prog.clone()
main_program = self._losses[0].block.program.clone()
fl_strategy._build_server_programs_for_job(
program=main_program,
ps_endpoints=server_pod_endpoints,
trainers=worker_num,
sync_mode=True,
startup_program=startup_program,
job=local_job)
local_job.set_feed_names(self._feed_names)
local_job.set_target_names(self._target_names)
local_job.set_strategy(fl_strategy)
local_job.save(output)
def save_program(self,
main_prog,
startup_prog,
program_path,
input_list,
hidden_vars,
loss,
learning_rate=None):
if not os.path.exists(program_path):
os.makedirs(program_path)
main_program_str = main_prog.desc.serialize_to_string()
startup_program_str = startup_prog.desc.serialize_to_string()
params = main_prog.global_block().all_parameters()
para_info = []
for pa in params:
para_info.append(pa.name)
with open(program_path + '/input_names', 'w') as fout:
for input in input_list:
fout.write("%s\n" % input)
        if hidden_vars is not None:
with open(program_path + '/hidden_vars', 'w') as fout:
for var in hidden_vars:
fout.write("%s:%s\n" % (var[0], var[1].name))
with open(program_path + '/para_info', 'w') as fout:
for item in para_info:
fout.write("%s\n" % item)
with open(program_path + '/startup_program', "wb") as fout:
fout.write(startup_program_str)
with open(program_path + '/main_program', "wb") as fout:
fout.write(main_program_str)
with open(program_path + '/loss_name', 'w') as fout:
fout.write(loss.name)
        if isinstance(learning_rate, fluid.Variable):
with open(program_path + '/lr_name', 'w') as fout:
fout.write(learning_rate.name)
def generate_fl_job_from_program(self, strategy, endpoints, worker_num,
program_input, output):
local_job = FLCompileTimeJob()
with open(program_input + '/startup_program', "rb") as fin:
program_desc_str = fin.read()
new_startup = fluid.Program.parse_from_string(program_desc_str)
with open(program_input + '/main_program', "rb") as fin:
program_desc_str = fin.read()
new_main = fluid.Program.parse_from_string(program_desc_str)
para_list = []
with open(program_input + '/para_info', 'r') as fin:
for line in fin:
current_para = line[:-1]
para_list.append(current_para)
input_list = []
with open(program_input + '/input_names', 'r') as fin:
for line in fin:
current_input = line[:-1]
input_list.append(current_input)
with open(program_input + '/loss_name', 'r') as fin:
loss_name = fin.read()
if os.path.exists(program_input + '/lr_name'):
with open(program_input + '/lr_name', 'r') as fin:
lr_name = fin.read()
else:
lr_name = None
for item in para_list:
para = new_main.global_block().var(item)
para.regularizer = None
para.optimize_attr = {'learning_rate': 1.0}
para.trainable = True
exe = fluid.Executor(fluid.CPUPlace())
loss = None
for var in new_main.list_vars():
if var.name == loss_name:
loss = var
            if lr_name is not None:
if var.name == lr_name:
lr = var
with fluid.program_guard(new_main, new_startup):
            if lr_name is not None:
optimizer = fluid.optimizer.MomentumOptimizer(
learning_rate=lr, momentum=0.9, parameter_list=para_list)
else:
optimizer = fluid.optimizer.MomentumOptimizer(
learning_rate=0.00001,
momentum=0.9,
parameter_list=para_list)
exe.run(new_startup)
strategy.minimize(optimizer, loss)
for trainer_id in range(worker_num):
startup_program = new_startup.clone()
main_program = loss.block.program.clone()
strategy._build_trainer_program_for_job(
trainer_id,
program=main_program,
ps_endpoints=endpoints,
trainers=worker_num,
sync_mode=True,
startup_program=startup_program,
job=local_job)
startup_program = new_startup.clone()
main_program = loss.block.program.clone()
strategy._build_server_programs_for_job(
program=main_program,
ps_endpoints=endpoints,
trainers=worker_num,
sync_mode=True,
startup_program=startup_program,
job=local_job)
local_job.set_feed_names(input_list)
local_job.set_target_names([loss.name])
local_job.set_strategy(strategy)
local_job.save(output)
|
import {
Dialog,
DialogTitle,
DialogContent,
DialogContentText,
DialogActions,
DialogProps,
Button,
} from '@mui/material';
import React from 'react';
interface ConfirmDeleteAlertProps extends DialogProps {
open: boolean;
handleClose?: (event: {}, reason: 'backdropClick' | 'escapeKeyDown') => void;
onConfirm?: React.MouseEventHandler<HTMLButtonElement>;
}
export const ConfirmDeleteAlert = ({
open,
handleClose,
onConfirm,
...props
}: ConfirmDeleteAlertProps) => {
return (
<Dialog
open={open}
onClose={handleClose}
aria-labelledby="alert-dialog-title"
aria-describedby="alert-dialog-description"
{...props}
>
<DialogTitle id="alert-dialog-title">Warning</DialogTitle>
<DialogContent>
<DialogContentText id="alert-dialog-description">
Would you like to delete this appointment?
</DialogContentText>
</DialogContent>
<DialogActions>
        {/* The dialog close handler expects a close reason, so invoke it as an
            escape-key dismissal instead of casting it to a click handler. */}
        <Button
          onClick={(event) => handleClose?.(event, 'escapeKeyDown')}
        >
          No
        </Button>
<Button onClick={onConfirm} autoFocus>
Yes
</Button>
</DialogActions>
</Dialog>
);
};
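// Usage sketch (illustrative; the parent state and delete handler below are
// hypothetical, not part of this file):
//
//   const [confirmOpen, setConfirmOpen] = React.useState(false);
//
//   <ConfirmDeleteAlert
//     open={confirmOpen}
//     handleClose={() => setConfirmOpen(false)}
//     onConfirm={() => {
//       deleteAppointment();        // hypothetical delete action
//       setConfirmOpen(false);
//     }}
//   />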
|
// From repository: ankitpaudel20/Advance-C-Workshop
#include "threading.h"
#include <stdio.h>
#include <stdint.h>
#define ArrayCount(a) (sizeof(a) / sizeof((a)[0]))
// Let's use this struct to pass an array of strings to another thread
typedef struct {
const char **strings;
uint32_t string_count;
} Thread_Arg;
int thread_main(void *ptr) {
// This came from the last argument to thrd_create function
Thread_Arg *arg = ptr;
// Loop and print all the strings
for (uint32_t index = 0; index < arg->string_count; ++index) {
printf("%s.\n", arg->strings[index]);
}
return 0;
}
int main() {
const char *args[] = {
"The cake is a lie",
"People die when they are killed"
};
Thread_Arg thread_arg;
thread_arg.strings = args;
thread_arg.string_count = ArrayCount(args);
thrd_t my_thread;
// The last argument is passed to *thread_main* as the argument
// The pointer (last argument) must be valid while the newly
// created thread is using that argument
thrd_create(&my_thread, thread_main, (void *)&thread_arg);
printf("There are 1000 leaves in the pile.\n");
// Waiting for another thread
thrd_join(my_thread, NULL);
return 0;
}
|
Lipid changes during basal insulin peglispro, insulin glargine, or NPH treatment in six IMAGINE trials Basal insulin peglispro (BIL) is a novel basal insulin with hepatopreferential action resulting from reduced peripheral effects. This report provides an integrated summary of lipid changes at 26 weeks with BIL and comparator insulins (glargine, NPH) from phase III studies in type 1 diabetes (T1D), insulin-naïve patients with type 2 diabetes (T2D), patients with T2D on basal insulin only and patients with T2D on basal-bolus therapy. BIL treatment had little effect on HDL cholesterol and LDL cholesterol in all patients. The effect of both BIL and glargine treatment on triglycerides (TG) depended on whether patients had been previously treated with insulin. When BIL replaced conventional insulin glargine or NPH treatments, increases in TG levels were observed. When BIL or comparator insulins were given for 26 weeks to insulin-naïve patients with T2D, TG levels were unchanged from baseline with BIL but decreased with either glargine or NPH. The decreased peripheral action of BIL may reduce suppression of lipolysis in peripheral adipose tissue resulting in increased free fatty acid delivery to the liver and, hence, increased hepatic TG synthesis and secretion. |
In vitro cultivation of mycobacteria from human lepromas and from an armadillo infected with Mycobacterium leprae. Experimental evidence has been presented that the host-grown, hitherto noncultivated mycobacteria, M. leprae and M. lepraemurium, are metabolically competent microorganisms (1, 5, 6, 8, 11, 12, 14). They are provided with a complete and functional tricarboxylic acid cycle and electron transport chain. We also described (5, 6, 10, 11, 13) that M. leprae and M. lepraemurium have the capacity to oxidize the following substrates: 1. Sodium succinate. 2. Substances containing SH groups: L-cysteine, penicillamine and thioglucose. 3. Oleic acid and Tween 80. 4. Unidentified components in the heat-stable yeast extract (Difco). Oxidation of some of the above substrates resulted in the formation of adenosine triphosphate (ATP) by M. lepraemurium (1). Based on the above results, culture media were prepared in which the oxidizable substrates were incorporated as prospective energy sources (9, 10, 13). Additional ingredients such as carbon and nitrogen sources were added to the media. The thus-prepared culture media were inoculated with M. leprae isolated from human lepromas and from leprous tissues of an armadillo. When incubated at 34°C, strongly acid-fast mycobacteria were cultured and regularly subcultured. The cultures of mycobacteria obtained are, so far, unidentified but might be identical to the etiologic agent of human and/or armadillo leprosy. |
The use of white light interferometry to characterize wear in short term retrieved highly crosslinked UHMWPE liners White light interferometry is a novel way to measure the surface topology of retrieved orthopedic acetabular liners. This method is used to compare and characterize the degree of wear seen on the articulating surface of the implants. Surface data is divided into waviness (machine marks) or roughness (wear). Our findings highlight the importance of quantitative techniques such as white light interferometry for distinguishing between initial and in vivo generated surface morphology. |
import java.io.File;
import java.io.IOException;

import android.content.Context;
import android.graphics.drawable.Drawable;

/**
* Representing photos that are supplied as part of the app
*
* @author dagfinn.parnas
*
*/
public class SlideshowPhotoDrawable extends SlideshowPhoto {
protected int largePhotoDrawableId;
protected Context context;
public SlideshowPhotoDrawable(Context context, String title, String description, int largePhotoDrawableId, String largePhotoShareUrl){
super(title,description,null,null,"dummy url");
this.largePhotoDrawableId=largePhotoDrawableId;
this.context=context;
this.largePhoto=largePhotoShareUrl;
}
/* (non-Javadoc)
* @see com.elsewhat.smugmug.api.SlideshowPhoto#getLargePhotoDrawable(java.io.File)
*/
@Override
public Drawable getLargePhotoDrawable(File folder, int maxWidth, int maxHeight)throws IOException {
//changed this due to ICS bug causing a too large drawable
//return context.getResources().getDrawable(largePhotoDrawableId);
return FileUtils.readBitmapFromResources(context.getResources(), largePhotoDrawableId, maxWidth,maxHeight);
}
/* (non-Javadoc)
* @see com.elsewhat.smugmug.api.SlideshowPhoto#isCacheExisting(java.io.File)
*/
@Override
public boolean isCacheExisting(File folder) {
//return false in order for share photo to use largePhotoShareUrl
return false;
}
/**
* Returns the drawable id
* Can be used for creating an Uri to the file
*
* @return
*/
public int getDrawableId(){
return largePhotoDrawableId;
}
} |
. OBJECTIVE To investigate the safety and effectiveness of HLA-mismatched allogeneic hematopoietic stem cell transplantation (allo-HSCT) combined with related haploidentical bone marrow infusion for the treatment of hematologic malignancies, and to explore methods for reducing the incidence of aGVHD and their clinical significance. METHODS A total of 30 patients with hematologic malignancies (8 cases of ALL, 17 of AML, 2 of MDS and 3 of Mix-AL) received related haploidentical or unrelated HLA-mismatched allo-HSCT combined with related haploidentical bone marrow infusion. Among them, 20 cases received related haploidentical transplantation from the first donor and 10 cases received unrelated HLA-mismatched transplantation. The new conditioning regimen for the patients who underwent allo-HSCT consisted of fludarabine, busulfan, Me-CCNU and cyclophosphamide. The drugs for GVHD prophylaxis included cyclosporine A and methotrexate, while mycophenolate mofetil and rabbit anti-T-lymphocyte globulin (ATG) were also used. RESULTS All the patients achieved full engraftment. The median time for neutrophils to reach over 0.5 × 10⁹/L was 14 days (8-26 days), while the median time for platelets to reach over 20 × 10⁹/L was 11.5 days (10-24 days). The incidence of grade I-II aGVHD at 100 days was 22.28% (95% CI 9.9%-34.7%); the incidences of grade II-IV and III-IV aGVHD were 22.7% (95% CI 10%-35.4%) and 12.7% (95% CI 6.9%-15.5%), respectively. The incidences of grade I-II and III-IV cGVHD were 13.3% (95% CI 1.4%-26.8%) and 3.3% (95% CI 0%-12.2%); one case (3.3%) had extensive cGVHD. Two-year DFS and OS were 81.1% (95% CI 66.0%-96.2%) and 68.2% (95% CI 51.0%-85.4%), respectively. CONCLUSION These data suggest that the incidence of grade II-IV aGVHD in recipients of two partially HLA-matched units was lower; co-infusion of haplo-BM and partially matched units in allogeneic transplantation is safe and effective for reducing the incidence of aGVHD and improving DFS. |
Using Active Learning Activities to Increase Student Outcomes in an Information Technology Course ABSTRACT The goal of higher education to produce well-rounded and critically thinking problem solvers to enter into the hospitality industry remains a crucial area of research for hospitality educators. To support this goal, numerous hospitality scholars have engaged in innovative approaches to classroom instruction that facilitate problem solving and critical thinking. Grounded in the principles of the constructivist theory of learning, this study furthers understanding of how to produce these results by using mobile technology within a flipped classroom. Approximately 230 undergraduate students were evaluated using a freshman course redesigned from a predominantly lecture-based format to one that utilized active learning activities. The results showed improvements in technical skills, leadership, teamwork, communication, openness to diversity, and creativity. |
/// \brief Create and instantiate a TypeSourceInfo with type source information.
///
/// \param T QualType referring to the type as written in source code.
///
/// \param ReturnTypeInfo For declarators whose return type does not show
/// up in the normal place in the declaration specifiers (such as a C++
/// conversion function), this pointer will refer to a type source information
/// for that return type.
TypeSourceInfo *
Sema::GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
TypeSourceInfo *ReturnTypeInfo) {
TypeSourceInfo *TInfo = Context.CreateTypeSourceInfo(T);
UnqualTypeLoc CurrTL = TInfo->getTypeLoc().getUnqualifiedLoc();
if (isa<PackExpansionType>(T)) {
cast<PackExpansionTypeLoc>(CurrTL).setEllipsisLoc(D.getEllipsisLoc());
CurrTL = CurrTL.getNextTypeLoc().getUnqualifiedLoc();
}
for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
while (isa<AttributedTypeLoc>(CurrTL)) {
AttributedTypeLoc TL = cast<AttributedTypeLoc>(CurrTL);
fillAttributedTypeLoc(TL, D.getTypeObject(i).getAttrs());
CurrTL = TL.getNextTypeLoc().getUnqualifiedLoc();
}
DeclaratorLocFiller(Context, D.getTypeObject(i)).Visit(CurrTL);
CurrTL = CurrTL.getNextTypeLoc().getUnqualifiedLoc();
}
if (ReturnTypeInfo) {
TypeLoc TL = ReturnTypeInfo->getTypeLoc();
assert(TL.getFullDataSize() == CurrTL.getFullDataSize());
memcpy(CurrTL.getOpaqueData(), TL.getOpaqueData(), TL.getFullDataSize());
} else {
TypeSpecLocFiller(Context, D.getDeclSpec()).Visit(CurrTL);
}
return TInfo;
} |
// File: libs/entity/src/tests/entity-state/reflection-validation.spec.ts
import { State } from '@ngxs/store';
import { defaultEntityState, EntityState } from '../../lib/entity-state';
import { EntityStateModel } from '../../lib/models';
import { IdStrategy } from '../../lib/id-strategy';
import { NGXS_META_KEY } from '../../lib/internal';
import { EntityActionType } from '../../lib/actions/type-alias';
interface ToDo {
title: string;
}
@State<EntityStateModel<ToDo>>({
name: 'todo',
defaults: defaultEntityState()
})
class TestState extends EntityState<ToDo> {
constructor() {
super(TestState, 'title', IdStrategy.EntityIdGenerator);
}
onUpdate(current: Readonly<ToDo>, updated: Readonly<Partial<ToDo>>): ToDo {
return { ...current, ...updated };
}
}
describe('EntityState reflection validation', () => {
beforeAll(() => {
TestState[NGXS_META_KEY].path = 'todo';
});
it('should find all actions in state class', () => {
// replaces validation in EntityState#setup
const actions = Object.values(EntityActionType);
const baseProto = Reflect.getPrototypeOf(TestState.prototype);
// uses find to see which one is missing in the error message
const missing = actions.find(action => !(action in baseProto));
expect(missing).toBeUndefined();
});
it('should match the methods with the action names', () => {
// replaces @EntityActionHandler validation
const instance = new TestState();
const protoKeys = Object.keys(Reflect.getPrototypeOf(Reflect.getPrototypeOf(instance)));
// you have to manually exclude certain methods, which are not action handlers
// TODO: Add Reflect Meta-data with @EntityActionHandler annotation and query it here?
const exclude = ['idOf', 'setup', 'onUpdate', '_update', '_addOrReplace'];
const actionHandlers = protoKeys.filter(key => !exclude.includes(key));
// actual test
const entityActionTypeValues = Object.values(EntityActionType);
// uses find to see which one is missing in the error message
const missing = actionHandlers.find(fn => !entityActionTypeValues.includes(fn));
expect(missing).toBeUndefined();
});
});
|
/*
********************************************************************************************
* Copyright (c) 2019 MINTROBOT Co., Ltd.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the <MINTROBOT Co, Ltd.> nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************************************
**/
#ifndef WORKERPROJECT_H
#define WORKERPROJECT_H
#include <QThread>
#include <QMutex>
#include "app/model/nodes/ModelNode.h"
#include "app/model/project/ModelProject.h"
#include "app/model/interface/InterfaceNodeStatusNotifier.h"
class ThreadProjectPlayer : public QThread
{
Q_OBJECT
public:
explicit ThreadProjectPlayer();
virtual ~ThreadProjectPlayer() override;
void setProject(ModelProject* pModelProject);
void setNotifier(InterfaceNodeStatusNotifier* pNotifier);
private:
ModelProject* __pModelProject = nullptr;
InterfaceNodeStatusNotifier* __pNotifier = nullptr;
QMutex __mutexFlags;
bool __flagStop = false;
bool __flagPause = false;
virtual void run() override;
signals:
void signalRunningNode(QUuid nodeID);
void signalFinishedNode(QUuid nodeID);
void signalErrorOnNode(QUuid nodeID);
void signalFinishedPlayingProject();
private slots:
void slotPlayProject();
void slotPauseProject();
void slotResumeProject();
void slotStopProject();
};
#endif // WORKERPROJECT_H
|
#!/usr/bin/env python3
#
# Copyright (c) 2014, <NAME>, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
This module provides interfaces to GPIOs - useful for lighting the LEDs in the I/O Ready Set STEM
Cell, and other Ready Set STEM Cells.
'''
import os
import time
from functools import partial
import re
PINS = range(2, 28)
PULLUP_CMD = '/usr/local/bin/pullup.sbin'
GPIO_EXPORT_FILE = '/sys/class/gpio/export'
GPIO_UNEXPORT_FILE = '/sys/class/gpio/unexport'
GPIO_PIN_FORMAT_STRING = '/sys/class/gpio/gpio%d'
PULL_DISABLE = 0
PULL_DOWN = 1
PULL_UP = 2
def _global_pin_init():
# Disable all GPIOs to start. Note that unexporting a GPIO if it hasn't
# been exported will fail - but we will ignore it.
for pin in PINS:
try:
with open(GPIO_UNEXPORT_FILE, 'w') as f:
f.write('%d' % pin)
except IOError:
pass
def retry_func_on_error(func, tries=10, sleep=0.01, exc=IOError):
    # Retry func() with exponential backoff, re-raising the last error if all
    # tries fail.  The exception variable is cleared when an except block ends
    # in Python 3, so keep an explicit reference for the final re-raise.
    last_err = None
    for i in range(tries):
        try:
            ret = func()
        except exc as err:
            last_err = err
            time.sleep(sleep)
            sleep *= 2
        else:
            break
    else:
        raise last_err
    return ret
class Pin(object):
_global_pin_init()
def __init__(self, pin):
self._board_rev = self.board_rev()
self.gpio_dir = GPIO_PIN_FORMAT_STRING % pin
self.pin = pin
if pin in PINS:
if os.path.exists(self.gpio_dir):
raise IOError('GPIO pin already in use')
with open(GPIO_EXPORT_FILE, 'w') as f:
f.write('%d\n' % pin)
# Repeat trying to configure GPIO as input (default). Repeats
# required because this can fail when run right after the pin
# is exported in /sys. Once it passes, we know the pin is
# ready to use.
retry_func_on_error(partial(self._set_output, False))
else:
raise ValueError('Invalid GPIO pin')
self._write_gpio_file('direction', 'in')
self._write_gpio_file('active_low', '0')
self._write_gpio_file('edge', 'none')
self._set_pull(PULL_DISABLE)
@staticmethod
def board_rev():
with open('/proc/cpuinfo') as f:
cpuinfo = f.read()
matches = re.findall(
'^Revision.*([0-9A-F]{4})$',
cpuinfo,
flags=(re.MULTILINE|re.IGNORECASE)
)
board_rev = int(matches[0], 16)
return board_rev
def _write_gpio_file(self, filename, value):
def write_val(filename, value):
with open(self.gpio_dir + '/' + filename, 'w') as f:
f.write(value)
retry_func_on_error(partial(write_val, filename, value))
    def _set_output(self, output, starts_off=True):
        if output:
            # XOR of active_low and starts_off: pick the drive level that
            # leaves the output in its "off" state (or "on" if starts_off is False).
            is_low = (self._active_low and not starts_off) or (not self._active_low and starts_off)
            direction = 'low' if is_low else 'high'
        else:
            direction = 'in'
        self._write_gpio_file('direction', direction)
def _set_pull(self, pull):
if not pull in [PULL_UP, PULL_DOWN, PULL_DISABLE]:
raise ValueError('Invalid pull type')
os.system('%s %d %d %d' % (PULLUP_CMD, self.pin, pull, self._board_rev))
def disable(self):
'''Disable the GPIO pin.'''
with open(GPIO_UNEXPORT_FILE, 'w') as f:
f.write('%d\n' % self.pin)
class Output(Pin):
'''A GPIO output.
An `rstem.gpio.Output` configures a GPIO pin as an output. The pin can then used as a
programmable switch to drive LEDs, motor drivers, relays and other devices.
'''
def __init__(self, pin, active_low=True):
'''Create a new `Output`.
`pin` is the number of the GPIO as labeled on the Ready Set STEM Lid
connector. It is the GPIO number used by the Broadcom processor on
the Raspberry Pi.
If `active_low=True` (the default), then the output will be set LOW
(grounded, i.e. 0 volts) when the output is turned `on`. If
`active_low=False`, then the output will be set HIGH (the supply
voltage, i.e. 3.3 volts) when the output is turned `on`.
'''
super().__init__(pin)
self._active_low = active_low
self._set_output(True)
self._fvalue = retry_func_on_error(partial(open, self.gpio_dir + '/value', 'w'))
def _set(self, level):
self._fvalue.seek(0)
self._fvalue.write('1' if level else '0')
self._fvalue.flush()
def on(self):
        '''Turn the GPIO output on (respects `active_low` setting).'''
self._set(not self._active_low)
def off(self):
        '''Turn the GPIO output off (respects `active_low` setting).'''
self._set(self._active_low)
def disable(self):
'''Disable the GPIO pin.'''
self._fvalue.close()
super().disable()
class Input(Pin):
'''A GPIO input.
An `rstem.gpio.Input` configures a GPIO pin as an input. The pin can then
used as to read the logic level of the pin - useful for reading the state
of switches, sensors, and other electronics.
'''
def __init__(self, pin, active_low=False, pull=PULL_DISABLE):
'''Create a new `Input`.
`pin` is the number of the GPIO as labeled on the Ready Set STEM Lid
connector. It is the GPIO number used by the Broadcom processor on
the Raspberry Pi.
If `active_low=True` (the default), then when the input is externally
set LOW (grounded, i.e. 0 volts), it is considered `on`. If
`active_low=False`, then when the input is externally set HIGH (the
supply voltage, i.e. 3.3 volts), it is considered `on`.
`pull` is the state of the GPIO internal pullup/down.
If `pull` is `PULL_DISABLE`, then the internal pullup is disabled.
If `pull` is `PULL_UP`, then the internal pullup is enabled.
If `pull` is `PULL_DOWN`, then the internal pulldown is enabled.
'''
super().__init__(pin)
self._active_low = active_low
self._set_output(False)
self._set_pull(pull)
self._fvalue = retry_func_on_error(partial(open, self.gpio_dir + '/value', 'r'))
def configure(self, pull=None):
'''Set pullup on `pin`. See `__init__` for info on the `pull` argument.'''
self._set_pull(pull)
def _get(self):
self._fvalue.seek(0)
return 1 if self._fvalue.read().strip() == '1' else 0
def is_on(self):
        '''Return the GPIO input state (respects `active_low` setting).'''
return bool(not self._get() if self._active_low else self._get())
def is_off(self):
        '''Return the GPIO input state (respects `active_low` setting).'''
return not self.is_on()
def disable(self):
'''Disable the GPIO pin.'''
self._fvalue.close()
super().disable()
__all__ = ['Output', 'Input', 'PULL_DISABLE', 'PULL_UP', 'PULL_DOWN']
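# Minimal usage sketch (not part of the original module): blink an LED wired
# to an assumed GPIO pin.  The pin number below is hypothetical; any pin in
# PINS (2..27) works.  Requires the pullup.sbin helper and sysfs GPIO support.
if __name__ == '__main__':
    led = Output(14)  # hypothetical pin number
    for _ in range(5):
        led.on()
        time.sleep(0.5)
        led.off()
        time.sleep(0.5)
    led.disable()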
|
def showMenu(self):
def quitButtonCb(event, button):
button.ancestor.done = True
self.engine.quit()
def switchToEditModeCb(event, button):
self.engine.mode = self.engine.modes['edit']
button.ancestor.done = True
editModeButton = gui.Button(text="Edit Mode", callback=switchToEditModeCb)
quitButton = gui.Button(text="Quit", callback=quitButtonCb)
controls = [editModeButton, quitButton]
cd = gui.ControlDialog("Play Mode", controls, width=300)
cd.run() |
// File: libtelemetry/src/full/java/com/mapbox/android/telemetry/crash/CrashReporterJobIntentService.java
package com.mapbox.android.telemetry.crash;
import android.content.Context;
import android.content.Intent;
import android.support.annotation.NonNull;
import android.support.annotation.VisibleForTesting;
import android.support.v4.app.JobIntentService;
import android.util.Log;
import com.mapbox.android.core.FileUtils;
import com.mapbox.android.telemetry.CrashEvent;
import java.io.File;
import static com.mapbox.android.telemetry.MapboxTelemetryConstants.MAPBOX_TELEMETRY_PACKAGE;
/**
* This is a background job that sends crash events to the telemetry endpoint
* at startup.
*/
public final class CrashReporterJobIntentService extends JobIntentService {
private static final String LOG_TAG = "CrashJobIntentService";
private static final int JOB_ID = 666;
static void enqueueWork(@NonNull Context context) {
enqueueWork(context, CrashReporterJobIntentService.class, JOB_ID,
new Intent(context, CrashReporterJobIntentService.class));
}
@Override
protected void onHandleWork(@NonNull Intent intent) {
Log.d(LOG_TAG, "onHandleWork");
try {
File rootDirectory = FileUtils.getFile(getApplicationContext(), MAPBOX_TELEMETRY_PACKAGE);
if (!rootDirectory.exists()) {
Log.w(LOG_TAG, "Root directory doesn't exist");
return;
}
handleCrashReports(CrashReporterClient
.create(getApplicationContext())
.loadFrom(rootDirectory)
);
} catch (Throwable throwable) {
// TODO: log silent crash
Log.e(LOG_TAG, throwable.toString());
}
}
@VisibleForTesting
void handleCrashReports(@NonNull CrashReporterClient client) {
if (!client.isEnabled()) {
Log.w(LOG_TAG, "Crash reporter is disabled");
return;
}
while (client.hasNextEvent()) {
CrashEvent event = client.nextEvent();
if (client.isDuplicate(event)) {
Log.d(LOG_TAG, "Skip duplicate crash in this batch: " + event.getHash());
client.delete(event);
continue;
}
if (client.send(event)) {
client.delete(event);
} else {
Log.w(LOG_TAG, "Failed to deliver crash event");
}
}
}
}
|
// C11 threads don't have a static initializer so we need a runtime fallback
// This is called by the thread library specific implementation of evfs__init_once().
void evfs__lib_init(void) {
#ifdef EVFS_USE_THREADING
evfs__lock_init(&s_evfs_lock);
#endif
#ifdef EVFS_USE_ATEXIT
atexit(evfs__lib_shutdown);
#endif
s_evfs_initialized = true;
} |
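// Illustrative companion sketch (assumed, not from the EVFS sources): a
// pthreads-based evfs__init_once() that makes the runtime fallback run
// exactly once, standing in for the missing C11 static initializer.
#include <pthread.h>

static pthread_once_t s_evfs_once = PTHREAD_ONCE_INIT;

void evfs__init_once(void) {
  pthread_once(&s_evfs_once, evfs__lib_init);
}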
// File: app/src/main/java/com/xeniac/harrypotterstorybooks/dataProviders/PagesDataProviderBook3/PagesDataProviderBook3_2.java
package com.xeniac.harrypotterstorybooks.dataProviders.PagesDataProviderBook3;
import com.xeniac.harrypotterstorybooks.R;
import com.xeniac.harrypotterstorybooks.models.DataItemPages;
import java.util.ArrayList;
import java.util.List;
public class PagesDataProviderBook3_2 {
public static List<DataItemPages> dataItemPagesList;
static {
dataItemPagesList = new ArrayList<>();
addItem(new DataItemPages(321, 1, R.string.string_book_3_chapter_2_page_1, 32));
addItem(new DataItemPages(322, 2, R.string.string_book_3_chapter_2_page_2, 32));
addItem(new DataItemPages(323, 3, R.string.string_book_3_chapter_2_page_3, 32));
addItem(new DataItemPages(324, 4, R.string.string_book_3_chapter_2_page_4, 32));
addItem(new DataItemPages(325, 5, R.string.string_book_3_chapter_2_page_5, 32));
addItem(new DataItemPages(326, 6, R.string.string_book_3_chapter_2_page_6, 32));
addItem(new DataItemPages(327, 7, R.string.string_book_3_chapter_2_page_7, 32));
addItem(new DataItemPages(328, 8, R.string.string_book_3_chapter_2_page_8, 32));
addItem(new DataItemPages(329, 9, R.string.string_book_3_chapter_2_page_9, 32));
addItem(new DataItemPages(3210, 10, R.string.string_book_3_chapter_2_page_10, 32));
addItem(new DataItemPages(3211, 11, R.string.string_book_3_chapter_2_page_11, 32));
addItem(new DataItemPages(3212, 12, R.string.string_book_3_chapter_2_page_12, 32));
addItem(new DataItemPages(3213, 13, R.string.string_book_3_chapter_2_page_13, 32));
addItem(new DataItemPages(3214, 14, R.string.string_book_3_chapter_2_page_14, 32));
addItem(new DataItemPages(3215, 15, R.string.string_book_3_chapter_2_page_15, 32));
}
private static void addItem(DataItemPages item) {
dataItemPagesList.add(item);
}
} |
Tissue-specific enhancement of uridine utilization and 5-fluorouracil therapy in mice by benzylacyclouridine. Benzylacyclouridine (BAU), a potent inhibitor of uridine phosphorylase, delays the disappearance of uridine from plasma, affects the utilization of uridine by selected tissues, and enhances the therapeutic effects of 5-fluorouracil (FUra) in female C57BL/6 mice. A single 30-mg/kg i.v. injection of BAU lengthens the plasma half-life of both a tracer dose of uridine (3 micrograms/kg) and a pharmacological dose of uridine (250 mg/kg) by 250 and 83%, respectively. This dose of BAU also increases the normal plasma concentration of uridine about 4-fold to 9 microM and sustains these levels for 4 h. Four injections of BAU at 30 mg/kg over 6 h or a single injection at 240 mg/kg increases the plasma concentration of uridine over 10-fold to approximately 50 microM. In addition to affecting the pharmacokinetics of uridine, a 30-mg/kg dose of BAU selectively increases up to 4-fold the ability of normal host tissues to salvage a tracer dose of uridine for nucleic acid biosynthesis, the uracil nucleotide pool size, and the incorporation of uridine into nucleic acids. However, uridine salvage from plasma by colon tumor 38 is increased only slightly by BAU, while the uracil nucleotide pool size and uridine incorporation into tumor nucleic acids are actually decreased by 15 and 37%. The selective effect of BAU on uridine utilization is reflected in the ability of BAU to modify FUra-induced host toxicity. The dose of FUra required to kill 50% of the treated normal mice (350 mg/kg) is modestly increased by "rescue" regimens consisting of the subsequent administration of repeated injections of either BAU alone (30 mg/kg/injection) or uridine alone (250 mg/kg/injection). However, an increase of 54% is achieved when repeated injections of the combination of BAU and uridine are administered. In C57BL/6 mice bearing advanced transplants of colon tumor 38, the period of tumor growth inhibition resulting from multiple courses of FUra-containing drug regimens can be increased by the delayed administration of BAU alone or BAU combined with uridine. |
Social Media and Value Creation: Exploring the Perception of Generation Y toward Corporate Social Networking Applications Use Organizations increasingly rely on corporate social networks and online communities, under what is today called Enterprise 2.0, to enhance socialization and favor information/knowledge sharing, collaboration and value creation among coworkers. Researchers and practitioners have to date mostly assumed that people from Generation Y, because of their massive use of social media in the private arena, would be willing to accept and use these tools more easily and quickly in a corporate environment. However, to the best of our knowledge, no empirical work confirming this assumption has been reported. |
Carpal tunnel syndrome - adduction versus abduction. Editor - Sansone et al1 describe the findings of carpal tunnel syndrome in a recent article in Clinical Medicine & Research. In this article, the authors repeatedly describe weakness of thumb adduction as a sign of a carpal tunnel syndrome and cite the 2000 JAMA article by D'Arcy and McGee2 as support of these statements; however, it is thumb abduction, by the abductor pollicis muscle, which is weak in carpal tunnel syndrome.2 Weakness of thumb adduction is due to a problem of the adductor pollicis muscle, which is mainly innervated by the ulnar nerve. The typical sign of such an ulnar palsy is Froment's sign, named after the French physician, Jules Froment. |
Insights on production mechanism and industrial applications of renewable propylene glycol
Summary
Propylene glycol is a ubiquitous sustainable chemical that has several industrial applications. It can be used as a non-toxic antifreeze, in moisturizers, and in cosmetics products. Commercial production of propylene glycol uses petroleum-based propylene oxide. Therefore, there is a need to develop alternative and renewable propylene glycol production routes. Renewable propylene glycol can be produced from catalytic hydrogenolysis of glycerol. This study reviews different catalysts for glycerol hydrogenolysis, the reaction mechanism, and process challenges. Additionally, previous studies related to the economic and environmental assessment of propylene glycol production are presented in detail. The technology readiness level of different production pathways is outlined, as well as the challenges and future direction of propylene glycol production from glycerol and other renewable feedstocks. Catalytic transfer hydrogenolysis, a process that uses renewable H-donors in a liquid medium for the hydrogenolysis reaction, is also discussed and compared with conventional hydrogenolysis.
INTRODUCTION
Glycerol is one of the major products obtained during biodiesel processing through the transesterification reaction. The reaction converts animal fats or vegetable oils and alcohols into biodiesel in the presence of a catalyst (). However, the transesterification reaction produces significant quantities of crude glycerol, which makes it unattractive from an environmental and economic viewpoint. It should be noted that crude glycerol production has increased in recent years due to the elevated demand and supply of biodiesel. Crude glycerol comprises about 10 wt.% of the final products obtained during the transesterification reaction (). Crude glycerol has low commercial value due to its heterogeneous composition. It contains about 80% glycerol and 20% impurities including water, soap, methanol, fatty acid methyl esters (FAMEs), and inorganic elements (). Therefore, it is imperative to develop alternative methods for the conversion of crude glycerol into value-added chemicals.
Recently, there has been significant interest in the conversion of glycerol to value-added chemicals in order to reduce the overall cost of biodiesel production. Tan et al. reviewed different methods that use glycerol as feedstock for the production of green chemicals. The authors also outlined various value-added products and biofuels that could be obtained from glycerol. The advances in the conversion of glycerol to chemicals such as acrolein, glycerol carbonate, propylene glycol (1,2-propanediol), ethanol, and lactic acid were discussed (). Monteiro et al. identified propylene glycol, 1,3-propanediol, and hydrogen as the main glycerol-derived products that have attracted research interest recently. Propylene glycol is a promising chemical with numerous applications including as an antifreeze, cosmetics agent, moisturizer, solvent, surfactant, and preservative (). Commercially it can be produced from the hydrolysis of propylene oxide (). However, propylene oxide is obtained from petrochemical feedstocks. Factors such as environmental pollution, climate change, and declining petroleum reserves are major concerns that have promoted interest in alternative production routes (). The Chemical Market Reporter noted that the overall production of propylene glycol in the United States is about 1400 million pounds per year ().
Moreover, the domestic consumption of the chemical is predicted to increase by 4% each year (). The global scenario for propylene glycol production is presented in Figure 1, with North America accounting for over 55% of the global production. Other regions such as Asia and South America also produce about 22% and 14% of the global total, respectively.
It should be emphasized that the propylene glycol market is also experiencing severe challenges due to fluctuating fossil fuel prices. Therefore, new processes that can meet the increasing global demand for propylene glycol are required. Propylene glycol could also be produced through more environmentally friendly routes by using renewable feedstocks such as glycerol. The conversion of glycerol to propylene glycol could help mitigate the challenges of glycerol production while producing enough propylene glycol to meet the growing demand. Glycerol can be converted to propylene glycol via the selective hydrogenolysis reaction in the presence of a metallic catalyst and hydrogen in a batch or continuous reactor (). The source of hydrogen for the process is also a major concern. The produced propylene glycol is known as renewable propylene glycol (RPG) if the hydrogen is from renewable sources such as biomass gasification or electrolysis, or when the process is performed with hydrogen generated in situ via an external hydrogen transfer. Several studies have been reported on the catalytic conversion of glycerol to propylene glycol (Seretis and Tsiakaras, 2016). Some authors have also studied the selective hydrogenolysis process using hydrogen generated in situ (). Several reviews have also been documented on the selective hydrogenolysis of glycerol to propylene glycol using a metallic catalyst. Bozga et al. outlined the influence of metallic catalysts such as Cu, Ni, Co, and noble metal-based catalysts on the conversion of glycerol to propylene glycol. Vasiliadou and Lemonidou reviewed the reaction mechanism of glycerol to propylene glycol. The authors also outlined the effect of reaction parameters (temperature, catalyst loading, hydrogen pressure, and glycerol concentration) and reaction medium (acidic/alkaline) on propylene glycol yield. Nanda et al. discussed the advances in catalyst preparation and activation methods. However, most of the available studies are scattered in the literature. Table 1 compares the present study with reviews related to propylene glycol production.
Table 1. Comparisons between the present study and previous review articles related to propylene glycol production from glycerol (review title - main focus - reference):
- Conversion of glycerol to propanediol and acrolein by heterogeneous catalysis - previous research studies and progress in the conversion of glycerol to propanediol and acrolein; reaction mechanism of glycerol to propanediol and acrolein - Bozga et al.
- Recent advancements in catalytic conversion of glycerol into propylene glycol - research progress in process development, effects of catalyst preparation, and activation methods during the catalytic conversion of glycerol to propylene glycol; reviews the economic feasibility of integrating bio-hydrogen production with the hydrogenolysis reaction - Nanda et al.
- Glycerol transformation to value-added C3 diols: reaction mechanism, kinetic, and engineering aspects - discusses the effect of temperature, hydrogen pressure, glycerol concentration, and the reaction kinetics on the production of 1,2- and 1,3-propanediols - Vasiliadou and Lemonidou
- Catalytic transfer hydrogenolysis as an efficient route in cleavage of lignin and model compounds - the mechanism of catalytic transfer hydrogenolysis, the catalysts required, and the hydrogen source; current challenges and opportunities for future research on improving catalytic transfer hydrogenolysis - Zhang
- Insights on production mechanism and industrial applications of renewable propylene glycol (this review) - recent studies related to catalytic hydrogenolysis of glycerol to propylene glycol; briefly discusses the catalytic transfer hydrogenolysis process (use of a bifunctional catalyst, pros and cons of the technology)
Based on the information in Table 1, studies on the advances in RPG production methods and industrial applications have been scantly reported. Therefore, the current review presents the advances and progress in RPG production from glycerol. In addition, the industrial applications of RPG are outlined. The present review would not only complement the previous studies but also provide information that could be useful in the optimization and scale-up of the glycerol-to-RPG conversion processes.
PROPYLENE GLYCOL PRODUCTION ROUTES
Propylene glycol is often produced from several pathways by using different feedstocks. It can be synthesized commercially from the hydration of propylene oxide (). At present, there are five different technologies used for the industrial production of propylene glycol, as shown in Figure 2.
They include the styrene monomer process employed by LyondellBasell and Shell, the anthraquinone process used by Dow Chemical and BASF, the tert-butyl alcohol process used by LyondellBasell and Huntsman Corp, the cumene hydroperoxide process used by Sumitomo Chemicals, and the chlorohydrin process by Dow Chemical (Martin and Murphy, 2000). Other processes such as the catalytic hydrogenation of lactic acid or the fermentation of glycerol in the presence of microorganisms could also produce propylene glycol (Seretis and Tsiakaras, 2016). Thermochemical processes occurring in the presence of heat and thermal energy could also convert carbohydrates such as xylitol and sorbitol into propylene glycol in the presence of a reducing agent (Sun and Liu, 2011). The hydrogenolysis of carbohydrates to propylene glycol involves two main steps. The first step occurs in the presence of a metal catalyst and involves the dehydrogenation of polyols to carbonyl intermediates. Following the first step, the cleavage of specific C-C and C-O bonds occurs in a basic medium through retro-aldol condensation (Sun and Liu, 2011).
The production of RPG from glycerol occurs through a reaction known as selective hydrogenolysis. The process involves the breaking of C-O bonds and the removal of hydroxyl groups, followed by the simultaneous addition of hydrogen in the presence of a catalyst, as shown in Figure 3 (). It should be emphasized that when one side hydroxyl group is removed from glycerol, RPG is produced. Moreover, the dissociation of the hydroxyl group present in the middle generates 1,3-propanediol. Many side products such as ethylene glycol, methanol, propanol, and methane can also be produced from the reaction ().
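In overall stoichiometric terms, the selective hydrogenolysis exchanges one hydroxyl group for hydrogen: C3H8O3 (glycerol) + H2 → C3H8O2 (propylene glycol) + H2O, so one mole of hydrogen is consumed and one mole of water is released per mole of RPG formed.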
Hydrogenolysis of glycerol to RPG is an environmentally benign and cost-effective route compared to the conventional methods described in Figure 2. In addition, the conventional methods use petrochemical feedstocks and a significant amount of water, thus producing byproducts such as di- and tri-propylene glycol (). The separation of these byproducts also increases the overall process cost.
Hydrogenolysis reaction mechanisms
As stated in the previous section, the hydrogenolysis of glycerol occurs via a series of parallel and consecutive reaction pathways. These reactions often lead to the formation of products such as RPG, 1,3-propanediol, lactic acid, alcohols, and sometimes gases. It should be noted that a clear understanding of the RPG reaction mechanism is important for the effective design of improved catalysts and for process optimization. Previous studies proposed two main reaction mechanisms for the conversion of glycerol to propanediols: the dehydration-hydrogenation mechanism and the dehydrogenation-dehydration-hydrogenation mechanism. Other mechanisms such as the direct hydrolysis mechanism (), the chelation-hydrogenolysis mechanism, and the etherification-hydrogenation mechanism have also been suggested in previous years.
The dehydration-hydrogenation mechanism involves glycerol conversion to RPG via a reactive intermediate (acetol and its enol tautomer). The intermediate is subsequently hydrogenated to RPG (route 1 in Figure 4). Similarly, 1,3-propanediol can also be synthesized via the same route through the dehydration of glycerol to 3-hydroxypropanal and subsequent hydrogenation (route 2 in Figure 4). It should be emphasized that direct hydrogenolysis of glycerol through C-C bond cleavage leads to the formation of ethylene glycol and methanol (route 3). The dehydration step is promoted under acidic conditions in the presence of acidic sites, while the hydrogenation step is promoted by a metal (). Thus, hydrogenolysis catalysts are designed to have an acidic site and also contain a metal promoter.
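The dehydration-hydrogenation route can therefore be summarized in two steps: glycerol → acetol + H2O (acid-catalyzed dehydration), followed by acetol + H2 → propylene glycol (metal-catalyzed hydrogenation).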
Homogeneous catalysts such as tungstic acid, sulfuric acid, and rhodium complex have been tested for the conversion of glycerol to RPG by several researchers (Che, 1987;). Che, patented a one-step conversion process that uses syngas at 200 C and 32 MPa pressure in the presence of rhodium complex and tungstic acid homogeneous catalyst. The reaction produced iScience Review RPG and 1,3-propanediol yields of 23% and 20%, respectively. In year 2000, Shell Oil Company patented a process that uses homogeneous palladium complex mixed water and sulfolane for the production of propylene glycol (). The mixture also contains methane sulfonic acid as catalyst. It should be mentioned that products such as RPG, 1-propanol, and 1,3-propanediol were obtained from the process in the ratio of 22:47:31, respectively. Although promising, the use of homogeneous catalysts for RPG production has several limitations including separation and recovery of products, corrosion issues, use of expensive and toxic solvent as well as economic and environmental concerns (). Therefore, research focus was directed toward the development of heterogeneous catalysts to address these issues. Heterogeneous catalyst used for glycerol to RPG can group based on the type of metals as transition (e.g. Co, Cu, and Ni) or noble metals (e.g. Pt, Rh, Ru, Pd, Ir, and Re). The noble metals are preferred because of their high selectivity toward RPG as well as high glycerol conversion (). Noble metal catalysts could also be used with various supports such as ZrO 2, Al 2 O 3, SiO 2, TiO 2, Fe 2 O 3, and CaO (). Catalysts supports help to provide stability, promote metal dispersion, and ensure that there is contact between the metal and the reactants. Several studies have reported the use of supported noble metal catalysts for the conversion of glycerol to RPG. Roy et al., used a mixture of 5 wt.% Ru and Pt on Al 2 O 3 support with in situ hydrogen for the conversion of glycerol to RPG. The reaction was performed in a multiple slurry reactor at 493 K for 6 h. The authors reported an optimal glycerol conversion of 50% and 47.2 % RPG selectivity (). Miyazawa et al., used Ru supported on carbon materials combined with Amberlyst ion-exchange resins. They reported a maximum glycerol conversion and RPG selectivity of 48.8% and 70.2%, respectively. The reaction was performed at 8.0 MPa initial hydrogen pressure, 10 h reaction time, 15 mg Ru/C, and 453 K (). A lower glycerol conversion (19.8%) and RPG selectivity (31.9%) was reported with Pt metal supported on amorphous silica-alumina (). Although noble metal-based catalysts are active, they are expensive and have the tendency to promote the cleavage of C-C bonds, leading to lower selectivity of RPG (). In contrast, transition metals are inexpensive alternative sources for RPG production. iScience Review Cu-based transition metal catalyst has shown promising glycerol conversion and RPG selectivity during hydrogenolysis reaction for 400 h duration, 220 C, and 20 bar pressure. Cu has the ability to selectively cleave C-O bonds without affecting the C-C bond (Mane and Rode, 2012 (). The increased conversion and RPG selectivity was attributed to the synergistic effect between Cu and Ru. Cu-based catalysts are often in their oxide form and require in situ reduction with enormous hydrogen pressure to produce Cu metal. However, few researchers have studied the addition of another metal to help eliminate the pre-reduction step. 
Zhou et al., used Cu and Ag metals supported on g-Al 2 O 3 at 200 C, 1.5 MPa initial H 2 pressure for 10 h. The catalysts showed up to 100% RPG selectivity and 27% glycerol conversion without the pre-reduction step (). The authors noted that the addition of Ag also helped in the elimination of the pre-reduction step. Sharma et al., patented a catalyst comprising of Cu:Zn:Cr:Zr in the ratio of 3:2:1:3. The catalysts produced 100% conversion of glycerol and 97% RPG selectivity at 250 C, 4.0 MPa of H 2 for 10 h. Moreover, the catalyst was reused 4 times with a slight decline in the conversion observed. Therefore, the authors concluded that the introduction of Zn and Zr into the Cu:Cr catalysts matrix helped improve the glycerol conversion and RPG selectivity (). Ni is another promising and cost-effective transition metal used for the hydrogenolysis of glycerol to RPG. Ni is an hydrogenation catalyst and has been studied for selective hydrogenolysis reaction without using external hydrogen (Mane and Rode, 2012). Yin et al., (). Table 2 summarizes previous studies on the use of metal-based catalyst for hydrogenolysis of glycerol to propylene glycol. Catalytic transfer hydrogenolysis Catalytic transfer hydrogenolysis (CTH) is perceived as one of environmentally friendly alternative to conventional hydrogenolysis that occurs without the addition of external hydrogen. CTH requires much milder iScience Review reaction temperature and lower operating pressure (). The process occurs in the presence of a catalyst that facilitates the production of renewable hydrogen donors in the liquid medium. Additionally, CTH is more energy efficient and economically viable due to the absence of external hydrogen. However, the success of CTH is based on the development of an effective bifunctional catalyst that can simultaneously enhance in situ hydrogen production and hydrogenolysis reaction. Figure 6 outlines the difference between conventional hydrogenolysis and CTH. Several researchers have assessed the development of It should be noted that the Ni and Cu transition metals were selected due to their low cost and ability to promote hydrogen formation. Ni-Zr catalyst was the most promising catalyst with 73% glycerol conversion. In addition, it was reported that the H-beta support contains increased Brnsted acidity. Zn has the ability to promote the conversion of glycerol to RPG when deposited on a Pd surface (). Therefore, a monoclinic zirconia-supported PdZn (PdZn/m-ZrO 2 ) was synthesized and tested for the CTH reaction. Although promising conversion and selectivity were attained with the catalyst, the leaching of Zn from the PdZn alloy phases led to catalysts deactivation. To prevent the deactivation, physical mixtures of Pd/m-ZrO 2 and ZnO were directly used for the CTH reaction. Thus, producing layers of PdZn alloy on Pd surfaces that prevented the Zn leaching (). disrupting ice crystals formation. Ethylene glycol is an extremely toxic chemical that is often transformed into calcium oxalate crystals when ingested (Lucy Bell, 2020). The produced calcium oxalate crystals often build up in the heart, lungs, and kidney, thus resulting in irrecoverable health issues. In addition, ethylene glycol is characterized by a sweet taste and smell that could be accidentally ingested by pets or children. Propylene glycol can also be used to maintain the integrity of DNA. Most times environmental samples that uses DNA for research are often stored in absolute ethanol. 
However, ethanol is expensive, limited by transportation restrictions (especially in remote locations), and often classified as a hazardous chemical (). In addition, a long ethanol evaporation period is needed to prevent polymerase chain reaction inhibition. Recent experimental studies show that propylene glycol-based antifreeze is a suitable alternative to ethanol for preserving macroinvertebrate DNA from bulk-benthos DNA samples (). In another study, RPG was evaluated as a potential antifreeze for water-based fracturing fluids and liquid additives in newly developed oil fields (). A study by Perez et al. shows that propylene glycol is a cryoprotective agent that could be used as a promising ingredient for treating cellular tissues and organs to prevent ice crystal formation at low temperatures (). Recently, Abu-Hamdeh et al. developed a novel antifreeze nanofluid comprising carbon nanotubes and ethylene glycol/propylene glycol. The nanofluid was used for the cooling of batteries. The authors stated that the developed nanofluid could also be used for other similar electrical systems such as radiators, and it showed promising performance for battery cooling systems. Another study demonstrates the application of propylene glycol as an antifreeze agent in geothermal heat exchangers (). The authors assessed the economic and environmental performance of four different fluids, including propylene glycol (25 wt.% and 33 wt.%), calcium chloride, and pure water, as antifreeze agents in geothermal heat exchangers. Their results show that propylene glycol and calcium chloride are promising antifreeze agents from economic and environmental perspectives (). Propylene glycol is also used as an antifreeze for aircraft and automobiles (). In Canada, propylene glycol is commonly used to protect heating, ventilation, and air conditioning (HVAC) systems from freezing in extreme weather. Food and beverage industry Propylene glycol also finds applications in the food industry as a preservative, solvent, and moisture removal agent (Lucy Bell, 2020). Propylene glycol is used as an additive in several food products because it is not self-reactive and is hygroscopic. Food additives are substances that are not consumed as food themselves but are added intentionally to food for one or more technological benefits. Food additives can be antioxidants, emulsifiers, preservatives, or stabilizers. Propylene glycol is a promising food additive because its ability to attract water molecules improves the texture, taste, appearance, and shelf life of food. In addition, it is a very good food preservative and an excellent humectant. It can be used as an antioxidant, dough strengthener, emulsifier, stabilizer, thickener, and texturizer in foods. As a dough strengthener, propylene glycol modifies the starch and gluten in dough, thereby improving its stability. Propylene glycol is recognized by the Food and Drug Administration (FDA) as a relatively safe substance for use as a food additive. It is found in many foods, including packaged foods, drink mixes, desserts, and premarinated ham and turkey (). Propylene glycol is also present in many condiments, frozen vegetables, dairy products, and bread as a preservative. Recent studies by Bokov et al. confirm that propylene glycol is not harmful or poisonous to the body when ingested, although a few incidents of intoxication have been reported after consumption of extremely high doses.
Shayanmehr et al. showed that propylene glycol is an effective co-solvent when mixed with sunflower oil for the production of linoleic acid used for milk enrichment. Pharmaceuticals Propylene glycol is a beneficial chemical found in a variety of pharmaceutical and health-care products. It is often used to retain moisture and prevent skin dryness in alcohol-based hand sanitizers (). It is a promising solvent in several oral and injectable drugs (). Drugs have to penetrate into the deeper skin layers or permeate the skin for effective action, yet the human skin is an effective, selective barrier to chemical permeation. Very few drugs possess the physicochemical properties required for this route of delivery (), and most drugs cannot penetrate the skin in the doses needed for effective systemic therapy. Therefore, propylene glycol is often used to improve drug permeation through the skin (). Moreover, the miscibility of propylene glycol with water enables it to be used as a solvent for poorly soluble pharmaceutical formulations, including diazepam and other benzodiazepines (Lucy Bell, 2020). Propylene glycol is added to drug formulations to enhance the solubility of hydrophobic compounds (Co and Gunnerson, 2019). Ceftriaxone is a promising third-generation cephalosporin antibiotic approved by the FDA for the treatment of infections such as gonorrhea (), pneumonia (), and bacterial meningitis (). Propylene glycol polymer has been reported as an efficient stabilization agent for metal nanoparticles used in a colorimetric assay for ceftriaxone (). Compared with conventional analytical methods such as liquid chromatography-mass spectrometry (LC-MS) and fluorimetry, the propylene glycol polymer-stabilized assay is more effective. Acetaminophen (ACP, N-acetyl-p-aminophenol) is an analgesic drug that is widely used on its own or combined with other drugs to manage pain, or as an antipyretic agent. It is extensively used because of its low allergy risk (Amann and Peskar, 2002). Moreover, the drug can be administered to patients who are sensitive to salicylates (Amann and Peskar, 2002). It should be emphasized that acetaminophen is classified by the biopharmaceutics classification system as a class III drug, exhibiting reduced water solubility (14.3 mg/mL at 25°C) and low permeability (). These properties adversely influence its dissolution kinetics, thereby limiting the design of its liquid formulations. Propylene glycol can be used as a solvent to study and optimize the solubility behavior of important drugs such as acetaminophen () and sulfacetamide (). Other applications including cosmetics and personal care products Cosmetic and personal care products are another major area where propylene glycol is used. Propylene glycol serves diverse purposes, ranging from skin-conditioning agent to viscosity-decreasing agent, cosmetic solvent, or fragrance ingredient (). The hygroscopic nature of propylene glycol favors its use in moisturizers and hair care products. Moreover, it also minimizes bacterial growth and extends the shelf life of cosmetics and personal care products (Lucy Bell, 2020). Electronic cigarettes (e-cigarettes) are used as a substitute for tobacco smoking or as smoking-cessation devices. Propylene glycol mixed with glycerin has been used as a promising carrier liquid for e-cigarettes (Lucy Bell, 2020).
The liquid mixture acts as a carrier for nicotine and flavorings and enhances vapor generation. E-cigarette formulations contain between 89% and 90% propylene glycol (). Previous studies show that the use of propylene glycol in e-cigarettes does not pose a significant health issue through the inhalation route (). ECONOMIC AND ENVIRONMENTAL ASSESSMENT OF HYDROGENOLYSIS PROCESS The economic and environmental impacts of different RPG production routes should be assessed and compared with the commercial production pathways, so that critical decisions related to the commercial implementation of these technologies can be made. Techno-economic analysis (TEA) is a very important tool used to determine the profitability of a new technology as well as its market competitiveness (). Life cycle assessment (LCA) is a popular method for assessing the environmental aspects associated with a product over its life cycle, including the production and consumption stages. TEA is often integrated with LCA for effective decision making on whether to move a project or a proposal forward. A few studies have assessed the TEA of RPG production from glycerol via several routes (Table 4). Earlier studies by Gonzalez-Garay et al. compared the economic and environmental impacts of the commercial propylene glycol production process from propylene oxide with three hydrogenolysis routes (Table 4). Among the selected routes, non-isothermal hydrogenolysis at ambient pressure with external hydrogen is the most promising from economic and environmental considerations. Another study, by Nachtergaele et al., applied the LCA approach to evaluate the environmental impact of RPG production from three different routes. The hydrogenolysis route led to a 60% decline in GHG emissions, and the emitted CO2 eq. declined by 36%-38% when an on-site combined heat and power system was adopted (). Jiménez et al. designed two alternative routes for RPG production based on the source of hydrogen: route 1 involves the hydrogenolysis of glycerol with hydrogen from an external source, whereas in route 2 hydrogen is produced from glycerol steam reforming. Economic analysis of the two routes shows that using glycerol steam reforming as the hydrogen source increased the cost of RPG production from 1.36 US$/kg to 9.01 US$/kg; moreover, the annual net profit from the RPG production process decreased by 70%. Supramono and Ashshiddiq proposed and assessed the economic viability of a process for the conversion of crude glycerol to RPG and acrolein. A net present value, payback period, and internal rate of return of USD 376 million, 1.26 years, and 149.9%, respectively, were obtained for the process. However, the authors did not perform a detailed cash flow analysis for the proposed design, the methodology employed in the study was not clear, and the minimum selling prices of RPG and acrolein were not presented. Table 4 also records that the commercial method was compared with three different hydrogenolysis routes, with route 2 appearing the most promising from economic and environmental considerations; factors such as product prices, raw material cost, raw material conversion, and feed flow rate have a major impact on profitability. It likewise records the environmental effect of implementing different feedstocks for renewable propylene glycol production: a transition from a petroleum-based economy to a renewables-based one decreases the environmental impact by 40%-60% kg CO2 eq.
Nachtergaele et al. (route 1: hydrogenolysis with hydrogen from an external source; route 2: hydrogenolysis with hydrogen produced locally from glycerol steam reforming) assessed the technical and economic impacts of two renewable propylene glycol production routes based on the source of hydrogen. An overall production cost of 1.17 US$/kg was obtained for route 1, whereas route 2 led to an increase in the hydrogen production cost to 9.01 US$/kg. The overall energy input for route 2 is about 83% of that of route 1, yet route 1 is better in terms of economic and environmental impacts. Route 2 requires less energy compared with route 1, and there is about a 64% decline in CO2 emissions in route 2 compared with route 1, attributed to the elimination of the external hydrogen source provided from natural gas. The CTH process also led to a 7% decline in the total investment cost. TECHNOLOGY READINESS LEVEL OF HYDROGENOLYSIS OF GLYCEROL TO RPG The maturity of different RPG production routes can be assessed using the technology readiness level (TRL) metric. Through the TRL methodology, the maturity of a new technology can be evaluated and compared against alternatives using factors such as economic viability and environmental impacts. The TRL of a technology is divided into different levels in accordance with the U.S. Department of Defense framework (Figure 10), ranging from level 1 (the conceptual stage) to level 9 (the commercialization stage). Regarding the TRL of RPG, although most of the proposed technologies are relatively new, several researchers have embraced them (). Table 5 outlines the maturity level of different RPG production pathways. There are several lab-scale plants for RPG production using feedstocks such as glycerol, cellulose, lactic acid, and glucose. Moreover, Dow successfully designed a pilot plant facility at the Halterman Custom Processing Facility in Houston (). In 2011, Archer Daniels Midland (ADM) built a large-scale industrial RPG production facility at its Decatur, Illinois site using a rhenium-promoted catalyst. The next year, BASF partnered with Oleon to build and open another RPG production plant in Belgium. Today, ADM uses plant-based glycerol, a co-product of biodiesel production, for RPG production. The ADM facility has an annual production capacity of 100,000 metric tons of RPG. ADM has stated that the use of soybean-derived and canola-derived glycerol in the RPG production facility led to a 61% decline in greenhouse gas emissions compared with petroleum-based feedstocks. In China, about 5.2 kilotons per year of RPG is produced with sorbitol as feedstock (Rosales-Calderon and Arantes, 2019). Since the catalytic conversion of glycerol to RPG has been adopted commercially by a few companies, a TRL of at least 8 is suitable to describe the technology. However, the conversion of bio-based feedstocks such as cellulose, glucose, and lactic acid to RPG is still in its infancy and could be assigned TRLs between 2 and 4. CHALLENGES AND FUTURE PROSPECTS Although the sustainable production of propylene glycol from several feedstocks has attracted attention from both academia and industry, several challenges need to be addressed. Hydrogenolysis of biodiesel-derived glycerol to RPG is the most common route and has been commercialized industrially; however, the source of hydrogen is still a major concern.
Most studies have recommended that hydrogen be supplied externally without accounting for its source. If the hydrogen comes from natural gas or petroleum resources, then the produced propylene glycol should not be called renewable. A feasible alternative would be to integrate hydrogen production processes directly with the hydrogenolysis reaction to promote a circular economy. However, this would require detailed process design, energy and exergy analysis, and energy optimization; such studies have not been reported. More importantly, economic and environmental assessments of the integrated processes are scarcely reported. Glycerol is an abundant by-product of biodiesel production; therefore, the conversion of crude glycerol to green chemicals and polymers has been a focus of academic and industrial research (). Green chemicals such as lactic acid, ethylene glycol, and acetol can also be converted to propylene glycol through catalytic hydrogenolysis. However, more research is required to develop an efficient catalyst that can be recycled easily and delivers maximum propylene glycol selectivity. Industrial production of RPG from the hydrogenolysis of glycerol uses copper-chromite catalysts and a two-step synthesis comprising novel reactive distillation and acetol hydrogenation. The selected route for the production of acetol and propylene glycol from glycerol involves a vapor-phase reaction over the copper-chromite catalyst in a packed-bed reactor. In the presence of hydrogen, the vapor-phase reaction converts glycerol to propylene glycol in a single reactor. Although this approach has been demonstrated in a continuous process, challenges of scalability and catalyst recycling remain. Conclusions Propylene glycol is a ubiquitous green chemical with several industrial uses, including as an antifreeze, cosmetic agent, moisturizer, solvent, surfactant, and preservative. However, the majority of worldwide propylene glycol consumption is met from petroleum-based propylene oxide, a process characterized by greenhouse gas emissions. Hydrogenolysis of glycerol is an alternative route for the production of propylene glycol. Catalytic hydrogenolysis can occur in the presence of external hydrogen or through the use of renewable H-donors in a liquid medium. If the external hydrogen is supplied from renewable sources, the produced propylene glycol is called renewable propylene glycol (RPG). The present review outlines different RPG production routes, hydrogenolysis mechanisms, and potential industrial applications of propylene glycol. Presently, there are five different technologies used for the industrial production of propylene glycol. They include the styrene monomer process employed by
from django.http import HttpResponse
def sendfile(request, filename, **kwargs):
    """Delegate serving of ``filename`` to the front-end web server via X-Sendfile."""
    response = HttpResponse()
    # ``unicode`` is Python 2 only; on Python 3 header values should be text.
    response['X-Sendfile'] = str(filename)
    return response
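# A minimal wiring sketch for the view above (illustrative only: the URL
# pattern and module path are assumptions, not part of the original snippet).
# The front-end web server (e.g. Apache with mod_xsendfile) must be configured
# to honor the X-Sendfile header; this view only emits it.
#
# urls.py:
#     from django.urls import path
#     from .views import sendfile
#
#     urlpatterns = [path("download/<path:filename>", sendfile)]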
|
MOSCOW -- After emerging from an eight-day space simulation on Friday, the all-female Russian crew said they had missed their loved ones in the experiment more than they missed the comforts of regular life.
While Russia's space medicine center in Moscow has conducted similar experiments in the past, including one several years ago in which six males were locked up for 500 days to model a flight to Mars, this is the first time the crew was all female.
At least one of the women from the crew hopes to become a cosmonaut in the future.
"Of course I want to go into space," said Inna Novinova. "This was an invaluable experience both in methods and isolation," that come with space flight, she said.
Speculation swirled in the Russian media about whether they would be able to survive without males or cosmetics for a week. The women assured reporters it was possible.
Asked whether she missed the comforts of regular life, such as showering and sweets, while in confinement, crewmember Anna Kussmaul told reporters she was more concerned about her family.
"Not knowing what was going on in their lives for eight days bothered me a lot more than not having shampoo, or chocolate," Kussmaul said. The women were not allowed to take their cellphones with them and communicated exclusively with the medicine center's command crew which monitored all of their activities and conversations.
"To those who said we'd end up ripping each other's hair out: we didn't even come close to that," Daria Komissarova told the dozens of journalists that greeted the women as they exited the space capsule installed at the Institute of Biomedical Problems, part of the Russian Academy of Sciences. |
package com.vansl.instruction.load;
import com.vansl.instruction.base.NoOperandsInstruction;
import com.vansl.rtdata.Frame;
/** fload_3: pushes the float stored in local variable slot 3 onto the operand stack. */
public class FLOAD_3 extends NoOperandsInstruction {
@Override
public void execute(Frame frame) {
FLOAD.fload(frame,3);
}
}
|
Albert Asher
Rugby union career
Asher was only 11 years old when he played his first senior representative game of rugby union, playing for Tauranga against Rotorua. Eighteen months later he played against Auckland, becoming the youngest senior representative on record. Asher moved to Auckland, joining the City club and made his Auckland debut in 1898. He won a cap playing for the New Zealand team against Australia at Sydney Cricket Ground on 15 August 1903, scoring a try. While Asher played in only one Test match, he played in 11 games for the All Blacks and scored 53 points.
In 1904, while working with the fire brigade, he suffered a leg injury that kept him from meeting the 1904 British team that were touring New Zealand and he subsequently missed touring Great Britain with the Original All Blacks. After two years out of the game Asher again represented Auckland in 1907, before changing codes.
Rugby league career
Asher declined the offer to tour with the 1907–1908 New Zealand professional rugby team due to a broken ankle. In May 1908 he led the first New Zealand Māori rugby league team on a tour of New South Wales and Queensland, where they are credited with financially saving the New South Wales Rugby League. At the time it was reported in New Zealand that they were accidental converts to rugby league, expecting to meet rugby union teams in Sydney, but this has since proved to be a cover story. The team twice defeated New South Wales and also defeated Australia in one "test".
On their return Asher represented Auckland in their second match ever, against Wellington.
In 1909 Asher was invited to tour Australia with the 1909 New Zealand side but again declined, opting to remain in Auckland and organise the 1909 New Zealand Māori tour of Australia which was set for later in the year. During the year Asher again represented Auckland and played for the new City Rovers club which had been formed to compete in the new Auckland Rugby League.
In 1910 Asher played for City in the first official week of the Auckland Rugby League competition. On 25 June Asher was sent off by referee Jack Stanaway, the brother of Alex. The rest of the City side walked off in support of Asher. Asher became the first player to face the ARL judiciary, which cautioned him. Asher then traveled to Sydney, where he was part of the Australasian side that played two matches against the 1910 Great Britain team. Asher, who was known for leaping over players (the source of his nickname "Opai"), clashed with another known leaper, Billy Batten. Asher came into contact with Batten's knee in a mid-air collision and was carried from the ground, requiring stitches before he returned.
Asher was to play against the Lions three more times once they reached New Zealand in July. Asher lined up against them for New Zealand Māori, Auckland and New Zealand. This was Asher's only cap for New Zealand. He finished the season by being part of the City side that won the inaugural competition, being awarded the Myers Cup.
Asher again played for New Zealand on their 1913 tour of Australia, however no test matches were played.
Asher continued to play for City until after the First World War.
Later years
After retirement Asher became the custodian of Carlaw Park between 1921 and 1943, the home of the Auckland Rugby League.
He is buried at Waikumete Cemetery, Auckland.
Legacy
Asher was inducted into the New Zealand Rugby League's Legends of League in 2008. |
package edu.ustc.mix.persistence.mapper.system;
import java.util.List;
import java.util.Map;
import edu.ustc.mix.persistence.entity.system.User;
/** Data-access mapper exposing CRUD and lookup operations for {@link User} entities. */
public interface UserMapper {
int add(User user);
int delete(Long userId);
int update(User user);
User getById(Long userId);
User getByUserName(String userName);
User getUserAndRelatedInfoById(Long userId);
User getUserAndRelatedInfoByUserName(String userName);
User getByMultiParams(Map<String, Object> params);
List<User> list(Map<String, Object> params);
List<User> getAllUsersAndRelatedInfo();
long count(Map<String, Object> params);
} |
import numpy as np
import tensorflow as tf
EPS = 1e-6
def compute_accuracy(logits, labels):
num = tf.cast(tf.argmax(logits, axis=-1, output_type=tf.int32) == labels, dtype=tf.float32)
accuracy = tf.reduce_mean(num)
return accuracy
def expand_ensemble_dim(x, num_ensembles):
""" functionality for outer class to expand before passing into the ensemble model. """
multiples = tf.concat(([num_ensembles], tf.ones_like(tf.shape(x))), axis=0)
x = tf.tile(tf.expand_dims(x, axis=0), multiples=multiples)
return x
def clip_by_value(t, clip_value_min=None, clip_value_max=None):
if clip_value_min is not None:
t = tf.maximum(t, clip_value_min)
if clip_value_max is not None:
t = tf.minimum(t, clip_value_max)
return t
def clip_by_value_preserve_gradient(t, clip_value_min=None, clip_value_max=None, name=None):
with tf.name_scope(name or 'clip_by_value_preserve_gradient'):
t = tf.convert_to_tensor(t, name='t')
clip_t = clip_by_value(t, clip_value_min, clip_value_max)
return t + tf.stop_gradient(clip_t - t)
def flatten_leading_dims(tensor, n_dims):
    """Collapse the first `n_dims` axes of `tensor` into a single leading axis."""
    if n_dims <= 1:
        return tensor
    # Build the target shape as a tensor; concatenating a Python list with a
    # TensorShape (as the original did) raises a TypeError.
    newshape = tf.concat([[tf.math.reduce_prod(tf.shape(tensor)[:n_dims])],
                          tf.shape(tensor)[n_dims:]], axis=0)
    return tf.reshape(tensor, shape=newshape)
def clip_atanh(x, name=None):
return tf.atanh(tf.clip_by_value(x, clip_value_min=-1. + EPS, clip_value_max=1. - EPS), name=name)
def compute_target_value(reward, gamma, done, next_q):
q_target = reward + gamma * (1.0 - done) * next_q
return q_target
def flat_vars(vars):
print('Tracing flat_vars')
vars = [tf.reshape(v, shape=(-1,)) for v in vars]
return tf.concat(vars, axis=0)
def set_flat_trainable_variables(model: tf.keras.layers.Layer, trainable_variables):
print(f'Tracing set_flat_params_to model={model.name}, flat_params={len(trainable_variables)}')
prev_ind = 0
for param in model.trainable_variables:
flat_size = tf.reduce_prod(param.shape)
param.assign(tf.reshape(trainable_variables[prev_ind:prev_ind + flat_size], shape=param.shape))
prev_ind += flat_size
def soft_update(target: tf.keras.layers.Layer, source: tf.keras.layers.Layer, tau):
print('Tracing soft_update_tf')
for target_param, source_param in zip(target.trainable_variables, source.trainable_variables):
target_param.assign(target_param * (1. - tau) + source_param * tau)
def hard_update(target: tf.keras.layers.Layer, source: tf.keras.layers.Layer):
print('Tracing hard_update_tf')
for target_param, source_param in zip(target.trainable_variables, source.trainable_variables):
target_param.assign(source_param)
def to_numpy_or_python_type(tensors):
"""Converts a structure of `Tensor`s to `NumPy` arrays or Python scalar types.
For each tensor, it calls `tensor.numpy()`. If the result is a scalar value,
it converts it to a Python type, such as a float or int, by calling
`result.item()`.
Numpy scalars are converted, as Python types are often more convenient to deal
with. This is especially useful for bfloat16 Numpy scalars, which don't
support as many operations as other Numpy values.
Args:
tensors: A structure of tensors.
Returns:
`tensors`, but scalar tensors are converted to Python types and non-scalar
tensors are converted to Numpy arrays.
"""
def _to_single_numpy_or_python_type(t):
if isinstance(t, tf.Tensor):
x = t.numpy()
return x.item() if np.ndim(x) == 0 else x
return t # Don't turn ragged or sparse tensors to NumPy.
return tf.nest.map_structure(_to_single_numpy_or_python_type, tensors)
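# --- Usage sketch for the update helpers above (illustrative only: the layer
# sizes, tau value, and reward/done/next_q numbers are arbitrary assumptions,
# not values required by this module). ---
def _demo():
    source = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(3,))])
    target = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(3,))])
    source(tf.zeros((1, 3)))  # run a dummy batch so weights are created
    target(tf.zeros((1, 3)))
    hard_update(target, source)             # exact copy of the source weights
    soft_update(target, source, tau=0.005)  # Polyak averaging toward the source
    # One-step TD target: r + gamma * (1 - done) * Q(s', a')
    return compute_target_value(reward=tf.constant([1.0]), gamma=0.99,
                                done=tf.constant([0.0]), next_q=tf.constant([2.5]))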
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2015 <NAME>
# All rights reserved.
#
# This software is licensed as described in the file LICENSE, which
# you should have received as part of this distribution.
#
import asyncio
import json
import aiocouchdb.v1.authdb
from aiocouchdb.client import urljoin
from . import utils
class AuthDatabaseTestCase(utils.ServerTestCase):
def setUp(self):
super().setUp()
self.url_db = urljoin(self.url, '_users')
self.db = aiocouchdb.v1.authdb.AuthDatabase(self.url_db)
def test_get_doc_with_prefix(self):
doc = self.db['test']
self.assertEqual(doc.id, self.db.document_class.doc_prefix + 'test')
doc = self.db[self.db.document_class.doc_prefix + 'test']
self.assertEqual(doc.id, self.db.document_class.doc_prefix + 'test')
class UserDocumentTestCase(utils.ServerTestCase):
def setUp(self):
super().setUp()
self.username = utils.uuid()
docid = aiocouchdb.v1.authdb.UserDocument.doc_prefix + self.username
self.url_doc = urljoin(self.url, '_users', docid)
self.doc = aiocouchdb.v1.authdb.UserDocument(self.url_doc, docid=docid)
def tearDown(self):
self.loop.run_until_complete(self.teardown_document())
super().tearDown()
@asyncio.coroutine
def setup_document(self, password, **kwargs):
data = {
'_id': self.doc.id,
'name': self.doc.name,
'password': password,
'roles': [],
'type': 'user'
}
data.update(kwargs)
with self.response(data=b'{}'):
resp = yield from self.doc.register(password, **kwargs)
self.assert_request_called_with('PUT', '_users', self.doc.id, data=data)
self.assertIsInstance(resp, dict)
return resp
@asyncio.coroutine
def teardown_document(self):
if not (yield from self.doc.exists()):
return
with self.response(headers={'Etag': '"1-ABC"'}):
rev = yield from self.doc.rev()
yield from self.doc.delete(rev)
def test_require_docid(self):
with self.assertRaises(ValueError):
aiocouchdb.v1.authdb.UserDocument(self.url_doc)
def test_username(self):
self.assertEqual(self.doc.name, self.username)
def test_register(self):
yield from self.setup_document('s3cr1t')
def test_register_with_additional_data(self):
yield from self.setup_document('s3cr1t', email='<EMAIL>')
def test_change_password(self):
yield from self.setup_document('s3cr1t')
with self.response(data=b'{}'):
doc = yield from self.doc.get()
data = json.dumps(doc).encode()
with self.response(data=data):
yield from self.doc.update_password('<PASSWORD>')
doc['password'] = '<PASSWORD>'
self.assert_request_called_with(
'PUT', '_users', self.doc.id,
data=doc)
|
Big Data Predicts Centuries Of Harm If Climate Warming Goes Unchecked: It took about 30 teams of scientists worldwide, using supercomputers to churn through mountains of data, to see aligning patterns of what will happen decades and centuries from now.
The heart of NASA Center for Climate Simulation in Maryland is the "Discover" supercomputer. The software programs behind climate simulations typically have 1 million lines of code, or more.
As diplomats argue in Paris over a new global agreement to fight climate change, their work is driven by scientists' dire predictions of how unchecked warming will transform our planet decades and centuries from now.
But how can researchers be so sure of what will happen that far off?
The Earth is big and its climate is complicated. To understand it, scientists turn to big, complicated climate simulations that run on supercomputers like the one at NASA's Center for Climate Simulation in Maryland.
There, in a windowless room full of what look like hulking black monoliths, thousands of linked computers crunch away on a mind-boggling number of numbers to come up with predictions about how varying amounts of greenhouse gas emissions will affect our climate's future.
"If you took everybody on the face of the Earth — all 7.3, 7.4 billion people — and you had them multiply two numbers together every single second for 145 hours, total, that's what this entire computing center can do in one second," says Dan Duffy, the high-performance-computing lead at this center.
And when the center runs a climate simulation, he says, it can take months for this powerful beast to spit out an answer. The computer programs that researchers use typically have 1 million lines of code or more.
This NASA video shows the magnitude of the shift in global temperatures that climate modelers predict, over the next century, if carbon dioxide concentrations continue their unabated climb. The temperature changes shown here are relative to the average temperatures observed from 1971-2000.
Plus, Duffy says, "we store large amounts of climate data." If you had the equivalent amount of storage for a music playlist on your computer, he explains, "you could have a music playlist that was 190,000 years long before you would have to listen to the same song twice."
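The storage analogy unpacks the same way. Assuming a typical compressed song of about 4 minutes and 4 MB (figures not given in the article, chosen only for illustration), a 190,000-year playlist implies on the order of 100 petabytes:

minutes = 190_000 * 365.25 * 24 * 60   # 190,000 years of continuous playback
songs = minutes / 4                    # assume ~4-minute songs
print(f"{songs * 4 / 1e9:.0f} PB")     # assume ~4 MB per song; 1 PB = 1e9 MB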
One of the scientists who rely on this supercomputer to make predictions about climate change is Gavin Schmidt, director of the NASA Goddard Institute for Space Studies in New York City. His office is right above the famous corner diner from the TV show Seinfeld.
On the wall next to his desk is a big blackboard that's covered with equations. These scientific scribbles were left by the last guy who worked here, a famous climate change researcher named Jim Hansen, and this is the language these folks use to talk about everything that makes up Earth's climate.
A climate model, says Schmidt, is best thought of as an encapsulation of everything that scientists can go out and measure.
Gavin Schmidt, a climatologist at NASA's Goddard Institute for Space Studies in New York City, studies why and how Earth's climate varies over time.
"We can go and measure how much sunlight reflects off the sea ice," Schmidt says. "We can go and measure how much water you need to have in the air before you form a cloud. You can go and measure how the winds affect the ocean currents, right? Those are physical processes that we've been observing for hundreds of years. A climate model encapsulates each of those processes, the ones that we think of as being important, and it links them all together."
Predicting the future of the climate is much different from predicting the weather, says Schmidt.
"Climate is very much the ensemble of all the weather states," he explains. "It's the fact that it's warmer in the summer than in the winter. It's the fact that you have wet and dry seasons in the tropics. These things are predictable and their changes, if the signal is large enough, are also predictable, even though the individual weather is not."
Researchers created the first primitive models of Earth's climate back in the 1960s, on computers that used punch cards. Schmidt says there are now around 30 different groups worldwide doing climate simulations on supercomputers.
And the consensus of these teams is that if greenhouse gas emissions keep rising, the world will look different. On his desktop computer screen, Schmidt pulls up one graphic of the globe that's color-coded for temperature. As the years tick up into the future on a counter, the globe gets redder and redder.
"You're looking at a situation where there's very little ice left in the Arctic; you are looking at temperature changes on land that are the equivalent of, you know, moving south by about a couple thousand miles. The climate of New York would have the climate of Miami," says Schmidt.
How confident does he feel about those kinds of predictions? "That's a great question. There's ways that you can build confidence in these things," he says. These simulations are able to basically re-create the Earth's climate as we see it today. "What you see are patterns in the models that reflect what you see in the real world. We know we must be getting something fundamentally correct in order for that to happen."
He rattles off a long list of other reasons to feel confident. Can these models re-create past climate events like the last ice age? Yes. Do models created independently by different groups show similar patterns? Yes. Do the models make pretty good relatively short-term predictions, when that's possible? "They're not too bad," says Schmidt. "What you have is lots of lines of evidence that build credibility in these simulations."
Every detail won't be perfect. Researchers don't claim that these models will get everything exactly right. "Nonetheless, they make useful predictions," Schmidt says.
Whether the world actually will use those predictions — that's what diplomats are hashing out in Paris right now. |
// dom/src/1.4/dom/domFx_surface_init_cube_common.cpp (AnalyticalGraphicsInc/collada-dom)
/*
* Copyright 2006 Sony Computer Entertainment Inc.
*
* Licensed under the MIT Open Source License, for details please see license.txt or the website
* http://www.opensource.org/licenses/mit-license.php
*
*/
#include <dae.h>
#include <dae/daeDom.h>
#include <1.4/dom/domFx_surface_init_cube_common.h>
#include <dae/daeMetaCMPolicy.h>
#include <dae/daeMetaSequence.h>
#include <dae/daeMetaChoice.h>
#include <dae/daeMetaGroup.h>
#include <dae/daeMetaAny.h>
#include <dae/daeMetaElementAttribute.h>
namespace ColladaDOM141 {
daeElementRef
domFx_surface_init_cube_common::create(DAE& dae)
{
domFx_surface_init_cube_commonRef ref = new domFx_surface_init_cube_common(dae);
return ref;
}
daeMetaElement *
domFx_surface_init_cube_common::registerElement(DAE& dae)
{
daeMetaElement* meta = dae.getMeta(ID());
if ( meta != NULL ) return meta;
meta = new daeMetaElement(dae);
dae.setMeta(ID(), *meta);
meta->setName( "fx_surface_init_cube_common" );
meta->registerClass(domFx_surface_init_cube_common::create);
daeMetaCMPolicy *cm = NULL;
daeMetaElementAttribute *mea = NULL;
cm = new daeMetaChoice( meta, cm, 0, 0, 1, 1 );
mea = new daeMetaElementAttribute( meta, cm, 0, 1, 1 );
mea->setName( "all" );
mea->setOffset( daeOffsetOf(domFx_surface_init_cube_common,elemAll) );
mea->setElementType( domFx_surface_init_cube_common::domAll::registerElement(dae) );
cm->appendChild( mea );
mea = new daeMetaElementAttribute( meta, cm, 0, 1, 1 );
mea->setName( "primary" );
mea->setOffset( daeOffsetOf(domFx_surface_init_cube_common,elemPrimary) );
mea->setElementType( domFx_surface_init_cube_common::domPrimary::registerElement(dae) );
cm->appendChild( mea );
mea = new daeMetaElementArrayAttribute( meta, cm, 0, 6, 6 );
mea->setName( "face" );
mea->setOffset( daeOffsetOf(domFx_surface_init_cube_common,elemFace_array) );
mea->setElementType( domFx_surface_init_cube_common::domFace::registerElement(dae) );
cm->appendChild( mea );
cm->setMaxOrdinal( 0 );
meta->setCMRoot( cm );
// Ordered list of sub-elements
meta->addContents(daeOffsetOf(domFx_surface_init_cube_common,_contents));
meta->addContentsOrder(daeOffsetOf(domFx_surface_init_cube_common,_contentsOrder));
meta->addCMDataArray(daeOffsetOf(domFx_surface_init_cube_common,_CMData), 1);
meta->setElementSize(sizeof(domFx_surface_init_cube_common));
meta->validate();
return meta;
}
daeElementRef
domFx_surface_init_cube_common::domAll::create(DAE& dae)
{
domFx_surface_init_cube_common::domAllRef ref = new domFx_surface_init_cube_common::domAll(dae);
return ref;
}
daeMetaElement *
domFx_surface_init_cube_common::domAll::registerElement(DAE& dae)
{
daeMetaElement* meta = dae.getMeta(ID());
if ( meta != NULL ) return meta;
meta = new daeMetaElement(dae);
dae.setMeta(ID(), *meta);
meta->setName( "all" );
meta->registerClass(domFx_surface_init_cube_common::domAll::create);
meta->setIsInnerClass( true );
// Add attribute: ref
{
daeMetaAttribute *ma = new daeMetaAttribute;
ma->setName( "ref" );
ma->setType( dae.getAtomicTypes().get("xsIDREF"));
ma->setOffset( daeOffsetOf( domFx_surface_init_cube_common::domAll , attrRef ));
ma->setContainer( meta );
ma->setIsRequired( true );
meta->appendAttribute(ma);
}
meta->setElementSize(sizeof(domFx_surface_init_cube_common::domAll));
meta->validate();
return meta;
}
daeElementRef
domFx_surface_init_cube_common::domPrimary::create(DAE& dae)
{
domFx_surface_init_cube_common::domPrimaryRef ref = new domFx_surface_init_cube_common::domPrimary(dae);
return ref;
}
daeMetaElement *
domFx_surface_init_cube_common::domPrimary::registerElement(DAE& dae)
{
daeMetaElement* meta = dae.getMeta(ID());
if ( meta != NULL ) return meta;
meta = new daeMetaElement(dae);
dae.setMeta(ID(), *meta);
meta->setName( "primary" );
meta->registerClass(domFx_surface_init_cube_common::domPrimary::create);
meta->setIsInnerClass( true );
daeMetaCMPolicy *cm = NULL;
daeMetaElementAttribute *mea = NULL;
cm = new daeMetaSequence( meta, cm, 0, 0, 1 );
mea = new daeMetaElementArrayAttribute( meta, cm, 0, 6, 6 );
mea->setName( "order" );
mea->setOffset( daeOffsetOf(domFx_surface_init_cube_common::domPrimary,elemOrder_array) );
mea->setElementType( domFx_surface_init_cube_common::domPrimary::domOrder::registerElement(dae) );
cm->appendChild( mea );
cm->setMaxOrdinal( 0 );
meta->setCMRoot( cm );
// Add attribute: ref
{
daeMetaAttribute *ma = new daeMetaAttribute;
ma->setName( "ref" );
ma->setType( dae.getAtomicTypes().get("xsIDREF"));
ma->setOffset( daeOffsetOf( domFx_surface_init_cube_common::domPrimary , attrRef ));
ma->setContainer( meta );
ma->setIsRequired( true );
meta->appendAttribute(ma);
}
meta->setElementSize(sizeof(domFx_surface_init_cube_common::domPrimary));
meta->validate();
return meta;
}
daeElementRef
domFx_surface_init_cube_common::domPrimary::domOrder::create(DAE& dae)
{
domFx_surface_init_cube_common::domPrimary::domOrderRef ref = new domFx_surface_init_cube_common::domPrimary::domOrder(dae);
return ref;
}
daeMetaElement *
domFx_surface_init_cube_common::domPrimary::domOrder::registerElement(DAE& dae)
{
daeMetaElement* meta = dae.getMeta(ID());
if ( meta != NULL ) return meta;
meta = new daeMetaElement(dae);
dae.setMeta(ID(), *meta);
meta->setName( "order" );
meta->registerClass(domFx_surface_init_cube_common::domPrimary::domOrder::create);
meta->setIsInnerClass( true );
// Add attribute: _value
{
daeMetaAttribute *ma = new daeMetaAttribute;
ma->setName( "_value" );
ma->setType( dae.getAtomicTypes().get("Fx_surface_face_enum"));
ma->setOffset( daeOffsetOf( domFx_surface_init_cube_common::domPrimary::domOrder , _value ));
ma->setContainer( meta );
meta->appendAttribute(ma);
}
meta->setElementSize(sizeof(domFx_surface_init_cube_common::domPrimary::domOrder));
meta->validate();
return meta;
}
daeElementRef
domFx_surface_init_cube_common::domFace::create(DAE& dae)
{
domFx_surface_init_cube_common::domFaceRef ref = new domFx_surface_init_cube_common::domFace(dae);
return ref;
}
daeMetaElement *
domFx_surface_init_cube_common::domFace::registerElement(DAE& dae)
{
daeMetaElement* meta = dae.getMeta(ID());
if ( meta != NULL ) return meta;
meta = new daeMetaElement(dae);
dae.setMeta(ID(), *meta);
meta->setName( "face" );
meta->registerClass(domFx_surface_init_cube_common::domFace::create);
meta->setIsInnerClass( true );
// Add attribute: ref
{
daeMetaAttribute *ma = new daeMetaAttribute;
ma->setName( "ref" );
ma->setType( dae.getAtomicTypes().get("xsIDREF"));
ma->setOffset( daeOffsetOf( domFx_surface_init_cube_common::domFace , attrRef ));
ma->setContainer( meta );
ma->setIsRequired( true );
meta->appendAttribute(ma);
}
meta->setElementSize(sizeof(domFx_surface_init_cube_common::domFace));
meta->validate();
return meta;
}
} // ColladaDOM141
|
import aiofiles
import base64
import os
from pathlib import Path
from bson import ObjectId
from sanic_openapi import doc
from sanic.response import json, file
from sanic import Blueprint
from sanic.views import HTTPMethodView
from async_files.utils import async_wraps
from ..util import async_rmtree
from ..util.decorator import token_required
from ..model.database import Organization, Team, User, Test, Task, TaskQueue, TestResult
from ..service.auth_helper import Auth
from ..util.dto import OrganizationDto, json_response, organization_team
from ..util.response import response_message, SUCCESS, USER_NOT_EXIST, EPERM, ENOENT, EINVAL, EEXIST
from ..config import get_config
from ..util.identicon import render_identicon
USERS_ROOT = Path(get_config().USERS_ROOT)
_user_list = OrganizationDto.user_list
_organization_list = OrganizationDto.organization_list
_new_organization = OrganizationDto.new_organization
_organization_id = OrganizationDto.organization_id
_organization_team_list = OrganizationDto.organization_team_list
_transfer_ownership = OrganizationDto.transfer_ownership
_organization_avatar = OrganizationDto.organization_avatar
bp = Blueprint('organization', url_prefix='/organization')
class OrganizationView(HTTPMethodView):
@doc.summary('List all organizations joined by the logged in user')
@doc.consumes(doc.String(name='X-Token'), location='header')
@doc.produces(_organization_list)
@token_required
async def get(self, request):
ret = []
check = []
user = request.ctx.user
async for organization in Organization.find({'owner': user.pk}):
owner = await organization.owner.fetch()
ret.append({
'label': organization.name,
'owner': owner.name,
'owner_email': owner.email,
'personal': organization.personal,
'value': str(organization.pk)
})
check.append(organization)
for organization in user.organizations:
if organization in check:
continue
owner = await organization.owner.fetch()
ret.append({
'label': organization.name,
'owner': owner.name,
'owner_email': owner.email,
'personal': organization.personal,
'value': str(organization.pk)
})
ret.sort(key=lambda x: not x['personal'])
return json(response_message(SUCCESS, organizations=ret))
@doc.summary('create a new organization')
@doc.description('The logged in user performing the operation will become the owner of the organization')
@doc.consumes(doc.String(name='X-Token'), location='header')
@doc.consumes(doc.String(name='name', description="new organization\'s name"), location='body')
@doc.produces(json_response)
@token_required
async def post(self, request):
data = request.json
name = data.get('name', None)
if not name:
return json(response_message(EINVAL, 'Field name is required'))
user = request.ctx.user
org = Organization(name=name)
org.owner = user
org.members.append(user)
await org.commit()
user.organizations.append(org)
await user.commit()
org.path = name + '#' + str(org.pk)
org_root = USERS_ROOT / org.path
try:
await aiofiles.os.mkdir(org_root)
except FileExistsError as e:
return json(response_message(EEXIST))
img = await render_identicon(hash(name), 27)
await async_wraps(img.save)(org_root / ('%s.png' % org.pk))
org.avatar = f'{org.pk}.png'
await org.commit()
return json(response_message(SUCCESS))
@doc.summary('delete an organization')
@doc.description('Only the owner of the organization could perform this operation')
@doc.consumes(doc.String(name='X-Token'), location='header')
@doc.consumes(_organization_id, location='body')
@doc.produces(json_response)
@token_required
async def delete(self, request):
organization_id = request.json.get('organization_id', None)
if not organization_id:
return json(response_message(EINVAL, "Field organization_id is required"))
organization = await Organization.find_one({'_id': ObjectId(organization_id)})
if not organization:
return json(response_message(ENOENT, "Team not found"))
user = request.ctx.user
if await organization.owner.fetch() != user:
return json(response_message(EINVAL, 'You are not the organization owner'))
try:
await async_rmtree(USERS_ROOT / organization.path)
except FileNotFoundError:
pass
user.organizations.remove(organization)
await user.commit()
# Tests belong to teams of the organization will be deleted as well by this query
async for test in Test.find({'organization': organization.pk}):
async for task in Task.find({'test': test.pk}):
async for tr in TestResult.find({'task': task.pk}):
await tr.delete()
await task.delete()
await test.delete()
async for queue in TaskQueue.find({'organization': organization.pk}):
queue.to_delete = True
queue.organization = None
queue.team = None
await queue.commit()
        async for team in Team.find({'organization': organization.pk}):
            async for user in User.find():
                # Only touch users that actually reference the team, and commit
                # the instance (committing the class was a bug).
                if team in user.teams:
                    user.teams.remove(team)
                    await user.commit()
            await team.delete()
await organization.delete()
return json(response_message(SUCCESS))
@bp.get('/avatar/<org_id>')
@doc.summary('get the avatar of an organization')
@doc.consumes(doc.String(name='X-Token'), location='header')
@doc.produces(_organization_avatar)
@token_required
async def handler(request, org_id):
user = request.ctx.user
org = await Organization.find_one({'_id': ObjectId(org_id)})
if org:
if user not in org.members:
            return json(response_message(EPERM, 'You are not a member of the organization'))
if org.avatar:
async with aiofiles.open(USERS_ROOT / org.path / org.avatar, 'rb') as img:
_, ext = os.path.splitext(org.avatar)
return json(response_message(SUCCESS, type=f'image/{ext[1:]}', data=base64.b64encode(await img.read()).decode('ascii')))
else:
owner = await org.owner.fetch()
async with aiofiles.open(USERS_ROOT / org.path / owner.avatar, 'rb') as img:
_, ext = os.path.splitext(owner.avatar)
return json(response_message(SUCCESS, type=f'image/{ext[1:]}', data=base64.b64encode(await img.read()).decode('ascii')))
return json(response_message(ENOENT, 'Organization not found'))
@bp.delete('/member')
@doc.summary('let current logged in user quit the organization')
@doc.consumes(doc.String(name='X-Token'), location='header')
@doc.consumes(_organization_id, location='body')
@doc.produces(json_response)
@token_required
async def handler(request):
organization_id = request.json.get('organization_id', None)
if not organization_id:
return json(response_message(EINVAL, "Field organization_id is required"))
org_to_quit = await Organization.find_one({'_id': ObjectId(organization_id)})
if not org_to_quit:
return json(response_message(ENOENT, "Organization not found"))
user = request.ctx.user
for organization in user.organizations:
if organization != org_to_quit:
continue
if await organization.owner.fetch() == user:
return json(response_message(EPERM, "Can't quit the organization as you are the owner"))
organization.members.remove(user)
await organization.commit()
user.organizations.remove(organization)
await user.commit()
return json(response_message(SUCCESS))
else:
return json(response_message(EINVAL, "User is not in the organization"))
@bp.get('/all')
@doc.summary('list all organizations registered')
@doc.consumes(doc.String(name='X-Token'), location='header')
@doc.produces(_organization_list)
@token_required
async def handler(request):
ret = []
async for organization in Organization.find():
if organization.name == 'Personal':
continue
owner = await organization.owner.fetch()
ret.append({
'label': organization.name,
'owner': owner.name,
'owner_email': owner.email,
'personal': organization.personal,
'value': str(organization.pk)
})
return json(response_message(SUCCESS, organizations=ret))
@bp.get('/include_team')
@doc.summary('list all organizations and teams registered')
@doc.consumes(doc.String(name='X-Token'), location='header')
@doc.produces(_organization_team_list)
@token_required
async def handler(request):
ret = []
check = []
user = request.ctx.user
async for organization in Organization.find({'owner': user.pk}):
owner = await organization.owner.fetch()
r = {
'label': organization.name,
'owner': owner.name,
'owner_email': owner.email,
'personal': organization.personal,
'value': str(organization.pk)
}
ret.append(r)
check.append(organization)
if not organization.teams:
continue
if len(organization.teams) > 0:
r['children'] = []
for team in organization.teams:
team = await team.fetch()
owner = await team.owner.fetch()
r['children'].append({
'label': team.name,
'owner': owner.name,
'owner_email': owner.email,
'value': str(team.pk)
})
for organization in user.organizations:
if organization in check:
continue
organization = await organization.fetch()
owner = await organization.owner.fetch()
r = {
'label': organization.name,
'owner': owner.name,
'owner_email': owner.email,
'personal': organization.personal,
'value': str(organization.pk)
}
ret.append(r)
if not organization.teams:
continue
if len(organization.teams) > 0:
r['children'] = []
for team in organization.teams:
team = await team.fetch()
owner = await team.owner.fetch()
r['children'].append({
'label': team.name,
'owner': owner.name,
'owner_email': owner.email,
'value': str(team.pk)
})
return json(response_message(SUCCESS, organization_team=ret))
@bp.post('/join')
@doc.summary('join an organization')
@doc.consumes(doc.String(name='X-Token'), location='header')
@doc.consumes(_organization_id, location='body')
@doc.produces(json_response)
@token_required
async def handler(request):
org_id = request.json.get('organization_id', None)
if not org_id:
return json(response_message(EINVAL, "Field organization_id is required"))
user = request.ctx.user
organization = await Organization.find_one({'_id': ObjectId(org_id)})
if not organization:
return json(response_message(ENOENT, 'Organization not found'))
if user not in organization.members:
organization.members.append(user)
await organization.commit()
if organization not in user.organizations:
user.organizations.append(organization)
await user.commit()
return json(response_message(SUCCESS))
@bp.get('/users')
@doc.summary('list all users of an organization')
@doc.description('Note: Users in a team of the organization will not be counted')
@doc.consumes(doc.String(name='X-Token'), location='header')
@doc.consumes(_organization_id)
@doc.produces(_user_list)
@token_required
async def handler(request):
user = request.ctx.user
organization_id = request.args.get('organization_id', None)
if not organization_id:
return json(response_message(EINVAL, 'Field organization_id is required'))
organization = await Organization.find_one({'_id': ObjectId(organization_id)})
if not organization:
return json(response_message(ENOENT, 'Organization not found'))
if user not in organization.members:
return json(response_message(EPERM, 'You are not in the organization'))
ret = []
for member in organization.members:
m = await member.fetch()
ret.append({
'value': str(m.pk),
'label': m.name,
'email': m.email
})
return json(response_message(SUCCESS, users=ret))
@bp.get('/all_users')
@doc.summary('list all users')
@doc.description('Note: All Users in the organization and the organization\'s teams will be counted')
@doc.consumes(doc.String(name='X-Token'), location='header')
@doc.consumes(_organization_id)
@doc.produces(_user_list)
@token_required
async def handler(request):
user = request.ctx.user
organization_id = request.args.get('organization_id', None)
if not organization_id:
return json(response_message(EINVAL, 'Field organization_id is required'))
organization = await Organization.find_one({'_id': ObjectId(organization_id)})
if not organization:
return json(response_message(ENOENT, 'Organization not found'))
for member in organization.members:
m = await member.fetch()
if user == m:
break
else:
return json(response_message(EPERM, 'You are not in the organization'))
ret = []
check_list = []
for member in organization.members:
m = await member.fetch()
check_list.append(m)
ret.append({'value': str(m.pk), 'label': m.name, 'email': m.email})
for team in organization.teams:
for user in team.members:
u = await user.fetch()
user_id = str(u.pk)
if u not in check_list:
ret.append({'value': user_id, 'label': u.name, 'email': u.email})
check_list.append(u)
return json(response_message(SUCCESS, users=ret))
@bp.post('/transfer')
@doc.summary('transfer ownership of an organization')
@doc.description('The new owner should have joined the organization or a team of the organization')
@doc.consumes(doc.String(name='X-Token'), location='header')
@doc.consumes(_transfer_ownership, location='body')
@doc.produces(json_response)
@token_required
async def handler(request):
user = request.ctx.user
organization_id = request.json.get('organization_id', None)
if not organization_id:
return json(response_message(EINVAL, 'Field organization_id is required'))
organization = await Organization.find_one({'_id': ObjectId(organization_id)})
if not organization:
return json(response_message(ENOENT, 'Organization not found'))
if await organization.owner.fetch() != user:
return json(response_message(EPERM, 'You are not the organization owner'))
owner_id = request.json.get('new_owner', None)
if not owner_id:
return json(response_message(EINVAL, 'Field new_owner is required'))
owner = await User.find_one({'_id': ObjectId(owner_id)})
if not owner:
return json(response_message(ENOENT, 'New owner not found'))
if owner not in organization.members:
for team in organization.teams:
if owner in team.members:
break
else:
return json(response_message(EPERM, 'New owner should be a member of the organization'))
organization.owner = owner
if owner not in organization.members:
organization.members.append(owner)
await organization.commit()
for team in organization.teams:
if team.owner == user:
team.owner = owner
if owner not in team.members:
team.members.append(owner)
await team.commit()
return json(response_message(SUCCESS))
bp.add_route(OrganizationView.as_view(), '/')
|
The Sincerest Form of Flattery: Nationalist Emulation during the COVID-19 Pandemic As COVID-19 rapidly spread across the globe, every government in the world has been forced to enact policies to slow the spread of the virus. While leaders often claim responses are based on the best available advice from scientists and public health experts, recent policy diffusion research suggests that countries are emulating the COVID-19 policies of their neighbors instead of responding to domestic conditions. Political and geographic considerations play a role in determining which countries imitate one another, but even among countries that are politically or geographically distant, nationalist regimes seem to favor certain approaches towards the pandemic. We investigate why this is the case by examining whether countries that embrace a nationalist ideology are more likely to emulate the COVID-19 policies of similarly nationalist regimes. We demonstrate that, even after controlling for domestic circumstances and linguistic, trade, geographic, and political connections, nationalist countries are emulating each others responses. These results are robust and shed light not only on new mechanisms of policy diffusion but also on the growing international cooperation of nationalist regimes and leaders. makes analyzing this emerging phenomenon of cross-border cooperation between nationalists both vitally important and difficult, especially for a large-scale quantitative approach. Additionally, the relative newness and rapid growth of nationalist internationalism mean that its influence on policy diffusion may not have been detectable even a few years earlier. Given the wealth of high-definition data it has created, the Pandemic offers a unique opportunity to test the level to which nationalist countries take cues from one another. Disaster Nationalism and COVID-19 While previous research on policy diffusion during the pandemic found that countries in general are emulating each other's responses, the rise of nationalist internationalism and what appears to be specifically nationalist responses to COVID-19 raises the question of whether and how nationalists are emulating each other's responses. Disasters are exogenous shocks that can rapidly alter the physical, human, political, social, and economic landscape of a country. They create "unsettled times, or ontologically insecure moments when social and political institutions are in flux". In some cases, this can facilitate civic engagement, bridging divides and prompting strangers or even enemies to go to great lengths to help each other. While most natural disasters are regional, a disaster that is sufficiently widespread or severe enough to be perceived as national in scale can bolster feelings of nationalism. As former premier of China Wen Jiabao famously wrote in response to the Sichuan Earthquake: "Disasters regenerate a nation. 多难兴邦". Savvy governments may capitalize on this rally around the flag effect. However, early evidence from the pandemic suggests that calls to nationalism may only work on those already predisposed to nationalist ideology. In other cases, disasters can exacerbate existing social, political, religious, ethnic, or other divides even to the point of armed conflict. This is particularly true when leaders use a disaster to demonize an outgroup, such as domestic minority groups, refugees, or immigrants. Nationalists also tend to blame other countries or transnational actors. 
Yet even when leaders do not fault outgroups for disasters, political focus tends to shift towards national problems. As a result, efforts at a more expansive foreign policy may wither as a country's focus turns to domestic concerns. Japan may have fallen into this trap after the Fukushima disaster even as its East Asian rival China avoided it following the Sichuan earthquake and rapidly expanded the scope of its foreign policy engagement. So how does a nationalist lens help us understand countries' responses to COVID-19? Bieber expresses concern that the pandemic will provide an opportunity for far-right nationalist governments to pursue prior preferences: ramping up authoritarianism, reducing democratic freedoms and civil liberties, promoting biases against certain groups, strengthening borders, deglobalizing, and appealing to the politics of fear. Yet these were already existing tendencies and tell us little about how nationalists will handle the pandemic itself and where such regimes look for information about and examples of policies. It seems obvious that the COVID-19 pandemic is meaningfully distinct from most disasters we have seen over the past century. While its death toll, to date at least, is not particularly high compared with some of the largest pandemics and natural and manmade disasters of the past century, its scope is unprecedented. The Great Chinese Famine killed somewhere between 15 and 45 million people, but was restricted almost entirely to China. Taken in total, the Second World War had a higher death toll and greater geographic range, but still left many parts of the world, most notably sub-Saharan Africa and Latin America, largely untouched. The two obvious points of comparison with COVID-19, especially as its death toll continues to rise, are the AIDS epidemic and the 1918 Influenza Pandemic. The AIDS epidemic, while similarly spread throughout the world, was much slower moving and disproportionately impacted marginalized communities. In the early years, this made it easier to ignore the issue or blame it on outgroup scapegoats. The 1918 Influenza Pandemic is more similar in speed to the spread of COVID-19, yet the world in which it occurred was so different that it is hard to draw lessons from that pandemic. Among other things, more limited and slower travel, the end of the First World War, and globe-spanning empires created a very different international political landscape from that of today. Furthermore, robust data describing countries and their response to the onset of Spanish Flu are unavailable, making it difficult to examine the drivers of countries' responses. Because the COVID-19 Pandemic is truly global in a way that few disasters have been, it is reasonable to expect that reactions may be significantly different from those in more localized disasters. It might be harder or less tempting to blame an outgroup when the whole world finds itself in nearly identical circumstances. The availability of high-definition data makes it possible to systematically investigate how countries responded to COVID-19, allowing us to investigate for the first time the factors driving decision-making among nationalist countries in a global crisis. Nationalist Responses and Emulation Language Because we use an index measuring the overall strictness of anti-COVID-19 measures to operationalize nationalist emulation, the specific nature and details of nationalist leaders' responses to the Pandemic are not addressed directly by our data analysis. 
However, a brief look at the responses of countries that scored above the median on our measure of nationalism (as discussed below) demonstrates certain similarities in approaches to the pandemic. Specifically, nationalist regimes often appear to have used a combination of downplaying the problem, especially early on, appealing to exceptionalism by arguing that their country would be uniquely protected, promoting as-yet unproven preventative measures and treatments, and blaming outgroups including other countries and transnational actors. President Bolsonaro of Brazil called COVID-19 "a little flu" and claimed that Brazilians "are uniquely suited to weather the pandemic because they can be dunked in raw sewage and 'don't catch a thing'". The United Kingdom considered a herd immunity strategy, saw the virus as only a "moderate risk" and refused to emulate its Northern European neighbors, allowing bars and nightclubs to stay open as the rest of Europe closed down. President Duterte of the Philippines assured the media on February 3rd that "even without the vaccines it will just die a natural death". Iranian Supreme Leader Ayatollah Ali Khamenei tweeted that "COVID-19 is not such a big tragedy" and that "The prayers of the pure youth and pious are very effective in repelling major tragedies". Belarusian President Alexander Lukashenko appealed to national identity when he advised Belarusians to drink vodka and visit saunas to stay healthy, while refusing to order a lockdown. Hungarian Prime Minister Viktor Orban blamed foreigners and migration for the spread of coronavirus in his country. The Presidents of both the United States and Brazil focused on the same antimalarial drug despite a lack of evidence, demonstrating nationalist internationalism by discussing a joint research effort. Nationalist regimes have also tended to boast about their response to COVID-19 and, when previous overly optimistic predictions proved false, have massaged statistics to make their handling of the virus look better. Turkmenistan has taken this strategy to its logical conclusion, denying that the country had even a single case while insisting its citizens wear masks to protect against "dust". Sometimes making use of these doctored statistics, nationalist leaders have tried to emphasize the success of their response: "...we are working rather smoothly and emerging from this situation with the coronavirus confidently and, with minimal losses..." Vladimir Putin told Russian state TV in June. "So far we've done it better than nearly any other country in the world and I assess that by the end of this we will be the best in the world," claimed nationalist Prime Minister of Israel Binyamin Netanyahu in March, only to see cases spike again a few months later. The responses of nationalists seem markedly different from the cautious and measured messaging of less-nationalist leaders. A case in point was New Zealand where, despite managing perhaps the most successful pandemic response anywhere, Prime Minister Jacinda Ardern told her citizens that "there is no widespread undetected community transmission in New Zealand but we must remain vigilant if we are to keep it that way". There has also been evidence of nationalist leaders communicating with each other and praising each other's responses. In March, US President Donald Trump announced a ban on travel from the 26 countries in the European Schengen Area. 
Yet, he made a point not only of excluding the United Kingdom (UK) from the ban, but also of claiming that the UK "has got very strong borders and they are doing a very good job" despite the fact that the UK was taking a relatively lax approach to the Pandemic and had more cases than many Schengen Area countries. Some speculated that this was a favor for or in admiration of nationalist Prime Minister Boris Johnson. Later, the two agreed on a coordinated response to the pandemic. Trump is also reported to have frequently spoken with nationalist leaders including Duterte, Putin, and Erdogan during the pandemic. Considering that none of these countries are particularly geographically, economically, or scientifically close, this is strong evidence of communication between similarly-minded nationalist leaders. Possible limits of international nationalist solidarity were also on display when it came to nationalist leaders' assessment of other nationalists' pandemic responses; Putin has publicly criticized the US handling of the pandemic. This points to the inherent contradictions of nationalist internationalism, which allows or encourages nationalist leaders to boost their image by criticizing outsiders, including other nationalists, even as they emulate each other. The specifics of nationalist responses to COVID-19 deserve further study. In the subsequent sections we take one important step in this direction by assessing whether or not regular communication and stylistic similarities in nationalists' political responses reflect deeper similarities in policies and whether or not they emulate each other in the enactment of these policies. Policy Diffusion Policy diffusion theory seeks to explain why and when governance units (from counties to countries) adopt specific policies. It considers internal determinants such as the problem characteristics in a country, political and institutional context, and available resources and government capacity, as well as external determinants in the form of coercion, competition, learning, and emulation from other governance units. The combination of these factors drives the spread of policies between different governance units. Trade flow, geographic proximity, and cultural or linguistic similarities are typically used as proxies to capture relationships between countries. Yet, the field is still rapidly evolving. The impact of different diffusion mechanisms has been shown to change over time, and recent research has demonstrated that the importance of ideology is increasing relative to that of geography. But exactly which mechanisms lead to a governance unit adopting policies is still a source of debate. In the typical policy diffusion model, governance units look to each other's policies, analyzing outcomes in order to follow successful policy experiments and adopt the optimal policies. The speed of the COVID-19 Pandemic has short-circuited this model. The rapid spread of the virus, the need to act quickly, and the lag time between the implementation of policies and statistics that reliably capture their effects mean that rather than learning from their experiences, countries are often simply emulating neighbors' policies without information about their outcomes. Existing research has suggested that in the Pandemic, emulation primarily occurs between geographic and political neighbors (countries with similar levels of democracy). 
In order to test whether the same type of emulation is occurring between nationalist regimes, we follow existing policy diffusion research in conducting panel regression analysis that attempts to model the spread of policies based on how similar different countries are in terms of levels of nationalism. Materials and Methods We employ a dyadic event history analysis approach to analyze a panel of international COVID-19 data that we construct from a variety of secondary sources. Dyadic event history analyses are useful for explaining where diffusing policies originate. Pandemic data are taken from the Oxford COVID-19 Government Response Tracker (OxCGRT), which provides daily information on international COVID-19 rates, deaths, and the dynamic government responses to the crisis. Since the rise in cases across missing observations can be consistently estimated given the observations before and after the missing data, we interpolate missing data points for the number of confirmed COVID-19 cases in each country over time. OxCGRT provides up-to-date indicators measuring countries' health and economic policies as well as how they respond to the pandemic in key policy areas relevant to transmission of the virus. Nine of these policy areas (school closings, business closings, transportation shutdowns, gathering size restrictions, internal travel, international travel, event cancelations, stay-at-home orders, and public information campaigns) are directly linked to countries' efforts to stem transmission and have been combined into the COVID-19 policy stringency index in the OxCGRT dataset. This measure describes the level of policy stringency in each country on each day and serves as our dependent variable. As a robustness test, we additionally run our model on a more precise index focused specifically on social distancing policies, which was also used in recent policy diffusion research. This more precise index is important for our purposes because it excludes international border closures, which we might expect to be connected to high levels of nationalism. We develop this more specific measure by refining the original stringency index into a social distancing index, aggregating measures for the five indicators most relevant to social distancing efforts: school and business closings, transportation shutdowns, event cancelations, and internal travel restrictions. We combine the OxCGRT data with measures of countries' political characteristics from the Varieties of Democracy (V-Dem) dataset. V-Dem is currently the standard choice for these types of comparative politics analyses. V-Dem assesses governments' legitimization strategies based on expert opinions and provides a measure of countries' level of nationalism along with other critical socio-political controls. The consensus opinion among expert coders of whether a country promotes nationalism as a legitimating ideology is measured on a scale of 0 to 1 in the V-Dem dataset and is described in Table 1. We also control for countries' administrative capacity using the Worldwide Governance Indicator (WGI) 2019 estimate of government effectiveness, along with several relevant socio-economic controls. From the World Bank, we include Gross Domestic Product (GDP) per capita adjusted for international Purchasing Power Parity (PPP) and international tourism arrivals into a country. From the World Health Organization (WHO), we include hospital beds per capita, healthcare spending as a percentage of GDP, and the percentage of the population over 65 years old. 
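As a rough illustration of how an index of this kind can be aggregated from daily ordinal indicators (a sketch only, not the authors' code; the column names below are placeholders rather than the actual OxCGRT indicator codes):

import pandas as pd

# Placeholder names for five OxCGRT-style ordinal indicators; the real dataset
# labels these with codes such as C1_School_closing, C2_Workplace_closing, etc.
SOCIAL_DISTANCING_COLS = [
    "school_closing", "workplace_closing", "transport_shutdown",
    "event_cancellation", "internal_movement_restrictions",
]

def social_distancing_index(df: pd.DataFrame) -> pd.Series:
    # Rescale each ordinal indicator to [0, 1] by its maximum observed level,
    # then average across indicators, yielding a 0-100 daily index per row.
    scaled = df[SOCIAL_DISTANCING_COLS] / df[SOCIAL_DISTANCING_COLS].max()
    return 100 * scaled.mean(axis=1)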
Since we are interested in understanding how countries emulate one another in a global crisis, we develop several measures of interconnectivity to investigate diffusion across different signaling pathways. This allows us to explore how different countries' policymakers take cues from each other and why they do so. We create international connectivity matrices from the V-Dem data as well as the Correlates of War Project (COW) and the Centre d'Etudes Prospectives et d'Informations Internationales (CEPII) to examine country interconnectivity across geographic proximity, trade, language similarity, political similarity, and ideological similarity. We focus on countries' ideological similarity to investigate whether nationalist states emulate one another, measuring nationalism in two ways. First, we measure the similarity between each dyad of countries in our dataset as the absolute value of the difference between their V-Dem nationalism scores. We restrict this measure to ensure that we only capture similarity between nationalist peers (as opposed to similarity between more internationalist countries) by estimating similarities only among countries in the dataset with above-average nationalism scores. If we included countries with below-average scores as well, our results would indicate the impact of ideological similarity rather than nationalistic similarity. Second, we interact the nationalism scores of each country dyad to generate a paired nationalism-similarity index and use this measure to test the robustness of our results, again ensuring that our findings are not driven by emulation between more internationalist peers. While this method allows us to examine country dyads, the structure of our data prevents us from assessing the directionality of individual changes between countries. Since our dependent variable represents an index of policy stringency, it does not allow us to track the origin of specific COVID-19 policies. Even if we do not take these measures to focus only on emulation between more nationalist regimes, however, our results still appear to be robust. Countries in our data are considered geographically adjacent if they are contiguous or separated by less than 400 miles of uninterrupted water. This measure allows us to estimate the impact of regional proximity on countries' COVID-19 responses. Economic interdependency is captured through directional trade between country dyads. Political similarity is measured as the difference between two countries' V-Dem Electoral Democracy Index scores. Next, we use international language similarity scores from CEPII, calculated based on Ethnologue classifications of language trees. We weight relevant COVID-19 variables by each type of international connectivity to investigate diffusion effects across each pathway. By including these dimensions of international interconnection in our model, we are able to assess the impact of countries' nationalist ideologies while controlling for alternative pathways of influence. Finally, we calculate the mean policy stringency and COVID-19 per capita case numbers among each country's neighbors for each group and lag our time-dependent variables by one day to account for a minimum period of time necessary for policy implementation. Additionally, we interact country-level time-invariant control variables with confirmed COVID-19 cases to estimate their policy impact conditional on pandemic circumstances. 
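A minimal sketch of the first similarity measure and the lagged peer mean described above; the 1 - |difference| weighting and all variable names are our assumptions for illustration, not the authors' exact specification:

import numpy as np
import pandas as pd

def nationalist_peer_weights(nat: pd.Series) -> pd.DataFrame:
    # Dyadic weights from nationalism scores indexed by country:
    # 1 - |nat_i - nat_j| among above-average-nationalism countries, 0 elsewhere.
    peers = nat[nat > nat.mean()]
    diff = np.abs(peers.to_numpy()[:, None] - peers.to_numpy()[None, :])
    arr = 1.0 - diff
    np.fill_diagonal(arr, 0.0)  # a country is not its own peer
    w = pd.DataFrame(arr, index=peers.index, columns=peers.index)
    return w.reindex(index=nat.index, columns=nat.index, fill_value=0.0)

def lagged_peer_mean(stringency: pd.DataFrame, w: pd.DataFrame) -> pd.DataFrame:
    # Peer-weighted mean stringency, lagged one day.
    # stringency: rows = dates, columns = countries; w: country-by-country weights.
    norm = w.sum(axis=1).replace(0, np.nan)  # avoid dividing by zero for isolated countries
    peer_mean = (stringency @ w.T) / norm    # pandas aligns the Series on columns
    return peer_mean.shift(1)                # one-day lag for implementation time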
By interacting our country-level covariates with the countries' confirmed cases we can assess how these variables influence a country's policies conditional on the status of the pandemic. We merge these data into an unbalanced panel dataset covering 196 countries from January 1st to July 13th, 2020, organized using country-day as the unit of analysis. Descriptive statistics are in Table 2. The structure of our data prevents us from using a directed dyad approach, creating the potential for bias in our results. Unconditional dyadic approaches can bias results, artificially strengthening the claim that emulation is taking place. While the contextual evidence we present mitigates concern over our findings, future research should investigate the directionality of emulation among nationalist peers. Additionally, duration dependence and unobserved international heterogeneity over time may introduce bias to our results. The baseline likelihood of countries adopting more stringent COVID-19 policies may change over time as advice from world health officials or agencies changes or public pressure shifts in response to pandemic conditions. While the short time period we observe (seven months) and the overall rapid pace of the pandemic may decrease the size, if not the likelihood, of such changes, we cannot rule out that possibility. Fixed effects models are useful for controlling for unobserved heterogeneity among groups and determining causality using panel data. We use a linear fixed effects model, $y_{it} = \beta X_{it} + \alpha_i + \varepsilon_{it}$, where $X_{it}$ is a set of explanatory variables, $\alpha_i$ represents fixed effects controlling for subject-specific unobserved heterogeneity across countries, and $\varepsilon_{it}$ represents the error term, varying over countries (i) and days (t). This method controls for individual-specific correlation between country-level variables in our model and allows us to draw accurate inferences. We examine COVID-19 policy diffusion between nationalist countries to understand how the nationalism of a country influences the choices its policymakers make. Models 1 and 2, included in Table 3, estimate the peer effects of ideologically similar, nationalist peers on policy emulation. In Model 1, we test whether nationalist countries are significantly more likely to emulate their nationalist peers, and in Model 2, we test whether this influence appears to persist after controlling for diffusion across geographic, economic, political, and linguistic pathways of international interconnection. Results The results depicted in Fig. 1 (which correspond to Model 2 in Table 3, but with all independent variables standardized to improve legibility of the graph) show the impact of key socio-political, economic, and diffusion variables on countries' COVID-19 policy stringency. This provides a visual representation of how countries enact more stringent policies to contain the spread of the virus if their nationalist peers have done the same, even when controlling for other potential pathways of imitation. (Full statistical output is included in Table 3). Both models in Table 3 demonstrate the influence that nationalist decision-makers have had over their ideological peers during the COVID-19 pandemic. When testing the peer effects of nationalism individually in Model 1, we find that policymakers in countries with above-average levels of nationalism emulate the COVID-19 policies of other nationalist states. 
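To show the shape of such an estimation, here is a minimal sketch using the linearmodels package; the file name, variable names, and the regressors shown are placeholders, not the paper's actual specification:

import pandas as pd
from linearmodels.panel import PanelOLS

# Hypothetical panel with a (country, date) MultiIndex, one row per country-day.
df = pd.read_csv("covid_panel.csv", parse_dates=["date"]).set_index(["country", "date"])

# Country fixed effects absorb time-invariant heterogeneity (the alpha_i above);
# regressors are one-day-lagged peer means plus domestic conditions.
model = PanelOLS.from_formula(
    "stringency ~ 1 + nationalist_peer_stringency_lag1"
    " + geographic_peer_stringency_lag1 + cases_per_capita + EntityEffects",
    data=df,
)
res = model.fit(cov_type="clustered", cluster_entity=True)  # SEs clustered by country
print(res.summary)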
Countries with above-average nationalism are significantly more likely to implement a policy change the day after an ideologically nationalist peer changes its COVID-19 policies. Furthermore, the results from Model 2 indicate that this influence is very robust. The influence of nationalist peers persists even after controlling for other international pathways of policy emulation that have been shown to drive policy decisions during the COVID-19 pandemic. Our results demonstrate that nationalist peer relationships significantly influence COVID-19 policy stringency even when controlling for emulation of geographic, trade, political, and linguistic peers. Geographic connections remain a significant driver in our model as well, revealing support for similar influence among geographic neighbors. However, nationalist similarity appears to be a more important driver of policy than political, trade, or linguistic connection. During the pandemic, nationalist countries are making policy decisions about how to respond to the global crisis by taking cues from other nationalist countries. We conduct several robustness checks to test the validity of our claims. First, we estimate our models on a narrower policy index of social distancing policies; then, we test the influence of nationalist peer effects using an alternative measurement of nationalist similarity by interacting countries' nationalism scores. These results are included in Models 3 through 6 in Table A of the Appendix. With the exception of Model 4, which measures the impact of our alternative measure of nationalist similarity on overall policy stringency, all of these tests uphold our initial findings. Several other models using different configurations of control variables, although not shown, also suggest that these results are very robust. Interestingly, while COVID-19 cases per capita is significant in Model 1, the baseline number of confirmed COVID-19 cases in a country is not significant once we control for the full set of emulation pathways in Model 2. Government effectiveness and the proportion of the population over 65 years old are significant, indicating that these variables influence policy decisions contingent on the number of confirmed cases, but countries' COVID-19 status is otherwise insignificant. Our results show that the number of confirmed cases per capita in neighboring countries is also insignificant in policymakers' decisions (though not in all specifications). This reveals the importance of imitation and policy emulation in policymaking related to this global crisis, something previous research had already suggested. Countries' COVID-19 policies are more heavily influenced by the decisions of their geographic and ideological peers than by the status of the crisis itself. Rather than responding to health statistics, countries are imitating their neighbors, while nationalist countries are mimicking their ideological peers. The effects of internal, country-level characteristics, as conditioned by the number of COVID-19 cases in the country, are largely insignificant in our models. These variables demonstrate policymakers' reaction to problem conditions in their country, but the lack of broad significance reinforces the indication that decision makers are responding more to the actions of peer states than to relevant health and social characteristics at home. 
Aside from the peer effects of nationalist and nearby countries, we find that government effectiveness is, perhaps unsurprisingly, significantly correlated with higher COVID-19 policy stringency contingent on confirmed cases. GDP is significant and negative in Model 1, but insignificant in Model 2. This suggests that less wealthy countries feel the need to enact stricter policies in response to COVID-19 cases, an effect that may be conditional on our already controlling for government capacity. However, the percentage of a country's population older than 65, a demographic widely considered at-risk for COVID-19, is actually negatively correlated with policy stringency in Model 2. Countries' policy responses to the presence of COVID-19 cases were significantly less stringent the higher the proportion of the population in this at-risk category. This counterintuitive result is inconsistent with expectations and emphasizes the disproportionate influence of peer imitation over internal characteristics in this global crisis. Domestic nationalism was only weakly positively significant in Model 1, demonstrating that this is more a story of nationalists emulating their perceived peers rather than a nationalist ideology driving a specific approach to the Pandemic. Other internal characteristics are insignificant in our main model. Discussion and Conclusions Our findings demonstrate that nationalist countries around the world have been emulating each other's responses to the COVID-19 pandemic. These results are robust even when controlling for emulation among similar political systems. The fact that nationalist countries are emulating each other's policy responses to COVID-19 rather than following the latest health and scientific evidence, or even imitating other countries with similar levels of democracy, seems remarkable. Upon close consideration, there were already some signs of nationalists admiring and emulating each other's political styles. Even before the pandemic, many leaders, for example, saw the increasing role of nationalism in the United States as a signal that they could play up their own nationalist tendencies. Our analysis shows that emulation between nationalist regimes is occurring but does not provide much insight into why. There are several likely possibilities as to how this policy diffusion is happening, but more research is needed to test how important any of them are. One possible explanation for how and why nationalist regimes would emulate each other is that they think it might help them deal with the fallout from their initial downplaying of the crisis. It is possible that other nationalist regimes ramping up anti-pandemic measures provides political cover for nationalist leaders to do the same without admitting their earlier mistakes and may make leaders feel secure that they will not suffer major political consequences. Given the highly compressed timeline, however, our preferred explanation for why nationalist leaders emulate each other is more straightforward and consistent with the previous research on this issue: the unprecedented speed and scale of the pandemic has put leaders all over the world on their heels. Their responses, therefore, often demonstrate the sometimes-unreflective emulation of countries or leaders that they see as similar to themselves. Our findings do not tell us a great deal about modes of nationalist response to disasters, nor are they meant to. 
Instead, they show that nationalist countries are looking to each other rather than, or at least in addition to, countries with which they share geographic, trade, political, or linguistic ties. This is consistent with, but not identical to, Motadel's nationalist internationalism, which emphasizes cooperation. The emulation evident in our model does not necessarily imply cooperation between nationalist countries, though there is evidence of communication between leaders, but it does show that they see each other as peers. Perhaps even more important than cooperation, however, we show that nationalist governments are enacting similar policies based on their emulation of each other. Our findings provide critical insight into the field of policy diffusion, both on the narrower issue of the impact of nationalism and on the broader question of different policy vectors. While trade, geography, and cultural, political or linguistic similarities are likely to continue to be important mechanisms for policy diffusion, new vectors need to be explored. Many thus-far unknown vectors of policy diffusion may exist and could be relatively easily tested, especially making use of the data from V-Dem and other similar sources. Here we have shown that a nationalist legitimating ideology is a previously unacknowledged vector of policy diffusion. Given the recent rise of nationalist leaders in a variety of countries and the relative newness of nationalist internationalism, this may be an almost entirely new vector of policy diffusion. It could also be that this vector was important in the past but was never tested, or that it has been important in certain eras, such as the 1930s, but not others. Evaluating which of these is true should be possible using historical data. It is also possible that nationalist diffusion will be limited to certain types of policies such as trade, refugees, or immigration. Given the role it played in the pandemic, we consider this unlikely, but again this could be a fruitful area for future research. Looking forward, it will become ever more important to ask whether nationalist diffusion will continue to increase and, if so, whether it will diminish the importance of other kinds of diffusion. What our findings mean for the future of the pandemic or nationalism generally is less clear. It seems possible that as the crisis drags on, the evolving situation and politics in each country will lead to different approaches to the pandemic. As COVID-19 becomes a known quantity, the urgency and uncertainty of the early months of the pandemic, which may have left leaders with little choice but to emulate each other, will wane. Nationalist leaders may plot more strategic paths, which could diverge from each other. What is likely to continue, however, is a general strengthening and deepening of ties of nationalist internationalism. Even if individual nationalist leaders, especially in democracies, lose power as a result of mishandling the pandemic, it seems likely that a general trend of nationalist leaders looking to each other for support and as examples will continue. Indeed, if nationalist leaders are willing to stick to their ideology and emulate each other even in the face of a once-in-a-century pandemic, it seems almost certain that they will continue to do so in the future. It therefore seems likely that only a profound electoral defeat or other major domestic political setbacks will dampen the nationalist internationalism we have found evidence for here. 
|
Ukraine's Implications for Indo-Pacific Alignment Russia's actions in Ukraine since February 2022 have sent shockwaves globally. Attention has understandably focused on the change in European attitudes toward security threats posed by Moscow, with the United States leading coalition-building responses including naming and shaming, imposing sanctions, and supplying military assistance to Ukraine. The demonstrative effect of the strength, unity, and speed of the Western response must ring alarm bells for Beijing, but it also leads to interesting questions about each state's choice of alignment globally. In Europe, most states have aligned on the basis of shared security interests with one side of a great-power rivalry (in this case, with the US over Russia and China). Undeniably, the assistance of key US partners in the Indo-Pacific, such as Australia and Japan, in imposing sanctions on Russia and providing military support to Ukraine shows that US allies and partners around the world are remarkably united. However, the broader response of Indo-Pacific powers to Russia has been divided, running the gamut from strong opposition through ambivalence to support. While the United States has received quick and robust support from many close allies in the region, it has had difficulties in gathering full-fledged support from some of its partners such as India, Singapore, and Thailand. The world is analyzing developments in Ukraine and gleaning lessons that can be applied beyond Europe. Arguably, the likelihood of a new Cold War with the |
def all_actions(action_bits: int) -> list[str]:
    # Decode a bitmask into the names of all LoggingActions flags set in action_bits.
    return [action.name for action in LoggingActions if action.value & action_bits] |
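For context, a runnable sketch of how this helper is typically exercised; the LoggingActions flag values below are hypothetical stand-ins, since the real enum is not part of the fragment above:

from enum import IntFlag

class LoggingActions(IntFlag):  # hypothetical values; the real enum is not shown
    CREATE = 1
    UPDATE = 2
    DELETE = 4

def all_actions(action_bits: int) -> list[str]:
    # Iterating the IntFlag class yields its single-bit members in definition order.
    return [action.name for action in LoggingActions if action.value & action_bits]

print(all_actions(LoggingActions.CREATE | LoggingActions.DELETE))  # ['CREATE', 'DELETE']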
Phase I study of cilostazol. Safety evaluation at increasing single doses in healthy volunteers. The safety and blood concentrations of the novel synthetic platelet aggregation inhibitor cilostazol (6--3,4-dihydro-2(1H)-quinolinone, OPC-13013) were determined in eight healthy volunteers. The drug was orally administered to the subjects once at successive dose increments of 25, 50, 75, 100, 150, 200 and 300 mg/body. Subjective complaints were headache at 75 mg/body in 1 of 4 subjects and dull headache at 75 mg/body in 2 of 4. Each of these symptoms was also seen at higher doses in 1 to 2 of 4 subjects. Clinical laboratory values, blood pressure and heart rate were normal. Blood concentrations of the drug peaked after 3 to 4 h, the levels declining with half-lives of 2.6 to 3.2 h in the alpha-phase and 19.5 to 25.5 h in the beta-phase. The decline was generally prompt. The average peak concentrations were 283.7, 663.4, 540, 823.6, 1110.4, 1129.2 and 1623.9 ng/ml at the above doses, respectively. The effect of food on the blood concentration was also studied; however, the areas under the curve did not indicate significant differences between the fed and fasted subjects. |
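The two half-lives reported above summarize a biexponential (two-compartment) decline in plasma concentration; as a sketch in standard pharmacokinetic notation, with A, B, and the rate constants as generic parameters rather than values fitted in this study:

$C(t) = A e^{-\alpha t} + B e^{-\beta t}, \qquad t_{1/2,\alpha} = \frac{\ln 2}{\alpha}, \qquad t_{1/2,\beta} = \frac{\ln 2}{\beta}$

For example, a beta-phase half-life of 19.5 h corresponds to $\beta = \ln 2 / 19.5 \approx 0.036\ \mathrm{h}^{-1}$.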
Autologous Cartilage Chip Transplantation Improves Repair Tissue Composition Compared With Marrow Stimulation Background: Repair of chondral injuries by use of cartilage chips has recently demonstrated clinical feasibility. Purpose: To investigate in vivo cartilage repair outcome of autologous cartilage chips compared with marrow stimulation in full-thickness cartilage defects in a minipig model. Study Design: Controlled laboratory study. Methods: Six Göttingen minipigs received two 6-mm chondral defects in the medial and lateral trochlea of each knee. The two treatment groups were autologous cartilage chips embedded in fibrin glue (ACC) (n = 12) and marrow stimulation (MST) (n = 12). The animals were euthanized after 6 months, and the composition of repair tissue was quantitatively determined using histomorphometry. Semiquantitative evaluation was performed by means of the International Cartilage Repair Society (ICRS) II score. Collagen type II staining was used to further evaluate the repair tissue composition. Results: Significantly more hyaline cartilage was found in the ACC (17.1%) compared with MST (2.9%) group (P <.01). Furthermore, the ACC group had significantly less fibrous tissue (23.8%) compared with the MST group (41.1%) (P <.01). No significant difference in fibrocartilage content was found (54.7% for ACC vs 50.8% for MST). The ACC group had significantly higher ICRS II scores for tissue morphological characteristics, matrix staining, cell morphological characteristics, surface assessment, mid/deep assessment, and overall assessment (P <.05). The ACC-treated defects had significantly more collagen type II staining (54.5%) compared with the MST-treated defects (28.1%) (P <.05). Conclusion: ACC transplantation resulted in improved quality of cartilage repair tissue compared with MST at 6 months postoperatively. Clinical Relevance: Further studies are needed to investigate ACC as a possible alternative first-line treatment for focal cartilage injuries in the knee. |