file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-12.1k) | suffix (large_string, lengths 0-12k) | middle (large_string, lengths 0-7.51k) | fim_type (large_string, 4 values)
---|---|---|---|---|
client_conn.rs | self.process_common_message(common),
ClientToWriteMessage::WaitForHandshake(tx) => {
// ignore error
drop(tx.send(Ok(())));
Ok(())
}
}
}
}
impl<I> Conn<ClientTypes<I>>
where
I: AsyncWrite + AsyncRead + Send + 'static,
{
fn process_start(&mut self, start: StartRequestMessage) -> result::Result<()> {
let StartRequestMessage {
headers,
body,
resp_tx,
} = start;
let stream_id = self.next_local_stream_id();
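// Register the stream, hand the response stream back to the caller, and queue the
// initial HEADERS; the returned outgoing window drives the body pump below.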
let out_window = {
let (mut http_stream, resp_stream, out_window) = self.new_stream_data(
stream_id,
None,
InMessageStage::Initial,
ClientStreamData {},
);
if let Err(_) = resp_tx.send(Response::from_stream(resp_stream)) {
warn!("caller died");
}
http_stream.push_back(DataOrHeaders::Headers(headers));
out_window
};
self.pump_stream_to_write_loop(stream_id, body.into_part_stream(), out_window);
// Also opens latch if necessary
self.buffer_outg_conn()?;
Ok(())
}
}
pub trait ClientConnCallbacks: 'static {
// called at most once
fn goaway(&self, stream_id: StreamId, raw_error_code: u32);
}
impl ClientConn {
fn spawn_connected<I, C>(
lh: reactor::Handle,
connect: HttpFutureSend<I>,
conf: ClientConf,
callbacks: C,
) -> Self
where
I: AsyncWrite + AsyncRead + Send + 'static,
C: ClientConnCallbacks,
{
let (to_write_tx, to_write_rx) = unbounded();
let to_write_rx = Box::new(
to_write_rx
.map_err(|()| Error::IoError(io::Error::new(io::ErrorKind::Other, "to_write"))),
);
let c = ClientConn {
write_tx: to_write_tx.clone(),
};
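// Advertise ENABLE_PUSH=0 so the server does not attempt server push on this connection.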
let settings_frame = SettingsFrame::from_settings(vec![HttpSetting::EnablePush(false)]);
let mut settings = DEFAULT_SETTINGS;
settings.apply_from_frame(&settings_frame);
let handshake = connect.and_then(|conn| client_handshake(conn, settings_frame));
let conn_died_error_holder = ClientDiedErrorHolder::new();
let conn_died_error_holder_copy = conn_died_error_holder.clone();
let lh_copy = lh.clone();
let future = handshake.and_then(move |conn| {
debug!("handshake done");
let (read, write) = conn.split();
let conn_data = Conn::<ClientTypes<_>>::new(
lh_copy,
CpuPoolOption::SingleThread,
ClientConnData {
_callbacks: Box::new(callbacks),
},
conf.common,
settings,
to_write_tx.clone(),
to_write_rx,
read,
write,
conn_died_error_holder,
);
conn_data.run()
});
let future = conn_died_error_holder_copy.wrap_future(future);
lh.spawn(future);
c
}
pub fn spawn<H, C>(
lh: reactor::Handle,
addr: Box<ToClientStream>,
tls: ClientTlsOption<C>,
conf: ClientConf,
callbacks: H,
) -> Self
where
H: ClientConnCallbacks,
C: TlsConnector + Sync,
{
match tls {
ClientTlsOption::Plain => ClientConn::spawn_plain(lh.clone(), addr, conf, callbacks),
ClientTlsOption::Tls(domain, connector) => {
ClientConn::spawn_tls(lh.clone(), &domain, connector, addr, conf, callbacks)
}
}
}
pub fn spawn_plain<C>(
lh: reactor::Handle,
addr: Box<ToClientStream>,
conf: ClientConf,
callbacks: C,
) -> Self
where
C: ClientConnCallbacks,
{
let no_delay = conf.no_delay.unwrap_or(true);
let connect = addr.connect(&lh).map_err(Into::into);
let map_callback = move |socket: Box<StreamItem>| {
info!("connected to {}", addr);
if socket.is_tcp() {
socket
.set_nodelay(no_delay)
.expect("failed to set TCP_NODELAY");
}
socket
};
let connect: Box<Future<Item = _, Error = _> + Send> =
if let Some(timeout) = conf.connection_timeout {
let timer = Timer::default();
Box::new(timer.timeout(connect, timeout).map(map_callback))
} else {
Box::new(connect.map(map_callback))
};
ClientConn::spawn_connected(lh, connect, conf, callbacks)
}
pub fn spawn_tls<H, C>(
lh: reactor::Handle,
domain: &str,
connector: Arc<C>,
addr: Box<ToClientStream>,
conf: ClientConf,
callbacks: H,
) -> Self
where
H: ClientConnCallbacks,
C: TlsConnector + Sync,
{
let domain = domain.to_owned();
let connect = addr
.connect(&lh)
.map(move |c| {
info!("connected to {}", addr);
c
}).map_err(|e| e.into());
let tls_conn = connect.and_then(move |conn| {
tokio_tls_api::connect_async(&*connector, &domain, conn)
.map_err(|e| Error::IoError(io::Error::new(io::ErrorKind::Other, e)))
});
let tls_conn = tls_conn.map_err(Error::from);
ClientConn::spawn_connected(lh, Box::new(tls_conn), conf, callbacks)
}
pub fn start_request_with_resp_sender(
&self,
start: StartRequestMessage,
) -> Result<(), StartRequestMessage> {
self.write_tx
.unbounded_send(ClientToWriteMessage::Start(start))
.map_err(|send_error| match send_error.into_inner() {
ClientToWriteMessage::Start(start) => start,
_ => unreachable!(),
})
}
pub fn dump_state_with_resp_sender(&self, tx: oneshot::Sender<ConnStateSnapshot>) {
let message = ClientToWriteMessage::Common(CommonToWriteMessage::DumpState(tx));
// ignore error
drop(self.write_tx.unbounded_send(message));
}
/// For tests
#[doc(hidden)]
pub fn _dump_state(&self) -> HttpFutureSend<ConnStateSnapshot> {
let (tx, rx) = oneshot::channel();
self.dump_state_with_resp_sender(tx);
let rx =
rx.map_err(|_| Error::from(io::Error::new(io::ErrorKind::Other, "oneshot canceled")));
Box::new(rx)
}
pub fn wait_for_connect_with_resp_sender(
&self,
tx: oneshot::Sender<result::Result<()>>,
) -> std_Result<(), oneshot::Sender<result::Result<()>>> {
self.write_tx
.unbounded_send(ClientToWriteMessage::WaitForHandshake(tx))
.map_err(|send_error| match send_error.into_inner() {
ClientToWriteMessage::WaitForHandshake(tx) => tx,
_ => unreachable!(),
})
}
}
impl Service for ClientConn {
// TODO: copy-paste with Client::start_request
fn start_request(&self, headers: Headers, body: HttpStreamAfterHeaders) -> Response {
let (resp_tx, resp_rx) = oneshot::channel();
let start = StartRequestMessage {
headers: headers,
body: body,
resp_tx: resp_tx,
};
if let Err(_) = self.start_request_with_resp_sender(start) {
return Response::err(error::Error::Other("client died"));
}
let resp_rx =
resp_rx.map_err(|oneshot::Canceled| error::Error::Other("client likely died"));
let resp_rx = resp_rx.map(|r| r.into_stream_flag());
let resp_rx = resp_rx.flatten_stream();
Response::from_stream(resp_rx)
}
}
impl<I> ConnReadSideCustom for Conn<ClientTypes<I>>
where
I: AsyncWrite + AsyncRead + Send + 'static,
{
type Types = ClientTypes<I>;
fn process_headers(
&mut self,
stream_id: StreamId,
end_stream: EndStream,
headers: Headers,
) -> result::Result<Option<HttpStreamRef<ClientTypes<I>>>> {
let existing_stream = self
.get_stream_for_headers_maybe_send_error(stream_id)?
.is_some();
if !existing_stream {
return Ok(None);
}
let in_message_stage = self
.streams
.get_mut(stream_id)
.unwrap()
.stream()
.in_message_stage;
let headers_place = match in_message_stage {
InMessageStage::Initial => HeadersPlace::Initial,
InMessageStage::AfterInitialHeaders => HeadersPlace::Trailing,
InMessageStage::AfterTrailingHeaders => {
return Err(error::Error::InternalError(format!(
"closed stream must be handled before"
)));
}
};
if let Err(e) = headers.validate(RequestOrResponse::Response, headers_place) | {
warn!("invalid headers: {:?}: {:?}", e, headers);
self.send_rst_stream(stream_id, ErrorCode::ProtocolError)?;
return Ok(None);
} | conditional_block |
|
cassandra.go | instance of the Cassandra publisher
// Client is not initiated until the first data publish happens.
func NewCassandraPublisher() *CassandraPublisher {
return &CassandraPublisher{}
}
// CassandraPublisher defines Cassandra publisher
type CassandraPublisher struct {
client *cassaClient
}
// GetConfigPolicy returns plugin mandatory fields as the config policy
func (cas *CassandraPublisher) GetConfigPolicy() (*cpolicy.ConfigPolicy, error) {
cp := cpolicy.New()
config := cpolicy.NewPolicyNode()
caPathRule, err := cpolicy.NewStringRule(caPathRuleKey, false, "")
handleErr(err)
caPathRule.Description = "Path to the CA certificate for the Cassandra server"
config.Add(caPathRule)
certPathRule, err := cpolicy.NewStringRule(certPathRuleKey, false, "")
handleErr(err)
certPathRule.Description = "Path to the self signed certificate for the Cassandra client"
config.Add(certPathRule)
connectionTimeoutRule, err := cpolicy.NewIntegerRule(connectionTimeoutRuleKey, false, 2)
handleErr(err)
connectionTimeoutRule.Description = "Initial connection timeout in seconds, default: 2"
config.Add(connectionTimeoutRule)
createKeyspaceRule, err := cpolicy.NewBoolRule(createKeyspaceRuleKey, false, true)
handleErr(err)
createKeyspaceRule.Description = "Create keyspace if it does not exist, default: true"
config.Add(createKeyspaceRule)
enableServerCertVerRule, err := cpolicy.NewBoolRule(enableServerCertVerRuleKey, false, true)
handleErr(err)
enableServerCertVerRule.Description = "If true, verify a hostname and a server key, default: true"
config.Add(enableServerCertVerRule)
ignorePeerAddrRule, err := cpolicy.NewBoolRule(ignorePeerAddrRuleKey, false, false)
handleErr(err)
ignorePeerAddrRule.Description = "Turn off cluster hosts tracking, default: false"
config.Add(ignorePeerAddrRule)
initialHostLookupRule, err := cpolicy.NewBoolRule(initialHostLookupRuleKey, false, true)
handleErr(err)
initialHostLookupRule.Description = "Lookup for cluster hosts information, default: true"
config.Add(initialHostLookupRule)
keyPathRule, err := cpolicy.NewStringRule(keyPathRuleKey, false, "")
handleErr(err)
keyPathRule.Description = "Path to the private key for the Cassandra client"
config.Add(keyPathRule)
keyspaceNameRule, err := cpolicy.NewStringRule(keyspaceNameRuleKey, false, "snap")
handleErr(err)
keyspaceNameRule.Description = "Keyspace name, default: snap"
config.Add(keyspaceNameRule)
passwordRule, err := cpolicy.NewStringRule(passwordRuleKey, false, "")
handleErr(err)
passwordRule.Description = "Password used to authenticate to the Cassandra"
config.Add(passwordRule)
portRule, err := cpolicy.NewIntegerRule(portRuleKey, false, 9042)
handleErr(err)
portRule.Description = "Cassandra server port, default: 9042"
config.Add(portRule)
serverAddrRule, err := cpolicy.NewStringRule(serverAddrRuleKey, true)
handleErr(err)
serverAddrRule.Description = "Cassandra server"
config.Add(serverAddrRule)
useSslOptionsRule, err := cpolicy.NewBoolRule(sslOptionsRuleKey, false, false)
handleErr(err)
useSslOptionsRule.Description = "Not required, if true, use ssl options to connect to the Cassandra, default: false"
config.Add(useSslOptionsRule)
tableNameRule, err := cpolicy.NewStringRule(tableNameRuleKey, false, "metrics")
handleErr(err)
tableNameRule.Description = "Table name, default: metrics"
config.Add(tableNameRule)
tagIndexRule, err := cpolicy.NewStringRule(tagIndexRuleKey, false, "")
handleErr(err)
tagIndexRule.Description = "Name of tags to be indexed separated by a comma"
config.Add(tagIndexRule)
timeoutRule, err := cpolicy.NewIntegerRule(timeoutRuleKey, false, 2)
handleErr(err)
timeoutRule.Description = "Connection timeout in seconds, default: 2"
config.Add(timeoutRule)
usernameRule, err := cpolicy.NewStringRule(usernameRuleKey, false, "")
handleErr(err)
usernameRule.Description = "Name of a user used to authenticate to Cassandra"
config.Add(usernameRule)
cp.Add([]string{""}, config)
return cp, nil
}
// Publish publishes metric data to Cassandra
func (cas *CassandraPublisher) Publish(contentType string, content []byte, config map[string]ctypes.ConfigValue) error {
logger := getLogger(config)
var metrics []plugin.MetricType
switch contentType {
case plugin.SnapGOBContentType:
dec := gob.NewDecoder(bytes.NewBuffer(content))
if err := dec.Decode(&metrics); err != nil {
logger.WithFields(log.Fields{
"err": err,
}).Error("decoding error")
return err
}
default:
logger.Errorf("unknown content type '%v'", contentType)
return fmt.Errorf("Unknown content type '%s'", contentType)
}
// Only initialize client once if possible
if cas.client == nil {
co := prepareClientOptions(config)
// Initialize a new client.
tagIndex, ok := getValueForKey(config, tagIndexRuleKey).(string)
checkAssertion(ok, tagIndexRuleKey)
cas.client = NewCassaClient(co, tagIndex)
}
return cas.client.saveMetrics(metrics)
}
// Close closes the Cassandra client session
func (cas *CassandraPublisher) Close() {
if cas.client != nil {
cas.client.session.Close()
}
}
func prepareClientOptions(config map[string]ctypes.ConfigValue) clientOptions {
serverAddr, ok := getValueForKey(config, serverAddrRuleKey).(string)
checkAssertion(ok, serverAddrRuleKey)
serverPort, ok := getValueForKey(config, portRuleKey).(int)
checkAssertion(ok, portRuleKey)
timeout, ok := getValueForKey(config, timeoutRuleKey).(int)
checkAssertion(ok, timeoutRuleKey)
connTimeout, ok := getValueForKey(config, connectionTimeoutRuleKey).(int)
checkAssertion(ok, connectionTimeoutRuleKey)
initialHostLookup, ok := getValueForKey(config, initialHostLookupRuleKey).(bool)
checkAssertion(ok, initialHostLookupRuleKey)
ignorePeerAddr, ok := getValueForKey(config, ignorePeerAddrRuleKey).(bool)
checkAssertion(ok, ignorePeerAddrRuleKey)
keyspaceName, ok := getValueForKey(config, keyspaceNameRuleKey).(string)
checkAssertion(ok, keyspaceNameRuleKey)
createKeyspace, ok := getValueForKey(config, createKeyspaceRuleKey).(bool)
checkAssertion(ok, createKeyspaceRuleKey)
useSslOptions, ok := getValueForKey(config, sslOptionsRuleKey).(bool)
checkAssertion(ok, sslOptionsRuleKey)
tableName, ok := getValueForKey(config, tableNameRuleKey).(string)
checkAssertion(ok, tableNameRuleKey)
var sslOptions *sslOptions
if useSslOptions {
sslOptions = getSslOptions(config)
}
return clientOptions{
server: serverAddr,
port: serverPort,
timeout: time.Duration(timeout) * time.Second,
connectionTimeout: time.Duration(connTimeout) * time.Second,
initialHostLookup: initialHostLookup,
ignorePeerAddr: ignorePeerAddr,
keyspace: keyspaceName,
createKeyspace: createKeyspace,
ssl: sslOptions,
tableName: tableName,
}
}
func getValueForKey(cfg map[string]ctypes.ConfigValue, key string) interface{} {
if cfg == nil {
log.Error("Configuration of a plugin not found")
}
configElem := cfg[key]
if configElem == nil {
log.Errorf("Valid configuration not found for a key %s", key)
}
var value interface{}
switch configElem.Type() {
case "bool":
value = configElem.(ctypes.ConfigValueBool).Value
case "string":
value = configElem.(ctypes.ConfigValueStr).Value
case "integer":
value = configElem.(ctypes.ConfigValueInt).Value
default:
log.Errorf("Proper value type not found for a key %s", key)
}
return value
}
func getSslOptions(cfg map[string]ctypes.ConfigValue) *sslOptions {
username, ok := getValueForKey(cfg, usernameRuleKey).(string)
checkAssertion(ok, usernameRuleKey)
password, ok := getValueForKey(cfg, passwordRuleKey).(string)
checkAssertion(ok, passwordRuleKey)
keyPath, ok := getValueForKey(cfg, keyPathRuleKey).(string)
checkAssertion(ok, keyPathRuleKey)
certPath, ok := getValueForKey(cfg, certPathRuleKey).(string)
checkAssertion(ok, certPathRuleKey)
caPath, ok := getValueForKey(cfg, caPathRuleKey).(string)
checkAssertion(ok, caPathRuleKey)
enableServerCertVerification, ok := getValueForKey(cfg, enableServerCertVerRuleKey).(bool)
checkAssertion(ok, enableServerCertVerRuleKey)
options := sslOptions{
username: username,
password: password,
keyPath: keyPath,
certPath: certPath,
caPath: caPath,
enableServerCertVerification: enableServerCertVerification,
}
return &options
}
func handleErr(e error) {
if e != nil {
log.Fatalf("%s", e.Error())
}
}
func checkAssertion(ok bool, key string) {
if !ok | {
errorMsg := fmt.Sprintf("Invalid data type for a key %s", key)
err := errors.New(errorMsg)
log.Error(err)
} | conditional_block |
|
cassandra.go | data
func Meta() *plugin.PluginMeta {
return plugin.NewPluginMeta(name, version, pluginType, []string{plugin.SnapGOBContentType},
[]string{plugin.SnapGOBContentType}, plugin.RoutingStrategy(plugin.StickyRouting), plugin.ConcurrencyCount(1))
}
// NewCassandraPublisher returns an instance of the Cassandra publisher
// Client is not initiated until the first data publish happens.
func NewCassandraPublisher() *CassandraPublisher {
return &CassandraPublisher{}
}
// CassandraPublisher defines Cassandra publisher
type CassandraPublisher struct {
client *cassaClient
}
// GetConfigPolicy returns plugin mandatory fields as the config policy
func (cas *CassandraPublisher) GetConfigPolicy() (*cpolicy.ConfigPolicy, error) {
cp := cpolicy.New()
config := cpolicy.NewPolicyNode()
caPathRule, err := cpolicy.NewStringRule(caPathRuleKey, false, "")
handleErr(err)
caPathRule.Description = "Path to the CA certificate for the Cassandra server"
config.Add(caPathRule)
certPathRule, err := cpolicy.NewStringRule(certPathRuleKey, false, "")
handleErr(err)
certPathRule.Description = "Path to the self signed certificate for the Cassandra client"
config.Add(certPathRule)
connectionTimeoutRule, err := cpolicy.NewIntegerRule(connectionTimeoutRuleKey, false, 2)
handleErr(err)
connectionTimeoutRule.Description = "Initial connection timeout in seconds, default: 2"
config.Add(connectionTimeoutRule)
createKeyspaceRule, err := cpolicy.NewBoolRule(createKeyspaceRuleKey, false, true)
handleErr(err)
createKeyspaceRule.Description = "Create keyspace if it does not exist, default: true"
config.Add(createKeyspaceRule)
enableServerCertVerRule, err := cpolicy.NewBoolRule(enableServerCertVerRuleKey, false, true)
handleErr(err)
enableServerCertVerRule.Description = "If true, verify a hostname and a server key, default: true"
config.Add(enableServerCertVerRule)
ignorePeerAddrRule, err := cpolicy.NewBoolRule(ignorePeerAddrRuleKey, false, false)
handleErr(err)
ignorePeerAddrRule.Description = "Turn off cluster hosts tracking, default: false"
config.Add(ignorePeerAddrRule)
initialHostLookupRule, err := cpolicy.NewBoolRule(initialHostLookupRuleKey, false, true)
handleErr(err)
initialHostLookupRule.Description = "Lookup for cluster hosts information, default: true"
config.Add(initialHostLookupRule)
keyPathRule, err := cpolicy.NewStringRule(keyPathRuleKey, false, "")
handleErr(err)
keyPathRule.Description = "Path to the private key for the Cassandra client"
config.Add(keyPathRule)
keyspaceNameRule, err := cpolicy.NewStringRule(keyspaceNameRuleKey, false, "snap")
handleErr(err)
keyspaceNameRule.Description = "Keyspace name, default: snap"
config.Add(keyspaceNameRule)
passwordRule, err := cpolicy.NewStringRule(passwordRuleKey, false, "")
handleErr(err)
passwordRule.Description = "Password used to authenticate to the Cassandra"
config.Add(passwordRule)
portRule, err := cpolicy.NewIntegerRule(portRuleKey, false, 9042)
handleErr(err)
portRule.Description = "Cassandra server port, default: 9042"
config.Add(portRule)
serverAddrRule, err := cpolicy.NewStringRule(serverAddrRuleKey, true)
handleErr(err)
serverAddrRule.Description = "Cassandra server"
config.Add(serverAddrRule)
useSslOptionsRule, err := cpolicy.NewBoolRule(sslOptionsRuleKey, false, false)
handleErr(err)
useSslOptionsRule.Description = "Not required, if true, use ssl options to connect to the Cassandra, default: false"
config.Add(useSslOptionsRule)
tableNameRule, err := cpolicy.NewStringRule(tableNameRuleKey, false, "metrics")
handleErr(err)
tableNameRule.Description = "Table name, default: metrics"
config.Add(tableNameRule)
tagIndexRule, err := cpolicy.NewStringRule(tagIndexRuleKey, false, "")
handleErr(err)
tagIndexRule.Description = "Name of tags to be indexed separated by a comma"
config.Add(tagIndexRule)
timeoutRule, err := cpolicy.NewIntegerRule(timeoutRuleKey, false, 2)
handleErr(err)
timeoutRule.Description = "Connection timeout in seconds, default: 2"
config.Add(timeoutRule)
usernameRule, err := cpolicy.NewStringRule(usernameRuleKey, false, "")
handleErr(err)
usernameRule.Description = "Name of a user used to authenticate to Cassandra"
config.Add(usernameRule)
cp.Add([]string{""}, config)
return cp, nil
}
// Publish publishes metric data to Cassandra
func (cas *CassandraPublisher) Publish(contentType string, content []byte, config map[string]ctypes.ConfigValue) error {
logger := getLogger(config)
var metrics []plugin.MetricType
switch contentType {
case plugin.SnapGOBContentType:
dec := gob.NewDecoder(bytes.NewBuffer(content))
if err := dec.Decode(&metrics); err != nil {
logger.WithFields(log.Fields{
"err": err,
}).Error("decoding error")
return err
}
default:
logger.Errorf("unknown content type '%v'", contentType)
return fmt.Errorf("Unknown content type '%s'", contentType)
}
// Only initialize client once if possible
if cas.client == nil {
co := prepareClientOptions(config)
// Initialize a new client.
tagIndex, ok := getValueForKey(config, tagIndexRuleKey).(string)
checkAssertion(ok, tagIndexRuleKey)
cas.client = NewCassaClient(co, tagIndex)
}
return cas.client.saveMetrics(metrics)
}
// Close closes the Cassandra client session
func (cas *CassandraPublisher) Close() {
if cas.client != nil {
cas.client.session.Close()
}
}
func prepareClientOptions(config map[string]ctypes.ConfigValue) clientOptions {
serverAddr, ok := getValueForKey(config, serverAddrRuleKey).(string)
checkAssertion(ok, serverAddrRuleKey)
serverPort, ok := getValueForKey(config, portRuleKey).(int)
checkAssertion(ok, portRuleKey)
timeout, ok := getValueForKey(config, timeoutRuleKey).(int)
checkAssertion(ok, timeoutRuleKey)
connTimeout, ok := getValueForKey(config, connectionTimeoutRuleKey).(int)
checkAssertion(ok, connectionTimeoutRuleKey)
initialHostLookup, ok := getValueForKey(config, initialHostLookupRuleKey).(bool)
checkAssertion(ok, initialHostLookupRuleKey)
ignorePeerAddr, ok := getValueForKey(config, ignorePeerAddrRuleKey).(bool)
checkAssertion(ok, ignorePeerAddrRuleKey)
keyspaceName, ok := getValueForKey(config, keyspaceNameRuleKey).(string)
checkAssertion(ok, keyspaceNameRuleKey)
createKeyspace, ok := getValueForKey(config, createKeyspaceRuleKey).(bool)
checkAssertion(ok, createKeyspaceRuleKey)
useSslOptions, ok := getValueForKey(config, sslOptionsRuleKey).(bool)
checkAssertion(ok, sslOptionsRuleKey)
tableName, ok := getValueForKey(config, tableNameRuleKey).(string)
checkAssertion(ok, tableNameRuleKey)
var sslOptions *sslOptions
if useSslOptions {
sslOptions = getSslOptions(config)
}
return clientOptions{
server: serverAddr,
port: serverPort,
timeout: time.Duration(timeout) * time.Second,
connectionTimeout: time.Duration(connTimeout) * time.Second,
initialHostLookup: initialHostLookup,
ignorePeerAddr: ignorePeerAddr,
keyspace: keyspaceName,
createKeyspace: createKeyspace,
ssl: sslOptions,
tableName: tableName,
}
}
func getValueForKey(cfg map[string]ctypes.ConfigValue, key string) interface{} {
if cfg == nil {
log.Error("Configuration of a plugin not found")
}
configElem := cfg[key]
if configElem == nil {
log.Errorf("Valid configuration not found for a key %s", key)
}
var value interface{}
switch configElem.Type() {
case "bool":
value = configElem.(ctypes.ConfigValueBool).Value
case "string":
value = configElem.(ctypes.ConfigValueStr).Value
case "integer":
value = configElem.(ctypes.ConfigValueInt).Value
default:
log.Errorf("Proper value type not found for a key %s", key)
}
return value
}
func getSslOptions(cfg map[string]ctypes.ConfigValue) *sslOptions {
username, ok := getValueForKey(cfg, usernameRuleKey).(string)
checkAssertion(ok, usernameRuleKey)
password, ok := getValueForKey(cfg, passwordRuleKey).(string)
checkAssertion(ok, passwordRuleKey)
keyPath, ok := getValueForKey(cfg, keyPathRuleKey).(string)
checkAssertion(ok, keyPathRuleKey)
certPath, ok := getValueForKey(cfg, certPathRuleKey).(string)
checkAssertion(ok, certPathRuleKey)
caPath, ok := getValueForKey(cfg, caPathRuleKey).(string)
checkAssertion(ok, caPathRuleKey)
enableServerCertVerification, ok := getValueForKey(cfg, enableServerCertVerRuleKey).(bool)
checkAssertion(ok, enableServerCertVerRuleKey)
options := sslOptions{
username: username,
password: password,
keyPath: keyPath,
certPath: certPath,
caPath: caPath,
enableServerCertVerification: enableServerCertVerification,
}
return &options
}
func | handleErr | identifier_name |
|
cassandra.go | Meta {
return plugin.NewPluginMeta(name, version, pluginType, []string{plugin.SnapGOBContentType},
[]string{plugin.SnapGOBContentType}, plugin.RoutingStrategy(plugin.StickyRouting), plugin.ConcurrencyCount(1))
}
// NewCassandraPublisher returns an instance of the Cassandra publisher
// Client is not initiated until the first data publish happens.
func NewCassandraPublisher() *CassandraPublisher {
return &CassandraPublisher{}
}
// CassandraPublisher defines Cassandra publisher
type CassandraPublisher struct {
client *cassaClient
}
// GetConfigPolicy returns plugin mandatory fields as the config policy
func (cas *CassandraPublisher) GetConfigPolicy() (*cpolicy.ConfigPolicy, error) {
cp := cpolicy.New()
config := cpolicy.NewPolicyNode()
caPathRule, err := cpolicy.NewStringRule(caPathRuleKey, false, "")
handleErr(err)
caPathRule.Description = "Path to the CA certificate for the Cassandra server"
config.Add(caPathRule)
certPathRule, err := cpolicy.NewStringRule(certPathRuleKey, false, "")
handleErr(err)
certPathRule.Description = "Path to the self signed certificate for the Cassandra client"
config.Add(certPathRule)
connectionTimeoutRule, err := cpolicy.NewIntegerRule(connectionTimeoutRuleKey, false, 2)
handleErr(err)
connectionTimeoutRule.Description = "Initial connection timeout in seconds, default: 2"
config.Add(connectionTimeoutRule)
createKeyspaceRule, err := cpolicy.NewBoolRule(createKeyspaceRuleKey, false, true)
handleErr(err)
createKeyspaceRule.Description = "Create keyspace if it does not exist, default: true"
config.Add(createKeyspaceRule)
enableServerCertVerRule, err := cpolicy.NewBoolRule(enableServerCertVerRuleKey, false, true)
handleErr(err)
enableServerCertVerRule.Description = "If true, verify a hostname and a server key, default: true"
config.Add(enableServerCertVerRule)
ignorePeerAddrRule, err := cpolicy.NewBoolRule(ignorePeerAddrRuleKey, false, false)
handleErr(err)
ignorePeerAddrRule.Description = "Turn off cluster hosts tracking, default: false"
config.Add(ignorePeerAddrRule)
initialHostLookupRule, err := cpolicy.NewBoolRule(initialHostLookupRuleKey, false, true)
handleErr(err)
initialHostLookupRule.Description = "Lookup for cluster hosts information, default: true"
config.Add(initialHostLookupRule)
keyPathRule, err := cpolicy.NewStringRule(keyPathRuleKey, false, "")
handleErr(err)
keyPathRule.Description = "Path to the private key for the Cassandra client"
config.Add(keyPathRule)
keyspaceNameRule, err := cpolicy.NewStringRule(keyspaceNameRuleKey, false, "snap")
handleErr(err)
keyspaceNameRule.Description = "Keyspace name, default: snap"
config.Add(keyspaceNameRule)
passwordRule, err := cpolicy.NewStringRule(passwordRuleKey, false, "")
handleErr(err)
passwordRule.Description = "Password used to authenticate to the Cassandra"
config.Add(passwordRule)
portRule, err := cpolicy.NewIntegerRule(portRuleKey, false, 9042)
handleErr(err)
portRule.Description = "Cassandra server port, default: 9042"
config.Add(portRule)
serverAddrRule, err := cpolicy.NewStringRule(serverAddrRuleKey, true)
handleErr(err)
serverAddrRule.Description = "Cassandra server"
config.Add(serverAddrRule)
useSslOptionsRule, err := cpolicy.NewBoolRule(sslOptionsRuleKey, false, false)
handleErr(err)
useSslOptionsRule.Description = "Not required, if true, use ssl options to connect to the Cassandra, default: false"
config.Add(useSslOptionsRule)
tableNameRule, err := cpolicy.NewStringRule(tableNameRuleKey, false, "metrics")
handleErr(err)
tableNameRule.Description = "Table name, default: metrics"
config.Add(tableNameRule)
tagIndexRule, err := cpolicy.NewStringRule(tagIndexRuleKey, false, "")
handleErr(err)
tagIndexRule.Description = "Name of tags to be indexed separated by a comma"
config.Add(tagIndexRule)
timeoutRule, err := cpolicy.NewIntegerRule(timeoutRuleKey, false, 2)
handleErr(err)
timeoutRule.Description = "Connection timeout in seconds, default: 2"
config.Add(timeoutRule)
usernameRule, err := cpolicy.NewStringRule(usernameRuleKey, false, "")
handleErr(err)
usernameRule.Description = "Name of a user used to authenticate to Cassandra"
config.Add(usernameRule)
cp.Add([]string{""}, config)
return cp, nil
}
// Publish publishes metric data to Cassandra
func (cas *CassandraPublisher) Publish(contentType string, content []byte, config map[string]ctypes.ConfigValue) error {
logger := getLogger(config)
var metrics []plugin.MetricType
switch contentType {
case plugin.SnapGOBContentType:
dec := gob.NewDecoder(bytes.NewBuffer(content))
if err := dec.Decode(&metrics); err != nil {
logger.WithFields(log.Fields{
"err": err,
}).Error("decoding error")
return err
}
default:
logger.Errorf("unknown content type '%v'", contentType)
return fmt.Errorf("Unknown content type '%s'", contentType)
}
// Only initialize client once if possible
if cas.client == nil {
co := prepareClientOptions(config)
// Initialize a new client.
tagIndex, ok := getValueForKey(config, tagIndexRuleKey).(string)
checkAssertion(ok, tagIndexRuleKey)
cas.client = NewCassaClient(co, tagIndex)
}
return cas.client.saveMetrics(metrics)
}
// Close closes the Cassandra client session
func (cas *CassandraPublisher) Close() {
if cas.client != nil {
cas.client.session.Close()
}
}
func prepareClientOptions(config map[string]ctypes.ConfigValue) clientOptions {
serverAddr, ok := getValueForKey(config, serverAddrRuleKey).(string)
checkAssertion(ok, serverAddrRuleKey)
serverPort, ok := getValueForKey(config, portRuleKey).(int)
checkAssertion(ok, portRuleKey)
timeout, ok := getValueForKey(config, timeoutRuleKey).(int)
checkAssertion(ok, timeoutRuleKey)
connTimeout, ok := getValueForKey(config, connectionTimeoutRuleKey).(int)
checkAssertion(ok, connectionTimeoutRuleKey)
initialHostLookup, ok := getValueForKey(config, initialHostLookupRuleKey).(bool)
checkAssertion(ok, initialHostLookupRuleKey)
ignorePeerAddr, ok := getValueForKey(config, ignorePeerAddrRuleKey).(bool)
checkAssertion(ok, ignorePeerAddrRuleKey)
keyspaceName, ok := getValueForKey(config, keyspaceNameRuleKey).(string)
checkAssertion(ok, keyspaceNameRuleKey)
createKeyspace, ok := getValueForKey(config, createKeyspaceRuleKey).(bool)
checkAssertion(ok, createKeyspaceRuleKey)
useSslOptions, ok := getValueForKey(config, sslOptionsRuleKey).(bool)
checkAssertion(ok, sslOptionsRuleKey)
tableName, ok := getValueForKey(config, tableNameRuleKey).(string)
checkAssertion(ok, tableNameRuleKey)
var sslOptions *sslOptions
if useSslOptions {
sslOptions = getSslOptions(config)
}
return clientOptions{
server: serverAddr,
port: serverPort,
timeout: time.Duration(timeout) * time.Second,
connectionTimeout: time.Duration(connTimeout) * time.Second,
initialHostLookup: initialHostLookup,
ignorePeerAddr: ignorePeerAddr,
keyspace: keyspaceName,
createKeyspace: createKeyspace,
ssl: sslOptions,
tableName: tableName,
}
}
func getValueForKey(cfg map[string]ctypes.ConfigValue, key string) interface{} {
if cfg == nil {
log.Error("Configuration of a plugin not found")
}
configElem := cfg[key]
if configElem == nil {
log.Errorf("Valid configuration not found for a key %s", key)
}
var value interface{}
switch configElem.Type() {
case "bool":
value = configElem.(ctypes.ConfigValueBool).Value
case "string":
value = configElem.(ctypes.ConfigValueStr).Value
case "integer":
value = configElem.(ctypes.ConfigValueInt).Value
default:
log.Errorf("Proper value type not found for a key %s", key)
}
return value
}
func getSslOptions(cfg map[string]ctypes.ConfigValue) *sslOptions {
username, ok := getValueForKey(cfg, usernameRuleKey).(string)
checkAssertion(ok, usernameRuleKey)
password, ok := getValueForKey(cfg, passwordRuleKey).(string)
checkAssertion(ok, passwordRuleKey)
keyPath, ok := getValueForKey(cfg, keyPathRuleKey).(string)
checkAssertion(ok, keyPathRuleKey)
certPath, ok := getValueForKey(cfg, certPathRuleKey).(string)
checkAssertion(ok, certPathRuleKey)
caPath, ok := getValueForKey(cfg, caPathRuleKey).(string)
checkAssertion(ok, caPathRuleKey)
enableServerCertVerification, ok := getValueForKey(cfg, enableServerCertVerRuleKey).(bool)
checkAssertion(ok, enableServerCertVerRuleKey)
options := sslOptions{
username: username,
password: password,
keyPath: keyPath,
certPath: certPath,
caPath: caPath,
enableServerCertVerification: enableServerCertVerification,
}
return &options
}
| func handleErr(e error) {
if e != nil { | random_line_split |
|
cassandra.go | Rule.Description = "Path to the CA certificate for the Cassandra server"
config.Add(caPathRule)
certPathRule, err := cpolicy.NewStringRule(certPathRuleKey, false, "")
handleErr(err)
certPathRule.Description = "Path to the self signed certificate for the Cassandra client"
config.Add(certPathRule)
connectionTimeoutRule, err := cpolicy.NewIntegerRule(connectionTimeoutRuleKey, false, 2)
handleErr(err)
connectionTimeoutRule.Description = "Initial connection timeout in seconds, default: 2"
config.Add(connectionTimeoutRule)
createKeyspaceRule, err := cpolicy.NewBoolRule(createKeyspaceRuleKey, false, true)
handleErr(err)
createKeyspaceRule.Description = "Create keyspace if it does not exist, default: true"
config.Add(createKeyspaceRule)
enableServerCertVerRule, err := cpolicy.NewBoolRule(enableServerCertVerRuleKey, false, true)
handleErr(err)
enableServerCertVerRule.Description = "If true, verify a hostname and a server key, default: true"
config.Add(enableServerCertVerRule)
ignorePeerAddrRule, err := cpolicy.NewBoolRule(ignorePeerAddrRuleKey, false, false)
handleErr(err)
ignorePeerAddrRule.Description = "Turn off cluster hosts tracking, default: false"
config.Add(ignorePeerAddrRule)
initialHostLookupRule, err := cpolicy.NewBoolRule(initialHostLookupRuleKey, false, true)
handleErr(err)
initialHostLookupRule.Description = "Lookup for cluster hosts information, default: true"
config.Add(initialHostLookupRule)
keyPathRule, err := cpolicy.NewStringRule(keyPathRuleKey, false, "")
handleErr(err)
keyPathRule.Description = "Path to the private key for the Cassandra client"
config.Add(keyPathRule)
keyspaceNameRule, err := cpolicy.NewStringRule(keyspaceNameRuleKey, false, "snap")
handleErr(err)
keyspaceNameRule.Description = "Keyspace name, default: snap"
config.Add(keyspaceNameRule)
passwordRule, err := cpolicy.NewStringRule(passwordRuleKey, false, "")
handleErr(err)
passwordRule.Description = "Password used to authenticate to the Cassandra"
config.Add(passwordRule)
portRule, err := cpolicy.NewIntegerRule(portRuleKey, false, 9042)
handleErr(err)
portRule.Description = "Cassandra server port, default: 9042"
config.Add(portRule)
serverAddrRule, err := cpolicy.NewStringRule(serverAddrRuleKey, true)
handleErr(err)
serverAddrRule.Description = "Cassandra server"
config.Add(serverAddrRule)
useSslOptionsRule, err := cpolicy.NewBoolRule(sslOptionsRuleKey, false, false)
handleErr(err)
useSslOptionsRule.Description = "Not required, if true, use ssl options to connect to the Cassandra, default: false"
config.Add(useSslOptionsRule)
tableNameRule, err := cpolicy.NewStringRule(tableNameRuleKey, false, "metrics")
handleErr(err)
tableNameRule.Description = "Table name, default: metrics"
config.Add(tableNameRule)
tagIndexRule, err := cpolicy.NewStringRule(tagIndexRuleKey, false, "")
handleErr(err)
tagIndexRule.Description = "Name of tags to be indexed separated by a comma"
config.Add(tagIndexRule)
timeoutRule, err := cpolicy.NewIntegerRule(timeoutRuleKey, false, 2)
handleErr(err)
timeoutRule.Description = "Connection timeout in seconds, default: 2"
config.Add(timeoutRule)
usernameRule, err := cpolicy.NewStringRule(usernameRuleKey, false, "")
handleErr(err)
usernameRule.Description = "Name of a user used to authenticate to Cassandra"
config.Add(usernameRule)
cp.Add([]string{""}, config)
return cp, nil
}
// Publish publishes metric data to Cassandra
func (cas *CassandraPublisher) Publish(contentType string, content []byte, config map[string]ctypes.ConfigValue) error {
logger := getLogger(config)
var metrics []plugin.MetricType
switch contentType {
case plugin.SnapGOBContentType:
dec := gob.NewDecoder(bytes.NewBuffer(content))
if err := dec.Decode(&metrics); err != nil {
logger.WithFields(log.Fields{
"err": err,
}).Error("decoding error")
return err
}
default:
logger.Errorf("unknown content type '%v'", contentType)
return fmt.Errorf("Unknown content type '%s'", contentType)
}
// Only initialize client once if possible
if cas.client == nil {
co := prepareClientOptions(config)
// Initialize a new client.
tagIndex, ok := getValueForKey(config, tagIndexRuleKey).(string)
checkAssertion(ok, tagIndexRuleKey)
cas.client = NewCassaClient(co, tagIndex)
}
return cas.client.saveMetrics(metrics)
}
// Close closes the Cassandra client session
func (cas *CassandraPublisher) Close() {
if cas.client != nil {
cas.client.session.Close()
}
}
func prepareClientOptions(config map[string]ctypes.ConfigValue) clientOptions {
serverAddr, ok := getValueForKey(config, serverAddrRuleKey).(string)
checkAssertion(ok, serverAddrRuleKey)
serverPort, ok := getValueForKey(config, portRuleKey).(int)
checkAssertion(ok, portRuleKey)
timeout, ok := getValueForKey(config, timeoutRuleKey).(int)
checkAssertion(ok, timeoutRuleKey)
connTimeout, ok := getValueForKey(config, connectionTimeoutRuleKey).(int)
checkAssertion(ok, connectionTimeoutRuleKey)
initialHostLookup, ok := getValueForKey(config, initialHostLookupRuleKey).(bool)
checkAssertion(ok, initialHostLookupRuleKey)
ignorePeerAddr, ok := getValueForKey(config, ignorePeerAddrRuleKey).(bool)
checkAssertion(ok, ignorePeerAddrRuleKey)
keyspaceName, ok := getValueForKey(config, keyspaceNameRuleKey).(string)
checkAssertion(ok, keyspaceNameRuleKey)
createKeyspace, ok := getValueForKey(config, createKeyspaceRuleKey).(bool)
checkAssertion(ok, createKeyspaceRuleKey)
useSslOptions, ok := getValueForKey(config, sslOptionsRuleKey).(bool)
checkAssertion(ok, sslOptionsRuleKey)
tableName, ok := getValueForKey(config, tableNameRuleKey).(string)
checkAssertion(ok, tableNameRuleKey)
var sslOptions *sslOptions
if useSslOptions {
sslOptions = getSslOptions(config)
}
return clientOptions{
server: serverAddr,
port: serverPort,
timeout: time.Duration(timeout) * time.Second,
connectionTimeout: time.Duration(connTimeout) * time.Second,
initialHostLookup: initialHostLookup,
ignorePeerAddr: ignorePeerAddr,
keyspace: keyspaceName,
createKeyspace: createKeyspace,
ssl: sslOptions,
tableName: tableName,
}
}
func getValueForKey(cfg map[string]ctypes.ConfigValue, key string) interface{} {
if cfg == nil {
log.Error("Configuration of a plugin not found")
}
configElem := cfg[key]
if configElem == nil {
log.Errorf("Valid configuration not found for a key %s", key)
}
var value interface{}
switch configElem.Type() {
case "bool":
value = configElem.(ctypes.ConfigValueBool).Value
case "string":
value = configElem.(ctypes.ConfigValueStr).Value
case "integer":
value = configElem.(ctypes.ConfigValueInt).Value
default:
log.Errorf("Proper value type not found for a key %s", key)
}
return value
}
func getSslOptions(cfg map[string]ctypes.ConfigValue) *sslOptions {
username, ok := getValueForKey(cfg, usernameRuleKey).(string)
checkAssertion(ok, usernameRuleKey)
password, ok := getValueForKey(cfg, passwordRuleKey).(string)
checkAssertion(ok, passwordRuleKey)
keyPath, ok := getValueForKey(cfg, keyPathRuleKey).(string)
checkAssertion(ok, keyPathRuleKey)
certPath, ok := getValueForKey(cfg, certPathRuleKey).(string)
checkAssertion(ok, certPathRuleKey)
caPath, ok := getValueForKey(cfg, caPathRuleKey).(string)
checkAssertion(ok, caPathRuleKey)
enableServerCertVerification, ok := getValueForKey(cfg, enableServerCertVerRuleKey).(bool)
checkAssertion(ok, enableServerCertVerRuleKey)
options := sslOptions{
username: username,
password: password,
keyPath: keyPath,
certPath: certPath,
caPath: caPath,
enableServerCertVerification: enableServerCertVerification,
}
return &options
}
func handleErr(e error) {
if e != nil {
log.Fatalf("%s", e.Error())
}
}
func checkAssertion(ok bool, key string) {
if !ok {
errorMsg := fmt.Sprintf("Invalid data type for a key %s", key)
err := errors.New(errorMsg)
log.Error(err)
}
}
func getLogger(config map[string]ctypes.ConfigValue) *log.Entry | {
logger := log.WithFields(log.Fields{
"plugin-name": name,
"plugin-version": version,
"plugin-type": pluginType.String(),
})
// default
log.SetLevel(log.WarnLevel)
if debug, ok := config["debug"]; ok {
switch v := debug.(type) {
case ctypes.ConfigValueBool:
if v.Value {
log.SetLevel(log.DebugLevel)
return logger
}
default:
logger.WithFields(log.Fields{
"field": "debug", | identifier_body |
|
gmssl_test.go | iv
}
// SMS4-CBC Encrypt/Decrypt
func TestSMS4CBC(t *testing.T) {
/* Generate random key and IV */
key := randomKey()
iv := randomIV()
rawContent := []byte("hello")
encrypt, err := gmssl.NewCipherContext(gmssl.SMS4, key, iv, true)
PanicError(err)
ciphertext1, err := encrypt.Update(rawContent)
PanicError(err)
ciphertext2, err := encrypt.Final()
PanicError(err)
ciphertext := make([]byte, 0, len(ciphertext1)+len(ciphertext2))
ciphertext = append(ciphertext, ciphertext1...)
ciphertext = append(ciphertext, ciphertext2...)
decryptor, err := gmssl.NewCipherContext(gmssl.SMS4, key, iv, false)
PanicError(err)
plaintext1, err := decryptor.Update(ciphertext)
PanicError(err)
plaintext2, err := decryptor.Final()
PanicError(err)
plaintext := make([]byte, 0, len(plaintext1)+len(plaintext2))
plaintext = append(plaintext, plaintext1...)
plaintext = append(plaintext, plaintext2...)
if string(plaintext) != string(rawContent) {
t.Fatalf("decrypt result should be %s, but got %s", rawContent, plaintext)
}
fmt.Printf("sms4_cbc(%s) = %x\n", plaintext, ciphertext)
}
func TestSMS4ECB(t *testing.T) {
key := randomKey()
rawContent := []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10}
ciphertext, err := gmssl.CipherECBenc(rawContent, key)
PanicError(err)
fmt.Printf("ciphertext = %x\n", ciphertext)
plaintext, err := gmssl.CipherECBdec(ciphertext, key)
PanicError(err)
fmt.Printf("plaintext = %x\n", plaintext)
if string(plaintext) != string(rawContent) {
t.Fatalf("decrypt result should be %x, but got %x", rawContent, plaintext)
}
fmt.Printf("sms4_ecb(%x) = %x\n", plaintext, rawContent)
}
func TestRSA(t *testing.T) {
/* private key */
rsaArgs := [][2]string{
{"rsa_keygen_bits", "2048"},
{"rsa_keygen_pubexp", "65537"},
}
rsa, err := gmssl.GeneratePrivateKey("RSA", rsaArgs, nil)
PanicError(err)
rsaPem, err := rsa.GetPublicKeyPEM()
PanicError(err)
fmt.Println(rsaPem)
}
func TestEngineCommands(t *testing.T) {
engines := gmssl.GetEngineNames()
for _, engine := range engines {
eng, err := gmssl.NewEngineByName(engine)
fmt.Printf("\n testing on engine=[%s] \n", engine)
// FIXME: it fails when engine==dynamic
PanicError(err)
cmds, err := eng.GetCommands()
PanicError(err)
for _, cmd := range cmds {
fmt.Printf("engine[%s].cmd[%s] \n", engine, cmd)
}
}
}
var sm2pkpem string // global variable
func TestKeyPair(t *testing.T) {
/* SM2 key pair operations */
sm2keygenargs := [][2]string{
{"ec_paramgen_curve", "sm2p256v1"},
{"ec_param_enc", "named_curve"},
}
sm2sk, err := gmssl.GeneratePrivateKey("EC", sm2keygenargs, nil)
PanicError(err)
sm2sktxt, err := sm2sk.GetText()
PanicError(err)
sm2skpem, err := sm2sk.GetPEM(gmssl.SMS4, "password")
PanicError(err)
sm2pkpem, err = sm2sk.GetPublicKeyPEM()
PanicError(err)
fmt.Printf("private key as text = %s", sm2sktxt)
fmt.Printf("private key as pem = %s", sm2skpem)
fmt.Printf("public key as pem = %s", sm2pkpem)
sm2pk, err := gmssl.NewPublicKeyFromPEM(sm2pkpem)
PanicError(err)
sm3ctx := newSM3DigestContext()
/* SM2 sign/verification */
sm2zid, err := sm2pk.ComputeSM2IDDigest("1234567812345678")
PanicError(err)
err = sm3ctx.Reset()
PanicError(err)
err = sm3ctx.Update(sm2zid)
PanicError(err)
err = sm3ctx.Update([]byte("message"))
PanicError(err)
digest, err := sm3ctx.Final()
PanicError(err)
signature, err := sm2sk.Sign("sm2sign", digest, nil)
PanicError(err)
fmt.Printf("sm2sign(sm3(\"message\")) = %x\n", signature)
err = sm2pk.Verify("sm2sign", digest, signature, nil)
if err == nil {
fmt.Printf("sm2 verify success\n")
} else {
t.Fatalf("sm2 verify failure")
}
/* SM2 encrypt */
sm2msg := "01234567891123456789212345678931234567894123456789512345678961234567897123"
sm2encalg := "sm2encrypt-with-sm3"
sm2ciphertext, err := sm2pk.Encrypt(sm2encalg, []byte(sm2msg), nil)
PanicError(err)
sm2plaintext, err := sm2sk.Decrypt(sm2encalg, sm2ciphertext, nil)
PanicError(err)
fmt.Printf("sm2enc(\"%s\") = %x\n", sm2plaintext, sm2ciphertext)
if sm2msg != string(sm2plaintext) {
t.Fatalf("SM2 encryption/decryption failure")
}
}
func TestPubKeyGenerate(t *testing.T) {
/* SM2 key pair operations */
sm2keygenargs := [][2]string{
{"ec_paramgen_curve", "sm2p256v1"},
{"ec_param_enc", "named_curve"},
}
sm2sk, err := gmssl.GeneratePrivateKey("EC", sm2keygenargs, nil)
PanicError(err)
sm2pk_1, err := sm2sk.GetPublicKey()
PanicError(err)
sm2pktxt_1, err := sm2pk_1.GetText()
PanicError(err)
sm2pkpem, err = sm2sk.GetPublicKeyPEM()
PanicError(err)
sm2pk_2, err := gmssl.NewPublicKeyFromPEM(sm2pkpem)
PanicError(err)
sm2pktxt_2, err := sm2pk_2.GetText()
PanicError(err)
if sm2pktxt_1 != sm2pktxt_2 {
t.Fatalf("SM2 generate public key checkfailure")
}
}
func TestLoadPubKeyFromPem(t *testing.T) {
// Note: this test relies on the global variable sm2pkpem set by an earlier test
sm2pk, err := gmssl.NewPublicKeyFromPEM(sm2pkpem)
PanicError(err)
sm2pktxt, err := sm2pk.GetText()
PanicError(err)
sm2pkpem_, err := sm2pk.GetPEM()
PanicError(err)
fmt.Printf("public key as text --> %s \n", sm2pktxt)
fmt.Printf("public key as pem --> %s", sm2pkpem_)
}
func TestCertificate(t *testing.T) { | MQ4wDAYDVQQDDAVQS1VDQTAeFw0xNzA2MDEwMDAwMDBaFw0yMDA2MDEwMDAwMDBa
MEYxCzAJBgNVBAYTAkNOMQswCQYDVQQIDAJCSjEMMAoGA1UECgwDUEtVMQswCQYD
VQQLDAJDQTEPMA0GA1UEAwwGYW50c3NzMFkwEwYHKoZIzj0CAQYIKoEcz1UBgi0D
QgAEHpXtrYNlwesl7IyPuaHKKHqn4rHBk+tCU0l0T+zuBNMHAOJzKNDbobno6gOI
EQlVfC9q9 | /* Certificate */
certpem := `-----BEGIN CERTIFICATE-----
MIICAjCCAaigAwIBAgIBATAKBggqgRzPVQGDdTBSMQswCQYDVQQGEwJDTjELMAkG
A1UECAwCQkoxCzAJBgNVBAcMAkJKMQwwCgYDVQQKDANQS1UxCzAJBgNVBAsMAkNB | random_line_split |
gmssl_test.go | ]string{
{"rsa_keygen_bits", "2048"},
{"rsa_keygen_pubexp", "65537"},
}
rsa, err := gmssl.GeneratePrivateKey("RSA", rsaArgs, nil)
PanicError(err)
rsaPem, err := rsa.GetPublicKeyPEM()
PanicError(err)
fmt.Println(rsaPem)
}
func TestEngineCommands(t *testing.T) {
engines := gmssl.GetEngineNames()
for _, engine := range engines {
eng, err := gmssl.NewEngineByName(engine)
fmt.Printf("\n testing on engine=[%s] \n", engine)
// FIXME: it fails when engine==dynamic
PanicError(err)
cmds, err := eng.GetCommands()
PanicError(err)
for _, cmd := range cmds {
fmt.Printf("engine[%s].cmd[%s] \n", engine, cmd)
}
}
}
var sm2pkpem string // global variable
func TestKeyPair(t *testing.T) {
/* SM2 key pair operations */
sm2keygenargs := [][2]string{
{"ec_paramgen_curve", "sm2p256v1"},
{"ec_param_enc", "named_curve"},
}
sm2sk, err := gmssl.GeneratePrivateKey("EC", sm2keygenargs, nil)
PanicError(err)
sm2sktxt, err := sm2sk.GetText()
PanicError(err)
sm2skpem, err := sm2sk.GetPEM(gmssl.SMS4, "password")
PanicError(err)
sm2pkpem, err = sm2sk.GetPublicKeyPEM()
PanicError(err)
fmt.Printf("private key as text = %s", sm2sktxt)
fmt.Printf("private key as pem = %s", sm2skpem)
fmt.Printf("public key as pem = %s", sm2pkpem)
sm2pk, err := gmssl.NewPublicKeyFromPEM(sm2pkpem)
PanicError(err)
sm3ctx := newSM3DigestContext()
/* SM2 sign/verification */
sm2zid, err := sm2pk.ComputeSM2IDDigest("1234567812345678")
PanicError(err)
err = sm3ctx.Reset()
PanicError(err)
err = sm3ctx.Update(sm2zid)
PanicError(err)
err = sm3ctx.Update([]byte("message"))
PanicError(err)
digest, err := sm3ctx.Final()
PanicError(err)
signature, err := sm2sk.Sign("sm2sign", digest, nil)
PanicError(err)
fmt.Printf("sm2sign(sm3(\"message\")) = %x\n", signature)
err = sm2pk.Verify("sm2sign", digest, signature, nil)
if err == nil {
fmt.Printf("sm2 verify success\n")
} else {
t.Fatalf("sm2 verify failure")
}
/* SM2 encrypt */
sm2msg := "01234567891123456789212345678931234567894123456789512345678961234567897123"
sm2encalg := "sm2encrypt-with-sm3"
sm2ciphertext, err := sm2pk.Encrypt(sm2encalg, []byte(sm2msg), nil)
PanicError(err)
sm2plaintext, err := sm2sk.Decrypt(sm2encalg, sm2ciphertext, nil)
PanicError(err)
fmt.Printf("sm2enc(\"%s\") = %x\n", sm2plaintext, sm2ciphertext)
if sm2msg != string(sm2plaintext) {
t.Fatalf("SM2 encryption/decryption failure")
}
}
func TestPubKeyGenerate(t *testing.T) {
/* SM2 key pair operations */
sm2keygenargs := [][2]string{
{"ec_paramgen_curve", "sm2p256v1"},
{"ec_param_enc", "named_curve"},
}
sm2sk, err := gmssl.GeneratePrivateKey("EC", sm2keygenargs, nil)
PanicError(err)
sm2pk_1, err := sm2sk.GetPublicKey()
PanicError(err)
sm2pktxt_1, err := sm2pk_1.GetText()
PanicError(err)
sm2pkpem, err = sm2sk.GetPublicKeyPEM()
PanicError(err)
sm2pk_2, err := gmssl.NewPublicKeyFromPEM(sm2pkpem)
PanicError(err)
sm2pktxt_2, err := sm2pk_2.GetText()
PanicError(err)
if sm2pktxt_1 != sm2pktxt_2 {
t.Fatalf("SM2 generate public key checkfailure")
}
}
func TestLoadPubKeyFromPem(t *testing.T) {
// Note: this test relies on the global variable sm2pkpem set by an earlier test
sm2pk, err := gmssl.NewPublicKeyFromPEM(sm2pkpem)
PanicError(err)
sm2pktxt, err := sm2pk.GetText()
PanicError(err)
sm2pkpem_, err := sm2pk.GetPEM()
PanicError(err)
fmt.Printf("public key as text --> %s \n", sm2pktxt)
fmt.Printf("public key as pem --> %s", sm2pkpem_)
}
func TestCertificate(t *testing.T) {
/* Certificate */
certpem := `-----BEGIN CERTIFICATE-----
MIICAjCCAaigAwIBAgIBATAKBggqgRzPVQGDdTBSMQswCQYDVQQGEwJDTjELMAkG
A1UECAwCQkoxCzAJBgNVBAcMAkJKMQwwCgYDVQQKDANQS1UxCzAJBgNVBAsMAkNB
MQ4wDAYDVQQDDAVQS1VDQTAeFw0xNzA2MDEwMDAwMDBaFw0yMDA2MDEwMDAwMDBa
MEYxCzAJBgNVBAYTAkNOMQswCQYDVQQIDAJCSjEMMAoGA1UECgwDUEtVMQswCQYD
VQQLDAJDQTEPMA0GA1UEAwwGYW50c3NzMFkwEwYHKoZIzj0CAQYIKoEcz1UBgi0D
QgAEHpXtrYNlwesl7IyPuaHKKHqn4rHBk+tCU0l0T+zuBNMHAOJzKNDbobno6gOI
EQlVfC9q9uk9lO174GJsMLWJJqN7MHkwCQYDVR0TBAIwADAsBglghkgBhvhCAQ0E
HxYdT3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFJsrRYOA
J8gpNq0KK6yuh/Dv9SjaMB8GA1UdIwQYMBaAFH1Dhf9CqQQYHF/8euzcPROIzn0r
MAoGCCqBHM9VAYN1A0gAMEUCIQCjrQ2nyiPqod/gZdj5X1+WW4fGtyqXvXLL3lOF
31nA/gIgZOpHLnvkyggY9VFfEQVp+8t6kewSfxb4eOImSu+dZcE=
-----END CERTIFICATE-----`
cert, err := gmssl.NewCertificateFromPEM(certpem, "")
PanicError(err)
subject, err := cert.GetSubject()
PanicError(err)
issuer, err := cert.GetIssuer()
PanicError(err)
serial, err := cert.GetSerialNumber()
PanicError(err)
certpk, err := cert.GetPublicKey()
PanicError(err)
certpktxt, err := certpk.GetText()
PanicError(err)
certtxt, err := cert.GetText()
PanicError(err)
fmt.Println("Certificate:")
fmt.Printf(" Subject = %s\n", subject)
fmt.Printf(" Issuer = %s \n", issuer)
fmt.Printf(" Serial Number = %s\n", serial)
fmt.Println(certpktxt)
fmt.Println(certtxt)
}
func TestSSL(t *testing.T) {
/* SSL */
hostname := "its.pku.edu.cn"
ssl, err := gmssl.NewSSLContext("3.3", "mozilla-cacerts.pem", "")
PanicError(err)
conn, err := ssl.Connect(hostname, "443", "ALL")
PanicError(err)
result, err := conn.GetVerifyResult()
PanicError(err)
if result != 0 {
t.Fatalf("http://%s certificate verify failure\n", hostname)
}
peercert, err := conn.GetPeerCertificate()
PanicError(err)
fmt.Println(result)
peercerttxt, err := peercert.GetText()
PanicError(err)
fmt.Println(peercerttxt)
}
func | BenchmarkSM2Sign | identifier_name |
|
gmssl_test.go | iv
}
// SMS4-CBC Encrypt/Decrypt
func TestSMS4CBC(t *testing.T) | PanicError(err)
plaintext := make([]byte, 0, len(plaintext1)+len(plaintext2))
plaintext = append(plaintext, plaintext1...)
plaintext = append(plaintext, plaintext2...)
if string(plaintext) != string(rawContent) {
t.Fatalf("decrypt result should be %s, but got %s", rawContent, plaintext)
}
fmt.Printf("sms4_cbc(%s) = %x\n", plaintext, ciphertext)
}
func TestSMS4ECB(t *testing.T) {
key := randomKey()
rawContent := []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10}
ciphertext, err := gmssl.CipherECBenc(rawContent, key)
PanicError(err)
fmt.Printf("ciphertext = %x\n", ciphertext)
plaintext, err := gmssl.CipherECBdec(ciphertext, key)
PanicError(err)
fmt.Printf("plaintext = %x\n", plaintext)
if string(plaintext) != string(rawContent) {
t.Fatalf("decrypt result should be %x, but got %x", rawContent, plaintext)
}
fmt.Printf("sms4_ecb(%x) = %x\n", plaintext, rawContent)
}
func TestRSA(t *testing.T) {
/* private key */
rsaArgs := [][2]string{
{"rsa_keygen_bits", "2048"},
{"rsa_keygen_pubexp", "65537"},
}
rsa, err := gmssl.GeneratePrivateKey("RSA", rsaArgs, nil)
PanicError(err)
rsaPem, err := rsa.GetPublicKeyPEM()
PanicError(err)
fmt.Println(rsaPem)
}
func TestEngineCommands(t *testing.T) {
engines := gmssl.GetEngineNames()
for _, engine := range engines {
eng, err := gmssl.NewEngineByName(engine)
fmt.Printf("\n testing on engine=[%s] \n", engine)
// FIXME: it fails when engine==dynamic
PanicError(err)
cmds, err := eng.GetCommands()
PanicError(err)
for _, cmd := range cmds {
fmt.Printf("engine[%s].cmd[%s] \n", engine, cmd)
}
}
}
var sm2pkpem string // global variable
func TestKeyPair(t *testing.T) {
/* SM2 key pair operations */
sm2keygenargs := [][2]string{
{"ec_paramgen_curve", "sm2p256v1"},
{"ec_param_enc", "named_curve"},
}
sm2sk, err := gmssl.GeneratePrivateKey("EC", sm2keygenargs, nil)
PanicError(err)
sm2sktxt, err := sm2sk.GetText()
PanicError(err)
sm2skpem, err := sm2sk.GetPEM(gmssl.SMS4, "password")
PanicError(err)
sm2pkpem, err = sm2sk.GetPublicKeyPEM()
PanicError(err)
fmt.Printf("private key as text = %s", sm2sktxt)
fmt.Printf("private key as pem = %s", sm2skpem)
fmt.Printf("public key as pem = %s", sm2pkpem)
sm2pk, err := gmssl.NewPublicKeyFromPEM(sm2pkpem)
PanicError(err)
sm3ctx := newSM3DigestContext()
/* SM2 sign/verification */
sm2zid, err := sm2pk.ComputeSM2IDDigest("1234567812345678")
PanicError(err)
err = sm3ctx.Reset()
PanicError(err)
err = sm3ctx.Update(sm2zid)
PanicError(err)
err = sm3ctx.Update([]byte("message"))
PanicError(err)
digest, err := sm3ctx.Final()
PanicError(err)
signature, err := sm2sk.Sign("sm2sign", digest, nil)
PanicError(err)
fmt.Printf("sm2sign(sm3(\"message\")) = %x\n", signature)
err = sm2pk.Verify("sm2sign", digest, signature, nil)
if err == nil {
fmt.Printf("sm2 verify success\n")
} else {
t.Fatalf("sm2 verify failure")
}
/* SM2 encrypt */
sm2msg := "01234567891123456789212345678931234567894123456789512345678961234567897123"
sm2encalg := "sm2encrypt-with-sm3"
sm2ciphertext, err := sm2pk.Encrypt(sm2encalg, []byte(sm2msg), nil)
PanicError(err)
sm2plaintext, err := sm2sk.Decrypt(sm2encalg, sm2ciphertext, nil)
PanicError(err)
fmt.Printf("sm2enc(\"%s\") = %x\n", sm2plaintext, sm2ciphertext)
if sm2msg != string(sm2plaintext) {
t.Fatalf("SM2 encryption/decryption failure")
}
}
func TestPubKeyGenerate(t *testing.T) {
/* SM2 key pair operations */
sm2keygenargs := [][2]string{
{"ec_paramgen_curve", "sm2p256v1"},
{"ec_param_enc", "named_curve"},
}
sm2sk, err := gmssl.GeneratePrivateKey("EC", sm2keygenargs, nil)
PanicError(err)
sm2pk_1, err := sm2sk.GetPublicKey()
PanicError(err)
sm2pktxt_1, err := sm2pk_1.GetText()
PanicError(err)
sm2pkpem, err = sm2sk.GetPublicKeyPEM()
PanicError(err)
sm2pk_2, err := gmssl.NewPublicKeyFromPEM(sm2pkpem)
PanicError(err)
sm2pktxt_2, err := sm2pk_2.GetText()
PanicError(err)
if sm2pktxt_1 != sm2pktxt_2 {
t.Fatalf("SM2 generate public key checkfailure")
}
}
func TestLoadPubKeyFromPem(t *testing.T) {
// Note: this test relies on the global variable sm2pkpem set by an earlier test
sm2pk, err := gmssl.NewPublicKeyFromPEM(sm2pkpem)
PanicError(err)
sm2pktxt, err := sm2pk.GetText()
PanicError(err)
sm2pkpem_, err := sm2pk.GetPEM()
PanicError(err)
fmt.Printf("public key as text --> %s \n", sm2pktxt)
fmt.Printf("public key as pem --> %s", sm2pkpem_)
}
func TestCertificate(t *testing.T) {
/* Certificate */
certpem := `-----BEGIN CERTIFICATE-----
MIICAjCCAaigAwIBAgIBATAKBggqgRzPVQGDdTBSMQswCQYDVQQGEwJDTjELMAkG
A1UECAwCQkoxCzAJBgNVBAcMAkJKMQwwCgYDVQQKDANQS1UxCzAJBgNVBAsMAkNB
MQ4wDAYDVQQDDAVQS1VDQTAeFw0xNzA2MDEwMDAwMDBaFw0yMDA2MDEwMDAwMDBa
MEYxCzAJBgNVBAYTAkNOMQswCQYDVQQIDAJCSjEMMAoGA1UECgwDUEtVMQswCQYD
VQQLDAJDQTEPMA0GA1UEAwwGYW50c3NzMFkwEwYHKoZIzj0CAQYIKoEcz1UBgi0D
QgAEHpXtrYNlwesl7IyPuaHKKHqn4rHBk+tCU0l0T+zuBNMHAOJzKNDbobno6gOI
EQlVfC9 | {
/* Generate random key and IV */
key := randomKey()
iv := randomIV()
rawContent := []byte("hello")
encrypt, err := gmssl.NewCipherContext(gmssl.SMS4, key, iv, true)
PanicError(err)
ciphertext1, err := encrypt.Update(rawContent)
PanicError(err)
ciphertext2, err := encrypt.Final()
PanicError(err)
ciphertext := make([]byte, 0, len(ciphertext1)+len(ciphertext2))
ciphertext = append(ciphertext, ciphertext1...)
ciphertext = append(ciphertext, ciphertext2...)
decryptor, err := gmssl.NewCipherContext(gmssl.SMS4, key, iv, false)
PanicError(err)
plaintext1, err := decryptor.Update(ciphertext)
PanicError(err)
plaintext2, err := decryptor.Final() | identifier_body |
gmssl_test.go |
}
// SMS4-CBC Encrypt/Decrypt
func TestSMS4CBC(t *testing.T) {
/* Generate random key and IV */
key := randomKey()
iv := randomIV()
rawContent := []byte("hello")
encrypt, err := gmssl.NewCipherContext(gmssl.SMS4, key, iv, true)
PanicError(err)
ciphertext1, err := encrypt.Update(rawContent)
PanicError(err)
ciphertext2, err := encrypt.Final()
PanicError(err)
ciphertext := make([]byte, 0, len(ciphertext1)+len(ciphertext2))
ciphertext = append(ciphertext, ciphertext1...)
ciphertext = append(ciphertext, ciphertext2...)
decryptor, err := gmssl.NewCipherContext(gmssl.SMS4, key, iv, false)
PanicError(err)
plaintext1, err := decryptor.Update(ciphertext)
PanicError(err)
plaintext2, err := decryptor.Final()
PanicError(err)
plaintext := make([]byte, 0, len(plaintext1)+len(plaintext2))
plaintext = append(plaintext, plaintext1...)
plaintext = append(plaintext, plaintext2...)
if string(plaintext) != string(rawContent) |
fmt.Printf("sms4_cbc(%s) = %x\n", plaintext, ciphertext)
}
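// Note: randomKey and randomIV are not defined in this excerpt. A minimal
// sketch of what they are assumed to do (a 16-byte SMS4 key/IV drawn from the
// standard library CSPRNG via crypto/rand) could look like:
//
//	func randomKey() []byte {
//		key := make([]byte, 16)
//		if _, err := rand.Read(key); err != nil {
//			panic(err)
//		}
//		return key
//	}
//
//	func randomIV() []byte {
//		iv := make([]byte, 16)
//		if _, err := rand.Read(iv); err != nil {
//			panic(err)
//		}
//		return iv
//	}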
func TestSMS4ECB(t *testing.T) {
key := randomKey()
rawContent := []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10}
ciphertext, err := gmssl.CipherECBenc(rawContent, key)
PanicError(err)
fmt.Printf("ciphertext = %x\n", ciphertext)
plaintext, err := gmssl.CipherECBdec(ciphertext, key)
PanicError(err)
fmt.Printf("plaintext = %x\n", plaintext)
if string(plaintext) != string(rawContent) {
t.Fatalf("decrypt result should be %x, but got %x", rawContent, plaintext)
}
fmt.Printf("sms4_ecb(%x) = %x\n", plaintext, rawContent)
}
func TestRSA(t *testing.T) {
/* private key */
rsaArgs := [][2]string{
{"rsa_keygen_bits", "2048"},
{"rsa_keygen_pubexp", "65537"},
}
rsa, err := gmssl.GeneratePrivateKey("RSA", rsaArgs, nil)
PanicError(err)
rsaPem, err := rsa.GetPublicKeyPEM()
PanicError(err)
fmt.Println(rsaPem)
}
func TestEngineCommands(t *testing.T) {
engines := gmssl.GetEngineNames()
for _, engine := range engines {
eng, err := gmssl.NewEngineByName(engine)
fmt.Printf("\n testing on engine=[%s] \n", engine)
// FIXME: it fails when engine==dynamic
PanicError(err)
cmds, err := eng.GetCommands()
PanicError(err)
for _, cmd := range cmds {
fmt.Printf("engine[%s].cmd[%s] \n", engine, cmd)
}
}
}
var sm2pkpem string // global variable
func TestKeyPair(t *testing.T) {
/* SM2 key pair operations */
sm2keygenargs := [][2]string{
{"ec_paramgen_curve", "sm2p256v1"},
{"ec_param_enc", "named_curve"},
}
sm2sk, err := gmssl.GeneratePrivateKey("EC", sm2keygenargs, nil)
PanicError(err)
sm2sktxt, err := sm2sk.GetText()
PanicError(err)
sm2skpem, err := sm2sk.GetPEM(gmssl.SMS4, "password")
PanicError(err)
sm2pkpem, err = sm2sk.GetPublicKeyPEM()
PanicError(err)
fmt.Printf("private key as text = %s", sm2sktxt)
fmt.Printf("private key as pem = %s", sm2skpem)
fmt.Printf("public key as pem = %s", sm2pkpem)
sm2pk, err := gmssl.NewPublicKeyFromPEM(sm2pkpem)
PanicError(err)
sm3ctx := newSM3DigestContext()
/* SM2 sign/verification */
sm2zid, err := sm2pk.ComputeSM2IDDigest("1234567812345678")
PanicError(err)
err = sm3ctx.Reset()
PanicError(err)
err = sm3ctx.Update(sm2zid)
PanicError(err)
err = sm3ctx.Update([]byte("message"))
PanicError(err)
digest, err := sm3ctx.Final()
PanicError(err)
signature, err := sm2sk.Sign("sm2sign", digest, nil)
PanicError(err)
fmt.Printf("sm2sign(sm3(\"message\")) = %x\n", signature)
err = sm2pk.Verify("sm2sign", digest, signature, nil)
if err == nil {
fmt.Printf("sm2 verify success\n")
} else {
t.Fatalf("sm2 verify failure")
}
/* SM2 encrypt */
sm2msg := "01234567891123456789212345678931234567894123456789512345678961234567897123"
sm2encalg := "sm2encrypt-with-sm3"
sm2ciphertext, err := sm2pk.Encrypt(sm2encalg, []byte(sm2msg), nil)
PanicError(err)
sm2plaintext, err := sm2sk.Decrypt(sm2encalg, sm2ciphertext, nil)
PanicError(err)
fmt.Printf("sm2enc(\"%s\") = %x\n", sm2plaintext, sm2ciphertext)
if sm2msg != string(sm2plaintext) {
t.Fatalf("SM2 encryption/decryption failure")
}
}
func TestPubKeyGenerate(t *testing.T) {
/* SM2 key pair operations */
sm2keygenargs := [][2]string{
{"ec_paramgen_curve", "sm2p256v1"},
{"ec_param_enc", "named_curve"},
}
sm2sk, err := gmssl.GeneratePrivateKey("EC", sm2keygenargs, nil)
PanicError(err)
sm2pk_1, err := sm2sk.GetPublicKey()
PanicError(err)
sm2pktxt_1, err := sm2pk_1.GetText()
PanicError(err)
sm2pkpem, err = sm2sk.GetPublicKeyPEM()
PanicError(err)
sm2pk_2, err := gmssl.NewPublicKeyFromPEM(sm2pkpem)
PanicError(err)
sm2pktxt_2, err := sm2pk_2.GetText()
PanicError(err)
if sm2pktxt_1 != sm2pktxt_2 {
t.Fatalf("SM2 generate public key checkfailure")
}
}
func TestLoadPubKeyFromPem(t *testing.T) {
// Note: this test relies on the package-level variable sm2pkpem being set by an earlier test
sm2pk, err := gmssl.NewPublicKeyFromPEM(sm2pkpem)
PanicError(err)
sm2pktxt, err := sm2pk.GetText()
PanicError(err)
sm2pkpem_, err := sm2pk.GetPEM()
PanicError(err)
fmt.Printf("public key as text --> %s \n", sm2pktxt)
fmt.Printf("public key as pem --> %s", sm2pkpem_)
}
func TestCertificate(t *testing.T) {
/* Certificate */
certpem := `-----BEGIN CERTIFICATE-----
MIICAjCCAaigAwIBAgIBATAKBggqgRzPVQGDdTBSMQswCQYDVQQGEwJDTjELMAkG
A1UECAwCQkoxCzAJBgNVBAcMAkJKMQwwCgYDVQQKDANQS1UxCzAJBgNVBAsMAkNB
MQ4wDAYDVQQDDAVQS1VDQTAeFw0xNzA2MDEwMDAwMDBaFw0yMDA2MDEwMDAwMDBa
MEYxCzAJBgNVBAYTAkNOMQswCQYDVQQIDAJCSjEMMAoGA1UECgwDUEtVMQswCQYD
VQQLDAJDQTEPMA0GA1UEAwwGYW50c3NzMFkwEwYHKoZIzj0CAQYIKoEcz1UBgi0D
QgAEHpXtrYNlwesl7IyPuaHKKHqn4rHBk+tCU0l0T+zuBNMHAOJzKNDbobno6gOI
EQlVfC9 | {
t.Fatalf("decrypt result should be %s, but got %s", rawContent, plaintext)
} | conditional_block |
mod.rs | SubscriptionCursor},
};
pub use crate::nakadi_types::Error;
mod typed;
pub use typed::*;
/// Information on the current batch passed to a `BatchHandler`.
///
/// The `frame_id` is monotonically increasing for each `BatchHandler`
/// within a stream (same `StreamId`)
/// as long as a dispatch strategy which keeps the ordering of
/// events is chosen. There may be gaps between the ids.
#[derive(Debug)]
#[non_exhaustive]
pub struct BatchMeta<'a> {
pub stream_id: StreamId,
pub cursor: &'a SubscriptionCursor,
/// Timestamp when the first byte was received
pub frame_started_at: Instant,
/// Timestamp when the frame was completed
pub frame_completed_at: Instant,
pub frame_id: usize,
pub n_events: usize,
}
/// Returned by a `BatchHandler` and tells `Nakadion`
/// how to continue.
#[derive(Debug, Clone)]
pub enum BatchPostAction {
/// Commit the batch
Commit(BatchStats),
/// Do not commit the batch and continue
///
/// Use if committed "manually" within the handler
DoNotCommit(BatchStats),
/// Abort the current stream and reconnect
AbortStream(String),
/// Abort the consumption and shut down
ShutDown(String),
}
impl BatchPostAction {
pub fn commit_no_stats() -> Self {
BatchPostAction::Commit(BatchStats::default())
}
pub fn commit(t_deserialize: Duration) -> Self {
BatchPostAction::Commit(BatchStats {
t_deserialize: Some(t_deserialize),
})
}
pub fn do_not_commit_no_stats() -> Self {
BatchPostAction::DoNotCommit(BatchStats::default())
}
pub fn do_not_commit(t_deserialize: Duration) -> Self {
BatchPostAction::DoNotCommit(BatchStats {
t_deserialize: Some(t_deserialize),
})
}
}
/// Statistics on the processed batch
#[derive(Default, Debug, Clone, PartialEq, Eq)]
#[non_exhaustive]
pub struct BatchStats {
/// The time it took to deserialize the batch
pub t_deserialize: Option<Duration>,
}
/// Returned by a `BatchHandler` when queried
/// on inactivity.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum InactivityAnswer {
KeepMeAlive,
KillMe,
}
impl InactivityAnswer {
/// Returns `true` if the `BatchHandler` should be killed.
pub fn should_kill(self) -> bool {
self == InactivityAnswer::KillMe
}
/// Returns `true` if the `BatchHandler` should stay alive.
pub fn should_stay_alive(self) -> bool {
self == InactivityAnswer::KeepMeAlive
}
}
/// A handler that implements batch processing logic.
///
/// This trait will be called by Nakadion when a batch has to
/// be processed. The `BatchHandler` only receives an `EventType`
/// and a slice of bytes that contains the batch.
///
/// The `events` slice always contains a JSON encoded array of events.
///
/// # Hint
///
/// The `handle` method gets called on `&mut self`.
///
/// # Example
///
/// ```rust
/// use futures::FutureExt;
///
/// use nakadion::handler::{BatchHandler, BatchPostAction, BatchMeta, Bytes, BatchHandlerFuture};
/// use nakadion::nakadi_types::subscription::EventTypeName;
///
/// // Use a struct to maintain state
/// struct MyHandler {
/// pub count: i32,
/// }
///
/// // Implement the processing logic by implementing `BatchHandler`
/// impl BatchHandler for MyHandler {
/// fn handle(&mut self, _events: Bytes, _meta: BatchMeta) -> BatchHandlerFuture {
/// async move {
/// self.count += 1;
/// BatchPostAction::commit_no_stats()
/// }.boxed()
/// }
/// }
/// ```
pub trait BatchHandler: Send {
/// Handle a batch of bytes
fn handle<'a>(&'a mut self, events: Bytes, meta: BatchMeta<'a>) -> BatchHandlerFuture<'a>;
/// Periodically called if there were no events for a given time.
///
/// This method will only be called if the parameter `handler_inactivity_timeout_secs`
/// was set for the `Consumer`
fn on_inactive(
&mut self,
_inactive_for: Duration,
_last_activity: Instant,
) -> InactivityAnswer {
InactivityAnswer::KeepMeAlive
}
}
/// Simple wrapper for `BatchHandlers` from closures
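///
/// # Example
///
/// An illustrative sketch (marked `ignore`, so it is not compiled as a doctest);
/// it assumes `HandlerFn` is re-exported from `nakadion::handler` alongside the
/// other handler types:
///
/// ```ignore
/// use futures::FutureExt;
///
/// use nakadion::handler::{
///     BatchHandlerFuture, BatchMeta, BatchPostAction, Bytes, HandlerFn,
/// };
///
/// fn handle_batch<'a>(_events: Bytes, _meta: BatchMeta<'a>) -> BatchHandlerFuture<'a> {
///     async { BatchPostAction::commit_no_stats() }.boxed()
/// }
///
/// // `HandlerFn` turns the free function (or a closure) into a `BatchHandler`.
/// let mut handler = HandlerFn(handle_batch);
/// ```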
pub struct HandlerFn<F>(pub F);
impl<F> BatchHandler for HandlerFn<F>
where
F: for<'a> FnMut(Bytes, BatchMeta<'a>) -> BatchHandlerFuture<'a> + Send,
{
fn handle<'a>(&'a mut self, events: Bytes, meta: BatchMeta<'a>) -> BatchHandlerFuture<'a> {
(self.0)(events, meta)
}
}
/// Defines what a `BatchHandler` will receive.
///
/// This value should be the same for the whole lifetime of the
/// `BatchHandler`. "Should" because in the end it is the
/// `BatchHandlerFactory` which returns `BatchHandler`s. But it
/// is guaranteed that `Nakadion` will only pass events to a `BatchHandler`
/// as defined by the `DispatchStrategy`.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum HandlerAssignment {
/// Everything can be passed to the `BatchHandler`.
Unspecified,
/// The `BatchHandler` will only receive events
/// of the given event type but from any partition.
EventType(EventTypeName),
/// The `BatchHandler` will only receive events
/// of the given event type on the given partition.
EventTypePartition(EventTypePartition),
}
impl HandlerAssignment {
pub fn event_type(&self) -> Option<&EventTypeName> {
self.event_type_and_partition().0
}
pub fn partition(&self) -> Option<&PartitionId> {
self.event_type_and_partition().1
}
pub fn event_type_and_partition(&self) -> (Option<&EventTypeName>, Option<&PartitionId>) {
match self {
HandlerAssignment::Unspecified => (None, None),
HandlerAssignment::EventType(event_type) => (Some(&event_type), None),
HandlerAssignment::EventTypePartition(ref etp) => {
(Some(etp.event_type()), Some(etp.partition()))
}
}
}
pub fn into_event_type_and_partition(self) -> (Option<EventTypeName>, Option<PartitionId>) {
match self {
HandlerAssignment::Unspecified => (None, None),
HandlerAssignment::EventType(event_type) => (Some(event_type), None),
HandlerAssignment::EventTypePartition(etp) => {
let (a, b) = etp.split();
(Some(a), Some(b))
}
}
}
}
impl fmt::Display for HandlerAssignment {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
HandlerAssignment::Unspecified => write!(f, "[unspecified]")?,
HandlerAssignment::EventType(ref event_type) => {
write!(f, "[event_type={}]", event_type)?
}
HandlerAssignment::EventTypePartition(ref event_type_partition) => write!(
f,
"[event_type={}, partition={}]",
event_type_partition.event_type(), | )?,
}
Ok(())
}
}
/// A factory that creates `BatchHandler`s.
///
/// # Usage
///
/// A `BatchHandlerFactory` can be used in two ways:
///
/// * It does not contain any state it shares with the created `BatchHandler`s.
/// This is useful when incoming data is partitioned in a way that all
/// `BatchHandler`s act only on data that never appears on another partition.
///
/// * It contains state that is shared with the `BatchHandler`s. E.g. a cache
/// that contains data that can appear on other partitions.
/// # Example
///
/// ```rust
/// use std::sync::{Arc, Mutex};
/// use futures::{FutureExt, future::BoxFuture};
///
/// use nakadion::handler::*;
///
/// // Use a struct to maintain state
/// struct MyHandler(Arc<Mutex<i32>>);
///
/// // Implement the processing logic by implementing `BatchHandler`
/// impl BatchHandler for MyHandler {
/// fn handle(&mut self, _events: Bytes, _meta: BatchMeta) -> BatchHandlerFuture {
/// async move {
/// *self.0.lock().unwrap() += 1;
/// BatchPostAction::commit_no_stats()
/// }.boxed()
/// }
/// }
///
/// // We keep shared state for all handlers in the `BatchHandlerFactory`
/// struct MyBatchHandlerFactory(Arc<Mutex<i32>>);
///
/// // Now we implement the trait `BatchHandlerFactory` to control how
/// // our `BatchHandler`s are created
/// impl BatchHandlerFactory for MyBatchHandlerFactory {
/// fn handler(
/// &self,
/// _assignment: &HandlerAssignment,
/// ) -> BoxFuture<Result<Box<dyn BatchHandler>, Error>> {
/// async move {
/// Ok(Box::new(MyHandler(self.0.clone())) as Box<_>)
/// }.boxed()
/// }
/// }
///
/// let count = Arc::new(Mutex::new(0));
///
/// let factory = MyBatchHandlerFactory(count.clone());
/// ```
pub trait | event_type_partition.partition() | random_line_split |
mod.rs | SubscriptionCursor},
};
pub use crate::nakadi_types::Error;
mod typed;
pub use typed::*;
/// Information on the current batch passed to a `BatchHandler`.
///
/// The `frame_id` is monotonically increasing for each `BatchHandler`
/// within a stream (same `StreamId`)
/// as long as a dispatch strategy which keeps the ordering of
/// events is chosen. There may be gaps between the ids.
#[derive(Debug)]
#[non_exhaustive]
pub struct BatchMeta<'a> {
pub stream_id: StreamId,
pub cursor: &'a SubscriptionCursor,
/// Timestamp when the first byte was received
pub frame_started_at: Instant,
/// Timestamp when the frame was completed
pub frame_completed_at: Instant,
pub frame_id: usize,
pub n_events: usize,
}
/// Returned by a `BatchHandler` and tells `Nakadion`
/// how to continue.
#[derive(Debug, Clone)]
pub enum BatchPostAction {
/// Commit the batch
Commit(BatchStats),
/// Do not commit the batch and continue
///
/// Use if committed "manually" within the handler
DoNotCommit(BatchStats),
/// Abort the current stream and reconnect
AbortStream(String),
/// Abort the consumption and shut down
ShutDown(String),
}
impl BatchPostAction {
pub fn commit_no_stats() -> Self {
BatchPostAction::Commit(BatchStats::default())
}
pub fn commit(t_deserialize: Duration) -> Self {
BatchPostAction::Commit(BatchStats {
t_deserialize: Some(t_deserialize),
})
}
pub fn do_not_commit_no_stats() -> Self {
BatchPostAction::DoNotCommit(BatchStats::default())
}
pub fn do_not_commit(t_deserialize: Duration) -> Self {
BatchPostAction::DoNotCommit(BatchStats {
t_deserialize: Some(t_deserialize),
})
}
}
/// Statistics on the processed batch
#[derive(Default, Debug, Clone, PartialEq, Eq)]
#[non_exhaustive]
pub struct BatchStats {
/// The time it took to deserialize the batch
pub t_deserialize: Option<Duration>,
}
/// Returned by a `BatchHandler` when queried
/// on inactivity.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum InactivityAnswer {
KeepMeAlive,
KillMe,
}
impl InactivityAnswer {
/// Returns `true` if the `BatchHandler` should be killed.
pub fn should_kill(self) -> bool {
self == InactivityAnswer::KillMe
}
/// Returns `true` if the `BatchHandler` should stay alive.
pub fn should_stay_alive(self) -> bool {
self == InactivityAnswer::KeepMeAlive
}
}
/// A handler that implements batch processing logic.
///
/// This trait will be called by Nakadion when a batch has to
/// be processed. The `BatchHandler` only receives an `EventType`
/// and a slice of bytes that contains the batch.
///
/// The `events` slice always contains a JSON encoded array of events.
///
/// # Hint
///
/// The `handle` method gets called on `&mut self`.
///
/// # Example
///
/// ```rust
/// use futures::FutureExt;
///
/// use nakadion::handler::{BatchHandler, BatchPostAction, BatchMeta, Bytes, BatchHandlerFuture};
/// use nakadion::nakadi_types::subscription::EventTypeName;
///
/// // Use a struct to maintain state
/// struct MyHandler {
/// pub count: i32,
/// }
///
/// // Implement the processing logic by implementing `BatchHandler`
/// impl BatchHandler for MyHandler {
/// fn handle(&mut self, _events: Bytes, _meta: BatchMeta) -> BatchHandlerFuture {
/// async move {
/// self.count += 1;
/// BatchPostAction::commit_no_stats()
/// }.boxed()
/// }
/// }
/// ```
pub trait BatchHandler: Send {
/// Handle a batch of bytes
fn handle<'a>(&'a mut self, events: Bytes, meta: BatchMeta<'a>) -> BatchHandlerFuture<'a>;
/// Periodically called if there were no events for a given time.
///
/// This method will only be called if the parameter `handler_inactivity_timeout_secs`
/// was set for the `Consumer`
fn on_inactive(
&mut self,
_inactive_for: Duration,
_last_activity: Instant,
) -> InactivityAnswer {
InactivityAnswer::KeepMeAlive
}
}
/// Simple wrapper for `BatchHandlers` from closures
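///
/// # Example
///
/// An illustrative sketch (marked `ignore`, so it is not compiled as a doctest);
/// it assumes `HandlerFn` is re-exported from `nakadion::handler` alongside the
/// other handler types:
///
/// ```ignore
/// use futures::FutureExt;
///
/// use nakadion::handler::{
///     BatchHandlerFuture, BatchMeta, BatchPostAction, Bytes, HandlerFn,
/// };
///
/// fn handle_batch<'a>(_events: Bytes, _meta: BatchMeta<'a>) -> BatchHandlerFuture<'a> {
///     async { BatchPostAction::commit_no_stats() }.boxed()
/// }
///
/// // `HandlerFn` turns the free function (or a closure) into a `BatchHandler`.
/// let mut handler = HandlerFn(handle_batch);
/// ```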
pub struct HandlerFn<F>(pub F);
impl<F> BatchHandler for HandlerFn<F>
where
F: for<'a> FnMut(Bytes, BatchMeta<'a>) -> BatchHandlerFuture<'a> + Send,
{
fn handle<'a>(&'a mut self, events: Bytes, meta: BatchMeta<'a>) -> BatchHandlerFuture<'a> {
(self.0)(events, meta)
}
}
/// Defines what a `BatchHandler` will receive.
///
/// This value should be the same for the whole lifetime of the
/// `BatchHandler`. "Should" because in the end it is the
/// `BatchHandlerFactory` which returns `BatchHandler`s. But it
/// is guaranteed that `Nakadion` will only pass events to a `BatchHandler`
/// as defined by the `DispatchStrategy`.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum HandlerAssignment {
/// Everything can be passed to the `BatchHandler`.
Unspecified,
/// The `BatchHandler` will only receive events
/// of the given event type but from any partition.
EventType(EventTypeName),
/// The `BatchHandler` will only receive events
/// of the given event type on the given partition.
EventTypePartition(EventTypePartition),
}
impl HandlerAssignment {
pub fn | (&self) -> Option<&EventTypeName> {
self.event_type_and_partition().0
}
pub fn partition(&self) -> Option<&PartitionId> {
self.event_type_and_partition().1
}
pub fn event_type_and_partition(&self) -> (Option<&EventTypeName>, Option<&PartitionId>) {
match self {
HandlerAssignment::Unspecified => (None, None),
HandlerAssignment::EventType(event_type) => (Some(&event_type), None),
HandlerAssignment::EventTypePartition(ref etp) => {
(Some(etp.event_type()), Some(etp.partition()))
}
}
}
pub fn into_event_type_and_partition(self) -> (Option<EventTypeName>, Option<PartitionId>) {
match self {
HandlerAssignment::Unspecified => (None, None),
HandlerAssignment::EventType(event_type) => (Some(event_type), None),
HandlerAssignment::EventTypePartition(etp) => {
let (a, b) = etp.split();
(Some(a), Some(b))
}
}
}
}
impl fmt::Display for HandlerAssignment {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
HandlerAssignment::Unspecified => write!(f, "[unspecified]")?,
HandlerAssignment::EventType(ref event_type) => {
write!(f, "[event_type={}]", event_type)?
}
HandlerAssignment::EventTypePartition(ref event_type_partition) => write!(
f,
"[event_type={}, partition={}]",
event_type_partition.event_type(),
event_type_partition.partition()
)?,
}
Ok(())
}
}
/// A factory that creates `BatchHandler`s.
///
/// # Usage
///
/// A `BatchHandlerFactory` can be used in two ways:
///
/// * It does not contain any state it shares with the created `BatchHandler`s.
/// This is useful when incoming data is partitioned in a way that all
/// `BatchHandler`s act only on data that never appears on another partition.
///
/// * It contains state that is shared with the `BatchHandler`s. E.g. a cache
/// that contains data that can appear on other partitions.
/// # Example
///
/// ```rust
/// use std::sync::{Arc, Mutex};
/// use futures::{FutureExt, future::BoxFuture};
///
/// use nakadion::handler::*;
///
/// // Use a struct to maintain state
/// struct MyHandler(Arc<Mutex<i32>>);
///
/// // Implement the processing logic by implementing `BatchHandler`
/// impl BatchHandler for MyHandler {
/// fn handle(&mut self, _events: Bytes, _meta: BatchMeta) -> BatchHandlerFuture {
/// async move {
/// *self.0.lock().unwrap() += 1;
/// BatchPostAction::commit_no_stats()
/// }.boxed()
/// }
/// }
///
/// // We keep shared state for all handlers in the `BatchHandlerFactory`
/// struct MyBatchHandlerFactory(Arc<Mutex<i32>>);
///
/// // Now we implement the trait `BatchHandlerFactory` to control how
/// // our `BatchHandler`s are created
/// impl BatchHandlerFactory for MyBatchHandlerFactory {
/// fn handler(
/// &self,
/// _assignment: &HandlerAssignment,
/// ) -> BoxFuture<Result<Box<dyn BatchHandler>, Error>> {
/// async move {
/// Ok(Box::new(MyHandler(self.0.clone())) as Box<_>)
/// }.boxed()
/// }
/// }
///
/// let count = Arc::new(Mutex::new(0));
///
/// let factory = MyBatchHandlerFactory(count.clone());
/// ```
pub | event_type | identifier_name |
ebest.py | ] = row_res
self.waiting = False
def query(res, send, cont=False, timeout=10):
""" Query 요청
@arg res[str]`t1102` 사용할 res 파일명
@arg send[dict] 전송할 데이터
{
'Block1': [{'Field1': 'Value1', 'Field2': 'Value2'}, {...}, {...}],
'Block2': {'Field3': 'Value3', 'Field4': 'Value4'}
}
단일 InBlock의 경우에는 아래와 같이 간단한 형식도 입력받음
{'Field1': 'Value1', 'Field2': 'Value2'}
@arg cont[*bool=False] 연속조회 여부
@arg timeout[*int=10] 서버 응답 최대 대기 시간, -1인 경우 infinite time
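Example (illustrative; block and field names always come from the res file):
response = query('t1102', {'shcode': '005930'})
print(response['t1102OutBlock'])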
"""
# res 파일 로드
_query = DispatchWithEvents('XA_DataSet.XAQuery', _QueryHandler)
_query.init(res)
if not cont:
# 전송 현황 업데이트
if not res in _query_status:
_query_status[res] = []
while _query_status[res] and _query_status[res][-1] + 1 < time.time():
_query_status[res].pop()
# 초당 전송 횟수를 고려
tr_count_per_sec = _query.GetTRCountPerSec(res)
if len(_query_status[res]) >= tr_count_per_sec:
delay = max(_query_status[res][-1] + 1.05 - time.time(), 0)
time.sleep(delay)
# 기간(10분)당 전송 횟수를 고려
# TODO : 10분 제한이 걸리면 blocking state 진입
tr_count_limit = _query.GetTRCountLimit(res)
while tr_count_limit and _query.GetTRCountRequest(res) >= tr_count_limit:
time.sleep(1)
_query = DispatchWithEvents('XA_DataSet.XAQuery', _QueryHandler)
_query.init(res)
# simplified 된 input를 받았을 경우
send_first_value = list(send.values())[0]
if not (
isinstance (send_first_value, list) or
isinstance (send_first_value, dict)
):
send = { '{}InBlock'.format(res): send }
# 전송할 데이터를 설정
for block in send.keys():
if isinstance(send[block], dict):
for (k, v) in send[block].items():
_query.set_data(block, k, v)
elif isinstance(send[block], list):
for i in range(len(send[block])):
for (k, v) in send[block][i].items():
_query.set_data(block, k, v, i)
else:
raise ValueError('알 수 없는 형태의 데이터입니다')
else:
time.sleep(0.05)
# 데이터 요청
_query.Request(cont)
now = time.time()
if not cont:
_query_status[res].insert(0, now)
_query.waiting = True
while _query.waiting:
if timeout >= 0 and now + timeout < time.time():
_query.waiting = False
raise TimeoutError('Query Timeout')
PumpWaitingMessages()
return _query.response
class _RealtimeHandler:
def OnReceiveRealData(self, res):
response = {}
for field in meta_res[res]['output']['OutBlock']['fields']:
response[field['name']] = self.GetFieldData('OutBlock', field['name'])
self.callback(res, response)
class Realtime:
def __init__(self, res, callback):
self._res = res
self._instance = DispatchWithEvents('XA_DataSet.XAReal', _RealtimeHandler)
self._instance.LoadFromResFile(f'/Res/{res}.res')
self._instance.callback = callback
self.subscribed_keys = []
def subscribe(self, key=None):
if key in self.subscribed_keys:
print(f'{self._res}는 이미 {key} 데이터를 수신 중입니다.')
return None
if key:
self._instance.SetFieldData('InBlock', meta_res[self._res]['input']['InBlock']['fields'][0]['name'], key)
self._instance.AdviseRealData()
self.subscribed_keys.append(key)
def unsubscribe(self, key=None):
if key is None:
self._instance.UnadviseRealData()
else:
if key not in self.subscribed_keys:
raise ValueError(f'{self._res}는 {key} 데이터를 수신하고 있지 않습니다.')
self._instance.UnadviseRealDataWithKey(key)
@staticmethod
def listen(delay=.01):
while True:
PumpWaitingMessages()
time.sleep(delay)
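# Illustrative usage sketch for Realtime (assumes login() was already called and
# that 'S3_' is the real-time res for KOSPI trade data):
#
# def on_data(res, data):
# print(res, data)
#
# rt = Realtime('S3_', on_data)
# rt.subscribe('005930')
# Realtime.listen()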
""" Wrapper Functions
"""
def transactions_tick(shcode, interval=1, sdate=None, edate=None):
edate = edate or datetime.now().strftime('%Y%m%d')
sdate = sdate or edate
data = []
cts_date = ' '
cts_time = ' '
while True:
response = query('t8411', {
'shcode': shcode,
'ncnt': interval,
'qrycnt': 2000,
'nday': '0',
'sdate': sdate,
'edate': edate,
'cts_date': cts_date,
'cts_time': cts_time,
'comp_yn': 'Y'
}, len(data) > 0)
data = response['t8411OutBlock1'] + data
cts_date = response['t8411OutBlock']['cts_date']
cts_time = response['t8411OutBlock']['cts_time']
if not (cts_date or cts_time):
break
data = pd.DataFrame(data)
data.index = (data['date'] + data['time']).apply(lambda t: datetime.strptime(t, '%Y%m%d%H%M%S').astimezone(pytz.timezone('Asia/Seoul')))
data = data.rename(columns={'jdiff_vol': 'volumn'})
data = data[['open', 'high', 'low', 'close', 'volumn', 'jongchk', 'rate']]
return data
def transactions_min(shcode, interval=1, sdate=None, edate=None):
edate = edate or datetime.now().strftime('%Y%m%d')
sdate = sdate or edate
data = []
cts_date = ' '
cts_time = ' '
while True:
response = query('t8412', {
'shcode': shcode,
'ncnt': interval,
'qrycnt': 2000,
'nday': '0',
'sdate': sdate,
'edate': edate,
'cts_date': cts_date,
'cts_time': cts_time,
'comp_yn': 'Y'
}, len(data) > 0)
data = response['t8412OutBlock1'] + data
cts_date = response['t8412OutBlock']['cts_date']
cts_time = response['t8412OutBlock']['cts_time']
if not (cts_date or cts_time):
break
if len(data) == 0:
return None
data = pd.DataFrame(data)
data.index = (data['date'] + data['time']).apply(lambda t: datetime.strptime(t, '%Y%m%d%H%M%S').astimezone(pytz.timezone('Asia/Seoul')))
data = data.rename(columns={'jdiff_vol': 'volumn'})
data = data[['open', 'high', 'low', 'close', 'volumn', 'value', 'jongchk', 'rate']]
return data
def transactions_day(shcode, interval=1, sdate=None, edate=None):
edate = edate or datetime.now().strftime('%Y%m%d')
sdate = sdate or (datetime.now()-timedelta(31)).strftime('%Y%m%d')
data = []
cts_date = ' '
while True:
response = query('t8413', {
'shcode': shcode,
'gubun': '2',
'qrycnt': 2000,
'sdate': sdate,
'edate': edate,
'cts_date': cts_date,
'comp_yn': 'Y'
}, len(data) > 0)
data = response['t8413OutBlock1'] + data
cts_date = response['t8413OutBlock']['cts_date']
if not cts_date:
break
data = pd.DataFrame(data)
data.index = (data['date'] + '180000').apply(lambda t: datetime.strptime(t, '%Y%m%d%H%M%S').astimezone(pytz.timezone('Asia/Seoul')))
data = data.rename(columns={'jdiff_vol': 'volumn'})
data = data[['ope | n', 'high', 'low | identifier_name |
|
ebest.py | list(send.values())[0]
if not (
isinstance (send_first_value, list) or
isinstance (send_first_value, dict)
):
send = { '{}InBlock'.format(res): send }
# 전송할 데이터를 설정
for block in send.keys():
if isinstance(send[block], dict):
for (k, v) in send[block].items():
_query.set_data(block, k, v)
elif isinstance(send[block], list):
for i in range(len(send[block])):
for (k, v) in send[block][i].items():
_query.set_data(block, k, v, i)
else:
raise ValueError('알 수 없는 형태의 데이터입니다')
else:
time.sleep(0.05)
# 데이터 요청
_query.Request(cont)
now = time.time()
if not cont:
_query_status[res].insert(0, now)
_query.waiting = True
while _query.waiting:
if timeout >= 0 and now + timeout < time.time():
_query.waiting = False
raise TimeoutError('Query Timeout')
PumpWaitingMessages()
return _query.response
class _RealtimeHandler:
def OnReceiveRealData(self, res):
response = {}
for field in meta_res[res]['output']['OutBlock']['fields']:
response[field['name']] = self.GetFieldData('OutBlock', field['name'])
self.callback(res, response)
class Realtime:
def __init__(self, res, callback):
self._res = res
self._instance = DispatchWithEvents('XA_DataSet.XAReal', _RealtimeHandler)
self._instance.LoadFromResFile(f'/Res/{res}.res')
self._instance.callback = callback
self.subscribed_keys = []
def subscribe(self, key=None):
if key in self.subscribed_keys:
print(f'{self._res}는 이미 {key} 데이터를 수신 중입니다.')
return None
if key:
self._instance.SetFieldData('InBlock', meta_res[self._res]['input']['InBlock']['fields'][0]['name'], key)
self._instance.AdviseRealData()
self.subscribed_keys.append(key)
def unsubscribe(self, key=None):
if key is None:
self._instance.UnadviseRealData()
else:
if key not in self.subscribed_keys:
raise ValueError(f'{self._res}는 {key} 데이터를 수신하고 있지 않습니다.')
self._instance.UnadviseRealDataWithKey(key)
@staticmethod
def listen(delay=.01):
while True:
PumpWaitingMessages()
time.sleep(delay)
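# Illustrative usage sketch for Realtime (assumes login() was already called and
# that 'S3_' is the real-time res for KOSPI trade data):
#
# def on_data(res, data):
# print(res, data)
#
# rt = Realtime('S3_', on_data)
# rt.subscribe('005930')
# Realtime.listen()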
""" Wrapper Functions
"""
def transactions_tick(shcode, interval=1, sdate=None, edate=None):
edate = edate or datetime.now().strftime('%Y%m%d')
sdate = sdate or edate
data = []
cts_date = ' '
cts_time = ' '
while True:
response = query('t8411', {
'shcode': shcode,
'ncnt': interval,
'qrycnt': 2000,
'nday': '0',
'sdate': sdate,
'edate': edate,
'cts_date': cts_date,
'cts_time': cts_time,
'comp_yn': 'Y'
}, len(data) > 0)
data = response['t8411OutBlock1'] + data
cts_date = response['t8411OutBlock']['cts_date']
cts_time = response['t8411OutBlock']['cts_time']
if not (cts_date or cts_time):
break
data = pd.DataFrame(data)
data.index = (data['date'] + data['time']).apply(lambda t: datetime.strptime(t, '%Y%m%d%H%M%S').astimezone(pytz.timezone('Asia/Seoul')))
data = data.rename(columns={'jdiff_vol': 'volumn'})
data = data[['open', 'high', 'low', 'close', 'volumn', 'jongchk', 'rate']]
return data
def transactions_min(shcode, interval=1, sdate=None, edate=None):
edate = edate or datetime.now().strftime('%Y%m%d')
sdate = sdate or edate
data = []
cts_date = ' '
cts_time = ' '
while True:
response = query('t8412', {
'shcode': shcode,
'ncnt': interval,
'qrycnt': 2000,
'nday': '0',
'sdate': sdate,
'edate': edate,
'cts_date': cts_date,
'cts_time': cts_time,
'comp_yn': 'Y'
}, len(data) > 0)
data = response['t8412OutBlock1'] + data
cts_date = response['t8412OutBlock']['cts_date']
cts_time = response['t8412OutBlock']['cts_time']
if not (cts_date or cts_time):
break
if len(data) == 0:
return None
data = pd.DataFrame(data)
data.index = (data['date'] + data['time']).apply(lambda t: datetime.strptime(t, '%Y%m%d%H%M%S').astimezone(pytz.timezone('Asia/Seoul')))
data = data.rename(columns={'jdiff_vol': 'volumn'})
data = data[['open', 'high', 'low', 'close', 'volumn', 'value', 'jongchk', 'rate']]
return data
def transactions_day(shcode, interval=1, sdate=None, edate=None):
edate = edate or datetime.now().strftime('%Y%m%d')
sdate = sdate or (datetime.now()-timedelta(31)).strftime('%Y%m%d')
data = []
cts_date = ' '
while True:
response = query('t8413', {
'shcode': shcode,
'gubun': '2',
'qrycnt': 2000,
'sdate': sdate,
'edate': edate,
'cts_date': cts_date,
'comp_yn': 'Y'
}, len(data) > 0)
data = response['t8413OutBlock1'] + data
cts_date = response['t8413OutBlock']['cts_date']
if not cts_date:
break
data = pd.DataFrame(data)
data.index = (data['date'] + '180000').apply(lambda t: datetime.strptime(t, '%Y%m%d%H%M%S').astimezone(pytz.timezone('Asia/Seoul')))
data = data.rename(columns={'jdiff_vol': 'volumn'})
data = data[['open', 'high', 'low', 'close', 'volumn', 'value', 'jongchk', 'rate']]
return data
def transactions(shcode, interval, sdate=None, edate=None):
""" 거래내역
t8411 : 주식챠트(틱/n틱)
t8412 : 주식챠트(N분)
t8413 : 주식챠트(일주월)
t1305 : 기간별 주가 : 일주월
t4201 : 주식차트(종합) - 틱/분/일/주/월
@arg shcode[str]`000030`
@arg interval[*str='5min']
(t)ick, (M)in, (d)ay, (w)eek, (m)onth
@arg sdate[*str or datetime.date or datetime.datetime]
@arg edate[*str or datetime.date or datetime.datetime]
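Example (illustrative):
transactions('005930', '5M', sdate='20240102', edate='20240105') # 5-minute bars
transactions('005930', 'd') # daily bars for roughly the last month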
"""
interval = (interval
.replace('tick', 't')
.replace('Min', 'M')
.replace('day', 'd')
.replace('week', 'w')
.replace('month', 'm'))
if isinstance(sdate, date):
sdate = sdate.strftime('%Y%m%d')
if isinstance(edate, date):
edate = edate.strftime('%Y%m%d')
if interval[-1] == 't':
interval = re.match(r'(\d*)t', interval).group(1) or 1
return transactions_tick(shcode, interval, sdate, edate)
elif interval[-1] == 'M':
interval = re.match(r'(\d*)M', interval).group(1) or 1
return transactions_min(shcode, interval, sdate, edate)
elif interval[-1] == 'd':
interval = re.match(r'(\d*)d', interval).group(1) or 1
return transactions_day(shcode, interval, sdate, edate)
elif interval[-1] in ['w', 'm']:
raise NotImplementedError
else:
raise ValueError('알 수 없는 interval 타입입니다.')
| conditional_block |
||
ebest.py | 그인 실패 : {}'.format(msg))
def OnDisconnect(self):
""" 서버와의 연결이 끊어졌을 때 실행되는 함수
"""
self.waiting = False
logger.info('[*] 서버와의 연결이 끊어졌습니다')
_session = DispatchWithEvents('XA_Session.XASession', _SessionHandler)
def login(
server=None,
username=None,
password=None,
):
""" 로그인
"""
# If there is an existing server connection, drop it first
if _session.IsConnected():
_session.DisconnectServer()
# Collect the information required for login
login_server = (server or input('[*] 접속 서버 ((r)eal / (D)emo / (a)ce) : ')).lower()[:1]
login_server = {
'r': 'hts.ebestsec.co.kr',
'd': 'demo.ebestsec.co.kr',
'a': '127.0.0.1'
}.get(login_server, 'demo.ebestsec.co.kr')
login_port = 20001
login_id = username or input('[*] 아이디 : ')
login_pw = password or getpass('[*] 패스워드 : ')
login_cert = '' if login_server == 'demo.ebestsec.co.kr' else getpass('[*] 공인인증서 암호 : ')
# Send the login request
_session.waiting = True
_session.ConnectServer(login_server, login_port)
_session.Login(login_id, login_pw, login_cert, 0, 0)
while _session.waiting:
PumpWaitingMessages()
time.sleep(0.05)
def accounts():
""" 계좌 리스트
"""
accounts = []
num_account = _session.GetAccountListCount()
for i in range(num_account):
acc_no = _session.GetAccountList(i)
accounts.append({
'acc' : acc_no,
'nm' : _session.GetAccountName(acc_no),
'detail' : _session.GetAcctDetailName(acc_no),
'nick' : _session.GetAcctNickname(acc_no)
})
return accounts
def account(index=0):
""" 계좌번호
@arg index[*int=0] 불러올 계좌의 순번
"""
return _session.GetAccountList(index)
""" Query
"""
_query_status = {}
class _QueryHandler:
def __init__(self):
self.response = {}
self.decomp = False
self.qrycnt = None
self.waiting = False
self.res = None
def init(self, res):
self.LoadFromResFile('/Res/{}.res'.format(res))
self.res = res
def set_data(self, block, k, v, index=0):
if k == 'comp_yn' and v.lower() == 'y':
self.decomp = True
elif k == 'qrycnt':
self.qrycnt = int(v)
self.SetFieldData(block, k, index, v)
def get_block_data(self, block, index=0):
block_data = {}
for field in meta_res[self.res]['output'][block]['fields']:
data = self.GetFieldData(block, field['name'], index)
if field['type'] == 'long':
if data == '-':
data = 0
data = int(data or 0)
elif field['type'] == 'double' or field['type'] == 'float':
data = float(data or 0.0)
block_data[field['name']] = data
return block_data
def OnReceiveData(self, res):
""" 요청 데이터 도착 Listener
self.GetFieldData(...)를 통해 전송받은 데이터 확인이 가능하다.
@arg res[str] 요청 res 파일명
"""
# Decompress when the response was requested in compressed form
# TODO : handle occurs blocks other than OutBlock1 (e.g. FOCCQ33600)
if self.decomp:
self.Decompress(res + 'OutBlock1')
for block in meta_res[res]['output'].keys():
# 해당 블럭이 occurs인 경우,
if meta_res[res]['output'][block]['occurs']:
row_res = []
for i in range(self.GetBlockCount(block)):
row_res.append(self.get_block_data(block, i))
# 해당 블럭이 단일 데이터인 경우,
else:
row_res = self.get_block_data(block)
self.response[block] = row_res
self.waiting = False
def query(res, send, cont=False, timeout=10):
""" Query 요청
@arg res[str]`t1102` 사용할 res 파일명
@arg send[dict] 전송할 데이터
{
'Block1': [{'Field1': 'Value1', 'Field2': 'Value2'}, {...}, {...}],
'Block2': {'Field3': 'Value3', 'Field4': 'Value4'}
}
단일 InBlock의 경우에는 아래와 같이 간단한 형식도 입력받음
{'Field1': 'Value1', 'Field2': 'Value2'}
@arg cont[*bool=False] 연속조회 여부
@arg timeout[*int=10] 서버 응답 최대 대기 시간, -1인 경우 infinite time
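Example (illustrative; block and field names always come from the res file):
response = query('t1102', {'shcode': '005930'})
print(response['t1102OutBlock'])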
"""
# res 파일 로드
_query = DispatchWithEvents('XA_DataSet.XAQuery', _QueryHandler)
_query.init(res)
if not cont:
# 전송 현황 업데이트
if not res in _query_status:
_query_status[res] = []
while _query_status[res] and _query_status[res][-1] + 1 < time.time():
_query_status[res].pop()
# 초당 전송 횟수를 고려
tr_count_per_sec = _query.GetTRCountPerSec(res)
if len(_query_status[res]) >= tr_count_per_sec:
delay = max(_query_status[res][-1] + 1.05 - time.time(), 0)
time.sleep(delay)
# 기간(10분)당 전송 횟수를 고려
# TODO : 10분 제한이 걸리면 blocking state 진입
tr_count_limit = _query.GetTRCountLimit(res)
while tr_count_limit and _query.GetTRCountRequest(res) >= tr_count_limit:
time.sleep(1)
_query = DispatchWithEvents('XA_DataSet.XAQuery', _QueryHandler)
_query.init(res)
# simplified 된 input를 받았을 경우
send_first_value = list(send.values())[0]
if not (
isinstance (send_first_value, list) or
isinstance (send_first_value, dict)
):
send = { '{}InBlock'.format(res): send }
# 전송할 데이터를 설정
for block in send.keys():
if isinstance(send[block], dict):
for (k, v) in send[block].items():
_query.set_data(block, k, v)
elif isinstance(send[block], list):
for i in range(len(send[block])):
for (k, v) in send[block][i].items():
_query.set_data(block, k, v, i)
else:
raise ValueError('알 수 없는 형태의 데이터입니다')
else:
time.sleep(0.05)
# 데이터 요청
_query.Request(cont)
now = time.time()
if not cont:
_query_status[res].insert(0, now)
_query.waiting = True
while _query.waiting:
if timeout >= 0 and now + timeout < time.time():
_query.waiting = False
raise TimeoutError('Query Timeout')
PumpWaitingMessages()
return _query.response
class _RealtimeHandler:
def OnReceiveRealData(self, res):
response = {}
for field in meta_res[res]['output']['OutBlock']['fields']:
response[field['name']] = self.GetFieldData('OutBlock', field['name'])
self.callback(res, response)
class Realtime:
def __init__(self, res, callback):
self._res = res
self._instance = DispatchWithEvents('XA_DataSet.XAReal', _RealtimeHandler)
self._instance.LoadFromResFile(f'/Res/{res}.res')
self._instance.callback = callback
self.subscribed_keys = []
def subscribe(self, key=None):
if key in self.subscribed_keys:
print(f'{self._res}는 이미 {key} 데이터를 수신 중입니다.')
return None
if key:
self._instance.SetFieldData('InBlock', meta_res[self._res]['input']['InBlock']['fields'][0]['name'], key)
self._instance.AdviseRealData()
|
self.subscribed_keys.append(key)
def unsubscribe(self, key=None):
if key is None:
self._instance.UnadviseRealData()
else:
if key not in self.subscr | identifier_body |
|
ebest.py | ' : _session.GetAcctNickname(acc_no)
})
return accounts
def account(index=0):
""" 계좌번호
@arg index[*int=0] 불러올 계좌의 순번
"""
return _session.GetAccountList(index)
""" Query
"""
_query_status = {}
class _QueryHandler:
def __init__(self):
self.response = {}
self.decomp = False
self.qrycnt = None
self.waiting = False
self.res = None
def init(self, res):
self.LoadFromResFile('/Res/{}.res'.format(res))
self.res = res
def set_data(self, block, k, v, index=0):
if k == 'comp_yn' and v.lower() == 'y':
self.decomp = True
elif k == 'qrycnt':
self.qrycnt = int(v)
self.SetFieldData(block, k, index, v)
def get_block_data(self, block, index=0):
block_data = {}
for field in meta_res[self.res]['output'][block]['fields']:
data = self.GetFieldData(block, field['name'], index)
if field['type'] == 'long':
if data == '-':
data = 0
data = int(data or 0)
elif field['type'] == 'double' or field['type'] == 'float':
data = float(data or 0.0)
block_data[field['name']] = data
return block_data
def OnReceiveData(self, res):
""" 요청 데이터 도착 Listener
self.GetFieldData(...)를 통해 전송받은 데이터 확인이 가능하다.
@arg res[str] 요청 res 파일명
"""
# decompress가 필요한 경우 압축해제
# TODO : OutBlock1 말고 다른 occurs가 있는 케이스 (ex. FOCCQ33600)
if self.decomp:
self.Decompress(res + 'OutBlock1')
for block in meta_res[res]['output'].keys():
# 해당 블럭이 occurs인 경우,
if meta_res[res]['output'][block]['occurs']:
row_res = []
for i in range(self.GetBlockCount(block)):
row_res.append(self.get_block_data(block, i))
# 해당 블럭이 단일 데이터인 경우,
else:
row_res = self.get_block_data(block)
self.response[block] = row_res
self.waiting = False
def query(res, send, cont=False, timeout=10):
""" Query 요청
@arg res[str]`t1102` 사용할 res 파일명
@arg send[dict] 전송할 데이터
{
'Block1': [{'Field1': 'Value1', 'Field2': 'Value2'}, {...}, {...}],
'Block2': {'Field3': 'Value3', 'Field4': 'Value4'}
}
단일 InBlock의 경우에는 아래와 같이 간단한 형식도 입력받음
{'Field1': 'Value1', 'Field2': 'Value2'}
@arg cont[*bool=False] 연속조회 여부
@arg timeout[*int=10] 서버 응답 최대 대기 시간, -1인 경우 infinite time
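Example (illustrative; block and field names always come from the res file):
response = query('t1102', {'shcode': '005930'})
print(response['t1102OutBlock'])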
"""
# res 파일 로드
_query = DispatchWithEvents('XA_DataSet.XAQuery', _QueryHandler)
_query.init(res)
if not cont:
# 전송 현황 업데이트
if not res in _query_status:
_query_status[res] = []
while _query_status[res] and _query_status[res][-1] + 1 < time.time():
_query_status[res].pop()
# 초당 전송 횟수를 고려
tr_count_per_sec = _query.GetTRCountPerSec(res)
if len(_query_status[res]) >= tr_count_per_sec:
delay = max(_query_status[res][-1] + 1.05 - time.time(), 0)
time.sleep(delay)
# 기간(10분)당 전송 횟수를 고려
# TODO : 10분 제한이 걸리면 blocking state 진입
tr_count_limit = _query.GetTRCountLimit(res)
while tr_count_limit and _query.GetTRCountRequest(res) >= tr_count_limit:
time.sleep(1)
_query = DispatchWithEvents('XA_DataSet.XAQuery', _QueryHandler)
_query.init(res)
# simplified 된 input를 받았을 경우
send_first_value = list(send.values())[0]
if not (
isinstance (send_first_value, list) or
isinstance (send_first_value, dict)
):
send = { '{}InBlock'.format(res): send }
# 전송할 데이터를 설정
for block in send.keys():
if isinstance(send[block], dict):
for (k, v) in send[block].items():
_query.set_data(block, k, v)
elif isinstance(send[block], list):
for i in range(len(send[block])):
for (k, v) in send[block][i].items():
_query.set_data(block, k, v, i)
else:
raise ValueError('알 수 없는 형태의 데이터입니다')
else:
time.sleep(0.05)
# 데이터 요청
_query.Request(cont)
now = time.time()
if not cont:
_query_status[res].insert(0, now)
_query.waiting = True
while _query.waiting:
if timeout >= 0 and now + timeout < time.time():
_query.waiting = False
raise TimeoutError('Query Timeout')
PumpWaitingMessages()
return _query.response
class _RealtimeHandler:
def OnReceiveRealData(self, res):
response = {}
for field in meta_res[res]['output']['OutBlock']['fields']:
response[field['name']] = self.GetFieldData('OutBlock', field['name'])
self.callback(res, response)
class Realtime:
def __init__(self, res, callback):
self._res = res
self._instance = DispatchWithEvents('XA_DataSet.XAReal', _RealtimeHandler)
self._instance.LoadFromResFile(f'/Res/{res}.res')
self._instance.callback = callback
self.subscribed_keys = []
def subscribe(self, key=None):
if key in self.subscribed_keys:
print(f'{self._res}는 이미 {key} 데이터를 수신 중입니다.')
return None
if key:
self._instance.SetFieldData('InBlock', meta_res[self._res]['input']['InBlock']['fields'][0]['name'], key)
self._instance.AdviseRealData()
self.subscribed_keys.append(key)
def unsubscribe(self, key=None):
if key is None:
self._instance.UnadviseRealData()
else:
if key not in self.subscribed_keys:
raise ValueError(f'{self._res}는 {key} 데이터를 수신하고 있지 않습니다.')
self._instance.UnadviseRealDataWithKey(key)
@staticmethod
def listen(delay=.01):
while True:
PumpWaitingMessages()
time.sleep(delay)
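# Illustrative usage sketch for Realtime (assumes login() was already called and
# that 'S3_' is the real-time res for KOSPI trade data):
#
# def on_data(res, data):
# print(res, data)
#
# rt = Realtime('S3_', on_data)
# rt.subscribe('005930')
# Realtime.listen()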
""" Wrapper Functions
"""
def transactions_tick(shcode, interval=1, sdate=None, edate=None):
edate = edate or datetime.now().strftime('%Y%m%d')
sdate = sdate or edate
data = []
cts_date = ' '
cts_time = ' '
while True:
response = query('t8411', {
'shcode': shcode,
'ncnt': interval,
'qrycnt': 2000,
'nday': '0',
'sdate': sdate,
'edate': edate,
'cts_date': cts_date,
'cts_time': cts_time,
'comp_yn': 'Y'
}, len(data) > 0)
data = response['t8411OutBlock1'] + data
cts_date = response['t8411OutBlock']['cts_date']
cts_time = response['t8411OutBlock']['cts_time']
if not (cts_date or cts_time):
break
data = pd.DataFrame(data)
data.index = (data['date'] + data['time']).apply(lambda t: datetime.strptime(t, '%Y%m%d%H%M%S').astimezone(pytz.timezone('Asia/Seoul')))
data = data.rename(columns={'jdiff_vol': 'volumn'})
data = data[['open', 'high', 'low', 'close', 'volumn', 'jongchk', 'rate']]
return data
def transactions_min(shcode, interval=1, sdate=None, edate=None):
edate = edate or datetime.now().strftime('%Y%m%d')
sdate = sdate or edate
data = []
cts_date = ' ' | cts_time = ' '
while True:
response = query('t8412', {
'shcode': shcode, | random_line_split |
|
rbac_utils.py | License for the specific language governing permissions and limitations
# under the License.
from contextlib import contextmanager
import sys
import time
from oslo_log import log as logging
from oslo_log import versionutils
from oslo_utils import excutils
from tempest import clients
from tempest.common import credentials_factory as credentials
from tempest import config
from patrole_tempest_plugin import rbac_exceptions
CONF = config.CONF
LOG = logging.getLogger(__name__)
class RbacUtils(object):
"""Utility class responsible for switching ``os_primary`` role.
This class is responsible for overriding the value of the primary Tempest
credential's role (i.e. ``os_primary`` role). By doing so, it is possible
to seamlessly swap between admin credentials, needed for setup and clean
up, and primary credentials, needed to perform the API call which does
policy enforcement. The primary credentials always cycle between roles
defined by ``CONF.identity.admin_role`` and
``CONF.patrole.rbac_test_role``.
"""
def __init__(self, test_obj):
"""Constructor for ``RbacUtils``.
:param test_obj: An instance of `tempest.test.BaseTestCase`.
"""
# Initialize the admin roles_client to perform role switching.
admin_mgr = clients.Manager(
credentials.get_configured_admin_credentials())
if test_obj.get_identity_version() == 'v3':
admin_roles_client = admin_mgr.roles_v3_client
else:
admin_roles_client = admin_mgr.roles_client
self.admin_roles_client = admin_roles_client
self._override_role(test_obj, False)
admin_role_id = None
rbac_role_id = None
@contextmanager
def override_role(self, test_obj):
"""Override the role used by ``os_primary`` Tempest credentials.
Temporarily change the role used by ``os_primary`` credentials to:
* ``[patrole] rbac_test_role`` before test execution
* ``[identity] admin_role`` after test execution
Automatically switches to admin role after test execution.
:param test_obj: Instance of ``tempest.test.BaseTestCase``.
:returns: None
.. warning::
This function can alter user roles for pre-provisioned credentials.
Work is underway to safely clean up after this function.
Example::
@rbac_rule_validation.action(service='test',
rule='a:test:rule')
def test_foo(self):
# Allocate test-level resources here.
with self.rbac_utils.override_role(self):
# The role for `os_primary` has now been overridden. Within
# this block, call the API endpoint that enforces the
# expected policy specified by "rule" in the decorator.
self.foo_service.bar_api_call()
# The role is switched back to admin automatically. Note that
# if the API call above threw an exception, any code below this
# point in the test is not executed.
"""
test_obj._set_override_role_called()
self._override_role(test_obj, True)
try:
# Execute the test.
yield
finally:
# Check whether an exception was raised. If so, remember that
# for future validation.
exc = sys.exc_info()[0]
if exc is not None:
test_obj._set_override_role_caught_exc()
# This code block is always executed, no matter the result of the
# test. Automatically switch back to the admin role for test clean
# up.
self._override_role(test_obj, False)
def _override_role(self, test_obj, toggle_rbac_role=False):
"""Private helper for overriding ``os_primary`` Tempest credentials.
:param test_obj: instance of :py:class:`tempest.test.BaseTestCase`
:param toggle_rbac_role: Boolean value that controls the role that
overrides default role of ``os_primary`` credentials.
* If True: role is set to ``[patrole] rbac_test_role``
* If False: role is set to ``[identity] admin_role``
"""
self.user_id = test_obj.os_primary.credentials.user_id
self.project_id = test_obj.os_primary.credentials.tenant_id
self.token = test_obj.os_primary.auth_provider.get_token()
LOG.debug('Overriding role to: %s.', toggle_rbac_role)
role_already_present = False
try:
if not all([self.admin_role_id, self.rbac_role_id]):
self._get_roles_by_name()
target_role = (
self.rbac_role_id if toggle_rbac_role else self.admin_role_id)
role_already_present = self._list_and_clear_user_roles_on_project(
target_role)
# Do not override roles if `target_role` already exists.
if not role_already_present:
self._create_user_role_on_project(target_role)
except Exception as exp:
with excutils.save_and_reraise_exception():
LOG.exception(exp)
finally:
auth_providers = test_obj.get_auth_providers()
for provider in auth_providers:
provider.clear_auth()
# Fernet tokens are not subsecond aware so sleep to ensure we are
# passing the second boundary before attempting to authenticate.
# Only sleep if a token revocation occurred as a result of role
# overriding. This will optimize test runtime in the case where
# ``[identity] admin_role`` == ``[patrole] rbac_test_role``.
if not role_already_present:
time.sleep(1)
for provider in auth_providers:
provider.set_auth()
def _get_roles_by_name(self):
available_roles = self.admin_roles_client.list_roles()['roles']
role_map = {r['name']: r['id'] for r in available_roles}
LOG.debug('Available roles: %s', list(role_map.keys()))
admin_role_id = role_map.get(CONF.identity.admin_role)
rbac_role_id = role_map.get(CONF.patrole.rbac_test_role)
if not all([admin_role_id, rbac_role_id]):
missing_roles = []
msg = ("Could not find `[patrole] rbac_test_role` or "
"`[identity] admin_role`, both of which are required for "
"RBAC testing.")
if not admin_role_id:
missing_roles.append(CONF.identity.admin_role)
if not rbac_role_id:
missing_roles.append(CONF.patrole.rbac_test_role)
msg += " Following roles were not found: %s." % (
", ".join(missing_roles))
msg += " Available roles: %s." % ", ".join(list(role_map.keys()))
raise rbac_exceptions.RbacResourceSetupFailed(msg)
self.admin_role_id = admin_role_id
self.rbac_role_id = rbac_role_id
def _create_user_role_on_project(self, role_id):
self.admin_roles_client.create_user_role_on_project(
self.project_id, self.user_id, role_id)
def _list_and_clear_user_roles_on_project(self, role_id):
roles = self.admin_roles_client.list_user_roles_on_project(
self.project_id, self.user_id)['roles']
role_ids = [role['id'] for role in roles]
# NOTE(felipemonteiro): We do not use ``role_id in role_ids`` here to
# avoid over-permission errors: if the current list of roles on the
# project includes "admin" and "Member", and we are switching to the
# "Member" role, then we must delete the "admin" role. Thus, we only
# return early if the user's roles on the project are an exact match.
if [role_id] == role_ids:
return True
for role in roles:
self.admin_roles_client.delete_role_from_user_on_project(
self.project_id, self.user_id, role['id'])
return False
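# Illustrative usage sketch (assumes the test class exposes this utility as
# `self.rbac_utils`, e.g. via the `setup_rbac_utils` hook mentioned below, and
# uses a hypothetical compute client):
#
# with self.rbac_utils.override_role(self):
# self.servers_client.list_servers()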
class RbacUtilsMixin(object):
"""Mixin class to be used alongside an instance of
:py:class:`tempest.test.BaseTestCase`.
Should be used to perform Patrole class setup for a base RBAC class. Child
classes should not use this mixin.
Example::
class BaseRbacTest(rbac_utils.RbacUtilsMixin, base.BaseV2ComputeTest):
@classmethod
def skip_checks(cls):
super(BaseRbacTest, cls).skip_checks()
cls.skip_rbac_checks()
@classmethod
def setup_clients(cls):
super(BaseRbacTest, cls).setup_clients()
cls.setup_rbac_utils()
"""
# Shows if override_role was called.
__override_role_called = False
# Shows if exception raised during override_role.
__override_role_caught_exc = False
@classmethod
def get_auth_providers(cls):
"""Returns list of auth_providers used within test.
Tests may redefine this method to include their own or third party
client auth_providers.
"""
return [cls.os_primary.auth_provider]
@classmethod
def | (cls):
if not CONF.patrole.enable_rbac:
deprecation_msg = ("The `[patrole].enable_rbac` option is "
"deprecated and will be removed in the S "
"release. Patrole tests will always be enabled "
"following installation of the Patrole Tempest "
"plugin. Use a regex to skip tests")
versionutils.report_deprecated_feature(LOG, deprecation_msg)
raise cls.skipException(
'Patrole testing not enabled so skipping %s.' % cls.__name__)
@classmethod
| skip_rbac_checks | identifier_name |
rbac_utils.py | License for the specific language governing permissions and limitations
# under the License.
from contextlib import contextmanager
import sys
import time
from oslo_log import log as logging
from oslo_log import versionutils
from oslo_utils import excutils
from tempest import clients
from tempest.common import credentials_factory as credentials
from tempest import config
from patrole_tempest_plugin import rbac_exceptions
CONF = config.CONF
LOG = logging.getLogger(__name__)
class RbacUtils(object):
"""Utility class responsible for switching ``os_primary`` role.
This class is responsible for overriding the value of the primary Tempest
credential's role (i.e. ``os_primary`` role). By doing so, it is possible
to seamlessly swap between admin credentials, needed for setup and clean
up, and primary credentials, needed to perform the API call which does
policy enforcement. The primary credentials always cycle between roles
defined by ``CONF.identity.admin_role`` and
``CONF.patrole.rbac_test_role``.
"""
def __init__(self, test_obj):
|
admin_role_id = None
rbac_role_id = None
@contextmanager
def override_role(self, test_obj):
"""Override the role used by ``os_primary`` Tempest credentials.
Temporarily change the role used by ``os_primary`` credentials to:
* ``[patrole] rbac_test_role`` before test execution
* ``[identity] admin_role`` after test execution
Automatically switches to admin role after test execution.
:param test_obj: Instance of ``tempest.test.BaseTestCase``.
:returns: None
.. warning::
This function can alter user roles for pre-provisioned credentials.
Work is underway to safely clean up after this function.
Example::
@rbac_rule_validation.action(service='test',
rule='a:test:rule')
def test_foo(self):
# Allocate test-level resources here.
with self.rbac_utils.override_role(self):
# The role for `os_primary` has now been overridden. Within
# this block, call the API endpoint that enforces the
# expected policy specified by "rule" in the decorator.
self.foo_service.bar_api_call()
# The role is switched back to admin automatically. Note that
# if the API call above threw an exception, any code below this
# point in the test is not executed.
"""
test_obj._set_override_role_called()
self._override_role(test_obj, True)
try:
# Execute the test.
yield
finally:
# Check whether an exception was raised. If so, remember that
# for future validation.
exc = sys.exc_info()[0]
if exc is not None:
test_obj._set_override_role_caught_exc()
# This code block is always executed, no matter the result of the
# test. Automatically switch back to the admin role for test clean
# up.
self._override_role(test_obj, False)
def _override_role(self, test_obj, toggle_rbac_role=False):
"""Private helper for overriding ``os_primary`` Tempest credentials.
:param test_obj: instance of :py:class:`tempest.test.BaseTestCase`
:param toggle_rbac_role: Boolean value that controls the role that
overrides default role of ``os_primary`` credentials.
* If True: role is set to ``[patrole] rbac_test_role``
* If False: role is set to ``[identity] admin_role``
"""
self.user_id = test_obj.os_primary.credentials.user_id
self.project_id = test_obj.os_primary.credentials.tenant_id
self.token = test_obj.os_primary.auth_provider.get_token()
LOG.debug('Overriding role to: %s.', toggle_rbac_role)
role_already_present = False
try:
if not all([self.admin_role_id, self.rbac_role_id]):
self._get_roles_by_name()
target_role = (
self.rbac_role_id if toggle_rbac_role else self.admin_role_id)
role_already_present = self._list_and_clear_user_roles_on_project(
target_role)
# Do not override roles if `target_role` already exists.
if not role_already_present:
self._create_user_role_on_project(target_role)
except Exception as exp:
with excutils.save_and_reraise_exception():
LOG.exception(exp)
finally:
auth_providers = test_obj.get_auth_providers()
for provider in auth_providers:
provider.clear_auth()
# Fernet tokens are not subsecond aware so sleep to ensure we are
# passing the second boundary before attempting to authenticate.
# Only sleep if a token revocation occurred as a result of role
# overriding. This will optimize test runtime in the case where
# ``[identity] admin_role`` == ``[patrole] rbac_test_role``.
if not role_already_present:
time.sleep(1)
for provider in auth_providers:
provider.set_auth()
def _get_roles_by_name(self):
available_roles = self.admin_roles_client.list_roles()['roles']
role_map = {r['name']: r['id'] for r in available_roles}
LOG.debug('Available roles: %s', list(role_map.keys()))
admin_role_id = role_map.get(CONF.identity.admin_role)
rbac_role_id = role_map.get(CONF.patrole.rbac_test_role)
if not all([admin_role_id, rbac_role_id]):
missing_roles = []
msg = ("Could not find `[patrole] rbac_test_role` or "
"`[identity] admin_role`, both of which are required for "
"RBAC testing.")
if not admin_role_id:
missing_roles.append(CONF.identity.admin_role)
if not rbac_role_id:
missing_roles.append(CONF.patrole.rbac_test_role)
msg += " Following roles were not found: %s." % (
", ".join(missing_roles))
msg += " Available roles: %s." % ", ".join(list(role_map.keys()))
raise rbac_exceptions.RbacResourceSetupFailed(msg)
self.admin_role_id = admin_role_id
self.rbac_role_id = rbac_role_id
def _create_user_role_on_project(self, role_id):
self.admin_roles_client.create_user_role_on_project(
self.project_id, self.user_id, role_id)
def _list_and_clear_user_roles_on_project(self, role_id):
roles = self.admin_roles_client.list_user_roles_on_project(
self.project_id, self.user_id)['roles']
role_ids = [role['id'] for role in roles]
# NOTE(felipemonteiro): We do not use ``role_id in role_ids`` here to
# avoid over-permission errors: if the current list of roles on the
# project includes "admin" and "Member", and we are switching to the
# "Member" role, then we must delete the "admin" role. Thus, we only
# return early if the user's roles on the project are an exact match.
if [role_id] == role_ids:
return True
for role in roles:
self.admin_roles_client.delete_role_from_user_on_project(
self.project_id, self.user_id, role['id'])
return False
class RbacUtilsMixin(object):
"""Mixin class to be used alongside an instance of
:py:class:`tempest.test.BaseTestCase`.
Should be used to perform Patrole class setup for a base RBAC class. Child
classes should not use this mixin.
Example::
class BaseRbacTest(rbac_utils.RbacUtilsMixin, base.BaseV2ComputeTest):
@classmethod
def skip_checks(cls):
super(BaseRbacTest, cls).skip_checks()
cls.skip_rbac_checks()
@classmethod
def setup_clients(cls):
super(BaseRbacTest, cls).setup_clients()
cls.setup_rbac_utils()
"""
# Shows if override_role was called.
__override_role_called = False
# Shows if exception raised during override_role.
__override_role_caught_exc = False
@classmethod
def get_auth_providers(cls):
"""Returns list of auth_providers used within test.
Tests may redefine this method to include their own or third party
client auth_providers.
"""
return [cls.os_primary.auth_provider]
@classmethod
def skip_rbac_checks(cls):
if not CONF.patrole.enable_rbac:
deprecation_msg = ("The `[patrole].enable_rbac` option is "
"deprecated and will be removed in the S "
"release. Patrole tests will always be enabled "
"following installation of the Patrole Tempest "
"plugin. Use a regex to skip tests")
versionutils.report_deprecated_feature(LOG, deprecation_msg)
raise cls.skipException(
'Patrole testing not enabled so skipping %s.' % cls.__name__)
@classmethod
| """Constructor for ``RbacUtils``.
:param test_obj: An instance of `tempest.test.BaseTestCase`.
"""
# Initialize the admin roles_client to perform role switching.
admin_mgr = clients.Manager(
credentials.get_configured_admin_credentials())
if test_obj.get_identity_version() == 'v3':
admin_roles_client = admin_mgr.roles_v3_client
else:
admin_roles_client = admin_mgr.roles_client
self.admin_roles_client = admin_roles_client
self._override_role(test_obj, False) | identifier_body |
rbac_utils.py | License for the specific language governing permissions and limitations
# under the License.
from contextlib import contextmanager
import sys
import time
from oslo_log import log as logging
from oslo_log import versionutils
from oslo_utils import excutils
from tempest import clients
from tempest.common import credentials_factory as credentials
from tempest import config
from patrole_tempest_plugin import rbac_exceptions
CONF = config.CONF
LOG = logging.getLogger(__name__)
class RbacUtils(object):
"""Utility class responsible for switching ``os_primary`` role.
This class is responsible for overriding the value of the primary Tempest
credential's role (i.e. ``os_primary`` role). By doing so, it is possible
to seamlessly swap between admin credentials, needed for setup and clean
up, and primary credentials, needed to perform the API call which does
policy enforcement. The primary credentials always cycle between roles
defined by ``CONF.identity.admin_role`` and
``CONF.patrole.rbac_test_role``.
"""
def __init__(self, test_obj):
"""Constructor for ``RbacUtils``.
:param test_obj: An instance of `tempest.test.BaseTestCase`.
"""
# Initialize the admin roles_client to perform role switching.
admin_mgr = clients.Manager(
credentials.get_configured_admin_credentials())
if test_obj.get_identity_version() == 'v3':
admin_roles_client = admin_mgr.roles_v3_client
else:
admin_roles_client = admin_mgr.roles_client
self.admin_roles_client = admin_roles_client
self._override_role(test_obj, False)
admin_role_id = None
rbac_role_id = None
@contextmanager
def override_role(self, test_obj):
"""Override the role used by ``os_primary`` Tempest credentials.
Temporarily change the role used by ``os_primary`` credentials to:
* ``[patrole] rbac_test_role`` before test execution
* ``[identity] admin_role`` after test execution
Automatically switches to admin role after test execution.
:param test_obj: Instance of ``tempest.test.BaseTestCase``.
:returns: None
.. warning::
This function can alter user roles for pre-provisioned credentials.
Work is underway to safely clean up after this function.
Example::
@rbac_rule_validation.action(service='test',
rule='a:test:rule')
def test_foo(self):
# Allocate test-level resources here.
with self.rbac_utils.override_role(self):
# The role for `os_primary` has now been overridden. Within
# this block, call the API endpoint that enforces the
# expected policy specified by "rule" in the decorator.
self.foo_service.bar_api_call()
# The role is switched back to admin automatically. Note that
# if the API call above threw an exception, any code below this
# point in the test is not executed.
"""
test_obj._set_override_role_called()
self._override_role(test_obj, True)
try:
# Execute the test.
yield
finally:
# Check whether an exception was raised. If so, remember that
# for future validation.
exc = sys.exc_info()[0]
if exc is not None:
test_obj._set_override_role_caught_exc()
# This code block is always executed, no matter the result of the
# test. Automatically switch back to the admin role for test clean
# up.
self._override_role(test_obj, False)
def _override_role(self, test_obj, toggle_rbac_role=False):
"""Private helper for overriding ``os_primary`` Tempest credentials.
:param test_obj: instance of :py:class:`tempest.test.BaseTestCase`
:param toggle_rbac_role: Boolean value that controls the role that
overrides default role of ``os_primary`` credentials.
* If True: role is set to ``[patrole] rbac_test_role``
* If False: role is set to ``[identity] admin_role``
"""
self.user_id = test_obj.os_primary.credentials.user_id
self.project_id = test_obj.os_primary.credentials.tenant_id
self.token = test_obj.os_primary.auth_provider.get_token()
LOG.debug('Overriding role to: %s.', toggle_rbac_role)
role_already_present = False
try:
if not all([self.admin_role_id, self.rbac_role_id]):
self._get_roles_by_name()
target_role = (
self.rbac_role_id if toggle_rbac_role else self.admin_role_id)
role_already_present = self._list_and_clear_user_roles_on_project(
target_role)
# Do not override roles if `target_role` already exists.
if not role_already_present:
self._create_user_role_on_project(target_role)
except Exception as exp:
with excutils.save_and_reraise_exception():
LOG.exception(exp)
finally:
auth_providers = test_obj.get_auth_providers()
for provider in auth_providers:
provider.clear_auth()
# Fernet tokens are not subsecond aware so sleep to ensure we are
# passing the second boundary before attempting to authenticate.
# Only sleep if a token revocation occurred as a result of role
# overriding. This will optimize test runtime in the case where
# ``[identity] admin_role`` == ``[patrole] rbac_test_role``.
if not role_already_present:
time.sleep(1)
for provider in auth_providers:
provider.set_auth()
def _get_roles_by_name(self):
available_roles = self.admin_roles_client.list_roles()['roles']
role_map = {r['name']: r['id'] for r in available_roles}
LOG.debug('Available roles: %s', list(role_map.keys()))
admin_role_id = role_map.get(CONF.identity.admin_role)
rbac_role_id = role_map.get(CONF.patrole.rbac_test_role)
if not all([admin_role_id, rbac_role_id]):
missing_roles = []
msg = ("Could not find `[patrole] rbac_test_role` or "
"`[identity] admin_role`, both of which are required for "
"RBAC testing.")
if not admin_role_id:
|
if not rbac_role_id:
missing_roles.append(CONF.patrole.rbac_test_role)
msg += " Following roles were not found: %s." % (
", ".join(missing_roles))
msg += " Available roles: %s." % ", ".join(list(role_map.keys()))
raise rbac_exceptions.RbacResourceSetupFailed(msg)
self.admin_role_id = admin_role_id
self.rbac_role_id = rbac_role_id
def _create_user_role_on_project(self, role_id):
self.admin_roles_client.create_user_role_on_project(
self.project_id, self.user_id, role_id)
def _list_and_clear_user_roles_on_project(self, role_id):
roles = self.admin_roles_client.list_user_roles_on_project(
self.project_id, self.user_id)['roles']
role_ids = [role['id'] for role in roles]
# NOTE(felipemonteiro): We do not use ``role_id in role_ids`` here to
# avoid over-permission errors: if the current list of roles on the
# project includes "admin" and "Member", and we are switching to the
# "Member" role, then we must delete the "admin" role. Thus, we only
# return early if the user's roles on the project are an exact match.
if [role_id] == role_ids:
return True
for role in roles:
self.admin_roles_client.delete_role_from_user_on_project(
self.project_id, self.user_id, role['id'])
return False
class RbacUtilsMixin(object):
"""Mixin class to be used alongside an instance of
:py:class:`tempest.test.BaseTestCase`.
Should be used to perform Patrole class setup for a base RBAC class. Child
classes should not use this mixin.
Example::
class BaseRbacTest(rbac_utils.RbacUtilsMixin, base.BaseV2ComputeTest):
@classmethod
def skip_checks(cls):
super(BaseRbacTest, cls).skip_checks()
cls.skip_rbac_checks()
@classmethod
def setup_clients(cls):
super(BaseRbacTest, cls).setup_clients()
cls.setup_rbac_utils()
"""
# Shows if override_role was called.
__override_role_called = False
# Shows if exception raised during override_role.
__override_role_caught_exc = False
@classmethod
def get_auth_providers(cls):
"""Returns list of auth_providers used within test.
Tests may redefine this method to include their own or third party
client auth_providers.
"""
return [cls.os_primary.auth_provider]
@classmethod
def skip_rbac_checks(cls):
if not CONF.patrole.enable_rbac:
deprecation_msg = ("The `[patrole].enable_rbac` option is "
"deprecated and will be removed in the S "
"release. Patrole tests will always be enabled "
"following installation of the Patrole Tempest "
"plugin. Use a regex to skip tests")
versionutils.report_deprecated_feature(LOG, deprecation_msg)
raise cls.skipException(
'Patrole testing not enabled so skipping %s.' % cls.__name__)
@classmethod
| missing_roles.append(CONF.identity.admin_role) | conditional_block |
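# Illustrative sketch, not part of rbac_utils.py: how a test typically drives the
# role flip documented above. `servers_client` and the surrounding test class are
# assumptions for the example only.
with self.rbac_utils.override_role(self):
    self.servers_client.list_servers()  # runs under [patrole] rbac_test_role
# execution resumes under [identity] admin_role here, even if the call above raised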
rbac_utils.py | License for the specific language governing permissions and limitations
# under the License.
from contextlib import contextmanager
import sys
import time
from oslo_log import log as logging
from oslo_log import versionutils
from oslo_utils import excutils
from tempest import clients
from tempest.common import credentials_factory as credentials
from tempest import config
from patrole_tempest_plugin import rbac_exceptions
CONF = config.CONF
LOG = logging.getLogger(__name__)
class RbacUtils(object):
"""Utility class responsible for switching ``os_primary`` role.
This class is responsible for overriding the value of the primary Tempest
credential's role (i.e. ``os_primary`` role). By doing so, it is possible
to seamlessly swap between admin credentials, needed for setup and clean
up, and primary credentials, needed to perform the API call which does
policy enforcement. The primary credentials always cycle between roles
defined by ``CONF.identity.admin_role`` and
``CONF.patrole.rbac_test_role``.
"""
def __init__(self, test_obj):
"""Constructor for ``RbacUtils``.
:param test_obj: An instance of `tempest.test.BaseTestCase`.
"""
# Initialize the admin roles_client to perform role switching.
admin_mgr = clients.Manager(
credentials.get_configured_admin_credentials())
if test_obj.get_identity_version() == 'v3':
admin_roles_client = admin_mgr.roles_v3_client
else:
admin_roles_client = admin_mgr.roles_client
self.admin_roles_client = admin_roles_client
self._override_role(test_obj, False)
admin_role_id = None
rbac_role_id = None
@contextmanager
def override_role(self, test_obj):
"""Override the role used by ``os_primary`` Tempest credentials.
Temporarily change the role used by ``os_primary`` credentials to:
* ``[patrole] rbac_test_role`` before test execution
* ``[identity] admin_role`` after test execution
Automatically switches to admin role after test execution.
:param test_obj: Instance of ``tempest.test.BaseTestCase``.
:returns: None
.. warning::
This function can alter user roles for pre-provisioned credentials.
Work is underway to safely clean up after this function.
Example::
@rbac_rule_validation.action(service='test',
rule='a:test:rule')
def test_foo(self):
# Allocate test-level resources here.
with self.rbac_utils.override_role(self):
# The role for `os_primary` has now been overridden. Within
# this block, call the API endpoint that enforces the
# expected policy specified by "rule" in the decorator.
self.foo_service.bar_api_call()
# The role is switched back to admin automatically. Note that
# if the API call above threw an exception, any code below this
# point in the test is not executed.
"""
test_obj._set_override_role_called()
self._override_role(test_obj, True)
try:
# Execute the test.
yield
finally:
# Check whether an exception was raised. If so, remember that
# for future validation.
exc = sys.exc_info()[0]
if exc is not None:
test_obj._set_override_role_caught_exc()
# This code block is always executed, no matter the result of the
# test. Automatically switch back to the admin role for test clean
# up.
self._override_role(test_obj, False)
def _override_role(self, test_obj, toggle_rbac_role=False):
"""Private helper for overriding ``os_primary`` Tempest credentials. |
:param test_obj: instance of :py:class:`tempest.test.BaseTestCase`
:param toggle_rbac_role: Boolean value that controls the role that
overrides default role of ``os_primary`` credentials.
* If True: role is set to ``[patrole] rbac_test_role``
* If False: role is set to ``[identity] admin_role``
"""
self.user_id = test_obj.os_primary.credentials.user_id
self.project_id = test_obj.os_primary.credentials.tenant_id
self.token = test_obj.os_primary.auth_provider.get_token()
LOG.debug('Overriding role to: %s.', toggle_rbac_role)
role_already_present = False
try:
if not all([self.admin_role_id, self.rbac_role_id]):
self._get_roles_by_name()
target_role = (
self.rbac_role_id if toggle_rbac_role else self.admin_role_id)
role_already_present = self._list_and_clear_user_roles_on_project(
target_role)
# Do not override roles if `target_role` already exists.
if not role_already_present:
self._create_user_role_on_project(target_role)
except Exception as exp:
with excutils.save_and_reraise_exception():
LOG.exception(exp)
finally:
auth_providers = test_obj.get_auth_providers()
for provider in auth_providers:
provider.clear_auth()
# Fernet tokens are not subsecond aware so sleep to ensure we are
# passing the second boundary before attempting to authenticate.
# Only sleep if a token revocation occurred as a result of role
# overriding. This will optimize test runtime in the case where
# ``[identity] admin_role`` == ``[patrole] rbac_test_role``.
if not role_already_present:
time.sleep(1)
for provider in auth_providers:
provider.set_auth()
def _get_roles_by_name(self):
available_roles = self.admin_roles_client.list_roles()['roles']
role_map = {r['name']: r['id'] for r in available_roles}
LOG.debug('Available roles: %s', list(role_map.keys()))
admin_role_id = role_map.get(CONF.identity.admin_role)
rbac_role_id = role_map.get(CONF.patrole.rbac_test_role)
if not all([admin_role_id, rbac_role_id]):
missing_roles = []
msg = ("Could not find `[patrole] rbac_test_role` or "
"`[identity] admin_role`, both of which are required for "
"RBAC testing.")
if not admin_role_id:
missing_roles.append(CONF.identity.admin_role)
if not rbac_role_id:
missing_roles.append(CONF.patrole.rbac_test_role)
msg += " Following roles were not found: %s." % (
", ".join(missing_roles))
msg += " Available roles: %s." % ", ".join(list(role_map.keys()))
raise rbac_exceptions.RbacResourceSetupFailed(msg)
self.admin_role_id = admin_role_id
self.rbac_role_id = rbac_role_id
def _create_user_role_on_project(self, role_id):
self.admin_roles_client.create_user_role_on_project(
self.project_id, self.user_id, role_id)
def _list_and_clear_user_roles_on_project(self, role_id):
roles = self.admin_roles_client.list_user_roles_on_project(
self.project_id, self.user_id)['roles']
role_ids = [role['id'] for role in roles]
# NOTE(felipemonteiro): We do not use ``role_id in role_ids`` here to
# avoid over-permission errors: if the current list of roles on the
# project includes "admin" and "Member", and we are switching to the
# "Member" role, then we must delete the "admin" role. Thus, we only
# return early if the user's roles on the project are an exact match.
if [role_id] == role_ids:
return True
for role in roles:
self.admin_roles_client.delete_role_from_user_on_project(
self.project_id, self.user_id, role['id'])
return False
class RbacUtilsMixin(object):
"""Mixin class to be used alongside an instance of
:py:class:`tempest.test.BaseTestCase`.
Should be used to perform Patrole class setup for a base RBAC class. Child
classes should not use this mixin.
Example::
class BaseRbacTest(rbac_utils.RbacUtilsMixin, base.BaseV2ComputeTest):
@classmethod
def skip_checks(cls):
super(BaseRbacTest, cls).skip_checks()
cls.skip_rbac_checks()
@classmethod
def setup_clients(cls):
super(BaseRbacTest, cls).setup_clients()
cls.setup_rbac_utils()
"""
# Shows if override_role was called.
__override_role_called = False
# Shows if exception raised during override_role.
__override_role_caught_exc = False
@classmethod
def get_auth_providers(cls):
"""Returns list of auth_providers used within test.
Tests may redefine this method to include their own or third party
client auth_providers.
"""
return [cls.os_primary.auth_provider]
@classmethod
def skip_rbac_checks(cls):
if not CONF.patrole.enable_rbac:
deprecation_msg = ("The `[patrole].enable_rbac` option is "
"deprecated and will be removed in the S "
"release. Patrole tests will always be enabled "
"following installation of the Patrole Tempest "
"plugin. Use a regex to skip tests")
versionutils.report_deprecated_feature(LOG, deprecation_msg)
raise cls.skipException(
'Patrole testing not enabled so skipping %s.' % cls.__name__)
@classmethod
| random_line_split |
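# Illustrative sketch, not part of rbac_utils.py: the exact-match rule applied by
# _list_and_clear_user_roles_on_project above. Only a role list that already equals
# [target_role] short-circuits the switch; any extra role forces a reset.
# `_roles_already_match` is a hypothetical helper shown for clarity.
def _roles_already_match(existing_role_ids, target_role_id):
    return [target_role_id] == existing_role_ids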
|
lib.rs | use std::num::ParseIntError;
use std::{error, iter};
use std::fmt;
use serde_json;
use serde_with::{ serde_as, DefaultOnError };
use crate::ParseError::MissingNode;
use lazy_static::lazy_static; // 1.3.0
use regex::Regex;
use serde::{Deserializer, Deserialize, Serialize, de};
use serde_json::{Error, Value};
use web_sys::console;
use std::collections::HashMap;
use serde::export::PhantomData;
use crate::utils::set_panic_hook;
use std::collections::hash_map::RandomState;
// When the `wee_alloc` feature is enabled, use `wee_alloc` as the global
// allocator.
#[cfg(feature = "wee_alloc")]
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
// Regexes
lazy_static! {
static ref COUNTRY_TAG: Regex = Regex::new(r"^[A-Z]{3}$").unwrap();
static ref PROVINCE_TAG: Regex = Regex::new(r"^[0-9]*$").unwrap();
}
#[wasm_bindgen]
extern {
fn alert(s: &str);
}
struct Product<'a> {
name: &'a str,
world_quantity: f64,
price: f64,
price_history: Vec<f64>,
// assert discovered good true
}
#[derive(Deserialize, Debug, PartialEq)]
pub struct Pop {
/// Presumably the money deposited in the national bank
#[serde(default)]
bank: f64,
/// Presumably the money on-hand
money: f64,
/// The pop size
size: i64,
/// The pop ID
id: i32,
}
impl Pop {
pub fn new(bank: f64, money: f64, size: i64, id: i32) -> Self {
Pop { bank, money, size, id }
}
}
#[serde_as]
#[derive(Deserialize, Debug, PartialEq)]
pub struct Province {
name: String,
#[serde(default)]
owner: Option<String>,
/// Small hack: make the remainder pops.
/// This, shockingly, actually works for any subfield we can think of,
/// so it's actually the magic backtracking we were looking for all along
#[serde(flatten)]
#[serde_as(as="HashMap<DefaultOnError, DefaultOnError>")]
pops: HashMap<String, SingleOrMany<Pop>>,
}
impl Province {
pub fn new(name: String, owner: Option<String>, pops: HashMap<String, SingleOrMany<Pop>, RandomState>) -> Self {
Province { name, owner, pops }
}
}
#[derive(Deserialize, Debug)]
struct Building {
#[serde(rename = "building")]
name: String,
money: f64,
}
#[derive(Deserialize, Debug)]
struct StateID {
// Name in a localization file
id: i32,
#[serde(rename = "type")]
state_type: i32,
}
/// A state owned by a country
#[derive(Deserialize, Debug)]
struct State {
#[serde(rename = "state_buildings", default)]
buildings: SingleOrMany<Building>,
// What are these?
#[serde(default)]
savings: f64,
#[serde(default)]
interest: f64,
id: StateID,
#[serde(rename = "provinces")]
province_ids: SingleOrMany<i32>,
}
#[derive(Deserialize, Debug)]
struct Country {
money: f64,
tax_base: f64,
// Don't count single-state countries rn
#[serde(rename="state", default)]
states: SingleOrMany<State>,
}
#[derive(Deserialize, Debug, PartialEq)]
#[serde(untagged)]
pub enum SingleOrMany<T> {
Single(T),
Many(Vec<T>),
None,
}
impl<T> Default for SingleOrMany<T> {
fn default() -> Self {
SingleOrMany::None
}
}
impl<T> SingleOrMany<T> {
// https://stackoverflow.com/a/30220832/998335
fn values(&self) -> Box<dyn Iterator<Item = &T> + '_> {
match self {
SingleOrMany::None => Box::new(iter::empty()),
SingleOrMany::Single(elem) => Box::new(iter::once(elem)),
SingleOrMany::Many(elems) => Box::new(elems.iter()),
}
}
}
#[wasm_bindgen]
#[derive(Deserialize, Debug)]
pub struct Save {
#[serde(deserialize_with = "vicky_date_serialize_serde")]
date: NaiveDate,
#[serde(rename = "player")]
player_tag: String,
// USA: Country,
/// Hack:
/// we know we want all aliases that are country tags,
/// so we'll accept all all uppercase sequences of characters of size two or three
/// (26^2 + 26^3) = 18252. Not great. I actually tried this and it killed the compiler. Sad!
/// The problem is around line 1168 on serde-rs's de.rs. It does explicit checking, not pattern
/// matching against valid rust patterns (we could use that to our advantage as we did with the
/// PEG parser). Additionally, it wouldn't populate a hashmap like we want - just a vec.
/// This is surmountable (can infer country from other tags) but irrelevant because we can't actually do that.
/// Solution: create an artificial countries tag somewhere else to do what we want.
countries: HashMap<String, Country>,
/// Same hack as countries
provinces: HashMap<i32, Province>,
}
#[wasm_bindgen]
impl Save {
pub fn js_forex_position(&self) -> D3Node {
let mut generator = (0u64..);
let forex = self.forex_position();
D3Node::parent(generator.nth(0).unwrap(), "Forex".to_string(),
forex.iter().map(|(countryname, (treasury, statewealth))| {
D3Node::parent(generator.nth(0).unwrap(), countryname.to_string(),
vec![
D3Node::leaf(generator.nth(0).unwrap(), "Treasury".to_string(), *treasury),
D3Node::parent(generator.nth(0).unwrap(), "States".to_string(),
statewealth.iter().map(|(state_id, (factories, provinces))| {
D3Node::parent(generator.nth(0).unwrap(), state_id.to_string(), vec![
D3Node::parent(generator.nth(0).unwrap(), "Factories".to_string(), factories.iter().map(|(x, y)|D3Node::leaf(generator.nth(0).unwrap(), x.to_string(), *y)).collect()),
D3Node::parent(generator.nth(0).unwrap(), "Provinces".to_string(), provinces.iter().map(|(province, pop)| {
D3Node::parent(generator.nth(0).unwrap(), province.to_string(), pop.iter().map(|(title, wealth)| {
D3Node::leaf(generator.nth(0).unwrap(), title.to_string(), *wealth)
}).collect())
}).collect())
])
}).collect())
]
)
}).collect()
)
}
}
#[wasm_bindgen]
#[derive(Serialize, Clone, Debug)]
pub struct D3Node {
id: u64,
name: String,
#[serde(flatten)]
atom: D3Atomic,
}
impl D3Node {
// For tests
pub fn parent(id: u64, name: String, children: Vec<D3Node>) -> Self {
D3Node { id, name, atom: D3Atomic::Parent{ children } }
}
pub fn leaf(id: u64, name: String, atom: f64) -> Self {
D3Node { id, name, atom: D3Atomic::Leaf{ size: atom } }
}
pub fn atom(&self) -> &D3Atomic {
&self.atom
}
pub fn name(&self) -> &str {
&self.name
}
// Actually useful
pub fn children_value(&self) -> f64 {
match &self.atom {
D3Atomic::Parent { children } => children.iter().map(D3Node::children_value).sum(),
D3Atomic::Leaf { size: loc } => *loc,
}
}
pub fn cauterize(&self, depth: u32) -> D3Node {
if depth == 0 {
D3Node::leaf(self.id, self.name.to_string(), self.children_value())
} else {
match &self.atom {
D3Atomic::Parent { children } => {
// https://github.com/plouc/nivo/issues/942
// For now, remove anything < 1% of the total
let stream = children.iter().map(|x| x.cauterize(depth - 1)).collect::<Vec<D3Node>>();
let values = stream.iter().map(|x| x.children_value()).collect::<Vec<f64>>();
let total: f64 = values.iter().sum();
let mut keptTotal: f64 = 0. | use peg;
use chrono::NaiveDate; | random_line_split |
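// Illustrative sketch, not part of lib.rs: the behaviour expected from the untagged
// SingleOrMany<T> enum defined above when serde_json input is sometimes a single
// value and sometimes an array.
fn single_or_many_demo() {
    let one: SingleOrMany<i32> = serde_json::from_str("3").unwrap();
    let many: SingleOrMany<i32> = serde_json::from_str("[1, 2, 3]").unwrap();
    assert_eq!(one, SingleOrMany::Single(3));
    assert_eq!(many, SingleOrMany::Many(vec![1, 2, 3]));
}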
|
lib.rs | )
}
}
}
// Everything from the end of the keypad down to depth, as truncated
// For forex -> chi -> states -> etc
// keypath = [CHI], depth = 1 => chi at root, all of the states under it, and nothing else
fn subtree_for_node<T: AsRef<str>>(&self, key_path: &[T], depth: u32) -> Result<D3Node, String> {
match key_path.first() {
None => {
// Navigate down depth
Ok(self.cauterize(depth))
}
Some(name) => {
// Navigate down keypath
let name = name.as_ref();
match &self.atom {
D3Atomic::Parent {children: child} => {
match child.iter().find(|x| x.name.as_str() == name) {
Some(element) => element.subtree_for_node(&key_path[1..], depth),
None => Err(format!("Expected to find {} in {} (found {:?})", name, &self.name, child))
}
}
_ => Err(format!("Expected {} to be a parent", &self.name))
}
}
}
}
}
#[wasm_bindgen]
impl D3Node {
pub fn js_subtree_for_node(&self, key_path: JsValue, depth: u32) -> Result<JsValue, JsValue> {
let keypath = key_path.into_serde::<Vec<String>>().map_err(|x| JsValue::from(x.to_string()))?;
let subtree = self.subtree_for_node(&keypath, depth).map_err(|x| JsValue::from(x.to_string()))?;
JsValue::from_serde(&subtree).map_err(|x| JsValue::from(x.to_string()))
}
}
#[derive(Serialize, Clone, Debug)]
#[serde(untagged)]
pub enum D3Atomic {
Parent { children: Vec<D3Node> },
Leaf { size: f64 },
}
impl Save {
/// Just return country -> treasury, wealth by state (ID is -> wealth by factory / pop (per province)
pub fn forex_position(&self) -> HashMap<&str, (f64, HashMap<i32, (HashMap<&str, f64>, HashMap<&str, HashMap<String, f64>>)>)> {
self.countries.iter().map(|(name, country)| {
(name.as_str(), (country.money, country.states.values()
.map(|state| {
(state.id.id , (
state.buildings.values().map(|building| (building.name.as_str(), building.money)).collect::<HashMap<&str, f64>>(),
state.province_ids.values()
.map(|x| self.provinces.get(x).unwrap())
.filter(|x| x.owner.as_ref().map(|unwrapper| unquote(unwrapper) == name).unwrap_or(false))
.map(|x| {
(x.name.as_str(), x.pops.iter()
.flat_map(|(title, pop)| {
pop.values().enumerate().map(move |(index, x)| (numerate(index, title.to_string()), x.bank + x.money))
})
.collect::<HashMap<String, f64>>())
}).collect::<HashMap<&str, HashMap<String, f64>>>()
))
}
).collect()))
}).collect()
}
}
fn numerate(index: usize, thing: String) -> String {
if index == 0 {
thing
} else {
thing + (index + 1).to_string().as_str()
}
}
fn vicky_date_serialize_serde<'de, D>(
deserializer: D,
) -> Result<NaiveDate, D::Error>
where
D: Deserializer<'de>,
{
let s = String::deserialize(deserializer)?;
parse_victoria_date(&*s).map_err(serde::de::Error::custom)
}
#[derive(Debug, Eq, PartialEq, Clone)]
pub enum ParseError {
InvalidDate,
Integer(ParseIntError),
MissingNode,
}
impl fmt::Display for ParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ParseError::MissingNode => write!(f, "Missing node"),
ParseError::InvalidDate =>
write!(f, "Invalid date"),
// The wrapped error contains additional information and is available
// via the source() method.
ParseError::Integer(ref e) =>
e.fmt(f)
//write!(f, "the provided string could not be parsed as int"),
}
}
}
impl error::Error for ParseError {
fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match *self {
ParseError::InvalidDate | ParseError::MissingNode => None,
// The cause is the underlying implementation error type. Is implicitly
// cast to the trait object `&error::Error`. This works because the
// underlying type already implements the `Error` trait.
ParseError::Integer(ref e) => Some(e),
}
}
}
// Implement the conversion from `ParseIntError` to `DoubleError`.
// This will be automatically called by `?` if a `ParseIntError`
// needs to be converted into a `DoubleError`.
impl From<ParseIntError> for ParseError {
fn from(err: ParseIntError) -> ParseError {
ParseError::Integer(err)
}
}
// Until rust gets negative slice semantics, have to make do with this
pub fn unquote(thing: &str) -> &str {
assert_eq!(thing.chars().nth(0), Some('"'));
assert_eq!(thing.chars().nth(thing.len() - 1), Some('"'));
return &thing[1 ..= thing.len() - 2];
}
pub fn parse_victoria_date(text: &str) -> Result<NaiveDate, ParseError> {
let text = unquote(text);
let textiter = text.char_indices();
let dots: Vec<usize> = textiter.filter_map(|(x, y)| match y {
'.' => Some(x),
_ => None,
}).take(2).collect();
match (text[0..dots[0]].parse(),
text[(dots[0] + 1)..dots[1]].parse(),
text[(dots[1] + 1)..].parse(),
) {
(Ok(y), Ok(m), Ok(d)) => {
match NaiveDate::from_ymd_opt(y, m, d) {
Some(date) => Ok(date),
None => Err(ParseError::InvalidDate),
}
},
(y, m, d) => {
Err([y.err(), m.err(), d.err()]
.iter()
.find_map(|x| x.clone())
.map_or(ParseError::InvalidDate, |x| ParseError::Integer(x)))
},
}
}
impl Save {
pub fn new(list: Node) -> Result<Save, Error> {
serde_json::from_value(list.to_json())
}
}
// https://stackoverflow.com/questions/32571441/what-is-the-difference-between-storing-a-vec-vs-a-slice
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Node<'a> {
Line((&'a str, Vec<Node<'a >>)),
SingleElementLine((&'a str, &'a str)),
List(Vec<Node<'a >>),
Leaf(&'a str),
}
impl<'a> Node<'a> {
fn insert_or_listify(name: &'a str, object: &serde_json::Value, map: &mut serde_json::Map<String, serde_json::Value>, seen: &mut Vec<&'a str>) {
if let Some(prior) = map.get(name) {
// if we already have an entry in the map for this element,
// convert it to a list of this element with the name as a key
// for now, means we can't invert unless we make this nicer
if seen.contains(&name) {
// append to list
if let Some(serde_json::Value::Array(elements)) = map.get_mut(name) {
elements.push(object.clone());
} else {
unreachable!()
}
} else {
// create list
seen.push(name);
map.insert(name.to_string(), serde_json::Value::Array(vec![prior.clone(), object.clone()]));
}
} else {
map.insert(name.to_string(), object.clone());
}
}
/// In-place modify to be parseable.
/// See the comment above for countries for rationale.
/// Call on root.
pub fn raise(&mut self) {
if let Node::List(nodes) = self {
// Get the first country index
for (name, tag) in [("provinces", &*PROVINCE_TAG), ("countries", &*COUNTRY_TAG)].iter() {
if let Some(country_index) = nodes.iter().position(|x| x.is_matching(tag)) {
// Drain all countries
let country_list: Vec<Node> = nodes.drain_filter(|x| x.is_matching(tag)).collect();
nodes.insert(country_index, Node::Line((name, country_list)));
}
}
}
}
fn is_matching(&self, re: &Regex) -> bool | {
match self {
Node::Line((name, _)) => re.is_match(name),
_ => false,
}
} | identifier_body |
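// Illustrative sketch, not part of lib.rs: parse_victoria_date above expects the
// quoted "Y.M.D" form used in Victoria II saves; unquote strips the quotes first.
fn date_demo() {
    let d = parse_victoria_date("\"1936.1.1\"").unwrap();
    assert_eq!(d, chrono::NaiveDate::from_ymd_opt(1936, 1, 1).unwrap());
}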
|
lib.rs | fn | (&self) -> Box<dyn Iterator<Item = &T> + '_> {
match self {
SingleOrMany::None => Box::new(iter::empty()),
SingleOrMany::Single(elem) => Box::new(iter::once(elem)),
SingleOrMany::Many(elems) => Box::new(elems.iter()),
}
}
}
#[wasm_bindgen]
#[derive(Deserialize, Debug)]
pub struct Save {
#[serde(deserialize_with = "vicky_date_serialize_serde")]
date: NaiveDate,
#[serde(rename = "player")]
player_tag: String,
// USA: Country,
/// Hack:
/// we know we want all aliases that are country tags,
/// so we'll accept all all uppercase sequences of characters of size two or three
/// (26^2 + 26^3) = 18252. Not great. I actually tried this and it killed the compiler. Sad!
/// The problem is around line 1168 on serde-rs's de.rs. It does explicit checking, not pattern
/// matching against valid rust patterns (we could use that to our advantage as we did with the
/// PEG parser). Additionally, it wouldn't populate a hashmap like we want - just a vec.
/// This is surmountable (can infer country from other tags) but irrelevant because we can't actually do that.
/// Solution: create an artificial countries tag somewhere else to do what we want.
countries: HashMap<String, Country>,
/// Same hack as countries
provinces: HashMap<i32, Province>,
}
#[wasm_bindgen]
impl Save {
pub fn js_forex_position(&self) -> D3Node {
let mut generator = (0u64..);
let forex = self.forex_position();
D3Node::parent(generator.nth(0).unwrap(), "Forex".to_string(),
forex.iter().map(|(countryname, (treasury, statewealth))| {
D3Node::parent(generator.nth(0).unwrap(), countryname.to_string(),
vec![
D3Node::leaf(generator.nth(0).unwrap(), "Treasury".to_string(), *treasury),
D3Node::parent(generator.nth(0).unwrap(), "States".to_string(),
statewealth.iter().map(|(state_id, (factories, provinces))| {
D3Node::parent(generator.nth(0).unwrap(), state_id.to_string(), vec![
D3Node::parent(generator.nth(0).unwrap(), "Factories".to_string(), factories.iter().map(|(x, y)|D3Node::leaf(generator.nth(0).unwrap(), x.to_string(), *y)).collect()),
D3Node::parent(generator.nth(0).unwrap(), "Provinces".to_string(), provinces.iter().map(|(province, pop)| {
D3Node::parent(generator.nth(0).unwrap(), province.to_string(), pop.iter().map(|(title, wealth)| {
D3Node::leaf(generator.nth(0).unwrap(), title.to_string(), *wealth)
}).collect())
}).collect())
])
}).collect())
]
)
}).collect()
)
}
}
#[wasm_bindgen]
#[derive(Serialize, Clone, Debug)]
pub struct D3Node {
id: u64,
name: String,
#[serde(flatten)]
atom: D3Atomic,
}
impl D3Node {
// For tests
pub fn parent(id: u64, name: String, children: Vec<D3Node>) -> Self {
D3Node { id, name, atom: D3Atomic::Parent{ children } }
}
pub fn leaf(id: u64, name: String, atom: f64) -> Self {
D3Node { id, name, atom: D3Atomic::Leaf{ size: atom } }
}
pub fn atom(&self) -> &D3Atomic {
&self.atom
}
pub fn name(&self) -> &str {
&self.name
}
// Actually useful
pub fn children_value(&self) -> f64 {
match &self.atom {
D3Atomic::Parent { children } => children.iter().map(D3Node::children_value).sum(),
D3Atomic::Leaf { size: loc } => *loc,
}
}
pub fn cauterize(&self, depth: u32) -> D3Node {
if depth == 0 {
D3Node::leaf(self.id, self.name.to_string(), self.children_value())
} else {
match &self.atom {
D3Atomic::Parent { children } => {
// https://github.com/plouc/nivo/issues/942
// For now, remove anything < 1% of the total
let stream = children.iter().map(|x| x.cauterize(depth - 1)).collect::<Vec<D3Node>>();
let values = stream.iter().map(|x| x.children_value()).collect::<Vec<f64>>();
let total: f64 = values.iter().sum();
let mut keptTotal: f64 = 0.0;
let mut kept: Vec<D3Node> = stream.iter().enumerate().filter(|(idx, _)| values[*idx] > (total * 0.01)).map(|(idx, y)| {
keptTotal += values[idx];
y.clone()
}).collect();
// kept.push(D3Node::leaf(depth as u64 + 1 * keptTotal as u64, "Other".to_string(), keptTotal));
D3Node::parent(self.id, self.name.to_string(), kept)
}
// gdi I can't borrow anything 'cause of that one stupid int parse
D3Atomic::Leaf { size: loc } => D3Node::leaf(self.id, self.name.to_string(), *loc )
}
}
}
// Everything from the end of the keypad down to depth, as truncated
// For forex -> chi -> states -> etc
// keypath = [CHI], depth = 1 => chi at root, all of the states under it, and nothing else
fn subtree_for_node<T: AsRef<str>>(&self, key_path: &[T], depth: u32) -> Result<D3Node, String> {
match key_path.first() {
None => {
// Navigate down depth
Ok(self.cauterize(depth))
}
Some(name) => {
// Navigate down keypath
let name = name.as_ref();
match &self.atom {
D3Atomic::Parent {children: child} => {
match child.iter().find(|x| x.name.as_str() == name) {
Some(element) => element.subtree_for_node(&key_path[1..], depth),
None => Err(format!("Expected to find {} in {} (found {:?})", name, &self.name, child))
}
}
_ => Err(format!("Expected {} to be a parent", &self.name))
}
}
}
}
}
#[wasm_bindgen]
impl D3Node {
pub fn js_subtree_for_node(&self, key_path: JsValue, depth: u32) -> Result<JsValue, JsValue> {
let keypath = key_path.into_serde::<Vec<String>>().map_err(|x| JsValue::from(x.to_string()))?;
let subtree = self.subtree_for_node(&keypath, depth).map_err(|x| JsValue::from(x.to_string()))?;
JsValue::from_serde(&subtree).map_err(|x| JsValue::from(x.to_string()))
}
}
#[derive(Serialize, Clone, Debug)]
#[serde(untagged)]
pub enum D3Atomic {
Parent { children: Vec<D3Node> },
Leaf { size: f64 },
}
impl Save {
/// Just return country -> treasury, wealth by state (ID is -> wealth by factory / pop (per province)
pub fn forex_position(&self) -> HashMap<&str, (f64, HashMap<i32, (HashMap<&str, f64>, HashMap<&str, HashMap<String, f64>>)>)> {
self.countries.iter().map(|(name, country)| {
(name.as_str(), (country.money, country.states.values()
.map(|state| {
(state.id.id , (
state.buildings.values().map(|building| (building.name.as_str(), building.money)).collect::<HashMap<&str, f64>>(),
state.province_ids.values()
.map(|x| self.provinces.get(x).unwrap())
.filter(|x| x.owner.as_ref().map(|unwrapper| unquote(unwrapper) == name).unwrap_or(false))
.map(|x| {
(x.name.as_str(), x.pops.iter()
.flat_map(|(title, pop)| {
pop.values().enumerate().map(move |(index, x)| (numerate(index, title.to_string()), x.bank + x.money))
})
.collect::<HashMap<String, f64>>())
}).collect::<HashMap<&str, HashMap<String, f64>>>()
))
}
).collect()))
}).collect()
}
}
fn numerate(index: usize, thing: String) -> String {
if index == 0 {
thing
} else {
thing + | values | identifier_name |
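// Illustrative sketch, not part of lib.rs: the key-naming convention produced by the
// numerate helper above when several pops share the same key in a province.
fn numerate_demo() {
    assert_eq!(numerate(0, "farmers".to_string()), "farmers");
    assert_eq!(numerate(1, "farmers".to_string()), "farmers2");
}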
lib.rs | ize(depth - 1)).collect::<Vec<D3Node>>();
let values = stream.iter().map(|x| x.children_value()).collect::<Vec<f64>>();
let total: f64 = values.iter().sum();
let mut keptTotal: f64 = 0.0;
let mut kept: Vec<D3Node> = stream.iter().enumerate().filter(|(idx, _)| values[*idx] > (total * 0.01)).map(|(idx, y)| {
keptTotal += values[idx];
y.clone()
}).collect();
// kept.push(D3Node::leaf(depth as u64 + 1 * keptTotal as u64, "Other".to_string(), keptTotal));
D3Node::parent(self.id, self.name.to_string(), kept)
}
// gdi I can't borrow anything 'cause of that one stupid int parse
D3Atomic::Leaf { size: loc } => D3Node::leaf(self.id, self.name.to_string(), *loc )
}
}
}
// Everything from the end of the keypad down to depth, as truncated
// For forex -> chi -> states -> etc
// keypath = [CHI], depth = 1 => chi at root, all of the states under it, and nothing else
fn subtree_for_node<T: AsRef<str>>(&self, key_path: &[T], depth: u32) -> Result<D3Node, String> {
match key_path.first() {
None => {
// Navigate down depth
Ok(self.cauterize(depth))
}
Some(name) => {
// Navigate down keypath
let name = name.as_ref();
match &self.atom {
D3Atomic::Parent {children: child} => {
match child.iter().find(|x| x.name.as_str() == name) {
Some(element) => element.subtree_for_node(&key_path[1..], depth),
None => Err(format!("Expected to find {} in {} (found {:?})", name, &self.name, child))
}
}
_ => Err(format!("Expected {} to be a parent", &self.name))
}
}
}
}
}
#[wasm_bindgen]
impl D3Node {
pub fn js_subtree_for_node(&self, key_path: JsValue, depth: u32) -> Result<JsValue, JsValue> {
let keypath = key_path.into_serde::<Vec<String>>().map_err(|x| JsValue::from(x.to_string()))?;
let subtree = self.subtree_for_node(&keypath, depth).map_err(|x| JsValue::from(x.to_string()))?;
JsValue::from_serde(&subtree).map_err(|x| JsValue::from(x.to_string()))
}
}
#[derive(Serialize, Clone, Debug)]
#[serde(untagged)]
pub enum D3Atomic {
Parent { children: Vec<D3Node> },
Leaf { size: f64 },
}
impl Save {
/// Just return country -> treasury, wealth by state (ID is -> wealth by factory / pop (per province)
pub fn forex_position(&self) -> HashMap<&str, (f64, HashMap<i32, (HashMap<&str, f64>, HashMap<&str, HashMap<String, f64>>)>)> {
self.countries.iter().map(|(name, country)| {
(name.as_str(), (country.money, country.states.values()
.map(|state| {
(state.id.id , (
state.buildings.values().map(|building| (building.name.as_str(), building.money)).collect::<HashMap<&str, f64>>(),
state.province_ids.values()
.map(|x| self.provinces.get(x).unwrap())
.filter(|x| x.owner.as_ref().map(|unwrapper| unquote(unwrapper) == name).unwrap_or(false))
.map(|x| {
(x.name.as_str(), x.pops.iter()
.flat_map(|(title, pop)| {
pop.values().enumerate().map(move |(index, x)| (numerate(index, title.to_string()), x.bank + x.money))
})
.collect::<HashMap<String, f64>>())
}).collect::<HashMap<&str, HashMap<String, f64>>>()
))
}
).collect()))
}).collect()
}
}
fn numerate(index: usize, thing: String) -> String {
if index == 0 {
thing
} else {
thing + (index + 1).to_string().as_str()
}
}
fn vicky_date_serialize_serde<'de, D>(
deserializer: D,
) -> Result<NaiveDate, D::Error>
where
D: Deserializer<'de>,
{
let s = String::deserialize(deserializer)?;
parse_victoria_date(&*s).map_err(serde::de::Error::custom)
}
#[derive(Debug, Eq, PartialEq, Clone)]
pub enum ParseError {
InvalidDate,
Integer(ParseIntError),
MissingNode,
}
impl fmt::Display for ParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ParseError::MissingNode => write!(f, "Missing node"),
ParseError::InvalidDate =>
write!(f, "Invalid date"),
// The wrapped error contains additional information and is available
// via the source() method.
ParseError::Integer(ref e) =>
e.fmt(f)
//write!(f, "the provided string could not be parsed as int"),
}
}
}
impl error::Error for ParseError {
fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match *self {
ParseError::InvalidDate | ParseError::MissingNode => None,
// The cause is the underlying implementation error type. Is implicitly
// cast to the trait object `&error::Error`. This works because the
// underlying type already implements the `Error` trait.
ParseError::Integer(ref e) => Some(e),
}
}
}
// Implement the conversion from `ParseIntError` to `DoubleError`.
// This will be automatically called by `?` if a `ParseIntError`
// needs to be converted into a `DoubleError`.
impl From<ParseIntError> for ParseError {
fn from(err: ParseIntError) -> ParseError {
ParseError::Integer(err)
}
}
// Until rust gets negative slice semantics, have to make do with this
pub fn unquote(thing: &str) -> &str {
assert_eq!(thing.chars().nth(0), Some('"'));
assert_eq!(thing.chars().nth(thing.len() - 1), Some('"'));
return &thing[1 ..= thing.len() - 2];
}
pub fn parse_victoria_date(text: &str) -> Result<NaiveDate, ParseError> {
let text = unquote(text);
let textiter = text.char_indices();
let dots: Vec<usize> = textiter.filter_map(|(x, y)| match y {
'.' => Some(x),
_ => None,
}).take(2).collect();
match (text[0..dots[0]].parse(),
text[(dots[0] + 1)..dots[1]].parse(),
text[(dots[1] + 1)..].parse(),
) {
(Ok(y), Ok(m), Ok(d)) => {
match NaiveDate::from_ymd_opt(y, m, d) {
Some(date) => Ok(date),
None => Err(ParseError::InvalidDate),
}
},
(y, m, d) => {
Err([y.err(), m.err(), d.err()]
.iter()
.find_map(|x| x.clone())
.map_or(ParseError::InvalidDate, |x| ParseError::Integer(x)))
},
}
}
impl Save {
pub fn new(list: Node) -> Result<Save, Error> {
serde_json::from_value(list.to_json())
}
}
// https://stackoverflow.com/questions/32571441/what-is-the-difference-between-storing-a-vec-vs-a-slice
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Node<'a> {
Line((&'a str, Vec<Node<'a >>)),
SingleElementLine((&'a str, &'a str)),
List(Vec<Node<'a >>),
Leaf(&'a str),
}
impl<'a> Node<'a> {
fn insert_or_listify(name: &'a str, object: &serde_json::Value, map: &mut serde_json::Map<String, serde_json::Value>, seen: &mut Vec<&'a str>) {
if let Some(prior) = map.get(name) {
// if we already have an entry in the map for this element,
// convert it to a list of this element with the name as a key
// for now, means we can't invert unless we make this nicer
if seen.contains(&name) {
// append to list
if let Some(serde_json::Value::Array(elements)) = map.get_mut(name) {
elements.push(object.clone());
} else {
unreachable!()
}
} else | {
// create list
seen.push(name);
map.insert(name.to_string(), serde_json::Value::Array(vec![prior.clone(), object.clone()]));
} | conditional_block |
|
prepareApply-submit-list.component.ts | }
taxrateTotalFun=function(){// compute the tax-rate-related totals
if (typeof(this.rate) == "undefined" || this.rate == null) {
this.windowService.alert({ message: "税率未选择", type: "warn" });
} else {
this._purchaseData.taxAmount = Number((this._purchaseData.untaxAmount * (1 + Number(this.rate))).toFixed(2));// total amount including tax
}
}
currencyDiffeFun=function(){// when the currency changes, re-determine how to calculate the totals
if(!this.tempAmountPrice){// no total amount, skip calculation
this.onPurchaseDataChange.emit(this._purchaseData);
return;
}
if(this.IsRMB){// RMB (CNY) case
this._purchaseData.untaxAmount=this.tempAmountPrice;// untaxed total amount
this.taxrateTotalFun();
this.onPurchaseDataChange.emit(this._purchaseData);
}else{// foreign currency case
this._purchaseData.foreignAmount=this.tempAmountPrice;// foreign currency total amount
this.shareMethodService.getRateConvertPrice(this._purchaseData.foreignAmount,this.currency)
.then(data => {// compute the total using the latest exchange rate
this._purchaseData.untaxAmount = data;
this._purchaseData.taxAmount = data;
this.onPurchaseDataChange.emit(this._purchaseData);
});
}
}
calculateTotalTax(changeType) {// compute the total price
// Because this method includes an async request (getRateConvertPrice), the emit of the result data is written into every branch of this method, so callers can skip emitting after calling it
switch(changeType){// calculate according to the kind of change
case "rateChange":
if(this.IsRMB){
this.taxrateTotalFun();
}
this.onPurchaseDataChange.emit(this._purchaseData);
break;
case "currencyChange":
this.currencyDiffeFun();
break;
case "numberChange":
this.numAmount = 0;
this._purchaseData.untaxAmount = 0;
this._purchaseData.taxAmount = 0;
this._purchaseData.foreignAmount = 0;
this.tempAmountPrice=0;
this._purchaseData.procurementList.forEach(item => {
if (item.Count) {
this.numAmount = Number(this.numAmount + Number(item.Count));// total material quantity
}
if (item.Count && item.Price) {
this.tempAmountPrice = Number((this.tempAmountPrice + item.Count * item.Price).toFixed(2));
}
})
this.currencyDiffeFun();
break;
}
// this.onAmountMoney.emit();// when the amount changes, request the new approver sequence
}
delProcurementItem(index) {// delete one purchase-list row
let reCount=true;
if(!this._purchaseData.procurementList[index]["Count"]
&& !this._purchaseData.procurementList[index]["Price"]){// if the deleted row has no quantity or unit price, no recalculation is needed
reCount=false;
}
if(this._purchaseData.procurementList[index].checked){
this.checkedNum--;// one fewer selected item
if(!this.checkedNum){// the last selected item was removed
this.fullChecked = false;
this.fullCheckedIndeterminate = false;
}
}
this._purchaseData.procurementList.splice(index, 1);
if(reCount){
this.calculateTotalTax("numberChange");
}else{
this.onPurchaseDataChange.emit(this._purchaseData);
}
this._purchaseData.procurementList=JSON.parse(JSON.stringify(this._purchaseData.procurementList));
}
addProcurementItem() {// add one purchase-list row
this._purchaseData.procurementList.push(new PurchaseRequisitionDetailsList());
if(this.hasContract && this.contractListLength==1){// if the contract list has only one entry, select it directly
let len=this._purchaseData.procurementList.length;
this._purchaseData.procurementList[len-1].MaterialSource=this.contractList[0].SC_Code;
}
if(this.fullChecked){// if everything was checked, switch to indeterminate
this.fullChecked=false;
this.fullCheckedIndeterminate = true;
}
this.onPurchaseDataChange.emit(this._purchaseData);
}
downloadTpl(){// download the purchase-list template
if(this.hasContract){
window.open(dbomsPath+'assets/downloadtpl/预下有合同采购申请-采购清单.xlsx' );
}else{
window.open(dbomsPath+'assets/downloadtpl/预下无合同采购申请-采购清单.xlsx' );
}
}
materialTraceno(index,no){// validate the demand tracking number
if(!no){// skip validation when empty
return;
}
let validName="traceno"+index;
if(this.purchaseListForm.controls[validName].invalid){// format validation failed
this.windowService.alert({ message: '只允许输入数字和26位英文字符', type: 'success' });
return;
}
this._purchaseData.procurementList[index]["traceno"] = this._purchaseData.procurementList[index]["traceno"].toUpperCase();// convert to uppercase
this.shareMethodService.checkApplyTracenoExist(this._purchaseData.procurementList[index]["traceno"],this.purchaseRequisitioIid)
.then(data => {
if (!data) {
this.windowService.alert({ message:"该需求跟踪号已经存在,请重新输入", type: 'fail' });
this._purchaseData.procurementList[index]["traceno"]="";
}else{
this.onPurchaseDataChange.emit(this._purchaseData);
}
});
}
getMaterialData(index, id) {// look up info by material number
if (id) {
this._purchaseData.procurementList[index].MaterialNumber=id.trim();// trim leading/trailing whitespace
this.getMaterial(this._purchaseData.procurementList[index].MaterialNumber).then(response => {
if (response.Data["MAKTX_ZH"]) {// get the description
this._purchaseData.procurementList[index].MaterialDescription = response.Data["MAKTX_ZH"];
} else {
this._purchaseData.procurementList[index].MaterialDescription = "";
this.windowService.alert({ message: "该物料不存在", type: "warn" });
}
this.onPurchaseDataChange.emit(this._purchaseData);
})
}else{// clearing the material also needs to emit the data
this._purchaseData.procurementList[index].MaterialDescription = "";
this.onPurchaseDataChange.emit(this._purchaseData);
}
}
isLoading(e){// show the loading state during batch upload
if(e){
this.loading=true;
}
}
uploadPurchase(e) {// batch upload callback
console.log(e);
if (e.Result) {
this.matchContractPrompt=false;
let result = e.Data;
if(result && result.length && this.fullChecked){// if everything was checked, switch to indeterminate
this.fullChecked=false;
this.fullCheckedIndeterminate = true;
}
result.forEach(item => {
if(this.hasContract){// matching is needed when there is a contract
item["MaterialSource"]=this.matchContract(item["MaterialSource"]);
}
item.Price=Number(item.Price).toFixed(2);
item.Amount=Number(item.Amount).toFixed(2);
item.Batch=item.Batch.toUpperCase();// convert to uppercase
delete item.AddTime;delete item.ID;
delete item.purchaserequisitionid;delete item.traceno;
});
let newArr=this._purchaseData.procurementList.concat(result);
this._purchaseData.procurementList=newArr;// show the list from the Excel file on the page
this.calculateTotalTax("numberChange");
this.onAmountMoney.emit();// emit event to fetch the new approver sequence
} else {
this.windowService.alert({ message: e.Message, type: "warn" });
}
this.loading=false;
}
showOrder() {// preview the purchase list
console.log(this._purchaseData);
let modalData = {
procurementList: this._purchaseData.procurementList,
untaxAmount: this._purchaseData.untaxAmount,
factory: this.factory,
vendor: this.vendor
}
this.applyListModal.show(modalData);
}
backPurchaseData() {// emit the data back
this.onPurchaseDataChange.emit(this._purchaseData);
}
deleteList(){// batch-delete purchase-list rows
if(!this.checkedNum){
this.windowService.alert({ message: "还未选择项", type: "warn" });
return;
}
if(this.fullChecked){// delete everything when all rows are checked
this._purchaseData.procurementList=[];
this.tempAmountPrice=0;
this.numAmount = 0;
this._purchaseData.untaxAmount = 0;
this._purchaseData.taxAmount = 0;
this._purchaseData.foreignAmount = 0;
this.fullChecked=false;
this.fullCheckedIndeterminate = false;
this.onPurchaseDataChange.emit(this._purchaseData);
return;
}
this.fullCheckedIndeterminate = false;
let i; let item;
let len = this._purchaseData.procurementList.length;
for (i = 0; i < len; i++) {
item=this._purchaseData.procurementList[i];
if (item.checked === true) {
this._purchaseData.procurementList.splice(i, 1);
this._purchaseData.procurementList=JSON.parse(JSON.stringify( this._purchaseData.procurementList));// 重新物料列表,用来重置form表单的绑定项 | len--; | random_line_split |
|
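// Illustrative sketch, not part of the component: the two-decimal rounding and tax
// formula used by calculateTotal and taxrateTotalFun above, shown with made-up values.
const untaxAmount = Number((3 * 19.999).toFixed(2));              // 60
const taxAmount = Number((untaxAmount * (1 + 0.13)).toFixed(2));  // 67.8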
prepareApply-submit-list.component.ts | ;// currency
@Input() purchaseRequisitioIid:'';// purchase requisition id
@Input() isSubmit=false;// whether this is a submit
@Input() set setHasContract(value){// read the pre-placed purchase requisition type: with contract = true, without contract = false
this.hasContract=value;
if(value){// has contract
this.clospanNum=12;
}else{
this.clospanNum=11;
}
}
@Input() set listNumberAmount(value){// total quantity of the purchase list
if(value){// total quantity passed in when editing
this.numAmount=value;
}
};
@Input() set purchaseData(value) {// read the purchase list data
this._purchaseData = value;
}
@Output() onPurchaseDataChange = new EventEmitter<any>();// when the purchase list info changes
@Output() onPurchaseFormValidChange = new EventEmitter<any>();// when the purchase list validation changes
@Output() onAmountMoney = new EventEmitter();// when the untaxed amount changes
@Input() public orderType:string;// order type
@Input() public istoerp:boolean;// whether to create an ERP order
constructor(private http: Http,
private windowService: WindowService,
private xcModalService: XcModalService,
private shareMethodService: ShareMethodService,
private dbHttp: HttpServer,
private changeDetectorRef:ChangeDetectorRef) { }
ngOnInit() {
if(this.hasContract){
this.contractList = JSON.parse(window.localStorage.getItem("prepareContractList"));// fetch the contracts
console.log("预下采购合同:");
console.log(this.contractList);
if(this.contractList && this.contractList.length){// save the contract list length
this.contractListLength=this.contractList.length;
this.upLoadData = {
ContractCount: this.contractList.length
}
}
}
this.applyListModal = this.xcModalService.createModal(ApplyListModalComponent);// purchase-list preview modal
}
ngOnChanges(changes: SimpleChanges){
if(changes["rate"]){
if(changes["rate"].currentValue!=changes["rate"].previousValue){// tax rate changed
if (typeof(changes["rate"].currentValue) == "undefined" || changes["rate"].currentValue == null) {// changed to no value
this._purchaseData.taxAmount=0;
this.onPurchaseDataChange.emit(this._purchaseData);
} else {
this.calculateTotalTax("rateChange");// recalculate
}
}
}
if(changes["currency"]){
if(changes["currency"].currentValue!=changes["currency"].previousValue){// currency changed
this.calculateTotalTax("currencyChange");// recalculate
}
}
}
ngDoCheck() {
if (this.purchaseListForm.valid != this.beforePurchaseFormValid) {// emit when the form validity changes
this.beforePurchaseFormValid = this.purchaseListForm.valid;
this.onPurchaseFormValidChange.emit(this.purchaseListForm.valid);
}
if(this._purchaseData.procurementList && this._purchaseData.procurementList.length>=10){// width adjustment when the scrollbar appears
$(".w40").addClass("w46");
$(".addApp-ch-before tbody").addClass("auto");
}else{
$(".w40").removeClass("w46");
$(".addApp-ch-before tbody").removeClass("auto");
}
if (this.hasContract &&
JSON.stringif | }
}
CheckIndeterminate(v) {// check whether everything is selected
this.fullCheckedIndeterminate = v;
}
calculateTotal(index) {// when quantity or unit price changes
let item = this._purchaseData.procurementList[index];
if (item.Count && item.Price) {
let num = item.Count * item.Price;
item.Amount = Number(num.toFixed(2));// untaxed total price
}else{
item.Amount = 0;
}
this.calculateTotalTax("numberChange");
}
taxrateTotalFun=function(){// compute the tax-rate-related totals
if (typeof(this.rate) == "undefined" || this.rate == null) {
this.windowService.alert({ message: "税率未选择", type: "warn" });
} else {
this._purchaseData.taxAmount = Number((this._purchaseData.untaxAmount
* (1 + Number(this.rate))).toFixed(2));// total amount including tax
}
}
currencyDiffeFun=function(){// when the currency changes, re-determine how to calculate the totals
if(!this.tempAmountPrice){// no total amount, skip calculation
this.onPurchaseDataChange.emit(this._purchaseData);
return;
}
if(this.IsRMB){// RMB (CNY) case
this._purchaseData.untaxAmount=this.tempAmountPrice;// untaxed total amount
this.taxrateTotalFun();
this.onPurchaseDataChange.emit(this._purchaseData);
}else{// foreign currency case
this._purchaseData.foreignAmount=this.tempAmountPrice;// foreign currency total amount
this.shareMethodService.getRateConvertPrice(this._purchaseData.foreignAmount,this.currency)
.then(data => {// compute the total using the latest exchange rate
this._purchaseData.untaxAmount = data;
this._purchaseData.taxAmount = data;
this.onPurchaseDataChange.emit(this._purchaseData);
});
}
}
calculateTotalTax(changeType) {// compute the total price
// Because this method includes an async request (getRateConvertPrice), the emit of the result data is written into every branch of this method, so callers can skip emitting after calling it
switch(changeType){// calculate according to the kind of change
case "rateChange":
if(this.IsRMB){
this.taxrateTotalFun();
}
this.onPurchaseDataChange.emit(this._purchaseData);
break;
case "currencyChange":
this.currencyDiffeFun();
break;
case "numberChange":
this.numAmount = 0;
this._purchaseData.untaxAmount = 0;
this._purchaseData.taxAmount = 0;
this._purchaseData.foreignAmount = 0;
this.tempAmountPrice=0;
this._purchaseData.procurementList.forEach(item => {
if (item.Count) {
this.numAmount = Number(this.numAmount + Number(item.Count));// total material quantity
}
if (item.Count && item.Price) {
this.tempAmountPrice = Number((this.tempAmountPrice + item.Count * item.Price).toFixed(2));
}
})
this.currencyDiffeFun();
break;
}
// this.onAmountMoney.emit();// when the amount changes, request the new approver sequence
}
delProcurementItem(index) {// delete one purchase-list row
let reCount=true;
if(!this._purchaseData.procurementList[index]["Count"]
&& !this._purchaseData.procurementList[index]["Price"]){// if the deleted row has no quantity or unit price, no recalculation is needed
reCount=false;
}
if(this._purchaseData.procurementList[index].checked){
this.checkedNum--;// one fewer selected item
if(!this.checkedNum){// the last selected item was removed
this.fullChecked = false;
this.fullCheckedIndeterminate = false;
}
}
this._purchaseData.procurementList.splice(index, 1);
if(reCount){
this.calculateTotalTax("numberChange");
}else{
this.onPurchaseDataChange.emit(this._purchaseData);
}
this._purchaseData.procurementList=JSON.parse(JSON.stringify(this._purchaseData.procurementList));
}
addProcurementItem() {// add one purchase-list row
this._purchaseData.procurementList.push(new PurchaseRequisitionDetailsList());
if(this.hasContract && this.contractListLength==1){//若合同列表只有一项,直接选入
let len=this._purchaseData.procurementList.length;
this._purchaseData.procurementList[len-1].MaterialSource=this.contractList[0].SC_Code;
}
if(this.fullChecked){//如果全选,变成半选
this.fullChecked=false;
this.fullCheckedIndeterminate = true;
}
this.onPurchaseDataChange.emit(this._purchaseData);
}
downloadTpl(){//下载采购清单模板
if(this.hasContract){
window.open(dbomsPath+'assets/downloadtpl/预下有合同采购申请-采购清单.xlsx' );
}else{
window.open(dbomsPath+'assets/downloadtpl/预下无合同采购申请-采购清单.xlsx' );
}
| y(this.contractList) != window.localStorage.getItem("prepareContractList")) {//合同列表变化
this.contractList = JSON.parse(window.localStorage.getItem("prepareContractList"));
if(this.contractList && this.contractList.length){
this.contractListLength=this.contractList.length;
this.upLoadData = {
ContractCount: this.contractList.length
}
}
this.changeDetectorRef.detectChanges();//需要强制刷新
for(let i=0,len=this._purchaseData.procurementList.length;i<len;i++){//重新检查设置已选
let pro=this._purchaseData.procurementList[i];
let list=this.OnlySCCodeContract(pro["MaterialSource"]);
if(!list){
pro["MaterialSource"]=list;//为空
}else{
pro["MaterialSource"]=list["em"]["SC_Code"];//val
$("#materialSource"+i)[0].selectedIndex = list["index"]+1; //index
$("#materialSource"+i)[0].text=list["em"]["MainContractCode"]; //text
}
} | identifier_body |
prepareApply-submit-list.component.ts | this.onPurchaseDataChange.emit(this._purchaseData);
break;
case "currencyChange":
this.currencyDiffeFun();
break;
case "numberChange":
this.numAmount = 0;
this._purchaseData.untaxAmount = 0;
this._purchaseData.taxAmount = 0;
this._purchaseData.foreignAmount = 0;
this.tempAmountPrice=0;
this._purchaseData.procurementList.forEach(item => {
if (item.Count) {
this.numAmount = Number(this.numAmount + Number(item.Count));//物料数量合计
}
if (item.Count && item.Price) {
this.tempAmountPrice = Number((this.tempAmountPrice + item.Count * item.Price).toFixed(2));
}
})
this.currencyDiffeFun();
break;
}
// this.onAmountMoney.emit();//当金额变化时,请求获取新的审批人序列
}
delProcurementItem(index) {//删除一项采购清单
let reCount=true;
if(!this._purchaseData.procurementList[index]["Count"]
&& !this._purchaseData.procurementList[index]["Price"]){//如果删除的行没有数量和单价 不需要重新计算
reCount=false;
}
if(this._purchaseData.procurementList[index].checked){
this.checkedNum--;//选项减一
if(!this.checkedNum){//减最后一项
this.fullChecked = false;
this.fullCheckedIndeterminate = false;
}
}
this._purchaseData.procurementList.splice(index, 1);
if(reCount){
this.calculateTotalTax("numberChange");
}else{
this.onPurchaseDataChange.emit(this._purchaseData);
}
this._purchaseData.procurementList=JSON.parse(JSON.stringify(this._purchaseData.procurementList));
}
addProcurementItem() {//增加一项采购清单
this._purchaseData.procurementList.push(new PurchaseRequisitionDetailsList());
if(this.hasContract && this.contractListLength==1){//若合同列表只有一项,直接选入
let len=this._purchaseData.procurementList.length;
this._purchaseData.procurementList[len-1].MaterialSource=this.contractList[0].SC_Code;
}
if(this.fullChecked){//如果全选,变成半选
this.fullChecked=false;
this.fullCheckedIndeterminate = true;
}
this.onPurchaseDataChange.emit(this._purchaseData);
}
downloadTpl(){//下载采购清单模板
if(this.hasContract){
window.open(dbomsPath+'assets/downloadtpl/预下有合同采购申请-采购清单.xlsx' );
}else{
window.open(dbomsPath+'assets/downloadtpl/预下无合同采购申请-采购清单.xlsx' );
}
}
materialTraceno(index,no){//需求跟踪号的校验
if(!no){//为空不校验
return;
}
let validName="traceno"+index;
if(this.purchaseListForm.controls[validName].invalid){//格式校验未通过
this.windowService.alert({ message: '只允许输入数字和26位英文字符', type: 'success' });
return;
}
this._purchaseData.procurementList[index]["traceno"] = this._purchaseData.procurementList[index]["traceno"].toUpperCase();//转大写
this.shareMethodService.checkApplyTracenoExist(this._purchaseData.procurementList[index]["traceno"],this.purchaseRequisitioIid)
.then(data => {
if (!data) {
this.windowService.alert({ message:"该需求跟踪号已经存在,请重新输入", type: 'fail' });
this._purchaseData.procurementList[index]["traceno"]="";
}else{
this.onPurchaseDataChange.emit(this._purchaseData);
}
});
}
getMaterialData(index, id) {//根据物料号读取信息
if (id) {
this._purchaseData.procurementList[index].MaterialNumber=id.trim();//首尾去空格
this.getMaterial(this._purchaseData.procurementList[index].MaterialNumber).then(response => {
if (response.Data["MAKTX_ZH"]) {//获取描述
this._purchaseData.procurementList[index].MaterialDescription = response.Data["MAKTX_ZH"];
} else {
this._purchaseData.procurementList[index].MaterialDescription = "";
this.windowService.alert({ message: "该物料不存在", type: "warn" });
}
this.onPurchaseDataChange.emit(this._purchaseData);
})
}else{//清空物料 也需返回
this._purchaseData.procurementList[index].MaterialDescription = "";
this.onPurchaseDataChange.emit(this._purchaseData);
}
}
isLoading(e){//批量上传时loading现象
if(e){
this.loading=true;
}
}
uploadPurchase(e) {//批量上传返回
console.log(e);
if (e.Result) {
this.matchContractPrompt=false;
let result = e.Data;
if(result && result.length && this.fullChecked){//如果全选,变成半选
this.fullChecked=false;
this.fullCheckedIndeterminate = true;
}
result.forEach(item => {
if(this.hasContract){//有合同时需要匹配
item["MaterialSource"]=this.matchContract(item["MaterialSource"]);
}
item.Price=Number(item.Price).toFixed(2);
item.Amount=Number(item.Amount).toFixed(2);
item.Batch=item.Batch.toUpperCase();//转化大写
delete item.AddTime;delete item.ID;
delete item.purchaserequisitionid;delete item.traceno;
});
let newArr=this._purchaseData.procurementList.concat(result);
this._purchaseData.procurementList=newArr;//把excel中列表显示页面
this.calculateTotalTax("numberChange");
this.onAmountMoney.emit();//发送事件获取新的审批人序列
} else {
this.windowService.alert({ message: e.Message, type: "warn" });
}
this.loading=false;
}
showOrder() {//预览采购清单
console.log(this._purchaseData);
let modalData = {
procurementList: this._purchaseData.procurementList,
untaxAmount: this._purchaseData.untaxAmount,
factory: this.factory,
vendor: this.vendor
}
this.applyListModal.show(modalData);
}
backPurchaseData() {//返回数据
this.onPurchaseDataChange.emit(this._purchaseData);
}
deleteList(){//批量删除采购清单列表
if(!this.checkedNum){
this.windowService.alert({ message: "还未选择项", type: "warn" });
return;
}
if(this.fullChecked){//全选删除
this._purchaseData.procurementList=[];
this.tempAmountPrice=0;
this.numAmount = 0;
this._purchaseData.untaxAmount = 0;
this._purchaseData.taxAmount = 0;
this._purchaseData.foreignAmount = 0;
this.fullChecked=false;
this.fullCheckedIndeterminate = false;
this.onPurchaseDataChange.emit(this._purchaseData);
return;
}
this.fullCheckedIndeterminate = false;
let i; let item;
let len = this._purchaseData.procurementList.length;
for (i = 0; i < len; i++) {
item=this._purchaseData.procurementList[i];
if (item.checked === true) {
this._purchaseData.procurementList.splice(i, 1);
this._purchaseData.procurementList=JSON.parse(JSON.stringify( this._purchaseData.procurementList));// 重新物料列表,用来重置form表单的绑定项
len--;
i--;
}
}
this.calculateTotalTax("numberChange");
this.onAmountMoney.emit();//发送事件获取新的审批人序列
}
hoverText(i){//select hover显示字段
return $("#materialSource"+i+" option:selected").text();
}
matchContract(name){//根据名称(MainContractCode) 匹配合同 (批量导入时)
let len=this.contractList.length;
let i;let item;
for(i=0;i<len;i++){
item=this.contractList[i];
if(item.MainContractCode==name){
return item.SC_Code;
}
}
if(!this.matchContractPrompt && this.contractListLength!=1){
this.windowService.alert({ message: '导入列表中销售合同有未在所选合同中', type: 'warn' });
this.matchContractPrompt=true;
}
if(this.contractListLength==1){//若合同列表只有一项,直接选入
return this.contractList[0].SC_Code;
}else{
return '';
}
}
getMaterial(id) {//获取物料信息
let headers = new Headers({ 'Content-Type': 'application/json' });
let options = new RequestOptions({ headers: headers });
return this.http.get("api/PurchaseManage/GetMaterialInfo/"+id, options)
.toPromise()
.then(response => response.json())
}
OnlySCCodeContract(scCode){//根据合同唯一(scCode)标识 匹配合同
let list={
| em:"",
index:""
}
let len=this.contractList.length;
let i;let item;
for(i=0;i<len;i++){
item=this.contractList[i];
if(item.SC_Code==scCode){
list.em=item;
list.index=i;
return list;
| conditional_block |
|
prepareApply-submit-list.component.ts | 0;
this._purchaseData.taxAmount = 0;
this._purchaseData.foreignAmount = 0;
this.tempAmountPrice=0;
this._purchaseData.procurementList.forEach(item => {
if (item.Count) {
this.numAmount = Number(this.numAmount + Number(item.Count));//物料数量合计
}
if (item.Count && item.Price) {
this.tempAmountPrice = Number((this.tempAmountPrice + item.Count * item.Price).toFixed(2));
}
})
this.currencyDiffeFun();
break;
}
// this.onAmountMoney.emit();//当金额变化时,请求获取新的审批人序列
}
delProcurementItem(index) {//删除一项采购清单
let reCount=true;
if(!this._purchaseData.procurementList[index]["Count"]
&& !this._purchaseData.procurementList[index]["Price"]){//如果删除的行没有数量和单价 不需要重新计算
reCount=false;
}
if(this._purchaseData.procurementList[index].checked){
this.checkedNum--;//选项减一
if(!this.checkedNum){//减最后一项
this.fullChecked = false;
this.fullCheckedIndeterminate = false;
}
}
this._purchaseData.procurementList.splice(index, 1);
if(reCount){
this.calculateTotalTax("numberChange");
}else{
this.onPurchaseDataChange.emit(this._purchaseData);
}
this._purchaseData.procurementList=JSON.parse(JSON.stringify(this._purchaseData.procurementList));
}
addProcurementItem() {//增加一项采购清单
this._purchaseData.procurementList.push(new PurchaseRequisitionDetailsList());
if(this.hasContract && this.contractListLength==1){//若合同列表只有一项,直接选入
let len=this._purchaseData.procurementList.length;
this._purchaseData.procurementList[len-1].MaterialSource=this.contractList[0].SC_Code;
}
if(this.fullChecked){//如果全选,变成半选
this.fullChecked=false;
this.fullCheckedIndeterminate = true;
}
this.onPurchaseDataChange.emit(this._purchaseData);
}
downloadTpl(){//下载采购清单模板
if(this.hasContract){
window.open(dbomsPath+'assets/downloadtpl/预下有合同采购申请-采购清单.xlsx' );
}else{
window.open(dbomsPath+'assets/downloadtpl/预下无合同采购申请-采购清单.xlsx' );
}
}
materialTraceno(index,no){//需求跟踪号的校验
if(!no){//为空不校验
return;
}
let validName="traceno"+index;
if(this.purchaseListForm.controls[validName].invalid){//格式校验未通过
this.windowService.alert({ message: '只允许输入数字和26位英文字符', type: 'success' });
return;
}
this._purchaseData.procurementList[index]["traceno"] = this._purchaseData.procurementList[index]["traceno"].toUpperCase();//转大写
this.shareMethodService.checkApplyTracenoExist(this._purchaseData.procurementList[index]["traceno"],this.purchaseRequisitioIid)
.then(data => {
if (!data) {
this.windowService.alert({ message:"该需求跟踪号已经存在,请重新输入", type: 'fail' });
this._purchaseData.procurementList[index]["traceno"]="";
}else{
this.onPurchaseDataChange.emit(this._purchaseData);
}
});
}
getMaterialData(index, id) {//根据物料号读取信息
if (id) {
this._purchaseData.procurementList[index].MaterialNumber=id.trim();//首尾去空格
this.getMaterial(this._purchaseData.procurementList[index].MaterialNumber).then(response => {
if (response.Data["MAKTX_ZH"]) {//获取描述
this._purchaseData.procurementList[index].MaterialDescription = response.Data["MAKTX_ZH"];
} else {
this._purchaseData.procurementList[index].MaterialDescription = "";
this.windowService.alert({ message: "该物料不存在", type: "warn" });
}
this.onPurchaseDataChange.emit(this._purchaseData);
})
}else{//清空物料 也需返回
this._purchaseData.procurementList[index].MaterialDescription = "";
this.onPurchaseDataChange.emit(this._purchaseData);
}
}
isLoading(e){//批量上传时loading现象
if(e){
this.loading=true;
}
}
uploadPurchase(e) {//批量上传返回
console.log(e);
if (e.Result) {
this.matchContractPrompt=false;
let result = e.Data;
if(result && result.length && this.fullChecked){//如果全选,变成半选
this.fullChecked=false;
this.fullCheckedIndeterminate = true;
}
result.forEach(item => {
if(this.hasContract){//有合同时需要匹配
item["MaterialSource"]=this.matchContract(item["MaterialSource"]);
}
item.Price=Number(item.Price).toFixed(2);
item.Amount=Number(item.Amount).toFixed(2);
item.Batch=item.Batch.toUpperCase();//转化大写
delete item.AddTime;delete item.ID;
delete item.purchaserequisitionid;delete item.traceno;
});
let newArr=this._purchaseData.procurementList.concat(result);
this._purchaseData.procurementList=newArr;//把excel中列表显示页面
this.calculateTotalTax("numberChange");
this.onAmountMoney.emit();//发送事件获取新的审批人序列
} else {
this.windowService.alert({ message: e.Message, type: "warn" });
}
this.loading=false;
}
showOrder() {//预览采购清单
console.log(this._purchaseData);
let modalData = {
procurementList: this._purchaseData.procurementList,
untaxAmount: this._purchaseData.untaxAmount,
factory: this.factory,
vendor: this.vendor
}
this.applyListModal.show(modalData);
}
backPurchaseData() {//返回数据
this.onPurchaseDataChange.emit(this._purchaseData);
}
deleteList(){//批量删除采购清单列表
if(!this.checkedNum){
this.windowService.alert({ message: "还未选择项", type: "warn" });
return;
}
if(this.fullChecked){//全选删除
this._purchaseData.procurementList=[];
this.tempAmountPrice=0;
this.numAmount = 0;
this._purchaseData.untaxAmount = 0;
this._purchaseData.taxAmount = 0;
this._purchaseData.foreignAmount = 0;
this.fullChecked=false;
this.fullCheckedIndeterminate = false;
this.onPurchaseDataChange.emit(this._purchaseData);
return;
}
this.fullCheckedIndeterminate = false;
let i; let item;
let len = this._purchaseData.procurementList.length;
for (i = 0; i < len; i++) {
item=this._purchaseData.procurementList[i];
if (item.checked === true) {
this._purchaseData.procurementList.splice(i, 1);
this._purchaseData.procurementList=JSON.parse(JSON.stringify( this._purchaseData.procurementList));// 重新物料列表,用来重置form表单的绑定项
len--;
i--;
}
}
this.calculateTotalTax("numberChange");
this.onAmountMoney.emit();//发送事件获取新的审批人序列
}
hoverText(i){//select hover显示字段
return $("#materialSource"+i+" option:selected").text();
}
matchContract(name){//根据名称(MainContractCode) 匹配合同 (批量导入时)
let len=this.contractList.length;
let i;let item;
for(i=0;i<len;i++){
item=this.contractList[i];
if(item.MainContractCode==name){
return item.SC_Code;
}
}
if(!this.matchContractPrompt && this.contractListLength!=1){
this.windowService.alert({ message: '导入列表中销售合同有未在所选合同中', type: 'warn' });
this.matchContractPrompt=true;
}
if(this.contractListLength==1){//若合同列表只有一项,直接选入
return this.contractList[0].SC_Code;
}else{
return '';
}
}
getMaterial(id) {//获取物料信息
let headers = new Headers({ 'Content-Type': 'application/json' });
let options = new RequestOptions({ headers: headers });
return this.http.get("api/PurchaseManage/GetMaterialInfo/"+id, options)
.toPromise()
.then(response => response.json())
}
OnlySCCodeContract(scCode){//根据合同唯一(scCode)标识 匹配合同
let list={
em:"",
index:""
}
let len=this.contractList.length;
let i;let item;
for(i=0;i<len;i++){
item=this.contractList[i];
if(item.SC_Code==scCode){
list.em=item;
list.index=i;
return list;
}
}
return "";//已选的已经不在合同列表中 则置空
}
//当数量或金额发生变化时,发送事件,获取审批流程
onPriceOrCountChange(){
this.onAmountMoney.emit();//发送事件
}
} | identifier_name |
||
action.go |
// DisplayMode is the configured display mode
func (a *Action) DisplayMode() string {
return a.Display
}
// AggregateResultJSON receives a JSON reply and aggregates all the data found in it
func (a *Action) AggregateResultJSON(jres []byte) error {
res := make(map[string]interface{})
err := json.Unmarshal(jres, &res)
if err != nil {
return fmt.Errorf("could not parse result as JSON data: %s", err)
}
return a.AggregateResult(res)
}
// AggregateResult receives a result and aggregates all the data found in it; most
// errors are squashed since aggregations are called during processing of replies
// and we do not want to fail a reply just because aggregation failed, so this
// is basically a best-efforts kind of thing on purpose
func (a *Action) AggregateResult(result map[string]interface{}) error {
a.Lock()
defer a.Unlock()
if a.agg == nil {
a.agg = newActionAggregators(a)
}
for k, v := range result {
a.agg.aggregateItem(k, v)
}
return nil
}
// AggregateSummaryJSON produces a JSON representation of aggregate results for every output
// item that has an aggregate summary defined
func (a *Action) AggregateSummaryJSON() ([]byte, error) {
a.Lock()
defer a.Unlock()
if a.agg == nil {
a.agg = newActionAggregators(a)
}
return a.agg.action.agg.resultJSON(), nil
}
// AggregateSummaryStrings produces a map of results for every output item that
// has an aggregate summary defined
func (a *Action) AggregateSummaryStrings() (map[string]map[string]string, error) {
a.Lock()
defer a.Unlock()
if a.agg == nil {
a.agg = newActionAggregators(a)
}
return a.agg.resultStrings(), nil
}
// AggregateSummaryFormattedStrings produces a formatted string for each output
// item that has an aggregate summary defined
func (a *Action) AggregateSummaryFormattedStrings() (map[string][]string, error) {
a.Lock()
defer a.Unlock()
if a.agg == nil {
a.agg = newActionAggregators(a)
}
return a.agg.resultStringsFormatted(), nil
}
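// --- Illustrative sketch, not part of the original file ---
// Rough flow for the aggregation helpers above: feed each reply into
// AggregateResultJSON, then read the per-output summaries. The replies slice
// stands in for data received elsewhere; the function name is made up.
func exampleAggregateReplies(act *Action, replies [][]byte) (map[string]map[string]string, error) {
	for _, reply := range replies {
		if err := act.AggregateResultJSON(reply); err != nil {
			return nil, err
		}
	}
	return act.AggregateSummaryStrings()
}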
// InputNames retrieves all valid input names
func (a *Action) InputNames() (names []string) {
names = []string{}
for k := range a.Input {
names = append(names, k)
}
sort.Strings(names)
return names
}
// OutputNames retrieves all valid output names
func (a *Action) OutputNames() (names []string) {
for k := range a.Output {
names = append(names, k)
}
sort.Strings(names)
return names
}
// SetOutputDefaults adds items to results that have defaults declared in the DDL but not found in the result
func (a *Action) SetOutputDefaults(results map[string]interface{}) {
for _, k := range a.OutputNames() {
_, ok := results[k]
if ok {
continue
}
if a.Output[k].Default != nil {
results[k] = a.Output[k].Default
}
}
}
// RequiresInput reports if an input is required
func (a *Action) RequiresInput(input string) bool {
i, ok := a.Input[input]
if !ok {
return false
}
return !i.Optional
}
// ValidateAndConvertToDDLTypes takes a map of strings like you might receive from the CLI, converts each
// item to the correct type according to the DDL type hints associated with inputs, validates that it is valid
// according to the DDL hints and returns a map of interface{} ready for conversion to JSON that would
// then have the correct types
func (a *Action) ValidateAndConvertToDDLTypes(args map[string]string) (result map[string]interface{}, warnings []string, err error) {
result = make(map[string]interface{})
warnings = []string{}
for k, v := range args {
kname := strings.ToLower(k)
input, ok := a.Input[kname]
if !ok {
// ruby rpc was forgiving about this, but it's time to be stricter
return result, warnings, fmt.Errorf("input '%s' has not been declared", kname)
}
converted, err := ValToDDLType(input.Type, v)
if err != nil {
return result, warnings, fmt.Errorf("invalid value for '%s': %s", kname, err)
}
w, err := a.ValidateInputValue(kname, converted)
warnings = append(warnings, w...)
if err != nil {
return result, warnings, fmt.Errorf("invalid value for '%s': %s", kname, err)
}
result[kname] = converted
}
for _, iname := range a.InputNames() {
input := a.Input[iname]
_, ok := result[iname]
if !ok {
if !input.Optional && input.Default == nil {
return result, warnings, fmt.Errorf("input '%s' is required", iname)
}
if input.Default != nil {
result[iname] = input.Default
}
}
}
return result, warnings, nil
}
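// --- Illustrative sketch, not part of the original file ---
// A minimal example of how ValidateAndConvertToDDLTypes might be driven with
// CLI-style arguments. The *Action value and the argument names ("message",
// "ttl") are assumptions made for the example; fmt and encoding/json are
// already imported by this file.
func exampleConvertCLIArgs(act *Action) ([]byte, error) {
	args := map[string]string{"message": "hello", "ttl": "60"}
	converted, warnings, err := act.ValidateAndConvertToDDLTypes(args)
	if err != nil {
		return nil, err
	}
	for _, w := range warnings {
		fmt.Printf("warning: %s\n", w)
	}
	return json.Marshal(converted)
}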
// ValidateRequestJSON receives request data in JSON format and validates it against the DDL
func (a *Action) ValidateRequestJSON(req json.RawMessage) (warnings []string, err error) {
reqdata := make(map[string]interface{})
err = json.Unmarshal(req, &reqdata)
if err != nil {
return []string{}, err
}
return a.ValidateRequestData(reqdata)
}
// ValidateRequestData validates request data against the DDL
func (a *Action) ValidateRequestData(data map[string]interface{}) (warnings []string, err error) {
validNames := a.InputNames()
// We currently ignore the process_results flag that may be set by the MCO RPC CLI
delete(data, "process_results")
for _, input := range validNames {
val, ok := data[input]
// didn't get an input but it is required
if !ok && a.RequiresInput(input) {
return []string{}, fmt.Errorf("input '%s' is required", input)
}
// didn't get an input and don't need it, so nothing to do
if !ok {
continue
}
warnings, err = a.ValidateInputValue(input, val)
if err != nil {
return warnings, fmt.Errorf("validation failed for input '%s': %s", input, err)
}
}
if len(validNames) == 0 && len(data) > 0 {
return warnings, fmt.Errorf("request contains inputs while none are declared in the DDL")
}
for iname := range data {
matched := false
for _, vname := range validNames {
if vname == iname {
matched = true
continue
}
}
if matched {
continue
}
return warnings, fmt.Errorf("request contains an input '%s' that is not declared in the DDL. Valid inputs are: %s", iname, strings.Join(validNames, ", "))
}
return []string{}, err
}
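// --- Illustrative sketch, not part of the original file ---
// Intended call pattern for ValidateRequestJSON on a raw request body. The
// body argument is a placeholder; warnings are reported but do not fail the
// request, mirroring how the validators above treat them.
func exampleValidateRequest(act *Action, body []byte) error {
	warnings, err := act.ValidateRequestJSON(json.RawMessage(body))
	for _, w := range warnings {
		fmt.Printf("validation warning: %s\n", w)
	}
	return err
}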
// ValidateInputString attempts to convert a string to the correct type and validate it based on the DDL spec
func (a *Action) ValidateInputString(input string, val string) error {
i, ok := a.Input[input]
if !ok {
return fmt.Errorf("unknown input '%s'", input)
}
converted, err := ValToDDLType(i.Type, val)
if err != nil {
return err
}
_, err = a.ValidateInputValue(input, converted)
if err != nil {
return err
}
return nil
}
// ValidateInputValue validates the input matches requirements in the DDL
func (a *Action) ValidateInputValue(input string, val interface{}) (warnings []string, err error) {
warnings = []string{}
i, ok := a.Input[input]
if !ok {
return warnings, fmt.Errorf("unknown input '%s'", input)
}
switch strings.ToLower(i.Type) {
case "integer":
if !isAnyInt(val) {
return warnings, fmt.Errorf("is not an integer")
}
case "number":
if !isNumber(val) {
return warnings, fmt.Errorf("is not a number")
}
case "float":
if !isFloat64(val) {
return warnings, fmt.Errorf("is not a float")
}
case "string":
if !isString(val) {
return warnings, fmt.Errorf("is not a string")
}
if i.MaxLength == 0 {
return warnings, nil
}
sval := val.(string)
if len(sval) > i.MaxLength {
return warnings, fmt.Errorf("is longer than %d characters", i.MaxLength)
}
if i.Validation != "" {
w, err := validateStringValidation(i.Validation, sval)
warnings = append(warnings, w...)
if err != nil {
return warnings, err
}
}
case "boolean":
if !isBool(val) {
return warnings, fmt.Errorf("is not a boolean")
}
case "list":
if len(i.Enum) == 0 {
return warnings, fmt.Errorf("input type of list without a valid list of items in DDL")
}
valstr, ok := val.(string)
| {
output, ok := a.Output[o]
return output, ok
} | identifier_body |
|
action.go | .Output[o]
return output, ok
}
// DisplayMode is the configured display mode
func (a *Action) DisplayMode() string {
return a.Display
}
// AggregateResultJSON receives a JSON reply and aggregates all the data found in it
func (a *Action) AggregateResultJSON(jres []byte) error {
res := make(map[string]interface{})
err := json.Unmarshal(jres, &res)
if err != nil {
return fmt.Errorf("could not parse result as JSON data: %s", err)
}
return a.AggregateResult(res)
}
// AggregateResult receives a result and aggregates all the data found in it; most
// errors are squashed since aggregations are called during processing of replies
// and we do not want to fail a reply just because aggregation failed, so this
// is basically a best-efforts kind of thing on purpose
func (a *Action) AggregateResult(result map[string]interface{}) error {
a.Lock()
defer a.Unlock()
if a.agg == nil {
a.agg = newActionAggregators(a)
}
for k, v := range result {
a.agg.aggregateItem(k, v)
}
return nil
}
// AggregateSummaryJSON produces a JSON representation of aggregate results for every output
// item that has an aggregate summary defined
func (a *Action) AggregateSummaryJSON() ([]byte, error) {
a.Lock()
defer a.Unlock()
if a.agg == nil {
a.agg = newActionAggregators(a)
}
return a.agg.action.agg.resultJSON(), nil
}
// AggregateSummaryStrings produces a map of results for every output item that
// has an aggregate summary defined
func (a *Action) AggregateSummaryStrings() (map[string]map[string]string, error) {
a.Lock()
defer a.Unlock()
if a.agg == nil {
a.agg = newActionAggregators(a)
}
return a.agg.resultStrings(), nil
}
// AggregateSummaryFormattedStrings produces a formatted string for each output
// item that has an aggregate summary defined
func (a *Action) AggregateSummaryFormattedStrings() (map[string][]string, error) {
a.Lock()
defer a.Unlock()
if a.agg == nil {
a.agg = newActionAggregators(a)
}
return a.agg.resultStringsFormatted(), nil
}
// InputNames retrieves all valid input names
func (a *Action) InputNames() (names []string) {
names = []string{}
for k := range a.Input {
names = append(names, k)
}
sort.Strings(names)
return names
}
// OutputNames retrieves all valid output names
func (a *Action) OutputNames() (names []string) {
for k := range a.Output {
names = append(names, k)
}
sort.Strings(names)
return names
}
// SetOutputDefaults adds items to results that have defaults declared in the DDL but not found in the result
func (a *Action) SetOutputDefaults(results map[string]interface{}) {
for _, k := range a.OutputNames() {
_, ok := results[k]
if ok {
continue
}
if a.Output[k].Default != nil {
results[k] = a.Output[k].Default
}
}
}
// RequiresInput reports if an input is required
func (a *Action) RequiresInput(input string) bool {
i, ok := a.Input[input]
if !ok {
return false
}
return !i.Optional
}
// ValidateAndConvertToDDLTypes takes a map of strings like you might receive from the CLI, converts each
// item to the correct type according to the DDL type hints associated with inputs, validates that it is valid
// according to the DDL hints and returns a map of interface{} ready for conversion to JSON that would
// then have the correct types
func (a *Action) ValidateAndConvertToDDLTypes(args map[string]string) (result map[string]interface{}, warnings []string, err error) {
result = make(map[string]interface{})
warnings = []string{}
for k, v := range args {
kname := strings.ToLower(k)
input, ok := a.Input[kname]
if !ok {
// ruby rpc was forgiving about this, but it's time to be stricter
return result, warnings, fmt.Errorf("input '%s' has not been declared", kname) | }
converted, err := ValToDDLType(input.Type, v)
if err != nil {
return result, warnings, fmt.Errorf("invalid value for '%s': %s", kname, err)
}
w, err := a.ValidateInputValue(kname, converted)
warnings = append(warnings, w...)
if err != nil {
return result, warnings, fmt.Errorf("invalid value for '%s': %s", kname, err)
}
result[kname] = converted
}
for _, iname := range a.InputNames() {
input := a.Input[iname]
_, ok := result[iname]
if !ok {
if !input.Optional && input.Default == nil {
return result, warnings, fmt.Errorf("input '%s' is required", iname)
}
if input.Default != nil {
result[iname] = input.Default
}
}
}
return result, warnings, nil
}
// ValidateRequestJSON receives request data in JSON format and validates it against the DDL
func (a *Action) ValidateRequestJSON(req json.RawMessage) (warnings []string, err error) {
reqdata := make(map[string]interface{})
err = json.Unmarshal(req, &reqdata)
if err != nil {
return []string{}, err
}
return a.ValidateRequestData(reqdata)
}
// ValidateRequestData validates request data against the DDL
func (a *Action) ValidateRequestData(data map[string]interface{}) (warnings []string, err error) {
validNames := a.InputNames()
// We currently ignore the process_results flag that may be set by the MCO RPC CLI
delete(data, "process_results")
for _, input := range validNames {
val, ok := data[input]
// didn't get an input but it is required
if !ok && a.RequiresInput(input) {
return []string{}, fmt.Errorf("input '%s' is required", input)
}
// didn't get an input and don't need it, so nothing to do
if !ok {
continue
}
warnings, err = a.ValidateInputValue(input, val)
if err != nil {
return warnings, fmt.Errorf("validation failed for input '%s': %s", input, err)
}
}
if len(validNames) == 0 && len(data) > 0 {
return warnings, fmt.Errorf("request contains inputs while none are declared in the DDL")
}
for iname := range data {
matched := false
for _, vname := range validNames {
if vname == iname {
matched = true
continue
}
}
if matched {
continue
}
return warnings, fmt.Errorf("request contains an input '%s' that is not declared in the DDL. Valid inputs are: %s", iname, strings.Join(validNames, ", "))
}
return []string{}, err
}
// ValidateInputString attempts to convert a string to the correct type and validate it based on the DDL spec
func (a *Action) ValidateInputString(input string, val string) error {
i, ok := a.Input[input]
if !ok {
return fmt.Errorf("unknown input '%s'", input)
}
converted, err := ValToDDLType(i.Type, val)
if err != nil {
return err
}
_, err = a.ValidateInputValue(input, converted)
if err != nil {
return err
}
return nil
}
// ValidateInputValue validates the input matches requirements in the DDL
func (a *Action) ValidateInputValue(input string, val interface{}) (warnings []string, err error) {
warnings = []string{}
i, ok := a.Input[input]
if !ok {
return warnings, fmt.Errorf("unknown input '%s'", input)
}
switch strings.ToLower(i.Type) {
case "integer":
if !isAnyInt(val) {
return warnings, fmt.Errorf("is not an integer")
}
case "number":
if !isNumber(val) {
return warnings, fmt.Errorf("is not a number")
}
case "float":
if !isFloat64(val) {
return warnings, fmt.Errorf("is not a float")
}
case "string":
if !isString(val) {
return warnings, fmt.Errorf("is not a string")
}
if i.MaxLength == 0 {
return warnings, nil
}
sval := val.(string)
if len(sval) > i.MaxLength {
return warnings, fmt.Errorf("is longer than %d characters", i.MaxLength)
}
if i.Validation != "" {
w, err := validateStringValidation(i.Validation, sval)
warnings = append(warnings, w...)
if err != nil {
return warnings, err
}
}
case "boolean":
if !isBool(val) {
return warnings, fmt.Errorf("is not a boolean")
}
case "list":
if len(i.Enum) == 0 {
return warnings, fmt.Errorf("input type of list without a valid list of items in DDL")
}
valstr, ok := val.(string)
if !ok {
return | random_line_split |
|
action.go | .Output[o]
return output, ok
}
// DisplayMode is the configured display mode
func (a *Action) DisplayMode() string {
return a.Display
}
// AggregateResultJSON receives a JSON reply and aggregates all the data found in it
func (a *Action) AggregateResultJSON(jres []byte) error {
res := make(map[string]interface{})
err := json.Unmarshal(jres, &res)
if err != nil {
return fmt.Errorf("could not parse result as JSON data: %s", err)
}
return a.AggregateResult(res)
}
// AggregateResult receives a result and aggregates all the data found in it; most
// errors are squashed since aggregations are called during processing of replies
// and we do not want to fail a reply just because aggregation failed, so this
// is basically a best-efforts kind of thing on purpose
func (a *Action) AggregateResult(result map[string]interface{}) error {
a.Lock()
defer a.Unlock()
if a.agg == nil {
a.agg = newActionAggregators(a)
}
for k, v := range result {
a.agg.aggregateItem(k, v)
}
return nil
}
// AggregateSummaryJSON produces a JSON representation of aggregate results for every output
// item that has an aggregate summary defined
func (a *Action) AggregateSummaryJSON() ([]byte, error) {
a.Lock()
defer a.Unlock()
if a.agg == nil {
a.agg = newActionAggregators(a)
}
return a.agg.action.agg.resultJSON(), nil
}
// AggregateSummaryStrings produces a map of results for every output item that
// has an aggregate summary defined
func (a *Action) AggregateSummaryStrings() (map[string]map[string]string, error) {
a.Lock()
defer a.Unlock()
if a.agg == nil {
a.agg = newActionAggregators(a)
}
return a.agg.resultStrings(), nil
}
// AggregateSummaryFormattedStrings produces a formatted string for each output
// item that has an aggregate summary defined
func (a *Action) AggregateSummaryFormattedStrings() (map[string][]string, error) {
a.Lock()
defer a.Unlock()
if a.agg == nil {
a.agg = newActionAggregators(a)
}
return a.agg.resultStringsFormatted(), nil
}
// InputNames retrieves all valid input names
func (a *Action) InputNames() (names []string) {
names = []string{}
for k := range a.Input {
names = append(names, k)
}
sort.Strings(names)
return names
}
// OutputNames retrieves all valid output names
func (a *Action) OutputNames() (names []string) {
for k := range a.Output {
names = append(names, k)
}
sort.Strings(names)
return names
}
// SetOutputDefaults adds items to results that have defaults declared in the DDL but not found in the result
func (a *Action) SetOutputDefaults(results map[string]interface{}) {
for _, k := range a.OutputNames() {
_, ok := results[k]
if ok {
continue
}
if a.Output[k].Default != nil {
results[k] = a.Output[k].Default
}
}
}
// RequiresInput reports if an input is required
func (a *Action) RequiresInput(input string) bool {
i, ok := a.Input[input]
if !ok {
return false
}
return !i.Optional
}
// ValidateAndConvertToDDLTypes takes a map of strings like you might receive from the CLI, converts each
// item to the correct type according to the DDL type hints associated with inputs, validates that it is valid
// according to the DDL hints and returns a map of interface{} ready for conversion to JSON that would
// then have the correct types
func (a *Action) ValidateAndConvertToDDLTypes(args map[string]string) (result map[string]interface{}, warnings []string, err error) {
result = make(map[string]interface{})
warnings = []string{}
for k, v := range args {
kname := strings.ToLower(k)
input, ok := a.Input[kname]
if !ok {
// ruby rpc was forgiving about this, but it's time to be stricter
return result, warnings, fmt.Errorf("input '%s' has not been declared", kname)
}
converted, err := ValToDDLType(input.Type, v)
if err != nil {
return result, warnings, fmt.Errorf("invalid value for '%s': %s", kname, err)
}
w, err := a.ValidateInputValue(kname, converted)
warnings = append(warnings, w...)
if err != nil {
return result, warnings, fmt.Errorf("invalid value for '%s': %s", kname, err)
}
result[kname] = converted
}
for _, iname := range a.InputNames() {
input := a.Input[iname]
_, ok := result[iname]
if !ok {
if !input.Optional && input.Default == nil {
return result, warnings, fmt.Errorf("input '%s' is required", iname)
}
if input.Default != nil |
}
}
return result, warnings, nil
}
// ValidateRequestJSON receives request data in JSON format and validates it against the DDL
func (a *Action) ValidateRequestJSON(req json.RawMessage) (warnings []string, err error) {
reqdata := make(map[string]interface{})
err = json.Unmarshal(req, &reqdata)
if err != nil {
return []string{}, err
}
return a.ValidateRequestData(reqdata)
}
// ValidateRequestData validates request data against the DDL
func (a *Action) ValidateRequestData(data map[string]interface{}) (warnings []string, err error) {
validNames := a.InputNames()
// We currently ignore the process_results flag that may be set by the MCO RPC CLI
delete(data, "process_results")
for _, input := range validNames {
val, ok := data[input]
// didnt get a input but needs it
if !ok && a.RequiresInput(input) {
return []string{}, fmt.Errorf("input '%s' is required", input)
}
// didnt get a input and dont need it so nothing to do
if !ok {
continue
}
warnings, err = a.ValidateInputValue(input, val)
if err != nil {
return warnings, fmt.Errorf("validation failed for input '%s': %s", input, err)
}
}
if len(validNames) == 0 && len(data) > 0 {
return warnings, fmt.Errorf("request contains inputs while none are declared in the DDL")
}
for iname := range data {
matched := false
for _, vname := range validNames {
if vname == iname {
matched = true
continue
}
}
if matched {
continue
}
return warnings, fmt.Errorf("request contains an input '%s' that is not declared in the DDL. Valid inputs are: %s", iname, strings.Join(validNames, ", "))
}
return []string{}, err
}
// ValidateInputString attempts to convert a string to the correct type and validate it based on the DDL spec
func (a *Action) ValidateInputString(input string, val string) error {
i, ok := a.Input[input]
if !ok {
return fmt.Errorf("unknown input '%s'", input)
}
converted, err := ValToDDLType(i.Type, val)
if err != nil {
return err
}
_, err = a.ValidateInputValue(input, converted)
if err != nil {
return err
}
return nil
}
// ValidateInputValue validates the input matches requirements in the DDL
func (a *Action) ValidateInputValue(input string, val interface{}) (warnings []string, err error) {
warnings = []string{}
i, ok := a.Input[input]
if !ok {
return warnings, fmt.Errorf("unknown input '%s'", input)
}
switch strings.ToLower(i.Type) {
case "integer":
if !isAnyInt(val) {
return warnings, fmt.Errorf("is not an integer")
}
case "number":
if !isNumber(val) {
return warnings, fmt.Errorf("is not a number")
}
case "float":
if !isFloat64(val) {
return warnings, fmt.Errorf("is not a float")
}
case "string":
if !isString(val) {
return warnings, fmt.Errorf("is not a string")
}
if i.MaxLength == 0 {
return warnings, nil
}
sval := val.(string)
if len(sval) > i.MaxLength {
return warnings, fmt.Errorf("is longer than %d characters", i.MaxLength)
}
if i.Validation != "" {
w, err := validateStringValidation(i.Validation, sval)
warnings = append(warnings, w...)
if err != nil {
return warnings, err
}
}
case "boolean":
if !isBool(val) {
return warnings, fmt.Errorf("is not a boolean")
}
case "list":
if len(i.Enum) == 0 {
return warnings, fmt.Errorf("input type of list without a valid list of items in DDL")
}
valstr, ok := val.(string)
if !ok {
| {
result[iname] = input.Default
} | conditional_block |
action.go | .Output[o]
return output, ok
}
// DisplayMode is the configured display mode
func (a *Action) DisplayMode() string {
return a.Display
}
// AggregateResultJSON receives a JSON reply and aggregates all the data found in it
func (a *Action) AggregateResultJSON(jres []byte) error {
res := make(map[string]interface{})
err := json.Unmarshal(jres, &res)
if err != nil {
return fmt.Errorf("could not parse result as JSON data: %s", err)
}
return a.AggregateResult(res)
}
// AggregateResult receives a result and aggregates all the data found in it; most
// errors are squashed since aggregations are called during processing of replies
// and we do not want to fail a reply just because aggregation failed, so this
// is basically a best-efforts kind of thing on purpose
func (a *Action) AggregateResult(result map[string]interface{}) error {
a.Lock()
defer a.Unlock()
if a.agg == nil {
a.agg = newActionAggregators(a)
}
for k, v := range result {
a.agg.aggregateItem(k, v)
}
return nil
}
// AggregateSummaryJSON produces a JSON representation of aggregate results for every output
// item that has an aggregate summary defined
func (a *Action) AggregateSummaryJSON() ([]byte, error) {
a.Lock()
defer a.Unlock()
if a.agg == nil {
a.agg = newActionAggregators(a)
}
return a.agg.action.agg.resultJSON(), nil
}
// AggregateSummaryStrings produces a map of results for every output item that
// has an aggregate summary defined
func (a *Action) AggregateSummaryStrings() (map[string]map[string]string, error) {
a.Lock()
defer a.Unlock()
if a.agg == nil {
a.agg = newActionAggregators(a)
}
return a.agg.resultStrings(), nil
}
// AggregateSummaryFormattedStrings produces a formatted string for each output
// item that has an aggregate summary defined
func (a *Action) AggregateSummaryFormattedStrings() (map[string][]string, error) {
a.Lock()
defer a.Unlock()
if a.agg == nil {
a.agg = newActionAggregators(a)
}
return a.agg.resultStringsFormatted(), nil
}
// InputNames retrieves all valid input names
func (a *Action) InputNames() (names []string) {
names = []string{}
for k := range a.Input {
names = append(names, k)
}
sort.Strings(names)
return names
}
// OutputNames retrieves all valid output names
func (a *Action) OutputNames() (names []string) {
for k := range a.Output {
names = append(names, k)
}
sort.Strings(names)
return names
}
// SetOutputDefaults adds items to results that have defaults declared in the DDL but not found in the result
func (a *Action) | (results map[string]interface{}) {
for _, k := range a.OutputNames() {
_, ok := results[k]
if ok {
continue
}
if a.Output[k].Default != nil {
results[k] = a.Output[k].Default
}
}
}
// RequiresInput reports if an input is required
func (a *Action) RequiresInput(input string) bool {
i, ok := a.Input[input]
if !ok {
return false
}
return !i.Optional
}
// ValidateAndConvertToDDLTypes takes a map of strings like you might receive from the CLI, converts each
// item to the correct type according to the DDL type hints associated with inputs, validates that it is valid
// according to the DDL hints and returns a map of interface{} ready for conversion to JSON that would
// then have the correct types
func (a *Action) ValidateAndConvertToDDLTypes(args map[string]string) (result map[string]interface{}, warnings []string, err error) {
result = make(map[string]interface{})
warnings = []string{}
for k, v := range args {
kname := strings.ToLower(k)
input, ok := a.Input[kname]
if !ok {
// ruby rpc was forgiving about this, but it's time to be stricter
return result, warnings, fmt.Errorf("input '%s' has not been declared", kname)
}
converted, err := ValToDDLType(input.Type, v)
if err != nil {
return result, warnings, fmt.Errorf("invalid value for '%s': %s", kname, err)
}
w, err := a.ValidateInputValue(kname, converted)
warnings = append(warnings, w...)
if err != nil {
return result, warnings, fmt.Errorf("invalid value for '%s': %s", kname, err)
}
result[kname] = converted
}
for _, iname := range a.InputNames() {
input := a.Input[iname]
_, ok := result[iname]
if !ok {
if !input.Optional && input.Default == nil {
return result, warnings, fmt.Errorf("input '%s' is required", iname)
}
if input.Default != nil {
result[iname] = input.Default
}
}
}
return result, warnings, nil
}
// ValidateRequestJSON receives request data in JSON format and validates it against the DDL
func (a *Action) ValidateRequestJSON(req json.RawMessage) (warnings []string, err error) {
reqdata := make(map[string]interface{})
err = json.Unmarshal(req, &reqdata)
if err != nil {
return []string{}, err
}
return a.ValidateRequestData(reqdata)
}
// ValidateRequestData validates request data against the DDL
func (a *Action) ValidateRequestData(data map[string]interface{}) (warnings []string, err error) {
validNames := a.InputNames()
// We currently ignore the process_results flag that may be set by the MCO RPC CLI
delete(data, "process_results")
for _, input := range validNames {
val, ok := data[input]
// didnt get a input but needs it
if !ok && a.RequiresInput(input) {
return []string{}, fmt.Errorf("input '%s' is required", input)
}
// didnt get a input and dont need it so nothing to do
if !ok {
continue
}
warnings, err = a.ValidateInputValue(input, val)
if err != nil {
return warnings, fmt.Errorf("validation failed for input '%s': %s", input, err)
}
}
if len(validNames) == 0 && len(data) > 0 {
return warnings, fmt.Errorf("request contains inputs while none are declared in the DDL")
}
for iname := range data {
matched := false
for _, vname := range validNames {
if vname == iname {
matched = true
continue
}
}
if matched {
continue
}
return warnings, fmt.Errorf("request contains an input '%s' that is not declared in the DDL. Valid inputs are: %s", iname, strings.Join(validNames, ", "))
}
return []string{}, err
}
// ValidateInputString attempts to convert a string to the correct type and validate it based on the DDL spec
func (a *Action) ValidateInputString(input string, val string) error {
i, ok := a.Input[input]
if !ok {
return fmt.Errorf("unknown input '%s'", input)
}
converted, err := ValToDDLType(i.Type, val)
if err != nil {
return err
}
_, err = a.ValidateInputValue(input, converted)
if err != nil {
return err
}
return nil
}
// ValidateInputValue validates the input matches requirements in the DDL
func (a *Action) ValidateInputValue(input string, val interface{}) (warnings []string, err error) {
warnings = []string{}
i, ok := a.Input[input]
if !ok {
return warnings, fmt.Errorf("unknown input '%s'", input)
}
switch strings.ToLower(i.Type) {
case "integer":
if !isAnyInt(val) {
return warnings, fmt.Errorf("is not an integer")
}
case "number":
if !isNumber(val) {
return warnings, fmt.Errorf("is not a number")
}
case "float":
if !isFloat64(val) {
return warnings, fmt.Errorf("is not a float")
}
case "string":
if !isString(val) {
return warnings, fmt.Errorf("is not a string")
}
if i.MaxLength == 0 {
return warnings, nil
}
sval := val.(string)
if len(sval) > i.MaxLength {
return warnings, fmt.Errorf("is longer than %d characters", i.MaxLength)
}
if i.Validation != "" {
w, err := validateStringValidation(i.Validation, sval)
warnings = append(warnings, w...)
if err != nil {
return warnings, err
}
}
case "boolean":
if !isBool(val) {
return warnings, fmt.Errorf("is not a boolean")
}
case "list":
if len(i.Enum) == 0 {
return warnings, fmt.Errorf("input type of list without a valid list of items in DDL")
}
valstr, ok := val.(string)
if !ok {
| SetOutputDefaults | identifier_name |
bg.js | urls.push(url);
sample_urls.push(sampUrl);
urls_downloaded.push(false);
}
// called by some of the context menu options.
function addToList(info, tab) {
var url;
var type;
if(info.menuItemId == con1) // image
{
url = info.srcUrl;
listPush(url);
}
}
function addFromThumbnails(info, tab) {
// console.log(info);
// send message to content script
chrome.tabs.sendMessage(tab.id,
{"pageUrl": info.pageUrl,
"query": "imagesOfThumbnails",
"tabId": tab.id},
function(response) {
if(response == null || response.arr == null)
return;
for(var i = 0; i < response.arr.length; i++)
{
listPush(response.arr[i], response.sampArr[i]);
}
});
}
// Tries to display the list in an alert to the user.
// alert has limits on what it can display, however.
function viewList(info, tab) {
var aler = "";
for(var i=0; i<urls.length; i++)
aler = aler + "\n" + urls[i];
alert(aler);
}
var cleanDownloadFlag = false;
var scanningFlag = false;
var scanningSize = 0;
// toggles the scanning feature. Scanning is performed in content.js in load() if scanningFlag is set to <true>.
function toggleScanning() {
if(scanningFlag)
scanningFlag = false;
else
{
scanningSize = prompt("min size of images in px?");
if(scanningSize != null && scanningSize >= 0)
scanningFlag = true;
}
return scanningFlag;
}
// returns an array of all urls of images above a size
// in current window, after adding them to the url list
function captureImages(callback) {
var captureSize = prompt("min size of images in px?");
var img = {};
img.urls = [];
if(captureSize != null && captureSize >= 0)
{
withTabsInCurrentWindow(function(tabArray) {
var recvd = 0;
for(var i = 0; i < tabArray.length; i++)
{
var tabId = tabArray[i].id;
chrome.tabs.sendMessage(tabId,
{"query": "urlsOfPageImagesGivenSize", "dimension": captureSize, "tabId": tabId },
function(response) {
recvd++;
if(response != null && response.arr != null && response.arr.length != 0)
{
for(var i = 0; i < response.arr.length; i++)
{
listPush(response.arr[i]);
img.urls.push(response.arr[i]);
}
}
// console.log(recvd + "/" + tabArray.length);
if(recvd == tabArray.length) // finished processing all tabs
{
// console.log(callback);
// console.log(img);
callback(img);
}
});
}
});
}
}
// add all to list
function addEnclosed(these_urls) {
for(var i = 0; i < these_urls.length; i++)
listPush(these_urls[i]);
}
// used to keep track of downloading items. All items on this are canceled if stop downloads button is hit
var dlItems = [];
// downloads the list stored in this script.
function downloadList(info, tab) {
stopDownloadsFlag = false;
dlItems.length = 0;
downloadHelper(urls, 0);
}
// same as above, but with the parameter as the list.
function downloadEnclosed(these_urls) {
stopDownloadsFlag = false;
dlItems.length = 0;
downloadHelper(these_urls, 0);
}
// Boolean flag that lets the user stop downloads (checked inside the callback in downloadHelper)
var stopDownloadsFlag = false;
function | () {
stopDownloadsFlag = true;
for(var i = dlItems.length-1 ; i >= 0 ; i-- )
{
if(dlItems[i].state != "complete")
chrome.downloads.cancel(dlItems[i]);
}
}
// Recursive helper. Downloads until the end of the list, one item at a time. Uses the dlItems array so downloads
// can be canceled if needed.
function downloadHelper(dlist, filesDownloaded) {
if(filesDownloaded == dlist.length)
return;
else
{
var dlurl = dlist[filesDownloaded];
chrome.downloads.download({"url": dlurl,
conflictAction : "uniquify"},
function (dId) {
if(stopDownloadsFlag)
chrome.downloads.cancel(dId);
else
{
if(dId == undefined) // if download fails, don't add to list of "successfully downloaded"
{
// maybe keep track of these, so we can download from <img>-made blobs
console.log("download failed: " + dlurl);
}
else
{
dlItems.push(dId);
}
downloadHelper(dlist, filesDownloaded+1)
}
}
);
}
}
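// --- Illustrative sketch, not part of the original script ---
// A Promise-based variant of the sequential chain implemented by downloadHelper
// above, shown only for clarity. It reuses the existing stopDownloadsFlag and
// dlItems globals and is never called anywhere in this file.
function downloadSequentially(dlist) {
  return dlist.reduce(function (chain, dlurl) {
    return chain.then(function () {
      if (stopDownloadsFlag) return; // honor the stop button, like downloadHelper does
      return new Promise(function (resolve) {
        chrome.downloads.download({ "url": dlurl, conflictAction: "uniquify" }, function (dId) {
          if (dId !== undefined) dlItems.push(dId); // track so stopDownloads() can cancel it
          resolve();
        });
      });
    });
  }, Promise.resolve());
}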
chrome.downloads.onChanged.addListener(function (downloadDelta) {
var id = downloadDelta.id;
if (downloadDelta.state != null)
{
var state = downloadDelta.state.current;
if(state == "complete" || state == "interrupted") {
var isSuccessful = (state == "complete" ? true : false);
chrome.downloads.search({"id" : id}, function(itemArray) { // get download url
if(itemArray.length == 1) { // sanity
var url = itemArray[0].url;
var index = urls.indexOf(url);
urls_downloaded[index] = isSuccessful; // update own record of download
// tell Popup that that download finished.
chrome.runtime.sendMessage({"query" : "downloadEnded",
"success" : isSuccessful,
"url": url});
// inserted here because it needs to be done AFTER the item is looked at for url
if(cleanDownloadFlag)
chrome.downloads.erase({"id" : id});
}
});
}
}
});
chrome.downloads.onDeterminingFilename.addListener(function(item, suggest) {
// download was started by this chrome extension, so fix the file extension
if(item.byExtensionId != null && item.byExtensionId == chrome.runtime.id)
{
var filename = "Image Saver/" + item.filename;
var ext = "";
switch(item.mime){ // fix mime types
case "image/png":
ext = ".png";
break;
case "image/jpeg":
case "image/jpg":
ext = ".jpg";
break;
case "image/gif":
ext = ".gif";
break;
default:
break;
}
if(ext.length != 0) // if one of the above, replace extension.
filename = filename.substring(0, filename.lastIndexOf(".")) + ext;
suggest({"filename": filename, conflictAction: "uniquify"});
}
else
suggest(); // not for this extension, so ignore it
});
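// --- Illustrative sketch, not part of the original script ---
// The switch in the listener above maps MIME types to file extensions; an
// equivalent lookup table keeps that mapping in one place. The constant name is
// made up and nothing else uses it.
var MIME_EXTENSIONS = {
  "image/png": ".png",
  "image/jpeg": ".jpg",
  "image/jpg": ".jpg",
  "image/gif": ".gif"
};
// Example: var ext = MIME_EXTENSIONS[item.mime] || "";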
function clearList() {
urls = [];
sample_urls = [];
urls_downloaded = [];
}
// add all to list
function clearEnclosed(these_urls) {
for(var i = 0; i < these_urls.length; i++)
{
var index = urls.indexOf(these_urls[i]);
urls.splice(index, 1);
sample_urls.splice(index, 1);
urls_downloaded.splice(index, 1);
}
}
function inCurrentTab(callback) {
chrome.tabs.query( {"active":true, "currentWindow":true}, function(tabArr){
var tab = tabArr[0];
callback(tabArr[0]);
});
}
function withTabsInCurrentWindow(callback) {
chrome.tabs.query( {"currentWindow":true}, function(tabArr){
callback(tabArr);
});
}
// receives messages from other scripts.
chrome.runtime.onMessage.addListener(
function(request, sender, sendResponse) {
switch(request.query)
{
case "urlList":
sendResponse({ "urls" : urls,
"samples" : sample_urls,
"download_status" : urls_downloaded,
"scanning" : scanningFlag,
"cleanDL" : cleanDownloadFlag});
break;
case "clearList":
clearList();
break;
case "clearEnclosed":
clearEnclosed(request.urls);
break;
case "stopDownloads":
stopDownloads();
break;
case "scanningFlagAndTabId":
sendResponse({ "scanning" : scanningFlag, "scanSize" : scanningSize, "tabId": sender.tab.id });
break;
case "downloadEnclosed":
if(request.urls != null)
downloadEnclosed(request.urls);
break;
case "addEnclosed":
addEnclosed(request.arr);
break;
case "toggleScanning":
sendResponse({ "result" : toggleScanning() });
break;
case "toggleCleanDownload":
cleanDownloadFlag = !cleanDownloadFlag;
break;
case "captureImages":
captureImages(sendResponse);
return true;
break;
default:
break;
}
}
);
// listens for hotkeys
chrome.commands.onCommand.addListener(function (command) {
switch(command){
case 'saveImage':
inCurrentTab(function(tab){
chrome.tabs.sendMessage(tab.id, {"query": "urlsOfPageImages", "tabId": tab.id},
function(response) {
if(response.error === " | stopDownloads | identifier_name |
bg.js | .push(url);
sample_urls.push(sampUrl);
urls_downloaded.push(false);
}
// called by some of the context menu options.
function addToList(info, tab) {
var url;
var type;
if(info.menuItemId == con1) // image
{
url = info.srcUrl;
listPush(url);
}
}
function addFromThumbnails(info, tab) {
// console.log(info);
// send message to content script
chrome.tabs.sendMessage(tab.id,
{"pageUrl": info.pageUrl,
"query": "imagesOfThumbnails",
"tabId": tab.id},
function(response) {
if(response == null || response.arr == null)
return;
for(var i = 0; i < response.arr.length; i++)
{
listPush(response.arr[i], response.sampArr[i]);
}
});
}
// Tries to display the list in an alert to the user.
// alert has limits on what it can display, however.
function viewList(info, tab) {
var aler = "";
for(var i=0; i<urls.length; i++)
aler = aler + "\n" + urls[i];
alert(aler);
}
var cleanDownloadFlag = false;
var scanningFlag = false;
var scanningSize = 0;
// toggles the scanning feature. Scanning is performed in content.js in load() if scanningFlag is set to <true>.
function toggleScanning() {
if(scanningFlag)
scanningFlag = false;
else
{
scanningSize = prompt("min size of images in px?");
if(scanningSize != null && scanningSize >= 0)
scanningFlag = true;
}
return scanningFlag;
}
// returns an array of all urls of images above a size
// in current window, after adding them to the url list
function captureImages(callback) {
var captureSize = prompt("min size of images in px?");
var img = {};
img.urls = [];
if(captureSize != null && captureSize >= 0)
{
withTabsInCurrentWindow(function(tabArray) {
var recvd = 0;
for(var i = 0; i < tabArray.length; i++)
{
var tabId = tabArray[i].id;
chrome.tabs.sendMessage(tabId,
{"query": "urlsOfPageImagesGivenSize", "dimension": captureSize, "tabId": tabId },
function(response) {
recvd++;
if(response != null && response.arr != null && response.arr.length != 0)
{
for(var i = 0; i < response.arr.length; i++)
{
listPush(response.arr[i]);
img.urls.push(response.arr[i]);
}
}
// console.log(recvd + "/" + tabArray.length);
if(recvd == tabArray.length) // finished processing all tabs
{
// console.log(callback);
// console.log(img);
callback(img);
}
});
}
});
}
}
// add all to list
function addEnclosed(these_urls) {
for(var i = 0; i < these_urls.length; i++)
listPush(these_urls[i]);
}
// used to keep track of downloading items. All items on this are canceled if stop downloads button is hit
var dlItems = [];
// downloads the list stored in this script.
function downloadList(info, tab) |
// same as above, but with the parameter as the list.
function downloadEnclosed(these_urls) {
stopDownloadsFlag = false;
dlItems.length = 0;
downloadHelper(these_urls, 0);
}
// Boolean flag that lets the user stop downloads (checked inside the callback in downloadHelper)
var stopDownloadsFlag = false;
function stopDownloads() {
stopDownloadsFlag = true;
for(var i = dlItems.length-1 ; i >= 0 ; i-- )
{
if(dlItems[i].state != "complete")
chrome.downloads.cancel(dlItems[i]);
}
}
// Recursive helper. Downloads until the end of the list, one item at a time. Uses the dlItems array so downloads
// can be canceled if needed.
function downloadHelper(dlist, filesDownloaded) {
if(filesDownloaded == dlist.length)
return;
else
{
var dlurl = dlist[filesDownloaded];
chrome.downloads.download({"url": dlurl,
conflictAction : "uniquify"},
function (dId) {
if(stopDownloadsFlag)
chrome.downloads.cancel(dId);
else
{
if(dId == undefined) // if download fails, don't add to list of "successfully downloaded"
{
// maybe keep track of these, so we can download from <img>-made blobs
console.log("download failed: " + dlurl);
}
else
{
dlItems.push(dId);
}
downloadHelper(dlist, filesDownloaded+1)
}
}
);
}
}
chrome.downloads.onChanged.addListener(function (downloadDelta) {
var id = downloadDelta.id;
if (downloadDelta.state != null)
{
var state = downloadDelta.state.current;
if(state == "complete" || state == "interrupted") {
var isSuccessful = (state == "complete" ? true : false);
chrome.downloads.search({"id" : id}, function(itemArray) { // get download url
if(itemArray.length == 1) { // sanity
var url = itemArray[0].url;
var index = urls.indexOf(url);
urls_downloaded[index] = isSuccessful; // update own record of download
// tell Popup that that download finished.
chrome.runtime.sendMessage({"query" : "downloadEnded",
"success" : isSuccessful,
"url": url});
// inserted here because it needs to be done AFTER the item is looked at for url
if(cleanDownloadFlag)
chrome.downloads.erase({"id" : id});
}
});
}
}
});
chrome.downloads.onDeterminingFilename.addListener(function(item, suggest) {
// download was started by this chrome extension, so fix the file extension
if(item.byExtensionId != null && item.byExtensionId == chrome.runtime.id)
{
var filename = "Image Saver/" + item.filename;
var ext = "";
switch(item.mime){ // fix mime types
case "image/png":
ext = ".png";
break;
case "image/jpeg":
case "image/jpg":
ext = ".jpg";
break;
case "image/gif":
ext = ".gif";
break;
default:
break;
}
if(ext.length != 0) // if one of the above, replace extension.
filename = filename.substring(0, filename.lastIndexOf(".")) + ext;
suggest({"filename": filename, conflictAction: "uniquify"});
}
else
suggest(); // not for this extension, so ignore it
});
function clearList() {
urls = [];
sample_urls = [];
urls_downloaded = [];
}
// add all to list
function clearEnclosed(these_urls) {
for(var i = 0; i < these_urls.length; i++)
{
var index = urls.indexOf(these_urls[i]);
urls.splice(index, 1);
sample_urls.splice(index, 1);
urls_downloaded.splice(index, 1);
}
}
function inCurrentTab(callback) {
chrome.tabs.query( {"active":true, "currentWindow":true}, function(tabArr){
var tab = tabArr[0];
callback(tabArr[0]);
});
}
function withTabsInCurrentWindow(callback) {
chrome.tabs.query( {"currentWindow":true}, function(tabArr){
callback(tabArr);
});
}
// receives messages from other scripts.
chrome.runtime.onMessage.addListener(
function(request, sender, sendResponse) {
switch(request.query)
{
case "urlList":
sendResponse({ "urls" : urls,
"samples" : sample_urls,
"download_status" : urls_downloaded,
"scanning" : scanningFlag,
"cleanDL" : cleanDownloadFlag});
break;
case "clearList":
clearList();
break;
case "clearEnclosed":
clearEnclosed(request.urls);
break;
case "stopDownloads":
stopDownloads();
break;
case "scanningFlagAndTabId":
sendResponse({ "scanning" : scanningFlag, "scanSize" : scanningSize, "tabId": sender.tab.id });
break;
case "downloadEnclosed":
if(request.urls != null)
downloadEnclosed(request.urls);
break;
case "addEnclosed":
addEnclosed(request.arr);
break;
case "toggleScanning":
sendResponse({ "result" : toggleScanning() });
break;
case "toggleCleanDownload":
cleanDownloadFlag = !cleanDownloadFlag;
break;
case "captureImages":
captureImages(sendResponse);
return true;
break;
default:
break;
}
}
);
// listens for hotkeys
chrome.commands.onCommand.addListener(function (command) {
switch(command){
case 'saveImage':
inCurrentTab(function(tab){
chrome.tabs.sendMessage(tab.id, {"query": "urlsOfPageImages", "tabId": tab.id},
function(response) {
if(response.error === " | {
stopDownloadsFlag = false;
dlItems.length = 0;
downloadHelper(urls, 0);
} | identifier_body |
bg.js | urls.push(url);
sample_urls.push(sampUrl);
urls_downloaded.push(false);
}
// called by some of the context menu options.
function addToList(info, tab) {
var url;
var type;
if(info.menuItemId == con1) // image
{
url = info.srcUrl;
listPush(url);
}
}
function addFromThumbnails(info, tab) {
// console.log(info);
// send message to content script
chrome.tabs.sendMessage(tab.id,
{"pageUrl": info.pageUrl,
"query": "imagesOfThumbnails",
"tabId": tab.id},
function(response) {
if(response == null || response.arr == null)
return;
for(var i = 0; i < response.arr.length; i++)
{
listPush(response.arr[i], response.sampArr[i]);
}
});
}
// Tries to display the list in an alert to the user.
// alert has limits on what it can display, however.
function viewList(info, tab) {
var aler = "";
for(var i=0; i<urls.length; i++)
aler = aler + "\n" + urls[i];
alert(aler);
}
var cleanDownloadFlag = false;
var scanningFlag = false;
var scanningSize = 0;
// toggles the scanning feature. Scanning is performed in content.js in load() if scanningFlag is set to <true>.
function toggleScanning() {
if(scanningFlag)
scanningFlag = false;
else
{
scanningSize = prompt("min size of images in px?");
if(scanningSize != null && scanningSize >= 0)
scanningFlag = true;
}
return scanningFlag;
}
// returns an array of all urls of images above a size
// in current window, after adding them to the url list
function captureImages(callback) {
var captureSize = prompt("min size of images in px?");
var img = {};
img.urls = [];
if(captureSize != null && captureSize >= 0)
{
withTabsInCurrentWindow(function(tabArray) {
var recvd = 0;
for(var i = 0; i < tabArray.length; i++)
{
var tabId = tabArray[i].id;
chrome.tabs.sendMessage(tabId,
{"query": "urlsOfPageImagesGivenSize", "dimension": captureSize, "tabId": tabId },
function(response) {
recvd++;
if(response != null && response.arr != null && response.arr.length != 0)
{
for(var i = 0; i < response.arr.length; i++)
{
listPush(response.arr[i]);
img.urls.push(response.arr[i]);
}
}
// console.log(recvd + "/" + tabArray.length);
if(recvd == tabArray.length) // finished processing all tabs
{
// console.log(callback);
// console.log(img);
callback(img);
}
});
}
});
}
}
// add all to list
function addEnclosed(these_urls) {
for(var i = 0; i < these_urls.length; i++)
listPush(these_urls[i]);
}
// used to keep track of downloading items. All items on this are canceled if stop downloads button is hit
var dlItems = [];
// downloads the list stored in this script.
function downloadList(info, tab) {
stopDownloadsFlag = false;
dlItems.length = 0;
downloadHelper(urls, 0);
}
// same as above, but with the parameter as the list.
function downloadEnclosed(these_urls) {
stopDownloadsFlag = false;
dlItems.length = 0;
downloadHelper(these_urls, 0);
}
// Boolean flag that lets user stop downloads (communicates to inside the callback in downloadHelper)
var stopDownloadsFlag = false;
function stopDownloads() {
stopDownloadsFlag = true;
for(var i = dlItems.length-1 ; i >= 0 ; i-- )
{
if(dlItems[i].state != "complete")
chrome.downloads.cancel(dlItems[i]);
}
}
// recursive helper. Downloads til the end, but does 1 at a time. Uses dlItems array to help if downloads
// need to be canceled.
function downloadHelper(dlist, filesDownloaded) {
if(filesDownloaded == dlist.length)
return;
else
{
var dlurl = dlist[filesDownloaded];
chrome.downloads.download({"url": dlurl,
conflictAction : "uniquify"},
function (dId) {
if(stopDownloadsFlag)
chrome.downloads.cancel(dId);
else
{
if(dId == undefined) // if download fails, don't add to list of "successfully downloaded"
{
// maybe keep track of these, so we can download from <img>-made blobs
console.log("download failed: " + dlurl);
}
else
{
dlItems.push(dId);
}
downloadHelper(dlist, filesDownloaded+1)
}
}
);
}
}
chrome.downloads.onChanged.addListener(function (downloadDelta) {
var id = downloadDelta.id;
if (downloadDelta.state != null)
{
var state = downloadDelta.state.current;
if(state == "complete" || state == "interrupted") {
var isSuccessful = (state == "complete" ? true : false);
chrome.downloads.search({"id" : id}, function(itemArray) { // get download url
if(itemArray.length == 1) { // sanity
var url = itemArray[0].url;
var index = urls.indexOf(url);
urls_downloaded[index] = isSuccessful; // update own record of download
// tell Popup that that download finished.
chrome.runtime.sendMessage({"query" : "downloadEnded",
"success" : isSuccessful,
"url": url});
// inserted here because it needs to be done AFTER the item is looked at for url
if(cleanDownloadFlag)
chrome.downloads.erase({"id" : id});
}
});
}
}
});
chrome.downloads.onDeterminingFilename.addListener(function(item, suggest) {
// for this chrome extension, so fix the file extension
if(item.byExtensionId != null && item.byExtensionId == chrome.runtime.id)
{
var filename = "Image Saver/" + item.filename;
var ext = "";
switch(item.mime){ // fix mime types
case "image/png":
ext = ".png";
break;
case "image/jpeg":
case "image/jpg":
ext = ".jpg";
break;
case "image/gif":
ext = ".gif";
break;
default:
break;
}
if(ext.length != 0) // if one of the above, replace extension.
filename = filename.substring(0, filename.lastIndexOf(".")) + ext;
suggest({"filename": filename, conflictAction: "uniquify"});
}
else
suggest(); // not for this extension, so ignore it
});
function clearList() {
urls = [];
sample_urls = [];
urls_downloaded = [];
}
// add all to list
function clearEnclosed(these_urls) {
for(var i = 0; i < these_urls.length; i++)
{
var index = urls.indexOf(these_urls[i]);
urls.splice(index, 1);
sample_urls.splice(index, 1);
urls_downloaded.splice(index, 1);
}
}
function inCurrentTab(callback) {
chrome.tabs.query( {"active":true, "currentWindow":true}, function(tabArr){
var tab = tabArr[0];
callback(tabArr[0]);
});
}
function withTabsInCurrentWindow(callback) {
chrome.tabs.query( {"currentWindow":true}, function(tabArr){
callback(tabArr);
});
}
// receives messages from other scripts.
chrome.runtime.onMessage.addListener(
function(request, sender, sendResponse) {
switch(request.query)
{
case "urlList":
sendResponse({ "urls" : urls,
"samples" : sample_urls,
"download_status" : urls_downloaded,
"scanning" : scanningFlag,
"cleanDL" : cleanDownloadFlag});
break;
case "clearList":
clearList();
break;
case "clearEnclosed":
clearEnclosed(request.urls);
break;
case "stopDownloads":
stopDownloads();
break;
case "scanningFlagAndTabId":
sendResponse({ "scanning" : scanningFlag, "scanSize" : scanningSize, "tabId": sender.tab.id });
break;
case "downloadEnclosed":
if(request.urls != null)
downloadEnclosed(request.urls);
break;
case "addEnclosed":
addEnclosed(request.arr);
break;
case "toggleScanning":
sendResponse({ "result" : toggleScanning() });
break;
case "toggleCleanDownload":
cleanDownloadFlag = !cleanDownloadFlag;
break;
case "captureImages":
captureImages(sendResponse);
return true;
break;
default:
break;
}
}
);
// listens for hotkeys
chrome.commands.onCommand.addListener(function (command) {
switch(command){
case 'saveImage':
inCurrentTab(function(tab){
chrome.tabs.sendMessage(tab.id, {"query": "urlsOfPageImages", "tabId": tab.id},
function(response) {
if(response.error === " | random_line_split |
||
panel.go | p.thirdIndex)
if dateLen == 0 || !date.Before(p.dates[dateLen-1]) {
p.dates = append(p.dates, date)
p.values = append(p.values, value)
} else {
insertP := sort.Search(dateLen, func(i int) bool {
return p.dates[i].After(date)
})
newDates := make([]time.Time, dateLen+1)
newValues := make([][][]float64, dateLen+1)
newDfs := make([]DataFrame, dateLen+1)
copy(newDates, p.dates[:insertP])
copy(newValues, p.values[:insertP])
newDates[insertP] = date
newValues[insertP] = value
newDfs[insertP] = *df
copy(newDates[insertP+1:], p.dates[insertP:])
copy(newValues[insertP+1:], p.values[insertP:])
p.dates = newDates
p.values = newValues
}
}
func (p *TimePanel) IAddDataFrame(date time.Time, df *DataFrame) {
p.AddMat(date, df.values)
}
// SecondaryLeftReplace 替换Panel Secondary 索引中部分列的值, 时间索引以本地为主, 发生替换时,数值取新数值中该时间之前最接近时间点的数值
// 该 API 目前并不是经济的, 时间开销并没有进行优化
// 如果目标的第三索引与本地不相同,则不进行处理
func (p *TimePanel) SecondaryLeftReplace(target TimePanelRO) {
if p.Length() == 0 {
return
}
tThird := target.Thirdly()
if p.thirdIndex.Length() != tThird.Length() {
return
}
for i := 0; i < p.thirdIndex.Length(); i++ {
if p.thirdIndex.String(i) != tThird.String(i) {
return
}
}
targetIdx := target.Index(p.dates[0])
if targetIdx == target.Length() || target.IDate(targetIdx).After(p.dates[0]) {
targetIdx--
}
for i := 0; i < p.Length(); i++ {
df := p.IGet(i)
localDate := p.IDate(i)
for targetIdx+1 < target.Length() && !target.IDate(targetIdx+1).After(localDate) {
targetIdx++
}
for j := 0; j < target.Secondary().Length(); j++ {
name := target.Secondary().String(j)
localJ := df.Major().Index(name)
if localJ < 0 {
// this column do not exist
continue
}
localSeries := df.IGet(localJ)
if targetIdx < 0 {
// invalid target, fill with nan
for k := range localSeries.values {
localSeries.values[k] = math.NaN()
}
} else {
targetValue := target.IGet(targetIdx).IGet(j).values
if len(localSeries.values) < len(targetValue) {
localSeries.values = make([]float64, len(targetValue))
}
copy(localSeries.values, targetValue)
}
}
}
}
//Slice get part of TimePanel result >= from < to
func (p *TimePanel) Slice(from, to time.Time) TimePanelRO {
i := sort.Search(len(p.dates), func(i int) bool {
return !p.dates[i].Before(from)
})
j := sort.Search(len(p.dates), func(i int) bool {
return !p.dates[i].Before(to)
})
return p.ISlice(i, j)
}
//ISlice get port of TimePanel result >=i <j
func (p *TimePanel) ISlice(i, j int) TimePanelRO {
if i < 0 {
i = 0
}
if j > len(p.dates) {
j = len(p.dates)
}
if i > j {
i = j
}
return &TimePanel{
values: p.values[i:j],
dates: p.dates[i:j],
secondIndex: p.secondIndex,
thirdIndex: p.thirdIndex,
}
}
//CutHead cut value head until
func (p *TimePanel) CutHead(until time.Time) {
i := sort.Search(len(p.dates), func(i int) bool {
return !p.dates[i].Before(until)
})
p.ICutHead(i)
}
//ICutHead cut value head until
func (p *TimePanel) ICutHead(until int) {
if until < 0 {
until = 0
}
if until > p.Length() {
until = p.Length()
}
for i := 0; i < until; i++ {
p.values[i] = nil
p.dates[i] = time.Time{}
}
p.values = p.values[until:]
p.dates = p.dates[until:]
}
//Get get the first DataFrame one big or equal to date
func (p *TimePanel) Get(date time.Time) (*DataFrame, time.Time) {
i := sort.Search(len(p.dates), func(i int) bool {
return !p.dates[i].Before(date)
})
df := p.IGet(i)
date = p.IDate(i)
return df, date
}
//Index give the first idx big or equal to date
func (p *TimePanel) Index(date time.Time) int {
return sort.Search(len(p.dates), func(i int) bool {
return !p.dates[i].Before(date)
})
}
//SetMajor replace the index with input
func (p *TimePanel) SetMajor(dates []time.Time) {
p.dates = dates
}
//SetSecond replace the index with input
func (p *TimePanel) SetSecond(index Index) {
p.secondIndex = index
}
//SetThird replace the index with input
func (p *TimePanel) SetThird(index Index) {
p.thirdIndex = index
}
//Length return length of data
func (p *TimePanel) Length() int {
if p == nil {
return 0
}
return len(p.dates)
}
//IG | n data on index
func (p *TimePanel) IGet(i int) *DataFrame {
if p == nil {
return nil
}
if i < 0 {
i += p.Length()
}
if i < 0 || i >= len(p.dates) {
return nil
}
return NewDataFrame(p.values[i], p.secondIndex, p.thirdIndex)
}
//IDate return date on index
func (p *TimePanel) IDate(i int) time.Time {
if p == nil {
return AncientTime
}
if i < 0 {
i += p.Length()
}
if i < 0 || i >= len(p.dates) {
return AncientTime
}
return p.dates[i]
}
//Secondary return second index
func (p *TimePanel) Secondary() Index {
return p.secondIndex
}
//Thirdly return third index
func (p *TimePanel) Thirdly() Index {
return p.thirdIndex
}
//Raw return the raw data
func (p *TimePanel) Raw() [][][]float64 {
return p.values
}
//ToProtoBuf transfer to ProtoBuf
func (p *TimePanel) Marshal() ([]byte, error) {
data := make([]float64, p.Length()*p.secondIndex.Length()*p.thirdIndex.Length())
dates := make([]uint64, p.Length())
secondary := make([]string, p.secondIndex.Length())
thirdly := make([]string, p.thirdIndex.Length())
idx := 0
thirdLen := p.thirdIndex.Length()
for i := range p.values {
for j := range p.values[i] {
copy(data[idx:], p.values[i][j])
idx += thirdLen
}
}
for i := range dates {
dates[i] = uint64(p.dates[i].UnixNano())
}
for i := range secondary {
secondary[i] = p.secondIndex.String(i)
}
for i := range thirdly {
thirdly[i] = p.thirdIndex.String(i)
}
return proto.Marshal(&FlyTimePanel{
Data: data,
Dates: dates,
Secondary: secondary,
Thirdly: thirdly,
})
}
//FromProtoBuf transfer from ProtoBuf
func (p *TimePanel) Unmarshal(bytes []byte) error {
fp := &FlyTimePanel{}
err := proto.Unmarshal(bytes, fp)
if err != nil {
return err
}
p.dates = make([]time.Time, len(fp.Dates))
for i := range fp.Dates {
p.dates[i] = time.Unix(0, int64(fp.Dates[i]))
}
p.secondIndex = NewStringIndex(fp.Secondary, true)
p.thirdIndex = NewStringIndex(fp.Thirdly, true)
p.values = make([][][]float64, len(p.dates))
idx := 0
for i := range p.dates {
p.values[i] = make([][]float64, p.secondIndex.Length())
for j := range p.values[i] {
p.values[i][j] = make([]float64 | et retur | identifier_name |
panel.go | , p.thirdIndex)
if dateLen == 0 || !date.Before(p.dates[dateLen-1]) {
p.dates = append(p.dates, date)
p.values = append(p.values, value)
} else {
insertP := sort.Search(dateLen, func(i int) bool {
return p.dates[i].After(date)
})
newDates := make([]time.Time, dateLen+1)
newValues := make([][][]float64, dateLen+1)
newDfs := make([]DataFrame, dateLen+1)
copy(newDates, p.dates[:insertP])
copy(newValues, p.values[:insertP])
newDates[insertP] = date
newValues[insertP] = value
newDfs[insertP] = *df
copy(newDates[insertP+1:], p.dates[insertP:])
copy(newValues[insertP+1:], p.values[insertP:])
p.dates = newDates
p.values = newValues
}
}
func (p *TimePanel) IAddDataFrame(date time.Time, df *DataFrame) {
p.AddMat(date, df.values)
}
// SecondaryLeftReplace 替换Panel Secondary 索引中部分列的值, 时间索引以本地为主, 发生替换时,数值取新数值中该时间之前最接近时间点的数值
// 该 API 目前并不是经济的, 时间开销并没有进行优化
// 如果目标的第三索引与本地不相同,则不进行处理
func (p *TimePanel) SecondaryLeftReplace(target TimePanelRO) {
if p.Length() == 0 {
return
}
tThird := target.Thirdly()
if p.thirdIndex.Length() != tThird.Length() {
return
}
for i := 0; i < p.thirdIndex.Length(); i++ {
if p.thirdIndex.String(i) != tThird.String(i) {
return
}
}
targetIdx := target.Index(p.dates[0])
if targetIdx == target.Length() || target.IDate(targetIdx).After(p.dates[0]) {
targetIdx--
}
for i := 0; i < p.Length(); i++ {
df := p.IGet(i)
localDate := p.IDate(i)
for targetIdx+1 < target.Length() && !target.IDate(targetIdx+1).After(localDate) {
targetIdx++
}
for j := 0; j < target.Secondary().Length(); j++ {
name := target.Secondary().String(j)
localJ := df.Major().Index(name)
if localJ < 0 {
// this column do not exist
continue
}
localSeries := df.IGet(localJ)
if targetIdx < 0 {
// invalid target, fill with nan
for k := range localSeries.values {
localSeries.values[k] = math.NaN()
}
} else {
targetValue := target.IGet(targetIdx).IGet(j).values
if len(localSeries.values) < len(targetValue) {
localSeries.values = make([]float64, len(targetValue))
}
copy(localSeries.values, targetValue)
}
}
}
}
//Slice get part of TimePanel result >= from < to
func (p *TimePanel) Slice(from, to time.Time) TimePanelRO {
i := sort.Search(len(p.dates), func(i int) bool {
return !p.dates[i].Before(from)
})
j := sort.Search(len(p.dates), func(i int) bool {
return !p.dates[i].Before(to)
})
return p.ISlice(i, j)
}
//ISlice get port of TimePanel result >=i <j
func (p *TimePanel) ISlice(i, j int) TimePanelRO {
if i < 0 {
i = 0
}
if j > len(p.dates) {
j = len(p.dates)
}
if i > j {
i = j
}
return &TimePanel{
values: p.values[i:j],
dates: p.dates[i:j],
secondIndex: p.secondIndex,
thirdIndex: p.thirdIndex,
}
}
//CutHead cut value head until
func (p *TimePanel) CutHead(until time.Time) {
i := sort.Search(len(p.dates), func(i int) bool {
return !p.dates[i].Before(until)
})
p.ICutHead(i)
}
//ICutHead cut value head until
func (p *TimePanel) ICutHead(u | until; i++ {
p.values[i] = nil
p.dates[i] = time.Time{}
}
p.values = p.values[until:]
p.dates = p.dates[until:]
}
//Get get the first DataFrame one big or equal to date
func (p *TimePanel) Get(date time.Time) (*DataFrame, time.Time) {
i := sort.Search(len(p.dates), func(i int) bool {
return !p.dates[i].Before(date)
})
df := p.IGet(i)
date = p.IDate(i)
return df, date
}
//Index give the first idx big or equal to date
func (p *TimePanel) Index(date time.Time) int {
return sort.Search(len(p.dates), func(i int) bool {
return !p.dates[i].Before(date)
})
}
//SetMajor replace the index with input
func (p *TimePanel) SetMajor(dates []time.Time) {
p.dates = dates
}
//SetSecond replace the index with input
func (p *TimePanel) SetSecond(index Index) {
p.secondIndex = index
}
//SetThird replace the index with input
func (p *TimePanel) SetThird(index Index) {
p.thirdIndex = index
}
//Length return length of data
func (p *TimePanel) Length() int {
if p == nil {
return 0
}
return len(p.dates)
}
//IGet return data on index
func (p *TimePanel) IGet(i int) *DataFrame {
if p == nil {
return nil
}
if i < 0 {
i += p.Length()
}
if i < 0 || i >= len(p.dates) {
return nil
}
return NewDataFrame(p.values[i], p.secondIndex, p.thirdIndex)
}
//IDate return date on index
func (p *TimePanel) IDate(i int) time.Time {
if p == nil {
return AncientTime
}
if i < 0 {
i += p.Length()
}
if i < 0 || i >= len(p.dates) {
return AncientTime
}
return p.dates[i]
}
//Secondary return second index
func (p *TimePanel) Secondary() Index {
return p.secondIndex
}
//Thirdly return third index
func (p *TimePanel) Thirdly() Index {
return p.thirdIndex
}
//Raw return the raw data
func (p *TimePanel) Raw() [][][]float64 {
return p.values
}
//ToProtoBuf transfer to ProtoBuf
func (p *TimePanel) Marshal() ([]byte, error) {
data := make([]float64, p.Length()*p.secondIndex.Length()*p.thirdIndex.Length())
dates := make([]uint64, p.Length())
secondary := make([]string, p.secondIndex.Length())
thirdly := make([]string, p.thirdIndex.Length())
idx := 0
thirdLen := p.thirdIndex.Length()
for i := range p.values {
for j := range p.values[i] {
copy(data[idx:], p.values[i][j])
idx += thirdLen
}
}
for i := range dates {
dates[i] = uint64(p.dates[i].UnixNano())
}
for i := range secondary {
secondary[i] = p.secondIndex.String(i)
}
for i := range thirdly {
thirdly[i] = p.thirdIndex.String(i)
}
return proto.Marshal(&FlyTimePanel{
Data: data,
Dates: dates,
Secondary: secondary,
Thirdly: thirdly,
})
}
//FromProtoBuf transfer from ProtoBuf
func (p *TimePanel) Unmarshal(bytes []byte) error {
fp := &FlyTimePanel{}
err := proto.Unmarshal(bytes, fp)
if err != nil {
return err
}
p.dates = make([]time.Time, len(fp.Dates))
for i := range fp.Dates {
p.dates[i] = time.Unix(0, int64(fp.Dates[i]))
}
p.secondIndex = NewStringIndex(fp.Secondary, true)
p.thirdIndex = NewStringIndex(fp.Thirdly, true)
p.values = make([][][]float64, len(p.dates))
idx := 0
for i := range p.dates {
p.values[i] = make([][]float64, p.secondIndex.Length())
for j := range p.values[i] {
p.values[i][j] = make([]float64 | ntil int) {
if until < 0 {
until = 0
}
if until > p.Length() {
until = p.Length()
}
for i := 0; i < | identifier_body |
panel.go | , p.thirdIndex)
if dateLen == 0 || !date.Before(p.dates[dateLen-1]) {
p.dates = append(p.dates, date)
p.values = append(p.values, value)
} else {
insertP := sort.Search(dateLen, func(i int) bool {
return p.dates[i].After(date)
})
newDates := make([]time.Time, dateLen+1)
newValues := make([][][]float64, dateLen+1)
newDfs := make([]DataFrame, dateLen+1)
copy(newDates, p.dates[:insertP])
copy(newValues, p.values[:insertP])
newDates[insertP] = date
newValues[insertP] = value
newDfs[insertP] = *df
copy(newDates[insertP+1:], p.dates[insertP:])
copy(newValues[insertP+1:], p.values[insertP:])
p.dates = newDates
p.values = newValues
}
}
func (p *TimePanel) IAddDataFrame(date time.Time, df *DataFrame) {
p.AddMat(date, df.values)
}
// SecondaryLeftReplace 替换Panel Secondary 索引中部分列的值, 时间索引以本地为主, 发生替换时,数值取新数值中该时间之前最接近时间点的数值
// 该 API 目前并不是经济的, 时间开销并没有进行优化
// 如果目标的第三索引与本地不相同,则不进行处理
func (p *TimePanel) SecondaryLeftReplace(target TimePanelRO) {
if p.Length() == 0 {
return
}
tThird := target.Thirdly()
if p.thirdIndex.Length() != tThird.Length() {
return
}
for i := 0; i < p.thirdIndex.Length(); i++ {
if p.thirdIndex.String(i) != tThird.String(i) {
return
}
}
targetIdx := target.Index(p.dates[0])
if targetIdx == target.Length() || target.IDate(targetIdx).After(p.dates[0]) {
targetIdx--
}
for i := 0; i < p.Length(); i++ {
df := p.IGet(i)
localDate := p.IDate(i)
for targetIdx+1 < target.Length() && !target.IDate(targetIdx+1).After(localDate) {
targetIdx++
}
for j := 0; j < target.Secondary().Length(); j++ {
name := target.Secondary().String(j)
localJ := df.Major().Index(name)
if localJ < 0 {
// this column do not exist
continue
}
localSeries := df.IGet(localJ)
if targetIdx < 0 {
// invalid target, fill with nan
for k := range localSeries.values {
localSeries.values[k] = math.NaN()
}
} else {
targetValue := target.IGet(targetIdx).IGet(j).values
if len(localSeries.values) < len(targetValue) {
localSeries.values = make([]float64, len(targetValue))
}
copy(localSeries.values, targetValue)
}
}
}
}
//Slice get part of TimePanel result >= from < to
func (p *TimePanel) Slice(from, to time.Time) TimePanelRO {
i := sort.Search(len(p.dates), func(i int) bool {
return !p.dates[i].Before(from)
})
j := sort.Search(len(p.dates), func(i int) bool {
return !p.dates[i].Before(to)
})
return p.ISlice(i, j)
}
//ISlice get port of TimePanel result >=i <j
func (p *TimePanel) ISlice(i, j int) TimePanelRO {
if i < 0 {
i = 0
}
if j > len(p.dates) {
j = len(p.dates)
}
if i > j {
i = j
}
return &TimePanel{
values: p.values[i:j],
dates: p.dates[i:j],
secondIndex: p.secondIndex,
thirdIndex: p | tHead cut value head until
func (p *TimePanel) CutHead(until time.Time) {
i := sort.Search(len(p.dates), func(i int) bool {
return !p.dates[i].Before(until)
})
p.ICutHead(i)
}
//ICutHead cut value head until
func (p *TimePanel) ICutHead(until int) {
if until < 0 {
until = 0
}
if until > p.Length() {
until = p.Length()
}
for i := 0; i < until; i++ {
p.values[i] = nil
p.dates[i] = time.Time{}
}
p.values = p.values[until:]
p.dates = p.dates[until:]
}
//Get get the first DataFrame one big or equal to date
func (p *TimePanel) Get(date time.Time) (*DataFrame, time.Time) {
i := sort.Search(len(p.dates), func(i int) bool {
return !p.dates[i].Before(date)
})
df := p.IGet(i)
date = p.IDate(i)
return df, date
}
//Index give the first idx big or equal to date
func (p *TimePanel) Index(date time.Time) int {
return sort.Search(len(p.dates), func(i int) bool {
return !p.dates[i].Before(date)
})
}
//SetMajor replace the index with input
func (p *TimePanel) SetMajor(dates []time.Time) {
p.dates = dates
}
//SetSecond replace the index with input
func (p *TimePanel) SetSecond(index Index) {
p.secondIndex = index
}
//SetThird replace the index with input
func (p *TimePanel) SetThird(index Index) {
p.thirdIndex = index
}
//Length return length of data
func (p *TimePanel) Length() int {
if p == nil {
return 0
}
return len(p.dates)
}
//IGet return data on index
func (p *TimePanel) IGet(i int) *DataFrame {
if p == nil {
return nil
}
if i < 0 {
i += p.Length()
}
if i < 0 || i >= len(p.dates) {
return nil
}
return NewDataFrame(p.values[i], p.secondIndex, p.thirdIndex)
}
//IDate return date on index
func (p *TimePanel) IDate(i int) time.Time {
if p == nil {
return AncientTime
}
if i < 0 {
i += p.Length()
}
if i < 0 || i >= len(p.dates) {
return AncientTime
}
return p.dates[i]
}
//Secondary return second index
func (p *TimePanel) Secondary() Index {
return p.secondIndex
}
//Thirdly return third index
func (p *TimePanel) Thirdly() Index {
return p.thirdIndex
}
//Raw return the raw data
func (p *TimePanel) Raw() [][][]float64 {
return p.values
}
//ToProtoBuf transfer to ProtoBuf
func (p *TimePanel) Marshal() ([]byte, error) {
data := make([]float64, p.Length()*p.secondIndex.Length()*p.thirdIndex.Length())
dates := make([]uint64, p.Length())
secondary := make([]string, p.secondIndex.Length())
thirdly := make([]string, p.thirdIndex.Length())
idx := 0
thirdLen := p.thirdIndex.Length()
for i := range p.values {
for j := range p.values[i] {
copy(data[idx:], p.values[i][j])
idx += thirdLen
}
}
for i := range dates {
dates[i] = uint64(p.dates[i].UnixNano())
}
for i := range secondary {
secondary[i] = p.secondIndex.String(i)
}
for i := range thirdly {
thirdly[i] = p.thirdIndex.String(i)
}
return proto.Marshal(&FlyTimePanel{
Data: data,
Dates: dates,
Secondary: secondary,
Thirdly: thirdly,
})
}
//FromProtoBuf transfer from ProtoBuf
func (p *TimePanel) Unmarshal(bytes []byte) error {
fp := &FlyTimePanel{}
err := proto.Unmarshal(bytes, fp)
if err != nil {
return err
}
p.dates = make([]time.Time, len(fp.Dates))
for i := range fp.Dates {
p.dates[i] = time.Unix(0, int64(fp.Dates[i]))
}
p.secondIndex = NewStringIndex(fp.Secondary, true)
p.thirdIndex = NewStringIndex(fp.Thirdly, true)
p.values = make([][][]float64, len(p.dates))
idx := 0
for i := range p.dates {
p.values[i] = make([][]float64, p.secondIndex.Length())
for j := range p.values[i] {
p.values[i][j] = make([]float64 | .thirdIndex,
}
}
//Cu | conditional_block |
panel.go | , p.thirdIndex)
if dateLen == 0 || !date.Before(p.dates[dateLen-1]) {
p.dates = append(p.dates, date)
p.values = append(p.values, value)
} else {
insertP := sort.Search(dateLen, func(i int) bool {
return p.dates[i].After(date)
})
newDates := make([]time.Time, dateLen+1)
newValues := make([][][]float64, dateLen+1)
newDfs := make([]DataFrame, dateLen+1)
copy(newDates, p.dates[:insertP])
copy(newValues, p.values[:insertP])
newDates[insertP] = date
newValues[insertP] = value
newDfs[insertP] = *df
copy(newDates[insertP+1:], p.dates[insertP:])
copy(newValues[insertP+1:], p.values[insertP:])
p.dates = newDates
p.values = newValues
}
}
func (p *TimePanel) IAddDataFrame(date time.Time, df *DataFrame) {
p.AddMat(date, df.values)
}
// SecondaryLeftReplace 替换Panel Secondary 索引中部分列的值, 时间索引以本地为主, 发生替换时,数值取新数值中该时间之前最接近时间点的数值
// 该 API 目前并不是经济的, 时间开销并没有进行优化
// 如果目标的第三索引与本地不相同,则不进行处理
func (p *TimePanel) SecondaryLeftReplace(target TimePanelRO) {
if p.Length() == 0 {
return
}
tThird := target.Thirdly()
if p.thirdIndex.Length() != tThird.Length() {
return
}
for i := 0; i < p.thirdIndex.Length(); i++ {
if p.thirdIndex.String(i) != tThird.String(i) {
return
}
}
targetIdx := target.Index(p.dates[0])
if targetIdx == target.Length() || target.IDate(targetIdx).After(p.dates[0]) {
targetIdx--
}
for i := 0; i < p.Length(); i++ {
df := p.IGet(i)
localDate := p.IDate(i)
for targetIdx+1 < target.Length() && !target.IDate(targetIdx+1).After(localDate) {
targetIdx++
}
for j := 0; j < target.Secondary().Length(); j++ {
name := target.Secondary().String(j)
localJ := df.Major().Index(name)
if localJ < 0 {
// this column do not exist
continue
}
localSeries := df.IGet(localJ)
if targetIdx < 0 {
// invalid target, fill with nan
for k := range localSeries.values {
localSeries.values[k] = math.NaN()
}
} else {
targetValue := target.IGet(targetIdx).IGet(j).values
if len(localSeries.values) < len(targetValue) {
localSeries.values = make([]float64, len(targetValue))
}
copy(localSeries.values, targetValue)
}
}
}
}
//Slice get part of TimePanel result >= from < to
func (p *TimePanel) Slice(from, to time.Time) TimePanelRO {
i := sort.Search(len(p.dates), func(i int) bool {
return !p.dates[i].Before(from)
})
j := sort.Search(len(p.dates), func(i int) bool {
return !p.dates[i].Before(to)
})
return p.ISlice(i, j)
}
| if j > len(p.dates) {
j = len(p.dates)
}
if i > j {
i = j
}
return &TimePanel{
values: p.values[i:j],
dates: p.dates[i:j],
secondIndex: p.secondIndex,
thirdIndex: p.thirdIndex,
}
}
//CutHead cut value head until
func (p *TimePanel) CutHead(until time.Time) {
i := sort.Search(len(p.dates), func(i int) bool {
return !p.dates[i].Before(until)
})
p.ICutHead(i)
}
//ICutHead cut value head until
func (p *TimePanel) ICutHead(until int) {
if until < 0 {
until = 0
}
if until > p.Length() {
until = p.Length()
}
for i := 0; i < until; i++ {
p.values[i] = nil
p.dates[i] = time.Time{}
}
p.values = p.values[until:]
p.dates = p.dates[until:]
}
//Get get the first DataFrame one big or equal to date
func (p *TimePanel) Get(date time.Time) (*DataFrame, time.Time) {
i := sort.Search(len(p.dates), func(i int) bool {
return !p.dates[i].Before(date)
})
df := p.IGet(i)
date = p.IDate(i)
return df, date
}
//Index give the first idx big or equal to date
func (p *TimePanel) Index(date time.Time) int {
return sort.Search(len(p.dates), func(i int) bool {
return !p.dates[i].Before(date)
})
}
//SetMajor replace the index with input
func (p *TimePanel) SetMajor(dates []time.Time) {
p.dates = dates
}
//SetSecond replace the index with input
func (p *TimePanel) SetSecond(index Index) {
p.secondIndex = index
}
//SetThird replace the index with input
func (p *TimePanel) SetThird(index Index) {
p.thirdIndex = index
}
//Length return length of data
func (p *TimePanel) Length() int {
if p == nil {
return 0
}
return len(p.dates)
}
//IGet return data on index
func (p *TimePanel) IGet(i int) *DataFrame {
if p == nil {
return nil
}
if i < 0 {
i += p.Length()
}
if i < 0 || i >= len(p.dates) {
return nil
}
return NewDataFrame(p.values[i], p.secondIndex, p.thirdIndex)
}
//IDate return date on index
func (p *TimePanel) IDate(i int) time.Time {
if p == nil {
return AncientTime
}
if i < 0 {
i += p.Length()
}
if i < 0 || i >= len(p.dates) {
return AncientTime
}
return p.dates[i]
}
//Secondary return second index
func (p *TimePanel) Secondary() Index {
return p.secondIndex
}
//Thirdly return third index
func (p *TimePanel) Thirdly() Index {
return p.thirdIndex
}
//Raw return the raw data
func (p *TimePanel) Raw() [][][]float64 {
return p.values
}
//ToProtoBuf transfer to ProtoBuf
func (p *TimePanel) Marshal() ([]byte, error) {
data := make([]float64, p.Length()*p.secondIndex.Length()*p.thirdIndex.Length())
dates := make([]uint64, p.Length())
secondary := make([]string, p.secondIndex.Length())
thirdly := make([]string, p.thirdIndex.Length())
idx := 0
thirdLen := p.thirdIndex.Length()
for i := range p.values {
for j := range p.values[i] {
copy(data[idx:], p.values[i][j])
idx += thirdLen
}
}
for i := range dates {
dates[i] = uint64(p.dates[i].UnixNano())
}
for i := range secondary {
secondary[i] = p.secondIndex.String(i)
}
for i := range thirdly {
thirdly[i] = p.thirdIndex.String(i)
}
return proto.Marshal(&FlyTimePanel{
Data: data,
Dates: dates,
Secondary: secondary,
Thirdly: thirdly,
})
}
//FromProtoBuf transfer from ProtoBuf
func (p *TimePanel) Unmarshal(bytes []byte) error {
fp := &FlyTimePanel{}
err := proto.Unmarshal(bytes, fp)
if err != nil {
return err
}
p.dates = make([]time.Time, len(fp.Dates))
for i := range fp.Dates {
p.dates[i] = time.Unix(0, int64(fp.Dates[i]))
}
p.secondIndex = NewStringIndex(fp.Secondary, true)
p.thirdIndex = NewStringIndex(fp.Thirdly, true)
p.values = make([][][]float64, len(p.dates))
idx := 0
for i := range p.dates {
p.values[i] = make([][]float64, p.secondIndex.Length())
for j := range p.values[i] {
p.values[i][j] = make([]float64, | //ISlice get port of TimePanel result >=i <j
func (p *TimePanel) ISlice(i, j int) TimePanelRO {
if i < 0 {
i = 0
} | random_line_split |
iter.rs | is u32 not usize because it is not reasonable to
/// have more than u32::MAX strings in a program, and it is widely used
/// - recommend variable name `id` or `string_id`
#[derive(Eq, PartialEq, Clone, Copy, Hash)]
pub struct IsId(NonZeroU32);
impl IsId {
pub(super) const POSITION_MASK: u32 = 1 << 31;
pub const fn new(v: u32) -> Self {
debug_assert!(v != 0, "isid cannot be 0");
// SAFETY: debug_assert above
Self(unsafe { NonZeroU32::new_unchecked(v) })
}
pub fn unwrap(self) -> u32 {
self.0.get()
}
}
impl From<u32> for IsId {
fn from(v: u32) -> Self {
Self::new(v)
}
}
impl fmt::Debug for IsId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("IsId").field(&self.0.get()).finish()
}
}
// this is currently an u128, but I suspect that it can fit in u64
// for current small test until even first bootstrap version, the string id and span should be easily fit in u64
// for "dont-know-whether-exist very large program",
// considering these 2 ids increase accordingly, squash `u32::MAX - id` and span together may still be ok
#[derive(PartialEq, Clone, Copy)]
pub struct IdSpan {
pub id: IsId,
pub span: Span,
}
impl IdSpan {
pub fn new(id: impl Into<IsId>, span: Span) -> Self {
Self{ id: id.into(), span }
}
}
impl fmt::Debug for IdSpan {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("IdSpan")
.field(&self.id)
.field(&self.span)
.finish()
}
}
fn get_hash(content: &str) -> u64 {
let mut hasher = DefaultHasher::new();
Hash::hash(content, &mut hasher);
hasher.finish()
}
// get LF byte indexes
fn get_endlines(content: &str) -> Vec<usize> {
content.char_indices().filter(|(_, c)| c == &'\n').map(|(i, _)| i).collect()
}
// this iterator is the exact first layer of processing above source code content,
// logically all location information comes from position created by the next function
//
// this iterator also includes the string intern interface, to make leixcal parser simpler
//
// from source context's perspective, this is also a source file builder which returns by
// entry and import function and when running, append to string table and when finished, append to files
#[derive(Debug)]
pub struct SourceChars<'a> {
content: String,
current_index: usize, // current iterating byte index, content bytes[current_index] should be the next returned char
start_index: usize, // starting byte index for this file, or previous files's total byte length, will copy to SourceFile.start_index when finished
// copy to SourceFile
path: PathBuf,
namespace: Vec<IsId>,
request: Option<Span>,
// borrow other part of SourceContext except fs to prevent <F> propagation
files: &'a mut Vec<SourceFile>,
string_hash_to_id: &'a mut HashMap<u64, IsId>,
string_id_to_span: &'a mut Vec<Span>,
string_additional: &'a mut String,
}
impl<'a> SourceChars<'a> {
pub(super) fn new<F>(
mut content: String,
start_index: usize,
path: PathBuf,
namespace: Vec<IsId>,
request: Option<Span>,
context: &'a mut SourceContext<F>,
) -> Self {
// append 3 '\0' char to end of content for the branchless (less branch actually) iterator
content.push_str("\0\0\0");
Self{
content,
start_index,
current_index: 0,
path,
namespace,
request,
files: &mut context.files,
string_hash_to_id: &mut context.string_hash_to_id,
string_id_to_span: &mut context.string_id_to_span,
string_additional: &mut context.string_additional,
}
}
/// iterate return char and byte index
///
/// ignore all bare or not bare CR, return EOF after EOF
pub fn next(&mut self) -> (char, Position) {
loop {
if self.current_index == self.content.len() - 3 {
return (EOF, Position::new((self.start_index + self.current_index) as u32));
}
let bytes = self.content.as_bytes();
match bytes[self.current_index] {
b'\r' => { // ignore \r
self.current_index += 1;
continue;
},
b @ 0..=128 => { // ascii fast path
self.current_index += 1;
return (b as char, Position::new((self.current_index - 1 + self.start_index) as u32));
},
_ => {
let width = get_char_width(&self.content, self.current_index);
if self.current_index + width > self.content.len() - 3 {
// TODO: this should be an error not panic, although unrecoverable
panic!("invalid utf-8 sequence");
}
const MASKS: [u8; 5] = [0, 0, 0x1F, 0x0F, 0x07]; // byte 0 masks
const SHIFTS: [u8; 5] = [0, 0, 12, 6, 0]; // shift back for width 2 and width 3
let bytes = &bytes[self.current_index..];
let r#char = ((((bytes[0] & MASKS[width]) as u32) << 18) | (((bytes[1] & 0x3F) as u32) << 12) | (((bytes[2] & 0x3F) as u32) << 6) | ((bytes[3] & 0x3F) as u32)) >> SHIFTS[width];
// TODO: check more invalid utf8 sequence include following bytes not start with 0b10 and larger than 10FFFF and between D800 and E000
self.current_index += width;
// SAFETY: invalid char should not cause severe issue in lexical parse and syntax parse
return (unsafe { char::from_u32_unchecked(r#char) }, Position::new((self.current_index - width + self.start_index) as u32));
},
}
}
}
pub fn intern(&mut self, value: &str) -> IsId {
// empty string is span 0,0, this span must exist because this function exists in this type
if value.is_empty() {
return IsId::new(1);
}
let hash = get_hash(value);
if let Some(id) = self.string_hash_to_id.get(&hash) {
*id
} else {
let new_id = IsId::new(self.string_id_to_span.len() as u32);
self.string_hash_to_id.insert(hash, new_id);
let start_position = if let Some(index) = self.string_additional.find(value) {
index
} else {
self.string_additional.push_str(value);
self.string_additional.len() - value.len()
} as u32;
// ATTENTION: this type of span's end is last byte index + 1 (exactly the one you use in str[begin..end], not last char
let span = Span::new(start_position | IsId::POSITION_MASK, start_position + value.len() as u32);
self.string_id_to_span.push(span);
new_id
}
}
// intern string at location
pub fn intern_span(&mut self, location: Span) -> IsId {
let (start, end) = (location.start.0 as usize, location.end.0 as usize);
debug_assert!(start <= end, "invalid span");
debug_assert!(self.start_index <= start, "not this file span");
// does not check position for EOF because it is not expected to intern something include EOF
debug_assert!(end - self.start_index < self.content.len() - 3 && start - self.start_index < self.content.len() - 3, "position overflow");
let end_width = get_char_width(&self.content, end - self.start_index);
let hash = get_hash(&self.content[start - self.start_index..end - self.start_index + end_width]);
if let Some(id) = self.string_hash_to_id.get(&hash) {
*id
} else {
let new_id = IsId::new(self.string_id_to_span.len() as u32);
self.string_hash_to_id.insert(hash, new_id);
self.string_id_to_span.push(location);
new_id
}
}
pub fn | get_file_id | identifier_name |
|
iter.rs | fn add(self, rhs: Position) -> Span {
debug_assert!(rhs.0 >= self.end.0, "invalid span + position");
Span{ start: self.start, end: rhs }
}
}
// use `span += position` to update span
impl AddAssign<Position> for Span {
fn add_assign(&mut self, rhs: Position) {
debug_assert!(rhs.0 >= self.end.0, "invalid span += position");
self.end = rhs;
}
}
// or use `span1 += span2`
// ATTENTION: only allow left += right, while gap/overlap both acceptable
impl AddAssign<Span> for Span {
fn add_assign(&mut self, rhs: Span) {
debug_assert!(rhs.start.0 >= self.start.0 && rhs.end.0 >= self.end.0, "invalid span += span");
self.end = rhs.end;
}
}
// use `position.into()` to turn position directly into span
impl From<Position> for Span {
fn from(position: Position) -> Span {
Span::new(position, position)
}
}
/// a handle to an interned string
///
/// - IsId means InternedStringID, it is short because it is widely used
/// - it was named SymbolID or SymId or Sym but I found that
/// symbol (in symbol table) in compiler principle means a "name", a name to a variable, function, etc.
/// although I will call that a "Name", or a "TypeId", "VarId" etc. in my semantic analysis, but this name
/// may confuse reader or myself after, for example, 10 years (although I'm not confused after this 5 years)
/// - SymbolID, StringID or InternedStringID is too long,
/// Str or String makes reader think it is kind of string (a ptr, cal, len structure)
/// - it is u32 not usize because it is not reasonable to
/// have more than u32::MAX strings in a program, and it is widely used
/// - recommend variable name `id` or `string_id`
#[derive(Eq, PartialEq, Clone, Copy, Hash)]
pub struct IsId(NonZeroU32);
impl IsId {
pub(super) const POSITION_MASK: u32 = 1 << 31;
pub const fn new(v: u32) -> Self {
debug_assert!(v != 0, "isid cannot be 0");
// SAFETY: debug_assert above
Self(unsafe { NonZeroU32::new_unchecked(v) })
}
pub fn unwrap(self) -> u32 {
self.0.get()
}
}
impl From<u32> for IsId {
fn from(v: u32) -> Self {
Self::new(v)
}
}
impl fmt::Debug for IsId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("IsId").field(&self.0.get()).finish()
}
}
// this is currently an u128, but I suspect that it can fit in u64
// for current small test until even first bootstrap version, the string id and span should be easily fit in u64
// for "dont-know-whether-exist very large program",
// considering these 2 ids increase accordingly, squash `u32::MAX - id` and span together may still be ok
#[derive(PartialEq, Clone, Copy)]
pub struct IdSpan {
pub id: IsId,
pub span: Span,
}
impl IdSpan {
pub fn new(id: impl Into<IsId>, span: Span) -> Self {
Self{ id: id.into(), span }
}
}
impl fmt::Debug for IdSpan {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("IdSpan")
.field(&self.id)
.field(&self.span)
.finish()
}
}
fn get_hash(content: &str) -> u64 {
let mut hasher = DefaultHasher::new();
Hash::hash(content, &mut hasher); | fn get_endlines(content: &str) -> Vec<usize> {
content.char_indices().filter(|(_, c)| c == &'\n').map(|(i, _)| i).collect()
}
// this iterator is the exact first layer of processing above source code content,
// logically all location information comes from position created by the next function
//
// this iterator also includes the string intern interface, to make leixcal parser simpler
//
// from source context's perspective, this is also a source file builder which returns by
// entry and import function and when running, append to string table and when finished, append to files
#[derive(Debug)]
pub struct SourceChars<'a> {
content: String,
current_index: usize, // current iterating byte index, content bytes[current_index] should be the next returned char
start_index: usize, // starting byte index for this file, or previous files's total byte length, will copy to SourceFile.start_index when finished
// copy to SourceFile
path: PathBuf,
namespace: Vec<IsId>,
request: Option<Span>,
// borrow other part of SourceContext except fs to prevent <F> propagation
files: &'a mut Vec<SourceFile>,
string_hash_to_id: &'a mut HashMap<u64, IsId>,
string_id_to_span: &'a mut Vec<Span>,
string_additional: &'a mut String,
}
impl<'a> SourceChars<'a> {
pub(super) fn new<F>(
mut content: String,
start_index: usize,
path: PathBuf,
namespace: Vec<IsId>,
request: Option<Span>,
context: &'a mut SourceContext<F>,
) -> Self {
// append 3 '\0' char to end of content for the branchless (less branch actually) iterator
content.push_str("\0\0\0");
Self{
content,
start_index,
current_index: 0,
path,
namespace,
request,
files: &mut context.files,
string_hash_to_id: &mut context.string_hash_to_id,
string_id_to_span: &mut context.string_id_to_span,
string_additional: &mut context.string_additional,
}
}
/// iterate return char and byte index
///
/// ignore all bare or not bare CR, return EOF after EOF
pub fn next(&mut self) -> (char, Position) {
loop {
if self.current_index == self.content.len() - 3 {
return (EOF, Position::new((self.start_index + self.current_index) as u32));
}
let bytes = self.content.as_bytes();
match bytes[self.current_index] {
b'\r' => { // ignore \r
self.current_index += 1;
continue;
},
b @ 0..=128 => { // ascii fast path
self.current_index += 1;
return (b as char, Position::new((self.current_index - 1 + self.start_index) as u32));
},
_ => {
let width = get_char_width(&self.content, self.current_index);
if self.current_index + width > self.content.len() - 3 {
// TODO: this should be an error not panic, although unrecoverable
panic!("invalid utf-8 sequence");
}
const MASKS: [u8; 5] = [0, 0, 0x1F, 0x0F, 0x07]; // byte 0 masks
const SHIFTS: [u8; 5] = [0, 0, 12, 6, 0]; // shift back for width 2 and width 3
let bytes = &bytes[self.current_index..];
let r#char = ((((bytes[0] & MASKS[width]) as u32) << 18) | (((bytes[1] & 0x3F) as u32) << 12) | (((bytes[2] & 0x3F) as u32) << 6) | ((bytes[3] & 0x3F) as u32)) >> SHIFTS[width];
// TODO: check more invalid utf8 sequence include following bytes not start with 0b10 and larger than 10FFFF and between D800 and E000
self.current_index += width;
// SAFETY: invalid char should not cause severe issue in lexical parse and syntax parse
return (unsafe { char::from_u32_unchecked(r#char) }, Position::new((self.current_index - width + self.start_index) as u32));
},
}
}
}
pub fn intern(&mut self, value: &str) -> IsId {
// empty string is span 0,0, this span must exist because this function exists in this type
if value.is_empty() {
return IsId::new(1);
}
let hash = get_hash(value);
if let Some(id) = self.string_hash_to_id.get(&hash) {
*id
} else {
let new_id = IsId:: | hasher.finish()
}
// get LF byte indexes | random_line_split |
iter.rs | fn add(self, rhs: Position) -> Span {
debug_assert!(rhs.0 >= self.end.0, "invalid span + position");
Span{ start: self.start, end: rhs }
}
}
// use `span += position` to update span
impl AddAssign<Position> for Span {
fn add_assign(&mut self, rhs: Position) {
debug_assert!(rhs.0 >= self.end.0, "invalid span += position");
self.end = rhs;
}
}
// or use `span1 += span2`
// ATTENTION: only allow left += right, while gap/overlap both acceptable
impl AddAssign<Span> for Span {
fn add_assign(&mut self, rhs: Span) {
debug_assert!(rhs.start.0 >= self.start.0 && rhs.end.0 >= self.end.0, "invalid span += span");
self.end = rhs.end;
}
}
// use `position.into()` to turn position directly into span
impl From<Position> for Span {
fn from(position: Position) -> Span {
Span::new(position, position)
}
}
/// a handle to an interned string
///
/// - IsId means InternedStringID, it is short because it is widely used
/// - it was named SymbolID or SymId or Sym but I found that
/// symbol (in symbol table) in compiler principle means a "name", a name to a variable, function, etc.
/// although I will call that a "Name", or a "TypeId", "VarId" etc. in my semantic analysis, but this name
/// may confuse reader or myself after, for example, 10 years (although I'm not confused after this 5 years)
/// - SymbolID, StringID or InternedStringID is too long,
/// Str or String makes reader think it is kind of string (a ptr, cal, len structure)
/// - it is u32 not usize because it is not reasonable to
/// have more than u32::MAX strings in a program, and it is widely used
/// - recommend variable name `id` or `string_id`
#[derive(Eq, PartialEq, Clone, Copy, Hash)]
pub struct IsId(NonZeroU32);
impl IsId {
pub(super) const POSITION_MASK: u32 = 1 << 31;
pub const fn new(v: u32) -> Self {
debug_assert!(v != 0, "isid cannot be 0");
// SAFETY: debug_assert above
Self(unsafe { NonZeroU32::new_unchecked(v) })
}
pub fn unwrap(self) -> u32 {
self.0.get()
}
}
impl From<u32> for IsId {
fn from(v: u32) -> Self {
Self::new(v)
}
}
impl fmt::Debug for IsId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result |
}
// this is currently an u128, but I suspect that it can fit in u64
// for current small test until even first bootstrap version, the string id and span should be easily fit in u64
// for "dont-know-whether-exist very large program",
// considering these 2 ids increase accordingly, squash `u32::MAX - id` and span together may still be ok
#[derive(PartialEq, Clone, Copy)]
pub struct IdSpan {
pub id: IsId,
pub span: Span,
}
impl IdSpan {
pub fn new(id: impl Into<IsId>, span: Span) -> Self {
Self{ id: id.into(), span }
}
}
impl fmt::Debug for IdSpan {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("IdSpan")
.field(&self.id)
.field(&self.span)
.finish()
}
}
fn get_hash(content: &str) -> u64 {
let mut hasher = DefaultHasher::new();
Hash::hash(content, &mut hasher);
hasher.finish()
}
// get LF byte indexes
fn get_endlines(content: &str) -> Vec<usize> {
content.char_indices().filter(|(_, c)| c == &'\n').map(|(i, _)| i).collect()
}
// this iterator is the exact first layer of processing above source code content,
// logically all location information comes from position created by the next function
//
// this iterator also includes the string intern interface, to make leixcal parser simpler
//
// from source context's perspective, this is also a source file builder which returns by
// entry and import function and when running, append to string table and when finished, append to files
#[derive(Debug)]
pub struct SourceChars<'a> {
content: String,
current_index: usize, // current iterating byte index, content bytes[current_index] should be the next returned char
start_index: usize, // starting byte index for this file, or previous files's total byte length, will copy to SourceFile.start_index when finished
// copy to SourceFile
path: PathBuf,
namespace: Vec<IsId>,
request: Option<Span>,
// borrow other part of SourceContext except fs to prevent <F> propagation
files: &'a mut Vec<SourceFile>,
string_hash_to_id: &'a mut HashMap<u64, IsId>,
string_id_to_span: &'a mut Vec<Span>,
string_additional: &'a mut String,
}
impl<'a> SourceChars<'a> {
pub(super) fn new<F>(
mut content: String,
start_index: usize,
path: PathBuf,
namespace: Vec<IsId>,
request: Option<Span>,
context: &'a mut SourceContext<F>,
) -> Self {
// append 3 '\0' char to end of content for the branchless (less branch actually) iterator
content.push_str("\0\0\0");
Self{
content,
start_index,
current_index: 0,
path,
namespace,
request,
files: &mut context.files,
string_hash_to_id: &mut context.string_hash_to_id,
string_id_to_span: &mut context.string_id_to_span,
string_additional: &mut context.string_additional,
}
}
/// iterate return char and byte index
///
/// ignore all bare or not bare CR, return EOF after EOF
pub fn next(&mut self) -> (char, Position) {
loop {
if self.current_index == self.content.len() - 3 {
return (EOF, Position::new((self.start_index + self.current_index) as u32));
}
let bytes = self.content.as_bytes();
match bytes[self.current_index] {
b'\r' => { // ignore \r
self.current_index += 1;
continue;
},
b @ 0..=128 => { // ascii fast path
self.current_index += 1;
return (b as char, Position::new((self.current_index - 1 + self.start_index) as u32));
},
_ => {
let width = get_char_width(&self.content, self.current_index);
if self.current_index + width > self.content.len() - 3 {
// TODO: this should be an error not panic, although unrecoverable
panic!("invalid utf-8 sequence");
}
const MASKS: [u8; 5] = [0, 0, 0x1F, 0x0F, 0x07]; // byte 0 masks
const SHIFTS: [u8; 5] = [0, 0, 12, 6, 0]; // shift back for width 2 and width 3
let bytes = &bytes[self.current_index..];
let r#char = ((((bytes[0] & MASKS[width]) as u32) << 18) | (((bytes[1] & 0x3F) as u32) << 12) | (((bytes[2] & 0x3F) as u32) << 6) | ((bytes[3] & 0x3F) as u32)) >> SHIFTS[width];
// TODO: check more invalid utf8 sequence include following bytes not start with 0b10 and larger than 10FFFF and between D800 and E000
self.current_index += width;
// SAFETY: invalid char should not cause severe issue in lexical parse and syntax parse
return (unsafe { char::from_u32_unchecked(r#char) }, Position::new((self.current_index - width + self.start_index) as u32));
},
}
}
}
pub fn intern(&mut self, value: &str) -> IsId {
// empty string is span 0,0, this span must exist because this function exists in this type
if value.is_empty() {
return IsId::new(1);
}
let hash = get_hash(value);
if let Some(id) = self.string_hash_to_id.get(&hash) {
*id
} else {
let new_id = | {
f.debug_tuple("IsId").field(&self.0.get()).finish()
} | identifier_body |
settings_class.py | import DataStore, ParseError
from npc.util.functions import merge_data_dicts, prepend_namespace
from .tags import make_deprecated_tag_specs
from .helpers import quiet_parse
from .systems import System
import logging
logger = logging.getLogger(__name__)
class Settings(DataStore):
"""Core settings class
On init, it loads the default settings, followed by settings in the personal_dir. The campaign_dir is saved
for later use.
Settings are stored in yaml files.
"""
def __init__(self, personal_dir: Path = None):
super().__init__()
        if personal_dir is None:
personal_dir = Path('~/.config/npc/').expanduser()
self.personal_dir: Path = personal_dir
self.campaign_dir: Path = None
self.install_base = resources.files("npc")
self.default_settings_path = self.install_base / "settings"
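        # npc version and source path reported by each loaded settings file, keyed by file_key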
self.versions = {
"package": npc_version,
}
self.loaded_paths = {
"package": None,
}
# load defaults and user prefs
self.refresh()
def refresh(self) -> None:
"""
Clear internal data, and refresh the default and personal settings files
"""
self.data = {}
self.load_settings_file(self.default_settings_path / "settings.yaml", file_key="internal")
self.load_systems(self.default_settings_path / "systems")
self.load_settings_file(self.personal_dir / "settings.yaml", file_key="user")
self.load_systems(self.personal_dir / "systems")
def load_settings_file(self, settings_file: Path, namespace: str = None, *, file_key: str = None) -> None:
"""Open, parse, and merge settings from another file
This is the primary way to load more settings info. Passing in a file path that does not exist will
result in a log message and no error, since all setting files are technically optional.
The file_key for any given file should be unique. These are the keys in use right now:
* internal
* user
* campaign
Args:
settings_file (Path): The file to load
namespace (str): Optional namespace to use for new_data
file_key (str): Key to use when storing the file's stated npc version and path
"""
loaded: dict = quiet_parse(settings_file)
if loaded is None:
return
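        # pop the declared npc version so it is tracked separately rather than merged into settings data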
if file_key:
file_version = loaded.get("npc", {}).pop("version", None)
self.versions[file_key] = file_version
self.loaded_paths[file_key] = settings_file
self.merge_data(loaded, namespace)
def load_systems(self, systems_dir: Path) -> None:
"""Parse and load all system configs in systems_dir
Finds all yaml files in systems_dir and loads them as systems. Special handling allows deep
inheritance, and prevents circular dependencies between systems.
Args:
systems_dir (Path): Dir to check for system config files
"""
        system_settings: list = list(systems_dir.glob("*.yaml"))
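        # child system configs that extend another system, grouped by the parent key they wait on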
dependencies = defaultdict(list)
for settings_file in system_settings:
loaded = quiet_parse(settings_file)
if loaded is None:
continue
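            # each system file is expected to hold a single top-level key naming the system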
system_name = next(iter(loaded))
loaded_contents = loaded[system_name]
if "extends" in loaded_contents:
dependencies[loaded_contents["extends"]].append(loaded)
continue
self.merge_data(loaded, namespace="npc.systems")
def load_dependencies(deps: dict):
"""Handle dependency loading
Unrecognized parents are stored away for the next iteration. Otherwise, children are merged with
their parent's attributes, then merged into self.
If the dependencies do not change for one iteration, then the remaining systems cannot be loaded
and are skipped.
Args:
deps (dict): Dict mapping parent system keys to child system configs
"""
new_deps = {}
for parent_name, children in deps.items():
if parent_name not in self.get("npc.systems"):
new_deps[parent_name] = children
continue
for child in children:
child_name = next(iter(child))
parent_conf = dict(self.get(f"npc.systems.{parent_name}"))
combined = merge_data_dicts(child[child_name], parent_conf)
self.merge_data(combined, namespace=f"npc.systems.{child_name}")
if not new_deps:
return
if new_deps == deps:
logger.error(f"Some systems could not be found: {deps.keys()}")
return
load_dependencies(new_deps)
load_dependencies(dependencies)
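# Illustrative walk-through with hypothetical system names: if "fate" extends
# "fudge", the loop above merges "fudge" immediately and defers "fate" into
# dependencies == {"fudge": [{"fate": {...}}]}; the call above then merges
# "fate" on top of the now-loaded "fudge" definition. A child whose parent
# never appears is reported through logger.error and skipped.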
def load_types(self, types_dir: Path, *, system_key: str, namespace_root: str = "npc") -> None:
"""Load type definitions from a path for a given game system
Parses and stores type definitions found in types_dir. All yaml files in that dir are assumed to be
type defs. Files immediately in the dir are parsed first, then a subdir matching the given system key
is checked.
Parsed definitions are put into the "x.types.system" namespace. The root of this namespace is
determined by the namespace_root passed, and the system component uses the system key provided.
The sheet_path property is handled specially. If it's present in a type's yaml, then that value is
used. If not, a file whose name matches the type key is assumed to be the correct sheet contents file.
Args:
types_dir (Path): Path to look in for type definitions
system_key (str): Key of the game system these types are for
namespace_root (str): Root namespace under which the loaded type definitions are stored (default: `"npc"`)
"""
def process_types_dir(search_dir: Path) -> None:
"""Load yaml files, expand sheet paths, handle implied sheets
This internal helper method scans all the files in search_dir and tries to load them by their type:
* yaml files are treated as type definitions and parsed. If they have a sheet_path property, it is
expanded into a fully qualified Path for later use
* All other files are set aside for later. After the types have been loaded, the base names of the
remaining files are compared against the loaded type keys within our current namespace. Any that
match are treated as the implicit sheet file for that type, and their Path is saved to the
type's sheet_path property.
Args:
search_dir (Path): Directory to search for type and sheet files
"""
discovered_sheets: dict = {}
for type_path in search_dir.glob("*.*"):
if type_path.suffix != ".yaml":
type_key: str = type_path.stem
discovered_sheets[type_key] = type_path
continue
typedef: dict = quiet_parse(type_path)
try:
type_key: str = next(iter(typedef))
except TypeError:
raise ParseError("Missing top-level key for type config", type_path)
if typedef[type_key].get("sheet_path"):
sheet_path = Path(typedef[type_key].get("sheet_path"))
if sheet_path.is_absolute():
typedef[type_key]["sheet_path"] = sheet_path.resolve()
else:
typedef[type_key]["sheet_path"] = search_dir.joinpath(sheet_path).resolve()
self.merge_data(typedef, types_namespace)
for type_key, sheet_path in discovered_sheets.items():
if type_key not in self.get(types_namespace, {}):
logger.info(f"Type {type_key} not defined, skipping potential sheet {sheet_path}")
continue
if "sheet_path" not in self.get(f"{types_namespace}.{type_key}"):
self.merge_data({type_key: {"sheet_path": sheet_path}}, types_namespace)
types_namespace: str = f"{namespace_root}.types.{system_key}"
process_types_dir(types_dir)
if self.get(f"npc.systems.{system_key}.extends"):
process_types_dir(types_dir / self.get(f"npc.systems.{system_key}.extends"))
process_types_dir(types_dir / system_key)
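# Hypothetical layout, for illustration only (file names are invented):
#   types/character.yaml   -> defines key "character" with no sheet_path
#   types/character.md     -> picked up as the implied sheet for "character"
#   types/generic/pet.yaml -> defines key "pet"
# load_types(Path("types"), system_key="generic") would then populate
# "npc.types.generic.character" and "npc.types.generic.pet", with character's
# sheet_path resolved to the neighbouring .md file.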
def get_system_keys(self) -> list[str]:
"""Get a list of valid system keys
This method only considers systems in the npc namespace.
Returns:
list[str]: List of system keys
"""
return self.get("npc.systems").keys()
def | (self, key: str) -> System:
"""Get a system object for the given system key
Creates a System object using the definition from the given key. If the key does not have a
definition, returns None.
Args:
key (str): System key name to use
Returns:
System: System object for the given key, or None if the key does not have a system def
"""
if key not in self.get("npc.systems"):
logger.error(f"System '{key}' is not defined")
return None
return System(key, self)
@cached_property
def deprecated_tags(self) -> dict:
"""Get the deprecated tag definitions
These specs describe tags that should no longer be used at all, due to changes in the way that NPC
works.
Returns:
dict: Dict of deprecated tag info, indexed by tag name
"""
return make_deprecated_tag_specs(self.get("npc.deprecated_tags", {}))
@property
def required_dirs(self) -> list:
"""Get the list of required campaign directories
This includes the dirs for character, session, and plot files, relative to self.campaign_dir
Returns:
list: List of required directory names
"""
return [
self.get("campaign.characters.path"),
self.get("campaign.session | get_system | identifier_name |
settings_class.py | .util import DataStore, ParseError
from npc.util.functions import merge_data_dicts, prepend_namespace
from .tags import make_deprecated_tag_specs
from .helpers import quiet_parse
from .systems import System
import logging
logger = logging.getLogger(__name__)
class Settings(DataStore):
"""Core settings class
On init, it loads the default settings, followed by settings in the personal_dir. The campaign_dir is saved
for later use.
Settings are stored in yaml files.
"""
def __init__(self, personal_dir: Path = None):
super().__init__()
if(personal_dir is None):
personal_dir = Path('~/.config/npc/').expanduser()
self.personal_dir: Path = personal_dir
self.campaign_dir: Path = None
self.install_base = resources.files("npc")
self.default_settings_path = self.install_base / "settings"
self.versions = {
"package": npc_version,
}
self.loaded_paths = {
"package": None,
}
# load defaults and user prefs
self.refresh()
def refresh(self) -> None:
"""
Clear internal data, and refresh the default and personal settings files
"""
self.data = {}
self.load_settings_file(self.default_settings_path / "settings.yaml", file_key="internal")
self.load_systems(self.default_settings_path / "systems")
self.load_settings_file(self.personal_dir / "settings.yaml", file_key="user")
self.load_systems(self.personal_dir / "systems")
def load_settings_file(self, settings_file: Path, namespace: str = None, *, file_key: str = None) -> None:
"""Open, parse, and merge settings from another file
This is the primary way to load more settings info. Passing in a file path that does not exist will
result in a log message and no error, since all setting files are technically optional.
The file_key for any given file should be unique. These are the keys in use right now:
* internal
* user
* campaign
Args:
settings_file (Path): The file to load
namespace (str): Optional namespace to use for new_data
file_key (str): Key to use when storing the file's stated npc version and path
"""
loaded: dict = quiet_parse(settings_file)
if loaded is None:
return
if file_key:
file_version = loaded.get("npc", {}).pop("version", None)
self.versions[file_key] = file_version
self.loaded_paths[file_key] = settings_file
self.merge_data(loaded, namespace)
def load_systems(self, systems_dir: Path) -> None:
"""Parse and load all system configs in systems_dir
Finds all yaml files in systems_dir and loads them as systems. Special handling allows deep
inheritance, and prevents circular dependencies between systems.
Args:
systems_dir (Path): Dir to check for system config files
"""
system_settings = systems_dir.glob("*.yaml")
dependencies = defaultdict(list)
for settings_file in system_settings:
loaded = quiet_parse(settings_file)
if loaded is None:
continue
system_name = next(iter(loaded))
loaded_contents = loaded[system_name]
if "extends" in loaded_contents:
dependencies[loaded_contents["extends"]].append(loaded)
continue
self.merge_data(loaded, namespace="npc.systems")
def load_dependencies(deps: dict):
"""Handle dependency loading
Unrecognized parents are stored away for the next iteration. Otherwise, children are merged with
their parent's attributes, then merged into self.
If the dependencies do not change for one iteration, then the remaining systems cannot be loaded
and are skipped.
Args:
deps (dict): Dict mapping parent system keys to child system configs
"""
new_deps = {}
for parent_name, children in deps.items():
if parent_name not in self.get("npc.systems"):
new_deps[parent_name] = children
continue
for child in children:
child_name = next(iter(child))
parent_conf = dict(self.get(f"npc.systems.{parent_name}"))
combined = merge_data_dicts(child[child_name], parent_conf)
self.merge_data(combined, namespace=f"npc.systems.{child_name}")
if not new_deps:
return
if new_deps == deps:
logger.error(f"Some systems could not be found: {deps.keys()}")
return
load_dependencies(new_deps)
load_dependencies(dependencies)
def load_types(self, types_dir: Path, *, system_key: str, namespace_root: str = "npc") -> None:
"""Load type definitions from a path for a given game system
Parses and stores type definitions found in types_dir. All yaml files in that dir are assumed to be
type defs. Files immediately in the dir are parsed first, then a subdir matching the given system key
is checked.
Parsed definitions are put into the "x.types.system" namespace. The root of this namespace is
determined by the namespace_root passed, and the system component uses the system key provided.
| The sheet_path property is handled specially. If it's present in a type's yaml, then that value is
used. If not, a file whose name matches the type key is assumed to be the correct sheet contents file.
Args:
types_dir (Path): Path to look in for type definitions
system_key (str): Key of the game system these types are for
namespace_root (str): Root namespace under which the loaded type definitions are stored (default: `"npc"`)
"""
def process_types_dir(search_dir: Path) -> None:
"""Load yaml files, expand sheet paths, handle implied sheets
This internal helper method scans all the files in search_dir and tries to load them by their type:
* yaml files are treated as type definitions and parsed. If they have a sheet_path property, it is
expanded into a fully qualified Path for later use
* All other files are set aside for later. After the types have been loaded, the base names of the
remaining files are compared against the loaded type keys within our current namespace. Any that
match are treated as the implicit sheet file for that type, and their Path is saved to the
type's sheet_path property.
Args:
search_dir (Path): Directory to search for type and sheet files
"""
discovered_sheets: dict = {}
for type_path in search_dir.glob("*.*"):
if type_path.suffix != ".yaml":
type_key: str = type_path.stem
discovered_sheets[type_key] = type_path
continue
typedef: dict = quiet_parse(type_path)
try:
type_key: str = next(iter(typedef))
except TypeError:
raise ParseError("Missing top-level key for type config", type_path)
if typedef[type_key].get("sheet_path"):
sheet_path = Path(typedef[type_key].get("sheet_path"))
if sheet_path.is_absolute():
typedef[type_key]["sheet_path"] = sheet_path.resolve()
else:
typedef[type_key]["sheet_path"] = search_dir.joinpath(sheet_path).resolve()
self.merge_data(typedef, types_namespace)
for type_key, sheet_path in discovered_sheets.items():
if type_key not in self.get(types_namespace, {}):
logger.info(f"Type {type_key} not defined, skipping potential sheet {sheet_path}")
continue
if "sheet_path" not in self.get(f"{types_namespace}.{type_key}"):
self.merge_data({type_key: {"sheet_path": sheet_path}}, types_namespace)
types_namespace: str = f"{namespace_root}.types.{system_key}"
process_types_dir(types_dir)
if self.get(f"npc.systems.{system_key}.extends"):
process_types_dir(types_dir / self.get(f"npc.systems.{system_key}.extends"))
process_types_dir(types_dir / system_key)
def get_system_keys(self) -> list[str]:
"""Get a list of valid system keys
This method only considers systems in the npc namespace.
Returns:
list[str]: List of system keys
"""
return self.get("npc.systems").keys()
def get_system(self, key: str) -> System:
"""Get a system object for the given system key
Creates a System object using the definition from the given key. If the key does not have a
definition, returns None.
Args:
key (str): System key name to use
Returns:
System: System object for the given key, or None if the key does not have a system def
"""
if key not in self.get("npc.systems"):
logger.error(f"System '{key}' is not defined")
return None
return System(key, self)
@cached_property
def deprecated_tags(self) -> dict:
"""Get the deprecated tag definitions
These specs describe tags that should no longer be used at all, due to changes in the way that NPC
works.
Returns:
dict: Dict of deprecated tag info, indexed by tag name
"""
return make_deprecated_tag_specs(self.get("npc.deprecated_tags", {}))
@property
def required_dirs(self) -> list:
"""Get the list of required campaign directories
This includes the dirs for character, session, and plot files, relative to self.campaign_dir
Returns:
list: List of required directory names
"""
return [
self.get("campaign.characters.path"),
self.get("campaign.session | random_line_split |
|
settings_class.py | .util import DataStore, ParseError
from npc.util.functions import merge_data_dicts, prepend_namespace
from .tags import make_deprecated_tag_specs
from .helpers import quiet_parse
from .systems import System
import logging
logger = logging.getLogger(__name__)
class Settings(DataStore):
"""Core settings class
On init, it loads the default settings, followed by settings in the personal_dir. The campaign_dir is saved
for later use.
Settings are stored in yaml files.
"""
def __init__(self, personal_dir: Path = None):
super().__init__()
if(personal_dir is None):
personal_dir = Path('~/.config/npc/').expanduser()
self.personal_dir: Path = personal_dir
self.campaign_dir: Path = None
self.install_base = resources.files("npc")
self.default_settings_path = self.install_base / "settings"
self.versions = {
"package": npc_version,
}
self.loaded_paths = {
"package": None,
}
# load defaults and user prefs
self.refresh()
def refresh(self) -> None:
"""
Clear internal data, and refresh the default and personal settings files
"""
self.data = {}
self.load_settings_file(self.default_settings_path / "settings.yaml", file_key="internal")
self.load_systems(self.default_settings_path / "systems")
self.load_settings_file(self.personal_dir / "settings.yaml", file_key="user")
self.load_systems(self.personal_dir / "systems")
def load_settings_file(self, settings_file: Path, namespace: str = None, *, file_key: str = None) -> None:
"""Open, parse, and merge settings from another file
This is the primary way to load more settings info. Passing in a file path that does not exist will
result in a log message and no error, since all setting files are technically optional.
The file_key for any given file should be unique. These are the keys in use right now:
* internal
* user
* campaign
Args:
settings_file (Path): The file to load
namespace (str): Optional namespace to use for new_data
file_key (str): Key to use when storing the file's stated npc version and path
"""
loaded: dict = quiet_parse(settings_file)
if loaded is None:
return
if file_key:
file_version = loaded.get("npc", {}).pop("version", None)
self.versions[file_key] = file_version
self.loaded_paths[file_key] = settings_file
self.merge_data(loaded, namespace)
def load_systems(self, systems_dir: Path) -> None:
"""Parse and load all system configs in systems_dir
Finds all yaml files in systems_dir and loads them as systems. Special handling allows deep
inheritance, and prevents circular dependencies between systems.
Args:
systems_dir (Path): Dir to check for system config files
"""
system_settings = systems_dir.glob("*.yaml")
dependencies = defaultdict(list)
for settings_file in system_settings:
loaded = quiet_parse(settings_file)
if loaded is None:
|
system_name = next(iter(loaded))
loaded_contents = loaded[system_name]
if "extends" in loaded_contents:
dependencies[loaded_contents["extends"]].append(loaded)
continue
self.merge_data(loaded, namespace="npc.systems")
def load_dependencies(deps: dict):
"""Handle dependency loading
Unrecognized parents are stored away for the next iteration. Otherwise, children are merged with
their parent's attributes, then merged into self.
If the dependencies do not change for one iteration, then the remaining systems cannot be loaded
and are skipped.
Args:
deps (dict): Dict mapping parent system keys to child system configs
"""
new_deps = {}
for parent_name, children in deps.items():
if parent_name not in self.get("npc.systems"):
new_deps[parent_name] = children
continue
for child in children:
child_name = next(iter(child))
parent_conf = dict(self.get(f"npc.systems.{parent_name}"))
combined = merge_data_dicts(child[child_name], parent_conf)
self.merge_data(combined, namespace=f"npc.systems.{child_name}")
if not new_deps:
return
if new_deps == deps:
logger.error(f"Some systems could not be found: {deps.keys()}")
return
load_dependencies(new_deps)
load_dependencies(dependencies)
def load_types(self, types_dir: Path, *, system_key: str, namespace_root: str = "npc") -> None:
"""Load type definitions from a path for a given game system
Parses and stores type definitions found in types_dir. All yaml files in that dir are assumed to be
type defs. Files immediately in the dir are parsed first, then a subdir matching the given system key
is checked.
Parsed definitions are put into the "x.types.system" namespace. The root of this namespace is
determined by the namespace_root passed, and the system component uses the system key provided.
The sheet_path property is handled specially. If it's present in a type's yaml, then that value is
used. If not, a file whose name matches the type key is assumed to be the correct sheet contents file.
Args:
types_dir (Path): Path to look in for type definitions
system_key (str): Key of the game system these types are for
namespace_root (str): Root namespace under which the loaded type definitions are stored (default: `"npc"`)
"""
def process_types_dir(search_dir: Path) -> None:
"""Load yaml files, expand sheet paths, handle implied sheets
This internal helper method scans all the files in search_dir and tries to load them by their type:
* yaml files are treated as type definitions and parsed. If they have a sheet_path property, it is
expanded into a fully qualified Path for later use
* All other files are set aside for later. After the types have been loaded, the base names of the
remaining files are compared against the loaded type keys within our current namespace. Any that
match are treated as the implicit sheet file for that type, and their Path is saved to the
type's sheet_path property.
Args:
search_dir (Path): Directory to search for type and sheet files
"""
discovered_sheets: dict = {}
for type_path in search_dir.glob("*.*"):
if type_path.suffix != ".yaml":
type_key: str = type_path.stem
discovered_sheets[type_key] = type_path
continue
typedef: dict = quiet_parse(type_path)
try:
type_key: str = next(iter(typedef))
except TypeError:
raise ParseError("Missing top-level key for type config", type_path)
if typedef[type_key].get("sheet_path"):
sheet_path = Path(typedef[type_key].get("sheet_path"))
if sheet_path.is_absolute():
typedef[type_key]["sheet_path"] = sheet_path.resolve()
else:
typedef[type_key]["sheet_path"] = search_dir.joinpath(sheet_path).resolve()
self.merge_data(typedef, types_namespace)
for type_key, sheet_path in discovered_sheets.items():
if type_key not in self.get(types_namespace, {}):
logger.info(f"Type {type_key} not defined, skipping potential sheet {sheet_path}")
continue
if "sheet_path" not in self.get(f"{types_namespace}.{type_key}"):
self.merge_data({type_key: {"sheet_path": sheet_path}}, types_namespace)
types_namespace: str = f"{namespace_root}.types.{system_key}"
process_types_dir(types_dir)
if self.get(f"npc.systems.{system_key}.extends"):
process_types_dir(types_dir / self.get(f"npc.systems.{system_key}.extends"))
process_types_dir(types_dir / system_key)
def get_system_keys(self) -> list[str]:
"""Get a list of valid system keys
This method only considers systems in the npc namespace.
Returns:
list[str]: List of system keys
"""
return self.get("npc.systems").keys()
def get_system(self, key: str) -> System:
"""Get a system object for the given system key
Creates a System object using the definition from the given key. If the key does not have a
definition, returns None.
Args:
key (str): System key name to use
Returns:
System: System object for the given key, or None if the key does not have a system def
"""
if key not in self.get("npc.systems"):
logger.error(f"System '{key}' is not defined")
return None
return System(key, self)
@cached_property
def deprecated_tags(self) -> dict:
"""Get the deprecated tag definitions
These specs describe tags that should no longer be used at all, due to changes in the way that NPC
works.
Returns:
dict: Dict of deprecated tag info, indexed by tag name
"""
return make_deprecated_tag_specs(self.get("npc.deprecated_tags", {}))
@property
def required_dirs(self) -> list:
"""Get the list of required campaign directories
This includes the dirs for character, session, and plot files, relative to self.campaign_dir
Returns:
list: List of required directory names
"""
return [
self.get("campaign.characters.path"),
self.get("campaign.session | continue | conditional_block |
settings_class.py | .util import DataStore, ParseError
from npc.util.functions import merge_data_dicts, prepend_namespace
from .tags import make_deprecated_tag_specs
from .helpers import quiet_parse
from .systems import System
import logging
logger = logging.getLogger(__name__)
class Settings(DataStore):
"""Core settings class
On init, it loads the default settings, followed by settings in the personal_dir. The campaign_dir is saved
for later use.
Settings are stored in yaml files.
"""
def __init__(self, personal_dir: Path = None):
super().__init__()
if(personal_dir is None):
personal_dir = Path('~/.config/npc/').expanduser()
self.personal_dir: Path = personal_dir
self.campaign_dir: Path = None
self.install_base = resources.files("npc")
self.default_settings_path = self.install_base / "settings"
self.versions = {
"package": npc_version,
}
self.loaded_paths = {
"package": None,
}
# load defaults and user prefs
self.refresh()
def refresh(self) -> None:
"""
Clear internal data, and refresh the default and personal settings files
"""
self.data = {}
self.load_settings_file(self.default_settings_path / "settings.yaml", file_key="internal")
self.load_systems(self.default_settings_path / "systems")
self.load_settings_file(self.personal_dir / "settings.yaml", file_key="user")
self.load_systems(self.personal_dir / "systems")
def load_settings_file(self, settings_file: Path, namespace: str = None, *, file_key: str = None) -> None:
| if file_key:
file_version = loaded.get("npc", {}).pop("version", None)
self.versions[file_key] = file_version
self.loaded_paths[file_key] = settings_file
self.merge_data(loaded, namespace)
def load_systems(self, systems_dir: Path) -> None:
"""Parse and load all system configs in systems_dir
Finds all yaml files in systems_dir and loads them as systems. Special handling allows deep
inheritance, and prevents circular dependencies between systems.
Args:
systems_dir (Path): Dir to check for system config files
"""
system_settings = systems_dir.glob("*.yaml")
dependencies = defaultdict(list)
for settings_file in system_settings:
loaded = quiet_parse(settings_file)
if loaded is None:
continue
system_name = next(iter(loaded))
loaded_contents = loaded[system_name]
if "extends" in loaded_contents:
dependencies[loaded_contents["extends"]].append(loaded)
continue
self.merge_data(loaded, namespace="npc.systems")
def load_dependencies(deps: dict):
"""Handle dependency loading
Unrecognized parents are stored away for the next iteration. Otherwise, children are merged with
their parent's attributes, then merged into self.
If the dependencies do not change for one iteration, then the remaining systems cannot be loaded
and are skipped.
Args:
deps (dict): Dict mapping parent system keys to child system configs
"""
new_deps = {}
for parent_name, children in deps.items():
if parent_name not in self.get("npc.systems"):
new_deps[parent_name] = children
continue
for child in children:
child_name = next(iter(child))
parent_conf = dict(self.get(f"npc.systems.{parent_name}"))
combined = merge_data_dicts(child[child_name], parent_conf)
self.merge_data(combined, namespace=f"npc.systems.{child_name}")
if not new_deps:
return
if new_deps == deps:
logger.error(f"Some systems could not be found: {deps.keys()}")
return
load_dependencies(new_deps)
load_dependencies(dependencies)
def load_types(self, types_dir: Path, *, system_key: str, namespace_root: str = "npc") -> None:
"""Load type definitions from a path for a given game system
Parses and stores type definitions found in types_dir. All yaml files in that dir are assumed to be
type defs. Files immediately in the dir are parsed first, then a subdir matching the given system key
is checked.
Parsed definitions are put into the "x.types.system" namespace. The root of this namespace is
determined by the namespace_root passed, and the system component uses the system key provided.
The sheet_path property is handled specially. If it's present in a type's yaml, then that value is
used. If not, a file whose name matches the type key is assumed to be the correct sheet contents file.
Args:
types_dir (Path): Path to look in for type definitions
system_key (str): Key of the game system these types are for
namespace_root (str): Root namespace under which the loaded type definitions are stored (default: `"npc"`)
"""
def process_types_dir(search_dir: Path) -> None:
"""Load yaml files, expand sheet paths, handle implied sheets
This internal helper method scans all the files in search_dir and tries to load them by their type:
* yaml files are treated as type definitions and parsed. If they have a sheet_path property, it is
expanded into a fully qualified Path for later use
* All other files are set aside for later. After the types have been loaded, the base names of the
remaining files are compared against the loaded type keys within our current namespace. Any that
match are treated as the implicit sheet file for that type, and their Path is saved to the
type's sheet_path property.
Args:
search_dir (Path): Directory to search for type and sheet files
"""
discovered_sheets: dict = {}
for type_path in search_dir.glob("*.*"):
if type_path.suffix != ".yaml":
type_key: str = type_path.stem
discovered_sheets[type_key] = type_path
continue
typedef: dict = quiet_parse(type_path)
try:
type_key: str = next(iter(typedef))
except TypeError:
raise ParseError("Missing top-level key for type config", type_path)
if typedef[type_key].get("sheet_path"):
sheet_path = Path(typedef[type_key].get("sheet_path"))
if sheet_path.is_absolute():
typedef[type_key]["sheet_path"] = sheet_path.resolve()
else:
typedef[type_key]["sheet_path"] = search_dir.joinpath(sheet_path).resolve()
self.merge_data(typedef, types_namespace)
for type_key, sheet_path in discovered_sheets.items():
if type_key not in self.get(types_namespace, {}):
logger.info(f"Type {type_key} not defined, skipping potential sheet {sheet_path}")
continue
if "sheet_path" not in self.get(f"{types_namespace}.{type_key}"):
self.merge_data({type_key: {"sheet_path": sheet_path}}, types_namespace)
types_namespace: str = f"{namespace_root}.types.{system_key}"
process_types_dir(types_dir)
if self.get(f"npc.systems.{system_key}.extends"):
process_types_dir(types_dir / self.get(f"npc.systems.{system_key}.extends"))
process_types_dir(types_dir / system_key)
def get_system_keys(self) -> list[str]:
"""Get a list of valid system keys
This method only considers systems in the npc namespace.
Returns:
list[str]: List of system keys
"""
return self.get("npc.systems").keys()
def get_system(self, key: str) -> System:
"""Get a system object for the given system key
Creates a System object using the definition from the given key. If the key does not have a
definition, returns None.
Args:
key (str): System key name to use
Returns:
System: System object for the given key, or None if the key does not have a system def
"""
if key not in self.get("npc.systems"):
logger.error(f"System '{key}' is not defined")
return None
return System(key, self)
@cached_property
def deprecated_tags(self) -> dict:
"""Get the deprecated tag definitions
These specs describe tags that should no longer be used at all, due to changes in the way that NPC
works.
Returns:
dict: Dict of deprecated tag info, indexed by tag name
"""
return make_deprecated_tag_specs(self.get("npc.deprecated_tags", {}))
@property
def required_dirs(self) -> list:
"""Get the list of required campaign directories
This includes the dirs for character, session, and plot files, relative to self.campaign_dir
Returns:
list: List of required directory names
"""
return [
self.get("campaign.characters.path"),
self.get("campaign.session | """Open, parse, and merge settings from another file
This is the primary way to load more settings info. Passing in a file path that does not exist will
result in a log message and no error, since all setting files are technically optional.
The file_key for any given file should be unique. These are the keys in use right now:
* internal
* user
* campaign
Args:
settings_file (Path): The file to load
namespace (str): Optional namespace to use for new_data
file_key (str): Key to use when storing the file's stated npc version and path
"""
loaded: dict = quiet_parse(settings_file)
if loaded is None:
return
| identifier_body |
index.js | = {
orderInfo: '{\"busi_partner\":\"101001\",\"dt_order\":\"20170303105801\",\"money_order\":\"0.01\",\"no_order\":\"20170303105753974138\",\"notify_url\":\"http://114.80.125.5:8181/payment/lianlian/tradeNotifyUrl.action\",\"oid_partner\":\"201408071000001546\",\"risk_item\":\"{\\\"delivery_cycle\\\":\\\"other\\\",\\\"frms_ware_category\\\":\\\"4006\\\",\\\"logistics_mode\\\":\\\"2\\\",\\\"user_info_mercht_userno\\\":147281734}\",\"sign\":\"76395a6b69c8b00ed8f10a98ddd50fa5\",\"sign_type\":\"MD5\",\"user_id\":\"147281734\"}'
}
jsb("linkPay", option, param, cf);
}
var yiQianBaoPay = function(option, cf) {
var param = {
orderInfo: '{"cashierType":"0","protocolInfo":{"orderId":"1703010000460645"},"resultDisplayLevel":"N"}'
}
jsb("yiQianBaoPay", option, param, cf);
}
var bestPay = function(option, cf) {
var param = {
orderInfo: 'SERVICE=mobile.security.pay&MERCHANTID=02440103010150900&MERCHANTPWD=768231&SUBMERCHANTID=&BACKMERCHANTURL=http://114.80.125.5:8181/payment/yizhifu/appTradeNotifyYiZhiFu.action&ORDERSEQ=20170301164415605369&ORDERREQTRANSEQ=20170301164415605369&ORDERTIME=20170301164416&ORDERVALIDITYTIME=&CURTYPE=RMB&ORDERAMOUNT=29.0&SUBJECT=纯支付&PRODUCTID=04&PRODUCTDESC=APP翼支付&CUSTOMERID=20170301164415605369&SWTICHACC=false&SIGN=EDE379054221B625D9A6C772955EEE8D&PRODUCTAMOUNT=29.0&ATTACHAMOUNT=0.00&ATTACH=77&ACCOUNTID=&USERIP=10.6.30.85&BUSITYPE=04&EXTERNTOKEN=NO&SIGNTYPE=MD5'
}
jsb("bestPay", option, param, cf);
}
var share = function(option, cf) {
var param = {
"title": "中药", //分享的标题
"content": "可以用来养生", //分享的内容
"imgUrl": "https://eaifjfe.jpg", //分享的小图标url
"shareUrl": "https://www.baidu.com", //分享落地链接
//wxfriend:微信好友 wxzone:微信朋友圈 qqfriend:qq好友 qqzone:qq空间
//weibo:微博 sms:短信分享
//按传值顺序传几个显示几个
shareTypes: [
"wxfriend",
"qqfriend",
"weibo"
],
}
jsb("share", option, param, cf);
}
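// A hypothetical call site for the share bridge above. The payload fields and
// callback are illustrative; how `option` is combined with the default `param`
// happens inside `jsb`, which is not shown in this excerpt.
// jsBridge.share({
//     title: "Article title",
//     content: "Article summary",
//     imgUrl: "https://example.com/icon.png",
//     shareUrl: "https://example.com/article",
//     shareTypes: ["wxfriend", "weibo"]
// }, function(res) {
//     console.log("share result", res);
// });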
var pickImage = function(option, cf) {
var param = {
pickType: 0, //pickType 0: let the user choose between taking a photo and picking an image 1: take a photo 2: pick an image
allowEdit: 0, //0: do not let the user edit the image after it is read 1: allow editing
uploadUrl: "https://eaifjfe.com", //network upload URL
}
jsb("pickImage", option, param, cf);
}
var forbidPanBack = function(option, cf) {
var param = {
forbidPanBack: true,
}
jsb("forbidPanBack", option, param, cf);
}
var networkReq = function(option, cf) {
var param = {
requestUrl: "https://mobi.fangkuaiyi.com", //shceme和host
param: {
"content": "东西用的不错",
"title": "还会再买"
}
}
jsb("networkReq", option, param, cf);
}
var hideNavigation = function(option, cf) {
var param = {
isHide: true
}
jsb("hideNavigation", option, param, cf);
}
var setupNavigation = function(option, cf,f1,f2) {
var param = {
left: {
hasBack: true,
hasShutdown: true,
callid: 9999
},
isShow:false, //whether to show the native navigation bar, defaults to false
middle: {
title: ""
},
right: //at most two entries
[{
menuType: 0,
imgUrl: "",
buttonName: "",
callid: 9998
}]
}
jsb("setupNavigation", option, param, cf,f1,f2);
}
var getHistoryData = function(option, cf) {
jsb("getHistoryData", option, {}, cf);
}
var getAppCookie = function(option, cf) {
jsb("getAppCookie", option, {}, cf);
}
var scanning = function(option, cf) {
jsb("scanning", option, {}, cf);
}
var setProvince = function(option, cf) {
var param = {
provinceName: '上海'
}
jsb("setProvince", option, param, cf);
}
var synCartNumStatus = function(option, cf) {
var param = {
cartnum: 0
}
jsb("synCartNumStatus", option, param, cf);
}
var autoLogin = function(option, cf) {
jsb("autoLogin", option, {}, cf);
}
var updateAppStorage = function(option, cf) {
var param = {
key: "",
value: "",
isPersistence:false
}
jsb("updateAppStorage", option, param, cf);
}
var queryAppStorage = function(option, cf) {
var param = {
key: ""
}
jsb("queryAppStorage", option, param, cf);
}
var removeAllAppStorage = function(option, cf) {
jsb("removeAllAppStorage", option, {}, cf);
}
var hideTabbar = function(option, cf) {
var param = {
hide: true //true: hide false: show
}
jsb("hideTabbar", option, param, cf);
}
var goPay = function(option, cf) {
var param = {
topage: "bank",
animation: 0,
fixPage: false, //true: go to a specific page false: push a page normally, defaults to false
params: {
paymentId: "AE233", //payment method
isPOSOK: true, //whether POS terminals are supported
isYiKaTongOK: true, //whether YiKaTong (stored-value card) payment is supported
isHuoDaoFuKuanOK: true, //whether cash on delivery is supported
orderID: "883738733", //order id, usually pass nil
userCoin: "0" //payment amount, usually pass 0
}
}
jsb("forward", option, param, cf);
}
var synDemandNumStatus = function(option, cf) {
var param = {
demandnum: 0
};
jsb("synDemandNumStatus", option, param, cf);
}
var hideLoading = function(option, cf) {
jsb("hideLoading", option, {}, cf);
}
function init() {
//setupNavigation();
//hideNavigation();
}
function callbackListFunction(callid, cf) {
callBackList[callid] = cf || new Function();
}
function isApp() {
return "Browser" in window;
}
function nativeCallback(result) {
var res = "";
if(!navigator.userAgent.match(/Android/i) ? true : false){
res = result
}else{
res= JSON.parse(result);
}
callBackList[res.callid * 1](res);
}
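// Sketch of the native-to-JS callback round trip, assuming the native side
// echoes back the callid it was handed (the payload shape is illustrative):
// iOS passes an object straight through, Android passes a JSON string, and
// nativeCallback normalizes both before dispatching to callBackList, e.g.
// jsBridge.callback({ callid: 9998, data: { ok: true } })    // iOS-style
// jsBridge.callback('{"callid":9998,"data":{"ok":true}}')    // Android-style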
var jsBridge = {
callback: nativeCallback,
toast: toast,
share: share,
toNativedetail: toNativedetail,
openWebView: openWebView,
back: back,
shutDown: shutDown,
isLogin: isLogin,
g | etNet | identifier_name |
|
index.js | 8350950",
noncestr: "1hl3lw1k1cC7jGAw",
partnerid: "1221847901",
prepayid: "wx20170301144909d8287b83680576459167",
package: "Sign=WXPay"
}
jsb("wxPay", option, param, cf);
}
var aliPay = function(option, cf) {
var param = {
orderInfo: 'partner="2088501903418573"&seller_id="[email protected]"&out_trade_no="20170301164415605369"&subject="1药网订单"&body="待结算"&total_fee="29.0"¬ify_url="http://114.80.125.5:8181/payment/alipay/appTradeNotifyTwo.action"&service="mobile.securitypay.pay"&_input_charset="utf-8"&payment_type="1"&sign_type="RSA"&sign="fXl0L5Oa6AuUixi138%2F4qWBcBGPU4IAZT6T95x6jjfkwgUh5IsBYIJhjhrnAn2bZQlolA470oZMPYkB0D1gdIXtAg8cGQcBJplvE%2FpUbB%2B2xd8QYuM2w%2Fa2ljVV%2FW1RX3NOmm838%2FjkgNk2Dkl%2FANCTTPzGJBOagh4ESQWuPMAo%3D"'
}
jsb("aliPay", option, param, cf);
}
var linkPay = function(option, cf) {
var param = {
orderInfo: '{\"busi_partner\":\"101001\",\"dt_order\":\"20170303105801\",\"money_order\":\"0.01\",\"no_order\":\"20170303105753974138\",\"notify_url\":\"http://114.80.125.5:8181/payment/lianlian/tradeNotifyUrl.action\",\"oid_partner\":\"201408071000001546\",\"risk_item\":\"{\\\"delivery_cycle\\\":\\\"other\\\",\\\"frms_ware_category\\\":\\\"4006\\\",\\\"logistics_mode\\\":\\\"2\\\",\\\"user_info_mercht_userno\\\":147281734}\",\"sign\":\"76395a6b69c8b00ed8f10a98ddd50fa5\",\"sign_type\":\"MD5\",\"user_id\":\"147281734\"}'
}
jsb("linkPay", option, param, cf);
}
var yiQianBaoPay = function(option, cf) {
var param = {
orderInfo: '{"cashierType":"0","protocolInfo":{"orderId":"1703010000460645"},"resultDisplayLevel":"N"}'
}
jsb("yiQianBaoPay", option, param, cf);
}
var bestPay = function(option, cf) {
var param = {
orderInfo: 'SERVICE=mobile.security.pay&MERCHANTID=02440103010150900&MERCHANTPWD=768231&SUBMERCHANTID=&BACKMERCHANTURL=http://114.80.125.5:8181/payment/yizhifu/appTradeNotifyYiZhiFu.action&ORDERSEQ=20170301164415605369&ORDERREQTRANSEQ=20170301164415605369&ORDERTIME=20170301164416&ORDERVALIDITYTIME=&CURTYPE=RMB&ORDERAMOUNT=29.0&SUBJECT=纯支付&PRODUCTID=04&PRODUCTDESC=APP翼支付&CUSTOMERID=20170301164415605369&SWTICHACC=false&SIGN=EDE379054221B625D9A6C772955EEE8D&PRODUCTAMOUNT=29.0&ATTACHAMOUNT=0.00&ATTACH=77&ACCOUNTID=&USERIP=10.6.30.85&BUSITYPE=04&EXTERNTOKEN=NO&SIGNTYPE=MD5'
}
jsb("bestPay", option, param, cf);
}
var share = function(option, cf) {
var param = {
"title": "中药", //分享的标题
"content": "可以用来养生", //分享的内容
"imgUrl": "https://eaifjfe.jpg", //分享的小图标url
"shareUrl": "https://www.baidu.com", //分享落地链接
//wxfriend:微信好友 wxzone:微信朋友圈 qqfriend:qq好友 qqzone:qq空间
//weibo:微博 sms:短信分享
//按传值顺序传几个显示几个
shareTypes: [
"wxfriend",
"qqfriend",
"weibo"
],
}
jsb("share", option, param, cf);
}
var pickImage = function(option, cf) {
var param = {
pickType: 0, //pickType 0: let the user choose between taking a photo and picking an image 1: take a photo 2: pick an image
allowEdit: 0, //0: do not let the user edit the image after it is read 1: allow editing
uploadUrl: "https://eaifjfe.com", //network upload URL
}
jsb("pickImage", option, param, cf);
}
var forbidPanBack = function(option, cf) {
var param = {
forbidPanBack: true,
}
jsb("forbidPanBack", option, param, cf);
}
var networkReq = function(option, cf) {
var param = {
requestUrl: "https://mobi.fangkuaiyi.com", //shceme和host
param: {
"content": "东西用的不错",
"title": "还会再买"
}
}
jsb("networkReq", option, param, cf);
}
var hideNavigation = function(option, cf) {
var param = {
isHide: true
}
jsb("hideNavigation", option, param, cf);
}
var setupNavigation = function(option, cf,f1,f2) {
var param = {
left: {
hasBack: true,
hasShutdown: true,
callid: 9999
},
isShow:false, //whether to show the native navigation bar, defaults to false
middle: {
title: ""
},
right: //at most two entries
[{
menuType: 0,
imgUrl: "",
buttonName: "",
callid: 9998
}]
}
jsb("setupNavigation", option, param, cf,f1,f2);
}
var getHistoryData = function(option, cf) {
jsb("getHistoryData", option, {}, cf);
}
var getAppCookie = function(option, cf) {
jsb("getAppCookie", option, {}, cf);
}
var scanning = function(option, cf) {
jsb("scanning", option, {}, cf);
}
var setProvince = function(option, cf) {
var param = {
provinceName: '上海'
}
jsb("setProvince", option, param, cf);
}
var synCartNumStatus = function(option, cf) {
var param = {
cartnum: 0
}
jsb("synCartNumStatus", option, param, cf);
}
var autoLogin = function(option, cf) {
jsb("autoLogin", option, {}, cf);
}
var updateAppStorage = function(option, cf) {
var param = {
key: "",
value: "",
isPersistence:false
}
jsb("updateAppStorage", option, param, cf);
}
var queryAppStorage = function(option, cf) {
var param = {
key: ""
}
jsb("queryAppStorage", option, param, cf);
}
var removeAllAppStorage = function(option, cf) {
jsb("removeAllAppStorage", option, {}, cf);
}
var hideTabbar = function(option, cf) {
var param = {
hide: true //true:隐藏 false:显示 | } | random_line_split |
|
index.js | 20170301164415605369&ORDERREQTRANSEQ=20170301164415605369&ORDERTIME=20170301164416&ORDERVALIDITYTIME=&CURTYPE=RMB&ORDERAMOUNT=29.0&SUBJECT=纯支付&PRODUCTID=04&PRODUCTDESC=APP翼支付&CUSTOMERID=20170301164415605369&SWTICHACC=false&SIGN=EDE379054221B625D9A6C772955EEE8D&PRODUCTAMOUNT=29.0&ATTACHAMOUNT=0.00&ATTACH=77&ACCOUNTID=&USERIP=10.6.30.85&BUSITYPE=04&EXTERNTOKEN=NO&SIGNTYPE=MD5'
}
jsb("bestPay", option, param, cf);
}
var share = function(option, cf) {
var param = {
"title": "中药", //分享的标题
"content": "可以用来养生", //分享的内容
"imgUrl": "https://eaifjfe.jpg", //分享的小图标url
"shareUrl": "https://www.baidu.com", //分享落地链接
//wxfriend:微信好友 wxzone:微信朋友圈 qqfriend:qq好友 qqzone:qq空间
//weibo:微博 sms:短信分享
//按传值顺序传几个显示几个
shareTypes: [
"wxfriend",
"qqfriend",
"weibo"
],
}
jsb("share", option, param, cf);
}
var pickImage = function(option, cf) {
var param = {
pickType: 0, //pickType 0: let the user choose between taking a photo and picking an image 1: take a photo 2: pick an image
allowEdit: 0, //0: do not let the user edit the image after it is read 1: allow editing
uploadUrl: "https://eaifjfe.com", //network upload URL
}
jsb("pickImage", option, param, cf);
}
var forbidPanBack = function(option, cf) {
var param = {
forbidPanBack: true,
}
jsb("forbidPanBack", option, param, cf);
}
var networkReq = function(option, cf) {
var param = {
requestUrl: "https://mobi.fangkuaiyi.com", //shceme和host
param: {
"content": "东西用的不错",
"title": "还会再买"
}
}
jsb("networkReq", option, param, cf);
}
var hideNavigation = function(option, cf) {
var param = {
isHide: true
}
jsb("hideNavigation", option, param, cf);
}
var setupNavigation = function(option, cf,f1,f2) {
var param = {
left: {
hasBack: true,
hasShutdown: true,
callid: 9999
},
isShow:false, //whether to show the native navigation bar, defaults to false
middle: {
title: ""
},
right: //at most two entries
[{
menuType: 0,
imgUrl: "",
buttonName: "",
callid: 9998
}]
}
jsb("setupNavigation", option, param, cf,f1,f2);
}
var getHistoryData = function(option, cf) {
jsb("getHistoryData", option, {}, cf);
}
var getAppCookie = function(option, cf) {
jsb("getAppCookie", option, {}, cf);
}
var scanning = function(option, cf) {
jsb("scanning", option, {}, cf);
}
var setProvince = function(option, cf) {
var param = {
provinceName: '上海'
}
jsb("setProvince", option, param, cf);
}
var synCartNumStatus = function(option, cf) {
var param = {
cartnum: 0
}
jsb("synCartNumStatus", option, param, cf);
}
var autoLogin = function(option, cf) {
jsb("autoLogin", option, {}, cf);
}
var updateAppStorage = function(option, cf) {
var param = {
key: "",
value: "",
isPersistence:false
}
jsb("updateAppStorage", option, param, cf);
}
var queryAppStorage = function(option, cf) {
var param = {
key: ""
}
jsb("queryAppStorage", option, param, cf);
}
var removeAllAppStorage = function(option, cf) {
jsb("removeAllAppStorage", option, {}, cf);
}
var hideTabbar = function(option, cf) {
var param = {
hide: true //true: hide false: show
}
jsb("hideTabbar", option, param, cf);
}
var goPay = function(option, cf) {
var param = {
topage: "bank",
animation: 0,
fixPage: false, //true: go to a specific page false: push a page normally, defaults to false
params: {
paymentId: "AE233", //payment method
isPOSOK: true, //whether POS terminals are supported
isYiKaTongOK: true, //whether YiKaTong (stored-value card) payment is supported
isHuoDaoFuKuanOK: true, //whether cash on delivery is supported
orderID: "883738733", //order id, usually pass nil
userCoin: "0" //payment amount, usually pass 0
}
}
jsb("forward", option, param, cf);
}
var synDemandNumStatus = function(option, cf) {
var param = {
demandnum: 0
};
jsb("synDemandNumStatus", option, param, cf);
}
var hideLoading = function(option, cf) {
jsb("hideLoading", option, {}, cf);
}
function init() {
//setupNavigation();
//hideNavigation();
}
function callbackListFunction(callid, cf) {
callBackList[callid] = cf || new Function();
}
function isApp() {
return "Browser" in window;
}
function nativeCallback(result) {
var res = "";
if(!navigator.userAgent.match(/Android/i) ? true : false){
res = result
}else{
res= JSON.parse(result);
}
callBackList[res.callid * 1](res);
}
var jsBridge = {
callback: nativeCallback,
toast: toast,
share: share,
toNativedetail: toNativedetail,
openWebView: openWebView,
back: back,
shutDown: shutDown,
isLogin: isLogin,
getNetworkStatus: getNetworkStatus,
getUserInfo: getUserInfo,
getLocationInfo: getLocationInfo,
getAppInfo: getAppInfo,
wxPay: wxPay,
aliPay: aliPay,
linkPay: linkPay,
yiQianBaoPay: yiQianBaoPay,
bestPay: bestPay,
pickImage: pickImage,
forbidPanBack: forbidPanBack,
networkReq: networkReq,
hideNavigation: hideNavigation,
setupNavigation: setupNavigation,
getHistoryData: getHistoryData,
goPay: goPay,
getAppCookie: getAppCookie,
scanning: scanning,
setProvince: setProvince,
synCartNumStatus: synCartNumStatus,
synDemandNumStatus: synDemandNumStatus,
autoLogin: autoLogin,
updateAppStorage: updateAppStorage,
queryAppStorage: queryAppStorage,
removeAllAppStorage: removeAllAppStorage,
hideTabbar: hideTabbar,
hideLoading:hideLoading,
init: init
};
function log(name, param) {
if(baseConfig.debuger) {
console.log("调用的方法是--->" + name);
console.log("调用的参数是 --->" + JSON.stringify(param));
}
/* layer.open({
content: msg,
time: 2
});*/
}
if(typeof define === 'function' && define.amd) {
// AMD
define(jsBridge);
} else {
window.jsBridge = jsBridge;
}
})(window);
function networkStatusChange(o) {
console.log("网络变化了" + JSON.stringify(o))
}
function userStatusChange(o) {
console.log("登录状态改变了" + | JSON.stringify(o))
}
function fillweb(o) {
console.log("通知网页取值" + JSON.stringify(o))
}
function webchange() {
}
function nativeBack() {
console.log("我从native界面返回了 刷新不刷新 看你自己的业务逻辑吧")
}
function callba | identifier_body |
|
read_state.rs | {
/// A read operation which advances the buffer, and reads from it.
ConsumingRead,
/// A read operation which reads from the buffer but doesn't advance it.
PeekingRead,
}
impl Default for ReadIsPeek {
fn default() -> Self {
ReadIsPeek::ConsumingRead
}
}
impl ReadIsPeek {
/// Return `PeekingRead` if the [`libc::MSG_PEEK`] bit is set in `flags.`
/// Otherwise, return `ConsumingRead`.
pub fn from_flags(flags: libc::c_int) -> Self {
if (flags & libc::MSG_PEEK) == 0 {
ReadIsPeek::ConsumingRead
} else {
ReadIsPeek::PeekingRead
}
}
}
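// A minimal check of the flag mapping above, written as a sketch; the test
// module name and the extra MSG_DONTWAIT flag are assumptions for illustration.
#[cfg(test)]
mod read_is_peek_flag_sketch {
use super::ReadIsPeek;
#[test]
fn msg_peek_selects_peeking_read() {
// No flags set: a normal, consuming read.
assert!(matches!(ReadIsPeek::from_flags(0), ReadIsPeek::ConsumingRead));
// MSG_PEEK set (possibly alongside other flags): a peeking read.
assert!(matches!(
ReadIsPeek::from_flags(libc::MSG_PEEK | libc::MSG_DONTWAIT),
ReadIsPeek::PeekingRead
));
}
}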
pub struct ReadState {
rewriter: Box<dyn IncomingRewriter + Send>,
// This buffer should be cleared across reads.
buffer: Vec<u8>,
output_buffer: Vec<u8>,
// This tracks the number of bytes that have been peeked-and-rewritten from the OS's data
// stream, but haven't been consumed by a non-peeking read. Note that because of
// `StreamChangeData` operations during peeking reads, this number can be different from
// `ReadState::rewritten_bytes.len()`.
already_peeked_bytes: usize,
// This buffer stores any rewritten bytes which have either been peeked, or didn't fit in the
// user's buffer and need to be saved for a future call to `rewrite_readv`.
rewritten_bytes: Vec<u8>,
}
impl ReadState {
pub fn new(rewriter: Box<dyn IncomingRewriter + Send>) -> Self {
Self {
rewriter,
buffer: Vec::with_capacity(1024 * 9),
output_buffer: Vec::with_capacity(1024),
already_peeked_bytes: 0,
rewritten_bytes: Vec::with_capacity(1024),
}
}
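// Illustrative (hypothetical) call site for `rewrite_readv` below, showing how
// a read syscall is wrapped in the `do_read` closure; the fd/buf handling is an
// assumption, not code from this crate:
//
// let res = read_state.rewrite_readv(buf.len(), ReadIsPeek::from_flags(flags), |tmp| {
// unsafe { libc::recv(fd, tmp.as_mut_ptr() as *mut libc::c_void, tmp.len(), flags) as isize }
// });
// match res {
// Ok(rewritten) => buf[..rewritten.len()].copy_from_slice(rewritten),
// Err(ret) => return ret, // pass the syscall's error return through untouched
// }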
pub fn rewrite_readv<F>(
&mut self,
input_buffer_size: usize,
read_is_peek: ReadIsPeek,
mut do_read: F,
) -> Result<&[u8], isize>
where
F: FnMut(&mut [u8]) -> isize,
{
// We don't want to keep any data around from a previous call to this function.
self.buffer.clear();
self.output_buffer.clear();
// Size our internal read buffer to match the user-provided buffer.
self.buffer.resize(input_buffer_size, 0);
// Perform the provided read syscall. If we get an error, return immediately so we don't
// overwrite `errno`.
let mut bytes_read = match do_read(&mut self.buffer) {
i if i <= 0 => return Err(i),
i => i as usize, // safe coerce, since we know `i` is positive
};
debug_assert!(bytes_read > 0);
debug_assert!(self.buffer.len() >= bytes_read);
// Shrink the buffer down to the size of the data that was actually read by `do_read`. The
// size of the input `iovecs` could be larger than the amount of data returned by
// `do_read`.
self.buffer.truncate(bytes_read);
/* Run the rewriter. */
// We've already rewritten `self.already_peeked_bytes` bytes in the OS stream (due to
// previous peeking reads), and those bytes (in their un-rewritten state) were just read
// again from `do_read` into the start of `self.buffer`. We don't want to pass those bytes to
// the rewriter.
let start_rewriting_index = self.already_peeked_bytes.min(self.buffer.len());
let buffer_to_rewrite = &mut self.buffer[start_rewriting_index..];
// Run the rewriter on the portion of the buffer that hasn't been rewritten yet.
let mut stream_change_data = {
let start = std::time::Instant::now();
let stream_change_data = self.rewriter.incoming_rewrite(buffer_to_rewrite);
stallone::info!(
"INCOMING REWRITE DURATION",
duration: std::time::Duration = start.elapsed(),
bytes_rewritten: usize = buffer_to_rewrite.len(),
);
stream_change_data
};
// Apply the operations encoded in `stream_change_data`. The indices encoded in
// `stream_change_data` point inside of the buffer that was just rewritten, so we must
// offset them to appropriately point within `self.buffer`.
if let Some((relative_add_index, byte_to_insert)) = stream_change_data.add_byte {
let add_index = start_rewriting_index + relative_add_index;
stallone::debug!(
"Inserting byte into stream",
stream_change_data: StreamChangeData = stream_change_data,
start_rewriting_index: usize = start_rewriting_index,
add_index: usize = add_index,
);
self.buffer.insert(add_index, byte_to_insert);
if let Some(relative_remove_index) = stream_change_data.remove_byte.as_mut() {
// For how we use these fields with TLS 1.3, this invariant should always hold
// (since we remove a byte from the start of a TLS record, and add a byte to the
// end of a TLS record).
assert!(*relative_remove_index > relative_add_index);
// The original remove index is now stale since we inserted an extra byte into this
// stream. Move that index forward to reflect the byte we just added.
*relative_remove_index += 1;
}
}
if let Some(relative_remove_index) = stream_change_data.remove_byte {
let remove_index = start_rewriting_index + relative_remove_index;
stallone::debug!(
"Removing byte from stream",
stream_change_data: StreamChangeData = stream_change_data,
start_rewriting_index: usize = start_rewriting_index,
remove_index: usize = remove_index,
byte_to_remove: Option<&u8> = self.buffer.get(*remove_index),
buffer: String = format!("{:02x?}", self.buffer),
// XXX It seems like this `buffer` doesn't match what I'm expecting from
// `mangle_application_data`
);
self.buffer.remove(remove_index);
}
// If the rewrite exhausted the buffer, that means we ran a remove `StreamChangeData`
// operation on a one-byte buffer. We can't return a zero-byte buffer, since the
// application will interpret that as a this-file-descriptor-is-closed message. So, we will
// manufacture an extra read-then-rewrite operation.
if self.buffer.is_empty() {
// The only way that `self.buffer` could be empty is if the only byte in the buffer was
// removed. That means this byte had to have just been run through the rewriter, since
// `StreamChangeData` can only operate on bytes that have been rewritten. This means
// `start_rewriting_index` had to be 0.
debug_assert_eq!(self.already_peeked_bytes, 0);
debug_assert_eq!(start_rewriting_index, 0);
// For a peeking read, we need to read past the single byte we just removed.
let fake_read_size = match read_is_peek {
ReadIsPeek::ConsumingRead => 1,
ReadIsPeek::PeekingRead => 2,
};
stallone::debug!(
"Calling do_read and the rewriter a second time",
fake_read_size: usize = fake_read_size,
);
self.buffer.resize(fake_read_size, 0);
let fake_bytes_read = match do_read(&mut self.buffer) {
i if i <= 0 => return Err(i),
i => i as usize, // safe coerce, since we know `i` is positive
};
if matches!(read_is_peek, ReadIsPeek::PeekingRead) {
// If this fails, then we were only able to peek the byte that was already removed
// from the stream, so we won't be able to return a byte.
assert_eq!(fake_bytes_read, fake_read_size);
// Remove the byte that we already peeked-and-rewrote-and-discarded from the
// stream.
self.buffer.remove(0);
}
// Update the number of bytes we've read from the OS.
bytes_read = match read_is_peek {
ReadIsPeek::ConsumingRead => bytes_read + fake_bytes_read,
ReadIsPeek::PeekingRead => fake_bytes_read,
};
// Call the rewriter again on the result of the fake read. Note that we can pass the
// entire `self.buffer`, since we know `start_rewriting_index` is 0, and we removed the
// redundant first byte in the peeking read case.
let fake_stream_change_data = self.rewriter.incoming_rewrite(&mut self.buffer);
stallone::debug!(
"Discarding fake StreamChangeData",
fake_stream_change_data: StreamChangeData = fake_stream_change_data,
);
debug_assert!(fake_stream_change_data.add_byte.is_none());
debug_assert!(fake_stream_change_data.remove_byte.is_none());
}
// After the above work, this should always be true.
debug_assert!(!self.buffer.is_empty());
self.already_peeked_bytes = match read_is_peek {
| ReadIsPeek | identifier_name |
|
read_state.rs | start_rewriting_index + relative_add_index;
stallone::debug!(
"Inserting byte into stream",
stream_change_data: StreamChangeData = stream_change_data,
start_rewriting_index: usize = start_rewriting_index,
add_index: usize = add_index,
);
self.buffer.insert(add_index, byte_to_insert);
if let Some(relative_remove_index) = stream_change_data.remove_byte.as_mut() {
// For how we use these fields with TLS 1.3, this invariant should always hold
// (since we remove a byte from the start of a TLS record, and add a byte to the
// end of a TLS record).
assert!(*relative_remove_index > relative_add_index);
// The original remove index is now stale since we inserted an extra byte into this
// stream. Move that index forward to reflect the byte we just added.
*relative_remove_index += 1;
}
}
if let Some(relative_remove_index) = stream_change_data.remove_byte {
let remove_index = start_rewriting_index + relative_remove_index;
stallone::debug!(
"Removing byte from stream",
stream_change_data: StreamChangeData = stream_change_data,
start_rewriting_index: usize = start_rewriting_index,
remove_index: usize = remove_index,
byte_to_remove: Option<&u8> = self.buffer.get(*remove_index),
buffer: String = format!("{:02x?}", self.buffer),
// XXX It seems like this `buffer` doesn't match what I'm expecting from
// `mangle_application_data`
);
self.buffer.remove(remove_index);
}
// If the rewrite exhausted the buffer, that means we ran a remove `StreamChangeData`
// operation on a one-byte buffer. We can't return a zero-byte buffer, since the
// application will interpret that as a this-file-descriptor-is-closed message. So, we will
// manufacture an extra read-then-rewrite operation.
if self.buffer.is_empty() {
// The only way that `self.buffer` could be empty is if the only byte in the buffer was
// removed. That means this byte had to have just been run through the rewriter, since
// `StreamChangeData` can only operate on bytes that have been rewritten. This means
// `start_rewriting_index` had to be 0.
debug_assert_eq!(self.already_peeked_bytes, 0);
debug_assert_eq!(start_rewriting_index, 0);
// For a peeking read, we need to read past the single byte we just removed.
let fake_read_size = match read_is_peek {
ReadIsPeek::ConsumingRead => 1,
ReadIsPeek::PeekingRead => 2,
};
stallone::debug!(
"Calling do_read and the rewriter a second time",
fake_read_size: usize = fake_read_size,
);
self.buffer.resize(fake_read_size, 0);
let fake_bytes_read = match do_read(&mut self.buffer) {
i if i <= 0 => return Err(i),
i => i as usize, // safe coerce, since we know `i` is positive
};
if matches!(read_is_peek, ReadIsPeek::PeekingRead) {
// If this fails, then we were only able to peek the byte that was already removed
// from the stream, so we won't be able to return a byte.
assert_eq!(fake_bytes_read, fake_read_size);
// Remove the byte that we already peeked-and-rewrote-and-discarded from the
// stream.
self.buffer.remove(0);
}
// Update the number of bytes we've read from the OS.
bytes_read = match read_is_peek {
ReadIsPeek::ConsumingRead => bytes_read + fake_bytes_read,
ReadIsPeek::PeekingRead => fake_bytes_read,
};
// Call the rewriter again on the result of the fake read. Note that we can pass the
// entire `self.buffer`, since we know `start_rewriting_index` is 0, and we removed the
// redundant first byte in the peeking read case.
let fake_stream_change_data = self.rewriter.incoming_rewrite(&mut self.buffer);
stallone::debug!(
"Discarding fake StreamChangeData",
fake_stream_change_data: StreamChangeData = fake_stream_change_data,
);
debug_assert!(fake_stream_change_data.add_byte.is_none());
debug_assert!(fake_stream_change_data.remove_byte.is_none());
}
// After the above work, this should always be true.
debug_assert!(!self.buffer.is_empty());
self.already_peeked_bytes = match read_is_peek {
// If there were some already-peeked-and-rewritten bytes in the OS's data stream, then
// subtract from that the number of bytes that were just consumed from the OS's data
// stream.
ReadIsPeek::ConsumingRead => self.already_peeked_bytes.saturating_sub(bytes_read),
// If we just peeked more bytes from the OS's data stream, then update our counter of
// already-peeked-and-rewritten bytes.
ReadIsPeek::PeekingRead => self.already_peeked_bytes.max(bytes_read),
};
// We want to replace the bytes that we've previously peeked (AKA all the bytes in
// `self.buffer` that weren't passed to the rewriter) with the contents of
// `self.rewritten_bytes`. Naively, we could assume that's equal to
// `&self.buffer[..start_rewriting_index]`, since the `stream_change_data` operations above
// only operate on `self.buffer` after `start_rewriting_index`. However, previous
// `stream_change_data` operations on peeking reads invalidate that assumption. If a
// previous peeking read happened during a `stream_change_data` operation, then
// `self.rewritten_bytes` stores the peeked data _after_ that `stream_change_data` operation
// was applied, so the length of `self.rewritten_bytes` is unpredictable relative to
// `start_rewriting_index`.
//
// Instead, we'll use all of `self.rewritten_bytes`, and then append onto that all of the
// bytes that were just rewritten, and potentially had `stream_change_data` operations
// applied to them. This new buffer might be larger than the user-provided buffer.
//
// For consuming reads, we'll save all the newly-rewritten bytes that don't fit in the
// user-provided buffer in `self.rewritten_bytes`.
//
// For peeking reads, we'll save all the rewritten-and-`stream_change_data`-applied bytes
// in `self.rewritten_bytes`.
let just_rewritten_bytes = &self.buffer[start_rewriting_index..];
self.output_buffer.extend_from_slice(&self.rewritten_bytes);
self.output_buffer.extend_from_slice(just_rewritten_bytes);
// Note that we're using `input_buffer_size` here rather than `bytes_read`. If the OS returns
// less data than we are able to store in the user's buffer, then take advantage of that.
let output_size = self.output_buffer.len().min(input_buffer_size);
stallone::debug!(
"Preparing rewrite_readv result",
bytes_read: usize = bytes_read,
input_buffer_size: usize = input_buffer_size,
rewritten_bytes_len: usize = self.rewritten_bytes.len(),
just_rewritten_bytes_len: usize = just_rewritten_bytes.len(),
output_buffer_len: usize = self.output_buffer.len(),
output_size: usize = output_size,
);
match read_is_peek {
ReadIsPeek::ConsumingRead => {
// For a consuming read, get rid of all the previously-rewritten bytes that are
// about to be copied into `self.buffer`.
let rewritten_bytes_used = self.rewritten_bytes.len().min(output_size);
if rewritten_bytes_used > 0 {
stallone::debug!(
"Dropping previously-rewritten bytes that have been consumed",
rewritten_bytes_used: usize = rewritten_bytes_used,
);
}
std::mem::drop(self.rewritten_bytes.drain(..rewritten_bytes_used));
// Find the just-rewritten bytes that won't be returned to the user, and that we
// need to save. If we didn't rewrite anything, then of course this is empty. If we
// did some rewriting, then the `output_size` index splits `self.output_buffer` into two
// parts: the part we'll return to the user, and the part we need to save.
let just_rewritten_bytes_to_save = if just_rewritten_bytes.is_empty() {
&[]
} else {
&self.output_buffer[output_size..]
};
if !just_rewritten_bytes_to_save.is_empty() {
stallone::debug!(
"Saving just-rewritten bytes that don't fit in user buffer",
num_just_rewritten_bytes_to_save: usize =
just_rewritten_bytes_to_save.len(),
);
}
// Save all the just-rewritten bytes that won't fit in the user-provided
// buffer.
self.rewritten_bytes
.extend_from_slice(just_rewritten_bytes_to_save);
}
ReadIsPeek::PeekingRead => { | if !just_rewritten_bytes.is_empty() { | random_line_split |
|
read_state.rs |
}
impl ReadIsPeek {
/// Return `PeekingRead` if the [`libc::MSG_PEEK`] bit is set in `flags.`
/// Otherwise, return `ConsumingRead`.
pub fn from_flags(flags: libc::c_int) -> Self {
if (flags & libc::MSG_PEEK) == 0 {
ReadIsPeek::ConsumingRead
} else {
ReadIsPeek::PeekingRead
}
}
}
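// A minimal added sketch (not part of the original file) exercising `from_flags`.
// `libc::MSG_PEEK` is the only bit it inspects, so any other flag bits still yield
// a consuming read.
#[cfg(test)]
mod read_is_peek_flag_sketch {
    use super::*;

    #[test]
    fn peek_bit_selects_peeking_read() {
        assert!(matches!(ReadIsPeek::from_flags(0), ReadIsPeek::ConsumingRead));
        assert!(matches!(
            ReadIsPeek::from_flags(libc::MSG_PEEK),
            ReadIsPeek::PeekingRead
        ));
    }
}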
pub struct ReadState {
rewriter: Box<dyn IncomingRewriter + Send>,
// This buffer should be cleared across reads.
buffer: Vec<u8>,
output_buffer: Vec<u8>,
// This tracks the number of bytes that have been peeked-and-rewritten from the OS's data
// stream, but haven't been consumed by a non-peeking read. Note that because of
// `StreamChangeData` operations during peeking reads, this number can be different from
// `ReadState::rewritten_bytes.len()`.
already_peeked_bytes: usize,
// This buffer stores any rewritten bytes which have either been peeked, or didn't fit in the
// user's buffer and need to be saved for a future call to `rewrite_readv`.
rewritten_bytes: Vec<u8>,
}
impl ReadState {
pub fn new(rewriter: Box<dyn IncomingRewriter + Send>) -> Self {
Self {
rewriter,
buffer: Vec::with_capacity(1024 * 9),
output_buffer: Vec::with_capacity(1024),
already_peeked_bytes: 0,
rewritten_bytes: Vec::with_capacity(1024),
}
}
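// A hedged usage sketch (added; not from the original source). It assumes a
// `ReadState` already built around some `IncomingRewriter`, plus a raw socket `fd`,
// the caller's `flags`, and a `user_buf` slice; the closure mirrors the `do_read`
// parameter of `rewrite_readv` below by returning the raw syscall result as `isize`.
//
//     match read_state.rewrite_readv(
//         user_buf.len(),
//         ReadIsPeek::from_flags(flags),
//         |buf| unsafe {
//             libc::recv(fd, buf.as_mut_ptr() as *mut libc::c_void, buf.len(), flags) as isize
//         },
//     ) {
//         Ok(rewritten) => user_buf[..rewritten.len()].copy_from_slice(rewritten),
//         Err(raw) => { /* hand the original return value (and errno) back to the caller */ }
//     }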
pub fn rewrite_readv<F>(
&mut self,
input_buffer_size: usize,
read_is_peek: ReadIsPeek,
mut do_read: F,
) -> Result<&[u8], isize>
where
F: FnMut(&mut [u8]) -> isize,
{
// We don't want to keep any data around from a previous call to this function.
self.buffer.clear();
self.output_buffer.clear();
// Size our internal read buffer to match the user-provided buffer.
self.buffer.resize(input_buffer_size, 0);
// Perform the provided read syscall. If we get an error, return immediately so we don't
// overwrite `errno`.
let mut bytes_read = match do_read(&mut self.buffer) {
i if i <= 0 => return Err(i),
i => i as usize, // safe coerce, since we know `i` is positive
};
debug_assert!(bytes_read > 0);
debug_assert!(self.buffer.len() >= bytes_read);
// Shrink the buffer down to the size of the data that was actually read by `do_read`. The
// size of the input `iovecs` could be larger than the amount of data returned by
// `do_read`.
self.buffer.truncate(bytes_read);
/* Run the rewriter. */
// We've already rewritten `self.already_peeked_bytes` bytes in the OS stream (due to
// previous peeking reads), and those bytes (in their un-rewritten state) were just read
// again from `do_read` into the start of `self.buffer`. We don't want to pass those bytes to
// the rewriter.
let start_rewriting_index = self.already_peeked_bytes.min(self.buffer.len());
let buffer_to_rewrite = &mut self.buffer[start_rewriting_index..];
// Run the rewriter on the portion of the buffer that hasn't been rewritten yet.
let mut stream_change_data = {
let start = std::time::Instant::now();
let stream_change_data = self.rewriter.incoming_rewrite(buffer_to_rewrite);
stallone::info!(
"INCOMING REWRITE DURATION",
duration: std::time::Duration = start.elapsed(),
bytes_rewritten: usize = buffer_to_rewrite.len(),
);
stream_change_data
};
// Apply the operations encoded in `stream_change_data`. The indices encoded in
// `stream_change_data` point inside of the buffer that was just rewritten, so we must
// offset them to appropriately point within `self.buffer`.
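// For example (added), with `start_rewriting_index == 3` and `add_byte == Some((2, b))`,
// the new byte `b` lands at index 3 + 2 == 5 of `self.buffer`.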
if let Some((relative_add_index, byte_to_insert)) = stream_change_data.add_byte {
let add_index = start_rewriting_index + relative_add_index;
stallone::debug!(
"Inserting byte into stream",
stream_change_data: StreamChangeData = stream_change_data,
start_rewriting_index: usize = start_rewriting_index,
add_index: usize = add_index,
);
self.buffer.insert(add_index, byte_to_insert);
if let Some(relative_remove_index) = stream_change_data.remove_byte.as_mut() {
// For how we use these fields with TLS 1.3, this invariant should always hold
// (since we remove a byte from the start of a TLS record, and add a byte to the
// end of a TLS record).
assert!(*relative_remove_index > relative_add_index);
// The original remove index is now stale since we inserted an extra byte into this
// stream. Move that index forward to reflect the byte we just added.
*relative_remove_index += 1;
}
}
if let Some(relative_remove_index) = stream_change_data.remove_byte {
let remove_index = start_rewriting_index + relative_remove_index;
stallone::debug!(
"Removing byte from stream",
stream_change_data: StreamChangeData = stream_change_data,
start_rewriting_index: usize = start_rewriting_index,
remove_index: usize = remove_index,
byte_to_remove: Option<&u8> = self.buffer.get(remove_index),
buffer: String = format!("{:02x?}", self.buffer),
// XXX It seems like this `buffer` doesn't match what I'm expecting from
// `mangle_application_data`
);
self.buffer.remove(remove_index);
}
// If the rewrite exhausted the buffer, that means we ran a remove `StreamChangeData`
// operation on a one-byte buffer. We can't return a zero-byte buffer, since the
// application will interpret that as a this-file-descriptor-is-closed message. So, we will
// manufacture an extra read-then-rewrite operation.
if self.buffer.is_empty() {
// The only way that `self.buffer` could be empty is if the only byte in the buffer was
// removed. That means this byte had to have just been run through the rewriter, since
// `StreamChangeData` can only operate on bytes that have been rewritten. This means
// `start_rewriting_index` had to be 0.
debug_assert_eq!(self.already_peeked_bytes, 0);
debug_assert_eq!(start_rewriting_index, 0);
// For a peeking read, we need to read past the single byte we just removed.
let fake_read_size = match read_is_peek {
ReadIsPeek::ConsumingRead => 1,
ReadIsPeek::PeekingRead => 2,
};
stallone::debug!(
"Calling do_read and the rewriter a second time",
fake_read_size: usize = fake_read_size,
);
self.buffer.resize(fake_read_size, 0);
let fake_bytes_read = match do_read(&mut self.buffer) {
i if i <= 0 => return Err(i),
i => i as usize, // safe coerce, since we know `i` is positive
};
if matches!(read_is_peek, ReadIsPeek::PeekingRead) {
// If this fails, then we were only able to peek the byte that was already removed
// from the stream, so we won't be able to return a byte.
assert_eq!(fake_bytes_read, fake_read_size);
// Remove the byte that we already peeked-and-rewrote-and-discarded from the
// stream.
self.buffer.remove(0);
}
// Update the number of bytes we've read from the OS.
bytes_read = match read_is_peek {
ReadIsPeek::ConsumingRead => bytes_read + fake_bytes_read,
ReadIsPeek::PeekingRead => fake_bytes_read,
};
// Call the rewriter again on the result of the fake read. Note that we can pass the
// entire `self.buffer`, since we know `start_rewriting_index` is 0, and we removed the
// redundant first byte in the peeking read case.
let fake_stream_change_data = self.rewriter.incoming_rewrite(&mut self.buffer);
stallone::debug!(
"Discarding fake StreamChangeData",
fake_stream_change_data: StreamChangeData = fake_stream_change_data,
);
debug_assert!(fake_stream_change_data.add_byte.is_none());
debug_assert!(fake_stream_change_data.remove_byte.is_none());
}
// After the above work, this should always be true.
debug_assert!(!self.buffer.is_empty());
self.already_peeked_bytes = match read_is_peek {
// If there were some already-peeked-and-rewritten bytes in the OS's data stream, then
// subtract from that the number of bytes that were just consumed from the OS's data
// stream.
ReadIsPeek::ConsumingRead => self.already_ | {
ReadIsPeek::ConsumingRead
} | identifier_body |
|
read_state.rs | This buffer stores any rewritten bytes which have either been peeked, or didn't fit in the
// user's buffer and need to be saved for a future call to `rewrite_readv`.
rewritten_bytes: Vec<u8>,
}
impl ReadState {
pub fn new(rewriter: Box<dyn IncomingRewriter + Send>) -> Self {
Self {
rewriter,
buffer: Vec::with_capacity(1024 * 9),
output_buffer: Vec::with_capacity(1024),
already_peeked_bytes: 0,
rewritten_bytes: Vec::with_capacity(1024),
}
}
pub fn rewrite_readv<F>(
&mut self,
input_buffer_size: usize,
read_is_peek: ReadIsPeek,
mut do_read: F,
) -> Result<&[u8], isize>
where
F: FnMut(&mut [u8]) -> isize,
{
// We don't want to keep any data around from a previous call to this function.
self.buffer.clear();
self.output_buffer.clear();
// Size our internal read buffer to match the user-provided buffer.
self.buffer.resize(input_buffer_size, 0);
// Perform the provided read syscall. If we get an error, return immediately so we don't
// overwrite `errno`.
let mut bytes_read = match do_read(&mut self.buffer) {
i if i <= 0 => return Err(i),
i => i as usize, // safe coerce, since we know `i` is positive
};
debug_assert!(bytes_read > 0);
debug_assert!(self.buffer.len() >= bytes_read);
// Shrink the buffer down to the size of the data that was actually read by `do_read`. The
// size of the input `iovecs` could be larger than the amount of data returned by
// `do_read`.
self.buffer.truncate(bytes_read);
/* Run the rewriter. */
// We've already rewritten `self.already_peeked_bytes` bytes in the OS stream (due to
// previous peeking reads), and those bytes (in their un-rewritten state) were just read
// again from `do_read` into the start of `self.buffer`. We don't want to pass those bytes to
// the rewriter.
let start_rewriting_index = self.already_peeked_bytes.min(self.buffer.len());
let buffer_to_rewrite = &mut self.buffer[start_rewriting_index..];
// Run the rewriter on the portion of the buffer that hasn't been rewritten yet.
let mut stream_change_data = {
let start = std::time::Instant::now();
let stream_change_data = self.rewriter.incoming_rewrite(buffer_to_rewrite);
stallone::info!(
"INCOMING REWRITE DURATION",
duration: std::time::Duration = start.elapsed(),
bytes_rewritten: usize = buffer_to_rewrite.len(),
);
stream_change_data
};
// Apply the operations encoded in `stream_change_data`. The indices encoded in
// `stream_change_data` point inside of the buffer that was just rewritten, so we must
// offset them to appropriately point within `self.buffer`.
if let Some((relative_add_index, byte_to_insert)) = stream_change_data.add_byte |
if let Some(relative_remove_index) = stream_change_data.remove_byte {
let remove_index = start_rewriting_index + relative_remove_index;
stallone::debug!(
"Removing byte from stream",
stream_change_data: StreamChangeData = stream_change_data,
start_rewriting_index: usize = start_rewriting_index,
remove_index: usize = remove_index,
byte_to_remove: Option<&u8> = self.buffer.get(remove_index),
buffer: String = format!("{:02x?}", self.buffer),
// XXX It seems like this `buffer` doesn't match what I'm expecting from
// `mangle_application_data`
);
self.buffer.remove(remove_index);
}
// If the rewrite exhausted the buffer, that means we ran a remove `StreamChangeData`
// operation on a one-byte buffer. We can't return a zero-byte buffer, since the
// application will interpret that as a this-file-descriptor-is-closed message. So, we will
// manufacture an extra read-then-rewrite operation.
if self.buffer.is_empty() {
// The only way that `self.buffer` could be empty is if the only byte in the buffer was
// removed. That means this byte had to have just been run through the rewriter, since
// `StreamChangeData` can only operate on bytes that have been rewritten. This means
// `start_rewriting_index` had to be 0.
debug_assert_eq!(self.already_peeked_bytes, 0);
debug_assert_eq!(start_rewriting_index, 0);
// For a peeking read, we need to read past the single byte we just removed.
let fake_read_size = match read_is_peek {
ReadIsPeek::ConsumingRead => 1,
ReadIsPeek::PeekingRead => 2,
};
stallone::debug!(
"Calling do_read and the rewriter a second time",
fake_read_size: usize = fake_read_size,
);
self.buffer.resize(fake_read_size, 0);
let fake_bytes_read = match do_read(&mut self.buffer) {
i if i <= 0 => return Err(i),
i => i as usize, // safe coerce, since we know `i` is positive
};
if matches!(read_is_peek, ReadIsPeek::PeekingRead) {
// If this fails, then we were only able to peek the byte that was already removed
// from the stream, so we won't be able to return a byte.
assert_eq!(fake_bytes_read, fake_read_size);
// Remove the byte that we already peeked-and-rewrote-and-discarded from the
// stream.
self.buffer.remove(0);
}
// Update the number of bytes we've read from the OS.
bytes_read = match read_is_peek {
ReadIsPeek::ConsumingRead => bytes_read + fake_bytes_read,
ReadIsPeek::PeekingRead => fake_bytes_read,
};
// Call the rewriter again on the result of the fake read. Note that we can pass the
// entire `self.buffer`, since we know `start_rewriting_index` is 0, and we removed the
// redundant first byte in the peeking read case.
let fake_stream_change_data = self.rewriter.incoming_rewrite(&mut self.buffer);
stallone::debug!(
"Discarding fake StreamChangeData",
fake_stream_change_data: StreamChangeData = fake_stream_change_data,
);
debug_assert!(fake_stream_change_data.add_byte.is_none());
debug_assert!(fake_stream_change_data.remove_byte.is_none());
}
// After the above work, this should always be true.
debug_assert!(!self.buffer.is_empty());
self.already_peeked_bytes = match read_is_peek {
// If there were some already-peeked-and-rewritten bytes in the OS's data stream, then
// subtract from that the number of bytes that were just consumed from the OS's data
// stream.
ReadIsPeek::ConsumingRead => self.already_peeked_bytes.saturating_sub(bytes_read),
// If we just peeked more bytes from the OS's data stream, then update our counter of
// already-peeked-and-rewritten bytes.
ReadIsPeek::PeekingRead => self.already_peeked_bytes.max(bytes_read),
};
// We want to replace the bytes that we've previously peeked (AKA all the bytes in
// `self.buffer` that weren't passed to the rewriter) with the contents of
// `self.rewritten_bytes`. Naively, we could assume that's equal to
// `&self.buffer[..start_rewriting_index]`, since the `stream_change_data` operations above
// only operate on `self.buffer` after `start_rewriting_index`. However, previous
// `stream_change_data` operations on peeking reads invalidate that assumption. If a
// previous peeking read happened during a `stream_change_data` operation, then
// `self.rewritten_bytes` stores the peeked data _after_ that `stream_change_data` operation
// was applied, so the length of `self | {
let add_index = start_rewriting_index + relative_add_index;
stallone::debug!(
"Inserting byte into stream",
stream_change_data: StreamChangeData = stream_change_data,
start_rewriting_index: usize = start_rewriting_index,
add_index: usize = add_index,
);
self.buffer.insert(add_index, byte_to_insert);
if let Some(relative_remove_index) = stream_change_data.remove_byte.as_mut() {
// For how we use these fields with TLS 1.3, this invariant should always hold
// (since we remove a byte from the start of a TLS record, and add a byte to the
// end of a TLS record).
assert!(*relative_remove_index > relative_add_index);
// The original remove index is now stale since we inserted an extra byte into this
// stream. Move that index forward to reflect the byte we just added.
*relative_remove_index += 1;
}
} | conditional_block |
init.ts | {
name: locale.LABEL_COMPONENT,
template: '@arco-design/arco-template-react-component',
},
'react-block': {
name: locale.LABEL_BLOCK,
template: '@arco-design/arco-template-react-block',
},
'react-page': {
name: locale.LABEL_PAGE,
template: '@arco-design/arco-template-react-page',
},
'react-library': {
name: locale.LABEL_LIBRARY,
template: '@arco-design/arco-template-react-library',
},
'react-monorepo': {
name: locale.LABEL_MONOREPO,
template: '@arco-design/arco-template-react-monorepo',
},
'vue-component': {
name: locale.LABEL_COMPONENT,
template: '@arco-design/arco-template-vue-component',
},
'vue-monorepo': {
name: locale.LABEL_MONOREPO,
template: '@arco-design/arco-template-vue-monorepo',
},
'arco-design-pro': {
name: locale.LABEL_ARCO_PRO,
template: '@arco-design/arco-template-arco-design-pro',
templateSimple: '@arco-design/arco-template-arco-design-pro-simple',
},
};
const TYPES_MATERIAL = ['react-component', 'react-block', 'react-page'];
const TYPES_FOR_REACT = [
'react-component',
'react-block',
'react-page',
'react-library',
'react-monorepo',
'arco-design-pro',
];
const TYPES_FOR_VUE = ['vue-component', 'vue-monorepo'];
// Templates for Monorepo
const VALID_TYPES_IN_MONOREPO = [
'react-component',
'react-block',
'react-page',
'react-library',
'vue-component',
];
const CATEGORIES_COMPONENT = [
'数据展示',
'信息展示',
'表格',
'表单',
'筛选',
'弹出框',
'编辑器',
'其他',
];
const CATEGORIES_BLOCK = [
'基础模版',
'官网模版',
'注册登陆',
'数据展示',
'信息展示',
'表格',
'表单',
'筛选',
'弹出框',
'编辑器',
'可视化',
'其他',
];
const CATEGORIES_PAGE = CATEGORIES_BLOCK;
/**
* Init project
*/
export default async function ({
projectName,
template,
metaFileName = 'arcoMeta',
configFileName,
isForMonorepo = false,
isPureProject = false,
}: ProjectInitOptions) {
if (!projectName) {
print.error(`\n${locale.ERROR_NO_PROJECT_NAME}\n`);
process.exit(0);
}
if (!isGitStatusClean()) {
print.error(`\n${locale.ERROR_GIT_DIRTY}\n`);
process.exit(0);
}
// project init path
const root = path.resolve(projectName);
if (
!(await confirm(
() => fs.pathExistsSync(root),
`${locale.WARN_PATH_EXIST} ${chalk.yellow(root)}`
))
) {
process.exit(0);
}
const getCreateProjectOptions = async (): Promise<Partial<CreateProjectOptions>> => {
// Create a pure project
if (isPureProject) {
return getPureProjectConfig();
}
// When the user specifies a template
if (template) {
let metaInTemplate: { [key: string]: any } = {};
try {
let packageJson = null;
if (template.startsWith('file:')) {
packageJson = fs.readJsonSync(`${template.replace(/^file:/, '')}/package.json`);
} else {
const hostUnpkg = getGlobalInfo().host.unpkg;
const { data } = await axios.get(`${hostUnpkg}/${template}/template/package.json`);
packageJson = data;
}
metaInTemplate = packageJson && packageJson[metaFileName];
} catch (e) {}
let materialType = metaInTemplate?.type;
if (!materialType) {
materialType = await inquiryMaterialType({
template,
isForMonorepo,
});
}
return getComponentConfig({
type: materialType,
template,
metaFileName,
meta: metaInTemplate,
});
}
const framework = await inquiryFramework(isForMonorepo);
const materialType = await inquiryMaterialType({
framework,
template,
isForMonorepo,
});
switch (materialType) {
case 'arco-design-pro':
return getArcoDesignProConfig();
case 'react-monorepo':
return {
template: MATERIAL_TYPE_MAP['react-monorepo'].template,
};
case 'vue-monorepo':
return {
template: MATERIAL_TYPE_MAP['vue-monorepo'].template,
};
default:
return getComponentConfig({
type: materialType,
template,
metaFileName,
framework,
});
}
};
const extraOptions = await getCreateProjectOptions();
return createProjectFromTemplate({
root,
template,
projectName,
isForMonorepo,
beforeGitCommit: () => {
// Create arco.config.js
if (!isPureProject && !isForMonorepo) {
initConfig(configFileName);
}
},
...extraOptions,
}).catch((err) => {
print.error(err);
throw err;
});
}
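// A hedged usage sketch (added; not part of the original file), assuming the default
// export is imported as `init` by a CLI command handler. Option values are illustrative.
//
//   await init({
//     projectName: 'my-arco-material',
//     template: '',                    // empty: prompt for framework and material type
//     configFileName: 'arco.config.js',
//     isForMonorepo: false,
//     isPureProject: false,
//   });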
/**
* Ask the type of material to be created
*/
async function inquiryMaterialType({
framework = 'react',
template,
isForMonorepo,
}: {
framework?: FrameworkType;
template: string;
isForMonorepo: boolean;
}): Promise<string> {
print.info(`\n${locale.TIP_INFO_ABOUT_TEMPLATE}\n`);
const { type } = await inquirer.prompt([
{
type: |
/**
* Ask the meta information of the material
*/
async function inquiryMaterialMeta(meta: { [key: string]: any }): Promise<{
name: string;
title: string;
description: string;
version: string;
category?: string[];
}> {
let pkgNamePrefix = 'rc';
let categories = [];
switch (meta.type) {
case 'vue-component':
pkgNamePrefix = 'vc';
categories = CATEGORIES_COMPONENT;
break;
case 'react-component':
pkgNamePrefix = 'rc';
categories = CATEGORIES_COMPONENT;
break;
case 'react-block':
pkgNamePrefix = 'rb';
categories = CATEGORIES_PAGE;
break;
case 'react-page':
pkgNamePrefix = 'rp';
break;
default:
break;
}
return inquirer.prompt([
{
type: 'input',
name: 'name',
message: locale.TIP_INPUT_PACKAGE_NAME,
default: meta.name || `@arco-design/${pkgNamePrefix}-xxx`,
},
{
type: 'input',
name: 'title',
message: locale.TIP_INPUT_TITLE,
default: meta.title || '',
},
{
type: 'input',
name: 'description',
message: locale.TIP_INPUT_DESCRIPTION,
default: meta.description || '',
},
{
type: 'input',
name: 'version',
message: locale.TIP_INPUT_VERSION,
default: '0.1.0',
},
{
type: 'checkbox',
name: `category`,
message: locale.TIP_SELECT_CATEGORY,
choices: categories,
default: meta.category || categories[categories.length - 1],
when: () => categories.length,
},
]);
}
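// Added example (not in the original source): a typical call to the helper above.
// Only `meta.type` drives the prefix/category choices; the other fields just seed
// the prompt defaults.
//
//   const meta = await inquiryMaterialMeta({ type: 'react-component' });
//   // => { name, title, description, version, category? }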
/**
* Ask framework
*/
async function inquiryFramework(isForMonorepo?: boolean): Promise<FrameworkType> {
let framework: FrameworkType = null;
// Try to get the framework dependencies of the current warehouse
if (isForMonorepo) {
try {
const pathGitRoot = getGitRootPath();
const { dependencies, devDependencies } =
fs.readJsonSync(path.resolve(pathGitRoot, 'package.json')) || {};
if ((dependencies && dependencies.react) || (devDependencies && devDependencies.react)) {
framework = 'react';
} else if ((dependencies && dependencies.vue) || (devDependencies && devDependencies.vue)) {
framework = 'vue';
}
} catch (error) {}
}
if (!framework) {
const answer = await inquirer.prompt({
type: 'list',
name: 'framework',
message: locale.TIP_SELECT_FRAMEWORK,
choices: ['React', 'Vue'],
});
framework = answer.framework.toLowerCase();
}
return framework;
}
/**
* Get the template to create Arco Pro
*/
async function get | 'list',
name: 'type',
message: template ? locale.TIP_SELECT_TYPE_OF_MATERIAL : locale.TIP_SELECT_TYPE_OF_PROJECT,
choices: Object.entries(MATERIAL_TYPE_MAP)
.filter(([key]) => {
if (template) {
return TYPES_MATERIAL.indexOf(key) > -1;
}
return (
(framework === 'react' && TYPES_FOR_REACT.indexOf(key) > -1) ||
(framework === 'vue' && TYPES_FOR_VUE.indexOf(key) > -1)
);
})
.filter(([key]) => !isForMonorepo || VALID_TYPES_IN_MONOREPO.indexOf(key) !== -1)
.map(([key, { name }]) => ({ name, value: key })),
},
]);
return type;
} | identifier_body |
init.ts | {
name: locale.LABEL_COMPONENT,
template: '@arco-design/arco-template-react-component',
},
'react-block': {
name: locale.LABEL_BLOCK,
template: '@arco-design/arco-template-react-block',
},
'react-page': {
name: locale.LABEL_PAGE,
template: '@arco-design/arco-template-react-page',
},
'react-library': {
name: locale.LABEL_LIBRARY,
template: '@arco-design/arco-template-react-library',
},
'react-monorepo': {
name: locale.LABEL_MONOREPO,
template: '@arco-design/arco-template-react-monorepo',
},
'vue-component': {
name: locale.LABEL_COMPONENT,
template: '@arco-design/arco-template-vue-component',
},
'vue-monorepo': {
name: locale.LABEL_MONOREPO,
template: '@arco-design/arco-template-vue-monorepo',
},
'arco-design-pro': {
name: locale.LABEL_ARCO_PRO,
template: '@arco-design/arco-template-arco-design-pro',
templateSimple: '@arco-design/arco-template-arco-design-pro-simple',
},
};
const TYPES_MATERIAL = ['react-component', 'react-block', 'react-page'];
const TYPES_FOR_REACT = [
'react-component',
'react-block',
'react-page',
'react-library',
'react-monorepo',
'arco-design-pro',
];
const TYPES_FOR_VUE = ['vue-component', 'vue-monorepo'];
// Templates for Monorepo
const VALID_TYPES_IN_MONOREPO = [
'react-component',
'react-block',
'react-page',
'react-library',
'vue-component',
];
const CATEGORIES_COMPONENT = [
'数据展示',
'信息展示',
'表格',
'表单',
'筛选',
'弹出框',
'编辑器',
'其他',
];
const CATEGORIES_BLOCK = [
'基础模版',
'官网模版',
'注册登陆',
'数据展示',
'信息展示',
'表格',
'表单',
'筛选',
'弹出框',
'编辑器',
'可视化',
'其他',
];
const CATEGORIES_PAGE = CATEGORIES_BLOCK;
/**
* Init project
*/
export default async function ({
projectName,
template,
metaFileName = 'arcoMeta',
configFileName,
isForMonorepo = false,
isPureProject = false,
}: ProjectInitOptions) {
if (!projectName) {
print.error(`\n${locale.ERROR_NO_PROJECT_NAME}\n`);
process.exit(0);
}
if (!isGitStatusClean()) {
print.error(`\n${locale.ERROR_GIT_DIRTY}\n`);
process.exit(0);
}
// project init path
const root = path.resolve(projectName);
if (
!(await confirm(
() => fs.pathExistsSync(root),
`${locale.WARN_PATH_EXIST} ${chalk.yellow(root)}`
))
) {
process.exit(0);
}
const getCreateProjectOptions = async (): Promise<Partial<CreateProjectOptions>> => {
// Create a pure project
if (isPureProject) {
return getPureProjectConfig();
}
// When the user specifies a template
if (template) {
let metaInTemplate: { [key: string]: any } = {};
try {
let packageJson = null;
if (template.startsWith('file:')) {
packageJson = fs.readJsonSync(`${template.replace(/^file:/, '')}/package.json`);
} else {
const hostUnpkg = getGlobalInfo().host.unpkg;
const { data } = await axios.get(`${hostUnpkg}/${template}/template/package.json`);
packageJson = data;
}
metaInTemplate = packageJson && packageJson[metaFileName];
} catch (e) {}
let materialType = metaInTemplate?.type;
if (!materialType) {
materialType = await inquiryMaterialType({
template,
isForMonorepo,
});
}
return getComponentConfig({
type: materialType,
template,
metaFileName,
meta: metaInTemplate,
});
}
const framework = await inquiryFramework(isForMonorepo);
const materialType = await inquiryMaterialType({
framework,
template,
isForMonorepo,
});
switch (materialType) {
case 'arco-design-pro':
return getArcoDesignProConfig();
case 'react-monorepo':
return {
template: MATERIAL_TYPE_MAP['react-monorepo'].template,
};
case 'vue-monorepo':
return {
template: MATERIAL_TYPE_MAP['vue-monorepo'].template,
};
default:
return getComponentConfig({
type: materialType,
template,
metaFileName,
framework,
});
}
};
const extraOptions = await getCreateProjectOptions();
return createProjectFromTemplate({
root,
template,
projectName,
isForMonorepo,
beforeGitCommit: () => {
// Create arco.config.js
if (!isPureProject && !isForMonorepo) {
initConfig(configFileName);
}
},
...extraOptions,
}).catch((err) => {
print.error(err);
throw err;
});
}
/**
* Ask the type of material to be created
*/
async function inquiryMaterialType({
framework = 'react',
template,
isForMonorepo,
}: {
framework?: FrameworkType;
template: string;
isForMonorepo: boolean;
}): Promise<string> {
print.info(`\n${locale.TIP_INFO_ABOUT_TEMPLATE}\n`);
const { type } = await inquirer.prompt([
{
type: 'list',
name: 'type',
message: template ? locale.TIP_SELECT_TYPE_OF_MATERIAL : locale.TIP_SELECT_TYPE_OF_PROJECT,
choices: Object.entries(MATERIAL_TYPE_MAP)
.filter(([key]) => {
if (template) {
return TYPES_MATERIAL.indexOf(key) > -1;
}
return (
(framework === 'react' && TYPES_FOR_REACT.indexOf(key) > -1) ||
(framework === 'vue' && TYPES_FOR_VUE.indexOf(key) > -1)
);
})
.filter(([key]) => !isForMonorepo || VALID_TYPES_IN_MONOREPO.indexOf(key) !== -1)
.map(([key, { name }]) => ({ name, value: key })),
},
]);
return type;
}
/**
* Ask the meta information of the material
*/
async function inquiryMaterialMeta(meta: { [key: string]: any }): Promise<{
name: string;
title: string;
description: string;
version: string;
category?: string[];
}> {
let pkgNamePrefix = 'rc';
let categories = [];
switch (meta.type) {
case 'vue-component':
pkgNamePrefix = 'vc';
categories = CATEGORIES_COMPONENT;
break;
case 'react-component':
pkgNamePrefix = 'rc';
categories = CATEGORIES_COMPONENT;
break;
case 'react-block':
pkgNamePrefix = 'rb';
categories = CATEGORIES_PAGE;
break;
case 'react-page':
pkgNamePrefix = 'rp';
break;
default:
break;
}
return inquirer.prompt([
{
type: 'input',
name: 'name',
message: locale.TIP_INPUT_PACKAGE_NAME,
default: meta.name || `@arco-design/${pkgNamePrefix}-xxx`,
},
{
type: 'input',
name: 'title',
message: locale.TIP_INPUT_TITLE,
default: meta.title || '',
},
{
type: 'input',
name: 'description',
message: locale.TIP_INPUT_DESCRIPTION,
default: meta.description || '',
},
{
type: 'input',
name: 'version',
message: locale.TIP_INPUT_VERSION,
default: '0.1.0',
},
{
type: 'checkbox',
name: `category`,
message: locale.TIP_SELECT_CATEGORY,
choices: categories,
default: meta.category || categories[categories.length - 1],
when: () => categories.length,
},
]);
}
/**
* Ask framework
*/
async function inquiryFramework(isForMonorepo?: boolean): Promise<FrameworkType> {
let framework: FrameworkType = null;
// Try t | ork dependencies of the current warehouse
if (isForMonorepo) {
try {
const pathGitRoot = getGitRootPath();
const { dependencies, devDependencies } =
fs.readJsonSync(path.resolve(pathGitRoot, 'package.json')) || {};
if ((dependencies && dependencies.react) || (devDependencies && devDependencies.react)) {
framework = 'react';
} else if ((dependencies && dependencies.vue) || (devDependencies && devDependencies.vue)) {
framework = 'vue';
}
} catch (error) {}
}
if (!framework) {
const answer = await inquirer.prompt({
type: 'list',
name: 'framework',
message: locale.TIP_SELECT_FRAMEWORK,
choices: ['React', 'Vue'],
});
framework = answer.framework.toLowerCase();
}
return framework;
}
/**
* Get the template to create Arco Pro
*/
async function | o get the framew | identifier_name |
init.ts | ': {
name: locale.LABEL_COMPONENT,
template: '@arco-design/arco-template-react-component',
},
'react-block': {
name: locale.LABEL_BLOCK,
template: '@arco-design/arco-template-react-block',
},
'react-page': {
name: locale.LABEL_PAGE,
template: '@arco-design/arco-template-react-page',
},
'react-library': {
name: locale.LABEL_LIBRARY,
template: '@arco-design/arco-template-react-library',
},
'react-monorepo': {
name: locale.LABEL_MONOREPO,
template: '@arco-design/arco-template-react-monorepo',
},
'vue-component': {
name: locale.LABEL_COMPONENT,
template: '@arco-design/arco-template-vue-component',
},
'vue-monorepo': {
name: locale.LABEL_MONOREPO,
template: '@arco-design/arco-template-vue-monorepo',
},
'arco-design-pro': {
name: locale.LABEL_ARCO_PRO,
template: '@arco-design/arco-template-arco-design-pro',
templateSimple: '@arco-design/arco-template-arco-design-pro-simple',
},
};
const TYPES_MATERIAL = ['react-component', 'react-block', 'react-page'];
const TYPES_FOR_REACT = [
'react-component',
'react-block',
'react-page',
'react-library',
'react-monorepo',
'arco-design-pro',
];
const TYPES_FOR_VUE = ['vue-component', 'vue-monorepo'];
// Templates for Monorepo
const VALID_TYPES_IN_MONOREPO = [
'react-component',
'react-block',
'react-page',
'react-library',
'vue-component',
];
const CATEGORIES_COMPONENT = [
'数据展示',
'信息展示',
'表格',
'表单',
'筛选',
'弹出框',
'编辑器',
'其他',
];
const CATEGORIES_BLOCK = [
'基础模版',
'官网模版',
'注册登陆',
'数据展示',
'信息展示',
'表格',
'表单',
'筛选',
'弹出框',
'编辑器',
'可视化',
'其他',
];
const CATEGORIES_PAGE = CATEGORIES_BLOCK;
/**
* Init project
*/
export default async function ({
projectName,
template,
metaFileName = 'arcoMeta',
configFileName,
isForMonorepo = false,
isPureProject = false,
}: ProjectInitOptions) {
if (!projectName) {
print.error(`\n${locale.ERROR_NO_PROJECT_NAME}\n`);
process.exit(0);
}
if (!isGitStatusClean()) {
print.error(`\n${locale.ERROR_GIT_DIRTY}\n`);
process.exit(0);
}
// project init path
const root = path.resolve(projectName);
if (
!(await confirm(
() => fs.pathExistsSync(root),
`${locale.WARN_PATH_EXIST} ${chalk.yellow(root)}`
))
) {
process.exit(0);
}
const getCreateProjectOptions = async (): Promise<Partial<CreateProjectOptions>> => {
// Create a pure project
if (isPureProject) {
return getPureProjectConfig();
}
// When the user specifies a template
if (template) {
let metaInTemplate: { [key: string]: any } = {};
try {
let packageJson = null;
if (template.startsWith('file:')) {
packageJson = fs.readJsonSync(`${template.replace(/^file:/, '')}/package.json`);
} else {
const hostUnpkg = getGlobalInfo().host.unpkg;
const { data } = await axios.get(`${hostUnpkg}/${template}/template/package.json`);
packageJson = data;
}
metaInTemplate = packageJson && packageJson[metaFileName];
} catch (e) {}
let materialType = metaInTemplate?.type;
if (!materialType) {
materialType = await inquiryMaterialType({
template,
isForMonorepo,
});
}
return getComponentConfig({
type: materialType,
template,
metaFileName,
meta: metaInTemplate,
});
}
const framework = await inquiryFramework(isForMonorepo);
const materialType = await inquiryMaterialType({
framework,
template,
isForMonorepo,
});
switch (materialType) {
case 'arco-design-pro':
return getArcoDesignProConfig();
case 'react-monorepo':
return {
template: MATERIAL_TYPE_MAP['react-monorepo'].template,
};
case 'vue-monorepo':
return {
template: MATERIAL_TYPE_MAP['vue-monorepo'].template,
};
default:
return getComponentConfig({
type: materialType,
template,
metaFileName,
framework,
});
}
};
const extraOptions = await getCreateProjectOptions();
return createProjectFromTemplate({
root,
template,
projectName,
isForMonorepo,
beforeGitCommit: () => {
// Create arco.config.js
if (!isPureProject && !isForMonorepo) {
initConfig(configFileName);
}
},
...extraOptions,
}).catch((err) => {
print.error(err);
throw err;
});
}
/**
* Ask the type of material to be created
*/
async function inquiryMaterialType({
framework = 'react',
template,
isForMonorepo,
}: {
framework?: FrameworkType;
template: string;
isForMonorepo: boolean;
}): Promise<string> {
print.info(`\n${locale.TIP_INFO_ABOUT_TEMPLATE}\n`);
const { type } = await inquirer.prompt([
{
type: 'list',
name: 'type',
message: template ? locale.TIP_SELECT_TYPE_OF_MATERIAL : locale.TIP_SELECT_TYPE_OF_PROJECT,
choices: Object.entries(MATERIAL_TYPE_MAP)
.filter(([key]) => {
if (template) {
return TYPES_MATERIAL.indexOf(key) > -1;
}
return (
(framework === 'react' && TYPES_FOR_REACT.indexOf(key) > -1) ||
(framework === 'vue' && TYPES_FOR_VUE.indexOf(key) > -1)
);
})
.filter(([key]) => !isForMonorepo || VALID_TYPES_IN_MONOREPO.indexOf(key) !== -1)
.map(([key, { name }]) => ({ name, value: key })),
},
]);
return type;
}
/**
* Ask the meta information of the material
*/
async function inquiryMaterialMeta(meta: { [key: string]: any }): Promise<{
name: string;
title: string;
description: string;
version: string;
category?: string[];
}> {
let pkgNamePrefix = 'rc';
let categories = [];
switch (meta.type) {
case 'vue-component':
pkgNamePrefix = 'vc';
categories = CATEGORIES_COMPONENT;
break;
case 'react-component':
pkgNamePrefix = 'rc';
categories = CATEGORIES_COMPONENT;
break;
case 'react-block':
pkgNamePrefix = 'rb';
categories = CATEGORIES_PAGE;
break;
case 'react-page':
pkgNamePrefix = 'rp';
break;
default:
break;
}
return inquirer.prompt([
{
type: 'input',
name: 'name',
message: locale.TIP_INPUT_PACKAGE_NAME,
default: meta.name || `@arco-design/${pkgNamePrefix}-xxx`,
},
{
type: 'input',
name: 'title',
message: locale.TIP_INPUT_TITLE,
default: meta.title || '',
},
{
type: 'input',
name: 'description',
message: locale.TIP_INPUT_DESCRIPTION,
default: meta.description || '',
},
{
type: 'input',
name: 'version',
message: locale.TIP_INPUT_VERSION,
default: '0.1.0',
},
{
type: 'checkbox',
name: `category`,
message: locale.TIP_SELECT_CATEGORY,
choices: categories,
default: meta.category || categories[categories.length - 1],
when: () => categories.length,
},
]);
}
/**
* Ask framework
*/
async function inquiryFramework(isForMonorepo?: boolean): Promise<FrameworkType> {
let framework: FrameworkType = null;
// Try to get the framework dependencies of the current warehouse
if (isForMonorepo) {
try {
const pathGitRoot = getGitRootPath();
const { dependencies, devDependencies } =
fs.readJsonSync(path.resolve(pathGitRoot, 'package.json')) || {};
if ((dependencies && dependencies.react) || (devDependencies && devDependencies.react)) {
framework = 'react';
} else if ((dependencies && dependencies.vue) || (devDependencies && devDependencies.vue)) {
framework = 'vue';
}
} catch (error) {}
}
if (!framework) {
const answer = await inquirer.prompt({ | framework = answer.framework.toLowerCase();
}
return framework;
}
/**
* Get the template to create Arco Pro
*/
async function getAr | type: 'list',
name: 'framework',
message: locale.TIP_SELECT_FRAMEWORK,
choices: ['React', 'Vue'],
}); | random_line_split |
imdb_lstm_chen.py | izers import SGD, RMSprop, Adagrad
# from keras.utils import np_utils
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import MaskedLayer, Recurrent
from keras import activations, initializations
from keras.utils.theano_utils import shared_zeros
from keras.datasets import imdb
class LSTMLayer(Recurrent):
| http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
Learning to forget: Continual prediction with LSTM
http://www.mitpressjournals.org/doi/pdf/10.1162/089976600300015015
Supervised sequence labelling with recurrent neural networks
http://www.cs.toronto.edu/~graves/preprint.pdf
"""
def __init__(self, input_dim, output_dim=128, train_init_cell=True, train_init_h=True,
init='glorot_uniform', inner_init='orthogonal', forget_bias_init='one',
input_activation='tanh', gate_activation='hard_sigmoid', output_activation='tanh',
weights=None, truncate_gradient=-1, return_sequences=False):
super(LSTMLayer, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.truncate_gradient = truncate_gradient
self.return_sequences = return_sequences
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.forget_bias_init = initializations.get(forget_bias_init)
self.input_activation = activations.get(input_activation)
self.gate_activation = activations.get(gate_activation)
self.output_activation = activations.get(output_activation)
self.input = T.tensor3()
self.time_range = None
W_z = self.init((self.input_dim, self.output_dim)).get_value(borrow=True)
R_z = self.inner_init((self.output_dim, self.output_dim)).get_value(borrow=True)
# self.b_z = shared_zeros(self.output_dim)
W_i = self.init((self.input_dim, self.output_dim)).get_value(borrow=True)
R_i = self.inner_init((self.output_dim, self.output_dim)).get_value(borrow=True)
# self.b_i = shared_zeros(self.output_dim)
W_f = self.init((self.input_dim, self.output_dim)).get_value(borrow=True)
R_f = self.inner_init((self.output_dim, self.output_dim)).get_value(borrow=True)
# self.b_f = self.forget_bias_init(self.output_dim)
W_o = self.init((self.input_dim, self.output_dim)).get_value(borrow=True)
R_o = self.inner_init((self.output_dim, self.output_dim)).get_value(borrow=True)
# self.b_o = shared_zeros(self.output_dim)
self.h_m1 = shared_zeros(shape=(1, self.output_dim), name='h0')
self.c_m1 = shared_zeros(shape=(1, self.output_dim), name='c0')
W = np.vstack((W_z[np.newaxis, :, :],
W_i[np.newaxis, :, :],
W_f[np.newaxis, :, :],
W_o[np.newaxis, :, :])) # shape = (4, input_dim, output_dim)
R = np.vstack((R_z[np.newaxis, :, :],
R_i[np.newaxis, :, :],
R_f[np.newaxis, :, :],
R_o[np.newaxis, :, :])) # shape = (4, output_dim, output_dim)
self.W = theano.shared(W, name='Input to hidden weights (zifo)', borrow=True)
self.R = theano.shared(R, name='Recurrent weights (zifo)', borrow=True)
self.b = theano.shared(np.zeros(shape=(4, self.output_dim), dtype=theano.config.floatX),
name='bias', borrow=True)
self.params = [self.W, self.R]
if train_init_cell:
self.params.append(self.c_m1)
if train_init_h:
self.params.append(self.h_m1)
if weights is not None:
self.set_weights(weights)
def _step(self,
Y_t, # sequence
h_tm1, c_tm1, # output_info
R): # non_sequence
# h_mask_tm1 = mask_tm1 * h_tm1
# c_mask_tm1 = mask_tm1 * c_tm1
G_tm1 = T.dot(h_tm1, R)
M_t = Y_t + G_tm1
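# Shape note (added): M_t has shape (batch, 4, output_dim); slice 0 is the block
# input z and slices 1..3 are the input/forget/output gates, matching the (zifo)
# stacking of W and R above.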
z_t = self.input_activation(M_t[:, 0, :])
ifo_t = self.gate_activation(M_t[:, 1:, :])
i_t = ifo_t[:, 0, :]
f_t = ifo_t[:, 1, :]
o_t = ifo_t[:, 2, :]
# c_t_cndt = f_t * c_tm1 + i_t * z_t
# h_t_cndt = o_t * self.output_activation(c_t_cndt)
c_t = f_t * c_tm1 + i_t * z_t
h_t = o_t * self.output_activation(c_t)
# h_t = mask * h_t_cndt + (1-mask) * h_tm1
# c_t = mask * c_t_cndt + (1-mask) * c_tm1
return h_t, c_t
def get_output(self, train=False):
X = self.get_input(train)
# mask = self.get_padded_shuffled_mask(train, X, pad=0)
mask = self.get_input_mask(train=train)
ind = T.switch(T.eq(mask[:, -1], 1.), mask.shape[-1], T.argmin(mask, axis=-1)).astype('int32')
max_time = T.max(ind)
X = X.dimshuffle((1, 0, 2))
Y = T.dot(X, self.W) + self.b
# h0 = T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)
h0 = T.repeat(self.h_m1, X.shape[1], axis=0)
c0 = T.repeat(self.c_m1, X.shape[1], axis=0)
[outputs, _], updates = theano.scan(
self._step,
sequences=Y,
outputs_info=[h0, c0],
non_sequences=[self.R], n_steps=max_time,
truncate_gradient=self.truncate_gradient, strict=True,
allow_gc=theano.config.scan.allow_gc)
res = T.concatenate([h0.dimshuffle('x', 0, 1), outputs], axis=0).dimshuffle((1, 0, 2))
if self.return_sequences:
return res
#return outputs[-1]
return res[T.arange(mask.shape[0], dtype='int32'), ind]
def set_init_cell_parameter(self, is_param=True):
if is_param:
if self.c_m1 not in self.params:
self.params.append(self.c_m1)
else:
self.params.remove(self.c_m1)
def set_init_h_parameter(self, is_param=True):
if is_param:
if self.h_m1 not in self.params:
self.params.append(self.h_m1)
else:
self.params.remove(self.h_m1)
# def get_time_range(self, train):
# mask = self.get_input_mask(train=train)
# ind = T.switch(T.eq(mask[:, -1], 1.), mask.shape[-1], T.argmin(mask, axis=-1)).astype('int32')
# self.time_range = ind
# return ind
class MeanPooling(MaskedLayer):
"""
Self-defined pooling layer
Global Mean Pooling Layer
"""
def __init__(self, start=1):
super(MeanPooling, self).__init__()
self.start = start
# def supports_masked_input(self):
# return False
def get_output_mask(self, train=False):
return None
def get_output(self, train=False):
data = self.get_input(train=train)
mask = self.get_input_mask(train=train)
mask = mask.dimshuffle((0, 1, 'x'))
return (data[:, self.start:] * mask).mean(axis=1)
def get_config(self):
return {"name": self.__class__.__name__}
np.random.seed(1337) # for reproducibility
max_features = 20000
maxlen = 100 # cut texts after this number of words (among top max_features most common words)
batch_size = 128
print("Loading data...")
(X_train, | """
Self-defined LSTM layer
optimized version: Not using mask in _step function and tensorized computation.
Acts as a spatiotemporal projection,
turning a sequence of vectors into a single vector.
Eats inputs with shape:
(nb_samples, max_sample_length (samples shorter than this are padded with zeros at the end), input_dim)
and returns outputs with shape:
if not return_sequences:
(nb_samples, output_dim)
if return_sequences:
(nb_samples, max_sample_length, output_dim)
For a step-by-step description of the algorithm, see:
http://deeplearning.net/tutorial/lstm.html
References:
Long short-term memory (original 97 paper) | identifier_body |
imdb_lstm_chen.py | import SGD, RMSprop, Adagrad
# from keras.utils import np_utils
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import MaskedLayer, Recurrent
from keras import activations, initializations
from keras.utils.theano_utils import shared_zeros
from keras.datasets import imdb
class LSTMLayer(Recurrent):
"""
Self-defined LSTM layer
optimized version: Not using mask in _step function and tensorized computation.
Acts as a spatiotemporal projection,
turning a sequence of vectors into a single vector.
Eats inputs with shape:
(nb_samples, max_sample_length (samples shorter than this are padded with zeros at the end), input_dim)
and returns outputs with shape:
if not return_sequences:
(nb_samples, output_dim)
if return_sequences:
(nb_samples, max_sample_length, output_dim)
For a step-by-step description of the algorithm, see:
http://deeplearning.net/tutorial/lstm.html
References:
Long short-term memory (original 97 paper)
http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
Learning to forget: Continual prediction with LSTM
http://www.mitpressjournals.org/doi/pdf/10.1162/089976600300015015
Supervised sequence labelling with recurrent neural networks
http://www.cs.toronto.edu/~graves/preprint.pdf
"""
def __init__(self, input_dim, output_dim=128, train_init_cell=True, train_init_h=True,
init='glorot_uniform', inner_init='orthogonal', forget_bias_init='one',
input_activation='tanh', gate_activation='hard_sigmoid', output_activation='tanh',
weights=None, truncate_gradient=-1, return_sequences=False):
super(LSTMLayer, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.truncate_gradient = truncate_gradient
self.return_sequences = return_sequences
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.forget_bias_init = initializations.get(forget_bias_init)
self.input_activation = activations.get(input_activation)
self.gate_activation = activations.get(gate_activation)
self.output_activation = activations.get(output_activation)
self.input = T.tensor3()
self.time_range = None
W_z = self.init((self.input_dim, self.output_dim)).get_value(borrow=True)
R_z = self.inner_init((self.output_dim, self.output_dim)).get_value(borrow=True)
# self.b_z = shared_zeros(self.output_dim)
W_i = self.init((self.input_dim, self.output_dim)).get_value(borrow=True)
R_i = self.inner_init((self.output_dim, self.output_dim)).get_value(borrow=True)
# self.b_i = shared_zeros(self.output_dim)
W_f = self.init((self.input_dim, self.output_dim)).get_value(borrow=True)
R_f = self.inner_init((self.output_dim, self.output_dim)).get_value(borrow=True)
# self.b_f = self.forget_bias_init(self.output_dim)
W_o = self.init((self.input_dim, self.output_dim)).get_value(borrow=True)
R_o = self.inner_init((self.output_dim, self.output_dim)).get_value(borrow=True)
# self.b_o = shared_zeros(self.output_dim)
self.h_m1 = shared_zeros(shape=(1, self.output_dim), name='h0')
self.c_m1 = shared_zeros(shape=(1, self.output_dim), name='c0')
W = np.vstack((W_z[np.newaxis, :, :],
W_i[np.newaxis, :, :],
W_f[np.newaxis, :, :],
W_o[np.newaxis, :, :])) # shape = (4, input_dim, output_dim)
R = np.vstack((R_z[np.newaxis, :, :],
R_i[np.newaxis, :, :],
R_f[np.newaxis, :, :],
R_o[np.newaxis, :, :])) # shape = (4, output_dim, output_dim)
self.W = theano.shared(W, name='Input to hidden weights (zifo)', borrow=True)
self.R = theano.shared(R, name='Recurrent weights (zifo)', borrow=True)
self.b = theano.shared(np.zeros(shape=(4, self.output_dim), dtype=theano.config.floatX),
name='bias', borrow=True)
self.params = [self.W, self.R]
if train_init_cell:
self.params.append(self.c_m1)
if train_init_h:
self.params.append(self.h_m1)
if weights is not None:
|
def _step(self,
Y_t, # sequence
h_tm1, c_tm1, # output_info
R): # non_sequence
# h_mask_tm1 = mask_tm1 * h_tm1
# c_mask_tm1 = mask_tm1 * c_tm1
G_tm1 = T.dot(h_tm1, R)
M_t = Y_t + G_tm1
z_t = self.input_activation(M_t[:, 0, :])
ifo_t = self.gate_activation(M_t[:, 1:, :])
i_t = ifo_t[:, 0, :]
f_t = ifo_t[:, 1, :]
o_t = ifo_t[:, 2, :]
# c_t_cndt = f_t * c_tm1 + i_t * z_t
# h_t_cndt = o_t * self.output_activation(c_t_cndt)
c_t = f_t * c_tm1 + i_t * z_t
h_t = o_t * self.output_activation(c_t)
# h_t = mask * h_t_cndt + (1-mask) * h_tm1
# c_t = mask * c_t_cndt + (1-mask) * c_tm1
return h_t, c_t
def get_output(self, train=False):
X = self.get_input(train)
# mask = self.get_padded_shuffled_mask(train, X, pad=0)
mask = self.get_input_mask(train=train)
ind = T.switch(T.eq(mask[:, -1], 1.), mask.shape[-1], T.argmin(mask, axis=-1)).astype('int32')
max_time = T.max(ind)
X = X.dimshuffle((1, 0, 2))
Y = T.dot(X, self.W) + self.b
# h0 = T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)
h0 = T.repeat(self.h_m1, X.shape[1], axis=0)
c0 = T.repeat(self.c_m1, X.shape[1], axis=0)
[outputs, _], updates = theano.scan(
self._step,
sequences=Y,
outputs_info=[h0, c0],
non_sequences=[self.R], n_steps=max_time,
truncate_gradient=self.truncate_gradient, strict=True,
allow_gc=theano.config.scan.allow_gc)
res = T.concatenate([h0.dimshuffle('x', 0, 1), outputs], axis=0).dimshuffle((1, 0, 2))
if self.return_sequences:
return res
#return outputs[-1]
return res[T.arange(mask.shape[0], dtype='int32'), ind]
def set_init_cell_parameter(self, is_param=True):
if is_param:
if self.c_m1 not in self.params:
self.params.append(self.c_m1)
else:
self.params.remove(self.c_m1)
def set_init_h_parameter(self, is_param=True):
if is_param:
if self.h_m1 not in self.params:
self.params.append(self.h_m1)
else:
self.params.remove(self.h_m1)
# def get_time_range(self, train):
# mask = self.get_input_mask(train=train)
# ind = T.switch(T.eq(mask[:, -1], 1.), mask.shape[-1], T.argmin(mask, axis=-1)).astype('int32')
# self.time_range = ind
# return ind
class MeanPooling(MaskedLayer):
"""
Self-defined pooling layer
Global Mean Pooling Layer
"""
def __init__(self, start=1):
super(MeanPooling, self).__init__()
self.start = start
# def supports_masked_input(self):
# return False
def get_output_mask(self, train=False):
return None
def get_output(self, train=False):
data = self.get_input(train=train)
mask = self.get_input_mask(train=train)
mask = mask.dimshuffle((0, 1, 'x'))
return (data[:, self.start:] * mask).mean(axis=1)
def get_config(self):
return {"name": self.__class__.__name__}
np.random.seed(1337) # for reproducibility
max_features = 20000
maxlen = 100 # cut texts after this number of words (among top max_features most common words)
batch_size = 128
print("Loading data...")
(X_train, | self.set_weights(weights) | conditional_block |
imdb_lstm_chen.py |
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import MaskedLayer, Recurrent
from keras import activations, initializations
from keras.utils.theano_utils import shared_zeros
from keras.datasets import imdb
class LSTMLayer(Recurrent):
"""
Self-defined LSTM layer
optimized version: Not using mask in _step function and tensorized computation.
Acts as a spatiotemporal projection,
turning a sequence of vectors into a single vector.
Eats inputs with shape:
(nb_samples, max_sample_length (samples shorter than this are padded with zeros at the end), input_dim)
and returns outputs with shape:
if not return_sequences:
(nb_samples, output_dim)
if return_sequences:
(nb_samples, max_sample_length, output_dim)
For a step-by-step description of the algorithm, see:
http://deeplearning.net/tutorial/lstm.html
References:
Long short-term memory (original 97 paper)
http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
Learning to forget: Continual prediction with LSTM
http://www.mitpressjournals.org/doi/pdf/10.1162/089976600300015015
Supervised sequence labelling with recurrent neural networks
http://www.cs.toronto.edu/~graves/preprint.pdf
"""
def __init__(self, input_dim, output_dim=128, train_init_cell=True, train_init_h=True,
init='glorot_uniform', inner_init='orthogonal', forget_bias_init='one',
input_activation='tanh', gate_activation='hard_sigmoid', output_activation='tanh',
weights=None, truncate_gradient=-1, return_sequences=False):
super(LSTMLayer, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.truncate_gradient = truncate_gradient
self.return_sequences = return_sequences
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.forget_bias_init = initializations.get(forget_bias_init)
self.input_activation = activations.get(input_activation)
self.gate_activation = activations.get(gate_activation)
self.output_activation = activations.get(output_activation)
self.input = T.tensor3()
self.time_range = None
W_z = self.init((self.input_dim, self.output_dim)).get_value(borrow=True)
R_z = self.inner_init((self.output_dim, self.output_dim)).get_value(borrow=True)
# self.b_z = shared_zeros(self.output_dim)
W_i = self.init((self.input_dim, self.output_dim)).get_value(borrow=True)
R_i = self.inner_init((self.output_dim, self.output_dim)).get_value(borrow=True)
# self.b_i = shared_zeros(self.output_dim)
W_f = self.init((self.input_dim, self.output_dim)).get_value(borrow=True)
R_f = self.inner_init((self.output_dim, self.output_dim)).get_value(borrow=True)
# self.b_f = self.forget_bias_init(self.output_dim)
W_o = self.init((self.input_dim, self.output_dim)).get_value(borrow=True)
R_o = self.inner_init((self.output_dim, self.output_dim)).get_value(borrow=True)
# self.b_o = shared_zeros(self.output_dim)
self.h_m1 = shared_zeros(shape=(1, self.output_dim), name='h0')
self.c_m1 = shared_zeros(shape=(1, self.output_dim), name='c0')
W = np.vstack((W_z[np.newaxis, :, :],
W_i[np.newaxis, :, :],
W_f[np.newaxis, :, :],
W_o[np.newaxis, :, :])) # shape = (4, input_dim, output_dim)
R = np.vstack((R_z[np.newaxis, :, :],
R_i[np.newaxis, :, :],
R_f[np.newaxis, :, :],
R_o[np.newaxis, :, :])) # shape = (4, output_dim, output_dim)
self.W = theano.shared(W, name='Input to hidden weights (zifo)', borrow=True)
self.R = theano.shared(R, name='Recurrent weights (zifo)', borrow=True)
self.b = theano.shared(np.zeros(shape=(4, self.output_dim), dtype=theano.config.floatX),
name='bias', borrow=True)
self.params = [self.W, self.R]
if train_init_cell:
self.params.append(self.c_m1)
if train_init_h:
self.params.append(self.h_m1)
if weights is not None:
self.set_weights(weights)
def _step(self,
Y_t, # sequence
h_tm1, c_tm1, # output_info
R): # non_sequence
# h_mask_tm1 = mask_tm1 * h_tm1
# c_mask_tm1 = mask_tm1 * c_tm1
G_tm1 = T.dot(h_tm1, R)
M_t = Y_t + G_tm1
z_t = self.input_activation(M_t[:, 0, :])
ifo_t = self.gate_activation(M_t[:, 1:, :])
i_t = ifo_t[:, 0, :]
f_t = ifo_t[:, 1, :]
o_t = ifo_t[:, 2, :]
# c_t_cndt = f_t * c_tm1 + i_t * z_t
# h_t_cndt = o_t * self.output_activation(c_t_cndt)
c_t = f_t * c_tm1 + i_t * z_t
h_t = o_t * self.output_activation(c_t)
# h_t = mask * h_t_cndt + (1-mask) * h_tm1
# c_t = mask * c_t_cndt + (1-mask) * c_tm1
return h_t, c_t
def get_output(self, train=False):
X = self.get_input(train)
# mask = self.get_padded_shuffled_mask(train, X, pad=0)
mask = self.get_input_mask(train=train)
ind = T.switch(T.eq(mask[:, -1], 1.), mask.shape[-1], T.argmin(mask, axis=-1)).astype('int32')
max_time = T.max(ind)
X = X.dimshuffle((1, 0, 2))
Y = T.dot(X, self.W) + self.b
# h0 = T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)
h0 = T.repeat(self.h_m1, X.shape[1], axis=0)
c0 = T.repeat(self.c_m1, X.shape[1], axis=0)
[outputs, _], updates = theano.scan(
self._step,
sequences=Y,
outputs_info=[h0, c0],
non_sequences=[self.R], n_steps=max_time,
truncate_gradient=self.truncate_gradient, strict=True,
allow_gc=theano.config.scan.allow_gc)
res = T.concatenate([h0.dimshuffle('x', 0, 1), outputs], axis=0).dimshuffle((1, 0, 2))
if self.return_sequences:
return res
#return outputs[-1]
return res[T.arange(mask.shape[0], dtype='int32'), ind]
def set_init_cell_parameter(self, is_param=True):
if is_param:
if self.c_m1 not in self.params:
self.params.append(self.c_m1)
else:
self.params.remove(self.c_m1)
def set_init_h_parameter(self, is_param=True):
if is_param:
if self.h_m1 not in self.params:
self.params.append(self.h_m1)
else:
self.params.remove(self.h_m1)
# def get_time_range(self, train):
# mask = self.get_input_mask(train=train)
# ind = T.switch(T.eq(mask[:, -1], 1.), mask.shape[-1], T.argmin(mask, axis=-1)).astype('int32')
# self.time_range = ind
# return ind
class MeanPooling(MaskedLayer):
"""
Self-defined pooling layer
Global Mean Pooling Layer
"""
def __init__(self, start=1):
super(MeanPooling, self).__init__()
self.start = start
# def supports_masked_input(self):
# return False
def get_output_mask(self, train=False):
return None
def get_output(self, train=False):
data = self.get_input(train=train)
mask = self.get_input_mask(train=train)
mask = mask.dimshuffle((0, 1, 'x'))
return (data[:, self.start:] * mask).mean(axis=1)
def get_config(self):
return {"name": self.__class__.__name__}
np.random.seed(1337) # for reproducibility
max_features = 20000
maxlen = 100 # cut texts after this number of words (among top max_features most common words)
batch_size = 128
print("Loading data...")
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features, test_split=0.2)
print(len(X_train), 'train sequences') | random_line_split |
||
imdb_lstm_chen.py | import SGD, RMSprop, Adagrad
# from keras.utils import np_utils
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import MaskedLayer, Recurrent
from keras import activations, initializations
from keras.utils.theano_utils import shared_zeros
from keras.datasets import imdb
class LSTMLayer(Recurrent):
"""
Self-defined LSTM layer
optimized version: Not using mask in _step function and tensorized computation.
Acts as a spatiotemporal projection,
turning a sequence of vectors into a single vector.
Eats inputs with shape:
(nb_samples, max_sample_length (samples shorter than this are padded with zeros at the end), input_dim)
and returns outputs with shape:
if not return_sequences:
(nb_samples, output_dim)
if return_sequences:
(nb_samples, max_sample_length, output_dim)
For a step-by-step description of the algorithm, see:
http://deeplearning.net/tutorial/lstm.html
References:
Long short-term memory (original 97 paper)
http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
Learning to forget: Continual prediction with LSTM
http://www.mitpressjournals.org/doi/pdf/10.1162/089976600300015015
Supervised sequence labelling with recurrent neural networks
http://www.cs.toronto.edu/~graves/preprint.pdf
"""
def __init__(self, input_dim, output_dim=128, train_init_cell=True, train_init_h=True,
init='glorot_uniform', inner_init='orthogonal', forget_bias_init='one',
input_activation='tanh', gate_activation='hard_sigmoid', output_activation='tanh',
weights=None, truncate_gradient=-1, return_sequences=False):
super(LSTMLayer, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.truncate_gradient = truncate_gradient
self.return_sequences = return_sequences
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.forget_bias_init = initializations.get(forget_bias_init)
self.input_activation = activations.get(input_activation)
self.gate_activation = activations.get(gate_activation)
self.output_activation = activations.get(output_activation)
self.input = T.tensor3()
self.time_range = None
W_z = self.init((self.input_dim, self.output_dim)).get_value(borrow=True)
R_z = self.inner_init((self.output_dim, self.output_dim)).get_value(borrow=True)
# self.b_z = shared_zeros(self.output_dim)
W_i = self.init((self.input_dim, self.output_dim)).get_value(borrow=True)
R_i = self.inner_init((self.output_dim, self.output_dim)).get_value(borrow=True)
# self.b_i = shared_zeros(self.output_dim)
W_f = self.init((self.input_dim, self.output_dim)).get_value(borrow=True)
R_f = self.inner_init((self.output_dim, self.output_dim)).get_value(borrow=True)
# self.b_f = self.forget_bias_init(self.output_dim)
W_o = self.init((self.input_dim, self.output_dim)).get_value(borrow=True)
R_o = self.inner_init((self.output_dim, self.output_dim)).get_value(borrow=True)
# self.b_o = shared_zeros(self.output_dim)
self.h_m1 = shared_zeros(shape=(1, self.output_dim), name='h0')
self.c_m1 = shared_zeros(shape=(1, self.output_dim), name='c0')
W = np.vstack((W_z[np.newaxis, :, :],
W_i[np.newaxis, :, :],
W_f[np.newaxis, :, :],
W_o[np.newaxis, :, :])) # shape = (4, input_dim, output_dim)
R = np.vstack((R_z[np.newaxis, :, :],
R_i[np.newaxis, :, :],
R_f[np.newaxis, :, :],
R_o[np.newaxis, :, :])) # shape = (4, output_dim, output_dim)
self.W = theano.shared(W, name='Input to hidden weights (zifo)', borrow=True)
self.R = theano.shared(R, name='Recurrent weights (zifo)', borrow=True)
self.b = theano.shared(np.zeros(shape=(4, self.output_dim), dtype=theano.config.floatX),
name='bias', borrow=True)
self.params = [self.W, self.R]
if train_init_cell:
self.params.append(self.c_m1)
if train_init_h:
self.params.append(self.h_m1)
if weights is not None:
self.set_weights(weights)
def _step(self,
Y_t, # sequence
h_tm1, c_tm1, # output_info
R): # non_sequence
# h_mask_tm1 = mask_tm1 * h_tm1
# c_mask_tm1 = mask_tm1 * c_tm1
G_tm1 = T.dot(h_tm1, R)
M_t = Y_t + G_tm1
z_t = self.input_activation(M_t[:, 0, :])
ifo_t = self.gate_activation(M_t[:, 1:, :])
i_t = ifo_t[:, 0, :]
f_t = ifo_t[:, 1, :]
o_t = ifo_t[:, 2, :]
# c_t_cndt = f_t * c_tm1 + i_t * z_t
# h_t_cndt = o_t * self.output_activation(c_t_cndt)
c_t = f_t * c_tm1 + i_t * z_t
h_t = o_t * self.output_activation(c_t)
# h_t = mask * h_t_cndt + (1-mask) * h_tm1
# c_t = mask * c_t_cndt + (1-mask) * c_tm1
return h_t, c_t
def | (self, train=False):
X = self.get_input(train)
# mask = self.get_padded_shuffled_mask(train, X, pad=0)
mask = self.get_input_mask(train=train)
ind = T.switch(T.eq(mask[:, -1], 1.), mask.shape[-1], T.argmin(mask, axis=-1)).astype('int32')
max_time = T.max(ind)
X = X.dimshuffle((1, 0, 2))
Y = T.dot(X, self.W) + self.b
# h0 = T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)
h0 = T.repeat(self.h_m1, X.shape[1], axis=0)
c0 = T.repeat(self.c_m1, X.shape[1], axis=0)
[outputs, _], updates = theano.scan(
self._step,
sequences=Y,
outputs_info=[h0, c0],
non_sequences=[self.R], n_steps=max_time,
truncate_gradient=self.truncate_gradient, strict=True,
allow_gc=theano.config.scan.allow_gc)
res = T.concatenate([h0.dimshuffle('x', 0, 1), outputs], axis=0).dimshuffle((1, 0, 2))
if self.return_sequences:
return res
#return outputs[-1]
return res[T.arange(mask.shape[0], dtype='int32'), ind]
def set_init_cell_parameter(self, is_param=True):
if is_param:
if self.c_m1 not in self.params:
self.params.append(self.c_m1)
else:
self.params.remove(self.c_m1)
def set_init_h_parameter(self, is_param=True):
if is_param:
if self.h_m1 not in self.params:
self.params.append(self.h_m1)
else:
self.params.remove(self.h_m1)
# def get_time_range(self, train):
# mask = self.get_input_mask(train=train)
# ind = T.switch(T.eq(mask[:, -1], 1.), mask.shape[-1], T.argmin(mask, axis=-1)).astype('int32')
# self.time_range = ind
# return ind
class MeanPooling(MaskedLayer):
"""
Self-defined pooling layer
Global Mean Pooling Layer
"""
def __init__(self, start=1):
super(MeanPooling, self).__init__()
self.start = start
# def supports_masked_input(self):
# return False
def get_output_mask(self, train=False):
return None
def get_output(self, train=False):
data = self.get_input(train=train)
mask = self.get_input_mask(train=train)
mask = mask.dimshuffle((0, 1, 'x'))
return (data[:, self.start:] * mask).mean(axis=1)
def get_config(self):
return {"name": self.__class__.__name__}
np.random.seed(1337) # for reproducibility
max_features = 20000
maxlen = 100 # cut texts after this number of words (among top max_features most common words)
batch_size = 128
print("Loading data...")
(X_train, | get_output | identifier_name |
demuxer.rs | found by a demuxer.
NewPacket(Packet),
/// A new stream is found by a demuxer.
NewStream(Stream),
/// More data are needed by a demuxer to complete its operations.
MoreDataNeeded(usize),
/// Event not processable by a demuxer.
///
/// Demux the next event.
Continue,
/// End of File.
///
/// Stop demuxing data.
Eof,
}
/// Used to implement demuxing operations.
pub trait Demuxer: Send + Sync {
/// Reads stream headers and global information from a data structure
/// implementing the `Buffered` trait.
///
/// Global information are saved into a `GlobalInfo` structure.
fn read_headers(&mut self, buf: &mut dyn Buffered, info: &mut GlobalInfo) -> Result<SeekFrom>;
/// Reads an event from a data structure implementing the `Buffered` trait.
fn read_event(&mut self, buf: &mut dyn Buffered) -> Result<(SeekFrom, Event)>;
}
/// Auxiliary structure to encapsulate a demuxer object and
/// its additional data.
pub struct Context<D: Demuxer, R: Buffered> {
demuxer: D,
reader: R,
/// Global media file information.
pub info: GlobalInfo,
/// User private data.
///
/// This data cannot be cloned.
pub user_private: Option<Arc<dyn Any + Send + Sync>>,
}
impl<D: Demuxer, R: Buffered> Context<D, R> {
/// Creates a new `Context` instance.
pub fn new(demuxer: D, reader: R) -> Self {
Context {
demuxer,
reader,
info: GlobalInfo {
duration: None,
timebase: None,
streams: Vec::with_capacity(2),
},
user_private: None,
}
}
/// Returns the underlying demuxer.
pub fn demuxer(&self) -> &D {
&self.demuxer
}
fn read_headers_internal(&mut self) -> Result<()> {
let demux = &mut self.demuxer;
let res = demux.read_headers(&mut self.reader, &mut self.info);
match res {
Err(e) => Err(e),
Ok(seek) => {
//TODO: handle seeking here
let res = self.reader.seek(seek);
log::trace!("stream now at index: {:?}", res);
Ok(())
}
}
}
/// Reads stream headers and global information from a data source.
pub fn read_headers(&mut self) -> Result<()> {
loop {
// TODO: wrap fill_buf() with a check for Eof
self.reader.fill_buf()?;
match self.read_headers_internal() {
Err(e) => match e {
Error::MoreDataNeeded(needed) => {
self.reader.grow(needed);
}
_ => return Err(e),
},
Ok(_) => return Ok(()),
}
}
}
fn read_event_internal(&mut self) -> Result<Event> {
let demux = &mut self.demuxer;
let res = demux.read_event(&mut self.reader);
match res {
Err(e) => Err(e),
Ok((seek, mut event)) => {
//TODO: handle seeking here
let _ = self.reader.seek(seek)?;
if let Event::NewStream(ref st) = event {
self.info.streams.push(st.clone());
}
if let Event::MoreDataNeeded(size) = event {
return Err(Error::MoreDataNeeded(size));
}
if let Event::NewPacket(ref mut pkt) = event {
if pkt.t.timebase.is_none() {
if let Some(st) = self
.info
.streams
.iter()
.find(|s| s.index as isize == pkt.stream_index)
{
pkt.t.timebase = Some(st.timebase);
}
}
}
Ok(event)
}
}
}
/// Reads an event from a data source.
pub fn read_event(&mut self) -> Result<Event> {
// TODO: guard against infiniloops and maybe factor the loop.
loop {
match self.read_event_internal() {
Err(e) => match e {
Error::MoreDataNeeded(needed) => {
let len = self.reader.data().len();
// we might have sent MoreDatNeeded(0) to request a new call
if len >= needed {
continue;
}
self.reader.grow(needed);
self.reader.fill_buf()?;
if self.reader.data().len() <= len {
return Ok(Event::Eof);
}
}
_ => return Err(e),
},
Ok(ev) => return Ok(ev),
}
}
}
}
/// Format descriptor.
///
/// Contains information on a format and its own demuxer.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Descr {
/// Format name.
pub name: &'static str,
/// Demuxer name.
pub demuxer: &'static str,
/// Format description.
pub description: &'static str,
/// Format media file extensions.
pub extensions: &'static [&'static str],
/// Format MIME.
pub mime: &'static [&'static str],
}
/// Used to get a format descriptor and create a new demuxer.
pub trait Descriptor {
/// The specific type of the demuxer.
type OutputDemuxer: Demuxer;
/// Creates a new demuxer for the requested format.
fn create(&self) -> Self::OutputDemuxer;
/// Returns the descriptor of a format.
fn describe(&self) -> &Descr;
/// Returns a score which represents how much the input data are associated
/// to a format.
fn probe(&self, data: &[u8]) -> u8;
}
/// Maximum data size to probe a format.
pub const PROBE_DATA: usize = 4 * 1024;
/// Data whose probe score is equal or greater than the value of this constant
/// surely is associated to the format currently being analyzed.
pub const PROBE_SCORE_EXTENSION: u8 = 50;
/// Used to define different ways to probe a format.
pub trait Probe<T: Descriptor + ?Sized> {
/// Probes whether the input data is associated to a determined format.
fn probe(&self, data: &[u8]) -> Option<&'static T>;
}
impl<T: Descriptor + ?Sized> Probe<T> for [&'static T] {
fn probe(&self, data: &[u8]) -> Option<&'static T> {
let mut max = u8::min_value();
let mut candidate: Option<&'static T> = None;
for desc in self {
let score = desc.probe(data);
if score > max {
max = score;
candidate = Some(*desc);
}
}
if max > PROBE_SCORE_EXTENSION {
candidate
} else |
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::data::packet::Packet;
use std::io::SeekFrom;
struct DummyDes {
d: Descr,
}
struct DummyDemuxer {}
impl Demuxer for DummyDemuxer {
fn read_headers(
&mut self,
buf: &mut dyn Buffered,
_info: &mut GlobalInfo,
) -> Result<SeekFrom> {
let len = buf.data().len();
if 9 > len {
let needed = 9 - len;
Err(Error::MoreDataNeeded(needed))
} else {
Ok(SeekFrom::Current(9))
}
}
fn read_event(&mut self, buf: &mut dyn Buffered) -> Result<(SeekFrom, Event)> {
let size = 2;
let len = buf.data().len();
if size > len {
Err(Error::MoreDataNeeded(size - len))
} else {
log::debug!("{:?}", buf.data());
match &buf.data()[..2] {
b"p1" => Ok((SeekFrom::Current(3), Event::NewPacket(Packet::new()))),
b"e1" => Ok((SeekFrom::Current(3), Event::MoreDataNeeded(0))),
_ => Err(Error::InvalidData),
}
}
}
}
impl Descriptor for DummyDes {
type OutputDemuxer = DummyDemuxer;
fn create(&self) -> Self::OutputDemuxer {
DummyDemuxer {}
}
fn describe<'a>(&'_ self) -> &'_ Descr {
&self.d
}
fn probe(&self, data: &[u8]) -> u8 {
match data {
b"dummy" => 100,
_ => 0,
}
}
}
const DUMMY_DES: &dyn Descriptor<OutputDemuxer = DummyDemuxer> = &DummyDes {
d: Descr {
name: "dummy",
demuxer: "dummy",
description: "Dummy dem",
extensions: &["dm", "dum"],
mime: &["application/dummy"],
},
};
#[test]
fn probe() {
let demuxers: &[&'static dyn Descriptor<OutputDemuxer = DummyDemuxer>] = | {
None
} | conditional_block |
demuxer.rs | found by a demuxer.
NewPacket(Packet),
/// A new stream is found by a demuxer.
NewStream(Stream),
/// More data are needed by a demuxer to complete its operations.
MoreDataNeeded(usize),
/// Event not processable by a demuxer.
///
/// Demux the next event.
Continue,
/// End of File.
///
/// Stop demuxing data.
Eof,
}
/// Used to implement demuxing operations.
pub trait Demuxer: Send + Sync {
/// Reads stream headers and global information from a data structure
/// implementing the `Buffered` trait.
///
/// Global information are saved into a `GlobalInfo` structure.
fn read_headers(&mut self, buf: &mut dyn Buffered, info: &mut GlobalInfo) -> Result<SeekFrom>;
/// Reads an event from a data structure implementing the `Buffered` trait.
fn read_event(&mut self, buf: &mut dyn Buffered) -> Result<(SeekFrom, Event)>;
}
/// Auxiliary structure to encapsulate a demuxer object and
/// its additional data.
pub struct Context<D: Demuxer, R: Buffered> {
demuxer: D,
reader: R,
/// Global media file information.
pub info: GlobalInfo,
/// User private data.
///
/// This data cannot be cloned.
pub user_private: Option<Arc<dyn Any + Send + Sync>>,
}
impl<D: Demuxer, R: Buffered> Context<D, R> {
/// Creates a new `Context` instance.
pub fn new(demuxer: D, reader: R) -> Self {
Context {
demuxer,
reader,
info: GlobalInfo {
duration: None,
timebase: None,
streams: Vec::with_capacity(2),
},
user_private: None,
}
}
/// Returns the underlying demuxer.
pub fn demuxer(&self) -> &D {
&self.demuxer
}
fn read_headers_internal(&mut self) -> Result<()> {
let demux = &mut self.demuxer;
let res = demux.read_headers(&mut self.reader, &mut self.info);
match res {
Err(e) => Err(e),
Ok(seek) => {
//TODO: handle seeking here
let res = self.reader.seek(seek);
log::trace!("stream now at index: {:?}", res);
Ok(())
}
}
}
/// Reads stream headers and global information from a data source.
pub fn read_headers(&mut self) -> Result<()> {
loop {
// TODO: wrap fill_buf() with a check for Eof
self.reader.fill_buf()?;
match self.read_headers_internal() {
Err(e) => match e {
Error::MoreDataNeeded(needed) => {
self.reader.grow(needed);
}
_ => return Err(e),
},
Ok(_) => return Ok(()),
}
}
}
fn read_event_internal(&mut self) -> Result<Event> {
let demux = &mut self.demuxer;
let res = demux.read_event(&mut self.reader);
match res {
Err(e) => Err(e),
Ok((seek, mut event)) => {
//TODO: handle seeking here
let _ = self.reader.seek(seek)?;
if let Event::NewStream(ref st) = event {
self.info.streams.push(st.clone());
}
if let Event::MoreDataNeeded(size) = event {
return Err(Error::MoreDataNeeded(size));
}
if let Event::NewPacket(ref mut pkt) = event {
if pkt.t.timebase.is_none() {
if let Some(st) = self
.info
.streams
.iter()
.find(|s| s.index as isize == pkt.stream_index)
{
pkt.t.timebase = Some(st.timebase);
}
}
}
Ok(event)
}
}
}
/// Reads an event from a data source.
pub fn read_event(&mut self) -> Result<Event> {
// TODO: guard against infiniloops and maybe factor the loop.
loop {
match self.read_event_internal() {
Err(e) => match e {
Error::MoreDataNeeded(needed) => {
let len = self.reader.data().len();
// we might have sent MoreDatNeeded(0) to request a new call
if len >= needed {
continue;
}
self.reader.grow(needed);
self.reader.fill_buf()?;
if self.reader.data().len() <= len {
return Ok(Event::Eof);
}
}
_ => return Err(e),
},
Ok(ev) => return Ok(ev),
}
}
}
}
/// Format descriptor.
///
/// Contains information on a format and its own demuxer.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Descr {
/// Format name.
pub name: &'static str,
/// Demuxer name.
pub demuxer: &'static str,
/// Format description.
pub description: &'static str,
/// Format media file extensions.
pub extensions: &'static [&'static str],
/// Format MIME.
pub mime: &'static [&'static str],
}
/// Used to get a format descriptor and create a new demuxer.
pub trait Descriptor {
/// The specific type of the demuxer.
type OutputDemuxer: Demuxer;
/// Creates a new demuxer for the requested format.
fn create(&self) -> Self::OutputDemuxer;
/// Returns the descriptor of a format.
fn describe(&self) -> &Descr;
/// Returns a score which represents how much the input data are associated
/// to a format.
fn probe(&self, data: &[u8]) -> u8;
}
/// Maximum data size to probe a format.
pub const PROBE_DATA: usize = 4 * 1024;
/// Data whose probe score is equal or greater than the value of this constant
/// surely is associated to the format currently being analyzed.
pub const PROBE_SCORE_EXTENSION: u8 = 50;
/// Used to define different ways to probe a format.
pub trait Probe<T: Descriptor + ?Sized> {
/// Probes whether the input data is associated to a determined format.
fn probe(&self, data: &[u8]) -> Option<&'static T>;
}
impl<T: Descriptor + ?Sized> Probe<T> for [&'static T] {
fn probe(&self, data: &[u8]) -> Option<&'static T> {
let mut max = u8::min_value();
let mut candidate: Option<&'static T> = None;
for desc in self {
let score = desc.probe(data);
if score > max {
max = score;
candidate = Some(*desc);
}
}
if max > PROBE_SCORE_EXTENSION {
candidate
} else {
None
}
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::data::packet::Packet;
use std::io::SeekFrom;
struct | {
d: Descr,
}
struct DummyDemuxer {}
impl Demuxer for DummyDemuxer {
fn read_headers(
&mut self,
buf: &mut dyn Buffered,
_info: &mut GlobalInfo,
) -> Result<SeekFrom> {
let len = buf.data().len();
if 9 > len {
let needed = 9 - len;
Err(Error::MoreDataNeeded(needed))
} else {
Ok(SeekFrom::Current(9))
}
}
fn read_event(&mut self, buf: &mut dyn Buffered) -> Result<(SeekFrom, Event)> {
let size = 2;
let len = buf.data().len();
if size > len {
Err(Error::MoreDataNeeded(size - len))
} else {
log::debug!("{:?}", buf.data());
match &buf.data()[..2] {
b"p1" => Ok((SeekFrom::Current(3), Event::NewPacket(Packet::new()))),
b"e1" => Ok((SeekFrom::Current(3), Event::MoreDataNeeded(0))),
_ => Err(Error::InvalidData),
}
}
}
}
impl Descriptor for DummyDes {
type OutputDemuxer = DummyDemuxer;
fn create(&self) -> Self::OutputDemuxer {
DummyDemuxer {}
}
fn describe<'a>(&'_ self) -> &'_ Descr {
&self.d
}
fn probe(&self, data: &[u8]) -> u8 {
match data {
b"dummy" => 100,
_ => 0,
}
}
}
const DUMMY_DES: &dyn Descriptor<OutputDemuxer = DummyDemuxer> = &DummyDes {
d: Descr {
name: "dummy",
demuxer: "dummy",
description: "Dummy dem",
extensions: &["dm", "dum"],
mime: &["application/dummy"],
},
};
#[test]
fn probe() {
let demuxers: &[&'static dyn Descriptor<OutputDemuxer = DummyDemuxer>] = &[ | DummyDes | identifier_name |
demuxer.rs | is found by a demuxer.
NewPacket(Packet),
/// A new stream is found by a demuxer.
NewStream(Stream),
/// More data are needed by a demuxer to complete its operations.
MoreDataNeeded(usize),
/// Event not processable by a demuxer.
///
/// Demux the next event.
Continue,
/// End of File.
///
/// Stop demuxing data.
Eof,
}
/// Used to implement demuxing operations.
pub trait Demuxer: Send + Sync {
/// Reads stream headers and global information from a data structure
/// implementing the `Buffered` trait.
///
/// Global information are saved into a `GlobalInfo` structure.
fn read_headers(&mut self, buf: &mut dyn Buffered, info: &mut GlobalInfo) -> Result<SeekFrom>;
/// Reads an event from a data structure implementing the `Buffered` trait.
fn read_event(&mut self, buf: &mut dyn Buffered) -> Result<(SeekFrom, Event)>;
}
/// Auxiliary structure to encapsulate a demuxer object and
/// its additional data.
pub struct Context<D: Demuxer, R: Buffered> {
demuxer: D,
reader: R,
/// Global media file information.
pub info: GlobalInfo,
/// User private data.
///
/// This data cannot be cloned.
pub user_private: Option<Arc<dyn Any + Send + Sync>>,
}
impl<D: Demuxer, R: Buffered> Context<D, R> {
/// Creates a new `Context` instance.
pub fn new(demuxer: D, reader: R) -> Self {
Context {
demuxer,
reader,
info: GlobalInfo {
duration: None,
timebase: None,
streams: Vec::with_capacity(2),
},
user_private: None,
}
}
/// Returns the underlying demuxer.
pub fn demuxer(&self) -> &D {
&self.demuxer
}
fn read_headers_internal(&mut self) -> Result<()> {
let demux = &mut self.demuxer;
let res = demux.read_headers(&mut self.reader, &mut self.info);
match res {
Err(e) => Err(e),
Ok(seek) => {
//TODO: handle seeking here
let res = self.reader.seek(seek);
log::trace!("stream now at index: {:?}", res);
Ok(())
}
}
}
/// Reads stream headers and global information from a data source.
pub fn read_headers(&mut self) -> Result<()> {
loop {
// TODO: wrap fill_buf() with a check for Eof
self.reader.fill_buf()?;
match self.read_headers_internal() {
Err(e) => match e {
Error::MoreDataNeeded(needed) => {
self.reader.grow(needed);
}
_ => return Err(e),
},
Ok(_) => return Ok(()),
}
}
}
fn read_event_internal(&mut self) -> Result<Event> {
let demux = &mut self.demuxer;
let res = demux.read_event(&mut self.reader);
match res {
Err(e) => Err(e),
Ok((seek, mut event)) => {
//TODO: handle seeking here
let _ = self.reader.seek(seek)?;
if let Event::NewStream(ref st) = event { | }
if let Event::MoreDataNeeded(size) = event {
return Err(Error::MoreDataNeeded(size));
}
if let Event::NewPacket(ref mut pkt) = event {
if pkt.t.timebase.is_none() {
if let Some(st) = self
.info
.streams
.iter()
.find(|s| s.index as isize == pkt.stream_index)
{
pkt.t.timebase = Some(st.timebase);
}
}
}
Ok(event)
}
}
}
/// Reads an event from a data source.
pub fn read_event(&mut self) -> Result<Event> {
// TODO: guard against infiniloops and maybe factor the loop.
loop {
match self.read_event_internal() {
Err(e) => match e {
Error::MoreDataNeeded(needed) => {
let len = self.reader.data().len();
// we might have sent MoreDatNeeded(0) to request a new call
if len >= needed {
continue;
}
self.reader.grow(needed);
self.reader.fill_buf()?;
if self.reader.data().len() <= len {
return Ok(Event::Eof);
}
}
_ => return Err(e),
},
Ok(ev) => return Ok(ev),
}
}
}
}
/// Format descriptor.
///
/// Contains information on a format and its own demuxer.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Descr {
/// Format name.
pub name: &'static str,
/// Demuxer name.
pub demuxer: &'static str,
/// Format description.
pub description: &'static str,
/// Format media file extensions.
pub extensions: &'static [&'static str],
/// Format MIME.
pub mime: &'static [&'static str],
}
/// Used to get a format descriptor and create a new demuxer.
pub trait Descriptor {
/// The specific type of the demuxer.
type OutputDemuxer: Demuxer;
/// Creates a new demuxer for the requested format.
fn create(&self) -> Self::OutputDemuxer;
/// Returns the descriptor of a format.
fn describe(&self) -> &Descr;
/// Returns a score which represents how much the input data are associated
/// to a format.
fn probe(&self, data: &[u8]) -> u8;
}
/// Maximum data size to probe a format.
pub const PROBE_DATA: usize = 4 * 1024;
/// Data whose probe score is equal or greater than the value of this constant
/// surely is associated to the format currently being analyzed.
pub const PROBE_SCORE_EXTENSION: u8 = 50;
/// Used to define different ways to probe a format.
pub trait Probe<T: Descriptor + ?Sized> {
/// Probes whether the input data is associated to a determined format.
fn probe(&self, data: &[u8]) -> Option<&'static T>;
}
impl<T: Descriptor + ?Sized> Probe<T> for [&'static T] {
fn probe(&self, data: &[u8]) -> Option<&'static T> {
let mut max = u8::min_value();
let mut candidate: Option<&'static T> = None;
for desc in self {
let score = desc.probe(data);
if score > max {
max = score;
candidate = Some(*desc);
}
}
if max > PROBE_SCORE_EXTENSION {
candidate
} else {
None
}
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::data::packet::Packet;
use std::io::SeekFrom;
struct DummyDes {
d: Descr,
}
struct DummyDemuxer {}
impl Demuxer for DummyDemuxer {
fn read_headers(
&mut self,
buf: &mut dyn Buffered,
_info: &mut GlobalInfo,
) -> Result<SeekFrom> {
let len = buf.data().len();
if 9 > len {
let needed = 9 - len;
Err(Error::MoreDataNeeded(needed))
} else {
Ok(SeekFrom::Current(9))
}
}
fn read_event(&mut self, buf: &mut dyn Buffered) -> Result<(SeekFrom, Event)> {
let size = 2;
let len = buf.data().len();
if size > len {
Err(Error::MoreDataNeeded(size - len))
} else {
log::debug!("{:?}", buf.data());
match &buf.data()[..2] {
b"p1" => Ok((SeekFrom::Current(3), Event::NewPacket(Packet::new()))),
b"e1" => Ok((SeekFrom::Current(3), Event::MoreDataNeeded(0))),
_ => Err(Error::InvalidData),
}
}
}
}
impl Descriptor for DummyDes {
type OutputDemuxer = DummyDemuxer;
fn create(&self) -> Self::OutputDemuxer {
DummyDemuxer {}
}
fn describe<'a>(&'_ self) -> &'_ Descr {
&self.d
}
fn probe(&self, data: &[u8]) -> u8 {
match data {
b"dummy" => 100,
_ => 0,
}
}
}
const DUMMY_DES: &dyn Descriptor<OutputDemuxer = DummyDemuxer> = &DummyDes {
d: Descr {
name: "dummy",
demuxer: "dummy",
description: "Dummy dem",
extensions: &["dm", "dum"],
mime: &["application/dummy"],
},
};
#[test]
fn probe() {
let demuxers: &[&'static dyn Descriptor<OutputDemuxer = DummyDemuxer>] = &[ | self.info.streams.push(st.clone()); | random_line_split |
demuxer.rs | by a demuxer.
NewStream(Stream),
/// More data are needed by a demuxer to complete its operations.
MoreDataNeeded(usize),
/// Event not processable by a demuxer.
///
/// Demux the next event.
Continue,
/// End of File.
///
/// Stop demuxing data.
Eof,
}
/// Used to implement demuxing operations.
pub trait Demuxer: Send + Sync {
/// Reads stream headers and global information from a data structure
/// implementing the `Buffered` trait.
///
/// Global information are saved into a `GlobalInfo` structure.
fn read_headers(&mut self, buf: &mut dyn Buffered, info: &mut GlobalInfo) -> Result<SeekFrom>;
/// Reads an event from a data structure implementing the `Buffered` trait.
fn read_event(&mut self, buf: &mut dyn Buffered) -> Result<(SeekFrom, Event)>;
}
/// Auxiliary structure to encapsulate a demuxer object and
/// its additional data.
pub struct Context<D: Demuxer, R: Buffered> {
demuxer: D,
reader: R,
/// Global media file information.
pub info: GlobalInfo,
/// User private data.
///
/// This data cannot be cloned.
pub user_private: Option<Arc<dyn Any + Send + Sync>>,
}
impl<D: Demuxer, R: Buffered> Context<D, R> {
/// Creates a new `Context` instance.
pub fn new(demuxer: D, reader: R) -> Self {
Context {
demuxer,
reader,
info: GlobalInfo {
duration: None,
timebase: None,
streams: Vec::with_capacity(2),
},
user_private: None,
}
}
/// Returns the underlying demuxer.
pub fn demuxer(&self) -> &D {
&self.demuxer
}
fn read_headers_internal(&mut self) -> Result<()> {
let demux = &mut self.demuxer;
let res = demux.read_headers(&mut self.reader, &mut self.info);
match res {
Err(e) => Err(e),
Ok(seek) => {
//TODO: handle seeking here
let res = self.reader.seek(seek);
log::trace!("stream now at index: {:?}", res);
Ok(())
}
}
}
/// Reads stream headers and global information from a data source.
pub fn read_headers(&mut self) -> Result<()> {
loop {
// TODO: wrap fill_buf() with a check for Eof
self.reader.fill_buf()?;
match self.read_headers_internal() {
Err(e) => match e {
Error::MoreDataNeeded(needed) => {
self.reader.grow(needed);
}
_ => return Err(e),
},
Ok(_) => return Ok(()),
}
}
}
fn read_event_internal(&mut self) -> Result<Event> {
let demux = &mut self.demuxer;
let res = demux.read_event(&mut self.reader);
match res {
Err(e) => Err(e),
Ok((seek, mut event)) => {
//TODO: handle seeking here
let _ = self.reader.seek(seek)?;
if let Event::NewStream(ref st) = event {
self.info.streams.push(st.clone());
}
if let Event::MoreDataNeeded(size) = event {
return Err(Error::MoreDataNeeded(size));
}
if let Event::NewPacket(ref mut pkt) = event {
if pkt.t.timebase.is_none() {
if let Some(st) = self
.info
.streams
.iter()
.find(|s| s.index as isize == pkt.stream_index)
{
pkt.t.timebase = Some(st.timebase);
}
}
}
Ok(event)
}
}
}
/// Reads an event from a data source.
pub fn read_event(&mut self) -> Result<Event> {
// TODO: guard against infiniloops and maybe factor the loop.
loop {
match self.read_event_internal() {
Err(e) => match e {
Error::MoreDataNeeded(needed) => {
let len = self.reader.data().len();
// we might have sent MoreDatNeeded(0) to request a new call
if len >= needed {
continue;
}
self.reader.grow(needed);
self.reader.fill_buf()?;
if self.reader.data().len() <= len {
return Ok(Event::Eof);
}
}
_ => return Err(e),
},
Ok(ev) => return Ok(ev),
}
}
}
}
/// Format descriptor.
///
/// Contains information on a format and its own demuxer.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Descr {
/// Format name.
pub name: &'static str,
/// Demuxer name.
pub demuxer: &'static str,
/// Format description.
pub description: &'static str,
/// Format media file extensions.
pub extensions: &'static [&'static str],
/// Format MIME.
pub mime: &'static [&'static str],
}
/// Used to get a format descriptor and create a new demuxer.
pub trait Descriptor {
/// The specific type of the demuxer.
type OutputDemuxer: Demuxer;
/// Creates a new demuxer for the requested format.
fn create(&self) -> Self::OutputDemuxer;
/// Returns the descriptor of a format.
fn describe(&self) -> &Descr;
/// Returns a score which represents how much the input data are associated
/// to a format.
fn probe(&self, data: &[u8]) -> u8;
}
/// Maximum data size to probe a format.
pub const PROBE_DATA: usize = 4 * 1024;
/// Data whose probe score is equal or greater than the value of this constant
/// surely is associated to the format currently being analyzed.
pub const PROBE_SCORE_EXTENSION: u8 = 50;
/// Used to define different ways to probe a format.
pub trait Probe<T: Descriptor + ?Sized> {
/// Probes whether the input data is associated to a determined format.
fn probe(&self, data: &[u8]) -> Option<&'static T>;
}
impl<T: Descriptor + ?Sized> Probe<T> for [&'static T] {
fn probe(&self, data: &[u8]) -> Option<&'static T> {
let mut max = u8::min_value();
let mut candidate: Option<&'static T> = None;
for desc in self {
let score = desc.probe(data);
if score > max {
max = score;
candidate = Some(*desc);
}
}
if max > PROBE_SCORE_EXTENSION {
candidate
} else {
None
}
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::data::packet::Packet;
use std::io::SeekFrom;
struct DummyDes {
d: Descr,
}
struct DummyDemuxer {}
impl Demuxer for DummyDemuxer {
fn read_headers(
&mut self,
buf: &mut dyn Buffered,
_info: &mut GlobalInfo,
) -> Result<SeekFrom> {
let len = buf.data().len();
if 9 > len {
let needed = 9 - len;
Err(Error::MoreDataNeeded(needed))
} else {
Ok(SeekFrom::Current(9))
}
}
fn read_event(&mut self, buf: &mut dyn Buffered) -> Result<(SeekFrom, Event)> {
let size = 2;
let len = buf.data().len();
if size > len {
Err(Error::MoreDataNeeded(size - len))
} else {
log::debug!("{:?}", buf.data());
match &buf.data()[..2] {
b"p1" => Ok((SeekFrom::Current(3), Event::NewPacket(Packet::new()))),
b"e1" => Ok((SeekFrom::Current(3), Event::MoreDataNeeded(0))),
_ => Err(Error::InvalidData),
}
}
}
}
impl Descriptor for DummyDes {
type OutputDemuxer = DummyDemuxer;
fn create(&self) -> Self::OutputDemuxer {
DummyDemuxer {}
}
fn describe<'a>(&'_ self) -> &'_ Descr {
&self.d
}
fn probe(&self, data: &[u8]) -> u8 {
match data {
b"dummy" => 100,
_ => 0,
}
}
}
const DUMMY_DES: &dyn Descriptor<OutputDemuxer = DummyDemuxer> = &DummyDes {
d: Descr {
name: "dummy",
demuxer: "dummy",
description: "Dummy dem",
extensions: &["dm", "dum"],
mime: &["application/dummy"],
},
};
#[test]
fn probe() | {
let demuxers: &[&'static dyn Descriptor<OutputDemuxer = DummyDemuxer>] = &[DUMMY_DES];
demuxers.probe(b"dummy").unwrap();
} | identifier_body |
|
parser.go | .next() // consume closing '"'
txt := &ast.ParsedText{
Opener: pos,
Depth: 0,
Delim: ast.QuoteDelimiter,
Values: values,
Closer: p.pos,
}
return txt
case token.StringLBrace:
return p.parseText(0)
default:
p.errorExpected(p.pos, "string literal")
p.advance(stmtStart)
return &ast.BadExpr{
From: pos,
To: p.pos,
}
}
}
func (p *parser) parseURLStringLiteral() ast.Expr {
sb := strings.Builder{}
sb.Grow(32)
pos := p.pos
switch tok := p.tok; tok {
case token.DoubleQuote:
p.next()
if p.tok == token.StringMacro {
url := p.parseMacroURL(p.lit)
p.expect(token.StringRBrace)
p.expect(token.DoubleQuote)
return &ast.ParsedText{
Opener: pos,
Depth: 0,
Delim: ast.QuoteDelimiter,
Values: []ast.Expr{url},
Closer: p.pos,
}
}
for p.tok != token.DoubleQuote {
if p.tok == token.EOF {
p.errorExpected(p.pos, "double quote")
return &ast.BadExpr{From: pos, To: p.pos}
}
sb.WriteString(p.lit)
p.next()
}
txt := &ast.Text{
ValuePos: pos,
Value: sb.String(),
}
p.expect(token.DoubleQuote)
return txt
case token.StringLBrace:
return p.parseText(0)
default:
p.errorExpected(p.pos, "string literal")
p.advance(stmtStart)
return &ast.BadExpr{
From: pos,
To: p.pos,
}
}
}
func (p *parser) parseExpr() (x ast.Expr) {
if p.trace {
defer un(trace(p, "Expr"))
}
pos := p.pos
switch {
case p.tok.IsLiteral():
x = p.parseBasicLit()
if p.tok == token.Concat {
p.next()
opPos := p.pos
y := p.parseExpr()
x = &ast.ConcatExpr{
X: x,
OpPos: opPos,
Y: y,
}
}
case p.tok.IsStringLiteral():
x = p.parseStringLiteral()
default:
p.errorExpected(p.pos, "literal: number or string")
x = &ast.BadExpr{
From: pos,
To: p.pos,
}
p.next() // make progress
}
return
}
func (p *parser) parseIdent() *ast.Ident {
pos := p.pos
name := "_"
switch p.tok {
// Bibtex cite keys may be all numbers, but tag keys may not. Allow either
// here and check one level up.
case token.Ident, token.Number:
name = p.lit
p.next()
default:
p.expect(token.Ident) // use expect() error handling
}
return &ast.Ident{NamePos: pos, Name: name}
}
func (p *parser) parseTagStmt() *ast.TagStmt {
if p.trace {
defer un(trace(p, "TagStmt"))
}
doc := p.leadComment
key := p.parseIdent()
p.expect(token.Assign)
val := p.parseExpr()
p.expectOptionalTagComma()
return &ast.TagStmt{
Doc: doc,
NamePos: key.Pos(),
Name: strings.ToLower(key.Name),
RawName: key.Name,
Value: val,
}
}
func (p *parser) expectCloser(open token.Token) gotok.Pos {
end := p.pos
switch open {
case token.LBrace:
end = p.expect(token.RBrace)
case token.LParen:
end = p.expect(token.RParen)
default:
p.error(p.pos, "no closing delimiter for "+open.String())
}
return end
}
func (p *parser) parsePreambleDecl() *ast.PreambleDecl {
if p.trace {
defer un(trace(p, "PreambleDecl"))
}
doc := p.leadComment
pos := p.expect(token.Preamble)
opener, _ := p.expectOne(token.LBrace, token.LParen)
text := p.parseExpr()
closer := p.expectCloser(opener)
return &ast.PreambleDecl{
Doc: doc,
Entry: pos,
Text: text,
RBrace: closer,
}
}
func (p *parser) parseAbbrevDecl() *ast.AbbrevDecl {
if p.trace {
defer un(trace(p, "AbbrevDecl"))
}
doc := p.leadComment
pos := p.expect(token.Abbrev)
opener, _ := p.expectOne(token.LBrace, token.LParen)
tag := p.parseTagStmt()
closer := p.expectCloser(opener)
return &ast.AbbrevDecl{
Doc: doc,
Entry: pos,
Tag: tag,
RBrace: closer,
}
}
// fixUpFields alters val based on tag type. For example, a url tag doesn't
// follow the normal Bibtex parsing rules because it's usually wrapped in a
// \url{} macro.
func fixUpFields(tag string, val ast.Expr) ast.Expr {
if tag == "url" {
txt, ok := val.(*ast.ParsedText)
if !ok || len(txt.Values) == 0 {
return val
}
child1, ok := txt.Values[0].(*ast.Text)
if !ok || !strings.HasPrefix(child1.Value, "http") {
return val
}
pos := child1.ValuePos
sb := strings.Builder{}
sb.Grow(32)
for _, child := range txt.Values {
if cTxt, ok := child.(*ast.Text); !ok {
return val
} else {
sb.WriteString(cTxt.Value)
}
}
return &ast.ParsedText{
Opener: txt.Opener,
Depth: txt.Depth,
Delim: txt.Delim,
Values: []ast.Expr{&ast.Text{ValuePos: pos, Value: sb.String()}},
Closer: txt.Closer,
}
}
return val
}
func (p *parser) parseBibDecl() *ast.BibDecl {
if p.trace {
defer un(trace(p, "BibDecl"))
}
doc := p.leadComment
entryType := p.lit[1:] // drop '@', e.g. "@book" -> "book"
pos := p.expect(token.BibEntry)
var bibKey *ast.Ident // use first key found as bibKey
var extraKeys []*ast.Ident
tags := make([]*ast.TagStmt, 0, 8)
opener, _ := p.expectOne(token.LBrace, token.LParen)
// A bibtex entry cite key may be all numbers but a tag key cannot.
for p.tok == token.Ident || p.tok == token.Number {
doc := p.leadComment
key := p.parseIdent() // parses both ident and number
switch p.tok {
case token.Assign:
// It's a tag.
if !isValidTagName(key) {
p.error(key.Pos(), "tag keys must not start with a number")
}
p.next()
var val ast.Expr
if key.Name == "url" && p.tok.IsStringLiteral() {
val = p.parseURLStringLiteral()
} else {
val = p.parseExpr()
}
fixVal := fixUpFields(key.Name, val)
tag := &ast.TagStmt{
Doc: doc,
NamePos: key.Pos(),
Name: strings.ToLower(key.Name),
RawName: key.Name,
Value: fixVal,
}
tags = append(tags, tag)
}
switch p.tok {
case token.Comma:
// It's a cite key.
p.next()
if bibKey == nil {
bibKey = key
} else {
extraKeys = append(extraKeys, key)
}
continue
}
}
closer := p.expectCloser(opener)
p.expectOptional(token.Comma) // trailing commas allowed
return &ast.BibDecl{
Type: entryType,
Doc: doc,
Entry: pos,
Key: bibKey,
ExtraKeys: extraKeys,
Tags: tags,
RBrace: closer,
}
}
func (p *parser) parseDecl() ast.Decl | {
if p.trace {
defer un(trace(p, "Declaration"))
}
switch p.tok {
case token.Preamble:
return p.parsePreambleDecl()
case token.Abbrev:
return p.parseAbbrevDecl()
case token.BibEntry:
return p.parseBibDecl()
default:
pos := p.pos
p.errorExpected(pos, "entry")
p.advance(entryStart)
return &ast.BadDecl{
From: pos,
To: p.pos,
} | identifier_body |
|
parser.go | }
// add comment group to the comments list
comments = &ast.TexCommentGroup{List: list}
p.comments = append(p.comments, comments)
return
}
// Advance to the next non-comment token. In the process, collect
// any comment groups encountered, and remember the last lead and
// line comments.
//
// A lead comment is a comment group that starts and ends in a
// line without any other tokens and that is followed by a non-comment
// token on the line immediately after the comment group.
//
// A line comment is a comment group that follows a non-comment
// token on the same line, and that has no tokens after it on the line
// where it ends.
//
// Lead and line comments may be considered documentation that is
// stored in the AST.
func (p *parser) next() {
p.leadComment = nil
p.lineComment = nil
prev := p.pos
p.next0()
if p.tok == token.TexComment {
var comment *ast.TexCommentGroup
var endLine int
if p.file.Line(p.pos) == p.file.Line(prev) {
// The comment is on same line as the previous token; it
// cannot be a lead comment but may be a line comment.
comment, endLine = p.consumeCommentGroup(0)
if p.file.Line(p.pos) != endLine || p.tok == token.EOF {
// The next token is on a different line, thus
// the last comment group is a line comment.
p.lineComment = comment
}
}
// consume successor comments, if any
endLine = -1
for p.tok == token.TexComment {
comment, endLine = p.consumeCommentGroup(1)
}
if endLine+1 == p.file.Line(p.pos) {
// The next token is following on the line immediately after the
// comment group, thus the last comment group is a lead comment.
p.leadComment = comment
}
}
}
// A bailout panic is raised to indicate early termination.
type bailout struct{}
func (p *parser) error(pos gotok.Pos, msg string) {
epos := p.file.Position(pos)
// If AllErrors is not set, discard errors reported on the same line
// as the last recorded error and stop parsing if there are more than
// 10 errors.
if p.mode&AllErrors == 0 {
n := len(p.errors)
if n > 0 && p.errors[n-1].Pos.Line == epos.Line {
return // discard - likely a spurious error
}
if n > 10 {
panic(bailout{})
}
}
p.errors.Add(epos, msg)
}
func (p *parser) errorExpected(pos gotok.Pos, msg string) {
msg = "expected " + msg
if pos == p.pos {
// the error happened at the current position;
// make the error message more specific
switch {
case p.tok.IsLiteral():
// print 123 rather than 'Number', etc.
msg += ", found " + p.lit
default:
msg += ", found '" + p.tok.String() + "'"
}
}
p.error(pos, msg)
}
func (p *parser) expect(tok token.Token) gotok.Pos {
pos := p.pos
if p.tok != tok {
p.errorExpected(pos, "'"+tok.String()+"'")
}
p.next() // make progress
return pos
}
func (p *parser) expectOptional(tok token.Token) gotok.Pos {
pos := p.pos
if p.tok != tok {
return pos
}
p.next() // make progress
return pos
}
func (p *parser) expectOne(tok ...token.Token) (token.Token, gotok.Pos) {
pos := p.pos
for _, t := range tok {
if p.tok == t {
p.next()
return t, pos
}
}
sb := strings.Builder{}
sb.WriteString("one of [")
for i, t := range tok {
sb.WriteString("'" + t.String() + "'")
if i < len(tok)-1 {
sb.WriteString(", ")
}
}
sb.WriteString("]")
p.errorExpected(pos, sb.String())
p.next() // make progress
return token.Illegal, pos
}
func (p *parser) expectOptionalTagComma() {
if p.tok == token.RBrace || p.tok == token.RParen {
// TextComma is optional before a closing ')' or '}'
return
}
switch p.tok {
case token.Comma:
p.next()
default:
p.errorExpected(p.pos, "','")
p.advance(stmtStart)
}
}
func assert(cond bool, msg string) {
if !cond {
panic("bibtex/parser internal error: " + msg)
}
}
// advance consumes tokens until the current token p.tok
// is in the 'to' set, or token.EOF. For error recovery.
func (p *parser) advance(to map[token.Token]bool) {
for ; p.tok != token.EOF; p.next() {
if to[p.tok] {
// Return only if parser made some progress since last
// sync or if it has not reached 10 advance calls without
// progress. Otherwise, consume at least one token to
// avoid an endless parser loop (it is possible that
// both parseOperand and parseStmt call advance and
// correctly do not advance, thus the need for the
// invocation limit p.syncCnt).
if p.pos == p.syncPos && p.syncCnt < 10 {
p.syncCnt++
return
}
if p.pos > p.syncPos {
p.syncPos = p.pos
p.syncCnt = 0
return
}
// Reaching here indicates a parser bug, likely an
// incorrect token list in this function, but it only
// leads to skipping of possibly correct code if a
// previous error is present, and thus is preferred
// over a non-terminating parse. | var stmtStart = map[token.Token]bool{
token.Abbrev: true,
token.Comment: true,
token.Preamble: true,
token.BibEntry: true,
token.Ident: true,
}
var entryStart = map[token.Token]bool{
token.Abbrev: true,
token.Comment: true,
token.Preamble: true,
token.BibEntry: true,
}
// isValidTagName returns true if the ident is a valid tag name.
// Uses rules according to Biber which means a tag key is a Bibtex name with the
// extra condition that it must begin with a letter:
// https://metacpan.org/pod/release/AMBS/Text-BibTeX-0.66/btparse/doc/bt_language.pod
func isValidTagName(key *ast.Ident) bool {
ch := key.Name[0]
return ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z')
}
func (p *parser) parseBasicLit() (l ast.Expr) {
switch p.tok {
case token.BraceString, token.String:
l = &ast.UnparsedText{
ValuePos: p.pos,
Type: p.tok,
Value: p.lit,
}
p.next()
case token.Number:
l = &ast.Number{
ValuePos: p.pos,
Value: p.lit,
}
p.next()
case token.Ident:
l = p.parseIdent()
default:
p.errorExpected(p.pos, "literal: number or string")
}
return
}
// parseMacroURL parses a TeX \url or \href macro. This is separate because
// urls use common LaTeX characters like ~ for non-breaking spaces.
func (p *parser) parseMacroURL(name string) ast.Expr {
urlCmd := &ast.TextMacro{Cmd: p.pos, Name: name}
p.next()
p.expect(token.StringLBrace)
sb := strings.Builder{}
sb.Grow(32)
for p.tok != token.StringRBrace && p.tok != token.StringSpace {
sb.WriteString(p.lit)
p.next()
}
urlCmd.Values = []ast.Expr{
&ast.Text{ValuePos: p.pos, Value: sb.String()},
}
if p.tok == token.StringSpace {
p.next()
}
urlCmd.RBrace = p.pos
pos := p.pos
if p.tok != token.StringRBrace {
p.errorExpected(pos, "'"+token.StringRBrace.String()+"'")
}
return urlCmd
}
func (p *parser) parseText(depth int) (txt ast.Expr) {
switch p.tok {
case token.StringMath:
txt = &ast.TextMath{ValuePos: p.pos, Value: p.lit}
case token.StringHyphen:
txt = &ast.TextHyphen{ValuePos: p.pos}
case token.StringNBSP:
txt = &ast.TextNBSP{ValuePos: p.pos}
case token.StringContents:
txt = &ast.Text{ValuePos: p.pos, Value: p.lit}
case token.StringSpace:
txt = &ast.TextSpace{ValuePos: p.pos, Value: p.lit}
case token.StringComma:
| }
}
}
| random_line_split |
parser.go | token.Illegal:
txt = &ast.BadExpr{From: p.pos, To: p.pos}
case token.StringLBrace: // recursive case
opener := p.pos
p.next()
values := make([]ast.Expr, 0, 2)
for p.tok != token.StringRBrace {
text := p.parseText(depth + 1)
if _, ok := text.(*ast.BadExpr); ok {
p.next()
return text
}
values = append(values, text)
}
p.next() // consume closing '}'
return &ast.ParsedText{
Depth: depth,
Opener: opener,
Delim: ast.BraceDelimiter,
Values: values,
Closer: p.pos,
}
default:
p.error(p.pos, "unknown text type: "+p.tok.String())
}
p.next()
return
}
func (p *parser) parseStringLiteral() ast.Expr {
pos := p.pos
switch tok := p.tok; tok {
case token.DoubleQuote:
p.next()
values := make([]ast.Expr, 0, 2)
for p.tok != token.DoubleQuote {
if p.tok == token.EOF {
p.errorExpected(p.pos, "double quote")
return &ast.BadExpr{From: pos, To: p.pos}
}
values = append(values, p.parseText(1))
}
p.next() // consume closing '"'
txt := &ast.ParsedText{
Opener: pos,
Depth: 0,
Delim: ast.QuoteDelimiter,
Values: values,
Closer: p.pos,
}
return txt
case token.StringLBrace:
return p.parseText(0)
default:
p.errorExpected(p.pos, "string literal")
p.advance(stmtStart)
return &ast.BadExpr{
From: pos,
To: p.pos,
}
}
}
func (p *parser) parseURLStringLiteral() ast.Expr {
sb := strings.Builder{}
sb.Grow(32)
pos := p.pos
switch tok := p.tok; tok {
case token.DoubleQuote:
p.next()
if p.tok == token.StringMacro {
url := p.parseMacroURL(p.lit)
p.expect(token.StringRBrace)
p.expect(token.DoubleQuote)
return &ast.ParsedText{
Opener: pos,
Depth: 0,
Delim: ast.QuoteDelimiter,
Values: []ast.Expr{url},
Closer: p.pos,
}
}
for p.tok != token.DoubleQuote {
if p.tok == token.EOF {
p.errorExpected(p.pos, "double quote")
return &ast.BadExpr{From: pos, To: p.pos}
}
sb.WriteString(p.lit)
p.next()
}
txt := &ast.Text{
ValuePos: pos,
Value: sb.String(),
}
p.expect(token.DoubleQuote)
return txt
case token.StringLBrace:
return p.parseText(0)
default:
p.errorExpected(p.pos, "string literal")
p.advance(stmtStart)
return &ast.BadExpr{
From: pos,
To: p.pos,
}
}
}
func (p *parser) parseExpr() (x ast.Expr) {
if p.trace {
defer un(trace(p, "Expr"))
}
pos := p.pos
switch {
case p.tok.IsLiteral():
x = p.parseBasicLit()
if p.tok == token.Concat {
p.next()
opPos := p.pos
y := p.parseExpr()
x = &ast.ConcatExpr{
X: x,
OpPos: opPos,
Y: y,
}
}
case p.tok.IsStringLiteral():
x = p.parseStringLiteral()
default:
p.errorExpected(p.pos, "literal: number or string")
x = &ast.BadExpr{
From: pos,
To: p.pos,
}
p.next() // make progress
}
return
}
func (p *parser) parseIdent() *ast.Ident {
pos := p.pos
name := "_"
switch p.tok {
// Bibtex cite keys may be all numbers, but tag keys may not. Allow either
// here and check one level up.
case token.Ident, token.Number:
name = p.lit
p.next()
default:
p.expect(token.Ident) // use expect() error handling
}
return &ast.Ident{NamePos: pos, Name: name}
}
func (p *parser) parseTagStmt() *ast.TagStmt {
if p.trace {
defer un(trace(p, "TagStmt"))
}
doc := p.leadComment
key := p.parseIdent()
p.expect(token.Assign)
val := p.parseExpr()
p.expectOptionalTagComma()
return &ast.TagStmt{
Doc: doc,
NamePos: key.Pos(),
Name: strings.ToLower(key.Name),
RawName: key.Name,
Value: val,
}
}
func (p *parser) expectCloser(open token.Token) gotok.Pos {
end := p.pos
switch open {
case token.LBrace:
end = p.expect(token.RBrace)
case token.LParen:
end = p.expect(token.RParen)
default:
p.error(p.pos, "no closing delimiter for "+open.String())
}
return end
}
func (p *parser) parsePreambleDecl() *ast.PreambleDecl {
if p.trace {
defer un(trace(p, "PreambleDecl"))
}
doc := p.leadComment
pos := p.expect(token.Preamble)
opener, _ := p.expectOne(token.LBrace, token.LParen)
text := p.parseExpr()
closer := p.expectCloser(opener)
return &ast.PreambleDecl{
Doc: doc,
Entry: pos,
Text: text,
RBrace: closer,
}
}
func (p *parser) parseAbbrevDecl() *ast.AbbrevDecl {
if p.trace {
defer un(trace(p, "AbbrevDecl"))
}
doc := p.leadComment
pos := p.expect(token.Abbrev)
opener, _ := p.expectOne(token.LBrace, token.LParen)
tag := p.parseTagStmt()
closer := p.expectCloser(opener)
return &ast.AbbrevDecl{
Doc: doc,
Entry: pos,
Tag: tag,
RBrace: closer,
}
}
// fixUpFields alters val based on tag type. For example, a url tag doesn't
// follow the normal Bibtex parsing rules because it's usually wrapped in a
// \url{} macro.
func fixUpFields(tag string, val ast.Expr) ast.Expr {
if tag == "url" {
txt, ok := val.(*ast.ParsedText)
if !ok || len(txt.Values) == 0 {
return val
}
child1, ok := txt.Values[0].(*ast.Text)
if !ok || !strings.HasPrefix(child1.Value, "http") {
return val
}
pos := child1.ValuePos
sb := strings.Builder{}
sb.Grow(32)
for _, child := range txt.Values {
if cTxt, ok := child.(*ast.Text); !ok {
return val
} else {
sb.WriteString(cTxt.Value)
}
}
return &ast.ParsedText{
Opener: txt.Opener,
Depth: txt.Depth,
Delim: txt.Delim,
Values: []ast.Expr{&ast.Text{ValuePos: pos, Value: sb.String()}},
Closer: txt.Closer,
}
}
return val
}
func (p *parser) parseBibDecl() *ast.BibDecl {
if p.trace {
defer un(trace(p, "BibDecl"))
}
doc := p.leadComment
entryType := p.lit[1:] // drop '@', e.g. "@book" -> "book"
pos := p.expect(token.BibEntry)
var bibKey *ast.Ident // use first key found as bibKey
var extraKeys []*ast.Ident
tags := make([]*ast.TagStmt, 0, 8)
opener, _ := p.expectOne(token.LBrace, token.LParen)
// A bibtex entry cite key may be all numbers but a tag key cannot.
for p.tok == token.Ident || p.tok == token.Number | {
doc := p.leadComment
key := p.parseIdent() // parses both ident and number
switch p.tok {
case token.Assign:
// It's a tag.
if !isValidTagName(key) {
p.error(key.Pos(), "tag keys must not start with a number")
}
p.next()
var val ast.Expr
if key.Name == "url" && p.tok.IsStringLiteral() {
val = p.parseURLStringLiteral()
} else {
val = p.parseExpr()
}
fixVal := fixUpFields(key.Name, val)
tag := &ast.TagStmt{
Doc: doc, | conditional_block |
|
parser.go | Opener: pos,
Depth: 0,
Delim: ast.QuoteDelimiter,
Values: values,
Closer: p.pos,
}
return txt
case token.StringLBrace:
return p.parseText(0)
default:
p.errorExpected(p.pos, "string literal")
p.advance(stmtStart)
return &ast.BadExpr{
From: pos,
To: p.pos,
}
}
}
func (p *parser) parseURLStringLiteral() ast.Expr {
sb := strings.Builder{}
sb.Grow(32)
pos := p.pos
switch tok := p.tok; tok {
case token.DoubleQuote:
p.next()
if p.tok == token.StringMacro {
url := p.parseMacroURL(p.lit)
p.expect(token.StringRBrace)
p.expect(token.DoubleQuote)
return &ast.ParsedText{
Opener: pos,
Depth: 0,
Delim: ast.QuoteDelimiter,
Values: []ast.Expr{url},
Closer: p.pos,
}
}
for p.tok != token.DoubleQuote {
if p.tok == token.EOF {
p.errorExpected(p.pos, "double quote")
return &ast.BadExpr{From: pos, To: p.pos}
}
sb.WriteString(p.lit)
p.next()
}
txt := &ast.Text{
ValuePos: pos,
Value: sb.String(),
}
p.expect(token.DoubleQuote)
return txt
case token.StringLBrace:
return p.parseText(0)
default:
p.errorExpected(p.pos, "string literal")
p.advance(stmtStart)
return &ast.BadExpr{
From: pos,
To: p.pos,
}
}
}
func (p *parser) parseExpr() (x ast.Expr) {
if p.trace {
defer un(trace(p, "Expr"))
}
pos := p.pos
switch {
case p.tok.IsLiteral():
x = p.parseBasicLit()
if p.tok == token.Concat {
p.next()
opPos := p.pos
y := p.parseExpr()
x = &ast.ConcatExpr{
X: x,
OpPos: opPos,
Y: y,
}
}
case p.tok.IsStringLiteral():
x = p.parseStringLiteral()
default:
p.errorExpected(p.pos, "literal: number or string")
x = &ast.BadExpr{
From: pos,
To: p.pos,
}
p.next() // make progress
}
return
}
func (p *parser) parseIdent() *ast.Ident {
pos := p.pos
name := "_"
switch p.tok {
// Bibtex cite keys may be all numbers, but tag keys may not. Allow either
// here and check one level up.
case token.Ident, token.Number:
name = p.lit
p.next()
default:
p.expect(token.Ident) // use expect() error handling
}
return &ast.Ident{NamePos: pos, Name: name}
}
func (p *parser) parseTagStmt() *ast.TagStmt {
if p.trace {
defer un(trace(p, "TagStmt"))
}
doc := p.leadComment
key := p.parseIdent()
p.expect(token.Assign)
val := p.parseExpr()
p.expectOptionalTagComma()
return &ast.TagStmt{
Doc: doc,
NamePos: key.Pos(),
Name: strings.ToLower(key.Name),
RawName: key.Name,
Value: val,
}
}
func (p *parser) expectCloser(open token.Token) gotok.Pos {
end := p.pos
switch open {
case token.LBrace:
end = p.expect(token.RBrace)
case token.LParen:
end = p.expect(token.RParen)
default:
p.error(p.pos, "no closing delimiter for "+open.String())
}
return end
}
func (p *parser) parsePreambleDecl() *ast.PreambleDecl {
if p.trace {
defer un(trace(p, "PreambleDecl"))
}
doc := p.leadComment
pos := p.expect(token.Preamble)
opener, _ := p.expectOne(token.LBrace, token.LParen)
text := p.parseExpr()
closer := p.expectCloser(opener)
return &ast.PreambleDecl{
Doc: doc,
Entry: pos,
Text: text,
RBrace: closer,
}
}
func (p *parser) parseAbbrevDecl() *ast.AbbrevDecl {
if p.trace {
defer un(trace(p, "AbbrevDecl"))
}
doc := p.leadComment
pos := p.expect(token.Abbrev)
opener, _ := p.expectOne(token.LBrace, token.LParen)
tag := p.parseTagStmt()
closer := p.expectCloser(opener)
return &ast.AbbrevDecl{
Doc: doc,
Entry: pos,
Tag: tag,
RBrace: closer,
}
}
// fixUpFields alters val based on tag type. For example, a url tag doesn't
// follow the normal Bibtex parsing rules because it's usually wrapped in a
// \url{} macro.
func fixUpFields(tag string, val ast.Expr) ast.Expr {
if tag == "url" {
txt, ok := val.(*ast.ParsedText)
if !ok || len(txt.Values) == 0 {
return val
}
child1, ok := txt.Values[0].(*ast.Text)
if !ok || !strings.HasPrefix(child1.Value, "http") {
return val
}
pos := child1.ValuePos
sb := strings.Builder{}
sb.Grow(32)
for _, child := range txt.Values {
if cTxt, ok := child.(*ast.Text); !ok {
return val
} else {
sb.WriteString(cTxt.Value)
}
}
return &ast.ParsedText{
Opener: txt.Opener,
Depth: txt.Depth,
Delim: txt.Delim,
Values: []ast.Expr{&ast.Text{ValuePos: pos, Value: sb.String()}},
Closer: txt.Closer,
}
}
return val
}
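// Illustrative sketch (not part of the original parser; the literal values are assumptions):
// given a ParsedText whose children are plain Text nodes that together form an http URL,
// fixUpFields joins them into a single Text child, e.g.
//
//	val := &ast.ParsedText{Values: []ast.Expr{
//		&ast.Text{Value: "https://example.com/"},
//		&ast.Text{Value: "paper"},
//	}}
//	fixed := fixUpFields("url", val)
//	// fixed.(*ast.ParsedText).Values[0].(*ast.Text).Value == "https://example.com/paper"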
func (p *parser) parseBibDecl() *ast.BibDecl {
if p.trace {
defer un(trace(p, "BibDecl"))
}
doc := p.leadComment
entryType := p.lit[1:] // drop '@', e.g. "@book" -> "book"
pos := p.expect(token.BibEntry)
var bibKey *ast.Ident // use first key found as bibKey
var extraKeys []*ast.Ident
tags := make([]*ast.TagStmt, 0, 8)
opener, _ := p.expectOne(token.LBrace, token.LParen)
// A bibtex entry cite key may be all numbers but a tag key cannot.
for p.tok == token.Ident || p.tok == token.Number {
doc := p.leadComment
key := p.parseIdent() // parses both ident and number
switch p.tok {
case token.Assign:
// It's a tag.
if !isValidTagName(key) {
p.error(key.Pos(), "tag keys must not start with a number")
}
p.next()
var val ast.Expr
if key.Name == "url" && p.tok.IsStringLiteral() {
val = p.parseURLStringLiteral()
} else {
val = p.parseExpr()
}
fixVal := fixUpFields(key.Name, val)
tag := &ast.TagStmt{
Doc: doc,
NamePos: key.Pos(),
Name: strings.ToLower(key.Name),
RawName: key.Name,
Value: fixVal,
}
tags = append(tags, tag)
}
switch p.tok {
case token.Comma:
// It's a cite key.
p.next()
if bibKey == nil {
bibKey = key
} else {
extraKeys = append(extraKeys, key)
}
continue
}
}
closer := p.expectCloser(opener)
p.expectOptional(token.Comma) // trailing commas allowed
return &ast.BibDecl{
Type: entryType,
Doc: doc,
Entry: pos,
Key: bibKey,
ExtraKeys: extraKeys,
Tags: tags,
RBrace: closer,
}
}
func (p *parser) parseDecl() ast.Decl {
if p.trace {
defer un(trace(p, "Declaration"))
}
switch p.tok {
case token.Preamble:
return p.parsePreambleDecl()
case token.Abbrev:
return p.parseAbbrevDecl()
case token.BibEntry:
return p.parseBibDecl()
default:
pos := p.pos
p.errorExpected(pos, "entry")
p.advance(entryStart)
return &ast.BadDecl{
From: pos,
To: p.pos,
}
}
}
// ----------------------------------------------------------------------------
// Source files
func (p *parser) | parseFile | identifier_name |
|
DisWavenet.py | 25,47.95.32.212:2226,47.95.32.212:2227,"
"47.95.32.225:2222,47.95.32.225:2223,47.95.32.225:2224,47.95.32.225:2225,47.95.32.225:2226,"
"47.95.33.5:2222,47.95.33.5:2223,47.95.33.5:2224,47.95.33.5:2225,47.95.33.5:2226,"
"47.94.41.45:2222,47.94.41.45:2223,47.94.41.45:2224,47.94.41.45:2225,47.94.41.45:2226,"
"47.95.33.8:2222,47.95.33.8:2223,47.95.33.8:2224,47.95.33.8:2225,47.95.33.8:2226,"
"47.95.33.15:2222,47.95.33.15:2223,47.95.33.15:2224,47.95.33.15:2225,47.95.33.15:2226,"
"47.94.40.26:2222,47.94.40.26:2223,47.94.40.26:2224,47.94.40.26:2225,47.94.40.26:2226,"
"47.95.32.137:2222,47.95.32.137:2223,47.95.32.137:2224,47.95.32.137:2225,47.95.32.137:2226,"
"47.93.136.26:2222,47.93.136.26:2223,47.93.136.26:2224,47.93.136.26:2225,47.93.136.26:2226,"
"47.95.32.230:2222,47.95.32.230:2223,47.95.32.230:2224,47.95.32.230:2225,47.95.32.230:2226,"
"47.95.32.182:2222,47.95.32.182:2223,47.95.32.182:2224,47.95.32.182:2225,47.95.32.182:2226",
"Comma_separated list of hostname :port pairs")
#Whether this job is a worker or a parameter server
flags.DEFINE_string("job_name",None,"job name:worker or ps")
FLAGS=flags.FLAGS
#IMAGE_PIXELS=28
#Read the cluster description
ps_spec=FLAGS.ps_hosts.split(",")
worker_spec=FLAGS.worker_hosts.split(",")
"""临时添加"""
num_workers=len(worker_spec)
#Create the TensorFlow cluster description object
cluster=tf.train.ClusterSpec({
"ps":ps_spec,
"worker":worker_spec
})
#Create a TensorFlow Server object for the locally executed task
if not FLAGS.existing_servers:
"""
创建本地Server 对象,从tf.train.Server 这个定义开始,每个节点开始不同
根据执行的命令的参数(作业名字)不同,决定啦这个任务是那个任务
如果作业名字是ps,进程就加入到这里,作为参数更新的服务,等待其他工作节点给他提交参数更新的数据
如果作业名字是worker,就执行后面的计算任务
"""
server=tf.train.Server(cluster,job_name=FLAGS.job_name,task_index=FLAGS.task_index)
#If this is a parameter server, just start it; the process will then block here.
#The tf.train.replica_device_setter call below assigns the variables to the ps server for safekeeping.
if FLAGS.job_name =="ps":
server.join()
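# Illustrative launch commands (not part of the original script; the flag values are assumptions):
# the same file is started once per task, and --job_name / --task_index decide whether a process
# becomes a parameter server or a worker, e.g.
#   python DisWavenet.py --job_name=ps     --task_index=0
#   python DisWavenet.py --job_name=worker --task_index=0 --cpu_index=0 --n_epoch=100
#   python DisWavenet.py --job_name=worker --task_index=1 --cpu_index=1 --n_epoch=100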
#Find the chief worker node, i.e. the one whose task_index is 0
is_chief=(FLAGS.task_index==0)
#If GPUs are used
if FLAGS.num_gpus >0:
# if FLAGS.num_gpus < num_workers:
# raise ValueError("number of gpus is less than number of workers")
gpu=(FLAGS.task_index %FLAGS.num_gpus)
#Assign the worker to run on the designated GPU
worker_device ="/job:worker/task:%d/gpu:%d"%(FLAGS.task_index,gpu)
elif FLAGS.num_gpus ==0:
#Assign a CPU to the worker
#cpu = 0
worker_device ="/job:worker/task:%d/cpu:%d"%(FLAGS.task_index,FLAGS.cpu_index)
#Variables defined under this with-statement are automatically placed on the parameter servers; with several parameter servers they are assigned in turn
with tf.device(
tf.train.replica_device_setter(
worker_device=worker_device,
ps_device="/job:ps/cpu:0",
cluster=cluster
)
):
# Data path
data_path = "/root/mickey21/wavenet/"
compute_sample_num,sample_rate=FLAGS.compute_sample_num,FLAGS.sample_rate
batch_size,n_mfcc=FLAGS.batch_size,FLAGS.n_mfcc
n_epoch=FLAGS.n_epoch
# join() concatenates two or more path components
load_path = os.path.join(data_path, "aishell_num_%d_samplerate_%d" % (compute_sample_num, sample_rate))
# Define the global step with a default value of 0
global_step = tf.Variable(0, name="global_step", trainable=False)
print("start loading data ")
new_train = np.load(os.path.join(load_path, "wav_feature.npy"))
labels_vec = np.load(os.path.join(load_path, "txt_decoder.npy"))
train_seq_len = np.load(os.path.join(load_path, "wav_seqlen.npy"))
# pickle is one way of persisting objects
char_index = pickle.load(open(os.path.join(load_path, 'char_index.pkl'), 'rb'))
index_char = pickle.load(open(os.path.join(load_path, 'index_char.pkl'), 'rb'))
print("loading data over")
num_examples = new_train.shape[0]
# Number of batches per epoch
num_batches_per_epoch = int(num_examples / batch_size)
train_inputs = new_train
train_targets = []
for index in range(labels_vec.shape[0] // batch_size):
train_targets.append(sparse_tuple_from(labels_vec[index * batch_size: (index + 1) * batch_size, :]))
# Set up the validation set
val_inputs, val_targets, val_seq_len = train_inputs, train_targets, train_seq_len
# Define the network model
model = Model(vocab_size=len(index_char.items()), max_seq_len=np.max(train_seq_len),
batch_size=batch_size, n_mfcc=n_mfcc)
#Define the loss function and its value
loss = tf.nn.ctc_loss(model.targets, model.logit, model.seq_len, time_major=False)
cost = tf.reduce_mean(loss)
#Define the optimizer
optimizer = tf.train.AdamOptimizer()
var_list = [var for var in tf.trainable_variables()]
gradient = optimizer.compute_gradients(cost, var_list=var_list)
#optimizer_op = optimizer.apply_gradients(gradient)
#Transpose
decoded = tf.transpose(model.logit, perm=[1, 0, 2])
decoded, log_prob = tf.nn.ctc_greedy_decoder(decoded, model.seq_len)
#
if FLAGS.sync_replicas:
# Synchronous mode: compute and aggregate gradient updates
rep_op = tf.train.SyncReplicasOptimizer(optimizer,
replicas_to_aggregate=len(
worker_spec),
replica | _id=FLAGS.task_index,
total_num_replicas=len(
| conditional_block |
|
DisWavenet.py | =worker_device,
ps_device="/job:ps/cpu:0",
cluster=cluster
)
):
# Data path
data_path = "/root/mickey21/wavenet/"
compute_sample_num,sample_rate=FLAGS.compute_sample_num,FLAGS.sample_rate
batch_size,n_mfcc=FLAGS.batch_size,FLAGS.n_mfcc
n_epoch=FLAGS.n_epoch
# join() concatenates two or more path components
load_path = os.path.join(data_path, "aishell_num_%d_samplerate_%d" % (compute_sample_num, sample_rate))
# Define the global step with a default value of 0
global_step = tf.Variable(0, name="global_step", trainable=False)
print("start loading data ")
new_train = np.load(os.path.join(load_path, "wav_feature.npy"))
labels_vec = np.load(os.path.join(load_path, "txt_decoder.npy"))
train_seq_len = np.load(os.path.join(load_path, "wav_seqlen.npy"))
# pickle is one way of persisting objects
char_index = pickle.load(open(os.path.join(load_path, 'char_index.pkl'), 'rb'))
index_char = pickle.load(open(os.path.join(load_path, 'index_char.pkl'), 'rb'))
print("loading data over")
num_examples = new_train.shape[0]
# Number of batches per epoch
num_batches_per_epoch = int(num_examples / batch_size)
train_inputs = new_train
train_targets = []
for index in range(labels_vec.shape[0] // batch_size):
train_targets.append(sparse_tuple_from(labels_vec[index * batch_size: (index + 1) * batch_size, :]))
# Set up the validation set
val_inputs, val_targets, val_seq_len = train_inputs, train_targets, train_seq_len
# Define the network model
model = Model(vocab_size=len(index_char.items()), max_seq_len=np.max(train_seq_len),
batch_size=batch_size, n_mfcc=n_mfcc)
#Define the loss function and its value
loss = tf.nn.ctc_loss(model.targets, model.logit, model.seq_len, time_major=False)
cost = tf.reduce_mean(loss)
#Define the optimizer
optimizer = tf.train.AdamOptimizer()
var_list = [var for var in tf.trainable_variables()]
gradient = optimizer.compute_gradients(cost, var_list=var_list)
#optimizer_op = optimizer.apply_gradients(gradient)
#Transpose
decoded = tf.transpose(model.logit, perm=[1, 0, 2])
decoded, log_prob = tf.nn.ctc_greedy_decoder(decoded, model.seq_len)
#
if FLAGS.sync_replicas:
# Synchronous mode: compute and aggregate gradient updates
rep_op = tf.train.SyncReplicasOptimizer(optimizer,
replicas_to_aggregate=len(
worker_spec),
replica_id=FLAGS.task_index,
total_num_replicas=len(
worker_spec),
use_locking=True)
train_op = rep_op.apply_gradients(gradient,
global_step=global_step)
init_token_op = rep_op.get_init_tokens_op()
chief_queue_runner = rep_op.get_chief_queue_runner()
else:
# Asynchronous mode: compute gradient updates and apply them to the variables
"""This is the second half of minimize(); it returns an op that applies the gradient updates.
def apply_gradients(self, grads_and_vars, global_step=None, name=None)
grads_and_vars: list of (gradient, variable) pairs returned by `compute_gradients()`
global_step: (optional) incremented by one after the variables have been updated
name: name for the returned operation
"""
train_op = optimizer.apply_gradients(gradient,
global_step=global_step)
#Predicted values
predict = tf.sparse_to_dense(decoded[0].indices, decoded[0].dense_shape, decoded[0].values)
ler = tf.reduce_mean(tf.edit_distance(tf.cast(decoded[0], tf.int32), model.targets))
var_op = tf.global_variables()
var_trainable_op = tf.trainable_variables()
saver=tf.train.Saver()
#Check whether this is the chief node
if FLAGS.sync_replicas:
local_init_op = optimizer.local_step_init_op
if is_chief:
#Among all the compute workers there is one chief worker.
#The chief is responsible for initializing variables, saving the model, writing summaries, etc.
local_init_op=optimizer.chief_init_op
ready_for_local_init_op=optimizer.ready_for_local_init_op
#Initial tokens and chief queue runner required by synchronous training mode
chief_queue_runner=optimizer.get_chief_queue_runner()
sync_init_op=optimizer.get_init_tokens_op()
#Create the directory where logs are written
train_dir = tempfile.mkdtemp()
# Initialization op
init_op=tf.global_variables_initializer()
if FLAGS.sync_replicas:
"""
创建一个监管程序,用于统计训练模型过程中的信息
logdir是保存和加载模型的路径
启动就会去这个logdir目录看是否有检查点文件,有的话自动加载
没有就用init_op指定的初始化参数
主工作节点(chief)负责模型初始化等工作
在这个工程中,其他工作节点等待主节点完成初始化等工作,初始化完成后,一起开始训练数据
global_step的值所有计算节点共享的
在执行损失函数最小值时自动加1,通过gloab_step能知道所有节点一共计算啦多少步
"""
sv=tf.train.Supervisor(
is_chief=is_chief,
logdir=train_dir,
init_op=init_op,
local_init_op=local_init_op,
ready_for_local_init_op=ready_for_local_init_op,
global_step=global_step
)
else:
sv=tf.train.Supervisor(
is_chief=is_chief,
logdir=train_dir,
init_op=init_op,
recovery_wait_secs=1,
global_step=global_step,
saver=saver
)
# When creating the session, set allow_soft_placement to True so that every op runs on its assigned device (e.g. a GPU) by default,
# and automatically falls back to the CPU when the op has no GPU implementation.
sess_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False,
device_filters=["/job:ps", "/job:worker/task:%d" % FLAGS.task_index]
)
# The chief worker, i.e. the node with task_index 0, initializes the session;
# the remaining workers wait until the session has been initialized before computing.
if is_chief:
print ("Worker %d: Initializing session ..." % FLAGS.task_index)
else:
print ("Worker %d: Waiting for session to be initialized ..." % FLAGS.task_index)
if FLAGS.existing_servers:
server_grpc_url = "grpc://" + worker_spec[FLAGS.task_index]
print ("Using existing server at : %s" % server_grpc_url)
# Create the TensorFlow session object used to run the graph computation.
# prepare_or_wait_for_session waits until the parameters are initialized and the chief is ready before training starts.
sess = sv.prepare_or_wait_for_session(server_grpc_url, config=sess_config)
else:
sess = sv.prepare_or_wait_for_session(server.target, config=sess_config)
print ("Worker %d: Session initialization complete." % FLAGS.task_index)
"""开始执行分布式训练"""
#time_begin = time.time()
time_begin = datetime.datetime.now()
print ("Training begins @ %s" % str(time_begin))
#sess.run(init_op)
#saver = tf.train.Saver(var_op)
train_cost = train_ler = 0
cur_epoch=0
while True:
start = datetime.datetime.now()
for batch in range(num_batches_per_epoch):
feed = {model.input_data: train_inputs[batch * batch_size: (batch + 1) * batch_size],
model.targets: train_targets[batch],
model.seq_len: train_seq_len[batch * batch_size: (batch + 1) * batch_size]}
batch_cost, _,step,ler_cost = sess.run([cost, train_op,global_step,ler], feed_dict=feed)
train_cost += batch_cost * batch_size
train_ler += ler_cost * batch_size
sum_val_cost = sum_val_ler = 0.
train_cost /= num_examples
train_ler /= num_examples
cur_epoch+=1
log = "Epoch {}/{}, train_cost = {:.3f}, train_ler = {:.3f}, val_cost = {:.3f}, val_ler = {:.3f}, time = {:.23s}"
print(log.format(cur_epoch , n_epoch, train_cost, train_ler, sum_val_cost / num_batches_per_epoch,
sum_val_ler / num_batches_per_epoch, str(datetime.datetime.now() - start)))
# save("log"+str(cur_epoch), "第" + str(cur_epoch) + "结束于耗时" + str(datetime.datetime.now() - start) + """
# """)
# save models
if cur_epoch % 5 == 0:
saver.save(sess, os.path.join(os.getcwd(), 'model', 'speech.module'), global_step=cur_epoch)
if step >= num_batches_per_epoch*n_epoch:
time_end = datetime.datetime.now()
print ("all training over at @"+str(time_end)+" last :"+str(time_end-time_begin))
break | random_line_split |
||
DisWavenet.py | all parameters of a tensorflow graph
'''
if mode == 'all':
num = np.sum([np.product([xi.value for xi in x.get_shape()]) for x in model.var_op])
elif mode == 'trainable':
num = np.sum([np.product([xi.value for xi in x.get_shape()]) for x in model.var_trainable_op])
else:
raise TypeError('model should be all or trainable.')
print('number of ' + mode + ' parameters: ' + str(num))
return num
"""本代码采用图间模式,异步更新"""
#忽视忽视警告,并屏蔽警告
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#Define some constants used to build the dataflow graph
flags = tf.app.flags
flags.DEFINE_string("data_dir","/tmp/mnist-data","Directory for storing mnist data")
#Only download the data; do nothing else
flags.DEFINE_boolean("download_only",False,"Only perform downloading of data;Do not to"
"sessioin preparation,model definition or training")
#task_index starts at 0; task_index=0 denotes the first task, which is used to initialize the variables
flags.DEFINE_integer("task_index",None,"Worker task index,should >=0.task_index=0 is the master"
" worker task that performs the variable initialization")
#Number of GPUs on each machine; this runs on the cluster, which has """no GPU"""
flags.DEFINE_integer("num_gpus",0,"Total number of gpus for each machine.If you don't use GPU,please set it to '0'")
#In synchronous training mode, set the number of worker nodes to collect from. The default is the total number of workers
flags.DEFINE_integer("replicas_to_aggregate",
None,"Number of replicas to aggregate before parameter update is applied"
"(For sync_replicas mode only;default:num workers)")
#Mel-frequency cepstral coefficients (speech recognition)
flags.DEFINE_integer("n_mfcc",20,"Number of units in the hidden layer of the NN")
# Number of training epochs
flags.DEFINE_integer("n_epoch",None, "Number of (global) training steps to perform")
# CPU index
flags.DEFINE_integer("cpu_index",None, "Number of (global) training steps to perform")
#Number of samples per batch
flags.DEFINE_integer("batch_size",50,"Training batch size")
#Number of samples to compute
flags.DEFINE_integer("compute_sample_num",10000,"Training batch size")
#Sample rate
flags.DEFINE_integer("sample_rate",16000,"Training batch size")
#Learning rate
#flags.DEFINE_float("learning_rate",0.01,"Learning rate")
#Use synchronous / asynchronous training
flags.DEFINE_boolean("sync_replicas",False,"Use the sync_replicas (synchronized replicas)"
" mode,wherein the parameter updates from workers are "
"aggregated before applied to avoid stale gradients")
#If the servers already exist, communicate via gRPC; otherwise use in-process communication
flags.DEFINE_boolean("existing_servers",False,"Whether servers already exist.If True,will use "
"the worker hosts via their GRPC URLS(one client "
"process per worker hosts).Otherwise,will create an in_process Tensorflow server.")
#Parameter server hosts
flags.DEFINE_string("ps_hosts","47.95.32.212:2222","Comma_separated list of hostname:port pairs")
#Worker node hosts
"""
47.95.32.212 iZ2zedu05kkqson2296xbcZ used as the parameter server
47.95.32.225 iZ2zedu05kkqson2296xb8Z
47.95.33.5 iZ2zedu05kkqson2296xbaZ #
47.94.41.45 iZ2zedu05kkqson2296xbbZ # no hbase
47.95.33.8 iZ2zedu05kkqson2296xbdZ # no hbase @ has presto
47.95.33.15 iZ2zedu05kkqson2296xb9Z # no hbase @ has presto
47.94.40.26 iZ2zedu05kkqson2296xbeZ # no hbase
47.95.32.137 iZ2zedu05kkqson2296xbiZ # @ has presto
47.93.136.26 iZ2zedu05kkqson2296xbgZ #
47.95.32.230 iZ2zedu05kkqson2296xbfZ # no hbase @ has presto
47.95.32.182 iZ2zedu05kkqson2296xbhZ # no hbase @ has presto
47.93.55.184 gpu
"""
flags.DEFINE_string("worker_hosts","47.95.32.212:2223,47.95.32.212:2224,47.95.32.212:2225,47.95.32.212:2226,47.95.32.212:2227,"
"47.95.32.225:2222,47.95.32.225:2223,47.95.32.225:2224,47.95.32.225:2225,47.95.32.225:2226,"
"47.95.33.5:2222,47.95.33.5:2223,47.95.33.5:2224,47.95.33.5:2225,47.95.33.5:2226,"
"47.94.41.45:2222,47.94.41.45:2223,47.94.41.45:2224,47.94.41.45:2225,47.94.41.45:2226,"
"47.95.33.8:2222,47.95.33.8:2223,47.95.33.8:2224,47.95.33.8:2225,47.95.33.8:2226,"
"47.95.33.15:2222,47.95.33.15:2223,47.95.33.15:2224,47.95.33.15:2225,47.95.33.15:2226,"
"47.94.40.26:2222,47.94.40.26:2223,47.94.40.26:2224,47.94.40.26:2225,47.94.40.26:2226,"
"47.95.32.137:2222,47.95.32.137:2223,47.95.32.137:2224,47.95.32.137:2225,47.95.32.137:2226,"
"47.93.136.26:2222,47.93.136.26:2223,47.93.136.26:2224,47.93.136.26:2225,47.93.136.26:2226,"
"47.95.32.230:2 | erate(sequences):#强转为枚举类
indices.extend(zip([n] * len(seq), range(len(seq))))
values.extend(seq)
indices = np.asarray(indices, dtype=np.int64)
values = np.asarray(values, dtype=dtype)
shape = np.asarray([len(sequences), np.asarray(indices).max(0)[1] + 1], dtype=np.int64)
return [indices, values, shape]
#Count parameters
def count_params(model, mode='trainable'):
''' count | identifier_body |
|
DisWavenet.py | lues = []
for n, seq in enumerate(sequences):# wrap the sequences in an enumeration
indices.extend(zip([n] * len(seq), range(len(seq))))
values.extend(seq)
indices = np.asarray(indices, dtype=np.int64)
values = np.asarray(values, dtype=dtype)
shape = np.asarray([len(sequences), np.asarray(indices).max(0)[1] + 1], dtype=np.int64)
return [indices, values, shape]
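# Illustrative sketch (an assumption, not in the original file): this helper, used above as
# sparse_tuple_from, builds the (indices, values, dense_shape) triple that sparse CTC targets
# expect, e.g.
#   indices, values, shape = sparse_tuple_from([[1, 2, 3], [4, 5]])
#   # indices -> [[0,0],[0,1],[0,2],[1,0],[1,1]], values -> [1,2,3,4,5], shape -> [2, 3]
#   targets = tf.SparseTensor(indices, values, shape)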
#Count parameters
def count_params(model, mode='trainable'):
''' count all parameters of a tensorflow graph
'''
if mode == 'all':
num = np.sum([np.product([xi.value for xi in x.get_shape()]) for x in model.var_op])
elif mode == 'trainable':
num = np.sum([np.product([xi.value for xi in x.get_shape()]) for x in model.var_trainable_op])
else:
raise TypeError('model should be all or trainable.')
print('number of ' + mode + ' parameters: ' + str(num))
return num
"""本代码采用图间模式,异步更新"""
#忽视忽视警告,并屏蔽警告
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#Define some constants used to build the dataflow graph
flags = tf.app.flags
flags.DEFINE_string("data_dir","/tmp/mnist-data","Directory for storing mnist data")
#Only download the data; do nothing else
flags.DEFINE_boolean("download_only",False,"Only perform downloading of data;Do not to"
"sessioin preparation,model definition or training")
#task_index starts at 0; task_index=0 denotes the first task, which is used to initialize the variables
flags.DEFINE_integer("task_index",None,"Worker task index,should >=0.task_index=0 is the master"
" worker task that performs the variable initialization")
#Number of GPUs on each machine; this runs on the cluster, which has """no GPU"""
flags.DEFINE_integer("num_gpus",0,"Total number of gpus for each machine.If you don't use GPU,please set it to '0'")
#In synchronous training mode, set the number of worker nodes to collect from. The default is the total number of workers
flags.DEFINE_integer("replicas_to_aggregate",
None,"Number of replicas to aggregate before parameter update is applied"
"(For sync_replicas mode only;default:num workers)")
#Mel-frequency cepstral coefficients (speech recognition)
flags.DEFINE_integer("n_mfcc",20,"Number of units in the hidden layer of the NN")
# Number of training epochs
flags.DEFINE_integer("n_epoch",None, "Number of (global) training steps to perform")
# CPU index
flags.DEFINE_integer("cpu_index",None, "Number of (global) training steps to perform")
#Number of samples per batch
flags.DEFINE_integer("batch_size",50,"Training batch size")
#Number of samples to compute
flags.DEFINE_integer("compute_sample_num",10000,"Training batch size")
#Sample rate
flags.DEFINE_integer("sample_rate",16000,"Training batch size")
#Learning rate
#flags.DEFINE_float("learning_rate",0.01,"Learning rate")
#Use synchronous / asynchronous training
flags.DEFINE_boolean("sync_replicas",False,"Use the sync_replicas (synchronized replicas)"
" mode,wherein the parameter updates from workers are "
"aggregated before applied to avoid stale gradients")
#If the servers already exist, communicate via gRPC; otherwise use in-process communication
flags.DEFINE_boolean("existing_servers",False,"Whether servers already exist.If True,will use "
"the worker hosts via their GRPC URLS(one client "
"process per worker hosts).Otherwise,will create an in_process Tensorflow server.")
#Parameter server hosts
flags.DEFINE_string("ps_hosts","47.95.32.212:2222","Comma_separated list of hostname:port pairs")
#Worker node hosts
"""
47.95.32.212 iZ2zedu05kkqson2296xbcZ used as the parameter server
47.95.32.225 iZ2zedu05kkqson2296xb8Z
47.95.33.5 iZ2zedu05kkqson2296xbaZ #
47.94.41.45 iZ2zedu05kkqson2296xbbZ # no hbase
47.95.33.8 iZ2zedu05kkqson2296xbdZ # no hbase @ has presto
47.95.33.15 iZ2zedu05kkqson2296xb9Z # no hbase @ has presto
47.94.40.26 iZ2zedu05kkqson2296xbeZ # no hbase
47.95.32.137 iZ2zedu05kkqson2296xbiZ # @ has presto
47.93.136.26 iZ2zedu05kkqson2296xbgZ #
47.95.32.230 iZ2zedu05kkqson2296xbfZ # no hbase @ has presto
47.95.32.182 iZ2zedu05kkqson2296xbhZ # no hbase @ has presto
47.93.55.184 gpu
"""
flags.DEFINE_string("worker_hosts","47.95.32.212:2223,47.95.32.212:2224,47.95.32.212:2225,47.95.32.212:2226,47.95.32.212:2227,"
"47.95.32.225:2222,47.95.32.225:2223,47.95.32.225:2224,47.95.32.225:2225,47.95.32.225:2226,"
"47.95.33.5:2222,47.95.33.5:2223,47.95.33.5:2224,47.95.33.5:2225,47.95.33.5:2226,"
"47.94.41.45:2222,47.94.41.45:2223,47.94.41.45:2224,47.94.41.45:2225,47.94.41.45:2226,"
"47.95.33.8:2222,47.95.33.8:2223,47.95.33.8:2224,47.95.33.8:2225,47.95.33.8:2226,"
"47.95.33.15:2222,47.95.33.15:2223,47.95.33.15:2224,47.95.33.15:2225,47.95.33.15:2226,"
"47.94.40.26:2222,47.94.40.26:2223,47.94.40.26:2224,47.94.40.26:2225,47.94.40.26:2226,"
"47.95.32.137:2222,47.95.32.137:2223,47.95.32.137:2224,47.95.32.137:2225,47.95.32.137:2226,"
"47.93.136.26:2222,47.93.136.26:2223,47.93.136.26:2224,47.93.136.26:2225,47.93.136.26:2226,"
| dices = []
va | identifier_name |
|
db_transaction.rs | , BlockHeader},
proof_of_work::Difficulty,
transactions::{
transaction::{TransactionInput, TransactionKernel, TransactionOutput},
types::HashOutput,
},
};
use serde::{Deserialize, Serialize};
use std::fmt::{Display, Error, Formatter};
use strum_macros::Display;
use tari_crypto::tari_utilities::{hex::to_hex, Hashable};
#[derive(Debug)]
pub struct DbTransaction {
pub operations: Vec<WriteOperation>,
}
impl Display for DbTransaction {
fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
fmt.write_str("Db transaction: \n")?;
for write_op in &self.operations {
fmt.write_str(&format!("{}\n", write_op))?;
}
Ok(())
}
}
impl Default for DbTransaction {
fn default() -> Self {
DbTransaction {
operations: Vec::with_capacity(128),
}
}
}
impl DbTransaction {
/// Creates a new Database transaction. To commit the transaction, call [BlockchainDatabase::execute] with the
/// transaction as a parameter.
pub fn new() -> Self {
DbTransaction::default()
}
/// A general insert request. There are convenience functions for specific insert queries.
pub fn insert(&mut self, insert: DbKeyValuePair) {
self.operations.push(WriteOperation::Insert(insert));
}
/// A general insert request. There are convenience functions for specific delete queries.
pub fn delete(&mut self, delete: DbKey) {
self.operations.push(WriteOperation::Delete(delete));
}
/// Inserts a transaction kernel into the current transaction.
pub fn insert_kernel(&mut self, kernel: TransactionKernel, update_mmr: bool) {
let hash = kernel.hash();
self.insert(DbKeyValuePair::TransactionKernel(hash, Box::new(kernel), update_mmr));
}
/// Inserts a block header into the current transaction.
pub fn insert_header(&mut self, header: BlockHeader) {
let height = header.height;
self.insert(DbKeyValuePair::BlockHeader(height, Box::new(header)));
}
/// Adds a UTXO into the current transaction and updates the TXO MMR.
pub fn insert_utxo(&mut self, utxo: TransactionOutput, update_mmr: bool) {
let hash = utxo.hash();
self.insert(DbKeyValuePair::UnspentOutput(hash, Box::new(utxo), update_mmr));
}
/// Adds a UTXO into the current transaction and updates the TXO MMR. This is a test-only function used to ensure we
/// block duplicate entries. This function does not calculate the hash itself but accepts one as a parameter.
pub fn insert_utxo_with_hash(&mut self, hash: Vec<u8>, utxo: TransactionOutput, update_mmr: bool) {
self.insert(DbKeyValuePair::UnspentOutput(hash, Box::new(utxo), update_mmr));
}
/// Stores an orphan block. No checks are made as to whether this is actually an orphan. That responsibility lies
/// with the calling function.
pub fn insert_orphan(&mut self, orphan: Block) {
let hash = orphan.hash();
self.insert(DbKeyValuePair::OrphanBlock(hash, Box::new(orphan)));
}
/// Moves a UTXO to the STXO set and marks it as spent on the MMR. If the UTXO is not in the UTXO set, the
/// transaction will fail with an `UnspendableOutput` error.
pub fn spend_utxo(&mut self, utxo_hash: HashOutput) {
self.operations
.push(WriteOperation::Spend(DbKey::UnspentOutput(utxo_hash)));
}
/// Moves a STXO to the UTXO set. If the STXO is not in the STXO set, the transaction will fail with an
/// `UnspendError`.
// TODO: unspend_utxo in memory_db doesn't unmark the node in the roaring bitmap.
pub fn unspend_stxo(&mut self, stxo_hash: HashOutput) {
self.operations
.push(WriteOperation::UnSpend(DbKey::SpentOutput(stxo_hash)));
}
/// Moves the given set of transaction inputs from the UTXO set to the STXO set. All the inputs *must* currently
/// exist in the UTXO set, or the transaction will error with `ChainStorageError::UnspendableOutput`
pub fn spend_inputs(&mut self, inputs: &[TransactionInput]) {
for input in inputs {
let input_hash = input.hash();
self.spend_utxo(input_hash);
}
}
/// Adds a marker operation that allows the database to perform any additional work after adding a new block to
/// the database.
pub fn commit_block(&mut self) {
self.operations
.push(WriteOperation::CreateMmrCheckpoint(MmrTree::Kernel));
self.operations.push(WriteOperation::CreateMmrCheckpoint(MmrTree::Utxo));
self.operations
.push(WriteOperation::CreateMmrCheckpoint(MmrTree::RangeProof));
}
/// Set the horizon beyond which we cannot be guaranteed to provide detailed blockchain information anymore.
/// A value of zero indicates that no pruning should be carried out at all. That is, this node should act as an
/// archival node.
///
/// This operation just sets the new horizon value. No pruning is done at this point.
pub fn set_pruning_horizon(&mut self, new_pruning_horizon: u64) {
self.operations.push(WriteOperation::Insert(DbKeyValuePair::Metadata(
MetadataKey::PruningHorizon,
MetadataValue::PruningHorizon(new_pruning_horizon),
)));
}
/// Rewinds the Kernel MMR state by the given number of Checkpoints.
pub fn rewind_kernel_mmr(&mut self, steps_back: usize) {
self.operations
.push(WriteOperation::RewindMmr(MmrTree::Kernel, steps_back));
}
/// Rewinds the UTXO MMR state by the given number of Checkpoints.
pub fn rewind_utxo_mmr(&mut self, steps_back: usize) {
self.operations
.push(WriteOperation::RewindMmr(MmrTree::Utxo, steps_back));
}
/// Rewinds the RangeProof MMR state by the given number of Checkpoints.
pub fn rewind_rp_mmr(&mut self, steps_back: usize) {
self.operations
.push(WriteOperation::RewindMmr(MmrTree::RangeProof, steps_back));
}
}
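// Illustrative usage sketch (not part of the original file; the surrounding variables are
// assumptions): a caller batches several mutations into one DbTransaction and hands it to the
// blockchain database for execution, e.g.
//
//     let mut txn = DbTransaction::new();
//     txn.insert_header(header);            // new block header
//     txn.spend_utxo(spent_output_hash);    // mark an output as spent
//     txn.insert_kernel(kernel, true);      // add the kernel and update the MMR
//     txn.commit_block();                   // create MMR checkpoints for the block
//     db.execute(txn)?;                     // `db` assumed to be a BlockchainDatabase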
#[derive(Debug, Display)]
pub enum WriteOperation {
Insert(DbKeyValuePair),
Delete(DbKey),
Spend(DbKey),
UnSpend(DbKey),
CreateMmrCheckpoint(MmrTree),
RewindMmr(MmrTree, usize),
}
/// A list of key-value pairs that are required for each insert operation
#[derive(Debug)]
pub enum DbKeyValuePair {
Metadata(MetadataKey, MetadataValue),
BlockHeader(u64, Box<BlockHeader>),
UnspentOutput(HashOutput, Box<TransactionOutput>, bool),
TransactionKernel(HashOutput, Box<TransactionKernel>, bool),
OrphanBlock(HashOutput, Box<Block>),
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum MmrTree {
Utxo,
Kernel,
RangeProof,
}
#[derive(Debug, Clone, PartialEq)]
pub enum MetadataKey {
ChainHeight,
BestBlock,
AccumulatedWork,
PruningHorizon,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub enum MetadataValue {
ChainHeight(Option<u64>),
BestBlock(Option<BlockHash>),
AccumulatedWork(Option<Difficulty>),
PruningHorizon(u64),
}
#[derive(Debug, Clone, PartialEq)]
pub enum DbKey {
Metadata(MetadataKey),
BlockHeader(u64),
BlockHash(BlockHash),
UnspentOutput(HashOutput),
SpentOutput(HashOutput),
TransactionKernel(HashOutput),
OrphanBlock(HashOutput),
}
#[derive(Debug)]
pub enum DbValue {
Metadata(MetadataValue),
BlockHeader(Box<BlockHeader>),
BlockHash(Box<BlockHeader>),
UnspentOutput(Box<TransactionOutput>),
SpentOutput(Box<TransactionOutput>),
TransactionKernel(Box<TransactionKernel>),
OrphanBlock(Box<Block>),
}
impl Display for DbValue {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
match self {
DbValue::Metadata(MetadataValue::ChainHeight(_)) => f.write_str("Current chain height"),
DbValue::Metadata(MetadataValue::AccumulatedWork(_)) => f.write_str("Total accumulated work"),
DbValue::Metadata(MetadataValue::PruningHorizon(_)) => f.write_str("Pruning horizon"),
DbValue::Metadata(MetadataValue::BestBlock(_)) => f.write_str("Chain tip block hash"),
DbValue::BlockHeader(_) => f.write_str("Block header"),
DbValue::BlockHash(_) => f.write_str("Block hash"),
DbValue::UnspentOutput(_) => f.write_str("Unspent output"),
DbValue::SpentOutput(_) => f.write_str("Spent output"),
DbValue::TransactionKernel(_) => f.write_str("Transaction kernel"),
DbValue::OrphanBlock(_) => f.write_str("Orphan block"),
}
} | }
| random_line_split |
|
db_transaction.rs | HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use crate::{
blocks::{blockheader::BlockHash, Block, BlockHeader},
proof_of_work::Difficulty,
transactions::{
transaction::{TransactionInput, TransactionKernel, TransactionOutput},
types::HashOutput,
},
};
use serde::{Deserialize, Serialize};
use std::fmt::{Display, Error, Formatter};
use strum_macros::Display;
use tari_crypto::tari_utilities::{hex::to_hex, Hashable};
#[derive(Debug)]
pub struct DbTransaction {
pub operations: Vec<WriteOperation>,
}
impl Display for DbTransaction {
fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
fmt.write_str("Db transaction: \n")?;
for write_op in &self.operations {
fmt.write_str(&format!("{}\n", write_op))?;
}
Ok(())
}
}
impl Default for DbTransaction {
fn default() -> Self {
DbTransaction {
operations: Vec::with_capacity(128),
}
}
}
impl DbTransaction {
/// Creates a new Database transaction. To commit the transaction, call [BlockchainDatabase::execute] with the
/// transaction as a parameter.
pub fn new() -> Self {
DbTransaction::default()
}
/// A general insert request. There are convenience functions for specific insert queries.
pub fn insert(&mut self, insert: DbKeyValuePair) {
self.operations.push(WriteOperation::Insert(insert));
}
/// A general insert request. There are convenience functions for specific delete queries.
pub fn | (&mut self, delete: DbKey) {
self.operations.push(WriteOperation::Delete(delete));
}
/// Inserts a transaction kernel into the current transaction.
pub fn insert_kernel(&mut self, kernel: TransactionKernel, update_mmr: bool) {
let hash = kernel.hash();
self.insert(DbKeyValuePair::TransactionKernel(hash, Box::new(kernel), update_mmr));
}
/// Inserts a block header into the current transaction.
pub fn insert_header(&mut self, header: BlockHeader) {
let height = header.height;
self.insert(DbKeyValuePair::BlockHeader(height, Box::new(header)));
}
/// Adds a UTXO into the current transaction and updates the TXO MMR.
pub fn insert_utxo(&mut self, utxo: TransactionOutput, update_mmr: bool) {
let hash = utxo.hash();
self.insert(DbKeyValuePair::UnspentOutput(hash, Box::new(utxo), update_mmr));
}
/// Adds a UTXO into the current transaction and updates the TXO MMR. This is a test-only function used to ensure we
/// block duplicate entries. This function does not calculate the hash itself but accepts one as a parameter.
pub fn insert_utxo_with_hash(&mut self, hash: Vec<u8>, utxo: TransactionOutput, update_mmr: bool) {
self.insert(DbKeyValuePair::UnspentOutput(hash, Box::new(utxo), update_mmr));
}
/// Stores an orphan block. No checks are made as to whether this is actually an orphan. That responsibility lies
/// with the calling function.
pub fn insert_orphan(&mut self, orphan: Block) {
let hash = orphan.hash();
self.insert(DbKeyValuePair::OrphanBlock(hash, Box::new(orphan)));
}
/// Moves a UTXO to the STXO set and marks it as spent on the MMR. If the UTXO is not in the UTXO set, the
/// transaction will fail with an `UnspendableOutput` error.
pub fn spend_utxo(&mut self, utxo_hash: HashOutput) {
self.operations
.push(WriteOperation::Spend(DbKey::UnspentOutput(utxo_hash)));
}
/// Moves a STXO to the UTXO set. If the STXO is not in the STXO set, the transaction will fail with an
/// `UnspendError`.
// TODO: unspend_utxo in memory_db doesn't unmark the node in the roaring bitmap.
pub fn unspend_stxo(&mut self, stxo_hash: HashOutput) {
self.operations
.push(WriteOperation::UnSpend(DbKey::SpentOutput(stxo_hash)));
}
/// Moves the given set of transaction inputs from the UTXO set to the STXO set. All the inputs *must* currently
/// exist in the UTXO set, or the transaction will error with `ChainStorageError::UnspendableOutput`
pub fn spend_inputs(&mut self, inputs: &[TransactionInput]) {
for input in inputs {
let input_hash = input.hash();
self.spend_utxo(input_hash);
}
}
/// Adds a marker operation that allows the database to perform any additional work after adding a new block to
/// the database.
pub fn commit_block(&mut self) {
self.operations
.push(WriteOperation::CreateMmrCheckpoint(MmrTree::Kernel));
self.operations.push(WriteOperation::CreateMmrCheckpoint(MmrTree::Utxo));
self.operations
.push(WriteOperation::CreateMmrCheckpoint(MmrTree::RangeProof));
}
/// Set the horizon beyond which we cannot be guaranteed to provide detailed blockchain information anymore.
/// A value of zero indicates that no pruning should be carried out at all. That is, this node should act as an
/// archival node.
///
/// This operation just sets the new horizon value. No pruning is done at this point.
pub fn set_pruning_horizon(&mut self, new_pruning_horizon: u64) {
self.operations.push(WriteOperation::Insert(DbKeyValuePair::Metadata(
MetadataKey::PruningHorizon,
MetadataValue::PruningHorizon(new_pruning_horizon),
)));
}
/// Rewinds the Kernel MMR state by the given number of Checkpoints.
pub fn rewind_kernel_mmr(&mut self, steps_back: usize) {
self.operations
.push(WriteOperation::RewindMmr(MmrTree::Kernel, steps_back));
}
/// Rewinds the UTXO MMR state by the given number of Checkpoints.
pub fn rewind_utxo_mmr(&mut self, steps_back: usize) {
self.operations
.push(WriteOperation::RewindMmr(MmrTree::Utxo, steps_back));
}
/// Rewinds the RangeProof MMR state by the given number of Checkpoints.
pub fn rewind_rp_mmr(&mut self, steps_back: usize) {
self.operations
.push(WriteOperation::RewindMmr(MmrTree::RangeProof, steps_back));
}
}
#[derive(Debug, Display)]
pub enum WriteOperation {
Insert(DbKeyValuePair),
Delete(DbKey),
Spend(DbKey),
UnSpend(DbKey),
CreateMmrCheckpoint(MmrTree),
RewindMmr(MmrTree, usize),
}
/// A list of key-value pairs that are required for each insert operation
#[derive(Debug)]
pub enum DbKeyValuePair {
Metadata(MetadataKey, MetadataValue),
BlockHeader(u64, Box<BlockHeader>),
UnspentOutput(HashOutput, Box<TransactionOutput>, bool),
TransactionKernel(HashOutput, Box<TransactionKernel>, bool),
OrphanBlock(HashOutput, Box<Block>),
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum MmrTree {
Utxo,
Kernel,
RangeProof,
}
#[derive(Debug, Clone, PartialEq)]
pub enum MetadataKey {
ChainHeight,
BestBlock,
AccumulatedWork,
PruningHorizon,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub enum MetadataValue {
ChainHeight(Option<u64>),
BestBlock(Option<BlockHash>),
AccumulatedWork(Option<Difficulty>),
PruningHorizon(u64),
}
#[derive(Debug, Clone, PartialEq)]
pub enum DbKey {
Metadata(MetadataKey),
BlockHeader(u64),
BlockHash(BlockHash),
UnspentOutput(HashOutput),
SpentOutput(HashOutput),
TransactionKernel(HashOutput),
OrphanBlock(HashOutput),
}
#[derive(Debug)]
pub enum DbValue {
Metadata(MetadataValue),
BlockHeader(Box<BlockHeader>),
BlockHash(Box<BlockHeader>),
UnspentOutput(Box<TransactionOutput>),
SpentOutput(Box<TransactionOutput>),
TransactionKernel(Box<TransactionKernel>),
OrphanBlock(Box<Block>),
}
impl Display for DbValue {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
match self {
DbValue::Metadata(MetadataValue::ChainHeight(_)) => f.write_str("Current chain height"),
DbValue::Metadata(MetadataValue::AccumulatedWork(_)) => f.write_str("Total accumulated work"),
DbValue::Metadata(MetadataValue::PruningHorizon(_)) => f.write_str("Pruning horizon"),
DbValue::Metadata(MetadataValue::Best | delete | identifier_name |
db_transaction.rs | , BlockHeader},
proof_of_work::Difficulty,
transactions::{
transaction::{TransactionInput, TransactionKernel, TransactionOutput},
types::HashOutput,
},
};
use serde::{Deserialize, Serialize};
use std::fmt::{Display, Error, Formatter};
use strum_macros::Display;
use tari_crypto::tari_utilities::{hex::to_hex, Hashable};
#[derive(Debug)]
pub struct DbTransaction {
pub operations: Vec<WriteOperation>,
}
impl Display for DbTransaction {
fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
fmt.write_str("Db transaction: \n")?;
for write_op in &self.operations {
fmt.write_str(&format!("{}\n", write_op))?;
}
Ok(())
}
}
impl Default for DbTransaction {
fn default() -> Self {
DbTransaction {
operations: Vec::with_capacity(128),
}
}
}
impl DbTransaction {
/// Creates a new Database transaction. To commit the transaction, call [BlockchainDatabase::execute] with the
/// transaction as a parameter.
pub fn new() -> Self {
DbTransaction::default()
}
/// A general insert request. There are convenience functions for specific insert queries.
pub fn insert(&mut self, insert: DbKeyValuePair) {
self.operations.push(WriteOperation::Insert(insert));
}
/// A general insert request. There are convenience functions for specific delete queries.
pub fn delete(&mut self, delete: DbKey) {
self.operations.push(WriteOperation::Delete(delete));
}
/// Inserts a transaction kernel into the current transaction.
pub fn insert_kernel(&mut self, kernel: TransactionKernel, update_mmr: bool) {
let hash = kernel.hash();
self.insert(DbKeyValuePair::TransactionKernel(hash, Box::new(kernel), update_mmr));
}
/// Inserts a block header into the current transaction.
pub fn insert_header(&mut self, header: BlockHeader) {
let height = header.height;
self.insert(DbKeyValuePair::BlockHeader(height, Box::new(header)));
}
/// Adds a UTXO into the current transaction and updates the TXO MMR.
pub fn insert_utxo(&mut self, utxo: TransactionOutput, update_mmr: bool) {
let hash = utxo.hash();
self.insert(DbKeyValuePair::UnspentOutput(hash, Box::new(utxo), update_mmr));
}
/// Adds a UTXO into the current transaction and updates the TXO MMR. This is a test-only function used to ensure we
/// block duplicate entries. This function does not calculate the hash itself but accepts one as a parameter.
pub fn insert_utxo_with_hash(&mut self, hash: Vec<u8>, utxo: TransactionOutput, update_mmr: bool) {
self.insert(DbKeyValuePair::UnspentOutput(hash, Box::new(utxo), update_mmr));
}
/// Stores an orphan block. No checks are made as to whether this is actually an orphan. That responsibility lies
/// with the calling function.
pub fn insert_orphan(&mut self, orphan: Block) {
let hash = orphan.hash();
self.insert(DbKeyValuePair::OrphanBlock(hash, Box::new(orphan)));
}
/// Moves a UTXO to the STXO set and marks it as spent on the MMR. If the UTXO is not in the UTXO set, the
/// transaction will fail with an `UnspendableOutput` error.
pub fn spend_utxo(&mut self, utxo_hash: HashOutput) {
self.operations
.push(WriteOperation::Spend(DbKey::UnspentOutput(utxo_hash)));
}
/// Moves a STXO to the UTXO set. If the STXO is not in the STXO set, the transaction will fail with an
/// `UnspendError`.
// TODO: unspend_utxo in memory_db doesn't unmark the node in the roaring bitmap.
pub fn unspend_stxo(&mut self, stxo_hash: HashOutput) {
self.operations
.push(WriteOperation::UnSpend(DbKey::SpentOutput(stxo_hash)));
}
/// Moves the given set of transaction inputs from the UTXO set to the STXO set. All the inputs *must* currently
/// exist in the UTXO set, or the transaction will error with `ChainStorageError::UnspendableOutput`
pub fn spend_inputs(&mut self, inputs: &[TransactionInput]) {
for input in inputs {
let input_hash = input.hash();
self.spend_utxo(input_hash);
}
}
/// Adds a marker operation that allows the database to perform any additional work after adding a new block to
/// the database.
pub fn commit_block(&mut self) {
self.operations
.push(WriteOperation::CreateMmrCheckpoint(MmrTree::Kernel));
self.operations.push(WriteOperation::CreateMmrCheckpoint(MmrTree::Utxo));
self.operations
.push(WriteOperation::CreateMmrCheckpoint(MmrTree::RangeProof));
}
/// Set the horizon beyond which we cannot be guaranteed to provide detailed blockchain information anymore.
/// A value of zero indicates that no pruning should be carried out at all. That is, this node should act as an
/// archival node.
///
/// This operation just sets the new horizon value. No pruning is done at this point.
pub fn set_pruning_horizon(&mut self, new_pruning_horizon: u64) {
self.operations.push(WriteOperation::Insert(DbKeyValuePair::Metadata(
MetadataKey::PruningHorizon,
MetadataValue::PruningHorizon(new_pruning_horizon),
)));
}
/// Rewinds the Kernel MMR state by the given number of Checkpoints.
pub fn rewind_kernel_mmr(&mut self, steps_back: usize) {
self.operations
.push(WriteOperation::RewindMmr(MmrTree::Kernel, steps_back));
}
/// Rewinds the UTXO MMR state by the given number of Checkpoints.
pub fn rewind_utxo_mmr(&mut self, steps_back: usize) {
self.operations
.push(WriteOperation::RewindMmr(MmrTree::Utxo, steps_back));
}
/// Rewinds the RangeProof MMR state by the given number of Checkpoints.
pub fn rewind_rp_mmr(&mut self, steps_back: usize) {
self.operations
.push(WriteOperation::RewindMmr(MmrTree::RangeProof, steps_back));
}
}
#[derive(Debug, Display)]
pub enum WriteOperation {
Insert(DbKeyValuePair),
Delete(DbKey),
Spend(DbKey),
UnSpend(DbKey),
CreateMmrCheckpoint(MmrTree),
RewindMmr(MmrTree, usize),
}
/// A list of key-value pairs that are required for each insert operation
#[derive(Debug)]
pub enum DbKeyValuePair {
Metadata(MetadataKey, MetadataValue),
BlockHeader(u64, Box<BlockHeader>),
UnspentOutput(HashOutput, Box<TransactionOutput>, bool),
TransactionKernel(HashOutput, Box<TransactionKernel>, bool),
OrphanBlock(HashOutput, Box<Block>),
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum MmrTree {
Utxo,
Kernel,
RangeProof,
}
#[derive(Debug, Clone, PartialEq)]
pub enum MetadataKey {
ChainHeight,
BestBlock,
AccumulatedWork,
PruningHorizon,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub enum MetadataValue {
ChainHeight(Option<u64>),
BestBlock(Option<BlockHash>),
AccumulatedWork(Option<Difficulty>),
PruningHorizon(u64),
}
#[derive(Debug, Clone, PartialEq)]
pub enum DbKey {
Metadata(MetadataKey),
BlockHeader(u64),
BlockHash(BlockHash),
UnspentOutput(HashOutput),
SpentOutput(HashOutput),
TransactionKernel(HashOutput),
OrphanBlock(HashOutput),
}
#[derive(Debug)]
pub enum DbValue {
Metadata(MetadataValue),
BlockHeader(Box<BlockHeader>),
BlockHash(Box<BlockHeader>),
UnspentOutput(Box<TransactionOutput>),
SpentOutput(Box<TransactionOutput>),
TransactionKernel(Box<TransactionKernel>),
OrphanBlock(Box<Block>),
}
impl Display for DbValue {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> | {
match self {
DbValue::Metadata(MetadataValue::ChainHeight(_)) => f.write_str("Current chain height"),
DbValue::Metadata(MetadataValue::AccumulatedWork(_)) => f.write_str("Total accumulated work"),
DbValue::Metadata(MetadataValue::PruningHorizon(_)) => f.write_str("Pruning horizon"),
DbValue::Metadata(MetadataValue::BestBlock(_)) => f.write_str("Chain tip block hash"),
DbValue::BlockHeader(_) => f.write_str("Block header"),
DbValue::BlockHash(_) => f.write_str("Block hash"),
DbValue::UnspentOutput(_) => f.write_str("Unspent output"),
DbValue::SpentOutput(_) => f.write_str("Spent output"),
DbValue::TransactionKernel(_) => f.write_str("Transaction kernel"),
DbValue::OrphanBlock(_) => f.write_str("Orphan block"),
}
} | identifier_body |
|
proc_cr10x_wq_v1.py | in sw:
m = re.search(REAL_RE_STR, s)
if m:
csi.append(float(m.groups()[0]))
if len(csi)==14:
# get sample datetime from data
yyyy = csi[1]
yday = csi[2]
(MM, HH) = math.modf(csi[3]/100.)
MM = math.ceil(MM*100.)
if (HH == 24):
yday=yday+1
HH = 0.
sample_str = '%04d-%03d %02d:%02d' % (yyyy, yday, HH, MM)
# if sensor_info['utc_offset']:
# sample_dt = scanf_datetime(sample_str, fmt='%m-%d-%Y %H:%M:%S') + \
# timedelta(hours=sensor_info['utc_offset'])
# else:
sample_dt = scanf_datetime(sample_str, fmt='%Y-%j %H:%M')
data['dt'][i] = sample_dt # sample datetime
data['time'][i] = dt2es(sample_dt) # sample time in epoch seconds
#
data['wtemp'][i] = csi[4] # water temperature (C)
data['cond'][i] = csi[5] # specific conductivity (mS/cm)
data['do_sat'][i] = csi[6] # saturated dissolved oxygen (% air sat)
data['do_mg'][i] = csi[7] # dissolved oxygen (mg/l)
data['ph'][i] = csi[8] # ph
data['turb'][i] = csi[9] # turbidity (NTU)
# no adcp's prior to March 2005
# data['sontek_wl'][i] = csi[5] # sontek water level (ft)
# data['sontek_flow'][i] = csi[6] # sontek flow (cfs)
# data['press_wl'][i] = csi[10] # pressure water level (ft ?? or inches)
# data['rain'][i] = csi[11] # 15 sec rain count ??
# data['press_flow'][i] = csi[12] # flow flow (cfs)
data['battvolts'][i] = csi[13] # battery (volts)
i=i+1
# if-elif
# for line
# check that no data[dt] is set to Nan or anything but datetime
# keep only data that has a resolved datetime
keep = numpy.array([type(datetime(1970,1,1)) == type(dt) for dt in data['dt'][:]])
if keep.any():
for param in data.keys():
data[param] = data[param][keep]
return data
def creator(platform_info, sensor_info, data):
#
#
title_str = sensor_info['description']+' at '+ platform_info['location']
global_atts = {
'title' : title_str,
'institution' : 'University of North Carolina at Chapel Hill (UNC-CH)',
'institution_url' : 'http://nccoos.unc.edu',
'institution_dods_url' : 'http://nccoos.unc.edu',
'metadata_url' : 'http://nccoos.unc.edu',
'references' : 'http://ehs.unc.edu',
'contact' : 'Sara Haines ([email protected])',
'station_owner' : 'Environment, Health, and Safety Office',
'station_contact' : 'Sharon Myers ([email protected])',
#
'source' : 'fixed-point observation',
'history' : 'raw2proc using ' + sensor_info['process_module'],
'comment' : 'File created using pycdf'+pycdfVersion()+' and numpy '+pycdfArrayPkg(),
# conventions
'Conventions' : 'CF-1.0; SEACOOS-CDL-v2.0',
# SEACOOS CDL codes
'format_category_code' : 'fixed-point',
'institution_code' : platform_info['institution'],
'platform_code' : platform_info['id'],
'package_code' : sensor_info['id'],
# institution specific
'project' : 'Environment, Health, and Safety (EHS)',
'project_url' : 'http://ehs.unc.edu/environment/water_quality',
# timeframe of data contained in file yyyy-mm-dd HH:MM:SS
# first date in monthly file
'start_date' : data['dt'][0].strftime("%Y-%m-%d %H:%M:%S"),
# last date in monthly file
'end_date' : data['dt'][-1].strftime("%Y-%m-%d %H:%M:%S"),
'release_date' : now_dt.strftime("%Y-%m-%d %H:%M:%S"),
#
'creation_date' : now_dt.strftime("%Y-%m-%d %H:%M:%S"),
'modification_date' : now_dt.strftime("%Y-%m-%d %H:%M:%S"),
'process_level' : 'level1',
#
# must type match to data (e.g. fillvalue is real if data is real)
'_FillValue' : -99999.,
}
var_atts = {
# coordinate variables
'time' : {'short_name': 'time',
'long_name': 'Time',
'standard_name': 'time',
'units': 'seconds since 1970-1-1 00:00:00 -0', # UTC
'axis': 'T',
},
'lat' : {'short_name': 'lat',
'long_name': 'Latitude',
'standard_name': 'latitude',
'reference':'geographic coordinates',
'units': 'degrees_north',
'valid_range':(-90.,90.),
'axis': 'Y',
},
'lon' : {'short_name': 'lon',
'long_name': 'Longitude',
'standard_name': 'longitude',
'reference':'geographic coordinates',
'units': 'degrees_east',
'valid_range':(-180.,180.),
'axis': 'Y',
},
'z' : {'short_name': 'z',
'long_name': 'Height',
'standard_name': 'height',
'reference':'zero at sea-surface',
'positive' : 'up',
'units': 'm',
'axis': 'Z',
},
# data variables
'wtemp': {'short_name': 'wtemp',
'long_name': 'Water Temperature',
'standard_name': 'water_temperature',
'units': 'degrees_Celsius',
},
'cond': {'short_name': 'cond',
'long_name': 'Conductivity',
'standard_name': 'conductivity',
'units': 'mS cm-1',
},
'turb': {'short_name': 'turb',
'long_name': 'Turbidity',
'standard_name': 'turbidity',
'units': 'NTU',
},
'ph': {'short_name': 'ph',
'long_name': 'pH',
'standard_name': 'ph',
'units': '',
},
'do_mg': {'short_name': 'do_mg',
'long_name': 'ROX Optical Dissolved Oxygen, Derived Concentration',
'standard_name': 'dissolved_oxygen_concentration',
'units': 'mg l-1',
},
'do_sat': {'short_name': 'do_sat',
'long_name': 'ROX Optical Dissolved Oxygen, Percent of Air Saturation',
'standard_name': 'dissolved_oxygen_relative_to_air_saturation',
'units': '%',
},
'battvolts': {'short_name': 'battery',
'long_name': 'Battery Voltage of the Station',
'standard_name': 'battery_voltage',
'units': 'volts',
},
}
# dimension names use tuple so order of initialization is maintained
dim_inits = (
('ntime', NC.UNLIMITED),
('nlat', 1),
('nlon', 1),
('nz', 1),
)
# using tuple of tuples so order of initialization is maintained
# using dict for attributes order of init not important
# use dimension names not values
# (varName, varType, (dimName1, [dimName2], ...))
var_inits = (
# coordinate variables
('time', NC.INT, ('ntime',)),
('lat', NC.FLOAT, ('nlat',)),
('lon', NC.FLOAT, ('nlon',)),
('z', NC.FLOAT, ('nz',)),
# data variables
('wtemp', NC.FLOAT, ('ntime',)), | ('cond', NC.FLOAT, ('ntime',)),
('turb', NC.FLOAT, ('ntime',)), | random_line_split |
|
proc_cr10x_wq_v1.py | nsamp=nsamp+1
N = nsamp
data = {
'dt' : numpy.array(numpy.ones((N,), dtype=object)*numpy.nan),
'time' : numpy.array(numpy.ones((N,), dtype=long)*numpy.nan),
'wtemp' : numpy.array(numpy.ones((N,), dtype=float)*numpy.nan),
'cond' : numpy.array(numpy.ones((N,), dtype=float)*numpy.nan),
'do_sat' : numpy.array(numpy.ones((N,), dtype=float)*numpy.nan),
'do_mg' : numpy.array(numpy.ones((N,), dtype=float)*numpy.nan),
'ph' : numpy.array(numpy.ones((N,), dtype=float)*numpy.nan),
'turb' : numpy.array(numpy.ones((N,), dtype=float)*numpy.nan),
'battvolts' : numpy.array(numpy.ones((N,), dtype=float)*numpy.nan),
}
# sample count
i = 0
for line in lines:
csi = []
# split line and parse float and integers
m=re.search("^1,", line)
if m:
sw = re.split(',', line)
else:
continue
for s in sw:
m = re.search(REAL_RE_STR, s)
if m:
csi.append(float(m.groups()[0]))
if len(csi)==14:
# get sample datetime from data
yyyy = csi[1]
yday = csi[2]
(MM, HH) = math.modf(csi[3]/100.)
MM = math.ceil(MM*100.)
if (HH == 24):
yday=yday+1
HH = 0.
sample_str = '%04d-%03d %02d:%02d' % (yyyy, yday, HH, MM)
# if sensor_info['utc_offset']:
# sample_dt = scanf_datetime(sample_str, fmt='%m-%d-%Y %H:%M:%S') + \
# timedelta(hours=sensor_info['utc_offset'])
# else:
sample_dt = scanf_datetime(sample_str, fmt='%Y-%j %H:%M')
data['dt'][i] = sample_dt # sample datetime
data['time'][i] = dt2es(sample_dt) # sample time in epoch seconds
#
data['wtemp'][i] = csi[4] # water temperature (C)
data['cond'][i] = csi[5] # specific conductivity (mS/cm)
data['do_sat'][i] = csi[6] # saturated dissolved oxygen (% air sat)
data['do_mg'][i] = csi[7] # dissolved oxygen (mg/l)
data['ph'][i] = csi[8] # ph
data['turb'][i] = csi[9] # turbidity (NTU)
# no adcp's prior to March 2005
# data['sontek_wl'][i] = csi[5] # sontek water level (ft)
# data['sontek_flow'][i] = csi[6] # sontek flow (cfs)
# data['press_wl'][i] = csi[10] # pressure water level (ft ?? or inches)
# data['rain'][i] = csi[11] # 15 sec rain count ??
# data['press_flow'][i] = csi[12] # flow flow (cfs)
data['battvolts'][i] = csi[13] # battery (volts)
i=i+1
# if-elif
# for line
# check that no data[dt] is set to Nan or anything but datetime
# keep only data that has a resolved datetime
keep = numpy.array([type(datetime(1970,1,1)) == type(dt) for dt in data['dt'][:]])
if keep.any():
for param in data.keys():
data[param] = data[param][keep]
return data
def creator(platform_info, sensor_info, data):
#
#
title_str = sensor_info['description']+' at '+ platform_info['location']
global_atts = {
'title' : title_str,
'institution' : 'University of North Carolina at Chapel Hill (UNC-CH)',
'institution_url' : 'http://nccoos.unc.edu',
'institution_dods_url' : 'http://nccoos.unc.edu',
'metadata_url' : 'http://nccoos.unc.edu',
'references' : 'http://ehs.unc.edu',
'contact' : 'Sara Haines ([email protected])',
'station_owner' : 'Environment, Health, and Safety Office',
'station_contact' : 'Sharon Myers ([email protected])',
#
'source' : 'fixed-point observation',
'history' : 'raw2proc using ' + sensor_info['process_module'],
'comment' : 'File created using pycdf'+pycdfVersion()+' and numpy '+pycdfArrayPkg(),
# conventions
'Conventions' : 'CF-1.0; SEACOOS-CDL-v2.0',
# SEACOOS CDL codes
'format_category_code' : 'fixed-point',
'institution_code' : platform_info['institution'],
'platform_code' : platform_info['id'],
'package_code' : sensor_info['id'],
# institution specific
'project' : 'Environment, Health, and Safety (EHS)',
'project_url' : 'http://ehs.unc.edu/environment/water_quality',
# timeframe of data contained in file yyyy-mm-dd HH:MM:SS
# first date in monthly file
'start_date' : data['dt'][0].strftime("%Y-%m-%d %H:%M:%S"),
# last date in monthly file
'end_date' : data['dt'][-1].strftime("%Y-%m-%d %H:%M:%S"),
'release_date' : now_dt.strftime("%Y-%m-%d %H:%M:%S"),
#
'creation_date' : now_dt.strftime("%Y-%m-%d %H:%M:%S"),
'modification_date' : now_dt.strftime("%Y-%m-%d %H:%M:%S"),
'process_level' : 'level1',
#
# must type match to data (e.g. fillvalue is real if data is real)
'_FillValue' : -99999.,
}
var_atts = {
# coordinate variables
'time' : {'short_name': 'time',
'long_name': 'Time',
'standard_name': 'time',
'units': 'seconds since 1970-1-1 00:00:00 -0', # UTC
'axis': 'T',
},
'lat' : {'short_name': 'lat',
'long_name': 'Latitude',
'standard_name': 'latitude',
'reference':'geographic coordinates',
'units': 'degrees_north',
'valid_range':(-90.,90.),
'axis': 'Y',
},
'lon' : {'short_name': 'lon',
'long_name': 'Longitude',
'standard_name': 'longitude',
'reference':'geographic coordinates',
'units': 'degrees_east',
'valid_range':(-180.,180.),
'axis': 'Y',
},
'z' : {'short_name': 'z',
'long_name': 'Height',
'standard_name': 'height',
'reference':'zero at sea-surface',
'positive' : 'up',
'units': 'm',
'axis': 'Z',
},
# data variables
'wtemp': {'short_name': 'wtemp',
'long_name': 'Water Temperature',
'standard_name': 'water_temperature',
'units': 'degrees_Celsius',
},
'cond': {'short_name': 'cond',
'long_name': 'Conductivity',
'standard_name': 'conductivity',
'units': 'mS cm-1',
},
'turb': {'short_name': 'turb',
'long_name': 'Turbidity',
'standard_name': 'turbidity',
'units': 'NTU',
},
'ph': {'short_name': 'ph',
'long_name': 'pH',
'standard_name': 'ph',
'units': '',
},
'do_mg': {'short_name': 'do_mg',
'long_name': 'ROX Optical Dissolved | """
From FSL (CSI datalogger program files):
"""
import numpy
from datetime import datetime
from time import strptime
import math
# get sample datetime from filename
fn = sensor_info['fn']
sample_dt_start = filt_datetime(fn)
# how many samples
nsamp = 0
for line in lines:
m=re.search("^1,", line)
if m: | identifier_body |
|
proc_cr10x_wq_v1.py | how many samples
nsamp = 0
for line in lines:
m=re.search("^1,", line)
if m:
nsamp=nsamp+1
N = nsamp
data = {
'dt' : numpy.array(numpy.ones((N,), dtype=object)*numpy.nan),
'time' : numpy.array(numpy.ones((N,), dtype=long)*numpy.nan),
'wtemp' : numpy.array(numpy.ones((N,), dtype=float)*numpy.nan),
'cond' : numpy.array(numpy.ones((N,), dtype=float)*numpy.nan),
'do_sat' : numpy.array(numpy.ones((N,), dtype=float)*numpy.nan),
'do_mg' : numpy.array(numpy.ones((N,), dtype=float)*numpy.nan),
'ph' : numpy.array(numpy.ones((N,), dtype=float)*numpy.nan),
'turb' : numpy.array(numpy.ones((N,), dtype=float)*numpy.nan),
'battvolts' : numpy.array(numpy.ones((N,), dtype=float)*numpy.nan),
}
# sample count
i = 0
for line in lines:
csi = []
# split line and parse float and integers
m=re.search("^1,", line)
if m:
sw = re.split(',', line)
else:
continue
for s in sw:
m = re.search(REAL_RE_STR, s)
if m:
csi.append(float(m.groups()[0]))
if len(csi)==14:
# get sample datetime from data
yyyy = csi[1]
yday = csi[2]
(MM, HH) = math.modf(csi[3]/100.)
MM = math.ceil(MM*100.)
if (HH == 24):
|
sample_str = '%04d-%03d %02d:%02d' % (yyyy, yday, HH, MM)
# if sensor_info['utc_offset']:
# sample_dt = scanf_datetime(sample_str, fmt='%m-%d-%Y %H:%M:%S') + \
# timedelta(hours=sensor_info['utc_offset'])
# else:
sample_dt = scanf_datetime(sample_str, fmt='%Y-%j %H:%M')
data['dt'][i] = sample_dt # sample datetime
data['time'][i] = dt2es(sample_dt) # sample time in epoch seconds
#
data['wtemp'][i] = csi[4] # water temperature (C)
data['cond'][i] = csi[5] # specific conductivity (mS/cm)
data['do_sat'][i] = csi[6] # saturated dissolved oxygen (% air sat)
data['do_mg'][i] = csi[7] # dissolved oxygen (mg/l)
data['ph'][i] = csi[8] # ph
data['turb'][i] = csi[9] # turbidity (NTU)
# no adcp's prior to March 2005
# data['sontek_wl'][i] = csi[5] # sontek water level (ft)
# data['sontek_flow'][i] = csi[6] # sontek flow (cfs)
# data['press_wl'][i] = csi[10] # pressure water level (ft ?? or inches)
# data['rain'][i] = csi[11] # 15 sec rain count ??
# data['press_flow'][i] = csi[12] # flow flow (cfs)
data['battvolts'][i] = csi[13] # battery (volts)
i=i+1
# if-elif
# for line
# check that no data[dt] is set to Nan or anything but datetime
# keep only data that has a resolved datetime
keep = numpy.array([type(datetime(1970,1,1)) == type(dt) for dt in data['dt'][:]])
if keep.any():
for param in data.keys():
data[param] = data[param][keep]
return data
def creator(platform_info, sensor_info, data):
#
#
title_str = sensor_info['description']+' at '+ platform_info['location']
global_atts = {
'title' : title_str,
'institution' : 'University of North Carolina at Chapel Hill (UNC-CH)',
'institution_url' : 'http://nccoos.unc.edu',
'institution_dods_url' : 'http://nccoos.unc.edu',
'metadata_url' : 'http://nccoos.unc.edu',
'references' : 'http://ehs.unc.edu',
'contact' : 'Sara Haines ([email protected])',
'station_owner' : 'Environment, Health, and Safety Office',
'station_contact' : 'Sharon Myers ([email protected])',
#
'source' : 'fixed-point observation',
'history' : 'raw2proc using ' + sensor_info['process_module'],
'comment' : 'File created using pycdf'+pycdfVersion()+' and numpy '+pycdfArrayPkg(),
# conventions
'Conventions' : 'CF-1.0; SEACOOS-CDL-v2.0',
# SEACOOS CDL codes
'format_category_code' : 'fixed-point',
'institution_code' : platform_info['institution'],
'platform_code' : platform_info['id'],
'package_code' : sensor_info['id'],
# institution specific
'project' : 'Environment, Health, and Safety (EHS)',
'project_url' : 'http://ehs.unc.edu/environment/water_quality',
# timeframe of data contained in file yyyy-mm-dd HH:MM:SS
# first date in monthly file
'start_date' : data['dt'][0].strftime("%Y-%m-%d %H:%M:%S"),
# last date in monthly file
'end_date' : data['dt'][-1].strftime("%Y-%m-%d %H:%M:%S"),
'release_date' : now_dt.strftime("%Y-%m-%d %H:%M:%S"),
#
'creation_date' : now_dt.strftime("%Y-%m-%d %H:%M:%S"),
'modification_date' : now_dt.strftime("%Y-%m-%d %H:%M:%S"),
'process_level' : 'level1',
#
# must type match to data (e.g. fillvalue is real if data is real)
'_FillValue' : -99999.,
}
var_atts = {
# coordinate variables
'time' : {'short_name': 'time',
'long_name': 'Time',
'standard_name': 'time',
'units': 'seconds since 1970-1-1 00:00:00 -0', # UTC
'axis': 'T',
},
'lat' : {'short_name': 'lat',
'long_name': 'Latitude',
'standard_name': 'latitude',
'reference':'geographic coordinates',
'units': 'degrees_north',
'valid_range':(-90.,90.),
'axis': 'Y',
},
'lon' : {'short_name': 'lon',
'long_name': 'Longitude',
'standard_name': 'longitude',
'reference':'geographic coordinates',
'units': 'degrees_east',
'valid_range':(-180.,180.),
'axis': 'X',
},
'z' : {'short_name': 'z',
'long_name': 'Height',
'standard_name': 'height',
'reference':'zero at sea-surface',
'positive' : 'up',
'units': 'm',
'axis': 'Z',
},
# data variables
'wtemp': {'short_name': 'wtemp',
'long_name': 'Water Temperature',
'standard_name': 'water_temperature',
'units': 'degrees_Celsius',
},
'cond': {'short_name': 'cond',
'long_name': 'Conductivity',
'standard_name': 'conductivity',
'units': 'mS cm-1',
},
'turb': {'short_name': 'turb',
'long_name': 'Turbidity',
'standard_name': 'turbidity',
'units': 'NTU',
},
'ph': {'short_name': 'ph',
'long_name': 'pH',
'standard_name': 'ph',
'units': '',
},
'do_mg': {'short_name': 'do_mg',
'long_name': 'ROX Optical Dissolved Oxygen, Derived Concentration',
'standard_name': 'dissolved_oxygen_concentration',
'units': 'mg l-1',
},
'do_sat': {'short_name': 'do_sat',
'long_name': 'ROX Optical Dissolved Oxygen, Percent of Air Saturation',
| yday=yday+1
HH = 0. | conditional_block |
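Note: the masked block above is the 2400-hour rollover fix. A self-contained way to express the same (year, day-of-year, HHMM) conversion with only the standard library is sketched below; scanf_datetime in the script is a project-specific helper, so strptime is used here instead.

# Sketch with the standard library only; the script's scanf_datetime helper is project-specific.
from datetime import datetime, timedelta

def csi_to_datetime(yyyy, yday, hhmm):
    hh, mm = divmod(int(hhmm), 100)
    rollover = (hh == 24)              # CR10X loggers report midnight as 2400
    if rollover:
        hh = 0
    dt = datetime.strptime('%04d-%03d %02d:%02d' % (int(yyyy), int(yday), hh, mm),
                           '%Y-%j %H:%M')
    # Adding a day also covers Dec 31 2400, where bumping yday alone would overflow the year.
    return dt + timedelta(days=1) if rollover else dt

print(csi_to_datetime(2005, 32, 1130))    # 2005-02-01 11:30:00
print(csi_to_datetime(2005, 365, 2400))   # 2006-01-01 00:00:00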
proc_cr10x_wq_v1.py | many samples
nsamp = 0
for line in lines:
m=re.search("^1,", line)
if m:
nsamp=nsamp+1
N = nsamp
data = {
'dt' : numpy.array(numpy.ones((N,), dtype=object)*numpy.nan),
'time' : numpy.array(numpy.ones((N,), dtype=long)*numpy.nan),
'wtemp' : numpy.array(numpy.ones((N,), dtype=float)*numpy.nan),
'cond' : numpy.array(numpy.ones((N,), dtype=float)*numpy.nan),
'do_sat' : numpy.array(numpy.ones((N,), dtype=float)*numpy.nan),
'do_mg' : numpy.array(numpy.ones((N,), dtype=float)*numpy.nan),
'ph' : numpy.array(numpy.ones((N,), dtype=float)*numpy.nan),
'turb' : numpy.array(numpy.ones((N,), dtype=float)*numpy.nan),
'battvolts' : numpy.array(numpy.ones((N,), dtype=float)*numpy.nan),
}
# sample count
i = 0
for line in lines:
csi = []
# split line and parse float and integers
m=re.search("^1,", line)
if m:
sw = re.split(',', line)
else:
continue
for s in sw:
m = re.search(REAL_RE_STR, s)
if m:
csi.append(float(m.groups()[0]))
if len(csi)==14:
# get sample datetime from data
yyyy = csi[1]
yday = csi[2]
(MM, HH) = math.modf(csi[3]/100.)
MM = math.ceil(MM*100.)
if (HH == 24):
yday=yday+1
HH = 0.
sample_str = '%04d-%03d %02d:%02d' % (yyyy, yday, HH, MM)
# if sensor_info['utc_offset']:
# sample_dt = scanf_datetime(sample_str, fmt='%m-%d-%Y %H:%M:%S') + \
# timedelta(hours=sensor_info['utc_offset'])
# else:
sample_dt = scanf_datetime(sample_str, fmt='%Y-%j %H:%M')
data['dt'][i] = sample_dt # sample datetime
data['time'][i] = dt2es(sample_dt) # sample time in epoch seconds
#
data['wtemp'][i] = csi[4] # water temperature (C)
data['cond'][i] = csi[5] # specific conductivity (mS/cm)
data['do_sat'][i] = csi[6] # saturated dissolved oxygen (% air sat)
data['do_mg'][i] = csi[7] # dissolved oxygen (mg/l)
data['ph'][i] = csi[8] # ph
data['turb'][i] = csi[9] # turbidity (NTU)
# no adcp's prior to March 2005
# data['sontek_wl'][i] = csi[5] # sontek water level (ft)
# data['sontek_flow'][i] = csi[6] # sontek flow (cfs)
# data['press_wl'][i] = csi[10] # pressure water level (ft ?? or inches)
# data['rain'][i] = csi[11] # 15 sec rain count ??
# data['press_flow'][i] = csi[12] # flow flow (cfs)
data['battvolts'][i] = csi[13] # battery (volts)
i=i+1
# if-elif
# for line
# check that no data[dt] is set to Nan or anything but datetime
# keep only data that has a resolved datetime
keep = numpy.array([type(datetime(1970,1,1)) == type(dt) for dt in data['dt'][:]])
if keep.any():
for param in data.keys():
data[param] = data[param][keep]
return data
def | (platform_info, sensor_info, data):
#
#
title_str = sensor_info['description']+' at '+ platform_info['location']
global_atts = {
'title' : title_str,
'institution' : 'University of North Carolina at Chapel Hill (UNC-CH)',
'institution_url' : 'http://nccoos.unc.edu',
'institution_dods_url' : 'http://nccoos.unc.edu',
'metadata_url' : 'http://nccoos.unc.edu',
'references' : 'http://ehs.unc.edu',
'contact' : 'Sara Haines ([email protected])',
'station_owner' : 'Environment, Health, and Safety Office',
'station_contact' : 'Sharon Myers ([email protected])',
#
'source' : 'fixed-point observation',
'history' : 'raw2proc using ' + sensor_info['process_module'],
'comment' : 'File created using pycdf'+pycdfVersion()+' and numpy '+pycdfArrayPkg(),
# conventions
'Conventions' : 'CF-1.0; SEACOOS-CDL-v2.0',
# SEACOOS CDL codes
'format_category_code' : 'fixed-point',
'institution_code' : platform_info['institution'],
'platform_code' : platform_info['id'],
'package_code' : sensor_info['id'],
# institution specific
'project' : 'Environment, Health, and Safety (EHS)',
'project_url' : 'http://ehs.unc.edu/environment/water_quality',
# timeframe of data contained in file yyyy-mm-dd HH:MM:SS
# first date in monthly file
'start_date' : data['dt'][0].strftime("%Y-%m-%d %H:%M:%S"),
# last date in monthly file
'end_date' : data['dt'][-1].strftime("%Y-%m-%d %H:%M:%S"),
'release_date' : now_dt.strftime("%Y-%m-%d %H:%M:%S"),
#
'creation_date' : now_dt.strftime("%Y-%m-%d %H:%M:%S"),
'modification_date' : now_dt.strftime("%Y-%m-%d %H:%M:%S"),
'process_level' : 'level1',
#
# must type match to data (e.g. fillvalue is real if data is real)
'_FillValue' : -99999.,
}
var_atts = {
# coordinate variables
'time' : {'short_name': 'time',
'long_name': 'Time',
'standard_name': 'time',
'units': 'seconds since 1970-1-1 00:00:00 -0', # UTC
'axis': 'T',
},
'lat' : {'short_name': 'lat',
'long_name': 'Latitude',
'standard_name': 'latitude',
'reference':'geographic coordinates',
'units': 'degrees_north',
'valid_range':(-90.,90.),
'axis': 'Y',
},
'lon' : {'short_name': 'lon',
'long_name': 'Longitude',
'standard_name': 'longitude',
'reference':'geographic coordinates',
'units': 'degrees_east',
'valid_range':(-180.,180.),
'axis': 'X',
},
'z' : {'short_name': 'z',
'long_name': 'Height',
'standard_name': 'height',
'reference':'zero at sea-surface',
'positive' : 'up',
'units': 'm',
'axis': 'Z',
},
# data variables
'wtemp': {'short_name': 'wtemp',
'long_name': 'Water Temperature',
'standard_name': 'water_temperature',
'units': 'degrees_Celsius',
},
'cond': {'short_name': 'cond',
'long_name': 'Conductivity',
'standard_name': 'conductivity',
'units': 'mS cm-1',
},
'turb': {'short_name': 'turb',
'long_name': 'Turbidity',
'standard_name': 'turbidity',
'units': 'NTU',
},
'ph': {'short_name': 'ph',
'long_name': 'pH',
'standard_name': 'ph',
'units': '',
},
'do_mg': {'short_name': 'do_mg',
'long_name': 'ROX Optical Dissolved Oxygen, Derived Concentration',
'standard_name': 'dissolved_oxygen_concentration',
'units': 'mg l-1',
},
'do_sat': {'short_name': 'do_sat',
'long_name': 'ROX Optical Dissolved Oxygen, Percent of Air Saturation',
| creator | identifier_name |
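Note: the keep mask near the end of the parser drops any sample whose datetime could not be resolved and applies the same mask to every parallel array. A compact stand-alone version of that filtering step (using isinstance, which is equivalent to the type comparison in the script) looks like this:

# Stand-alone sketch of the keep-mask filtering; data values are made up.
from datetime import datetime
import numpy as np

data = {
    'dt':    np.array([datetime(2005, 2, 1, 11, 30), np.nan,
                       datetime(2005, 2, 1, 12, 0)], dtype=object),
    'wtemp': np.array([21.3, -99999., 21.5]),
}

keep = np.array([isinstance(dt, datetime) for dt in data['dt']])
if keep.any():
    for param in data:
        data[param] = data[param][keep]   # same mask applied to every column

print(data['dt'])      # only the two rows with a parsed datetime survive
print(data['wtemp'])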
step1_L1_ProdLike_PF.py | 3B2-EA45-9A12-BECFF07760FC.root'),
# fileNames = cms.untracked.vstring('file:/data/cerminar/Phase2HLTTDRWinter20DIGI/SingleElectron_PT2to200/GEN-SIM-DIGI-RAW/PU200_110X_mcRun4_realistic_v3_ext2-v2/F32C5A21-F0E9-9149-B04A-883CC704E820.root'),
secondaryFileNames = cms.untracked.vstring(),
# eventsToProcess = cms.untracked.VEventRange('1:162232-1:162232', ),
# lumisToProcess = cms.untracked.VLuminosityBlockRange('1:978-1:978'),
)
process.options = cms.untracked.PSet(
FailPath = cms.untracked.vstring(),
IgnoreCompletely = cms.untracked.vstring(),
Rethrow = cms.untracked.vstring(),
SkipEvent = cms.untracked.vstring(),
allowUnscheduled = cms.obsolete.untracked.bool,
canDeleteEarly = cms.untracked.vstring(),
emptyRunLumiMode = cms.obsolete.untracked.string,
eventSetup = cms.untracked.PSet(
forceNumberOfConcurrentIOVs = cms.untracked.PSet(
),
numberOfConcurrentIOVs = cms.untracked.uint32(1)
),
fileMode = cms.untracked.string('FULLMERGE'),
forceEventSetupCacheClearOnNewRun = cms.untracked.bool(False),
makeTriggerResults = cms.obsolete.untracked.bool,
numberOfConcurrentLuminosityBlocks = cms.untracked.uint32(1),
numberOfConcurrentRuns = cms.untracked.uint32(1),
numberOfStreams = cms.untracked.uint32(0),
numberOfThreads = cms.untracked.uint32(1),
printDependencies = cms.untracked.bool(False),
sizeOfStackForThreadsInKB = cms.optional.untracked.uint32,
throwIfIllegalParameter = cms.untracked.bool(True),
wantSummary = cms.untracked.bool(False)
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('step1 nevts:2'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.FEVTDEBUGHLToutput = cms.OutputModule("PoolOutputModule",
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('FEVTDEBUGHLT'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:/tmp/step1_Reprocess_TrackTrigger_L1.root'),
outputCommands = process.FEVTDEBUGHLTEventContent.outputCommands,
splitLevel = cms.untracked.int32(0)
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:phase2_realistic_T15', '')
# Path and EndPath definitions
process.raw2digi_step = cms.Path(process.RawToDigi)
process.L1TrackTrigger_step = cms.Path(process.L1TrackTrigger)
process.pL1TkPrimaryVertex = cms.Path(process.L1TkPrimaryVertex)
process.pL1TkPhotonsCrystal = cms.Path(process.L1TkPhotonsCrystal)
process.pL1TkIsoElectronsCrystal = cms.Path(process.L1TkIsoElectronsCrystal)
process.pL1TkElectronsLooseCrystal = cms.Path(process.L1TkElectronsLooseCrystal)
process.pL1TkElectronsHGC = cms.Path(process.L1TkElectronsHGC)
process.pL1TkMuon = cms.Path(process.L1TkMuons+process.L1TkMuonsTP)
process.pL1TkElectronsLooseHGC = cms.Path(process.L1TkElectronsLooseHGC)
process.pL1TkElectronsEllipticMatchHGC = cms.Path(process.L1TkElectronsEllipticMatchHGC)
process.pL1TkElectronsCrystal = cms.Path(process.L1TkElectronsCrystal)
process.pL1TkPhotonsHGC = cms.Path(process.L1TkPhotonsHGC)
process.pL1TkIsoElectronsHGC = cms.Path(process.L1TkIsoElectronsHGC)
process.pL1TkElectronsEllipticMatchCrystal = cms.Path(process.L1TkElectronsEllipticMatchCrystal)
process.L1simulation_step = cms.Path(process.SimL1Emulator)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.FEVTDEBUGHLToutput_step = cms.EndPath(process.FEVTDEBUGHLToutput)
process.L1TrackTrigger.remove(process.TTTracksFromExtendedTrackletEmulation)
process.L1TrackTrigger.remove(process.TTTrackAssociatorFromPixelDigisExtended)
# load ntuplizer
process.load('L1Trigger.L1CaloTrigger.L1TCaloTriggerNtuples_cff')
# process.ntuple_step = cms.Path(process.l1CaloTriggerNtuples)
process.ntuple_step = cms.Path(process.l1CaloTriggerNtuplizer_egOnly)
process.TFileService = cms.Service(
"TFileService",
fileName = cms.string("ntuple.root")
)
process.load("L1Trigger.Phase2L1ParticleFlow.l1ParticleFlow_cff")
process.load('L1Trigger.Phase2L1ParticleFlow.l1ctLayer1_cff')
process.runPF_newemulator = cms.Path(
process.pfTracksFromL1Tracks +
process.l1ParticleFlow_calo +
process.l1ctLayer1Barrel +
process.l1ctLayer1HGCal +
process.l1ctLayer1HGCalNoTK +
process.l1ctLayer1HF +
process.l1ctLayer1 +
process.l1ctLayer1EG
)
# process.L1simulation_step.remove(process.L1TkElectronsCrystal)
# process.L1simulation_step.remove(process.L1TkElectronsLooseCrystal)
# process.L1simulation_step.remove(process.L1TkIsoElectronsCrystal)
# process.L1simulation_step.remove(process.L1TkElectronsHGC)
# process.L1simulation_step.remove(process.L1TkIsoElectronsHGC)
# Schedule definition
process.schedule = cms.Schedule(process.raw2digi_step,process.L1TrackTrigger_step,process.L1simulation_step,process.ntuple_step)
# process.schedule = cms.Schedule(process.raw2digi_step,process.L1simulation_step,process.runPF_newemulator,process.ntuple_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
#Setup FWK for multithreaded
process.options.numberOfThreads=cms.untracked.uint32(1)
process.options.numberOfStreams=cms.untracked.uint32(0)
process.options.numberOfConcurrentLuminosityBlocks=cms.untracked.uint32(1)
process.options.SkipEvent = cms.untracked.vstring('ProductNotFound')
# customisation of the process.
# Automatic addition of the customisation function from SLHCUpgradeSimulations.Configuration.aging
from SLHCUpgradeSimulations.Configuration.aging import customise_aging_1000
#call to customisation function customise_aging_1000 imported from SLHCUpgradeSimulations.Configuration.aging
process = customise_aging_1000(process)
# Automatic addition of the customisation function from L1Trigger.Configuration.customisePhase2TTNoMC
from L1Trigger.Configuration.customisePhase2TTNoMC import customisePhase2TTNoMC
#call to customisation function customisePhase2TTNoMC imported from L1Trigger.Configuration.customisePhase2TTNoMC
process = customisePhase2TTNoMC(process)
# Automatic addition of the customisation function from Configuration.DataProcessing.Utils
from Configuration.DataProcessing.Utils import addMonitoring
#call to customisation function addMonitoring imported from Configuration.DataProcessing.Utils
process = addMonitoring(process)
# process.SimpleMemoryCheck = cms.Service("SimpleMemoryCheck",
# ignoreTotal = cms.untracked.int32(1),
# oncePerEventMode=cms.untracked.bool(True)
# )
# End of customisation functions
# Customisation from command line
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
# define regions
def goRegional(postfix="", relativeCoordinates=False):
| overlap=0.25 # 0.3
getattr(process, 'l1pfProducer'+postfix+'Barrel').regions = cms.VPSet(
cms.PSet(
etaBoundaries = cms.vdouble(-1.5, -0.5, 0.5, 1.5),
etaExtra = cms.double(overlap),
phiExtra = cms.double(overlap),
phiSlices = cms.uint32(9)
)
)
getattr(process, 'l1pfProducer'+postfix+'HGCalNoTK').regions = cms.VPSet(
cms.PSet(
etaBoundaries = cms.vdouble(-3, -2.5),
etaExtra = cms.double(overlap),
phiExtra = cms.double(overlap),
phiSlices = cms.uint32(9)
),
cms.PSet(
etaBoundaries = cms.vdouble(2.5, 3),
etaExtra = cms.double(overlap),
phiExtra = cms.double(overlap), | identifier_body |
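Note: the customisation calls above all follow one convention, a function receives the process object, adjusts it, and returns it, so several customisers can be chained in sequence. The sketch below illustrates that shape with a plain Python stand-in for cms.Process (DummyProcess and both customisers are hypothetical).

# Plain-Python illustration of the "process = customise(process)" convention.
class DummyProcess:
    def __init__(self):
        self.options = {}

def customise_threads(process, n=1):
    process.options['numberOfThreads'] = n
    return process

def customise_skip_missing_products(process):
    process.options['SkipEvent'] = ['ProductNotFound']
    return process

process = DummyProcess()
for customise in (customise_threads, customise_skip_missing_products):
    process = customise(process)     # same shape as process = customise_aging_1000(process)

print(process.options)               # {'numberOfThreads': 1, 'SkipEvent': ['ProductNotFound']}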
|
step1_L1_ProdLike_PF.py | FULLMERGE'),
forceEventSetupCacheClearOnNewRun = cms.untracked.bool(False),
makeTriggerResults = cms.obsolete.untracked.bool,
numberOfConcurrentLuminosityBlocks = cms.untracked.uint32(1),
numberOfConcurrentRuns = cms.untracked.uint32(1),
numberOfStreams = cms.untracked.uint32(0),
numberOfThreads = cms.untracked.uint32(1),
printDependencies = cms.untracked.bool(False),
sizeOfStackForThreadsInKB = cms.optional.untracked.uint32,
throwIfIllegalParameter = cms.untracked.bool(True),
wantSummary = cms.untracked.bool(False)
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('step1 nevts:2'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.FEVTDEBUGHLToutput = cms.OutputModule("PoolOutputModule",
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('FEVTDEBUGHLT'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:/tmp/step1_Reprocess_TrackTrigger_L1.root'),
outputCommands = process.FEVTDEBUGHLTEventContent.outputCommands,
splitLevel = cms.untracked.int32(0)
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:phase2_realistic_T15', '')
# Path and EndPath definitions
process.raw2digi_step = cms.Path(process.RawToDigi)
process.L1TrackTrigger_step = cms.Path(process.L1TrackTrigger)
process.pL1TkPrimaryVertex = cms.Path(process.L1TkPrimaryVertex)
process.pL1TkPhotonsCrystal = cms.Path(process.L1TkPhotonsCrystal)
process.pL1TkIsoElectronsCrystal = cms.Path(process.L1TkIsoElectronsCrystal)
process.pL1TkElectronsLooseCrystal = cms.Path(process.L1TkElectronsLooseCrystal)
process.pL1TkElectronsHGC = cms.Path(process.L1TkElectronsHGC)
process.pL1TkMuon = cms.Path(process.L1TkMuons+process.L1TkMuonsTP)
process.pL1TkElectronsLooseHGC = cms.Path(process.L1TkElectronsLooseHGC)
process.pL1TkElectronsEllipticMatchHGC = cms.Path(process.L1TkElectronsEllipticMatchHGC)
process.pL1TkElectronsCrystal = cms.Path(process.L1TkElectronsCrystal)
process.pL1TkPhotonsHGC = cms.Path(process.L1TkPhotonsHGC)
process.pL1TkIsoElectronsHGC = cms.Path(process.L1TkIsoElectronsHGC)
process.pL1TkElectronsEllipticMatchCrystal = cms.Path(process.L1TkElectronsEllipticMatchCrystal)
process.L1simulation_step = cms.Path(process.SimL1Emulator)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.FEVTDEBUGHLToutput_step = cms.EndPath(process.FEVTDEBUGHLToutput)
process.L1TrackTrigger.remove(process.TTTracksFromExtendedTrackletEmulation)
process.L1TrackTrigger.remove(process.TTTrackAssociatorFromPixelDigisExtended)
# load ntuplizer
process.load('L1Trigger.L1CaloTrigger.L1TCaloTriggerNtuples_cff')
# process.ntuple_step = cms.Path(process.l1CaloTriggerNtuples)
process.ntuple_step = cms.Path(process.l1CaloTriggerNtuplizer_egOnly)
process.TFileService = cms.Service(
"TFileService",
fileName = cms.string("ntuple.root")
)
process.load("L1Trigger.Phase2L1ParticleFlow.l1ParticleFlow_cff")
process.load('L1Trigger.Phase2L1ParticleFlow.l1ctLayer1_cff')
process.runPF_newemulator = cms.Path(
process.pfTracksFromL1Tracks +
process.l1ParticleFlow_calo +
process.l1ctLayer1Barrel +
process.l1ctLayer1HGCal +
process.l1ctLayer1HGCalNoTK +
process.l1ctLayer1HF +
process.l1ctLayer1 +
process.l1ctLayer1EG
)
# process.L1simulation_step.remove(process.L1TkElectronsCrystal)
# process.L1simulation_step.remove(process.L1TkElectronsLooseCrystal)
# process.L1simulation_step.remove(process.L1TkIsoElectronsCrystal)
# process.L1simulation_step.remove(process.L1TkElectronsHGC)
# process.L1simulation_step.remove(process.L1TkIsoElectronsHGC)
# Schedule definition
process.schedule = cms.Schedule(process.raw2digi_step,process.L1TrackTrigger_step,process.L1simulation_step,process.ntuple_step)
# process.schedule = cms.Schedule(process.raw2digi_step,process.L1simulation_step,process.runPF_newemulator,process.ntuple_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
#Setup FWK for multithreaded
process.options.numberOfThreads=cms.untracked.uint32(1)
process.options.numberOfStreams=cms.untracked.uint32(0)
process.options.numberOfConcurrentLuminosityBlocks=cms.untracked.uint32(1)
process.options.SkipEvent = cms.untracked.vstring('ProductNotFound')
# customisation of the process.
# Automatic addition of the customisation function from SLHCUpgradeSimulations.Configuration.aging
from SLHCUpgradeSimulations.Configuration.aging import customise_aging_1000
#call to customisation function customise_aging_1000 imported from SLHCUpgradeSimulations.Configuration.aging
process = customise_aging_1000(process)
# Automatic addition of the customisation function from L1Trigger.Configuration.customisePhase2TTNoMC
from L1Trigger.Configuration.customisePhase2TTNoMC import customisePhase2TTNoMC
#call to customisation function customisePhase2TTNoMC imported from L1Trigger.Configuration.customisePhase2TTNoMC
process = customisePhase2TTNoMC(process)
# Automatic addition of the customisation function from Configuration.DataProcessing.Utils
from Configuration.DataProcessing.Utils import addMonitoring
#call to customisation function addMonitoring imported from Configuration.DataProcessing.Utils
process = addMonitoring(process)
# process.SimpleMemoryCheck = cms.Service("SimpleMemoryCheck",
# ignoreTotal = cms.untracked.int32(1),
# oncePerEventMode=cms.untracked.bool(True)
# )
# End of customisation functions
# Customisation from command line
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
# define regions
def goRegional(postfix="", relativeCoordinates=False):
overlap=0.25 # 0.3
getattr(process, 'l1pfProducer'+postfix+'Barrel').regions = cms.VPSet(
cms.PSet(
etaBoundaries = cms.vdouble(-1.5, -0.5, 0.5, 1.5),
etaExtra = cms.double(overlap),
phiExtra = cms.double(overlap),
phiSlices = cms.uint32(9)
)
)
getattr(process, 'l1pfProducer'+postfix+'HGCalNoTK').regions = cms.VPSet(
cms.PSet(
etaBoundaries = cms.vdouble(-3, -2.5),
etaExtra = cms.double(overlap),
phiExtra = cms.double(overlap),
phiSlices = cms.uint32(9)
),
cms.PSet(
etaBoundaries = cms.vdouble(2.5, 3),
etaExtra = cms.double(overlap),
phiExtra = cms.double(overlap),
phiSlices = cms.uint32(9)
)
)
getattr(process, 'l1pfProducer'+postfix+'HGCal').regions = cms.VPSet(
cms.PSet(
etaBoundaries = cms.vdouble(-2.5, -1.5),
etaExtra = cms.double(overlap),
phiExtra = cms.double(overlap),
phiSlices = cms.uint32(9)
),
cms.PSet(
etaBoundaries = cms.vdouble(1.5, 2.5),
etaExtra = cms.double(overlap),
phiExtra = cms.double(overlap),
phiSlices = cms.uint32(9)
)
)
getattr(process, 'l1pfProducer'+postfix+'HF').regions = cms.VPSet(
cms.PSet(
etaBoundaries = cms.vdouble(-5, -4, -3),
etaExtra = cms.double(overlap),
phiExtra = cms.double(overlap),
phiSlices = cms.uint32(9)
),
cms.PSet(
etaBoundaries = cms.vdouble(3, 4, 5),
etaExtra = cms.double(overlap),
phiExtra = cms.double(overlap),
phiSlices = cms.uint32(9)
)
)
for D in 'Barrel', 'HGCal', 'HGCalNoTK', 'HF':
| getattr(process, 'l1pfProducer'+postfix+D).useRelativeRegionalCoordinates = relativeCoordinates | conditional_block |
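Note: goRegional above carves each detector into eta slabs (etaBoundaries) times equal phi wedges (phiSlices), padded by etaExtra and phiExtra for overlap. The arithmetic behind those PSets is sketched below in plain Python; the phi origin at -pi is an assumption for illustration, and the real producers build cms.PSet objects inside CMSSW.

# Plain-Python sketch of the region geometry implied by the PSets above.
import math

def build_regions(eta_boundaries, phi_slices, overlap=0.25):
    regions = []
    phi_width = 2 * math.pi / phi_slices
    for eta_lo, eta_hi in zip(eta_boundaries[:-1], eta_boundaries[1:]):
        for i in range(phi_slices):
            phi_lo = -math.pi + i * phi_width          # assumed origin, for illustration
            regions.append({
                'eta': (eta_lo - overlap, eta_hi + overlap),              # etaExtra pad
                'phi': (phi_lo - overlap, phi_lo + phi_width + overlap),  # phiExtra pad
            })
    return regions

barrel = build_regions([-1.5, -0.5, 0.5, 1.5], phi_slices=9)
print(len(barrel))    # 3 eta slabs x 9 phi wedges = 27 barrel regions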
|
step1_L1_ProdLike_PF.py | process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.RawToDigi_Data_cff')
process.load('Configuration.StandardSequences.L1TrackTrigger_cff')
process.load('Configuration.StandardSequences.SimL1Emulator_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(10),
output = cms.optional.untracked.allowed(cms.int32,cms.PSet)
)
# Input source
process.source = cms.Source("PoolSource",
# fileNames = cms.untracked.vstring('file:/data/cerminar/Phase2HLTTDRSummer20ReRECOMiniAOD/DoubleElectron_FlatPt-1To100/GEN-SIM-DIGI-RAW-MINIAOD/PU200_111X_mcRun4_realistic_T15_v1-v2/E2F32293-BA24-C646-8060-CE3B4A9E5D4B.root'),
fileNames = cms.untracked.vstring('file:/data/cerminar/Phase2HLTTDRSummer20ReRECOMiniAOD/TT_TuneCP5_14TeV-powheg-pythia8/FEVT/PU200_111X_mcRun4_realistic_T15_v1-v2/003ACFBC-23B2-EA45-9A12-BECFF07760FC.root'),
# fileNames = cms.untracked.vstring('file:/data/cerminar/Phase2HLTTDRWinter20DIGI/SingleElectron_PT2to200/GEN-SIM-DIGI-RAW/PU200_110X_mcRun4_realistic_v3_ext2-v2/F32C5A21-F0E9-9149-B04A-883CC704E820.root'),
secondaryFileNames = cms.untracked.vstring(),
# eventsToProcess = cms.untracked.VEventRange('1:162232-1:162232', ),
# lumisToProcess = cms.untracked.VLuminosityBlockRange('1:978-1:978'),
)
process.options = cms.untracked.PSet(
FailPath = cms.untracked.vstring(),
IgnoreCompletely = cms.untracked.vstring(),
Rethrow = cms.untracked.vstring(),
SkipEvent = cms.untracked.vstring(),
allowUnscheduled = cms.obsolete.untracked.bool,
canDeleteEarly = cms.untracked.vstring(),
emptyRunLumiMode = cms.obsolete.untracked.string,
eventSetup = cms.untracked.PSet(
forceNumberOfConcurrentIOVs = cms.untracked.PSet(
),
numberOfConcurrentIOVs = cms.untracked.uint32(1)
),
fileMode = cms.untracked.string('FULLMERGE'),
forceEventSetupCacheClearOnNewRun = cms.untracked.bool(False),
makeTriggerResults = cms.obsolete.untracked.bool,
numberOfConcurrentLuminosityBlocks = cms.untracked.uint32(1),
numberOfConcurrentRuns = cms.untracked.uint32(1),
numberOfStreams = cms.untracked.uint32(0),
numberOfThreads = cms.untracked.uint32(1),
printDependencies = cms.untracked.bool(False),
sizeOfStackForThreadsInKB = cms.optional.untracked.uint32,
throwIfIllegalParameter = cms.untracked.bool(True),
wantSummary = cms.untracked.bool(False)
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('step1 nevts:2'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.FEVTDEBUGHLToutput = cms.OutputModule("PoolOutputModule",
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('FEVTDEBUGHLT'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:/tmp/step1_Reprocess_TrackTrigger_L1.root'),
outputCommands = process.FEVTDEBUGHLTEventContent.outputCommands,
splitLevel = cms.untracked.int32(0)
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:phase2_realistic_T15', '')
# Path and EndPath definitions
process.raw2digi_step = cms.Path(process.RawToDigi)
process.L1TrackTrigger_step = cms.Path(process.L1TrackTrigger)
process.pL1TkPrimaryVertex = cms.Path(process.L1TkPrimaryVertex)
process.pL1TkPhotonsCrystal = cms.Path(process.L1TkPhotonsCrystal)
process.pL1TkIsoElectronsCrystal = cms.Path(process.L1TkIsoElectronsCrystal)
process.pL1TkElectronsLooseCrystal = cms.Path(process.L1TkElectronsLooseCrystal)
process.pL1TkElectronsHGC = cms.Path(process.L1TkElectronsHGC)
process.pL1TkMuon = cms.Path(process.L1TkMuons+process.L1TkMuonsTP)
process.pL1TkElectronsLooseHGC = cms.Path(process.L1TkElectronsLooseHGC)
process.pL1TkElectronsEllipticMatchHGC = cms.Path(process.L1TkElectronsEllipticMatchHGC)
process.pL1TkElectronsCrystal = cms.Path(process.L1TkElectronsCrystal)
process.pL1TkPhotonsHGC = cms.Path(process.L1TkPhotonsHGC)
process.pL1TkIsoElectronsHGC = cms.Path(process.L1TkIsoElectronsHGC)
process.pL1TkElectronsEllipticMatchCrystal = cms.Path(process.L1TkElectronsEllipticMatchCrystal)
process.L1simulation_step = cms.Path(process.SimL1Emulator)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.FEVTDEBUGHLToutput_step = cms.EndPath(process.FEVTDEBUGHLToutput)
process.L1TrackTrigger.remove(process.TTTracksFromExtendedTrackletEmulation)
process.L1TrackTrigger.remove(process.TTTrackAssociatorFromPixelDigisExtended)
# load ntuplizer
process.load('L1Trigger.L1CaloTrigger.L1TCaloTriggerNtuples_cff')
# process.ntuple_step = cms.Path(process.l1CaloTriggerNtuples)
process.ntuple_step = cms.Path(process.l1CaloTriggerNtuplizer_egOnly)
process.TFileService = cms.Service(
"TFileService",
fileName = cms.string("ntuple.root")
)
process.load("L1Trigger.Phase2L1ParticleFlow.l1ParticleFlow_cff")
process.load('L1Trigger.Phase2L1ParticleFlow.l1ctLayer1_cff')
process.runPF_newemulator = cms.Path(
process.pfTracksFromL1Tracks +
process.l1ParticleFlow_calo +
process.l1ctLayer1Barrel +
process.l1ctLayer1HGCal +
process.l1ctLayer1HGCalNoTK +
process.l1ctLayer1HF +
process.l1ctLayer1 +
process.l1ctLayer1EG
)
# process.L1simulation_step.remove(process.L1TkElectronsCrystal)
# process.L1simulation_step.remove(process.L1TkElectronsLooseCrystal)
# process.L1simulation_step.remove(process.L1TkIsoElectronsCrystal)
# process.L1simulation_step.remove(process.L1TkElectronsHGC)
# process.L1simulation_step.remove(process.L1TkIsoElectronsHGC)
# Schedule definition
process.schedule = cms.Schedule(process.raw2digi_step,process.L1TrackTrigger_step,process.L1simulation_step,process.ntuple_step)
# process.schedule = cms.Schedule(process.raw2digi_step,process.L1simulation_step,process.runPF_newemulator,process.ntuple_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
#Setup FWK for multithreaded
process.options.numberOfThreads=cms.untracked.uint32(1)
process.options.numberOfStreams=cms.untracked.uint32(0)
process.options.numberOfConcurrentLuminosityBlocks=cms.untracked.uint32(1)
process.options.SkipEvent = cms.untracked.vstring('ProductNotFound')
# customisation of the process.
# Automatic addition of the customisation function from SLHCUpgradeSimulations.Configuration.aging
from SLHCUpgradeSimulations.Configuration.aging import customise_aging_1000
#call to customisation function customise_aging_1000 imported from SLHCUpgradeSimulations.Configuration.aging
process = customise_aging_1000(process)
# Automatic addition of the customisation function from L1Trigger.Configuration.customisePhase2TTNoMC
from L1Trigger.Configuration.customisePhase2TTNoMC import customisePhase2TTNoMC
#call to custom | process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.Geometry.GeometryExtended2026D49Reco_cff') | random_line_split |
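Note: the FEVTDEBUGHLT output module above selects branches through its outputCommands, an ordered list of keep/drop statements with wildcards. The sketch below mimics that selection idea with fnmatch so the ordering rule is visible; the branch names are made up and the real matching is done internally by the CMSSW PoolOutputModule with its own rules.

# Conceptual illustration of ordered keep/drop selection; not the CMSSW implementation.
from fnmatch import fnmatch

def select_branches(branches, commands):
    selected = set()
    for command in commands:                 # later statements override earlier ones
        action, pattern = command.split(None, 1)
        for branch in branches:
            if fnmatch(branch, pattern):
                (selected.add if action == 'keep' else selected.discard)(branch)
    return sorted(selected)

branches = ['l1tPFCandidates_l1ctLayer1__HLT',        # hypothetical branch names
            'recoTracks_generalTracks__RECO']
print(select_branches(branches, ['drop *', 'keep l1tPFCandidates_*_*_*']))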
|
step1_L1_ProdLike_PF.py | cms.optional.untracked.allowed(cms.int32,cms.PSet)
)
# Input source
process.source = cms.Source("PoolSource",
# fileNames = cms.untracked.vstring('file:/data/cerminar/Phase2HLTTDRSummer20ReRECOMiniAOD/DoubleElectron_FlatPt-1To100/GEN-SIM-DIGI-RAW-MINIAOD/PU200_111X_mcRun4_realistic_T15_v1-v2/E2F32293-BA24-C646-8060-CE3B4A9E5D4B.root'),
fileNames = cms.untracked.vstring('file:/data/cerminar/Phase2HLTTDRSummer20ReRECOMiniAOD/TT_TuneCP5_14TeV-powheg-pythia8/FEVT/PU200_111X_mcRun4_realistic_T15_v1-v2/003ACFBC-23B2-EA45-9A12-BECFF07760FC.root'),
# fileNames = cms.untracked.vstring('file:/data/cerminar/Phase2HLTTDRWinter20DIGI/SingleElectron_PT2to200/GEN-SIM-DIGI-RAW/PU200_110X_mcRun4_realistic_v3_ext2-v2/F32C5A21-F0E9-9149-B04A-883CC704E820.root'),
secondaryFileNames = cms.untracked.vstring(),
# eventsToProcess = cms.untracked.VEventRange('1:162232-1:162232', ),
# lumisToProcess = cms.untracked.VLuminosityBlockRange('1:978-1:978'),
)
process.options = cms.untracked.PSet(
FailPath = cms.untracked.vstring(),
IgnoreCompletely = cms.untracked.vstring(),
Rethrow = cms.untracked.vstring(),
SkipEvent = cms.untracked.vstring(),
allowUnscheduled = cms.obsolete.untracked.bool,
canDeleteEarly = cms.untracked.vstring(),
emptyRunLumiMode = cms.obsolete.untracked.string,
eventSetup = cms.untracked.PSet(
forceNumberOfConcurrentIOVs = cms.untracked.PSet(
),
numberOfConcurrentIOVs = cms.untracked.uint32(1)
),
fileMode = cms.untracked.string('FULLMERGE'),
forceEventSetupCacheClearOnNewRun = cms.untracked.bool(False),
makeTriggerResults = cms.obsolete.untracked.bool,
numberOfConcurrentLuminosityBlocks = cms.untracked.uint32(1),
numberOfConcurrentRuns = cms.untracked.uint32(1),
numberOfStreams = cms.untracked.uint32(0),
numberOfThreads = cms.untracked.uint32(1),
printDependencies = cms.untracked.bool(False),
sizeOfStackForThreadsInKB = cms.optional.untracked.uint32,
throwIfIllegalParameter = cms.untracked.bool(True),
wantSummary = cms.untracked.bool(False)
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('step1 nevts:2'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.FEVTDEBUGHLToutput = cms.OutputModule("PoolOutputModule",
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('FEVTDEBUGHLT'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:/tmp/step1_Reprocess_TrackTrigger_L1.root'),
outputCommands = process.FEVTDEBUGHLTEventContent.outputCommands,
splitLevel = cms.untracked.int32(0)
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:phase2_realistic_T15', '')
# Path and EndPath definitions
process.raw2digi_step = cms.Path(process.RawToDigi)
process.L1TrackTrigger_step = cms.Path(process.L1TrackTrigger)
process.pL1TkPrimaryVertex = cms.Path(process.L1TkPrimaryVertex)
process.pL1TkPhotonsCrystal = cms.Path(process.L1TkPhotonsCrystal)
process.pL1TkIsoElectronsCrystal = cms.Path(process.L1TkIsoElectronsCrystal)
process.pL1TkElectronsLooseCrystal = cms.Path(process.L1TkElectronsLooseCrystal)
process.pL1TkElectronsHGC = cms.Path(process.L1TkElectronsHGC)
process.pL1TkMuon = cms.Path(process.L1TkMuons+process.L1TkMuonsTP)
process.pL1TkElectronsLooseHGC = cms.Path(process.L1TkElectronsLooseHGC)
process.pL1TkElectronsEllipticMatchHGC = cms.Path(process.L1TkElectronsEllipticMatchHGC)
process.pL1TkElectronsCrystal = cms.Path(process.L1TkElectronsCrystal)
process.pL1TkPhotonsHGC = cms.Path(process.L1TkPhotonsHGC)
process.pL1TkIsoElectronsHGC = cms.Path(process.L1TkIsoElectronsHGC)
process.pL1TkElectronsEllipticMatchCrystal = cms.Path(process.L1TkElectronsEllipticMatchCrystal)
process.L1simulation_step = cms.Path(process.SimL1Emulator)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.FEVTDEBUGHLToutput_step = cms.EndPath(process.FEVTDEBUGHLToutput)
process.L1TrackTrigger.remove(process.TTTracksFromExtendedTrackletEmulation)
process.L1TrackTrigger.remove(process.TTTrackAssociatorFromPixelDigisExtended)
# load ntuplizer
process.load('L1Trigger.L1CaloTrigger.L1TCaloTriggerNtuples_cff')
# process.ntuple_step = cms.Path(process.l1CaloTriggerNtuples)
process.ntuple_step = cms.Path(process.l1CaloTriggerNtuplizer_egOnly)
process.TFileService = cms.Service(
"TFileService",
fileName = cms.string("ntuple.root")
)
process.load("L1Trigger.Phase2L1ParticleFlow.l1ParticleFlow_cff")
process.load('L1Trigger.Phase2L1ParticleFlow.l1ctLayer1_cff')
process.runPF_newemulator = cms.Path(
process.pfTracksFromL1Tracks +
process.l1ParticleFlow_calo +
process.l1ctLayer1Barrel +
process.l1ctLayer1HGCal +
process.l1ctLayer1HGCalNoTK +
process.l1ctLayer1HF +
process.l1ctLayer1 +
process.l1ctLayer1EG
)
# process.L1simulation_step.remove(process.L1TkElectronsCrystal)
# process.L1simulation_step.remove(process.L1TkElectronsLooseCrystal)
# process.L1simulation_step.remove(process.L1TkIsoElectronsCrystal)
# process.L1simulation_step.remove(process.L1TkElectronsHGC)
# process.L1simulation_step.remove(process.L1TkIsoElectronsHGC)
# Schedule definition
process.schedule = cms.Schedule(process.raw2digi_step,process.L1TrackTrigger_step,process.L1simulation_step,process.ntuple_step)
# process.schedule = cms.Schedule(process.raw2digi_step,process.L1simulation_step,process.runPF_newemulator,process.ntuple_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
#Setup FWK for multithreaded
process.options.numberOfThreads=cms.untracked.uint32(1)
process.options.numberOfStreams=cms.untracked.uint32(0)
process.options.numberOfConcurrentLuminosityBlocks=cms.untracked.uint32(1)
process.options.SkipEvent = cms.untracked.vstring('ProductNotFound')
# customisation of the process.
# Automatic addition of the customisation function from SLHCUpgradeSimulations.Configuration.aging
from SLHCUpgradeSimulations.Configuration.aging import customise_aging_1000
#call to customisation function customise_aging_1000 imported from SLHCUpgradeSimulations.Configuration.aging
process = customise_aging_1000(process)
# Automatic addition of the customisation function from L1Trigger.Configuration.customisePhase2TTNoMC
from L1Trigger.Configuration.customisePhase2TTNoMC import customisePhase2TTNoMC
#call to customisation function customisePhase2TTNoMC imported from L1Trigger.Configuration.customisePhase2TTNoMC
process = customisePhase2TTNoMC(process)
# Automatic addition of the customisation function from Configuration.DataProcessing.Utils
from Configuration.DataProcessing.Utils import addMonitoring
#call to customisation function addMonitoring imported from Configuration.DataProcessing.Utils
process = addMonitoring(process)
# process.SimpleMemoryCheck = cms.Service("SimpleMemoryCheck",
# ignoreTotal = cms.untracked.int32(1),
# oncePerEventMode=cms.untracked.bool(True)
# )
# End of customisation functions
# Customisation from command line
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
# define regions
def | goRegional | identifier_name |
|
suite.go | }
config.DefaultReporterConfig.SlowSpecThreshold = slowSpecThreshold
config.DefaultReporterConfig.Verbose = testing.Verbose()
reporters = append(reporters, gr.NewJUnitReporter(filepath.Join(reportsDir, fmt.Sprintf("%s.junit.xml", suiteId))))
RegisterFailHandler(Fail)
RunSpecsWithDefaultAndCustomReporters(t, fmt.Sprintf("Jenkins X E2E tests: %s", suiteId), reporters)
}
var BeforeSuiteCallback = func() {
err := ensureConfiguration()
utils.ExpectNoError(err)
WorkDir, err := ioutil.TempDir("", TempDirPrefix)
Expect(err).NotTo(HaveOccurred())
err = os.MkdirAll(WorkDir, 0760)
Expect(err).NotTo(HaveOccurred())
Expect(WorkDir).To(BeADirectory())
AssignWorkDirValue(WorkDir)
}
var SynchronizedAfterSuiteCallback = func() {
// Cleanup workdir as usual
cleanFlag := os.Getenv("JX_DISABLE_CLEAN_DIR")
if strings.ToLower(cleanFlag) != "true" {
os.RemoveAll(WorkDir)
Expect(WorkDir).ToNot(BeADirectory())
}
}
func ensureConfiguration() error {
cwd, err := os.Getwd()
if err != nil {
return errors.WithStack(err)
}
_, found := os.LookupEnv("BDD_JX")
if !found {
_ = os.Setenv("BDD_JX", runner.Jx)
}
r := runner.New(cwd, &TimeoutSessionWait, 0)
version, err := r.RunWithOutput("--version")
if err != nil {
return errors.WithStack(err)
}
factory := cmd.NewFactory()
kubeClient, ns, err := factory.CreateKubeClient()
if err != nil {
return errors.Wrap(errors.WithStack(err), "failed to create kubeClient")
}
jxClient, _, err := factory.CreateJXClient()
if err != nil {
return errors.Wrap(errors.WithStack(err), "failed to create jxClient")
}
gitOrganisation := os.Getenv("GIT_ORGANISATION")
if gitOrganisation == "" {
gitOrganisation, err = findDefaultOrganisation(kubeClient, jxClient, ns)
if err != nil {
return errors.Wrapf(errors.WithStack(err), "failed to find gitOrganisation in namespace %s", ns)
}
if gitOrganisation == "" {
gitOrganisation = "jenkins-x-tests"
}
_ = os.Setenv("GIT_ORGANISATION", gitOrganisation)
}
gitProviderUrl := os.Getenv("GIT_PROVIDER_URL")
if gitProviderUrl == "" {
gitProviderUrl = "https://github.com"
_ = os.Setenv("GIT_PROVIDER_URL", gitProviderUrl)
}
gitKind := os.Getenv("GIT_KIND")
if gitKind == "" {
gitKind = "github"
os.Setenv("GIT_KIND", gitKind)
}
disableDeleteAppStr := os.Getenv("JX_DISABLE_DELETE_APP")
disableDeleteApp := "is set. Apps created in the test run will NOT be deleted"
if disableDeleteAppStr == "true" || disableDeleteAppStr == "1" || disableDeleteAppStr == "on" {
disableDeleteApp = "is not set. If you would like to disable the automatic deletion of apps created by the tests set this variable to TRUE."
}
disableDeleteRepoStr := os.Getenv("JX_DISABLE_DELETE_REPO")
disableDeleteRepo := "is set. Repos created in the test run will NOT be deleted"
if disableDeleteRepoStr == "true" || disableDeleteRepoStr == "1" || disableDeleteRepoStr == "on" {
disableDeleteRepo = "is not set. If you would like to disable the automatic deletion of repos created by the tests set this variable to TRUE."
}
disableWaitForFirstReleaseStr := os.Getenv("JX_DISABLE_WAIT_FOR_FIRST_RELEASE")
disableWaitForFirstRelease := "is set. Will not wait for build to be promoted to staging"
if disableWaitForFirstReleaseStr == "true" || disableWaitForFirstReleaseStr == "1" || disableWaitForFirstReleaseStr == "on" {
disableWaitForFirstRelease = "is not set. If you would like to disable waiting for the build to be promoted to staging set this variable to TRUE"
}
enableChatOpsTestLogStr := "is not set. ChatOps tests will not be run as part of quickstart tests. If you would like to run those tests, set this variable to TRUE"
if EnableChatOpsTests == "true" {
enableChatOpsTestLogStr = "is set. ChatOps tests will be run as part of quickstart tests"
}
disablePACheckStr := "is not set. PipelineActivity update tests will be run as part of PR-related tests. If you would like to not run those tests, set this variable to TRUE"
if DisablePipelineActivityCheck == "true" {
disablePACheckStr = "is set. PipelineActivity update tests will NOT be run as part of PR-related tests"
}
includeAppsStr := os.Getenv("JX_BDD_INCLUDE_APPS")
includeApps := "is not set"
if includeAppsStr != "" {
includeApps = fmt.Sprintf("is set to %s", includeAppsStr)
}
bddTimeoutBuildCompletes := os.Getenv("BDD_TIMEOUT_BUILD_COMPLETES")
if bddTimeoutBuildCompletes == "" {
_ = os.Setenv("BDD_TIMEOUT_BUILD_COMPLETES", "60")
}
bddTimeoutBuildRunningInStaging := os.Getenv("BDD_TIMEOUT_BUILD_RUNNING_IN_STAGING")
if bddTimeoutBuildRunningInStaging == "" {
_ = os.Setenv("BDD_TIMEOUT_BUILD_RUNNING_IN_STAGING", "60")
}
bddTimeoutURLReturns := os.Getenv("BDD_TIMEOUT_URL_RETURNS")
if bddTimeoutURLReturns == "" {
_ = os.Setenv("BDD_TIMEOUT_URL_RETURNS", "5")
}
bddTimeoutCmdLine := os.Getenv("BDD_TIMEOUT_CMD_LINE")
if bddTimeoutCmdLine == "" {
_ = os.Setenv("BDD_TIMEOUT_CMD_LINE", "1")
}
bddTimeoutAppTests := os.Getenv("BDD_TIMEOUT_APP_TESTS")
if bddTimeoutAppTests == "" {
_ = os.Setenv("BDD_TIMEOUT_APP_TESTS", "60")
}
bddTimeoutSessionWait := os.Getenv("BDD_TIMEOUT_SESSION_WAIT")
if bddTimeoutSessionWait == "" {
_ = os.Setenv("BDD_TIMEOUT_SESSION_WAIT", "60")
}
bddTimeoutDevpod := os.Getenv("BDD_TIMEOUT_DEVPOD")
if bddTimeoutDevpod == "" {
_ = os.Setenv("BDD_TIMEOUT_DEVPOD", "15")
}
gheUser := os.Getenv("GHE_USER")
if gheUser == "" {
gheUser = "dev1"
_ = os.Setenv("GHE_USER", gheUser)
}
gheProviderUrl := os.Getenv("GHE_PROVIDER_URL")
if gheProviderUrl == "" {
gheProviderUrl = "https://github.beescloud.com"
_ = os.Setenv("GHE_PROVIDER_URL", gheProviderUrl)
}
utils.LogInfof("BDD_JX: %s\n", os.Getenv("BDD_JX"))
utils.LogInfof("jx version: %s\n", version)
utils.LogInfof("GIT_ORGANISATION: %s\n", gitOrganisation)
utils.LogInfof("GIT_PROVIDER_URL: %s\n", gitProviderUrl)
utils.LogInfof("GIT_KIND: %s\n", gitKind)
utils.LogInfof("JX_DISABLE_DELETE_APP: %s\n", disableDeleteApp)
utils.LogInfof("JX_DISABLE_DELETE_REPO: %s\n", disableDeleteRepo)
utils.LogInfof("JX_DISABLE_WAIT_FOR_FIRST_RELEASE: %s\n", disableWaitForFirstRelease)
utils.LogInfof("BDD_ENABLE_TEST_CHATOPS_COMMANDS: %s\n", enableChatOpsTestLogStr)
utils.LogInfof("JX_BDD_INCLUDE_APPS: %s\n", includeApps)
utils.LogInfof("BDD_DISABLE_PIPELINEACTIVITY_CHECK: %s\n", disablePACheckStr)
utils.LogInfof("BDD_TIMEOUT_BUILD_COMPLETES timeout value: %s\n", os.Getenv("BDD_TIMEOUT_BUILD_COMPLETES"))
utils.LogInfof("BDD_TIMEOUT_BUILD_RUNNING_IN_STAGING timeout value: %s\n", os.Getenv("BDD_TIMEOUT_BUILD_RUNNING_IN_STAGING"))
utils.LogInfof("BDD_TIMEOUT_URL_RETURNS timeout value: %s\n", os.Getenv("BDD_TIMEOUT | {
reportsDir := os.Getenv("REPORTS_DIR")
if reportsDir == "" {
reportsDir = filepath.Join("../", "build", "reports")
}
err := os.MkdirAll(reportsDir, 0700)
if err != nil {
t.Errorf("cannot create %s because %v", reportsDir, err)
}
reporters := make([]Reporter, 0)
slowSpecThresholdStr := os.Getenv("SLOW_SPEC_THRESHOLD")
if slowSpecThresholdStr == "" {
slowSpecThresholdStr = "50000"
_ = os.Setenv("SLOW_SPEC_THRESHOLD", slowSpecThresholdStr)
}
slowSpecThreshold, err := strconv.ParseFloat(slowSpecThresholdStr, 64)
if err != nil {
panic(err.Error()) | identifier_body |
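Note: ensureConfiguration above repeatedly applies one pattern, read an environment variable, install a default when it is empty, then parse it. The suite itself is Go/Ginkgo; the Python sketch below only restates that pattern for clarity, with defaults matching the ones above.

# Python restatement of the env-var defaulting pattern; the real suite is Go.
import os

def env_default(name, default):
    value = os.environ.get(name)
    if not value:
        os.environ[name] = default        # later lookups of the variable see the default
        return default
    return value

slow_spec_threshold = float(env_default('SLOW_SPEC_THRESHOLD', '50000'))
build_timeout_minutes = int(env_default('BDD_TIMEOUT_BUILD_COMPLETES', '60'))
print(slow_spec_threshold, build_timeout_minutes)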
|
suite.go | reportsDir == "" {
reportsDir = filepath.Join("../", "build", "reports")
}
err := os.MkdirAll(reportsDir, 0700)
if err != nil {
t.Errorf("cannot create %s because %v", reportsDir, err)
}
reporters := make([]Reporter, 0)
slowSpecThresholdStr := os.Getenv("SLOW_SPEC_THRESHOLD")
if slowSpecThresholdStr == "" {
slowSpecThresholdStr = "50000"
_ = os.Setenv("SLOW_SPEC_THRESHOLD", slowSpecThresholdStr)
}
slowSpecThreshold, err := strconv.ParseFloat(slowSpecThresholdStr, 64)
if err != nil {
panic(err.Error())
}
config.DefaultReporterConfig.SlowSpecThreshold = slowSpecThreshold
config.DefaultReporterConfig.Verbose = testing.Verbose()
reporters = append(reporters, gr.NewJUnitReporter(filepath.Join(reportsDir, fmt.Sprintf("%s.junit.xml", suiteId))))
RegisterFailHandler(Fail)
RunSpecsWithDefaultAndCustomReporters(t, fmt.Sprintf("Jenkins X E2E tests: %s", suiteId), reporters)
}
var BeforeSuiteCallback = func() {
err := ensureConfiguration()
utils.ExpectNoError(err)
WorkDir, err := ioutil.TempDir("", TempDirPrefix)
Expect(err).NotTo(HaveOccurred())
err = os.MkdirAll(WorkDir, 0760)
Expect(err).NotTo(HaveOccurred())
Expect(WorkDir).To(BeADirectory())
AssignWorkDirValue(WorkDir)
}
var SynchronizedAfterSuiteCallback = func() {
// Cleanup workdir as usual
cleanFlag := os.Getenv("JX_DISABLE_CLEAN_DIR")
if strings.ToLower(cleanFlag) != "true" {
os.RemoveAll(WorkDir)
Expect(WorkDir).ToNot(BeADirectory())
}
}
func ensureConfiguration() error { | }
_, found := os.LookupEnv("BDD_JX")
if !found {
_ = os.Setenv("BDD_JX", runner.Jx)
}
r := runner.New(cwd, &TimeoutSessionWait, 0)
version, err := r.RunWithOutput("--version")
if err != nil {
return errors.WithStack(err)
}
factory := cmd.NewFactory()
kubeClient, ns, err := factory.CreateKubeClient()
if err != nil {
return errors.Wrap(errors.WithStack(err), "failed to create kubeClient")
}
jxClient, _, err := factory.CreateJXClient()
if err != nil {
return errors.Wrap(errors.WithStack(err), "failed to create jxClient")
}
gitOrganisation := os.Getenv("GIT_ORGANISATION")
if gitOrganisation == "" {
gitOrganisation, err = findDefaultOrganisation(kubeClient, jxClient, ns)
if err != nil {
return errors.Wrapf(errors.WithStack(err), "failed to find gitOrganisation in namespace %s", ns)
}
if gitOrganisation == "" {
gitOrganisation = "jenkins-x-tests"
}
_ = os.Setenv("GIT_ORGANISATION", gitOrganisation)
}
gitProviderUrl := os.Getenv("GIT_PROVIDER_URL")
if gitProviderUrl == "" {
gitProviderUrl = "https://github.com"
_ = os.Setenv("GIT_PROVIDER_URL", gitProviderUrl)
}
gitKind := os.Getenv("GIT_KIND")
if gitKind == "" {
gitKind = "github"
os.Setenv("GIT_KIND", gitKind)
}
disableDeleteAppStr := os.Getenv("JX_DISABLE_DELETE_APP")
disableDeleteApp := "is set. Apps created in the test run will NOT be deleted"
if disableDeleteAppStr == "true" || disableDeleteAppStr == "1" || disableDeleteAppStr == "on" {
disableDeleteApp = "is not set. If you would like to disable the automatic deletion of apps created by the tests set this variable to TRUE."
}
disableDeleteRepoStr := os.Getenv("JX_DISABLE_DELETE_REPO")
disableDeleteRepo := "is set. Repos created in the test run will NOT be deleted"
if disableDeleteRepoStr == "true" || disableDeleteRepoStr == "1" || disableDeleteRepoStr == "on" {
disableDeleteRepo = "is not set. If you would like to disable the automatic deletion of repos created by the tests set this variable to TRUE."
}
disableWaitForFirstReleaseStr := os.Getenv("JX_DISABLE_WAIT_FOR_FIRST_RELEASE")
disableWaitForFirstRelease := "is set. Will not wait for build to be promoted to staging"
if disableWaitForFirstReleaseStr == "true" || disableWaitForFirstReleaseStr == "1" || disableWaitForFirstReleaseStr == "on" {
disableWaitForFirstRelease = "is not set. If you would like to disable waiting for the build to be promoted to staging set this variable to TRUE"
}
enableChatOpsTestLogStr := "is not set. ChatOps tests will not be run as part of quickstart tests. If you would like to run those tests, set this variable to TRUE"
if EnableChatOpsTests == "true" {
enableChatOpsTestLogStr = "is set. ChatOps tests will be run as part of quickstart tests"
}
disablePACheckStr := "is not set. PipelineActivity update tests will be run as part of PR-related tests. If you would like to not run those tests, set this variable to TRUE"
if DisablePipelineActivityCheck == "true" {
disablePACheckStr = "is set. PipelineActivity update tests will NOT be run as part of PR-related tests"
}
includeAppsStr := os.Getenv("JX_BDD_INCLUDE_APPS")
includeApps := "is not set"
if includeAppsStr != "" {
includeApps = fmt.Sprintf("is set to %s", includeAppsStr)
}
bddTimeoutBuildCompletes := os.Getenv("BDD_TIMEOUT_BUILD_COMPLETES")
if bddTimeoutBuildCompletes == "" {
_ = os.Setenv("BDD_TIMEOUT_BUILD_COMPLETES", "60")
}
bddTimeoutBuildRunningInStaging := os.Getenv("BDD_TIMEOUT_BUILD_RUNNING_IN_STAGING")
if bddTimeoutBuildRunningInStaging == "" {
_ = os.Setenv("BDD_TIMEOUT_BUILD_RUNNING_IN_STAGING", "60")
}
bddTimeoutURLReturns := os.Getenv("BDD_TIMEOUT_URL_RETURNS")
if bddTimeoutURLReturns == "" {
_ = os.Setenv("BDD_TIMEOUT_URL_RETURNS", "5")
}
bddTimeoutCmdLine := os.Getenv("BDD_TIMEOUT_CMD_LINE")
if bddTimeoutCmdLine == "" {
_ = os.Setenv("BDD_TIMEOUT_CMD_LINE", "1")
}
bddTimeoutAppTests := os.Getenv("BDD_TIMEOUT_APP_TESTS")
if bddTimeoutAppTests == "" {
_ = os.Setenv("BDD_TIMEOUT_APP_TESTS", "60")
}
bddTimeoutSessionWait := os.Getenv("BDD_TIMEOUT_SESSION_WAIT")
if bddTimeoutSessionWait == "" {
_ = os.Setenv("BDD_TIMEOUT_SESSION_WAIT", "60")
}
bddTimeoutDevpod := os.Getenv("BDD_TIMEOUT_DEVPOD")
if bddTimeoutDevpod == "" {
_ = os.Setenv("BDD_TIMEOUT_DEVPOD", "15")
}
gheUser := os.Getenv("GHE_USER")
if gheUser == "" {
gheUser = "dev1"
_ = os.Setenv("GHE_USER", gheUser)
}
gheProviderUrl := os.Getenv("GHE_PROVIDER_URL")
if gheProviderUrl == "" {
gheProviderUrl = "https://github.beescloud.com"
_ = os.Setenv("GHE_PROVIDER_URL", gheProviderUrl)
}
utils.LogInfof("BDD_JX: %s\n", os.Getenv("BDD_JX"))
utils.LogInfof("jx version: %s\n", version)
utils.LogInfof("GIT_ORGANISATION: %s\n", gitOrganisation)
utils.LogInfof("GIT_PROVIDER_URL: %s\n", gitProviderUrl)
utils.LogInfof("GIT_KIND: %s\n", gitKind)
utils.LogInfof("JX_DISABLE_DELETE_APP: %s\n", disableDeleteApp)
utils.LogInfof("JX_DISABLE_DELETE_REPO: %s\n", disableDeleteRepo)
utils.LogInfof("JX_DISABLE_WAIT_FOR_FIRST_RELEASE: %s\n", disableWaitForFirstRelease)
utils.LogInfof("BDD_ENABLE_TEST_CHATOPS_COMMANDS: %s\n", enableChatOpsTestLogStr)
utils.LogInfof("JX_BDD_INCLUDE_APPS: %s\n", includeApps)
utils.LogInfof("BDD_DISABLE_PIPELINEACTIVITY_CHECK: %s\n", disablePACheckStr)
utils.LogInfof("BDD_TIMEOUT_BUILD_COMPLETES timeout value: %s\n", os.Getenv("BDD_TIMEOUT_BUILD_COMPLETES"))
utils.LogInfof("BDD_TIMEOUT_BUILD_RUNNING_IN_STAGING timeout value: %s\n", os.Getenv("BDD_TIMEOUT_BUILD_RUNNING_IN_STAGING"))
utils.LogInfof("BDD_TIMEOUT_URL_RETURNS timeout value: %s\n", os.Getenv("BDD_TIMEOUT_URL_RETURNS"))
utils.LogInfof("BDD_TIMEOUT_CMD | cwd, err := os.Getwd()
if err != nil {
return errors.WithStack(err) | random_line_split |
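Note: the GIT_ORGANISATION lookup above falls back in three steps, environment variable, then a cluster lookup, then the hard default jenkins-x-tests, and caches the result back into the environment. A compact Python restatement of that chain (findDefaultOrganisation is stubbed out as a hypothetical callable) is:

# Python restatement of the fallback chain; the stub below is hypothetical.
import os

def resolve_git_organisation(find_default_organisation=lambda: ''):
    org = os.environ.get('GIT_ORGANISATION', '')
    if not org:
        org = find_default_organisation() or 'jenkins-x-tests'
        os.environ['GIT_ORGANISATION'] = org   # cache the choice for the rest of the run
    return org

print(resolve_git_organisation())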
suite.go | (t *testing.T, suiteId string) {
reportsDir := os.Getenv("REPORTS_DIR")
if reportsDir == "" {
reportsDir = filepath.Join("../", "build", "reports")
}
err := os.MkdirAll(reportsDir, 0700)
if err != nil {
t.Errorf("cannot create %s because %v", reportsDir, err)
}
reporters := make([]Reporter, 0)
slowSpecThresholdStr := os.Getenv("SLOW_SPEC_THRESHOLD")
if slowSpecThresholdStr == "" {
slowSpecThresholdStr = "50000"
_ = os.Setenv("SLOW_SPEC_THRESHOLD", slowSpecThresholdStr)
}
slowSpecThreshold, err := strconv.ParseFloat(slowSpecThresholdStr, 64)
if err != nil {
panic(err.Error())
}
config.DefaultReporterConfig.SlowSpecThreshold = slowSpecThreshold
config.DefaultReporterConfig.Verbose = testing.Verbose()
reporters = append(reporters, gr.NewJUnitReporter(filepath.Join(reportsDir, fmt.Sprintf("%s.junit.xml", suiteId))))
RegisterFailHandler(Fail)
RunSpecsWithDefaultAndCustomReporters(t, fmt.Sprintf("Jenkins X E2E tests: %s", suiteId), reporters)
}
var BeforeSuiteCallback = func() {
err := ensureConfiguration()
utils.ExpectNoError(err)
WorkDir, err := ioutil.TempDir("", TempDirPrefix)
Expect(err).NotTo(HaveOccurred())
err = os.MkdirAll(WorkDir, 0760)
Expect(err).NotTo(HaveOccurred())
Expect(WorkDir).To(BeADirectory())
AssignWorkDirValue(WorkDir)
}
var SynchronizedAfterSuiteCallback = func() {
// Cleanup workdir as usual
cleanFlag := os.Getenv("JX_DISABLE_CLEAN_DIR")
if strings.ToLower(cleanFlag) != "true" {
os.RemoveAll(WorkDir)
Expect(WorkDir).ToNot(BeADirectory())
}
}
func ensureConfiguration() error {
cwd, err := os.Getwd()
if err != nil {
return errors.WithStack(err)
}
_, found := os.LookupEnv("BDD_JX")
if !found {
_ = os.Setenv("BDD_JX", runner.Jx)
}
r := runner.New(cwd, &TimeoutSessionWait, 0)
version, err := r.RunWithOutput("--version")
if err != nil {
return errors.WithStack(err)
}
factory := cmd.NewFactory()
kubeClient, ns, err := factory.CreateKubeClient()
if err != nil {
return errors.Wrap(errors.WithStack(err), "failed to create kubeClient")
}
jxClient, _, err := factory.CreateJXClient()
if err != nil {
return errors.Wrap(errors.WithStack(err), "failed to create jxClient")
}
gitOrganisation := os.Getenv("GIT_ORGANISATION")
if gitOrganisation == "" {
gitOrganisation, err = findDefaultOrganisation(kubeClient, jxClient, ns)
if err != nil {
return errors.Wrapf(errors.WithStack(err), "failed to find gitOrganisation in namespace %s", ns)
}
if gitOrganisation == "" {
gitOrganisation = "jenkins-x-tests"
}
_ = os.Setenv("GIT_ORGANISATION", gitOrganisation)
}
gitProviderUrl := os.Getenv("GIT_PROVIDER_URL")
if gitProviderUrl == "" {
gitProviderUrl = "https://github.com"
_ = os.Setenv("GIT_PROVIDER_URL", gitProviderUrl)
}
gitKind := os.Getenv("GIT_KIND")
if gitKind == "" {
gitKind = "github"
os.Setenv("GIT_KIND", gitKind)
}
disableDeleteAppStr := os.Getenv("JX_DISABLE_DELETE_APP")
disableDeleteApp := "is set. Apps created in the test run will NOT be deleted"
if disableDeleteAppStr == "true" || disableDeleteAppStr == "1" || disableDeleteAppStr == "on" {
disableDeleteApp = "is not set. If you would like to disable the automatic deletion of apps created by the tests set this variable to TRUE."
}
disableDeleteRepoStr := os.Getenv("JX_DISABLE_DELETE_REPO")
disableDeleteRepo := "is set. Repos created in the test run will NOT be deleted"
if disableDeleteRepoStr == "true" || disableDeleteRepoStr == "1" || disableDeleteRepoStr == "on" {
disableDeleteRepo = "is not set. If you would like to disable the automatic deletion of repos created by the tests set this variable to TRUE."
}
disableWaitForFirstReleaseStr := os.Getenv("JX_DISABLE_WAIT_FOR_FIRST_RELEASE")
disableWaitForFirstRelease := "is set. Will not wait for build to be promoted to staging"
if disableWaitForFirstReleaseStr == "true" || disableWaitForFirstReleaseStr == "1" || disableWaitForFirstReleaseStr == "on" {
disableWaitForFirstRelease = "is not set. If you would like to disable waiting for the build to be promoted to staging set this variable to TRUE"
}
enableChatOpsTestLogStr := "is not set. ChatOps tests will not be run as part of quickstart tests. If you would like to run those tests, set this variable to TRUE"
if EnableChatOpsTests == "true" {
enableChatOpsTestLogStr = "is set. ChatOps tests will be run as part of quickstart tests"
}
disablePACheckStr := "is not set. PipelineActivity update tests will be run as part of PR-related tests. If you would like to not run those tests, set this variable to TRUE"
if DisablePipelineActivityCheck == "true" {
disablePACheckStr = "is set. PipelineActivity update tests will NOT be run as part of PR-related tests"
}
includeAppsStr := os.Getenv("JX_BDD_INCLUDE_APPS")
includeApps := "is not set"
if includeAppsStr != "" {
includeApps = fmt.Sprintf("is set to %s", includeAppsStr)
}
bddTimeoutBuildCompletes := os.Getenv("BDD_TIMEOUT_BUILD_COMPLETES")
if bddTimeoutBuildCompletes == "" {
_ = os.Setenv("BDD_TIMEOUT_BUILD_COMPLETES", "60")
}
bddTimeoutBuildRunningInStaging := os.Getenv("BDD_TIMEOUT_BUILD_RUNNING_IN_STAGING")
if bddTimeoutBuildRunningInStaging == "" {
_ = os.Setenv("BDD_TIMEOUT_BUILD_RUNNING_IN_STAGING", "60")
}
bddTimeoutURLReturns := os.Getenv("BDD_TIMEOUT_URL_RETURNS")
if bddTimeoutURLReturns == "" {
_ = os.Setenv("BDD_TIMEOUT_URL_RETURNS", "5")
}
bddTimeoutCmdLine := os.Getenv("BDD_TIMEOUT_CMD_LINE")
if bddTimeoutCmdLine == "" {
_ = os.Setenv("BDD_TIMEOUT_CMD_LINE", "1")
}
bddTimeoutAppTests := os.Getenv("BDD_TIMEOUT_APP_TESTS")
if bddTimeoutAppTests == "" {
_ = os.Setenv("BDD_TIMEOUT_APP_TESTS", "60")
}
bddTimeoutSessionWait := os.Getenv("BDD_TIMEOUT_SESSION_WAIT")
if bddTimeoutSessionWait == "" {
_ = os.Setenv("BDD_TIMEOUT_SESSION_WAIT", "60")
}
bddTimeoutDevpod := os.Getenv("BDD_TIMEOUT_DEVPOD")
if bddTimeoutDevpod == "" {
_ = os.Setenv("BDD_TIMEOUT_DEVPOD", "15")
}
gheUser := os.Getenv("GHE_USER")
if gheUser == "" {
gheUser = "dev1"
_ = os.Setenv("GHE_USER", gheUser)
}
gheProviderUrl := os.Getenv("GHE_PROVIDER_URL")
if gheProviderUrl == "" {
gheProviderUrl = "https://github.beescloud.com"
_ = os.Setenv("GHE_PROVIDER_URL", gheProviderUrl)
}
utils.LogInfof("BDD_JX: %s\n", os.Getenv("BDD_JX"))
utils.LogInfof("jx version: %s\n", version)
utils.LogInfof("GIT_ORGANISATION: %s\n", gitOrganisation)
utils.LogInfof("GIT_PROVIDER_URL: %s\n", gitProviderUrl)
utils.LogInfof("GIT_KIND: %s\n", gitKind)
utils.LogInfof("JX_DISABLE_DELETE_APP: %s\n", disableDeleteApp)
utils.LogInfof("JX_DISABLE_DELETE_REPO: %s\n", disableDeleteRepo)
utils.LogInfof("JX_DISABLE_WAIT_FOR_FIRST_RELEASE: %s\n", disableWaitForFirstRelease)
utils.LogInfof("BDD_ENABLE_TEST_CHATOPS_COMMANDS: %s\n", enableChatOpsTestLogStr)
utils.LogInfof("JX_BDD_INCLUDE_APPS: %s\n", includeApps)
utils.LogInfof("BDD_DISABLE_PIPELINEACTIVITY_CHECK: %s\n", disablePACheckStr)
utils.LogInfof("BDD_TIMEOUT_BUILD_COMPLETES timeout value: %s\n", os.Getenv("BDD_TIMEOUT_BUILD_COMPLETES"))
utils.LogInfof("BDD_TIMEOUT_BUILD_RUNNING_IN_STAGING timeout value: %s\n", os.Getenv("BDD_TIMEOUT_BUILD_RUNNING_IN_STAGING"))
utils.LogInfof("BDD_TIMEOUT_URL_RETURNS timeout value | RunWithReporters | identifier_name |
|
suite.go | Dir == "" {
reportsDir = filepath.Join("../", "build", "reports")
}
err := os.MkdirAll(reportsDir, 0700)
if err != nil {
t.Errorf("cannot create %s because %v", reportsDir, err)
}
reporters := make([]Reporter, 0)
slowSpecThresholdStr := os.Getenv("SLOW_SPEC_THRESHOLD")
if slowSpecThresholdStr == "" {
slowSpecThresholdStr = "50000"
_ = os.Setenv("SLOW_SPEC_THRESHOLD", slowSpecThresholdStr)
}
slowSpecThreshold, err := strconv.ParseFloat(slowSpecThresholdStr, 64)
if err != nil {
panic(err.Error())
}
config.DefaultReporterConfig.SlowSpecThreshold = slowSpecThreshold
config.DefaultReporterConfig.Verbose = testing.Verbose()
reporters = append(reporters, gr.NewJUnitReporter(filepath.Join(reportsDir, fmt.Sprintf("%s.junit.xml", suiteId))))
RegisterFailHandler(Fail)
RunSpecsWithDefaultAndCustomReporters(t, fmt.Sprintf("Jenkins X E2E tests: %s", suiteId), reporters)
}
var BeforeSuiteCallback = func() {
err := ensureConfiguration()
utils.ExpectNoError(err)
WorkDir, err := ioutil.TempDir("", TempDirPrefix)
Expect(err).NotTo(HaveOccurred())
err = os.MkdirAll(WorkDir, 0760)
Expect(err).NotTo(HaveOccurred())
Expect(WorkDir).To(BeADirectory())
AssignWorkDirValue(WorkDir)
}
var SynchronizedAfterSuiteCallback = func() {
// Cleanup workdir as usual
cleanFlag := os.Getenv("JX_DISABLE_CLEAN_DIR")
if strings.ToLower(cleanFlag) != "true" {
os.RemoveAll(WorkDir)
Expect(WorkDir).ToNot(BeADirectory())
}
}
func ensureConfiguration() error {
cwd, err := os.Getwd()
if err != nil {
return errors.WithStack(err)
}
_, found := os.LookupEnv("BDD_JX")
if !found {
_ = os.Setenv("BDD_JX", runner.Jx)
}
r := runner.New(cwd, &TimeoutSessionWait, 0)
version, err := r.RunWithOutput("--version")
if err != nil {
return errors.WithStack(err)
}
factory := cmd.NewFactory()
kubeClient, ns, err := factory.CreateKubeClient()
if err != nil {
return errors.Wrap(errors.WithStack(err), "failed to create kubeClient")
}
jxClient, _, err := factory.CreateJXClient()
if err != nil {
return errors.Wrap(errors.WithStack(err), "failed to create jxClient")
}
gitOrganisation := os.Getenv("GIT_ORGANISATION")
if gitOrganisation == "" {
gitOrganisation, err = findDefaultOrganisation(kubeClient, jxClient, ns)
if err != nil {
return errors.Wrapf(errors.WithStack(err), "failed to find gitOrganisation in namespace %s", ns)
}
if gitOrganisation == "" {
gitOrganisation = "jenkins-x-tests"
}
_ = os.Setenv("GIT_ORGANISATION", gitOrganisation)
}
gitProviderUrl := os.Getenv("GIT_PROVIDER_URL")
if gitProviderUrl == "" {
gitProviderUrl = "https://github.com"
_ = os.Setenv("GIT_PROVIDER_URL", gitProviderUrl)
}
gitKind := os.Getenv("GIT_KIND")
if gitKind == "" {
gitKind = "github"
os.Setenv("GIT_KIND", gitKind)
}
disableDeleteAppStr := os.Getenv("JX_DISABLE_DELETE_APP")
disableDeleteApp := "is set. Apps created in the test run will NOT be deleted"
if disableDeleteAppStr == "true" || disableDeleteAppStr == "1" || disableDeleteAppStr == "on" {
disableDeleteApp = "is not set. If you would like to disable the automatic deletion of apps created by the tests set this variable to TRUE."
}
disableDeleteRepoStr := os.Getenv("JX_DISABLE_DELETE_REPO")
disableDeleteRepo := "is set. Repos created in the test run will NOT be deleted"
if disableDeleteRepoStr == "true" || disableDeleteRepoStr == "1" || disableDeleteRepoStr == "on" {
disableDeleteRepo = "is not set. If you would like to disable the automatic deletion of repos created by the tests set this variable to TRUE."
}
disableWaitForFirstReleaseStr := os.Getenv("JX_DISABLE_WAIT_FOR_FIRST_RELEASE")
disableWaitForFirstRelease := "is set. Will not wait for build to be promoted to staging"
if disableWaitForFirstReleaseStr == "true" || disableWaitForFirstReleaseStr == "1" || disableWaitForFirstReleaseStr == "on" {
disableWaitForFirstRelease = "is not set. If you would like to disable waiting for the build to be promoted to staging set this variable to TRUE"
}
enableChatOpsTestLogStr := "is not set. ChatOps tests will not be run as part of quickstart tests. If you would like to run those tests, set this variable to TRUE"
if EnableChatOpsTests == "true" {
enableChatOpsTestLogStr = "is set. ChatOps tests will be run as part of quickstart tests"
}
disablePACheckStr := "is not set. PipelineActivity update tests will be run as part of PR-related tests. If you would like to not run those tests, set this variable to TRUE"
if DisablePipelineActivityCheck == "true" {
disablePACheckStr = "is set. PipelineActivity update tests will NOT be run as part of PR-related tests"
}
includeAppsStr := os.Getenv("JX_BDD_INCLUDE_APPS")
includeApps := "is not set"
if includeAppsStr != "" {
includeApps = fmt.Sprintf("is set to %s", includeAppsStr)
}
bddTimeoutBuildCompletes := os.Getenv("BDD_TIMEOUT_BUILD_COMPLETES")
if bddTimeoutBuildCompletes == "" {
_ = os.Setenv("BDD_TIMEOUT_BUILD_COMPLETES", "60")
}
bddTimeoutBuildRunningInStaging := os.Getenv("BDD_TIMEOUT_BUILD_RUNNING_IN_STAGING")
if bddTimeoutBuildRunningInStaging == "" {
_ = os.Setenv("BDD_TIMEOUT_BUILD_RUNNING_IN_STAGING", "60")
}
bddTimeoutURLReturns := os.Getenv("BDD_TIMEOUT_URL_RETURNS")
if bddTimeoutURLReturns == "" {
_ = os.Setenv("BDD_TIMEOUT_URL_RETURNS", "5")
}
bddTimeoutCmdLine := os.Getenv("BDD_TIMEOUT_CMD_LINE")
if bddTimeoutCmdLine == "" |
bddTimeoutAppTests := os.Getenv("BDD_TIMEOUT_APP_TESTS")
if bddTimeoutAppTests == "" {
_ = os.Setenv("BDD_TIMEOUT_APP_TESTS", "60")
}
bddTimeoutSessionWait := os.Getenv("BDD_TIMEOUT_SESSION_WAIT")
if bddTimeoutSessionWait == "" {
_ = os.Setenv("BDD_TIMEOUT_SESSION_WAIT", "60")
}
bddTimeoutDevpod := os.Getenv("BDD_TIMEOUT_DEVPOD")
if bddTimeoutDevpod == "" {
_ = os.Setenv("BDD_TIMEOUT_DEVPOD", "15")
}
gheUser := os.Getenv("GHE_USER")
if gheUser == "" {
gheUser = "dev1"
_ = os.Setenv("GHE_USER", gheUser)
}
gheProviderUrl := os.Getenv("GHE_PROVIDER_URL")
if gheProviderUrl == "" {
gheProviderUrl = "https://github.beescloud.com"
_ = os.Setenv("GHE_PROVIDER_URL", gheProviderUrl)
}
utils.LogInfof("BDD_JX: %s\n", os.Getenv("BDD_JX"))
utils.LogInfof("jx version: %s\n", version)
utils.LogInfof("GIT_ORGANISATION: %s\n", gitOrganisation)
utils.LogInfof("GIT_PROVIDER_URL: %s\n", gitProviderUrl)
utils.LogInfof("GIT_KIND: %s\n", gitKind)
utils.LogInfof("JX_DISABLE_DELETE_APP: %s\n", disableDeleteApp)
utils.LogInfof("JX_DISABLE_DELETE_REPO: %s\n", disableDeleteRepo)
utils.LogInfof("JX_DISABLE_WAIT_FOR_FIRST_RELEASE: %s\n", disableWaitForFirstRelease)
utils.LogInfof("BDD_ENABLE_TEST_CHATOPS_COMMANDS: %s\n", enableChatOpsTestLogStr)
utils.LogInfof("JX_BDD_INCLUDE_APPS: %s\n", includeApps)
utils.LogInfof("BDD_DISABLE_PIPELINEACTIVITY_CHECK: %s\n", disablePACheckStr)
utils.LogInfof("BDD_TIMEOUT_BUILD_COMPLETES timeout value: %s\n", os.Getenv("BDD_TIMEOUT_BUILD_COMPLETES"))
utils.LogInfof("BDD_TIMEOUT_BUILD_RUNNING_IN_STAGING timeout value: %s\n", os.Getenv("BDD_TIMEOUT_BUILD_RUNNING_IN_STAGING"))
utils.LogInfof("BDD_TIMEOUT_URL_RETURNS timeout value: %s\n", os.Getenv("BDD_TIMEOUT_URL_RETURNS"))
utils.LogInfof("BDD_TIMEOUT | {
_ = os.Setenv("BDD_TIMEOUT_CMD_LINE", "1")
} | conditional_block |
run-performance-test.py | 2\n")
parameterFile.write("missed-cleavages=0\n")
parameterFile.write("allowed_missed_cleavage=0\n")
# Minimums
parameterFile.write("minimum_peaks=10\n")
parameterFile.write("min-peaks=10\n")
# Precursor selection rules.
parameterFile.write("precursor-window=3\n")
parameterFile.write("precursor-window-type=mass\n")
parameterFile.write("peptide_mass_tolerance=3\n")
parameterFile.write("peptide_mass_units=0\n") # 0=amu, 1=mmu, 2=ppm
# Precursor mass type.
parameterFile.write("isotopic-mass=mono\n")
parameterFile.write("mass_type_parent=1\n") # 1=monoisotopic
# Fragment mass type. Tides uses only monoisotopic.
parameterFile.write("fragment-mass=mono\n")
parameterFile.write("mass_type_fragment=1\n") # 1=monoisotopic
# Decoys.
parameterFile.write("decoy-format=peptide-reverse\n")
parameterFile.write("decoy_search=1\n") # 1 = concatenated decoy search
parameterFile.write("concat=T\n")
parameterFile.write("keep-terminal-aminos=C\n") # No corresponding Comet param
# Report the top 5 matches.
parameterFile.write("top-match=5\n")
parameterFile.write("num_results=6\n")
parameterFile.write("num_output_lines=5\n")
# Precursor removal.
parameterFile.write("remove-precursor-peak=T\n")
parameterFile.write("remove-precursor-tolerance=15\n")
parameterFile.write("remove_precursor_peak=1\n")
parameterFile.write("remove_precursor_tolerance=15\n")
# Flanking peaks.
parameterFile.write("use-flanking-peaks=F\n")
parameterFile.write("theoretical_fragment_ions=1\n") # 0 = flanks; 1 = no flanks
parameterFile.write("use-neutral-loss-peaks=F\n")
# Fragment m/z discretization.
parameterFile.write("mz-bin-offset=0.68\n")
parameterFile.write("mz-bin-width=1.0005079\n")
parameterFile.write("fragment_bin_offset=0.68\n")
parameterFile.write("fragment_bin_tol=1.0005079\n")
# Peptide mass range.
parameterFile.write("min-mass=200\n")
parameterFile.write("max-mass=7200\n")
parameterFile.write("digest_mass_range=200 7200\n")
# Other Crux parameters.
parameterFile.write("compute-sp=T\n")
parameterFile.write("overwrite=T\n")
parameterFile.write("peptide-list=T\n")
# Comet parameters
parameterFile.write("output_pepxmlfile=0\n")
parameterFile.write("add_C_cysteine=57.021464\n")
# parameterFile.write("num_threads=1\n") # Multithreaded sometimes dumps core.
parameterFile.write("max_fragment_charge=2\n")
parameterFile.write("isotope_error=0\n")
parameterFile.write("use_A_ions=0\n")
parameterFile.write("use_B_ions=1\n")
parameterFile.write("use_C_ions=0\n")
parameterFile.write("use_X_ions=0\n")
parameterFile.write("use_Y_ions=1\n")
parameterFile.write("use_Z_ions=0\n")
parameterFile.write("use_NL_ions=0\n")
parameterFile.write("variable_mod01=0.0 X 0 3\n")
parameterFile.write("variable_mod02=0.0 X 0 3\n")
parameterFile.write("[COMET_ENZYME_INFO]\n")
parameterFile.write("0. No_enzyme 0 - -\n")
parameterFile.write("1. Trypsin 1 KR P\n")
parameterFile.close()
#############################################################################
def extractData(inputFileName, columnName, outputFileName):
# dataset = pd.read_csv(inputFileName, sep='\t')
# data_frame = pd.DataFrame(dataset)
data_frame = pd.read_csv(inputFileName, sep='\t')
data_frame = data_frame[[columnName]]
data_frame.to_csv(outputFileName, sep='\t', index=False)
#############################################################################
def runSearch(outputDirectory, searchName, searchParam, database,
psmFile, scoreColumn, confidenceParam):
runCommand("%s %s --output-dir %s --parameter-file %s %s %s %s"
% (CRUX, searchName, outputDirectory, parameterFileName,
searchParam, ms2, database),
psmFile)
confidenceFile = "%s/assign-confidence.target.txt" % outputDirectory
runCommand("%s assign-confidence --output-dir %s %s %s" %
(CRUX, outputDirectory, confidenceParam, psmFile), confidenceFile)
qFile = "%s/%s.q.txt" % (outputDirectory, searchName)
extractData(confidenceFile, "tdc q-value", qFile)
percolatorFile = "%s/percolator.target.psms.txt" % outputDirectory
runCommand("%s percolator --output-dir %s %s"
% (CRUX, outputDirectory, psmFile), percolatorFile)
qFile = "%s/%s.percolator.q.txt" % (outputDirectory, searchName)
extractData(percolatorFile, "q-value", qFile)
fourColFile = "%s/%s.target.four-col.txt" % (outputDirectory, searchName)
dataset = pd.read_csv(psmFile, sep='\t')
data_frame = pd.DataFrame(dataset)
data_frame = data_frame[["scan", "charge", "sequence", scoreColumn]]
data_frame.to_csv(fourColFile, sep='\t', index=False)
reducedFile = "%s/%s.target.reduced.txt" % (outputDirectory, searchName)
runCommand("awk 'NR > 1 {print $1 \"~\" $2 \"~\" $3 \"\t\" $4}' %s | sort -k 1b,1 > %s" %
(fourColFile, reducedFile), "")
# Create a scatter plot of XCorr scores.
def makeScatterPlot(xData, xLabel, yData, yLabel, outputRoot):
runCommand("join %s %s | awk -F \"~\" '{print $1 \" \" $2 \" \" $3}' | awk '{print $1 \"\t\" $2 \"\t\" $3 \"\t\" $4 \"\t\" $5}' | sort -n > %s.txt"
% (xData, yData, outputRoot), "")
gnuplotFileName = "%s.gnuplot" % outputRoot
gnuplotFile = open(gnuplotFileName, "w")
gnuplotFile.write("set output \"/dev/null\"\n")
gnuplotFile.write("set terminal png\n")
gnuplotFile.write("set xlabel \"%s\"\n" % xLabel)
gnuplotFile.write("set ylabel \"%s\"\n" % yLabel)
gnuplotFile.write("plot x notitle with lines\n")
gnuplotFile.write("replot \"%s.txt\" using 4:5 notitle\n" % outputRoot)
gnuplotFile.write("set output\n")
gnuplotFile.write("replot\n")
gnuplotFile.close()
runCommand("gnuplot %s > %s.png" % (gnuplotFileName, outputRoot), "")
# Create a gnuplot of a list of methods.
def makePerformancePlot(title, listOfMethods):
if not os.path.isdir("plots"):
os.mkdir("plots")
gnuplotFileName = "plots/%s.gnuplot" % title
gnuplotFile = open(gnuplotFileName, "w")
gnuplotFile.write("set output \"/dev/null\"\n")
gnuplotFile.write("set terminal png\n")
gnuplotFile.write("set title \"%s\"\n" % title)
gnuplotFile.write("set xlabel \"q-value threshold\"\n")
gnuplotFile.write("set ylabel \"Number of accepted PSMs\"\n")
gnuplotFile.write("set xrange [0:0.1]\n")
gnuplotFile.write("set key bottom right\n")
firstOne = True
for myTuple in listOfMethods:
(dataFileName, seriesTitle) = myTuple
if firstOne:
|
else:
gnuplotFile.write("re")
gnuplotFile.write("plot \"%s\" using 1:0 title \"%s\" with lines lw 1\n"
% (dataFileName, seriesTitle))
gnuplotFile.write("set output\n")
gnuplotFile.write("replot\n")
gnuplotFile.close()
runCommand("gnuplot %s > plots/%s.png" % (gnuplot | firstOne = False | conditional_block |
run-performance-test.py | 2\n")
parameterFile.write("missed-cleavages=0\n")
parameterFile.write("allowed_missed_cleavage=0\n")
# Minimums
parameterFile.write("minimum_peaks=10\n")
parameterFile.write("min-peaks=10\n")
# Precursor selection rules.
parameterFile.write("precursor-window=3\n")
parameterFile.write("precursor-window-type=mass\n")
parameterFile.write("peptide_mass_tolerance=3\n")
parameterFile.write("peptide_mass_units=0\n") # 0=amu, 1=mmu, 2=ppm
# Precursor mass type.
parameterFile.write("isotopic-mass=mono\n")
parameterFile.write("mass_type_parent=1\n") # 1=monoisotopic
# Fragment mass type. Tides uses only monoisotopic.
parameterFile.write("fragment-mass=mono\n")
parameterFile.write("mass_type_fragment=1\n") # 1=monoisotopic
# Decoys.
parameterFile.write("decoy-format=peptide-reverse\n")
parameterFile.write("decoy_search=1\n") # 1 = concatenated decoy search
parameterFile.write("concat=T\n")
parameterFile.write("keep-terminal-aminos=C\n") # No corresponding Comet param
# Report the top 5 matches.
parameterFile.write("top-match=5\n")
parameterFile.write("num_results=6\n")
parameterFile.write("num_output_lines=5\n")
# Precursor removal.
parameterFile.write("remove-precursor-peak=T\n")
parameterFile.write("remove-precursor-tolerance=15\n")
parameterFile.write("remove_precursor_peak=1\n")
parameterFile.write("remove_precursor_tolerance=15\n")
# Flanking peaks.
parameterFile.write("use-flanking-peaks=F\n")
parameterFile.write("theoretical_fragment_ions=1\n") # 0 = flanks; 1 = no flanks
parameterFile.write("use-neutral-loss-peaks=F\n")
# Fragment m/z discretization.
parameterFile.write("mz-bin-offset=0.68\n")
parameterFile.write("mz-bin-width=1.0005079\n")
parameterFile.write("fragment_bin_offset=0.68\n")
parameterFile.write("fragment_bin_tol=1.0005079\n")
# Peptide mass range.
parameterFile.write("min-mass=200\n")
parameterFile.write("max-mass=7200\n")
parameterFile.write("digest_mass_range=200 7200\n")
# Other Crux parameters.
parameterFile.write("compute-sp=T\n")
parameterFile.write("overwrite=T\n")
parameterFile.write("peptide-list=T\n")
# Comet parameters
parameterFile.write("output_pepxmlfile=0\n")
parameterFile.write("add_C_cysteine=57.021464\n")
# parameterFile.write("num_threads=1\n") # Multithreaded sometimes dumps core.
parameterFile.write("max_fragment_charge=2\n")
parameterFile.write("isotope_error=0\n")
parameterFile.write("use_A_ions=0\n")
parameterFile.write("use_B_ions=1\n")
parameterFile.write("use_C_ions=0\n")
parameterFile.write("use_X_ions=0\n")
parameterFile.write("use_Y_ions=1\n")
parameterFile.write("use_Z_ions=0\n")
parameterFile.write("use_NL_ions=0\n")
parameterFile.write("variable_mod01=0.0 X 0 3\n")
parameterFile.write("variable_mod02=0.0 X 0 3\n")
parameterFile.write("[COMET_ENZYME_INFO]\n")
parameterFile.write("0. No_enzyme 0 - -\n")
parameterFile.write("1. Trypsin 1 KR P\n")
parameterFile.close()
#############################################################################
def extractData(inputFileName, columnName, outputFileName):
# dataset = pd.read_csv(inputFileName, sep='\t')
# data_frame = pd.DataFrame(dataset)
data_frame = pd.read_csv(inputFileName, sep='\t')
data_frame = data_frame[[columnName]]
data_frame.to_csv(outputFileName, sep='\t', index=False)
#############################################################################
def runSearch(outputDirectory, searchName, searchParam, database,
psmFile, scoreColumn, confidenceParam):
runCommand("%s %s --output-dir %s --parameter-file %s %s %s %s"
% (CRUX, searchName, outputDirectory, parameterFileName,
searchParam, ms2, database),
psmFile)
confidenceFile = "%s/assign-confidence.target.txt" % outputDirectory
runCommand("%s assign-confidence --output-dir %s %s %s" %
(CRUX, outputDirectory, confidenceParam, psmFile), confidenceFile)
qFile = "%s/%s.q.txt" % (outputDirectory, searchName)
extractData(confidenceFile, "tdc q-value", qFile)
percolatorFile = "%s/percolator.target.psms.txt" % outputDirectory
runCommand("%s percolator --output-dir %s %s"
% (CRUX, outputDirectory, psmFile), percolatorFile)
qFile = "%s/%s.percolator.q.txt" % (outputDirectory, searchName)
extractData(percolatorFile, "q-value", qFile)
fourColFile = "%s/%s.target.four-col.txt" % (outputDirectory, searchName)
dataset = pd.read_csv(psmFile, sep='\t')
data_frame = pd.DataFrame(dataset)
data_frame = data_frame[["scan", "charge", "sequence", scoreColumn]]
data_frame.to_csv(fourColFile, sep='\t', index=False)
reducedFile = "%s/%s.target.reduced.txt" % (outputDirectory, searchName)
runCommand("awk 'NR > 1 {print $1 \"~\" $2 \"~\" $3 \"\t\" $4}' %s | sort -k 1b,1 > %s" %
(fourColFile, reducedFile), "")
# Create a scatter plot of XCorr scores.
def makeScatterPlot(xData, xLabel, yData, yLabel, outputRoot):
|
# Create a gnuplot of a list of methods.
def makePerformancePlot(title, listOfMethods):
if not os.path.isdir("plots"):
os.mkdir("plots")
gnuplotFileName = "plots/%s.gnuplot" % title
gnuplotFile = open(gnuplotFileName, "w")
gnuplotFile.write("set output \"/dev/null\"\n")
gnuplotFile.write("set terminal png\n")
gnuplotFile.write("set title \"%s\"\n" % title)
gnuplotFile.write("set xlabel \"q-value threshold\"\n")
gnuplotFile.write("set ylabel \"Number of accepted PSMs\"\n")
gnuplotFile.write("set xrange [0:0.1]\n")
gnuplotFile.write("set key bottom right\n")
firstOne = True
for myTuple in listOfMethods:
(dataFileName, seriesTitle) = myTuple
if firstOne:
firstOne = False
else:
gnuplotFile.write("re")
gnuplotFile.write("plot \"%s\" using 1:0 title \"%s\" with lines lw 1\n"
% (dataFileName, seriesTitle))
gnuplotFile.write("set output\n")
gnuplotFile.write("replot\n")
gnuplotFile.close()
runCommand("gnuplot %s > plots/%s.png" % (gnup | runCommand("join %s %s | awk -F \"~\" '{print $1 \" \" $2 \" \" $3}' | awk '{print $1 \"\t\" $2 \"\t\" $3 \"\t\" $4 \"\t\" $5}' | sort -n > %s.txt"
% (xData, yData, outputRoot), "")
gnuplotFileName = "%s.gnuplot" % outputRoot
gnuplotFile = open(gnuplotFileName, "w")
gnuplotFile.write("set output \"/dev/null\"\n")
gnuplotFile.write("set terminal png\n")
gnuplotFile.write("set xlabel \"%s\"\n" % xLabel)
gnuplotFile.write("set ylabel \"%s\"\n" % yLabel)
gnuplotFile.write("plot x notitle with lines\n")
gnuplotFile.write("replot \"%s.txt\" using 4:5 notitle\n" % outputRoot)
gnuplotFile.write("set output\n")
gnuplotFile.write("replot\n")
gnuplotFile.close()
runCommand("gnuplot %s > %s.png" % (gnuplotFileName, outputRoot), "") | identifier_body |
run-performance-test.py | 2\n")
parameterFile.write("missed-cleavages=0\n")
parameterFile.write("allowed_missed_cleavage=0\n")
# Minimums
parameterFile.write("minimum_peaks=10\n")
parameterFile.write("min-peaks=10\n")
# Precursor selection rules.
parameterFile.write("precursor-window=3\n")
parameterFile.write("precursor-window-type=mass\n")
parameterFile.write("peptide_mass_tolerance=3\n")
parameterFile.write("peptide_mass_units=0\n") # 0=amu, 1=mmu, 2=ppm
# Precursor mass type.
parameterFile.write("isotopic-mass=mono\n")
parameterFile.write("mass_type_parent=1\n") # 1=monoisotopic
# Fragment mass type. Tides uses only monoisotopic.
parameterFile.write("fragment-mass=mono\n")
parameterFile.write("mass_type_fragment=1\n") # 1=monoisotopic
# Decoys.
parameterFile.write("decoy-format=peptide-reverse\n")
parameterFile.write("decoy_search=1\n") # 1 = concatenated decoy search
parameterFile.write("concat=T\n")
parameterFile.write("keep-terminal-aminos=C\n") # No corresponding Comet param
# Report the top 5 matches.
parameterFile.write("top-match=5\n")
parameterFile.write("num_results=6\n")
parameterFile.write("num_output_lines=5\n")
# Precursor removal.
parameterFile.write("remove-precursor-peak=T\n")
parameterFile.write("remove-precursor-tolerance=15\n")
parameterFile.write("remove_precursor_peak=1\n")
parameterFile.write("remove_precursor_tolerance=15\n")
# Flanking peaks.
parameterFile.write("use-flanking-peaks=F\n")
parameterFile.write("theoretical_fragment_ions=1\n") # 0 = flanks; 1 = no flanks
parameterFile.write("use-neutral-loss-peaks=F\n")
# Fragment m/z discretization.
parameterFile.write("mz-bin-offset=0.68\n")
parameterFile.write("mz-bin-width=1.0005079\n")
parameterFile.write("fragment_bin_offset=0.68\n")
parameterFile.write("fragment_bin_tol=1.0005079\n")
# Peptide mass range.
parameterFile.write("min-mass=200\n")
parameterFile.write("max-mass=7200\n")
parameterFile.write("digest_mass_range=200 7200\n")
# Other Crux parameters.
parameterFile.write("compute-sp=T\n")
parameterFile.write("overwrite=T\n")
parameterFile.write("peptide-list=T\n")
# Comet parameters
parameterFile.write("output_pepxmlfile=0\n")
parameterFile.write("add_C_cysteine=57.021464\n")
# parameterFile.write("num_threads=1\n") # Multithreaded sometimes dumps core.
parameterFile.write("max_fragment_charge=2\n")
parameterFile.write("isotope_error=0\n")
parameterFile.write("use_A_ions=0\n")
parameterFile.write("use_B_ions=1\n")
parameterFile.write("use_C_ions=0\n")
parameterFile.write("use_X_ions=0\n")
parameterFile.write("use_Y_ions=1\n")
parameterFile.write("use_Z_ions=0\n")
parameterFile.write("use_NL_ions=0\n")
parameterFile.write("variable_mod01=0.0 X 0 3\n")
parameterFile.write("variable_mod02=0.0 X 0 3\n")
parameterFile.write("[COMET_ENZYME_INFO]\n")
parameterFile.write("0. No_enzyme 0 - -\n")
parameterFile.write("1. Trypsin 1 KR P\n")
parameterFile.close()
#############################################################################
def extractData(inputFileName, columnName, outputFileName):
# dataset = pd.read_csv(inputFileName, sep='\t')
# data_frame = pd.DataFrame(dataset)
data_frame = pd.read_csv(inputFileName, sep='\t')
data_frame = data_frame[[columnName]]
data_frame.to_csv(outputFileName, sep='\t', index=False)
#############################################################################
def runSearch(outputDirectory, searchName, searchParam, database,
psmFile, scoreColumn, confidenceParam):
runCommand("%s %s --output-dir %s --parameter-file %s %s %s %s"
% (CRUX, searchName, outputDirectory, parameterFileName,
searchParam, ms2, database),
psmFile)
confidenceFile = "%s/assign-confidence.target.txt" % outputDirectory
runCommand("%s assign-confidence --output-dir %s %s %s" %
(CRUX, outputDirectory, confidenceParam, psmFile), confidenceFile)
qFile = "%s/%s.q.txt" % (outputDirectory, searchName)
extractData(confidenceFile, "tdc q-value", qFile)
percolatorFile = "%s/percolator.target.psms.txt" % outputDirectory
runCommand("%s percolator --output-dir %s %s"
% (CRUX, outputDirectory, psmFile), percolatorFile)
qFile = "%s/%s.percolator.q.txt" % (outputDirectory, searchName)
extractData(percolatorFile, "q-value", qFile)
fourColFile = "%s/%s.target.four-col.txt" % (outputDirectory, searchName)
dataset = pd.read_csv(psmFile, sep='\t')
data_frame = pd.DataFrame(dataset)
data_frame = data_frame[["scan", "charge", "sequence", scoreColumn]]
data_frame.to_csv(fourColFile, sep='\t', index=False)
reducedFile = "%s/%s.target.reduced.txt" % (outputDirectory, searchName)
runCommand("awk 'NR > 1 {print $1 \"~\" $2 \"~\" $3 \"\t\" $4}' %s | sort -k 1b,1 > %s" %
(fourColFile, reducedFile), "")
# Create a scatter plot of XCorr scores.
def | (xData, xLabel, yData, yLabel, outputRoot):
runCommand("join %s %s | awk -F \"~\" '{print $1 \" \" $2 \" \" $3}' | awk '{print $1 \"\t\" $2 \"\t\" $3 \"\t\" $4 \"\t\" $5}' | sort -n > %s.txt"
% (xData, yData, outputRoot), "")
gnuplotFileName = "%s.gnuplot" % outputRoot
gnuplotFile = open(gnuplotFileName, "w")
gnuplotFile.write("set output \"/dev/null\"\n")
gnuplotFile.write("set terminal png\n")
gnuplotFile.write("set xlabel \"%s\"\n" % xLabel)
gnuplotFile.write("set ylabel \"%s\"\n" % yLabel)
gnuplotFile.write("plot x notitle with lines\n")
gnuplotFile.write("replot \"%s.txt\" using 4:5 notitle\n" % outputRoot)
gnuplotFile.write("set output\n")
gnuplotFile.write("replot\n")
gnuplotFile.close()
runCommand("gnuplot %s > %s.png" % (gnuplotFileName, outputRoot), "")
# Create a gnuplot of a list of methods.
def makePerformancePlot(title, listOfMethods):
if not os.path.isdir("plots"):
os.mkdir("plots")
gnuplotFileName = "plots/%s.gnuplot" % title
gnuplotFile = open(gnuplotFileName, "w")
gnuplotFile.write("set output \"/dev/null\"\n")
gnuplotFile.write("set terminal png\n")
gnuplotFile.write("set title \"%s\"\n" % title)
gnuplotFile.write("set xlabel \"q-value threshold\"\n")
gnuplotFile.write("set ylabel \"Number of accepted PSMs\"\n")
gnuplotFile.write("set xrange [0:0.1]\n")
gnuplotFile.write("set key bottom right\n")
firstOne = True
for myTuple in listOfMethods:
(dataFileName, seriesTitle) = myTuple
if firstOne:
firstOne = False
else:
gnuplotFile.write("re")
gnuplotFile.write("plot \"%s\" using 1:0 title \"%s\" with lines lw 1\n"
% (dataFileName, seriesTitle))
gnuplotFile.write("set output\n")
gnuplotFile.write("replot\n")
gnuplotFile.close()
runCommand("gnuplot %s > plots/%s.png" % (gnup | makeScatterPlot | identifier_name |
run-performance-test.py | 2\n")
parameterFile.write("missed-cleavages=0\n")
parameterFile.write("allowed_missed_cleavage=0\n")
# Minimums
parameterFile.write("minimum_peaks=10\n")
parameterFile.write("min-peaks=10\n")
# Precursor selection rules.
parameterFile.write("precursor-window=3\n")
parameterFile.write("precursor-window-type=mass\n")
parameterFile.write("peptide_mass_tolerance=3\n")
parameterFile.write("peptide_mass_units=0\n") # 0=amu, 1=mmu, 2=ppm
# Precursor mass type.
parameterFile.write("isotopic-mass=mono\n")
parameterFile.write("mass_type_parent=1\n") # 1=monoisotopic
# Fragment mass type. Tides uses only monoisotopic.
parameterFile.write("fragment-mass=mono\n")
parameterFile.write("mass_type_fragment=1\n") # 1=monoisotopic
# Decoys.
parameterFile.write("decoy-format=peptide-reverse\n")
parameterFile.write("decoy_search=1\n") # 1 = concatenated decoy search
parameterFile.write("concat=T\n")
parameterFile.write("keep-terminal-aminos=C\n") # No corresponding Comet param
# Report the top 5 matches.
parameterFile.write("top-match=5\n")
parameterFile.write("num_results=6\n")
parameterFile.write("num_output_lines=5\n")
# Precursor removal.
parameterFile.write("remove-precursor-peak=T\n")
parameterFile.write("remove-precursor-tolerance=15\n")
parameterFile.write("remove_precursor_peak=1\n")
parameterFile.write("remove_precursor_tolerance=15\n")
# Flanking peaks.
parameterFile.write("use-flanking-peaks=F\n")
parameterFile.write("theoretical_fragment_ions=1\n") # 0 = flanks; 1 = no flanks
parameterFile.write("use-neutral-loss-peaks=F\n")
# Fragment m/z discretization.
parameterFile.write("mz-bin-offset=0.68\n")
parameterFile.write("mz-bin-width=1.0005079\n")
parameterFile.write("fragment_bin_offset=0.68\n")
parameterFile.write("fragment_bin_tol=1.0005079\n")
# Peptide mass range.
parameterFile.write("min-mass=200\n")
parameterFile.write("max-mass=7200\n")
parameterFile.write("digest_mass_range=200 7200\n")
| parameterFile.write("compute-sp=T\n")
parameterFile.write("overwrite=T\n")
parameterFile.write("peptide-list=T\n")
# Comet parameters
parameterFile.write("output_pepxmlfile=0\n")
parameterFile.write("add_C_cysteine=57.021464\n")
# parameterFile.write("num_threads=1\n") # Multithreaded sometimes dumps core.
parameterFile.write("max_fragment_charge=2\n")
parameterFile.write("isotope_error=0\n")
parameterFile.write("use_A_ions=0\n")
parameterFile.write("use_B_ions=1\n")
parameterFile.write("use_C_ions=0\n")
parameterFile.write("use_X_ions=0\n")
parameterFile.write("use_Y_ions=1\n")
parameterFile.write("use_Z_ions=0\n")
parameterFile.write("use_NL_ions=0\n")
parameterFile.write("variable_mod01=0.0 X 0 3\n")
parameterFile.write("variable_mod02=0.0 X 0 3\n")
parameterFile.write("[COMET_ENZYME_INFO]\n")
parameterFile.write("0. No_enzyme 0 - -\n")
parameterFile.write("1. Trypsin 1 KR P\n")
parameterFile.close()
#############################################################################
def extractData(inputFileName, columnName, outputFileName):
# dataset = pd.read_csv(inputFileName, sep='\t')
# data_frame = pd.DataFrame(dataset)
data_frame = pd.read_csv(inputFileName, sep='\t')
data_frame = data_frame[[columnName]]
data_frame.to_csv(outputFileName, sep='\t', index=False)
#############################################################################
def runSearch(outputDirectory, searchName, searchParam, database,
psmFile, scoreColumn, confidenceParam):
runCommand("%s %s --output-dir %s --parameter-file %s %s %s %s"
% (CRUX, searchName, outputDirectory, parameterFileName,
searchParam, ms2, database),
psmFile)
confidenceFile = "%s/assign-confidence.target.txt" % outputDirectory
runCommand("%s assign-confidence --output-dir %s %s %s" %
(CRUX, outputDirectory, confidenceParam, psmFile), confidenceFile)
qFile = "%s/%s.q.txt" % (outputDirectory, searchName)
extractData(confidenceFile, "tdc q-value", qFile)
percolatorFile = "%s/percolator.target.psms.txt" % outputDirectory
runCommand("%s percolator --output-dir %s %s"
% (CRUX, outputDirectory, psmFile), percolatorFile)
qFile = "%s/%s.percolator.q.txt" % (outputDirectory, searchName)
extractData(percolatorFile, "q-value", qFile)
fourColFile = "%s/%s.target.four-col.txt" % (outputDirectory, searchName)
dataset = pd.read_csv(psmFile, sep='\t')
data_frame = pd.DataFrame(dataset)
data_frame = data_frame[["scan", "charge", "sequence", scoreColumn]]
data_frame.to_csv(fourColFile, sep='\t', index=False)
reducedFile = "%s/%s.target.reduced.txt" % (outputDirectory, searchName)
runCommand("awk 'NR > 1 {print $1 \"~\" $2 \"~\" $3 \"\t\" $4}' %s | sort -k 1b,1 > %s" %
(fourColFile, reducedFile), "")
# Create a scatter plot of XCorr scores.
def makeScatterPlot(xData, xLabel, yData, yLabel, outputRoot):
runCommand("join %s %s | awk -F \"~\" '{print $1 \" \" $2 \" \" $3}' | awk '{print $1 \"\t\" $2 \"\t\" $3 \"\t\" $4 \"\t\" $5}' | sort -n > %s.txt"
% (xData, yData, outputRoot), "")
gnuplotFileName = "%s.gnuplot" % outputRoot
gnuplotFile = open(gnuplotFileName, "w")
gnuplotFile.write("set output \"/dev/null\"\n")
gnuplotFile.write("set terminal png\n")
gnuplotFile.write("set xlabel \"%s\"\n" % xLabel)
gnuplotFile.write("set ylabel \"%s\"\n" % yLabel)
gnuplotFile.write("plot x notitle with lines\n")
gnuplotFile.write("replot \"%s.txt\" using 4:5 notitle\n" % outputRoot)
gnuplotFile.write("set output\n")
gnuplotFile.write("replot\n")
gnuplotFile.close()
runCommand("gnuplot %s > %s.png" % (gnuplotFileName, outputRoot), "")
# Create a gnuplot of a list of methods.
def makePerformancePlot(title, listOfMethods):
if not os.path.isdir("plots"):
os.mkdir("plots")
gnuplotFileName = "plots/%s.gnuplot" % title
gnuplotFile = open(gnuplotFileName, "w")
gnuplotFile.write("set output \"/dev/null\"\n")
gnuplotFile.write("set terminal png\n")
gnuplotFile.write("set title \"%s\"\n" % title)
gnuplotFile.write("set xlabel \"q-value threshold\"\n")
gnuplotFile.write("set ylabel \"Number of accepted PSMs\"\n")
gnuplotFile.write("set xrange [0:0.1]\n")
gnuplotFile.write("set key bottom right\n")
firstOne = True
for myTuple in listOfMethods:
(dataFileName, seriesTitle) = myTuple
if firstOne:
firstOne = False
else:
gnuplotFile.write("re")
gnuplotFile.write("plot \"%s\" using 1:0 title \"%s\" with lines lw 1\n"
% (dataFileName, seriesTitle))
gnuplotFile.write("set output\n")
gnuplotFile.write("replot\n")
gnuplotFile.close()
runCommand("gnuplot %s > plots/%s.png" % (gnuplot | # Other Crux parameters. | random_line_split |
user.go | }
defer resp.Body.Close()
_, err = io.Copy(newFile, resp.Body)
if err != nil {
log.Error("copy err(%v)", err)
return
}
return
}
//CreateDir creates a directory
func (d *Dao) CreateDir(path string) (err error) {
_, err = os.Stat(path)
defer func() {
if os.IsExist(err) {
err = nil
}
}()
if os.IsNotExist(err) {
err = os.Mkdir(path, os.ModePerm)
}
return
}
// ReadLine reads the file line by line, invoking the handler callback for each line
func (d *Dao) ReadLine(path string, handler func(string)) (err error) {
f, err := os.Open(path)
if err != nil {
log.Error("open path(%s) err(%v)", path, err)
return
}
defer f.Close()
buf := bufio.NewReader(f)
for {
line, err := buf.ReadString('\n')
if err != nil {
if err == io.EOF {
return nil
}
log.Error("read path(%s) err(%v)", path, err)
return nil
}
line = strings.TrimSpace(line)
handler(line)
time.Sleep(time.Duration(1) * time.Second)
}
}
// ReadLines issues one gRPC request for every 50 lines
func (d *Dao) ReadLines(path string, handler func([]int64)) (err error) {
f, err := os.Open(path)
if err != nil {
log.Error("ReadLine open path(%s) err(%v)", path, err)
return
}
defer f.Close()
buf := bufio.NewReader(f)
mids := make([]int64, 0, 50)
i := 0
for {
line, err := buf.ReadString('\n')
if err != nil {
if err == io.EOF {
err = nil
break
}
log.Error("read path(%s) err(%v)", path, err)
break
}
mid, _ := strconv.ParseInt(strings.TrimSpace(line), 10, 64)
mids = append(mids, mid)
i++
if i == 50 {
handler(mids)
mids = make([]int64, 0, 50)
i = 0
time.Sleep(time.Duration(1) * time.Second)
}
}
if len(mids) != 0 {
handler(mids)
}
return
}
//HandlerUserDmg mid, gender, age, geo, content_tag, viewed_video, content_zone, content_count, follow_ups
func (d *Dao) HandlerUserDmg(user string) {
u := strings.Split(user, "\u0001")
userDmg := &model.UserDmg{
MID: u[0],
Gender: u[1],
Age: u[2],
Geo: u[3],
ContentTag: u[4],
ViewedVideo: d.HandlerViewedVideo(u[5]),
ContentZone: u[6],
ContentCount: u[7],
FollowUps: u[8],
}
d.CacheUserDmg(context.Background(), userDmg)
}
//HandlerUserBbqDmg ..
func (d *Dao) HandlerUserBbqDmg(user string) {
u := strings.Split(user, ",")
userBbqDmg := &model.UserBbqDmg{
MID: u[0],
Tag2: strings.Split(u[1], "\u0002"),
Tag3: strings.Split(u[2], "\u0002"),
Up: strings.Split(u[3], "\u0002"),
}
d.CacheUserBbqDmg(context.Background(), userBbqDmg)
}
//HandlerUserBbqDmgBuvid ..
func (d *Dao) HandlerUserBbqDmgBuvid(user string) {
u := strings.Split(user, ",")
UserBbqBuvidDmg := &model.UserBbqBuvidDmg{
Buvid: u[0],
Tag2: strings.Split(u[1], "\u0002"),
Tag3: strings.Split(u[2], "\u0002"),
Up: strings.Split(u[3], "\u0002"),
}
d.CacheUserBbqDmgBuvid(context.Background(), UserBbqBuvidDmg)
}
// HandlerMids update userbase by mids
func (d *Dao) HandlerMids(mids []int64) {
res, err := d.VideoClient.SyncUserStas(context.Background(), &video.SyncMidsRequset{MIDS: mids})
if err != nil {
log.Error("userbases update failes, mids(%v), err(%v)", mids, err)
return
}
log.Info("userbases update success, affected %v rows", res.Affc)
}
// HandlerMid update userbase by mid
func (d *Dao) HandlerMid(s string) {
mid, _ := strconv.ParseInt(s, 10, 64)
res, err := d.VideoClient.SyncUserSta(context.Background(), &video.SyncMidRequset{MID: mid})
if err != nil {
log.Error("userbase update failes, mid(%v), err(%v)", mid, err)
return
}
if res.Affc == 1 {
log.Info("userbase insert success ,mid(%v)", mid)
} else if res.Affc == 2 {
log.Info("userbase update success , mid(%v)", mid)
}
}
//HandlerViewedVideo processes viewed videos, keeping the 100 most recently watched
func (d *Dao) HandlerViewedVideo(v string) (res map[int64]string) {
res = make(map[int64]string)
var vv [][]interface{}
var dd string
err := json.Unmarshal([]byte(v), &vv)
if err != nil {
return
}
l := len(vv)
n := 1
for i := l - 1; i >= 0; i-- {
for _, a := range vv[i] {
switch b := a.(type) {
case string:
dd = b
case []interface{}:
ll := len(b)
for j := ll - 1; j >= 0; j-- {
switch c := b[j].(type) {
case float64:
k := int64(c)
if _, ok := res[k]; !ok {
res[k] = dd
n++
}
}
if n > 100 {
return
}
}
}
}
}
return
}
// SelMidFromVideo get distinct mid list from table video
func (d *Dao) SelMidFromVideo() (mids []int64, err error) {
rows, err := d.db.Query(context.Background(), _selMidFromVideo)
if err != nil {
log.Error("SelMidFromVideo failed, err(%v)", err)
return
}
defer rows.Close()
for rows.Next() {
var s string
if err = rows.Scan(&s); err != nil {
panic(err.Error())
}
var mid int64
if mid, err = strconv.ParseInt(s, 10, 64); err != nil {
log.Error("strconv.ParseInt(%s) error(%v)", s, err)
return
}
mids = append(mids, mid)
}
return
}
//MergeUpInfo merge up info
func (d *Dao) MergeUpInfo(mid int64) (err error) {
var (
ctx = context.Background()
params = url.Values{}
req = &http.Request{}
id int64
res struct {
Code int
Data model.UpUserInfoRes
}
)
err = d.db.QueryRow(ctx, "select mid from user_base where mid = ?", mid).Scan(&id)
if err == nil {
log.Infow(ctx, "log", "already has mid in user_base", "mid", mid)
return
}
if err == sql.ErrNoRows {
params.Set("mid", strconv.FormatInt(mid, 10))
req, err = d.HTTPClient.NewRequest("GET", d.c.URLs["account"], "", params)
if err != nil {
log.Error("MergeUpInfo error(%v)", err)
return
}
if err = d.HTTPClient.Do(ctx, req, &res); err != nil {
log.Error("MergeUpInfo http req failed ,err:%v", err)
return
}
res := res.Data
var sex int
switch res.Sex {
case "男":
sex = 1
case "女":
sex = 2
default:
sex = 3
}
_, err = d.db.Exec(ctx,
"insert into user_base (mid,uname,face,sex,user_type,complete_degree)values(?,?,?,?,?,?)",
res.MID,
res.Name,
res.Face, | sex,
model.UserTypeUp, | random_line_split |
|
user.go | (c context.Context, userDmg *model.UserDmg) (mid string, err error) {
conn := d.redis.Get(c)
defer conn.Close()
var b []byte
if b, err = json.Marshal(userDmg); err != nil {
log.Error("cache user dmg marshal err(%v)", err)
return
}
cacheKey := getUserDmgKey(userDmg.MID)
fmt.Println(cacheKey)
if _, err = conn.Do("SET", cacheKey, b, "EX", _userDmgCacheTimeout); err != nil {
log.Error("cache user dmg redis set err(%v)", err)
return
}
return
}
//CacheUserBbqDmg ...
func (d *Dao) CacheUserBbqDmg(c context.Context, userBbqDmg *model.UserBbqDmg) (mid string, err error) {
conn := d.redis.Get(c)
defer conn.Close()
tag2 := strings.Join(userBbqDmg.Tag2, ",")
tag3 := strings.Join(userBbqDmg.Tag3, ",")
up := strings.Join(userBbqDmg.Up, ",")
cacheKey := getUserDmgKey(userBbqDmg.MID)
if err = conn.Send("HSET", cacheKey, "zone", tag2); err != nil {
log.Error("cache user bbq dmg redis set tag2 err(%v)", err)
return
}
if err = conn.Send("HSET", cacheKey, "tag", tag3); err != nil {
log.Error("cache user bbq dmg redis set tag3 err(%v)", err)
return
}
if err = conn.Send("HSET", cacheKey, "up", up); err != nil {
log.Error("cache user bbq dmg redis set up err(%v)", err)
return
}
return
}
//CacheUserBbqDmgBuvid ...
func (d *Dao) CacheUserBbqDmgBuvid(c context.Context, userBbqDmgBuvid *model.UserBbqBuvidDmg) (Buvid string, err error) {
conn := d.redis.Get(c)
defer conn.Close()
tag2 := strings.Join(userBbqDmgBuvid.Tag2, ",")
tag3 := strings.Join(userBbqDmgBuvid.Tag3, ",")
up := strings.Join(userBbqDmgBuvid.Up, ",")
cacheKey := getUserBuvidDmgKey(userBbqDmgBuvid.Buvid)
if err = conn.Send("HSET", cacheKey, "zone", tag2); err != nil {
log.Error("cache user bbq buvid dmg redis set tag2 err(%v)", err)
return
}
if err = conn.Send("HSET", cacheKey, "tag", tag3); err != nil {
log.Error("cache user bbq buvid dmg redis set tag3 err(%v)", err)
return
}
if err = conn.Send("HSET", cacheKey, "up", up); err != nil {
log.Error("cache user bbq buvid dmg redis set up err(%v)", err)
return
}
return
}
// AddUpUserDmg .
func (d *Dao) AddUpUserDmg(c context.Context, upUserDmg *model.UpUserDmg) (num int64, err error) {
var res sql.Result
if res, err = d.db.Exec(c, _incrUpUserDmgSQL, upUserDmg.MID, upUserDmg.Uname, upUserDmg.Play, upUserDmg.Fans, upUserDmg.AVs, upUserDmg.Likes); err != nil {
return 0, err
}
return res.LastInsertId()
}
// UpdateUpUserDmg .
func (d *Dao) UpdateUpUserDmg(c context.Context, upUserDmg *model.UpUserDmg) (num int64, err error) {
t := time.Now().AddDate(0, 0, 0).Format("2006-01-02 15:04:05")
var res sql.Result
if res, err = d.db.Exec(c, _updateUpUserDmgSQL, upUserDmg.Uname, upUserDmg.Play, upUserDmg.Fans, upUserDmg.AVs, upUserDmg.Likes, t, upUserDmg.MID); err != nil {
return 0, err
}
return res.RowsAffected()
}
// DelUpUserDmg .
func (d *Dao) DelUpUserDmg(c context.Context) (num int64, err error) {
t := time.Unix(time.Now().Unix(), -int64(36*time.Hour)).Format("2006-01-02 15:04:05")
var res sql.Result
if res, err = d.db.Exec(c, _delUpUserDmgSQL, t); err != nil {
return 0, err
}
return res.RowsAffected()
}
//Download downloads a file
func (d *Dao) Download(url string, name string) (fpath string, err error) {
if name == "" {
u := strings.Split(url, "/")
l := len(u)
name = u[l-1]
}
t := time.Now().AddDate(0, 0, 0).Format("20060102")
path := conf.Conf.Download.File + t
err = d.CreateDir(path)
if err != nil {
log.Error("create dir(%s) err(%v)", path, err)
return
}
fpath = path + "/" + name
newFile, err := os.Create(fpath)
if err != nil {
log.Error("create path(%s) err(%v)", fpath, err)
return
}
defer newFile.Close()
client := http.Client{}
resp, err := client.Get(url)
if err != nil {
log.Error("download url(%s) err(%v)", url, err)
return
}
defer resp.Body.Close()
_, err = io.Copy(newFile, resp.Body)
if err != nil {
log.Error("copy err(%v)", err)
return
}
return
}
//CreateDir creates a directory
func (d *Dao) CreateDir(path string) (err error) {
_, err = os.Stat(path)
defer func() {
if os.IsExist(err) {
err = nil
}
}()
if os.IsNotExist(err) {
err = os.Mkdir(path, os.ModePerm)
}
return
}
// ReadLine reads the file line by line, invoking the handler callback for each line
func (d *Dao) ReadLine(path string, handler func(string)) (err error) {
f, err := os.Open(path)
if err != nil {
log.Error("open path(%s) err(%v)", path, err)
return
}
defer f.Close()
buf := bufio.NewReader(f)
for {
line, err := buf.ReadString('\n')
if err != nil {
if err == io.EOF {
return nil
}
log.Error("read path(%s) err(%v)", path, err)
return nil
}
line = strings.TrimSpace(line)
handler(line)
time.Sleep(time.Duration(1) * time.Second)
}
}
// ReadLines issues one gRPC request for every 50 lines
func (d *Dao) ReadLines(path string, handler func([]int64)) (err error) {
f, err := os.Open(path)
if err != nil {
log.Error("ReadLine open path(%s) err(%v)", path, err)
return
}
defer f.Close()
buf := bufio.NewReader(f)
mids := make([]int64, 0, 50)
i := 0
for {
line, err := buf.ReadString('\n')
if err != nil {
if err == io.EOF {
err = nil
break
}
log.Error("read path(%s) err(%v)", path, err)
break
}
mid, _ := strconv.ParseInt(strings.TrimSpace(line), 10, 64)
mids = append(mids, mid)
i++
if i == 50 {
handler(mids)
mids = make([]int64, 0, 50)
i = 0
time.Sleep(time.Duration(1) * time.Second)
}
}
if len(mids) != 0 {
handler(mids)
}
return
}
//HandlerUserDmg mid, gender, age, geo, content_tag, viewed_video, content_zone, content_count, follow_ups
func (d *Dao) HandlerUserDmg(user string) {
u := strings.Split(user, "\u0001")
userDmg := &model.UserDmg{
MID: u[0],
Gender: u[1],
Age: u[2],
Geo: u[3],
ContentTag: u[4],
ViewedVideo: d.HandlerViewedVideo(u[5]),
ContentZone: u[6],
ContentCount: u[7],
FollowUps: u[8],
}
d.CacheUserDmg(context.Background(), userDmg)
}
//HandlerUserBb | CacheUserDmg | identifier_name |
|
user.go | mg) (mid string, err error) {
conn := d.redis.Get(c)
defer conn.Close()
tag2 := strings.Join(userBbqDmg.Tag2, ",")
tag3 := strings.Join(userBbqDmg.Tag3, ",")
up := strings.Join(userBbqDmg.Up, ",")
cacheKey := getUserDmgKey(userBbqDmg.MID)
if err = conn.Send("HSET", cacheKey, "zone", tag2); err != nil {
log.Error("cache user bbq dmg redis set tag2 err(%v)", err)
return
}
if err = conn.Send("HSET", cacheKey, "tag", tag3); err != nil {
log.Error("cache user bbq dmg redis set tag3 err(%v)", err)
return
}
if err = conn.Send("HSET", cacheKey, "up", up); err != nil {
log.Error("cache user bbq dmg redis set up err(%v)", err)
return
}
return
}
//CacheUserBbqDmgBuvid ...
func (d *Dao) CacheUserBbqDmgBuvid(c context.Context, userBbqDmgBuvid *model.UserBbqBuvidDmg) (Buvid string, err error) {
conn := d.redis.Get(c)
defer conn.Close()
tag2 := strings.Join(userBbqDmgBuvid.Tag2, ",")
tag3 := strings.Join(userBbqDmgBuvid.Tag3, ",")
up := strings.Join(userBbqDmgBuvid.Up, ",")
cacheKey := getUserBuvidDmgKey(userBbqDmgBuvid.Buvid)
if err = conn.Send("HSET", cacheKey, "zone", tag2); err != nil {
log.Error("cache user bbq buvid dmg redis set tag2 err(%v)", err)
return
}
if err = conn.Send("HSET", cacheKey, "tag", tag3); err != nil {
log.Error("cache user bbq buvid dmg redis set tag3 err(%v)", err)
return
}
if err = conn.Send("HSET", cacheKey, "up", up); err != nil {
log.Error("cache user bbq buvid dmg redis set up err(%v)", err)
return
}
return
}
// AddUpUserDmg .
func (d *Dao) AddUpUserDmg(c context.Context, upUserDmg *model.UpUserDmg) (num int64, err error) {
var res sql.Result
if res, err = d.db.Exec(c, _incrUpUserDmgSQL, upUserDmg.MID, upUserDmg.Uname, upUserDmg.Play, upUserDmg.Fans, upUserDmg.AVs, upUserDmg.Likes); err != nil {
return 0, err
}
return res.LastInsertId()
}
// UpdateUpUserDmg .
func (d *Dao) UpdateUpUserDmg(c context.Context, upUserDmg *model.UpUserDmg) (num int64, err error) {
t := time.Now().AddDate(0, 0, 0).Format("2006-01-02 15:04:05")
var res sql.Result
if res, err = d.db.Exec(c, _updateUpUserDmgSQL, upUserDmg.Uname, upUserDmg.Play, upUserDmg.Fans, upUserDmg.AVs, upUserDmg.Likes, t, upUserDmg.MID); err != nil {
return 0, err
}
return res.RowsAffected()
}
// DelUpUserDmg .
func (d *Dao) DelUpUserDmg(c context.Context) (num int64, err error) {
t := time.Unix(time.Now().Unix(), -int64(36*time.Hour)).Format("2006-01-02 15:04:05")
var res sql.Result
if res, err = d.db.Exec(c, _delUpUserDmgSQL, t); err != nil {
return 0, err
}
return res.RowsAffected()
}
//Download downloads a file
func (d *Dao) Download(url string, name string) (fpath string, err error) {
if name == "" {
u := strings.Split(url, "/")
l := len(u)
name = u[l-1]
}
t := time.Now().AddDate(0, 0, 0).Format("20060102")
path := conf.Conf.Download.File + t
err = d.CreateDir(path)
if err != nil {
log.Error("create dir(%s) err(%v)", path, err)
return
}
fpath = path + "/" + name
newFile, err := os.Create(fpath)
if err != nil {
log.Error("create path(%s) err(%v)", fpath, err)
return
}
defer newFile.Close()
client := http.Client{}
resp, err := client.Get(url)
if err != nil {
log.Error("download url(%s) err(%v)", url, err)
return
}
defer resp.Body.Close()
_, err = io.Copy(newFile, resp.Body)
if err != nil {
log.Error("copy err(%v)", err)
return
}
return
}
//CreateDir creates a directory
func (d *Dao) CreateDir(path string) (err error) {
_, err = os.Sta | 文件,hander回调
func (d *Dao) ReadLine(path string, handler func(string)) (err error) {
f, err := os.Open(path)
if err != nil {
log.Error("open path(%s) err(%v)", path, err)
return
}
defer f.Close()
buf := bufio.NewReader(f)
for {
line, err := buf.ReadString('\n')
if err != nil {
if err == io.EOF {
return nil
}
log.Error("read path(%s) err(%v)", path, err)
return nil
}
line = strings.TrimSpace(line)
handler(line)
time.Sleep(time.Duration(1) * time.Second)
}
}
// ReadLines issues one gRPC request for every 50 lines
func (d *Dao) ReadLines(path string, handler func([]int64)) (err error) {
f, err := os.Open(path)
if err != nil {
log.Error("ReadLine open path(%s) err(%v)", path, err)
return
}
defer f.Close()
buf := bufio.NewReader(f)
mids := make([]int64, 0, 50)
i := 0
for {
line, err := buf.ReadString('\n')
if err != nil {
if err == io.EOF {
err = nil
break
}
log.Error("read path(%s) err(%v)", path, err)
break
}
mid, _ := strconv.ParseInt(strings.TrimSpace(line), 10, 64)
mids = append(mids, mid)
i++
if i == 50 {
handler(mids)
mids = make([]int64, 0, 50)
i = 0
time.Sleep(time.Duration(1) * time.Second)
}
}
if len(mids) != 0 {
handler(mids)
}
return
}
//HandlerUserDmg mid, gender, age, geo, content_tag, viewed_video, content_zone, content_count, follow_ups
func (d *Dao) HandlerUserDmg(user string) {
u := strings.Split(user, "\u0001")
userDmg := &model.UserDmg{
MID: u[0],
Gender: u[1],
Age: u[2],
Geo: u[3],
ContentTag: u[4],
ViewedVideo: d.HandlerViewedVideo(u[5]),
ContentZone: u[6],
ContentCount: u[7],
FollowUps: u[8],
}
d.CacheUserDmg(context.Background(), userDmg)
}
//HandlerUserBbqDmg ..
func (d *Dao) HandlerUserBbqDmg(user string) {
u := strings.Split(user, ",")
userBbqDmg := &model.UserBbqDmg{
MID: u[0],
Tag2: strings.Split(u[1], "\u0002"),
Tag3: strings.Split(u[2], "\u0002"),
Up: strings.Split(u[3], "\u0002"),
}
d.CacheUserBbqDmg(context.Background(), userBbqDmg)
}
//HandlerUserBbqDmgBuvid ..
func (d *Dao) HandlerUserBbqDmgBuvid(user string) {
u := strings.Split(user, ",")
UserBbqBuvidDmg := &model.UserBb | t(path)
defer func() {
if os.IsExist(err) {
err = nil
}
}()
if os.IsNotExist(err) {
err = os.Mkdir(path, os.ModePerm)
}
return
}
// ReadLine 按行读取 | identifier_body |
user.go | , err)
return
}
defer f.Close()
buf := bufio.NewReader(f)
mids := make([]int64, 0, 50)
i := 0
for {
line, err := buf.ReadString('\n')
if err != nil {
if err == io.EOF {
err = nil
break
}
log.Error("read path(%s) err(%v)", path, err)
break
}
mid, _ := strconv.ParseInt(strings.TrimSpace(line), 10, 64)
mids = append(mids, mid)
i++
if i == 50 {
handler(mids)
mids = make([]int64, 0, 50)
i = 0
time.Sleep(time.Duration(1) * time.Second)
}
}
if len(mids) != 0 {
handler(mids)
}
return
}
//HandlerUserDmg mid, gender, age, geo, content_tag, viewed_video, content_zone, content_count, follow_ups
func (d *Dao) HandlerUserDmg(user string) {
u := strings.Split(user, "\u0001")
userDmg := &model.UserDmg{
MID: u[0],
Gender: u[1],
Age: u[2],
Geo: u[3],
ContentTag: u[4],
ViewedVideo: d.HandlerViewedVideo(u[5]),
ContentZone: u[6],
ContentCount: u[7],
FollowUps: u[8],
}
d.CacheUserDmg(context.Background(), userDmg)
}
//HandlerUserBbqDmg ..
func (d *Dao) HandlerUserBbqDmg(user string) {
u := strings.Split(user, ",")
userBbqDmg := &model.UserBbqDmg{
MID: u[0],
Tag2: strings.Split(u[1], "\u0002"),
Tag3: strings.Split(u[2], "\u0002"),
Up: strings.Split(u[3], "\u0002"),
}
d.CacheUserBbqDmg(context.Background(), userBbqDmg)
}
//HandlerUserBbqDmgBuvid ..
func (d *Dao) HandlerUserBbqDmgBuvid(user string) {
u := strings.Split(user, ",")
UserBbqBuvidDmg := &model.UserBbqBuvidDmg{
Buvid: u[0],
Tag2: strings.Split(u[1], "\u0002"),
Tag3: strings.Split(u[2], "\u0002"),
Up: strings.Split(u[3], "\u0002"),
}
d.CacheUserBbqDmgBuvid(context.Background(), UserBbqBuvidDmg)
}
// HandlerMids update userbase by mids
func (d *Dao) HandlerMids(mids []int64) {
res, err := d.VideoClient.SyncUserStas(context.Background(), &video.SyncMidsRequset{MIDS: mids})
if err != nil {
log.Error("userbases update failes, mids(%v), err(%v)", mids, err)
return
}
log.Info("userbases update success, affected %v rows", res.Affc)
}
// HandlerMid update userbase by mid
func (d *Dao) HandlerMid(s string) {
mid, _ := strconv.ParseInt(s, 10, 64)
res, err := d.VideoClient.SyncUserSta(context.Background(), &video.SyncMidRequset{MID: mid})
if err != nil {
log.Error("userbase update failes, mid(%v), err(%v)", mid, err)
return
}
if res.Affc == 1 {
log.Info("userbase insert success ,mid(%v)", mid)
} else if res.Affc == 2 {
log.Info("userbase update success , mid(%v)", mid)
}
}
//HandlerViewedVideo processes viewed videos, keeping the 100 most recently watched
func (d *Dao) HandlerViewedVideo(v string) (res map[int64]string) {
res = make(map[int64]string)
var vv [][]interface{}
var dd string
err := json.Unmarshal([]byte(v), &vv)
if err != nil {
return
}
l := len(vv)
n := 1
for i := l - 1; i >= 0; i-- {
for _, a := range vv[i] {
switch b := a.(type) {
case string:
dd = b
case []interface{}:
ll := len(b)
for j := ll - 1; j >= 0; j-- {
switch c := b[j].(type) {
case float64:
k := int64(c)
if _, ok := res[k]; !ok {
res[k] = dd
n++
}
}
if n > 100 {
return
}
}
}
}
}
return
}
// SelMidFromVideo get distinct mid list from table video
func (d *Dao) SelMidFromVideo() (mids []int64, err error) {
rows, err := d.db.Query(context.Background(), _selMidFromVideo)
if err != nil {
log.Error("SelMidFromVideo failed, err(%v)", err)
return
}
defer rows.Close()
for rows.Next() {
var s string
if err = rows.Scan(&s); err != nil {
panic(err.Error())
}
var mid int64
if mid, err = strconv.ParseInt(s, 10, 64); err != nil {
log.Error("strconv.ParseInt(%s) error(%v)", s, err)
return
}
mids = append(mids, mid)
}
return
}
//MergeUpInfo merge up info
func (d *Dao) MergeUpInfo(mid int64) (err error) {
var (
ctx = context.Background()
params = url.Values{}
req = &http.Request{}
id int64
res struct {
Code int
Data model.UpUserInfoRes
}
)
err = d.db.QueryRow(ctx, "select mid from user_base where mid = ?", mid).Scan(&id)
if err == nil {
log.Infow(ctx, "log", "already has mid in user_base", "mid", mid)
return
}
if err == sql.ErrNoRows {
params.Set("mid", strconv.FormatInt(mid, 10))
req, err = d.HTTPClient.NewRequest("GET", d.c.URLs["account"], "", params)
if err != nil {
log.Error("MergeUpInfo error(%v)", err)
return
}
if err = d.HTTPClient.Do(ctx, req, &res); err != nil {
log.Error("MergeUpInfo http req failed ,err:%v", err)
return
}
res := res.Data
var sex int
switch res.Sex {
case "男":
sex = 1
case "女":
sex = 2
default:
sex = 3
}
_, err = d.db.Exec(ctx,
"insert into user_base (mid,uname,face,sex,user_type,complete_degree)values(?,?,?,?,?,?)",
res.MID,
res.Name,
res.Face,
sex,
model.UserTypeUp,
0)
if err != nil {
log.Error("MergeUpInfo insert upinfo failed,err:%v", err)
return
}
} else {
log.Error("MergeUpInfo query sql failed,err:%v", err)
}
if err = d.db.QueryRow(ctx, "select id from user_statistics where mid = ?", mid).Scan(&id); err != nil {
if err == sql.ErrNoRows {
if _, err = d.db.Exec(ctx, "insert into user_statistics (mid) values (?)", mid); err != nil {
log.Error("init insert user_statistics failed,err:%v", err)
}
} else {
log.Error("init query user_statistics failed,err:%v", err)
}
}
return
}
//UsersByLast fetches users in batches using lastid
func (d *Dao) UsersByLast(c context.Context, lastid int64) (r []*model.UserBaseDB, err error) {
var rows *xsql.Rows
rows, err = d.db.Query(c, _queryUsersByLast, lastid, _limitSize)
if err != nil {
log.Error("db _queryVideos err(%v)", err)
return
}
for rows.Next() {
u := new(model.UserBaseDB)
if err = rows.Scan(&u.ID, &u.MID, &u.Uname); err != nil {
log.Error("scan err(%v)", err)
continue
}
r = append(r, u)
}
return
}
// SelMidFromUser | Base get distinct mid list from table user_base
fun | conditional_block |
|
lib.rs | ::fs::File;
use std::io::{self, BufRead, BufReader, Read};
use std::path::Path;
use indexmap::IndexSet;
use memmap2::{Mmap, MmapOptions};
/// The object that stores the parsed fasta index file. You can use it to map chromosome names to
/// indexes and lookup offsets for chr-start:end coordinates
#[derive(Debug, Clone)]
pub struct Fai {
chromosomes: Vec<FaiRecord>,
name_map: IndexSet<String>,
}
impl Fai {
/// Open a fasta index file from path `P`.
pub fn from_file<P: AsRef<Path>>(path: P) -> io::Result<Self> {
let f = File::open(path)?;
let br = BufReader::new(f);
let mut name_map = IndexSet::new();
let mut chromosomes = Vec::new();
for l in br.lines() {
let line = l?;
let p: Vec<_> = line.split('\t').collect();
if p.len() != 5 {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Expected 5 columns in .fai file.",
));
}
name_map.insert(p[0].to_owned());
let ioerr =
|e, msg| io::Error::new(io::ErrorKind::InvalidData, format!("{}:{}", msg, e));
chromosomes.push(FaiRecord {
len: p[1]
.parse()
.map_err(|e| ioerr(e, "Error parsing chr len in .fai"))?,
offset: p[2]
.parse()
.map_err(|e| ioerr(e, "Error parsing chr offset in .fai"))?,
line_bases: p[3]
.parse()
.map_err(|e| ioerr(e, "Error parsing chr line_bases in .fai"))?,
line_width: p[4]
.parse()
.map_err(|e| ioerr(e, "Error parsing chr line_width in .fai"))?,
});
}
Ok(Fai {
chromosomes,
name_map,
})
}
/// Calculate the slice coordinates (byte offsets).
/// tid is the index of the chromosome (lookup with `Fai::tid` if necessary).
/// start, end: zero based coordinates of the requested range.
///
/// Returns a tuple (start, end) if successful. `io::Error` otherwise.
#[inline]
pub fn offset(&self, tid: usize, start: usize, stop: usize) -> io::Result<(usize, usize)> {
let chr = &self.chromosomes.get(tid).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})?;
if stop > chr.len {
return Err(io::Error::new(
io::ErrorKind::Other,
"FASTA read interval was out of bounds",
));
}
let start_offset =
chr.offset + (start / chr.line_bases) * chr.line_width + start % chr.line_bases;
let stop_offset =
chr.offset + (stop / chr.line_bases) * chr.line_width + stop % chr.line_bases;
Ok((start_offset, stop_offset))
}
/// Calculate the slice coordinates (byte offsets).
/// tid is the index of the chromosome (lookup with `Fai::tid` if necessary).
///
/// Returns a tuple (start, end) if successful. `io::Error` otherwise.
#[inline]
pub fn offset_tid(&self, tid: usize) -> io::Result<(usize, usize)> {
let chr = &self.chromosomes.get(tid).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})?;
let start_offset = chr.offset;
let stop_offset =
chr.offset + (chr.len / chr.line_bases) * chr.line_width + chr.len % chr.line_bases;
Ok((start_offset, stop_offset))
}
/// Return the index of the chromosome by name in the fasta index.
///
/// Returns the position of chr `name` if successful, None otherwise.
#[inline]
pub fn tid(&self, name: &str) -> Option<usize> {
self.name_map.get_index_of(name)
}
/// Return the size of a chromosome in the fasta index.
///
/// Returns the size in bases as usize.
pub fn size(&self, tid: usize) -> io::Result<usize> {
let chr = &self.chromosomes.get(tid).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})?;
Ok(chr.len)
}
/// Return the name of the chromosome at index tid
pub fn name(&self, tid: usize) -> io::Result<&String> {
self.name_map.get_index(tid).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})
}
/// Return the names of the chromosomes from the fasta index in the same order as in the
/// `.fai`. You can use `Fai::tid` to map it back to an index.
///
/// Returns a `Vec<&str>` with the chromosome names.
pub fn names(&self) -> Vec<&str> {
self.name_map.iter().map(|s| s.as_str()).collect()
}
}
/// FaiRecord stores the length, offset, and fasta file characteristics of a single chromosome
#[derive(Debug, Clone)]
pub struct FaiRecord {
len: usize,
offset: usize,
line_bases: usize,
line_width: usize,
}
/// The `IndexedFasta` can be used to open a fasta file that has a valid .fai index file.
pub struct IndexedFasta {
mmap: Mmap,
fasta_index: Fai,
}
impl IndexedFasta {
/// Open a fasta file from path `P`. It is assumed that it has a valid .fai index file. The
/// .fai file is created by appending .fai to the fasta file.
pub fn from_file<P: AsRef<Path>>(path: P) -> io::Result<Self> {
let mut fai_path = path.as_ref().as_os_str().to_owned();
fai_path.push(".fai");
let fasta_index = Fai::from_file(&fai_path)?;
let file = File::open(path)?;
let mmap = unsafe { MmapOptions::new().map(&file)? };
Ok(IndexedFasta { mmap, fasta_index })
}
/// Use tid, start and end to calculate a slice on the Fasta file. Use this view to iterate
/// over the bases.
///
/// Returns FastaView for the provided chromosome, start, end if successful, Error otherwise.
pub fn view(&self, tid: usize, start: usize, stop: usize) -> io::Result<FastaView> {
if start > stop |
let (start_byte, stop_byte) = self.fasta_index.offset(tid, start, stop)?;
//println!("offset for chr {}:{}-{} is {}-{}", tid, start, stop, start_byte, stop_byte);
Ok(FastaView(&self.mmap[start_byte..stop_byte]))
}
/// Use tid to return a view of an entire chromosome.
///
/// Returns FastaView for the provided chromosome indicated by tid if successful, Error otherwise.
pub fn view_tid(&self, tid: usize) -> io::Result<FastaView> {
let (start_byte, stop_byte) = self.fasta_index.offset_tid(tid)?;
//println!("offset for chr {}:{}-{} is {}-{}", tid, start, stop, start_byte, stop_byte);
Ok(FastaView(&self.mmap[start_byte..stop_byte]))
}
/// Return a reference to the `Fai` that contains information from the fasta index.
///
/// Returns a reference to `Fai`.
pub fn fai(&self) -> &Fai {
&self.fasta_index
}
}
/// A view of a slice of the fasta file bounded by provided coordinates
pub struct FastaView<'a>(&'a [u8]);
impl<'a> FastaView<'a> {
/// Count the occurrences of A, C, G, T, N, and other in the current view. This function does
/// not differentiate between upper or lower case bases.
///
/// Returns a `BaseCounts` object.
pub fn count_bases(&self) -> BaseCounts {
let mut bc: BaseCounts = Default::default();
for b in self.bases() {
let v: u8 = b << 3;
if v ^ 8 == 0 {
bc.a += 1;
} else if v ^ 24 == 0 {
bc.c += 1;
} else if v ^ 56 == 0 {
bc.g += 1;
} else if v ^ 112 == 0 {
bc.n += 1;
} else if v ^ 160 == 0 {
bc.t += 1;
| {
return Err(io::Error::new(
io::ErrorKind::Other,
"Invalid query interval",
));
} | conditional_block |
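A minimal standalone sketch (not part of the crate above) of the byte-offset arithmetic used by `Fai::offset`; the offset, line_bases and line_width values below are invented for illustration:

fn main() {
    // hypothetical chromosome record: sequence starts at byte 100,
    // 60 bases per line, 61 bytes per line (60 bases + newline)
    let (offset, line_bases, line_width) = (100usize, 60usize, 61usize);
    // zero-based base coordinate -> byte offset, same formula as Fai::offset
    let byte_of = |pos: usize| offset + (pos / line_bases) * line_width + pos % line_bases;
    assert_eq!(byte_of(0), 100); // first base of the chromosome
    assert_eq!(byte_of(59), 159); // last base on the first line
    assert_eq!(byte_of(60), 161); // first base on the second line, newline skipped
}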
lib.rs | tid is the index of the chromosome (lookup with `Fai::tid` if necessary).
/// start, end: zero based coordinates of the requested range.
///
/// Returns a tuple (start, end) if successful. `io::Error` otherwise.
#[inline]
pub fn offset(&self, tid: usize, start: usize, stop: usize) -> io::Result<(usize, usize)> {
let chr = &self.chromosomes.get(tid).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})?;
if stop > chr.len {
return Err(io::Error::new(
io::ErrorKind::Other,
"FASTA read interval was out of bounds",
));
}
let start_offset =
chr.offset + (start / chr.line_bases) * chr.line_width + start % chr.line_bases;
let stop_offset =
chr.offset + (stop / chr.line_bases) * chr.line_width + stop % chr.line_bases;
Ok((start_offset, stop_offset))
}
/// Calculate the slice coordinates (byte offsets).
/// tid is the index of the chromosome (lookup with `Fai::tid` if necessary).
///
/// Returns a tuple (start, end) if successful. `io::Error` otherwise.
#[inline]
pub fn offset_tid(&self, tid: usize) -> io::Result<(usize, usize)> {
let chr = &self.chromosomes.get(tid).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})?;
let start_offset = chr.offset;
let stop_offset =
chr.offset + (chr.len / chr.line_bases) * chr.line_width + chr.len % chr.line_bases;
Ok((start_offset, stop_offset))
}
/// Return the index of the chromosome by name in the fasta index.
///
/// Returns the position of chr `name` if successful, None otherwise.
#[inline]
pub fn tid(&self, name: &str) -> Option<usize> {
self.name_map.get_index_of(name)
}
/// Return the size of a chromosome in the fasta index.
///
/// Returns the size in bases as usize.
pub fn size(&self, tid: usize) -> io::Result<usize> {
let chr = &self.chromosomes.get(tid).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})?;
Ok(chr.len)
}
/// Return the name of the chromosome at index tid
pub fn name(&self, tid: usize) -> io::Result<&String> {
self.name_map.get_index(tid).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})
}
/// Return the names of the chromosomes from the fasta index in the same order as in the
/// `.fai`. You can use `Fai::tid` to map it back to an index.
///
/// Returns a `Vec<&str>` with the chromosome names.
pub fn names(&self) -> Vec<&str> {
self.name_map.iter().map(|s| s.as_str()).collect()
}
}
/// FaiRecord stores the length, offset, and fasta file characteristics of a single chromosome
#[derive(Debug, Clone)]
pub struct FaiRecord {
len: usize,
offset: usize,
line_bases: usize,
line_width: usize,
}
/// The `IndexedFasta` can be used to open a fasta file that has a valid .fai index file.
pub struct IndexedFasta {
mmap: Mmap,
fasta_index: Fai,
}
impl IndexedFasta {
/// Open a fasta file from path `P`. It is assumed that it has a valid .fai index file. The
/// .fai file is created by appending .fai to the fasta file.
pub fn from_file<P: AsRef<Path>>(path: P) -> io::Result<Self> {
let mut fai_path = path.as_ref().as_os_str().to_owned();
fai_path.push(".fai");
let fasta_index = Fai::from_file(&fai_path)?;
let file = File::open(path)?;
let mmap = unsafe { MmapOptions::new().map(&file)? };
Ok(IndexedFasta { mmap, fasta_index })
}
/// Use tid, start and end to calculate a slice on the Fasta file. Use this view to iterate
/// over the bases.
///
/// Returns FastaView for the provided chromosome, start, end if successful, Error otherwise.
pub fn view(&self, tid: usize, start: usize, stop: usize) -> io::Result<FastaView> {
if start > stop {
return Err(io::Error::new(
io::ErrorKind::Other,
"Invalid query interval",
));
}
let (start_byte, stop_byte) = self.fasta_index.offset(tid, start, stop)?;
//println!("offset for chr {}:{}-{} is {}-{}", tid, start, stop, start_byte, stop_byte);
Ok(FastaView(&self.mmap[start_byte..stop_byte]))
}
/// Use tid to return a view of an entire chromosome.
///
/// Returns FastaView for the provided chromosome indicated by tid if successful, Error otherwise.
pub fn view_tid(&self, tid: usize) -> io::Result<FastaView> {
let (start_byte, stop_byte) = self.fasta_index.offset_tid(tid)?;
//println!("offset for chr {}:{}-{} is {}-{}", tid, start, stop, start_byte, stop_byte);
Ok(FastaView(&self.mmap[start_byte..stop_byte]))
}
/// Return a reference to the `Fai` that contains information from the fasta index.
///
/// Returns a reference to `Fai`.
pub fn fai(&self) -> &Fai {
&self.fasta_index
}
}
/// A view of a slice of the fasta file bounded by provided coordinates
pub struct FastaView<'a>(&'a [u8]);
impl<'a> FastaView<'a> {
/// Count the occurrences of A, C, G, T, N, and other in the current view. This function does
/// not differentiate between upper or lower case bases.
///
/// Returns a `BaseCounts` object.
pub fn count_bases(&self) -> BaseCounts {
let mut bc: BaseCounts = Default::default();
for b in self.bases() {
let v: u8 = b << 3;
if v ^ 8 == 0 {
bc.a += 1;
} else if v ^ 24 == 0 {
bc.c += 1;
} else if v ^ 56 == 0 {
bc.g += 1;
} else if v ^ 112 == 0 {
bc.n += 1;
} else if v ^ 160 == 0 {
bc.t += 1;
} else {
bc.other += 1;
}
}
bc
}
/// Iterator over the bases in the current view. Bases are returned as `u8` representations of
/// the `char`s in the fasta file. Keeps only chars with byte values between 64 and 127 (effectively
/// skipping newlines)
pub fn bases(&self) -> impl Iterator<Item = &'a u8> {
self.0.iter().filter(|&&b| b & 192 == 64)
}
}
/// Returns a newly allocated, utf8-validated string with the sequence data in `Self`
impl<'a> ToString for FastaView<'a> {
fn to_string(&self) -> String {
String::from_utf8(self.bases().cloned().collect()).unwrap()
}
}
impl<'a> Read for FastaView<'a> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let mut read = 0;
let mut skipped = 0;
for (t, s) in buf.iter_mut().zip(self.0.iter().filter(|&&c| {
let base = c & 192 == 64;
if !base {
skipped += 1;
}
base
})) {
*t = *s;
read += 1;
}
self.0 = &self.0[(skipped + read)..];
Ok(read)
}
}
/// Object that contains occurrence counts for the most common bases in DNA genome references: A, C, G,
/// T, N and other.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct BaseCounts {
pub a: usize,
pub c: usize,
pub g: usize,
pub t: usize,
pub n: usize,
pub other: usize,
}
/// Initialize basecount with zeros
impl Default for BaseCounts {
fn default() -> BaseCounts | {
BaseCounts {
a: 0,
c: 0,
g: 0,
t: 0,
n: 0,
other: 0,
}
} | identifier_body |
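A quick standalone check (not from the crate above) of the bit tricks in `bases` and `count_bases`: `b & 192 == 64` keeps only ASCII bytes 64..=127, so newlines are skipped, and `b << 3` maps upper- and lower-case versions of the same base to one key:

fn main() {
    // bytes rejected by the & 192 == 64 filter: newline, carriage return, '>' header marker
    for b in [b'\n', b'\r', b'>'] {
        assert_ne!(b & 192, 64);
    }
    // upper- and lower-case bases pass the filter and collapse to the same key after << 3
    for (upper, lower, key) in [(b'A', b'a', 8u8), (b'C', b'c', 24), (b'G', b'g', 56), (b'N', b'n', 112), (b'T', b't', 160)] {
        assert_eq!(upper & 192, 64);
        assert_eq!(lower & 192, 64);
        assert_eq!(upper << 3, key);
        assert_eq!(lower << 3, key);
    }
}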
|
lib.rs | std::fs::File;
use std::io::{self, BufRead, BufReader, Read};
use std::path::Path;
use indexmap::IndexSet;
use memmap2::{Mmap, MmapOptions};
/// The object that stores the parsed fasta index file. You can use it to map chromosome names to
/// indexes and lookup offsets for chr-start:end coordinates
#[derive(Debug, Clone)]
pub struct Fai {
chromosomes: Vec<FaiRecord>,
name_map: IndexSet<String>,
}
impl Fai {
/// Open a fasta index file from path `P`.
pub fn from_file<P: AsRef<Path>>(path: P) -> io::Result<Self> {
let f = File::open(path)?;
let br = BufReader::new(f);
let mut name_map = IndexSet::new();
let mut chromosomes = Vec::new();
for l in br.lines() {
let line = l?;
let p: Vec<_> = line.split('\t').collect();
if p.len() != 5 {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Expected 5 columns in .fai file.",
));
} | let ioerr =
|e, msg| io::Error::new(io::ErrorKind::InvalidData, format!("{}:{}", msg, e));
chromosomes.push(FaiRecord {
len: p[1]
.parse()
.map_err(|e| ioerr(e, "Error parsing chr len in .fai"))?,
offset: p[2]
.parse()
.map_err(|e| ioerr(e, "Error parsing chr offset in .fai"))?,
line_bases: p[3]
.parse()
.map_err(|e| ioerr(e, "Error parsing chr line_bases in .fai"))?,
line_width: p[4]
.parse()
.map_err(|e| ioerr(e, "Error parsing chr line_width in .fai"))?,
});
}
Ok(Fai {
chromosomes,
name_map,
})
}
/// Calculate the slice coordinates (byte offsets).
/// tid is the index of the chromosome (lookup with `Fai::tid` if necessary).
/// start, end: zero based coordinates of the requested range.
///
/// Returns a tuple (start, end) if successful. `io::Error` otherwise.
#[inline]
pub fn offset(&self, tid: usize, start: usize, stop: usize) -> io::Result<(usize, usize)> {
let chr = &self.chromosomes.get(tid).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})?;
if stop > chr.len {
return Err(io::Error::new(
io::ErrorKind::Other,
"FASTA read interval was out of bounds",
));
}
let start_offset =
chr.offset + (start / chr.line_bases) * chr.line_width + start % chr.line_bases;
let stop_offset =
chr.offset + (stop / chr.line_bases) * chr.line_width + stop % chr.line_bases;
Ok((start_offset, stop_offset))
}
/// Calculate the slice coordinates (byte offsets).
/// tid is the index of the chromosome (lookup with `Fai::tid` if necessary).
///
/// Returns a tuple (start, end) if successful. `io::Error` otherwise.
#[inline]
pub fn offset_tid(&self, tid: usize) -> io::Result<(usize, usize)> {
let chr = &self.chromosomes.get(tid).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})?;
let start_offset = chr.offset;
let stop_offset =
chr.offset + (chr.len / chr.line_bases) * chr.line_width + chr.len % chr.line_bases;
Ok((start_offset, stop_offset))
}
/// Return the index of the chromosome by name in the fasta index.
///
/// Returns the position of chr `name` if successful, None otherwise.
#[inline]
pub fn tid(&self, name: &str) -> Option<usize> {
self.name_map.get_index_of(name)
}
/// Return the size of a chromosome in the fasta index.
///
/// Returns the size in bases as usize.
pub fn size(&self, tid: usize) -> io::Result<usize> {
let chr = &self.chromosomes.get(tid).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})?;
Ok(chr.len)
}
/// Return the name of the chromosome at index tid
pub fn name(&self, tid: usize) -> io::Result<&String> {
self.name_map.get_index(tid).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})
}
/// Return the names of the chromosomes from the fasta index in the same order as in the
/// `.fai`. You can use `Fai::tid` to map it back to an index.
///
/// Returns a `Vec<&str>` with the chromosome names.
pub fn names(&self) -> Vec<&str> {
self.name_map.iter().map(|s| s.as_str()).collect()
}
}
/// FaiRecord stores the length, offset, and fasta file characteristics of a single chromosome
#[derive(Debug, Clone)]
pub struct FaiRecord {
len: usize,
offset: usize,
line_bases: usize,
line_width: usize,
}
/// The `IndexedFasta` can be used to open a fasta file that has a valid .fai index file.
pub struct IndexedFasta {
mmap: Mmap,
fasta_index: Fai,
}
impl IndexedFasta {
/// Open a fasta file from path `P`. It is assumed that it has a valid .fai index file. The
/// .fai file is created by appending .fai to the fasta file.
pub fn from_file<P: AsRef<Path>>(path: P) -> io::Result<Self> {
let mut fai_path = path.as_ref().as_os_str().to_owned();
fai_path.push(".fai");
let fasta_index = Fai::from_file(&fai_path)?;
let file = File::open(path)?;
let mmap = unsafe { MmapOptions::new().map(&file)? };
Ok(IndexedFasta { mmap, fasta_index })
}
/// Use tid, start and end to calculate a slice on the Fasta file. Use this view to iterate
/// over the bases.
///
/// Returns FastaView for the provided chromosome, start, end if successful, Error otherwise.
pub fn view(&self, tid: usize, start: usize, stop: usize) -> io::Result<FastaView> {
if start > stop {
return Err(io::Error::new(
io::ErrorKind::Other,
"Invalid query interval",
));
}
let (start_byte, stop_byte) = self.fasta_index.offset(tid, start, stop)?;
//println!("offset for chr {}:{}-{} is {}-{}", tid, start, stop, start_byte, stop_byte);
Ok(FastaView(&self.mmap[start_byte..stop_byte]))
}
/// Use tid to return a view of an entire chromosome.
///
/// Returns FastaView for the provided chromosome indicated by tid if successful, Error otherwise.
pub fn view_tid(&self, tid: usize) -> io::Result<FastaView> {
let (start_byte, stop_byte) = self.fasta_index.offset_tid(tid)?;
//println!("offset for chr {}:{}-{} is {}-{}", tid, start, stop, start_byte, stop_byte);
Ok(FastaView(&self.mmap[start_byte..stop_byte]))
}
/// Return a reference to the `Fai` that contains information from the fasta index.
///
/// Returns a reference to `Fai`.
pub fn fai(&self) -> &Fai {
&self.fasta_index
}
}
/// A view of a slice of the fasta file bounded by provided coordinates
pub struct FastaView<'a>(&'a [u8]);
impl<'a> FastaView<'a> {
/// Count the occurrences of A, C, G, T, N, and other in the current view. This function does
/// not differentiate between upper or lower case bases.
///
/// Returns a `BaseCounts` object.
pub fn count_bases(&self) -> BaseCounts {
let mut bc: BaseCounts = Default::default();
for b in self.bases() {
let v: u8 = b << 3;
if v ^ 8 == 0 {
bc.a += 1;
} else if v ^ 24 == 0 {
bc.c += 1;
} else if v ^ 56 == 0 {
bc.g += 1;
} else if v ^ 112 == 0 {
bc.n += 1;
} else if v ^ 160 == 0 {
bc.t += 1;
|
name_map.insert(p[0].to_owned());
| random_line_split |
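A standalone sketch (not part of the crate above) of the `.fai` record format that `Fai::from_file` parses: five tab-separated columns holding name, sequence length, byte offset, bases per line and bytes per line; the chr1 numbers below are illustrative:

fn main() {
    let line = "chr1\t248956422\t112\t60\t61"; // hypothetical .fai record
    let cols: Vec<&str> = line.split('\t').collect();
    assert_eq!(cols.len(), 5);
    let len: usize = cols[1].parse().unwrap();
    let offset: usize = cols[2].parse().unwrap();
    let line_bases: usize = cols[3].parse().unwrap();
    let line_width: usize = cols[4].parse().unwrap();
    assert_eq!((len, offset, line_bases, line_width), (248956422, 112, 60, 61));
    println!("{} spans {} bases starting at byte {}", cols[0], len, offset);
}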
lib.rs | std::fs::File;
use std::io::{self, BufRead, BufReader, Read};
use std::path::Path;
use indexmap::IndexSet;
use memmap2::{Mmap, MmapOptions};
/// The object that stores the parsed fasta index file. You can use it to map chromosome names to
/// indexes and lookup offsets for chr-start:end coordinates
#[derive(Debug, Clone)]
pub struct Fai {
chromosomes: Vec<FaiRecord>,
name_map: IndexSet<String>,
}
impl Fai {
/// Open a fasta index file from path `P`.
pub fn from_file<P: AsRef<Path>>(path: P) -> io::Result<Self> {
let f = File::open(path)?;
let br = BufReader::new(f);
let mut name_map = IndexSet::new();
let mut chromosomes = Vec::new();
for l in br.lines() {
let line = l?;
let p: Vec<_> = line.split('\t').collect();
if p.len() != 5 {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Expected 5 columns in .fai file.",
));
}
name_map.insert(p[0].to_owned());
let ioerr =
|e, msg| io::Error::new(io::ErrorKind::InvalidData, format!("{}:{}", msg, e));
chromosomes.push(FaiRecord {
len: p[1]
.parse()
.map_err(|e| ioerr(e, "Error parsing chr len in .fai"))?,
offset: p[2]
.parse()
.map_err(|e| ioerr(e, "Error parsing chr offset in .fai"))?,
line_bases: p[3]
.parse()
.map_err(|e| ioerr(e, "Error parsing chr line_bases in .fai"))?,
line_width: p[4]
.parse()
.map_err(|e| ioerr(e, "Error parsing chr line_width in .fai"))?,
});
}
Ok(Fai {
chromosomes,
name_map,
})
}
/// Calculate the slice coordinates (byte offsets).
/// tid is the index of the chromosome (lookup with `Fai::tid` if necessary).
/// start, end: zero based coordinates of the requested range.
///
/// Returns a tuple (start, end) if successful. `io::Error` otherwise.
#[inline]
pub fn offset(&self, tid: usize, start: usize, stop: usize) -> io::Result<(usize, usize)> {
let chr = &self.chromosomes.get(tid).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})?;
if stop > chr.len {
return Err(io::Error::new(
io::ErrorKind::Other,
"FASTA read interval was out of bounds",
));
}
let start_offset =
chr.offset + (start / chr.line_bases) * chr.line_width + start % chr.line_bases;
let stop_offset =
chr.offset + (stop / chr.line_bases) * chr.line_width + stop % chr.line_bases;
Ok((start_offset, stop_offset))
}
/// Calculate the slice coordinates (byte offsets).
/// tid is the index of the chromosome (lookup with `Fai::tid` if necessary).
///
/// Returns a tuple (start, end) if successful. `io::Error` otherwise.
#[inline]
pub fn offset_tid(&self, tid: usize) -> io::Result<(usize, usize)> {
let chr = &self.chromosomes.get(tid).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})?;
let start_offset = chr.offset;
let stop_offset =
chr.offset + (chr.len / chr.line_bases) * chr.line_width + chr.len % chr.line_bases;
Ok((start_offset, stop_offset))
}
/// Return the index of the chromosome by name in the fasta index.
///
/// Returns the position of chr `name` if successful, None otherwise.
#[inline]
pub fn tid(&self, name: &str) -> Option<usize> {
self.name_map.get_index_of(name)
}
/// Return the size of a chromosome in the fasta index.
///
/// Returns the size in bases as usize.
pub fn size(&self, tid: usize) -> io::Result<usize> {
let chr = &self.chromosomes.get(tid).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})?;
Ok(chr.len)
}
/// Return the name of the chromosome at index tid
pub fn | (&self, tid: usize) -> io::Result<&String> {
self.name_map.get_index(tid).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Chromomsome tid was out of bounds")
})
}
/// Return the names of the chromosomes from the fasta index in the same order as in the
/// `.fai`. You can use `Fai::tid` to map it back to an index.
///
/// Returns a `Vec<&str>` with the chromosome names.
pub fn names(&self) -> Vec<&str> {
self.name_map.iter().map(|s| s.as_str()).collect()
}
}
/// FaiRecord stores the length, offset, and fasta file characteristics of a single chromosome
#[derive(Debug, Clone)]
pub struct FaiRecord {
len: usize,
offset: usize,
line_bases: usize,
line_width: usize,
}
/// The `IndexedFasta` can be used to open a fasta file that has a valid .fai index file.
pub struct IndexedFasta {
mmap: Mmap,
fasta_index: Fai,
}
impl IndexedFasta {
/// Open a fasta file from path `P`. It is assumed that it has a valid .fai index file. The
/// .fai file is created by appending .fai to the fasta file.
pub fn from_file<P: AsRef<Path>>(path: P) -> io::Result<Self> {
let mut fai_path = path.as_ref().as_os_str().to_owned();
fai_path.push(".fai");
let fasta_index = Fai::from_file(&fai_path)?;
let file = File::open(path)?;
let mmap = unsafe { MmapOptions::new().map(&file)? };
Ok(IndexedFasta { mmap, fasta_index })
}
/// Use tid, start and end to calculate a slice on the Fasta file. Use this view to iterate
/// over the bases.
///
/// Returns FastaView for the provided chromosome, start, end if successful, Error otherwise.
pub fn view(&self, tid: usize, start: usize, stop: usize) -> io::Result<FastaView> {
if start > stop {
return Err(io::Error::new(
io::ErrorKind::Other,
"Invalid query interval",
));
}
let (start_byte, stop_byte) = self.fasta_index.offset(tid, start, stop)?;
//println!("offset for chr {}:{}-{} is {}-{}", tid, start, stop, start_byte, stop_byte);
Ok(FastaView(&self.mmap[start_byte..stop_byte]))
}
/// Use tid to return a view of an entire chromosome.
///
/// Returns FastaView for the provided chromosome indicated by tid if successful, Error otherwise.
pub fn view_tid(&self, tid: usize) -> io::Result<FastaView> {
let (start_byte, stop_byte) = self.fasta_index.offset_tid(tid)?;
//println!("offset for chr {}:{}-{} is {}-{}", tid, start, stop, start_byte, stop_byte);
Ok(FastaView(&self.mmap[start_byte..stop_byte]))
}
/// Return a reference to the `Fai` that contains information from the fasta index.
///
/// Returns a reference to `Fai`.
pub fn fai(&self) -> &Fai {
&self.fasta_index
}
}
/// A view of a slice of the fasta file bounded by provided coordinates
pub struct FastaView<'a>(&'a [u8]);
impl<'a> FastaView<'a> {
/// Count the occurrences of A, C, G, T, N, and other in the current view. This function does
/// not differentiate between upper or lower case bases.
///
/// Returns a `BaseCounts` object.
pub fn count_bases(&self) -> BaseCounts {
let mut bc: BaseCounts = Default::default();
for b in self.bases() {
let v: u8 = b << 3;
if v ^ 8 == 0 {
bc.a += 1;
} else if v ^ 24 == 0 {
bc.c += 1;
} else if v ^ 56 == 0 {
bc.g += 1;
} else if v ^ 112 == 0 {
bc.n += 1;
} else if v ^ 160 == 0 {
bc.t += 1;
| name | identifier_name |
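A hypothetical usage sketch of the API defined above (the file name, chromosome and coordinates are made up; it assumes a `genome.fa` plus a matching `genome.fa.fai` exist on disk):

fn usage_sketch() -> std::io::Result<()> {
    let fa = IndexedFasta::from_file("genome.fa")?;
    let tid = fa.fai().tid("chr1").expect("chr1 missing from .fai");
    // bases 100..110 of chr1, zero-based and end-exclusive
    let seq = fa.view(tid, 100, 110)?.to_string();
    let counts = fa.view_tid(tid)?.count_bases();
    println!("chr1[100..110] = {}, A count = {}", seq, counts.a);
    Ok(())
}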
backend.rs | -> Result<Option<H::Out>, Self::Error> {
self.child_storage(storage_key, key).map(|v| v.map(|v| H::hash(&v)))
}
/// true if a key exists in storage.
fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> {
Ok(self.storage(key)?.is_some())
}
/// true if a key exists in child storage.
fn exists_child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<bool, Self::Error> {
Ok(self.child_storage(storage_key, key)?.is_some())
}
/// Retrieve all entry keys of child storage and call `f` for each of those keys.
fn for_keys_in_child_storage<F: FnMut(&[u8])>(&self, storage_key: &[u8], f: F);
/// Retrieve all entry keys which start with the given prefix and
/// call `f` for each of those keys.
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F);
/// Calculate the storage root, with given delta over what is already stored in
/// the backend, and produce a "transaction" that can be used to commit.
/// Does not include child storage updates.
fn storage_root<I>(&self, delta: I) -> (H::Out, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
H::Out: Ord;
/// Calculate the child storage root, with given delta over what is already stored in
/// the backend, and produce a "transaction" that can be used to commit. The second argument
/// is true if child storage root equals default storage root.
fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, bool, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
H::Out: Ord;
/// Get all key/value pairs into a Vec.
fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)>;
/// Get all keys with given prefix
fn keys(&self, prefix: &[u8]) -> Vec<Vec<u8>> {
let mut all = Vec::new();
self.for_keys_with_prefix(prefix, |k| all.push(k.to_vec()));
all
}
/// Get all keys of child storage with given prefix
fn child_keys(&self, child_storage_key: &[u8], prefix: &[u8]) -> Vec<Vec<u8>> {
let mut all = Vec::new();
self.for_keys_in_child_storage(child_storage_key, |k| {
if k.starts_with(prefix) {
all.push(k.to_vec());
}
});
all
}
/// Try convert into trie backend.
fn as_trie_backend(&mut self) -> Option<&TrieBackend<Self::TrieBackendStorage, H>>;
/// Calculate the storage root, with given delta over what is already stored
/// in the backend, and produce a "transaction" that can be used to commit.
/// Does include child storage updates.
fn full_storage_root<I1, I2i, I2>(
&self,
delta: I1,
child_deltas: I2) | -> (H::Out, Self::Transaction)
where
I1: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
I2i: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
I2: IntoIterator<Item=(Vec<u8>, I2i)>,
<H as Hasher>::Out: Ord,
{
let mut txs: Self::Transaction = Default::default();
let mut child_roots: Vec<_> = Default::default();
// child first
for (storage_key, child_delta) in child_deltas {
let (child_root, empty, child_txs) =
self.child_storage_root(&storage_key[..], child_delta);
txs.consolidate(child_txs);
if empty {
child_roots.push((storage_key, None));
} else {
child_roots.push((storage_key, Some(child_root)));
}
}
let (root, parent_txs) = self.storage_root(
delta.into_iter().chain(child_roots.into_iter())
);
txs.consolidate(parent_txs);
(root, txs)
}
}
/// Trait that allows consolidating two transactions together.
pub trait Consolidate {
/// Consolidate two transactions into one.
fn consolidate(&mut self, other: Self);
}
impl Consolidate for () {
fn consolidate(&mut self, _: Self) {
()
}
}
impl Consolidate for Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)> {
fn consolidate(&mut self, mut other: Self) {
self.append(&mut other);
}
}
impl<H: Hasher, KF: trie::KeyFunction<H>> Consolidate for trie::GenericMemoryDB<H, KF> {
fn consolidate(&mut self, other: Self) {
trie::GenericMemoryDB::consolidate(self, other)
}
}
/// Error impossible.
// FIXME: use `!` type when stabilized. https://github.com/rust-lang/rust/issues/35121
#[derive(Debug)]
pub enum Void {}
impl fmt::Display for Void {
fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result {
match *self {}
}
}
impl error::Error for Void {
fn description(&self) -> &str { "unreachable error" }
}
/// In-memory backend. Fully recomputes tries on each commit but useful for
/// tests.
pub struct InMemory<H: Hasher> {
inner: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>,
trie: Option<TrieBackend<MemoryDB<H>, H>>,
_hasher: PhantomData<H>,
}
impl<H: Hasher> Default for InMemory<H> {
fn default() -> Self {
InMemory {
inner: Default::default(),
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> Clone for InMemory<H> {
fn clone(&self) -> Self {
InMemory {
inner: self.inner.clone(),
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> PartialEq for InMemory<H> {
fn eq(&self, other: &Self) -> bool {
self.inner.eq(&other.inner)
}
}
impl<H: Hasher> InMemory<H> {
/// Copy the state, with applied updates
pub fn update(&self, changes: <Self as Backend<H>>::Transaction) -> Self {
let mut inner: HashMap<_, _> = self.inner.clone();
for (storage_key, key, val) in changes {
match val {
Some(v) => { inner.entry(storage_key).or_default().insert(key, v); },
None => { inner.entry(storage_key).or_default().remove(&key); },
}
}
inner.into()
}
}
impl<H: Hasher> From<HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>> for InMemory<H> {
fn from(inner: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>) -> Self {
InMemory {
inner: inner,
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> From<HashMap<Vec<u8>, Vec<u8>>> for InMemory<H> {
fn from(inner: HashMap<Vec<u8>, Vec<u8>>) -> Self {
let mut expanded = HashMap::new();
expanded.insert(None, inner);
InMemory {
inner: expanded,
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> From<Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>> for InMemory<H> {
fn from(inner: Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>) -> Self {
let mut expanded: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>> = HashMap::new();
for (child_key, key, value) in inner {
if let Some(value) = value {
expanded.entry(child_key).or_default().insert(key, value);
}
}
expanded.into()
}
}
impl super::Error for Void {}
impl<H: Hasher> InMemory<H> {
/// child storage key iterator
pub fn child_storage_keys(&self) -> impl Iterator<Item=&[u8]> {
self.inner.iter().filter_map(|item| item.0.as_ref().map(|v|&v[..]))
}
}
impl<H: Hasher> Backend<H> for InMemory<H> {
type Error = Void;
type Transaction = Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>;
type TrieBackendStorage = MemoryDB<H>;
fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
Ok(self.inner.get(&None).and_then(|map| map.get(key).map(Clone::clone)))
}
fn child_storage(&self, storage_key: &[u8], key: &[ | random_line_split |
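A standalone, std-only sketch (not Substrate code) of the transaction shape applied by `InMemory::update` above: each change is keyed by an optional child-storage key, and `Some(value)` inserts while `None` removes:

use std::collections::HashMap;

fn main() {
    let mut inner: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>> = HashMap::new();
    // hypothetical transaction entries: (child storage key, key, Some = insert / None = remove)
    let changes: Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)> = vec![
        (None, b"alice".to_vec(), Some(b"100".to_vec())),
        (Some(b"child-1".to_vec()), b"bob".to_vec(), Some(b"7".to_vec())),
        (None, b"alice".to_vec(), None), // the later change wins: alice is removed again
    ];
    for (child, key, val) in changes {
        match val {
            Some(v) => { inner.entry(child).or_default().insert(key, v); }
            None => { inner.entry(child).or_default().remove(&key); }
        }
    }
    assert!(inner[&None].get(&b"alice".to_vec()).is_none());
    assert_eq!(inner[&Some(b"child-1".to_vec())][&b"bob".to_vec()], b"7".to_vec());
}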
|
backend.rs | (&self, storage_key: &[u8], key: &[u8]) -> Result<Option<H::Out>, Self::Error> {
self.child_storage(storage_key, key).map(|v| v.map(|v| H::hash(&v)))
}
/// true if a key exists in storage.
fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> {
Ok(self.storage(key)?.is_some())
}
/// true if a key exists in child storage.
fn exists_child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<bool, Self::Error> {
Ok(self.child_storage(storage_key, key)?.is_some())
}
/// Retrieve all entry keys of child storage and call `f` for each of those keys.
fn for_keys_in_child_storage<F: FnMut(&[u8])>(&self, storage_key: &[u8], f: F);
/// Retrieve all entry keys which start with the given prefix and
/// call `f` for each of those keys.
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F);
/// Calculate the storage root, with given delta over what is already stored in
/// the backend, and produce a "transaction" that can be used to commit.
/// Does not include child storage updates.
fn storage_root<I>(&self, delta: I) -> (H::Out, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
H::Out: Ord;
/// Calculate the child storage root, with given delta over what is already stored in
/// the backend, and produce a "transaction" that can be used to commit. The second argument
/// is true if child storage root equals default storage root.
fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, bool, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
H::Out: Ord;
/// Get all key/value pairs into a Vec.
fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)>;
/// Get all keys with given prefix
fn keys(&self, prefix: &[u8]) -> Vec<Vec<u8>> {
let mut all = Vec::new();
self.for_keys_with_prefix(prefix, |k| all.push(k.to_vec()));
all
}
/// Get all keys of child storage with given prefix
fn child_keys(&self, child_storage_key: &[u8], prefix: &[u8]) -> Vec<Vec<u8>> {
let mut all = Vec::new();
self.for_keys_in_child_storage(child_storage_key, |k| {
if k.starts_with(prefix) {
all.push(k.to_vec());
}
});
all
}
/// Try convert into trie backend.
fn as_trie_backend(&mut self) -> Option<&TrieBackend<Self::TrieBackendStorage, H>>;
/// Calculate the storage root, with given delta over what is already stored
/// in the backend, and produce a "transaction" that can be used to commit.
/// Does include child storage updates.
fn full_storage_root<I1, I2i, I2>(
&self,
delta: I1,
child_deltas: I2)
-> (H::Out, Self::Transaction)
where
I1: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
I2i: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
I2: IntoIterator<Item=(Vec<u8>, I2i)>,
<H as Hasher>::Out: Ord,
{
let mut txs: Self::Transaction = Default::default();
let mut child_roots: Vec<_> = Default::default();
// child first
for (storage_key, child_delta) in child_deltas {
let (child_root, empty, child_txs) =
self.child_storage_root(&storage_key[..], child_delta);
txs.consolidate(child_txs);
if empty {
child_roots.push((storage_key, None));
} else {
child_roots.push((storage_key, Some(child_root)));
}
}
let (root, parent_txs) = self.storage_root(
delta.into_iter().chain(child_roots.into_iter())
);
txs.consolidate(parent_txs);
(root, txs)
}
}
/// Trait that allows consolidating two transactions together.
pub trait Consolidate {
/// Consolidate two transactions into one.
fn consolidate(&mut self, other: Self);
}
impl Consolidate for () {
fn consolidate(&mut self, _: Self) {
()
}
}
impl Consolidate for Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)> {
fn consolidate(&mut self, mut other: Self) {
self.append(&mut other);
}
}
impl<H: Hasher, KF: trie::KeyFunction<H>> Consolidate for trie::GenericMemoryDB<H, KF> {
fn consolidate(&mut self, other: Self) {
trie::GenericMemoryDB::consolidate(self, other)
}
}
/// Error impossible.
// FIXME: use `!` type when stabilized. https://github.com/rust-lang/rust/issues/35121
#[derive(Debug)]
pub enum Void {}
impl fmt::Display for Void {
fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result {
match *self {}
}
}
impl error::Error for Void {
fn description(&self) -> &str { "unreachable error" }
}
/// In-memory backend. Fully recomputes tries on each commit but useful for
/// tests.
pub struct InMemory<H: Hasher> {
inner: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>,
trie: Option<TrieBackend<MemoryDB<H>, H>>,
_hasher: PhantomData<H>,
}
impl<H: Hasher> Default for InMemory<H> {
fn default() -> Self {
InMemory {
inner: Default::default(),
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> Clone for InMemory<H> {
fn clone(&self) -> Self {
InMemory {
inner: self.inner.clone(),
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> PartialEq for InMemory<H> {
fn eq(&self, other: &Self) -> bool {
self.inner.eq(&other.inner)
}
}
impl<H: Hasher> InMemory<H> {
/// Copy the state, with applied updates
pub fn update(&self, changes: <Self as Backend<H>>::Transaction) -> Self {
let mut inner: HashMap<_, _> = self.inner.clone();
for (storage_key, key, val) in changes {
match val {
Some(v) => { inner.entry(storage_key).or_default().insert(key, v); },
None => { inner.entry(storage_key).or_default().remove(&key); },
}
}
inner.into()
}
}
impl<H: Hasher> From<HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>> for InMemory<H> {
fn from(inner: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>) -> Self {
InMemory {
inner: inner,
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> From<HashMap<Vec<u8>, Vec<u8>>> for InMemory<H> {
fn from(inner: HashMap<Vec<u8>, Vec<u8>>) -> Self {
let mut expanded = HashMap::new();
expanded.insert(None, inner);
InMemory {
inner: expanded,
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> From<Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>> for InMemory<H> {
fn from(inner: Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>) -> Self {
let mut expanded: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>> = HashMap::new();
for (child_key, key, value) in inner {
if let Some(value) = value {
expanded.entry(child_key).or_default().insert(key, value);
}
}
expanded.into()
}
}
impl super::Error for Void {}
impl<H: Hasher> InMemory<H> {
/// child storage key iterator
pub fn child_storage_keys(&self) -> impl Iterator<Item=&[u8]> {
self.inner.iter().filter_map(|item| item.0.as_ref().map(|v|&v[..]))
}
}
impl<H: Hasher> Backend<H> for InMemory<H> {
type Error = Void;
type Transaction = Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>;
type TrieBackendStorage = MemoryDB<H>;
fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
Ok(self.inner.get(&None).and_then(|map| map.get(key).map(Clone::clone | child_storage_hash | identifier_name |
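A toy, std-only illustration (not Substrate code) of the data flow in `full_storage_root` above: child roots are computed first and then chained into the parent delta as ordinary (storage_key, value) entries; the `child_root` closure below is a stand-in, not a real trie root:

fn main() {
    // stand-in "root": just the number of changed keys, enough to show the plumbing
    let child_root = |delta: &[(Vec<u8>, Option<Vec<u8>>)]| -> Vec<u8> { vec![delta.len() as u8] };
    let parent_delta: Vec<(Vec<u8>, Option<Vec<u8>>)> =
        vec![(b"k1".to_vec(), Some(b"v1".to_vec()))];
    let child_deltas: Vec<(Vec<u8>, Vec<(Vec<u8>, Option<Vec<u8>>)>)> =
        vec![(b":child_storage:a".to_vec(), vec![(b"ck".to_vec(), Some(b"cv".to_vec()))])];
    // child first: compute each child root, then feed it into the parent delta
    let child_roots = child_deltas
        .iter()
        .map(|(storage_key, delta)| (storage_key.clone(), Some(child_root(delta.as_slice()))));
    let full_delta: Vec<_> = parent_delta.into_iter().chain(child_roots).collect();
    assert_eq!(full_delta.len(), 2); // the original entry plus one child-root entry
}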