Columns:

  file_name   large_string   lengths 4 – 140
  prefix      large_string   lengths 0 – 12.1k
  suffix      large_string   lengths 0 – 12k
  middle      large_string   lengths 0 – 7.51k
  fim_type    large_string   4 classes
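Each row below pairs the `prefix` and `suffix` of a source file with the masked `middle` span, and `fim_type` records what kind of span was masked (the four values appearing in this section are `identifier_name`, `identifier_body`, `conditional_block`, and `random_line_split`). As a rough illustration only, here is a minimal Python sketch of how one such row could be assembled into a fill-in-the-middle training string; the sentinel tokens, the helper name `to_fim_sample`, and the abbreviated stand-in row are assumptions for illustration, not part of the dataset.

```python
# Minimal sketch: assemble one row of this dataset into a PSM-ordered
# (prefix-suffix-middle) training string. The sentinel strings below are
# placeholders chosen for the example, not tokens defined by the dataset.

FIM_PREFIX = "<fim_prefix>"   # assumed sentinel
FIM_SUFFIX = "<fim_suffix>"   # assumed sentinel
FIM_MIDDLE = "<fim_middle>"   # assumed sentinel

def to_fim_sample(row: dict) -> str:
    """Concatenate the row's fields in prefix/suffix/middle order."""
    return (
        FIM_PREFIX + row["prefix"]
        + FIM_SUFFIX + row["suffix"]
        + FIM_MIDDLE + row["middle"]
    )

# Toy stand-in row (heavily abbreviated; real rows are shown below):
row = {
    "file_name": "connection.go",
    "prefix": "func (v *connection) ",
    "suffix": "(...) error { /* ... */ }",
    "middle": "authSendMD5Password",
    "fim_type": "identifier_name",
}
print(to_fim_sample(row))
```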
connection.go
_, result = conn.Write([]byte{msgTag}) } if result == nil { sizeBytes := v.scratch[:4] binary.BigEndian.PutUint32(sizeBytes, uint32(len(msgBytes)+4)) _, result = conn.Write(sizeBytes) if result == nil && len(msgBytes) > 0 { size := 8192 // Max msg size, consistent with how the server works pos := 0 var sent int for pos < len(msgBytes) { sent, result = conn.Write(msgBytes[pos:min(pos+size, len(msgBytes))]) if result != nil { break } pos += sent } } } if result != nil { connectionLogger.Error("-> FAILED SENDING "+msg.String()+": %v", result.Error()) } else { connectionLogger.Debug("-> " + msg.String()) } return result } func min(a, b int) int { if a < b { return a } return b } func (v *connection) handshake() error { if v.connURL.User == nil && len(v.oauthaccesstoken) == 0 { return fmt.Errorf("connection string must include a user name or oauth_access_token") } userName := v.connURL.User.Username() if len(userName) == 0 && len(v.oauthaccesstoken) == 0 { return fmt.Errorf("connection string must have a non-empty user name or oauth_access_token") } dbName := "" if len(v.connURL.Path) > 1 { dbName = v.connURL.Path[1:] } msg := &msgs.FEStartupMsg{ ProtocolVersion: protocolVersion, DriverName: driverName, DriverVersion: driverVersion, Username: userName, Database: dbName, SessionID: v.sessionID, ClientPID: v.clientPID, Autocommit: v.autocommit, OAuthAccessToken: v.oauthaccesstoken, } if err := v.sendMessage(msg); err != nil { return err } for { bMsg, err := v.recvMessage() if err != nil { return err } switch msg := bMsg.(type) { case *msgs.BEErrorMsg: return errorMsgToVError(msg) case *msgs.BEReadyForQueryMsg: v.transactionState = msg.TransactionState return nil case *msgs.BEParamStatusMsg: v.parameters[msg.ParamName] = msg.ParamValue case *msgs.BEKeyDataMsg: v.backendPID = msg.BackendPID v.cancelKey = msg.CancelKey default: _, err = v.defaultMessageHandler(msg) if err != nil { return err } } } } // We have to be tricky here since we're inside of a connection, but trying to use interfaces of the // driver class. func (v *connection) initializeSession() error { stmt, err := newStmt(v, "select now()::timestamptz") if err != nil { return err } result, err := stmt.QueryContextRaw(context.Background(), []driver.NamedValue{}) if err != nil { return err } firstRow := result.resultData.Peek() if len(result.Columns()) != 1 && result.Columns()[1] != "now" || firstRow == nil { return fmt.Errorf("unable to initialize session; functionality may be unreliable") } // Peek into the results manually. 
colData := firstRow.Columns() str := string(colData.Chunk()) if len(str) < 23 { return fmt.Errorf("can't get server timezone: %s", str) } v.serverTZOffset = str[len(str)-3:] connectionLogger.Debug("Setting server timezone offset to %s", str[len(str)-3:]) return nil } func (v *connection) defaultMessageHandler(bMsg msgs.BackEndMsg) (bool, error) { handled := true var err error = nil switch msg := bMsg.(type) { case *msgs.BEAuthenticationMsg: switch msg.Response { case common.AuthenticationOK: break case common.AuthenticationCleartextPassword: err = v.authSendPlainTextPassword() case common.AuthenticationMD5Password: err = v.authSendMD5Password(msg.ExtraAuthData) case common.AuthenticationSHA512Password: err = v.authSendSHA512Password(msg.ExtraAuthData) case common.AuthenticationOAuth: err = v.authSendOAuthAccessToken() default: handled = false err = fmt.Errorf("unsupported authentication scheme: %d", msg.Response) } case *msgs.BENoticeMsg: break case *msgs.BEParamStatusMsg: connectionLogger.Debug("%v", msg) default: handled = false err = fmt.Errorf("unhandled message: %v", msg) connectionLogger.Warn("%v", err) } return handled, err } func (v *connection) readAll(buf []byte) error { readIndex := 0 for { bytesRead, err := v.conn.Read(buf[readIndex:]) if err != nil { return err } readIndex += bytesRead if readIndex == len(buf) { return nil } } } func (v *connection) balanceLoad() error { v.sendMessage(&msgs.FELoadBalanceMsg{}) response := v.scratch[:1] var err error if err = v.readAll(response); err != nil { return err } if response[0] == 'N' { // keep existing connection connectionLogger.Debug("<- LoadBalanceResponse: N") connectionLogger.Warn("Load balancing requested but not supported by server") return nil } if response[0] != 'Y' { connectionLogger.Debug("<- LoadBalanceResponse: %c", response[0]) return fmt.Errorf("Load balancing request gave unknown response: %c", response[0]) } header := v.scratch[1:5] if err = v.readAll(header); err != nil { return err } msgSize := int(binary.BigEndian.Uint32(header) - 4) msgBytes := v.scratch[5:] var y []byte if msgSize > 0 { if msgSize <= len(msgBytes) { y = msgBytes[:msgSize] } else { y = make([]byte, msgSize) } if err = v.readAll(y); err != nil { return err } } bem, err := msgs.CreateBackEndMsg(response[0], y) if err != nil { return err } connectionLogger.Debug("<- " + bem.String()) msg := bem.(*msgs.BELoadBalanceMsg) // v.connURL.Hostname() is used by initializeSSL(), so load balancing info should not write into v.connURL loadBalanceAddr := fmt.Sprintf("%s:%d", msg.Host, msg.Port) if v.connHostsList[0] == loadBalanceAddr { // Already connecting to the host return nil } // Push the new host onto the host list before connecting again. // Note that this leaves the originally-specified host as the first failover possibility v.connHostsList = append([]string{loadBalanceAddr}, v.connHostsList...) 
// Connect to new host v.conn.Close() v.conn, err = v.establishSocketConnection() if err != nil { return fmt.Errorf("cannot redirect to %s (%s)", loadBalanceAddr, err.Error()) } return nil } func (v *connection) initializeSSL(sslFlag string) error { v.sendMessage(&msgs.FESSLMsg{}) buf := v.scratch[:1] err := v.readAll(buf) if err != nil { return err } if buf[0] == 'N' { return fmt.Errorf("SSL/TLS is not enabled on this server") } if buf[0] != 'S' { return fmt.Errorf("SSL/TLS probe gave unknown response: %c", buf[0]) } switch sslFlag { case tlsModeServer: connectionLogger.Info("enabling SSL/TLS server mode") v.conn = tls.Client(v.conn, &tls.Config{InsecureSkipVerify: true}) case tlsModeServerStrict: connectionLogger.Info("enabling SSL/TLS server strict mode") v.conn = tls.Client(v.conn, &tls.Config{ServerName: v.connURL.Hostname()}) default: // Custom mode is used for mutual ssl mode connectionLogger.Info("enabling SSL/TLS custom mode") config, ok := tlsConfigs.get(sslFlag) if !ok { err := fmt.Errorf("tls config %s not registered. See 'Using custom TLS config' in the README.md file", sslFlag) connectionLogger.Error(err.Error()) return err } v.conn = tls.Client(v.conn, config) return nil } return nil } func (v *connection) authSendPlainTextPassword() error { passwd, isSet := v.connURL.User.Password() if !isSet { passwd = "" } msg := &msgs.FEPasswordMsg{PasswordData: passwd} return v.sendMessage(msg) } func (v *connection)
authSendMD5Password
identifier_name
connection.go
, conn net.Conn) error { var result error = nil msgBytes, msgTag := msg.Flatten() if msgTag != 0 { _, result = conn.Write([]byte{msgTag}) } if result == nil { sizeBytes := v.scratch[:4] binary.BigEndian.PutUint32(sizeBytes, uint32(len(msgBytes)+4)) _, result = conn.Write(sizeBytes) if result == nil && len(msgBytes) > 0 { size := 8192 // Max msg size, consistent with how the server works pos := 0 var sent int for pos < len(msgBytes) { sent, result = conn.Write(msgBytes[pos:min(pos+size, len(msgBytes))]) if result != nil { break } pos += sent } } } if result != nil { connectionLogger.Error("-> FAILED SENDING "+msg.String()+": %v", result.Error()) } else { connectionLogger.Debug("-> " + msg.String()) } return result } func min(a, b int) int { if a < b { return a } return b } func (v *connection) handshake() error { if v.connURL.User == nil && len(v.oauthaccesstoken) == 0 { return fmt.Errorf("connection string must include a user name or oauth_access_token") } userName := v.connURL.User.Username() if len(userName) == 0 && len(v.oauthaccesstoken) == 0 { return fmt.Errorf("connection string must have a non-empty user name or oauth_access_token") } dbName := "" if len(v.connURL.Path) > 1 { dbName = v.connURL.Path[1:] } msg := &msgs.FEStartupMsg{ ProtocolVersion: protocolVersion, DriverName: driverName, DriverVersion: driverVersion, Username: userName, Database: dbName, SessionID: v.sessionID, ClientPID: v.clientPID, Autocommit: v.autocommit, OAuthAccessToken: v.oauthaccesstoken, } if err := v.sendMessage(msg); err != nil { return err } for { bMsg, err := v.recvMessage() if err != nil { return err } switch msg := bMsg.(type) { case *msgs.BEErrorMsg: return errorMsgToVError(msg) case *msgs.BEReadyForQueryMsg: v.transactionState = msg.TransactionState return nil case *msgs.BEParamStatusMsg: v.parameters[msg.ParamName] = msg.ParamValue case *msgs.BEKeyDataMsg: v.backendPID = msg.BackendPID v.cancelKey = msg.CancelKey default: _, err = v.defaultMessageHandler(msg) if err != nil { return err } } } } // We have to be tricky here since we're inside of a connection, but trying to use interfaces of the // driver class. func (v *connection) initializeSession() error { stmt, err := newStmt(v, "select now()::timestamptz") if err != nil { return err } result, err := stmt.QueryContextRaw(context.Background(), []driver.NamedValue{}) if err != nil { return err } firstRow := result.resultData.Peek() if len(result.Columns()) != 1 && result.Columns()[1] != "now" || firstRow == nil { return fmt.Errorf("unable to initialize session; functionality may be unreliable") } // Peek into the results manually. 
colData := firstRow.Columns() str := string(colData.Chunk()) if len(str) < 23 { return fmt.Errorf("can't get server timezone: %s", str) } v.serverTZOffset = str[len(str)-3:] connectionLogger.Debug("Setting server timezone offset to %s", str[len(str)-3:]) return nil } func (v *connection) defaultMessageHandler(bMsg msgs.BackEndMsg) (bool, error) { handled := true var err error = nil switch msg := bMsg.(type) { case *msgs.BEAuthenticationMsg: switch msg.Response { case common.AuthenticationOK: break case common.AuthenticationCleartextPassword: err = v.authSendPlainTextPassword() case common.AuthenticationMD5Password: err = v.authSendMD5Password(msg.ExtraAuthData) case common.AuthenticationSHA512Password: err = v.authSendSHA512Password(msg.ExtraAuthData) case common.AuthenticationOAuth: err = v.authSendOAuthAccessToken() default: handled = false err = fmt.Errorf("unsupported authentication scheme: %d", msg.Response) } case *msgs.BENoticeMsg: break case *msgs.BEParamStatusMsg: connectionLogger.Debug("%v", msg) default: handled = false err = fmt.Errorf("unhandled message: %v", msg) connectionLogger.Warn("%v", err) } return handled, err } func (v *connection) readAll(buf []byte) error { readIndex := 0 for { bytesRead, err := v.conn.Read(buf[readIndex:]) if err != nil { return err } readIndex += bytesRead if readIndex == len(buf) { return nil } } } func (v *connection) balanceLoad() error { v.sendMessage(&msgs.FELoadBalanceMsg{}) response := v.scratch[:1] var err error if err = v.readAll(response); err != nil { return err } if response[0] == 'N' { // keep existing connection connectionLogger.Debug("<- LoadBalanceResponse: N") connectionLogger.Warn("Load balancing requested but not supported by server") return nil } if response[0] != 'Y' { connectionLogger.Debug("<- LoadBalanceResponse: %c", response[0]) return fmt.Errorf("Load balancing request gave unknown response: %c", response[0]) } header := v.scratch[1:5] if err = v.readAll(header); err != nil { return err } msgSize := int(binary.BigEndian.Uint32(header) - 4) msgBytes := v.scratch[5:] var y []byte if msgSize > 0 { if msgSize <= len(msgBytes) { y = msgBytes[:msgSize] } else { y = make([]byte, msgSize) } if err = v.readAll(y); err != nil { return err } } bem, err := msgs.CreateBackEndMsg(response[0], y) if err != nil { return err } connectionLogger.Debug("<- " + bem.String()) msg := bem.(*msgs.BELoadBalanceMsg) // v.connURL.Hostname() is used by initializeSSL(), so load balancing info should not write into v.connURL loadBalanceAddr := fmt.Sprintf("%s:%d", msg.Host, msg.Port) if v.connHostsList[0] == loadBalanceAddr { // Already connecting to the host return nil } // Push the new host onto the host list before connecting again. // Note that this leaves the originally-specified host as the first failover possibility v.connHostsList = append([]string{loadBalanceAddr}, v.connHostsList...) 
// Connect to new host v.conn.Close() v.conn, err = v.establishSocketConnection() if err != nil { return fmt.Errorf("cannot redirect to %s (%s)", loadBalanceAddr, err.Error()) } return nil } func (v *connection) initializeSSL(sslFlag string) error { v.sendMessage(&msgs.FESSLMsg{}) buf := v.scratch[:1] err := v.readAll(buf) if err != nil { return err } if buf[0] == 'N' { return fmt.Errorf("SSL/TLS is not enabled on this server") } if buf[0] != 'S' { return fmt.Errorf("SSL/TLS probe gave unknown response: %c", buf[0]) } switch sslFlag { case tlsModeServer: connectionLogger.Info("enabling SSL/TLS server mode") v.conn = tls.Client(v.conn, &tls.Config{InsecureSkipVerify: true}) case tlsModeServerStrict: connectionLogger.Info("enabling SSL/TLS server strict mode") v.conn = tls.Client(v.conn, &tls.Config{ServerName: v.connURL.Hostname()}) default: // Custom mode is used for mutual ssl mode connectionLogger.Info("enabling SSL/TLS custom mode") config, ok := tlsConfigs.get(sslFlag) if !ok { err := fmt.Errorf("tls config %s not registered. See 'Using custom TLS config' in the README.md file", sslFlag) connectionLogger.Error(err.Error()) return err } v.conn = tls.Client(v.conn, config) return nil } return nil } func (v *connection) authSendPlainTextPassword() error { passwd, isSet := v.connURL.User.Password() if !isSet
{ passwd = "" }
conditional_block
connection.go
return newTransaction(ctx, v, opts) } // Close closes a connection to the Vertica DB. After calling close, you shouldn't use this connection anymore. // From interface: sql.driver.Conn func (v *connection) Close() error { connectionLogger.Trace("connection.Close()") v.sendMessage(&msgs.FETerminateMsg{}) var result error = nil if v.conn != nil { result = v.conn.Close() v.conn = nil } return result } // PrepareContext returns a prepared statement, bound to this connection. // context is for the preparation of the statement, // it must not store the context within the statement itself. // From interface: sql.driver.ConnPrepareContext func (v *connection) PrepareContext(ctx context.Context, query string) (driver.Stmt, error)
// Prepare returns a prepared statement, bound to this connection. // From interface: sql.driver.Conn func (v *connection) Prepare(query string) (driver.Stmt, error) { return v.PrepareContext(context.Background(), query) } // Ping implements the Pinger interface for connection. Use this to check for a valid connection state. // This has to prepare AND execute the query in case prepared statements are disabled. func (v *connection) Ping(ctx context.Context) error { stmt, err := v.PrepareContext(ctx, "select 1 as test") if err != nil { return driver.ErrBadConn } defer stmt.Close() // If we are preparing statements server side, successfully preparing verifies the connection if v.usePreparedStmts { return nil } queryContext := stmt.(driver.StmtQueryContext) rows, err := queryContext.QueryContext(ctx, nil) if err != nil { return driver.ErrBadConn } var val interface{} if err := rows.Next([]driver.Value{val}); err != nil { return driver.ErrBadConn } rows.Close() return nil } // ResetSession implements the SessionResetter interface for connection. This allows the sql // package to evaluate the connection state when managing the connection pool. func (v *connection) ResetSession(ctx context.Context) error { if v.dead { return driver.ErrBadConn } return v.Ping(ctx) } // newConnection constructs a new Vertica Connection object based on the connection string. func newConnection(connString string) (*connection, error) { result := &connection{parameters: make(map[string]string), usePreparedStmts: true} var err error result.connURL, err = url.Parse(connString) if err != nil { return nil, err } result.clientPID = os.Getpid() if client_label := result.connURL.Query().Get("client_label"); client_label != "" { result.sessionID = client_label } else { result.sessionID = fmt.Sprintf("%s-%s-%d-%d", driverName, driverVersion, result.clientPID, time.Now().Unix()) } // Read the interpolate flag. if iFlag := result.connURL.Query().Get("use_prepared_statements"); iFlag != "" { result.usePreparedStmts = iFlag == "1" } // Read Autocommit flag. if iFlag := result.connURL.Query().Get("autocommit"); iFlag == "" || iFlag == "1" { result.autocommit = "on" } else { result.autocommit = "off" } // Read OAuth access token flag. result.oauthaccesstoken = result.connURL.Query().Get("oauth_access_token") // Read connection load balance flag. loadBalanceFlag := result.connURL.Query().Get("connection_load_balance") // Read connection failover flag. backupHostsStr := result.connURL.Query().Get("backup_server_node") if backupHostsStr == "" { result.connHostsList = []string{result.connURL.Host} } else { // Parse comma-separated list of backup host-port pairs hosts := strings.Split(backupHostsStr, ",") // Push target host to front of the hosts list result.connHostsList = append([]string{result.connURL.Host}, hosts...) } // Read SSL/TLS flag. 
sslFlag := strings.ToLower(result.connURL.Query().Get("tlsmode")) if sslFlag == "" { sslFlag = tlsModeNone } result.conn, err = result.establishSocketConnection() if err != nil { return nil, err } // Load Balancing if loadBalanceFlag == "1" { if err = result.balanceLoad(); err != nil { return nil, err } } if sslFlag != tlsModeNone { if err = result.initializeSSL(sslFlag); err != nil { return nil, err } } if err = result.handshake(); err != nil { return nil, err } if err = result.initializeSession(); err != nil { return nil, err } return result, nil } func (v *connection) establishSocketConnection() (net.Conn, error) { // Failover: loop to try all hosts in the list err_msg := "" for i := 0; i < len(v.connHostsList); i++ { host, port, err := net.SplitHostPort(v.connHostsList[i]) if err != nil { // no host-port pair identified err_msg += fmt.Sprintf("\n '%s': %s", v.connHostsList[i], err.Error()) continue } ips, err := net.LookupIP(host) if err != nil { // failed to resolve any IPs from host err_msg += fmt.Sprintf("\n '%s': %s", host, err.Error()) continue } r := rand.New(rand.NewSource(time.Now().Unix())) for _, j := range r.Perm(len(ips)) { // j comes from random permutation of indexes - ips[j] will access a random resolved ip addrString := net.JoinHostPort(ips[j].String(), port) // IPv6 returns "[host]:port" conn, err := net.Dial("tcp", addrString) if err != nil { err_msg += fmt.Sprintf("\n '%s': %s", v.connHostsList[i], err.Error()) } else { if len(err_msg) != 0 { connectionLogger.Debug("Failed to establish a connection to %s", err_msg) } connectionLogger.Debug("Established socket connection to %s", addrString) v.connHostsList = v.connHostsList[i:] return conn, err } } } // All of the hosts failed return nil, fmt.Errorf("Failed to establish a connection to the primary server or any backup host.%s", err_msg) } func (v *connection) recvMessage() (msgs.BackEndMsg, error) { msgHeader := v.scratch[:5] var err error if err = v.readAll(msgHeader); err != nil { return nil, err } msgSize := int(binary.BigEndian.Uint32(msgHeader[1:]) - 4) msgBytes := v.scratch[5:] var y []byte if msgSize > 0 { if msgSize <= len(msgBytes) { y = msgBytes[:msgSize] } else { y = make([]byte, msgSize) } if err = v.readAll(y); err != nil { return nil, err } } bem, err := msgs.CreateBackEndMsg(msgHeader[0], y) if err != nil { return nil, err } // Print the message to stdout (for debugging purposes) if _, drm := bem.(*msgs.BEDataRowMsg); !drm { connectionLogger.Debug("<- " + bem.String()) } else { connectionLogger.Trace("<- " + bem.String()) } return bem, nil } func (v *connection) sendMessage(msg msgs.FrontEndMsg) error { return v.sendMessageTo(msg, v.conn) } func (v *connection) sendMessageTo(msg msgs.FrontEndMsg, conn net.Conn) error { var result error = nil msgBytes, msgTag := msg.Flatten() if msgTag != 0 { _, result = conn.Write([]byte{msgTag}) } if result == nil { sizeBytes := v.scratch[:4] binary.BigEndian.PutUint32(sizeBytes, uint32(len(msgBytes)+4)) _, result = conn.Write(sizeBytes) if result == nil && len(msgBytes) > 0 { size := 8192 // Max msg size, consistent with how the server works pos := 0 var sent int for pos < len(msgBytes) { sent, result = conn.Write(msgBytes[pos:min(pos+size, len(msgBytes))]) if result != nil { break } pos += sent } } } if result != nil { connectionLogger.Error("-> FAILED SENDING "+msg.String()+": %v", result.Error()) } else { connectionLogger.Debug("-> " + msg.String()) } return result
{
	s, err := newStmt(v, query)
	if err != nil {
		return nil, err
	}
	if v.usePreparedStmts {
		if err = s.prepareAndDescribe(); err != nil {
			return nil, err
		}
	}
	return s, nil
}
identifier_body
connection.go
()") return newTransaction(ctx, v, opts) } // Close closes a connection to the Vertica DB. After calling close, you shouldn't use this connection anymore. // From interface: sql.driver.Conn func (v *connection) Close() error { connectionLogger.Trace("connection.Close()") v.sendMessage(&msgs.FETerminateMsg{}) var result error = nil if v.conn != nil { result = v.conn.Close() v.conn = nil } return result } // PrepareContext returns a prepared statement, bound to this connection. // context is for the preparation of the statement, // it must not store the context within the statement itself. // From interface: sql.driver.ConnPrepareContext func (v *connection) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { s, err := newStmt(v, query) if err != nil { return nil, err } if v.usePreparedStmts { if err = s.prepareAndDescribe(); err != nil { return nil, err } } return s, nil } // Prepare returns a prepared statement, bound to this connection. // From interface: sql.driver.Conn func (v *connection) Prepare(query string) (driver.Stmt, error) { return v.PrepareContext(context.Background(), query) } // Ping implements the Pinger interface for connection. Use this to check for a valid connection state. // This has to prepare AND execute the query in case prepared statements are disabled. func (v *connection) Ping(ctx context.Context) error { stmt, err := v.PrepareContext(ctx, "select 1 as test") if err != nil { return driver.ErrBadConn } defer stmt.Close() // If we are preparing statements server side, successfully preparing verifies the connection if v.usePreparedStmts { return nil } queryContext := stmt.(driver.StmtQueryContext) rows, err := queryContext.QueryContext(ctx, nil) if err != nil { return driver.ErrBadConn } var val interface{} if err := rows.Next([]driver.Value{val}); err != nil { return driver.ErrBadConn } rows.Close() return nil } // ResetSession implements the SessionResetter interface for connection. This allows the sql // package to evaluate the connection state when managing the connection pool. func (v *connection) ResetSession(ctx context.Context) error { if v.dead { return driver.ErrBadConn } return v.Ping(ctx) } // newConnection constructs a new Vertica Connection object based on the connection string. func newConnection(connString string) (*connection, error) { result := &connection{parameters: make(map[string]string), usePreparedStmts: true} var err error result.connURL, err = url.Parse(connString) if err != nil { return nil, err } result.clientPID = os.Getpid() if client_label := result.connURL.Query().Get("client_label"); client_label != "" { result.sessionID = client_label } else { result.sessionID = fmt.Sprintf("%s-%s-%d-%d", driverName, driverVersion, result.clientPID, time.Now().Unix()) } // Read the interpolate flag. if iFlag := result.connURL.Query().Get("use_prepared_statements"); iFlag != "" { result.usePreparedStmts = iFlag == "1" } // Read Autocommit flag. if iFlag := result.connURL.Query().Get("autocommit"); iFlag == "" || iFlag == "1" { result.autocommit = "on" } else { result.autocommit = "off" }
// Read OAuth access token flag. result.oauthaccesstoken = result.connURL.Query().Get("oauth_access_token") // Read connection load balance flag. loadBalanceFlag := result.connURL.Query().Get("connection_load_balance") // Read connection failover flag. backupHostsStr := result.connURL.Query().Get("backup_server_node") if backupHostsStr == "" { result.connHostsList = []string{result.connURL.Host} } else { // Parse comma-separated list of backup host-port pairs hosts := strings.Split(backupHostsStr, ",") // Push target host to front of the hosts list result.connHostsList = append([]string{result.connURL.Host}, hosts...) } // Read SSL/TLS flag. sslFlag := strings.ToLower(result.connURL.Query().Get("tlsmode")) if sslFlag == "" { sslFlag = tlsModeNone } result.conn, err = result.establishSocketConnection() if err != nil { return nil, err } // Load Balancing if loadBalanceFlag == "1" { if err = result.balanceLoad(); err != nil { return nil, err } } if sslFlag != tlsModeNone { if err = result.initializeSSL(sslFlag); err != nil { return nil, err } } if err = result.handshake(); err != nil { return nil, err } if err = result.initializeSession(); err != nil { return nil, err } return result, nil } func (v *connection) establishSocketConnection() (net.Conn, error) { // Failover: loop to try all hosts in the list err_msg := "" for i := 0; i < len(v.connHostsList); i++ { host, port, err := net.SplitHostPort(v.connHostsList[i]) if err != nil { // no host-port pair identified err_msg += fmt.Sprintf("\n '%s': %s", v.connHostsList[i], err.Error()) continue } ips, err := net.LookupIP(host) if err != nil { // failed to resolve any IPs from host err_msg += fmt.Sprintf("\n '%s': %s", host, err.Error()) continue } r := rand.New(rand.NewSource(time.Now().Unix())) for _, j := range r.Perm(len(ips)) { // j comes from random permutation of indexes - ips[j] will access a random resolved ip addrString := net.JoinHostPort(ips[j].String(), port) // IPv6 returns "[host]:port" conn, err := net.Dial("tcp", addrString) if err != nil { err_msg += fmt.Sprintf("\n '%s': %s", v.connHostsList[i], err.Error()) } else { if len(err_msg) != 0 { connectionLogger.Debug("Failed to establish a connection to %s", err_msg) } connectionLogger.Debug("Established socket connection to %s", addrString) v.connHostsList = v.connHostsList[i:] return conn, err } } } // All of the hosts failed return nil, fmt.Errorf("Failed to establish a connection to the primary server or any backup host.%s", err_msg) } func (v *connection) recvMessage() (msgs.BackEndMsg, error) { msgHeader := v.scratch[:5] var err error if err = v.readAll(msgHeader); err != nil { return nil, err } msgSize := int(binary.BigEndian.Uint32(msgHeader[1:]) - 4) msgBytes := v.scratch[5:] var y []byte if msgSize > 0 { if msgSize <= len(msgBytes) { y = msgBytes[:msgSize] } else { y = make([]byte, msgSize) } if err = v.readAll(y); err != nil { return nil, err } } bem, err := msgs.CreateBackEndMsg(msgHeader[0], y) if err != nil { return nil, err } // Print the message to stdout (for debugging purposes) if _, drm := bem.(*msgs.BEDataRowMsg); !drm { connectionLogger.Debug("<- " + bem.String()) } else { connectionLogger.Trace("<- " + bem.String()) } return bem, nil } func (v *connection) sendMessage(msg msgs.FrontEndMsg) error { return v.sendMessageTo(msg, v.conn) } func (v *connection) sendMessageTo(msg msgs.FrontEndMsg, conn net.Conn) error { var result error = nil msgBytes, msgTag := msg.Flatten() if msgTag != 0 { _, result = conn.Write([]byte{msgTag}) } if result == nil { sizeBytes := 
v.scratch[:4] binary.BigEndian.PutUint32(sizeBytes, uint32(len(msgBytes)+4)) _, result = conn.Write(sizeBytes) if result == nil && len(msgBytes) > 0 { size := 8192 // Max msg size, consistent with how the server works pos := 0 var sent int for pos < len(msgBytes) { sent, result = conn.Write(msgBytes[pos:min(pos+size, len(msgBytes))]) if result != nil { break } pos += sent } } } if result != nil { connectionLogger.Error("-> FAILED SENDING "+msg.String()+": %v", result.Error()) } else { connectionLogger.Debug("-> " + msg.String()) } return result }
random_line_split
gensynet.py
groupby = int(254 * .01 *netp) subnets += math.ceil(subtotal/groupby) if (sanity_percent < 100):
return subnets def get_default_dev_distro(nodect, printout=True): """Prints device type breakdowns using default ratios and returns a count of each device.""" if (printout): print("Default Device Role Distribution for {} nodes".format(nodect)) dev_breakdown = { 'Business workstation': int(math.floor(nodect*.35)), 'Developer workstation': int(math.floor(nodect*.15)), 'Smartphone': int(math.floor(nodect*.28)), 'Printer': int(math.floor(nodect*.03)), 'Mail server': int(math.floor(nodect*.01)), 'File server': int(math.floor(nodect*.02)), 'Internal web server': int(math.floor(nodect*.06)), 'Database server': int(math.floor(nodect*.01)), 'Code repository': int(math.floor(nodect*.01)), 'DNS server': int(math.floor(nodect*.01)), 'DHCP server': int(math.floor(nodect*.01)), 'Active Directory controller': int(math.floor(nodect*.01)), 'SSH server': int(math.floor(nodect*.01)), 'VOIP phone': 0, 'PBX': 0, 'Unknown': int(math.floor(nodect*.04)) } # any nodes left over gets put into Unknown total = 0 for key, ct in sorted(dev_breakdown.items()): if (printout and key != 'Unknown'): print(" {:>30} : {}".format(key, ct)) total += ct if (nodect > total): dev_breakdown['Unknown'] += (nodect - total) if (printout): print(" {:>30} : {}".format('Unknown', dev_breakdown['Unknown'])) return dev_breakdown def build_configs(subnets, host_count, dev_div, domain=None): """Returns a json object of subnet specifications, or None upon error""" global VERBOSE jsons = [] # subnet breakdown unlabeled_hosts = [] # number of hosts in the network w/o roles ip_addr = [] # keeping track of the 2nd and 3rd octets in IP roles = dict.fromkeys(dev_div.keys(), 0) if len(subnets)/254 > 254: print("WARNING: You're about to see some really sick IPs. Have fun.") for n in subnets: addy = (randint(0,253), randint(0,253)) while addy in ip_addr: addy = (randint(0,253), randint(0,253)) ip_addr.append(addy) jsons.append({ "start_ip" : '10.{}.{}.2'.format(addy[0],addy[1]), "subnet" : '10.{}.{}.0/24'.format(addy[0], addy[1]), "hosts" : n, "roles" : roles.copy() }) unlabeled_hosts.append(n) if VERBOSE: print("start_ip: {}\t number of hosts: {}\t".format(jsons[-1]['start_ip'], jsons[-1]['hosts'])) # divvy up the roles, now that the subnets are defined labeled_hosts = 0 for dev in dev_div: dev_total = dev_div[dev] labeled_hosts += dev_total while dev_total > 0: while True: n = randrange(0, len(subnets)) if (unlabeled_hosts[n] > 0): jsons[n]['roles'][dev] += 1 unlabeled_hosts[n] -= 1 break dev_total -= 1 if labeled_hosts != host_count: print("WARNING: Labeled hosts ({}) didn't equal host count ({})".format(labeled_hosts, host_count)) return jsons def build_configs_deprecated(total, net_div, dev_div, domain=None): """Returns a json object of subnet specifications, or None upon error""" global VERBOSE total_subnets = calculate_subnets(total, net_div) if total_subnets < 1: if VERBOSE: print("WARNING: Could not break down nodes into the requested subnets.") return None jsons = [] host_counter = [] ncount = 0 roles = dict.fromkeys(dev_div.keys(), 0) class_b,class_c = divide(total_subnets, 254) for n in net_div: if VERBOSE: ncount += 1 print("Starting net_div {} of {}".format(ncount, len(net_div))) nodes = round(total * .01 * n[0]) grouped_nodes = round(252 * .01 * n[1]) q,r = divide(nodes, grouped_nodes) if class_b > 254: print("WARNING: You're about to see some really sick IPs. 
Have fun.") while q > 0: if class_c == 0: class_b -= 1 class_c = 255 class_c -= 1 start_ip = '10.{}.{}.1'.format(class_b, class_c) netmask = '10.{}.{}.0/24'.format(class_b, class_c) jsons.append({ "start_ip" : start_ip, "subnet" : netmask, "hosts" : grouped_nodes, "roles" : roles.copy() }) host_counter.append(grouped_nodes) if VERBOSE: print("Initialized subnet {} with {} hosts starting at {}".format(len(jsons), grouped_nodes, start_ip)) q -= 1 if r > 0: if class_c == 0: class_b -= 1 class_c = 255 class_c -= 1 start_ip = '10.{}.{}.1'.format(class_b, class_c) netmask = '10.{}.{}.0/24'.format(class_b,class_c) jsons.append({ "start_ip" : start_ip, "subnet" : netmask, "hosts" : r, "roles" : roles.copy() }) host_counter.append(r) if VERBOSE: print("Initialized subnet {} with {} hosts starting at {}".format(len(jsons), r, start_ip)) if len(jsons) != total_subnets: print("BUG: Number of subnets created not equal to predicted {}".format(total_subnets)) if DEBUG: print("DEBUG: host_counter = {}\ttotal subnets = {}".format(host_counter, total_subnets)) total_hosts = 0 for dev in dev_div: ct = dev_div[dev] total_hosts += ct if (DEBUG): print("DEBUG: dev = {}\tcount = {}\ttotal = {}\thost_counter = {}".format(dev, dev_div[dev], total_hosts, host_counter)) while ct > 0: randomnet = randrange(0, total_subnets) if host_counter[randomnet] > 0: jsons[randomnet]['roles'][dev] += 1 host_counter[randomnet] -= 1 ct -= 1 if total_hosts != total: print("BUG: Number of devices in breakdown did not add up to {}".format(total)) return jsons def randomize_subnet_breakdown(count, minimum, maximum): '''Returns an array of host counts (where index = subnet), or None if the input is ridiculous.''' subnets = [] nodes_left = count # I mean, this is tested for very large values of count; I haven't tested very small numbers yet. if count <= 0 or minimum > count or maximum > count or minimum < 0 or maximum < 0 or maximum <= minimum: return None # break count into subnets until count = 0 or < min while (nodes_left > 0): clients = randint(minimum, maximum) subnets.append(clients) nodes_left -= clients if DEBUG: print("DEBUG: subnet count: {}\tnodes left: {}".format(clients, nodes_left)) if minimum < nodes_left < maximum: subnets.append(nodes_left) nodes_left = 0 elif nodes_left < minimum: # i.e., if all the subnets are maxed out but don't add up to the requested count, # then start all over again, cuz there won't be any way to honor min/max requirement. if (len(subnets) * maximum < count): subnets.clear() nodes_left = count else: break # divvy up the rest of the nodes among the existing subnets subnetIDs = [x for x in iter(range(len(subnets)))] while (nodes_left > 0): s = choice(subnetIDs) # pick a randum subnet if DEBUG: print("DEBUG: looping with s={}, count={}, left={}".format(s
return -1
conditional_block
gensynet.py
groupby = int(254 * .01 *netp) subnets += math.ceil(subtotal/groupby) if (sanity_percent < 100): return -1 return subnets def get_default_dev_distro(nodect, printout=True): """Prints device type breakdowns using default ratios and returns a count of each device.""" if (printout): print("Default Device Role Distribution for {} nodes".format(nodect)) dev_breakdown = { 'Business workstation': int(math.floor(nodect*.35)), 'Developer workstation': int(math.floor(nodect*.15)), 'Smartphone': int(math.floor(nodect*.28)), 'Printer': int(math.floor(nodect*.03)), 'Mail server': int(math.floor(nodect*.01)), 'File server': int(math.floor(nodect*.02)), 'Internal web server': int(math.floor(nodect*.06)), 'Database server': int(math.floor(nodect*.01)), 'Code repository': int(math.floor(nodect*.01)), 'DNS server': int(math.floor(nodect*.01)), 'DHCP server': int(math.floor(nodect*.01)), 'Active Directory controller': int(math.floor(nodect*.01)), 'SSH server': int(math.floor(nodect*.01)), 'VOIP phone': 0, 'PBX': 0, 'Unknown': int(math.floor(nodect*.04)) } # any nodes left over gets put into Unknown total = 0 for key, ct in sorted(dev_breakdown.items()): if (printout and key != 'Unknown'): print(" {:>30} : {}".format(key, ct)) total += ct if (nodect > total): dev_breakdown['Unknown'] += (nodect - total) if (printout): print(" {:>30} : {}".format('Unknown', dev_breakdown['Unknown'])) return dev_breakdown def build_configs(subnets, host_count, dev_div, domain=None): """Returns a json object of subnet specifications, or None upon error""" global VERBOSE jsons = [] # subnet breakdown unlabeled_hosts = [] # number of hosts in the network w/o roles ip_addr = [] # keeping track of the 2nd and 3rd octets in IP roles = dict.fromkeys(dev_div.keys(), 0) if len(subnets)/254 > 254: print("WARNING: You're about to see some really sick IPs. Have fun.") for n in subnets: addy = (randint(0,253), randint(0,253)) while addy in ip_addr: addy = (randint(0,253), randint(0,253)) ip_addr.append(addy) jsons.append({ "start_ip" : '10.{}.{}.2'.format(addy[0],addy[1]), "subnet" : '10.{}.{}.0/24'.format(addy[0], addy[1]), "hosts" : n, "roles" : roles.copy() }) unlabeled_hosts.append(n) if VERBOSE: print("start_ip: {}\t number of hosts: {}\t".format(jsons[-1]['start_ip'], jsons[-1]['hosts'])) # divvy up the roles, now that the subnets are defined labeled_hosts = 0 for dev in dev_div: dev_total = dev_div[dev] labeled_hosts += dev_total while dev_total > 0: while True: n = randrange(0, len(subnets)) if (unlabeled_hosts[n] > 0): jsons[n]['roles'][dev] += 1 unlabeled_hosts[n] -= 1 break dev_total -= 1 if labeled_hosts != host_count: print("WARNING: Labeled hosts ({}) didn't equal host count ({})".format(labeled_hosts, host_count)) return jsons def build_configs_deprecated(total, net_div, dev_div, domain=None): """Returns a json object of subnet specifications, or None upon error""" global VERBOSE total_subnets = calculate_subnets(total, net_div) if total_subnets < 1: if VERBOSE: print("WARNING: Could not break down nodes into the requested subnets.") return None jsons = [] host_counter = [] ncount = 0 roles = dict.fromkeys(dev_div.keys(), 0) class_b,class_c = divide(total_subnets, 254) for n in net_div: if VERBOSE: ncount += 1 print("Starting net_div {} of {}".format(ncount, len(net_div))) nodes = round(total * .01 * n[0]) grouped_nodes = round(252 * .01 * n[1]) q,r = divide(nodes, grouped_nodes) if class_b > 254: print("WARNING: You're about to see some really sick IPs. 
Have fun.") while q > 0: if class_c == 0: class_b -= 1 class_c = 255 class_c -= 1 start_ip = '10.{}.{}.1'.format(class_b, class_c) netmask = '10.{}.{}.0/24'.format(class_b, class_c) jsons.append({ "start_ip" : start_ip, "subnet" : netmask, "hosts" : grouped_nodes, "roles" : roles.copy() }) host_counter.append(grouped_nodes) if VERBOSE: print("Initialized subnet {} with {} hosts starting at {}".format(len(jsons), grouped_nodes, start_ip)) q -= 1 if r > 0: if class_c == 0: class_b -= 1 class_c = 255 class_c -= 1 start_ip = '10.{}.{}.1'.format(class_b, class_c) netmask = '10.{}.{}.0/24'.format(class_b,class_c) jsons.append({ "start_ip" : start_ip, "subnet" : netmask, "hosts" : r, "roles" : roles.copy() }) host_counter.append(r) if VERBOSE: print("Initialized subnet {} with {} hosts starting at {}".format(len(jsons), r, start_ip)) if len(jsons) != total_subnets: print("BUG: Number of subnets created not equal to predicted {}".format(total_subnets)) if DEBUG: print("DEBUG: host_counter = {}\ttotal subnets = {}".format(host_counter, total_subnets)) total_hosts = 0 for dev in dev_div: ct = dev_div[dev] total_hosts += ct if (DEBUG): print("DEBUG: dev = {}\tcount = {}\ttotal = {}\thost_counter = {}".format(dev, dev_div[dev], total_hosts, host_counter)) while ct > 0: randomnet = randrange(0, total_subnets) if host_counter[randomnet] > 0: jsons[randomnet]['roles'][dev] += 1 host_counter[randomnet] -= 1 ct -= 1 if total_hosts != total: print("BUG: Number of devices in breakdown did not add up to {}".format(total)) return jsons def
(count, minimum, maximum): '''Returns an array of host counts (where index = subnet), or None if the input is ridiculous.''' subnets = [] nodes_left = count # I mean, this is tested for very large values of count; I haven't tested very small numbers yet. if count <= 0 or minimum > count or maximum > count or minimum < 0 or maximum < 0 or maximum <= minimum: return None # break count into subnets until count = 0 or < min while (nodes_left > 0): clients = randint(minimum, maximum) subnets.append(clients) nodes_left -= clients if DEBUG: print("DEBUG: subnet count: {}\tnodes left: {}".format(clients, nodes_left)) if minimum < nodes_left < maximum: subnets.append(nodes_left) nodes_left = 0 elif nodes_left < minimum: # i.e., if all the subnets are maxed out but don't add up to the requested count, # then start all over again, cuz there won't be any way to honor min/max requirement. if (len(subnets) * maximum < count): subnets.clear() nodes_left = count else: break # divvy up the rest of the nodes among the existing subnets subnetIDs = [x for x in iter(range(len(subnets)))] while (nodes_left > 0): s = choice(subnetIDs) # pick a randum subnet if DEBUG: print("DEBUG: looping with s={}, count={}, left={}".format
randomize_subnet_breakdown
identifier_name
gensynet.py
: roles.copy() }) host_counter.append(grouped_nodes) if VERBOSE: print("Initialized subnet {} with {} hosts starting at {}".format(len(jsons), grouped_nodes, start_ip)) q -= 1 if r > 0: if class_c == 0: class_b -= 1 class_c = 255 class_c -= 1 start_ip = '10.{}.{}.1'.format(class_b, class_c) netmask = '10.{}.{}.0/24'.format(class_b,class_c) jsons.append({ "start_ip" : start_ip, "subnet" : netmask, "hosts" : r, "roles" : roles.copy() }) host_counter.append(r) if VERBOSE: print("Initialized subnet {} with {} hosts starting at {}".format(len(jsons), r, start_ip)) if len(jsons) != total_subnets: print("BUG: Number of subnets created not equal to predicted {}".format(total_subnets)) if DEBUG: print("DEBUG: host_counter = {}\ttotal subnets = {}".format(host_counter, total_subnets)) total_hosts = 0 for dev in dev_div: ct = dev_div[dev] total_hosts += ct if (DEBUG): print("DEBUG: dev = {}\tcount = {}\ttotal = {}\thost_counter = {}".format(dev, dev_div[dev], total_hosts, host_counter)) while ct > 0: randomnet = randrange(0, total_subnets) if host_counter[randomnet] > 0: jsons[randomnet]['roles'][dev] += 1 host_counter[randomnet] -= 1 ct -= 1 if total_hosts != total: print("BUG: Number of devices in breakdown did not add up to {}".format(total)) return jsons def randomize_subnet_breakdown(count, minimum, maximum): '''Returns an array of host counts (where index = subnet), or None if the input is ridiculous.''' subnets = [] nodes_left = count # I mean, this is tested for very large values of count; I haven't tested very small numbers yet. if count <= 0 or minimum > count or maximum > count or minimum < 0 or maximum < 0 or maximum <= minimum: return None # break count into subnets until count = 0 or < min while (nodes_left > 0): clients = randint(minimum, maximum) subnets.append(clients) nodes_left -= clients if DEBUG: print("DEBUG: subnet count: {}\tnodes left: {}".format(clients, nodes_left)) if minimum < nodes_left < maximum: subnets.append(nodes_left) nodes_left = 0 elif nodes_left < minimum: # i.e., if all the subnets are maxed out but don't add up to the requested count, # then start all over again, cuz there won't be any way to honor min/max requirement. 
if (len(subnets) * maximum < count): subnets.clear() nodes_left = count else: break # divvy up the rest of the nodes among the existing subnets subnetIDs = [x for x in iter(range(len(subnets)))] while (nodes_left > 0): s = choice(subnetIDs) # pick a randum subnet if DEBUG: print("DEBUG: looping with s={}, count={}, left={}".format(s, subnets[s], nodes_left)) if subnets[s] < maximum: subnets[s] += 1 nodes_left -= 1 else: subnetIDs.remove(s) return subnets def build_network(subnets, fname=None, randomspace=False, prettyprint=True): global VERBOSE outobj = [] subnets_togo = len(subnets) for n in subnets: start_ip = ipaddress.ip_address(n['start_ip']) role_ct = dict(n['roles']) hosts_togo = n['hosts'] ip_taken = [] subnets_togo -= 1 while (hosts_togo > 0): host = { 'uid':generate_uuid(), 'mac':generate_mac(), 'rDNS_host':randstring(randrange(4,9)), 'subnet':n['subnet'] } if 'domain' in n: host['rDNS_domain'] = n['domain'] host['record'] = { 'source':record(), 'timestamp': str(dt.now()) } while True: a_role = choice(list(role_ct.keys())) if role_ct[a_role] > 0: role_ct[a_role] -= 1 host['role'] = { 'role': a_role, 'confidence': randrange(55,100) } break else: del(role_ct[a_role]) host['os'] = { 'os': generate_os_type(host['role']['role']) } if host['os']['os'] != 'Unknown': host['os']['confidence'] = randrange(55,100) if (randomspace): while True: ip = start_ip + randrange(0, 254) if ip not in ip_taken: host['IP'] = str(ip) ip_taken.append(ip) break else: ip = start_ip + hosts_togo host['IP'] = str(ip) outobj.append(host) hosts_togo -= 1 indent = 2 if prettyprint else None if fname: with open(fname, 'w') as ofile: ofile.write("{}".format(json.dumps(outobj, indent=indent))) else: return json.dumps(outobj, indent=indent) def main(): global VERBOSE, VERSION, NET_SUMMARY, OLDVERSION parser = argparse.ArgumentParser() parser.add_argument('-v', '--verbose', help='Provide program feedback', action="store_true") parser.add_argument('-s', '--summarize', help='Prints network configurations to output', action="store_true") parser.add_argument('-d', '--deprecate', help='Use the deprecated version for building subnets', action='store_true') parser.add_argument('--version', help='Prints version', action="store_true") args = parser.parse_args() if args.version: print("{} v{}".format(sys.argv[0], VERSION)) sys.exit() if args.verbose: VERBOSE = True if args.summarize: NET_SUMMARY = True if args.deprecate: OLDVERSION = True outname = '{}.json'.format(time.strftime("%Y%m%d-%H%M%S")) print('\n\n\tSYNTHETIC NETWORK NODE GENERATOR\n') while True: nodect = int(input("How many network nodes? [500]: ") or "500") if nodect > 4000000: print("That ({}) is just exorbitant. Next time try less than {}.".format(nodect, 4000000)) sys.exit() # setting subnet breakdown ---------------- if OLDVERSION: if (nodect > 50): print('Default Node distribution of {} nodes across Class C subnets: '.format(nodect)) print(' 30% of the nodes will occupy subnets that are 70% populated') print(' 45% of the nodes will occupy subnets that are 20% populated') print(' 25% of the nodes will occupy subnets that are 90% populated') net_breakdown = [(30,70), (45,20), (25,90)] print('Total subnets: {}'.format(calculate_subnets(nodect, net_breakdown))) set_net = input("Manually set network node distribution? 
[No]: ") or "No" else: set_net = "No" net_breakdown = [(100, 100)] print('Total subnets: 1') if (set_net.lower() != 'no' and set_net.lower() != 'n'): net_breakdown = [] percent = 100 print("Please enter what percentage of the {} nodes would consume what percentage".format(nodect)) print("of the Class C address space...") while percent > 0: nodes = int(input(" Percent of nodes (MAX={}): ".format(percent)) or "100") density = int(input(" Percent of class C space occupied: ") or "100") if (nodes <= 100 and nodes > 1): percent = percent - nodes else: print("Illegal node percentage value ({})".format(nodes)) continue if (density > 100 or density < 1): print("Illegal density percentage value ({})".format(density)) continue net_breakdown.append((nodes, density)) subnets = calculate_subnets(nodect, net_breakdown) print('Total subnets: {}'.format(subnets)) else: MAX_max = MAX_min = -1 while True: subnets = [] if nodect <= 252:
random_line_split
gensynet.py
" : roles.copy() }) unlabeled_hosts.append(n) if VERBOSE: print("start_ip: {}\t number of hosts: {}\t".format(jsons[-1]['start_ip'], jsons[-1]['hosts'])) # divvy up the roles, now that the subnets are defined labeled_hosts = 0 for dev in dev_div: dev_total = dev_div[dev] labeled_hosts += dev_total while dev_total > 0: while True: n = randrange(0, len(subnets)) if (unlabeled_hosts[n] > 0): jsons[n]['roles'][dev] += 1 unlabeled_hosts[n] -= 1 break dev_total -= 1 if labeled_hosts != host_count: print("WARNING: Labeled hosts ({}) didn't equal host count ({})".format(labeled_hosts, host_count)) return jsons def build_configs_deprecated(total, net_div, dev_div, domain=None): """Returns a json object of subnet specifications, or None upon error""" global VERBOSE total_subnets = calculate_subnets(total, net_div) if total_subnets < 1: if VERBOSE: print("WARNING: Could not break down nodes into the requested subnets.") return None jsons = [] host_counter = [] ncount = 0 roles = dict.fromkeys(dev_div.keys(), 0) class_b,class_c = divide(total_subnets, 254) for n in net_div: if VERBOSE: ncount += 1 print("Starting net_div {} of {}".format(ncount, len(net_div))) nodes = round(total * .01 * n[0]) grouped_nodes = round(252 * .01 * n[1]) q,r = divide(nodes, grouped_nodes) if class_b > 254: print("WARNING: You're about to see some really sick IPs. Have fun.") while q > 0: if class_c == 0: class_b -= 1 class_c = 255 class_c -= 1 start_ip = '10.{}.{}.1'.format(class_b, class_c) netmask = '10.{}.{}.0/24'.format(class_b, class_c) jsons.append({ "start_ip" : start_ip, "subnet" : netmask, "hosts" : grouped_nodes, "roles" : roles.copy() }) host_counter.append(grouped_nodes) if VERBOSE: print("Initialized subnet {} with {} hosts starting at {}".format(len(jsons), grouped_nodes, start_ip)) q -= 1 if r > 0: if class_c == 0: class_b -= 1 class_c = 255 class_c -= 1 start_ip = '10.{}.{}.1'.format(class_b, class_c) netmask = '10.{}.{}.0/24'.format(class_b,class_c) jsons.append({ "start_ip" : start_ip, "subnet" : netmask, "hosts" : r, "roles" : roles.copy() }) host_counter.append(r) if VERBOSE: print("Initialized subnet {} with {} hosts starting at {}".format(len(jsons), r, start_ip)) if len(jsons) != total_subnets: print("BUG: Number of subnets created not equal to predicted {}".format(total_subnets)) if DEBUG: print("DEBUG: host_counter = {}\ttotal subnets = {}".format(host_counter, total_subnets)) total_hosts = 0 for dev in dev_div: ct = dev_div[dev] total_hosts += ct if (DEBUG): print("DEBUG: dev = {}\tcount = {}\ttotal = {}\thost_counter = {}".format(dev, dev_div[dev], total_hosts, host_counter)) while ct > 0: randomnet = randrange(0, total_subnets) if host_counter[randomnet] > 0: jsons[randomnet]['roles'][dev] += 1 host_counter[randomnet] -= 1 ct -= 1 if total_hosts != total: print("BUG: Number of devices in breakdown did not add up to {}".format(total)) return jsons def randomize_subnet_breakdown(count, minimum, maximum): '''Returns an array of host counts (where index = subnet), or None if the input is ridiculous.''' subnets = [] nodes_left = count # I mean, this is tested for very large values of count; I haven't tested very small numbers yet. 
if count <= 0 or minimum > count or maximum > count or minimum < 0 or maximum < 0 or maximum <= minimum: return None # break count into subnets until count = 0 or < min while (nodes_left > 0): clients = randint(minimum, maximum) subnets.append(clients) nodes_left -= clients if DEBUG: print("DEBUG: subnet count: {}\tnodes left: {}".format(clients, nodes_left)) if minimum < nodes_left < maximum: subnets.append(nodes_left) nodes_left = 0 elif nodes_left < minimum: # i.e., if all the subnets are maxed out but don't add up to the requested count, # then start all over again, cuz there won't be any way to honor min/max requirement. if (len(subnets) * maximum < count): subnets.clear() nodes_left = count else: break # divvy up the rest of the nodes among the existing subnets subnetIDs = [x for x in iter(range(len(subnets)))] while (nodes_left > 0): s = choice(subnetIDs) # pick a randum subnet if DEBUG: print("DEBUG: looping with s={}, count={}, left={}".format(s, subnets[s], nodes_left)) if subnets[s] < maximum: subnets[s] += 1 nodes_left -= 1 else: subnetIDs.remove(s) return subnets def build_network(subnets, fname=None, randomspace=False, prettyprint=True): global VERBOSE outobj = [] subnets_togo = len(subnets) for n in subnets: start_ip = ipaddress.ip_address(n['start_ip']) role_ct = dict(n['roles']) hosts_togo = n['hosts'] ip_taken = [] subnets_togo -= 1 while (hosts_togo > 0): host = { 'uid':generate_uuid(), 'mac':generate_mac(), 'rDNS_host':randstring(randrange(4,9)), 'subnet':n['subnet'] } if 'domain' in n: host['rDNS_domain'] = n['domain'] host['record'] = { 'source':record(), 'timestamp': str(dt.now()) } while True: a_role = choice(list(role_ct.keys())) if role_ct[a_role] > 0: role_ct[a_role] -= 1 host['role'] = { 'role': a_role, 'confidence': randrange(55,100) } break else: del(role_ct[a_role]) host['os'] = { 'os': generate_os_type(host['role']['role']) } if host['os']['os'] != 'Unknown': host['os']['confidence'] = randrange(55,100) if (randomspace): while True: ip = start_ip + randrange(0, 254) if ip not in ip_taken: host['IP'] = str(ip) ip_taken.append(ip) break else: ip = start_ip + hosts_togo host['IP'] = str(ip) outobj.append(host) hosts_togo -= 1 indent = 2 if prettyprint else None if fname: with open(fname, 'w') as ofile: ofile.write("{}".format(json.dumps(outobj, indent=indent))) else: return json.dumps(outobj, indent=indent) def main():
global VERBOSE, VERSION, NET_SUMMARY, OLDVERSION parser = argparse.ArgumentParser() parser.add_argument('-v', '--verbose', help='Provide program feedback', action="store_true") parser.add_argument('-s', '--summarize', help='Prints network configurations to output', action="store_true") parser.add_argument('-d', '--deprecate', help='Use the deprecated version for building subnets', action='store_true') parser.add_argument('--version', help='Prints version', action="store_true") args = parser.parse_args() if args.version: print("{} v{}".format(sys.argv[0], VERSION)) sys.exit() if args.verbose: VERBOSE = True if args.summarize: NET_SUMMARY = True if args.deprecate: OLDVERSION = True outname = '{}.json'.format(time.strftime("%Y%m%d-%H%M%S")) print('\n\n\tSYNTHETIC NETWORK NODE GENERATOR\n')
identifier_body
base_command.py
_api = settings.JUXINLI_CONF['access_report_data_api'] self._access_raw_data_api = settings.JUXINLI_CONF['access_raw_data_api'] self._access_report_token_api = settings.JUXINLI_CONF['access_report_token_api'] self._access_e_business_raw_data_api = settings.JUXINLI_CONF['access_e_business_raw_data_api'] self._options = { 'update_days' : 21, 'force_update' : False, } self.init_config() def init_config(): ''' 参考格式: self._transformer = { 'basic_transformer' : { 'name' : 'PhoneBasic', # django的Model类名称 'path' : 'raw_data/members/transactions:0/basic', #json数据的路径 'data_type' : 'map', # 数据的类型如果是单条就是map,如果是多条就是list 'version' : True, # 是否使用版本控制,如果是真那么每次拉数据会新增版本号,否则都用版本1 'trans' : { #数据的转化格式 source_field(json) -> dest_field(db model) "cell_phone": "cell_phone", "idcard": "idcard", "real_name": "real_name", "reg_time": "reg_time", "update_time": "update_time", "receiver" : { #如果是外键就用一个嵌套的格式来表示 (嵌套就没必要再用path定位了吧,默认就是当前路径) "name" : "Receiver" "req_call_cnt/data_type" : "list" "version" : True, "trans": { "name" : "name", "phone_num_list" : "phone_num_list", "amount" : "amount", "count" : "count", }, }, }, }, } ''' pass def test(self,user,data): if not data: return ERR_GET_RAW_DATA_FAILED ret_code = self._save_raw_data(data, user, self._options) return ret_code def get_juxinli_data(self, uid, url): try: user = User.objects.get(pk=uid) token = self._get_token() if not token: return ERR_CREATE_TOKEN_FAILED data = self._get_juxinli_data(token, user, url) if not data: return ERR_GET_RAW_DATA_FAILED ret_code = self._save_raw_data(data, user, self._options) if ret_code != 0: return ret_code #data = self._get_report_data(token, user) #print data #print "@@ print ret", ret_code return RETURN_SUCCESS except Exception, e: traceback.print_exc() TkLog().error("get juxinli call failed %s" % str(e)) return ERR_OTHER_EXCEPTION def _open_url(self, url): ''' get http request return json ''' req1 = urllib2.Request(url=url) html = urllib2.urlopen(req1).read().decode('utf-8') return json.loads(html.encode("utf-8")) def _get_token(self): ''' 生成一个新的用来获取数据的token 失败返回None ''' url = u"%s?client_secret=%s&hours=24&org_name=%s" % (self._access_report_token_api, self._client_secret, self._org_name) html = self._open_url(url) #if try: res = html['access_token'] return res except KeyError, e: return None def _get_juxinli_data(self, access_token, user, url): ''' 获取聚信力数据 返回json ''' raw_url = u'%s?client_secret=%s&access_token=%s&name=%s&idcard=%s&phone=%s' % (url, self._client_secret, access_token, user.name, user.id_no, user.phone_no) #print raw_url try: res = self._open_url(raw_url.encode('utf-8')) # print res # print res['raw_data']['members']['error_msg'] success = res["success"] if success != "true": return None return res except KeyError, e: return None #def _get_report_data(self, access_token, user): # report_url = u'%s?client_secret=%s&access_token=%s&name=%s&idcard=%s&phone=%s' % (self._access_report_token_api, self._client_secret, access_token, user.name, user.id_no, user.phone_no) # print report_url # res = self._open_url(report_url.encode('utf-8')) # #print res # #print res['raw_data']['members']['error_msg'] # return res def _allow_overwrite_data(self, user, options): return True def _get_data_from_path(self, data, path): ''' path语法 / 分割路径 : 选择list中的序号 ''' try: fields = path.split("/") #print fields res = data for field in fields: if field.find(":") != -1: parts = f
if len(parts) != 2: TkLog().error("field format error %s" % (field)) return None res = res[parts[0]][int(parts[1])] else: res = res[field] return res except Exception, e: print e traceback.print_exc() TkLog().error("get data from path failed %s" % str(e)) return None def _save_raw_data(self, data, user, options): """ 可以重入,一个用户的信息如果更新时间少于options.update_days天,不会更新db,否则添加记录 """ if not self._allow_overwrite_data(user, options): return RETURN_CAN_NOT_OVERWRITE for transtype in self._transformer.keys(): adaptor = self._transformer[transtype] cls = eval(adaptor["name"]) version = 0 objs = cls.objects.filter(owner=user).order_by('-id')[:1] if len(objs) == 1: version = objs[0].version TkLog().info("update %s version %d" % (adaptor["name"], version)) data_list = self._get_data_from_path(data, adaptor["path"]) if not data_list: TkLog().warn("data not found %s:%s" % (adaptor["name"], adaptor["path"])) #return -4 #just skip ret_code = self._save_obj(data_list, cls, user, adaptor, version) if ret_code != 0: return ret_code return RETURN_SUCCESS @transaction.commit_manually def _save_obj(self, data_list, cls, user, adaptor, version=0, parent=None): ''' 将一个对象写入数据库 根据data_type来判断是map还是list ''' if adaptor["data_type"] == "list": #data_list是列表数据 for record in data_list: ret_code = self._save_single_obj(record, cls, user, adaptor, version, parent) if ret_code != 0: return ret_code elif adaptor["data_type"] == "map": #data_list是单条数据 record = data_list ret_code = self._save_single_obj(record, cls, user, adaptor, version, parent) if ret_code != 0: return ret_code transaction.commit() return 0 def _save_single_obj(self, record, cls, user, adaptor, version = 0, parent=None): ''' 将一个条目写入数据库,如果parent不为空,还需要设置parent的外键 record : 单条json数据条目 cls : 数据库Model ''' obj = cls() for source_field, dest_field in adaptor['trans'].items(): if isinstance(dest_field,str): field_type = obj._meta.get_field(dest_field) if "/" in source_field: record[source_field] = self._get_data_from_path(record,source_field) if isinstance(field_type, models.CharField): try: if isinstance(record[source_field],list): #setattr(obj, dest_field, "#".join(record[source_field])) setattr(obj, dest_field, record[source_field][0]) else: setattr(obj, dest_field, record[source_field]) except Exception, e: TkLog().warn("set char field failed %s %s" % (str(e), record[source_field])) return ERR_SETATTR_FAILED elif isinstance(field_type, models.IntegerField): try: if not record[source_field]: setattr(obj, dest_field, 0) else: setattr(obj, dest_field, int(record[source_field])) except Exception, e: TkLog().warn("set int field failed %s %s" % (str(e), record[source_field])) return ERR_SETATTR_FAILED elif isinstance(field_type, models.BigIntegerField): try: if not record[source_field]: setattr(obj
ield.split(":")
identifier_name
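The docstring in the base_command.py rows above defines a small path syntax for digging into the parsed JSON: "/" separates keys and ":" selects an index inside a list (for example raw_data/members/transactions:0/basic). A minimal stand-alone sketch of that lookup, assuming plain dicts and lists stand in for the Juxinli response:

# Stand-alone sketch of the path syntax described above:
# "/" separates keys, "key:N" picks element N of the list under "key".
def get_data_from_path(data, path):
    res = data
    for field in path.split("/"):
        if ":" in field:
            key, _, index = field.partition(":")
            res = res[key][int(index)]
        else:
            res = res[field]
    return res

# Example shaped like the documented path 'raw_data/members/transactions:0/basic'
doc = {"raw_data": {"members": {"transactions": [{"basic": {"cell_phone": "13800000000"}}]}}}
print(get_data_from_path(doc, "raw_data/members/transactions:0/basic"))
# -> {'cell_phone': '13800000000'}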
base_command.py
_report_data_api = settings.JUXINLI_CONF['access_report_data_api'] self._access_raw_data_api = settings.JUXINLI_CONF['access_raw_data_api'] self._access_report_token_api = settings.JUXINLI_CONF['access_report_token_api'] self._access_e_business_raw_data_api = settings.JUXINLI_CONF['access_e_business_raw_data_api'] self._options = { 'update_days' : 21, 'force_update' : False, } self.init_config() def init_config(): ''' 参考格式: self._transformer = { 'basic_transformer' : { 'name' : 'PhoneBasic', # django的Model类名称 'path' : 'raw_data/members/transactions:0/basic', #json数据的路径 'data_type' : 'map', # 数据的类型如果是单条就是map,如果是多条就是list 'version' : True, # 是否使用版本控制,如果是真那么每次拉数据会新增版本号,否则都用版本1 'trans' : { #数据的转化格式 source_field(json) -> dest_field(db model) "cell_phone": "cell_phone", "idcard": "idcard", "real_name": "real_name", "reg_time": "reg_time", "update_time": "update_time", "receiver" : { #如果是外键就用一个嵌套的格式来表示 (嵌套就没必要再用path定位了吧,默认就是当前路径) "name" : "Receiver" "req_call_cnt/data_type" : "list" "version" : True, "trans": { "name" : "name", "phone_num_list" : "phone_num_list", "amount" : "amount", "count" : "count", }, }, }, }, } ''' pass def test(self,user,data): if not data: return ERR_GET_RAW_DATA_FAILED ret_code = self._save_raw_data(data, user, self._options) return ret_code def get_juxinli_data(self, uid, url): try: user = User.objects.get(pk=uid) token = self._get_token() if not token: return ERR_CREATE_TOKEN_FAILED data = self._get_juxinli_data(token, user, url) if not data: return ERR_GET_RAW_DATA_FAILED ret_code = self._save_raw_data(data, user, self._options) if ret_code != 0: return ret_code #data = self._get_report_data(token, user) #print data #print "@@ print ret", ret_code return RETURN_SUCCESS except Exception, e: traceback.print_exc() TkLog().error("get juxinli call failed %s" % str(e)) return ERR_OTHER_EXCEPTION
''' req1 = urllib2.Request(url=url) html = urllib2.urlopen(req1).read().decode('utf-8') return json.loads(html.encode("utf-8")) def _get_token(self): ''' 生成一个新的用来获取数据的token 失败返回None ''' url = u"%s?client_secret=%s&hours=24&org_name=%s" % (self._access_report_token_api, self._client_secret, self._org_name) html = self._open_url(url) #if try: res = html['access_token'] return res except KeyError, e: return None def _get_juxinli_data(self, access_token, user, url): ''' 获取聚信力数据 返回json ''' raw_url = u'%s?client_secret=%s&access_token=%s&name=%s&idcard=%s&phone=%s' % (url, self._client_secret, access_token, user.name, user.id_no, user.phone_no) #print raw_url try: res = self._open_url(raw_url.encode('utf-8')) # print res # print res['raw_data']['members']['error_msg'] success = res["success"] if success != "true": return None return res except KeyError, e: return None #def _get_report_data(self, access_token, user): # report_url = u'%s?client_secret=%s&access_token=%s&name=%s&idcard=%s&phone=%s' % (self._access_report_token_api, self._client_secret, access_token, user.name, user.id_no, user.phone_no) # print report_url # res = self._open_url(report_url.encode('utf-8')) # #print res # #print res['raw_data']['members']['error_msg'] # return res def _allow_overwrite_data(self, user, options): return True def _get_data_from_path(self, data, path): ''' path语法 / 分割路径 : 选择list中的序号 ''' try: fields = path.split("/") #print fields res = data for field in fields: if field.find(":") != -1: parts = field.split(":") if len(parts) != 2: TkLog().error("field format error %s" % (field)) return None res = res[parts[0]][int(parts[1])] else: res = res[field] return res except Exception, e: print e traceback.print_exc() TkLog().error("get data from path failed %s" % str(e)) return None def _save_raw_data(self, data, user, options): """ 可以重入,一个用户的信息如果更新时间少于options.update_days天,不会更新db,否则添加记录 """ if not self._allow_overwrite_data(user, options): return RETURN_CAN_NOT_OVERWRITE for transtype in self._transformer.keys(): adaptor = self._transformer[transtype] cls = eval(adaptor["name"]) version = 0 objs = cls.objects.filter(owner=user).order_by('-id')[:1] if len(objs) == 1: version = objs[0].version TkLog().info("update %s version %d" % (adaptor["name"], version)) data_list = self._get_data_from_path(data, adaptor["path"]) if not data_list: TkLog().warn("data not found %s:%s" % (adaptor["name"], adaptor["path"])) #return -4 #just skip ret_code = self._save_obj(data_list, cls, user, adaptor, version) if ret_code != 0: return ret_code return RETURN_SUCCESS @transaction.commit_manually def _save_obj(self, data_list, cls, user, adaptor, version=0, parent=None): ''' 将一个对象写入数据库 根据data_type来判断是map还是list ''' if adaptor["data_type"] == "list": #data_list是列表数据 for record in data_list: ret_code = self._save_single_obj(record, cls, user, adaptor, version, parent) if ret_code != 0: return ret_code elif adaptor["data_type"] == "map": #data_list是单条数据 record = data_list ret_code = self._save_single_obj(record, cls, user, adaptor, version, parent) if ret_code != 0: return ret_code transaction.commit() return 0 def _save_single_obj(self, record, cls, user, adaptor, version = 0, parent=None): ''' 将一个条目写入数据库,如果parent不为空,还需要设置parent的外键 record : 单条json数据条目 cls : 数据库Model ''' obj = cls() for source_field, dest_field in adaptor['trans'].items(): if isinstance(dest_field,str): field_type = obj._meta.get_field(dest_field) if "/" in source_field: record[source_field] = self._get_data_from_path(record,source_field) if isinstance(field_type, 
models.CharField): try: if isinstance(record[source_field],list): #setattr(obj, dest_field, "#".join(record[source_field])) setattr(obj, dest_field, record[source_field][0]) else: setattr(obj, dest_field, record[source_field]) except Exception, e: TkLog().warn("set char field failed %s %s" % (str(e), record[source_field])) return ERR_SETATTR_FAILED elif isinstance(field_type, models.IntegerField): try: if not record[source_field]: setattr(obj, dest_field, 0) else: setattr(obj, dest_field, int(record[source_field])) except Exception, e: TkLog().warn("set int field failed %s %s" % (str(e), record[source_field])) return ERR_SETATTR_FAILED elif isinstance(field_type, models.BigIntegerField): try: if not record[source_field]: setattr(obj,
def _open_url(self, url): ''' get http request return json
random_line_split
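The _get_token and _get_juxinli_data helpers above assemble query strings (client_secret, access_token, name, idcard, phone) and treat the JSON reply as successful only when its "success" field is the string "true". A small sketch of that URL assembly and check with the standard library; the endpoint and values below are placeholders and nothing is fetched:

# Sketch of the query-string assembly used by _get_token / _get_juxinli_data.
# No network call is made; the endpoint below is a placeholder.
from urllib.parse import urlencode

def build_raw_data_url(base, client_secret, access_token, name, idcard, phone):
    query = urlencode({
        "client_secret": client_secret,
        "access_token": access_token,
        "name": name,
        "idcard": idcard,
        "phone": phone,
    })
    return "{}?{}".format(base, query)

def is_success(reply):
    # The API signals success with the string "true", per the snippet above.
    return reply.get("success") == "true"

print(build_raw_data_url("https://example.invalid/access_raw_data",
                         "secret", "token", "test-user", "placeholder-id", "13800000000"))
print(is_success({"success": "true"}))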
base_command.py
_api = settings.JUXINLI_CONF['access_report_data_api'] self._access_raw_data_api = settings.JUXINLI_CONF['access_raw_data_api'] self._access_report_token_api = settings.JUXINLI_CONF['access_report_token_api'] self._access_e_business_raw_data_api = settings.JUXINLI_CONF['access_e_business_raw_data_api'] self._options = { 'update_days' : 21, 'force_update' : False, } self.init_config() def init_config(): ''' 参考格式: self._transformer = { 'basic_transformer' : { 'name' : 'PhoneBasic', # django的Model类名称 'path' : 'raw_data/members/transactions:0/basic', #json数据的路径 'data_type' : 'map', # 数据的类型如果是单条就是map,如果是多条就是list 'version' : True, # 是否使用版本控制,如果是真那么每次拉数据会新增版本号,否则都用版本1 'trans' : { #数据的转化格式 source_field(json) -> dest_field(db model) "cell_phone": "cell_phone", "idcard": "idcard", "real_name": "real_name", "reg_time": "reg_time", "update_time": "update_time", "receiver" : { #如果是外键就用一个嵌套的格式来表示 (嵌套就没必要再用path定位了吧,默认就是当前路径) "name" : "Receiver" "req_call_cnt/data_type" : "list" "version" : True, "trans": { "name" : "name", "phone_num_list" : "phone_num_list", "amount" : "amount", "count" : "count", }, }, }, }, } ''' pass def test(self,user,data): if not data: return ERR_GET_RAW_DATA_FAILED ret_code = self._save_raw_data(data, user, self._options) return ret_code def get_juxinli_data(self, uid, url): try: user = User.objects.get(pk=uid) token = self._get_token() if not token: return ERR_CREATE_TOKEN_FAILED data = self._get_juxinli_data(token, user, url) if not data: return ERR_GET_RAW_DATA_FAILED ret_code = self._save_raw_data(data, user, self._options) if ret_code != 0: return ret_code #data = self._get_report_data(token, user) #print data #print "@@ print ret", ret_code return RETURN_SUCCESS except Exception, e: traceback.print_exc() TkLog().error("get juxinli call failed %s" % str(e)) return ERR_OTHER_EXCEPTION def _open_url(self, url): ''' get http request return json ''' req1 = urllib2.Request(url=url) html = urllib2.urlopen(req1).read().decode('utf-8') return json.loads(html.encode("utf-8")) def _get_token(self): ''' 生成一个新的用来获取数据的token 失败返回None ''' url = u"%s?client_secret=%s&hours=24&org_name=%s" % (self._access_report_token_api, self._client_secret, self._org_name) html = self._open_url(url) #if try: res = html['access_token'] return res except KeyError, e: return None def _get_juxinli_data(self, access_token, user, url): ''' 获取聚信力数据 返回json ''' raw_url = u'%s?client_secret=%s&access_token=%s&name=%s&idcard=%s&phone=%s' % (url, self._client_secret, access_token, user.name, user.id_no, user.phone_no) #print raw_url try: res = self._open_url(raw_url.encode('utf-8')) # print res # print res['raw_data']['members']['error_msg'] success = res["success"] if success != "true": return None return res except KeyError, e: return None #def _get_report_data(self, access_token, user): # report_url = u'%s?client_secret=%s&access_token=%s&name=%s&idcard=%s&phone=%s' % (self._access_report_token_api, self._client_secret, access_token, user.name, user.id_no, user.phone_no) # print report_url # res = self._open_url(report_url.encode('utf-8')) # #print res # #print res['raw_data']['members']['error_msg'] # return res def _allow_overwrite_data(self, user, options): return True def _get_data_from_path(self, data, path): ''' path语法 / 分割路径 : 选择list中的序号 ''' try: fields = path.split("/") #print fields res = data for field in fields: if field.find(":") != -1: parts = field.split(":") if len(parts) != 2: TkLog().error("field format error %s" % (field)) return None res = res[parts[0]][int(parts[1])] else: res = res[field] 
return res except Exception, e: print e traceback.print_exc() TkLog().error("get data from path failed %s" % str(e)) return None def _save_raw_data(self, data, user, options): """ 可以重入,一个用户的信息如果更新时间少于options.update_days天,不会更新db,否则添加记录 """ if not self._allow_overwrite_data(user, options): return RETURN_CAN_NOT_OVERWRITE for transtype in self._transformer.keys(): adaptor = self._transformer[transtype] cls = eval(adaptor["name"]) version = 0 objs = cls.objects.filter(owner=user).order
根据data_type来判断是map还是list ''' if adaptor["data_type"] == "list": #data_list是列表数据 for record in data_list: ret_code = self._save_single_obj(record, cls, user, adaptor, version, parent) if ret_code != 0: return ret_code elif adaptor["data_type"] == "map": #data_list是单条数据 record = data_list ret_code = self._save_single_obj(record, cls, user, adaptor, version, parent) if ret_code != 0: return ret_code transaction.commit() return 0 def _save_single_obj(self, record, cls, user, adaptor, version = 0, parent=None): ''' 将一个条目写入数据库,如果parent不为空,还需要设置parent的外键 record : 单条json数据条目 cls : 数据库Model ''' obj = cls() for source_field, dest_field in adaptor['trans'].items(): if isinstance(dest_field,str): field_type = obj._meta.get_field(dest_field) if "/" in source_field: record[source_field] = self._get_data_from_path(record,source_field) if isinstance(field_type, models.CharField): try: if isinstance(record[source_field],list): #setattr(obj, dest_field, "#".join(record[source_field])) setattr(obj, dest_field, record[source_field][0]) else: setattr(obj, dest_field, record[source_field]) except Exception, e: TkLog().warn("set char field failed %s %s" % (str(e), record[source_field])) return ERR_SETATTR_FAILED elif isinstance(field_type, models.IntegerField): try: if not record[source_field]: setattr(obj, dest_field, 0) else: setattr(obj, dest_field, int(record[source_field])) except Exception, e: TkLog().warn("set int field failed %s %s" % (str(e), record[source_field])) return ERR_SETATTR_FAILED elif isinstance(field_type, models.BigIntegerField): try: if not record[source_field]: setattr(obj
_by('-id')[:1] if len(objs) == 1: version = objs[0].version TkLog().info("update %s version %d" % (adaptor["name"], version)) data_list = self._get_data_from_path(data, adaptor["path"]) if not data_list: TkLog().warn("data not found %s:%s" % (adaptor["name"], adaptor["path"])) #return -4 #just skip ret_code = self._save_obj(data_list, cls, user, adaptor, version) if ret_code != 0: return ret_code return RETURN_SUCCESS @transaction.commit_manually def _save_obj(self, data_list, cls, user, adaptor, version=0, parent=None): ''' 将一个对象写入数据库
identifier_body
base_command.py
_api = settings.JUXINLI_CONF['access_report_data_api'] self._access_raw_data_api = settings.JUXINLI_CONF['access_raw_data_api'] self._access_report_token_api = settings.JUXINLI_CONF['access_report_token_api'] self._access_e_business_raw_data_api = settings.JUXINLI_CONF['access_e_business_raw_data_api'] self._options = { 'update_days' : 21, 'force_update' : False, } self.init_config() def init_config(): ''' 参考格式: self._transformer = { 'basic_transformer' : { 'name' : 'PhoneBasic', # django的Model类名称 'path' : 'raw_data/members/transactions:0/basic', #json数据的路径 'data_type' : 'map', # 数据的类型如果是单条就是map,如果是多条就是list 'version' : True, # 是否使用版本控制,如果是真那么每次拉数据会新增版本号,否则都用版本1 'trans' : { #数据的转化格式 source_field(json) -> dest_field(db model) "cell_phone": "cell_phone", "idcard": "idcard", "real_name": "real_name", "reg_time": "reg_time", "update_time": "update_time", "receiver" : { #如果是外键就用一个嵌套的格式来表示 (嵌套就没必要再用path定位了吧,默认就是当前路径) "name" : "Receiver" "req_call_cnt/data_type" : "list" "version" : True, "trans": { "name" : "name", "phone_num_list" : "phone_num_list", "amount" : "amount", "count" : "count", }, }, }, }, } ''' pass def test(self,user,data): if not data: return ERR_GET_RAW_DATA_FAILED ret_code = self._save_raw_data(data, user, self._options) return ret_code def get_juxinli_data(self, uid, url): try: user = User.objects.get(pk=uid) token = self._get_token() if not token: return ERR_CREATE_TOKEN_FAILED data = self._get_juxinli_data(token, user, url) if not data: return ERR_GET_RAW_DATA_FAILED ret_code = self._save_raw_data(data, user, self._options) if ret_code != 0: return ret_code #data = self._get_report_data(token, user) #print data #print "@@ print ret", ret_code return RETURN_SUCCESS except Exception, e: traceback.print_exc() TkLog().error("get juxinli call failed %s" % str(e)) return ERR_OTHER_EXCEPTION def _open_url(self, url): ''' get http request return json ''' req1 = urllib2.Request(url=url) html = urllib2.urlopen(req1).read().decode('utf-8') return json.loads(html.encode("utf-8")) def _get_token(self): ''' 生成一个新的用来获取数据的token 失败返回None ''' url = u"%s?client_secret=%s&hours=24&org_name=%s" % (self._access_report_token_api, self._client_secret, self._org_name) html = self._open_url(url) #if try: res = html['access_token'] return res except KeyError, e: return None def _get_juxinli_data(self, access_token, user, url): ''' 获取聚信力数据 返回json ''' raw_url = u'%s?client_secret=%s&access_token=%s&name=%s&idcard=%s&phone=%s' % (url, self._client_secret, access_token, user.name, user.id_no, user.phone_no) #print raw_url try: res = self._open_url(raw_url.encode('utf-8')) # print res # print res['raw_data']['members']['error_msg'] success = res["success"] if success != "true": return None return res except KeyError, e: return None #def _get_report_data(self, access_token, user): # report_url = u'%s?client_secret=%s&access_token=%s&name=%s&idcard=%s&phone=%s' % (self._access_report_token_api, self._client_secret, access_token, user.name, user.id_no, user.phone_no) # print report_url # res = self._open_url(report_url.encode('utf-8')) # #print res # #print res['raw_data']['members']['error_msg'] # return res def _allow_overwrite_data(self, user, options): return True def _get_data_from_path(self, data, path): ''' path语法 / 分割路径 : 选择list中的序号 ''' try: fields = path.split("/") #print fields res = data for field in fields: if field.find(":") != -1: parts = field.split(":") if len(parts) != 2: TkLog().error("field format error %s" % (field)) return None res = res[parts[0]][int(parts[1])] else: res = res[field] 
return res except Exception, e: print e traceback.print_exc() TkLog().error("get data from path failed %s" % str(e)) return None def _save_raw_data(self, data, user, options): """ 可以重入,一个用户的信息如果更新时间少于options.update_days天,不会更新db,否则添加记录 """ if not self._allow_overwrite_data(user, options): return RETURN_CAN_NOT_OVERWRITE for transtype in self._transformer.keys(): adaptor = self._transformer[transtype] cls = eval(adaptor["name"]) version = 0 objs = cls.objects.filter(owner=user).order_by('-id')[:1] if len(objs) == 1: version = objs[0].version TkLog().info("update %s version %d" % (adaptor["name"], version)) data_list = self._get_data_from_path(
if not data_list: TkLog().warn("data not found %s:%s" % (adaptor["name"], adaptor["path"])) #return -4 #just skip ret_code = self._save_obj(data_list, cls, user, adaptor, version) if ret_code != 0: return ret_code return RETURN_SUCCESS @transaction.commit_manually def _save_obj(self, data_list, cls, user, adaptor, version=0, parent=None): ''' 将一个对象写入数据库 根据data_type来判断是map还是list ''' if adaptor["data_type"] == "list": #data_list是列表数据 for record in data_list: ret_code = self._save_single_obj(record, cls, user, adaptor, version, parent) if ret_code != 0: return ret_code elif adaptor["data_type"] == "map": #data_list是单条数据 record = data_list ret_code = self._save_single_obj(record, cls, user, adaptor, version, parent) if ret_code != 0: return ret_code transaction.commit() return 0 def _save_single_obj(self, record, cls, user, adaptor, version = 0, parent=None): ''' 将一个条目写入数据库,如果parent不为空,还需要设置parent的外键 record : 单条json数据条目 cls : 数据库Model ''' obj = cls() for source_field, dest_field in adaptor['trans'].items(): if isinstance(dest_field,str): field_type = obj._meta.get_field(dest_field) if "/" in source_field: record[source_field] = self._get_data_from_path(record,source_field) if isinstance(field_type, models.CharField): try: if isinstance(record[source_field],list): #setattr(obj, dest_field, "#".join(record[source_field])) setattr(obj, dest_field, record[source_field][0]) else: setattr(obj, dest_field, record[source_field]) except Exception, e: TkLog().warn("set char field failed %s %s" % (str(e), record[source_field])) return ERR_SETATTR_FAILED elif isinstance(field_type, models.IntegerField): try: if not record[source_field]: setattr(obj, dest_field, 0) else: setattr(obj, dest_field, int(record[source_field])) except Exception, e: TkLog().warn("set int field failed %s %s" % (str(e), record[source_field])) return ERR_SETATTR_FAILED elif isinstance(field_type, models.BigIntegerField): try: if not record[source_field]: setattr(obj
data, adaptor["path"])
conditional_block
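_save_single_obj above walks the adaptor's trans mapping and coerces each JSON value to the Django field type: a list going into a CharField is collapsed to its first element, and an empty value going into an IntegerField becomes 0. A Django-free sketch of that coercion step, using a plain dict of field kinds as a stand-in for obj._meta.get_field():

# Django-free sketch of the per-field coercion in _save_single_obj.
# FIELD_TYPES stands in for obj._meta.get_field(); values are illustrative.
FIELD_TYPES = {"real_name": "char", "cell_phone": "char", "count": "int"}

def coerce(record, trans):
    out = {}
    for source_field, dest_field in trans.items():
        value = record.get(source_field)
        kind = FIELD_TYPES[dest_field]
        if kind == "char":
            # Lists are collapsed to their first element, as in the original.
            out[dest_field] = value[0] if isinstance(value, list) else value
        elif kind == "int":
            # Empty/None becomes 0, otherwise cast to int.
            out[dest_field] = int(value) if value else 0
    return out

record = {"real_name": "Alice", "cell_phone": ["13800000000"], "count": ""}
trans = {"real_name": "real_name", "cell_phone": "cell_phone", "count": "count"}
print(coerce(record, trans))
# -> {'real_name': 'Alice', 'cell_phone': '13800000000', 'count': 0}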
rights.go
// EndorsingRight holds simplified information about the right to endorse // a specific Tezos block type EndorsingRight struct { Delegate string Level int64 Power int } func (r EndorsingRight) Address() tezos.Address { a, _ := tezos.ParseAddress(r.Delegate) return a } type StakeInfo struct { ActiveStake int64 `json:"active_stake,string"` Baker tezos.Address `json:"baker"` } type SnapshotInfo struct { LastRoll []string `json:"last_roll"` Nonces []string `json:"nonces"` RandomSeed string `json:"random_seed"` RollSnapshot int `json:"roll_snapshot"` // until v011 Cycle int64 `json:"cycle"` // added, not part of RPC response BakerStake []StakeInfo `json:"selected_stake_distribution,omitempty"` // v012+ TotalStake int64 `json:"total_active_stake,string"` // v012+ // Slashed []??? "slashed_deposits" } type SnapshotIndex struct { Cycle int64 // the requested cycle that contains rights from the snapshot Base int64 // the cycle where the snapshot happened Index int // the index inside base where snapshot happened } type SnapshotOwners struct { Cycle int64 `json:"cycle"` Index int64 `json:"index"` Rolls []SnapshotRoll `json:"rolls"` } type SnapshotRoll struct { RollId int64 OwnerKey tezos.Key } func (r *SnapshotRoll) UnmarshalJSON(data []byte) error { if len(data) == 0 || bytes.Equal(data, []byte(`null`)) { return nil } if len(data) == 2 { return nil } if data[0] != '[' || data[len(data)-1] != ']' { return fmt.Errorf("SnapshotRoll: invalid json array '%s'", string(data)) } dec := json.NewDecoder(bytes.NewReader(data)) dec.UseNumber() unpacked := make([]any, 0) err := dec.Decode(&unpacked) if err != nil { return err } return r.decode(unpacked) } func (r *SnapshotRoll) decode(unpacked []any) error { if l := len(unpacked); l != 2 { return fmt.Errorf("SnapshotRoll: invalid json array len %d", l) } id, err := strconv.ParseInt(unpacked[0].(json.Number).String(), 10, 64) if err != nil { return fmt.Errorf("SnapshotRoll: invalid roll id: %v", err) } if err = r.OwnerKey.UnmarshalText([]byte(unpacked[1].(string))); err != nil { return err } r.RollId = id return nil } // ListBakingRights returns information about baking rights at block id. // Use max to set a max block priority (before Ithaca) or a max round (after Ithaca). func (c *Client) ListBakingRights(ctx context.Context, id BlockID, max int, p *Params) ([]BakingRight, error) { maxSelector := "max_priority=%d" if p.Version >= 12 && p.IsPreIthacaNetworkAtStart() { maxSelector = "max_round=%d" } if p.Version < 6 && p.IsPreIthacaNetworkAtStart() { max++ } rights := make([]BakingRight, 0) u := fmt.Sprintf("chains/main/blocks/%s/helpers/baking_rights?all=true&"+maxSelector, id, max) if err := c.Get(ctx, u, &rights); err != nil { return nil, err } return rights, nil } // ListBakingRightsCycle returns information about baking rights for an entire cycle // as seen from block id. Note block and cycle must be no further than preserved cycles // away from each other. Use max to set a max block priority (before Ithaca) or a max // round (after Ithaca). 
func (c *Client) ListBakingRightsCycle(ctx context.Context, id BlockID, cycle int64, max int, p *Params) ([]BakingRight, error) { maxSelector := "max_round=%d" if p.Version < 12 && p.IsPreIthacaNetworkAtStart() { maxSelector = "max_priority=%d" } if p.Version < 6 && p.IsPreIthacaNetworkAtStart() { max++ } rights := make([]BakingRight, 0) u := fmt.Sprintf("chains/main/blocks/%s/helpers/baking_rights?all=true&cycle=%d&"+maxSelector, id, cycle, max) if err := c.Get(ctx, u, &rights); err != nil { return nil, err } return rights, nil } // ListEndorsingRights returns information about block endorsing rights. func (c *Client) ListEndorsingRights(ctx context.Context, id BlockID, p *Params) ([]EndorsingRight, error) { u := fmt.Sprintf("chains/main/blocks/%s/helpers/endorsing_rights?all=true", id) rights := make([]EndorsingRight, 0) // Note: future cycles are seen from current protocol (!) if p.Version < 12 && p.IsPreIthacaNetworkAtStart() { type Rights struct { Level int64 `json:"level"` Delegate string `json:"delegate"` Slots []int `json:"slots"` } list := make([]Rights, 0) if err := c.Get(ctx, u, &list); err != nil { return nil, err } for _, r := range list { rights = append(rights, EndorsingRight{ Level: r.Level, Delegate: r.Delegate, Power: len(r.Slots), }) } } else { type V12Rights struct { Level int64 `json:"level"` Delegates []struct { Delegate string `json:"delegate"` Power int `json:"endorsing_power"` } `json:"delegates"` } v12rights := make([]V12Rights, 0) if err := c.Get(ctx, u, &v12rights); err != nil { return nil, err } for _, v := range v12rights { for _, r := range v.Delegates { rights = append(rights, EndorsingRight{ Level: v.Level, Delegate: r.Delegate, Power: r.Power, }) } } } return rights, nil } // ListEndorsingRightsCycle returns information about endorsing rights for an entire cycle // as seen from block id. Note block and cycle must be no further than preserved cycles // away. On protocol changes future rights must be refetched! func (c *Client) ListEndorsingRightsCycle(ctx context.Context, id BlockID, cycle int64, p *Params) ([]EndorsingRight, error) { u := fmt.Sprintf("chains/main/blocks/%s/helpers/endorsing_rights?all=true&cycle=%d", id, cycle) rights := make([]EndorsingRight, 0) // switch { case p.Version < 12 && p.IsPreIthacaNetworkAtStart(): // until Ithaca v012 type Rights struct { Level int64 `json:"level"` Delegate string `json:"delegate"` Slots []int `json:"slots"` } list := make([]Rights, 0) if err := c.Get(ctx, u, &list); err != nil { return nil, err } for _, r := range list { rights = append(rights, EndorsingRight{ Level: r.Level, Delegate: r.Delegate, Power: len(r.Slots), }) } default: // FIXME: it seems this is still not removed // case p.Version >= 12 && p.Version <= 15: // until Lima v015 type V12Rights struct { Level int64 `json:"level"` Delegates []struct { Delegate string `json:"delegate"` Power int `json:"endorsing_power"` } `json:"delegates"` } v12rights := make([]V12Rights, 0) if err := c.Get(ctx, u
{ type FullBakingRight struct { Delegate string `json:"delegate"` Level int64 `json:"level"` Priority int `json:"priority"` // until v011 Round int `json:"round"` // v012+ } var rr FullBakingRight err := json.Unmarshal(data, &rr) r.Delegate = rr.Delegate r.Level = rr.Level r.Round = rr.Priority + rr.Round return err }
identifier_body
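Per the doc comments on ListBakingRights and ListBakingRightsCycle above, the query parameter is max_priority on pre-Ithaca protocols and max_round from v012 on, and max is bumped by one before protocol v006. A tiny sketch of that URL selection, with the version checks reduced to plain integers rather than the Params helper used in the Go code:

# Sketch of the query construction described in the ListBakingRights doc
# comment: pre-Ithaca protocols take max_priority, v012+ takes max_round,
# and protocols before v006 need max incremented by one.
def baking_rights_url(block_id, max_value, version, cycle=None):
    selector = "max_priority" if version < 12 else "max_round"
    if version < 6:
        max_value += 1
    url = "chains/main/blocks/{}/helpers/baking_rights?all=true".format(block_id)
    if cycle is not None:
        url += "&cycle={}".format(cycle)
    return "{}&{}={}".format(url, selector, max_value)

print(baking_rights_url("head", 4, version=11))           # ...&max_priority=4
print(baking_rights_url("head", 4, version=12, cycle=7))  # ...&cycle=7&max_round=4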
rights.go
012+ // Slashed []??? "slashed_deposits" } type SnapshotIndex struct { Cycle int64 // the requested cycle that contains rights from the snapshot Base int64 // the cycle where the snapshot happened Index int // the index inside base where snapshot happened } type SnapshotOwners struct { Cycle int64 `json:"cycle"` Index int64 `json:"index"` Rolls []SnapshotRoll `json:"rolls"` } type SnapshotRoll struct { RollId int64 OwnerKey tezos.Key } func (r *SnapshotRoll) UnmarshalJSON(data []byte) error { if len(data) == 0 || bytes.Equal(data, []byte(`null`)) { return nil } if len(data) == 2 { return nil } if data[0] != '[' || data[len(data)-1] != ']' { return fmt.Errorf("SnapshotRoll: invalid json array '%s'", string(data)) } dec := json.NewDecoder(bytes.NewReader(data)) dec.UseNumber() unpacked := make([]any, 0) err := dec.Decode(&unpacked) if err != nil { return err } return r.decode(unpacked) } func (r *SnapshotRoll) decode(unpacked []any) error { if l := len(unpacked); l != 2 { return fmt.Errorf("SnapshotRoll: invalid json array len %d", l) } id, err := strconv.ParseInt(unpacked[0].(json.Number).String(), 10, 64) if err != nil { return fmt.Errorf("SnapshotRoll: invalid roll id: %v", err) } if err = r.OwnerKey.UnmarshalText([]byte(unpacked[1].(string))); err != nil { return err } r.RollId = id return nil } // ListBakingRights returns information about baking rights at block id. // Use max to set a max block priority (before Ithaca) or a max round (after Ithaca). func (c *Client) ListBakingRights(ctx context.Context, id BlockID, max int, p *Params) ([]BakingRight, error) { maxSelector := "max_priority=%d" if p.Version >= 12 && p.IsPreIthacaNetworkAtStart() { maxSelector = "max_round=%d" } if p.Version < 6 && p.IsPreIthacaNetworkAtStart() { max++ } rights := make([]BakingRight, 0) u := fmt.Sprintf("chains/main/blocks/%s/helpers/baking_rights?all=true&"+maxSelector, id, max) if err := c.Get(ctx, u, &rights); err != nil { return nil, err } return rights, nil } // ListBakingRightsCycle returns information about baking rights for an entire cycle // as seen from block id. Note block and cycle must be no further than preserved cycles // away from each other. Use max to set a max block priority (before Ithaca) or a max // round (after Ithaca). func (c *Client) ListBakingRightsCycle(ctx context.Context, id BlockID, cycle int64, max int, p *Params) ([]BakingRight, error) { maxSelector := "max_round=%d" if p.Version < 12 && p.IsPreIthacaNetworkAtStart() { maxSelector = "max_priority=%d" } if p.Version < 6 && p.IsPreIthacaNetworkAtStart() { max++ } rights := make([]BakingRight, 0) u := fmt.Sprintf("chains/main/blocks/%s/helpers/baking_rights?all=true&cycle=%d&"+maxSelector, id, cycle, max) if err := c.Get(ctx, u, &rights); err != nil { return nil, err } return rights, nil } // ListEndorsingRights returns information about block endorsing rights. func (c *Client) ListEndorsingRights(ctx context.Context, id BlockID, p *Params) ([]EndorsingRight, error) { u := fmt.Sprintf("chains/main/blocks/%s/helpers/endorsing_rights?all=true", id) rights := make([]EndorsingRight, 0) // Note: future cycles are seen from current protocol (!) if p.Version < 12 && p.IsPreIthacaNetworkAtStart() { type Rights struct { Level int64 `json:"level"` Delegate string `json:"delegate"` Slots []int `json:"slots"` } list := make([]Rights, 0) if err := c.Get(ctx, u, &list); err != nil
for _, r := range list { rights = append(rights, EndorsingRight{ Level: r.Level, Delegate: r.Delegate, Power: len(r.Slots), }) } } else { type V12Rights struct { Level int64 `json:"level"` Delegates []struct { Delegate string `json:"delegate"` Power int `json:"endorsing_power"` } `json:"delegates"` } v12rights := make([]V12Rights, 0) if err := c.Get(ctx, u, &v12rights); err != nil { return nil, err } for _, v := range v12rights { for _, r := range v.Delegates { rights = append(rights, EndorsingRight{ Level: v.Level, Delegate: r.Delegate, Power: r.Power, }) } } } return rights, nil } // ListEndorsingRightsCycle returns information about endorsing rights for an entire cycle // as seen from block id. Note block and cycle must be no further than preserved cycles // away. On protocol changes future rights must be refetched! func (c *Client) ListEndorsingRightsCycle(ctx context.Context, id BlockID, cycle int64, p *Params) ([]EndorsingRight, error) { u := fmt.Sprintf("chains/main/blocks/%s/helpers/endorsing_rights?all=true&cycle=%d", id, cycle) rights := make([]EndorsingRight, 0) // switch { case p.Version < 12 && p.IsPreIthacaNetworkAtStart(): // until Ithaca v012 type Rights struct { Level int64 `json:"level"` Delegate string `json:"delegate"` Slots []int `json:"slots"` } list := make([]Rights, 0) if err := c.Get(ctx, u, &list); err != nil { return nil, err } for _, r := range list { rights = append(rights, EndorsingRight{ Level: r.Level, Delegate: r.Delegate, Power: len(r.Slots), }) } default: // FIXME: it seems this is still not removed // case p.Version >= 12 && p.Version <= 15: // until Lima v015 type V12Rights struct { Level int64 `json:"level"` Delegates []struct { Delegate string `json:"delegate"` Power int `json:"endorsing_power"` } `json:"delegates"` } v12rights := make([]V12Rights, 0) if err := c.Get(ctx, u, &v12rights); err != nil { return nil, err } for _, v := range v12rights { for _, r := range v.Delegates { rights = append(rights, EndorsingRight{ Level: v.Level, Delegate: r.Delegate, Power: r.Power, }) } } // default: // Lima+ v016 (cannot fetch full cycle of endorsing rights) // TODO: fetch per block in parallel } return rights, nil } // GetSnapshotInfoCycle returns information about a roll snapshot as seen from block id. // Note block and cycle must be no further than preserved cycles away. func (c *Client) GetSnapshotInfoCycle(ctx context.Context, id BlockID, cycle int64) (*SnapshotInfo, error) { idx := &SnapshotInfo{ Cycle: cycle, RollSnapshot: -1, } u := fmt.Sprintf("chains/main/blocks/%s/context/raw/json/cycle/%d", id, cycle) if err := c.Get(ctx, u, idx); err != nil { return nil, err } if idx.RandomSeed == "" { return nil, fmt.Errorf("missing snapshot for cycle %d at block %s", cycle, id) } return idx, nil } // GetSnapshotIndexCycle returns information about a roll snapshot as seen from block id. // Note block and cycle must be no further than preserved cycles away. func (c *Client) GetSnapshotIndexCycle(ctx context.Context, id BlockID, cycle
{ return nil, err }
conditional_block
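SnapshotRoll.UnmarshalJSON above accepts a two-element JSON array [roll_id, owner_key], rejects any other shape, and parses the id as an integer. A small sketch of the same shape check, with a plain string standing in for the tezos key type:

# Sketch of SnapshotRoll decoding: a two-element JSON array [roll_id, owner_key].
import json

def decode_snapshot_roll(raw):
    unpacked = json.loads(raw)
    if not isinstance(unpacked, list):
        raise ValueError("SnapshotRoll: expected a JSON array")
    if len(unpacked) != 2:
        raise ValueError("SnapshotRoll: invalid json array len %d" % len(unpacked))
    roll_id = int(unpacked[0])
    owner_key = unpacked[1]  # the Go code parses this into a tezos.Key
    return roll_id, owner_key

print(decode_snapshot_roll('[123, "edpk..."]'))
# -> (123, 'edpk...')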
rights.go
dec.UseNumber() unpacked := make([]any, 0) err := dec.Decode(&unpacked) if err != nil { return err } return r.decode(unpacked) } func (r *SnapshotRoll) decode(unpacked []any) error { if l := len(unpacked); l != 2 { return fmt.Errorf("SnapshotRoll: invalid json array len %d", l) } id, err := strconv.ParseInt(unpacked[0].(json.Number).String(), 10, 64) if err != nil { return fmt.Errorf("SnapshotRoll: invalid roll id: %v", err) } if err = r.OwnerKey.UnmarshalText([]byte(unpacked[1].(string))); err != nil { return err } r.RollId = id return nil } // ListBakingRights returns information about baking rights at block id. // Use max to set a max block priority (before Ithaca) or a max round (after Ithaca). func (c *Client) ListBakingRights(ctx context.Context, id BlockID, max int, p *Params) ([]BakingRight, error) { maxSelector := "max_priority=%d" if p.Version >= 12 && p.IsPreIthacaNetworkAtStart() { maxSelector = "max_round=%d" } if p.Version < 6 && p.IsPreIthacaNetworkAtStart() { max++ } rights := make([]BakingRight, 0) u := fmt.Sprintf("chains/main/blocks/%s/helpers/baking_rights?all=true&"+maxSelector, id, max) if err := c.Get(ctx, u, &rights); err != nil { return nil, err } return rights, nil } // ListBakingRightsCycle returns information about baking rights for an entire cycle // as seen from block id. Note block and cycle must be no further than preserved cycles // away from each other. Use max to set a max block priority (before Ithaca) or a max // round (after Ithaca). func (c *Client) ListBakingRightsCycle(ctx context.Context, id BlockID, cycle int64, max int, p *Params) ([]BakingRight, error) { maxSelector := "max_round=%d" if p.Version < 12 && p.IsPreIthacaNetworkAtStart() { maxSelector = "max_priority=%d" } if p.Version < 6 && p.IsPreIthacaNetworkAtStart() { max++ } rights := make([]BakingRight, 0) u := fmt.Sprintf("chains/main/blocks/%s/helpers/baking_rights?all=true&cycle=%d&"+maxSelector, id, cycle, max) if err := c.Get(ctx, u, &rights); err != nil { return nil, err } return rights, nil } // ListEndorsingRights returns information about block endorsing rights. func (c *Client) ListEndorsingRights(ctx context.Context, id BlockID, p *Params) ([]EndorsingRight, error) { u := fmt.Sprintf("chains/main/blocks/%s/helpers/endorsing_rights?all=true", id) rights := make([]EndorsingRight, 0) // Note: future cycles are seen from current protocol (!) if p.Version < 12 && p.IsPreIthacaNetworkAtStart() { type Rights struct { Level int64 `json:"level"` Delegate string `json:"delegate"` Slots []int `json:"slots"` } list := make([]Rights, 0) if err := c.Get(ctx, u, &list); err != nil { return nil, err } for _, r := range list { rights = append(rights, EndorsingRight{ Level: r.Level, Delegate: r.Delegate, Power: len(r.Slots), }) } } else { type V12Rights struct { Level int64 `json:"level"` Delegates []struct { Delegate string `json:"delegate"` Power int `json:"endorsing_power"` } `json:"delegates"` } v12rights := make([]V12Rights, 0) if err := c.Get(ctx, u, &v12rights); err != nil { return nil, err } for _, v := range v12rights { for _, r := range v.Delegates { rights = append(rights, EndorsingRight{ Level: v.Level, Delegate: r.Delegate, Power: r.Power, }) } } } return rights, nil } // ListEndorsingRightsCycle returns information about endorsing rights for an entire cycle // as seen from block id. Note block and cycle must be no further than preserved cycles // away. On protocol changes future rights must be refetched! 
func (c *Client) ListEndorsingRightsCycle(ctx context.Context, id BlockID, cycle int64, p *Params) ([]EndorsingRight, error) { u := fmt.Sprintf("chains/main/blocks/%s/helpers/endorsing_rights?all=true&cycle=%d", id, cycle) rights := make([]EndorsingRight, 0) // switch { case p.Version < 12 && p.IsPreIthacaNetworkAtStart(): // until Ithaca v012 type Rights struct { Level int64 `json:"level"` Delegate string `json:"delegate"` Slots []int `json:"slots"` } list := make([]Rights, 0) if err := c.Get(ctx, u, &list); err != nil { return nil, err } for _, r := range list { rights = append(rights, EndorsingRight{ Level: r.Level, Delegate: r.Delegate, Power: len(r.Slots), }) } default: // FIXME: it seems this is still not removed // case p.Version >= 12 && p.Version <= 15: // until Lima v015 type V12Rights struct { Level int64 `json:"level"` Delegates []struct { Delegate string `json:"delegate"` Power int `json:"endorsing_power"` } `json:"delegates"` } v12rights := make([]V12Rights, 0) if err := c.Get(ctx, u, &v12rights); err != nil { return nil, err } for _, v := range v12rights { for _, r := range v.Delegates { rights = append(rights, EndorsingRight{ Level: v.Level, Delegate: r.Delegate, Power: r.Power, }) } } // default: // Lima+ v016 (cannot fetch full cycle of endorsing rights) // TODO: fetch per block in parallel } return rights, nil } // GetSnapshotInfoCycle returns information about a roll snapshot as seen from block id. // Note block and cycle must be no further than preserved cycles away. func (c *Client) GetSnapshotInfoCycle(ctx context.Context, id BlockID, cycle int64) (*SnapshotInfo, error) { idx := &SnapshotInfo{ Cycle: cycle, RollSnapshot: -1, } u := fmt.Sprintf("chains/main/blocks/%s/context/raw/json/cycle/%d", id, cycle) if err := c.Get(ctx, u, idx); err != nil { return nil, err } if idx.RandomSeed == "" { return nil, fmt.Errorf("missing snapshot for cycle %d at block %s", cycle, id) } return idx, nil } // GetSnapshotIndexCycle returns information about a roll snapshot as seen from block id. // Note block and cycle must be no further than preserved cycles away. func (c *Client) GetSnapshotIndexCycle(ctx context.Context, id BlockID, cycle int64, p *Params) (*SnapshotIndex, error) { idx := &SnapshotIndex{} if p.Version < 12 && p.IsPreIthacaNetworkAtStart() { // pre-Ithaca we can at most look PRESERVED_CYCLES into the future since // the snapshot happened 2 cycles back from the block we're looking from. var info SnapshotInfo u := fmt.Sprintf("chains/main/blocks/%s/context/raw/json/cycle/%d", id, cycle) // log.Infof("GET %s", u) if err := c.Get(ctx, u, &info); err != nil { return nil, err } if info.RandomSeed == "" { return nil, fmt.Errorf("missing snapshot for cycle %d at block %s", cycle, id) } idx.Cycle = cycle
idx.Base = p.SnapshotBaseCycle(cycle) idx.Index = info.RollSnapshot } else { idx.Cycle = cycle idx.Base = p.SnapshotBaseCycle(cycle)
random_line_split
rights.go
012+ // Slashed []??? "slashed_deposits" } type SnapshotIndex struct { Cycle int64 // the requested cycle that contains rights from the snapshot Base int64 // the cycle where the snapshot happened Index int // the index inside base where snapshot happened } type SnapshotOwners struct { Cycle int64 `json:"cycle"` Index int64 `json:"index"` Rolls []SnapshotRoll `json:"rolls"` } type SnapshotRoll struct { RollId int64 OwnerKey tezos.Key } func (r *SnapshotRoll) UnmarshalJSON(data []byte) error { if len(data) == 0 || bytes.Equal(data, []byte(`null`)) { return nil } if len(data) == 2 { return nil } if data[0] != '[' || data[len(data)-1] != ']' { return fmt.Errorf("SnapshotRoll: invalid json array '%s'", string(data)) } dec := json.NewDecoder(bytes.NewReader(data)) dec.UseNumber() unpacked := make([]any, 0) err := dec.Decode(&unpacked) if err != nil { return err } return r.decode(unpacked) } func (r *SnapshotRoll) decode(unpacked []any) error { if l := len(unpacked); l != 2 { return fmt.Errorf("SnapshotRoll: invalid json array len %d", l) } id, err := strconv.ParseInt(unpacked[0].(json.Number).String(), 10, 64) if err != nil { return fmt.Errorf("SnapshotRoll: invalid roll id: %v", err) } if err = r.OwnerKey.UnmarshalText([]byte(unpacked[1].(string))); err != nil { return err } r.RollId = id return nil } // ListBakingRights returns information about baking rights at block id. // Use max to set a max block priority (before Ithaca) or a max round (after Ithaca). func (c *Client) ListBakingRights(ctx context.Context, id BlockID, max int, p *Params) ([]BakingRight, error) { maxSelector := "max_priority=%d" if p.Version >= 12 && p.IsPreIthacaNetworkAtStart() { maxSelector = "max_round=%d" } if p.Version < 6 && p.IsPreIthacaNetworkAtStart() { max++ } rights := make([]BakingRight, 0) u := fmt.Sprintf("chains/main/blocks/%s/helpers/baking_rights?all=true&"+maxSelector, id, max) if err := c.Get(ctx, u, &rights); err != nil { return nil, err } return rights, nil } // ListBakingRightsCycle returns information about baking rights for an entire cycle // as seen from block id. Note block and cycle must be no further than preserved cycles // away from each other. Use max to set a max block priority (before Ithaca) or a max // round (after Ithaca). func (c *Client) ListBakingRightsCycle(ctx context.Context, id BlockID, cycle int64, max int, p *Params) ([]BakingRight, error) { maxSelector := "max_round=%d" if p.Version < 12 && p.IsPreIthacaNetworkAtStart() { maxSelector = "max_priority=%d" } if p.Version < 6 && p.IsPreIthacaNetworkAtStart() { max++ } rights := make([]BakingRight, 0) u := fmt.Sprintf("chains/main/blocks/%s/helpers/baking_rights?all=true&cycle=%d&"+maxSelector, id, cycle, max) if err := c.Get(ctx, u, &rights); err != nil { return nil, err } return rights, nil } // ListEndorsingRights returns information about block endorsing rights. func (c *Client)
(ctx context.Context, id BlockID, p *Params) ([]EndorsingRight, error) { u := fmt.Sprintf("chains/main/blocks/%s/helpers/endorsing_rights?all=true", id) rights := make([]EndorsingRight, 0) // Note: future cycles are seen from current protocol (!) if p.Version < 12 && p.IsPreIthacaNetworkAtStart() { type Rights struct { Level int64 `json:"level"` Delegate string `json:"delegate"` Slots []int `json:"slots"` } list := make([]Rights, 0) if err := c.Get(ctx, u, &list); err != nil { return nil, err } for _, r := range list { rights = append(rights, EndorsingRight{ Level: r.Level, Delegate: r.Delegate, Power: len(r.Slots), }) } } else { type V12Rights struct { Level int64 `json:"level"` Delegates []struct { Delegate string `json:"delegate"` Power int `json:"endorsing_power"` } `json:"delegates"` } v12rights := make([]V12Rights, 0) if err := c.Get(ctx, u, &v12rights); err != nil { return nil, err } for _, v := range v12rights { for _, r := range v.Delegates { rights = append(rights, EndorsingRight{ Level: v.Level, Delegate: r.Delegate, Power: r.Power, }) } } } return rights, nil } // ListEndorsingRightsCycle returns information about endorsing rights for an entire cycle // as seen from block id. Note block and cycle must be no further than preserved cycles // away. On protocol changes future rights must be refetched! func (c *Client) ListEndorsingRightsCycle(ctx context.Context, id BlockID, cycle int64, p *Params) ([]EndorsingRight, error) { u := fmt.Sprintf("chains/main/blocks/%s/helpers/endorsing_rights?all=true&cycle=%d", id, cycle) rights := make([]EndorsingRight, 0) // switch { case p.Version < 12 && p.IsPreIthacaNetworkAtStart(): // until Ithaca v012 type Rights struct { Level int64 `json:"level"` Delegate string `json:"delegate"` Slots []int `json:"slots"` } list := make([]Rights, 0) if err := c.Get(ctx, u, &list); err != nil { return nil, err } for _, r := range list { rights = append(rights, EndorsingRight{ Level: r.Level, Delegate: r.Delegate, Power: len(r.Slots), }) } default: // FIXME: it seems this is still not removed // case p.Version >= 12 && p.Version <= 15: // until Lima v015 type V12Rights struct { Level int64 `json:"level"` Delegates []struct { Delegate string `json:"delegate"` Power int `json:"endorsing_power"` } `json:"delegates"` } v12rights := make([]V12Rights, 0) if err := c.Get(ctx, u, &v12rights); err != nil { return nil, err } for _, v := range v12rights { for _, r := range v.Delegates { rights = append(rights, EndorsingRight{ Level: v.Level, Delegate: r.Delegate, Power: r.Power, }) } } // default: // Lima+ v016 (cannot fetch full cycle of endorsing rights) // TODO: fetch per block in parallel } return rights, nil } // GetSnapshotInfoCycle returns information about a roll snapshot as seen from block id. // Note block and cycle must be no further than preserved cycles away. func (c *Client) GetSnapshotInfoCycle(ctx context.Context, id BlockID, cycle int64) (*SnapshotInfo, error) { idx := &SnapshotInfo{ Cycle: cycle, RollSnapshot: -1, } u := fmt.Sprintf("chains/main/blocks/%s/context/raw/json/cycle/%d", id, cycle) if err := c.Get(ctx, u, idx); err != nil { return nil, err } if idx.RandomSeed == "" { return nil, fmt.Errorf("missing snapshot for cycle %d at block %s", cycle, id) } return idx, nil } // GetSnapshotIndexCycle returns information about a roll snapshot as seen from block id. // Note block and cycle must be no further than preserved cycles away. func (c *Client) GetSnapshotIndexCycle(ctx context.Context, id BlockID, cycle int
ListEndorsingRights
identifier_name
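ListEndorsingRights and ListEndorsingRightsCycle above normalise two response shapes into the same (level, delegate, power) triple: pre-Ithaca entries carry a slots array whose length is the power, while v012+ entries group delegates under a level with an explicit endorsing_power. A sketch of that normalisation over already-decoded JSON:

# Normalising the two endorsing-rights response shapes described above.
def normalize_endorsing_rights(entries, pre_ithaca):
    rights = []
    if pre_ithaca:
        # Pre-v012: one entry per delegate, power == number of slots.
        for r in entries:
            rights.append({"level": r["level"], "delegate": r["delegate"],
                           "power": len(r["slots"])})
    else:
        # v012+: delegates grouped under a level with explicit endorsing_power.
        for v in entries:
            for r in v["delegates"]:
                rights.append({"level": v["level"], "delegate": r["delegate"],
                               "power": r["endorsing_power"]})
    return rights

old = [{"level": 100, "delegate": "tz1...", "slots": [0, 5, 9]}]
new = [{"level": 100, "delegates": [{"delegate": "tz1...", "endorsing_power": 3}]}]
print(normalize_endorsing_rights(old, pre_ithaca=True))
print(normalize_endorsing_rights(new, pre_ithaca=False))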
lib.rs
let mut buf = [0; 80]; buf.copy_from_slice(&secretbox::seal(skey.as_ref(), &nonce, &passwd_key)); *self = SKey::Cipher(buf); } } } fn decrypt(&mut self, passwd: Passwd, salt: pwhash::Salt, nonce: secretbox::Nonce) -> Result<(), Error> { if let SKey::Cipher(ciphertext) = self { if let Some(passwd_key) = passwd.gen_key(salt) { let skey_plain = secretbox::open(ciphertext.as_ref(), &nonce, &passwd_key) .map_err(|_| ErrorKind::PassphraseIncorrect )?; *self = SKey::Plain(sign::SecretKey::from_slice(&skey_plain) .ok_or(ErrorKind::KeyInvalid)?); } else { *self = SKey::Plain(sign::SecretKey::from_slice(&ciphertext[..64]) .ok_or(ErrorKind::KeyInvalid)?); } } Ok(()) } /// Returns `None` if encrypted fn skey(&self) -> Option<sign::SecretKey> { match &self { SKey::Plain(skey) => Some(skey.clone()), SKey::Cipher(_) => None, } } } impl AsRef<[u8]> for SKey { fn as_ref(&self) -> &[u8] { match self { SKey::Cipher(buf) => buf.as_ref(), SKey::Plain(skey) => skey.as_ref(), } } } impl FromHex for SKey { type Error = hex::FromHexError; fn from_hex<T: AsRef<[u8]>>(buf: T) -> Result<SKey, hex::FromHexError> { let bytes = hex::decode(buf)?; // Public key is only 64 bytes... if bytes.len() == 64 { Ok(SKey::Plain(sign::SecretKey::from_slice(&bytes) .expect("Somehow not the right number of bytes"))) } else { let mut buf = [0; 80]; buf.copy_from_slice(&bytes); Ok(SKey::Cipher(buf)) } } } /// Standard pkgar private key format definition. Use serde. /// Internally, this struct stores the encrypted state of the private key as an enum. /// Manipulate the state using the `encrypt()`, `decrypt()` and `is_encrypted()`. #[derive(Deserialize, Serialize)] pub struct SecretKeyFile { #[serde(serialize_with = "hex::serialize", deserialize_with = "ser::to_salt")] salt: pwhash::Salt, #[serde(serialize_with = "hex::serialize", deserialize_with = "ser::to_nonce")] nonce: secretbox::Nonce, #[serde(with = "hex")] skey: SKey, } impl SecretKeyFile { /// Generate a keypair with all the nessesary info to save both keys. You /// must call `save()` on each object to persist them to disk. pub fn new() -> (PublicKeyFile, SecretKeyFile) { let (pkey, skey) = sign::gen_keypair(); let pkey_file = PublicKeyFile { pkey }; let skey_file = SecretKeyFile { salt: pwhash::gen_salt(), nonce: secretbox::gen_nonce(), skey: SKey::Plain(skey), }; (pkey_file, skey_file) } /// Parse a `SecretKeyFile` from `file` (in toml format). pub fn open(file: impl AsRef<Path>) -> Result<SecretKeyFile, Error> { let content = fs::read_to_string(&file) .chain_err(|| file.as_ref() )?; toml::from_str(&content) .chain_err(|| file.as_ref() ) } /// Write `self` serialized as toml to `w`. pub fn write(&self, mut w: impl Write) -> Result<(), Error> { w.write_all(toml::to_string(&self)?.as_bytes())?; Ok(()) } /// Shortcut to write the secret key to `file`. /// /// Make sure to call `encrypt()` in order to encrypt /// the private key, otherwise it will be stored as plain text. pub fn save(&self, file: impl AsRef<Path>) -> Result<(), Error> { self.write( OpenOptions::new() .write(true) .create(true) .mode(0o600) .open(&file) .chain_err(|| file.as_ref() )? ).chain_err(|| file.as_ref() ) } /// Ensure that the internal state of this struct is encrypted. /// Note that if passwd is empty, this function is a no-op. pub fn encrypt(&mut self, passwd: Passwd) { self.skey.encrypt(passwd, self.salt, self.nonce) } /// Ensure that the internal state of this struct is decrypted. /// If the internal state is already decrypted, this function is a no-op. 
pub fn decrypt(&mut self, passwd: Passwd) -> Result<(), Error> { self.skey.decrypt(passwd, self.salt, self.nonce) } /// Status of the internal state. pub fn is_encrypted(&self) -> bool { match self.skey { SKey::Cipher(_) => true, SKey::Plain(_) => false, } } /// Returns `None` if the secret key is encrypted. pub fn key(&mut self) -> Option<sign::SecretKey> { match &self.skey { SKey::Plain(skey) => Some(skey.clone()), SKey::Cipher(_) => None, } } /// Returns `None` if the secret key is encrypted. pub fn public_key_file(&self) -> Option<PublicKeyFile> { Some(PublicKeyFile { pkey: self.skey.skey()?.public_key(), }) } } /// Secure in-memory representation of a password. pub struct Passwd { bytes: SecBytes, } impl Passwd { /// Create a new `Passwd` and zero the old string. pub fn new(passwd: &mut String) -> Passwd { let pwd = Passwd { bytes :SecBytes::with( passwd.len(), |buf| buf.copy_from_slice(passwd.as_bytes()) ), }; unsafe { seckey::zero(passwd.as_bytes_mut()); } pwd } /// Prompt the user for a `Passwd` on stdin. pub fn prompt(prompt: impl AsRef<str>) -> Result<Passwd, Error> { let stdout = stdout(); let mut stdout = stdout.lock(); let stdin = stdin(); let mut stdin = stdin.lock(); stdout.write_all(prompt.as_ref().as_bytes())?; stdout.flush()?; let mut passwd = stdin.read_passwd(&mut stdout)? .ok_or(ErrorKind::Io( io::Error::new( io::ErrorKind::UnexpectedEof, "Invalid Password Input", ) ))?; println!(); Ok(Passwd::new(&mut passwd)) } /// Prompt for a password on stdin and confirm it. For configurable /// prompts, use [`Passwd::prompt`](struct.Passwd.html#method.prompt). pub fn prompt_new() -> Result<Passwd, Error> { let passwd = Passwd::prompt( "Please enter a new passphrase (leave empty to store the key in plaintext): " )?; let confirm = Passwd::prompt("Please re-enter the passphrase: ")?; if passwd != confirm { bail!(ErrorKind::PassphraseMismatch); } Ok(passwd) } /// Get a key for symmetric key encryption from a password. fn gen_key(&self, salt: pwhash::Salt) -> Option<secretbox::Key> { if self.bytes.read().len() > 0 { let mut key = secretbox::Key([0; secretbox::KEYBYTES]); let secretbox::Key(ref mut binary_key) = key; pwhash::derive_key( binary_key, &self.bytes.read(), &salt, pwhash::OPSLIMIT_INTERACTIVE, pwhash::MEMLIMIT_INTERACTIVE, ).expect("Failed to get key from password"); Some(key) } else { None } } } impl PartialEq for Passwd { fn eq(&self, other: &Passwd) -> bool { self.bytes.read().deref() == other.bytes.read().deref() } } impl Eq for Passwd {} /// Generate a new keypair. The new keys will be saved to `file`. The user /// will be prompted on stdin for a password, empty passwords will cause the /// secret key to be stored in plain text. Note that parent /// directories will not be created. pub fn gen_keypair(pkey_path: &Path, skey_path: &Path) -> Result<(PublicKeyFile, SecretKeyFile), Error> {
let passwd = Passwd::prompt_new() .chain_err(|| skey_path )?; let (pkey_file, mut skey_file) = SecretKeyFile::new();
random_line_split
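The FromHex impl for SKey above distinguishes a plaintext secret key from an encrypted one purely by length: 64 decoded bytes is a plain ed25519 secret key, while the 80-byte form is ciphertext (the 64-byte key plus the 16-byte secretbox MAC). A tiny sketch of that classification over a hex string; unlike the Rust code it rejects other lengths instead of assuming 80 bytes:

# Classify a hex-encoded skey field the way the FromHex impl above does:
# 64 decoded bytes -> plaintext ed25519 secret key, 80 -> ciphertext.
import binascii

def classify_skey(hex_str):
    raw = binascii.unhexlify(hex_str)
    if len(raw) == 64:
        return "plain", raw
    if len(raw) == 80:
        return "cipher", raw
    raise ValueError("unexpected skey length: %d" % len(raw))

print(classify_skey("00" * 64)[0])  # -> plain
print(classify_skey("00" * 80)[0])  # -> cipher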
lib.rs
) .ok_or(ErrorKind::KeyInvalid)?); } else { *self = SKey::Plain(sign::SecretKey::from_slice(&ciphertext[..64]) .ok_or(ErrorKind::KeyInvalid)?); } } Ok(()) } /// Returns `None` if encrypted fn skey(&self) -> Option<sign::SecretKey> { match &self { SKey::Plain(skey) => Some(skey.clone()), SKey::Cipher(_) => None, } } } impl AsRef<[u8]> for SKey { fn as_ref(&self) -> &[u8] { match self { SKey::Cipher(buf) => buf.as_ref(), SKey::Plain(skey) => skey.as_ref(), } } } impl FromHex for SKey { type Error = hex::FromHexError; fn from_hex<T: AsRef<[u8]>>(buf: T) -> Result<SKey, hex::FromHexError> { let bytes = hex::decode(buf)?; // Public key is only 64 bytes... if bytes.len() == 64 { Ok(SKey::Plain(sign::SecretKey::from_slice(&bytes) .expect("Somehow not the right number of bytes"))) } else { let mut buf = [0; 80]; buf.copy_from_slice(&bytes); Ok(SKey::Cipher(buf)) } } } /// Standard pkgar private key format definition. Use serde. /// Internally, this struct stores the encrypted state of the private key as an enum. /// Manipulate the state using the `encrypt()`, `decrypt()` and `is_encrypted()`. #[derive(Deserialize, Serialize)] pub struct SecretKeyFile { #[serde(serialize_with = "hex::serialize", deserialize_with = "ser::to_salt")] salt: pwhash::Salt, #[serde(serialize_with = "hex::serialize", deserialize_with = "ser::to_nonce")] nonce: secretbox::Nonce, #[serde(with = "hex")] skey: SKey, } impl SecretKeyFile { /// Generate a keypair with all the nessesary info to save both keys. You /// must call `save()` on each object to persist them to disk. pub fn new() -> (PublicKeyFile, SecretKeyFile) { let (pkey, skey) = sign::gen_keypair(); let pkey_file = PublicKeyFile { pkey }; let skey_file = SecretKeyFile { salt: pwhash::gen_salt(), nonce: secretbox::gen_nonce(), skey: SKey::Plain(skey), }; (pkey_file, skey_file) } /// Parse a `SecretKeyFile` from `file` (in toml format). pub fn open(file: impl AsRef<Path>) -> Result<SecretKeyFile, Error> { let content = fs::read_to_string(&file) .chain_err(|| file.as_ref() )?; toml::from_str(&content) .chain_err(|| file.as_ref() ) } /// Write `self` serialized as toml to `w`. pub fn write(&self, mut w: impl Write) -> Result<(), Error> { w.write_all(toml::to_string(&self)?.as_bytes())?; Ok(()) } /// Shortcut to write the secret key to `file`. /// /// Make sure to call `encrypt()` in order to encrypt /// the private key, otherwise it will be stored as plain text. pub fn save(&self, file: impl AsRef<Path>) -> Result<(), Error> { self.write( OpenOptions::new() .write(true) .create(true) .mode(0o600) .open(&file) .chain_err(|| file.as_ref() )? ).chain_err(|| file.as_ref() ) } /// Ensure that the internal state of this struct is encrypted. /// Note that if passwd is empty, this function is a no-op. pub fn encrypt(&mut self, passwd: Passwd) { self.skey.encrypt(passwd, self.salt, self.nonce) } /// Ensure that the internal state of this struct is decrypted. /// If the internal state is already decrypted, this function is a no-op. pub fn decrypt(&mut self, passwd: Passwd) -> Result<(), Error> { self.skey.decrypt(passwd, self.salt, self.nonce) } /// Status of the internal state. pub fn is_encrypted(&self) -> bool { match self.skey { SKey::Cipher(_) => true, SKey::Plain(_) => false, } } /// Returns `None` if the secret key is encrypted. pub fn key(&mut self) -> Option<sign::SecretKey> { match &self.skey { SKey::Plain(skey) => Some(skey.clone()), SKey::Cipher(_) => None, } } /// Returns `None` if the secret key is encrypted. 
pub fn public_key_file(&self) -> Option<PublicKeyFile> { Some(PublicKeyFile { pkey: self.skey.skey()?.public_key(), }) } } /// Secure in-memory representation of a password. pub struct Passwd { bytes: SecBytes, } impl Passwd { /// Create a new `Passwd` and zero the old string. pub fn new(passwd: &mut String) -> Passwd { let pwd = Passwd { bytes :SecBytes::with( passwd.len(), |buf| buf.copy_from_slice(passwd.as_bytes()) ), }; unsafe { seckey::zero(passwd.as_bytes_mut()); } pwd } /// Prompt the user for a `Passwd` on stdin. pub fn prompt(prompt: impl AsRef<str>) -> Result<Passwd, Error> { let stdout = stdout(); let mut stdout = stdout.lock(); let stdin = stdin(); let mut stdin = stdin.lock(); stdout.write_all(prompt.as_ref().as_bytes())?; stdout.flush()?; let mut passwd = stdin.read_passwd(&mut stdout)? .ok_or(ErrorKind::Io( io::Error::new( io::ErrorKind::UnexpectedEof, "Invalid Password Input", ) ))?; println!(); Ok(Passwd::new(&mut passwd)) } /// Prompt for a password on stdin and confirm it. For configurable /// prompts, use [`Passwd::prompt`](struct.Passwd.html#method.prompt). pub fn prompt_new() -> Result<Passwd, Error> { let passwd = Passwd::prompt( "Please enter a new passphrase (leave empty to store the key in plaintext): " )?; let confirm = Passwd::prompt("Please re-enter the passphrase: ")?; if passwd != confirm { bail!(ErrorKind::PassphraseMismatch); } Ok(passwd) } /// Get a key for symmetric key encryption from a password. fn gen_key(&self, salt: pwhash::Salt) -> Option<secretbox::Key> { if self.bytes.read().len() > 0 { let mut key = secretbox::Key([0; secretbox::KEYBYTES]); let secretbox::Key(ref mut binary_key) = key; pwhash::derive_key( binary_key, &self.bytes.read(), &salt, pwhash::OPSLIMIT_INTERACTIVE, pwhash::MEMLIMIT_INTERACTIVE, ).expect("Failed to get key from password"); Some(key) } else { None } } } impl PartialEq for Passwd { fn eq(&self, other: &Passwd) -> bool { self.bytes.read().deref() == other.bytes.read().deref() } } impl Eq for Passwd {} /// Generate a new keypair. The new keys will be saved to `file`. The user /// will be prompted on stdin for a password, empty passwords will cause the /// secret key to be stored in plain text. Note that parent /// directories will not be created. pub fn gen_keypair(pkey_path: &Path, skey_path: &Path) -> Result<(PublicKeyFile, SecretKeyFile), Error> { let passwd = Passwd::prompt_new() .chain_err(|| skey_path )?; let (pkey_file, mut skey_file) = SecretKeyFile::new(); skey_file.encrypt(passwd); skey_file.save(skey_path)?; pkey_file.save(pkey_path)?; println!("Generated {} and {}", pkey_path.display(), skey_path.display()); Ok((pkey_file, skey_file)) } fn prompt_skey(skey_path: &Path, prompt: impl AsRef<str>) -> Result<SecretKeyFile, Error>
{ let mut key_file = SecretKeyFile::open(skey_path)?; if key_file.is_encrypted() { let passwd = Passwd::prompt(&format!("{} {}: ", prompt.as_ref(), skey_path.display())) .chain_err(|| skey_path )?; key_file.decrypt(passwd) .chain_err(|| skey_path )?; } Ok(key_file) }
identifier_body
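The pkgar key code in the rows above is normally driven through gen_keypair() and prompt_skey(). A minimal usage sketch follows, assuming it runs inside the same crate (prompt_skey is private), that stdin is available for the passphrase prompts, and with placeholder file paths; the error type is the crate's error_chain-style Error:

use std::path::Path;

fn run() -> Result<(), Error> {
    let pkey_path = Path::new("example.pub.toml"); // placeholder paths, not from the original
    let skey_path = Path::new("example.toml");

    // Prompts twice on stdin; an empty passphrase leaves the secret key in plaintext.
    let (_pkey_file, _skey_file) = gen_keypair(pkey_path, skey_path)?;

    // Re-open the secret key later, prompting for the passphrase only if the
    // stored file is actually encrypted (prompt_skey checks is_encrypted()).
    let skey_file = prompt_skey(skey_path, "Passphrase for")?;

    // A decrypted secret key can recover its public half.
    assert!(skey_file.public_key_file().is_some());
    Ok(())
}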
lib.rs
secretbox, sign}; //TODO: Macro? pub(crate) fn to_salt<'d, D: Deserializer<'d>>(deser: D) -> Result<pwhash::Salt, D::Error> { String::deserialize(deser) .and_then(|s| <[u8; 32]>::from_hex(s) .map(|val| pwhash::Salt(val) ) .map_err(|err| Error::custom(err.to_string()) ) ) } pub(crate) fn to_nonce<'d, D: Deserializer<'d>>(deser: D) -> Result<secretbox::Nonce, D::Error> { String::deserialize(deser) .and_then(|s| <[u8; 24]>::from_hex(s) .map(|val| secretbox::Nonce(val) ) .map_err(|err| Error::custom(err.to_string()) ) ) } pub(crate) fn to_pubkey<'d, D: Deserializer<'d>>(deser: D) -> Result<sign::PublicKey, D::Error> { String::deserialize(deser) .and_then(|s| <[u8; 32]>::from_hex(s) .map(|val| sign::PublicKey(val) ) .map_err(|err| Error::custom(err.to_string()) ) ) } } /// Standard pkgar public key format definition. Use serde to serialize/deserialize /// files into this struct (helper methods available). #[derive(Deserialize, Serialize)] pub struct PublicKeyFile { #[serde(serialize_with = "hex::serialize", deserialize_with = "ser::to_pubkey")] pub pkey: sign::PublicKey, } impl PublicKeyFile { /// Parse a `PublicKeyFile` from `file` (in toml format). pub fn open(file: impl AsRef<Path>) -> Result<PublicKeyFile, Error> { let content = fs::read_to_string(&file) .chain_err(|| file.as_ref() )?; toml::from_str(&content) .chain_err(|| file.as_ref() ) } /// Write `self` serialized as toml to `w`. pub fn write(&self, mut w: impl Write) -> Result<(), Error> { w.write_all(toml::to_string(self)?.as_bytes())?; Ok(()) } /// Shortcut to write the public key to `file` pub fn save(&self, file: impl AsRef<Path>) -> Result<(), Error> { self.write( File::create(&file) .chain_err(|| file.as_ref() )? ).chain_err(|| file.as_ref() ) } } enum SKey { Cipher([u8; 80]), Plain(sign::SecretKey), } impl SKey { fn encrypt(&mut self, passwd: Passwd, salt: pwhash::Salt, nonce: secretbox::Nonce) { if let SKey::Plain(skey) = self { if let Some(passwd_key) = passwd.gen_key(salt) { let mut buf = [0; 80]; buf.copy_from_slice(&secretbox::seal(skey.as_ref(), &nonce, &passwd_key)); *self = SKey::Cipher(buf); } } } fn decrypt(&mut self, passwd: Passwd, salt: pwhash::Salt, nonce: secretbox::Nonce) -> Result<(), Error> { if let SKey::Cipher(ciphertext) = self { if let Some(passwd_key) = passwd.gen_key(salt) { let skey_plain = secretbox::open(ciphertext.as_ref(), &nonce, &passwd_key) .map_err(|_| ErrorKind::PassphraseIncorrect )?; *self = SKey::Plain(sign::SecretKey::from_slice(&skey_plain) .ok_or(ErrorKind::KeyInvalid)?); } else { *self = SKey::Plain(sign::SecretKey::from_slice(&ciphertext[..64]) .ok_or(ErrorKind::KeyInvalid)?); } } Ok(()) } /// Returns `None` if encrypted fn skey(&self) -> Option<sign::SecretKey> { match &self { SKey::Plain(skey) => Some(skey.clone()), SKey::Cipher(_) => None, } } } impl AsRef<[u8]> for SKey { fn as_ref(&self) -> &[u8] { match self { SKey::Cipher(buf) => buf.as_ref(), SKey::Plain(skey) => skey.as_ref(), } } } impl FromHex for SKey { type Error = hex::FromHexError; fn from_hex<T: AsRef<[u8]>>(buf: T) -> Result<SKey, hex::FromHexError> { let bytes = hex::decode(buf)?; // Public key is only 64 bytes... if bytes.len() == 64 { Ok(SKey::Plain(sign::SecretKey::from_slice(&bytes) .expect("Somehow not the right number of bytes"))) } else { let mut buf = [0; 80]; buf.copy_from_slice(&bytes); Ok(SKey::Cipher(buf)) } } } /// Standard pkgar private key format definition. Use serde. /// Internally, this struct stores the encrypted state of the private key as an enum. 
/// Manipulate the state using the `encrypt()`, `decrypt()` and `is_encrypted()`. #[derive(Deserialize, Serialize)] pub struct SecretKeyFile { #[serde(serialize_with = "hex::serialize", deserialize_with = "ser::to_salt")] salt: pwhash::Salt, #[serde(serialize_with = "hex::serialize", deserialize_with = "ser::to_nonce")] nonce: secretbox::Nonce, #[serde(with = "hex")] skey: SKey, } impl SecretKeyFile { /// Generate a keypair with all the nessesary info to save both keys. You /// must call `save()` on each object to persist them to disk. pub fn new() -> (PublicKeyFile, SecretKeyFile) { let (pkey, skey) = sign::gen_keypair(); let pkey_file = PublicKeyFile { pkey }; let skey_file = SecretKeyFile { salt: pwhash::gen_salt(), nonce: secretbox::gen_nonce(), skey: SKey::Plain(skey), }; (pkey_file, skey_file) } /// Parse a `SecretKeyFile` from `file` (in toml format). pub fn open(file: impl AsRef<Path>) -> Result<SecretKeyFile, Error> { let content = fs::read_to_string(&file) .chain_err(|| file.as_ref() )?; toml::from_str(&content) .chain_err(|| file.as_ref() ) } /// Write `self` serialized as toml to `w`. pub fn write(&self, mut w: impl Write) -> Result<(), Error> { w.write_all(toml::to_string(&self)?.as_bytes())?; Ok(()) } /// Shortcut to write the secret key to `file`. /// /// Make sure to call `encrypt()` in order to encrypt /// the private key, otherwise it will be stored as plain text. pub fn save(&self, file: impl AsRef<Path>) -> Result<(), Error> { self.write( OpenOptions::new() .write(true) .create(true) .mode(0o600) .open(&file) .chain_err(|| file.as_ref() )? ).chain_err(|| file.as_ref() ) } /// Ensure that the internal state of this struct is encrypted. /// Note that if passwd is empty, this function is a no-op. pub fn encrypt(&mut self, passwd: Passwd) { self.skey.encrypt(passwd, self.salt, self.nonce) } /// Ensure that the internal state of this struct is decrypted. /// If the internal state is already decrypted, this function is a no-op. pub fn decrypt(&mut self, passwd: Passwd) -> Result<(), Error> { self.skey.decrypt(passwd, self.salt, self.nonce) } /// Status of the internal state. pub fn is_encrypted(&self) -> bool { match self.skey { SKey::Cipher(_) => true, SKey::Plain(_) => false, } } /// Returns `None` if the secret key is encrypted. pub fn
(&mut self) -> Option<sign::SecretKey> { match &self.skey { SKey::Plain(skey) => Some(skey.clone()), SKey::Cipher(_) => None, } } /// Returns `None` if the secret key is encrypted. pub fn public_key_file(&self) -> Option<PublicKeyFile> { Some(PublicKeyFile { pkey: self.skey.skey()?.public_key(), }) } } /// Secure in-memory representation of a password. pub struct Passwd { bytes: SecBytes, } impl Passwd { /// Create a new `Passwd` and zero the old string. pub fn new(passwd: &mut String) -> Pass
key
identifier_name
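The same state machine can also be driven directly through SecretKeyFile, as its documentation above describes. A sketch of the encrypt-at-rest round trip, under the same in-crate assumptions as before:

fn save_and_reload(skey_path: &std::path::Path) -> Result<(), Error> {
    // Generate an in-memory keypair; nothing is written to disk yet.
    let (pkey_file, mut skey_file) = SecretKeyFile::new();

    // Encrypt with a passphrase (a no-op for an empty passphrase), then persist;
    // save() opens the file with 0o600 permissions.
    skey_file.encrypt(Passwd::prompt_new()?);
    skey_file.save(skey_path)?;

    // Reload and decrypt before using the key material.
    let mut reloaded = SecretKeyFile::open(skey_path)?;
    if reloaded.is_encrypted() {
        reloaded.decrypt(Passwd::prompt("Passphrase: ")?)?;
    }

    // The decrypted key reproduces the original public key.
    assert!(reloaded.public_key_file().map(|p| p.pkey) == Some(pkey_file.pkey));
    Ok(())
}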
fwliir.py
:] / 2**(iir.nbits - 1) return t, im def iir2sos(iir): """ Convierte un filtro digital IIR en punto fijo a su representación como secuencia de secciones de segundo orden en punto flotante (ver `scipy.signal.sos2tf` como referencia). :param iir: filtro digital IIR en punto fijo. :return: filtro digital en representación SOS. """ # Computa el límite númerico de la representación entera signada. n = 2**(iir.nbits - 1) # Escala el filtro digital en punto fijo acorde a la ganancia y # normaliza al intervalo [-1, 1) en punto flotante. return np.array([ (*(sos[-1] * sos[:3] / n), 1., *(sos[-1] * sos[3:5] / n)) for sos in iir ]) def genStablePrototype(nlimit, nbits=32): """ Genera un filtro digital IIR en punto fijo estable en forma aleatoria. :param nlimit: orden máximo admitido para el filtro. :param nbits: cantidad de bits utilizados para la representación numérica de los coeficientes. :return: filtro digital IIR en punto fijo generado. """ iir = IIR() # Computa el límite númerico de la representación entera signada. n = 2 ** (nbits - 1) # Selecciona el orden del filtro en forma aleatoria # del intervalo [1, nlimit]. order = max(int(random.random() * (nlimit + 1)), 1) # Si el orden es impar se introduce una etapa de primer orden. if order % 2 != 0: # Cero y polo de la etapa se ubican dentro o sobre el # círculo unidad. b0 = n b1 = np.random.randint(-n, n-1) a1 = np.random.randint(-n, n-1) sos = np.array([b0, b1, 0, a1, 0, 1]) # Ajusta la ganancia de la sección para su representación. fitsos(sos, nbits) # Incorpora la etapa al filtro. iir.append(sos) # Introduce N etapas de segundo orden para alcanzar # el orden seleccionado. for _ in range(order // 2): # Ceros y polos de la etapa se ubican dentro del círculo unidad. b0 = n b2 = np.random.randint(-n+1, n-1) a2 = np.random.randint(-n+1, n-1) b1 = np.random.randint(-b2-n, b2+n) a1 = np.random.randint(-a2-n, a2+n) sos = np.array([b0, b1, b2, a1, a2, 1]) # Ajusta la ganancia de la sección para su representación. fitsos(sos, nbits) # Incorpora la etapa al filtro. iir.append(sos) if hasattr(iir, 'nbits'): # Preserva el número de bits en el filtro. iir.nbits = nbits return iir def cxUniformND(iir1, iir2, ndpb): """ Cruza numeradores y denominadores de filtros digitales IIR en punto fijo, potencialmente de distinto orden, produciendo dos descendientes. El orden de las etapas a cruzar es modificado aleatoriamente. Variante de `deap.tools.cxUniform`. :param iir1: primer filtro progenitor. :param iir2: segundo filtro progenitor. :param ndpb: probabilidad de cruza de numerador y/o denominador. """ # Tomando el filtro candidato de menor orden, itera las # secciones de a pares tomados en forma aleatoria. for i, j in zip( random.sample(list(range(len(iir1))), len(iir1)), random.sample(list(range(len(iir2))), len(iir2)) ): # Obtiene las etapas de cada filtro a cruzar. sos1 = iir1[i] sos2 = iir2[j] if random.random() < ndpb: # Cruza los numeradores de las etapas sos1[:3], sos2[:3] = sos2[:3], sos1[:3] if random.random() < ndpb: # Cruza los denominadores de las etapas sos1[3:5], sos2[3:5] = sos2[3:5], sos1[3:5] # Ajusta la ganancia de la primera sección para que los # coeficientes del filtro puedan representarse en punto # fijo para el número de bits del filtro candidato. fitsos(sos1, iir1.nbits) # Ajusta la ganancia de la primera sección para que los # coeficientes del filtro puedan representarse en punto # fijo para el número de bits del filtro candidato. 
fitsos(sos2, iir2.nbits) return iir1, iir2 def evTimeResponse(iir, target, ts): """ Evalúa la aptitud de la respuesta temporal de un filtro digital IIR en punto fijo según la similitud que su respuesta temporal presenta respecto a la respuesta objetivo. :param iir: filtro digital IIR en punto fijo. :param target: respuesta al impulso objetivo. :param ts: período de muestreo, en segundos. :return: aptitud del filtro provisto. """ # Computa la respuesta al impulso del filtro candidato # en su representación SOS. _, (im,) = signal.dimpulse( (*signal.sos2tf(iir2sos(iir)), ts), n=len(target) ) # Computa el error relativo entre respuesta al impulso # del filtro candidato y respuesta al impulso esperada. et = (im - target) / np.max(np.abs(target)) # Evalua la aptitud del filtro candidato como el recíproco # de la potencia de error relativo. return (1. / (np.mean(et)**2 + np.var(et)),) def mutCoeffGaussian(iir, mu, sigma, indpb): """ Muta los coeficientes de un filtro digital IIR en punto fijo mediante perturbaciones numéricas. Variante de `deap.tools.mutGaussian`. :param mu: media de la distribución gaussiana de la que se toman las perturbaciones a aplicar. :param sigma: desvío estandar de la distribución gaussiana de la que se toman las perturbaciones a aplicar. :param indpb: probabilidad de perturbar un coeficiente. """ # Itera cada sección del filtro. for sos in iir: # Conforma una máscara de los coeficientes de la # sección actual del filtro, según la probabilidad # dada. mask = (np.random.random(len(sos)-1) < indpb) # Perturba los coeficientes a partir de una distribución # normal con media y desvío estándar dados. sos[:-1][mask] += np.random.normal( mu, sigma, np.count_nonzero(mask) ).astype(int) # Ajusta la ganancia de la sección para que los coeficientes # del filtro puedan representarse en punto fijo para el # número de bits del filtro. fitsos(sos, iir.nbits) return iir, def eaSimplePlusElitism(population, toolbox, cxpb, mutpb, eprop, ngen, stats=None, halloffame=None, verbose=__debug__): """ Variante de `deap.algorithms.eaSimple` con una
for gen in range(1, n
proporción de elitismo. """ logbook = tools.Logbook() logbook.header = ['gen', 'nevals'] + (stats.fields if stats else []) # Evalua los individuos con aptitud inválida. invalid_ind = [ind for ind in population if not ind.fitness.valid] fitnesses = toolbox.map(toolbox.evaluate, invalid_ind) for ind, fit in zip(invalid_ind, fitnesses): ind.fitness.values = fit if halloffame is not None: halloffame.update(population) record = stats.compile(population) if stats else {} logbook.record(gen=0, nevals=len(invalid_ind), **record) if verbose: print(logbook.stream) # Comienza el proceso evolutivo.
identifier_body
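Stated in English (the docstrings above are in Spanish): evTimeResponse simulates the candidate filter's impulse response with scipy.signal.dimpulse and compares it against the target response. With the relative error e = (im - target) / max|target|, the returned fitness is 1 / (mean(e)^2 + var(e)), which equals 1 / E[e^2], the reciprocal of the mean-square relative error; the closer the match, the larger the fitness.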
fwliir.py
# Computa el límite númerico de la representación entera signada. n = 2 ** (nbits - 1) # Selecciona el orden del filtro en forma aleatoria # del intervalo [1, nlimit]. order = max(int(random.random() * (nlimit + 1)), 1) # Si el orden es impar se introduce una etapa de primer orden. if order % 2 != 0: # Cero y polo de la etapa se ubican dentro o sobre el # círculo unidad. b0 = n b1 = np.random.randint(-n, n-1) a1 = np.random.randint(-n, n-1) sos = np.array([b0, b1, 0, a1, 0, 1]) # Ajusta la ganancia de la sección para su representación. fitsos(sos, nbits) # Incorpora la etapa al filtro. iir.append(sos) # Introduce N etapas de segundo orden para alcanzar # el orden seleccionado. for _ in range(order // 2): # Ceros y polos de la etapa se ubican dentro del círculo unidad. b0 = n b2 = np.random.randint(-n+1, n-1) a2 = np.random.randint(-n+1, n-1) b1 = np.random.randint(-b2-n, b2+n) a1 = np.random.randint(-a2-n, a2+n) sos = np.array([b0, b1, b2, a1, a2, 1]) # Ajusta la ganancia de la sección para su representación. fitsos(sos, nbits) # Incorpora la etapa al filtro. iir.append(sos) if hasattr(iir, 'nbits'): # Preserva el número de bits en el filtro. iir.nbits = nbits return iir def cxUniformND(iir1, iir2, ndpb): """ Cruza numeradores y denominadores de filtros digitales IIR en punto fijo, potencialmente de distinto orden, produciendo dos descendientes. El orden de las etapas a cruzar es modificado aleatoriamente. Variante de `deap.tools.cxUniform`. :param iir1: primer filtro progenitor. :param iir2: segundo filtro progenitor. :param ndpb: probabilidad de cruza de numerador y/o denominador. """ # Tomando el filtro candidato de menor orden, itera las # secciones de a pares tomados en forma aleatoria. for i, j in zip( random.sample(list(range(len(iir1))), len(iir1)), random.sample(list(range(len(iir2))), len(iir2)) ): # Obtiene las etapas de cada filtro a cruzar. sos1 = iir1[i] sos2 = iir2[j] if random.random() < ndpb: # Cruza los numeradores de las etapas sos1[:3], sos2[:3] = sos2[:3], sos1[:3] if random.random() < ndpb: # Cruza los denominadores de las etapas sos1[3:5], sos2[3:5] = sos2[3:5], sos1[3:5] # Ajusta la ganancia de la primera sección para que los # coeficientes del filtro puedan representarse en punto # fijo para el número de bits del filtro candidato. fitsos(sos1, iir1.nbits) # Ajusta la ganancia de la primera sección para que los # coeficientes del filtro puedan representarse en punto # fijo para el número de bits del filtro candidato. fitsos(sos2, iir2.nbits) return iir1, iir2 def evTimeResponse(iir, target, ts): """ Evalúa la aptitud de la respuesta temporal de un filtro digital IIR en punto fijo según la similitud que su respuesta temporal presenta respecto a la respuesta objetivo. :param iir: filtro digital IIR en punto fijo. :param target: respuesta al impulso objetivo. :param ts: período de muestreo, en segundos. :return: aptitud del filtro provisto. """ # Computa la respuesta al impulso del filtro candidato # en su representación SOS. _, (im,) = signal.dimpulse( (*signal.sos2tf(iir2sos(iir)), ts), n=len(target) ) # Computa el error relativo entre respuesta al impulso # del filtro candidato y respuesta al impulso esperada. et = (im - target) / np.max(np.abs(target)) # Evalua la aptitud del filtro candidato como el recíproco # de la potencia de error relativo. return (1. / (np.mean(et)**2 + np.var(et)),) def mutCoeffGaussian(iir, mu, sigma, indpb): """ Muta los coeficientes de un filtro digital IIR en punto fijo mediante perturbaciones numéricas. 
Variante de `deap.tools.mutGaussian`. :param mu: media de la distribución gaussiana de la que se toman las perturbaciones a aplicar. :param sigma: desvío estandar de la distribución gaussiana de la que se toman las perturbaciones a aplicar. :param indpb: probabilidad de perturbar un coeficiente. """ # Itera cada sección del filtro. for sos in iir: # Conforma una máscara de los coeficientes de la # sección actual del filtro, según la probabilidad # dada. mask = (np.random.random(len(sos)-1) < indpb) # Perturba los coeficientes a partir de una distribución # normal con media y desvío estándar dados. sos[:-1][mask] += np.random.normal( mu, sigma, np.count_nonzero(mask) ).astype(int) # Ajusta la ganancia de la sección para que los coeficientes # del filtro puedan representarse en punto fijo para el # número de bits del filtro. fitsos(sos, iir.nbits) return iir, def eaSimplePlusElitism(population, toolbox, cxpb, mutpb, eprop, ngen, stats=None, halloffame=None, verbose=__debug__): """ Variante de `deap.algorithms.eaSimple` con una proporción de elitismo. """ logbook = tools.Logbook() logbook.header = ['gen', 'nevals'] + (stats.fields if stats else []) # Evalua los individuos con aptitud inválida. invalid_ind = [ind for ind in population if not ind.fitness.valid] fitnesses = toolbox.map(toolbox.evaluate, invalid_ind) for ind, fit in zip(invalid_ind, fitnesses): ind.fitness.values = fit if halloffame is not None: halloffame.update(population) record = stats.compile(population) if stats else {} logbook.record(gen=0, nevals=len(invalid_ind), **record) if verbose: print(logbook.stream) # Comienza el proceso evolutivo. for gen in range(1, ngen + 1): # Seleccina la próxima generación de individuos. offspring = toolbox.select(population, len(population)) # Varia el pool de individuos, aplicando cruza y mutación. offspring = algorithms.varAnd(offspring, toolbox, cxpb, mutpb) # Evalua los individuos con aptitud inválida. invalid_ind = [ind for ind in offspring if not ind.fitness.valid] fitnesses = toolbox.map(toolbox.evaluate, invalid_ind) for ind, fit in zip(invalid_ind, fitnesses): ind.fitness.values = fit # Actualiza el grupo de mejores individuos. if halloffame is not None: halloffame.update(offspring) # Reemplaza la población actual con los mejores del conjunto # compuesta por su descendencia y la elite. elite_count = int(len(population) * eprop) elite = tools.selBest(population, elite_count) population[:] = tools.selBest(offspring + elite, len(population)) # Toma nota de las estadísticas de la generación actual. record = stats.compile(population) if stats else {} logbook.record(gen=gen, nevals=len(invalid_ind), **record) if verbose: print(logbook.stream) return population, logbook def configure_genetic_approx(*, nbits=16, nlimit=8, nsln=10, cxpb=0
.7, ndpb=0.5,
identifier_name
fwliir.py
respuesta al impulso renormalizando la salida # al intervalo [-1, 1) # salida del t = ts * np.arange(n) im = y[-1, 2:] / 2**(iir.nbits - 1) return t, im def iir2sos(iir): """ Convierte un filtro digital IIR en punto fijo a su representación como secuencia de secciones de segundo orden en punto flotante (ver `scipy.signal.sos2tf` como referencia). :param iir: filtro digital IIR en punto fijo. :return: filtro digital en representación SOS. """ # Computa el límite númerico de la representación entera signada. n = 2**(iir.nbits - 1) # Escala el filtro digital en punto fijo acorde a la ganancia y # normaliza al intervalo [-1, 1) en punto flotante. return np.array([ (*(sos[-1] * sos[:3] / n), 1., *(sos[-1] * sos[3:5] / n)) for sos in iir ]) def genStablePrototype(nlimit, nbits=32): """ Genera un filtro digital IIR en punto fijo estable en forma aleatoria. :param nlimit: orden máximo admitido para el filtro. :param nbits: cantidad de bits utilizados para la representación numérica de los coeficientes. :return: filtro digital IIR en punto fijo generado. """ iir = IIR() # Computa el límite númerico de la representación entera signada. n = 2 ** (nbits - 1) # Selecciona el orden del filtro en forma aleatoria # del intervalo [1, nlimit]. order = max(int(random.random() * (nlimit + 1)), 1) # Si el orden es impar se introduce una etapa de primer orden. if order % 2 != 0: # Cero y polo de la etapa se ubican dentro o sobre el # círculo unidad. b0 = n b1 = np.random.randint(-n, n-1) a1 = np.random.randint(-n, n-1) sos = np.array([b0, b1, 0, a1, 0, 1]) # Ajusta la ganancia de la sección para su representación. fitsos(sos, nbits) # Incorpora la etapa al filtro. iir.append(sos) # Introduce N etapas de segundo orden para alcanzar # el orden seleccionado. for _ in range(order // 2): # Ceros y polos de la etapa se ubican dentro del círculo unidad. b0 = n b2 = np.random.randint(-n+1, n-1) a2 = np.random.randint(-n+1, n-1) b1 = np.random.randint(-b2-n, b2+n) a1 = np.random.randint(-a2-n, a2+n) sos = np.array([b0, b1, b2, a1, a2, 1]) # Ajusta la ganancia de la sección para su representación. fitsos(sos, nbits) # Incorpora la etapa al filtro. iir.append(sos) if hasattr(iir, 'nbits'): # Preserva el número de bits en el filtro. iir.nbits = nbits return iir def cxUniformND(iir1, iir2, ndpb): """ Cruza numeradores y denominadores de filtros digitales IIR en punto fijo, potencialmente de distinto orden, produciendo dos descendientes. El orden de las etapas a cruzar es modificado aleatoriamente. Variante de `deap.tools.cxUniform`. :param iir1: primer filtro progenitor. :param iir2: segundo filtro progenitor. :param ndpb: probabilidad de cruza de numerador y/o denominador. """ # Tomando el filtro candidato de menor orden, itera las # secciones de a pares tomados en forma aleatoria. for i, j in zip( random.sample(list(range(len(iir1))), len(iir1)), random.sample(list(range(len(iir2))), len(iir2)) ): # Obtiene las etapas de cada filtro a cruzar. sos1 = iir1[i] sos2 = iir2[j] if random.random() < ndpb: # Cruza los numeradores de las etapas sos1[:3], sos2[:3] = sos2[:3], sos1[:3] if random.random() < ndpb: # Cruza los denominadores de las etapas sos1[3:5], sos2[3:5] = sos2[3:5], sos1[3:5] # Ajusta la ganancia de la primera sección para que los # coeficientes del filtro puedan representarse en punto # fijo para el número de bits del filtro candidato. 
fitsos(sos1, iir1.nbits) # Ajusta la ganancia de la primera sección para que los # coeficientes del filtro puedan representarse en punto # fijo para el número de bits del filtro candidato. fitsos(sos2, iir2.nbits) return iir1, iir2 def evTimeResponse(iir, target, ts): """ Evalúa la aptitud de la respuesta temporal de un filtro digital IIR en punto fijo según la similitud que su respuesta temporal presenta respecto a la respuesta objetivo. :param iir: filtro digital IIR en punto fijo. :param target: respuesta al impulso objetivo. :param ts: período de muestreo, en segundos. :return: aptitud del filtro provisto. """ # Computa la respuesta al impulso del filtro candidato # en su representación SOS. _, (im,) = signal.dimpulse( (*signal.sos2tf(iir2sos(iir)), ts), n=len(target) ) # Computa el error relativo entre respuesta al impulso # del filtro candidato y respuesta al impulso esperada. et = (im - target) / np.max(np.abs(target)) # Evalua la aptitud del filtro candidato como el recíproco # de la potencia de error relativo. return (1. / (np.mean(et)**2 + np.var(et)),) def mutCoeffGaussian(iir, mu, sigma, indpb): """ Muta los coeficientes de un filtro digital IIR en punto fijo mediante perturbaciones numéricas. Variante de `deap.tools.mutGaussian`. :param mu: media de la distribución gaussiana de la que se toman las perturbaciones a aplicar. :param sigma: desvío estandar de la distribución gaussiana de la que se toman las perturbaciones a aplicar. :param indpb: probabilidad de perturbar un coeficiente. """ # Itera cada sección del filtro. for sos in iir: # Conforma una máscara de los coeficientes de la # sección actual del filtro, según la probabilidad # dada. mask = (np.random.random(len(sos)-1) < indpb) # Perturba los coeficientes a partir de una distribución # normal con media y desvío estándar dados. sos[:-1][mask] += np.random.normal( mu, sigma, np.count_nonzero(mask) ).astype(int) # Ajusta la ganancia de la sección para que los coeficientes # del filtro puedan representarse en punto fijo para el # número de bits del filtro. fitsos(sos, iir.nbits
enumerate(iir, start=1): b0, b1, b2, a1, a2, k = sos # Computar la ecuación de diferencias, truncando # y saturando el resultado para ser representado # en punto fijo 1.(`nbits`-1) y[i, j] = np.clip((k * ( b0 * y[i - 1, j] + b1 * y[i - 1, j - 1] + b2 * y[i - 1, j - 2] - a1 * y[i, j - 1] - a2 * y[i, j - 2] )) >> (iir.nbits - 1), -2**(iir.nbits - 1), 2**(iir.nbits - 1) - 1) # Retorna
conditional_block
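The conditional_block fragment above is the fixed-point simulation kernel: for each second-order section, the accumulator k * (b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]) is formed in integer arithmetic (x being the previous section's output row and y the current one), rescaled by an arithmetic right shift of nbits - 1, and saturated with np.clip to the signed range [-2^(nbits-1), 2^(nbits-1) - 1], i.e. Q1.(nbits-1) arithmetic with saturation rather than wrap-around.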
fwliir.py
:] / 2**(iir.nbits - 1) return t, im def iir2sos(iir): """ Convierte un filtro digital IIR en punto fijo a su representación como secuencia de secciones de segundo orden en punto flotante (ver `scipy.signal.sos2tf` como referencia). :param iir: filtro digital IIR en punto fijo. :return: filtro digital en representación SOS. """ # Computa el límite númerico de la representación entera signada. n = 2**(iir.nbits - 1) # Escala el filtro digital en punto fijo acorde a la ganancia y # normaliza al intervalo [-1, 1) en punto flotante. return np.array([ (*(sos[-1] * sos[:3] / n), 1., *(sos[-1] * sos[3:5] / n))
""" Genera un filtro digital IIR en punto fijo estable en forma aleatoria. :param nlimit: orden máximo admitido para el filtro. :param nbits: cantidad de bits utilizados para la representación numérica de los coeficientes. :return: filtro digital IIR en punto fijo generado. """ iir = IIR() # Computa el límite númerico de la representación entera signada. n = 2 ** (nbits - 1) # Selecciona el orden del filtro en forma aleatoria # del intervalo [1, nlimit]. order = max(int(random.random() * (nlimit + 1)), 1) # Si el orden es impar se introduce una etapa de primer orden. if order % 2 != 0: # Cero y polo de la etapa se ubican dentro o sobre el # círculo unidad. b0 = n b1 = np.random.randint(-n, n-1) a1 = np.random.randint(-n, n-1) sos = np.array([b0, b1, 0, a1, 0, 1]) # Ajusta la ganancia de la sección para su representación. fitsos(sos, nbits) # Incorpora la etapa al filtro. iir.append(sos) # Introduce N etapas de segundo orden para alcanzar # el orden seleccionado. for _ in range(order // 2): # Ceros y polos de la etapa se ubican dentro del círculo unidad. b0 = n b2 = np.random.randint(-n+1, n-1) a2 = np.random.randint(-n+1, n-1) b1 = np.random.randint(-b2-n, b2+n) a1 = np.random.randint(-a2-n, a2+n) sos = np.array([b0, b1, b2, a1, a2, 1]) # Ajusta la ganancia de la sección para su representación. fitsos(sos, nbits) # Incorpora la etapa al filtro. iir.append(sos) if hasattr(iir, 'nbits'): # Preserva el número de bits en el filtro. iir.nbits = nbits return iir def cxUniformND(iir1, iir2, ndpb): """ Cruza numeradores y denominadores de filtros digitales IIR en punto fijo, potencialmente de distinto orden, produciendo dos descendientes. El orden de las etapas a cruzar es modificado aleatoriamente. Variante de `deap.tools.cxUniform`. :param iir1: primer filtro progenitor. :param iir2: segundo filtro progenitor. :param ndpb: probabilidad de cruza de numerador y/o denominador. """ # Tomando el filtro candidato de menor orden, itera las # secciones de a pares tomados en forma aleatoria. for i, j in zip( random.sample(list(range(len(iir1))), len(iir1)), random.sample(list(range(len(iir2))), len(iir2)) ): # Obtiene las etapas de cada filtro a cruzar. sos1 = iir1[i] sos2 = iir2[j] if random.random() < ndpb: # Cruza los numeradores de las etapas sos1[:3], sos2[:3] = sos2[:3], sos1[:3] if random.random() < ndpb: # Cruza los denominadores de las etapas sos1[3:5], sos2[3:5] = sos2[3:5], sos1[3:5] # Ajusta la ganancia de la primera sección para que los # coeficientes del filtro puedan representarse en punto # fijo para el número de bits del filtro candidato. fitsos(sos1, iir1.nbits) # Ajusta la ganancia de la primera sección para que los # coeficientes del filtro puedan representarse en punto # fijo para el número de bits del filtro candidato. fitsos(sos2, iir2.nbits) return iir1, iir2 def evTimeResponse(iir, target, ts): """ Evalúa la aptitud de la respuesta temporal de un filtro digital IIR en punto fijo según la similitud que su respuesta temporal presenta respecto a la respuesta objetivo. :param iir: filtro digital IIR en punto fijo. :param target: respuesta al impulso objetivo. :param ts: período de muestreo, en segundos. :return: aptitud del filtro provisto. """ # Computa la respuesta al impulso del filtro candidato # en su representación SOS. _, (im,) = signal.dimpulse( (*signal.sos2tf(iir2sos(iir)), ts), n=len(target) ) # Computa el error relativo entre respuesta al impulso # del filtro candidato y respuesta al impulso esperada. 
et = (im - target) / np.max(np.abs(target)) # Evalua la aptitud del filtro candidato como el recíproco # de la potencia de error relativo. return (1. / (np.mean(et)**2 + np.var(et)),) def mutCoeffGaussian(iir, mu, sigma, indpb): """ Muta los coeficientes de un filtro digital IIR en punto fijo mediante perturbaciones numéricas. Variante de `deap.tools.mutGaussian`. :param mu: media de la distribución gaussiana de la que se toman las perturbaciones a aplicar. :param sigma: desvío estandar de la distribución gaussiana de la que se toman las perturbaciones a aplicar. :param indpb: probabilidad de perturbar un coeficiente. """ # Itera cada sección del filtro. for sos in iir: # Conforma una máscara de los coeficientes de la # sección actual del filtro, según la probabilidad # dada. mask = (np.random.random(len(sos)-1) < indpb) # Perturba los coeficientes a partir de una distribución # normal con media y desvío estándar dados. sos[:-1][mask] += np.random.normal( mu, sigma, np.count_nonzero(mask) ).astype(int) # Ajusta la ganancia de la sección para que los coeficientes # del filtro puedan representarse en punto fijo para el # número de bits del filtro. fitsos(sos, iir.nbits) return iir, def eaSimplePlusElitism(population, toolbox, cxpb, mutpb, eprop, ngen, stats=None, halloffame=None, verbose=__debug__): """ Variante de `deap.algorithms.eaSimple` con una proporción de elitismo. """ logbook = tools.Logbook() logbook.header = ['gen', 'nevals'] + (stats.fields if stats else []) # Evalua los individuos con aptitud inválida. invalid_ind = [ind for ind in population if not ind.fitness.valid] fitnesses = toolbox.map(toolbox.evaluate, invalid_ind) for ind, fit in zip(invalid_ind, fitnesses): ind.fitness.values = fit if halloffame is not None: halloffame.update(population) record = stats.compile(population) if stats else {} logbook.record(gen=0, nevals=len(invalid_ind), **record) if verbose: print(logbook.stream) # Comienza el proceso evolutivo. for gen in range(1, n
for sos in iir ]) def genStablePrototype(nlimit, nbits=32):
random_line_split
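For reference, iir2sos (repeated in the fwliir.py rows above) rescales each fixed-point section (b0, b1, b2, a1, a2, k) into a floating-point scipy SOS row (k*b0/n, k*b1/n, k*b2/n, 1, k*a1/n, k*a2/n) with n = 2^(nbits - 1), so coefficients stored as signed nbits integers map into [-1, 1) and the per-section gain k is folded into the row; this conversion is what lets evTimeResponse hand candidates to scipy.signal.sos2tf and dimpulse.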
main.rs
_urts; use sgx_types::*; use sgx_urts::SgxEnclave; extern crate mio; use mio::tcp::TcpStream; use std::os::unix::io::AsRawFd; use std::ffi::CString; use std::net::SocketAddr; use std::str; use std::io::{self, Write}; const BUFFER_SIZE: usize = 1024; static ENCLAVE_FILE: &'static str = "enclave.signed.so"; extern { fn tls_client_new(eid: sgx_enclave_id_t, retval: *mut usize, fd: c_int, hostname: *const c_char, cert: *const c_char) -> sgx_status_t; fn tls_client_read(eid: sgx_enclave_id_t, retval: *mut c_int, session_id: usize, buf: *mut c_void, cnt: c_int) -> sgx_status_t; fn tls_client_write(eid: sgx_enclave_id_t, retval: *mut c_int, session_id: usize, buf: *const c_void, cnt: c_int) -> sgx_status_t; fn tls_client_wants_read(eid: sgx_enclave_id_t, retval: *mut c_int, session_id: usize) -> sgx_status_t; fn tls_client_wants_write(eid: sgx_enclave_id_t, retval: *mut c_int, session_id: usize) -> sgx_status_t; fn tls_client_close(eid: sgx_enclave_id_t, session_id: usize) -> sgx_status_t; } fn init_enclave() -> SgxResult<SgxEnclave> { let mut launch_token: sgx_launch_token_t = [0; 1024]; let mut launch_token_updated: i32 = 0; // call sgx_create_enclave to initialize an enclave instance // Debug Support: set 2nd parameter to 1 let debug = 1; let mut misc_attr = sgx_misc_attribute_t {secs_attr: sgx_attributes_t { flags:0, xfrm:0}, misc_select:0}; SgxEnclave::create(ENCLAVE_FILE, debug, &mut launch_token, &mut launch_token_updated, &mut misc_attr) } const CLIENT: mio::Token = mio::Token(0); /// This encapsulates the TCP-level connection, some connection /// state, and the underlying TLS-level session. struct TlsClient { enclave_id: sgx_enclave_id_t, socket: TcpStream, closing: bool, tlsclient_id: usize, } impl TlsClient { fn ready(&mut self, poll: &mut mio::Poll, ev: &mio::Event) -> bool { assert_eq!(ev.token(), CLIENT); if ev.readiness().is_error() { println!("Error"); return false; } if ev.readiness().is_readable() { self.do_read(); } if ev.readiness().is_writable() { self.do_write(); } if self.is_closed() { println!("Connection closed"); return false; } self.reregister(poll); true } } impl TlsClient { fn new(enclave_id: sgx_enclave_id_t, sock: TcpStream, hostname: &str, cert: &str) -> Option<TlsClient> { println!("[+] TlsClient new {} {}", hostname, cert); let mut tlsclient_id: usize = 0xFFFF_FFFF_FFFF_FFFF; let c_host = CString::new(hostname.to_string()).unwrap(); let c_cert = CString::new(cert.to_string()).unwrap(); let retval = unsafe { tls_client_new(enclave_id, &mut tlsclient_id, sock.as_raw_fd(), c_host.as_ptr() as *const c_char, c_cert.as_ptr() as *const c_char) }; if retval != sgx_status_t::SGX_SUCCESS { println!("[-] ECALL Enclave [tls_client_new] Failed {}!", retval); return Option::None; } if tlsclient_id == 0xFFFF_FFFF_FFFF_FFFF { println!("[-] New enclave tlsclient error"); return Option::None; } Option::Some( TlsClient { enclave_id: enclave_id, socket: sock, closing: false, tlsclient_id: tlsclient_id, }) } fn close(&self)
fn read_tls(&self, buf: &mut [u8]) -> isize { let mut retval = -1; let result = unsafe { tls_client_read(self.enclave_id, &mut retval, self.tlsclient_id, buf.as_mut_ptr() as * mut c_void, buf.len() as c_int) }; match result { sgx_status_t::SGX_SUCCESS => { retval as isize } _ => { println!("[-] ECALL Enclave [tls_client_read] Failed {}!", result); -1 } } } fn write_tls(&self, buf: &[u8]) -> isize { let mut retval = -1; let result = unsafe { tls_client_write(self.enclave_id, &mut retval, self.tlsclient_id, buf.as_ptr() as * const c_void, buf.len() as c_int) }; match result { sgx_status_t::SGX_SUCCESS => { retval as isize } _ => { println!("[-] ECALL Enclave [tls_client_write] Failed {}!", result); -1 } } } /// We're ready to do a read. fn do_read(&mut self) { // BUFFER_SIZE = 1024, just for test. // Do read all plaintext, you need to do more ecalls to get buffer size and buffer. let mut plaintext = vec![0; BUFFER_SIZE]; let rc = self.read_tls(plaintext.as_mut_slice()); if rc == -1 { println!("TLS read error: {:?}", rc); self.closing = true; return; } plaintext.resize(rc as usize, 0); io::stdout().write_all(&plaintext).unwrap(); } fn do_write(&mut self) { let buf = Vec::new(); self.write_tls(buf.as_slice()); } fn register(&self, poll: &mut mio::Poll) { poll.register(&self.socket, CLIENT, self.ready_interest(), mio::PollOpt::level() | mio::PollOpt::oneshot()) .unwrap(); } fn reregister(&self, poll: &mut mio::Poll) { poll.reregister(&self.socket, CLIENT, self.ready_interest(), mio::PollOpt::level() | mio::PollOpt::oneshot()) .unwrap(); } fn wants_read(&self) -> bool { let mut retval = -1; let result = unsafe { tls_client_wants_read(self.enclave_id, &mut retval, self.tlsclient_id) }; match result { sgx_status_t::SGX_SUCCESS => { }, _ => { println!("[-] ECALL Enclave [tls_client_wants_read] Failed {}!", result); return false; } } match retval { 0 => false, _ => true } } fn wants_write(&self) -> bool { let mut retval = -1; let result = unsafe { tls_client_wants_write(self.enclave_id, &mut retval, self.tlsclient_id) }; match result { sgx_status_t::SGX_SUCCESS => { }, _ => { println!("[-] ECALL Enclave [tls_client_wants_write] Failed {}!", result); return false; } } match retval { 0 => false, _ => true } } // Use wants_read/wants_write to register for different mio-level // IO readiness events. fn ready_interest(&self) -> mio::Ready { let rd = self.wants_read(); let wr = self.wants_write(); if rd && wr { mio::Ready::readable() | mio::Ready::writable() } else if wr { mio::Ready::writable() } else { mio::Ready::readable() } } fn is_closed(&self) -> bool { self.closing } } /// We implement `io::Write` and pass through to the TLS session impl io::Write for TlsClient { fn write(&mut self, bytes: &[u8]) -> io::Result<usize> { Ok(self.write_tls(bytes) as usize) } // unused fn flush(&mut self) -> io::Result<()> { Ok(()) } } impl io::Read for TlsClient { fn read(&mut self, bytes: &mut [u8]) -> io::Result<usize> { Ok(self.read_tls(bytes) as usize) } } fn lookup_ipv4(host: &str, port
{ let retval = unsafe { tls_client_close(self.enclave_id, self.tlsclient_id) }; if retval != sgx_status_t::SGX_SUCCESS { println!("[-] ECALL Enclave [tls_client_close] Failed {}!", retval); } }
identifier_body
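TlsClient above only supplies the per-connection plumbing; it still has to be driven by a mio poll loop that re-registers with ready_interest() after every event. A rough sketch of such a loop (the original main() is not shown here; the enclave id is assumed to come from init_enclave(), and the hostname, certificate path, and address are placeholders):

fn run_client(enclave_id: sgx_enclave_id_t, addr: SocketAddr) {
    // Plain TCP connect; the TLS handshake itself happens inside the enclave.
    let sock = TcpStream::connect(&addr).expect("connect failed");
    let mut client = TlsClient::new(enclave_id, sock, "example.com", "ca.cert")
        .expect("failed to create enclave TLS session");

    let mut poll = mio::Poll::new().unwrap();
    let mut events = mio::Events::with_capacity(32);
    client.register(&mut poll);

    'outer: loop {
        poll.poll(&mut events, None).unwrap();
        for ev in events.iter() {
            // ready() performs the reads/writes and re-registers interest;
            // it returns false once the session is closed or has errored.
            if !client.ready(&mut poll, &ev) {
                client.close();
                break 'outer;
            }
        }
    }
}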
main.rs
_urts; use sgx_types::*; use sgx_urts::SgxEnclave; extern crate mio; use mio::tcp::TcpStream; use std::os::unix::io::AsRawFd; use std::ffi::CString; use std::net::SocketAddr; use std::str; use std::io::{self, Write}; const BUFFER_SIZE: usize = 1024; static ENCLAVE_FILE: &'static str = "enclave.signed.so"; extern { fn tls_client_new(eid: sgx_enclave_id_t, retval: *mut usize, fd: c_int, hostname: *const c_char, cert: *const c_char) -> sgx_status_t; fn tls_client_read(eid: sgx_enclave_id_t, retval: *mut c_int, session_id: usize, buf: *mut c_void, cnt: c_int) -> sgx_status_t; fn tls_client_write(eid: sgx_enclave_id_t, retval: *mut c_int, session_id: usize, buf: *const c_void, cnt: c_int) -> sgx_status_t; fn tls_client_wants_read(eid: sgx_enclave_id_t, retval: *mut c_int, session_id: usize) -> sgx_status_t; fn tls_client_wants_write(eid: sgx_enclave_id_t, retval: *mut c_int, session_id: usize) -> sgx_status_t; fn tls_client_close(eid: sgx_enclave_id_t, session_id: usize) -> sgx_status_t; } fn init_enclave() -> SgxResult<SgxEnclave> { let mut launch_token: sgx_launch_token_t = [0; 1024]; let mut launch_token_updated: i32 = 0; // call sgx_create_enclave to initialize an enclave instance // Debug Support: set 2nd parameter to 1 let debug = 1; let mut misc_attr = sgx_misc_attribute_t {secs_attr: sgx_attributes_t { flags:0, xfrm:0}, misc_select:0}; SgxEnclave::create(ENCLAVE_FILE, debug, &mut launch_token, &mut launch_token_updated, &mut misc_attr) } const CLIENT: mio::Token = mio::Token(0); /// This encapsulates the TCP-level connection, some connection /// state, and the underlying TLS-level session. struct TlsClient { enclave_id: sgx_enclave_id_t, socket: TcpStream, closing: bool, tlsclient_id: usize, } impl TlsClient { fn ready(&mut self, poll: &mut mio::Poll, ev: &mio::Event) -> bool { assert_eq!(ev.token(), CLIENT); if ev.readiness().is_error() { println!("Error"); return false; } if ev.readiness().is_readable() { self.do_read(); } if ev.readiness().is_writable() { self.do_write(); } if self.is_closed() { println!("Connection closed"); return false; } self.reregister(poll); true } } impl TlsClient { fn new(enclave_id: sgx_enclave_id_t, sock: TcpStream, hostname: &str, cert: &str) -> Option<TlsClient> { println!("[+] TlsClient new {} {}", hostname, cert); let mut tlsclient_id: usize = 0xFFFF_FFFF_FFFF_FFFF; let c_host = CString::new(hostname.to_string()).unwrap(); let c_cert = CString::new(cert.to_string()).unwrap(); let retval = unsafe { tls_client_new(enclave_id, &mut tlsclient_id, sock.as_raw_fd(), c_host.as_ptr() as *const c_char, c_cert.as_ptr() as *const c_char) }; if retval != sgx_status_t::SGX_SUCCESS { println!("[-] ECALL Enclave [tls_client_new] Failed {}!", retval); return Option::None; } if tlsclient_id == 0xFFFF_FFFF_FFFF_FFFF { println!("[-] New enclave tlsclient error"); return Option::None; } Option::Some( TlsClient { enclave_id: enclave_id, socket: sock, closing: false, tlsclient_id: tlsclient_id, }) } fn close(&self) { let retval = unsafe { tls_client_close(self.enclave_id, self.tlsclient_id) }; if retval != sgx_status_t::SGX_SUCCESS { println!("[-] ECALL Enclave [tls_client_close] Failed {}!", retval); } } fn read_tls(&self, buf: &mut [u8]) -> isize { let mut retval = -1; let result = unsafe { tls_client_read(self.enclave_id, &mut retval, self.tlsclient_id, buf.as_mut_ptr() as * mut c_void, buf.len() as c_int) }; match result { sgx_status_t::SGX_SUCCESS => { retval as isize } _ => { println!("[-] ECALL Enclave [tls_client_read] Failed {}!", result); -1 } } } fn 
write_tls(&self, buf: &[u8]) -> isize { let mut retval = -1; let result = unsafe { tls_client_write(self.enclave_id, &mut retval, self.tlsclient_id, buf.as_ptr() as * const c_void, buf.len() as c_int) }; match result { sgx_status_t::SGX_SUCCESS => { retval as isize } _ => { println!("[-] ECALL Enclave [tls_client_write] Failed {}!", result); -1 } } } /// We're ready to do a read. fn do_read(&mut self) { // BUFFER_SIZE = 1024, just for test. // Do read all plaintext, you need to do more ecalls to get buffer size and buffer. let mut plaintext = vec![0; BUFFER_SIZE]; let rc = self.read_tls(plaintext.as_mut_slice()); if rc == -1 { println!("TLS read error: {:?}", rc); self.closing = true; return; } plaintext.resize(rc as usize, 0); io::stdout().write_all(&plaintext).unwrap(); } fn do_write(&mut self) { let buf = Vec::new(); self.write_tls(buf.as_slice()); } fn register(&self, poll: &mut mio::Poll) { poll.register(&self.socket, CLIENT, self.ready_interest(), mio::PollOpt::level() | mio::PollOpt::oneshot()) .unwrap(); } fn reregister(&self, poll: &mut mio::Poll) { poll.reregister(&self.socket, CLIENT, self.ready_interest(), mio::PollOpt::level() | mio::PollOpt::oneshot()) .unwrap(); } fn
(&self) -> bool { let mut retval = -1; let result = unsafe { tls_client_wants_read(self.enclave_id, &mut retval, self.tlsclient_id) }; match result { sgx_status_t::SGX_SUCCESS => { }, _ => { println!("[-] ECALL Enclave [tls_client_wants_read] Failed {}!", result); return false; } } match retval { 0 => false, _ => true } } fn wants_write(&self) -> bool { let mut retval = -1; let result = unsafe { tls_client_wants_write(self.enclave_id, &mut retval, self.tlsclient_id) }; match result { sgx_status_t::SGX_SUCCESS => { }, _ => { println!("[-] ECALL Enclave [tls_client_wants_write] Failed {}!", result); return false; } } match retval { 0 => false, _ => true } } // Use wants_read/wants_write to register for different mio-level // IO readiness events. fn ready_interest(&self) -> mio::Ready { let rd = self.wants_read(); let wr = self.wants_write(); if rd && wr { mio::Ready::readable() | mio::Ready::writable() } else if wr { mio::Ready::writable() } else { mio::Ready::readable() } } fn is_closed(&self) -> bool { self.closing } } /// We implement `io::Write` and pass through to the TLS session impl io::Write for TlsClient { fn write(&mut self, bytes: &[u8]) -> io::Result<usize> { Ok(self.write_tls(bytes) as usize) } // unused fn flush(&mut self) -> io::Result<()> { Ok(()) } } impl io::Read for TlsClient { fn read(&mut self, bytes: &mut [u8]) -> io::Result<usize> { Ok(self.read_tls(bytes) as usize) } } fn lookup_ipv4(host: &str, port
wants_read
identifier_name
main.rs
_urts; use sgx_types::*; use sgx_urts::SgxEnclave; extern crate mio; use mio::tcp::TcpStream; use std::os::unix::io::AsRawFd; use std::ffi::CString; use std::net::SocketAddr; use std::str; use std::io::{self, Write}; const BUFFER_SIZE: usize = 1024; static ENCLAVE_FILE: &'static str = "enclave.signed.so"; extern { fn tls_client_new(eid: sgx_enclave_id_t, retval: *mut usize, fd: c_int, hostname: *const c_char, cert: *const c_char) -> sgx_status_t; fn tls_client_read(eid: sgx_enclave_id_t, retval: *mut c_int, session_id: usize, buf: *mut c_void, cnt: c_int) -> sgx_status_t; fn tls_client_write(eid: sgx_enclave_id_t, retval: *mut c_int, session_id: usize, buf: *const c_void, cnt: c_int) -> sgx_status_t; fn tls_client_wants_read(eid: sgx_enclave_id_t, retval: *mut c_int, session_id: usize) -> sgx_status_t; fn tls_client_wants_write(eid: sgx_enclave_id_t, retval: *mut c_int, session_id: usize) -> sgx_status_t; fn tls_client_close(eid: sgx_enclave_id_t, session_id: usize) -> sgx_status_t; } fn init_enclave() -> SgxResult<SgxEnclave> { let mut launch_token: sgx_launch_token_t = [0; 1024]; let mut launch_token_updated: i32 = 0; // call sgx_create_enclave to initialize an enclave instance // Debug Support: set 2nd parameter to 1 let debug = 1; let mut misc_attr = sgx_misc_attribute_t {secs_attr: sgx_attributes_t { flags:0, xfrm:0}, misc_select:0}; SgxEnclave::create(ENCLAVE_FILE, debug, &mut launch_token, &mut launch_token_updated, &mut misc_attr) } const CLIENT: mio::Token = mio::Token(0); /// This encapsulates the TCP-level connection, some connection /// state, and the underlying TLS-level session. struct TlsClient { enclave_id: sgx_enclave_id_t, socket: TcpStream, closing: bool, tlsclient_id: usize, } impl TlsClient { fn ready(&mut self, poll: &mut mio::Poll, ev: &mio::Event) -> bool { assert_eq!(ev.token(), CLIENT); if ev.readiness().is_error() { println!("Error"); return false; } if ev.readiness().is_readable() { self.do_read(); } if ev.readiness().is_writable()
if self.is_closed() { println!("Connection closed"); return false; } self.reregister(poll); true } } impl TlsClient { fn new(enclave_id: sgx_enclave_id_t, sock: TcpStream, hostname: &str, cert: &str) -> Option<TlsClient> { println!("[+] TlsClient new {} {}", hostname, cert); let mut tlsclient_id: usize = 0xFFFF_FFFF_FFFF_FFFF; let c_host = CString::new(hostname.to_string()).unwrap(); let c_cert = CString::new(cert.to_string()).unwrap(); let retval = unsafe { tls_client_new(enclave_id, &mut tlsclient_id, sock.as_raw_fd(), c_host.as_ptr() as *const c_char, c_cert.as_ptr() as *const c_char) }; if retval != sgx_status_t::SGX_SUCCESS { println!("[-] ECALL Enclave [tls_client_new] Failed {}!", retval); return Option::None; } if tlsclient_id == 0xFFFF_FFFF_FFFF_FFFF { println!("[-] New enclave tlsclient error"); return Option::None; } Option::Some( TlsClient { enclave_id: enclave_id, socket: sock, closing: false, tlsclient_id: tlsclient_id, }) } fn close(&self) { let retval = unsafe { tls_client_close(self.enclave_id, self.tlsclient_id) }; if retval != sgx_status_t::SGX_SUCCESS { println!("[-] ECALL Enclave [tls_client_close] Failed {}!", retval); } } fn read_tls(&self, buf: &mut [u8]) -> isize { let mut retval = -1; let result = unsafe { tls_client_read(self.enclave_id, &mut retval, self.tlsclient_id, buf.as_mut_ptr() as * mut c_void, buf.len() as c_int) }; match result { sgx_status_t::SGX_SUCCESS => { retval as isize } _ => { println!("[-] ECALL Enclave [tls_client_read] Failed {}!", result); -1 } } } fn write_tls(&self, buf: &[u8]) -> isize { let mut retval = -1; let result = unsafe { tls_client_write(self.enclave_id, &mut retval, self.tlsclient_id, buf.as_ptr() as * const c_void, buf.len() as c_int) }; match result { sgx_status_t::SGX_SUCCESS => { retval as isize } _ => { println!("[-] ECALL Enclave [tls_client_write] Failed {}!", result); -1 } } } /// We're ready to do a read. fn do_read(&mut self) { // BUFFER_SIZE = 1024, just for test. // Do read all plaintext, you need to do more ecalls to get buffer size and buffer. let mut plaintext = vec![0; BUFFER_SIZE]; let rc = self.read_tls(plaintext.as_mut_slice()); if rc == -1 { println!("TLS read error: {:?}", rc); self.closing = true; return; } plaintext.resize(rc as usize, 0); io::stdout().write_all(&plaintext).unwrap(); } fn do_write(&mut self) { let buf = Vec::new(); self.write_tls(buf.as_slice()); } fn register(&self, poll: &mut mio::Poll) { poll.register(&self.socket, CLIENT, self.ready_interest(), mio::PollOpt::level() | mio::PollOpt::oneshot()) .unwrap(); } fn reregister(&self, poll: &mut mio::Poll) { poll.reregister(&self.socket, CLIENT, self.ready_interest(), mio::PollOpt::level() | mio::PollOpt::oneshot()) .unwrap(); } fn wants_read(&self) -> bool { let mut retval = -1; let result = unsafe { tls_client_wants_read(self.enclave_id, &mut retval, self.tlsclient_id) }; match result { sgx_status_t::SGX_SUCCESS => { }, _ => { println!("[-] ECALL Enclave [tls_client_wants_read] Failed {}!", result); return false; } } match retval { 0 => false, _ => true } } fn wants_write(&self) -> bool { let mut retval = -1; let result = unsafe { tls_client_wants_write(self.enclave_id, &mut retval, self.tlsclient_id) }; match result { sgx_status_t::SGX_SUCCESS => { }, _ => { println!("[-] ECALL Enclave [tls_client_wants_write] Failed {}!", result); return false; } } match retval { 0 => false, _ => true } } // Use wants_read/wants_write to register for different mio-level // IO readiness events. 
fn ready_interest(&self) -> mio::Ready { let rd = self.wants_read(); let wr = self.wants_write(); if rd && wr { mio::Ready::readable() | mio::Ready::writable() } else if wr { mio::Ready::writable() } else { mio::Ready::readable() } } fn is_closed(&self) -> bool { self.closing } } /// We implement `io::Write` and pass through to the TLS session impl io::Write for TlsClient { fn write(&mut self, bytes: &[u8]) -> io::Result<usize> { Ok(self.write_tls(bytes) as usize) } // unused fn flush(&mut self) -> io::Result<()> { Ok(()) } } impl io::Read for TlsClient { fn read(&mut self, bytes: &mut [u8]) -> io::Result<usize> { Ok(self.read_tls(bytes) as usize) } } fn lookup_ipv4(host: &str, port
{ self.do_write(); }
conditional_block
main.rs
x_urts; use sgx_types::*; use sgx_urts::SgxEnclave; extern crate mio; use mio::tcp::TcpStream; use std::os::unix::io::AsRawFd; use std::ffi::CString; use std::net::SocketAddr; use std::str; use std::io::{self, Write}; const BUFFER_SIZE: usize = 1024; static ENCLAVE_FILE: &'static str = "enclave.signed.so"; extern { fn tls_client_new(eid: sgx_enclave_id_t, retval: *mut usize, fd: c_int, hostname: *const c_char, cert: *const c_char) -> sgx_status_t; fn tls_client_read(eid: sgx_enclave_id_t, retval: *mut c_int, session_id: usize, buf: *mut c_void, cnt: c_int) -> sgx_status_t; fn tls_client_write(eid: sgx_enclave_id_t, retval: *mut c_int, session_id: usize, buf: *const c_void, cnt: c_int) -> sgx_status_t; fn tls_client_wants_read(eid: sgx_enclave_id_t, retval: *mut c_int, session_id: usize) -> sgx_status_t; fn tls_client_wants_write(eid: sgx_enclave_id_t, retval: *mut c_int, session_id: usize) -> sgx_status_t; fn tls_client_close(eid: sgx_enclave_id_t, session_id: usize) -> sgx_status_t; } fn init_enclave() -> SgxResult<SgxEnclave> { let mut launch_token: sgx_launch_token_t = [0; 1024]; let mut launch_token_updated: i32 = 0; // call sgx_create_enclave to initialize an enclave instance // Debug Support: set 2nd parameter to 1 let debug = 1; let mut misc_attr = sgx_misc_attribute_t {secs_attr: sgx_attributes_t { flags:0, xfrm:0}, misc_select:0}; SgxEnclave::create(ENCLAVE_FILE, debug, &mut launch_token, &mut launch_token_updated, &mut misc_attr) } const CLIENT: mio::Token = mio::Token(0); /// This encapsulates the TCP-level connection, some connection /// state, and the underlying TLS-level session. struct TlsClient { enclave_id: sgx_enclave_id_t, socket: TcpStream, closing: bool, tlsclient_id: usize, } impl TlsClient { fn ready(&mut self, poll: &mut mio::Poll, ev: &mio::Event) -> bool { assert_eq!(ev.token(), CLIENT); if ev.readiness().is_error() { println!("Error"); return false; } if ev.readiness().is_readable() { self.do_read(); } if ev.readiness().is_writable() { self.do_write(); } if self.is_closed() { println!("Connection closed"); return false; } self.reregister(poll); true } } impl TlsClient { fn new(enclave_id: sgx_enclave_id_t, sock: TcpStream, hostname: &str, cert: &str) -> Option<TlsClient> { println!("[+] TlsClient new {} {}", hostname, cert); let mut tlsclient_id: usize = 0xFFFF_FFFF_FFFF_FFFF; let c_host = CString::new(hostname.to_string()).unwrap(); let c_cert = CString::new(cert.to_string()).unwrap(); let retval = unsafe { tls_client_new(enclave_id, &mut tlsclient_id, sock.as_raw_fd(), c_host.as_ptr() as *const c_char, c_cert.as_ptr() as *const c_char) }; if retval != sgx_status_t::SGX_SUCCESS { println!("[-] ECALL Enclave [tls_client_new] Failed {}!", retval); return Option::None; } if tlsclient_id == 0xFFFF_FFFF_FFFF_FFFF { println!("[-] New enclave tlsclient error"); return Option::None; } Option::Some( TlsClient { enclave_id: enclave_id, socket: sock, closing: false, tlsclient_id: tlsclient_id, }) } fn close(&self) { let retval = unsafe { tls_client_close(self.enclave_id, self.tlsclient_id) }; if retval != sgx_status_t::SGX_SUCCESS { println!("[-] ECALL Enclave [tls_client_close] Failed {}!", retval); } } fn read_tls(&self, buf: &mut [u8]) -> isize { let mut retval = -1; let result = unsafe { tls_client_read(self.enclave_id, &mut retval, self.tlsclient_id, buf.as_mut_ptr() as * mut c_void, buf.len() as c_int) }; match result { sgx_status_t::SGX_SUCCESS => { retval as isize } _ => { println!("[-] ECALL Enclave [tls_client_read] Failed {}!", result); -1 }
let result = unsafe { tls_client_write(self.enclave_id, &mut retval, self.tlsclient_id, buf.as_ptr() as * const c_void, buf.len() as c_int) }; match result { sgx_status_t::SGX_SUCCESS => { retval as isize } _ => { println!("[-] ECALL Enclave [tls_client_write] Failed {}!", result); -1 } } } /// We're ready to do a read. fn do_read(&mut self) { // BUFFER_SIZE = 1024, just for test. // Do read all plaintext, you need to do more ecalls to get buffer size and buffer. let mut plaintext = vec![0; BUFFER_SIZE]; let rc = self.read_tls(plaintext.as_mut_slice()); if rc == -1 { println!("TLS read error: {:?}", rc); self.closing = true; return; } plaintext.resize(rc as usize, 0); io::stdout().write_all(&plaintext).unwrap(); } fn do_write(&mut self) { let buf = Vec::new(); self.write_tls(buf.as_slice()); } fn register(&self, poll: &mut mio::Poll) { poll.register(&self.socket, CLIENT, self.ready_interest(), mio::PollOpt::level() | mio::PollOpt::oneshot()) .unwrap(); } fn reregister(&self, poll: &mut mio::Poll) { poll.reregister(&self.socket, CLIENT, self.ready_interest(), mio::PollOpt::level() | mio::PollOpt::oneshot()) .unwrap(); } fn wants_read(&self) -> bool { let mut retval = -1; let result = unsafe { tls_client_wants_read(self.enclave_id, &mut retval, self.tlsclient_id) }; match result { sgx_status_t::SGX_SUCCESS => { }, _ => { println!("[-] ECALL Enclave [tls_client_wants_read] Failed {}!", result); return false; } } match retval { 0 => false, _ => true } } fn wants_write(&self) -> bool { let mut retval = -1; let result = unsafe { tls_client_wants_write(self.enclave_id, &mut retval, self.tlsclient_id) }; match result { sgx_status_t::SGX_SUCCESS => { }, _ => { println!("[-] ECALL Enclave [tls_client_wants_write] Failed {}!", result); return false; } } match retval { 0 => false, _ => true } } // Use wants_read/wants_write to register for different mio-level // IO readiness events. fn ready_interest(&self) -> mio::Ready { let rd = self.wants_read(); let wr = self.wants_write(); if rd && wr { mio::Ready::readable() | mio::Ready::writable() } else if wr { mio::Ready::writable() } else { mio::Ready::readable() } } fn is_closed(&self) -> bool { self.closing } } /// We implement `io::Write` and pass through to the TLS session impl io::Write for TlsClient { fn write(&mut self, bytes: &[u8]) -> io::Result<usize> { Ok(self.write_tls(bytes) as usize) } // unused fn flush(&mut self) -> io::Result<()> { Ok(()) } } impl io::Read for TlsClient { fn read(&mut self, bytes: &mut [u8]) -> io::Result<usize> { Ok(self.read_tls(bytes) as usize) } } fn lookup_ipv4(host: &str, port:
} } fn write_tls(&self, buf: &[u8]) -> isize { let mut retval = -1;
random_line_split
mod.rs
had any information sent into it aside from /// what is necessary for the underlying protocol. After the object is created, the `Display` will poll /// the server for setup information. #[inline] pub fn from_connection(connection: Conn, auth: Option<AuthInfo>) -> crate::Result<Self> { let mut d = Self::from_connection_internal(connection); d.init(auth)?; Ok(d) } /// Creates a new `Display` from a connection and authentication info, async redox. See the `from_connection` /// function for more information. #[cfg(feature = "async")] #[inline] pub async fn from_connection_async( connection: Conn, auth: Option<AuthInfo>, ) -> crate::Result<Self> { let mut d = Self::from_connection_internal(connection); d.init_async(auth).await?; Ok(d) } /// Generate the setup from the authentication info. #[inline] fn create_setup(auth: AuthInfo) -> SetupRequest { let AuthInfo { name, data, .. } = auth; SetupRequest { byte_order: endian_byte(), protocol_major_version: 11, protocol_minor_version: 0, authorization_protocol_name: name, authorization_protocol_data: data, } } /// Initialize the setup. #[inline] fn init(&mut self, auth: Option<AuthInfo>) -> crate::Result { let setup = Self::create_setup(match auth { Some(auth) => auth, None => AuthInfo::get(), }); let mut bytes: TinyVec<[u8; 32]> = cycled_zeroes(setup.size()); let len = setup.as_bytes(&mut bytes); bytes.truncate(len); self.connection.send_packet(&bytes[0..len])?; let mut bytes: TinyVec<[u8; 32]> = cycled_zeroes(8); self.connection.read_packet(&mut bytes)?; match bytes[0] { 0 => return Err(crate::BreadError::FailedToConnect), 2 => return Err(crate::BreadError::FailedToAuthorize), _ => (), } // read in the rest of the bytes let length_bytes: [u8; 2] = [bytes[6], bytes[7]]; let length = (u16::from_ne_bytes(length_bytes) as usize) * 4; bytes.extend(iter::once(0).cycle().take(length)); self.connection.read_packet(&mut bytes[8..])?; let (setup, _) = Setup::from_bytes(&bytes).ok_or(crate::BreadError::BadObjectRead(Some("Setup")))?; self.setup = setup; self.xid = XidGenerator::new(self.setup.resource_id_base, self.setup.resource_id_mask); log::debug!("resource_id_base is {:#032b}", self.setup.resource_id_base); log::debug!("resource_id_mask is {:#032b}", self.setup.resource_id_mask); log::debug!( "resource_id inc. is {:#032b}", self.setup.resource_id_mask & self.setup.resource_id_mask.wrapping_neg() ); Ok(()) } /// Initialize the setup, async redox. 
/// /// TODO; lots of copy-pasted code, redo this at some point #[cfg(feature = "async")] #[inline] async fn init_async(&mut self, auth: Option<AuthInfo>) -> crate::Result { let setup = Self::create_setup(match auth { Some(auth) => auth, None => AuthInfo::get_async().await, }); let mut bytes: TinyVec<[u8; 32]> = cycled_zeroes(setup.size()); let len = setup.as_bytes(&mut bytes); bytes.truncate(len); self.connection.send_packet_async(&bytes[0..len]).await?; let mut bytes: TinyVec<[u8; 32]> = cycled_zeroes(8); self.connection.read_packet_async(&mut bytes).await?; match bytes[0] { 0 => return Err(crate::BreadError::FailedToConnect), 2 => return Err(crate::BreadError::FailedToAuthorize), _ => (), } // read in the rest of the bytes let length_bytes: [u8; 2] = [bytes[6], bytes[7]]; let length = (u16::from_ne_bytes(length_bytes) as usize) * 4; bytes.extend(iter::once(0).cycle().take(length)); self.connection.read_packet_async(&mut bytes[8..]).await?; let (setup, _) = Setup::from_bytes(&bytes) .ok_or_else(|| crate::BreadError::BadObjectRead(Some("Setup")))?; self.setup = setup; self.xid = XidGenerator::new(self.setup.resource_id_base, self.setup.resource_id_mask); log::debug!("resource_id_base is {:#032b}", self.setup.resource_id_base); log::debug!("resource_id_mask is {:#032b}", self.setup.resource_id_mask); log::debug!( "resource_id inc. is {:#032b}", self.setup.resource_id_mask & self.setup.resource_id_mask.wrapping_neg() ); Ok(()) } /// Get the setup associates with this display. #[inline] pub fn setup(&self) -> &Setup { &self.setup } #[inline] pub fn default_root(&self) -> Window { self.default_screen().root } #[inline] pub fn default_screen(&self) -> &Screen { &self.setup.roots[self.default_screen] } #[inline] pub fn default_white_pixel(&self) -> u32 { self.default_screen().white_pixel } #[inline] pub fn default_black_pixel(&self) -> u32 { self.default_screen().black_pixel } #[inline] pub fn default_visual_id(&self) -> Visualid { self.default_screen().root_visual } #[inline] pub fn default_visual(&self) -> &Visualtype { self.visual_id_to_visual(self.default_visual_id()).unwrap() } #[inline] pub fn default_colormap(&self) -> Colormap { self.default_screen().default_colormap } /// Get a visual type from a visual ID. #[inline] pub fn visual_id_to_visual(&self, id: Visualid) -> Option<&Visualtype> { self.setup .roots .iter() .flat_map(|s| s.allowed_depths.iter()) .flat_map(|d| d.visuals.iter()) .find(|v| v.visual_id == id) } /// Generate a unique X ID for a window, colormap, or other object. Usually, `Display`'s helper functions /// will generate this for you. If you'd like to circumvent them, this will generate ID's for you. #[inline] pub fn generate_xid(&mut self) -> crate::Result<XID> { Ok(self.xid.next().unwrap()) } /// Wait for an event to be generated by the X server. /// /// This checks the event queue for a new event. If the queue is empty, the `Display` will poll the /// server for new events. #[inline] pub fn wait_for_event(&mut self) -> crate::Result<Event> { log::debug!("Beginning event wait..."); loop { match self.event_queue.pop_front() { Some(event) => break Ok(event), None => self.wait()?, } } } /// Wait for an event to be generated by the X server, async redox. See the `wait_for_event` function for /// more information. 
#[cfg(feature = "async")] #[inline] pub async fn wait_for_event_async(&mut self) -> crate::Result<Event> { loop { match self.event_queue.pop_front() { Some(event) => break Ok(event), None => self.wait_async().await?, } } } /// If there is an event currently in the queue that matches the predicate, returns true. #[inline] pub fn check_if_event<F: FnMut(&Event) -> bool>(&self, predicate: F) -> bool { self.event_queue.iter().any(predicate) } /* /// Save a pointer into this display's map of contexts. #[inline] pub fn save_context(&mut self, xid: XID, context: ContextID, data: NonNull<c_void>) { self.context.insert((xid, context), data); } /// Retrieve a pointer from the context. #[inline] pub fn find_context(&mut self, xid: XID, context: ContextID) -> Option<NonNull<c_void>> { self.context.get(&(xid, context)).copied() } /// Delete an entry in the context. #[inline] pub fn delete_context(&mut self, xid: XID, context: ContextID) { self.context.remove(&(xid, context)); } */ }
/// A variant of `Display` that uses X11's default connection mechanisms to connect to the server. In /// most cases, you should be using this over any other variant of `Display`. #[cfg(feature = "std")] pub type DisplayConnection = Display<name::NameConnection>;
random_line_split
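As a side note on the Display::init handshake in the mod.rs sample above: the client sends the SetupRequest, reads an 8-byte reply header, maps byte 0 to success or failure, and takes bytes 6..8 as the remaining length in 4-byte units. Below is a minimal Python sketch of just that header-parsing step; the function name and error messages are mine, and native byte order is assumed because the request negotiates it through byte_order.

import struct

def parse_setup_reply_header(header: bytes) -> int:
    # Mirrors the checks in Display::init above: byte 0 is the status
    # (0 = connection refused, 2 = further authentication required),
    # bytes 6..8 hold the remaining setup length in units of 4 bytes.
    assert len(header) == 8
    status = header[0]
    if status == 0:
        raise ConnectionError("X server refused the connection")
    if status == 2:
        raise ConnectionError("X server requires further authentication")
    (extra_words,) = struct.unpack("=H", header[6:8])  # native byte order, as negotiated
    return extra_words * 4  # number of additional bytes to read before parsing Setup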
mod.rs
<RequestCookie<R>>> + Send + 'future>> { Box::pin(self.send_request_internal_async(req)) } /// Wait for a request from the X11 server, async redox. See the `resolve_request` function for more /// information. #[cfg(feature = "async")] #[inline] pub async fn resolve_request_async<R: Request>( &mut self, token: RequestCookie<R>, ) -> crate::Result<R::Reply> where R::Reply: Default, { if mem::size_of::<R::Reply>() == 0 { return Ok(Default::default()); } loop { match self.pending_replies.remove(&token.sequence) { Some(reply) => { break Self::decode_reply::<R>(reply); } None => self.wait_async().await?, } } } #[inline] fn from_connection_internal(connection: Conn) -> Self { Self { connection, setup: Default::default(), xid: Default::default(), default_screen: 0, event_queue: VecDeque::with_capacity(8), pending_requests: VecDeque::new(), pending_replies: HashMap::with_capacity(4), request_number: 1, wm_protocols_atom: None, // context: HashMap::new(), extensions: HashMap::with_capacity(8), } } /// Creates a new `Display` from a connection and authentication info. /// /// It is expected that the connection passed in has not had any information sent into it aside from /// what is necessary for the underlying protocol. After the object is created, the `Display` will poll /// the server for setup information. #[inline] pub fn from_connection(connection: Conn, auth: Option<AuthInfo>) -> crate::Result<Self> { let mut d = Self::from_connection_internal(connection); d.init(auth)?; Ok(d) } /// Creates a new `Display` from a connection and authentication info, async redox. See the `from_connection` /// function for more information. #[cfg(feature = "async")] #[inline] pub async fn from_connection_async( connection: Conn, auth: Option<AuthInfo>, ) -> crate::Result<Self> { let mut d = Self::from_connection_internal(connection); d.init_async(auth).await?; Ok(d) } /// Generate the setup from the authentication info. #[inline] fn create_setup(auth: AuthInfo) -> SetupRequest { let AuthInfo { name, data, .. } = auth; SetupRequest { byte_order: endian_byte(), protocol_major_version: 11, protocol_minor_version: 0, authorization_protocol_name: name, authorization_protocol_data: data, } } /// Initialize the setup. #[inline] fn init(&mut self, auth: Option<AuthInfo>) -> crate::Result { let setup = Self::create_setup(match auth { Some(auth) => auth, None => AuthInfo::get(), }); let mut bytes: TinyVec<[u8; 32]> = cycled_zeroes(setup.size()); let len = setup.as_bytes(&mut bytes); bytes.truncate(len); self.connection.send_packet(&bytes[0..len])?; let mut bytes: TinyVec<[u8; 32]> = cycled_zeroes(8); self.connection.read_packet(&mut bytes)?; match bytes[0] { 0 => return Err(crate::BreadError::FailedToConnect), 2 => return Err(crate::BreadError::FailedToAuthorize), _ => (), } // read in the rest of the bytes let length_bytes: [u8; 2] = [bytes[6], bytes[7]]; let length = (u16::from_ne_bytes(length_bytes) as usize) * 4; bytes.extend(iter::once(0).cycle().take(length)); self.connection.read_packet(&mut bytes[8..])?; let (setup, _) = Setup::from_bytes(&bytes).ok_or(crate::BreadError::BadObjectRead(Some("Setup")))?; self.setup = setup; self.xid = XidGenerator::new(self.setup.resource_id_base, self.setup.resource_id_mask); log::debug!("resource_id_base is {:#032b}", self.setup.resource_id_base); log::debug!("resource_id_mask is {:#032b}", self.setup.resource_id_mask); log::debug!( "resource_id inc. 
is {:#032b}", self.setup.resource_id_mask & self.setup.resource_id_mask.wrapping_neg() ); Ok(()) } /// Initialize the setup, async redox. /// /// TODO; lots of copy-pasted code, redo this at some point #[cfg(feature = "async")] #[inline] async fn init_async(&mut self, auth: Option<AuthInfo>) -> crate::Result { let setup = Self::create_setup(match auth { Some(auth) => auth, None => AuthInfo::get_async().await, }); let mut bytes: TinyVec<[u8; 32]> = cycled_zeroes(setup.size()); let len = setup.as_bytes(&mut bytes); bytes.truncate(len); self.connection.send_packet_async(&bytes[0..len]).await?; let mut bytes: TinyVec<[u8; 32]> = cycled_zeroes(8); self.connection.read_packet_async(&mut bytes).await?; match bytes[0] { 0 => return Err(crate::BreadError::FailedToConnect), 2 => return Err(crate::BreadError::FailedToAuthorize), _ => (), } // read in the rest of the bytes let length_bytes: [u8; 2] = [bytes[6], bytes[7]]; let length = (u16::from_ne_bytes(length_bytes) as usize) * 4; bytes.extend(iter::once(0).cycle().take(length)); self.connection.read_packet_async(&mut bytes[8..]).await?; let (setup, _) = Setup::from_bytes(&bytes) .ok_or_else(|| crate::BreadError::BadObjectRead(Some("Setup")))?; self.setup = setup; self.xid = XidGenerator::new(self.setup.resource_id_base, self.setup.resource_id_mask); log::debug!("resource_id_base is {:#032b}", self.setup.resource_id_base); log::debug!("resource_id_mask is {:#032b}", self.setup.resource_id_mask); log::debug!( "resource_id inc. is {:#032b}", self.setup.resource_id_mask & self.setup.resource_id_mask.wrapping_neg() ); Ok(()) } /// Get the setup associates with this display. #[inline] pub fn setup(&self) -> &Setup { &self.setup } #[inline] pub fn default_root(&self) -> Window { self.default_screen().root } #[inline] pub fn default_screen(&self) -> &Screen { &self.setup.roots[self.default_screen] } #[inline] pub fn default_white_pixel(&self) -> u32 { self.default_screen().white_pixel } #[inline] pub fn default_black_pixel(&self) -> u32 { self.default_screen().black_pixel } #[inline] pub fn default_visual_id(&self) -> Visualid { self.default_screen().root_visual } #[inline] pub fn default_visual(&self) -> &Visualtype { self.visual_id_to_visual(self.default_visual_id()).unwrap() } #[inline] pub fn default_colormap(&self) -> Colormap { self.default_screen().default_colormap } /// Get a visual type from a visual ID. #[inline] pub fn visual_id_to_visual(&self, id: Visualid) -> Option<&Visualtype> { self.setup .roots .iter() .flat_map(|s| s.allowed_depths.iter()) .flat_map(|d| d.visuals.iter()) .find(|v| v.visual_id == id) } /// Generate a unique X ID for a window, colormap, or other object. Usually, `Display`'s helper functions /// will generate this for you. If you'd like to circumvent them, this will generate ID's for you. #[inline] pub fn generate_xid(&mut self) -> crate::Result<XID> { Ok(self.xid.next().unwrap()) } /// Wait for an event to be generated by the X server. /// /// This checks the event queue for a new event. If the queue is empty, the `Display` will poll the /// server for new events. #[inline] pub fn wait_for_event(&mut self) -> crate::Result<Event> { log::debug!("Beginning event wait..."); loop { match self.event_queue.pop_front() { Some(event) => break Ok(event), None => self.wait()?, } } } /// Wait for an event to be generated by the X server, async redox. See the `wait_for_event` function for /// more information. #[cfg(feature = "async")] #[inline] pub async fn
wait_for_event_async
identifier_name
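For orientation while reading the rest of this dump: each row stores one source file split into prefix, suffix and middle, with fim_type recording how the middle was chosen. The helper below is only an illustration of that convention; the function and the descriptions are mine, inferred from the rows shown here (for example the identifier_name row above, whose middle is just wait_for_event_async).

def reconstruct(prefix: str, middle: str, suffix: str) -> str:
    # Whatever the fim_type, the original file is recovered as prefix + middle + suffix.
    return prefix + middle + suffix

FIM_TYPES = {
    "random_line_split": "middle is an arbitrary span of source lines",
    "identifier_name": "middle is a single identifier",
    "identifier_body": "middle is the body of a function or method",
    "conditional_block": "middle is the body of one branch of a conditional",
}

assert reconstruct("pub async fn ", "wait_for_event_async", "(&mut self) {}") \
    .startswith("pub async fn wait_for_event_async")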
solvers.py
alpha * Ap rsnew = np.dot(r, r) beta = rsnew / rsold if np.sqrt(rsnew) < rtol: break if beta < 1e-12: # no perceptible change in p break # p = r + beta*p p *= beta p += r rsold = rsnew return x, i+1 def conjgrad(A, Y, sigma, X0=None, maxiters=None, tol=1e-2): """Solve the least-squares system using conjugate gradient.""" Y, m, n, d, matrix_in = _format_system(A, Y) damp = m * sigma**2 rtol = tol * np.sqrt(m) G = lambda x: np.dot(A.T, np.dot(A, x)) + damp * x B = np.dot(A.T, Y) X = np.zeros((n, d)) if X0 is None else np.array(X0).reshape((n, d)) iters = -np.ones(d, dtype='int') for i in range(d): X[:, i], iters[i] = _conjgrad_iters( G, B[:, i], X[:, i], maxiters=maxiters, rtol=rtol) info = {'rmses': _rmses(A, X, Y), 'iterations': iters} return X if matrix_in else X.flatten(), info def block_conjgrad(A, Y, sigma, X0=None, tol=1e-2): """Solve a multiple-RHS least-squares system using block conjuate gradient. """ Y, m, n, d, matrix_in = _format_system(A, Y) sigma = np.asarray(sigma, dtype='float') sigma = sigma.reshape(sigma.size, 1) damp = m * sigma**2 rtol = tol * np.sqrt(m) G = lambda x: np.dot(A.T, np.dot(A, x)) + damp * x B = np.dot(A.T, Y) # --- conjugate gradient X = np.zeros((n, d)) if X0 is None else np.array(X0).reshape((n, d)) R = B - G(X) P = np.array(R) Rsold = np.dot(R.T, R) AP = np.zeros((n, d)) maxiters = int(n / d) for i in range(maxiters): AP = G(P) alpha = np.linalg.solve(np.dot(P.T, AP), Rsold) X += np.dot(P, alpha) R -= np.dot(AP, alpha) Rsnew = np.dot(R.T, R) if (np.diag(Rsnew) < rtol**2).all(): break beta = np.linalg.solve(Rsold, Rsnew) P = R + np.dot(P, beta) Rsold = Rsnew info = {'rmses': _rmses(A, X, Y), 'iterations': i + 1} return X if matrix_in else X.flatten(), info def _format_system(A, Y): m, n = A.shape matrix_in = Y.ndim > 1 d = Y.shape[1] if matrix_in else 1 Y = Y.reshape((Y.shape[0], d)) return Y, m, n, d, matrix_in class Solver(with_metaclass(DocstringInheritor)): """ Decoder or weight solver. """ def __call__(self, A, Y, rng=None, E=None): """Call the solver. Parameters ---------- A : array_like (M, N) Matrix of the N neurons' activities at the M evaluation points Y : array_like (M, D) Matrix of the target decoded values for each of the D dimensions, at each of the M evaluation points. rng : numpy.RandomState, optional A random number generator to use as required. If none is provided, numpy.random will be used. E : array_like (D, N2), optional Array of post-population encoders. Providing this tells the solver to return an array of connection weights rather than decoders. Returns ------- X : np.ndarray (N, D) or (N, N2) (N, D) array of decoders (if solver.weights == False) or (N, N2) array of weights (if solver.weights == True). info : dict A dictionary of information about the solve. All dictionaries have an 'rmses' key that contains RMS errors of the solve. Other keys are unique to particular solvers. 
""" raise NotImplementedError("Solvers must implement '__call__'") def mul_encoders(self, Y, E, copy=False): if self.weights: if E is None: raise ValueError("Encoders must be provided for weight solver") return np.dot(Y, E) else: if E is not None: raise ValueError("Encoders must be 'None' for decoder solver") return Y.copy() if copy else Y def __hash__(self): items = list(self.__dict__.items()) items.sort(key=lambda item: item[0]) hashes = [] for k, v in items: if isinstance(v, np.ndarray): if v.size < 1e5: a = v[:] a.setflags(write=False) hashes.append(hash(a)) else: raise ValueError("array is too large to hash") elif isinstance(v, collections.Iterable): hashes.append(hash(tuple(v))) elif isinstance(v, collections.Callable): hashes.append(hash(v.__code__)) else: hashes.append(hash(v)) return hash(tuple(hashes)) def __str__(self): return "%s(%s)" % ( self.__class__.__name__, ', '.join("%s=%s" % (k, v) for k, v in iteritems(self.__dict__))) class Lstsq(Solver): """Unregularized least-squares""" def __init__(self, weights=False, rcond=0.01): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. rcond : float, optional Cut-off ratio for small singular values (see `numpy.linalg.lstsq`). """ self.rcond = rcond self.weights = weights def __call__(self, A, Y, rng=None, E=None): Y = self.mul_encoders(Y, E) X, residuals2, rank, s = np.linalg.lstsq(A, Y, rcond=self.rcond) return X, {'rmses': _rmses(A, X, Y), 'residuals': np.sqrt(residuals2), 'rank': rank, 'singular_values': s} class _LstsqNoiseSolver(Solver): """Base for least-squares solvers with noise""" def __init__(self, weights=False, noise=0.1, solver=cholesky, **kwargs): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. noise : float, optional Amount of noise, as a fraction of the neuron activity. solver : callable, optional Subsolver to use for solving the least-squares problem. kwargs Additional arguments passed to `solver`. """ self.weights = weights self.noise = noise self.solver = solver self.kwargs = kwargs class LstsqNoise(_LstsqNoiseSolver): """Least-squares with additive Gaussian white noise.""" def __call__(self, A, Y, rng=None, E=None): rng = np.random if rng is None else rng sigma = self.noise * A.max() A = A + rng.normal(scale=sigma, size=A.shape) X, info = self.solver(A, Y, 0, **self.kwargs) return self.mul_encoders(X, E), info class LstsqMultNoise(_LstsqNoiseSolver): """Least-squares with multiplicative white noise.""" def __call__(self, A, Y, rng=None, E=None): rng = np.random if rng is None else rng A = A + rng.normal(scale=self.noise, size=A.shape) * A X, info = self.solver(A, Y, 0, **self.kwargs) return self.mul_encoders(X, E), info class _LstsqL2Solver(Solver): """Base for L2-regularized least-squares solvers""" def __init__(self, weights=False, reg=0.1, solver=cholesky, **kwargs): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. reg : float, optional Amount of regularization, as a fraction of the neuron activity. solver : callable, optional Subsolver to use for solving the least-squares problem. kwargs Additional arguments passed to `solver`. """ self.weights = weights self.reg = reg self.solver = solver self.kwargs = kwargs
class LstsqL2(_LstsqL2Solver): """Least-squares with L2 regularization."""
random_line_split
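The conjgrad and block_conjgrad routines in the solvers.py sample above both iterate on the damped normal equations G(x) = A.T A x + m sigma^2 x = A.T y. Here is a small self-contained numpy check of that system; the variable names x_direct, G and b are illustrative, not part of the module.

import numpy as np

rng = np.random.RandomState(0)
m, n = 50, 10
A = rng.normal(size=(m, n))
y = rng.normal(size=m)
sigma = 0.1
damp = m * sigma**2

# Direct solution of (A^T A + damp*I) x = A^T y, the system CG converges to.
x_direct = np.linalg.solve(A.T.dot(A) + damp * np.eye(n), A.T.dot(y))

# The same system written with the matrix-free operator used by conjgrad/block_conjgrad.
G = lambda x: A.T.dot(A.dot(x)) + damp * x
b = A.T.dot(y)
assert np.allclose(G(x_direct), b)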
solvers.py
rather than decoders. Returns ------- X : np.ndarray (N, D) or (N, N2) (N, D) array of decoders (if solver.weights == False) or (N, N2) array of weights (if solver.weights == True). info : dict A dictionary of information about the solve. All dictionaries have an 'rmses' key that contains RMS errors of the solve. Other keys are unique to particular solvers. """ raise NotImplementedError("Solvers must implement '__call__'") def mul_encoders(self, Y, E, copy=False): if self.weights: if E is None: raise ValueError("Encoders must be provided for weight solver") return np.dot(Y, E) else: if E is not None: raise ValueError("Encoders must be 'None' for decoder solver") return Y.copy() if copy else Y def __hash__(self): items = list(self.__dict__.items()) items.sort(key=lambda item: item[0]) hashes = [] for k, v in items: if isinstance(v, np.ndarray): if v.size < 1e5: a = v[:] a.setflags(write=False) hashes.append(hash(a)) else: raise ValueError("array is too large to hash") elif isinstance(v, collections.Iterable): hashes.append(hash(tuple(v))) elif isinstance(v, collections.Callable): hashes.append(hash(v.__code__)) else: hashes.append(hash(v)) return hash(tuple(hashes)) def __str__(self): return "%s(%s)" % ( self.__class__.__name__, ', '.join("%s=%s" % (k, v) for k, v in iteritems(self.__dict__))) class Lstsq(Solver): """Unregularized least-squares""" def __init__(self, weights=False, rcond=0.01): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. rcond : float, optional Cut-off ratio for small singular values (see `numpy.linalg.lstsq`). """ self.rcond = rcond self.weights = weights def __call__(self, A, Y, rng=None, E=None): Y = self.mul_encoders(Y, E) X, residuals2, rank, s = np.linalg.lstsq(A, Y, rcond=self.rcond) return X, {'rmses': _rmses(A, X, Y), 'residuals': np.sqrt(residuals2), 'rank': rank, 'singular_values': s} class _LstsqNoiseSolver(Solver): """Base for least-squares solvers with noise""" def __init__(self, weights=False, noise=0.1, solver=cholesky, **kwargs): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. noise : float, optional Amount of noise, as a fraction of the neuron activity. solver : callable, optional Subsolver to use for solving the least-squares problem. kwargs Additional arguments passed to `solver`. """ self.weights = weights self.noise = noise self.solver = solver self.kwargs = kwargs class LstsqNoise(_LstsqNoiseSolver): """Least-squares with additive Gaussian white noise.""" def __call__(self, A, Y, rng=None, E=None): rng = np.random if rng is None else rng sigma = self.noise * A.max() A = A + rng.normal(scale=sigma, size=A.shape) X, info = self.solver(A, Y, 0, **self.kwargs) return self.mul_encoders(X, E), info class LstsqMultNoise(_LstsqNoiseSolver): """Least-squares with multiplicative white noise.""" def __call__(self, A, Y, rng=None, E=None): rng = np.random if rng is None else rng A = A + rng.normal(scale=self.noise, size=A.shape) * A X, info = self.solver(A, Y, 0, **self.kwargs) return self.mul_encoders(X, E), info class _LstsqL2Solver(Solver): """Base for L2-regularized least-squares solvers""" def __init__(self, weights=False, reg=0.1, solver=cholesky, **kwargs): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. reg : float, optional Amount of regularization, as a fraction of the neuron activity. solver : callable, optional Subsolver to use for solving the least-squares problem. 
kwargs Additional arguments passed to `solver`. """ self.weights = weights self.reg = reg self.solver = solver self.kwargs = kwargs class LstsqL2(_LstsqL2Solver): """Least-squares with L2 regularization.""" def __call__(self, A, Y, rng=None, E=None): sigma = self.reg * A.max() X, info = self.solver(A, Y, sigma, **self.kwargs) return self.mul_encoders(X, E), info class LstsqL2nz(_LstsqL2Solver): """Least-squares with L2 regularization on non-zero components.""" def __call__(self, A, Y, rng=None, E=None): # Compute the equivalent noise standard deviation. This equals the # base amplitude (noise_amp times the overall max activation) times # the square-root of the fraction of non-zero components. sigma = (self.reg * A.max()) * np.sqrt((A > 0).mean(axis=0)) # sigma == 0 means the neuron is never active, so won't be used, but # we have to make sigma != 0 for numeric reasons. sigma[sigma == 0] = sigma.max() X, info = self.solver(A, Y, sigma, **self.kwargs) return self.mul_encoders(X, E), info class LstsqL1(Solver): """Least-squares with L1 and L2 regularization (elastic net). This method is well suited for creating sparse decoders or weight matrices. """ def __init__(self, weights=False, l1=1e-4, l2=1e-6): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. l1 : float, optional Amount of L1 regularization. l2 : float, optional Amount of L2 regularization. """ import sklearn.linear_model # noqa F401, import to check existence assert sklearn.linear_model self.weights = weights self.l1 = l1 self.l2 = l2 def __call__(self, A, Y, rng=None, E=None): import sklearn.linear_model Y = self.mul_encoders(Y, E, copy=True) # copy since 'fit' may modify Y # TODO: play around with regularization constants (I just guessed). # Do we need to scale regularization by number of neurons, to get # same level of sparsity? esp. with weights? Currently, setting # l1=1e-3 works well with weights when connecting 1D populations # with 100 neurons each. a = self.l1 * A.max() # L1 regularization b = self.l2 * A.max()**2 # L2 regularization alpha = a + b l1_ratio = a / (a + b) # --- solve least-squares A * X = Y model = sklearn.linear_model.ElasticNet( alpha=alpha, l1_ratio=l1_ratio, fit_intercept=False, max_iter=1000) model.fit(A, Y) X = model.coef_.T X.shape = (A.shape[1], Y.shape[1]) if Y.ndim > 1 else (A.shape[1],) infos = {'rmses': _rmses(A, X, Y)} return X, infos class LstsqDrop(Solver): """Find sparser decoders/weights by dropping small values. This solver first solves for coefficients (decoders/weights) with L2 regularization, drops those nearest to zero, and retrains remaining. """ def __init__(self, weights=False, drop=0.25, solver1=LstsqL2nz(reg=0.1), solver2=LstsqL2nz(reg=0.01)):
""" weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. drop : float, optional Fraction of decoders or weights to set to zero. solver1 : Solver, optional Solver for finding the initial decoders. solver2 : Solver, optional Used for re-solving for the decoders after dropout. """ self.weights = weights self.drop = drop self.solver1 = solver1 self.solver2 = solver2
identifier_body
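The LstsqL2nz solver shown a little earlier scales its regularization per neuron by how often each neuron is active. A short numpy sketch of just that sigma computation; the toy activity matrix is made up for illustration.

import numpy as np

rng = np.random.RandomState(0)
A = np.maximum(rng.normal(size=(100, 5)), 0)  # rectified "activities"; some entries are exactly zero
reg = 0.1

# Per-neuron damping: base amplitude times sqrt of the fraction of nonzero activations.
sigma = (reg * A.max()) * np.sqrt((A > 0).mean(axis=0))

# Never-active neurons would give sigma == 0; keep it nonzero for numerical reasons,
# as the module's comment explains.
sigma[sigma == 0] = sigma.max()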
solvers.py
r + beta*p p *= beta p += r rsold = rsnew return x, i+1 def conjgrad(A, Y, sigma, X0=None, maxiters=None, tol=1e-2): """Solve the least-squares system using conjugate gradient.""" Y, m, n, d, matrix_in = _format_system(A, Y) damp = m * sigma**2 rtol = tol * np.sqrt(m) G = lambda x: np.dot(A.T, np.dot(A, x)) + damp * x B = np.dot(A.T, Y) X = np.zeros((n, d)) if X0 is None else np.array(X0).reshape((n, d)) iters = -np.ones(d, dtype='int') for i in range(d): X[:, i], iters[i] = _conjgrad_iters( G, B[:, i], X[:, i], maxiters=maxiters, rtol=rtol) info = {'rmses': _rmses(A, X, Y), 'iterations': iters} return X if matrix_in else X.flatten(), info def block_conjgrad(A, Y, sigma, X0=None, tol=1e-2): """Solve a multiple-RHS least-squares system using block conjuate gradient. """ Y, m, n, d, matrix_in = _format_system(A, Y) sigma = np.asarray(sigma, dtype='float') sigma = sigma.reshape(sigma.size, 1) damp = m * sigma**2 rtol = tol * np.sqrt(m) G = lambda x: np.dot(A.T, np.dot(A, x)) + damp * x B = np.dot(A.T, Y) # --- conjugate gradient X = np.zeros((n, d)) if X0 is None else np.array(X0).reshape((n, d)) R = B - G(X) P = np.array(R) Rsold = np.dot(R.T, R) AP = np.zeros((n, d)) maxiters = int(n / d) for i in range(maxiters): AP = G(P) alpha = np.linalg.solve(np.dot(P.T, AP), Rsold) X += np.dot(P, alpha) R -= np.dot(AP, alpha) Rsnew = np.dot(R.T, R) if (np.diag(Rsnew) < rtol**2).all(): break beta = np.linalg.solve(Rsold, Rsnew) P = R + np.dot(P, beta) Rsold = Rsnew info = {'rmses': _rmses(A, X, Y), 'iterations': i + 1} return X if matrix_in else X.flatten(), info def _format_system(A, Y): m, n = A.shape matrix_in = Y.ndim > 1 d = Y.shape[1] if matrix_in else 1 Y = Y.reshape((Y.shape[0], d)) return Y, m, n, d, matrix_in class Solver(with_metaclass(DocstringInheritor)): """ Decoder or weight solver. """ def __call__(self, A, Y, rng=None, E=None): """Call the solver. Parameters ---------- A : array_like (M, N) Matrix of the N neurons' activities at the M evaluation points Y : array_like (M, D) Matrix of the target decoded values for each of the D dimensions, at each of the M evaluation points. rng : numpy.RandomState, optional A random number generator to use as required. If none is provided, numpy.random will be used. E : array_like (D, N2), optional Array of post-population encoders. Providing this tells the solver to return an array of connection weights rather than decoders. Returns ------- X : np.ndarray (N, D) or (N, N2) (N, D) array of decoders (if solver.weights == False) or (N, N2) array of weights (if solver.weights == True). info : dict A dictionary of information about the solve. All dictionaries have an 'rmses' key that contains RMS errors of the solve. Other keys are unique to particular solvers. 
""" raise NotImplementedError("Solvers must implement '__call__'") def mul_encoders(self, Y, E, copy=False): if self.weights: if E is None: raise ValueError("Encoders must be provided for weight solver") return np.dot(Y, E) else: if E is not None: raise ValueError("Encoders must be 'None' for decoder solver") return Y.copy() if copy else Y def __hash__(self): items = list(self.__dict__.items()) items.sort(key=lambda item: item[0]) hashes = [] for k, v in items: if isinstance(v, np.ndarray): if v.size < 1e5: a = v[:] a.setflags(write=False) hashes.append(hash(a)) else: raise ValueError("array is too large to hash") elif isinstance(v, collections.Iterable): hashes.append(hash(tuple(v))) elif isinstance(v, collections.Callable): hashes.append(hash(v.__code__)) else: hashes.append(hash(v)) return hash(tuple(hashes)) def __str__(self): return "%s(%s)" % ( self.__class__.__name__, ', '.join("%s=%s" % (k, v) for k, v in iteritems(self.__dict__))) class Lstsq(Solver): """Unregularized least-squares""" def __init__(self, weights=False, rcond=0.01): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. rcond : float, optional Cut-off ratio for small singular values (see `numpy.linalg.lstsq`). """ self.rcond = rcond self.weights = weights def __call__(self, A, Y, rng=None, E=None): Y = self.mul_encoders(Y, E) X, residuals2, rank, s = np.linalg.lstsq(A, Y, rcond=self.rcond) return X, {'rmses': _rmses(A, X, Y), 'residuals': np.sqrt(residuals2), 'rank': rank, 'singular_values': s} class _LstsqNoiseSolver(Solver): """Base for least-squares solvers with noise""" def __init__(self, weights=False, noise=0.1, solver=cholesky, **kwargs): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. noise : float, optional Amount of noise, as a fraction of the neuron activity. solver : callable, optional Subsolver to use for solving the least-squares problem. kwargs Additional arguments passed to `solver`. """ self.weights = weights self.noise = noise self.solver = solver self.kwargs = kwargs class LstsqNoise(_LstsqNoiseSolver): """Least-squares with additive Gaussian white noise.""" def __call__(self, A, Y, rng=None, E=None): rng = np.random if rng is None else rng sigma = self.noise * A.max() A = A + rng.normal(scale=sigma, size=A.shape) X, info = self.solver(A, Y, 0, **self.kwargs) return self.mul_encoders(X, E), info class LstsqMultNoise(_LstsqNoiseSolver): """Least-squares with multiplicative white noise.""" def __call__(self, A, Y, rng=None, E=None): rng = np.random if rng is None else rng A = A + rng.normal(scale=self.noise, size=A.shape) * A X, info = self.solver(A, Y, 0, **self.kwargs) return self.mul_encoders(X, E), info class _LstsqL2Solver(Solver): """Base for L2-regularized least-squares solvers""" def __init__(self, weights=False, reg=0.1, solver=cholesky, **kwargs): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. reg : float, optional Amount of regularization, as a fraction of the neuron activity. solver : callable, optional Subsolver to use for solving the least-squares problem. kwargs Additional arguments passed to `solver`. """ self.weights = weights self.reg = reg self.solver = solver self.kwargs = kwargs class LstsqL2(_LstsqL2Solver): """Least-squares with L2 regularization.""" def __call__(self, A, Y, rng=None, E=None): sigma = self.reg * A.max() X, info = self.solver(A, Y, sigma, **self.kwargs) return self.mul_encoders(X, E), info class
LstsqL2nz
identifier_name
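LstsqNoise and LstsqMultNoise in the prefix above regularize by perturbing the activity matrix before an unregularized solve. The two perturbations side by side, as a small numpy sketch; the matrix and noise level are made up.

import numpy as np

rng = np.random.RandomState(0)
A = np.abs(rng.normal(size=(100, 20)))
noise = 0.1

# Additive noise (LstsqNoise): standard deviation scaled to the peak activity.
A_add = A + rng.normal(scale=noise * A.max(), size=A.shape)

# Multiplicative noise (LstsqMultNoise): each activity jittered in proportion to itself.
A_mult = A + rng.normal(scale=noise, size=A.shape) * A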
solvers.py
_, _ = scipy.sparse.linalg.lsmr( A, Y[:, i], damp=damp, atol=tol, btol=tol) info = {'rmses': _rmses(A, X, Y), 'iterations': itns} return X if matrix_in else X.flatten(), info def _conjgrad_iters(calcAx, b, x, maxiters=None, rtol=1e-6): """Solve the single-RHS linear system using conjugate gradient.""" if maxiters is None: maxiters = b.shape[0] r = b - calcAx(x) p = r.copy() rsold = np.dot(r, r) for i in range(maxiters): Ap = calcAx(p) alpha = rsold / np.dot(p, Ap) x += alpha * p r -= alpha * Ap rsnew = np.dot(r, r) beta = rsnew / rsold if np.sqrt(rsnew) < rtol: break if beta < 1e-12: # no perceptible change in p break # p = r + beta*p p *= beta p += r rsold = rsnew return x, i+1 def conjgrad(A, Y, sigma, X0=None, maxiters=None, tol=1e-2): """Solve the least-squares system using conjugate gradient.""" Y, m, n, d, matrix_in = _format_system(A, Y) damp = m * sigma**2 rtol = tol * np.sqrt(m) G = lambda x: np.dot(A.T, np.dot(A, x)) + damp * x B = np.dot(A.T, Y) X = np.zeros((n, d)) if X0 is None else np.array(X0).reshape((n, d)) iters = -np.ones(d, dtype='int') for i in range(d): X[:, i], iters[i] = _conjgrad_iters( G, B[:, i], X[:, i], maxiters=maxiters, rtol=rtol) info = {'rmses': _rmses(A, X, Y), 'iterations': iters} return X if matrix_in else X.flatten(), info def block_conjgrad(A, Y, sigma, X0=None, tol=1e-2): """Solve a multiple-RHS least-squares system using block conjuate gradient. """ Y, m, n, d, matrix_in = _format_system(A, Y) sigma = np.asarray(sigma, dtype='float') sigma = sigma.reshape(sigma.size, 1) damp = m * sigma**2 rtol = tol * np.sqrt(m) G = lambda x: np.dot(A.T, np.dot(A, x)) + damp * x B = np.dot(A.T, Y) # --- conjugate gradient X = np.zeros((n, d)) if X0 is None else np.array(X0).reshape((n, d)) R = B - G(X) P = np.array(R) Rsold = np.dot(R.T, R) AP = np.zeros((n, d)) maxiters = int(n / d) for i in range(maxiters): AP = G(P) alpha = np.linalg.solve(np.dot(P.T, AP), Rsold) X += np.dot(P, alpha) R -= np.dot(AP, alpha) Rsnew = np.dot(R.T, R) if (np.diag(Rsnew) < rtol**2).all(): break beta = np.linalg.solve(Rsold, Rsnew) P = R + np.dot(P, beta) Rsold = Rsnew info = {'rmses': _rmses(A, X, Y), 'iterations': i + 1} return X if matrix_in else X.flatten(), info def _format_system(A, Y): m, n = A.shape matrix_in = Y.ndim > 1 d = Y.shape[1] if matrix_in else 1 Y = Y.reshape((Y.shape[0], d)) return Y, m, n, d, matrix_in class Solver(with_metaclass(DocstringInheritor)): """ Decoder or weight solver. """ def __call__(self, A, Y, rng=None, E=None): """Call the solver. Parameters ---------- A : array_like (M, N) Matrix of the N neurons' activities at the M evaluation points Y : array_like (M, D) Matrix of the target decoded values for each of the D dimensions, at each of the M evaluation points. rng : numpy.RandomState, optional A random number generator to use as required. If none is provided, numpy.random will be used. E : array_like (D, N2), optional Array of post-population encoders. Providing this tells the solver to return an array of connection weights rather than decoders. Returns ------- X : np.ndarray (N, D) or (N, N2) (N, D) array of decoders (if solver.weights == False) or (N, N2) array of weights (if solver.weights == True). info : dict A dictionary of information about the solve. All dictionaries have an 'rmses' key that contains RMS errors of the solve. Other keys are unique to particular solvers. 
""" raise NotImplementedError("Solvers must implement '__call__'") def mul_encoders(self, Y, E, copy=False): if self.weights: if E is None: raise ValueError("Encoders must be provided for weight solver") return np.dot(Y, E) else: if E is not None: raise ValueError("Encoders must be 'None' for decoder solver") return Y.copy() if copy else Y def __hash__(self): items = list(self.__dict__.items()) items.sort(key=lambda item: item[0]) hashes = [] for k, v in items: if isinstance(v, np.ndarray): if v.size < 1e5: a = v[:] a.setflags(write=False) hashes.append(hash(a)) else:
elif isinstance(v, collections.Iterable): hashes.append(hash(tuple(v))) elif isinstance(v, collections.Callable): hashes.append(hash(v.__code__)) else: hashes.append(hash(v)) return hash(tuple(hashes)) def __str__(self): return "%s(%s)" % ( self.__class__.__name__, ', '.join("%s=%s" % (k, v) for k, v in iteritems(self.__dict__))) class Lstsq(Solver): """Unregularized least-squares""" def __init__(self, weights=False, rcond=0.01): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. rcond : float, optional Cut-off ratio for small singular values (see `numpy.linalg.lstsq`). """ self.rcond = rcond self.weights = weights def __call__(self, A, Y, rng=None, E=None): Y = self.mul_encoders(Y, E) X, residuals2, rank, s = np.linalg.lstsq(A, Y, rcond=self.rcond) return X, {'rmses': _rmses(A, X, Y), 'residuals': np.sqrt(residuals2), 'rank': rank, 'singular_values': s} class _LstsqNoiseSolver(Solver): """Base for least-squares solvers with noise""" def __init__(self, weights=False, noise=0.1, solver=cholesky, **kwargs): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. noise : float, optional Amount of noise, as a fraction of the neuron activity. solver : callable, optional Subsolver to use for solving the least-squares problem. kwargs Additional arguments passed to `solver`. """ self.weights = weights self.noise = noise self.solver = solver self.kwargs = kwargs class LstsqNoise(_LstsqNoiseSolver): """Least-squares with additive Gaussian white noise.""" def __call__(self, A, Y, rng=None, E=None): rng = np.random if rng is None else rng sigma = self.noise * A.max() A = A + rng.normal(scale=sigma, size=A.shape) X, info = self.solver(A, Y, 0, **self.kwargs) return self.mul_encoders(X, E), info class LstsqMultNoise(_LstsqNoiseSolver): """Least-squares with multiplicative white noise.""" def __call__(self, A, Y, rng=None, E=None): rng = np.random if rng is None else rng A = A + rng.normal(scale=self.noise, size=A.shape) * A X, info = self.solver(A, Y, 0, **self.kwargs) return self
raise ValueError("array is too large to hash")
conditional_block
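The Solver.__call__ docstring above fixes the shape contract shared by all of these solvers; a quick shape check makes it concrete (the sizes are arbitrary).

import numpy as np

M, N, D, N2 = 200, 50, 3, 40
A = np.random.rand(M, N)   # neuron activities at the evaluation points
Y = np.random.rand(M, D)   # target decoded values
E = np.random.rand(D, N2)  # post-population encoders (only for weight solvers)

X = np.zeros((N, D))       # a decoder solve returns (N, D)
W = np.dot(X, E)           # mul_encoders-style projection turns it into (N, N2) weights
assert W.shape == (N, N2)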
models.py
} seconds afters {tries} tries " "calling function {target} with args {args} and kwargs " "{kwargs}".format(**details)) feed = details['args'][0] wait = details['wait'] notification = Notification(feed=feed, owner=feed.owner, title='BackOff', message=f'Feed: {feed.id}, {feed.link} failed to update, retrying in {wait:0.1f}', is_error=True) notification.save() @dramatiq.actor def feed_update_failure(message_data, exception_data): """ A dramatiq callback on each failed attempt for a feed update the user will notified by inserting a notification in the db only on the final failure TODO: log all errors to somewhere for metrics and analysis """ feed_id = message_data['args'][0] feed = Feed.objects.get(pk=feed_id) # mark feed as failed to update and stop updateing it automatically feed.flagged = True feed.save() notification = Notification(feed=feed, owner=feed.owner, title=exception_data['type'], message=exception_data['message']+f'[Feed: {feed.id}, {feed.link}]', is_error=True) notification.save() print("dramatiq callback: feed update error") @dramatiq.actor def feed_update_success(message_data, result): """ A dramatiq callback on successful attempt for a feed update the user will notified by inserting a notification in the db marks the feed as not flagged TODO ??maybe log this also for checking failure/success rates? """ feed_id = message_data['args'][0] feed = Feed.objects.get(pk=feed_id) feed.flagged = False feed.save() notification = Notification(feed=feed, owner=feed.owner, title='FeedUpdated', message=f'Feed: {feed.id}, {feed.link}, {feed.updated_at}]', is_error=False) notification.save() print("dramatiq callback: : feed update success") # Exceptions ################################################# class FeedError(Exception): """ An error occurred when fetching the feed If it was parsed despite the error, the feed and entries will be available: e.feed None if not parsed e.entries Empty list if not parsed """ def __init__(self, *args, **kwargs): super(FeedError, self).__init__(*args, **kwargs) # End: Exceptions ################################################# # Feed ################################################# class Feed(models.Model): ''' The feeds model describes a registered field. 
Its contains feed related information as well as user related info and other meta data ''' link = models.URLField(max_length = 200) title = models.CharField(max_length=200, null=True) subtitle = models.CharField(max_length=200, null=True) description = models.TextField(null=True) language = models.CharField(max_length=5, null=True) copyright = models.CharField(max_length=50, null=True) ttl = models.PositiveIntegerField(null=True) atomLogo = models.URLField(max_length = 200, null=True) pubdate = models.DateTimeField(null=True) nickname = models.CharField(max_length=60) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) following = models.BooleanField(default=True) flagged = models.BooleanField(default=False) owner = models.ForeignKey('auth.User', related_name='feeds', on_delete=models.CASCADE) class Meta: verbose_name = ("Feed") verbose_name_plural = ("Feeds") ordering = ('-updated_at',) unique_together = ('link', 'owner') objects = managers.FeedManager() def __str__(self): return f'Nickname: {self.nickname}' def save(self, *args, **kwargs): # assure minimum required fields assert self.link assert self.nickname super(Feed, self).save(*args, **kwargs) assert self.id > 0 return def force_update(self, *args, **kwargs): ''' force updates a feed using a async call to the _updateFeed method ''' print(f'Forcing update [Feed ID: {self.id}], Nickname: {self.nickname}] ...') self._updateFeed.send_with_options(args=(self.id,), on_failure=feed_update_failure, on_success=feed_update_success) return @backoff.on_exception(backoff.expo, FeedError, max_tries=MAX_FEED_UPDATE_RETRIES, on_backoff=backoff_hdlr) def _fetch_feed(self): ''' internal method to get feed details from the link provided in self returns raw feed and entry details as returned by the feedparser library ''' # Request and parse the feed link = self.link d = feedparser.parse(link) status = d.get('status', 200) feed = d.get('feed', None) entries = d.get('entries', None) if status in (200, 302, 304, 307): if ( feed is None or 'title' not in feed or 'link' not in feed ): raise FeedError('Feed parsed but with invalid contents') return feed, entries if status in (404, 500, 502, 503, 504): raise FeedError('Temporary error %s' % status) # Follow permanent redirection if status == 301: # Avoid circular redirection self.link = d.get('href', self.link) return self._fetch_feed() if status == 410: raise FeedError('Feed has gone') # Unknown status raise FeedError('Unrecognised HTTP status %s' % status) @dramatiq.actor(max_retries=0, max_age=10000)#, throws=FeedError) @transaction.atomic def _updateFeed(pk): """ An internal function that fetches a feed and parses it into the Feed object for the DB """ feed = get_object_or_404(Feed, pk=pk) rawFeed, entries = feed._fetch_feed() feed.title = rawFeed.get('title', None) feed.subtitle = rawFeed.get('subtitle', None) feed.copyright = rawFeed.get('rights', None) feed.ttl = rawFeed.get('ttl', None) feed.atomLogo = rawFeed.get('logo', None) # Try to find the updated time updated = rawFeed.get( 'updated_parsed', rawFeed.get('published_parsed', None), ) if updated: updated = datetime.datetime.fromtimestamp( time.mktime(updated) ) feed.pubdate = updated super(Feed, feed).save() if entries: dbEntriesCreate = [] dbEntriesupdate = [] for raw_entry in entries: entry = Entry.objects.parseFromFeed(raw_entry) entry.feed = feed try: newEntry = Entry.objects.get(guid=entry.guid, feed=feed) except: newEntry = None if newEntry: # if it was updated, then mark it as unread, 
otherwise no need to do anything if newEntry.date > entry.date: entry.state = ENTRY_UNREAD id = newEntry.id newEntry = entry newEntry.id = id dbEntriesupdate.append(newEntry) else:
with transaction.atomic(): if len(dbEntriesCreate)>0: Entry.objects.bulk_create(dbEntriesCreate) if len(dbEntriesupdate)>0: fields = ['feed', 'state', 'title' , 'content', 'date', 'author', 'url' ,'comments_url'] Entry.objects.bulk_update(dbEntriesupdate, fields) return # Enrty ################################################# class Entry(models.Model): """ Represents a feed entry object If creating from a feedparser entry, use Entry.objects.parseFromFeed() """ feed = models.ForeignKey(Feed, related_name='feed', on_delete=models.CASCADE) state = models.IntegerField(default=ENTRY_UNREAD, choices=( (ENTRY_UNREAD, 'Unread'), (ENTRY_READ, 'Read'), )) # Compulsory data fields title = models.TextField(blank=True) content = models.TextField(blank=True) date = models.DateTimeField( help_text="When this entry says it was published", ) # Optional data fields author = models.TextField(blank=True) url = models.TextField( blank=True, validators=[URLValidator()], help_text="URL for the HTML for this entry", ) comments_url = models.TextField( blank=True, validators=[URLValidator()], help_text="URL for HTML comment submission page", ) guid = models.TextField( blank=True, help_text="GUID for the entry, according to the feed", ) last_updated = models.DateTimeField(auto_now=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) objects = managers.EntryManager() def __unicode__(self): return self.title def save(self, *args, **kwargs): # Default the date if self
dbEntriesCreate.append(entry)
conditional_block
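_fetch_feed in the prefix above is wrapped in backoff.on_exception, and backoff_hdlr receives a details dict with keys such as wait and tries on every retry. A minimal standalone sketch of that retry pattern follows; the TransientError class and flaky_fetch function are invented for the example.

import backoff

class TransientError(Exception):
    pass

def log_retry(details):
    # Same keys backoff_hdlr formats above: wait, tries, target, args, kwargs.
    print("retrying {target} in {wait:0.1f}s (attempt {tries})".format(**details))

@backoff.on_exception(backoff.expo, TransientError, max_tries=3, on_backoff=log_retry)
def flaky_fetch(url):
    raise TransientError("temporary error 503")

# flaky_fetch("http://example.com/feed")  # logs two retries, then raises TransientError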
models.py
} seconds afters {tries} tries " "calling function {target} with args {args} and kwargs " "{kwargs}".format(**details)) feed = details['args'][0] wait = details['wait'] notification = Notification(feed=feed, owner=feed.owner, title='BackOff', message=f'Feed: {feed.id}, {feed.link} failed to update, retrying in {wait:0.1f}', is_error=True) notification.save() @dramatiq.actor def feed_update_failure(message_data, exception_data): """ A dramatiq callback on each failed attempt for a feed update the user will notified by inserting a notification in the db only on the final failure TODO: log all errors to somewhere for metrics and analysis """ feed_id = message_data['args'][0] feed = Feed.objects.get(pk=feed_id) # mark feed as failed to update and stop updateing it automatically feed.flagged = True feed.save() notification = Notification(feed=feed, owner=feed.owner, title=exception_data['type'], message=exception_data['message']+f'[Feed: {feed.id}, {feed.link}]', is_error=True) notification.save() print("dramatiq callback: feed update error") @dramatiq.actor def feed_update_success(message_data, result): """ A dramatiq callback on successful attempt for a feed update the user will notified by inserting a notification in the db marks the feed as not flagged TODO ??maybe log this also for checking failure/success rates? """ feed_id = message_data['args'][0] feed = Feed.objects.get(pk=feed_id) feed.flagged = False feed.save() notification = Notification(feed=feed, owner=feed.owner, title='FeedUpdated', message=f'Feed: {feed.id}, {feed.link}, {feed.updated_at}]', is_error=False) notification.save() print("dramatiq callback: : feed update success") # Exceptions ################################################# class FeedError(Exception): """ An error occurred when fetching the feed If it was parsed despite the error, the feed and entries will be available: e.feed None if not parsed e.entries Empty list if not parsed """ def __init__(self, *args, **kwargs): super(FeedError, self).__init__(*args, **kwargs) # End: Exceptions ################################################# # Feed ################################################# class Feed(models.Model): ''' The feeds model describes a registered field. 
Its contains feed related information as well as user related info and other meta data ''' link = models.URLField(max_length = 200) title = models.CharField(max_length=200, null=True) subtitle = models.CharField(max_length=200, null=True) description = models.TextField(null=True) language = models.CharField(max_length=5, null=True) copyright = models.CharField(max_length=50, null=True) ttl = models.PositiveIntegerField(null=True) atomLogo = models.URLField(max_length = 200, null=True) pubdate = models.DateTimeField(null=True) nickname = models.CharField(max_length=60) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) following = models.BooleanField(default=True) flagged = models.BooleanField(default=False) owner = models.ForeignKey('auth.User', related_name='feeds', on_delete=models.CASCADE) class Meta: verbose_name = ("Feed") verbose_name_plural = ("Feeds") ordering = ('-updated_at',) unique_together = ('link', 'owner') objects = managers.FeedManager() def __str__(self): return f'Nickname: {self.nickname}' def save(self, *args, **kwargs): # assure minimum required fields assert self.link assert self.nickname super(Feed, self).save(*args, **kwargs) assert self.id > 0 return def force_update(self, *args, **kwargs): ''' force updates a feed using a async call to the _updateFeed method ''' print(f'Forcing update [Feed ID: {self.id}], Nickname: {self.nickname}] ...') self._updateFeed.send_with_options(args=(self.id,), on_failure=feed_update_failure, on_success=feed_update_success) return @backoff.on_exception(backoff.expo, FeedError, max_tries=MAX_FEED_UPDATE_RETRIES, on_backoff=backoff_hdlr) def _fetch_feed(self): ''' internal method to get feed details from the link provided in self returns raw feed and entry details as returned by the feedparser library ''' # Request and parse the feed link = self.link d = feedparser.parse(link) status = d.get('status', 200) feed = d.get('feed', None) entries = d.get('entries', None) if status in (200, 302, 304, 307): if ( feed is None or 'title' not in feed or 'link' not in feed ): raise FeedError('Feed parsed but with invalid contents') return feed, entries if status in (404, 500, 502, 503, 504): raise FeedError('Temporary error %s' % status) # Follow permanent redirection if status == 301: # Avoid circular redirection self.link = d.get('href', self.link) return self._fetch_feed() if status == 410: raise FeedError('Feed has gone') # Unknown status raise FeedError('Unrecognised HTTP status %s' % status) @dramatiq.actor(max_retries=0, max_age=10000)#, throws=FeedError) @transaction.atomic def _updateFeed(pk): """ An internal function that fetches a feed and parses it into the Feed object for the DB """ feed = get_object_or_404(Feed, pk=pk) rawFeed, entries = feed._fetch_feed() feed.title = rawFeed.get('title', None) feed.subtitle = rawFeed.get('subtitle', None) feed.copyright = rawFeed.get('rights', None) feed.ttl = rawFeed.get('ttl', None) feed.atomLogo = rawFeed.get('logo', None) # Try to find the updated time updated = rawFeed.get( 'updated_parsed', rawFeed.get('published_parsed', None), ) if updated: updated = datetime.datetime.fromtimestamp( time.mktime(updated) ) feed.pubdate = updated super(Feed, feed).save() if entries: dbEntriesCreate = [] dbEntriesupdate = [] for raw_entry in entries: entry = Entry.objects.parseFromFeed(raw_entry) entry.feed = feed try: newEntry = Entry.objects.get(guid=entry.guid, feed=feed) except: newEntry = None if newEntry: # if it was updated, then mark it as unread, 
otherwise no need to do anything if newEntry.date > entry.date: entry.state = ENTRY_UNREAD id = newEntry.id newEntry = entry newEntry.id = id dbEntriesupdate.append(newEntry) else: dbEntriesCreate.append(entry) with transaction.atomic(): if len(dbEntriesCreate)>0: Entry.objects.bulk_create(dbEntriesCreate) if len(dbEntriesupdate)>0: fields = ['feed', 'state', 'title' , 'content', 'date', 'author', 'url' ,'comments_url'] Entry.objects.bulk_update(dbEntriesupdate, fields) return # Enrty ################################################# class Entry(models.Model): """ Represents a feed entry object If creating from a feedparser entry, use Entry.objects.parseFromFeed() """ feed = models.ForeignKey(Feed, related_name='feed', on_delete=models.CASCADE) state = models.IntegerField(default=ENTRY_UNREAD, choices=( (ENTRY_UNREAD, 'Unread'), (ENTRY_READ, 'Read'), )) # Compulsory data fields title = models.TextField(blank=True) content = models.TextField(blank=True) date = models.DateTimeField( help_text="When this entry says it was published", ) # Optional data fields author = models.TextField(blank=True) url = models.TextField(
) comments_url = models.TextField( blank=True, validators=[URLValidator()], help_text="URL for HTML comment submission page", ) guid = models.TextField( blank=True, help_text="GUID for the entry, according to the feed", ) last_updated = models.DateTimeField(auto_now=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) objects = managers.EntryManager() def __unicode__(self): return self.title def save(self, *args, **kwargs): # Default the date if self.date
blank=True, validators=[URLValidator()], help_text="URL for the HTML for this entry",
random_line_split
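force_update above enqueues _updateFeed with dramatiq's send_with_options and per-message success/failure callbacks. A pared-down sketch of the same pattern follows; broker configuration and the Callbacks middleware are assumed to be set up elsewhere, and the actor and callback names here are invented.

import dramatiq

@dramatiq.actor
def on_done(message_data, result):
    print("finished", message_data["args"], result)

@dramatiq.actor
def on_error(message_data, exception_data):
    print("failed", exception_data["type"], exception_data["message"])

@dramatiq.actor(max_retries=0)
def refresh(feed_id):
    # stand-in for _updateFeed; just echoes its argument back as the result
    return feed_id

# refresh.send_with_options(args=(42,), on_success=on_done, on_failure=on_error)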
models.py
.actor def feed_update_failure(message_data, exception_data): """ A dramatiq callback on each failed attempt for a feed update the user will be notified by inserting a notification in the db only on the final failure TODO: log all errors to somewhere for metrics and analysis """ feed_id = message_data['args'][0] feed = Feed.objects.get(pk=feed_id) # mark feed as failed to update and stop updating it automatically feed.flagged = True feed.save() notification = Notification(feed=feed, owner=feed.owner, title=exception_data['type'], message=exception_data['message']+f'[Feed: {feed.id}, {feed.link}]', is_error=True) notification.save() print("dramatiq callback: feed update error") @dramatiq.actor def feed_update_success(message_data, result): """ A dramatiq callback on a successful attempt for a feed update the user will be notified by inserting a notification in the db marks the feed as not flagged TODO ??maybe log this also for checking failure/success rates? """ feed_id = message_data['args'][0] feed = Feed.objects.get(pk=feed_id) feed.flagged = False feed.save() notification = Notification(feed=feed, owner=feed.owner, title='FeedUpdated', message=f'[Feed: {feed.id}, {feed.link}, {feed.updated_at}]', is_error=False) notification.save() print("dramatiq callback: feed update success") # Exceptions ################################################# class FeedError(Exception): """ An error occurred when fetching the feed If it was parsed despite the error, the feed and entries will be available: e.feed None if not parsed e.entries Empty list if not parsed """ def __init__(self, *args, **kwargs): super(FeedError, self).__init__(*args, **kwargs) # End: Exceptions ################################################# # Feed ################################################# class Feed(models.Model): ''' The feeds model describes a registered feed.
It contains feed-related information as well as user-related info and other metadata ''' link = models.URLField(max_length = 200) title = models.CharField(max_length=200, null=True) subtitle = models.CharField(max_length=200, null=True) description = models.TextField(null=True) language = models.CharField(max_length=5, null=True) copyright = models.CharField(max_length=50, null=True) ttl = models.PositiveIntegerField(null=True) atomLogo = models.URLField(max_length = 200, null=True) pubdate = models.DateTimeField(null=True) nickname = models.CharField(max_length=60) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) following = models.BooleanField(default=True) flagged = models.BooleanField(default=False) owner = models.ForeignKey('auth.User', related_name='feeds', on_delete=models.CASCADE) class Meta: verbose_name = ("Feed") verbose_name_plural = ("Feeds") ordering = ('-updated_at',) unique_together = ('link', 'owner') objects = managers.FeedManager() def __str__(self): return f'Nickname: {self.nickname}' def save(self, *args, **kwargs): # ensure minimum required fields assert self.link assert self.nickname super(Feed, self).save(*args, **kwargs) assert self.id > 0 return def force_update(self, *args, **kwargs): ''' force updates a feed using an async call to the _updateFeed method ''' print(f'Forcing update [Feed ID: {self.id}, Nickname: {self.nickname}] ...') self._updateFeed.send_with_options(args=(self.id,), on_failure=feed_update_failure, on_success=feed_update_success) return @backoff.on_exception(backoff.expo, FeedError, max_tries=MAX_FEED_UPDATE_RETRIES, on_backoff=backoff_hdlr) def _fetch_feed(self): ''' internal method to get feed details from the link provided in self; returns raw feed and entry details as returned by the feedparser library ''' # Request and parse the feed link = self.link d = feedparser.parse(link) status = d.get('status', 200) feed = d.get('feed', None) entries = d.get('entries', None) if status in (200, 302, 304, 307): if ( feed is None or 'title' not in feed or 'link' not in feed ): raise FeedError('Feed parsed but with invalid contents') return feed, entries if status in (404, 500, 502, 503, 504): raise FeedError('Temporary error %s' % status) # Follow permanent redirection if status == 301: # Avoid circular redirection self.link = d.get('href', self.link) return self._fetch_feed() if status == 410: raise FeedError('Feed has gone') # Unknown status raise FeedError('Unrecognised HTTP status %s' % status) @dramatiq.actor(max_retries=0, max_age=10000)#, throws=FeedError) @transaction.atomic def _updateFeed(pk): """ An internal function that fetches a feed and parses it into the Feed object for the DB """ feed = get_object_or_404(Feed, pk=pk) rawFeed, entries = feed._fetch_feed() feed.title = rawFeed.get('title', None) feed.subtitle = rawFeed.get('subtitle', None) feed.copyright = rawFeed.get('rights', None) feed.ttl = rawFeed.get('ttl', None) feed.atomLogo = rawFeed.get('logo', None) # Try to find the updated time updated = rawFeed.get( 'updated_parsed', rawFeed.get('published_parsed', None), ) if updated: updated = datetime.datetime.fromtimestamp( time.mktime(updated) ) feed.pubdate = updated super(Feed, feed).save() if entries: dbEntriesCreate = [] dbEntriesupdate = [] for raw_entry in entries: entry = Entry.objects.parseFromFeed(raw_entry) entry.feed = feed try: newEntry = Entry.objects.get(guid=entry.guid, feed=feed) except Entry.DoesNotExist: newEntry = None if newEntry: # if it was updated, then mark it as unread,
otherwise no need to do anything if newEntry.date > entry.date: entry.state = ENTRY_UNREAD id = newEntry.id newEntry = entry newEntry.id = id dbEntriesupdate.append(newEntry) else: dbEntriesCreate.append(entry) with transaction.atomic(): if len(dbEntriesCreate)>0: Entry.objects.bulk_create(dbEntriesCreate) if len(dbEntriesupdate)>0: fields = ['feed', 'state', 'title' , 'content', 'date', 'author', 'url' ,'comments_url'] Entry.objects.bulk_update(dbEntriesupdate, fields) return # Entry ################################################# class Entry(models.Model): """ Represents a feed entry object If creating from a feedparser entry, use Entry.objects.parseFromFeed() """ feed = models.ForeignKey(Feed, related_name='feed', on_delete=models.CASCADE) state = models.IntegerField(default=ENTRY_UNREAD, choices=( (ENTRY_UNREAD, 'Unread'), (ENTRY_READ, 'Read'), )) # Compulsory data fields title = models.TextField(blank=True) content = models.TextField(blank=True) date = models.DateTimeField( help_text="When this entry says it was published", ) # Optional data fields author = models.TextField(blank=True) url = models.TextField( blank=True, validators=[URLValidator()], help_text="URL for the HTML for this entry", ) comments_url = models.TextField( blank=True, validators=[URLValidator()], help_text="URL for HTML comment submission page", ) guid = models.TextField( blank=True, help_text="GUID for the entry, according to the feed", ) last_updated = models.DateTimeField(auto_now=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) objects = managers.EntryManager() def __unicode__(self): return self.title def save(self, *args, **kwargs): # Default the date if self.date is None: self.date = datetime.datetime.now() # Save super(Entry, self).save(*args, **kwargs) class Meta: ordering = ('-updated_at',) verbose_name_plural = 'entries' # two users can have the same feed but one might force-update and the other # wants to keep the old version, so make it unique even though it makes entries redundant unique_together = ['feed', 'guid'] # Notification class
Notification
identifier_name
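Both callbacks above rely on the shape of the payloads dramatiq hands them: the original message's positional args (where args[0] is the feed primary key) and an exception summary carrying 'type' and 'message'. A small stand-alone illustration of that access pattern; apart from those three keys, which the handlers clearly index into, the field names and literal values here are assumptions for illustration only, not output captured from dramatiq:

# Illustrative payloads; only 'args', 'type' and 'message' are grounded in the handlers above.
message_data = {
    "actor_name": "_updateFeed",   # assumed field
    "args": [42],                  # feed primary key, read as message_data['args'][0]
    "kwargs": {},
}
exception_data = {
    "type": "FeedError",
    "message": "Temporary error 503 ",
}

feed_id = message_data["args"][0]
title = exception_data["type"]
body = exception_data["message"] + f"[Feed: {feed_id}, https://example.org/rss]"
print(feed_id, title, body)   # mirrors what feed_update_failure writes into its Notification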
models.py
} seconds afters {tries} tries " "calling function {target} with args {args} and kwargs " "{kwargs}".format(**details)) feed = details['args'][0] wait = details['wait'] notification = Notification(feed=feed, owner=feed.owner, title='BackOff', message=f'Feed: {feed.id}, {feed.link} failed to update, retrying in {wait:0.1f}', is_error=True) notification.save() @dramatiq.actor def feed_update_failure(message_data, exception_data): """ A dramatiq callback on each failed attempt for a feed update the user will notified by inserting a notification in the db only on the final failure TODO: log all errors to somewhere for metrics and analysis """ feed_id = message_data['args'][0] feed = Feed.objects.get(pk=feed_id) # mark feed as failed to update and stop updateing it automatically feed.flagged = True feed.save() notification = Notification(feed=feed, owner=feed.owner, title=exception_data['type'], message=exception_data['message']+f'[Feed: {feed.id}, {feed.link}]', is_error=True) notification.save() print("dramatiq callback: feed update error") @dramatiq.actor def feed_update_success(message_data, result): """ A dramatiq callback on successful attempt for a feed update the user will notified by inserting a notification in the db marks the feed as not flagged TODO ??maybe log this also for checking failure/success rates? """ feed_id = message_data['args'][0] feed = Feed.objects.get(pk=feed_id) feed.flagged = False feed.save() notification = Notification(feed=feed, owner=feed.owner, title='FeedUpdated', message=f'Feed: {feed.id}, {feed.link}, {feed.updated_at}]', is_error=False) notification.save() print("dramatiq callback: : feed update success") # Exceptions ################################################# class FeedError(Exception): """ An error occurred when fetching the feed If it was parsed despite the error, the feed and entries will be available: e.feed None if not parsed e.entries Empty list if not parsed """ def __init__(self, *args, **kwargs): super(FeedError, self).__init__(*args, **kwargs) # End: Exceptions ################################################# # Feed ################################################# class Feed(models.Model):
flagged = models.BooleanField(default=False) owner = models.ForeignKey('auth.User', related_name='feeds', on_delete=models.CASCADE) class Meta: verbose_name = ("Feed") verbose_name_plural = ("Feeds") ordering = ('-updated_at',) unique_together = ('link', 'owner') objects = managers.FeedManager() def __str__(self): return f'Nickname: {self.nickname}' def save(self, *args, **kwargs): # assure minimum required fields assert self.link assert self.nickname super(Feed, self).save(*args, **kwargs) assert self.id > 0 return def force_update(self, *args, **kwargs): ''' force updates a feed using a async call to the _updateFeed method ''' print(f'Forcing update [Feed ID: {self.id}], Nickname: {self.nickname}] ...') self._updateFeed.send_with_options(args=(self.id,), on_failure=feed_update_failure, on_success=feed_update_success) return @backoff.on_exception(backoff.expo, FeedError, max_tries=MAX_FEED_UPDATE_RETRIES, on_backoff=backoff_hdlr) def _fetch_feed(self): ''' internal method to get feed details from the link provided in self returns raw feed and entry details as returned by the feedparser library ''' # Request and parse the feed link = self.link d = feedparser.parse(link) status = d.get('status', 200) feed = d.get('feed', None) entries = d.get('entries', None) if status in (200, 302, 304, 307): if ( feed is None or 'title' not in feed or 'link' not in feed ): raise FeedError('Feed parsed but with invalid contents') return feed, entries if status in (404, 500, 502, 503, 504): raise FeedError('Temporary error %s' % status) # Follow permanent redirection if status == 301: # Avoid circular redirection self.link = d.get('href', self.link) return self._fetch_feed() if status == 410: raise FeedError('Feed has gone') # Unknown status raise FeedError('Unrecognised HTTP status %s' % status) @dramatiq.actor(max_retries=0, max_age=10000)#, throws=FeedError) @transaction.atomic def _updateFeed(pk): """ An internal function that fetches a feed and parses it into the Feed object for the DB """ feed = get_object_or_404(Feed, pk=pk) rawFeed, entries = feed._fetch_feed() feed.title = rawFeed.get('title', None) feed.subtitle = rawFeed.get('subtitle', None) feed.copyright = rawFeed.get('rights', None) feed.ttl = rawFeed.get('ttl', None) feed.atomLogo = rawFeed.get('logo', None) # Try to find the updated time updated = rawFeed.get( 'updated_parsed', rawFeed.get('published_parsed', None), ) if updated: updated = datetime.datetime.fromtimestamp( time.mktime(updated) ) feed.pubdate = updated super(Feed, feed).save() if entries: dbEntriesCreate = [] dbEntriesupdate = [] for raw_entry in entries: entry = Entry.objects.parseFromFeed(raw_entry) entry.feed = feed try: newEntry = Entry.objects.get(guid=entry.guid, feed=feed) except: newEntry = None if newEntry: # if it was updated, then mark it as unread, otherwise no need to do anything if newEntry.date > entry.date: entry.state = ENTRY_UNREAD id = newEntry.id newEntry = entry newEntry.id = id dbEntriesupdate.append(newEntry) else: dbEntriesCreate.append(entry) with transaction.atomic(): if len(dbEntriesCreate)>0: Entry.objects.bulk_create(dbEntriesCreate) if len(dbEntriesupdate)>0: fields = ['feed', 'state', 'title' , 'content', 'date', 'author', 'url' ,'comments_url'] Entry.objects.bulk_update(dbEntriesupdate, fields) return # Enrty ################################################# class Entry(models.Model): """ Represents a feed entry object If creating from a feedparser entry, use Entry.objects.parseFromFeed() """ feed = models.ForeignKey(Feed, related_name='feed', 
on_delete=models.CASCADE) state = models.IntegerField(default=ENTRY_UNREAD, choices=( (ENTRY_UNREAD, 'Unread'), (ENTRY_READ, 'Read'), )) # Compulsory data fields title = models.TextField(blank=True) content = models.TextField(blank=True) date = models.DateTimeField( help_text="When this entry says it was published", ) # Optional data fields author = models.TextField(blank=True) url = models.TextField( blank=True, validators=[URLValidator()], help_text="URL for the HTML for this entry", ) comments_url = models.TextField( blank=True, validators=[URLValidator()], help_text="URL for HTML comment submission page", ) guid = models.TextField( blank=True, help_text="GUID for the entry, according to the feed", ) last_updated = models.DateTimeField(auto_now=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) objects = managers.EntryManager() def __unicode__(self): return self.title def save(self, *args, **kwargs): # Default the date if self
''' The feeds model describes a registered feed. It contains feed-related information as well as user-related info and other metadata ''' link = models.URLField(max_length = 200) title = models.CharField(max_length=200, null=True) subtitle = models.CharField(max_length=200, null=True) description = models.TextField(null=True) language = models.CharField(max_length=5, null=True) copyright = models.CharField(max_length=50, null=True) ttl = models.PositiveIntegerField(null=True) atomLogo = models.URLField(max_length = 200, null=True) pubdate = models.DateTimeField(null=True) nickname = models.CharField(max_length=60) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) following = models.BooleanField(default=True)
identifier_body
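_fetch_feed above retries on FeedError through backoff.on_exception, and backoff_hdlr formats the 'wait', 'tries' and 'target' fields of the details dict it receives. A minimal runnable sketch of that retry wiring, with max_tries=3 standing in for MAX_FEED_UPDATE_RETRIES and a handler that only prints instead of saving a Notification:

import backoff

class FeedError(Exception):
    """Stand-in for the FeedError class defined above."""

def backoff_hdlr(details):
    # backoff passes a details dict; 'wait', 'tries' and 'target' are the keys
    # the handler in models.py formats into its notification message.
    print("Backing off {wait:0.1f} seconds after {tries} tries "
          "calling function {target}".format(**details))

# max_tries=3 stands in for MAX_FEED_UPDATE_RETRIES.
@backoff.on_exception(backoff.expo, FeedError, max_tries=3, on_backoff=backoff_hdlr)
def flaky_fetch():
    raise FeedError("Temporary error 503")   # always fails, purely to exercise the retries

try:
    flaky_fetch()
except FeedError:
    # After the final retry the error propagates; in the project the actor then
    # fails and feed_update_failure is invoked.
    print("gave up after max_tries")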
server.rs
for Maputnik async fn fontstacks() -> Result<HttpResponse> { Ok(HttpResponse::Ok().json(&["Roboto Medium", "Roboto Regular"])) } // Include method fonts() which returns HashMap with embedded font files include!(concat!(env!("OUT_DIR"), "/fonts.rs")); /// Fonts for Maputnik /// Example: /fonts/Open%20Sans%20Regular,Arial%20Unicode%20MS%20Regular/0-255.pbf async fn fonts_pbf(params: web::Path<(String, String)>) -> Result<HttpResponse> { let fontpbfs = fonts(); let fontlist = &params.as_ref().0; let range = &params.as_ref().1; let mut fonts = fontlist.split(",").collect::<Vec<_>>(); fonts.push("Roboto Regular"); // Fallback let mut resp = HttpResponse::NotFound().finish(); for font in fonts { let key = format!("fonts/{}/{}.pbf", font.replace("%20", " "), range); debug!("Font lookup: {}", key); if let Some(pbf) = fontpbfs.get(&key as &str) { resp = HttpResponse::Ok() .content_type("application/x-protobuf") // data is already gzip compressed .insert_header(header::ContentEncoding::Gzip) .body(*pbf); // TODO: chunked response break; } } Ok(resp) } fn req_baseurl(req: &HttpRequest) -> String { let conninfo = req.connection_info(); format!("{}://{}", conninfo.scheme(), conninfo.host()) } async fn tileset_tilejson( service: web::Data<MvtService>, tileset: web::Path<String>, req: HttpRequest, ) -> Result<HttpResponse> { let url = req_baseurl(&req); let json = web::block(move || service.get_tilejson(&url, &tileset, &service.grid).ok()).await?; Ok(HttpResponse::Ok().json(&json)) } async fn tileset_style_json( service: web::Data<MvtService>, tileset: web::Path<String>, req: HttpRequest, ) -> Result<HttpResponse> { let json = service.get_stylejson(&req_baseurl(&req), &tileset)?; Ok(HttpResponse::Ok().json(&json)) } async fn tileset_metadata_json( service: web::Data<MvtService>, tileset: web::Path<String>, ) -> Result<HttpResponse> { let json = web::block(move || service.get_mbtiles_metadata(&tileset, &service.grid).ok()).await?; Ok(HttpResponse::Ok().json(&json)) } async fn tile_pbf( config: web::Data<ApplicationCfg>, service: web::Data<MvtService>, params: web::Path<(String, u8, u32, u32)>, req: HttpRequest, ) -> Result<HttpResponse> { let params = params.into_inner(); let tileset = params.0; let z = params.1; let x = params.2; let y = params.3; let gzip = req .headers() .get(header::ACCEPT_ENCODING) .and_then(|headerval| { headerval .to_str() .ok() .and_then(|headerstr| Some(headerstr.contains("gzip"))) }) .unwrap_or(false); // rust-postgres starts its own Tokio runtime // without blocking we get 'Cannot start a runtime from within a runtime' let tile = web::block(move || service.tile_cached(&tileset, x, y, z, gzip, None)).await?; let resp = match tile { Some(tile) => { let mut r = HttpResponse::Ok(); r.content_type("application/x-protobuf"); if gzip { // data is already gzip compressed r.insert_header(header::ContentEncoding::Gzip); } let cache_max_age = config.webserver.cache_control_max_age.unwrap_or(300); r.insert_header((header::CACHE_CONTROL, format!("max-age={}", cache_max_age))); r.body(tile) // TODO: chunked response } None => HttpResponse::NoContent().finish(), }; Ok(resp) } lazy_static! 
{ static ref STATIC_FILES: StaticFiles = StaticFiles::init(); } async fn static_file_handler(req: HttpRequest) -> Result<HttpResponse> { let key = req.path()[1..].to_string(); let resp = if let Some(ref content) = STATIC_FILES.content(None, key) { HttpResponse::Ok() .insert_header((header::ACCESS_CONTROL_ALLOW_ORIGIN, "*")) // TOOD: use Actix middleware .content_type(content.1) .body(content.0) // TODO: chunked response } else { HttpResponse::NotFound().finish() }; Ok(resp) } #[derive(Deserialize)] struct DrilldownParams { minzoom: Option<u8>, maxzoom: Option<u8>, points: String, //x1,y1,x2,y2,.. } async fn drilldown_handler( service: web::Data<MvtService>, params: web::Query<DrilldownParams>, ) -> Result<HttpResponse> { let tileset = None; // all tilesets let progress = false; let points: Vec<f64> = params .points .split(",") .map(|v| { v.parse() .expect("Error parsing 'point' as pair of float values") //FIXME: map_err(|_| error::ErrorInternalServerError("...") }) .collect(); let stats = service.drilldown(tileset, params.minzoom, params.maxzoom, points, progress); let json = stats.as_json()?; Ok(HttpResponse::Ok().json(&json)) } #[actix_web::main] pub async fn webserver(args: ArgMatches<'static>) -> std::io::Result<()> { let config = config_from_args(&args); let host = config .webserver .bind .clone() .unwrap_or("127.0.0.1".to_string()); let port = config.webserver.port.unwrap_or(6767); let bind_addr = format!("{}:{}", host, port); let workers = config.webserver.threads.unwrap_or(num_cpus::get() as u8); let mvt_viewer = config.service.mvt.viewer; let openbrowser = bool::from_str(args.value_of("openbrowser").unwrap_or("true")).unwrap_or(false); let static_dirs = config.webserver.static_.clone(); let svc_config = config.clone(); let service = web::block(move || { let mut service = service_from_args(&svc_config, &args); service.prepare_feature_queries(); service.init_cache(); service }) .await .expect("service initialization failed"); let server = HttpServer::new(move || { let mut app = App::new() .app_data(Data::new(config.clone())) .app_data(Data::new(service.clone())) .wrap(middleware::Logger::new("%r %s %b %Dms %a")) .wrap(Compress::default()) .wrap( Cors::default() .allow_any_origin() .send_wildcard() .allowed_methods(vec!["GET"]), ) .service( web::resource("/index.json").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(mvt_metadata), ), ) .service( web::resource("/fontstacks.json").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(fontstacks), ), ) .service( web::resource("/fonts.json").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(fontstacks), ), ) .service( web::resource("/fonts/{fonts}/{range}.pbf").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(fonts_pbf), ), ); for static_dir in &static_dirs { let dir = &static_dir.dir; if std::path::Path::new(dir).is_dir() { info!("Serving static files from directory '{}'", dir); app = app.service(fs::Files::new(&static_dir.path, dir)); } else { warn!("Static file directory '{}' not found", dir); } } app = app .service( web::resource("/{tileset}.style.json").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(tileset_style_json), ), ) .service( web::resource("/{tileset}/metadata.json").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(tileset_metadata_json), ), ) .service( web::resource("/{tileset}.json").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) 
.to(tileset_tilejson), ), )
random_line_split
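The fonts_pbf handler in the Rust code above builds keys of the form fonts/<face>/<range>.pbf, turns %20 back into spaces, and appends Roboto Regular as a fallback face before probing the embedded font map. The same lookup logic, paraphrased as a short Python sketch (FONT_PBFS is a stand-in for the HashMap generated into fonts.rs at build time, and the placeholder bytes are not real glyph data):

from typing import Dict, Optional

# Stand-in for the embedded font map; keys follow the handler's "fonts/<face>/<range>.pbf" pattern.
FONT_PBFS: Dict[str, bytes] = {
    "fonts/Roboto Regular/0-255.pbf": b"<gzip'd glyphs>",  # placeholder bytes
}

def lookup_font_pbf(fontlist: str, range_: str) -> Optional[bytes]:
    faces = fontlist.split(",") + ["Roboto Regular"]  # fallback face, as in fonts_pbf
    for face in faces:
        key = "fonts/{}/{}.pbf".format(face.replace("%20", " "), range_)
        if key in FONT_PBFS:
            return FONT_PBFS[key]
    return None  # the handler answers 404 in this case

print(lookup_font_pbf("Open%20Sans%20Regular,Arial%20Unicode%20MS%20Regular", "0-255") is not None)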
server.rs
" xxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxx xxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx x xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxx xxxxxxxxxxxxxx xxxxxx xxxxxxxxxxxx xxxxxxxxxxx xxxxxxxxxx xxxxxxxxx xxxxxxx xxxxxx xxxxxxx"; async fn mvt_metadata(service: web::Data<MvtService>) -> Result<HttpResponse> { let json = service.get_mvt_metadata()?; Ok(HttpResponse::Ok().json(&json)) } /// Font list for Maputnik async fn fontstacks() -> Result<HttpResponse> { Ok(HttpResponse::Ok().json(&["Roboto Medium", "Roboto Regular"])) } // Include method fonts() which returns HashMap with embedded font files include!(concat!(env!("OUT_DIR"), "/fonts.rs")); /// Fonts for Maputnik /// Example: /fonts/Open%20Sans%20Regular,Arial%20Unicode%20MS%20Regular/0-255.pbf async fn fonts_pbf(params: web::Path<(String, String)>) -> Result<HttpResponse> { let fontpbfs = fonts(); let fontlist = &params.as_ref().0; let range = &params.as_ref().1; let mut fonts = fontlist.split(",").collect::<Vec<_>>(); fonts.push("Roboto Regular"); // Fallback let mut resp = HttpResponse::NotFound().finish(); for font in fonts { let key = format!("fonts/{}/{}.pbf", font.replace("%20", " "), range); debug!("Font lookup: {}", key); if let Some(pbf) = fontpbfs.get(&key as &str) { resp = HttpResponse::Ok() .content_type("application/x-protobuf") // data is already gzip compressed .insert_header(header::ContentEncoding::Gzip) .body(*pbf); // TODO: chunked response break; } } Ok(resp) } fn req_baseurl(req: &HttpRequest) -> String { let conninfo = req.connection_info(); format!("{}://{}", conninfo.scheme(), conninfo.host()) } async fn tileset_tilejson( service: web::Data<MvtService>, tileset: web::Path<String>, req: HttpRequest, ) -> Result<HttpResponse> { let url = req_baseurl(&req); let json = web::block(move || service.get_tilejson(&url, &tileset, &service.grid).ok()).await?; Ok(HttpResponse::Ok().json(&json)) } async fn tileset_style_json( service: web::Data<MvtService>, tileset: web::Path<String>, req: HttpRequest, ) -> Result<HttpResponse> { let json = service.get_stylejson(&req_baseurl(&req), &tileset)?; Ok(HttpResponse::Ok().json(&json)) } async fn tileset_metadata_json( service: web::Data<MvtService>, tileset: web::Path<String>, ) -> Result<HttpResponse> { let json = web::block(move || service.get_mbtiles_metadata(&tileset, &service.grid).ok()).await?; Ok(HttpResponse::Ok().json(&json)) } async fn
( config: web::Data<ApplicationCfg>, service: web::Data<MvtService>, params: web::Path<(String, u8, u32, u32)>, req: HttpRequest, ) -> Result<HttpResponse> { let params = params.into_inner(); let tileset = params.0; let z = params.1; let x = params.2; let y = params.3; let gzip = req .headers() .get(header::ACCEPT_ENCODING) .and_then(|headerval| { headerval .to_str() .ok() .and_then(|headerstr| Some(headerstr.contains("gzip"))) }) .unwrap_or(false); // rust-postgres starts its own Tokio runtime // without blocking we get 'Cannot start a runtime from within a runtime' let tile = web::block(move || service.tile_cached(&tileset, x, y, z, gzip, None)).await?; let resp = match tile { Some(tile) => { let mut r = HttpResponse::Ok(); r.content_type("application/x-protobuf"); if gzip { // data is already gzip compressed r.insert_header(header::ContentEncoding::Gzip); } let cache_max_age = config.webserver.cache_control_max_age.unwrap_or(300); r.insert_header((header::CACHE_CONTROL, format!("max-age={}", cache_max_age))); r.body(tile) // TODO: chunked response } None => HttpResponse::NoContent().finish(), }; Ok(resp) } lazy_static! { static ref STATIC_FILES: StaticFiles = StaticFiles::init(); } async fn static_file_handler(req: HttpRequest) -> Result<HttpResponse> { let key = req.path()[1..].to_string(); let resp = if let Some(ref content) = STATIC_FILES.content(None, key) { HttpResponse::Ok() .insert_header((header::ACCESS_CONTROL_ALLOW_ORIGIN, "*")) // TOOD: use Actix middleware .content_type(content.1) .body(content.0) // TODO: chunked response } else { HttpResponse::NotFound().finish() }; Ok(resp) } #[derive(Deserialize)] struct DrilldownParams { minzoom: Option<u8>, maxzoom: Option<u8>, points: String, //x1,y1,x2,y2,.. } async fn drilldown_handler( service: web::Data<MvtService>, params: web::Query<DrilldownParams>, ) -> Result<HttpResponse> { let tileset = None; // all tilesets let progress = false; let points: Vec<f64> = params .points .split(",") .map(|v| { v.parse() .expect("Error parsing 'point' as pair of float values") //FIXME: map_err(|_| error::ErrorInternalServerError("...") }) .collect(); let stats = service.drilldown(tileset, params.minzoom, params.maxzoom, points, progress); let json = stats.as_json()?; Ok(HttpResponse::Ok().json(&json)) } #[actix_web::main] pub async fn webserver(args: ArgMatches<'static>) -> std::io::Result<()> { let config = config_from_args(&args); let host = config .webserver .bind .clone() .unwrap_or("127.0.0.1".to_string()); let port = config.webserver.port.unwrap_or(6767); let bind_addr = format!("{}:{}", host, port); let workers = config.webserver.threads.unwrap_or(num_cpus::get() as u8); let mvt_viewer = config.service.mvt.viewer; let openbrowser = bool::from_str(args.value_of("openbrowser").unwrap_or("true")).unwrap_or(false); let static_dirs = config.webserver.static_.clone(); let svc_config = config.clone(); let service = web::block(move || { let mut service = service_from_args(&svc_config, &args); service.prepare_feature_queries(); service.init_cache(); service }) .await .expect("service initialization failed"); let server = HttpServer::new(move || { let mut app = App::new() .app_data(Data::new(config.clone())) .app_data(Data::new(service.clone())) .wrap(middleware::Logger::new("%r %s %b %Dms %a")) .wrap(Compress::default()) .wrap( Cors::default() .allow_any_origin() .send_wildcard() .allowed_methods(vec!["GET"]), ) .service( web::resource("/index.json").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(mvt_metadata), ), ) 
.service( web::resource("/fontstacks.json").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(fontstacks), ), ) .service( web::resource("/fonts.json").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(fontstacks), ), ) .service( web::resource("/fonts/{fonts}/{range}.pbf").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(fonts_pbf), ), ); for static_dir in &static_dirs { let dir = &static_dir.dir; if std::path::Path::new(dir).is_dir() { info!("Serving static
tile_pbf
identifier_name
server.rs
" xxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxx xxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx x xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxx xxxxxxxxxxxxxx xxxxxx xxxxxxxxxxxx xxxxxxxxxxx xxxxxxxxxx xxxxxxxxx xxxxxxx xxxxxx xxxxxxx"; async fn mvt_metadata(service: web::Data<MvtService>) -> Result<HttpResponse> { let json = service.get_mvt_metadata()?; Ok(HttpResponse::Ok().json(&json)) } /// Font list for Maputnik async fn fontstacks() -> Result<HttpResponse> { Ok(HttpResponse::Ok().json(&["Roboto Medium", "Roboto Regular"])) } // Include method fonts() which returns HashMap with embedded font files include!(concat!(env!("OUT_DIR"), "/fonts.rs")); /// Fonts for Maputnik /// Example: /fonts/Open%20Sans%20Regular,Arial%20Unicode%20MS%20Regular/0-255.pbf async fn fonts_pbf(params: web::Path<(String, String)>) -> Result<HttpResponse> { let fontpbfs = fonts(); let fontlist = &params.as_ref().0; let range = &params.as_ref().1; let mut fonts = fontlist.split(",").collect::<Vec<_>>(); fonts.push("Roboto Regular"); // Fallback let mut resp = HttpResponse::NotFound().finish(); for font in fonts { let key = format!("fonts/{}/{}.pbf", font.replace("%20", " "), range); debug!("Font lookup: {}", key); if let Some(pbf) = fontpbfs.get(&key as &str) { resp = HttpResponse::Ok() .content_type("application/x-protobuf") // data is already gzip compressed .insert_header(header::ContentEncoding::Gzip) .body(*pbf); // TODO: chunked response break; } } Ok(resp) } fn req_baseurl(req: &HttpRequest) -> String { let conninfo = req.connection_info(); format!("{}://{}", conninfo.scheme(), conninfo.host()) } async fn tileset_tilejson( service: web::Data<MvtService>, tileset: web::Path<String>, req: HttpRequest, ) -> Result<HttpResponse> { let url = req_baseurl(&req); let json = web::block(move || service.get_tilejson(&url, &tileset, &service.grid).ok()).await?; Ok(HttpResponse::Ok().json(&json)) } async fn tileset_style_json( service: web::Data<MvtService>, tileset: web::Path<String>, req: HttpRequest, ) -> Result<HttpResponse> { let json = service.get_stylejson(&req_baseurl(&req), &tileset)?; Ok(HttpResponse::Ok().json(&json)) } async fn tileset_metadata_json( service: web::Data<MvtService>, tileset: web::Path<String>, ) -> Result<HttpResponse> { let json = web::block(move || service.get_mbtiles_metadata(&tileset, &service.grid).ok()).await?; Ok(HttpResponse::Ok().json(&json)) } async fn tile_pbf( config: web::Data<ApplicationCfg>, service: web::Data<MvtService>, params: web::Path<(String, u8, u32, u32)>, req: HttpRequest, ) -> Result<HttpResponse>
Some(tile) => { let mut r = HttpResponse::Ok(); r.content_type("application/x-protobuf"); if gzip { // data is already gzip compressed r.insert_header(header::ContentEncoding::Gzip); } let cache_max_age = config.webserver.cache_control_max_age.unwrap_or(300); r.insert_header((header::CACHE_CONTROL, format!("max-age={}", cache_max_age))); r.body(tile) // TODO: chunked response } None => HttpResponse::NoContent().finish(), }; Ok(resp) } lazy_static! { static ref STATIC_FILES: StaticFiles = StaticFiles::init(); } async fn static_file_handler(req: HttpRequest) -> Result<HttpResponse> { let key = req.path()[1..].to_string(); let resp = if let Some(ref content) = STATIC_FILES.content(None, key) { HttpResponse::Ok() .insert_header((header::ACCESS_CONTROL_ALLOW_ORIGIN, "*")) // TOOD: use Actix middleware .content_type(content.1) .body(content.0) // TODO: chunked response } else { HttpResponse::NotFound().finish() }; Ok(resp) } #[derive(Deserialize)] struct DrilldownParams { minzoom: Option<u8>, maxzoom: Option<u8>, points: String, //x1,y1,x2,y2,.. } async fn drilldown_handler( service: web::Data<MvtService>, params: web::Query<DrilldownParams>, ) -> Result<HttpResponse> { let tileset = None; // all tilesets let progress = false; let points: Vec<f64> = params .points .split(",") .map(|v| { v.parse() .expect("Error parsing 'point' as pair of float values") //FIXME: map_err(|_| error::ErrorInternalServerError("...") }) .collect(); let stats = service.drilldown(tileset, params.minzoom, params.maxzoom, points, progress); let json = stats.as_json()?; Ok(HttpResponse::Ok().json(&json)) } #[actix_web::main] pub async fn webserver(args: ArgMatches<'static>) -> std::io::Result<()> { let config = config_from_args(&args); let host = config .webserver .bind .clone() .unwrap_or("127.0.0.1".to_string()); let port = config.webserver.port.unwrap_or(6767); let bind_addr = format!("{}:{}", host, port); let workers = config.webserver.threads.unwrap_or(num_cpus::get() as u8); let mvt_viewer = config.service.mvt.viewer; let openbrowser = bool::from_str(args.value_of("openbrowser").unwrap_or("true")).unwrap_or(false); let static_dirs = config.webserver.static_.clone(); let svc_config = config.clone(); let service = web::block(move || { let mut service = service_from_args(&svc_config, &args); service.prepare_feature_queries(); service.init_cache(); service }) .await .expect("service initialization failed"); let server = HttpServer::new(move || { let mut app = App::new() .app_data(Data::new(config.clone())) .app_data(Data::new(service.clone())) .wrap(middleware::Logger::new("%r %s %b %Dms %a")) .wrap(Compress::default()) .wrap( Cors::default() .allow_any_origin() .send_wildcard() .allowed_methods(vec!["GET"]), ) .service( web::resource("/index.json").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(mvt_metadata), ), ) .service( web::resource("/fontstacks.json").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(fontstacks), ), ) .service( web::resource("/fonts.json").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(fontstacks), ), ) .service( web::resource("/fonts/{fonts}/{range}.pbf").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(fonts_pbf), ), ); for static_dir in &static_dirs { let dir = &static_dir.dir; if std::path::Path::new(dir).is_dir() { info!("Serving static
{ let params = params.into_inner(); let tileset = params.0; let z = params.1; let x = params.2; let y = params.3; let gzip = req .headers() .get(header::ACCEPT_ENCODING) .and_then(|headerval| { headerval .to_str() .ok() .and_then(|headerstr| Some(headerstr.contains("gzip"))) }) .unwrap_or(false); // rust-postgres starts its own Tokio runtime // without blocking we get 'Cannot start a runtime from within a runtime' let tile = web::block(move || service.tile_cached(&tileset, x, y, z, gzip, None)).await?; let resp = match tile {
identifier_body
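tile_pbf above serves tiles that are already gzip-compressed, so it only sets Content-Encoding: gzip when the client's Accept-Encoding mentions gzip, and it defaults the Cache-Control max-age to 300 seconds when cache_control_max_age is unset. A hedged Python paraphrase of just that header logic; the function name and dict-based return are illustrative, not part of the project:

from typing import Optional

def tile_response_headers(accept_encoding: Optional[str],
                          cache_control_max_age: Optional[int]) -> dict:
    # Advertise gzip only if the client asked for it; the tile bytes themselves
    # are stored gzip-compressed, so no re-encoding happens either way.
    gzip = bool(accept_encoding and "gzip" in accept_encoding)
    headers = {
        "Content-Type": "application/x-protobuf",
        "Cache-Control": f"max-age={cache_control_max_age or 300}",  # 300 is the fallback
    }
    if gzip:
        headers["Content-Encoding"] = "gzip"
    return headers

print(tile_response_headers("br, gzip", None))
print(tile_response_headers(None, 3600))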
NeighborChainCli.py
[self.cmd, 'send', '--from', sender, '--to', receiver, '--amount', f'{amount}:BNB', '--json', '--memo', memo_encoded] + self.get_default_conn() return self._exe_bnb_cli(command, password) def send_to_multi(self, sender, receiver_amount_dict: dict, password, memo): """ :param sender: sender addr or account name :param receiver_amount_dict: dict { receiver_addr : amount to send, ...} :param password: :param memo: :return: """ memo_encoded = BnbCli.encode_memo(memo) logger.info( f'Bnbcli | send from {l6(sender)} to {json.dumps(receiver_amount_dict, indent=3)} | memo: {memo_encoded}') bnb_output = '[' for key, value in receiver_amount_dict.items(): bnb_output += "{\"to\":\"%s\",\"amount\":\"%s:BNB\"}," % (key, value) bnb_output = bnb_output[:-1] + ']' command = [self.cmd, 'token', 'multi-send', '--from', sender, '--transfers', bnb_output, '--json', '--memo', memo_encoded] return self._exe_bnb_cli(command, password) def _spawn(self, command, timeout=15, local=False): if not local: command += self.get_default_conn() logger.info(command) child = pexpect.spawn(command, encoding='utf-8', timeout=timeout) child.logfile = sys.stdout return child def _exe_bnb_cli(self, command, more_input): command += self.get_default_conn() process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) WAIT(7) stdout, stderr = process.communicate(f'{more_input}\n') logger.debug(f"\n" f"+++ command: {' '.join(command)}\n" f"+++ out: {stdout}\n" f"+++ err: {stderr}") out = json_extract(stdout) err = json_extract(stderr) if out is not None: return BnbCli.BnbResponse(out) elif err is not None: return BnbCli.BnbResponse(err) else:
@staticmethod def get_bnb_rpc_url(): return f'{BnbCli._bnb_rpc_protocol}://{BnbCli._bnb_host}:{BnbCli._bnb_rpc_port}' @staticmethod def encode_memo(info): """ @param info: Expect porting id string, or tuple/list of (redeem id,incognito address) @return: """ if type(info) is str: return BnbCli.encode_porting_memo(info) if (type(info) is tuple or type(info, list)) and len(info) == 2: return BnbCli.encode_redeem_memo(info[0], info[1]) raise Exception(f'Expect porting id string, or tuple/list of (redeem id,incognito address), ' f'got {type(info)}: {info} ') @staticmethod def encode_porting_memo(porting_id): logger.info(f"""Encoding porting memo Porting id: {porting_id}""") memo_struct = '{"PortingID":"%s"}' % porting_id byte_ascii = memo_struct.encode('ascii') b64_encode = base64.b64encode(byte_ascii) encode_memo_str_output = b64_encode.decode('utf-8') return encode_memo_str_output @staticmethod def encode_redeem_memo(redeem_id, custodian_incognito_addr): logger.info(f"""Encoding redeem memo Redeem id: {redeem_id} Incognito addr: {custodian_incognito_addr}""") memo_struct = '{"RedeemID":"%s","CustodianIncognitoAddress":"%s"}' % (redeem_id, custodian_incognito_addr) byte_ascii = memo_struct.encode('ascii') sha3_256 = hashlib.sha3_256(byte_ascii) b64_encode = base64.b64encode(sha3_256.digest()) encode_memo_str_output = b64_encode.decode('utf-8') return encode_memo_str_output def import_mnemonics(self, username, pass_phrase, mnemonic, overwrite=True): """ :param overwrite: option to overwrite existing username :param username: user name prefix :param pass_phrase: pass phrase for all user (all user will have the same pass phrase :param mnemonic: could be a string or a list of strings of mnemonic :return: """ mnemonic_list = [mnemonic] if type(mnemonic) is str else mnemonic i = 1 for m in mnemonic_list: name = f'{username}{i}' i += 1 logger.info(f'Importing key with passphrase: {pass_phrase} | {m}') command = f"{self.cmd} keys add --recover {name}" child = self._spawn(command, local=True) try: child.expect('override the existing name', timeout=2) if overwrite: child.sendline('y') else: child.sendline('n') child.close() return except pexpect.exceptions.TIMEOUT: pass child.expect('Enter a passphrase for your key:') child.sendline(pass_phrase) child.expect('Repeat the passphrase:') child.sendline(pass_phrase) child.expect('> Enter your recovery seed phrase:') child.sendline(m) child.expect(pexpect.EOF) child.close() def delete_local_address(self, user=None, password=None): if user is None: users_to_del = self.list_user_addresses() elif type(user) is str: users_to_del = [user] elif type(user) is list: users_to_del = user else: raise Exception('un-support arg type of <user> arg') password = '123123Az' if password is None else password for user in users_to_del: command = f'{self.cmd} keys delete {user}' child = self._spawn(command, local=True) try: child.expect('not found', timeout=2) child.close() continue except pexpect.exceptions.TIMEOUT: pass child.expect('DANGER - enter password to permanently delete key:') child.sendline(password) child.expect(pexpect.EOF) child.close() def list_user_addresses(self): user_addresses = {} command = f"{self.cmd} keys list" child = self._spawn(command, local=True) line = child.readline() while line != '': list_ = line.strip('\r\n').split('\t') if 'bnb' in list_[2]: user = list_[0] address = list_[2] user_addresses[user] = address line = child.readline() return user_addresses class BnbResponse: def __init__(self, stdout): self.data = stdout if self.data is None: raise 
ValueError('Response data must not be None') def get_coins(self): try: return self.data['value']['base']['coins'] except TypeError: return 0 def get_amount(self, denom): coins = self.get_coins() for coin in coins: if coin['denom'] == denom: return coin['amount'] def get_balance(self): return self.get_amount('BNB') def get_tx_hash(self): try: return self.data['hash'] except KeyError as ke: raise Exception(f'Response data does not contain hash: {ke} :{self.data}') def build_proof(self, tx_hash=None): tx_hash = self.get_tx_hash() if tx_hash is None else tx_hash logger.info() logger.info(f'Portal | Building proof | tx {tx_hash}') bnb_get_block_url = f"{BnbCli.get_bnb_rpc_url()}/tx?hash=0x{tx_hash}&prove=true" block_response = RpcConnection(bnb_get_block_url, id_num='', json_rpc='2.0'). \ with_params([]).with_method('').execute() block_height = int(block_response.data()['result']['height']) proof = {"Proof": block_response.data()['result']['proof'], "BlockHeight": block_height} proof['Proof']['Proof']['total'] = int(proof['Proof']['Proof']['total']) # convert to int proof['Proof']['Proof']['index'] = int(proof['Proof']['Proof']['index']) # convert to int proof_string = json.dumps(proof, separators=(',', ':')) # separators=(',', ':') to remove all spaces proof_ascii = proof_string.encode('ascii') # convert to byte string_base64 = base64.b64encode(proof_ascii) # encode string_base64_utf8 = string_base64.decode('utf-8') # convert to string logger.debug(f""" Proof: ================= \n{proof}""") return string_base64_utf8 # ============= BTC =================== class BtcGo: if sys.platform == 'darwin': btc_go_path
raise NeighborChainError(stderr)
conditional_block
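The two memo encoders in BnbCli above differ in one step: a porting memo is the JSON struct base64-encoded directly, while a redeem memo is hashed with SHA3-256 first and the digest is then base64-encoded. A self-contained sketch of both transformations; the porting id, redeem id and incognito address passed at the bottom are made-up placeholders:

import base64
import hashlib

def encode_porting_memo(porting_id: str) -> str:
    # Porting memo: base64 of the JSON struct itself, as in BnbCli.encode_porting_memo above.
    memo = '{"PortingID":"%s"}' % porting_id
    return base64.b64encode(memo.encode("ascii")).decode("utf-8")

def encode_redeem_memo(redeem_id: str, custodian_incognito_addr: str) -> str:
    # Redeem memo: SHA3-256 of the JSON struct, then base64 of the digest.
    memo = '{"RedeemID":"%s","CustodianIncognitoAddress":"%s"}' % (redeem_id, custodian_incognito_addr)
    digest = hashlib.sha3_256(memo.encode("ascii")).digest()
    return base64.b64encode(digest).decode("utf-8")

# Hypothetical identifiers purely for illustration:
print(encode_porting_memo("porting-0001"))
print(encode_redeem_memo("redeem-0001", "12S5Lrs...placeholder"))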
NeighborChainCli.py
(self): return ['--chain-id', self.chain_id, '--node', self.node, self.trust] def get_balance(self, key): process = subprocess.Popen([self.cmd, 'account', key] + self.get_default_conn(), stdout=self.stdout, stderr=self.stderr, universal_newlines=True) stdout, stderr = process.communicate() out = json_extract(stdout) bal = int(BnbCli.BnbResponse(out).get_balance()) logger.debug(f"out: {stdout.strip()}") return bal def send_to(self, sender, receiver, amount, password, memo): memo_encoded = BnbCli.encode_memo(memo) logger.info(f'Bnbcli | send {amount} from {l6(sender)} to {l6(receiver)} | memo: {memo_encoded}') command = [self.cmd, 'send', '--from', sender, '--to', receiver, '--amount', f'{amount}:BNB', '--json', '--memo', memo_encoded] + self.get_default_conn() return self._exe_bnb_cli(command, password) def send_to_multi(self, sender, receiver_amount_dict: dict, password, memo): """ :param sender: sender addr or account name :param receiver_amount_dict: dict { receiver_addr : amount to send, ...} :param password: :param memo: :return: """ memo_encoded = BnbCli.encode_memo(memo) logger.info( f'Bnbcli | send from {l6(sender)} to {json.dumps(receiver_amount_dict, indent=3)} | memo: {memo_encoded}') bnb_output = '[' for key, value in receiver_amount_dict.items(): bnb_output += "{\"to\":\"%s\",\"amount\":\"%s:BNB\"}," % (key, value) bnb_output = bnb_output[:-1] + ']' command = [self.cmd, 'token', 'multi-send', '--from', sender, '--transfers', bnb_output, '--json', '--memo', memo_encoded] return self._exe_bnb_cli(command, password) def _spawn(self, command, timeout=15, local=False): if not local: command += self.get_default_conn() logger.info(command) child = pexpect.spawn(command, encoding='utf-8', timeout=timeout) child.logfile = sys.stdout return child def _exe_bnb_cli(self, command, more_input): command += self.get_default_conn() process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) WAIT(7) stdout, stderr = process.communicate(f'{more_input}\n') logger.debug(f"\n" f"+++ command: {' '.join(command)}\n" f"+++ out: {stdout}\n" f"+++ err: {stderr}") out = json_extract(stdout) err = json_extract(stderr) if out is not None: return BnbCli.BnbResponse(out) elif err is not None: return BnbCli.BnbResponse(err) else: raise NeighborChainError(stderr) @staticmethod def get_bnb_rpc_url(): return f'{BnbCli._bnb_rpc_protocol}://{BnbCli._bnb_host}:{BnbCli._bnb_rpc_port}' @staticmethod def encode_memo(info): """ @param info: Expect porting id string, or tuple/list of (redeem id,incognito address) @return: """ if type(info) is str: return BnbCli.encode_porting_memo(info) if (type(info) is tuple or type(info, list)) and len(info) == 2: return BnbCli.encode_redeem_memo(info[0], info[1]) raise Exception(f'Expect porting id string, or tuple/list of (redeem id,incognito address), ' f'got {type(info)}: {info} ') @staticmethod def encode_porting_memo(porting_id): logger.info(f"""Encoding porting memo Porting id: {porting_id}""") memo_struct = '{"PortingID":"%s"}' % porting_id byte_ascii = memo_struct.encode('ascii') b64_encode = base64.b64encode(byte_ascii) encode_memo_str_output = b64_encode.decode('utf-8') return encode_memo_str_output @staticmethod def encode_redeem_memo(redeem_id, custodian_incognito_addr): logger.info(f"""Encoding redeem memo Redeem id: {redeem_id} Incognito addr: {custodian_incognito_addr}""") memo_struct = '{"RedeemID":"%s","CustodianIncognitoAddress":"%s"}' % (redeem_id, custodian_incognito_addr) byte_ascii = 
memo_struct.encode('ascii') sha3_256 = hashlib.sha3_256(byte_ascii) b64_encode = base64.b64encode(sha3_256.digest()) encode_memo_str_output = b64_encode.decode('utf-8') return encode_memo_str_output def import_mnemonics(self, username, pass_phrase, mnemonic, overwrite=True): """ :param overwrite: option to overwrite existing username :param username: user name prefix :param pass_phrase: pass phrase for all user (all user will have the same pass phrase :param mnemonic: could be a string or a list of strings of mnemonic :return: """ mnemonic_list = [mnemonic] if type(mnemonic) is str else mnemonic i = 1 for m in mnemonic_list: name = f'{username}{i}' i += 1 logger.info(f'Importing key with passphrase: {pass_phrase} | {m}') command = f"{self.cmd} keys add --recover {name}" child = self._spawn(command, local=True) try: child.expect('override the existing name', timeout=2) if overwrite: child.sendline('y') else: child.sendline('n') child.close() return except pexpect.exceptions.TIMEOUT: pass child.expect('Enter a passphrase for your key:') child.sendline(pass_phrase) child.expect('Repeat the passphrase:') child.sendline(pass_phrase) child.expect('> Enter your recovery seed phrase:') child.sendline(m) child.expect(pexpect.EOF) child.close() def delete_local_address(self, user=None, password=None): if user is None: users_to_del = self.list_user_addresses() elif type(user) is str: users_to_del = [user] elif type(user) is list: users_to_del = user else: raise Exception('un-support arg type of <user> arg') password = '123123Az' if password is None else password for user in users_to_del: command = f'{self.cmd} keys delete {user}' child = self._spawn(command, local=True) try: child.expect('not found', timeout=2) child.close() continue except pexpect.exceptions.TIMEOUT: pass child.expect('DANGER - enter password to permanently delete key:') child.sendline(password) child.expect(pexpect.EOF) child.close() def list_user_addresses(self): user_addresses = {} command = f"{self.cmd} keys list" child = self._spawn(command, local=True) line = child.readline() while line != '': list_ = line.strip('\r\n').split('\t') if 'bnb' in list_[2]: user = list_[0] address = list_[2] user_addresses[user] = address line = child.readline() return user_addresses class BnbResponse: def __init__(self, stdout): self.data = stdout if self.data is None: raise ValueError('Response data must not be None') def get_coins(self): try: return self.data['value']['base']['coins'] except TypeError: return 0 def get_amount(self, denom): coins = self.get_coins() for coin in coins: if coin['denom'] == denom: return coin['amount'] def get_balance(self): return self.get_amount('BNB') def get_tx_hash(self): try: return self.data['hash'] except KeyError as ke: raise Exception(f'Response data does not contain hash: {ke} :{self.data}') def build_proof(self, tx_hash=None): tx_hash = self.get_tx_hash() if tx_hash is None else tx_hash logger.info() logger.info(f'Portal | Building proof | tx {tx_hash}') bnb_get_block_url = f"{BnbCli.get_bnb_rpc_url()}/tx?hash=0x{tx_hash}&prove=true" block_response = RpcConnection(bnb_get_block_url, id_num='', json_rpc='2.0'). \ with_params([]).with_method('').execute() block_height = int(block_response.data()['result']['height']) proof = {"Proof": block_response.data()['result']['proof'], "BlockHeight": block_height} proof['Proof']['Proof']['total'] =
get_default_conn
identifier_name
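build_proof above fetches the transaction with prove=true, packs the proof and block height into a dict, casts the nested total and index fields to int, serializes the dict without spaces and base64-encodes the result. The serialization half of that flow, sketched without the RPC round trip; the fake payload only mimics the fields the code reads and is not real chain data:

import base64
import json

def serialize_proof(rpc_result: dict) -> str:
    # Mirrors BnbResponse.build_proof above, minus the /tx?hash=0x<hash>&prove=true call.
    proof = {
        "Proof": rpc_result["result"]["proof"],
        "BlockHeight": int(rpc_result["result"]["height"]),
    }
    proof["Proof"]["Proof"]["total"] = int(proof["Proof"]["Proof"]["total"])  # convert to int
    proof["Proof"]["Proof"]["index"] = int(proof["Proof"]["Proof"]["index"])  # convert to int
    compact = json.dumps(proof, separators=(",", ":"))   # remove all spaces
    return base64.b64encode(compact.encode("ascii")).decode("utf-8")

# Minimal fake RPC payload, just to show the shape the code relies on:
fake = {"result": {"height": "123456",
                   "proof": {"RootHash": "ab..", "Data": "..",
                             "Proof": {"total": "1", "index": "0", "leaf_hash": "..", "aunts": []}}}}
print(serialize_proof(fake))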
NeighborChainCli.py
class NeighborChainError(BaseException): pass class BnbCli: _bnb_host = 'data-seed-pre-0-s1.binance.org' _bnb_rpc_port = 443 _bnb_rpc_protocol = 'https' _path = f'{os.getcwd()}/bin' _binary = {'darwin': f'{_path}/tbnbcli-mac', 'linux': f'{_path}/tbnbcli-linux', '*': f'{_path}/tbnbcli-win'} tbnbcli = _binary.get(sys.platform, _binary["*"]) def __init__(self, cmd=tbnbcli, chain_id="Binance-Chain-Ganges", node=None): if node is None: self.node = f'tcp://{BnbCli._bnb_host}:80' self.cmd = cmd self.chain_id = chain_id self.trust = '--trust-node' self.stdout = subprocess.PIPE self.stderr = subprocess.PIPE def get_default_conn(self): return ['--chain-id', self.chain_id, '--node', self.node, self.trust] def get_balance(self, key): process = subprocess.Popen([self.cmd, 'account', key] + self.get_default_conn(), stdout=self.stdout, stderr=self.stderr, universal_newlines=True) stdout, stderr = process.communicate() out = json_extract(stdout) bal = int(BnbCli.BnbResponse(out).get_balance()) logger.debug(f"out: {stdout.strip()}") return bal def send_to(self, sender, receiver, amount, password, memo): memo_encoded = BnbCli.encode_memo(memo) logger.info(f'Bnbcli | send {amount} from {l6(sender)} to {l6(receiver)} | memo: {memo_encoded}') command = [self.cmd, 'send', '--from', sender, '--to', receiver, '--amount', f'{amount}:BNB', '--json', '--memo', memo_encoded] + self.get_default_conn() return self._exe_bnb_cli(command, password) def send_to_multi(self, sender, receiver_amount_dict: dict, password, memo): """ :param sender: sender addr or account name :param receiver_amount_dict: dict { receiver_addr : amount to send, ...} :param password: :param memo: :return: """ memo_encoded = BnbCli.encode_memo(memo) logger.info( f'Bnbcli | send from {l6(sender)} to {json.dumps(receiver_amount_dict, indent=3)} | memo: {memo_encoded}') bnb_output = '[' for key, value in receiver_amount_dict.items(): bnb_output += "{\"to\":\"%s\",\"amount\":\"%s:BNB\"}," % (key, value) bnb_output = bnb_output[:-1] + ']' command = [self.cmd, 'token', 'multi-send', '--from', sender, '--transfers', bnb_output, '--json', '--memo', memo_encoded] return self._exe_bnb_cli(command, password) def _spawn(self, command, timeout=15, local=False): if not local: command += self.get_default_conn() logger.info(command) child = pexpect.spawn(command, encoding='utf-8', timeout=timeout) child.logfile = sys.stdout return child def _exe_bnb_cli(self, command, more_input): command += self.get_default_conn() process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) WAIT(7) stdout, stderr = process.communicate(f'{more_input}\n') logger.debug(f"\n" f"+++ command: {' '.join(command)}\n" f"+++ out: {stdout}\n" f"+++ err: {stderr}") out = json_extract(stdout) err = json_extract(stderr) if out is not None: return BnbCli.BnbResponse(out) elif err is not None: return BnbCli.BnbResponse(err) else: raise NeighborChainError(stderr) @staticmethod def get_bnb_rpc_url(): return f'{BnbCli._bnb_rpc_protocol}://{BnbCli._bnb_host}:{BnbCli._bnb_rpc_port}' @staticmethod def encode_memo(info): """ @param info: Expect porting id string, or tuple/list of (redeem id,incognito address) @return: """ if type(info) is str: return BnbCli.encode_porting_memo(info) if (type(info) is tuple or type(info, list)) and len(info) == 2: return BnbCli.encode_redeem_memo(info[0], info[1]) raise Exception(f'Expect porting id string, or tuple/list of (redeem id,incognito address), ' f'got {type(info)}: {info} ') @staticmethod 
def encode_porting_memo(porting_id): logger.info(f"""Encoding porting memo Porting id: {porting_id}""") memo_struct = '{"PortingID":"%s"}' % porting_id byte_ascii = memo_struct.encode('ascii') b64_encode = base64.b64encode(byte_ascii) encode_memo_str_output = b64_encode.decode('utf-8') return encode_memo_str_output @staticmethod def encode_redeem_memo(redeem_id, custodian_incognito_addr): logger.info(f"""Encoding redeem memo Redeem id: {redeem_id} Incognito addr: {custodian_incognito_addr}""") memo_struct = '{"RedeemID":"%s","CustodianIncognitoAddress":"%s"}' % (redeem_id, custodian_incognito_addr) byte_ascii = memo_struct.encode('ascii') sha3_256 = hashlib.sha3_256(byte_ascii) b64_encode = base64.b64encode(sha3_256.digest()) encode_memo_str_output = b64_encode.decode('utf-8') return encode_memo_str_output def import_mnemonics(self, username, pass_phrase, mnemonic, overwrite=True): """ :param overwrite: option to overwrite existing username :param username: user name prefix :param pass_phrase: pass phrase for all user (all user will have the same pass phrase :param mnemonic: could be a string or a list of strings of mnemonic :return: """ mnemonic_list = [mnemonic] if type(mnemonic) is str else mnemonic i = 1 for m in mnemonic_list: name = f'{username}{i}' i += 1 logger.info(f'Importing key with passphrase: {pass_phrase} | {m}') command = f"{self.cmd} keys add --recover {name}" child = self._spawn(command, local=True) try: child.expect('override the existing name', timeout=2) if overwrite: child.sendline('y') else: child.sendline('n') child.close() return except pexpect.exceptions.TIMEOUT: pass child.expect('Enter a passphrase for your key:') child.sendline(pass_phrase) child.expect('Repeat the passphrase:') child.sendline(pass_phrase) child.expect('> Enter your recovery seed phrase:') child.sendline(m) child.expect(pexpect.EOF) child.close() def delete_local_address(self, user=None, password=None): if user is None: users_to_del = self.list_user_addresses() elif type(user) is str: users_to_del = [user] elif type(user) is list: users_to_del = user else: raise Exception('un-support arg type of <user> arg') password = '123123Az' if password is None else password for user in users_to_del: command = f'{self.cmd} keys delete {user}' child = self._spawn(command, local=True) try: child.expect('not found', timeout=2) child.close() continue except pexpect.exceptions.TIMEOUT: pass child.expect('DANGER - enter password to permanently delete key:') child.sendline(password) child.expect(pexpect.EOF) child.close() def list_user_addresses(self): user_addresses = {} command = f"{self.cmd} keys list" child = self._spawn(command, local=True) line = child.readline() while line != '': list_ = line.strip('\r\n').split('\t') if 'bnb' in list_[2]: user = list_[0] address = list_[2] user_addresses[user] = address line = child.readline() return user_addresses class BnbResponse: def __init__(self, stdout): self.data = stdout if self.data is None: raise ValueError('Response data must not be None') def get_coins(self): try: return self.data['value']['base']['coins'] except TypeError: return 0 def get_amount(self, denom):
if token_id == PBNB_ID: return BnbCli() elif token_id == PBTC_ID: return BtcGo()
identifier_body
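The conditional body shown above (return BnbCli() for PBNB_ID, BtcGo() for PBTC_ID) is the core of a small factory that picks the right neighbor-chain wrapper per portal token. A sketch of that dispatch with stand-in classes; new_cli and the two id strings are hypothetical names and values, not the project's constants:

PBNB_ID = "placeholder-pbnb-token-id"   # stand-in, not the real constant
PBTC_ID = "placeholder-pbtc-token-id"   # stand-in, not the real constant

class BnbCli:   # stand-in for the class above
    pass

class BtcGo:    # stand-in for the class above
    pass

def new_cli(token_id):
    # Dispatch on the portal token id, as in the conditional block above.
    if token_id == PBNB_ID:
        return BnbCli()
    elif token_id == PBTC_ID:
        return BtcGo()
    raise ValueError(f"unsupported token id: {token_id}")

print(type(new_cli(PBNB_ID)).__name__)   # BnbCli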
NeighborChainCli.py
= [self.cmd, 'send', '--from', sender, '--to', receiver, '--amount', f'{amount}:BNB', '--json', '--memo', memo_encoded] + self.get_default_conn() return self._exe_bnb_cli(command, password) def send_to_multi(self, sender, receiver_amount_dict: dict, password, memo): """ :param sender: sender addr or account name :param receiver_amount_dict: dict { receiver_addr : amount to send, ...} :param password: :param memo: :return: """ memo_encoded = BnbCli.encode_memo(memo) logger.info( f'Bnbcli | send from {l6(sender)} to {json.dumps(receiver_amount_dict, indent=3)} | memo: {memo_encoded}') bnb_output = '[' for key, value in receiver_amount_dict.items(): bnb_output += "{\"to\":\"%s\",\"amount\":\"%s:BNB\"}," % (key, value) bnb_output = bnb_output[:-1] + ']' command = [self.cmd, 'token', 'multi-send', '--from', sender, '--transfers', bnb_output, '--json', '--memo', memo_encoded] return self._exe_bnb_cli(command, password) def _spawn(self, command, timeout=15, local=False): if not local: command += self.get_default_conn() logger.info(command) child = pexpect.spawn(command, encoding='utf-8', timeout=timeout) child.logfile = sys.stdout return child def _exe_bnb_cli(self, command, more_input): command += self.get_default_conn() process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
WAIT(7) stdout, stderr = process.communicate(f'{more_input}\n') logger.debug(f"\n" f"+++ command: {' '.join(command)}\n" f"+++ out: {stdout}\n" f"+++ err: {stderr}") out = json_extract(stdout) err = json_extract(stderr) if out is not None: return BnbCli.BnbResponse(out) elif err is not None: return BnbCli.BnbResponse(err) else: raise NeighborChainError(stderr) @staticmethod def get_bnb_rpc_url(): return f'{BnbCli._bnb_rpc_protocol}://{BnbCli._bnb_host}:{BnbCli._bnb_rpc_port}' @staticmethod def encode_memo(info): """ @param info: Expect porting id string, or tuple/list of (redeem id,incognito address) @return: """ if type(info) is str: return BnbCli.encode_porting_memo(info) if (type(info) is tuple or type(info, list)) and len(info) == 2: return BnbCli.encode_redeem_memo(info[0], info[1]) raise Exception(f'Expect porting id string, or tuple/list of (redeem id,incognito address), ' f'got {type(info)}: {info} ') @staticmethod def encode_porting_memo(porting_id): logger.info(f"""Encoding porting memo Porting id: {porting_id}""") memo_struct = '{"PortingID":"%s"}' % porting_id byte_ascii = memo_struct.encode('ascii') b64_encode = base64.b64encode(byte_ascii) encode_memo_str_output = b64_encode.decode('utf-8') return encode_memo_str_output @staticmethod def encode_redeem_memo(redeem_id, custodian_incognito_addr): logger.info(f"""Encoding redeem memo Redeem id: {redeem_id} Incognito addr: {custodian_incognito_addr}""") memo_struct = '{"RedeemID":"%s","CustodianIncognitoAddress":"%s"}' % (redeem_id, custodian_incognito_addr) byte_ascii = memo_struct.encode('ascii') sha3_256 = hashlib.sha3_256(byte_ascii) b64_encode = base64.b64encode(sha3_256.digest()) encode_memo_str_output = b64_encode.decode('utf-8') return encode_memo_str_output def import_mnemonics(self, username, pass_phrase, mnemonic, overwrite=True): """ :param overwrite: option to overwrite existing username :param username: user name prefix :param pass_phrase: pass phrase for all user (all user will have the same pass phrase :param mnemonic: could be a string or a list of strings of mnemonic :return: """ mnemonic_list = [mnemonic] if type(mnemonic) is str else mnemonic i = 1 for m in mnemonic_list: name = f'{username}{i}' i += 1 logger.info(f'Importing key with passphrase: {pass_phrase} | {m}') command = f"{self.cmd} keys add --recover {name}" child = self._spawn(command, local=True) try: child.expect('override the existing name', timeout=2) if overwrite: child.sendline('y') else: child.sendline('n') child.close() return except pexpect.exceptions.TIMEOUT: pass child.expect('Enter a passphrase for your key:') child.sendline(pass_phrase) child.expect('Repeat the passphrase:') child.sendline(pass_phrase) child.expect('> Enter your recovery seed phrase:') child.sendline(m) child.expect(pexpect.EOF) child.close() def delete_local_address(self, user=None, password=None): if user is None: users_to_del = self.list_user_addresses() elif type(user) is str: users_to_del = [user] elif type(user) is list: users_to_del = user else: raise Exception('un-support arg type of <user> arg') password = '123123Az' if password is None else password for user in users_to_del: command = f'{self.cmd} keys delete {user}' child = self._spawn(command, local=True) try: child.expect('not found', timeout=2) child.close() continue except pexpect.exceptions.TIMEOUT: pass child.expect('DANGER - enter password to permanently delete key:') child.sendline(password) child.expect(pexpect.EOF) child.close() def list_user_addresses(self): user_addresses = {} command = f"{self.cmd} 
keys list" child = self._spawn(command, local=True) line = child.readline() while line != '': list_ = line.strip('\r\n').split('\t') if 'bnb' in list_[2]: user = list_[0] address = list_[2] user_addresses[user] = address line = child.readline() return user_addresses class BnbResponse: def __init__(self, stdout): self.data = stdout if self.data is None: raise ValueError('Response data must not be None') def get_coins(self): try: return self.data['value']['base']['coins'] except TypeError: return 0 def get_amount(self, denom): coins = self.get_coins() for coin in coins: if coin['denom'] == denom: return coin['amount'] def get_balance(self): return self.get_amount('BNB') def get_tx_hash(self): try: return self.data['hash'] except KeyError as ke: raise Exception(f'Response data does not contain hash: {ke} :{self.data}') def build_proof(self, tx_hash=None): tx_hash = self.get_tx_hash() if tx_hash is None else tx_hash logger.info() logger.info(f'Portal | Building proof | tx {tx_hash}') bnb_get_block_url = f"{BnbCli.get_bnb_rpc_url()}/tx?hash=0x{tx_hash}&prove=true" block_response = RpcConnection(bnb_get_block_url, id_num='', json_rpc='2.0'). \ with_params([]).with_method('').execute() block_height = int(block_response.data()['result']['height']) proof = {"Proof": block_response.data()['result']['proof'], "BlockHeight": block_height} proof['Proof']['Proof']['total'] = int(proof['Proof']['Proof']['total']) # convert to int proof['Proof']['Proof']['index'] = int(proof['Proof']['Proof']['index']) # convert to int proof_string = json.dumps(proof, separators=(',', ':')) # separators=(',', ':') to remove all spaces proof_ascii = proof_string.encode('ascii') # convert to byte string_base64 = base64.b64encode(proof_ascii) # encode string_base64_utf8 = string_base64.decode('utf-8') # convert to string logger.debug(f""" Proof: ================= \n{proof}""") return string_base64_utf8 # ============= BTC =================== class BtcGo: if sys.platform == 'darwin': btc_go_path
random_line_split
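The BnbCli helpers in the row above encode portal memos by serializing a small JSON struct and base64-encoding it, with the redeem variant hashing the struct with SHA3-256 before encoding. A minimal standalone sketch of that flow, assuming made-up sample IDs purely for illustration:

import base64
import hashlib
import json

def encode_porting_memo(porting_id):
    # Porting memo: base64 of the raw JSON struct.
    memo = json.dumps({"PortingID": porting_id}, separators=(',', ':'))
    return base64.b64encode(memo.encode('ascii')).decode('utf-8')

def encode_redeem_memo(redeem_id, custodian_incognito_addr):
    # Redeem memo: SHA3-256 digest of the JSON struct, then base64 of the digest.
    memo = json.dumps({"RedeemID": redeem_id,
                       "CustodianIncognitoAddress": custodian_incognito_addr},
                      separators=(',', ':'))
    digest = hashlib.sha3_256(memo.encode('ascii')).digest()
    return base64.b64encode(digest).decode('utf-8')

print(encode_porting_memo("porting-0001"))           # hypothetical porting id
print(encode_redeem_memo("redeem-0001", "12Rx..."))  # hypothetical redeem id / address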
dots.py
SETTINGS if os.path.exists("cb.pck"): f = open("cb.pck") cb = pickle.load(f) f.close() else: cb = CBalance.Counterbalance(blocks) blockOrder = cb.advance() f = open("cb.pck", "w") pickle.dump(cb, f) f.close() stimLib = "stimuli" screen = get_default_screen() screen.parameters.bgcolor = (.52, .51, .52) pygame.init() fixText, fixCross = experiments.printWord(screen, '+', crossSize, (0, 0, 0)) trial = 1 #HANDLERS def put_image_sequential(t_abs): global phase global start if not phase: start = t_abs phase = "dots1" t = t_abs - start if t >= dot_duration and phase == "dots1": texture_object.put_sub_image(Image.open(os.path.join(stimLib,fname2))) phase = "dots2" elif t >= (dot_duration * 2) and phase == "dots2": texture_object.put_sub_image(mask_img) phase = "mask" elif t >= (dot_duration * 2 + mask_dur) and phase == "mask": p.parameters.viewports = [fixCross] phase = "cross" elif t >= (dot_duration * 2 + mask_dur + cross_duration): p.parameters.go_duration = [0, 'frames'] def
(t_abs): global phase global start if not phase: start = t_abs phase = "dots" t = t_abs - start if t >= dot_duration and phase == "dots": texture_object1.put_sub_image(mask_img) texture_object2.put_sub_image(mask_img) phase = "mask" elif t >= (dot_duration + mask_dur) and phase == "mask": p.parameters.viewports = [fixCross] phase = "cross" elif t >= (dot_duration + mask_dur + cross_duration): p.parameters.go_duration = (0, 'frames') def put_image_overlapping(t_abs): global phase global start if not phase: start = t_abs phase = "dots" t = t_abs - start if t >= dot_duration and phase == "dots": texture_object.put_sub_image(mask_img) phase = "mask" elif t >= (dot_duration + mask_dur) and phase == "mask": p.parameters.viewports = [fixCross] phase = "cross" elif t >= (dot_duration + mask_dur + cross_duration): p.parameters.go_duration = (0, 'frames') def keyFunc(event): global color global cDict global yellowB global blueB global trial global block global side global pressed RT = p.time_sec_since_go * 1000 if block == "sequential": RT-= (dot_duration * 1000) correct = cDict[color] sub.inputData(trial, "RT", RT) if event.key == pygame.locals.K_LCTRL: sub.inputData(trial, "key", "L_CTRL") elif event.key == pygame.locals.K_RCTRL: sub.inputData(trial, "key", "R_CTRL") else: sub.inputData(trial, "key", "NA") if not pressed: if block == "paired": if event.key == pygame.locals.K_LCTRL: if side == "large": sub.inputData(trial, "ACC", 1) else: sub.inputData(trial, "ACC", 0) elif event.key == pygame.locals.K_RCTRL: if side == "large": sub.inputData(trial, "ACC", 0) else: sub.inputData(trial, "ACC", 1) else: if event.key == pygame.locals.K_LCTRL: if yellowB == "Left CTRL" and correct == "yellow": sub.inputData(trial, "ACC", 1) elif blueB == "Left CTRL" and correct == "blue": sub.inputData(trial, "ACC", 1) else: sub.inputData(trial, "ACC", 0) elif event.key == pygame.locals.K_RCTRL: if yellowB == "Right CTRL" and correct == "yellow": sub.inputData(trial, "ACC", 1) elif blueB == "Right CTRL" and correct == "blue": sub.inputData(trial, "ACC", 1) else: sub.inputData(trial, "ACC", 0) if RT <= 0: sub.inputData(trial, "ACC", 3) else: sub.inputData(trial, "ACC", 2) pressed = True #fixation pause #add response handlers fixText, fixCross = experiments.printWord(screen, '+', crossSize, (0, 0, 0)) blockIns = {} blockIns['paired'] = "The groups will both appear at the same time." blockIns['sequential'] = "The groups will appear one after the other." blockIns['overlapping'] = "The groups will both appear at the same time." for block in blockOrder: print "creating instructions..." if os.path.exists("colButton.pck"): f = open("colButton.pck", "r") col = pickle.load(f) col.reverse() f.close() else: col = ["Left CTRL", "Right CTRL"] f = open("colButton.pck", "w") pickle.dump(col, f) f.close() yellowB = col[0] blueB = col[1] if block == "paired": instructionText = "In this stage you will see 2 groups of dots.\n%s\n Press LEFT CTRL when there are more dots on the left side of the screen.\n Press RIGHT CTRL when there are more dots on the right side of the screen.\n\nPRESS SPACE TO CONTINUE." % (blockIns[block]) else: instructionText = "In this stage you will see 2 groups of dots.\n%s\n Each group will be either yellow or blue. Your job is to choose which group has more dots in it.\n\nPress %s for yellow.\nPress %s for blue.\n\nPRESS SPACE TO CONTINUE." 
% (blockIns[block], yellowB, blueB) print "entering block %s" % block ratios = shuffler.Condition([.9, .75, .66, .5, .33, .25], "ratio", 6) seeds = shuffler.Condition([6, 7, 8, 9, 10], "seed", 6) size = shuffler.Condition(["con", "incon"], "size", 5) exemplars = shuffler.Condition([1, 2, 3, 4], "exemplar", 20) order = ["large", "small"] color = ["C1", "C2"] cDict = {} cDict["C1"] = "blue" cDict["C2"] = "yellow" print "loading ratio/seed/size/exemplar order..." myShuffler = shuffler.MultiShuffler([ratios, seeds, size, exemplars], trials) stimList = myShuffler.shuffle() sides = shuffler.Condition(order, "sides", 5) colors = shuffler.Condition(color, "colors", 6) print "loading sides/colors order..." csShuffler = shuffler.MultiShuffler([sides, colors], trials) csList = csShuffler.shuffle() print "configuring stimulus displays windows..." if block == "overlapping" or block == "sequential": x = screen.size[0] / 2 y = screen.size[1] / 2 else: x = screen.size[0] / 4 y = screen.size[1] / 2 print "Beginning block now..." experiments.showInstructions(screen, instructionText, textcolor=(0, 0, 0)) if subtrials == -1: stimList = stimList csList = csList else: stimList = stimList[0:subtrials] csList = csList[0:subtrials] for stim, cs in zip(stimList, csList): pressed = False ratio = getattr(stim, "ratio") n1 = getattr(stim, "seed") n2 = int(round(n1 * 1/ratio, 0)) size = getattr(stim, "size") exemplar = getattr(stim, "exemplar") side = getattr(cs, "sides") color = getattr(cs, "colors") sub.inputData(trial, "ACC", "NA") sub.inputData(trial, "RT", "NA") sub.inputData(trial, "block", block) sub.inputData(trial, "ratio", ratio) sub.inputData(trial, "n1", n1) sub.inputData(trial, "n2", n2) sub.inputData(trial, "sizectrl", size) sub.inputData(trial, "exemplar", exemplar) sub.inputData(trial, "order", side) sub.input
put_image_dual
identifier_name
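The put_image_* handlers in dots.py are time-driven state machines: on the first call they record a start time, then advance through dots, mask, and fixation-cross phases as elapsed time crosses each cumulative duration, finally ending the presentation loop. A minimal sketch of that pattern, with stand-in durations and callbacks replacing the VisionEgg texture and viewport calls:

# Durations and callbacks here are assumptions for illustration; the real
# handlers swap texture data and stop the go loop via p.parameters.
dot_duration, mask_dur, cross_duration = 0.2, 0.3, 0.5

def make_handler(show_mask, show_cross, finish):
    state = {"start": None, "phase": None}

    def handler(t_abs):
        if state["phase"] is None:
            state["start"] = t_abs
            state["phase"] = "dots"
        t = t_abs - state["start"]
        if t >= dot_duration and state["phase"] == "dots":
            show_mask()
            state["phase"] = "mask"
        elif t >= dot_duration + mask_dur and state["phase"] == "mask":
            show_cross()
            state["phase"] = "cross"
        elif t >= dot_duration + mask_dur + cross_duration:
            finish()
    return handler

# Usage with stand-in callbacks:
h = make_handler(lambda: print("mask"), lambda: print("cross"), lambda: print("done"))
for t in (0.0, 0.25, 0.6, 1.1):
    h(t)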
dots.py
if os.path.exists("cb.pck"): f = open("cb.pck") cb = pickle.load(f) f.close() else: cb = CBalance.Counterbalance(blocks) blockOrder = cb.advance() f = open("cb.pck", "w") pickle.dump(cb, f) f.close() stimLib = "stimuli" screen = get_default_screen() screen.parameters.bgcolor = (.52, .51, .52) pygame.init() fixText, fixCross = experiments.printWord(screen, '+', crossSize, (0, 0, 0)) trial = 1 #HANDLERS def put_image_sequential(t_abs): global phase global start if not phase: start = t_abs phase = "dots1" t = t_abs - start if t >= dot_duration and phase == "dots1": texture_object.put_sub_image(Image.open(os.path.join(stimLib,fname2))) phase = "dots2" elif t >= (dot_duration * 2) and phase == "dots2": texture_object.put_sub_image(mask_img) phase = "mask" elif t >= (dot_duration * 2 + mask_dur) and phase == "mask": p.parameters.viewports = [fixCross] phase = "cross" elif t >= (dot_duration * 2 + mask_dur + cross_duration): p.parameters.go_duration = [0, 'frames'] def put_image_dual(t_abs): global phase global start if not phase: start = t_abs phase = "dots" t = t_abs - start if t >= dot_duration and phase == "dots": texture_object1.put_sub_image(mask_img) texture_object2.put_sub_image(mask_img) phase = "mask" elif t >= (dot_duration + mask_dur) and phase == "mask": p.parameters.viewports = [fixCross] phase = "cross" elif t >= (dot_duration + mask_dur + cross_duration): p.parameters.go_duration = (0, 'frames') def put_image_overlapping(t_abs): global phase global start if not phase: start = t_abs phase = "dots" t = t_abs - start if t >= dot_duration and phase == "dots": texture_object.put_sub_image(mask_img) phase = "mask" elif t >= (dot_duration + mask_dur) and phase == "mask": p.parameters.viewports = [fixCross] phase = "cross" elif t >= (dot_duration + mask_dur + cross_duration): p.parameters.go_duration = (0, 'frames') def keyFunc(event): global color global cDict global yellowB global blueB global trial global block global side global pressed RT = p.time_sec_since_go * 1000 if block == "sequential": RT-= (dot_duration * 1000) correct = cDict[color] sub.inputData(trial, "RT", RT) if event.key == pygame.locals.K_LCTRL: sub.inputData(trial, "key", "L_CTRL") elif event.key == pygame.locals.K_RCTRL: sub.inputData(trial, "key", "R_CTRL") else: sub.inputData(trial, "key", "NA") if not pressed: if block == "paired": if event.key == pygame.locals.K_LCTRL: if side == "large": sub.inputData(trial, "ACC", 1) else: sub.inputData(trial, "ACC", 0) elif event.key == pygame.locals.K_RCTRL: if side == "large": sub.inputData(trial, "ACC", 0) else: sub.inputData(trial, "ACC", 1) else: if event.key == pygame.locals.K_LCTRL: if yellowB == "Left CTRL" and correct == "yellow": sub.inputData(trial, "ACC", 1) elif blueB == "Left CTRL" and correct == "blue": sub.inputData(trial, "ACC", 1) else: sub.inputData(trial, "ACC", 0) elif event.key == pygame.locals.K_RCTRL: if yellowB == "Right CTRL" and correct == "yellow": sub.inputData(trial, "ACC", 1) elif blueB == "Right CTRL" and correct == "blue":
else: sub.inputData(trial, "ACC", 0) if RT <= 0: sub.inputData(trial, "ACC", 3) else: sub.inputData(trial, "ACC", 2) pressed = True #fixation pause #add response handlers fixText, fixCross = experiments.printWord(screen, '+', crossSize, (0, 0, 0)) blockIns = {} blockIns['paired'] = "The groups will both appear at the same time." blockIns['sequential'] = "The groups will appear one after the other." blockIns['overlapping'] = "The groups will both appear at the same time." for block in blockOrder: print "creating instructions..." if os.path.exists("colButton.pck"): f = open("colButton.pck", "r") col = pickle.load(f) col.reverse() f.close() else: col = ["Left CTRL", "Right CTRL"] f = open("colButton.pck", "w") pickle.dump(col, f) f.close() yellowB = col[0] blueB = col[1] if block == "paired": instructionText = "In this stage you will see 2 groups of dots.\n%s\n Press LEFT CTRL when there are more dots on the left side of the screen.\n Press RIGHT CTRL when there are more dots on the right side of the screen.\n\nPRESS SPACE TO CONTINUE." % (blockIns[block]) else: instructionText = "In this stage you will see 2 groups of dots.\n%s\n Each group will be either yellow or blue. Your job is to choose which group has more dots in it.\n\nPress %s for yellow.\nPress %s for blue.\n\nPRESS SPACE TO CONTINUE." % (blockIns[block], yellowB, blueB) print "entering block %s" % block ratios = shuffler.Condition([.9, .75, .66, .5, .33, .25], "ratio", 6) seeds = shuffler.Condition([6, 7, 8, 9, 10], "seed", 6) size = shuffler.Condition(["con", "incon"], "size", 5) exemplars = shuffler.Condition([1, 2, 3, 4], "exemplar", 20) order = ["large", "small"] color = ["C1", "C2"] cDict = {} cDict["C1"] = "blue" cDict["C2"] = "yellow" print "loading ratio/seed/size/exemplar order..." myShuffler = shuffler.MultiShuffler([ratios, seeds, size, exemplars], trials) stimList = myShuffler.shuffle() sides = shuffler.Condition(order, "sides", 5) colors = shuffler.Condition(color, "colors", 6) print "loading sides/colors order..." csShuffler = shuffler.MultiShuffler([sides, colors], trials) csList = csShuffler.shuffle() print "configuring stimulus displays windows..." if block == "overlapping" or block == "sequential": x = screen.size[0] / 2 y = screen.size[1] / 2 else: x = screen.size[0] / 4 y = screen.size[1] / 2 print "Beginning block now..." experiments.showInstructions(screen, instructionText, textcolor=(0, 0, 0)) if subtrials == -1: stimList = stimList csList = csList else: stimList = stimList[0:subtrials] csList = csList[0:subtrials] for stim, cs in zip(stimList, csList): pressed = False ratio = getattr(stim, "ratio") n1 = getattr(stim, "seed") n2 = int(round(n1 * 1/ratio, 0)) size = getattr(stim, "size") exemplar = getattr(stim, "exemplar") side = getattr(cs, "sides") color = getattr(cs, "colors") sub.inputData(trial, "ACC", "NA") sub.inputData(trial, "RT", "NA") sub.inputData(trial, "block", block) sub.inputData(trial, "ratio", ratio) sub.inputData(trial, "n1", n1) sub.inputData(trial, "n2", n2) sub.inputData(trial, "sizectrl", size) sub.inputData(trial, "exemplar", exemplar) sub.inputData(trial, "order", side) sub.input
sub.inputData(trial, "ACC", 1)
random_line_split
dots.py
if os.path.exists("cb.pck"): f = open("cb.pck") cb = pickle.load(f) f.close() else: cb = CBalance.Counterbalance(blocks) blockOrder = cb.advance() f = open("cb.pck", "w") pickle.dump(cb, f) f.close() stimLib = "stimuli" screen = get_default_screen() screen.parameters.bgcolor = (.52, .51, .52) pygame.init() fixText, fixCross = experiments.printWord(screen, '+', crossSize, (0, 0, 0)) trial = 1 #HANDLERS def put_image_sequential(t_abs): global phase global start if not phase: start = t_abs phase = "dots1" t = t_abs - start if t >= dot_duration and phase == "dots1": texture_object.put_sub_image(Image.open(os.path.join(stimLib,fname2))) phase = "dots2" elif t >= (dot_duration * 2) and phase == "dots2": texture_object.put_sub_image(mask_img) phase = "mask" elif t >= (dot_duration * 2 + mask_dur) and phase == "mask": p.parameters.viewports = [fixCross] phase = "cross" elif t >= (dot_duration * 2 + mask_dur + cross_duration): p.parameters.go_duration = [0, 'frames'] def put_image_dual(t_abs): global phase global start if not phase: start = t_abs phase = "dots" t = t_abs - start if t >= dot_duration and phase == "dots": texture_object1.put_sub_image(mask_img) texture_object2.put_sub_image(mask_img) phase = "mask" elif t >= (dot_duration + mask_dur) and phase == "mask": p.parameters.viewports = [fixCross] phase = "cross" elif t >= (dot_duration + mask_dur + cross_duration): p.parameters.go_duration = (0, 'frames') def put_image_overlapping(t_abs):
def keyFunc(event): global color global cDict global yellowB global blueB global trial global block global side global pressed RT = p.time_sec_since_go * 1000 if block == "sequential": RT-= (dot_duration * 1000) correct = cDict[color] sub.inputData(trial, "RT", RT) if event.key == pygame.locals.K_LCTRL: sub.inputData(trial, "key", "L_CTRL") elif event.key == pygame.locals.K_RCTRL: sub.inputData(trial, "key", "R_CTRL") else: sub.inputData(trial, "key", "NA") if not pressed: if block == "paired": if event.key == pygame.locals.K_LCTRL: if side == "large": sub.inputData(trial, "ACC", 1) else: sub.inputData(trial, "ACC", 0) elif event.key == pygame.locals.K_RCTRL: if side == "large": sub.inputData(trial, "ACC", 0) else: sub.inputData(trial, "ACC", 1) else: if event.key == pygame.locals.K_LCTRL: if yellowB == "Left CTRL" and correct == "yellow": sub.inputData(trial, "ACC", 1) elif blueB == "Left CTRL" and correct == "blue": sub.inputData(trial, "ACC", 1) else: sub.inputData(trial, "ACC", 0) elif event.key == pygame.locals.K_RCTRL: if yellowB == "Right CTRL" and correct == "yellow": sub.inputData(trial, "ACC", 1) elif blueB == "Right CTRL" and correct == "blue": sub.inputData(trial, "ACC", 1) else: sub.inputData(trial, "ACC", 0) if RT <= 0: sub.inputData(trial, "ACC", 3) else: sub.inputData(trial, "ACC", 2) pressed = True #fixation pause #add response handlers fixText, fixCross = experiments.printWord(screen, '+', crossSize, (0, 0, 0)) blockIns = {} blockIns['paired'] = "The groups will both appear at the same time." blockIns['sequential'] = "The groups will appear one after the other." blockIns['overlapping'] = "The groups will both appear at the same time." for block in blockOrder: print "creating instructions..." if os.path.exists("colButton.pck"): f = open("colButton.pck", "r") col = pickle.load(f) col.reverse() f.close() else: col = ["Left CTRL", "Right CTRL"] f = open("colButton.pck", "w") pickle.dump(col, f) f.close() yellowB = col[0] blueB = col[1] if block == "paired": instructionText = "In this stage you will see 2 groups of dots.\n%s\n Press LEFT CTRL when there are more dots on the left side of the screen.\n Press RIGHT CTRL when there are more dots on the right side of the screen.\n\nPRESS SPACE TO CONTINUE." % (blockIns[block]) else: instructionText = "In this stage you will see 2 groups of dots.\n%s\n Each group will be either yellow or blue. Your job is to choose which group has more dots in it.\n\nPress %s for yellow.\nPress %s for blue.\n\nPRESS SPACE TO CONTINUE." % (blockIns[block], yellowB, blueB) print "entering block %s" % block ratios = shuffler.Condition([.9, .75, .66, .5, .33, .25], "ratio", 6) seeds = shuffler.Condition([6, 7, 8, 9, 10], "seed", 6) size = shuffler.Condition(["con", "incon"], "size", 5) exemplars = shuffler.Condition([1, 2, 3, 4], "exemplar", 20) order = ["large", "small"] color = ["C1", "C2"] cDict = {} cDict["C1"] = "blue" cDict["C2"] = "yellow" print "loading ratio/seed/size/exemplar order..." myShuffler = shuffler.MultiShuffler([ratios, seeds, size, exemplars], trials) stimList = myShuffler.shuffle() sides = shuffler.Condition(order, "sides", 5) colors = shuffler.Condition(color, "colors", 6) print "loading sides/colors order..." csShuffler = shuffler.MultiShuffler([sides, colors], trials) csList = csShuffler.shuffle() print "configuring stimulus displays windows..." if block == "overlapping" or block == "sequential": x = screen.size[0] / 2 y = screen.size[1] / 2 else: x = screen.size[0] / 4 y = screen.size[1] / 2 print "Beginning block now..." 
experiments.showInstructions(screen, instructionText, textcolor=(0, 0, 0)) if subtrials == -1: stimList = stimList csList = csList else: stimList = stimList[0:subtrials] csList = csList[0:subtrials] for stim, cs in zip(stimList, csList): pressed = False ratio = getattr(stim, "ratio") n1 = getattr(stim, "seed") n2 = int(round(n1 * 1/ratio, 0)) size = getattr(stim, "size") exemplar = getattr(stim, "exemplar") side = getattr(cs, "sides") color = getattr(cs, "colors") sub.inputData(trial, "ACC", "NA") sub.inputData(trial, "RT", "NA") sub.inputData(trial, "block", block) sub.inputData(trial, "ratio", ratio) sub.inputData(trial, "n1", n1) sub.inputData(trial, "n2", n2) sub.inputData(trial, "sizectrl", size) sub.inputData(trial, "exemplar", exemplar) sub.inputData(trial, "order", side) sub.input
global phase global start if not phase: start = t_abs phase = "dots" t = t_abs - start if t >= dot_duration and phase == "dots": texture_object.put_sub_image(mask_img) phase = "mask" elif t >= (dot_duration + mask_dur) and phase == "mask": p.parameters.viewports = [fixCross] phase = "cross" elif t >= (dot_duration + mask_dur + cross_duration): p.parameters.go_duration = (0, 'frames')
identifier_body
dots.py
SETTINGS if os.path.exists("cb.pck"): f = open("cb.pck") cb = pickle.load(f) f.close() else: cb = CBalance.Counterbalance(blocks) blockOrder = cb.advance() f = open("cb.pck", "w") pickle.dump(cb, f) f.close() stimLib = "stimuli" screen = get_default_screen() screen.parameters.bgcolor = (.52, .51, .52) pygame.init() fixText, fixCross = experiments.printWord(screen, '+', crossSize, (0, 0, 0)) trial = 1 #HANDLERS def put_image_sequential(t_abs): global phase global start if not phase: start = t_abs phase = "dots1" t = t_abs - start if t >= dot_duration and phase == "dots1": texture_object.put_sub_image(Image.open(os.path.join(stimLib,fname2))) phase = "dots2" elif t >= (dot_duration * 2) and phase == "dots2": texture_object.put_sub_image(mask_img) phase = "mask" elif t >= (dot_duration * 2 + mask_dur) and phase == "mask": p.parameters.viewports = [fixCross] phase = "cross" elif t >= (dot_duration * 2 + mask_dur + cross_duration): p.parameters.go_duration = [0, 'frames'] def put_image_dual(t_abs): global phase global start if not phase: start = t_abs phase = "dots" t = t_abs - start if t >= dot_duration and phase == "dots": texture_object1.put_sub_image(mask_img) texture_object2.put_sub_image(mask_img) phase = "mask" elif t >= (dot_duration + mask_dur) and phase == "mask": p.parameters.viewports = [fixCross] phase = "cross" elif t >= (dot_duration + mask_dur + cross_duration): p.parameters.go_duration = (0, 'frames') def put_image_overlapping(t_abs): global phase global start if not phase: start = t_abs phase = "dots" t = t_abs - start if t >= dot_duration and phase == "dots": texture_object.put_sub_image(mask_img) phase = "mask" elif t >= (dot_duration + mask_dur) and phase == "mask": p.parameters.viewports = [fixCross] phase = "cross" elif t >= (dot_duration + mask_dur + cross_duration): p.parameters.go_duration = (0, 'frames') def keyFunc(event): global color global cDict global yellowB global blueB global trial global block global side global pressed RT = p.time_sec_since_go * 1000 if block == "sequential": RT-= (dot_duration * 1000) correct = cDict[color] sub.inputData(trial, "RT", RT) if event.key == pygame.locals.K_LCTRL: sub.inputData(trial, "key", "L_CTRL") elif event.key == pygame.locals.K_RCTRL: sub.inputData(trial, "key", "R_CTRL") else: sub.inputData(trial, "key", "NA") if not pressed: if block == "paired": if event.key == pygame.locals.K_LCTRL: if side == "large": sub.inputData(trial, "ACC", 1) else: sub.inputData(trial, "ACC", 0) elif event.key == pygame.locals.K_RCTRL: if side == "large": sub.inputData(trial, "ACC", 0) else: sub.inputData(trial, "ACC", 1) else: if event.key == pygame.locals.K_LCTRL: if yellowB == "Left CTRL" and correct == "yellow": sub.inputData(trial, "ACC", 1) elif blueB == "Left CTRL" and correct == "blue": sub.inputData(trial, "ACC", 1) else: sub.inputData(trial, "ACC", 0) elif event.key == pygame.locals.K_RCTRL: if yellowB == "Right CTRL" and correct == "yellow": sub.inputData(trial, "ACC", 1) elif blueB == "Right CTRL" and correct == "blue": sub.inputData(trial, "ACC", 1) else: sub.inputData(trial, "ACC", 0) if RT <= 0: sub.inputData(trial, "ACC", 3) else: sub.inputData(trial, "ACC", 2) pressed = True #fixation pause #add response handlers fixText, fixCross = experiments.printWord(screen, '+', crossSize, (0, 0, 0)) blockIns = {} blockIns['paired'] = "The groups will both appear at the same time." blockIns['sequential'] = "The groups will appear one after the other." blockIns['overlapping'] = "The groups will both appear at the same time." 
for block in blockOrder: print "creating instructions..." if os.path.exists("colButton.pck"): f = open("colButton.pck", "r") col = pickle.load(f) col.reverse() f.close() else: col = ["Left CTRL", "Right CTRL"] f = open("colButton.pck", "w") pickle.dump(col, f) f.close() yellowB = col[0] blueB = col[1] if block == "paired": instructionText = "In this stage you will see 2 groups of dots.\n%s\n Press LEFT CTRL when there are more dots on the left side of the screen.\n Press RIGHT CTRL when there are more dots on the right side of the screen.\n\nPRESS SPACE TO CONTINUE." % (blockIns[block]) else:
print "entering block %s" % block ratios = shuffler.Condition([.9, .75, .66, .5, .33, .25], "ratio", 6) seeds = shuffler.Condition([6, 7, 8, 9, 10], "seed", 6) size = shuffler.Condition(["con", "incon"], "size", 5) exemplars = shuffler.Condition([1, 2, 3, 4], "exemplar", 20) order = ["large", "small"] color = ["C1", "C2"] cDict = {} cDict["C1"] = "blue" cDict["C2"] = "yellow" print "loading ratio/seed/size/exemplar order..." myShuffler = shuffler.MultiShuffler([ratios, seeds, size, exemplars], trials) stimList = myShuffler.shuffle() sides = shuffler.Condition(order, "sides", 5) colors = shuffler.Condition(color, "colors", 6) print "loading sides/colors order..." csShuffler = shuffler.MultiShuffler([sides, colors], trials) csList = csShuffler.shuffle() print "configuring stimulus displays windows..." if block == "overlapping" or block == "sequential": x = screen.size[0] / 2 y = screen.size[1] / 2 else: x = screen.size[0] / 4 y = screen.size[1] / 2 print "Beginning block now..." experiments.showInstructions(screen, instructionText, textcolor=(0, 0, 0)) if subtrials == -1: stimList = stimList csList = csList else: stimList = stimList[0:subtrials] csList = csList[0:subtrials] for stim, cs in zip(stimList, csList): pressed = False ratio = getattr(stim, "ratio") n1 = getattr(stim, "seed") n2 = int(round(n1 * 1/ratio, 0)) size = getattr(stim, "size") exemplar = getattr(stim, "exemplar") side = getattr(cs, "sides") color = getattr(cs, "colors") sub.inputData(trial, "ACC", "NA") sub.inputData(trial, "RT", "NA") sub.inputData(trial, "block", block) sub.inputData(trial, "ratio", ratio) sub.inputData(trial, "n1", n1) sub.inputData(trial, "n2", n2) sub.inputData(trial, "sizectrl", size) sub.inputData(trial, "exemplar", exemplar) sub.inputData(trial, "order", side) sub.inputData(tr
instructionText = "In this stage you will see 2 groups of dots.\n%s\n Each group will be either yellow or blue. Your job is to choose which group has more dots in it.\n\nPress %s for yellow.\nPress %s for blue.\n\nPRESS SPACE TO CONTINUE." % (blockIns[block], yellowB, blueB)
conditional_block
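keyFunc folds each response into a single ACC code per trial. Reading the handler above, the scheme appears to be: 1 for a correct choice, 0 for an incorrect one, 3 when the response time is non-positive (an anticipatory press), and 2 when a response had already been registered for the trial. A small sketch of that decision; the numeric codes are inferred from the handler rather than a documented convention:

def score_response(correct_key, pressed_key, rt_ms, already_pressed):
    # 2 = repeat press after a response was already recorded for this trial.
    if already_pressed:
        return 2
    # 3 = anticipatory response (non-positive RT overrides correctness).
    if rt_ms <= 0:
        return 3
    # 1 = correct key, 0 = incorrect key.
    return 1 if pressed_key == correct_key else 0

assert score_response("L_CTRL", "L_CTRL", 350, False) == 1
assert score_response("L_CTRL", "R_CTRL", 350, False) == 0
assert score_response("L_CTRL", "L_CTRL", -20, False) == 3
assert score_response("L_CTRL", "L_CTRL", 350, True) == 2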
interaction.rs
XR_HAND_JOINT_RING_DISTAL: usize = 18; pub const XR_HAND_JOINT_RING_TIP: usize = 19; pub const XR_HAND_JOINT_LITTLE_METACARPAL: usize = 20; pub const XR_HAND_JOINT_LITTLE_PROXIMAL: usize = 21; pub const XR_HAND_JOINT_LITTLE_INTERMEDIATE: usize = 22; pub const XR_HAND_JOINT_LITTLE_DISTAL: usize = 23; pub const XR_HAND_JOINT_LITTLE_TIP: usize = 24; // To be verified: in all useful instances, when the orientation is valid, the position is also // valid. In case of 3DOF headsets, position should always be emulated using a neck and arm model. // In case of hand tracking, when a joint is estimated, both pose and orientation are available. #[derive(Clone, Copy, Default, Debug, Serialize, Deserialize)] pub struct XrRigidTransform { pub position: Vec3, pub orientation: Quat, } impl Mul for XrRigidTransform { type Output = XrRigidTransform; fn mul(self, rhs: Self) -> Self::Output { XrRigidTransform { position: self.position + self.orientation * rhs.position, orientation: self.orientation * rhs.orientation, } } } impl XrRigidTransform { pub fn to_mat4(&self) -> Mat4 { todo!() } } #[derive(Clone, Default, Debug, Serialize, Deserialize)] pub struct XrPose { pub transform: XrRigidTransform, pub linear_velocity: Option<Vec3>, pub angular_velocity: Option<Vec3>, pub emulated_position: bool, } impl Deref for XrPose { type Target = XrRigidTransform; fn deref(&self) -> &Self::Target { &self.transform } } #[derive(Clone, Default, Debug, Serialize, Deserialize)] pub struct XrJointPose { pub pose: XrPose, /// Radius of a sphere placed at the center of the joint that roughly touches the skin on both /// sides of the hand. pub radius: f32, } impl Deref for XrJointPose { type Target = XrPose; fn deref(&self) -> &Self::Target { &self.pose } } #[derive(Clone, Copy, Eq, PartialEq, Hash, Debug, Serialize, Deserialize)] pub enum XrReferenceSpaceType { /// The coordinate system (position and orientation) is set as the headset pose at startup or /// after a recenter. This should be used only for experiences where the user is laid down. Viewer, /// The coordinate system (position and gravity-aligned orientation) is calculated from the /// headset pose at startup or after a recenter. This is for seated experiences. Local, /// The coordinate system (position and orientation) corresponds to the center of a rectangle at /// floor level, with +Y up. This is for stading or room-scale experiences. Stage, } pub mod implementation { use super::XrReferenceSpaceType; use crate::{interaction::XrPose, XrJointPose}; use bevy_math::Vec3; pub trait XrTrackingSourceBackend: Send + Sync { fn reference_space_type(&self) -> XrReferenceSpaceType; fn set_reference_space_type(&self, reference_space_type: XrReferenceSpaceType) -> bool; fn bounds_geometry(&self) -> Option<Vec<Vec3>>; fn views_poses(&self) -> Vec<XrPose>; fn hands_pose(&self) -> [Option<XrPose>; 2]; fn hands_skeleton_pose(&self) -> [Option<Vec<XrJointPose>>; 2]; fn hands_target_ray(&self) -> [Option<XrPose>; 2]; fn viewer_target_ray(&self) -> XrPose; } } /// Component used to poll tracking data. Tracking data is obtained "on-demand" to get the best /// precision possible. Poses are predicted for the next V-Sync. To obtain poses for an arbitrary /// point in time, `bevy_openxr` backend provides this functionality with OpenXrTrackingState. 
pub struct XrTrackingSource { inner: Box<dyn implementation::XrTrackingSourceBackend>, } impl XrTrackingSource { pub fn new(backend: Box<dyn implementation::XrTrackingSourceBackend>) -> Self { Self { inner: backend } } pub fn reference_space_type(&self) -> XrReferenceSpaceType { self.inner.reference_space_type() } /// Returns true if the tracking mode has been set correctly. If false is returned the tracking /// mode is not supported and another one must be chosen. pub fn set_reference_space_type(&mut self, reference_space_type: XrReferenceSpaceType) -> bool { self.inner.set_reference_space_type(reference_space_type) } pub fn just_reset_reference_space(&mut self) -> bool { todo!() } /// Returns a list of points, ordered clockwise, that define the playspace boundary. Only /// available when the reference space is set to `BoundedFloor`. Y component is always 0. pub fn bounds_geometry(&self) -> Option<Vec<Vec3>> { self.inner.bounds_geometry() } pub fn views_poses(&self) -> Vec<XrPose> { self.inner.views_poses() } /// Index 0 corresponds to the left hand, index 1 corresponds to the right hand. pub fn hands_pose(&self) -> [Option<XrPose>; 2] { self.inner.hands_pose() } /// Index 0 corresponds to the left hand, index 1 corresponds to the right hand. pub fn hands_skeleton_pose(&self) -> [Option<Vec<XrJointPose>>; 2]
/// Returns poses that can be used to render a target ray or cursor. The ray is along -Z. The /// behavior is vendor-specific. Index 0 corresponds to the left hand, index 1 corresponds to /// the right hand. pub fn hand_target_ray(&self) -> [Option<XrPose>; 2] { self.inner.hands_target_ray() } /// Returns a pose that can be used to render a target ray or cursor. The ray is along -Z. The /// origin is between the eyes for head-mounted displays and the center of the screen for /// handheld devices. pub fn viewer_target_ray(&self) -> XrPose { self.inner.viewer_target_ray() } // future extensions: // * eye tracking // * lower face tracking // * AR face tracking // * body/skeletal trackers // * scene understanding (anchors, planes, meshes) } #[derive(Clone, Copy, Eq, PartialEq, Hash, Debug, Serialize, Deserialize)] pub enum XrHandType { Left, Right, } #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Serialize, Deserialize)] pub enum XrButtonState { Default, Touched, Pressed, } impl Default for XrButtonState { fn default() -> Self { Self::Default } } #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] pub enum XrActionType { /// Convenience type that groups click, touch and value actions for a single button. /// The last segment of the path (`/click`, `/touch` or `/value`) must be omitted. Button { touch: bool, }, Binary, Scalar, /// Convenience type that groups x and y axes for a touchpad or thumbstick action. /// The last segment of the path (`/x` or `/y`) must be omitted. Vec2D, } #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] pub enum XrActionState { Button { state: XrButtonState, value: f32 }, Binary(bool), Scalar(f32), Vec2D(Vec2), } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct XrActionDescriptor { pub name: String, pub action_type: XrActionType, } /// List bindings related to a single interaction profile. `tracked` and `has_haptics` can always be /// set to false but if they are set to true and the interaction profile does not support them, the /// the profile will be disabled completely. pub struct XrProfileDescriptor { pub profile: String, pub bindings: Vec<(XrActionDescriptor, String)>, pub tracked: bool, pub has_haptics: bool, } pub struct XrActionSet { current_states: HashMap<String, XrActionState>, previous_states: HashMap<String, XrActionState>, } impl XrActionSet { pub fn state(&self, action: &str) -> Option<XrActionState> { self.current_states.get(action).cloned() } pub fn button_state(&self, action: &str) -> XrButtonState { if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action) { *state } else { XrButtonState::Default } } pub fn button_touched(&self, action: &str) -> bool { if let Some(XrActionState::Button {
{ self.inner.hands_skeleton_pose() }
identifier_body
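XrRigidTransform composes under Mul by rotating the right-hand side's position into the left-hand side's frame before adding, and multiplying the orientations. A Python sketch of that composition with a tiny hand-rolled quaternion helper; this is illustration-only math, not the bevy_math API:

# Quaternions are (w, x, y, z); transforms are (position, orientation).
def quat_mul(a, b):
    aw, ax, ay, az = a
    bw, bx, by, bz = b
    return (aw*bw - ax*bx - ay*by - az*bz,
            aw*bx + ax*bw + ay*bz - az*by,
            aw*by - ax*bz + ay*bw + az*bx,
            aw*bz + ax*by - ay*bx + az*bw)

def quat_rotate(q, v):
    # Rotate vector v by unit quaternion q: q * (0, v) * conj(q).
    w, x, y, z = q
    qv = quat_mul(quat_mul(q, (0.0, *v)), (w, -x, -y, -z))
    return qv[1:]

def compose(a, b):
    # result.position = a.pos + a.rot * b.pos; result.rot = a.rot * b.rot
    (apos, arot), (bpos, brot) = a, b
    pos = tuple(p + d for p, d in zip(apos, quat_rotate(arot, bpos)))
    return (pos, quat_mul(arot, brot))

identity = ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0, 0.0))
print(compose(identity, ((1.0, 2.0, 3.0), (1.0, 0.0, 0.0, 0.0))))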
interaction.rs
XR_HAND_JOINT_RING_DISTAL: usize = 18; pub const XR_HAND_JOINT_RING_TIP: usize = 19; pub const XR_HAND_JOINT_LITTLE_METACARPAL: usize = 20; pub const XR_HAND_JOINT_LITTLE_PROXIMAL: usize = 21; pub const XR_HAND_JOINT_LITTLE_INTERMEDIATE: usize = 22; pub const XR_HAND_JOINT_LITTLE_DISTAL: usize = 23; pub const XR_HAND_JOINT_LITTLE_TIP: usize = 24; // To be verified: in all useful instances, when the orientation is valid, the position is also // valid. In case of 3DOF headsets, position should always be emulated using a neck and arm model. // In case of hand tracking, when a joint is estimated, both pose and orientation are available. #[derive(Clone, Copy, Default, Debug, Serialize, Deserialize)] pub struct XrRigidTransform { pub position: Vec3, pub orientation: Quat, } impl Mul for XrRigidTransform { type Output = XrRigidTransform; fn mul(self, rhs: Self) -> Self::Output { XrRigidTransform { position: self.position + self.orientation * rhs.position, orientation: self.orientation * rhs.orientation, } } } impl XrRigidTransform { pub fn to_mat4(&self) -> Mat4 { todo!() } } #[derive(Clone, Default, Debug, Serialize, Deserialize)] pub struct XrPose { pub transform: XrRigidTransform, pub linear_velocity: Option<Vec3>, pub angular_velocity: Option<Vec3>, pub emulated_position: bool, } impl Deref for XrPose { type Target = XrRigidTransform; fn deref(&self) -> &Self::Target { &self.transform } } #[derive(Clone, Default, Debug, Serialize, Deserialize)] pub struct XrJointPose { pub pose: XrPose, /// Radius of a sphere placed at the center of the joint that roughly touches the skin on both /// sides of the hand. pub radius: f32, } impl Deref for XrJointPose { type Target = XrPose; fn deref(&self) -> &Self::Target { &self.pose } } #[derive(Clone, Copy, Eq, PartialEq, Hash, Debug, Serialize, Deserialize)] pub enum XrReferenceSpaceType { /// The coordinate system (position and orientation) is set as the headset pose at startup or /// after a recenter. This should be used only for experiences where the user is laid down. Viewer, /// The coordinate system (position and gravity-aligned orientation) is calculated from the /// headset pose at startup or after a recenter. This is for seated experiences. Local, /// The coordinate system (position and orientation) corresponds to the center of a rectangle at /// floor level, with +Y up. This is for stading or room-scale experiences. Stage, } pub mod implementation { use super::XrReferenceSpaceType; use crate::{interaction::XrPose, XrJointPose}; use bevy_math::Vec3; pub trait XrTrackingSourceBackend: Send + Sync { fn reference_space_type(&self) -> XrReferenceSpaceType; fn set_reference_space_type(&self, reference_space_type: XrReferenceSpaceType) -> bool; fn bounds_geometry(&self) -> Option<Vec<Vec3>>; fn views_poses(&self) -> Vec<XrPose>; fn hands_pose(&self) -> [Option<XrPose>; 2]; fn hands_skeleton_pose(&self) -> [Option<Vec<XrJointPose>>; 2]; fn hands_target_ray(&self) -> [Option<XrPose>; 2]; fn viewer_target_ray(&self) -> XrPose; } } /// Component used to poll tracking data. Tracking data is obtained "on-demand" to get the best /// precision possible. Poses are predicted for the next V-Sync. To obtain poses for an arbitrary /// point in time, `bevy_openxr` backend provides this functionality with OpenXrTrackingState. 
pub struct XrTrackingSource { inner: Box<dyn implementation::XrTrackingSourceBackend>, } impl XrTrackingSource { pub fn new(backend: Box<dyn implementation::XrTrackingSourceBackend>) -> Self { Self { inner: backend } } pub fn reference_space_type(&self) -> XrReferenceSpaceType { self.inner.reference_space_type() } /// Returns true if the tracking mode has been set correctly. If false is returned the tracking /// mode is not supported and another one must be chosen. pub fn set_reference_space_type(&mut self, reference_space_type: XrReferenceSpaceType) -> bool { self.inner.set_reference_space_type(reference_space_type) } pub fn just_reset_reference_space(&mut self) -> bool { todo!() } /// Returns a list of points, ordered clockwise, that define the playspace boundary. Only /// available when the reference space is set to `BoundedFloor`. Y component is always 0. pub fn bounds_geometry(&self) -> Option<Vec<Vec3>> { self.inner.bounds_geometry() } pub fn views_poses(&self) -> Vec<XrPose> { self.inner.views_poses() } /// Index 0 corresponds to the left hand, index 1 corresponds to the right hand. pub fn hands_pose(&self) -> [Option<XrPose>; 2] { self.inner.hands_pose() } /// Index 0 corresponds to the left hand, index 1 corresponds to the right hand. pub fn hands_skeleton_pose(&self) -> [Option<Vec<XrJointPose>>; 2] { self.inner.hands_skeleton_pose() } /// Returns poses that can be used to render a target ray or cursor. The ray is along -Z. The /// behavior is vendor-specific. Index 0 corresponds to the left hand, index 1 corresponds to /// the right hand. pub fn hand_target_ray(&self) -> [Option<XrPose>; 2] { self.inner.hands_target_ray() } /// Returns a pose that can be used to render a target ray or cursor. The ray is along -Z. The /// origin is between the eyes for head-mounted displays and the center of the screen for /// handheld devices. pub fn viewer_target_ray(&self) -> XrPose { self.inner.viewer_target_ray() } // future extensions: // * eye tracking // * lower face tracking // * AR face tracking // * body/skeletal trackers // * scene understanding (anchors, planes, meshes) } #[derive(Clone, Copy, Eq, PartialEq, Hash, Debug, Serialize, Deserialize)] pub enum XrHandType { Left, Right, } #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Serialize, Deserialize)] pub enum XrButtonState { Default, Touched, Pressed, } impl Default for XrButtonState { fn default() -> Self { Self::Default } } #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] pub enum XrActionType { /// Convenience type that groups click, touch and value actions for a single button. /// The last segment of the path (`/click`, `/touch` or `/value`) must be omitted. Button { touch: bool, }, Binary, Scalar, /// Convenience type that groups x and y axes for a touchpad or thumbstick action. /// The last segment of the path (`/x` or `/y`) must be omitted. Vec2D, } #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] pub enum XrActionState { Button { state: XrButtonState, value: f32 }, Binary(bool), Scalar(f32), Vec2D(Vec2), } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct XrActionDescriptor { pub name: String, pub action_type: XrActionType, } /// List bindings related to a single interaction profile. `tracked` and `has_haptics` can always be /// set to false but if they are set to true and the interaction profile does not support them, the /// the profile will be disabled completely. 
pub struct XrProfileDescriptor { pub profile: String, pub bindings: Vec<(XrActionDescriptor, String)>, pub tracked: bool, pub has_haptics: bool, } pub struct XrActionSet { current_states: HashMap<String, XrActionState>, previous_states: HashMap<String, XrActionState>, } impl XrActionSet { pub fn state(&self, action: &str) -> Option<XrActionState> { self.current_states.get(action).cloned() } pub fn button_state(&self, action: &str) -> XrButtonState { if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action)
else { XrButtonState::Default } } pub fn button_touched(&self, action: &str) -> bool { if let Some(XrActionState::Button { state
{ *state }
conditional_block
interaction.rs
/// precision possible. Poses are predicted for the next V-Sync. To obtain poses for an arbitrary /// point in time, `bevy_openxr` backend provides this functionality with OpenXrTrackingState. pub struct XrTrackingSource { inner: Box<dyn implementation::XrTrackingSourceBackend>, } impl XrTrackingSource { pub fn new(backend: Box<dyn implementation::XrTrackingSourceBackend>) -> Self { Self { inner: backend } } pub fn reference_space_type(&self) -> XrReferenceSpaceType { self.inner.reference_space_type() } /// Returns true if the tracking mode has been set correctly. If false is returned the tracking /// mode is not supported and another one must be chosen. pub fn set_reference_space_type(&mut self, reference_space_type: XrReferenceSpaceType) -> bool { self.inner.set_reference_space_type(reference_space_type) } pub fn just_reset_reference_space(&mut self) -> bool { todo!() } /// Returns a list of points, ordered clockwise, that define the playspace boundary. Only /// available when the reference space is set to `BoundedFloor`. Y component is always 0. pub fn bounds_geometry(&self) -> Option<Vec<Vec3>> { self.inner.bounds_geometry() } pub fn views_poses(&self) -> Vec<XrPose> { self.inner.views_poses() } /// Index 0 corresponds to the left hand, index 1 corresponds to the right hand. pub fn hands_pose(&self) -> [Option<XrPose>; 2] { self.inner.hands_pose() } /// Index 0 corresponds to the left hand, index 1 corresponds to the right hand. pub fn hands_skeleton_pose(&self) -> [Option<Vec<XrJointPose>>; 2] { self.inner.hands_skeleton_pose() } /// Returns poses that can be used to render a target ray or cursor. The ray is along -Z. The /// behavior is vendor-specific. Index 0 corresponds to the left hand, index 1 corresponds to /// the right hand. pub fn hand_target_ray(&self) -> [Option<XrPose>; 2] { self.inner.hands_target_ray() } /// Returns a pose that can be used to render a target ray or cursor. The ray is along -Z. The /// origin is between the eyes for head-mounted displays and the center of the screen for /// handheld devices. pub fn viewer_target_ray(&self) -> XrPose { self.inner.viewer_target_ray() } // future extensions: // * eye tracking // * lower face tracking // * AR face tracking // * body/skeletal trackers // * scene understanding (anchors, planes, meshes) } #[derive(Clone, Copy, Eq, PartialEq, Hash, Debug, Serialize, Deserialize)] pub enum XrHandType { Left, Right, } #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Serialize, Deserialize)] pub enum XrButtonState { Default, Touched, Pressed, } impl Default for XrButtonState { fn default() -> Self { Self::Default } } #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] pub enum XrActionType { /// Convenience type that groups click, touch and value actions for a single button. /// The last segment of the path (`/click`, `/touch` or `/value`) must be omitted. Button { touch: bool, }, Binary, Scalar, /// Convenience type that groups x and y axes for a touchpad or thumbstick action. /// The last segment of the path (`/x` or `/y`) must be omitted. Vec2D, } #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] pub enum XrActionState { Button { state: XrButtonState, value: f32 }, Binary(bool), Scalar(f32), Vec2D(Vec2), } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct XrActionDescriptor { pub name: String, pub action_type: XrActionType, } /// List bindings related to a single interaction profile. 
`tracked` and `has_haptics` can always be /// set to false but if they are set to true and the interaction profile does not support them, the /// the profile will be disabled completely. pub struct XrProfileDescriptor { pub profile: String, pub bindings: Vec<(XrActionDescriptor, String)>, pub tracked: bool, pub has_haptics: bool, } pub struct XrActionSet { current_states: HashMap<String, XrActionState>, previous_states: HashMap<String, XrActionState>, } impl XrActionSet { pub fn state(&self, action: &str) -> Option<XrActionState> { self.current_states.get(action).cloned() } pub fn button_state(&self, action: &str) -> XrButtonState { if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action) { *state } else { XrButtonState::Default } } pub fn button_touched(&self, action: &str) -> bool { if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action) { *state != XrButtonState::Default } else { false } } pub fn button_pressed(&self, action: &str) -> bool { if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action) { *state == XrButtonState::Pressed } else { false } } fn button_states(&self, action: &str) -> Option<(XrButtonState, XrButtonState)> { if let ( Some(XrActionState::Button { state: current_state, .. }), Some(XrActionState::Button { state: previous_state, .. }), ) = ( self.current_states.get(action), self.previous_states.get(action), ) { Some((*current_state, *previous_state)) } else { None } } pub fn button_just_touched(&self, action: &str) -> bool { self.button_states(action) .map(|(cur, prev)| cur != XrButtonState::Default && prev == XrButtonState::Default) .unwrap_or(false) } pub fn button_just_untouched(&self, action: &str) -> bool { self.button_states(action) .map(|(cur, prev)| cur == XrButtonState::Default && prev != XrButtonState::Default) .unwrap_or(false) } pub fn button_just_pressed(&self, action: &str) -> bool { self.button_states(action) .map(|(cur, prev)| cur == XrButtonState::Pressed && prev != XrButtonState::Pressed) .unwrap_or(false) } pub fn button_just_unpressed(&self, action: &str) -> bool { self.button_states(action) .map(|(cur, prev)| cur != XrButtonState::Pressed && prev == XrButtonState::Pressed) .unwrap_or(false) } pub fn binary_value(&self, action: &str) -> bool { if let Some(XrActionState::Binary(value)) = self.current_states.get(action) { *value } else { self.button_pressed(action) } } pub fn scalar_value(&self, action: &str) -> f32 { if let Some(XrActionState::Scalar(value) | XrActionState::Button { value, .. }) = self.current_states.get(action) { *value } else { 0.0 } } pub fn vec_2d_value(&self, action: &str) -> Vec2 { if let Some(XrActionState::Vec2D(value)) = self.current_states.get(action) { *value } else { Vec2::ZERO } } pub fn set(&mut self, states: HashMap<String, XrActionState>) { self.previous_states = self.current_states.clone(); self.current_states = states; } pub fn clear(&mut self) { self.current_states.clear(); self.previous_states.clear(); } } #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] pub enum XrVibrationEventType { Apply { duration: Duration, frequency: f32, amplitude: f32, }, Stop, } #[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] pub struct XrVibrationEvent { pub hand: XrHandType, pub command: XrVibrationEventType, } /// Active interaction profiles. The format is backend-specific. They can be used to choose the /// controller 3D models to display. 
/// Note: in case skeletal hand tracking is active, the profiles still point to controller profiles. /// The correct 3D model to display can be decided depending on whether skeletal hand tracking data is /// available or not. #[derive(Clone, PartialEq, Default, Debug, Serialize, Deserialize)] pub struct
XrProfiles
identifier_name
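XrActionSet answers the button_just_* queries by comparing the current state map against the previous one that set() shifted aside. The same edge-detection pattern in a few lines of Python, with plain booleans standing in for XrButtonState (illustrative only):

class ActionSet:
    def __init__(self):
        self.current = {}
        self.previous = {}

    def update(self, states):
        # Mirror of XrActionSet::set(): shift current -> previous, store new states.
        self.previous = dict(self.current)
        self.current = dict(states)

    def just_pressed(self, action):
        return self.current.get(action, False) and not self.previous.get(action, False)

    def just_unpressed(self, action):
        return self.previous.get(action, False) and not self.current.get(action, False)

actions = ActionSet()
actions.update({"trigger": True})
assert actions.just_pressed("trigger")
actions.update({"trigger": True})
assert not actions.just_pressed("trigger")
actions.update({"trigger": False})
assert actions.just_unpressed("trigger")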
interaction.rs
a sphere placed at the center of the joint that roughly touches the skin on both /// sides of the hand. pub radius: f32, } impl Deref for XrJointPose { type Target = XrPose; fn deref(&self) -> &Self::Target { &self.pose } } #[derive(Clone, Copy, Eq, PartialEq, Hash, Debug, Serialize, Deserialize)] pub enum XrReferenceSpaceType { /// The coordinate system (position and orientation) is set as the headset pose at startup or /// after a recenter. This should be used only for experiences where the user is laid down. Viewer, /// The coordinate system (position and gravity-aligned orientation) is calculated from the /// headset pose at startup or after a recenter. This is for seated experiences. Local, /// The coordinate system (position and orientation) corresponds to the center of a rectangle at /// floor level, with +Y up. This is for stading or room-scale experiences. Stage, } pub mod implementation { use super::XrReferenceSpaceType; use crate::{interaction::XrPose, XrJointPose}; use bevy_math::Vec3; pub trait XrTrackingSourceBackend: Send + Sync { fn reference_space_type(&self) -> XrReferenceSpaceType; fn set_reference_space_type(&self, reference_space_type: XrReferenceSpaceType) -> bool; fn bounds_geometry(&self) -> Option<Vec<Vec3>>; fn views_poses(&self) -> Vec<XrPose>; fn hands_pose(&self) -> [Option<XrPose>; 2]; fn hands_skeleton_pose(&self) -> [Option<Vec<XrJointPose>>; 2]; fn hands_target_ray(&self) -> [Option<XrPose>; 2]; fn viewer_target_ray(&self) -> XrPose; } } /// Component used to poll tracking data. Tracking data is obtained "on-demand" to get the best /// precision possible. Poses are predicted for the next V-Sync. To obtain poses for an arbitrary /// point in time, `bevy_openxr` backend provides this functionality with OpenXrTrackingState. pub struct XrTrackingSource { inner: Box<dyn implementation::XrTrackingSourceBackend>, } impl XrTrackingSource { pub fn new(backend: Box<dyn implementation::XrTrackingSourceBackend>) -> Self { Self { inner: backend } } pub fn reference_space_type(&self) -> XrReferenceSpaceType { self.inner.reference_space_type() } /// Returns true if the tracking mode has been set correctly. If false is returned the tracking /// mode is not supported and another one must be chosen. pub fn set_reference_space_type(&mut self, reference_space_type: XrReferenceSpaceType) -> bool { self.inner.set_reference_space_type(reference_space_type) } pub fn just_reset_reference_space(&mut self) -> bool { todo!() } /// Returns a list of points, ordered clockwise, that define the playspace boundary. Only /// available when the reference space is set to `BoundedFloor`. Y component is always 0. pub fn bounds_geometry(&self) -> Option<Vec<Vec3>> { self.inner.bounds_geometry() } pub fn views_poses(&self) -> Vec<XrPose> { self.inner.views_poses() } /// Index 0 corresponds to the left hand, index 1 corresponds to the right hand. pub fn hands_pose(&self) -> [Option<XrPose>; 2] { self.inner.hands_pose() } /// Index 0 corresponds to the left hand, index 1 corresponds to the right hand. pub fn hands_skeleton_pose(&self) -> [Option<Vec<XrJointPose>>; 2] { self.inner.hands_skeleton_pose() } /// Returns poses that can be used to render a target ray or cursor. The ray is along -Z. The /// behavior is vendor-specific. Index 0 corresponds to the left hand, index 1 corresponds to /// the right hand. pub fn hand_target_ray(&self) -> [Option<XrPose>; 2] { self.inner.hands_target_ray() } /// Returns a pose that can be used to render a target ray or cursor. The ray is along -Z. 
The /// origin is between the eyes for head-mounted displays and the center of the screen for /// handheld devices. pub fn viewer_target_ray(&self) -> XrPose { self.inner.viewer_target_ray() } // future extensions: // * eye tracking // * lower face tracking // * AR face tracking // * body/skeletal trackers // * scene understanding (anchors, planes, meshes) } #[derive(Clone, Copy, Eq, PartialEq, Hash, Debug, Serialize, Deserialize)] pub enum XrHandType { Left, Right, } #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Serialize, Deserialize)] pub enum XrButtonState { Default, Touched, Pressed, } impl Default for XrButtonState { fn default() -> Self { Self::Default } } #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] pub enum XrActionType { /// Convenience type that groups click, touch and value actions for a single button. /// The last segment of the path (`/click`, `/touch` or `/value`) must be omitted. Button { touch: bool, }, Binary, Scalar, /// Convenience type that groups x and y axes for a touchpad or thumbstick action. /// The last segment of the path (`/x` or `/y`) must be omitted. Vec2D, } #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] pub enum XrActionState { Button { state: XrButtonState, value: f32 }, Binary(bool), Scalar(f32), Vec2D(Vec2), } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct XrActionDescriptor { pub name: String, pub action_type: XrActionType, } /// List bindings related to a single interaction profile. `tracked` and `has_haptics` can always be /// set to false but if they are set to true and the interaction profile does not support them, the /// the profile will be disabled completely. pub struct XrProfileDescriptor { pub profile: String, pub bindings: Vec<(XrActionDescriptor, String)>, pub tracked: bool, pub has_haptics: bool, } pub struct XrActionSet { current_states: HashMap<String, XrActionState>, previous_states: HashMap<String, XrActionState>, } impl XrActionSet { pub fn state(&self, action: &str) -> Option<XrActionState> { self.current_states.get(action).cloned() } pub fn button_state(&self, action: &str) -> XrButtonState { if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action) { *state } else { XrButtonState::Default } } pub fn button_touched(&self, action: &str) -> bool { if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action) { *state != XrButtonState::Default } else { false } } pub fn button_pressed(&self, action: &str) -> bool { if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action) { *state == XrButtonState::Pressed } else { false } } fn button_states(&self, action: &str) -> Option<(XrButtonState, XrButtonState)> { if let ( Some(XrActionState::Button { state: current_state, .. }), Some(XrActionState::Button { state: previous_state, .. 
}), ) = ( self.current_states.get(action), self.previous_states.get(action), ) { Some((*current_state, *previous_state)) } else { None } } pub fn button_just_touched(&self, action: &str) -> bool { self.button_states(action) .map(|(cur, prev)| cur != XrButtonState::Default && prev == XrButtonState::Default) .unwrap_or(false) } pub fn button_just_untouched(&self, action: &str) -> bool { self.button_states(action) .map(|(cur, prev)| cur == XrButtonState::Default && prev != XrButtonState::Default) .unwrap_or(false) } pub fn button_just_pressed(&self, action: &str) -> bool { self.button_states(action) .map(|(cur, prev)| cur == XrButtonState::Pressed && prev != XrButtonState::Pressed) .unwrap_or(false) }
pub fn button_just_unpressed(&self, action: &str) -> bool { self.button_states(action) .map(|(cur, prev)| cur != XrButtonState::Pressed && prev == XrButtonState::Pressed) .unwrap_or(false)
random_line_split
recorder_test.go
AndName is a slice of ts.TimeSeriesData. type byTimeAndName []ts.TimeSeriesData // implement sort.Interface for byTimeAndName func (a byTimeAndName) Len() int { return len(a) } func (a byTimeAndName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byTimeAndName) Less(i, j int) bool { if a[i].Name != a[j].Name { return a[i].Name < a[j].Name } if a[i].Datapoints[0].TimestampNanos != a[j].Datapoints[0].TimestampNanos { return a[i].Datapoints[0].TimestampNanos < a[j].Datapoints[0].TimestampNanos } return a[i].Source < a[j].Source } var _ sort.Interface = byTimeAndName{} // byStoreID is a slice of roachpb.StoreID. type byStoreID []roachpb.StoreID // implement sort.Interface for byStoreID func (a byStoreID) Len() int { return len(a) } func (a byStoreID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byStoreID) Less(i, j int) bool { return a[i] < a[j] } var _ sort.Interface = byStoreID{} // byStoreDescID is a slice of storage.StoreStatus type byStoreDescID []storage.StoreStatus // implement sort.Interface for byStoreDescID. func (a byStoreDescID)
() int { return len(a) } func (a byStoreDescID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byStoreDescID) Less(i, j int) bool { return a[i].Desc.StoreID < a[j].Desc.StoreID } var _ sort.Interface = byStoreDescID{} // fakeStore implements only the methods of store needed by MetricsRecorder to // interact with stores. type fakeStore struct { storeID roachpb.StoreID stats engine.MVCCStats desc roachpb.StoreDescriptor registry *metric.Registry } func (fs fakeStore) StoreID() roachpb.StoreID { return fs.storeID } func (fs fakeStore) Descriptor() (*roachpb.StoreDescriptor, error) { return &fs.desc, nil } func (fs fakeStore) MVCCStats() engine.MVCCStats { return fs.stats } func (fs fakeStore) Registry() *metric.Registry { return fs.registry } // TestMetricsRecorder verifies that the metrics recorder properly formats the // statistics from various registries, both for Time Series and for Status // Summaries. func TestMetricsRecorder(t *testing.T) { defer leaktest.AfterTest(t)() // Fake descriptors and stats for status summaries. nodeDesc := roachpb.NodeDescriptor{ NodeID: roachpb.NodeID(1), } storeDesc1 := roachpb.StoreDescriptor{ StoreID: roachpb.StoreID(1), Capacity: roachpb.StoreCapacity{ Capacity: 100, Available: 50, }, } storeDesc2 := roachpb.StoreDescriptor{ StoreID: roachpb.StoreID(2), Capacity: roachpb.StoreCapacity{ Capacity: 200, Available: 75, }, } stats := engine.MVCCStats{ LiveBytes: 1, KeyBytes: 2, ValBytes: 3, IntentBytes: 4, LiveCount: 5, KeyCount: 6, ValCount: 7, IntentCount: 8, IntentAge: 9, GCBytesAge: 10, LastUpdateNanos: 1 * 1E9, } // Create some registries and add them to the recorder (two at node-level, // two at store-level). reg1 := metric.NewRegistry() reg2 := metric.NewRegistry() store1 := fakeStore{ storeID: roachpb.StoreID(1), stats: stats, desc: storeDesc1, registry: metric.NewRegistry(), } store2 := fakeStore{ storeID: roachpb.StoreID(2), stats: stats, desc: storeDesc2, registry: metric.NewRegistry(), } manual := hlc.NewManualClock(100) recorder := NewMetricsRecorder(hlc.NewClock(manual.UnixNano)) recorder.AddNodeRegistry("one.%s", reg1) recorder.AddNodeRegistry("two.%s", reg1) recorder.AddStore(store1) recorder.AddStore(store2) recorder.NodeStarted(nodeDesc, 50) // Ensure the metric system's view of time does not advance during this test // as the test expects time to not advance too far which would age the actual // data (e.g. in histogram's) unexpectedly. defer metric.TestingSetNow(func() time.Time { return time.Unix(0, manual.UnixNano()).UTC() })() // Create a flat array of registries, along with metadata for each, to help // generate expected results. regList := []struct { reg *metric.Registry prefix string source int64 }{ { reg: reg1, prefix: "cr.node.one.", source: 1, }, { reg: reg2, prefix: "cr.node.two.", source: 1, }, { reg: store1.registry, prefix: "cr.store.", source: int64(store1.storeID), }, { reg: store2.registry, prefix: "cr.store.", source: int64(store2.storeID), }, } // Every registry will have the following metrics. metricNames := []struct { name string typ string val int64 }{ {"testGauge", "gauge", 20}, {"testCounter", "counter", 5}, {"testRate", "rate", 2}, {"testHistogram", "histogram", 10}, {"testLatency", "latency", 10}, // Stats needed for store summaries. {"ranges", "counter", 1}, {"ranges.leader", "gauge", 1}, {"ranges.replicated", "gauge", 1}, {"ranges.available", "gauge", 1}, } // Add the above metrics to each registry. At the same time, generate // expected time series results. 
var expected []ts.TimeSeriesData addExpected := func(prefix, name string, source, time, val int64) { expect := ts.TimeSeriesData{ Name: prefix + name, Source: strconv.FormatInt(source, 10), Datapoints: []*ts.TimeSeriesDatapoint{ { TimestampNanos: time, Value: float64(val), }, }, } expected = append(expected, expect) } for _, data := range metricNames { for _, reg := range regList { switch data.typ { case "gauge": reg.reg.Gauge(data.name).Update(data.val) addExpected(reg.prefix, data.name, reg.source, 100, data.val) case "counter": reg.reg.Counter(data.name).Inc(data.val) addExpected(reg.prefix, data.name, reg.source, 100, data.val) case "rate": reg.reg.Rates(data.name).Add(data.val) addExpected(reg.prefix, data.name+"-count", reg.source, 100, data.val) for _, scale := range metric.DefaultTimeScales { // Rate data is subject to timing errors in tests. Zero out // these values. addExpected(reg.prefix, data.name+sep+scale.Name(), reg.source, 100, 0) } case "histogram": reg.reg.Histogram(data.name, time.Second, 1000, 2).RecordValue(data.val) for _, q := range recordHistogramQuantiles { addExpected(reg.prefix, data.name+q.suffix, reg.source, 100, data.val) } case "latency": reg.reg.Latency(data.name).RecordValue(data.val) // Latency is simply three histograms (at different resolution // time scales). for _, scale := range metric.DefaultTimeScales { for _, q := range recordHistogramQuantiles { addExpected(reg.prefix, data.name+sep+scale.Name()+q.suffix, reg.source, 100, data.val) } } } } } actual := recorder.GetTimeSeriesData() // Zero-out timing-sensitive rate values from actual data. for _, act := range actual { match, err := regexp.Match
Len
identifier_name
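The byTimeAndName, byStoreID and byStoreDescID helpers above exist only so the test can put expected and actual results into one deterministic order before comparing them. Below is a minimal sketch of that ordering idea — name first, then the first datapoint's timestamp, then source — written in TypeScript; the TimeSeriesData shape and the sample values are illustrative assumptions, not the Go test's types.

// Illustrative shape; the real test sorts ts.TimeSeriesData values.
interface TimeSeriesData {
  name: string;
  source: string;
  datapoints: { timestampNanos: number; value: number }[];
}

// Same ordering as byTimeAndName: name, then first timestamp, then source.
function byTimeAndName(a: TimeSeriesData, b: TimeSeriesData): number {
  if (a.name !== b.name) return a.name < b.name ? -1 : 1;
  const ta = a.datapoints[0].timestampNanos;
  const tb = b.datapoints[0].timestampNanos;
  if (ta !== tb) return ta - tb;
  return a.source < b.source ? -1 : a.source > b.source ? 1 : 0;
}

const expected: TimeSeriesData[] = [
  { name: 'cr.node.one.testGauge', source: '1', datapoints: [{ timestampNanos: 100, value: 20 }] },
  { name: 'cr.store.testGauge', source: '2', datapoints: [{ timestampNanos: 100, value: 20 }] },
];
const actual = [...expected].reverse();

// Sorting both sides makes the comparison independent of emission order.
expected.sort(byTimeAndName);
actual.sort(byTimeAndName);
console.log(JSON.stringify(expected) === JSON.stringify(actual)); // true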
recorder_test.go
.Interface for byTimeAndName func (a byTimeAndName) Len() int { return len(a) } func (a byTimeAndName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byTimeAndName) Less(i, j int) bool { if a[i].Name != a[j].Name { return a[i].Name < a[j].Name } if a[i].Datapoints[0].TimestampNanos != a[j].Datapoints[0].TimestampNanos { return a[i].Datapoints[0].TimestampNanos < a[j].Datapoints[0].TimestampNanos } return a[i].Source < a[j].Source } var _ sort.Interface = byTimeAndName{} // byStoreID is a slice of roachpb.StoreID. type byStoreID []roachpb.StoreID // implement sort.Interface for byStoreID func (a byStoreID) Len() int { return len(a) } func (a byStoreID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byStoreID) Less(i, j int) bool { return a[i] < a[j] } var _ sort.Interface = byStoreID{} // byStoreDescID is a slice of storage.StoreStatus type byStoreDescID []storage.StoreStatus // implement sort.Interface for byStoreDescID. func (a byStoreDescID) Len() int { return len(a) } func (a byStoreDescID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byStoreDescID) Less(i, j int) bool { return a[i].Desc.StoreID < a[j].Desc.StoreID } var _ sort.Interface = byStoreDescID{} // fakeStore implements only the methods of store needed by MetricsRecorder to // interact with stores. type fakeStore struct { storeID roachpb.StoreID stats engine.MVCCStats desc roachpb.StoreDescriptor registry *metric.Registry } func (fs fakeStore) StoreID() roachpb.StoreID { return fs.storeID } func (fs fakeStore) Descriptor() (*roachpb.StoreDescriptor, error) { return &fs.desc, nil } func (fs fakeStore) MVCCStats() engine.MVCCStats { return fs.stats } func (fs fakeStore) Registry() *metric.Registry { return fs.registry } // TestMetricsRecorder verifies that the metrics recorder properly formats the // statistics from various registries, both for Time Series and for Status // Summaries. func TestMetricsRecorder(t *testing.T) { defer leaktest.AfterTest(t)() // Fake descriptors and stats for status summaries. nodeDesc := roachpb.NodeDescriptor{ NodeID: roachpb.NodeID(1), } storeDesc1 := roachpb.StoreDescriptor{ StoreID: roachpb.StoreID(1), Capacity: roachpb.StoreCapacity{ Capacity: 100, Available: 50, }, } storeDesc2 := roachpb.StoreDescriptor{ StoreID: roachpb.StoreID(2), Capacity: roachpb.StoreCapacity{ Capacity: 200, Available: 75, }, } stats := engine.MVCCStats{ LiveBytes: 1, KeyBytes: 2, ValBytes: 3, IntentBytes: 4, LiveCount: 5, KeyCount: 6, ValCount: 7, IntentCount: 8, IntentAge: 9, GCBytesAge: 10, LastUpdateNanos: 1 * 1E9, } // Create some registries and add them to the recorder (two at node-level, // two at store-level). reg1 := metric.NewRegistry() reg2 := metric.NewRegistry() store1 := fakeStore{ storeID: roachpb.StoreID(1), stats: stats, desc: storeDesc1, registry: metric.NewRegistry(), } store2 := fakeStore{ storeID: roachpb.StoreID(2), stats: stats, desc: storeDesc2, registry: metric.NewRegistry(), } manual := hlc.NewManualClock(100) recorder := NewMetricsRecorder(hlc.NewClock(manual.UnixNano)) recorder.AddNodeRegistry("one.%s", reg1) recorder.AddNodeRegistry("two.%s", reg1) recorder.AddStore(store1) recorder.AddStore(store2) recorder.NodeStarted(nodeDesc, 50) // Ensure the metric system's view of time does not advance during this test // as the test expects time to not advance too far which would age the actual // data (e.g. in histogram's) unexpectedly. 
defer metric.TestingSetNow(func() time.Time { return time.Unix(0, manual.UnixNano()).UTC() })() // Create a flat array of registries, along with metadata for each, to help // generate expected results. regList := []struct { reg *metric.Registry prefix string source int64 }{ { reg: reg1, prefix: "cr.node.one.", source: 1, }, { reg: reg2, prefix: "cr.node.two.", source: 1, }, { reg: store1.registry, prefix: "cr.store.", source: int64(store1.storeID), }, { reg: store2.registry, prefix: "cr.store.", source: int64(store2.storeID), }, } // Every registry will have the following metrics. metricNames := []struct { name string typ string val int64 }{ {"testGauge", "gauge", 20}, {"testCounter", "counter", 5}, {"testRate", "rate", 2}, {"testHistogram", "histogram", 10}, {"testLatency", "latency", 10}, // Stats needed for store summaries. {"ranges", "counter", 1}, {"ranges.leader", "gauge", 1}, {"ranges.replicated", "gauge", 1}, {"ranges.available", "gauge", 1}, } // Add the above metrics to each registry. At the same time, generate // expected time series results. var expected []ts.TimeSeriesData addExpected := func(prefix, name string, source, time, val int64) { expect := ts.TimeSeriesData{ Name: prefix + name, Source: strconv.FormatInt(source, 10), Datapoints: []*ts.TimeSeriesDatapoint{ { TimestampNanos: time, Value: float64(val), }, }, } expected = append(expected, expect) } for _, data := range metricNames { for _, reg := range regList { switch data.typ { case "gauge": reg.reg.Gauge(data.name).Update(data.val) addExpected(reg.prefix, data.name, reg.source, 100, data.val) case "counter": reg.reg.Counter(data.name).Inc(data.val) addExpected(reg.prefix, data.name, reg.source, 100, data.val) case "rate": reg.reg.Rates(data.name).Add(data.val) addExpected(reg.prefix, data.name+"-count", reg.source, 100, data.val) for _, scale := range metric.DefaultTimeScales { // Rate data is subject to timing errors in tests. Zero out // these values. addExpected(reg.prefix, data.name+sep+scale.Name(), reg.source, 100, 0) } case "histogram": reg.reg.Histogram(data.name, time.Second, 1000, 2).RecordValue(data.val) for _, q := range recordHistogramQuantiles { addExpected(reg.prefix, data.name+q.suffix, reg.source, 100, data.val) } case "latency": reg.reg.Latency(data.name).RecordValue(data.val) // Latency is simply three histograms (at different resolution // time scales). for _, scale := range metric.DefaultTimeScales { for _, q := range recordHistogramQuantiles { addExpected(reg.prefix, data.name+sep+scale.Name()+q.suffix, reg.source, 100, data.val) } } } } } actual := recorder.GetTimeSeriesData() // Zero-out timing-sensitive rate values from actual data. for _, act := range actual { match, err := regexp.MatchString(`testRate-\d+m`, act.Name) if err != nil
{ t.Fatal(err) }
conditional_block
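The conditional block above guards the step that zeroes timing-sensitive rate values: any series whose name matches testRate-\d+m has its datapoints reset to 0 before comparison. Here is a small TypeScript sketch of that filtering step; the Series shape and sample data are assumptions made for illustration.

// Zero out timing-sensitive values before comparison, mirroring the Go test's
// handling of "testRate-<N>m" series. Types and data here are illustrative.
interface Datapoint { timestampNanos: number; value: number }
interface Series { name: string; datapoints: Datapoint[] }

const ratePattern = /testRate-\d+m/;

function zeroRateValues(series: Series[]): void {
  for (const s of series) {
    if (ratePattern.test(s.name)) {
      for (const dp of s.datapoints) dp.value = 0;
    }
  }
}

const actualSeries: Series[] = [
  { name: 'cr.node.one.testRate-10m', datapoints: [{ timestampNanos: 100, value: 1.7 }] },
  { name: 'cr.node.one.testRate-count', datapoints: [{ timestampNanos: 100, value: 2 }] },
];
zeroRateValues(actualSeries);
console.log(actualSeries[0].datapoints[0].value, actualSeries[1].datapoints[0].value); // 0 2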
recorder_test.go
AndName is a slice of ts.TimeSeriesData. type byTimeAndName []ts.TimeSeriesData // implement sort.Interface for byTimeAndName func (a byTimeAndName) Len() int
func (a byTimeAndName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byTimeAndName) Less(i, j int) bool { if a[i].Name != a[j].Name { return a[i].Name < a[j].Name } if a[i].Datapoints[0].TimestampNanos != a[j].Datapoints[0].TimestampNanos { return a[i].Datapoints[0].TimestampNanos < a[j].Datapoints[0].TimestampNanos } return a[i].Source < a[j].Source } var _ sort.Interface = byTimeAndName{} // byStoreID is a slice of roachpb.StoreID. type byStoreID []roachpb.StoreID // implement sort.Interface for byStoreID func (a byStoreID) Len() int { return len(a) } func (a byStoreID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byStoreID) Less(i, j int) bool { return a[i] < a[j] } var _ sort.Interface = byStoreID{} // byStoreDescID is a slice of storage.StoreStatus type byStoreDescID []storage.StoreStatus // implement sort.Interface for byStoreDescID. func (a byStoreDescID) Len() int { return len(a) } func (a byStoreDescID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byStoreDescID) Less(i, j int) bool { return a[i].Desc.StoreID < a[j].Desc.StoreID } var _ sort.Interface = byStoreDescID{} // fakeStore implements only the methods of store needed by MetricsRecorder to // interact with stores. type fakeStore struct { storeID roachpb.StoreID stats engine.MVCCStats desc roachpb.StoreDescriptor registry *metric.Registry } func (fs fakeStore) StoreID() roachpb.StoreID { return fs.storeID } func (fs fakeStore) Descriptor() (*roachpb.StoreDescriptor, error) { return &fs.desc, nil } func (fs fakeStore) MVCCStats() engine.MVCCStats { return fs.stats } func (fs fakeStore) Registry() *metric.Registry { return fs.registry } // TestMetricsRecorder verifies that the metrics recorder properly formats the // statistics from various registries, both for Time Series and for Status // Summaries. func TestMetricsRecorder(t *testing.T) { defer leaktest.AfterTest(t)() // Fake descriptors and stats for status summaries. nodeDesc := roachpb.NodeDescriptor{ NodeID: roachpb.NodeID(1), } storeDesc1 := roachpb.StoreDescriptor{ StoreID: roachpb.StoreID(1), Capacity: roachpb.StoreCapacity{ Capacity: 100, Available: 50, }, } storeDesc2 := roachpb.StoreDescriptor{ StoreID: roachpb.StoreID(2), Capacity: roachpb.StoreCapacity{ Capacity: 200, Available: 75, }, } stats := engine.MVCCStats{ LiveBytes: 1, KeyBytes: 2, ValBytes: 3, IntentBytes: 4, LiveCount: 5, KeyCount: 6, ValCount: 7, IntentCount: 8, IntentAge: 9, GCBytesAge: 10, LastUpdateNanos: 1 * 1E9, } // Create some registries and add them to the recorder (two at node-level, // two at store-level). reg1 := metric.NewRegistry() reg2 := metric.NewRegistry() store1 := fakeStore{ storeID: roachpb.StoreID(1), stats: stats, desc: storeDesc1, registry: metric.NewRegistry(), } store2 := fakeStore{ storeID: roachpb.StoreID(2), stats: stats, desc: storeDesc2, registry: metric.NewRegistry(), } manual := hlc.NewManualClock(100) recorder := NewMetricsRecorder(hlc.NewClock(manual.UnixNano)) recorder.AddNodeRegistry("one.%s", reg1) recorder.AddNodeRegistry("two.%s", reg1) recorder.AddStore(store1) recorder.AddStore(store2) recorder.NodeStarted(nodeDesc, 50) // Ensure the metric system's view of time does not advance during this test // as the test expects time to not advance too far which would age the actual // data (e.g. in histogram's) unexpectedly. defer metric.TestingSetNow(func() time.Time { return time.Unix(0, manual.UnixNano()).UTC() })() // Create a flat array of registries, along with metadata for each, to help // generate expected results. 
regList := []struct { reg *metric.Registry prefix string source int64 }{ { reg: reg1, prefix: "cr.node.one.", source: 1, }, { reg: reg2, prefix: "cr.node.two.", source: 1, }, { reg: store1.registry, prefix: "cr.store.", source: int64(store1.storeID), }, { reg: store2.registry, prefix: "cr.store.", source: int64(store2.storeID), }, } // Every registry will have the following metrics. metricNames := []struct { name string typ string val int64 }{ {"testGauge", "gauge", 20}, {"testCounter", "counter", 5}, {"testRate", "rate", 2}, {"testHistogram", "histogram", 10}, {"testLatency", "latency", 10}, // Stats needed for store summaries. {"ranges", "counter", 1}, {"ranges.leader", "gauge", 1}, {"ranges.replicated", "gauge", 1}, {"ranges.available", "gauge", 1}, } // Add the above metrics to each registry. At the same time, generate // expected time series results. var expected []ts.TimeSeriesData addExpected := func(prefix, name string, source, time, val int64) { expect := ts.TimeSeriesData{ Name: prefix + name, Source: strconv.FormatInt(source, 10), Datapoints: []*ts.TimeSeriesDatapoint{ { TimestampNanos: time, Value: float64(val), }, }, } expected = append(expected, expect) } for _, data := range metricNames { for _, reg := range regList { switch data.typ { case "gauge": reg.reg.Gauge(data.name).Update(data.val) addExpected(reg.prefix, data.name, reg.source, 100, data.val) case "counter": reg.reg.Counter(data.name).Inc(data.val) addExpected(reg.prefix, data.name, reg.source, 100, data.val) case "rate": reg.reg.Rates(data.name).Add(data.val) addExpected(reg.prefix, data.name+"-count", reg.source, 100, data.val) for _, scale := range metric.DefaultTimeScales { // Rate data is subject to timing errors in tests. Zero out // these values. addExpected(reg.prefix, data.name+sep+scale.Name(), reg.source, 100, 0) } case "histogram": reg.reg.Histogram(data.name, time.Second, 1000, 2).RecordValue(data.val) for _, q := range recordHistogramQuantiles { addExpected(reg.prefix, data.name+q.suffix, reg.source, 100, data.val) } case "latency": reg.reg.Latency(data.name).RecordValue(data.val) // Latency is simply three histograms (at different resolution // time scales). for _, scale := range metric.DefaultTimeScales { for _, q := range recordHistogramQuantiles { addExpected(reg.prefix, data.name+sep+scale.Name()+q.suffix, reg.source, 100, data.val) } } } } } actual := recorder.GetTimeSeriesData() // Zero-out timing-sensitive rate values from actual data. for _, act := range actual { match, err := regexp.Match
{ return len(a) }
identifier_body
recorder_test.go
func (a byTimeAndName) Len() int { return len(a) } func (a byTimeAndName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byTimeAndName) Less(i, j int) bool { if a[i].Name != a[j].Name { return a[i].Name < a[j].Name } if a[i].Datapoints[0].TimestampNanos != a[j].Datapoints[0].TimestampNanos { return a[i].Datapoints[0].TimestampNanos < a[j].Datapoints[0].TimestampNanos } return a[i].Source < a[j].Source } var _ sort.Interface = byTimeAndName{} // byStoreID is a slice of roachpb.StoreID. type byStoreID []roachpb.StoreID // implement sort.Interface for byStoreID func (a byStoreID) Len() int { return len(a) } func (a byStoreID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byStoreID) Less(i, j int) bool { return a[i] < a[j] } var _ sort.Interface = byStoreID{} // byStoreDescID is a slice of storage.StoreStatus type byStoreDescID []storage.StoreStatus // implement sort.Interface for byStoreDescID. func (a byStoreDescID) Len() int { return len(a) } func (a byStoreDescID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byStoreDescID) Less(i, j int) bool { return a[i].Desc.StoreID < a[j].Desc.StoreID } var _ sort.Interface = byStoreDescID{} // fakeStore implements only the methods of store needed by MetricsRecorder to // interact with stores. type fakeStore struct { storeID roachpb.StoreID stats engine.MVCCStats desc roachpb.StoreDescriptor registry *metric.Registry } func (fs fakeStore) StoreID() roachpb.StoreID { return fs.storeID } func (fs fakeStore) Descriptor() (*roachpb.StoreDescriptor, error) { return &fs.desc, nil } func (fs fakeStore) MVCCStats() engine.MVCCStats { return fs.stats } func (fs fakeStore) Registry() *metric.Registry { return fs.registry } // TestMetricsRecorder verifies that the metrics recorder properly formats the // statistics from various registries, both for Time Series and for Status // Summaries. func TestMetricsRecorder(t *testing.T) { defer leaktest.AfterTest(t)() // Fake descriptors and stats for status summaries. nodeDesc := roachpb.NodeDescriptor{ NodeID: roachpb.NodeID(1), } storeDesc1 := roachpb.StoreDescriptor{ StoreID: roachpb.StoreID(1), Capacity: roachpb.StoreCapacity{ Capacity: 100, Available: 50, }, } storeDesc2 := roachpb.StoreDescriptor{ StoreID: roachpb.StoreID(2), Capacity: roachpb.StoreCapacity{ Capacity: 200, Available: 75, }, } stats := engine.MVCCStats{ LiveBytes: 1, KeyBytes: 2, ValBytes: 3, IntentBytes: 4, LiveCount: 5, KeyCount: 6, ValCount: 7, IntentCount: 8, IntentAge: 9, GCBytesAge: 10, LastUpdateNanos: 1 * 1E9, } // Create some registries and add them to the recorder (two at node-level, // two at store-level). reg1 := metric.NewRegistry() reg2 := metric.NewRegistry() store1 := fakeStore{ storeID: roachpb.StoreID(1), stats: stats, desc: storeDesc1, registry: metric.NewRegistry(), } store2 := fakeStore{ storeID: roachpb.StoreID(2), stats: stats, desc: storeDesc2, registry: metric.NewRegistry(), } manual := hlc.NewManualClock(100) recorder := NewMetricsRecorder(hlc.NewClock(manual.UnixNano)) recorder.AddNodeRegistry("one.%s", reg1) recorder.AddNodeRegistry("two.%s", reg1) recorder.AddStore(store1) recorder.AddStore(store2) recorder.NodeStarted(nodeDesc, 50) // Ensure the metric system's view of time does not advance during this test // as the test expects time to not advance too far which would age the actual // data (e.g. in histogram's) unexpectedly. 
defer metric.TestingSetNow(func() time.Time { return time.Unix(0, manual.UnixNano()).UTC() })() // Create a flat array of registries, along with metadata for each, to help // generate expected results. regList := []struct { reg *metric.Registry prefix string source int64 }{ { reg: reg1, prefix: "cr.node.one.", source: 1, }, { reg: reg2, prefix: "cr.node.two.", source: 1, }, { reg: store1.registry, prefix: "cr.store.", source: int64(store1.storeID), }, { reg: store2.registry, prefix: "cr.store.", source: int64(store2.storeID), }, } // Every registry will have the following metrics. metricNames := []struct { name string typ string val int64 }{ {"testGauge", "gauge", 20}, {"testCounter", "counter", 5}, {"testRate", "rate", 2}, {"testHistogram", "histogram", 10}, {"testLatency", "latency", 10}, // Stats needed for store summaries. {"ranges", "counter", 1}, {"ranges.leader", "gauge", 1}, {"ranges.replicated", "gauge", 1}, {"ranges.available", "gauge", 1}, } // Add the above metrics to each registry. At the same time, generate // expected time series results. var expected []ts.TimeSeriesData addExpected := func(prefix, name string, source, time, val int64) { expect := ts.TimeSeriesData{ Name: prefix + name, Source: strconv.FormatInt(source, 10), Datapoints: []*ts.TimeSeriesDatapoint{ { TimestampNanos: time, Value: float64(val), }, }, } expected = append(expected, expect) } for _, data := range metricNames { for _, reg := range regList { switch data.typ { case "gauge": reg.reg.Gauge(data.name).Update(data.val) addExpected(reg.prefix, data.name, reg.source, 100, data.val) case "counter": reg.reg.Counter(data.name).Inc(data.val) addExpected(reg.prefix, data.name, reg.source, 100, data.val) case "rate": reg.reg.Rates(data.name).Add(data.val) addExpected(reg.prefix, data.name+"-count", reg.source, 100, data.val) for _, scale := range metric.DefaultTimeScales { // Rate data is subject to timing errors in tests. Zero out // these values. addExpected(reg.prefix, data.name+sep+scale.Name(), reg.source, 100, 0) } case "histogram": reg.reg.Histogram(data.name, time.Second, 1000, 2).RecordValue(data.val) for _, q := range recordHistogramQuantiles { addExpected(reg.prefix, data.name+q.suffix, reg.source, 100, data.val) } case "latency": reg.reg.Latency(data.name).RecordValue(data.val) // Latency is simply three histograms (at different resolution // time scales). for _, scale := range metric.DefaultTimeScales { for _, q := range recordHistogramQuantiles { addExpected(reg.prefix, data.name+sep+scale.Name()+q.suffix, reg.source, 100, data.val) } } } } } actual := recorder.GetTimeSeriesData() // Zero-out timing-sensitive rate values from actual data. for _, act := range actual { match, err := regexp
// byTimeAndName is a slice of ts.TimeSeriesData. type byTimeAndName []ts.TimeSeriesData // implement sort.Interface for byTimeAndName
random_line_split
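fakeStore illustrates the usual test-double pattern: implement only the narrow interface that the component under test (here MetricsRecorder) actually calls. Below is a tiny TypeScript analogue of that pattern; the StoreLike interface and summarize() consumer are hypothetical names introduced for this sketch, not part of the original code.

// Test-double pattern: implement only the narrow interface the consumer needs.
interface StoreLike {
  storeID(): number;
  capacity(): { capacity: number; available: number };
}

class FakeStore implements StoreLike {
  constructor(private id: number, private cap: number, private avail: number) {}
  storeID(): number { return this.id; }
  capacity() { return { capacity: this.cap, available: this.avail }; }
}

function summarize(stores: StoreLike[]): string {
  return stores
    .map(s => `store ${s.storeID()}: ${s.capacity().available}/${s.capacity().capacity}`)
    .join(', ');
}

console.log(summarize([new FakeStore(1, 100, 50), new FakeStore(2, 200, 75)]));
// store 1: 50/100, store 2: 75/200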
TransformExpression.ts
are required at this point to convert the typed value to a string form. // If copy-namespaces mode in the static context specifies preserve, all in-scope-namespaces of the original element are retained in the new copy. If copy-namespaces mode specifies no-preserve, the new copy retains only those in-scope namespaces of the original element that are used in the names of the element and its attributes. // All other properties of the copied nodes are preserved. switch (node.nodeType) { case NODE_TYPES.ELEMENT_NODE: const cloneElem = nodesFactory.createElementNS(node.namespaceURI, node.nodeName); domFacade .getAllAttributes(node) .forEach(attr => documentWriter.setAttributeNS( cloneElem, attr.namespaceURI, attr.name, attr.value ) ); for (const child of domFacade.getChildNodes(node)) { const descendant = deepCloneNode(child, domFacade, nodesFactory, documentWriter); documentWriter.insertBefore(cloneElem as ConcreteElementNode, descendant, null); } return cloneElem; case NODE_TYPES.ATTRIBUTE_NODE: const cloneAttr = nodesFactory.createAttributeNS(node.namespaceURI, node.nodeName); cloneAttr.value = node.value; return cloneAttr; case NODE_TYPES.CDATA_SECTION_NODE: return nodesFactory.createCDATASection(node.data); case NODE_TYPES.COMMENT_NODE: return nodesFactory.createComment(node.data); case NODE_TYPES.DOCUMENT_NODE: const cloneDoc = nodesFactory.createDocument(); for (const child of domFacade.getChildNodes(node)) { const descendant = deepCloneNode(child, domFacade, nodesFactory, documentWriter); documentWriter.insertBefore(cloneDoc as ConcreteDocumentNode, descendant, null); } return cloneDoc; case NODE_TYPES.PROCESSING_INSTRUCTION_NODE: return nodesFactory.createProcessingInstruction(node.target, node.data); case NODE_TYPES.TEXT_NODE: return nodesFactory.createTextNode(node.data); } } function isCreatedNode(node, createdNodes, domFacade) { if (createdNodes.includes(node)) { return true; } const parent = domFacade.getParentNode(node); return parent ? isCreatedNode(parent, createdNodes, domFacade) : false; } type VariableBinding = { registeredVariable?: string; sourceExpr: Expression; varRef: QName }; class TransformExpression extends UpdatingExpression { public _modifyExpr: Expression; public _returnExpr: Expression; public _variableBindings: VariableBinding[]; constructor( variableBindings: VariableBinding[], modifyExpr: Expression, returnExpr: Expression ) { super( new Specificity({}), variableBindings.reduce( (childExpressions, variableBinding) => { childExpressions.push(variableBinding.sourceExpr); return childExpressions; }, [modifyExpr, returnExpr] ), { canBeStaticallyEvaluated: false, resultOrder: RESULT_ORDERINGS.UNSORTED } ); this._variableBindings = variableBindings; this._modifyExpr = modifyExpr; this._returnExpr = returnExpr; } public evaluateWithUpdateList( dynamicContext: DynamicContext, executionParameters: ExecutionParameters ): IAsyncIterator<UpdatingExpressionResult> { const { domFacade, nodesFactory, documentWriter } = executionParameters; const sourceValueIterators: IAsyncIterator<UpdatingExpressionResult>[] = []; let modifyValueIterator: IAsyncIterator<UpdatingExpressionResult>; let returnValueIterator: IAsyncIterator<UpdatingExpressionResult>; let modifyPul: IPendingUpdate[]; const createdNodes = []; const toMergePuls = []; return { next: () => { if (createdNodes.length !== this._variableBindings.length) { // The copy clause contains one or more variable bindings, each of which consists of a variable name and an expression called the source expression. 
for (let i = createdNodes.length; i < this._variableBindings.length; i++) { const variableBinding = this._variableBindings[i]; let sourceValueIterator: IAsyncIterator<UpdatingExpressionResult> = sourceValueIterators[i]; // Each variable binding is processed as follows: if (!sourceValueIterator) { sourceValueIterators[ i ] = sourceValueIterator = this.ensureUpdateListWrapper( variableBinding.sourceExpr )(dynamicContext, executionParameters); } const sv = sourceValueIterator.next(IterationHint.NONE); if (!sv.ready) { return sv; } // The result of evaluating the source expression must be a single node [err:XUTY0013]. Let $node be this single node. if ( sv.value.xdmValue.length !== 1 || !isSubtypeOf(sv.value.xdmValue[0].type, 'node()') ) { throw errXUTY0013(); } const node = sv.value.xdmValue[0]; // A new copy is made of $node and all nodes that have $node as an ancestor, collectively referred to as copied nodes. const copiedNodes = createNodeValue( deepCloneNode(node.value, domFacade, nodesFactory, documentWriter) ); createdNodes.push(copiedNodes.value); toMergePuls.push(sv.value.pendingUpdateList); // The variable name is bound to the top-level copied node generated in the previous step. The scope of this variable binding includes all subexpressions of the containing copy modify expression that appear after the variable binding clause, including the source expressions of later variable bindings, but it does not include the source expression to which the current variable name is bound. dynamicContext = dynamicContext.scopeWithVariableBindings({ [variableBinding.registeredVariable]: () => sequenceFactory.singleton(copiedNodes) }); } } if (!modifyPul) { // The expression in the modify clause is evaluated, if (!modifyValueIterator) { modifyValueIterator = this.ensureUpdateListWrapper(this._modifyExpr)( dynamicContext, executionParameters ); } const mv = modifyValueIterator.next(IterationHint.NONE); if (!mv.ready) { return mv; } // resulting in a pending update list (denoted $pul) and an XDM instance. The XDM instance is discarded, and does not form part of the result of the copy modify expression. modifyPul = mv.value.pendingUpdateList; } modifyPul.forEach(pu => { // If the target node of any update primitive in $pul is a node that was not newly created in Step 1, a dynamic error is raised [err:XUDY0014]. if (pu.target && !isCreatedNode(pu.target, createdNodes, domFacade)) { throw errXUDY0014(pu.target); } // If $pul contains a upd:put update primitive, a dynamic error is raised [err:XUDY0037]. if (pu.type === 'put') { throw errXUDY0037(); } }); // Let $revalidation-mode be the value of the revalidation mode in the static context of the library or main module containing the copy modify expression, and $inherit-namespaces be the value of inherit-namespaces in the static context of the copy modify expression. The following update operation is invoked: upd:applyUpdates($pul, $revalidation-mode, $inherit-namespaces). applyUpdates(modifyPul, null, null, domFacade, nodesFactory, documentWriter); // The return clause is evaluated, resulting in a pending update list and an XDM instance. 
if (!returnValueIterator) { returnValueIterator = this.ensureUpdateListWrapper(this._returnExpr)( dynamicContext, executionParameters ); } const rv = returnValueIterator.next(IterationHint.NONE); if (!rv.ready) { return rv; } // The result of the copy modify expression is the XDM instance returned, as well as a pending update list constructed by merging the pending update lists returned by any of the copy modify expression's copy or return clause operand expressions using upd:mergeUpdates. During evaluation of the return clause, changes applied to copied nodes by the preceding step are visible. return ready({ xdmValue: rv.value.xdmValue, pendingUpdateList: mergeUpdates(rv.value.pendingUpdateList, ...toMergePuls) }); } }; } public performStaticEvaluation(staticContext: StaticContext) { staticContext.introduceScope(); this._variableBindings.forEach( variableBinding => (variableBinding.registeredVariable = staticContext.registerVariable( variableBinding.varRef.namespaceURI, variableBinding.varRef.localName )) ); super.performStaticEvaluation(staticContext); staticContext.removeScope(); // If all of the copy modify expression's copy and return clauses have operand expressions // that are simple expressions, then the copy modify expression is a simple expression. // If any of the copy modify expression's copy or return clauses have operand expressions // that are updating expressions, then the copy modify expression is a updating expression. this.isUpdating = this._variableBindings.some(varBinding => varBinding.sourceExpr.isUpdating) || this._returnExpr.isUpdating; } public
evaluate
identifier_name
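The long comment block in deepCloneNode describes how each copied node receives a fresh identity while its subtree and attributes are rebuilt through the nodes factory and document writer. The following is a stripped-down sketch of the same per-nodeType recursion, using only standard browser DOM APIs instead of fontoxpath's IWrappingDomFacade, INodesFactory and IDocumentWriter facades, so it omits the CDATA and document cases handled by the real code.

// Minimal deep clone by nodeType using standard DOM APIs only.
function cloneNodeDeep(node: Node, doc: Document): Node {
  switch (node.nodeType) {
    case Node.ELEMENT_NODE: {
      const el = node as Element;
      const copy = doc.createElementNS(el.namespaceURI, el.nodeName);
      for (const attr of Array.from(el.attributes)) {
        copy.setAttributeNS(attr.namespaceURI, attr.name, attr.value);
      }
      for (const child of Array.from(el.childNodes)) {
        copy.appendChild(cloneNodeDeep(child, doc));
      }
      return copy;
    }
    case Node.COMMENT_NODE:
      return doc.createComment((node as Comment).data);
    case Node.PROCESSING_INSTRUCTION_NODE: {
      const pi = node as ProcessingInstruction;
      return doc.createProcessingInstruction(pi.target, pi.data);
    }
    case Node.TEXT_NODE:
    default:
      return doc.createTextNode(node.textContent ?? '');
  }
}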
TransformExpression.ts
/iterators'; import { applyUpdates, mergeUpdates } from './pulRoutines'; import UpdatingExpression from './UpdatingExpression'; import { errXUDY0014, errXUDY0037, errXUTY0013 } from './XQueryUpdateFacilityErrors'; import { IPendingUpdate } from './IPendingUpdate'; import ISequence from '../dataTypes/ISequence'; import { separateXDMValueFromUpdatingExpressionResult } from '../PossiblyUpdatingExpression'; function deepCloneNode( node: ConcreteNode, domFacade: IWrappingDomFacade, nodesFactory: INodesFactory, documentWriter: IDocumentWriter ) { // Each copied node receives a new node identity. The parent, children, and attributes properties of the copied nodes are set so as to preserve their inter-node relationships. The parent property of the copy of $node is set to empty. Other properties of the copied nodes are determined as follows: // For a copied document node, the document-uri property is set to empty. // For a copied element node, the type-name property is set to xs:untyped, and the nilled, is-id, and is-idrefs properties are set to false. // For a copied attribute node, the type-name property is set to xs:untypedAtomic and the is-idrefs property is set to false. The is-id property is set to true if the qualified name of the attribute node is xml:id; otherwise it is set to false. // The string-value of each copied element and attribute node remains unchanged, and its typed value becomes equal to its string value as an instance of xs:untypedAtomic. // Note:Implementations that store only the typed value of a node are required at this point to convert the typed value to a string form. // If copy-namespaces mode in the static context specifies preserve, all in-scope-namespaces of the original element are retained in the new copy. If copy-namespaces mode specifies no-preserve, the new copy retains only those in-scope namespaces of the original element that are used in the names of the element and its attributes. // All other properties of the copied nodes are preserved. switch (node.nodeType) { case NODE_TYPES.ELEMENT_NODE: const cloneElem = nodesFactory.createElementNS(node.namespaceURI, node.nodeName); domFacade .getAllAttributes(node) .forEach(attr => documentWriter.setAttributeNS( cloneElem, attr.namespaceURI, attr.name, attr.value ) ); for (const child of domFacade.getChildNodes(node)) { const descendant = deepCloneNode(child, domFacade, nodesFactory, documentWriter); documentWriter.insertBefore(cloneElem as ConcreteElementNode, descendant, null); } return cloneElem; case NODE_TYPES.ATTRIBUTE_NODE: const cloneAttr = nodesFactory.createAttributeNS(node.namespaceURI, node.nodeName); cloneAttr.value = node.value; return cloneAttr; case NODE_TYPES.CDATA_SECTION_NODE: return nodesFactory.createCDATASection(node.data); case NODE_TYPES.COMMENT_NODE: return nodesFactory.createComment(node.data); case NODE_TYPES.DOCUMENT_NODE: const cloneDoc = nodesFactory.createDocument(); for (const child of domFacade.getChildNodes(node)) { const descendant = deepCloneNode(child, domFacade, nodesFactory, documentWriter); documentWriter.insertBefore(cloneDoc as ConcreteDocumentNode, descendant, null); } return cloneDoc; case NODE_TYPES.PROCESSING_INSTRUCTION_NODE: return nodesFactory.createProcessingInstruction(node.target, node.data); case NODE_TYPES.TEXT_NODE: return nodesFactory.createTextNode(node.data); } } function isCreatedNode(node, createdNodes, domFacade) { if (createdNodes.includes(node)) { return true; } const parent = domFacade.getParentNode(node); return parent ? 
isCreatedNode(parent, createdNodes, domFacade) : false; } type VariableBinding = { registeredVariable?: string; sourceExpr: Expression; varRef: QName }; class TransformExpression extends UpdatingExpression { public _modifyExpr: Expression; public _returnExpr: Expression; public _variableBindings: VariableBinding[]; constructor( variableBindings: VariableBinding[], modifyExpr: Expression, returnExpr: Expression ) { super( new Specificity({}), variableBindings.reduce( (childExpressions, variableBinding) => { childExpressions.push(variableBinding.sourceExpr); return childExpressions; }, [modifyExpr, returnExpr] ), { canBeStaticallyEvaluated: false, resultOrder: RESULT_ORDERINGS.UNSORTED } ); this._variableBindings = variableBindings; this._modifyExpr = modifyExpr; this._returnExpr = returnExpr; } public evaluateWithUpdateList( dynamicContext: DynamicContext, executionParameters: ExecutionParameters ): IAsyncIterator<UpdatingExpressionResult> { const { domFacade, nodesFactory, documentWriter } = executionParameters; const sourceValueIterators: IAsyncIterator<UpdatingExpressionResult>[] = []; let modifyValueIterator: IAsyncIterator<UpdatingExpressionResult>; let returnValueIterator: IAsyncIterator<UpdatingExpressionResult>; let modifyPul: IPendingUpdate[]; const createdNodes = []; const toMergePuls = []; return { next: () => { if (createdNodes.length !== this._variableBindings.length) { // The copy clause contains one or more variable bindings, each of which consists of a variable name and an expression called the source expression. for (let i = createdNodes.length; i < this._variableBindings.length; i++) { const variableBinding = this._variableBindings[i]; let sourceValueIterator: IAsyncIterator<UpdatingExpressionResult> = sourceValueIterators[i]; // Each variable binding is processed as follows: if (!sourceValueIterator) { sourceValueIterators[ i ] = sourceValueIterator = this.ensureUpdateListWrapper( variableBinding.sourceExpr )(dynamicContext, executionParameters); } const sv = sourceValueIterator.next(IterationHint.NONE); if (!sv.ready) { return sv; } // The result of evaluating the source expression must be a single node [err:XUTY0013]. Let $node be this single node. if ( sv.value.xdmValue.length !== 1 || !isSubtypeOf(sv.value.xdmValue[0].type, 'node()') ) { throw errXUTY0013(); } const node = sv.value.xdmValue[0]; // A new copy is made of $node and all nodes that have $node as an ancestor, collectively referred to as copied nodes. const copiedNodes = createNodeValue( deepCloneNode(node.value, domFacade, nodesFactory, documentWriter) ); createdNodes.push(copiedNodes.value); toMergePuls.push(sv.value.pendingUpdateList); // The variable name is bound to the top-level copied node generated in the previous step. The scope of this variable binding includes all subexpressions of the containing copy modify expression that appear after the variable binding clause, including the source expressions of later variable bindings, but it does not include the source expression to which the current variable name is bound. dynamicContext = dynamicContext.scopeWithVariableBindings({ [variableBinding.registeredVariable]: () => sequenceFactory.singleton(copiedNodes) }); } } if (!modifyPul) { // The expression in the modify clause is evaluated, if (!modifyValueIterator) { modifyValueIterator = this.ensureUpdateListWrapper(this._modifyExpr)( dynamicContext, executionParameters ); } const mv = modifyValueIterator.next(IterationHint.NONE); if (!mv.ready)
// resulting in a pending update list (denoted $pul) and an XDM instance. The XDM instance is discarded, and does not form part of the result of the copy modify expression. modifyPul = mv.value.pendingUpdateList; } modifyPul.forEach(pu => { // If the target node of any update primitive in $pul is a node that was not newly created in Step 1, a dynamic error is raised [err:XUDY0014]. if (pu.target && !isCreatedNode(pu.target, createdNodes, domFacade)) { throw errXUDY0014(pu.target); } // If $pul contains a upd:put update primitive, a dynamic error is raised [err:XUDY0037]. if (pu.type === 'put') { throw errXUDY0037(); } }); // Let $revalidation-mode be the value of the revalidation mode in the static context of the library or main module containing the copy modify expression, and $inherit-namespaces be the value of inherit-namespaces in the static context of the copy modify expression. The following update operation is invoked: upd:applyUpdates($pul, $revalidation-mode, $inherit-namespaces). applyUpdates(modifyPul, null, null, domFacade, nodesFactory, documentWriter); // The return clause is evaluated, resulting in a pending update list and an XDM instance. if (!returnValueIterator)
{ return mv; }
conditional_block
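Before the pending update list produced by the modify clause is applied, it is validated: every update target must be one of the freshly copied nodes (err:XUDY0014) and upd:put is rejected outright (err:XUDY0037). A generic TypeScript sketch of that validation pass follows; PendingUpdate and the thrown errors are simplified stand-ins for IPendingUpdate and the errXUDY0014/errXUDY0037 factories used by the real code.

// Hypothetical, simplified shapes for illustration only.
interface PendingUpdate { type: string; target?: Node }

function isCreatedNode(node: Node, createdNodes: Node[]): boolean {
  // A node counts as "created" if it, or any ancestor, is in createdNodes.
  for (let cur: Node | null = node; cur !== null; cur = cur.parentNode) {
    if (createdNodes.includes(cur)) return true;
  }
  return false;
}

function validatePendingUpdates(pul: PendingUpdate[], createdNodes: Node[]): void {
  for (const pu of pul) {
    if (pu.target && !isCreatedNode(pu.target, createdNodes)) {
      throw new Error('XUDY0014: update targets a node that was not copied');
    }
    if (pu.type === 'put') {
      throw new Error('XUDY0037: fn:put is not allowed in a copy modify expression');
    }
  }
}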
TransformExpression.ts
/iterators'; import { applyUpdates, mergeUpdates } from './pulRoutines'; import UpdatingExpression from './UpdatingExpression'; import { errXUDY0014, errXUDY0037, errXUTY0013 } from './XQueryUpdateFacilityErrors'; import { IPendingUpdate } from './IPendingUpdate'; import ISequence from '../dataTypes/ISequence'; import { separateXDMValueFromUpdatingExpressionResult } from '../PossiblyUpdatingExpression'; function deepCloneNode( node: ConcreteNode, domFacade: IWrappingDomFacade, nodesFactory: INodesFactory, documentWriter: IDocumentWriter ) { // Each copied node receives a new node identity. The parent, children, and attributes properties of the copied nodes are set so as to preserve their inter-node relationships. The parent property of the copy of $node is set to empty. Other properties of the copied nodes are determined as follows: // For a copied document node, the document-uri property is set to empty. // For a copied element node, the type-name property is set to xs:untyped, and the nilled, is-id, and is-idrefs properties are set to false. // For a copied attribute node, the type-name property is set to xs:untypedAtomic and the is-idrefs property is set to false. The is-id property is set to true if the qualified name of the attribute node is xml:id; otherwise it is set to false. // The string-value of each copied element and attribute node remains unchanged, and its typed value becomes equal to its string value as an instance of xs:untypedAtomic. // Note:Implementations that store only the typed value of a node are required at this point to convert the typed value to a string form. // If copy-namespaces mode in the static context specifies preserve, all in-scope-namespaces of the original element are retained in the new copy. If copy-namespaces mode specifies no-preserve, the new copy retains only those in-scope namespaces of the original element that are used in the names of the element and its attributes. // All other properties of the copied nodes are preserved. switch (node.nodeType) { case NODE_TYPES.ELEMENT_NODE: const cloneElem = nodesFactory.createElementNS(node.namespaceURI, node.nodeName); domFacade .getAllAttributes(node) .forEach(attr => documentWriter.setAttributeNS( cloneElem, attr.namespaceURI, attr.name, attr.value ) ); for (const child of domFacade.getChildNodes(node)) { const descendant = deepCloneNode(child, domFacade, nodesFactory, documentWriter); documentWriter.insertBefore(cloneElem as ConcreteElementNode, descendant, null); } return cloneElem; case NODE_TYPES.ATTRIBUTE_NODE: const cloneAttr = nodesFactory.createAttributeNS(node.namespaceURI, node.nodeName); cloneAttr.value = node.value; return cloneAttr; case NODE_TYPES.CDATA_SECTION_NODE: return nodesFactory.createCDATASection(node.data); case NODE_TYPES.COMMENT_NODE: return nodesFactory.createComment(node.data); case NODE_TYPES.DOCUMENT_NODE: const cloneDoc = nodesFactory.createDocument(); for (const child of domFacade.getChildNodes(node)) { const descendant = deepCloneNode(child, domFacade, nodesFactory, documentWriter); documentWriter.insertBefore(cloneDoc as ConcreteDocumentNode, descendant, null); } return cloneDoc; case NODE_TYPES.PROCESSING_INSTRUCTION_NODE: return nodesFactory.createProcessingInstruction(node.target, node.data); case NODE_TYPES.TEXT_NODE: return nodesFactory.createTextNode(node.data); } } function isCreatedNode(node, createdNodes, domFacade) { if (createdNodes.includes(node)) { return true; } const parent = domFacade.getParentNode(node); return parent ? 
isCreatedNode(parent, createdNodes, domFacade) : false; } type VariableBinding = { registeredVariable?: string; sourceExpr: Expression; varRef: QName }; class TransformExpression extends UpdatingExpression { public _modifyExpr: Expression; public _returnExpr: Expression; public _variableBindings: VariableBinding[]; constructor( variableBindings: VariableBinding[], modifyExpr: Expression, returnExpr: Expression )
public evaluateWithUpdateList( dynamicContext: DynamicContext, executionParameters: ExecutionParameters ): IAsyncIterator<UpdatingExpressionResult> { const { domFacade, nodesFactory, documentWriter } = executionParameters; const sourceValueIterators: IAsyncIterator<UpdatingExpressionResult>[] = []; let modifyValueIterator: IAsyncIterator<UpdatingExpressionResult>; let returnValueIterator: IAsyncIterator<UpdatingExpressionResult>; let modifyPul: IPendingUpdate[]; const createdNodes = []; const toMergePuls = []; return { next: () => { if (createdNodes.length !== this._variableBindings.length) { // The copy clause contains one or more variable bindings, each of which consists of a variable name and an expression called the source expression. for (let i = createdNodes.length; i < this._variableBindings.length; i++) { const variableBinding = this._variableBindings[i]; let sourceValueIterator: IAsyncIterator<UpdatingExpressionResult> = sourceValueIterators[i]; // Each variable binding is processed as follows: if (!sourceValueIterator) { sourceValueIterators[ i ] = sourceValueIterator = this.ensureUpdateListWrapper( variableBinding.sourceExpr )(dynamicContext, executionParameters); } const sv = sourceValueIterator.next(IterationHint.NONE); if (!sv.ready) { return sv; } // The result of evaluating the source expression must be a single node [err:XUTY0013]. Let $node be this single node. if ( sv.value.xdmValue.length !== 1 || !isSubtypeOf(sv.value.xdmValue[0].type, 'node()') ) { throw errXUTY0013(); } const node = sv.value.xdmValue[0]; // A new copy is made of $node and all nodes that have $node as an ancestor, collectively referred to as copied nodes. const copiedNodes = createNodeValue( deepCloneNode(node.value, domFacade, nodesFactory, documentWriter) ); createdNodes.push(copiedNodes.value); toMergePuls.push(sv.value.pendingUpdateList); // The variable name is bound to the top-level copied node generated in the previous step. The scope of this variable binding includes all subexpressions of the containing copy modify expression that appear after the variable binding clause, including the source expressions of later variable bindings, but it does not include the source expression to which the current variable name is bound. dynamicContext = dynamicContext.scopeWithVariableBindings({ [variableBinding.registeredVariable]: () => sequenceFactory.singleton(copiedNodes) }); } } if (!modifyPul) { // The expression in the modify clause is evaluated, if (!modifyValueIterator) { modifyValueIterator = this.ensureUpdateListWrapper(this._modifyExpr)( dynamicContext, executionParameters ); } const mv = modifyValueIterator.next(IterationHint.NONE); if (!mv.ready) { return mv; } // resulting in a pending update list (denoted $pul) and an XDM instance. The XDM instance is discarded, and does not form part of the result of the copy modify expression. modifyPul = mv.value.pendingUpdateList; } modifyPul.forEach(pu => { // If the target node of any update primitive in $pul is a node that was not newly created in Step 1, a dynamic error is raised [err:XUDY0014]. if (pu.target && !isCreatedNode(pu.target, createdNodes, domFacade)) { throw errXUDY0014(pu.target); } // If $pul contains a upd:put update primitive, a dynamic error is raised [err:XUDY0037]. 
if (pu.type === 'put') { throw errXUDY0037(); } }); // Let $revalidation-mode be the value of the revalidation mode in the static context of the library or main module containing the copy modify expression, and $inherit-namespaces be the value of inherit-namespaces in the static context of the copy modify expression. The following update operation is invoked: upd:applyUpdates($pul, $revalidation-mode, $inherit-namespaces). applyUpdates(modifyPul, null, null, domFacade, nodesFactory, documentWriter); // The return clause is evaluated, resulting in a pending update list and an XDM instance. if (!returnValueIterator)
{ super( new Specificity({}), variableBindings.reduce( (childExpressions, variableBinding) => { childExpressions.push(variableBinding.sourceExpr); return childExpressions; }, [modifyExpr, returnExpr] ), { canBeStaticallyEvaluated: false, resultOrder: RESULT_ORDERINGS.UNSORTED } ); this._variableBindings = variableBindings; this._modifyExpr = modifyExpr; this._returnExpr = returnExpr; }
identifier_body
TransformExpression.ts
/iterators'; import { applyUpdates, mergeUpdates } from './pulRoutines'; import UpdatingExpression from './UpdatingExpression'; import { errXUDY0014, errXUDY0037, errXUTY0013 } from './XQueryUpdateFacilityErrors'; import { IPendingUpdate } from './IPendingUpdate'; import ISequence from '../dataTypes/ISequence'; import { separateXDMValueFromUpdatingExpressionResult } from '../PossiblyUpdatingExpression'; function deepCloneNode( node: ConcreteNode, domFacade: IWrappingDomFacade, nodesFactory: INodesFactory, documentWriter: IDocumentWriter ) { // Each copied node receives a new node identity. The parent, children, and attributes properties of the copied nodes are set so as to preserve their inter-node relationships. The parent property of the copy of $node is set to empty. Other properties of the copied nodes are determined as follows: // For a copied document node, the document-uri property is set to empty. // For a copied element node, the type-name property is set to xs:untyped, and the nilled, is-id, and is-idrefs properties are set to false. // For a copied attribute node, the type-name property is set to xs:untypedAtomic and the is-idrefs property is set to false. The is-id property is set to true if the qualified name of the attribute node is xml:id; otherwise it is set to false. // The string-value of each copied element and attribute node remains unchanged, and its typed value becomes equal to its string value as an instance of xs:untypedAtomic. // Note:Implementations that store only the typed value of a node are required at this point to convert the typed value to a string form. // If copy-namespaces mode in the static context specifies preserve, all in-scope-namespaces of the original element are retained in the new copy. If copy-namespaces mode specifies no-preserve, the new copy retains only those in-scope namespaces of the original element that are used in the names of the element and its attributes. // All other properties of the copied nodes are preserved. switch (node.nodeType) { case NODE_TYPES.ELEMENT_NODE: const cloneElem = nodesFactory.createElementNS(node.namespaceURI, node.nodeName); domFacade .getAllAttributes(node) .forEach(attr => documentWriter.setAttributeNS( cloneElem, attr.namespaceURI, attr.name, attr.value ) ); for (const child of domFacade.getChildNodes(node)) { const descendant = deepCloneNode(child, domFacade, nodesFactory, documentWriter); documentWriter.insertBefore(cloneElem as ConcreteElementNode, descendant, null); } return cloneElem; case NODE_TYPES.ATTRIBUTE_NODE: const cloneAttr = nodesFactory.createAttributeNS(node.namespaceURI, node.nodeName); cloneAttr.value = node.value; return cloneAttr; case NODE_TYPES.CDATA_SECTION_NODE: return nodesFactory.createCDATASection(node.data); case NODE_TYPES.COMMENT_NODE: return nodesFactory.createComment(node.data); case NODE_TYPES.DOCUMENT_NODE: const cloneDoc = nodesFactory.createDocument(); for (const child of domFacade.getChildNodes(node)) { const descendant = deepCloneNode(child, domFacade, nodesFactory, documentWriter); documentWriter.insertBefore(cloneDoc as ConcreteDocumentNode, descendant, null); } return cloneDoc; case NODE_TYPES.PROCESSING_INSTRUCTION_NODE: return nodesFactory.createProcessingInstruction(node.target, node.data); case NODE_TYPES.TEXT_NODE: return nodesFactory.createTextNode(node.data);
function isCreatedNode(node, createdNodes, domFacade) { if (createdNodes.includes(node)) { return true; } const parent = domFacade.getParentNode(node); return parent ? isCreatedNode(parent, createdNodes, domFacade) : false; } type VariableBinding = { registeredVariable?: string; sourceExpr: Expression; varRef: QName }; class TransformExpression extends UpdatingExpression { public _modifyExpr: Expression; public _returnExpr: Expression; public _variableBindings: VariableBinding[]; constructor( variableBindings: VariableBinding[], modifyExpr: Expression, returnExpr: Expression ) { super( new Specificity({}), variableBindings.reduce( (childExpressions, variableBinding) => { childExpressions.push(variableBinding.sourceExpr); return childExpressions; }, [modifyExpr, returnExpr] ), { canBeStaticallyEvaluated: false, resultOrder: RESULT_ORDERINGS.UNSORTED } ); this._variableBindings = variableBindings; this._modifyExpr = modifyExpr; this._returnExpr = returnExpr; } public evaluateWithUpdateList( dynamicContext: DynamicContext, executionParameters: ExecutionParameters ): IAsyncIterator<UpdatingExpressionResult> { const { domFacade, nodesFactory, documentWriter } = executionParameters; const sourceValueIterators: IAsyncIterator<UpdatingExpressionResult>[] = []; let modifyValueIterator: IAsyncIterator<UpdatingExpressionResult>; let returnValueIterator: IAsyncIterator<UpdatingExpressionResult>; let modifyPul: IPendingUpdate[]; const createdNodes = []; const toMergePuls = []; return { next: () => { if (createdNodes.length !== this._variableBindings.length) { // The copy clause contains one or more variable bindings, each of which consists of a variable name and an expression called the source expression. for (let i = createdNodes.length; i < this._variableBindings.length; i++) { const variableBinding = this._variableBindings[i]; let sourceValueIterator: IAsyncIterator<UpdatingExpressionResult> = sourceValueIterators[i]; // Each variable binding is processed as follows: if (!sourceValueIterator) { sourceValueIterators[ i ] = sourceValueIterator = this.ensureUpdateListWrapper( variableBinding.sourceExpr )(dynamicContext, executionParameters); } const sv = sourceValueIterator.next(IterationHint.NONE); if (!sv.ready) { return sv; } // The result of evaluating the source expression must be a single node [err:XUTY0013]. Let $node be this single node. if ( sv.value.xdmValue.length !== 1 || !isSubtypeOf(sv.value.xdmValue[0].type, 'node()') ) { throw errXUTY0013(); } const node = sv.value.xdmValue[0]; // A new copy is made of $node and all nodes that have $node as an ancestor, collectively referred to as copied nodes. const copiedNodes = createNodeValue( deepCloneNode(node.value, domFacade, nodesFactory, documentWriter) ); createdNodes.push(copiedNodes.value); toMergePuls.push(sv.value.pendingUpdateList); // The variable name is bound to the top-level copied node generated in the previous step. The scope of this variable binding includes all subexpressions of the containing copy modify expression that appear after the variable binding clause, including the source expressions of later variable bindings, but it does not include the source expression to which the current variable name is bound. 
dynamicContext = dynamicContext.scopeWithVariableBindings({ [variableBinding.registeredVariable]: () => sequenceFactory.singleton(copiedNodes) }); } } if (!modifyPul) { // The expression in the modify clause is evaluated, if (!modifyValueIterator) { modifyValueIterator = this.ensureUpdateListWrapper(this._modifyExpr)( dynamicContext, executionParameters ); } const mv = modifyValueIterator.next(IterationHint.NONE); if (!mv.ready) { return mv; } // resulting in a pending update list (denoted $pul) and an XDM instance. The XDM instance is discarded, and does not form part of the result of the copy modify expression. modifyPul = mv.value.pendingUpdateList; } modifyPul.forEach(pu => { // If the target node of any update primitive in $pul is a node that was not newly created in Step 1, a dynamic error is raised [err:XUDY0014]. if (pu.target && !isCreatedNode(pu.target, createdNodes, domFacade)) { throw errXUDY0014(pu.target); } // If $pul contains a upd:put update primitive, a dynamic error is raised [err:XUDY0037]. if (pu.type === 'put') { throw errXUDY0037(); } }); // Let $revalidation-mode be the value of the revalidation mode in the static context of the library or main module containing the copy modify expression, and $inherit-namespaces be the value of inherit-namespaces in the static context of the copy modify expression. The following update operation is invoked: upd:applyUpdates($pul, $revalidation-mode, $inherit-namespaces). applyUpdates(modifyPul, null, null, domFacade, nodesFactory, documentWriter); // The return clause is evaluated, resulting in a pending update list and an XDM instance. if (!returnValueIterator) {
} }
random_line_split
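Each copied node is made visible to the modify and return clauses by deriving a new dynamic context that carries one extra variable binding (scopeWithVariableBindings), rather than by mutating the current context. Below is a small TypeScript sketch of that immutable-extension idea; the Context class is an assumption made for this sketch, not fontoxpath's DynamicContext.

// Immutable context extension: each copy clause derives a new context that
// layers one extra binding on top of the previous one. Illustrative only.
type Binding = () => unknown;

class Context {
  constructor(private readonly bindings: ReadonlyMap<string, Binding> = new Map()) {}

  scopeWith(extra: Record<string, Binding>): Context {
    const next = new Map(this.bindings);
    for (const [name, value] of Object.entries(extra)) next.set(name, value);
    return new Context(next); // the original context is left untouched
  }

  lookup(name: string): unknown {
    const binding = this.bindings.get(name);
    return binding ? binding() : undefined;
  }
}

const base = new Context();
const scoped = base.scopeWith({ copiedNode: () => '<copy of $node>' });
console.log(scoped.lookup('copiedNode')); // "<copy of $node>"
console.log(base.lookup('copiedNode'));   // undefined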
index.js
''; item.item_info.tags.forEach(function (tagItem) { var tag_name = '<a class="tagname" target="_blank" href="https://juejin.im/tag/' + tagItem.tag_name + '">' + tagItem.tag_name + '</a>'; category += tag_name; }); if (article_info.cover_image) { cover_image = '<div style="background-image:url(' + article_info.cover_image + ')" class="cover-image"><a target="_blank" href="https://juejin.im/post/' + article_info.article_id + '"></a></div>'; } var li = '<a target="_blank" href="https://juejin.im/post/' + article_info.article_id + '"><object>' + '<li class="content-list">' + '<div class="detail-list">' + '<div class="left-box">' + '<div class="detail-title">' + '<ul><li>' + '<a target="_blank" href="https://juejin.im/user/' + article_info.user_id + '">' + author_user_info.user_name + '</a></li><li>' + dayjs(parseInt(article_info.ctime + '000')).fromNow() + '</li><li>' + category + '</li></ul></div><div class="detail-content">' + '<a target="_blank" href="https://juejin.im/post/' + article_info.article_id + '">' + article_info.title + '</a></div><div class="detail-action">' + '<ul><li><a href="">' + '<i class="iconfont icon-good"></i>' + '<span>' + article_info.digg_count + '</span></a></li><li><a href="">' + '<i class="iconfont icon-message-reply"></i>' + '<span>' + article_info.comment_count + '</span></a></li><li class="share"><a href="">' + '<i class="iconfont icon-share"></i>' + '</a></li></ul></div></div><div class="right-box">' + cover_image + '</div></div></li></object></a>'; ul += li; } }); ul += '</ul>'; document.querySelector('.content-box .content').innerHTML = ul; } } // 获取话题数据 function getTopic() { var obj = { cursor: '0', limit: 21, sort_type: 7, }; $.ajax({ url: 'http://127.0.0.1:3000/api/juejin/getTopic', type: 'POST', contentType: 'application/json', data: JSON.stringify(obj), success: function (result) { initTopic(result.data); }, error: function (error) { console.log(error); }, }); } // 初始化话题页 function initTopic(data) { if (data.length > 0) { var topicBox = '<div class="topic-box-div">' + '<div class="topic-content">' + '<div class="topic-title">全部话题</div>' + '<div class="topic-list">'; data.forEach(function (item) { var topicItem = '<div class="topic-item">' + '<div class="pic" style="background-image: url(' + item.topic.icon + ')">' + '<a target="_blank" href="https://juejin.im/topic/' + item.topic.topic_id + '"></a>' + '</div>' + '<div class="detail">' + '<a target="_blank" href="https://juejin.im/topic/' + item.topic.topic_id + '">' + item.topic.title + '</a>' + '<span>' + item.topic.follower_count + ' 关注 · ' + item.topic.msg_count + ' 沸点</span>' + '<span> + 关注 </span>' + '</div>' + '</div>'; topicBox += topicItem; }); document.querySelector( '.content-container .topic-box' ).innerHTML = topicBox; } } // 获取小册数据 function getBrochure() { var obj = { cursor: '0', limit: 20, category_id: '0', }; $.ajax({ url: 'http://127.0.0.1:3000/api/juejin/listbycategory', type: 'POST', contentType: 'application/json', data: JSON.stringify(obj), success: function (result) { initBrochure(result.data); }, error: function (err) { console.log(err); }, }); } // 初始化小册页 function initBrochure(data) { var brochureList = ''; data.forEach(function (item) { var info = item.base_info; var userInfo = item.user_info; var presell = ''; var levelPic = ''; var tooltip = ''; var levelArr = [1, 2, 3, 4, 5]; if (info.is_finished == 0) { presell = '<span class="presell">预售</span>'; } if (levelArr.indexOf(userInfo.level) >= 0) { levelPic = '<a target="_blank" 
href="https://juejin.im/book/6844733795329900551/section/6844733795371843597" class="rank"><img src="./img/lv' + userInfo.level + '.svg" alt="" /></a>'; } if (userInfo.company) { userInfo.company = ' @ ' + userInfo.company; } if (item.event_discount) { var endTime = item.event_discount.end_time * 1000; countdown(endTime); var tooltip = '<div class="tooltip">' + '<span class="pre-text">' + '<img src="//s3.pstatp.com/toutiao/xitu_juejin_web/img/gift.9a8f3aa.png" alt=""/>' + '<span class="time-limit-price">限时优惠价 ' + (info.price / 1000) * item.event_discount.discount_rate + ' 元</span></span>' + '<span class="counnt-down">' + '<span class="endTime">' + '</span>' + '</span>' + '</div>'; } var brochureItem = '<a target="_blank" href="https://juejin.im/book/' + item.booklet_id + '"><object>' + '<div class="brochure-item">' + '<div class="pic" style="background-image: url(' + info.cover_img + ');"></div>' + '<div class="content">' + '<div class="title">' + presell + '<span class="title-detail">' + info.title + '</span>' + '</div>' + '<div class="detail">' + info.summary + '</div>' + '<div class="author">' + '<div class="author-info"><a target="_blank" href="https://juejin.im/user/' + userInfo.user_id + '"><div class="profile-photo" style="' + 'background-image: url(' + userInfo.avatar_large + ');"></div>' + '<a class="author-name" target="_blank" href="http://juejin.im/user/' + userInfo.user_id + '">' + userInfo.user_name + '</a>' + levelPic + '</a>' + '</div>' + '<div class="author-desc">' + '<span>' + userInfo.job_title + userInfo.company + '</span>' + '</div>' + '</div>' + '<div class="other">' + '<div class="price">' + '<a href="https://juejin.im/books/payment/' + item.booklet_id + '">' + '<div class="price-text">¥' + parseInt(info.price) / 100 + '</div>' + '</a>' + tooltip + '</div>' + '<div class="messages">' + '<span class="message">' + '<span>' + info.section_count + '小节</span>' + '</span>' + '<span class="message">' + '<span>' + ' ' + info.buy_count + '</span>' + '<span> 人已购买</span>' + '</span>' + '</div>' + '</div>' + '</div>' + '</div>' + '</object></a>'; brochureList += brochureItem; }); document.querySelector('.brochure-list').innerHTML = brochureList; } // 倒计时 function countdown(endTime) { var time; var timer = setInterval(function () { const msec = endTime - +new Date(); if (msec > 0) { // 计算时分秒数 let day = parseInt(msec / 1000 / 60 / 60 / 24); let hr = parseInt((msec / 1000 /
60 / 60)
identifier_name
index.js
xOf(className) >= 0) { titles.forEach(function (item) { item.classList.remove('active'); }); topic.style.display = 'none'; ad.style.display = 'none'; homepage.style.display = 'none'; brochureSubtitleBox.style.display = 'none'; homePageSubtitleBox.style.display = 'none'; brochure.style.display = 'none'; if (className == 'home-page') { ad.style.display = 'block'; homepage.style.display = 'block'; homePageSubtitleBox.style.display = 'block'; addGroup.innerText = '写文章'; e.classList.add('active'); } if (className == 'topic') { topic.style.display = 'block'; document.body.background = '#fff'; addGroup.innerText = '发沸点'; e.classList.add('active'); } if (className == 'brochure') { brochureSubtitleBox.style.display = 'block'; brochure.style.display = 'block'; addGroup.innerText = '写文章'; e.classList.add('active'); } } } // 打开子菜单 function openSubMenu(sort_type, e) { var lis = document.querySelectorAll('.content-title li'); getHomePageList(sort_type); lis.forEach(function (item) { item.classList.remove('active'); }); e.target.classList.add('active'); } // 获取首页数据 // 热门sort_type=200,最新sort_type=300,热榜sort_type=3, function getHomePageList(sort_type) { var obj = { client_type: 2608, cursor: '0', id_type: 2, limit: 20, sort_type: sort_type, }; $.ajax({ url: 'http://127.0.0.1:3000/api/juejin/getRecommend', type: 'POST', contentType: 'application/json', data: JSON.stringify(obj), success: function (result) { init(result.data); }, error: function (error) { console.log(error); }, }); } // 首页初始化 function init(data) { if (data.length > 0) { var ul = '<ul class="content-list-ul">'; data.forEach(function (item, index) { if (item.item_type == 2) { var author_user_info = item.item_info.author_user_info; var article_info = item.item_info.article_info; var category = ''; var cover_image = ''; item.item_info.tags.forEach(function (tagItem) { var tag_name = '<a class="tagname" target="_blank" href="https://juejin.im/tag/' + tagItem.tag_name + '">' + tagItem.tag_name + '</a>'; category += tag_name; }); if (article_info.cover_image) { cover_image = '<div style="background-image:url(' + article_info.cover_image + ')" class="cover-image"><a target="_blank" href="https://juejin.im/post/' + article_info.article_id + '"></a></div>'; } var li = '<a target="_blank" href="https://juejin.im/post/' + article_info.article_id + '"><object>' + '<li class="content-list">' + '<div class="detail-list">' + '<div class="left-box">' + '<div class="detail-title">' + '<ul><li>' + '<a target="_blank" href="https://juejin.im/user/' + article_info.user_id + '">' + author_user_info.user_name + '</a></li><li>' + dayjs(parseInt(article_info.ctime + '000')).fromNow() + '</li><li>' + category + '</li></ul></div><div class="detail-content">' + '<a target="_blank" href="https://juejin.im/post/' + article_info.article_id + '">' + article_info.title + '</a></div><div class="detail-action">' + '<ul><li><a href="">' + '<i class="iconfont icon-good"></i>' + '<span>' + article_info.digg_count + '</span></a></li><li><a href="">' + '<i class="iconfont icon-message-reply"></i>' + '<span>' + article_info.comment_count + '</span></a></li><li class="share"><a href="">' + '<i class="iconfont icon-share"></i>' + '</a></li></ul></div></div><div class="right-box">' + cover_image + '</div></div></li></object></a>'; ul += li; } }); ul += '</ul>'; document.querySelector('.content-box .content').innerHTML = ul; } } // 获取话题数据 function getTopic() { var obj = { cursor: '0', limit: 21, sort_type: 7, }; $.ajax({ url: 'http://127.0.0.1:3000/api/juejin/getTopic', type: 'POST', 
contentType: 'application/json', data: JSON.stringify(obj), success: function (result) { initTopic(result.data); }, error: function (error) { console.log(error); }, }); } // 初始化话题页 function initTopic(data) { if (data.length > 0) { var topicBox = '<div class="topic-box-div">' + '<div class="topic-content">' + '<div class="topic-title">全部话题</div>' + '<div class="topic-list">'; data.forEach(function (item) { var topicItem = '<div class="topic-item">' + '<div class="pic" style="background-image: url(' + item.topic.icon + ')">' + '<a target="_blank" href="https://juejin.im/topic/' + item.topic.topic_id + '"></a>' + '</div>' + '<div class="detail">' + '<a target="_blank" href="https://juejin.im/topic/' + item.topic.topic_id + '">' + item.topic.title + '</a>' + '<span>' + item.topic.follower_count + ' 关注 · ' + item.topic.msg_count + ' 沸点</span>' + '<span> + 关注 </span>' + '</div>' + '</div>'; topicBox += topicItem; }); document.querySelector( '.content-container .topic-box' ).innerHTML = topicBox; } } // 获取小册数据 function getBrochure() { var obj = { cursor: '0', limit: 20, category_id: '0', }; $.ajax({ url: 'http://127.0.0.1:3000/api/juejin/listbycategory', type: 'POST', contentType: 'application/json', data: JSON.stringify(obj), success: function (result) { initBrochure(result.data); }, error: function (err) { console.log(err); }, }); } // 初始化小册页 function initBrochure(data) { var brochureList = ''; data.forEach(function (item) { var info = item.base_info; var userInfo = item.user_info; var presell = ''; var levelPic = ''; var tooltip = ''; var levelArr = [1, 2, 3, 4, 5]; if (info.is_finished == 0) { presell = '<span class="presell">预售</span>'; } if (levelArr.indexOf(userInfo.level) >= 0) { levelPic = '<a target="_blank" href="https://juejin.im/book/6844733795329900551/section/6844733795371843597" class="rank"><img src="./img/lv' + userInfo.level + '.svg" alt="" /></a>'; } if (userInfo.company) { userInfo.company = ' @ ' + userInfo.company; } if (item.event_discount) { var endTime = item.event_discount.end_time * 1000; countdown(endTime); var tooltip = '<div class="tooltip">' + '<span class="pre-text">' + '<img src="//s3.pstatp.com/toutiao/xitu_juejin_web/img/gift.9a8f3aa.png" alt=""/>' + '<span class="time-limit-price">限时优惠价 ' + (info.price / 1000) * item.event_discount.discount_rate + ' 元</span></span>' + '<span class="counnt-down">' + '<span class="endTime">' + '</span>' + '</span>' + '</div>'; } var brochureItem = '<a target="_blank" href="https://juejin.im/book/' + item.booklet_id + '"><object>' + '<div class="brochure-item">' + '<div class="pic" style="
; if (className && classNameArr.inde
conditional_block
index.js
brochure.style.display = 'none'; if (className == 'home-page') { ad.style.display = 'block'; homepage.style.display = 'block'; homePageSubtitleBox.style.display = 'block'; addGroup.innerText = '写文章'; e.classList.add('active'); } if (className == 'topic') { topic.style.display = 'block'; document.body.background = '#fff'; addGroup.innerText = '发沸点'; e.classList.add('active'); } if (className == 'brochure') { brochureSubtitleBox.style.display = 'block'; brochure.style.display = 'block'; addGroup.innerText = '写文章'; e.classList.add('active'); } } } // 打开子菜单 function openSubMenu(sort_type, e) { var lis = document.querySelectorAll('.content-title li'); getHomePageList(sort_type); lis.forEach(function (item) { item.classList.remove('active'); }); e.target.classList.add('active'); } // 获取首页数据 // 热门sort_type=200,最新sort_type=300,热榜sort_type=3, function getHomePageList(sort_type) { var obj = { client_type: 2608, cursor: '0', id_type: 2, limit: 20, sort_type: sort_type, }; $.ajax({ url: 'http://127.0.0.1:3000/api/juejin/getRecommend', type: 'POST', contentType: 'application/json', data: JSON.stringify(obj), success: function (result) { init(result.data); }, error: function (error) { console.log(error); }, }); } // 首页初始化 function init(data) { if (data.length > 0) { var ul = '<ul class="content-list-ul">'; data.forEach(function (item, index) { if (item.item_type == 2) { var author_user_info = item.item_info.author_user_info; var article_info = item.item_info.article_info; var category = ''; var cover_image = ''; item.item_info.tags.forEach(function (tagItem) { var tag_name = '<a class="tagname" target="_blank" href="https://juejin.im/tag/' + tagItem.tag_name + '">' + tagItem.tag_name + '</a>'; category += tag_name; }); if (article_info.cover_image) { cover_image = '<div style="background-image:url(' + article_info.cover_image + ')" class="cover-image"><a target="_blank" href="https://juejin.im/post/' + article_info.article_id + '"></a></div>'; } var li = '<a target="_blank" href="https://juejin.im/post/' + article_info.article_id + '"><object>' + '<li class="content-list">' + '<div class="detail-list">' + '<div class="left-box">' + '<div class="detail-title">' + '<ul><li>' + '<a target="_blank" href="https://juejin.im/user/' + article_info.user_id + '">' + author_user_info.user_name + '</a></li><li>' + dayjs(parseInt(article_info.ctime + '000')).fromNow() + '</li><li>' + category + '</li></ul></div><div class="detail-content">' + '<a target="_blank" href="https://juejin.im/post/' + article_info.article_id + '">' + article_info.title + '</a></div><div class="detail-action">' + '<ul><li><a href="">' + '<i class="iconfont icon-good"></i>' + '<span>' + article_info.digg_count + '</span></a></li><li><a href="">' + '<i class="iconfont icon-message-reply"></i>' + '<span>' + article_info.comment_count + '</span></a></li><li class="share"><a href="">' + '<i class="iconfont icon-share"></i>' + '</a></li></ul></div></div><div class="right-box">' + cover_image + '</div></div></li></object></a>'; ul += li; } }); ul += '</ul>'; document.querySelector('.content-box .content').innerHTML = ul; } } // 获取话题数据 function getTopic() { var obj = { cursor: '0', limit: 21, sort_type: 7, }; $.ajax({ url: 'http://127.0.0.1:3000/api/juejin/getTopic', type: 'POST', contentType: 'application/json', data: JSON.stringify(obj), success: function (result) { initTopic(result.data); }, error: function (error) { console.log(error); }, }); } // 初始化话题页 function initTopic(data) { if (data.length > 0) { var topicBox = '<div 
class="topic-box-div">' + '<div class="topic-content">' + '<div class="topic-title">全部话题</
' 关注 · ' + item.topic.msg_count + ' 沸点</span>' + '<span> + 关注 </span>' + '</div>' + '</div>'; topicBox += topicItem; }); document.querySelector( '.content-container .topic-box' ).innerHTML = topicBox; } } // 获取小册数据 function getBrochure() { var obj = { cursor: '0', limit: 20, category_id: '0', }; $.ajax({ url: 'http://127.0.0.1:3000/api/juejin/listbycategory', type: 'POST', contentType: 'application/json', data: JSON.stringify(obj), success: function (result) { initBrochure(result.data); }, error: function (err) { console.log(err); }, }); } // 初始化小册页 function initBrochure(data) { var brochureList = ''; data.forEach(function (item) { var info = item.base_info; var userInfo = item.user_info; var presell = ''; var levelPic = ''; var tooltip = ''; var levelArr = [1, 2, 3, 4, 5]; if (info.is_finished == 0) { presell = '<span class="presell">预售</span>'; } if (levelArr.indexOf(userInfo.level) >= 0) { levelPic = '<a target="_blank" href="https://juejin.im/book/6844733795329900551/section/6844733795371843597" class="rank"><img src="./img/lv' + userInfo.level + '.svg" alt="" /></a>'; } if (userInfo.company) { userInfo.company = ' @ ' + userInfo.company; } if (item.event_discount) { var endTime = item.event_discount.end_time * 1000; countdown(endTime); var tooltip = '<div class="tooltip">' + '<span class="pre-text">' + '<img src="//s3.pstatp.com/toutiao/xitu_juejin_web/img/gift.9a8f3aa.png" alt=""/>' + '<span class="time-limit-price">限时优惠价 ' + (info.price / 1000) * item.event_discount.discount_rate + ' 元</span></span>' + '<span class="counnt-down">' + '<span class="endTime">' + '</span>' + '</span>' + '</div>'; } var brochureItem = '<a target="_blank" href="https://juejin.im/book/' + item.booklet_id + '"><object>' + '<div class="brochure-item">' + '<div class="pic" style="background-image: url(' + info.cover_img + ');"></div>' + '<div class="content">' + '<div class="title">' + presell + '<span class="title-detail">' + info.title + '</span>' + '</div>' + '<div class="detail">' + info.summary + '</div>' + '<div class="
div>' + '<div class="topic-list">'; data.forEach(function (item) { var topicItem = '<div class="topic-item">' + '<div class="pic" style="background-image: url(' + item.topic.icon + ')">' + '<a target="_blank" href="https://juejin.im/topic/' + item.topic.topic_id + '"></a>' + '</div>' + '<div class="detail">' + '<a target="_blank" href="https://juejin.im/topic/' + item.topic.topic_id + '">' + item.topic.title + '</a>' + '<span>' + item.topic.follower_count +
identifier_body
index.js
brochure.style.display = 'none'; if (className == 'home-page') { ad.style.display = 'block'; homepage.style.display = 'block'; homePageSubtitleBox.style.display = 'block'; addGroup.innerText = '写文章'; e.classList.add('active'); } if (className == 'topic') { topic.style.display = 'block'; document.body.background = '#fff'; addGroup.innerText = '发沸点'; e.classList.add('active'); } if (className == 'brochure') { brochureSubtitleBox.style.display = 'block'; brochure.style.display = 'block'; addGroup.innerText = '写文章'; e.classList.add('active'); } } } // 打开子菜单 function openSubMenu(sort_type, e) { var lis = document.querySelectorAll('.content-title li'); getHomePageList(sort_type); lis.forEach(function (item) { item.classList.remove('active'); }); e.target.classList.add('active'); } // 获取首页数据 // 热门sort_type=200,最新sort_type=300,热榜sort_type=3, function getHomePageList(sort_type) { var obj = { client_type: 2608, cursor: '0', id_type: 2, limit: 20, sort_type: sort_type, }; $.ajax({ url: 'http://127.0.0.1:3000/api/juejin/getRecommend', type: 'POST', contentType: 'application/json', data: JSON.stringify(obj), success: function (result) { init(result.data); }, error: function (error) { console.log(error); }, }); } // 首页初始化 function init(data) { if (data.length > 0) { var ul = '<ul class="content-list-ul">'; data.forEach(function (item, index) { if (item.item_type == 2) { var author_user_info = item.item_info.author_user_info; var article_info = item.item_info.article_info; var category = ''; var cover_image = ''; item.item_info.tags.forEach(function (tagItem) { var tag_name = '<a class="tagname" target="_blank" href="https://juejin.im/tag/' + tagItem.tag_name + '">' + tagItem.tag_name + '</a>'; category += tag_name; }); if (article_info.cover_image) { cover_image = '<div style="background-image:url(' + article_info.cover_image + ')" class="cover-image"><a target="_blank" href="https://juejin.im/post/' + article_info.article_id + '"></a></div>'; } var li = '<a target="_blank" href="https://juejin.im/post/' + article_info.article_id + '"><object>' + '<li class="content-list">' + '<div class="detail-list">' + '<div class="left-box">' + '<div class="detail-title">' + '<ul><li>' + '<a target="_blank" href="https://juejin.im/user/' + article_info.user_id + '">' + author_user_info.user_name + '</a></li><li>' + dayjs(parseInt(article_info.ctime + '000')).fromNow() + '</li><li>' + category + '</li></ul></div><div class="detail-content">' + '<a target="_blank" href="https://juejin.im/post/' + article_info.article_id + '">' + article_info.title + '</a></div><div class="detail-action">' + '<ul><li><a href="">' + '<i class="iconfont icon-good"></i>' + '<span>' + article_info.digg_count + '</span></a></li><li><a href="">' + '<i class="iconfont icon-message-reply"></i>' + '<span>' + article_info.comment_count + '</span></a></li><li class="share"><a href="">' + '<i class="iconfont icon-share"></i>' + '</a></li></ul></div></div><div class="right-box">' + cover_image + '</div></div></li></object></a>'; ul += li; } }); ul += '</ul>'; document.querySelector('.content-box .content').innerHTML = ul; } } // 获取话题数据 function getTopic() { var obj = { cursor: '0', limit: 21, sort_type: 7, }; $.ajax({ url: 'http://127.0.0.1:3000/api/juejin/getTopic', type: 'POST', contentType: 'application/json', data: JSON.stringify(obj), success: function (result) { initTopic(result.data);
console.log(error); }, }); } // 初始化话题页 function initTopic(data) { if (data.length > 0) { var topicBox = '<div class="topic-box-div">' + '<div class="topic-content">' + '<div class="topic-title">全部话题</div>' + '<div class="topic-list">'; data.forEach(function (item) { var topicItem = '<div class="topic-item">' + '<div class="pic" style="background-image: url(' + item.topic.icon + ')">' + '<a target="_blank" href="https://juejin.im/topic/' + item.topic.topic_id + '"></a>' + '</div>' + '<div class="detail">' + '<a target="_blank" href="https://juejin.im/topic/' + item.topic.topic_id + '">' + item.topic.title + '</a>' + '<span>' + item.topic.follower_count + ' 关注 · ' + item.topic.msg_count + ' 沸点</span>' + '<span> + 关注 </span>' + '</div>' + '</div>'; topicBox += topicItem; }); document.querySelector( '.content-container .topic-box' ).innerHTML = topicBox; } } // 获取小册数据 function getBrochure() { var obj = { cursor: '0', limit: 20, category_id: '0', }; $.ajax({ url: 'http://127.0.0.1:3000/api/juejin/listbycategory', type: 'POST', contentType: 'application/json', data: JSON.stringify(obj), success: function (result) { initBrochure(result.data); }, error: function (err) { console.log(err); }, }); } // 初始化小册页 function initBrochure(data) { var brochureList = ''; data.forEach(function (item) { var info = item.base_info; var userInfo = item.user_info; var presell = ''; var levelPic = ''; var tooltip = ''; var levelArr = [1, 2, 3, 4, 5]; if (info.is_finished == 0) { presell = '<span class="presell">预售</span>'; } if (levelArr.indexOf(userInfo.level) >= 0) { levelPic = '<a target="_blank" href="https://juejin.im/book/6844733795329900551/section/6844733795371843597" class="rank"><img src="./img/lv' + userInfo.level + '.svg" alt="" /></a>'; } if (userInfo.company) { userInfo.company = ' @ ' + userInfo.company; } if (item.event_discount) { var endTime = item.event_discount.end_time * 1000; countdown(endTime); var tooltip = '<div class="tooltip">' + '<span class="pre-text">' + '<img src="//s3.pstatp.com/toutiao/xitu_juejin_web/img/gift.9a8f3aa.png" alt=""/>' + '<span class="time-limit-price">限时优惠价 ' + (info.price / 1000) * item.event_discount.discount_rate + ' 元</span></span>' + '<span class="counnt-down">' + '<span class="endTime">' + '</span>' + '</span>' + '</div>'; } var brochureItem = '<a target="_blank" href="https://juejin.im/book/' + item.booklet_id + '"><object>' + '<div class="brochure-item">' + '<div class="pic" style="background-image: url(' + info.cover_img + ');"></div>' + '<div class="content">' + '<div class="title">' + presell + '<span class="title-detail">' + info.title + '</span>' + '</div>' + '<div class="detail">' + info.summary + '</div>' + '<div class="
}, error: function (error) {
random_line_split
futures.rs
Send a future to an executor. /// /// This needs to be thread-safe, as it is called from a `Waker` that may be on a different thread. #[derive(Debug)] struct Sender { /// The sender used to send runnables to the executor. /// /// `mpsc::Sender` is `!Sync`, wrapping it in a `Mutex` makes it `Sync`. sender: Mutex<mpsc::Sender<Runnable<usize>>>, /// The ping source used to wake up the executor. wake_up: Ping, /// Whether the executor has already been woken. notified: AtomicBool, } /// An active future or its result. #[derive(Debug)] enum Active<T> { /// The future is currently being polled. /// /// Waking this waker will insert the runnable into `incoming`. Future(Waker), /// The future has finished polling, and its result is stored here. Finished(T), } impl<T> Active<T> { fn is_finished(&self) -> bool { matches!(self, Active::Finished(_)) } } impl<T> Scheduler<T> { /// Sends the given future to the executor associated to this scheduler /// /// Returns an error if the the executor not longer exists. pub fn schedule<Fut: 'static>(&self, future: Fut) -> Result<(), ExecutorDestroyed> where Fut: Future<Output = T>, T: 'static, { /// Store this future's result in the executor. struct StoreOnDrop<'a, T> { index: usize, value: Option<T>, state: &'a State<T>, } impl<T> Drop for StoreOnDrop<'_, T> { fn drop(&mut self) { let mut active_tasks = self.state.active_tasks.borrow_mut(); if let Some(active_tasks) = active_tasks.as_mut() { if let Some(value) = self.value.take() { active_tasks[self.index] = Active::Finished(value); } else { // The future was dropped before it finished. // Remove it from the active list. active_tasks.remove(self.index); } } } } fn assert_send_and_sync<T: Send + Sync>(_: &T) {} let mut active_guard = self.state.active_tasks.borrow_mut(); let active_tasks = active_guard.as_mut().ok_or(ExecutorDestroyed)?; // Wrap the future in another future that polls it and stores the result. let index = active_tasks.vacant_key(); let future = { let state = self.state.clone(); async move { let mut guard = StoreOnDrop { index, value: None, state: &state, }; // Get the value of the future. let value = future.await; // Store it in the executor. guard.value = Some(value); } }; // A schedule function that inserts the runnable into the incoming queue. let schedule = { let sender = self.state.sender.clone(); move |runnable| sender.send(runnable) }; assert_send_and_sync(&schedule); // Spawn the future. let (runnable, task) = Builder::new() .metadata(index) .spawn_local(move |_| future, schedule); // Insert the runnable into the set of active tasks. active_tasks.insert(Active::Future(runnable.waker())); drop(active_guard); // Schedule the runnable and detach the task so it isn't cancellable. runnable.schedule(); task.detach(); Ok(()) } } impl Sender { /// Send a runnable to the executor. fn send(&self, runnable: Runnable<usize>) { // Send on the channel. // // All we do with the lock is call `send`, so there's no chance of any state being corrupted on // panic. Therefore it's safe to ignore the mutex poison. if let Err(e) = self .sender .lock() .unwrap_or_else(|e| e.into_inner()) .send(runnable) { // The runnable must be dropped on its origin thread, since the original future might be // !Send. This channel immediately sends it back to the Executor, which is pinned to the // origin thread. The executor's Drop implementation will force all of the runnables to be // dropped, therefore the channel should always be available. 
If we can't send the runnable, // it indicates that the above behavior is broken and that unsoundness has occurred. The // only option at this stage is to forget the runnable and leak the future. std::mem::forget(e); unreachable!("Attempted to send runnable to a stopped executor"); } // If the executor is already awake, don't bother waking it up again. if self.notified.swap(true, Ordering::SeqCst) { return; } // Wake the executor. self.wake_up.ping(); } } impl<T> Drop for Executor<T> { fn drop(&mut self) { let active_tasks = self.state.active_tasks.borrow_mut().take().unwrap(); // Wake all of the active tasks in order to destroy their runnables. for (_, task) in active_tasks { if let Active::Future(waker) = task { // Don't let a panicking waker blow everything up. // // There is a chance that a future will panic and, during the unwinding process, // drop this executor. However, since the future panicked, there is a possibility // that the internal state of the waker will be invalid in such a way that the waker // panics as well. Since this would be a panic during a panic, Rust will upgrade it // into an abort. // // In the interest of not aborting without a good reason, we just drop the panic here. std::panic::catch_unwind(|| waker.wake()).ok(); } } // Drain the queue in order to drop all of the runnables. while self.state.incoming.try_recv().is_ok() {} } } /// Error generated when trying to schedule a future after the /// executor was destroyed. #[derive(thiserror::Error, Debug)] #[error("the executor was destroyed")] pub struct ExecutorDestroyed; /// Create a new executor, and its associated scheduler /// /// May fail due to OS errors preventing calloop to setup its internal pipes (if your /// process has reatched its file descriptor limit for example). pub fn executor<T>() -> crate::Result<(Executor<T>, Scheduler<T>)> { let (sender, incoming) = mpsc::channel(); let (wake_up, ping) = make_ping()?; let state = Rc::new(State { incoming, active_tasks: RefCell::new(Some(Slab::new())), sender: Arc::new(Sender { sender: Mutex::new(sender), wake_up, notified: AtomicBool::new(false), }), }); Ok(( Executor { state: state.clone(), ping, }, Scheduler { state }, )) } impl<T> EventSource for Executor<T> { type Event = T; type Metadata = (); type Ret = (); type Error = ExecutorError; fn process_events<F>( &mut self, readiness: Readiness, token: Token, mut callback: F, ) -> Result<PostAction, Self::Error> where F: FnMut(T, &mut ()), { let state = &self.state; // Set to the unnotified state. state.sender.notified.store(false, Ordering::SeqCst); let clear_readiness = { let mut clear_readiness = false; // Process runnables, but not too many at a time; better to move onto the next event quickly! for _ in 0..1024 { let runnable = match state.incoming.try_recv() { Ok(runnable) => runnable, Err(_) => { // Make sure to clear the readiness if there are no more runnables. clear_readiness = true; break; } }; // Run the runnable. let index = *runnable.metadata(); runnable.run(); // If the runnable finished with a result, call the callback. let mut active_guard = state.active_tasks.borrow_mut(); let active_tasks = active_guard.as_mut().unwrap(); if let Some(state) = active_tasks.get(index) { if state.is_finished() { // Take out the state and provide it to the caller. let result = match active_tasks.remove(index) { Active::Finished(result) => result, _ => unreachable!(), }; callback(result, &mut ()); } } } clear_readiness }; // Clear the readiness of the ping source if there are no more runnables. 
if clear_readiness { self.ping .process_events(readiness, token, |(), &mut ()| {}) .map_err(ExecutorError::WakeError)?; } Ok(PostAction::Continue) } fn register(&mut self, poll: &mut Poll, token_factory: &mut TokenFactory) -> crate::Result<()> { self.ping.register(poll, token_factory)?; Ok(()) } fn reregister( &mut self, poll: &mut Poll, token_factory: &mut TokenFactory, ) -> crate::Result<()> { self.ping.reregister(poll, token_factory)?; Ok(()) } fn
unregister
identifier_name
futures.rs
::Slab; use std::{ cell::RefCell, future::Future, rc::Rc, sync::{ atomic::{AtomicBool, Ordering}, mpsc, Arc, Mutex, }, task::Waker, }; use crate::{ sources::{ channel::ChannelError, ping::{make_ping, Ping, PingError, PingSource}, EventSource, }, Poll, PostAction, Readiness, Token, TokenFactory, }; /// A future executor as an event source #[derive(Debug)] pub struct Executor<T> { /// Shared state between the executor and the scheduler. state: Rc<State<T>>, /// Notifies us when the executor is woken up. ping: PingSource, } /// A scheduler to send futures to an executor #[derive(Clone, Debug)] pub struct Scheduler<T> { /// Shared state between the executor and the scheduler. state: Rc<State<T>>, } /// The inner state of the executor. #[derive(Debug)] struct State<T> { /// The incoming queue of runnables to be executed. incoming: mpsc::Receiver<Runnable<usize>>, /// The sender corresponding to `incoming`. sender: Arc<Sender>, /// The list of currently active tasks. /// /// This is set to `None` when the executor is destroyed. active_tasks: RefCell<Option<Slab<Active<T>>>>, } /// Send a future to an executor. /// /// This needs to be thread-safe, as it is called from a `Waker` that may be on a different thread. #[derive(Debug)] struct Sender { /// The sender used to send runnables to the executor. /// /// `mpsc::Sender` is `!Sync`, wrapping it in a `Mutex` makes it `Sync`. sender: Mutex<mpsc::Sender<Runnable<usize>>>, /// The ping source used to wake up the executor. wake_up: Ping, /// Whether the executor has already been woken. notified: AtomicBool, } /// An active future or its result. #[derive(Debug)] enum Active<T> { /// The future is currently being polled. /// /// Waking this waker will insert the runnable into `incoming`. Future(Waker), /// The future has finished polling, and its result is stored here. Finished(T), } impl<T> Active<T> { fn is_finished(&self) -> bool { matches!(self, Active::Finished(_)) } } impl<T> Scheduler<T> { /// Sends the given future to the executor associated to this scheduler /// /// Returns an error if the the executor not longer exists. pub fn schedule<Fut: 'static>(&self, future: Fut) -> Result<(), ExecutorDestroyed> where Fut: Future<Output = T>, T: 'static, { /// Store this future's result in the executor. struct StoreOnDrop<'a, T> { index: usize, value: Option<T>, state: &'a State<T>, } impl<T> Drop for StoreOnDrop<'_, T> { fn drop(&mut self) { let mut active_tasks = self.state.active_tasks.borrow_mut(); if let Some(active_tasks) = active_tasks.as_mut() { if let Some(value) = self.value.take() { active_tasks[self.index] = Active::Finished(value); } else { // The future was dropped before it finished. // Remove it from the active list. active_tasks.remove(self.index); } } } } fn assert_send_and_sync<T: Send + Sync>(_: &T) {} let mut active_guard = self.state.active_tasks.borrow_mut(); let active_tasks = active_guard.as_mut().ok_or(ExecutorDestroyed)?; // Wrap the future in another future that polls it and stores the result. let index = active_tasks.vacant_key(); let future = { let state = self.state.clone(); async move { let mut guard = StoreOnDrop { index, value: None, state: &state, }; // Get the value of the future. let value = future.await; // Store it in the executor. guard.value = Some(value); } }; // A schedule function that inserts the runnable into the incoming queue. let schedule = { let sender = self.state.sender.clone(); move |runnable| sender.send(runnable) }; assert_send_and_sync(&schedule); // Spawn the future. 
let (runnable, task) = Builder::new() .metadata(index) .spawn_local(move |_| future, schedule); // Insert the runnable into the set of active tasks. active_tasks.insert(Active::Future(runnable.waker())); drop(active_guard); // Schedule the runnable and detach the task so it isn't cancellable. runnable.schedule(); task.detach(); Ok(()) } } impl Sender { /// Send a runnable to the executor. fn send(&self, runnable: Runnable<usize>) { // Send on the channel. // // All we do with the lock is call `send`, so there's no chance of any state being corrupted on // panic. Therefore it's safe to ignore the mutex poison. if let Err(e) = self .sender .lock() .unwrap_or_else(|e| e.into_inner()) .send(runnable) { // The runnable must be dropped on its origin thread, since the original future might be // !Send. This channel immediately sends it back to the Executor, which is pinned to the // origin thread. The executor's Drop implementation will force all of the runnables to be // dropped, therefore the channel should always be available. If we can't send the runnable, // it indicates that the above behavior is broken and that unsoundness has occurred. The // only option at this stage is to forget the runnable and leak the future. std::mem::forget(e); unreachable!("Attempted to send runnable to a stopped executor"); } // If the executor is already awake, don't bother waking it up again. if self.notified.swap(true, Ordering::SeqCst) { return; } // Wake the executor. self.wake_up.ping(); } } impl<T> Drop for Executor<T> { fn drop(&mut self) { let active_tasks = self.state.active_tasks.borrow_mut().take().unwrap(); // Wake all of the active tasks in order to destroy their runnables. for (_, task) in active_tasks { if let Active::Future(waker) = task { // Don't let a panicking waker blow everything up. // // There is a chance that a future will panic and, during the unwinding process, // drop this executor. However, since the future panicked, there is a possibility // that the internal state of the waker will be invalid in such a way that the waker // panics as well. Since this would be a panic during a panic, Rust will upgrade it // into an abort. // // In the interest of not aborting without a good reason, we just drop the panic here. std::panic::catch_unwind(|| waker.wake()).ok(); } } // Drain the queue in order to drop all of the runnables. while self.state.incoming.try_recv().is_ok() {} } } /// Error generated when trying to schedule a future after the /// executor was destroyed. #[derive(thiserror::Error, Debug)] #[error("the executor was destroyed")] pub struct ExecutorDestroyed; /// Create a new executor, and its associated scheduler /// /// May fail due to OS errors preventing calloop to setup its internal pipes (if your /// process has reatched its file descriptor limit for example). pub fn executor<T>() -> crate::Result<(Executor<T>, Scheduler<T>)>
)) } impl<T> EventSource for Executor<T> { type Event = T; type Metadata = (); type Ret = (); type Error = ExecutorError; fn process_events<F>( &mut self, readiness: Readiness, token: Token, mut callback: F, ) -> Result<PostAction, Self::Error> where F: FnMut(T, &mut ()), { let state = &self.state; // Set to the unnotified state. state.sender.notified.store(false, Ordering::SeqCst); let clear_readiness = { let mut clear_readiness = false; // Process runnables, but not too many at a time; better to move onto the next event quickly! for _ in 0..1024 { let runnable = match state.incoming.try_recv() { Ok(runnable) => runnable, Err(_) => { // Make sure to clear the readiness if there are no more runnables. clear_readiness = true; break; } }; //
{ let (sender, incoming) = mpsc::channel(); let (wake_up, ping) = make_ping()?; let state = Rc::new(State { incoming, active_tasks: RefCell::new(Some(Slab::new())), sender: Arc::new(Sender { sender: Mutex::new(sender), wake_up, notified: AtomicBool::new(false), }), }); Ok(( Executor { state: state.clone(), ping, }, Scheduler { state },
identifier_body
futures.rs
::Slab; use std::{ cell::RefCell, future::Future, rc::Rc, sync::{ atomic::{AtomicBool, Ordering}, mpsc, Arc, Mutex, }, task::Waker, }; use crate::{ sources::{ channel::ChannelError, ping::{make_ping, Ping, PingError, PingSource}, EventSource, }, Poll, PostAction, Readiness, Token, TokenFactory, };
pub struct Executor<T> { /// Shared state between the executor and the scheduler. state: Rc<State<T>>, /// Notifies us when the executor is woken up. ping: PingSource, } /// A scheduler to send futures to an executor #[derive(Clone, Debug)] pub struct Scheduler<T> { /// Shared state between the executor and the scheduler. state: Rc<State<T>>, } /// The inner state of the executor. #[derive(Debug)] struct State<T> { /// The incoming queue of runnables to be executed. incoming: mpsc::Receiver<Runnable<usize>>, /// The sender corresponding to `incoming`. sender: Arc<Sender>, /// The list of currently active tasks. /// /// This is set to `None` when the executor is destroyed. active_tasks: RefCell<Option<Slab<Active<T>>>>, } /// Send a future to an executor. /// /// This needs to be thread-safe, as it is called from a `Waker` that may be on a different thread. #[derive(Debug)] struct Sender { /// The sender used to send runnables to the executor. /// /// `mpsc::Sender` is `!Sync`, wrapping it in a `Mutex` makes it `Sync`. sender: Mutex<mpsc::Sender<Runnable<usize>>>, /// The ping source used to wake up the executor. wake_up: Ping, /// Whether the executor has already been woken. notified: AtomicBool, } /// An active future or its result. #[derive(Debug)] enum Active<T> { /// The future is currently being polled. /// /// Waking this waker will insert the runnable into `incoming`. Future(Waker), /// The future has finished polling, and its result is stored here. Finished(T), } impl<T> Active<T> { fn is_finished(&self) -> bool { matches!(self, Active::Finished(_)) } } impl<T> Scheduler<T> { /// Sends the given future to the executor associated to this scheduler /// /// Returns an error if the the executor not longer exists. pub fn schedule<Fut: 'static>(&self, future: Fut) -> Result<(), ExecutorDestroyed> where Fut: Future<Output = T>, T: 'static, { /// Store this future's result in the executor. struct StoreOnDrop<'a, T> { index: usize, value: Option<T>, state: &'a State<T>, } impl<T> Drop for StoreOnDrop<'_, T> { fn drop(&mut self) { let mut active_tasks = self.state.active_tasks.borrow_mut(); if let Some(active_tasks) = active_tasks.as_mut() { if let Some(value) = self.value.take() { active_tasks[self.index] = Active::Finished(value); } else { // The future was dropped before it finished. // Remove it from the active list. active_tasks.remove(self.index); } } } } fn assert_send_and_sync<T: Send + Sync>(_: &T) {} let mut active_guard = self.state.active_tasks.borrow_mut(); let active_tasks = active_guard.as_mut().ok_or(ExecutorDestroyed)?; // Wrap the future in another future that polls it and stores the result. let index = active_tasks.vacant_key(); let future = { let state = self.state.clone(); async move { let mut guard = StoreOnDrop { index, value: None, state: &state, }; // Get the value of the future. let value = future.await; // Store it in the executor. guard.value = Some(value); } }; // A schedule function that inserts the runnable into the incoming queue. let schedule = { let sender = self.state.sender.clone(); move |runnable| sender.send(runnable) }; assert_send_and_sync(&schedule); // Spawn the future. let (runnable, task) = Builder::new() .metadata(index) .spawn_local(move |_| future, schedule); // Insert the runnable into the set of active tasks. active_tasks.insert(Active::Future(runnable.waker())); drop(active_guard); // Schedule the runnable and detach the task so it isn't cancellable. 
runnable.schedule(); task.detach(); Ok(()) } } impl Sender { /// Send a runnable to the executor. fn send(&self, runnable: Runnable<usize>) { // Send on the channel. // // All we do with the lock is call `send`, so there's no chance of any state being corrupted on // panic. Therefore it's safe to ignore the mutex poison. if let Err(e) = self .sender .lock() .unwrap_or_else(|e| e.into_inner()) .send(runnable) { // The runnable must be dropped on its origin thread, since the original future might be // !Send. This channel immediately sends it back to the Executor, which is pinned to the // origin thread. The executor's Drop implementation will force all of the runnables to be // dropped, therefore the channel should always be available. If we can't send the runnable, // it indicates that the above behavior is broken and that unsoundness has occurred. The // only option at this stage is to forget the runnable and leak the future. std::mem::forget(e); unreachable!("Attempted to send runnable to a stopped executor"); } // If the executor is already awake, don't bother waking it up again. if self.notified.swap(true, Ordering::SeqCst) { return; } // Wake the executor. self.wake_up.ping(); } } impl<T> Drop for Executor<T> { fn drop(&mut self) { let active_tasks = self.state.active_tasks.borrow_mut().take().unwrap(); // Wake all of the active tasks in order to destroy their runnables. for (_, task) in active_tasks { if let Active::Future(waker) = task { // Don't let a panicking waker blow everything up. // // There is a chance that a future will panic and, during the unwinding process, // drop this executor. However, since the future panicked, there is a possibility // that the internal state of the waker will be invalid in such a way that the waker // panics as well. Since this would be a panic during a panic, Rust will upgrade it // into an abort. // // In the interest of not aborting without a good reason, we just drop the panic here. std::panic::catch_unwind(|| waker.wake()).ok(); } } // Drain the queue in order to drop all of the runnables. while self.state.incoming.try_recv().is_ok() {} } } /// Error generated when trying to schedule a future after the /// executor was destroyed. #[derive(thiserror::Error, Debug)] #[error("the executor was destroyed")] pub struct ExecutorDestroyed; /// Create a new executor, and its associated scheduler /// /// May fail due to OS errors preventing calloop to setup its internal pipes (if your /// process has reatched its file descriptor limit for example). pub fn executor<T>() -> crate::Result<(Executor<T>, Scheduler<T>)> { let (sender, incoming) = mpsc::channel(); let (wake_up, ping) = make_ping()?; let state = Rc::new(State { incoming, active_tasks: RefCell::new(Some(Slab::new())), sender: Arc::new(Sender { sender: Mutex::new(sender), wake_up, notified: AtomicBool::new(false), }), }); Ok(( Executor { state: state.clone(), ping, }, Scheduler { state }, )) } impl<T> EventSource for Executor<T> { type Event = T; type Metadata = (); type Ret = (); type Error = ExecutorError; fn process_events<F>( &mut self, readiness: Readiness, token: Token, mut callback: F, ) -> Result<PostAction, Self::Error> where F: FnMut(T, &mut ()), { let state = &self.state; // Set to the unnotified state. state.sender.notified.store(false, Ordering::SeqCst); let clear_readiness = { let mut clear_readiness = false; // Process runnables, but not too many at a time; better to move onto the next event quickly! 
for _ in 0..1024 { let runnable = match state.incoming.try_recv() { Ok(runnable) => runnable, Err(_) => { // Make sure to clear the readiness if there are no more runnables. clear_readiness = true; break; } }; //
/// A future executor as an event source #[derive(Debug)]
random_line_split
futures.rs
::Slab; use std::{ cell::RefCell, future::Future, rc::Rc, sync::{ atomic::{AtomicBool, Ordering}, mpsc, Arc, Mutex, }, task::Waker, }; use crate::{ sources::{ channel::ChannelError, ping::{make_ping, Ping, PingError, PingSource}, EventSource, }, Poll, PostAction, Readiness, Token, TokenFactory, }; /// A future executor as an event source #[derive(Debug)] pub struct Executor<T> { /// Shared state between the executor and the scheduler. state: Rc<State<T>>, /// Notifies us when the executor is woken up. ping: PingSource, } /// A scheduler to send futures to an executor #[derive(Clone, Debug)] pub struct Scheduler<T> { /// Shared state between the executor and the scheduler. state: Rc<State<T>>, } /// The inner state of the executor. #[derive(Debug)] struct State<T> { /// The incoming queue of runnables to be executed. incoming: mpsc::Receiver<Runnable<usize>>, /// The sender corresponding to `incoming`. sender: Arc<Sender>, /// The list of currently active tasks. /// /// This is set to `None` when the executor is destroyed. active_tasks: RefCell<Option<Slab<Active<T>>>>, } /// Send a future to an executor. /// /// This needs to be thread-safe, as it is called from a `Waker` that may be on a different thread. #[derive(Debug)] struct Sender { /// The sender used to send runnables to the executor. /// /// `mpsc::Sender` is `!Sync`, wrapping it in a `Mutex` makes it `Sync`. sender: Mutex<mpsc::Sender<Runnable<usize>>>, /// The ping source used to wake up the executor. wake_up: Ping, /// Whether the executor has already been woken. notified: AtomicBool, } /// An active future or its result. #[derive(Debug)] enum Active<T> { /// The future is currently being polled. /// /// Waking this waker will insert the runnable into `incoming`. Future(Waker), /// The future has finished polling, and its result is stored here. Finished(T), } impl<T> Active<T> { fn is_finished(&self) -> bool { matches!(self, Active::Finished(_)) } } impl<T> Scheduler<T> { /// Sends the given future to the executor associated to this scheduler /// /// Returns an error if the the executor not longer exists. pub fn schedule<Fut: 'static>(&self, future: Fut) -> Result<(), ExecutorDestroyed> where Fut: Future<Output = T>, T: 'static, { /// Store this future's result in the executor. struct StoreOnDrop<'a, T> { index: usize, value: Option<T>, state: &'a State<T>, } impl<T> Drop for StoreOnDrop<'_, T> { fn drop(&mut self) { let mut active_tasks = self.state.active_tasks.borrow_mut(); if let Some(active_tasks) = active_tasks.as_mut() { if let Some(value) = self.value.take() { active_tasks[self.index] = Active::Finished(value); } else { // The future was dropped before it finished. // Remove it from the active list. active_tasks.remove(self.index); } } } } fn assert_send_and_sync<T: Send + Sync>(_: &T) {} let mut active_guard = self.state.active_tasks.borrow_mut(); let active_tasks = active_guard.as_mut().ok_or(ExecutorDestroyed)?; // Wrap the future in another future that polls it and stores the result. let index = active_tasks.vacant_key(); let future = { let state = self.state.clone(); async move { let mut guard = StoreOnDrop { index, value: None, state: &state, }; // Get the value of the future. let value = future.await; // Store it in the executor. guard.value = Some(value); } }; // A schedule function that inserts the runnable into the incoming queue. let schedule = { let sender = self.state.sender.clone(); move |runnable| sender.send(runnable) }; assert_send_and_sync(&schedule); // Spawn the future. 
let (runnable, task) = Builder::new() .metadata(index) .spawn_local(move |_| future, schedule); // Insert the runnable into the set of active tasks. active_tasks.insert(Active::Future(runnable.waker())); drop(active_guard); // Schedule the runnable and detach the task so it isn't cancellable. runnable.schedule(); task.detach(); Ok(()) } } impl Sender { /// Send a runnable to the executor. fn send(&self, runnable: Runnable<usize>) { // Send on the channel. // // All we do with the lock is call `send`, so there's no chance of any state being corrupted on // panic. Therefore it's safe to ignore the mutex poison. if let Err(e) = self .sender .lock() .unwrap_or_else(|e| e.into_inner()) .send(runnable)
// If the executor is already awake, don't bother waking it up again. if self.notified.swap(true, Ordering::SeqCst) { return; } // Wake the executor. self.wake_up.ping(); } } impl<T> Drop for Executor<T> { fn drop(&mut self) { let active_tasks = self.state.active_tasks.borrow_mut().take().unwrap(); // Wake all of the active tasks in order to destroy their runnables. for (_, task) in active_tasks { if let Active::Future(waker) = task { // Don't let a panicking waker blow everything up. // // There is a chance that a future will panic and, during the unwinding process, // drop this executor. However, since the future panicked, there is a possibility // that the internal state of the waker will be invalid in such a way that the waker // panics as well. Since this would be a panic during a panic, Rust will upgrade it // into an abort. // // In the interest of not aborting without a good reason, we just drop the panic here. std::panic::catch_unwind(|| waker.wake()).ok(); } } // Drain the queue in order to drop all of the runnables. while self.state.incoming.try_recv().is_ok() {} } } /// Error generated when trying to schedule a future after the /// executor was destroyed. #[derive(thiserror::Error, Debug)] #[error("the executor was destroyed")] pub struct ExecutorDestroyed; /// Create a new executor, and its associated scheduler /// /// May fail due to OS errors preventing calloop to setup its internal pipes (if your /// process has reatched its file descriptor limit for example). pub fn executor<T>() -> crate::Result<(Executor<T>, Scheduler<T>)> { let (sender, incoming) = mpsc::channel(); let (wake_up, ping) = make_ping()?; let state = Rc::new(State { incoming, active_tasks: RefCell::new(Some(Slab::new())), sender: Arc::new(Sender { sender: Mutex::new(sender), wake_up, notified: AtomicBool::new(false), }), }); Ok(( Executor { state: state.clone(), ping, }, Scheduler { state }, )) } impl<T> EventSource for Executor<T> { type Event = T; type Metadata = (); type Ret = (); type Error = ExecutorError; fn process_events<F>( &mut self, readiness: Readiness, token: Token, mut callback: F, ) -> Result<PostAction, Self::Error> where F: FnMut(T, &mut ()), { let state = &self.state; // Set to the unnotified state. state.sender.notified.store(false, Ordering::SeqCst); let clear_readiness = { let mut clear_readiness = false; // Process runnables, but not too many at a time; better to move onto the next event quickly! for _ in 0..1024 { let runnable = match state.incoming.try_recv() { Ok(runnable) => runnable, Err(_) => { // Make sure to clear the readiness if there are no more runnables. clear_readiness = true; break; } }; //
{ // The runnable must be dropped on its origin thread, since the original future might be // !Send. This channel immediately sends it back to the Executor, which is pinned to the // origin thread. The executor's Drop implementation will force all of the runnables to be // dropped, therefore the channel should always be available. If we can't send the runnable, // it indicates that the above behavior is broken and that unsoundness has occurred. The // only option at this stage is to forget the runnable and leak the future. std::mem::forget(e); unreachable!("Attempted to send runnable to a stopped executor"); }
conditional_block
manifests.go
/sql" "fmt" "time" "github.com/opencontainers/go-digest" "github.com/sapcc/go-bits/logg" "github.com/sapcc/keppel/internal/keppel" ) //query that finds the next manifest to be validated var outdatedManifestSearchQuery = keppel.SimplifyWhitespaceInSQL(` SELECT * FROM manifests WHERE validated_at < $1 ORDER BY validated_at ASC -- oldest manifests first LIMIT 1 -- one at a time `) //ValidateNextManifest validates manifests that have not been validated for more //than 6 hours. At most one manifest is validated per call. If no manifest //needs to be validated, sql.ErrNoRows is returned. func (j *Janitor) ValidateNextManifest() (returnErr error) { defer func() { if returnErr == nil { validateManifestSuccessCounter.Inc() } else if returnErr != sql.ErrNoRows { validateManifestFailedCounter.Inc() returnErr = fmt.Errorf("while validating a manifest: %s", returnErr.Error()) } }() //find manifest var manifest keppel.Manifest maxValidatedAt := j.timeNow().Add(-6 * time.Hour) err := j.db.SelectOne(&manifest, outdatedManifestSearchQuery, maxValidatedAt) if err != nil { if err == sql.ErrNoRows { logg.Debug("no manifests to validate - slowing down...") return sql.ErrNoRows } return err } //find corresponding account and repo var repo keppel.Repository err = j.db.SelectOne(&repo, `SELECT * FROM repos WHERE id = $1`, manifest.RepositoryID) if err != nil { return fmt.Errorf("cannot find repo %d for manifest %s: %s", manifest.RepositoryID, manifest.Digest, err.Error()) } account, err := keppel.FindAccount(j.db, repo.AccountName) if err != nil { return fmt.Errorf("cannot find account for manifest %s/%s: %s", repo.FullName(), manifest.Digest, err.Error()) } //perform validation ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() err = retry(ctx, defaultRetryOpts, func() error { return j.processor().ValidateExistingManifest(*account, repo, &manifest, j.timeNow()) }) if err == nil { //update `validated_at` and reset error message _, err := j.db.Exec(` UPDATE manifests SET validated_at = $1, validation_error_message = '' WHERE repo_id = $2 AND digest = $3`, j.timeNow(), repo.ID, manifest.Digest, ) if err != nil { return err } } else { //attempt to log the error message, and also update the `validated_at` //timestamp to ensure that the ValidateNextManifest() loop does not get //stuck on this one _, updateErr := j.db.Exec(` UPDATE manifests SET validated_at = $1, validation_error_message = $2 WHERE repo_id = $3 AND digest = $4`, j.timeNow(), err.Error(), repo.ID, manifest.Digest, ) if updateErr != nil { err = fmt.Errorf("%s (additional error encountered while recording validation error: %s)", err.Error(), updateErr.Error()) } return err } return nil } var syncManifestRepoSelectQuery = keppel.SimplifyWhitespaceInSQL(` SELECT r.* FROM repos r JOIN accounts a ON r.account_name = a.name WHERE (r.next_manifest_sync_at IS NULL OR r.next_manifest_sync_at < $1) -- only consider repos in replica accounts AND a.upstream_peer_hostname != '' -- repos without any syncs first, then sorted by last sync ORDER BY r.next_manifest_sync_at IS NULL DESC, r.next_manifest_sync_at ASC -- only one repo at a time LIMIT 1 `) var syncManifestEnumerateRefsQuery = keppel.SimplifyWhitespaceInSQL(` SELECT parent_digest, child_digest FROM manifest_manifest_refs WHERE repo_id = $1 `) var syncManifestDoneQuery = keppel.SimplifyWhitespaceInSQL(` UPDATE repos SET next_manifest_sync_at = $2 WHERE id = $1 `) //SyncManifestsInNextRepo finds the next repository in a replica account where //manifests have not been synced 
for more than an hour, and syncs its manifests. //Syncing involves checking with the primary account which manifests have been //deleted there, and replicating the deletions on our side. // //If no repo needs syncing, sql.ErrNoRows is returned. func (j *Janitor) SyncManifestsInNextRepo() (returnErr error) { defer func() { if returnErr == nil { syncManifestsSuccessCounter.Inc() } else if returnErr != sql.ErrNoRows { syncManifestsFailedCounter.Inc() returnErr = fmt.Errorf("while syncing manifests in a replica repo: %s", returnErr.Error()) } }() //find repository to sync var repo keppel.Repository
err := j.db.SelectOne(&repo, syncManifestRepoSelectQuery, j.timeNow()) if err != nil { if err == sql.ErrNoRows { logg.Debug("no accounts to sync manifests in - slowing down...") return sql.ErrNoRows } return err } //find corresponding account account, err := keppel.FindAccount(j.db, repo.AccountName) if err != nil { return fmt.Errorf("cannot find account for repo %s: %s", repo.FullName(), err.Error()) } //do not perform manifest sync while account is in maintenance (maintenance mode blocks all kinds of replication) if !account.InMaintenance { err = j.performManifestSync(*account, repo) if err != nil { return err } } _, err = j.db.Exec(syncManifestDoneQuery, repo.ID, j.timeNow().Add(1*time.Hour)) return err } func (j *Janitor) performManifestSync(account keppel.Account, repo keppel.Repository) error { //enumerate manifests in this repo var manifests []keppel.Manifest _, err := j.db.Select(&manifests, `SELECT * FROM manifests WHERE repo_id = $1`, repo.ID) if err != nil { return fmt.Errorf("cannot list manifests in repo %s: %s", repo.FullName(), err.Error()) } //check which manifests need to be deleted shallDeleteManifest := make(map[string]bool) p := j.processor() for _, manifest := range manifests { ref := keppel.ManifestReference{Digest: digest.Digest(manifest.Digest)} exists, err := p.CheckManifestOnPrimary(account, repo, ref) if err != nil { return fmt.Errorf("cannot check existence of manifest %s/%s on primary account: %s", repo.FullName(), manifest.Digest, err.Error()) } if !exists { shallDeleteManifest[manifest.Digest] = true } } //enumerate manifest-manifest refs in this repo parentDigestsOf := make(map[string][]string) err = keppel.ForeachRow(j.db, syncManifestEnumerateRefsQuery, []interface{}{repo.ID}, func(rows *sql.Rows) error { var ( parentDigest string childDigest string ) err = rows.Scan(&parentDigest, &childDigest) if err != nil { return err } parentDigestsOf[childDigest] = append(parentDigestsOf[childDigest], parentDigest) return nil }) if err != nil { return fmt.Errorf("cannot enumerate manifest-manifest refs in repo %s: %s", repo.FullName(), err.Error()) } //delete manifests in correct order (if there is a parent-child relationship, //we always need to delete the parent manifest first, otherwise the database //will complain because of its consistency checks) if len(shallDeleteManifest) > 0 { logg.Info("deleting %d manifests in repo %s that were deleted on corresponding primary account", len(shallDeleteManifest), repo.FullName()) } manifestWasDeleted := make(map[string]bool) for len(shallDeleteManifest) > 0 { deletedSomething := false MANIFEST: for digest := range shallDeleteManifest { for _, parentDigest := range parentDigestsOf[digest] { if !manifestWasDeleted[parentDigest] { //cannot delete this manifest yet because it's still being referenced - retry in next iteration continue MANIFEST } } //no manifests left that reference this one - we can delete it // //The ordering is important: The DELETE statement could fail if some concurrent //process created a manifest reference in the meantime. If that happens, //and we have already deleted the manifest in the backing storage, we've //caused an inconsistency that we cannot recover from. To avoid that //risk, we do it the other way around. In this way, we could have an //inconsistency where the manifest is deleted from the database, but still //present in the backing storage. But this inconsistency is easier to //recover from: SweepStorageInNextAccount will take care of it soon
random_line_split
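The ordering logic described in the comments of performManifestSync above (delete a manifest only once nothing references it anymore, parents before children) can be isolated into a small routine. The following Go sketch is illustrative only: toDelete, parentsOf and the del callback stand in for the shallDeleteManifest map, the parentDigestsOf map built from manifest_manifest_refs, and keppel's real database/storage deletion.

package main

import "fmt"

// deleteParentsFirst removes digests in passes: a manifest is deleted only after
// every manifest that references it (its "parents") has been deleted in an
// earlier pass. If a full pass makes no progress, something still references the
// remaining manifests and we stop instead of looping forever.
func deleteParentsFirst(toDelete map[string]bool, parentsOf map[string][]string, del func(digest string) error) error {
	deleted := make(map[string]bool)
	for len(toDelete) > 0 {
		progressed := false
	Next:
		for d := range toDelete {
			for _, p := range parentsOf[d] {
				if !deleted[p] {
					continue Next // still referenced - retry in a later pass
				}
			}
			if err := del(d); err != nil {
				return err
			}
			deleted[d] = true
			delete(toDelete, d)
			progressed = true
		}
		if !progressed {
			return fmt.Errorf("%d manifests are still referenced and cannot be deleted", len(toDelete))
		}
	}
	return nil
}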
manifests.go
" "fmt" "time" "github.com/opencontainers/go-digest" "github.com/sapcc/go-bits/logg" "github.com/sapcc/keppel/internal/keppel" ) //query that finds the next manifest to be validated var outdatedManifestSearchQuery = keppel.SimplifyWhitespaceInSQL(` SELECT * FROM manifests WHERE validated_at < $1 ORDER BY validated_at ASC -- oldest manifests first LIMIT 1 -- one at a time `) //ValidateNextManifest validates manifests that have not been validated for more //than 6 hours. At most one manifest is validated per call. If no manifest //needs to be validated, sql.ErrNoRows is returned. func (j *Janitor) ValidateNextManifest() (returnErr error) { defer func() { if returnErr == nil { validateManifestSuccessCounter.Inc() } else if returnErr != sql.ErrNoRows { validateManifestFailedCounter.Inc() returnErr = fmt.Errorf("while validating a manifest: %s", returnErr.Error()) } }() //find manifest var manifest keppel.Manifest maxValidatedAt := j.timeNow().Add(-6 * time.Hour) err := j.db.SelectOne(&manifest, outdatedManifestSearchQuery, maxValidatedAt) if err != nil { if err == sql.ErrNoRows { logg.Debug("no manifests to validate - slowing down...") return sql.ErrNoRows } return err } //find corresponding account and repo var repo keppel.Repository err = j.db.SelectOne(&repo, `SELECT * FROM repos WHERE id = $1`, manifest.RepositoryID) if err != nil { return fmt.Errorf("cannot find repo %d for manifest %s: %s", manifest.RepositoryID, manifest.Digest, err.Error()) } account, err := keppel.FindAccount(j.db, repo.AccountName) if err != nil { return fmt.Errorf("cannot find account for manifest %s/%s: %s", repo.FullName(), manifest.Digest, err.Error()) } //perform validation ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() err = retry(ctx, defaultRetryOpts, func() error { return j.processor().ValidateExistingManifest(*account, repo, &manifest, j.timeNow()) }) if err == nil { //update `validated_at` and reset error message _, err := j.db.Exec(` UPDATE manifests SET validated_at = $1, validation_error_message = '' WHERE repo_id = $2 AND digest = $3`, j.timeNow(), repo.ID, manifest.Digest, ) if err != nil { return err } } else { //attempt to log the error message, and also update the `validated_at` //timestamp to ensure that the ValidateNextManifest() loop does not get //stuck on this one _, updateErr := j.db.Exec(` UPDATE manifests SET validated_at = $1, validation_error_message = $2 WHERE repo_id = $3 AND digest = $4`, j.timeNow(), err.Error(), repo.ID, manifest.Digest, ) if updateErr != nil { err = fmt.Errorf("%s (additional error encountered while recording validation error: %s)", err.Error(), updateErr.Error()) } return err } return nil } var syncManifestRepoSelectQuery = keppel.SimplifyWhitespaceInSQL(` SELECT r.* FROM repos r JOIN accounts a ON r.account_name = a.name WHERE (r.next_manifest_sync_at IS NULL OR r.next_manifest_sync_at < $1) -- only consider repos in replica accounts AND a.upstream_peer_hostname != '' -- repos without any syncs first, then sorted by last sync ORDER BY r.next_manifest_sync_at IS NULL DESC, r.next_manifest_sync_at ASC -- only one repo at a time LIMIT 1 `) var syncManifestEnumerateRefsQuery = keppel.SimplifyWhitespaceInSQL(` SELECT parent_digest, child_digest FROM manifest_manifest_refs WHERE repo_id = $1 `) var syncManifestDoneQuery = keppel.SimplifyWhitespaceInSQL(` UPDATE repos SET next_manifest_sync_at = $2 WHERE id = $1 `) //SyncManifestsInNextRepo finds the next repository in a replica account where //manifests have not been synced for 
more than an hour, and syncs its manifests. //Syncing involves checking with the primary account which manifests have been //deleted there, and replicating the deletions on our side. // //If no repo needs syncing, sql.ErrNoRows is returned. func (j *Janitor) SyncManifestsInNextRepo() (returnErr error) { defer func() { if returnErr == nil { syncManifestsSuccessCounter.Inc() } else if returnErr != sql.ErrNoRows { syncManifestsFailedCounter.Inc() returnErr = fmt.Errorf("while syncing manifests in a replica repo: %s", returnErr.Error()) } }() //find repository to sync var repo keppel.Repository err := j.db.SelectOne(&repo, syncManifestRepoSelectQuery, j.timeNow()) if err != nil { if err == sql.ErrNoRows { logg.Debug("no accounts to sync manifests in - slowing down...") return sql.ErrNoRows } return err } //find corresponding account account, err := keppel.FindAccount(j.db, repo.AccountName) if err != nil { return fmt.Errorf("cannot find account for repo %s: %s", repo.FullName(), err.Error()) } //do not perform manifest sync while account is in maintenance (maintenance mode blocks all kinds of replication) if !account.InMaintenance
_, err = j.db.Exec(syncManifestDoneQuery, repo.ID, j.timeNow().Add(1*time.Hour)) return err } func (j *Janitor) performManifestSync(account keppel.Account, repo keppel.Repository) error { //enumerate manifests in this repo var manifests []keppel.Manifest _, err := j.db.Select(&manifests, `SELECT * FROM manifests WHERE repo_id = $1`, repo.ID) if err != nil { return fmt.Errorf("cannot list manifests in repo %s: %s", repo.FullName(), err.Error()) } //check which manifests need to be deleted shallDeleteManifest := make(map[string]bool) p := j.processor() for _, manifest := range manifests { ref := keppel.ManifestReference{Digest: digest.Digest(manifest.Digest)} exists, err := p.CheckManifestOnPrimary(account, repo, ref) if err != nil { return fmt.Errorf("cannot check existence of manifest %s/%s on primary account: %s", repo.FullName(), manifest.Digest, err.Error()) } if !exists { shallDeleteManifest[manifest.Digest] = true } } //enumerate manifest-manifest refs in this repo parentDigestsOf := make(map[string][]string) err = keppel.ForeachRow(j.db, syncManifestEnumerateRefsQuery, []interface{}{repo.ID}, func(rows *sql.Rows) error { var ( parentDigest string childDigest string ) err = rows.Scan(&parentDigest, &childDigest) if err != nil { return err } parentDigestsOf[childDigest] = append(parentDigestsOf[childDigest], parentDigest) return nil }) if err != nil { return fmt.Errorf("cannot enumerate manifest-manifest refs in repo %s: %s", repo.FullName(), err.Error()) } //delete manifests in correct order (if there is a parent-child relationship, //we always need to delete the parent manifest first, otherwise the database //will complain because of its consistency checks) if len(shallDeleteManifest) > 0 { logg.Info("deleting %d manifests in repo %s that were deleted on corresponding primary account", len(shallDeleteManifest), repo.FullName()) } manifestWasDeleted := make(map[string]bool) for len(shallDeleteManifest) > 0 { deletedSomething := false MANIFEST: for digest := range shallDeleteManifest { for _, parentDigest := range parentDigestsOf[digest] { if !manifestWasDeleted[parentDigest] { //cannot delete this manifest yet because it's still being referenced - retry in next iteration continue MANIFEST } } //no manifests left that reference this one - we can delete it // //The ordering is important: The DELETE statement could fail if some concurrent //process created a manifest reference in the meantime. If that happens, //and we have already deleted the manifest in the backing storage, we've //caused an inconsistency that we cannot recover from. To avoid that //risk, we do it the other way around. In this way, we could have an //inconsistency where the manifest is deleted from the database, but still //present in the backing storage. But this inconsistency is easier to //recover from: SweepStorageInNextAccount will take care of
{ err = j.performManifestSync(*account, repo) if err != nil { return err } }
conditional_block
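ValidateNextManifest (in the prefix of this record) wraps ValidateExistingManifest in a 30-second context timeout and a retry(ctx, defaultRetryOpts, ...) call. The retry helper itself is not part of this dump; the Go sketch below is only a guess at its general shape (a fixed attempt count plus a backoff), not the actual keppel implementation.

package main

import (
	"context"
	"time"
)

// retryOpts is a hypothetical stand-in for keppel's defaultRetryOpts.
type retryOpts struct {
	Attempts int
	Backoff  time.Duration
}

// retry keeps calling fn until it succeeds, the attempts are used up, or the
// surrounding context (e.g. the 30s timeout around manifest validation) expires.
func retry(ctx context.Context, opts retryOpts, fn func() error) error {
	var err error
	for i := 0; i < opts.Attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(opts.Backoff):
			// fall through to the next attempt
		}
	}
	return err
}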
manifests.go
" "fmt" "time" "github.com/opencontainers/go-digest" "github.com/sapcc/go-bits/logg" "github.com/sapcc/keppel/internal/keppel" ) //query that finds the next manifest to be validated var outdatedManifestSearchQuery = keppel.SimplifyWhitespaceInSQL(` SELECT * FROM manifests WHERE validated_at < $1 ORDER BY validated_at ASC -- oldest manifests first LIMIT 1 -- one at a time `) //ValidateNextManifest validates manifests that have not been validated for more //than 6 hours. At most one manifest is validated per call. If no manifest //needs to be validated, sql.ErrNoRows is returned. func (j *Janitor) ValidateNextManifest() (returnErr error) { defer func() { if returnErr == nil { validateManifestSuccessCounter.Inc() } else if returnErr != sql.ErrNoRows { validateManifestFailedCounter.Inc() returnErr = fmt.Errorf("while validating a manifest: %s", returnErr.Error()) } }() //find manifest var manifest keppel.Manifest maxValidatedAt := j.timeNow().Add(-6 * time.Hour) err := j.db.SelectOne(&manifest, outdatedManifestSearchQuery, maxValidatedAt) if err != nil { if err == sql.ErrNoRows { logg.Debug("no manifests to validate - slowing down...") return sql.ErrNoRows } return err } //find corresponding account and repo var repo keppel.Repository err = j.db.SelectOne(&repo, `SELECT * FROM repos WHERE id = $1`, manifest.RepositoryID) if err != nil { return fmt.Errorf("cannot find repo %d for manifest %s: %s", manifest.RepositoryID, manifest.Digest, err.Error()) } account, err := keppel.FindAccount(j.db, repo.AccountName) if err != nil { return fmt.Errorf("cannot find account for manifest %s/%s: %s", repo.FullName(), manifest.Digest, err.Error()) } //perform validation ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() err = retry(ctx, defaultRetryOpts, func() error { return j.processor().ValidateExistingManifest(*account, repo, &manifest, j.timeNow()) }) if err == nil { //update `validated_at` and reset error message _, err := j.db.Exec(` UPDATE manifests SET validated_at = $1, validation_error_message = '' WHERE repo_id = $2 AND digest = $3`, j.timeNow(), repo.ID, manifest.Digest, ) if err != nil { return err } } else { //attempt to log the error message, and also update the `validated_at` //timestamp to ensure that the ValidateNextManifest() loop does not get //stuck on this one _, updateErr := j.db.Exec(` UPDATE manifests SET validated_at = $1, validation_error_message = $2 WHERE repo_id = $3 AND digest = $4`, j.timeNow(), err.Error(), repo.ID, manifest.Digest, ) if updateErr != nil { err = fmt.Errorf("%s (additional error encountered while recording validation error: %s)", err.Error(), updateErr.Error()) } return err } return nil } var syncManifestRepoSelectQuery = keppel.SimplifyWhitespaceInSQL(` SELECT r.* FROM repos r JOIN accounts a ON r.account_name = a.name WHERE (r.next_manifest_sync_at IS NULL OR r.next_manifest_sync_at < $1) -- only consider repos in replica accounts AND a.upstream_peer_hostname != '' -- repos without any syncs first, then sorted by last sync ORDER BY r.next_manifest_sync_at IS NULL DESC, r.next_manifest_sync_at ASC -- only one repo at a time LIMIT 1 `) var syncManifestEnumerateRefsQuery = keppel.SimplifyWhitespaceInSQL(` SELECT parent_digest, child_digest FROM manifest_manifest_refs WHERE repo_id = $1 `) var syncManifestDoneQuery = keppel.SimplifyWhitespaceInSQL(` UPDATE repos SET next_manifest_sync_at = $2 WHERE id = $1 `) //SyncManifestsInNextRepo finds the next repository in a replica account where //manifests have not been synced for 
more than an hour, and syncs its manifests. //Syncing involves checking with the primary account which manifests have been //deleted there, and replicating the deletions on our side. // //If no repo needs syncing, sql.ErrNoRows is returned. func (j *Janitor)
() (returnErr error) { defer func() { if returnErr == nil { syncManifestsSuccessCounter.Inc() } else if returnErr != sql.ErrNoRows { syncManifestsFailedCounter.Inc() returnErr = fmt.Errorf("while syncing manifests in a replica repo: %s", returnErr.Error()) } }() //find repository to sync var repo keppel.Repository err := j.db.SelectOne(&repo, syncManifestRepoSelectQuery, j.timeNow()) if err != nil { if err == sql.ErrNoRows { logg.Debug("no accounts to sync manifests in - slowing down...") return sql.ErrNoRows } return err } //find corresponding account account, err := keppel.FindAccount(j.db, repo.AccountName) if err != nil { return fmt.Errorf("cannot find account for repo %s: %s", repo.FullName(), err.Error()) } //do not perform manifest sync while account is in maintenance (maintenance mode blocks all kinds of replication) if !account.InMaintenance { err = j.performManifestSync(*account, repo) if err != nil { return err } } _, err = j.db.Exec(syncManifestDoneQuery, repo.ID, j.timeNow().Add(1*time.Hour)) return err } func (j *Janitor) performManifestSync(account keppel.Account, repo keppel.Repository) error { //enumerate manifests in this repo var manifests []keppel.Manifest _, err := j.db.Select(&manifests, `SELECT * FROM manifests WHERE repo_id = $1`, repo.ID) if err != nil { return fmt.Errorf("cannot list manifests in repo %s: %s", repo.FullName(), err.Error()) } //check which manifests need to be deleted shallDeleteManifest := make(map[string]bool) p := j.processor() for _, manifest := range manifests { ref := keppel.ManifestReference{Digest: digest.Digest(manifest.Digest)} exists, err := p.CheckManifestOnPrimary(account, repo, ref) if err != nil { return fmt.Errorf("cannot check existence of manifest %s/%s on primary account: %s", repo.FullName(), manifest.Digest, err.Error()) } if !exists { shallDeleteManifest[manifest.Digest] = true } } //enumerate manifest-manifest refs in this repo parentDigestsOf := make(map[string][]string) err = keppel.ForeachRow(j.db, syncManifestEnumerateRefsQuery, []interface{}{repo.ID}, func(rows *sql.Rows) error { var ( parentDigest string childDigest string ) err = rows.Scan(&parentDigest, &childDigest) if err != nil { return err } parentDigestsOf[childDigest] = append(parentDigestsOf[childDigest], parentDigest) return nil }) if err != nil { return fmt.Errorf("cannot enumerate manifest-manifest refs in repo %s: %s", repo.FullName(), err.Error()) } //delete manifests in correct order (if there is a parent-child relationship, //we always need to delete the parent manifest first, otherwise the database //will complain because of its consistency checks) if len(shallDeleteManifest) > 0 { logg.Info("deleting %d manifests in repo %s that were deleted on corresponding primary account", len(shallDeleteManifest), repo.FullName()) } manifestWasDeleted := make(map[string]bool) for len(shallDeleteManifest) > 0 { deletedSomething := false MANIFEST: for digest := range shallDeleteManifest { for _, parentDigest := range parentDigestsOf[digest] { if !manifestWasDeleted[parentDigest] { //cannot delete this manifest yet because it's still being referenced - retry in next iteration continue MANIFEST } } //no manifests left that reference this one - we can delete it // //The ordering is important: The DELETE statement could fail if some concurrent //process created a manifest reference in the meantime. If that happens, //and we have already deleted the manifest in the backing storage, we've //caused an inconsistency that we cannot recover from. 
To avoid that //risk, we do it the other way around. In this way, we could have an //inconsistency where the manifest is deleted from the database, but still //present in the backing storage. But this inconsistency is easier to //recover from: SweepStorageInNextAccount will take care of it
SyncManifestsInNextRepo
identifier_name
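The parentDigestsOf map in performManifestSync is filled through keppel.ForeachRow with a row callback. With the standard library alone, the same scan-into-multimap step looks roughly like the sketch below; the *sql.DB handle, context, and repoID parameter are assumptions for the example, while the query text is the one from syncManifestEnumerateRefsQuery.

package main

import (
	"context"
	"database/sql"
)

// collectParentDigests builds the child -> parents map that the deletion loop
// consults, using database/sql directly instead of the ForeachRow helper.
func collectParentDigests(ctx context.Context, db *sql.DB, repoID int64) (map[string][]string, error) {
	rows, err := db.QueryContext(ctx,
		`SELECT parent_digest, child_digest FROM manifest_manifest_refs WHERE repo_id = $1`, repoID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	parentDigestsOf := make(map[string][]string)
	for rows.Next() {
		var parentDigest, childDigest string
		if err := rows.Scan(&parentDigest, &childDigest); err != nil {
			return nil, err
		}
		parentDigestsOf[childDigest] = append(parentDigestsOf[childDigest], parentDigest)
	}
	return parentDigestsOf, rows.Err()
}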
manifests.go
" "fmt" "time" "github.com/opencontainers/go-digest" "github.com/sapcc/go-bits/logg" "github.com/sapcc/keppel/internal/keppel" ) //query that finds the next manifest to be validated var outdatedManifestSearchQuery = keppel.SimplifyWhitespaceInSQL(` SELECT * FROM manifests WHERE validated_at < $1 ORDER BY validated_at ASC -- oldest manifests first LIMIT 1 -- one at a time `) //ValidateNextManifest validates manifests that have not been validated for more //than 6 hours. At most one manifest is validated per call. If no manifest //needs to be validated, sql.ErrNoRows is returned. func (j *Janitor) ValidateNextManifest() (returnErr error)
} //find corresponding account and repo var repo keppel.Repository err = j.db.SelectOne(&repo, `SELECT * FROM repos WHERE id = $1`, manifest.RepositoryID) if err != nil { return fmt.Errorf("cannot find repo %d for manifest %s: %s", manifest.RepositoryID, manifest.Digest, err.Error()) } account, err := keppel.FindAccount(j.db, repo.AccountName) if err != nil { return fmt.Errorf("cannot find account for manifest %s/%s: %s", repo.FullName(), manifest.Digest, err.Error()) } //perform validation ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() err = retry(ctx, defaultRetryOpts, func() error { return j.processor().ValidateExistingManifest(*account, repo, &manifest, j.timeNow()) }) if err == nil { //update `validated_at` and reset error message _, err := j.db.Exec(` UPDATE manifests SET validated_at = $1, validation_error_message = '' WHERE repo_id = $2 AND digest = $3`, j.timeNow(), repo.ID, manifest.Digest, ) if err != nil { return err } } else { //attempt to log the error message, and also update the `validated_at` //timestamp to ensure that the ValidateNextManifest() loop does not get //stuck on this one _, updateErr := j.db.Exec(` UPDATE manifests SET validated_at = $1, validation_error_message = $2 WHERE repo_id = $3 AND digest = $4`, j.timeNow(), err.Error(), repo.ID, manifest.Digest, ) if updateErr != nil { err = fmt.Errorf("%s (additional error encountered while recording validation error: %s)", err.Error(), updateErr.Error()) } return err } return nil } var syncManifestRepoSelectQuery = keppel.SimplifyWhitespaceInSQL(` SELECT r.* FROM repos r JOIN accounts a ON r.account_name = a.name WHERE (r.next_manifest_sync_at IS NULL OR r.next_manifest_sync_at < $1) -- only consider repos in replica accounts AND a.upstream_peer_hostname != '' -- repos without any syncs first, then sorted by last sync ORDER BY r.next_manifest_sync_at IS NULL DESC, r.next_manifest_sync_at ASC -- only one repo at a time LIMIT 1 `) var syncManifestEnumerateRefsQuery = keppel.SimplifyWhitespaceInSQL(` SELECT parent_digest, child_digest FROM manifest_manifest_refs WHERE repo_id = $1 `) var syncManifestDoneQuery = keppel.SimplifyWhitespaceInSQL(` UPDATE repos SET next_manifest_sync_at = $2 WHERE id = $1 `) //SyncManifestsInNextRepo finds the next repository in a replica account where //manifests have not been synced for more than an hour, and syncs its manifests. //Syncing involves checking with the primary account which manifests have been //deleted there, and replicating the deletions on our side. // //If no repo needs syncing, sql.ErrNoRows is returned. 
func (j *Janitor) SyncManifestsInNextRepo() (returnErr error) { defer func() { if returnErr == nil { syncManifestsSuccessCounter.Inc() } else if returnErr != sql.ErrNoRows { syncManifestsFailedCounter.Inc() returnErr = fmt.Errorf("while syncing manifests in a replica repo: %s", returnErr.Error()) } }() //find repository to sync var repo keppel.Repository err := j.db.SelectOne(&repo, syncManifestRepoSelectQuery, j.timeNow()) if err != nil { if err == sql.ErrNoRows { logg.Debug("no accounts to sync manifests in - slowing down...") return sql.ErrNoRows } return err } //find corresponding account account, err := keppel.FindAccount(j.db, repo.AccountName) if err != nil { return fmt.Errorf("cannot find account for repo %s: %s", repo.FullName(), err.Error()) } //do not perform manifest sync while account is in maintenance (maintenance mode blocks all kinds of replication) if !account.InMaintenance { err = j.performManifestSync(*account, repo) if err != nil { return err } } _, err = j.db.Exec(syncManifestDoneQuery, repo.ID, j.timeNow().Add(1*time.Hour)) return err } func (j *Janitor) performManifestSync(account keppel.Account, repo keppel.Repository) error { //enumerate manifests in this repo var manifests []keppel.Manifest _, err := j.db.Select(&manifests, `SELECT * FROM manifests WHERE repo_id = $1`, repo.ID) if err != nil { return fmt.Errorf("cannot list manifests in repo %s: %s", repo.FullName(), err.Error()) } //check which manifests need to be deleted shallDeleteManifest := make(map[string]bool) p := j.processor() for _, manifest := range manifests { ref := keppel.ManifestReference{Digest: digest.Digest(manifest.Digest)} exists, err := p.CheckManifestOnPrimary(account, repo, ref) if err != nil { return fmt.Errorf("cannot check existence of manifest %s/%s on primary account: %s", repo.FullName(), manifest.Digest, err.Error()) } if !exists { shallDeleteManifest[manifest.Digest] = true } } //enumerate manifest-manifest refs in this repo parentDigestsOf := make(map[string][]string) err = keppel.ForeachRow(j.db, syncManifestEnumerateRefsQuery, []interface{}{repo.ID}, func(rows *sql.Rows) error { var ( parentDigest string childDigest string ) err = rows.Scan(&parentDigest, &childDigest) if err != nil { return err } parentDigestsOf[childDigest] = append(parentDigestsOf[childDigest], parentDigest) return nil }) if err != nil { return fmt.Errorf("cannot enumerate manifest-manifest refs in repo %s: %s", repo.FullName(), err.Error()) } //delete manifests in correct order (if there is a parent-child relationship, //we always need to delete the parent manifest first, otherwise the database //will complain because of its consistency checks) if len(shallDeleteManifest) > 0 { logg.Info("deleting %d manifests in repo %s that were deleted on corresponding primary account", len(shallDeleteManifest), repo.FullName()) } manifestWasDeleted := make(map[string]bool) for len(shallDeleteManifest) > 0 { deletedSomething := false MANIFEST: for digest := range shallDeleteManifest { for _, parentDigest := range parentDigestsOf[digest] { if !manifestWasDeleted[parentDigest] { //cannot delete this manifest yet because it's still being referenced - retry in next iteration continue MANIFEST } } //no manifests left that reference this one - we can delete it // //The ordering is important: The DELETE statement could fail if some concurrent //process created a manifest reference in the meantime. 
If that happens, //and we have already deleted the manifest in the backing storage, we've //caused an inconsistency that we cannot recover from. To avoid that //risk, we do it the other way around. In this way, we could have an //inconsistency where the manifest is deleted from the database, but still //present in the backing storage. But this inconsistency is easier to //recover from: SweepStorageInNextAccount will take care of it soon
{ defer func() { if returnErr == nil { validateManifestSuccessCounter.Inc() } else if returnErr != sql.ErrNoRows { validateManifestFailedCounter.Inc() returnErr = fmt.Errorf("while validating a manifest: %s", returnErr.Error()) } }() //find manifest var manifest keppel.Manifest maxValidatedAt := j.timeNow().Add(-6 * time.Hour) err := j.db.SelectOne(&manifest, outdatedManifestSearchQuery, maxValidatedAt) if err != nil { if err == sql.ErrNoRows { logg.Debug("no manifests to validate - slowing down...") return sql.ErrNoRows } return err
identifier_body
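The identifier_body extracted in this record is the deferred closure at the top of ValidateNextManifest; the same pattern (named return value plus defer) also opens SyncManifestsInNextRepo. The deferred function sees the final returnErr, increments a success or failure counter, and wraps real errors while letting sql.ErrNoRows pass through so callers can back off. A reduced Go sketch of the pattern, with a plain counter type standing in for the Prometheus counters used by the real code:

package main

import (
	"database/sql"
	"fmt"
)

type counter struct{ n int }

func (c *counter) Inc() { c.n++ }

var successCounter, failedCounter counter

// runJanitorTask wraps a unit of janitor work in the same named-return/defer
// bookkeeping used by ValidateNextManifest and SyncManifestsInNextRepo.
func runJanitorTask(work func() error) (returnErr error) {
	defer func() {
		if returnErr == nil {
			successCounter.Inc()
		} else if returnErr != sql.ErrNoRows {
			failedCounter.Inc()
			returnErr = fmt.Errorf("while running janitor task: %s", returnErr.Error())
		}
		// sql.ErrNoRows is passed through untouched: it means "nothing to do right now"
	}()
	return work()
}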
path_through.rs
} async fn do_forget(&self, forgets: &[Forget]) { let mut inodes = self.inodes.lock().await; for forget in forgets { if let Entry::Occupied(mut entry) = inodes.map.entry(forget.ino()) { let refcount = { let mut inode = entry.get_mut().lock().await; inode.refcount = inode.refcount.saturating_sub(forget.nlookup()); inode.refcount }; if refcount == 0 { tracing::debug!("remove ino={}", entry.key()); drop(entry.remove()); } } } } async fn do_getattr(&self, op: &op::Getattr<'_>) -> io::Result<ReplyAttr> { let inodes = self.inodes.lock().await; let inode = inodes.get(op.ino()).ok_or_else(no_entry)?; let inode = inode.lock().await; let attr = self.get_attr(&inode.path).await?; Ok(ReplyAttr::new(attr)) } async fn do_setattr(&self, op: &op::Setattr<'_>) -> io::Result<ReplyAttr> { let file = match op.fh() { Some(fh) => { let files = self.files.lock().await; files.get(fh as usize).cloned() } None => None, }; let mut file = match file { Some(ref file) => { let mut file = file.lock().await; file.file.sync_all().await?; Some(file) // keep file lock } None => None, }; let inode = { let inodes = self.inodes.lock().await; inodes.get(op.ino()).ok_or_else(no_entry)? }; let inode = inode.lock().await; let path = Arc::new(self.source.join(&inode.path)); enum FileRef<'a> { Borrowed(&'a mut File), Owned(File), } impl AsMut<File> for FileRef<'_> { fn as_mut(&mut self) -> &mut File { match self { Self::Borrowed(file) => file, Self::Owned(file) => file, } } } let mut file = match file { Some(ref mut file) => FileRef::Borrowed(&mut file.file), None => FileRef::Owned(File::open(&*path).await?), }; // chmod if let Some(mode) = op.mode() { let perm = std::fs::Permissions::from_mode(mode); file.as_mut().set_permissions(perm).await?; } // truncate if let Some(size) = op.size() { file.as_mut().set_len(size).await?; } // chown match (op.uid(), op.gid()) { (None, None) => (), (uid, gid) => { let path = path.clone(); let uid = uid.map(nix::unistd::Uid::from_raw); let gid = gid.map(nix::unistd::Gid::from_raw); tokio::task::spawn_blocking(move || nix::unistd::chown(&*path, uid, gid)) .await? 
.map_err(nix_to_io_error)?; } } // TODO: utimes let attr = self.get_attr(&inode.path).await?; Ok(ReplyAttr::new(attr)) } async fn do_readlink(&self, op: &op::Readlink<'_>) -> io::Result<PathBuf> { let inodes = self.inodes.lock().await; let inode = inodes.get(op.ino()).ok_or_else(no_entry)?; let inode = inode.lock().await; tokio::fs::read_link(self.source.join(&inode.path)).await } async fn do_opendir(&self, op: &op::Opendir<'_>) -> io::Result<ReplyOpen> { let inodes = self.inodes.lock().await; let inode = inodes.get(op.ino()).ok_or_else(no_entry)?; let inode = inode.lock().await; let dir = DirHandle { read_dir: tokio::fs::read_dir(self.source.join(&inode.path)).await?, last_entry: None, offset: 1, }; let mut dirs = self.dirs.lock().await; let key = dirs.insert(Arc::new(Mutex::new(dir))); Ok(ReplyOpen::new(key as u64)) } async fn do_readdir(&self, op: &op::Readdir<'_>) -> io::Result<impl Reply> { let dirs = self.dirs.lock().await; let dir = dirs .get(op.fh() as usize) .cloned() .ok_or_else(|| io::Error::from_raw_os_error(libc::EIO))?; let mut dir = dir.lock().await; let dir = &mut *dir; let mut entries = vec![]; let mut total_len = 0; if let Some(mut entry) = dir.last_entry.take() { if total_len + entry.as_ref().len() > op.size() as usize { return Err(io::Error::from_raw_os_error(libc::ERANGE)); } entry.set_offset(dir.offset); total_len += entry.as_ref().len(); dir.offset += 1; entries.push(entry); } while let Some(entry) = dir.read_dir.next_entry().await? { match entry.file_name() { name if name.as_bytes() == b"." || name.as_bytes() == b".." => continue, _ => (), } let metadata = entry.metadata().await?; let mut entry = DirEntry::new(entry.file_name(), metadata.ino(), 0); if total_len + entry.as_ref().len() <= op.size() as usize { entry.set_offset(dir.offset); total_len += entry.as_ref().len(); dir.offset += 1; entries.push(entry); } else { dir.last_entry.replace(entry); } } Ok(entries) } async fn do_releasedir(&self, op: &op::Releasedir<'_>) -> io::Result<()> { let mut dirs = self.dirs.lock().await; let dir = dirs.remove(op.fh() as usize); drop(dir); Ok(()) } async fn do_open(&self, op: &op::Open<'_>) -> io::Result<ReplyOpen> { let inodes = self.inodes.lock().await; let inode = inodes.get(op.ino()).ok_or_else(no_entry)?; let inode = inode.lock().await; let options = OpenOptions::from({ let mut options = std::fs::OpenOptions::new(); match op.flags() as i32 & libc::O_ACCMODE { libc::O_RDONLY => { options.read(true); } libc::O_WRONLY => { options.write(true); } libc::O_RDWR => { options.read(true).write(true); } _ => (), } options.custom_flags(op.flags() as i32 & !libc::O_NOFOLLOW); options }); let file = FileHandle { file: options.open(self.source.join(&inode.path)).await?, }; let mut files = self.files.lock().await; let key = files.insert(Arc::new(Mutex::new(file))); Ok(ReplyOpen::new(key as u64)) } async fn do_read(&self, op: &op::Read<'_>) -> io::Result<impl Reply> { let files = self.files.lock().await; let file = files .get(op.fh() as usize) .cloned() .ok_or_else(|| io::Error::from_raw_os_error(libc::EIO))?; let mut file = file.lock().await; let file = &mut file.file; file.seek(io::SeekFrom::Start(op.offset())).await?; let mut buf = Vec::<u8>::with_capacity(op.size() as usize); use tokio::io::AsyncReadExt; tokio::io::copy(&mut file.take(op.size() as u64), &mut buf).await?; Ok(buf) } async fn do_write<R: ?Sized>( &self, op: &op::Write<'_>, reader: &mut R, ) -> io::Result<ReplyWrite> where R: Reader + Unpin, { let files = self.files.lock().await; let file = files .get(op.fh() as usize) 
.cloned() .ok_or_else(|| io::Error::from_raw_os_error(libc::EIO))?; let mut file = file.lock().await; let file = &mut file.file; file.seek(io::SeekFrom::Start(op.offset())).await?; // At here, the data is transferred via the temporary buffer due to // the incompatibility between the I/O abstraction in `futures` and // `tokio`. // // In order to efficiently transfer the large files, both of zero // copying support in `polyfuse` and resolution of impedance mismatch // between `futures::io` and `tokio::io` are required.
let mut buf = Vec::with_capacity(op.size() as usize);
random_line_split
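do_read in the Rust chunk above seeks to op.offset() and copies at most op.size() bytes into a freshly allocated buffer. Expressed in Go (chosen only because it is the dominant language in this dump, not because the polyfuse example uses it), the same bounded read-at-offset step could look like this; the function name and signature are illustrative.

package main

import (
	"io"
	"os"
)

// readChunk returns up to size bytes starting at offset, tolerating a short
// read at end of file - the behaviour a FUSE read handler needs.
func readChunk(f *os.File, offset int64, size int) ([]byte, error) {
	buf := make([]byte, size)
	n, err := f.ReadAt(buf, offset)
	if err != nil && err != io.EOF {
		return nil, err
	}
	return buf[:n], nil
}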
path_through.rs
(&self, path: &Path) -> Option<Arc<Mutex<INode>>> { let ino = self.path_to_ino.get(path).copied()?; self.get(ino) } } struct VacantEntry<'a> { table: &'a mut INodeTable, ino: Ino, } impl VacantEntry<'_> { fn insert(mut self, inode: INode) { let path = inode.path.clone(); self.table.map.insert(self.ino, Arc::new(Mutex::new(inode))); self.table.path_to_ino.insert(path, self.ino); self.table.next_ino += 1; } } struct DirHandle { read_dir: ReadDir, last_entry: Option<DirEntry>, offset: u64, } struct FileHandle { file: File, } struct PathThrough { source: PathBuf, inodes: Mutex<INodeTable>, dirs: Mutex<Slab<Arc<Mutex<DirHandle>>>>, files: Mutex<Slab<Arc<Mutex<FileHandle>>>>, } impl PathThrough { fn new(source: PathBuf) -> io::Result<Self> { let source = source.canonicalize()?; let mut inodes = INodeTable::new(); inodes.vacant_entry().insert(INode { ino: 1, path: PathBuf::new(), refcount: u64::max_value() / 2, }); Ok(Self { source, inodes: Mutex::new(inodes), dirs: Mutex::default(), files: Mutex::default(), }) } fn make_entry_out(&self, ino: Ino, attr: FileAttr) -> io::Result<ReplyEntry> { let mut reply = ReplyEntry::default(); reply.ino(ino); reply.attr(attr); Ok(reply) } async fn get_attr(&self, path: impl AsRef<Path>) -> io::Result<FileAttr> { let metadata = tokio::fs::symlink_metadata(self.source.join(path)).await?; metadata .try_into() .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) } async fn do_lookup(&self, op: &op::Lookup<'_>) -> io::Result<ReplyEntry> { let mut inodes = self.inodes.lock().await; let parent = inodes.get(op.parent()).ok_or_else(no_entry)?; let parent = parent.lock().await; let path = parent.path.join(op.name()); let metadata = self.get_attr(&path).await?; let ino; match inodes.get_path(&path) { Some(inode) => { let mut inode = inode.lock().await; ino = inode.ino; inode.refcount += 1; } None => { let entry = inodes.vacant_entry(); ino = entry.ino; entry.insert(INode { ino, path, refcount: 1, }) } } self.make_entry_out(ino, metadata) } async fn do_forget(&self, forgets: &[Forget]) { let mut inodes = self.inodes.lock().await; for forget in forgets { if let Entry::Occupied(mut entry) = inodes.map.entry(forget.ino()) { let refcount = { let mut inode = entry.get_mut().lock().await; inode.refcount = inode.refcount.saturating_sub(forget.nlookup()); inode.refcount }; if refcount == 0 { tracing::debug!("remove ino={}", entry.key()); drop(entry.remove()); } } } } async fn do_getattr(&self, op: &op::Getattr<'_>) -> io::Result<ReplyAttr> { let inodes = self.inodes.lock().await; let inode = inodes.get(op.ino()).ok_or_else(no_entry)?; let inode = inode.lock().await; let attr = self.get_attr(&inode.path).await?; Ok(ReplyAttr::new(attr)) } async fn do_setattr(&self, op: &op::Setattr<'_>) -> io::Result<ReplyAttr> { let file = match op.fh() { Some(fh) => { let files = self.files.lock().await; files.get(fh as usize).cloned() } None => None, }; let mut file = match file { Some(ref file) => { let mut file = file.lock().await; file.file.sync_all().await?; Some(file) // keep file lock } None => None, }; let inode = { let inodes = self.inodes.lock().await; inodes.get(op.ino()).ok_or_else(no_entry)? 
}; let inode = inode.lock().await; let path = Arc::new(self.source.join(&inode.path)); enum FileRef<'a> { Borrowed(&'a mut File), Owned(File), } impl AsMut<File> for FileRef<'_> { fn as_mut(&mut self) -> &mut File { match self { Self::Borrowed(file) => file, Self::Owned(file) => file, } } } let mut file = match file { Some(ref mut file) => FileRef::Borrowed(&mut file.file), None => FileRef::Owned(File::open(&*path).await?), }; // chmod if let Some(mode) = op.mode() { let perm = std::fs::Permissions::from_mode(mode); file.as_mut().set_permissions(perm).await?; } // truncate if let Some(size) = op.size() { file.as_mut().set_len(size).await?; } // chown match (op.uid(), op.gid()) { (None, None) => (), (uid, gid) => { let path = path.clone(); let uid = uid.map(nix::unistd::Uid::from_raw); let gid = gid.map(nix::unistd::Gid::from_raw); tokio::task::spawn_blocking(move || nix::unistd::chown(&*path, uid, gid)) .await? .map_err(nix_to_io_error)?; } } // TODO: utimes let attr = self.get_attr(&inode.path).await?; Ok(ReplyAttr::new(attr)) } async fn do_readlink(&self, op: &op::Readlink<'_>) -> io::Result<PathBuf> { let inodes = self.inodes.lock().await; let inode = inodes.get(op.ino()).ok_or_else(no_entry)?; let inode = inode.lock().await; tokio::fs::read_link(self.source.join(&inode.path)).await } async fn do_opendir(&self, op: &op::Opendir<'_>) -> io::Result<ReplyOpen> { let inodes = self.inodes.lock().await; let inode = inodes.get(op.ino()).ok_or_else(no_entry)?; let inode = inode.lock().await; let dir = DirHandle { read_dir: tokio::fs::read_dir(self.source.join(&inode.path)).await?, last_entry: None, offset: 1, }; let mut dirs = self.dirs.lock().await; let key = dirs.insert(Arc::new(Mutex::new(dir))); Ok(ReplyOpen::new(key as u64)) } async fn do_readdir(&self, op: &op::Readdir<'_>) -> io::Result<impl Reply> { let dirs = self.dirs.lock().await; let dir = dirs .get(op.fh() as usize) .cloned() .ok_or_else(|| io::Error::from_raw_os_error(libc::EIO))?; let mut dir = dir.lock().await; let dir = &mut *dir; let mut entries = vec![]; let mut total_len = 0; if let Some(mut entry) = dir.last_entry.take() { if total_len + entry.as_ref().len() > op.size() as usize { return Err(io::Error::from_raw_os_error(libc::ERANGE)); } entry.set_offset(dir.offset); total_len += entry.as_ref().len(); dir.offset += 1; entries.push(entry); } while let Some(entry) = dir.read_dir.next_entry().await? { match entry.file_name() { name if name.as_bytes() == b"." || name.as_bytes() == b".." => continue, _ => (), } let metadata = entry.metadata().await?; let mut entry = DirEntry::new(entry.file_name(), metadata.ino(), 0); if total_len + entry.as_ref().len() <= op.size() as usize { entry.set_offset(dir.offset); total_len += entry.as_ref().len(); dir.offset += 1; entries.push(entry); } else { dir.last_entry.replace(entry); } } Ok(entries) } async fn do_releasedir(&self, op: &op::Releasedir<'_>) -> io::Result<()> { let mut dirs = self.dirs.lock().await; let dir = dirs
get_path
identifier_name
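The middle of this record is just the identifier get_path: INodeTable keeps a forward map (ino -> inode) plus a path_to_ino reverse index so that do_lookup can reuse an existing inode and bump its refcount instead of allocating a new number for a path it has already handed out. A compact Go rendering of that double-indexed table (again Go only for consistency with the rest of this dump; all names and types here are illustrative):

package main

type inode struct {
	ino      uint64
	path     string
	refcount uint64
}

// inodeTable mirrors INodeTable: byIno is the primary index, pathToIno the
// reverse index that get_path consults.
type inodeTable struct {
	nextIno   uint64
	byIno     map[uint64]*inode
	pathToIno map[string]uint64
}

func newInodeTable() *inodeTable {
	return &inodeTable{nextIno: 1, byIno: map[uint64]*inode{}, pathToIno: map[string]uint64{}}
}

// lookup reuses an existing inode for a known path (incrementing its refcount,
// as do_lookup does) or inserts a fresh one under the next free inode number.
func (t *inodeTable) lookup(path string) *inode {
	if ino, ok := t.pathToIno[path]; ok {
		n := t.byIno[ino]
		n.refcount++
		return n
	}
	n := &inode{ino: t.nextIno, path: path, refcount: 1}
	t.byIno[n.ino] = n
	t.pathToIno[path] = n.ino
	t.nextIno++
	return n
}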
vvMakeVjetsShapes.py
histos2D_nonRes[key].Write() histos2D_nonRes_l2[key].Write() histos2D[key].Write() ########################### for leg in legs: histos = {} histos_nonRes = {} scales={} scales_nonRes={} purity = "LPLP" if options.output.find("HPHP")!=-1:purity = "HPHP" if options.output.find("HPLP")!=-1:purity = "HPLP" fitter=Fitter(['x']) fitter.jetResonanceVjets('model','x') if options.fixPars!="1": fixedPars =options.fixPars.split(',') if len(fixedPars) > 0: print " - Fix parameters: ", fixedPars for par in fixedPars: parVal = par.split(':') fitter.w.var(parVal[0]).setVal(float(parVal[1])) fitter.w.var(parVal[0]).setConstant(1) for key in histos2D.keys(): if leg=="l1": histos_nonRes [key] = histos2D_nonRes[key].ProjectionY() histos [key] = histos2D[key].ProjectionY() else: histos_nonRes [key] = histos2D_nonRes_l2[key].ProjectionY() histos [key] = histos2D_l2[key].ProjectionY() histos_nonRes[key].SetName(key+"_nonRes") histos [key].SetName(key) scales [key] = histos[key].Integral() scales_nonRes [key] = histos_nonRes[key].Integral() # combine ttbar and wjets contributions: Wjets = histos["Wjets"] Wjets_nonRes = histos_nonRes["Wjets"] if 'TTbar' in histos.keys(): Wjets.Add(histos["TTbar"]); Wjets_nonRes.Add(histos_nonRes["TTbar"]) keys = ["Wjets"] Wjets_params = doFit(fitter,Wjets,Wjets_nonRes,"Wjets_TTbar",leg) params.update(Wjets_params) params["ratio_Res_nonRes_"+leg]= {'ratio':scales["Wjets"]/scales_nonRes["Wjets"] } if 'Zjets' in histos.keys(): keys.append("Zjets") fitterZ=Fitter(['x']) fitterZ.jetResonanceVjets('model','x') Zjets_params = doFit(fitterZ,histos["Zjets"],histos_nonRes["Zjets"],"Zjets",leg) params.update(Wjets_params) params.update(Zjets_params) params["ratio_Res_nonRes_"+leg]= {'ratio': scales["Wjets"]/scales_nonRes["Wjets"] , 'ratio_Z': scales["Zjets"]/scales_nonRes["Zjets"]} if "Zjets" in histos.keys() and "TTbar" in histos.keys(): params["ratio_Res_nonRes_"+leg]= {'ratio': scales["Wjets"]/scales_nonRes["Wjets"] , 'ratio_Z': scales["Zjets"]/scales_nonRes["Zjets"],'ratio_TT': scales["TTbar"]/scales_nonRes["TTbar"]} fitter.drawVjets("Vjets_mjetRes_"+leg+"_"+purity+".pdf",histos,histos_nonRes,scales,scales_nonRes) del histos,histos_nonRes,fitter,fitterZ graphs={} projections=[[1,3],[4,6],[7,10],[11,15],[16,20],[21,26],[27,35],[36,50],[51,61],[62,75],[76,80]] for key in keys: graphs[key]=ROOT.TGraphErrors() n=0 for p in projections: i1 = histos2D[key].ProjectionY("tmp1",p[0],p[1]).Integral() i2 = histos2D_nonRes_l2[key].ProjectionY("tmp2",p[0],p[1]).Integral() i1_l2 = histos2D_l2[key].ProjectionY("tmp1",p[0],p[1]).Integral() i2_l2 = histos2D_nonRes[key].ProjectionY("tmp2",p[0],p[1]).Integral() graphs[key].SetPoint(n,55+p[0]*2+(p[1]-p[0]),(i1/i2 +i1_l2/i2_l2)/2.) if (key=="Wjets") and ("TTbar" in histos2D.keys()): norm = histos2D["TTbar"].Integral()/histos2D["Wjets"].Integral() tt_i1 = histos2D["TTbar"].ProjectionY("tmp1",p[0],p[1]).Integral()*norm tt_i2 = histos2D_nonRes_l2["TTbar"].ProjectionY("tmp2",p[0],p[1]).Integral() tt_i1_l2 = histos2D_l2["TTbar"].ProjectionY("tmp1",p[0],p[1]) .Integral()*norm tt_i2_l2 = histos2D_nonRes["TTbar"].ProjectionY("tmp2",p[0],p[1]).Integral() graphs[key].SetPoint(n,55+p[0]*2+(p[1]-p[0]),(i1/i2 +i1_l2/i2_l2)/2.+(tt_i1/tt_i2 + tt_i1_l2/tt_i2_l2)/2.) 
err = ROOT.TMath.Sqrt(pow(ROOT.TMath.Sqrt(i1)/i2 + ROOT.TMath.Sqrt(i2)*i1/(i2*i2),2)+pow(ROOT.TMath.Sqrt(i1_l2)/i2_l2 + ROOT.TMath.Sqrt(i2_l2)*i1_l2/(i2_l2*i2_l2),2)) graphs[key].SetPointError(n,0,err) print "set point errors "+str(err) n+=1 func=ROOT.TF1("pol","pol6",55,215) func2=ROOT.TF1("pol","pol6",55,215) l="ratio" for key in graphs.keys(): if key.find("Z")!=-1: l="ratio_Z" if key.find("T")!=-1: l="ratio_TT" if key.find("W")!=-1: l="ratio" if key.find("W")!=-1: graphs[key].Fit(func) st = returnString(func,"pol","MJ2") params["ratio_Res_nonRes_l1"][l] = st st = returnString(func,"pol","MJ1") params["ratio_Res_nonRes_l2"][l] = st else: graphs[key].Fit(func2) st = returnString(func2,"pol","MJ2") params["ratio_Res_nonRes_l1"][l] = st st = returnString(func2,"pol","MJ1") params["ratio_Res_nonRes_l2"][l] = st graphs[key].SetMarkerColor(ROOT.kBlack) graphs[key].SetMarkerStyle(1) graphs[key].SetMarkerColor(ROOT.kBlue) graphs[key].SetMarkerStyle(2) graphs[key].GetXaxis().SetTitle("m_{jet1}") graphs[key].GetYaxis().SetTitle("res/nonRes") graphs[key].GetFunction("pol").SetLineColor(ROOT.kBlack) graphs[key].GetXaxis().SetRangeUser(55,215) graphs[key].SetMinimum(0) c =getCanvas("c") graphs["Wjets"].Draw("AP") graphs["Wjets"].GetFunction("pol").Draw("same") legend = ROOT.TLegend(0.5607383,0.2063123,0.85,0.3089701) legend.SetLineWidth(2) legend.SetBorderSize(0) legend.SetFillColor(0) legend.SetTextFont(42) legend.SetTextSize(0.04) legend.SetTextAlign(12) legend.AddEntry(graphs["Wjets"],"ratio W+jets + t#bar{t}","lp") legend.AddEntry(graphs["Wjets"].GetFunction("pol"),"fit ","lp") legend.Draw("same") text = ROOT.TLatex() text.DrawLatexNDC(0.13,0.92,"#font[62]{CMS} #font[52]{Simulation}") c.SaveAs("debug_corr_l1_l2_Wjets.pdf") if 'Zjets' in graphs.keys(): c = getCanvas("zjets") graphs["Zjets"].Draw("AP") graphs["Zjets"].GetFunction("pol").Draw("same") legend = ROOT.TLegend(0.5607383,0.2063123,0.85,0.3089701) legend.SetLineWidth(2) legend.SetBorderSize(0)
random_line_split
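The err assigned to each ratio point in the suffix above encodes a simple Poisson-style uncertainty. With i1/i2 the resonant and non-resonant integrals of one jet leg, and the primed quantities the same integrals for the other leg, the code evaluates

\sigma_\text{ratio} = \sqrt{\left(\frac{\sqrt{i_1}}{i_2} + \frac{\sqrt{i_2}\, i_1}{i_2^{2}}\right)^{2} + \left(\frac{\sqrt{i_1'}}{i_2'} + \frac{\sqrt{i_2'}\, i_1'}{i_2'^{2}}\right)^{2}}

i.e. within each leg the sqrt(N) uncertainties of numerator and denominator are summed linearly, and the two legs are then combined in quadrature; the plotted point itself is the average (i1/i2 + i1'/i2')/2.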
vvMakeVjetsShapes.py
.var("sigma").setVal(sigma) #fitter.w.var("sigma").setConstant(1) print "_____________________________________" fitter.importBinnedData(histo,['x'],'data') fitter.fit('model','data',[ROOT.RooFit.SumW2Error(1),ROOT.RooFit.Save(1),ROOT.RooFit.Range(55,120)]) #55,140 works well with fitting only the resonant part #ROOT.RooFit.Minos(ROOT.kTRUE) fitter.projection("model","data","x","debugJ"+leg+"_"+label+"_Res.pdf",0,False,"m_{jet}") c= getCanvas(label) histo_nonRes.SetMarkerStyle(1) histo_nonRes.SetMarkerColor(ROOT.kBlack) histo_nonRes.GetXaxis().SetTitle("m_{jet}") histo_nonRes.GetYaxis().SetTitleOffset(1.5) histo_nonRes.GetYaxis().SetTitle("events") histo_nonRes.Draw("p") exp.SetLineColor(ROOT.kRed) exp.Draw("same") text = ROOT.TLatex() text.DrawLatexNDC(0.13,0.92,"#font[62]{CMS} #font[52]{Simulation}") c.SaveAs("debugJ"+leg+"_"+label+"_nonRes.pdf") params[label+"_Res_"+leg]={"mean": {"val": fitter.w.var("mean").getVal(), "err": fitter.w.var("mean").getError()}, "sigma": {"val": fitter.w.var("sigma").getVal(), "err": fitter.w.var("sigma").getError()}, "alpha":{ "val": fitter.w.var("alpha").getVal(), "err": fitter.w.var("alpha").getError()},"alpha2":{"val": fitter.w.var("alpha2").getVal(),"err": fitter.w.var("alpha2").getError()},"n":{ "val": fitter.w.var("n").getVal(), "err": fitter.w.var("n").getError()},"n2": {"val": fitter.w.var("n2").getVal(), "err": fitter.w.var("n2").getError()}} params[label+"_nonRes_"+leg]={"mean": {"val":exp.GetParameter(1),"err":exp.GetParError(1)},"sigma": {"val":exp.GetParameter(2),"err":exp.GetParError(2)}} return params def getCanvas(name):
label = options.output.split(".root")[0] t = label.split("_") el="" for words in t: if words.find("HP")!=-1 or words.find("LP")!=-1: continue el+=words+"_" label = el samplenames = options.sample.split(",") for filename in os.listdir(args[0]): for samplename in samplenames: if not (filename.find(samplename)!=-1): continue fnameParts=filename.split('.') fname=fnameParts[0] ext=fnameParts[1] if ext.find("root") ==-1: continue name = fname.split('_')[0] samples[name] = fname print 'found',filename sigmas=[] params={} legs=["l1","l2"] plotters=[] names = [] for name in samples.keys(): plotters.append(TreePlotter(args[0]+'/'+samples[name]+'.root','tree')) plotters[-1].setupFromFile(args[0]+'/'+samples[name]+'.pck') plotters[-1].addCorrectionFactor('xsec','tree') plotters[-1].addCorrectionFactor('genWeight','tree') plotters[-1].addCorrectionFactor('puWeight','tree') if options.triggerW: plotters[-1].addCorrectionFactor('triggerWeight','tree') corrFactor = options.corrFactorW if samples[name].find('Z') != -1: corrFactor = options.corrFactorZ if samples[name].find('W') != -1: corrFactor = options.corrFactorW plotters[-1].addCorrectionFactor(corrFactor,'flat') names.append(samples[name]) print 'Fitting Mjet:' histos2D_l2={} histos2D={} histos2D_nonRes={} histos2D_nonRes_l2={} for p in range(0,len(plotters)): key ="Wjets" if str(names[p]).find("ZJets")!=-1: key = "Zjets" if str(names[p]).find("TT")!=-1: key = "TTbar" print "make histo for "+key histos2D_nonRes [key] = plotters[p].drawTH2("jj_l1_softDrop_mass:jj_l2_softDrop_mass",options.cut+"*(jj_l1_mergedVTruth==0)*(jj_l1_softDrop_mass>55&&jj_l1_softDrop_mass<215)","1",80,55,215,80,55,215) histos2D_nonRes [key].SetName(key+"_nonResl1") histos2D [key] = plotters[p].drawTH2("jj_l1_softDrop_mass:jj_l2_softDrop_mass",options.cut+"*(jj_l1_mergedVTruth==1)*(jj_l1_softDrop_mass>55&&jj_l1_softDrop_mass<215)","1",80,55,215,80,55,215) histos2D [key].SetName(key+"_Resl1") histos2D_nonRes_l2 [key] = plotters[p].drawTH2("jj_l2_softDrop_mass:jj_l1_softDrop_mass",options.cut+"*(jj_l2_mergedVTruth==0)*(jj_l2_softDrop_mass>55&&jj_l2_softDrop_mass<215)","1",80,55,215,80,55,215) histos2D_nonRes_l2 [key].SetName(key+"_nonResl2") histos2D_l2 [key] = plotters[p].drawTH2("jj_l2_softDrop_mass:jj_l1_softDrop_mass",options.cut+"*(jj_l2_mergedVTruth==1)*(jj_l2_softDrop_mass>55&&jj_l2_softDrop_mass<215)","1",80,55,215,80,55,215) histos2D_l2 [key].SetName(key+"_Resl2") histos2D[key].Scale(35900.) histos2D_l2[key].Scale(35900.) histos2D_nonRes[key].Scale(35900.) histos2D_nonRes_l2[key].Scale(35900.) 
############################ tmpfile = ROOT.TFile("test.root","RECREATE") for key in histos2D.keys(): histos2D_l2[key].Write() histos2D_nonRes[key].Write() histos2D_nonRes_l2[key].Write() histos2D[key].Write() ########################### for leg in legs: histos = {} histos_nonRes = {} scales={} scales_nonRes={} purity = "LPLP" if options.output.find("HPHP")!=-1:purity = "HPHP" if options.output.find("HPLP")!=-1:purity = "HPLP" fitter=Fitter(['x']) fitter.jetResonanceVjets('model','x') if options.fixPars!="1": fixedPars =options.fixPars.split(',') if len(fixedPars) > 0: print " - Fix parameters: ", fixedPars for par in fixedPars: parVal = par.split(':') fitter.w.var(parVal[0]).setVal(float(parVal[1])) fitter.w.var(parVal[0]).setConstant(1) for key in histos2D.keys(): if leg=="l1": histos_nonRes [key] = histos2D_nonRes[key].ProjectionY() histos [key] = histos2D[key].ProjectionY() else: histos_nonRes [key] = histos2D_nonRes_l2[key].ProjectionY() histos [key] = histos2D_l2[key].ProjectionY() histos_nonRes[key].SetName(key+"_nonRes") histos [key].SetName(key) scales [key] = histos[key].Integral() scales_nonRes [key] = histos_nonRes[key].Integral() # combine ttbar and wjets contributions:
c=ROOT.TCanvas(name,name) c.cd() c.SetFillColor(0) c.SetBorderMode(0) c.SetFrameFillStyle(0) c.SetFrameBorderMode(0) c.SetLeftMargin(0.13) c.SetRightMargin(0.08) c.SetTopMargin( 0.1 ) c.SetBottomMargin( 0.12 ) return c
identifier_body
vvMakeVjetsShapes.py
(func,ftype,varname): if ftype.find("pol")!=-1: st='0' for i in range(0,func.GetNpar()): st=st+"+("+str(func.GetParameter(i))+")"+("*{varname}".format(varname=varname)*i) return st else: return "" def doFit(fitter,histo,histo_nonRes,label,leg): params={} print "fitting "+histo.GetName()+" contribution " exp = ROOT.TF1("gaus" ,"gaus",55,215) histo_nonRes.Fit(exp,"R") gauss = ROOT.TF1("gauss" ,"gaus",74,94) if histo.GetName().find("Z")!=-1: gauss = ROOT.TF1("gauss","gaus",80,100) histo.Fit(gauss,"R") mean = gauss.GetParameter(1) sigma = gauss.GetParameter(2) print "____________________________________" print "mean "+str(mean) print "sigma "+str(sigma) print "set paramters of double CB constant aground the ones from gaussian fit" fitter.w.var("mean").setVal(mean) fitter.w.var("mean").setConstant(1) #fitter.w.var("sigma").setVal(sigma) #fitter.w.var("sigma").setConstant(1) print "_____________________________________" fitter.importBinnedData(histo,['x'],'data') fitter.fit('model','data',[ROOT.RooFit.SumW2Error(1),ROOT.RooFit.Save(1),ROOT.RooFit.Range(55,120)]) #55,140 works well with fitting only the resonant part #ROOT.RooFit.Minos(ROOT.kTRUE) fitter.projection("model","data","x","debugJ"+leg+"_"+label+"_Res.pdf",0,False,"m_{jet}") c= getCanvas(label) histo_nonRes.SetMarkerStyle(1) histo_nonRes.SetMarkerColor(ROOT.kBlack) histo_nonRes.GetXaxis().SetTitle("m_{jet}") histo_nonRes.GetYaxis().SetTitleOffset(1.5) histo_nonRes.GetYaxis().SetTitle("events") histo_nonRes.Draw("p") exp.SetLineColor(ROOT.kRed) exp.Draw("same") text = ROOT.TLatex() text.DrawLatexNDC(0.13,0.92,"#font[62]{CMS} #font[52]{Simulation}") c.SaveAs("debugJ"+leg+"_"+label+"_nonRes.pdf") params[label+"_Res_"+leg]={"mean": {"val": fitter.w.var("mean").getVal(), "err": fitter.w.var("mean").getError()}, "sigma": {"val": fitter.w.var("sigma").getVal(), "err": fitter.w.var("sigma").getError()}, "alpha":{ "val": fitter.w.var("alpha").getVal(), "err": fitter.w.var("alpha").getError()},"alpha2":{"val": fitter.w.var("alpha2").getVal(),"err": fitter.w.var("alpha2").getError()},"n":{ "val": fitter.w.var("n").getVal(), "err": fitter.w.var("n").getError()},"n2": {"val": fitter.w.var("n2").getVal(), "err": fitter.w.var("n2").getError()}} params[label+"_nonRes_"+leg]={"mean": {"val":exp.GetParameter(1),"err":exp.GetParError(1)},"sigma": {"val":exp.GetParameter(2),"err":exp.GetParError(2)}} return params def getCanvas(name): c=ROOT.TCanvas(name,name) c.cd() c.SetFillColor(0) c.SetBorderMode(0) c.SetFrameFillStyle(0) c.SetFrameBorderMode(0) c.SetLeftMargin(0.13) c.SetRightMargin(0.08) c.SetTopMargin( 0.1 ) c.SetBottomMargin( 0.12 ) return c label = options.output.split(".root")[0] t = label.split("_") el="" for words in t: if words.find("HP")!=-1 or words.find("LP")!=-1: continue el+=words+"_" label = el samplenames = options.sample.split(",") for filename in os.listdir(args[0]): for samplename in samplenames: if not (filename.find(samplename)!=-1): continue fnameParts=filename.split('.') fname=fnameParts[0] ext=fnameParts[1] if ext.find("root") ==-1: continue name = fname.split('_')[0] samples[name] = fname print 'found',filename sigmas=[] params={} legs=["l1","l2"] plotters=[] names = [] for name in samples.keys(): plotters.append(TreePlotter(args[0]+'/'+samples[name]+'.root','tree')) plotters[-1].setupFromFile(args[0]+'/'+samples[name]+'.pck') plotters[-1].addCorrectionFactor('xsec','tree') plotters[-1].addCorrectionFactor('genWeight','tree') plotters[-1].addCorrectionFactor('puWeight','tree') if options.triggerW: 
plotters[-1].addCorrectionFactor('triggerWeight','tree') corrFactor = options.corrFactorW if samples[name].find('Z') != -1: corrFactor = options.corrFactorZ if samples[name].find('W') != -1: corrFactor = options.corrFactorW plotters[-1].addCorrectionFactor(corrFactor,'flat') names.append(samples[name]) print 'Fitting Mjet:' histos2D_l2={} histos2D={} histos2D_nonRes={} histos2D_nonRes_l2={} for p in range(0,len(plotters)): key ="Wjets" if str(names[p]).find("ZJets")!=-1: key = "Zjets" if str(names[p]).find("TT")!=-1: key = "TTbar" print "make histo for "+key histos2D_nonRes [key] = plotters[p].drawTH2("jj_l1_softDrop_mass:jj_l2_softDrop_mass",options.cut+"*(jj_l1_mergedVTruth==0)*(jj_l1_softDrop_mass>55&&jj_l1_softDrop_mass<215)","1",80,55,215,80,55,215) histos2D_nonRes [key].SetName(key+"_nonResl1") histos2D [key] = plotters[p].drawTH2("jj_l1_softDrop_mass:jj_l2_softDrop_mass",options.cut+"*(jj_l1_mergedVTruth==1)*(jj_l1_softDrop_mass>55&&jj_l1_softDrop_mass<215)","1",80,55,215,80,55,215) histos2D [key].SetName(key+"_Resl1") histos2D_nonRes_l2 [key] = plotters[p].drawTH2("jj_l2_softDrop_mass:jj_l1_softDrop_mass",options.cut+"*(jj_l2_mergedVTruth==0)*(jj_l2_softDrop_mass>55&&jj_l2_softDrop_mass<215)","1",80,55,215,80,55,215) histos2D_nonRes_l2 [key].SetName(key+"_nonResl2") histos2D_l2 [key] = plotters[p].drawTH2("jj_l2_softDrop_mass:jj_l1_softDrop_mass",options.cut+"*(jj_l2_mergedVTruth==1)*(jj_l2_softDrop_mass>55&&jj_l2_softDrop_mass<215)","1",80,55,215,80,55,215) histos2D_l2 [key].SetName(key+"_Resl2") histos2D[key].Scale(35900.) histos2D_l2[key].Scale(35900.) histos2D_nonRes[key].Scale(35900.) histos2D_nonRes_l2[key].Scale(35900.) ############################ tmpfile = ROOT.TFile("test.root","RECREATE") for key in histos2D.keys(): histos2D_l2[key].Write() histos2D_nonRes[key].Write() histos2D_nonRes_l2[key].Write() histos2D[key].Write() ########################### for leg in legs: histos = {} histos_nonRes = {} scales={} scales_nonRes={} purity = "LPLP" if options.output.find("HPHP")!=-1:purity = "HPHP" if options.output.find("H
returnString
identifier_name
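returnString, the identifier extracted in this record, flattens a fitted TF1 polynomial into a RooFit-style formula string of the form "0+(p0)+(p1)*MJ1+(p2)*MJ1*MJ1+...", repeating the variable name i times for the i-th order term. The same construction in Go (illustrative only; coefficient formatting may differ slightly from Python's str()):

package main

import (
	"fmt"
	"strings"
)

// polynomialString rebuilds what returnString produces for a "pol" fit:
// "0+(c0)+(c1)*MJ1+(c2)*MJ1*MJ1+..." for the given coefficients and variable name.
func polynomialString(coeffs []float64, varname string) string {
	st := "0"
	for i, c := range coeffs {
		st += fmt.Sprintf("+(%v)%s", c, strings.Repeat("*"+varname, i))
	}
	return st
}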
vvMakeVjetsShapes.py
.92,"#font[62]{CMS} #font[52]{Simulation}") c.SaveAs("debugJ"+leg+"_"+label+"_nonRes.pdf") params[label+"_Res_"+leg]={"mean": {"val": fitter.w.var("mean").getVal(), "err": fitter.w.var("mean").getError()}, "sigma": {"val": fitter.w.var("sigma").getVal(), "err": fitter.w.var("sigma").getError()}, "alpha":{ "val": fitter.w.var("alpha").getVal(), "err": fitter.w.var("alpha").getError()},"alpha2":{"val": fitter.w.var("alpha2").getVal(),"err": fitter.w.var("alpha2").getError()},"n":{ "val": fitter.w.var("n").getVal(), "err": fitter.w.var("n").getError()},"n2": {"val": fitter.w.var("n2").getVal(), "err": fitter.w.var("n2").getError()}} params[label+"_nonRes_"+leg]={"mean": {"val":exp.GetParameter(1),"err":exp.GetParError(1)},"sigma": {"val":exp.GetParameter(2),"err":exp.GetParError(2)}} return params def getCanvas(name): c=ROOT.TCanvas(name,name) c.cd() c.SetFillColor(0) c.SetBorderMode(0) c.SetFrameFillStyle(0) c.SetFrameBorderMode(0) c.SetLeftMargin(0.13) c.SetRightMargin(0.08) c.SetTopMargin( 0.1 ) c.SetBottomMargin( 0.12 ) return c label = options.output.split(".root")[0] t = label.split("_") el="" for words in t: if words.find("HP")!=-1 or words.find("LP")!=-1: continue el+=words+"_" label = el samplenames = options.sample.split(",") for filename in os.listdir(args[0]): for samplename in samplenames: if not (filename.find(samplename)!=-1): continue fnameParts=filename.split('.') fname=fnameParts[0] ext=fnameParts[1] if ext.find("root") ==-1: continue name = fname.split('_')[0] samples[name] = fname print 'found',filename sigmas=[] params={} legs=["l1","l2"] plotters=[] names = [] for name in samples.keys(): plotters.append(TreePlotter(args[0]+'/'+samples[name]+'.root','tree')) plotters[-1].setupFromFile(args[0]+'/'+samples[name]+'.pck') plotters[-1].addCorrectionFactor('xsec','tree') plotters[-1].addCorrectionFactor('genWeight','tree') plotters[-1].addCorrectionFactor('puWeight','tree') if options.triggerW: plotters[-1].addCorrectionFactor('triggerWeight','tree') corrFactor = options.corrFactorW if samples[name].find('Z') != -1: corrFactor = options.corrFactorZ if samples[name].find('W') != -1: corrFactor = options.corrFactorW plotters[-1].addCorrectionFactor(corrFactor,'flat') names.append(samples[name]) print 'Fitting Mjet:' histos2D_l2={} histos2D={} histos2D_nonRes={} histos2D_nonRes_l2={} for p in range(0,len(plotters)): key ="Wjets" if str(names[p]).find("ZJets")!=-1: key = "Zjets" if str(names[p]).find("TT")!=-1: key = "TTbar" print "make histo for "+key histos2D_nonRes [key] = plotters[p].drawTH2("jj_l1_softDrop_mass:jj_l2_softDrop_mass",options.cut+"*(jj_l1_mergedVTruth==0)*(jj_l1_softDrop_mass>55&&jj_l1_softDrop_mass<215)","1",80,55,215,80,55,215) histos2D_nonRes [key].SetName(key+"_nonResl1") histos2D [key] = plotters[p].drawTH2("jj_l1_softDrop_mass:jj_l2_softDrop_mass",options.cut+"*(jj_l1_mergedVTruth==1)*(jj_l1_softDrop_mass>55&&jj_l1_softDrop_mass<215)","1",80,55,215,80,55,215) histos2D [key].SetName(key+"_Resl1") histos2D_nonRes_l2 [key] = plotters[p].drawTH2("jj_l2_softDrop_mass:jj_l1_softDrop_mass",options.cut+"*(jj_l2_mergedVTruth==0)*(jj_l2_softDrop_mass>55&&jj_l2_softDrop_mass<215)","1",80,55,215,80,55,215) histos2D_nonRes_l2 [key].SetName(key+"_nonResl2") histos2D_l2 [key] = plotters[p].drawTH2("jj_l2_softDrop_mass:jj_l1_softDrop_mass",options.cut+"*(jj_l2_mergedVTruth==1)*(jj_l2_softDrop_mass>55&&jj_l2_softDrop_mass<215)","1",80,55,215,80,55,215) histos2D_l2 [key].SetName(key+"_Resl2") histos2D[key].Scale(35900.) histos2D_l2[key].Scale(35900.) 
histos2D_nonRes[key].Scale(35900.) histos2D_nonRes_l2[key].Scale(35900.) ############################ tmpfile = ROOT.TFile("test.root","RECREATE") for key in histos2D.keys(): histos2D_l2[key].Write() histos2D_nonRes[key].Write() histos2D_nonRes_l2[key].Write() histos2D[key].Write() ########################### for leg in legs: histos = {} histos_nonRes = {} scales={} scales_nonRes={} purity = "LPLP" if options.output.find("HPHP")!=-1:purity = "HPHP" if options.output.find("HPLP")!=-1:purity = "HPLP" fitter=Fitter(['x']) fitter.jetResonanceVjets('model','x') if options.fixPars!="1": fixedPars =options.fixPars.split(',') if len(fixedPars) > 0: print " - Fix parameters: ", fixedPars for par in fixedPars: parVal = par.split(':') fitter.w.var(parVal[0]).setVal(float(parVal[1])) fitter.w.var(parVal[0]).setConstant(1) for key in histos2D.keys(): if leg=="l1": histos_nonRes [key] = histos2D_nonRes[key].ProjectionY() histos [key] = histos2D[key].ProjectionY() else: histos_nonRes [key] = histos2D_nonRes_l2[key].ProjectionY() histos [key] = histos2D_l2[key].ProjectionY() histos_nonRes[key].SetName(key+"_nonRes") histos [key].SetName(key) scales [key] = histos[key].Integral() scales_nonRes [key] = histos_nonRes[key].Integral() # combine ttbar and wjets contributions: Wjets = histos["Wjets"] Wjets_nonRes = histos_nonRes["Wjets"] if 'TTbar' in histos.keys(): Wjets.Add(histos["TTbar"]); Wjets_nonRes.Add(histos_nonRes["TTbar"]) keys = ["Wjets"] Wjets_params = doFit(fitter,Wjets,Wjets_nonRes,"Wjets_TTbar",leg) params.update(Wjets_params) params["ratio_Res_nonRes_"+leg]= {'ratio':scales["Wjets"]/scales_nonRes["Wjets"] } if 'Zjets' in histos.keys():
keys.append("Zjets") fitterZ=Fitter(['x']) fitterZ.jetResonanceVjets('model','x') Zjets_params = doFit(fitterZ,histos["Zjets"],histos_nonRes["Zjets"],"Zjets",leg) params.update(Wjets_params) params.update(Zjets_params) params["ratio_Res_nonRes_"+leg]= {'ratio': scales["Wjets"]/scales_nonRes["Wjets"] , 'ratio_Z': scales["Zjets"]/scales_nonRes["Zjets"]}
conditional_block
reset.go
(ctx context.Context, dbData env.DbData, cSpecStr string, roots doltdb.Roots) (*doltdb.Commit, doltdb.Roots, error) { ddb := dbData.Ddb rsr := dbData.Rsr var newHead *doltdb.Commit if cSpecStr != "" { cs, err := doltdb.NewCommitSpec(cSpecStr) if err != nil { return nil, doltdb.Roots{}, err } headRef, err := rsr.CWBHeadRef() if err != nil { return nil, doltdb.Roots{}, err } newHead, err = ddb.Resolve(ctx, cs, headRef) if err != nil { return nil, doltdb.Roots{}, err } roots.Head, err = newHead.GetRootValue(ctx) if err != nil { return nil, doltdb.Roots{}, err } } // mirroring Git behavior, untracked tables are ignored on 'reset --hard', // save the state of these tables and apply them to |newHead|'s root. // // as a special case, if an untracked table has a tag collision with any // tables in |newHead| we silently drop it from the new working set. // these tag collision is typically cause by table renames (bug #751). untracked, err := roots.Working.GetAllSchemas(ctx) if err != nil { return nil, doltdb.Roots{}, err } // untracked tables exist in |working| but not in |staged| staged, err := roots.Staged.GetTableNames(ctx) if err != nil { return nil, doltdb.Roots{}, err } for _, name := range staged { delete(untracked, name) } newWkRoot := roots.Head ws, err := newWkRoot.GetAllSchemas(ctx) if err != nil { return nil, doltdb.Roots{}, err } tags := mapColumnTags(ws) for name, sch := range untracked { for _, pk := range sch.GetAllCols().GetColumns() { if _, ok := tags[pk.Tag]; ok { // |pk.Tag| collides with a schema in |newWkRoot| delete(untracked, name) } } } for name := range untracked { tbl, _, err := roots.Working.GetTable(ctx, name) if err != nil { return nil, doltdb.Roots{}, err } newWkRoot, err = newWkRoot.PutTable(ctx, name, tbl) if err != nil { return nil, doltdb.Roots{}, fmt.Errorf("failed to write table back to database: %s", err) } } // need to save the state of files that aren't tracked untrackedTables := make(map[string]*doltdb.Table) wTblNames, err := roots.Working.GetTableNames(ctx) if err != nil { return nil, doltdb.Roots{}, err } for _, tblName := range wTblNames { untrackedTables[tblName], _, err = roots.Working.GetTable(ctx, tblName) if err != nil { return nil, doltdb.Roots{}, err } } headTblNames, err := roots.Staged.GetTableNames(ctx) if err != nil { return nil, doltdb.Roots{}, err } for _, tblName := range headTblNames { delete(untrackedTables, tblName) } roots.Working = newWkRoot roots.Staged = roots.Head return newHead, roots, nil } // ResetHardTables resets the tables in working, staged, and head based on the given parameters. Returns the new // head commit and resulting roots func ResetHardTables(ctx context.Context, dbData env.DbData, cSpecStr string, roots doltdb.Roots) (*doltdb.Commit, doltdb.Roots, error) { return resetHardTables(ctx, dbData, cSpecStr, roots) } // ResetHard resets the working, staged, and head to the ones in the provided roots and head ref. // The reset can be performed on a non-current branch and working set. // Returns an error if the reset fails. 
func ResetHard( ctx context.Context, dbData env.DbData, doltDb *doltdb.DoltDB, username, email string, cSpecStr string, roots doltdb.Roots, headRef ref.DoltRef, ws *doltdb.WorkingSet, ) error { newHead, roots, err := resetHardTables(ctx, dbData, cSpecStr, roots) if err != nil { return err } currentWs, err := doltDb.ResolveWorkingSet(ctx, ws.Ref()) if err != nil { return err } h, err := currentWs.HashOf() if err != nil { return err } err = doltDb.UpdateWorkingSet(ctx, ws.Ref(), ws.WithWorkingRoot(roots.Working).WithStagedRoot(roots.Staged).ClearMerge(), h, &datas.WorkingSetMeta{ Name: username, Email: email, Timestamp: uint64(time.Now().Unix()), Description: "reset hard", }, nil) if err != nil { return err } if newHead != nil { err = doltDb.SetHeadToCommit(ctx, headRef, newHead) if err != nil { return err } } return nil } func ResetSoftTables(ctx context.Context, dbData env.DbData, apr *argparser.ArgParseResults, roots doltdb.Roots) (doltdb.Roots, error) { tables, err := getUnionedTables(ctx, apr.Args, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } err = ValidateTables(context.TODO(), tables, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } roots.Staged, err = MoveTablesBetweenRoots(ctx, tables, roots.Head, roots.Staged) if err != nil { return doltdb.Roots{}, err } return roots, nil } // ResetSoft resets the staged value from HEAD for the tables given and returns the updated roots. func ResetSoft(ctx context.Context, dbData env.DbData, tables []string, roots doltdb.Roots) (doltdb.Roots, error) { tables, err := getUnionedTables(ctx, tables, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } err = ValidateTables(context.TODO(), tables, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } return resetStaged(ctx, roots, tables) } // ResetSoftToRef matches the `git reset --soft <REF>` pattern. It returns a new Roots with the Staged and Head values // set to the commit specified by the spec string. The Working root is not set func ResetSoftToRef(ctx context.Context, dbData env.DbData, cSpecStr string) (doltdb.Roots, error) { cs, err := doltdb.NewCommitSpec(cSpecStr) if err != nil { return doltdb.Roots{}, err } headRef, err := dbData.Rsr.CWBHeadRef() if err != nil { return doltdb.Roots{}, err } newHead, err := dbData.Ddb.Resolve(ctx, cs, headRef) if err != nil { return doltdb.Roots{}, err } foundRoot, err := newHead.GetRootValue(ctx) if err != nil { return doltdb.Roots{}, err } // Update the head to this commit if err = dbData.Ddb.SetHeadToCommit(ctx, headRef, newHead); err != nil { return doltdb.Roots{}, err } return doltdb.Roots{ Head: foundRoot, Staged: foundRoot, }, err } func getUnionedTables(ctx context.Context, tables []string, stagedRoot, headRoot *doltdb.RootValue) ([]string, error) { if len(tables) == 0 || (len(tables) == 1 && tables[0] == ".") { var err error tables, err = doltdb.UnionTableNames(ctx, stagedRoot, headRoot) if err != nil { return nil, err } } return tables, nil } func resetStaged(ctx context.Context, roots doltdb.Roots, tbls []string) (doltdb.Roots, error) { newStaged, err := MoveTablesBetweenRoots(ctx, tbls, roots.Head, roots.Staged) if err != nil { return doltdb.Roots{}, err } roots.Staged = newStaged return roots, nil } // IsValidRef validates whether the input parameter is a valid cString // TODO: this doesn't belong in this package func IsValidRef(ctx context.Context, cSpecStr string, ddb *doltdb.DoltDB
resetHardTables
identifier_name
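The resetHardTables code in the row above computes the untracked-table set as "all schemas in the working root minus all table names in the staged root" before carrying those tables over to the new working root. A minimal standalone sketch of that map-based set difference, using plain Go types rather than the Dolt root/schema APIs (all names below are illustrative):

```go
package main

import "fmt"

// untrackedTables returns the names present in working but absent from staged,
// mirroring the "working minus staged" step performed by resetHardTables.
func untrackedTables(working map[string]struct{}, staged []string) map[string]struct{} {
	untracked := make(map[string]struct{}, len(working))
	for name := range working {
		untracked[name] = struct{}{}
	}
	for _, name := range staged {
		delete(untracked, name)
	}
	return untracked
}

func main() {
	working := map[string]struct{}{"orders": {}, "scratch": {}, "users": {}}
	staged := []string{"orders", "users"}
	fmt.Println(untrackedTables(working, staged)) // map[scratch:{}]
}
```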
reset.go
HardTables(ctx context.Context, dbData env.DbData, cSpecStr string, roots doltdb.Roots) (*doltdb.Commit, doltdb.Roots, error) { ddb := dbData.Ddb rsr := dbData.Rsr var newHead *doltdb.Commit if cSpecStr != "" { cs, err := doltdb.NewCommitSpec(cSpecStr) if err != nil { return nil, doltdb.Roots{}, err } headRef, err := rsr.CWBHeadRef() if err != nil { return nil, doltdb.Roots{}, err } newHead, err = ddb.Resolve(ctx, cs, headRef) if err != nil { return nil, doltdb.Roots{}, err } roots.Head, err = newHead.GetRootValue(ctx) if err != nil { return nil, doltdb.Roots{}, err } } // mirroring Git behavior, untracked tables are ignored on 'reset --hard', // save the state of these tables and apply them to |newHead|'s root. // // as a special case, if an untracked table has a tag collision with any // tables in |newHead| we silently drop it from the new working set. // these tag collision is typically cause by table renames (bug #751). untracked, err := roots.Working.GetAllSchemas(ctx) if err != nil { return nil, doltdb.Roots{}, err } // untracked tables exist in |working| but not in |staged| staged, err := roots.Staged.GetTableNames(ctx) if err != nil { return nil, doltdb.Roots{}, err } for _, name := range staged { delete(untracked, name) } newWkRoot := roots.Head ws, err := newWkRoot.GetAllSchemas(ctx) if err != nil { return nil, doltdb.Roots{}, err } tags := mapColumnTags(ws) for name, sch := range untracked { for _, pk := range sch.GetAllCols().GetColumns() { if _, ok := tags[pk.Tag]; ok { // |pk.Tag| collides with a schema in |newWkRoot| delete(untracked, name) } } } for name := range untracked { tbl, _, err := roots.Working.GetTable(ctx, name) if err != nil { return nil, doltdb.Roots{}, err } newWkRoot, err = newWkRoot.PutTable(ctx, name, tbl) if err != nil { return nil, doltdb.Roots{}, fmt.Errorf("failed to write table back to database: %s", err) } } // need to save the state of files that aren't tracked untrackedTables := make(map[string]*doltdb.Table) wTblNames, err := roots.Working.GetTableNames(ctx) if err != nil { return nil, doltdb.Roots{}, err } for _, tblName := range wTblNames { untrackedTables[tblName], _, err = roots.Working.GetTable(ctx, tblName) if err != nil { return nil, doltdb.Roots{}, err } } headTblNames, err := roots.Staged.GetTableNames(ctx) if err != nil { return nil, doltdb.Roots{}, err } for _, tblName := range headTblNames { delete(untrackedTables, tblName) } roots.Working = newWkRoot roots.Staged = roots.Head return newHead, roots, nil } // ResetHardTables resets the tables in working, staged, and head based on the given parameters. Returns the new // head commit and resulting roots func ResetHardTables(ctx context.Context, dbData env.DbData, cSpecStr string, roots doltdb.Roots) (*doltdb.Commit, doltdb.Roots, error) { return resetHardTables(ctx, dbData, cSpecStr, roots) } // ResetHard resets the working, staged, and head to the ones in the provided roots and head ref. // The reset can be performed on a non-current branch and working set. // Returns an error if the reset fails. 
func ResetHard( ctx context.Context, dbData env.DbData, doltDb *doltdb.DoltDB, username, email string, cSpecStr string, roots doltdb.Roots, headRef ref.DoltRef, ws *doltdb.WorkingSet, ) error { newHead, roots, err := resetHardTables(ctx, dbData, cSpecStr, roots) if err != nil { return err } currentWs, err := doltDb.ResolveWorkingSet(ctx, ws.Ref()) if err != nil { return err } h, err := currentWs.HashOf() if err != nil { return err } err = doltDb.UpdateWorkingSet(ctx, ws.Ref(), ws.WithWorkingRoot(roots.Working).WithStagedRoot(roots.Staged).ClearMerge(), h, &datas.WorkingSetMeta{ Name: username, Email: email, Timestamp: uint64(time.Now().Unix()), Description: "reset hard", }, nil) if err != nil { return err } if newHead != nil { err = doltDb.SetHeadToCommit(ctx, headRef, newHead) if err != nil { return err } } return nil } func ResetSoftTables(ctx context.Context, dbData env.DbData, apr *argparser.ArgParseResults, roots doltdb.Roots) (doltdb.Roots, error) { tables, err := getUnionedTables(ctx, apr.Args, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } err = ValidateTables(context.TODO(), tables, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } roots.Staged, err = MoveTablesBetweenRoots(ctx, tables, roots.Head, roots.Staged) if err != nil { return doltdb.Roots{}, err } return roots, nil } // ResetSoft resets the staged value from HEAD for the tables given and returns the updated roots. func ResetSoft(ctx context.Context, dbData env.DbData, tables []string, roots doltdb.Roots) (doltdb.Roots, error) { tables, err := getUnionedTables(ctx, tables, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } err = ValidateTables(context.TODO(), tables, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } return resetStaged(ctx, roots, tables) } // ResetSoftToRef matches the `git reset --soft <REF>` pattern. It returns a new Roots with the Staged and Head values // set to the commit specified by the spec string. The Working root is not set func ResetSoftToRef(ctx context.Context, dbData env.DbData, cSpecStr string) (doltdb.Roots, error) { cs, err := doltdb.NewCommitSpec(cSpecStr) if err != nil { return doltdb.Roots{}, err } headRef, err := dbData.Rsr.CWBHeadRef() if err != nil { return doltdb.Roots{}, err } newHead, err := dbData.Ddb.Resolve(ctx, cs, headRef) if err != nil { return doltdb.Roots{}, err } foundRoot, err := newHead.GetRootValue(ctx) if err != nil { return doltdb.Roots{}, err } // Update the head to this commit if err = dbData.Ddb.SetHeadToCommit(ctx, headRef, newHead); err != nil { return doltdb.Roots{}, err } return doltdb.Roots{ Head: foundRoot, Staged: foundRoot, }, err } func getUnionedTables(ctx context.Context, tables []string, stagedRoot, headRoot *doltdb.RootValue) ([]string, error) { if len(tables) == 0 || (len(tables) == 1 && tables[0] == ".") { var err error tables, err = doltdb.UnionTableNames(ctx, stagedRoot, headRoot) if err != nil { return nil, err } } return tables, nil } func resetStaged(ctx context.Context, roots doltdb.Roots, tbls []string) (doltdb.Roots, error)
// IsValidRef validates whether the input parameter is a valid cString // TODO: this doesn't belong in this package func IsValidRef(ctx context.Context, cSpecStr string, ddb *doltdb.DoltDB
{ newStaged, err := MoveTablesBetweenRoots(ctx, tbls, roots.Head, roots.Staged) if err != nil { return doltdb.Roots{}, err } roots.Staged = newStaged return roots, nil }
identifier_body
reset.go
HardTables(ctx context.Context, dbData env.DbData, cSpecStr string, roots doltdb.Roots) (*doltdb.Commit, doltdb.Roots, error) { ddb := dbData.Ddb rsr := dbData.Rsr var newHead *doltdb.Commit if cSpecStr != "" { cs, err := doltdb.NewCommitSpec(cSpecStr) if err != nil
headRef, err := rsr.CWBHeadRef() if err != nil { return nil, doltdb.Roots{}, err } newHead, err = ddb.Resolve(ctx, cs, headRef) if err != nil { return nil, doltdb.Roots{}, err } roots.Head, err = newHead.GetRootValue(ctx) if err != nil { return nil, doltdb.Roots{}, err } } // mirroring Git behavior, untracked tables are ignored on 'reset --hard', // save the state of these tables and apply them to |newHead|'s root. // // as a special case, if an untracked table has a tag collision with any // tables in |newHead| we silently drop it from the new working set. // these tag collision is typically cause by table renames (bug #751). untracked, err := roots.Working.GetAllSchemas(ctx) if err != nil { return nil, doltdb.Roots{}, err } // untracked tables exist in |working| but not in |staged| staged, err := roots.Staged.GetTableNames(ctx) if err != nil { return nil, doltdb.Roots{}, err } for _, name := range staged { delete(untracked, name) } newWkRoot := roots.Head ws, err := newWkRoot.GetAllSchemas(ctx) if err != nil { return nil, doltdb.Roots{}, err } tags := mapColumnTags(ws) for name, sch := range untracked { for _, pk := range sch.GetAllCols().GetColumns() { if _, ok := tags[pk.Tag]; ok { // |pk.Tag| collides with a schema in |newWkRoot| delete(untracked, name) } } } for name := range untracked { tbl, _, err := roots.Working.GetTable(ctx, name) if err != nil { return nil, doltdb.Roots{}, err } newWkRoot, err = newWkRoot.PutTable(ctx, name, tbl) if err != nil { return nil, doltdb.Roots{}, fmt.Errorf("failed to write table back to database: %s", err) } } // need to save the state of files that aren't tracked untrackedTables := make(map[string]*doltdb.Table) wTblNames, err := roots.Working.GetTableNames(ctx) if err != nil { return nil, doltdb.Roots{}, err } for _, tblName := range wTblNames { untrackedTables[tblName], _, err = roots.Working.GetTable(ctx, tblName) if err != nil { return nil, doltdb.Roots{}, err } } headTblNames, err := roots.Staged.GetTableNames(ctx) if err != nil { return nil, doltdb.Roots{}, err } for _, tblName := range headTblNames { delete(untrackedTables, tblName) } roots.Working = newWkRoot roots.Staged = roots.Head return newHead, roots, nil } // ResetHardTables resets the tables in working, staged, and head based on the given parameters. Returns the new // head commit and resulting roots func ResetHardTables(ctx context.Context, dbData env.DbData, cSpecStr string, roots doltdb.Roots) (*doltdb.Commit, doltdb.Roots, error) { return resetHardTables(ctx, dbData, cSpecStr, roots) } // ResetHard resets the working, staged, and head to the ones in the provided roots and head ref. // The reset can be performed on a non-current branch and working set. // Returns an error if the reset fails. 
func ResetHard( ctx context.Context, dbData env.DbData, doltDb *doltdb.DoltDB, username, email string, cSpecStr string, roots doltdb.Roots, headRef ref.DoltRef, ws *doltdb.WorkingSet, ) error { newHead, roots, err := resetHardTables(ctx, dbData, cSpecStr, roots) if err != nil { return err } currentWs, err := doltDb.ResolveWorkingSet(ctx, ws.Ref()) if err != nil { return err } h, err := currentWs.HashOf() if err != nil { return err } err = doltDb.UpdateWorkingSet(ctx, ws.Ref(), ws.WithWorkingRoot(roots.Working).WithStagedRoot(roots.Staged).ClearMerge(), h, &datas.WorkingSetMeta{ Name: username, Email: email, Timestamp: uint64(time.Now().Unix()), Description: "reset hard", }, nil) if err != nil { return err } if newHead != nil { err = doltDb.SetHeadToCommit(ctx, headRef, newHead) if err != nil { return err } } return nil } func ResetSoftTables(ctx context.Context, dbData env.DbData, apr *argparser.ArgParseResults, roots doltdb.Roots) (doltdb.Roots, error) { tables, err := getUnionedTables(ctx, apr.Args, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } err = ValidateTables(context.TODO(), tables, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } roots.Staged, err = MoveTablesBetweenRoots(ctx, tables, roots.Head, roots.Staged) if err != nil { return doltdb.Roots{}, err } return roots, nil } // ResetSoft resets the staged value from HEAD for the tables given and returns the updated roots. func ResetSoft(ctx context.Context, dbData env.DbData, tables []string, roots doltdb.Roots) (doltdb.Roots, error) { tables, err := getUnionedTables(ctx, tables, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } err = ValidateTables(context.TODO(), tables, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } return resetStaged(ctx, roots, tables) } // ResetSoftToRef matches the `git reset --soft <REF>` pattern. It returns a new Roots with the Staged and Head values // set to the commit specified by the spec string. The Working root is not set func ResetSoftToRef(ctx context.Context, dbData env.DbData, cSpecStr string) (doltdb.Roots, error) { cs, err := doltdb.NewCommitSpec(cSpecStr) if err != nil { return doltdb.Roots{}, err } headRef, err := dbData.Rsr.CWBHeadRef() if err != nil { return doltdb.Roots{}, err } newHead, err := dbData.Ddb.Resolve(ctx, cs, headRef) if err != nil { return doltdb.Roots{}, err } foundRoot, err := newHead.GetRootValue(ctx) if err != nil { return doltdb.Roots{}, err } // Update the head to this commit if err = dbData.Ddb.SetHeadToCommit(ctx, headRef, newHead); err != nil { return doltdb.Roots{}, err } return doltdb.Roots{ Head: foundRoot, Staged: foundRoot, }, err } func getUnionedTables(ctx context.Context, tables []string, stagedRoot, headRoot *doltdb.RootValue) ([]string, error) { if len(tables) == 0 || (len(tables) == 1 && tables[0] == ".") { var err error tables, err = doltdb.UnionTableNames(ctx, stagedRoot, headRoot) if err != nil { return nil, err } } return tables, nil } func resetStaged(ctx context.Context, roots doltdb.Roots, tbls []string) (doltdb.Roots, error) { newStaged, err := MoveTablesBetweenRoots(ctx, tbls, roots.Head, roots.Staged) if err != nil { return doltdb.Roots{}, err } roots.Staged = newStaged return roots, nil } // IsValidRef validates whether the input parameter is a valid cString // TODO: this doesn't belong in this package func IsValidRef(ctx context.Context, cSpecStr string, ddb *doltdb.DoltDB
{ return nil, doltdb.Roots{}, err }
conditional_block
reset.go
nil { return nil, doltdb.Roots{}, err } newWkRoot, err = newWkRoot.PutTable(ctx, name, tbl) if err != nil { return nil, doltdb.Roots{}, fmt.Errorf("failed to write table back to database: %s", err) } } // need to save the state of files that aren't tracked untrackedTables := make(map[string]*doltdb.Table) wTblNames, err := roots.Working.GetTableNames(ctx) if err != nil { return nil, doltdb.Roots{}, err } for _, tblName := range wTblNames { untrackedTables[tblName], _, err = roots.Working.GetTable(ctx, tblName) if err != nil { return nil, doltdb.Roots{}, err } } headTblNames, err := roots.Staged.GetTableNames(ctx) if err != nil { return nil, doltdb.Roots{}, err } for _, tblName := range headTblNames { delete(untrackedTables, tblName) } roots.Working = newWkRoot roots.Staged = roots.Head return newHead, roots, nil } // ResetHardTables resets the tables in working, staged, and head based on the given parameters. Returns the new // head commit and resulting roots func ResetHardTables(ctx context.Context, dbData env.DbData, cSpecStr string, roots doltdb.Roots) (*doltdb.Commit, doltdb.Roots, error) { return resetHardTables(ctx, dbData, cSpecStr, roots) } // ResetHard resets the working, staged, and head to the ones in the provided roots and head ref. // The reset can be performed on a non-current branch and working set. // Returns an error if the reset fails. func ResetHard( ctx context.Context, dbData env.DbData, doltDb *doltdb.DoltDB, username, email string, cSpecStr string, roots doltdb.Roots, headRef ref.DoltRef, ws *doltdb.WorkingSet, ) error { newHead, roots, err := resetHardTables(ctx, dbData, cSpecStr, roots) if err != nil { return err } currentWs, err := doltDb.ResolveWorkingSet(ctx, ws.Ref()) if err != nil { return err } h, err := currentWs.HashOf() if err != nil { return err } err = doltDb.UpdateWorkingSet(ctx, ws.Ref(), ws.WithWorkingRoot(roots.Working).WithStagedRoot(roots.Staged).ClearMerge(), h, &datas.WorkingSetMeta{ Name: username, Email: email, Timestamp: uint64(time.Now().Unix()), Description: "reset hard", }, nil) if err != nil { return err } if newHead != nil { err = doltDb.SetHeadToCommit(ctx, headRef, newHead) if err != nil { return err } } return nil } func ResetSoftTables(ctx context.Context, dbData env.DbData, apr *argparser.ArgParseResults, roots doltdb.Roots) (doltdb.Roots, error) { tables, err := getUnionedTables(ctx, apr.Args, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } err = ValidateTables(context.TODO(), tables, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } roots.Staged, err = MoveTablesBetweenRoots(ctx, tables, roots.Head, roots.Staged) if err != nil { return doltdb.Roots{}, err } return roots, nil } // ResetSoft resets the staged value from HEAD for the tables given and returns the updated roots. func ResetSoft(ctx context.Context, dbData env.DbData, tables []string, roots doltdb.Roots) (doltdb.Roots, error) { tables, err := getUnionedTables(ctx, tables, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } err = ValidateTables(context.TODO(), tables, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } return resetStaged(ctx, roots, tables) } // ResetSoftToRef matches the `git reset --soft <REF>` pattern. It returns a new Roots with the Staged and Head values // set to the commit specified by the spec string. 
The Working root is not set func ResetSoftToRef(ctx context.Context, dbData env.DbData, cSpecStr string) (doltdb.Roots, error) { cs, err := doltdb.NewCommitSpec(cSpecStr) if err != nil { return doltdb.Roots{}, err } headRef, err := dbData.Rsr.CWBHeadRef() if err != nil { return doltdb.Roots{}, err } newHead, err := dbData.Ddb.Resolve(ctx, cs, headRef) if err != nil { return doltdb.Roots{}, err } foundRoot, err := newHead.GetRootValue(ctx) if err != nil { return doltdb.Roots{}, err } // Update the head to this commit if err = dbData.Ddb.SetHeadToCommit(ctx, headRef, newHead); err != nil { return doltdb.Roots{}, err } return doltdb.Roots{ Head: foundRoot, Staged: foundRoot, }, err } func getUnionedTables(ctx context.Context, tables []string, stagedRoot, headRoot *doltdb.RootValue) ([]string, error) { if len(tables) == 0 || (len(tables) == 1 && tables[0] == ".") { var err error tables, err = doltdb.UnionTableNames(ctx, stagedRoot, headRoot) if err != nil { return nil, err } } return tables, nil } func resetStaged(ctx context.Context, roots doltdb.Roots, tbls []string) (doltdb.Roots, error) { newStaged, err := MoveTablesBetweenRoots(ctx, tbls, roots.Head, roots.Staged) if err != nil { return doltdb.Roots{}, err } roots.Staged = newStaged return roots, nil } // IsValidRef validates whether the input parameter is a valid cString // TODO: this doesn't belong in this package func IsValidRef(ctx context.Context, cSpecStr string, ddb *doltdb.DoltDB, rsr env.RepoStateReader) (bool, error) { // The error return value is only for propagating unhandled errors from rsr.CWBHeadRef() // All other errors merely indicate an invalid ref spec. // TODO: It's much better to enumerate the expected errors, to make sure we don't suppress any unexpected ones. cs, err := doltdb.NewCommitSpec(cSpecStr) if err != nil { return false, nil } headRef, err := rsr.CWBHeadRef() if err == doltdb.ErrOperationNotSupportedInDetachedHead { // This is safe because ddb.Resolve checks if headRef is nil, but only when the value is actually needed. // Basically, this guarentees that resolving "HEAD" or similar will return an error but other resolves will work. headRef = nil } else if err != nil { return false, err } _, err = ddb.Resolve(ctx, cs, headRef) if err != nil { return false, nil } return true, nil } // CleanUntracked deletes untracked tables from the working root. // Evaluates untracked tables as: all working tables - all staged tables. func CleanUntracked(ctx context.Context, roots doltdb.Roots, tables []string, dryrun bool, force bool) (doltdb.Roots, error) { untrackedTables := make(map[string]struct{}) var err error if len(tables) == 0 { tables, err = roots.Working.GetTableNames(ctx) if err != nil { return doltdb.Roots{}, nil } } for i := range tables { name := tables[i] _, _, err = roots.Working.GetTable(ctx, name) if err != nil { return doltdb.Roots{}, err } untrackedTables[name] = struct{}{} } // untracked tables = working tables - staged tables headTblNames, err := roots.Staged.GetTableNames(ctx) if err != nil { return doltdb.Roots{}, err } for _, name := range headTblNames { delete(untrackedTables, name) } newRoot := roots.Working var toDelete []string for t := range untrackedTables { toDelete = append(toDelete, t) }
newRoot, err = newRoot.RemoveTables(ctx, force, force, toDelete...) if err != nil { return doltdb.Roots{}, fmt.Errorf("failed to remove tables; %w", err)
random_line_split
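IsValidRef in the row above deliberately swallows parse and resolve failures (an invalid spec is a normal outcome, reported as false with a nil error), downgrades the detached-HEAD error to "carry on without a head ref", and only propagates errors it cannot classify. A rough sketch of that error-classification shape; parseSpec and resolveSpec are hypothetical stand-ins for the doltdb calls, not real Dolt APIs:

```go
package main

import (
	"errors"
	"fmt"
)

var errDetachedHead = errors.New("operation not supported in detached head")

// parseSpec and resolveSpec stand in for doltdb.NewCommitSpec and ddb.Resolve.
func parseSpec(s string) error {
	if s == "" {
		return errors.New("empty spec")
	}
	return nil
}

func resolveSpec(s string) error { return nil }

// isValidRef mirrors IsValidRef's error handling: parse or resolve failures simply
// mean "invalid ref" (false, nil); only unexpected errors from reading the current
// head are returned to the caller, and the detached-HEAD case is tolerated.
func isValidRef(spec string, headErr error) (bool, error) {
	if err := parseSpec(spec); err != nil {
		return false, nil
	}
	if headErr != nil && !errors.Is(headErr, errDetachedHead) {
		return false, headErr
	}
	if err := resolveSpec(spec); err != nil {
		return false, nil
	}
	return true, nil
}

func main() {
	fmt.Println(isValidRef("main", nil)) // true <nil>
	fmt.Println(isValidRef("", nil))     // false <nil>
}
```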
clickhouse_output.go
descMap[c] = "" } for rows.Next() { values := make([]interface{}, 0) for range columns { var a string values = append(values, &a) } if err := rows.Scan(values...); err != nil { glog.Fatalf("scan rows error: %s", err) } descMap := make(map[string]string) for i, c := range columns { value := *values[i].(*string) if c == "type" { // 特殊处理枚举类型 if strings.HasPrefix(value, "Enum16") { value = "Enum16" } else if strings.HasPrefix(value, "Enum8") { value = "Enum8" } } descMap[c] = value } b, err := json.Marshal(descMap) if err != nil { glog.Fatalf("marshal desc error: %s", err) } rowDesc := rowDesc{} err = json.Unmarshal(b, &rowDesc) if err != nil { glog.Fatalf("marshal desc error: %s", err) } glog.V(5).Infof("row desc: %#v", rowDesc) c.desc[rowDesc.Name] = &rowDesc } for key1, value1 := range c.desc { switch value1.Type { case "Int64", "UInt64", "Int32", "UInt32", "Int16", "UInt16", "Int8", "UInt8", "Nullable(Int64)", "Nullable(Int32)", "Nullable(Int16)", "Nullable(Int8)": c.transIntColumn = append(c.transIntColumn, key1) case "Array(Int64)", "Array(Int32)", "Array(Int16)", "Array(Int8)": c.transIntArrayColumn = append(c.transIntArrayColumn, key1) case "Float64", "Float32", "Nullable(Float32)", "Nullable(Float64)": c.transFloatColumn = append(c.transFloatColumn, key1) } } if len(c.fields) == 0 { for key1 := range c.desc { c.fields = append(c.fields, key1) } } return } } // TODO only string, number and ip DEFAULT expression is supported for now func (c *ClickhouseOutput) setColumnDefault() { c.setTableDesc() c.defaultValue = make(map[string]interface{}) var defaultValue *string for columnName, d := range c.desc { switch d.DefaultType { case "DEFAULT": defaultValue = &(d.DefaultExpression) case "MATERIALIZED": glog.Fatal("parse default value: MATERIALIZED expression not supported") case "ALIAS": glog.Fatal("parse default value: ALIAS expression not supported") case "": defaultValue = nil default: glog.Fatal("parse default value: only DEFAULT expression supported") } switch d.Type { case "String", "LowCardinality(String)": if defaultValue == nil { c.defaultValue[columnName] = "" } else { c.defaultValue[columnName] = *defaultValue } case "Date", "DateTime", "DateTime64": c.defaultValue[columnName] = time.Unix(0, 0) case "Nullable(Int64)", "Nullable(Int32)", "Nullable(Int16)", "Nullable(Int8)", "Nullable(Float32)", "Nullable(Float64)": c.defaultValue[columnName] = nil case "UInt8", "UInt16", "UInt32", "UInt64", "Int8", "Int16", "Int32", "Int64": if defaultValue == nil { c.defaultValue[columnName] = 0 } else { i, e := strconv.ParseInt(*defaultValue, 10, 64) if e == nil { c.defaultValue[columnName] = i } else { glog.Fatalf("parse default value `%v` error: %v", defaultValue, e) } } case "Float32", "Float64": if defaultValue == nil { c.defaultValue[columnName] = 0.0 } else { i, e := strconv.ParseFloat(*defaultValue, 64) if e == nil { c.defaultValue[columnName] = i } else { glog.Fatalf("parse default value `%v` error: %v", defaultValue, e) } } case "IPv4": c.defaultValue[columnName] = "0.0.0.0" case "IPv6": c.defaultValue[columnName] = "::" case "Array(String)", "Array(IPv4)", "Array(IPv6)", "Array(Date)", "Array(DateTime)": c.defaultValue[columnName] = clickhouse.Array([]string{}) case "Array(UInt8)": c.defaultValue[columnName] = clickhouse.Array([]uint8{}) case "Array(UInt16)": c.defaultValue[columnName] = clickhouse.Array([]uint16{}) case "Array(UInt32)": c.defaultValue[columnName] = clickhouse.Array([]uint32{}) case "Array(UInt64)": c.defaultValue[columnName] = clickhouse.Array([]uint64{}) case 
"Array(Int8)": c.defaultValue[columnName] = clickhouse.Array([]int8{}) case "Array(Int16)": c.defaultValue[columnName] = clickhouse.Array([]int16{}) case "Array(Int32)": c.defaultValue[columnName] = clickhouse.Array([]int32{}) case "Array(Int64)": c.defaultValue[columnName] = clickhouse.Array([]int64{}) case "Array(Float32)": c.defaultValue[columnName] = clickhouse.Array([]float32{}) case "Array(Float64)": c.defaultValue[columnName] = clickhouse.Array([]float64{}) case "Enum16": // 需要要求列声明的最小枚举值为 '' c.defaultValue[columnName] = "" case "Enum8": // 需要要求列声明的最小枚举值为 '' c.defaultValue[columnName] = "" default: glog.Errorf("column: %s, type: %s. unsupported column type, ignore.", columnName, d.Type) continue } } } func (c *ClickhouseOutput) getDatabase() string { dbAndTable := strings.Split(c.table, ".") dbName := "default" if len(dbAndTable) == 2 { dbName = dbAndTable[0] } return dbName } func init() { Register("Clickhouse", newClickhouseOutput) } func newClickhouseOutput(config map[interface{}]interface{}) topology.Output { rand.Seed(time.Now().UnixNano()) p := &ClickhouseOutput{ config: config, } if v, ok := config["fields"]; ok { for _, f := range v.([]interface{}) { p.fields = append(p.fields, f.(string)) } } if v, ok := config["auto_convert"]; ok { p.autoConvert = v.(bool) } else { p.autoConvert = true } if v, ok := config["table"]; ok { p.table = v.(string) } else { glog.Fatalf("table must be set in clickhouse output") } if v, ok := config["hosts"]; ok { for _, h := range v.([]interface{}) { p.hosts = append(p.hosts, h.(string)) } } else { glog.Fatalf("hosts must be set in clickhouse output") } if v, ok := config["username"]; ok { p.username = v.(string) } if v, ok := config["password"]; ok { p.password = v.(string) } debug := false if v, ok := config["debug"]; ok { debug = v.(bool) } connMaxLifetime := 0 if v, ok := config["conn_max_life_time"]; ok { connMaxLifetime = v.(int) } dbs := make([]*sql.DB, 0) for _, host := range p.hosts { dataSourceName := fmt.Sprintf("%s?database=%s&username=%s&password=%s&debug=%v", host, p.getDatabase(), p.username, p.password, debug) if db, err := sql.Open("clickhouse", dataSourceName); err == nil { if err := db.Ping(); err != nil { if exception, ok := err.(*clickhouse.Exception); ok { glog.Errorf("[%d] %s \n%s
{ nextdb := c.dbSelector.Next() db := nextdb.(*sql.DB) rows, err := db.Query(query) if err != nil { glog.Errorf("query %q error: %s", query, err) continue } defer rows.Close() columns, err := rows.Columns() if err != nil { glog.Fatalf("could not get columns from query `%s`: %s", query, err) } glog.V(10).Infof("desc table columns: %v", columns) descMap := make(map[string]string) for _, c := range columns {
conditional_block
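setColumnDefault in the clickhouse_output.go rows maps each column type from the table description to a zero value: empty string for String, the Unix epoch for Date/DateTime, 0 (or the parsed DEFAULT expression) for integer types, nil for Nullable numerics, and so on. A simplified standalone sketch of that type-to-default switch with an abbreviated type list, using only the standard library instead of the driver's Array helper:

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// defaultFor returns a zero value for a ClickHouse column type, optionally honouring
// a DEFAULT expression for numeric columns, roughly as setColumnDefault does.
func defaultFor(colType string, defaultExpr *string) (interface{}, error) {
	switch colType {
	case "String", "LowCardinality(String)":
		if defaultExpr != nil {
			return *defaultExpr, nil
		}
		return "", nil
	case "Date", "DateTime", "DateTime64":
		return time.Unix(0, 0), nil
	case "Nullable(Int64)", "Nullable(Float64)":
		return nil, nil
	case "UInt8", "UInt16", "UInt32", "UInt64", "Int8", "Int16", "Int32", "Int64":
		if defaultExpr != nil {
			return strconv.ParseInt(*defaultExpr, 10, 64)
		}
		return 0, nil
	case "IPv4":
		return "0.0.0.0", nil
	default:
		return nil, fmt.Errorf("unsupported column type %q", colType)
	}
}

func main() {
	expr := "42"
	v, _ := defaultFor("Int32", &expr)
	fmt.Println(v) // 42
}
```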
clickhouse_output.go
} b, err := json.Marshal(descMap) if err != nil { glog.Fatalf("marshal desc error: %s", err) } rowDesc := rowDesc{} err = json.Unmarshal(b, &rowDesc) if err != nil { glog.Fatalf("marshal desc error: %s", err) } glog.V(5).Infof("row desc: %#v", rowDesc) c.desc[rowDesc.Name] = &rowDesc } for key1, value1 := range c.desc { switch value1.Type { case "Int64", "UInt64", "Int32", "UInt32", "Int16", "UInt16", "Int8", "UInt8", "Nullable(Int64)", "Nullable(Int32)", "Nullable(Int16)", "Nullable(Int8)": c.transIntColumn = append(c.transIntColumn, key1) case "Array(Int64)", "Array(Int32)", "Array(Int16)", "Array(Int8)": c.transIntArrayColumn = append(c.transIntArrayColumn, key1) case "Float64", "Float32", "Nullable(Float32)", "Nullable(Float64)": c.transFloatColumn = append(c.transFloatColumn, key1) } } if len(c.fields) == 0 { for key1 := range c.desc { c.fields = append(c.fields, key1) } } return } } // TODO only string, number and ip DEFAULT expression is supported for now func (c *ClickhouseOutput) setColumnDefault() { c.setTableDesc() c.defaultValue = make(map[string]interface{}) var defaultValue *string for columnName, d := range c.desc { switch d.DefaultType { case "DEFAULT": defaultValue = &(d.DefaultExpression) case "MATERIALIZED": glog.Fatal("parse default value: MATERIALIZED expression not supported") case "ALIAS": glog.Fatal("parse default value: ALIAS expression not supported") case "": defaultValue = nil default: glog.Fatal("parse default value: only DEFAULT expression supported") } switch d.Type { case "String", "LowCardinality(String)": if defaultValue == nil { c.defaultValue[columnName] = "" } else { c.defaultValue[columnName] = *defaultValue } case "Date", "DateTime", "DateTime64": c.defaultValue[columnName] = time.Unix(0, 0) case "Nullable(Int64)", "Nullable(Int32)", "Nullable(Int16)", "Nullable(Int8)", "Nullable(Float32)", "Nullable(Float64)": c.defaultValue[columnName] = nil case "UInt8", "UInt16", "UInt32", "UInt64", "Int8", "Int16", "Int32", "Int64": if defaultValue == nil { c.defaultValue[columnName] = 0 } else { i, e := strconv.ParseInt(*defaultValue, 10, 64) if e == nil { c.defaultValue[columnName] = i } else { glog.Fatalf("parse default value `%v` error: %v", defaultValue, e) } } case "Float32", "Float64": if defaultValue == nil { c.defaultValue[columnName] = 0.0 } else { i, e := strconv.ParseFloat(*defaultValue, 64) if e == nil { c.defaultValue[columnName] = i } else { glog.Fatalf("parse default value `%v` error: %v", defaultValue, e) } } case "IPv4": c.defaultValue[columnName] = "0.0.0.0" case "IPv6": c.defaultValue[columnName] = "::" case "Array(String)", "Array(IPv4)", "Array(IPv6)", "Array(Date)", "Array(DateTime)": c.defaultValue[columnName] = clickhouse.Array([]string{}) case "Array(UInt8)": c.defaultValue[columnName] = clickhouse.Array([]uint8{}) case "Array(UInt16)": c.defaultValue[columnName] = clickhouse.Array([]uint16{}) case "Array(UInt32)": c.defaultValue[columnName] = clickhouse.Array([]uint32{}) case "Array(UInt64)": c.defaultValue[columnName] = clickhouse.Array([]uint64{}) case "Array(Int8)": c.defaultValue[columnName] = clickhouse.Array([]int8{}) case "Array(Int16)": c.defaultValue[columnName] = clickhouse.Array([]int16{}) case "Array(Int32)": c.defaultValue[columnName] = clickhouse.Array([]int32{}) case "Array(Int64)": c.defaultValue[columnName] = clickhouse.Array([]int64{}) case "Array(Float32)": c.defaultValue[columnName] = clickhouse.Array([]float32{}) case "Array(Float64)": c.defaultValue[columnName] = clickhouse.Array([]float64{}) case "Enum16": // 
the column declaration must define '' as its smallest enum value c.defaultValue[columnName] = "" case "Enum8": // the column declaration must define '' as its smallest enum value c.defaultValue[columnName] = "" default: glog.Errorf("column: %s, type: %s. unsupported column type, ignore.", columnName, d.Type) continue } } } func (c *ClickhouseOutput) getDatabase() string { dbAndTable := strings.Split(c.table, ".") dbNam

lt" if len(dbAndTable) == 2 { dbName = dbAndTable[0] } return dbName } func init() { Register("Clickhouse", newClickhouseOutput) } func newClickhouseOutput(config map[interface{}]interface{}) topology.Output { rand.Seed(time.Now().UnixNano()) p := &ClickhouseOutput{ config: config, } if v, ok := config["fields"]; ok { for _, f := range v.([]interface{}) { p.fields = append(p.fields, f.(string)) } } if v, ok := config["auto_convert"]; ok { p.autoConvert = v.(bool) } else { p.autoConvert = true } if v, ok := config["table"]; ok { p.table = v.(string) } else { glog.Fatalf("table must be set in clickhouse output") } if v, ok := config["hosts"]; ok { for _, h := range v.([]interface{}) { p.hosts = append(p.hosts, h.(string)) } } else { glog.Fatalf("hosts must be set in clickhouse output") } if v, ok := config["username"]; ok { p.username = v.(string) } if v, ok := config["password"]; ok { p.password = v.(string) } debug := false if v, ok := config["debug"]; ok { debug = v.(bool) } connMaxLifetime := 0 if v, ok := config["conn_max_life_time"]; ok { connMaxLifetime = v.(int) } dbs := make([]*sql.DB, 0) for _, host := range p.hosts { dataSourceName := fmt.Sprintf("%s?database=%s&username=%s&password=%s&debug=%v", host, p.getDatabase(), p.username, p.password, debug) if db, err := sql.Open("clickhouse", dataSourceName); err == nil { if err := db.Ping(); err != nil { if exception, ok := err.(*clickhouse.Exception); ok { glog.Errorf("[%d] %s \n%s\n", exception.Code, exception.Message, exception.StackTrace) } else { glog.Errorf("clickhouse ping error: %s", err) } } else { db.SetConnMaxLifetime(time.Second * time.Duration(connMaxLifetime)) dbs = append(dbs, db) } } else { glog.Errorf("open %s error: %s", host, err) } } glog.V(5).Infof("%d available clickhouse hosts", len(dbs)) if len(dbs) == 0 { glog.Fatal("no available host") } dbsI := make([]interface{}, len(dbs)) for i, h := range dbs { dbsI[i] = h } p.dbSelector = NewRRHostSelector(dbsI, 3) p.setColumnDefault() if len(p.fields) <= 0 { glog.Fatalf("fields not set in clickhouse output and could get fields from clickhouse table") } p.fieldsLength = len(p.fields) fields := make([]string, p.fieldsLength) for i := range fields { fields[i] = fmt.Sprintf(`"%s"`, p.fields[i]) } questionMarks := make([]string, p.fieldsLength) for i := 0; i < p.fieldsLength; i++ { questionMarks[i] = "?" } p.query = fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)", p.table, strings.Join(fields
e := "defau
identifier_name
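The getDatabase helper, split across this row's prefix, middle and suffix, reads the database name out of the configured "db.table" string and falls back to "default" when the table name is unqualified. A tiny self-contained version of that split-and-fallback, with a usage example (table names are illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

// databaseOf returns the database part of a "db.table" name, or "default"
// when no database qualifier is present, matching getDatabase's behaviour.
func databaseOf(table string) string {
	parts := strings.Split(table, ".")
	if len(parts) == 2 {
		return parts[0]
	}
	return "default"
}

func main() {
	fmt.Println(databaseOf("logs.nginx_access")) // logs
	fmt.Println(databaseOf("nginx_access"))      // default
}
```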
clickhouse_output.go
} b, err := json.Marshal(descMap) if err != nil { glog.Fatalf("marshal desc error: %s", err) } rowDesc := rowDesc{} err = json.Unmarshal(b, &rowDesc) if err != nil { glog.Fatalf("marshal desc error: %s", err) } glog.V(5).Infof("row desc: %#v", rowDesc) c.desc[rowDesc.Name] = &rowDesc } for key1, value1 := range c.desc { switch value1.Type { case "Int64", "UInt64", "Int32", "UInt32", "Int16", "UInt16", "Int8", "UInt8", "Nullable(Int64)", "Nullable(Int32)", "Nullable(Int16)", "Nullable(Int8)": c.transIntColumn = append(c.transIntColumn, key1) case "Array(Int64)", "Array(Int32)", "Array(Int16)", "Array(Int8)": c.transIntArrayColumn = append(c.transIntArrayColumn, key1) case "Float64", "Float32", "Nullable(Float32)", "Nullable(Float64)": c.transFloatColumn = append(c.transFloatColumn, key1) } } if len(c.fields) == 0 { for key1 := range c.desc { c.fields = append(c.fields, key1) } } return } } // TODO only string, number and ip DEFAULT expression is supported for now func (c *ClickhouseOutput) setColumnDefault() { c.setTableDesc() c.defaultValue = make(map[string]interface{}) var defaultValue *string for columnName, d := range c.desc { switch d.DefaultType { case "DEFAULT": defaultValue = &(d.DefaultExpression) case "MATERIALIZED": glog.Fatal("parse default value: MATERIALIZED expression not supported") case "ALIAS": glog.Fatal("parse default value: ALIAS expression not supported") case "": defaultValue = nil default: glog.Fatal("parse default value: only DEFAULT expression supported") } switch d.Type { case "String", "LowCardinality(String)": if defaultValue == nil { c.defaultValue[columnName] = "" } else { c.defaultValue[columnName] = *defaultValue } case "Date", "DateTime", "DateTime64": c.defaultValue[columnName] = time.Unix(0, 0) case "Nullable(Int64)", "Nullable(Int32)", "Nullable(Int16)", "Nullable(Int8)", "Nullable(Float32)", "Nullable(Float64)": c.defaultValue[columnName] = nil case "UInt8", "UInt16", "UInt32", "UInt64", "Int8", "Int16", "Int32", "Int64": if defaultValue == nil { c.defaultValue[columnName] = 0 } else { i, e := strconv.ParseInt(*defaultValue, 10, 64) if e == nil { c.defaultValue[columnName] = i } else { glog.Fatalf("parse default value `%v` error: %v", defaultValue, e) } } case "Float32", "Float64": if defaultValue == nil { c.defaultValue[columnName] = 0.0 } else { i, e := strconv.ParseFloat(*defaultValue, 64) if e == nil { c.defaultValue[columnName] = i } else { glog.Fatalf("parse default value `%v` error: %v", defaultValue, e) } } case "IPv4": c.defaultValue[columnName] = "0.0.0.0" case "IPv6": c.defaultValue[columnName] = "::" case "Array(String)", "Array(IPv4)", "Array(IPv6)", "Array(Date)", "Array(DateTime)": c.defaultValue[columnName] = clickhouse.Array([]string{}) case "Array(UInt8)": c.defaultValue[columnName] = clickhouse.Array([]uint8{}) case "Array(UInt16)": c.defaultValue[columnName] = clickhouse.Array([]uint16{}) case "Array(UInt32)": c.defaultValue[columnName] = clickhouse.Array([]uint32{}) case "Array(UInt64)": c.defaultValue[columnName] = clickhouse.Array([]uint64{}) case "Array(Int8)": c.defaultValue[columnName] = clickhouse.Array([]int8{}) case "Array(Int16)": c.defaultValue[columnName] = clickhouse.Array([]int16{}) case "Array(Int32)": c.defaultValue[columnName] = clickhouse.Array([]int32{}) case "Array(Int64)": c.defaultValue[columnName] = clickhouse.Array([]int64{}) case "Array(Float32)": c.defaultValue[columnName] = clickhouse.Array([]float32{}) case "Array(Float64)": c.defaultValue[columnName] = clickhouse.Array([]float64{}) case "Enum16": // 
the column declaration must define '' as its smallest enum value c.defaultValue[columnName] = "" case "Enum8": // the column declaration must define '' as its smallest enum value c.defaultValue[columnName] = "" default: glog.Errorf("column: %s, type: %s. unsupported column type, ignore.", columnName, d.Type) continue } } } func (c *ClickhouseOutput) getDatabase() string { dbAndTable := strings.Split(c.table, ".") dbName := "default" if len(dbAndTable) == 2 { dbName = dbAndTable[0] } return dbName } func init() { Register("Clickhouse", newClickhouseOutput) } func newClickhouseOutput(config map[interface{}]interface{}) topology.Output { rand.Seed(time.Now().UnixNano()) p := &ClickhouseOutput{ config: c
if v, ok := config["hosts"]; ok { for _, h := range v.([]interface{}) { p.hosts = append(p.hosts, h.(string)) } } else { glog.Fatalf("hosts must be set in clickhouse output") } if v, ok := config["username"]; ok { p.username = v.(string) } if v, ok := config["password"]; ok { p.password = v.(string) } debug := false if v, ok := config["debug"]; ok { debug = v.(bool) } connMaxLifetime := 0 if v, ok := config["conn_max_life_time"]; ok { connMaxLifetime = v.(int) } dbs := make([]*sql.DB, 0) for _, host := range p.hosts { dataSourceName := fmt.Sprintf("%s?database=%s&username=%s&password=%s&debug=%v", host, p.getDatabase(), p.username, p.password, debug) if db, err := sql.Open("clickhouse", dataSourceName); err == nil { if err := db.Ping(); err != nil { if exception, ok := err.(*clickhouse.Exception); ok { glog.Errorf("[%d] %s \n%s\n", exception.Code, exception.Message, exception.StackTrace) } else { glog.Errorf("clickhouse ping error: %s", err) } } else { db.SetConnMaxLifetime(time.Second * time.Duration(connMaxLifetime)) dbs = append(dbs, db) } } else { glog.Errorf("open %s error: %s", host, err) } } glog.V(5).Infof("%d available clickhouse hosts", len(dbs)) if len(dbs) == 0 { glog.Fatal("no available host") } dbsI := make([]interface{}, len(dbs)) for i, h := range dbs { dbsI[i] = h } p.dbSelector = NewRRHostSelector(dbsI, 3) p.setColumnDefault() if len(p.fields) <= 0 { glog.Fatalf("fields not set in clickhouse output and could get fields from clickhouse table") } p.fieldsLength = len(p.fields) fields := make([]string, p.fieldsLength) for i := range fields { fields[i] = fmt.Sprintf(`"%s"`, p.fields[i]) } questionMarks := make([]string, p.fieldsLength) for i := 0; i < p.fieldsLength; i++ { questionMarks[i] = "?" } p.query = fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)", p.table, strings.Join(fields,
onfig, } if v, ok := config["fields"]; ok { for _, f := range v.([]interface{}) { p.fields = append(p.fields, f.(string)) } } if v, ok := config["auto_convert"]; ok { p.autoConvert = v.(bool) } else { p.autoConvert = true } if v, ok := config["table"]; ok { p.table = v.(string) } else { glog.Fatalf("table must be set in clickhouse output") }
identifier_body
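newClickhouseOutput ends by double-quoting every configured field, emitting one "?" placeholder per field, and joining both lists into the prepared INSERT statement. A short sketch of that query assembly (table and field names are illustrative; %q is used here in place of the original's manual quoting):

```go
package main

import (
	"fmt"
	"strings"
)

// buildInsertQuery reproduces the statement shape built at the end of newClickhouseOutput:
// INSERT INTO <table> ("f1","f2",...) VALUES (?,?,...)
func buildInsertQuery(table string, fields []string) string {
	quoted := make([]string, len(fields))
	marks := make([]string, len(fields))
	for i, f := range fields {
		quoted[i] = fmt.Sprintf("%q", f)
		marks[i] = "?"
	}
	return fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)",
		table, strings.Join(quoted, ","), strings.Join(marks, ","))
}

func main() {
	q := buildInsertQuery("default.logs", []string{"timestamp", "level", "message"})
	fmt.Println(q) // INSERT INTO default.logs ("timestamp","level","message") VALUES (?,?,?)
}
```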
clickhouse_output.go
}
} rowDesc := rowDesc{} err = json.Unmarshal(b, &rowDesc) if err != nil { glog.Fatalf("marshal desc error: %s", err) } glog.V(5).Infof("row desc: %#v", rowDesc) c.desc[rowDesc.Name] = &rowDesc } for key1, value1 := range c.desc { switch value1.Type { case "Int64", "UInt64", "Int32", "UInt32", "Int16", "UInt16", "Int8", "UInt8", "Nullable(Int64)", "Nullable(Int32)", "Nullable(Int16)", "Nullable(Int8)": c.transIntColumn = append(c.transIntColumn, key1) case "Array(Int64)", "Array(Int32)", "Array(Int16)", "Array(Int8)": c.transIntArrayColumn = append(c.transIntArrayColumn, key1) case "Float64", "Float32", "Nullable(Float32)", "Nullable(Float64)": c.transFloatColumn = append(c.transFloatColumn, key1) } } if len(c.fields) == 0 { for key1 := range c.desc { c.fields = append(c.fields, key1) } } return } } // TODO only string, number and ip DEFAULT expression is supported for now func (c *ClickhouseOutput) setColumnDefault() { c.setTableDesc() c.defaultValue = make(map[string]interface{}) var defaultValue *string for columnName, d := range c.desc { switch d.DefaultType { case "DEFAULT": defaultValue = &(d.DefaultExpression) case "MATERIALIZED": glog.Fatal("parse default value: MATERIALIZED expression not supported") case "ALIAS": glog.Fatal("parse default value: ALIAS expression not supported") case "": defaultValue = nil default: glog.Fatal("parse default value: only DEFAULT expression supported") } switch d.Type { case "String", "LowCardinality(String)": if defaultValue == nil { c.defaultValue[columnName] = "" } else { c.defaultValue[columnName] = *defaultValue } case "Date", "DateTime", "DateTime64": c.defaultValue[columnName] = time.Unix(0, 0) case "Nullable(Int64)", "Nullable(Int32)", "Nullable(Int16)", "Nullable(Int8)", "Nullable(Float32)", "Nullable(Float64)": c.defaultValue[columnName] = nil case "UInt8", "UInt16", "UInt32", "UInt64", "Int8", "Int16", "Int32", "Int64": if defaultValue == nil { c.defaultValue[columnName] = 0 } else { i, e := strconv.ParseInt(*defaultValue, 10, 64) if e == nil { c.defaultValue[columnName] = i } else { glog.Fatalf("parse default value `%v` error: %v", defaultValue, e) } } case "Float32", "Float64": if defaultValue == nil { c.defaultValue[columnName] = 0.0 } else { i, e := strconv.ParseFloat(*defaultValue, 64) if e == nil { c.defaultValue[columnName] = i } else { glog.Fatalf("parse default value `%v` error: %v", defaultValue, e) } } case "IPv4": c.defaultValue[columnName] = "0.0.0.0" case "IPv6": c.defaultValue[columnName] = "::" case "Array(String)", "Array(IPv4)", "Array(IPv6)", "Array(Date)", "Array(DateTime)": c.defaultValue[columnName] = clickhouse.Array([]string{}) case "Array(UInt8)": c.defaultValue[columnName] = clickhouse.Array([]uint8{}) case "Array(UInt16)": c.defaultValue[columnName] = clickhouse.Array([]uint16{}) case "Array(UInt32)": c.defaultValue[columnName] = clickhouse.Array([]uint32{}) case "Array(UInt64)": c.defaultValue[columnName] = clickhouse.Array([]uint64{}) case "Array(Int8)": c.defaultValue[columnName] = clickhouse.Array([]int8{}) case "Array(Int16)": c.defaultValue[columnName] = clickhouse.Array([]int16{}) case "Array(Int32)": c.defaultValue[columnName] = clickhouse.Array([]int32{}) case "Array(Int64)": c.defaultValue[columnName] = clickhouse.Array([]int64{}) case "Array(Float32)": c.defaultValue[columnName] = clickhouse.Array([]float32{}) case "Array(Float64)": c.defaultValue[columnName] = clickhouse.Array([]float64{}) case "Enum16": // 需要要求列声明的最小枚举值为 '' c.defaultValue[columnName] = "" case "Enum8": // 需要要求列声明的最小枚举值为 '' 
c.defaultValue[columnName] = "" default: glog.Errorf("column: %s, type: %s. unsupported column type, ignore.", columnName, d.Type) continue } } } func (c *ClickhouseOutput) getDatabase() string { dbAndTable := strings.Split(c.table, ".") dbName := "default" if len(dbAndTable) == 2 { dbName = dbAndTable[0] } return dbName } func init() { Register("Clickhouse", newClickhouseOutput) } func newClickhouseOutput(config map[interface{}]interface{}) topology.Output { rand.Seed(time.Now().UnixNano()) p := &ClickhouseOutput{ config: config, } if v, ok := config["fields"]; ok { for _, f := range v.([]interface{}) { p.fields = append(p.fields, f.(string)) } } if v, ok := config["auto_convert"]; ok { p.autoConvert = v.(bool) } else { p.autoConvert = true } if v, ok := config["table"]; ok { p.table = v.(string) } else { glog.Fatalf("table must be set in clickhouse output") } if v, ok := config["hosts"]; ok { for _, h := range v.([]interface{}) { p.hosts = append(p.hosts, h.(string)) } } else { glog.Fatalf("hosts must be set in clickhouse output") } if v, ok := config["username"]; ok { p.username = v.(string) } if v, ok := config["password"]; ok { p.password = v.(string) } debug := false if v, ok := config["debug"]; ok { debug = v.(bool) } connMaxLifetime := 0 if v, ok := config["conn_max_life_time"]; ok { connMaxLifetime = v.(int) } dbs := make([]*sql.DB, 0) for _, host := range p.hosts { dataSourceName := fmt.Sprintf("%s?database=%s&username=%s&password=%s&debug=%v", host, p.getDatabase(), p.username, p.password, debug) if db, err := sql.Open("clickhouse", dataSourceName); err == nil { if err := db.Ping(); err != nil { if exception, ok := err.(*clickhouse.Exception); ok { glog.Errorf("[%d] %s \n%s\n", exception.Code, exception.Message, exception.StackTrace) } else { glog.Errorf("clickhouse ping error: %s", err) } } else { db.SetConnMaxLifetime(time.Second * time.Duration(connMaxLifetime)) dbs = append(dbs, db) } } else { glog.Errorf("open %s error: %s", host, err) } } glog.V(5).Infof("%d available clickhouse hosts", len(dbs)) if len(dbs) == 0 { glog.Fatal("no available host") } dbsI := make([]interface{}, len(dbs)) for i, h := range dbs { dbsI[i] = h } p.dbSelector = NewRRHostSelector(dbsI, 3) p.setColumnDefault() if len(p.fields) <= 0 { glog.Fatalf("fields not set in clickhouse output and could get fields from clickhouse table") } p.fieldsLength = len(p.fields) fields := make([]string, p.fieldsLength) for i := range fields { fields[i] = fmt.Sprintf(`"%s"`, p.fields[i]) } questionMarks := make([]string, p.fieldsLength) for i := 0; i < p.fieldsLength; i++ { questionMarks[i] = "?" } p.query = fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)", p.table, strings.Join(fields, ","
b, err := json.Marshal(descMap) if err != nil { glog.Fatalf("marshal desc error: %s", err)
random_line_split
privacy-statement.js
<td> any applicable law relating to the processing of personal Data, including but not limited to the Directive 96/46/EC (Data Protection Directive) or the GDPR, and any national implementing laws, regulations and secondary legislation, for as long as the GDPR is effective in the UK; </td> </tr> <tr> <th scope="row">GDPR</th> <td>the General Data Protection Regulation (EU) 2016/679;</td> </tr> <tr> <th scope="row">Jugendstil Ltd, we or us</th> <td> Jugendstil Ltd, a company incorporated in England and Wales with registered number 11226642 whose registered office is at 20-22 Wenlock Road, N1 7GU London; </td> </tr> <tr> <th scope="row">UK and EU Cookie Law</th> <td> the Privacy and Electronic Communications (EC Directive) Regulations 2003 as amended by the Privacy and Electronic Communications (EC Directive) (Amendment) Regulations 2011; </td> </tr> <tr> <th scope="row">User or you</th> <td> any third party that accesses the Website and is not either (i) employed by Jugendstil Ltd and acting in the course of their employment or (ii) engaged as a consultant or otherwise providing services to Jugendstil Ltd and accessing the Website in connection with the provision of such services; and </td> </tr> <tr> <th scope="row">Website</th> <td> the website that you are currently using, https://jugendstil.io, and any sub-domains of this site unless expressly excluded by their own terms and conditions. </td> </tr> </tbody> </table> <p> In this privacy policy, unless the context requires a different interpretation: </p> <ol> <li>the singular includes the plural and vice versa;</li> <li> references to sub-clauses, clauses, schedules or appendices are to sub-clauses, clauses, schedules or appendices of this privacy policy; </li> <li> a reference to a person includes firms, companies, government entities, trusts and partnerships; </li> <li> "including" is understood to mean "including without limitation"; </li> <li> reference to any statutory provision includes any modification or amendment of it; </li> <li> the headings and sub-headings do not form part of this privacy policy. </li> </ol> <h2>Scope of this privacy policy</h2> <p> This privacy policy applies only to the actions of Jugendstil Ltd and Users with respect to this Website. It does not extend to any websites that can be accessed from this Website including, but not limited to, any links we may provide to social media websites. </p> <p> For purposes of the applicable Data Protection Laws, Jugendstil Ltd is the "data controller". This means that Jugendstil Ltd determines the purposes for which, and the manner in which, your Data is processed. 
</p> <h2>Data collected</h2> <p> We may collect the following Data, which includes personal Data, from you: </p> <ol> <li>name;</li> <li>date of birth;</li> <li>gender;</li> <li>job title;</li> <li>profession;</li> <li> contact Information such as email addresses and telephone numbers; </li> <li> demographic information such as postcode, preferences and interests; </li> <li>financial information such as credit / debit card numbers;</li> <li>IP address (automatically collected);</li> <li>web browser type and version (automatically collected);</li> <li>operating system (automatically collected);</li> <li> a list of URLs starting with a referring site, your activity on this Website, and the site you exit to (automatically collected); </li> </ol> <p>in each case, in accordance with this privacy policy.</p> <h2>How we collect Data</h2> <p>We collect Data in the following ways:</p> <ol> <li>data is given to us by you; and</li> <li>data is collected automatically.</li> </ol> <h2>Data that is given to us by you</h2> <p> Jugendstil Ltd will collect your Data in a number of ways, for example: </p> <ol> <li> when you contact us through the Website, by telephone, post, e-mail or through any other means; </li> <li> when you register with us and set up an account to receive our products/services; </li> <li> when you complete surveys that we use for research purposes (although you are not obliged to respond to them); </li> <li> when you enter a competition or promotion through a social media channel; </li> <li> when you make payments to us, through this Website or otherwise; </li> <li>when you elect to receive marketing communications from us;</li> <li>when you use our services;</li> </ol> <p>in each case, in accordance with this privacy policy.</p> <h2>Data that is collected automatically</h2> <p> To the extent that you access the Website, we will collect your Data automatically, for example: </p> <ol> <li> we automatically collect some information about your visit to the Website. This information helps us to make improvements to Website content and navigation, and includes your IP address, the date, times and frequency with which you access the Website and the way you use and interact with its content. </li> <li> we will collect your Data automatically via cookies, in line with the cookie settings on your browser. For more information about cookies, and how we use them on the Website, see the section below, headed "Cookies". </li> </ol> <h2>Our use of Data</h2> <p> Any or all of the above Data may be required by us from time to time in order to provide you with the best possible service and experience when using our Website. Specifically, Data may be used by us for the following reasons: </p> <ol> <li>internal record keeping;</li> <li>improvement of our products / services;</li> <li> transmission by email of marketing materials that may be of interest to you; </li> <li> contact for market research purposes which may be done using email, telephone, fax or mail. Such information may be used to customise or update the Website; </li> </ol> <p>in each case, in accordance with this privacy policy.</p> <p> We may use your Data for the above purposes if we deem it necessary to do so for our legitimate interests. If you are not satisfied with this, you have the right to object in certain circumstances (see the section headed "Your rights" below). 
</p> <p> For the delivery of direct marketing to you via e-mail, we'll need your consent, whether via an opt-in or soft-opt-in: </p> <ol> <li> soft opt-in consent is a specific type of consent which applies when you have previously engaged with us (for example, you contact us to ask us for more details about a particular product/service, and we are marketing similar products/services). Under "soft opt-in" consent, we will take your consent as given unless you opt-out. </li> <li> for other types of e-marketing, we are required to obtain your explicit consent; that is, you need to take positive and affirmative action when consenting by, for example, checking a tick box that we'll provide. </li> <li> if you are not satisfied about our approach to marketing, you have the right to withdraw consent at any time. To find out how to withdraw your consent, see the section headed "Your rights" below. </li> </ol> <p> When you
by this Website are set out in the clause below (Cookies); </td> </tr> <tr> <th scope="row">Data Protection Laws</th>
random_line_split
lib.rs
fn get_info_string<R: Reader>( view: &BinaryView, dwarf: &Dwarf<R>, unit: &Unit<R>, die_node: &DebuggingInformationEntry<R>, ) -> Vec<DisassemblyTextLine> { let mut disassembly_lines: Vec<DisassemblyTextLine> = Vec::with_capacity(10); // This is an estimate so "most" things won't need to resize let label_value = match die_node.offset().to_unit_section_offset(unit) { UnitSectionOffset::DebugInfoOffset(o) => o.0, UnitSectionOffset::DebugTypesOffset(o) => o.0, } .into_u64(); let label_string = format!("#0x{:08x}", label_value); disassembly_lines.push(DisassemblyTextLine::from(vec![ InstructionTextToken::new( BnString::new(label_string), InstructionTextTokenContents::GotoLabel(label_value), ), InstructionTextToken::new(BnString::new(":"), InstructionTextTokenContents::Text), ])); disassembly_lines.push(DisassemblyTextLine::from(vec![InstructionTextToken::new( BnString::new(die_node.tag().static_string().unwrap()), InstructionTextTokenContents::TypeName, // TODO : KeywordToken? )])); let mut attrs = die_node.attrs(); while let Some(attr) = attrs.next().unwrap() { let mut attr_line: Vec<InstructionTextToken> = Vec::with_capacity(5); attr_line.push(InstructionTextToken::new( BnString::new(" "), InstructionTextTokenContents::Indentation, )); let len; if let Some(n) = attr.name().static_string() { len = n.len(); attr_line.push(InstructionTextToken::new( BnString::new(n), InstructionTextTokenContents::FieldName, )); } else { // This is rather unlikely, I think len = 1; attr_line.push(InstructionTextToken::new( BnString::new("?"), InstructionTextTokenContents::FieldName, )); } // On command line the magic number that looks good is 22, but that's too much whitespace in a basic block, so I chose 18 (22 is the max with the current padding provided) if len < 18 { attr_line.push(InstructionTextToken::new( BnString::new(PADDING[18 - len]), InstructionTextTokenContents::Text, )); } attr_line.push(InstructionTextToken::new( BnString::new(" = "), InstructionTextTokenContents::Text, )); if let Ok(Some(addr)) = dwarf.attr_address(unit, attr.value()) { let addr_string = format!("0x{:08x}", addr); attr_line.push(InstructionTextToken::new( BnString::new(addr_string), InstructionTextTokenContents::Integer(addr), )); } else if let Ok(attr_reader) = dwarf.attr_string(unit, attr.value()) { if let Ok(attr_string) = attr_reader.to_string() { attr_line.push(InstructionTextToken::new( BnString::new(attr_string.as_ref()), InstructionTextTokenContents::String({ let (_, id, offset) = dwarf.lookup_offset_id(attr_reader.offset_id()).unwrap(); offset.into_u64() + view.section_by_name(id.name()).unwrap().start() }), )); } else { attr_line.push(InstructionTextToken::new( BnString::new("??"), InstructionTextTokenContents::Text, )); } } else if let Encoding(type_class) = attr.value() { attr_line.push(InstructionTextToken::new( BnString::new(type_class.static_string().unwrap()), InstructionTextTokenContents::TypeName, )); } else if let UnitRef(offset) = attr.value() { let addr = match offset.to_unit_section_offset(unit) { UnitSectionOffset::DebugInfoOffset(o) => o.0, UnitSectionOffset::DebugTypesOffset(o) => o.0, } .into_u64(); let addr_string = format!("#0x{:08x}", addr); attr_line.push(InstructionTextToken::new( BnString::new(addr_string), InstructionTextTokenContents::GotoLabel(addr), )); } else if let Flag(true) = attr.value() { attr_line.push(InstructionTextToken::new( BnString::new("true"), InstructionTextTokenContents::Integer(1), )); } else if let Flag(false) = attr.value() { attr_line.push(InstructionTextToken::new( 
BnString::new("false"), InstructionTextTokenContents::Integer(1), )); // Fall-back cases } else if let Some(value) = attr.u8_value() { let value_string = format!("{}", value); attr_line.push(InstructionTextToken::new( BnString::new(value_string), InstructionTextTokenContents::Integer(value.into()), )); } else if let Some(value) = attr.u16_value() { let value_string = format!("{}", value); attr_line.push(InstructionTextToken::new( BnString::new(value_string), InstructionTextTokenContents::Integer(value.into()), )); } else if let Some(value) = attr.udata_value() { let value_string = format!("{}", value); attr_line.push(InstructionTextToken::new( BnString::new(value_string), InstructionTextTokenContents::Integer(value.into()), )); } else if let Some(value) = attr.sdata_value() { let value_string = format!("{}", value); attr_line.push(InstructionTextToken::new( BnString::new(value_string), InstructionTextTokenContents::Integer(value as u64), )); } else { let attr_string = format!("{:?}", attr.value()); attr_line.push(InstructionTextToken::new( BnString::new(attr_string), InstructionTextTokenContents::Text, )); } disassembly_lines.push(DisassemblyTextLine::from(attr_line)); } disassembly_lines } fn process_tree<R: Reader>( view: &BinaryView, dwarf: &Dwarf<R>, unit: &Unit<R>, graph: &FlowGraph, graph_parent: &FlowGraphNode, die_node: EntriesTreeNode<R>, ) { // Namespaces only - really interesting to look at! // if (die_node.entry().tag() == constants::DW_TAG_namespace) // || (die_node.entry().tag() == constants::DW_TAG_class_type) // || (die_node.entry().tag() == constants::DW_TAG_compile_unit) // || (die_node.entry().tag() == constants::DW_TAG_subprogram) // { let new_node = FlowGraphNode::new(graph); let attr_string = get_info_string(view, dwarf, unit, die_node.entry()); new_node.set_disassembly_lines(&attr_string); graph.append(&new_node); graph_parent.add_outgoing_edge( BranchType::UnconditionalBranch, &new_node, &EdgeStyle::default(), ); let mut children = die_node.children(); while let Some(child) = children.next().unwrap() { process_tree(view, dwarf, unit, graph, &new_node, child); } // } } fn dump_dwarf(bv: &BinaryView) { let view = if bv.section_by_name(".debug_info").is_ok() { bv.to_owned() } else { bv.parent_view().unwrap() }; let graph = FlowGraph::new(); graph.set_option(FlowGraphOption::FlowGraphUsesBlockHighlights, true); graph.set_option(FlowGraphOption::FlowGraphUsesInstructionHighlights, true); let graph_root = FlowGraphNode::new(&graph); graph_root.set_lines(vec!["Graph Root"]); graph.append(&graph_root); let endian = dwarfreader::get_endian(bv); let section_reader = |section_id: SectionId| -> _ { dwarfreader::create_section_reader(section_id, bv, endian, false) }; let dwarf = Dwarf::load(&section_reader).unwrap(); let mut iter = dwarf.units(); while let Some(header) = iter.next().unwrap() { let unit = dwarf.unit(header).unwrap(); let mut entries = unit.entries(); let mut depth = 0; if let Some((delta_depth, entry)) = entries.next_dfs().unwrap() { depth += delta_depth; assert!(depth >= 0); let mut tree = unit.entries_tree(Some(entry.offset())).unwrap(); let root = tree.root().unwrap(); process_tree(&view, &dwarf, &unit, &graph, &graph_root, root); } } view.show_graph_report("DWARF", graph); } struct DWARFDump; impl Command for DWARFDump { fn action(&self, view: &BinaryView) { dump_dwarf(view); } fn valid(&self, view: &BinaryView) -> bool { is_valid(view) } } #[no_mangle] pub extern "C" fn UIPluginInit() -> bool
{ register( "DWARF Dump", "Show embedded DWARF info as a tree structure for you to navigate", DWARFDump {}, ); true }
identifier_body
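The lib.rs row above renders each DWARF DIE as a flow-graph node whose first line is the DIE's section offset formatted as a "#0x{:08x}" goto-label, followed by the tag name. A minimal sketch of just that header-formatting step in plain Python; the Die tuple is a hypothetical stand-in for gimli's DebuggingInformationEntry, not part of the plugin:

from collections import namedtuple

# Hypothetical stand-in for a DWARF DIE: its section offset and tag string.
Die = namedtuple("Die", ["section_offset", "tag"])

def die_header_lines(die):
    # Mirrors the two lines pushed per DIE: "#0x........:" then the tag name.
    label = "#0x{:08x}".format(die.section_offset)
    return [label + ":", die.tag]

print(die_header_lines(Die(0x2a, "DW_TAG_subprogram")))
# -> ['#0x0000002a:', 'DW_TAG_subprogram']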
lib.rs
See the License for the specific language governing permissions and // limitations under the License. use binaryninja::{ binaryview::{BinaryView, BinaryViewExt}, command::{register, Command}, disassembly::{DisassemblyTextLine, InstructionTextToken, InstructionTextTokenContents}, flowgraph::{BranchType, EdgeStyle, FlowGraph, FlowGraphNode, FlowGraphOption}, string::BnString, }; use dwarfreader::is_valid; use gimli::{ AttributeValue::{Encoding, Flag, UnitRef}, // BigEndian, DebuggingInformationEntry, Dwarf, EntriesTreeNode, Reader, ReaderOffset, SectionId, Unit, UnitSectionOffset, }; static PADDING: [&'static str; 23] = [ "", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", ]; // TODO : This is very much not comprehensive: see https://github.com/gimli-rs/gimli/blob/master/examples/dwarfdump.rs fn get_info_string<R: Reader>( view: &BinaryView, dwarf: &Dwarf<R>, unit: &Unit<R>, die_node: &DebuggingInformationEntry<R>, ) -> Vec<DisassemblyTextLine> { let mut disassembly_lines: Vec<DisassemblyTextLine> = Vec::with_capacity(10); // This is an estimate so "most" things won't need to resize let label_value = match die_node.offset().to_unit_section_offset(unit) { UnitSectionOffset::DebugInfoOffset(o) => o.0, UnitSectionOffset::DebugTypesOffset(o) => o.0, } .into_u64(); let label_string = format!("#0x{:08x}", label_value); disassembly_lines.push(DisassemblyTextLine::from(vec![ InstructionTextToken::new( BnString::new(label_string), InstructionTextTokenContents::GotoLabel(label_value), ), InstructionTextToken::new(BnString::new(":"), InstructionTextTokenContents::Text), ])); disassembly_lines.push(DisassemblyTextLine::from(vec![InstructionTextToken::new( BnString::new(die_node.tag().static_string().unwrap()), InstructionTextTokenContents::TypeName, // TODO : KeywordToken? 
)])); let mut attrs = die_node.attrs(); while let Some(attr) = attrs.next().unwrap() { let mut attr_line: Vec<InstructionTextToken> = Vec::with_capacity(5); attr_line.push(InstructionTextToken::new( BnString::new(" "), InstructionTextTokenContents::Indentation, )); let len; if let Some(n) = attr.name().static_string() { len = n.len(); attr_line.push(InstructionTextToken::new( BnString::new(n), InstructionTextTokenContents::FieldName, )); } else { // This is rather unlikely, I think len = 1; attr_line.push(InstructionTextToken::new( BnString::new("?"), InstructionTextTokenContents::FieldName, )); } // On command line the magic number that looks good is 22, but that's too much whitespace in a basic block, so I chose 18 (22 is the max with the current padding provided) if len < 18 { attr_line.push(InstructionTextToken::new( BnString::new(PADDING[18 - len]), InstructionTextTokenContents::Text, )); } attr_line.push(InstructionTextToken::new( BnString::new(" = "), InstructionTextTokenContents::Text, )); if let Ok(Some(addr)) = dwarf.attr_address(unit, attr.value()) { let addr_string = format!("0x{:08x}", addr); attr_line.push(InstructionTextToken::new( BnString::new(addr_string), InstructionTextTokenContents::Integer(addr), )); } else if let Ok(attr_reader) = dwarf.attr_string(unit, attr.value()) { if let Ok(attr_string) = attr_reader.to_string() { attr_line.push(InstructionTextToken::new( BnString::new(attr_string.as_ref()), InstructionTextTokenContents::String({ let (_, id, offset) = dwarf.lookup_offset_id(attr_reader.offset_id()).unwrap(); offset.into_u64() + view.section_by_name(id.name()).unwrap().start() }), )); } else { attr_line.push(InstructionTextToken::new( BnString::new("??"), InstructionTextTokenContents::Text, )); } } else if let Encoding(type_class) = attr.value() { attr_line.push(InstructionTextToken::new( BnString::new(type_class.static_string().unwrap()), InstructionTextTokenContents::TypeName, )); } else if let UnitRef(offset) = attr.value() { let addr = match offset.to_unit_section_offset(unit) { UnitSectionOffset::DebugInfoOffset(o) => o.0, UnitSectionOffset::DebugTypesOffset(o) => o.0, } .into_u64(); let addr_string = format!("#0x{:08x}", addr); attr_line.push(InstructionTextToken::new( BnString::new(addr_string), InstructionTextTokenContents::GotoLabel(addr), )); } else if let Flag(true) = attr.value() { attr_line.push(InstructionTextToken::new( BnString::new("true"), InstructionTextTokenContents::Integer(1), )); } else if let Flag(false) = attr.value() { attr_line.push(InstructionTextToken::new( BnString::new("false"), InstructionTextTokenContents::Integer(1), )); // Fall-back cases } else if let Some(value) = attr.u8_value() { let value_string = format!("{}", value); attr_line.push(InstructionTextToken::new( BnString::new(value_string), InstructionTextTokenContents::Integer(value.into()), )); } else if let Some(value) = attr.u16_value() { let value_string = format!("{}", value); attr_line.push(InstructionTextToken::new( BnString::new(value_string), InstructionTextTokenContents::Integer(value.into()), )); } else if let Some(value) = attr.udata_value() { let value_string = format!("{}", value); attr_line.push(InstructionTextToken::new( BnString::new(value_string), InstructionTextTokenContents::Integer(value.into()), )); } else if let Some(value) = attr.sdata_value()
else { let attr_string = format!("{:?}", attr.value()); attr_line.push(InstructionTextToken::new( BnString::new(attr_string), InstructionTextTokenContents::Text, )); } disassembly_lines.push(DisassemblyTextLine::from(attr_line)); } disassembly_lines } fn process_tree<R: Reader>( view: &BinaryView, dwarf: &Dwarf<R>, unit: &Unit<R>, graph: &FlowGraph, graph_parent: &FlowGraphNode, die_node: EntriesTreeNode<R>, ) { // Namespaces only - really interesting to look at! // if (die_node.entry().tag() == constants::DW_TAG_namespace) // || (die_node.entry().tag() == constants::DW_TAG_class_type) // || (die_node.entry().tag() == constants::DW_TAG_compile_unit) // || (die_node.entry().tag() == constants::DW_TAG_subprogram) // { let new_node = FlowGraphNode::new(graph); let attr_string = get_info_string(view, dwarf, unit, die_node.entry()); new_node.set_disassembly_lines(&attr_string); graph.append(&new_node); graph_parent.add_outgoing_edge( BranchType::UnconditionalBranch, &new_node, &EdgeStyle::default(), ); let mut children = die_node.children(); while let Some(child) = children.next().unwrap() { process_tree(view, dwarf, unit, graph, &new_node, child); } // } } fn dump_dwarf(bv: &BinaryView) { let view = if bv.section_by_name(".debug_info").is_ok() { bv.to_owned() } else { bv.parent_view().unwrap() }; let graph = FlowGraph::new(); graph.set_option(FlowGraphOption::FlowGraphUsesBlockHighlights, true); graph.set_option(FlowGraphOption::FlowGraphUsesInstructionHighlights, true); let graph_root = FlowGraphNode::new(&graph); graph_root.set_lines(vec!["Graph Root"]); graph.append(&graph_root); let endian = dwarfreader::get_endian(bv); let section_reader = |section_id: SectionId| -> _ { dwarfreader::create
{ let value_string = format!("{}", value); attr_line.push(InstructionTextToken::new( BnString::new(value_string), InstructionTextTokenContents::Integer(value as u64), )); }
conditional_block
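These lib.rs rows pad each attribute name out to an 18-character column (via the PADDING lookup table) before appending " = " and the rendered value. The same alignment idea, sketched in plain Python under the assumption of simple string values; attr_line and the sample attribute names are illustrative, not the plugin's own helpers:

PAD_WIDTH = 18  # same column the Rust code reaches with PADDING[18 - len]

def attr_line(name, value):
    # Indent two spaces, pad the name to PAD_WIDTH, then "name = value".
    pad = " " * max(PAD_WIDTH - len(name), 0)
    return "  " + name + pad + " = " + value

print(attr_line("DW_AT_name", '"main"'))
print(attr_line("DW_AT_decl_line", "7"))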
lib.rs
// See the License for the specific language governing permissions and // limitations under the License. use binaryninja::{ binaryview::{BinaryView, BinaryViewExt}, command::{register, Command}, disassembly::{DisassemblyTextLine, InstructionTextToken, InstructionTextTokenContents}, flowgraph::{BranchType, EdgeStyle, FlowGraph, FlowGraphNode, FlowGraphOption}, string::BnString, }; use dwarfreader::is_valid; use gimli::{ AttributeValue::{Encoding, Flag, UnitRef}, // BigEndian, DebuggingInformationEntry, Dwarf, EntriesTreeNode, Reader, ReaderOffset, SectionId, Unit, UnitSectionOffset, }; static PADDING: [&'static str; 23] = [ "", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", ]; // TODO : This is very much not comprehensive: see https://github.com/gimli-rs/gimli/blob/master/examples/dwarfdump.rs fn get_info_string<R: Reader>( view: &BinaryView, dwarf: &Dwarf<R>, unit: &Unit<R>, die_node: &DebuggingInformationEntry<R>, ) -> Vec<DisassemblyTextLine> { let mut disassembly_lines: Vec<DisassemblyTextLine> = Vec::with_capacity(10); // This is an estimate so "most" things won't need to resize let label_value = match die_node.offset().to_unit_section_offset(unit) { UnitSectionOffset::DebugInfoOffset(o) => o.0, UnitSectionOffset::DebugTypesOffset(o) => o.0, } .into_u64(); let label_string = format!("#0x{:08x}", label_value); disassembly_lines.push(DisassemblyTextLine::from(vec![ InstructionTextToken::new( BnString::new(label_string), InstructionTextTokenContents::GotoLabel(label_value), ), InstructionTextToken::new(BnString::new(":"), InstructionTextTokenContents::Text), ])); disassembly_lines.push(DisassemblyTextLine::from(vec![InstructionTextToken::new( BnString::new(die_node.tag().static_string().unwrap()), InstructionTextTokenContents::TypeName, // TODO : KeywordToken? 
)])); let mut attrs = die_node.attrs(); while let Some(attr) = attrs.next().unwrap() { let mut attr_line: Vec<InstructionTextToken> = Vec::with_capacity(5); attr_line.push(InstructionTextToken::new( BnString::new(" "), InstructionTextTokenContents::Indentation, )); let len; if let Some(n) = attr.name().static_string() { len = n.len(); attr_line.push(InstructionTextToken::new( BnString::new(n), InstructionTextTokenContents::FieldName, )); } else { // This is rather unlikely, I think len = 1; attr_line.push(InstructionTextToken::new( BnString::new("?"), InstructionTextTokenContents::FieldName, )); } // On command line the magic number that looks good is 22, but that's too much whitespace in a basic block, so I chose 18 (22 is the max with the current padding provided) if len < 18 { attr_line.push(InstructionTextToken::new( BnString::new(PADDING[18 - len]), InstructionTextTokenContents::Text, )); } attr_line.push(InstructionTextToken::new( BnString::new(" = "), InstructionTextTokenContents::Text, )); if let Ok(Some(addr)) = dwarf.attr_address(unit, attr.value()) { let addr_string = format!("0x{:08x}", addr); attr_line.push(InstructionTextToken::new( BnString::new(addr_string), InstructionTextTokenContents::Integer(addr), )); } else if let Ok(attr_reader) = dwarf.attr_string(unit, attr.value()) { if let Ok(attr_string) = attr_reader.to_string() { attr_line.push(InstructionTextToken::new( BnString::new(attr_string.as_ref()), InstructionTextTokenContents::String({ let (_, id, offset) = dwarf.lookup_offset_id(attr_reader.offset_id()).unwrap(); offset.into_u64() + view.section_by_name(id.name()).unwrap().start() }), )); } else { attr_line.push(InstructionTextToken::new( BnString::new("??"), InstructionTextTokenContents::Text, )); } } else if let Encoding(type_class) = attr.value() { attr_line.push(InstructionTextToken::new( BnString::new(type_class.static_string().unwrap()), InstructionTextTokenContents::TypeName, )); } else if let UnitRef(offset) = attr.value() { let addr = match offset.to_unit_section_offset(unit) { UnitSectionOffset::DebugInfoOffset(o) => o.0, UnitSectionOffset::DebugTypesOffset(o) => o.0, } .into_u64(); let addr_string = format!("#0x{:08x}", addr); attr_line.push(InstructionTextToken::new( BnString::new(addr_string), InstructionTextTokenContents::GotoLabel(addr), )); } else if let Flag(true) = attr.value() { attr_line.push(InstructionTextToken::new( BnString::new("true"), InstructionTextTokenContents::Integer(1), )); } else if let Flag(false) = attr.value() { attr_line.push(InstructionTextToken::new( BnString::new("false"), InstructionTextTokenContents::Integer(1), )); // Fall-back cases } else if let Some(value) = attr.u8_value() { let value_string = format!("{}", value); attr_line.push(InstructionTextToken::new( BnString::new(value_string), InstructionTextTokenContents::Integer(value.into()), )); } else if let Some(value) = attr.u16_value() { let value_string = format!("{}", value); attr_line.push(InstructionTextToken::new( BnString::new(value_string), InstructionTextTokenContents::Integer(value.into()), )); } else if let Some(value) = attr.udata_value() { let value_string = format!("{}", value); attr_line.push(InstructionTextToken::new( BnString::new(value_string), InstructionTextTokenContents::Integer(value.into()), )); } else if let Some(value) = attr.sdata_value() { let value_string = format!("{}", value); attr_line.push(InstructionTextToken::new( BnString::new(value_string), InstructionTextTokenContents::Integer(value as u64), )); } else { let attr_string = 
format!("{:?}", attr.value()); attr_line.push(InstructionTextToken::new( BnString::new(attr_string), InstructionTextTokenContents::Text, )); } disassembly_lines.push(DisassemblyTextLine::from(attr_line)); } disassembly_lines } fn process_tree<R: Reader>( view: &BinaryView, dwarf: &Dwarf<R>, unit: &Unit<R>, graph: &FlowGraph, graph_parent: &FlowGraphNode, die_node: EntriesTreeNode<R>, ) { // Namespaces only - really interesting to look at! // if (die_node.entry().tag() == constants::DW_TAG_namespace) // || (die_node.entry().tag() == constants::DW_TAG_class_type) // || (die_node.entry().tag() == constants::DW_TAG_compile_unit) // || (die_node.entry().tag() == constants::DW_TAG_subprogram) // { let new_node = FlowGraphNode::new(graph); let attr_string = get_info_string(view, dwarf, unit, die_node.entry()); new_node.set_disassembly_lines(&attr_string); graph.append(&new_node); graph_parent.add_outgoing_edge( BranchType::UnconditionalBranch, &new_node, &EdgeStyle::default(), ); let mut children = die_node.children(); while let Some(child) = children.next().unwrap() { process_tree(view, dwarf, unit, graph, &new_node, child); } // } } fn dump_dwarf(bv: &BinaryView) { let view = if bv.section_by_name(".debug_info").is_ok() { bv.to_owned() } else { bv.parent_view().unwrap() }; let graph = FlowGraph::new(); graph.set_option(FlowGraphOption::FlowGraphUsesBlockHighlights, true); graph.set_option(FlowGraphOption::FlowGraphUsesInstructionHighlights, true); let graph_root = FlowGraphNode::new(&graph); graph_root.set_lines(vec!["Graph Root"]); graph.append(&graph_root); let endian
// distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
random_line_split
lib.rs
See the License for the specific language governing permissions and // limitations under the License. use binaryninja::{ binaryview::{BinaryView, BinaryViewExt}, command::{register, Command}, disassembly::{DisassemblyTextLine, InstructionTextToken, InstructionTextTokenContents}, flowgraph::{BranchType, EdgeStyle, FlowGraph, FlowGraphNode, FlowGraphOption}, string::BnString, }; use dwarfreader::is_valid; use gimli::{ AttributeValue::{Encoding, Flag, UnitRef}, // BigEndian, DebuggingInformationEntry, Dwarf, EntriesTreeNode, Reader, ReaderOffset, SectionId, Unit, UnitSectionOffset, }; static PADDING: [&'static str; 23] = [ "", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", ]; // TODO : This is very much not comprehensive: see https://github.com/gimli-rs/gimli/blob/master/examples/dwarfdump.rs fn
<R: Reader>( view: &BinaryView, dwarf: &Dwarf<R>, unit: &Unit<R>, die_node: &DebuggingInformationEntry<R>, ) -> Vec<DisassemblyTextLine> { let mut disassembly_lines: Vec<DisassemblyTextLine> = Vec::with_capacity(10); // This is an estimate so "most" things won't need to resize let label_value = match die_node.offset().to_unit_section_offset(unit) { UnitSectionOffset::DebugInfoOffset(o) => o.0, UnitSectionOffset::DebugTypesOffset(o) => o.0, } .into_u64(); let label_string = format!("#0x{:08x}", label_value); disassembly_lines.push(DisassemblyTextLine::from(vec![ InstructionTextToken::new( BnString::new(label_string), InstructionTextTokenContents::GotoLabel(label_value), ), InstructionTextToken::new(BnString::new(":"), InstructionTextTokenContents::Text), ])); disassembly_lines.push(DisassemblyTextLine::from(vec![InstructionTextToken::new( BnString::new(die_node.tag().static_string().unwrap()), InstructionTextTokenContents::TypeName, // TODO : KeywordToken? )])); let mut attrs = die_node.attrs(); while let Some(attr) = attrs.next().unwrap() { let mut attr_line: Vec<InstructionTextToken> = Vec::with_capacity(5); attr_line.push(InstructionTextToken::new( BnString::new(" "), InstructionTextTokenContents::Indentation, )); let len; if let Some(n) = attr.name().static_string() { len = n.len(); attr_line.push(InstructionTextToken::new( BnString::new(n), InstructionTextTokenContents::FieldName, )); } else { // This is rather unlikely, I think len = 1; attr_line.push(InstructionTextToken::new( BnString::new("?"), InstructionTextTokenContents::FieldName, )); } // On command line the magic number that looks good is 22, but that's too much whitespace in a basic block, so I chose 18 (22 is the max with the current padding provided) if len < 18 { attr_line.push(InstructionTextToken::new( BnString::new(PADDING[18 - len]), InstructionTextTokenContents::Text, )); } attr_line.push(InstructionTextToken::new( BnString::new(" = "), InstructionTextTokenContents::Text, )); if let Ok(Some(addr)) = dwarf.attr_address(unit, attr.value()) { let addr_string = format!("0x{:08x}", addr); attr_line.push(InstructionTextToken::new( BnString::new(addr_string), InstructionTextTokenContents::Integer(addr), )); } else if let Ok(attr_reader) = dwarf.attr_string(unit, attr.value()) { if let Ok(attr_string) = attr_reader.to_string() { attr_line.push(InstructionTextToken::new( BnString::new(attr_string.as_ref()), InstructionTextTokenContents::String({ let (_, id, offset) = dwarf.lookup_offset_id(attr_reader.offset_id()).unwrap(); offset.into_u64() + view.section_by_name(id.name()).unwrap().start() }), )); } else { attr_line.push(InstructionTextToken::new( BnString::new("??"), InstructionTextTokenContents::Text, )); } } else if let Encoding(type_class) = attr.value() { attr_line.push(InstructionTextToken::new( BnString::new(type_class.static_string().unwrap()), InstructionTextTokenContents::TypeName, )); } else if let UnitRef(offset) = attr.value() { let addr = match offset.to_unit_section_offset(unit) { UnitSectionOffset::DebugInfoOffset(o) => o.0, UnitSectionOffset::DebugTypesOffset(o) => o.0, } .into_u64(); let addr_string = format!("#0x{:08x}", addr); attr_line.push(InstructionTextToken::new( BnString::new(addr_string), InstructionTextTokenContents::GotoLabel(addr), )); } else if let Flag(true) = attr.value() { attr_line.push(InstructionTextToken::new( BnString::new("true"), InstructionTextTokenContents::Integer(1), )); } else if let Flag(false) = attr.value() { attr_line.push(InstructionTextToken::new( BnString::new("false"), 
InstructionTextTokenContents::Integer(1), )); // Fall-back cases } else if let Some(value) = attr.u8_value() { let value_string = format!("{}", value); attr_line.push(InstructionTextToken::new( BnString::new(value_string), InstructionTextTokenContents::Integer(value.into()), )); } else if let Some(value) = attr.u16_value() { let value_string = format!("{}", value); attr_line.push(InstructionTextToken::new( BnString::new(value_string), InstructionTextTokenContents::Integer(value.into()), )); } else if let Some(value) = attr.udata_value() { let value_string = format!("{}", value); attr_line.push(InstructionTextToken::new( BnString::new(value_string), InstructionTextTokenContents::Integer(value.into()), )); } else if let Some(value) = attr.sdata_value() { let value_string = format!("{}", value); attr_line.push(InstructionTextToken::new( BnString::new(value_string), InstructionTextTokenContents::Integer(value as u64), )); } else { let attr_string = format!("{:?}", attr.value()); attr_line.push(InstructionTextToken::new( BnString::new(attr_string), InstructionTextTokenContents::Text, )); } disassembly_lines.push(DisassemblyTextLine::from(attr_line)); } disassembly_lines } fn process_tree<R: Reader>( view: &BinaryView, dwarf: &Dwarf<R>, unit: &Unit<R>, graph: &FlowGraph, graph_parent: &FlowGraphNode, die_node: EntriesTreeNode<R>, ) { // Namespaces only - really interesting to look at! // if (die_node.entry().tag() == constants::DW_TAG_namespace) // || (die_node.entry().tag() == constants::DW_TAG_class_type) // || (die_node.entry().tag() == constants::DW_TAG_compile_unit) // || (die_node.entry().tag() == constants::DW_TAG_subprogram) // { let new_node = FlowGraphNode::new(graph); let attr_string = get_info_string(view, dwarf, unit, die_node.entry()); new_node.set_disassembly_lines(&attr_string); graph.append(&new_node); graph_parent.add_outgoing_edge( BranchType::UnconditionalBranch, &new_node, &EdgeStyle::default(), ); let mut children = die_node.children(); while let Some(child) = children.next().unwrap() { process_tree(view, dwarf, unit, graph, &new_node, child); } // } } fn dump_dwarf(bv: &BinaryView) { let view = if bv.section_by_name(".debug_info").is_ok() { bv.to_owned() } else { bv.parent_view().unwrap() }; let graph = FlowGraph::new(); graph.set_option(FlowGraphOption::FlowGraphUsesBlockHighlights, true); graph.set_option(FlowGraphOption::FlowGraphUsesInstructionHighlights, true); let graph_root = FlowGraphNode::new(&graph); graph_root.set_lines(vec!["Graph Root"]); graph.append(&graph_root); let endian = dwarfreader::get_endian(bv); let section_reader = |section_id: SectionId| -> _ { dwarfreader::create
get_info_string
identifier_name
tree.py
#If there are 3 x's in a row, x's win def win_state_x(self): if self.three_in_a_row('x') and not(self.three_in_a_row('o')): return True return False #If there are 3 o's in a row, o's win def win_state_o(self): if self.three_in_a_row('o') and not(self.three_in_a_row('x')): return True return False #Returns the game children of the gameState def get_game_children(self, player): empty = [] for pos in range(len(self.gameState)): if self.gameState[pos] == '.': newState = list(self.gameState) newState[pos] = player empty.append(newState) return empty #Checks if 2 in a row in any way on the board def two_in_a_row(self, player): #Horizontal for it in range(0,3): if self.gameState[it*3] == player and self.gameState[3*it+1] == player and self.gameState[3*it+2] == '.': return 3*it+2 elif self.gameState[it*3] == player and self.gameState[3*it+1] == '.' and self.gameState[3*it+2] == player: return 3*it+1 elif self.gameState[it*3] == '.' and self.gameState[3*it+1] == player and self.gameState[3*it+2] == player: return 3*it #Vertical for it in range(0,3): if self.gameState[it] == player and self.gameState[it+3] == player and self.gameState[it+6] == '.': return it+6 elif self.gameState[it] == player and self.gameState[it+3] == '.' and self.gameState[it+6] == player: return it+3 elif self.gameState[it] == '.' and self.gameState[it+3] == player and self.gameState[it+6] == player: return it #Diagonal if self.gameState[0] == player and self.gameState[4] == player and self.gameState[8] == '.': return 8 elif self.gameState[0] == player and self.gameState[4] == '.' and self.gameState[8] == player: return 4 elif self.gameState[0] == '.' and self.gameState[4] == player and self.gameState[8] == player: return 0 elif self.gameState[6] == player and self.gameState[4] == player and self.gameState[2] == '.': return 2 elif self.gameState[6] == player and self.gameState[4] == '.' and self.gameState[2] == player: return 4 elif self.gameState[6] == '.' 
and self.gameState[4] == player and self.gameState[2] == player: return 6 return None def trump(self, player, player2): if (self.gameState[0] == player or self.gameState[2] == player or self.gameState[6] == player or self.gameState[8] == player) and self.gameState[4] == '.': return 4 if (self.gameState[0] == player or self.gameState[2] == player or self.gameState[6] == player or self.gameState[8] == player) and self.gameState[4] == player2: pick = [1,3,5,7] while True: check = random.choice(pick) if check != player or check != player2: return check #If they play in the middle, play in 2 corners if (self.gameState[4] == player): pick = [0,2,6,8] while True: check = random.choice(pick) if check != player or check != player2: return check return None #Prints the game board def print_state(self): string = str() for i in range(0,9): if i%3 == 0 and i != 0: print string string = str() string += self.gameState[i] + " " print string + "\n" #Gets last gamestate in a list (to compare for AI move) def get_AI_lastmove(self): global last_state_list last_state_list = self.gameState[:] #Gets current gamestate in a list (to compare for AI move) def get_AI_currmove(self): global current_state_list current_state_list = self.gameState[:] class Node(object): def __init__(self, tictactoe): self.tictactoe = tictactoe self.gameState = self.tictactoe.gameState self.parent = None self.children = [] self.depth = None def get_leaves(self): returnList = [] if len(self.children) == 0: returnList.append(self) else: for node in self.children: for leaf in node.get_leaves(): returnList.append(leaf) return returnList def get_children(self): return self.children def insert_child(self, node): node.parent = self node.depth = self.depth+1 self.children.append(node) def print_state(self): self.tictactoe.print_state() def get_gameState(self): return self.gameState def get_AI_lastmove(self): self.tictactoe.get_AI_lastmove() def get_AI_currmove(self): self.tictactoe.get_AI_currmove() def trump(self, player): self.tictactoe.trump(player, player2) class Tree(object): def __init__(self, root): root.depth = 0 self.root = root self.currentNode = self.root self.lastNode = self.currentNode def fill_game_tree(self, first_player, node): if (not(node.tictactoe.win_state_o()) and not(node.tictactoe.win_state_x())): gameStates = node.tictactoe.get_game_children(first_player) for s in gameStates: n = Node(tictactoe(s)) node.insert_child(n) if first_player == 'x': self.fill_game_tree('o',n) else: self.fill_game_tree('x',n) def set_currentNode(self, node): self.currentNode = node def end_state(self): if self.currentNode.tictactoe.win_state_x(): print "Player 1 has won" return True elif self.currentNode.tictactoe.win_state_o(): print "Player 2 has won" return True elif len(self.currentNode.get_children()) <= 0: print "The game is a tie." 
return True return False def valid_position(currentNode, position): # Converts position 1-9 and returns position x,y or None,None if invalid position = int(position) if currentNode.get_gameState()[position-1] != '.': return None return position-1 def minimax(turn, currentNode, children): best_node = 0 most_wins = 0 # Guarantees to make the winning move for i in range(0,len(children)): if turn%2 == 0 and children[i].tictactoe.win_state_x() or turn%2 == 1 and children[i].tictactoe.win_state_o(): return i # Blocks other player from winning two_in_a_row = None if turn%2 == 0: two_in_a_row = currentNode.tictactoe.two_in_a_row('o') elif turn%2 == 1: two_in_a_row = currentNode.tictactoe.two_in_a_row('x') if two_in_a_row != None: pos = two_in_a_row for n in range(0, len(children)): if (turn%2 == 0 and children[n].get_gameState()[pos] == 'x') or (turn%2 == 1 and children[n].get_gameState()[pos] == 'o'): return n #Trump move trump = None if turn%2 == 0: trump = currentNode.tictactoe.trump('o', 'x') elif turn%2 == 1:
for it in range(0,3): if self.gameState[it*3] == player and self.gameState[3*it+1] == player and self.gameState[3*it+2] == player: #horizontally return True if self.gameState[it] == player and self.gameState[it+3] == player and self.gameState[it+6] == player: #vertically return True if self.gameState[0] == player and self.gameState[4] == player and self.gameState[8] == player: #diagonally return True if self.gameState[6] == player and self.gameState[4] == player and self.gameState[2] == player: #diagonally return True return False
identifier_body
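The tree.py rows implement the win test by spelling out every row, column and diagonal comparison against the 9-element gameState list. An equivalent check written with explicit index triples, as a standalone sketch (WIN_LINES and the sample board strings are illustrative; the board layout matches gameState):

# Each triple lists the board indices of one winning line.
WIN_LINES = [(0, 1, 2), (3, 4, 5), (6, 7, 8),   # rows
             (0, 3, 6), (1, 4, 7), (2, 5, 8),   # columns
             (0, 4, 8), (2, 4, 6)]              # diagonals

def three_in_a_row(board, player):
    # board is a 9-element list of 'x', 'o' or '.'
    return any(all(board[i] == player for i in line) for line in WIN_LINES)

assert three_in_a_row(list("xxx...o.o"), 'x')
assert not three_in_a_row(list("x.x...o.o"), 'x')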
tree.py
If there are 3 o's in a row, o's win def win_state_o(self): if self.three_in_a_row('o') and not(self.three_in_a_row('x')): return True return False #Returns the game children of the gameState def get_game_children(self, player): empty = [] for pos in range(len(self.gameState)): if self.gameState[pos] == '.': newState = list(self.gameState) newState[pos] = player empty.append(newState) return empty #Checks if 2 in a row in any way on the board def two_in_a_row(self, player): #Horizontal for it in range(0,3): if self.gameState[it*3] == player and self.gameState[3*it+1] == player and self.gameState[3*it+2] == '.': return 3*it+2 elif self.gameState[it*3] == player and self.gameState[3*it+1] == '.' and self.gameState[3*it+2] == player: return 3*it+1 elif self.gameState[it*3] == '.' and self.gameState[3*it+1] == player and self.gameState[3*it+2] == player: return 3*it #Vertical for it in range(0,3): if self.gameState[it] == player and self.gameState[it+3] == player and self.gameState[it+6] == '.': return it+6 elif self.gameState[it] == player and self.gameState[it+3] == '.' and self.gameState[it+6] == player: return it+3 elif self.gameState[it] == '.' and self.gameState[it+3] == player and self.gameState[it+6] == player: return it #Diagonal if self.gameState[0] == player and self.gameState[4] == player and self.gameState[8] == '.': return 8 elif self.gameState[0] == player and self.gameState[4] == '.' and self.gameState[8] == player: return 4 elif self.gameState[0] == '.' and self.gameState[4] == player and self.gameState[8] == player: return 0 elif self.gameState[6] == player and self.gameState[4] == player and self.gameState[2] == '.': return 2 elif self.gameState[6] == player and self.gameState[4] == '.' and self.gameState[2] == player: return 4 elif self.gameState[6] == '.' 
and self.gameState[4] == player and self.gameState[2] == player: return 6 return None def trump(self, player, player2): if (self.gameState[0] == player or self.gameState[2] == player or self.gameState[6] == player or self.gameState[8] == player) and self.gameState[4] == '.': return 4 if (self.gameState[0] == player or self.gameState[2] == player or self.gameState[6] == player or self.gameState[8] == player) and self.gameState[4] == player2: pick = [1,3,5,7] while True: check = random.choice(pick) if check != player or check != player2: return check #If they play in the middle, play in 2 corners if (self.gameState[4] == player): pick = [0,2,6,8] while True: check = random.choice(pick) if check != player or check != player2: return check return None #Prints the game board def print_state(self): string = str() for i in range(0,9): if i%3 == 0 and i != 0: print string string = str() string += self.gameState[i] + " " print string + "\n" #Gets last gamestate in a list (to compare for AI move) def get_AI_lastmove(self): global last_state_list last_state_list = self.gameState[:] #Gets current gamestate in a list (to compare for AI move) def get_AI_currmove(self): global current_state_list current_state_list = self.gameState[:] class Node(object): def __init__(self, tictactoe): self.tictactoe = tictactoe self.gameState = self.tictactoe.gameState self.parent = None self.children = [] self.depth = None def get_leaves(self): returnList = [] if len(self.children) == 0: returnList.append(self) else: for node in self.children: for leaf in node.get_leaves(): returnList.append(leaf) return returnList def get_children(self): return self.children def insert_child(self, node): node.parent = self node.depth = self.depth+1 self.children.append(node) def print_state(self): self.tictactoe.print_state() def get_gameState(self): return self.gameState def get_AI_lastmove(self): self.tictactoe.get_AI_lastmove() def get_AI_currmove(self): self.tictactoe.get_AI_currmove() def trump(self, player): self.tictactoe.trump(player, player2) class Tree(object): def __init__(self, root): root.depth = 0 self.root = root self.currentNode = self.root self.lastNode = self.currentNode def fill_game_tree(self, first_player, node): if (not(node.tictactoe.win_state_o()) and not(node.tictactoe.win_state_x())): gameStates = node.tictactoe.get_game_children(first_player) for s in gameStates: n = Node(tictactoe(s)) node.insert_child(n) if first_player == 'x': self.fill_game_tree('o',n) else: self.fill_game_tree('x',n) def set_currentNode(self, node): self.currentNode = node def end_state(self): if self.currentNode.tictactoe.win_state_x(): print "Player 1 has won" return True elif self.currentNode.tictactoe.win_state_o(): print "Player 2 has won" return True elif len(self.currentNode.get_children()) <= 0: print "The game is a tie." return True return False def
(currentNode, position): # Converts position 1-9 and returns position x,y or None,None if invalid position = int(position) if currentNode.get_gameState()[position-1] != '.': return None return position-1 def minimax(turn, currentNode, children): best_node = 0 most_wins = 0 # Guarantees to make the winning move for i in range(0,len(children)): if turn%2 == 0 and children[i].tictactoe.win_state_x() or turn%2 == 1 and children[i].tictactoe.win_state_o(): return i # Blocks other player from winning two_in_a_row = None if turn%2 == 0: two_in_a_row = currentNode.tictactoe.two_in_a_row('o') elif turn%2 == 1: two_in_a_row = currentNode.tictactoe.two_in_a_row('x') if two_in_a_row != None: pos = two_in_a_row for n in range(0, len(children)): if (turn%2 == 0 and children[n].get_gameState()[pos] == 'x') or (turn%2 == 1 and children[n].get_gameState()[pos] == 'o'): return n #Trump move trump = None if turn%2 == 0: trump = currentNode.tictactoe.trump('o', 'x') elif turn%2 == 1: trump = currentNode.tictactoe.trump('x', 'o') if trump != None: pos = trump for n in range(0, len(children)): if (turn%2 == 0 and children[n].get_gameState()[pos] == 'x') or (turn%2 == 1 and children[n].get_gameState()[pos] == 'o'): return n #print "Didn't need to make a trump move" # Checks for move with highest chance of winning using minimax algorithm for i in range(0,len(children)): #score = [0] score = 0 for n in children[i].get_leaves(): if (n.tictactoe.win_state_x() and turn%2 == 1) or (n.tictactoe.win_state_o() and turn%2 == 0): score += 10 #score.append(10-(n.depth-currentNode.depth)) elif n.tictactoe.win_state_o(): score -= 10
valid_position
identifier_name
tree.py
#If there are 3 o's in a row, o's win def win_state_o(self): if self.three_in_a_row('o') and not(self.three_in_a_row('x')): return True return False #Returns the game children of the gameState def get_game_children(self, player): empty = [] for pos in range(len(self.gameState)): if self.gameState[pos] == '.': newState = list(self.gameState) newState[pos] = player empty.append(newState) return empty #Checks if 2 in a row in any way on the board def two_in_a_row(self, player): #Horizontal for it in range(0,3): if self.gameState[it*3] == player and self.gameState[3*it+1] == player and self.gameState[3*it+2] == '.': return 3*it+2 elif self.gameState[it*3] == player and self.gameState[3*it+1] == '.' and self.gameState[3*it+2] == player: return 3*it+1 elif self.gameState[it*3] == '.' and self.gameState[3*it+1] == player and self.gameState[3*it+2] == player: return 3*it #Vertical for it in range(0,3): if self.gameState[it] == player and self.gameState[it+3] == player and self.gameState[it+6] == '.': return it+6 elif self.gameState[it] == player and self.gameState[it+3] == '.' and self.gameState[it+6] == player: return it+3 elif self.gameState[it] == '.' and self.gameState[it+3] == player and self.gameState[it+6] == player: return it
elif self.gameState[0] == player and self.gameState[4] == '.' and self.gameState[8] == player: return 4 elif self.gameState[0] == '.' and self.gameState[4] == player and self.gameState[8] == player: return 0 elif self.gameState[6] == player and self.gameState[4] == player and self.gameState[2] == '.': return 2 elif self.gameState[6] == player and self.gameState[4] == '.' and self.gameState[2] == player: return 4 elif self.gameState[6] == '.' and self.gameState[4] == player and self.gameState[2] == player: return 6 return None def trump(self, player, player2): if (self.gameState[0] == player or self.gameState[2] == player or self.gameState[6] == player or self.gameState[8] == player) and self.gameState[4] == '.': return 4 if (self.gameState[0] == player or self.gameState[2] == player or self.gameState[6] == player or self.gameState[8] == player) and self.gameState[4] == player2: pick = [1,3,5,7] while True: check = random.choice(pick) if check != player or check != player2: return check #If they play in the middle, play in 2 corners if (self.gameState[4] == player): pick = [0,2,6,8] while True: check = random.choice(pick) if check != player or check != player2: return check return None #Prints the game board def print_state(self): string = str() for i in range(0,9): if i%3 == 0 and i != 0: print string string = str() string += self.gameState[i] + " " print string + "\n" #Gets last gamestate in a list (to compare for AI move) def get_AI_lastmove(self): global last_state_list last_state_list = self.gameState[:] #Gets current gamestate in a list (to compare for AI move) def get_AI_currmove(self): global current_state_list current_state_list = self.gameState[:] class Node(object): def __init__(self, tictactoe): self.tictactoe = tictactoe self.gameState = self.tictactoe.gameState self.parent = None self.children = [] self.depth = None def get_leaves(self): returnList = [] if len(self.children) == 0: returnList.append(self) else: for node in self.children: for leaf in node.get_leaves(): returnList.append(leaf) return returnList def get_children(self): return self.children def insert_child(self, node): node.parent = self node.depth = self.depth+1 self.children.append(node) def print_state(self): self.tictactoe.print_state() def get_gameState(self): return self.gameState def get_AI_lastmove(self): self.tictactoe.get_AI_lastmove() def get_AI_currmove(self): self.tictactoe.get_AI_currmove() def trump(self, player): self.tictactoe.trump(player, player2) class Tree(object): def __init__(self, root): root.depth = 0 self.root = root self.currentNode = self.root self.lastNode = self.currentNode def fill_game_tree(self, first_player, node): if (not(node.tictactoe.win_state_o()) and not(node.tictactoe.win_state_x())): gameStates = node.tictactoe.get_game_children(first_player) for s in gameStates: n = Node(tictactoe(s)) node.insert_child(n) if first_player == 'x': self.fill_game_tree('o',n) else: self.fill_game_tree('x',n) def set_currentNode(self, node): self.currentNode = node def end_state(self): if self.currentNode.tictactoe.win_state_x(): print "Player 1 has won" return True elif self.currentNode.tictactoe.win_state_o(): print "Player 2 has won" return True elif len(self.currentNode.get_children()) <= 0: print "The game is a tie." 
return True return False def valid_position(currentNode, position): # Converts position 1-9 and returns position x,y or None,None if invalid position = int(position) if currentNode.get_gameState()[position-1] != '.': return None return position-1 def minimax(turn, currentNode, children): best_node = 0 most_wins = 0 # Guarantees to make the winning move for i in range(0,len(children)): if turn%2 == 0 and children[i].tictactoe.win_state_x() or turn%2 == 1 and children[i].tictactoe.win_state_o(): return i # Blocks other player from winning two_in_a_row = None if turn%2 == 0: two_in_a_row = currentNode.tictactoe.two_in_a_row('o') elif turn%2 == 1: two_in_a_row = currentNode.tictactoe.two_in_a_row('x') if two_in_a_row != None: pos = two_in_a_row for n in range(0, len(children)): if (turn%2 == 0 and children[n].get_gameState()[pos] == 'x') or (turn%2 == 1 and children[n].get_gameState()[pos] == 'o'): return n #Trump move trump = None if turn%2 == 0: trump = currentNode.tictactoe.trump('o', 'x') elif turn%2 == 1: trump = currentNode.tictactoe.trump('x', 'o') if trump != None: pos = trump for n in range(0, len(children)): if (turn%2 == 0 and children[n].get_gameState()[pos] == 'x') or (turn%2 == 1 and children[n].get_gameState()[pos] == 'o'): return n #print "Didn't need to make a trump move" # Checks for move with highest chance of winning using minimax algorithm for i in range(0,len(children)): #score = [0] score = 0 for n in children[i].get_leaves(): if (n.tictactoe.win_state_x() and turn%2 == 1) or (n.tictactoe.win_state_o() and turn%2 == 0): score += 10 #score.append(10-(n.depth-currentNode.depth)) elif n.tictactoe.win_state_o(): score -= 10
#Diagonal if self.gameState[0] == player and self.gameState[4] == player and self.gameState[8] == '.': return 8
random_line_split
tree.py
#If there are 3 o's in a row, o's win def win_state_o(self): if self.three_in_a_row('o') and not(self.three_in_a_row('x')): return True return False #Returns the game children of the gameState def get_game_children(self, player): empty = [] for pos in range(len(self.gameState)): if self.gameState[pos] == '.': newState = list(self.gameState) newState[pos] = player empty.append(newState) return empty #Checks if 2 in a row in any way on the board def two_in_a_row(self, player): #Horizontal for it in range(0,3): if self.gameState[it*3] == player and self.gameState[3*it+1] == player and self.gameState[3*it+2] == '.': return 3*it+2 elif self.gameState[it*3] == player and self.gameState[3*it+1] == '.' and self.gameState[3*it+2] == player: return 3*it+1 elif self.gameState[it*3] == '.' and self.gameState[3*it+1] == player and self.gameState[3*it+2] == player: return 3*it #Vertical for it in range(0,3): if self.gameState[it] == player and self.gameState[it+3] == player and self.gameState[it+6] == '.': return it+6 elif self.gameState[it] == player and self.gameState[it+3] == '.' and self.gameState[it+6] == player: return it+3 elif self.gameState[it] == '.' and self.gameState[it+3] == player and self.gameState[it+6] == player: return it #Diagonal if self.gameState[0] == player and self.gameState[4] == player and self.gameState[8] == '.': return 8 elif self.gameState[0] == player and self.gameState[4] == '.' and self.gameState[8] == player: return 4 elif self.gameState[0] == '.' and self.gameState[4] == player and self.gameState[8] == player: return 0 elif self.gameState[6] == player and self.gameState[4] == player and self.gameState[2] == '.': return 2 elif self.gameState[6] == player and self.gameState[4] == '.' and self.gameState[2] == player: return 4 elif self.gameState[6] == '.' 
and self.gameState[4] == player and self.gameState[2] == player: return 6 return None def trump(self, player, player2): if (self.gameState[0] == player or self.gameState[2] == player or self.gameState[6] == player or self.gameState[8] == player) and self.gameState[4] == '.': return 4 if (self.gameState[0] == player or self.gameState[2] == player or self.gameState[6] == player or self.gameState[8] == player) and self.gameState[4] == player2: pick = [1,3,5,7] while True: check = random.choice(pick) if check != player or check != player2: return check #If they play in the middle, play in 2 corners if (self.gameState[4] == player): pick = [0,2,6,8] while True: check = random.choice(pick) if check != player or check != player2: return check return None #Prints the game board def print_state(self): string = str() for i in range(0,9): if i%3 == 0 and i != 0: print string string = str() string += self.gameState[i] + " " print string + "\n" #Gets last gamestate in a list (to compare for AI move) def get_AI_lastmove(self): global last_state_list last_state_list = self.gameState[:] #Gets current gamestate in a list (to compare for AI move) def get_AI_currmove(self): global current_state_list current_state_list = self.gameState[:] class Node(object): def __init__(self, tictactoe): self.tictactoe = tictactoe self.gameState = self.tictactoe.gameState self.parent = None self.children = [] self.depth = None def get_leaves(self): returnList = [] if len(self.children) == 0: returnList.append(self) else: for node in self.children: for leaf in node.get_leaves(): returnList.append(leaf) return returnList def get_children(self): return self.children def insert_child(self, node): node.parent = self node.depth = self.depth+1 self.children.append(node) def print_state(self): self.tictactoe.print_state() def get_gameState(self): return self.gameState def get_AI_lastmove(self): self.tictactoe.get_AI_lastmove() def get_AI_currmove(self): self.tictactoe.get_AI_currmove() def trump(self, player): self.tictactoe.trump(player, player2) class Tree(object): def __init__(self, root): root.depth = 0 self.root = root self.currentNode = self.root self.lastNode = self.currentNode def fill_game_tree(self, first_player, node): if (not(node.tictactoe.win_state_o()) and not(node.tictactoe.win_state_x())): gameStates = node.tictactoe.get_game_children(first_player) for s in gameStates: n = Node(tictactoe(s)) node.insert_child(n) if first_player == 'x': self.fill_game_tree('o',n) else: self.fill_game_tree('x',n) def set_currentNode(self, node): self.currentNode = node def end_state(self): if self.currentNode.tictactoe.win_state_x(): print "Player 1 has won" return True elif self.currentNode.tictactoe.win_state_o(): print "Player 2 has won" return True elif len(self.currentNode.get_children()) <= 0: print "The game is a tie." return True return False def valid_position(currentNode, position): # Converts position 1-9 and returns position x,y or None,None if invalid position = int(position) if currentNode.get_gameState()[position-1] != '.': return None return position-1 def minimax(turn, currentNode, children): best_node = 0 most_wins = 0 # Guarantees to make the winning move for i in range(0,len(children)): if turn%2 == 0 and children[i].tictactoe.win_state_x() or turn%2 == 1 and children[i].tictactoe.win_state_o(): return i # Blocks other player from winning two_in_a_row = None if turn%2 == 0:
elif turn%2 == 1: two_in_a_row = currentNode.tictactoe.two_in_a_row('x') if two_in_a_row != None: pos = two_in_a_row for n in range(0, len(children)): if (turn%2 == 0 and children[n].get_gameState()[pos] == 'x') or (turn%2 == 1 and children[n].get_gameState()[pos] == 'o'): return n #Trump move trump = None if turn%2 == 0: trump = currentNode.tictactoe.trump('o', 'x') elif turn%2 == 1: trump = currentNode.tictactoe.trump('x', 'o') if trump != None: pos = trump for n in range(0, len(children)): if (turn%2 == 0 and children[n].get_gameState()[pos] == 'x') or (turn%2 == 1 and children[n].get_gameState()[pos] == 'o'): return n #print "Didn't need to make a trump move" # Checks for move with highest chance of winning using minimax algorithm for i in range(0,len(children)): #score = [0] score = 0 for n in children[i].get_leaves(): if (n.tictactoe.win_state_x() and turn%2 == 1) or (n.tictactoe.win_state_o() and turn%2 == 0): score += 10 #score.append(10-(n.depth-currentNode.depth)) elif n.tictactoe.win_state_o(): score -= 10
two_in_a_row = currentNode.tictactoe.two_in_a_row('o')
conditional_block
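When minimax() in these tree.py rows finds no immediate win, block or trump move, it falls back to scoring each candidate child by the outcomes of the leaves beneath it (+10 for a win by the side to move, -10 for a loss) and picking the highest total. A simplified standalone sketch of that scoring fallback; score_leaves, best_child and the sample outcome lists are illustrative, not the file's own helpers:

def score_leaves(outcomes, ai_player):
    # outcomes: final winner of each leaf under one candidate move
    # ('x', 'o', or None for a tie), scored +10 / -10 / 0.
    score = 0
    for winner in outcomes:
        if winner == ai_player:
            score += 10
        elif winner is not None:
            score -= 10
    return score

def best_child(children_outcomes, ai_player):
    scores = [score_leaves(o, ai_player) for o in children_outcomes]
    return scores.index(max(scores))

# Three candidate moves whose subtrees end in these winners:
print(best_child([['o', None], ['x', 'x'], ['x', 'o']], 'x'))  # -> 1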
uploadSourcemap.ts
Promise} from '../utils/utils'; interface UploadSourcemapCMDParams { apiKey: string; apiSecret: string; release: string; minifiedDir: string; projectRoot: string; endPoint?: string; } export class UploadSourcemap { static sourcemapApi = 'sourcemap'; static abortSourcemapApi = 'abortUploadingSourcemap'; static djatyPathPrefix = 'djaty'; static sourcemapFileSuffix = '_sourcemap_files.tgz'; initializationDetails: CommandParams; constructor()
action: this.commandAction.bind(this), }; } async commandAction(cmd: UploadSourcemapCMDParams) { UploadSourcemap.validateOnCLIInput(cmd); const baseURL = cmd.endPoint || config.baseURL; const absolutePath = path.resolve(cmd.minifiedDir); const spinner = ora('Compressing sourcemap files').start(); let minifiedFileListPaths: string[] = []; let isUploadReqFired = false; let isAbortReqFired = false; const osTmpDir = os.tmpdir(); const djatyTmpPathDir = path.resolve(osTmpDir, UploadSourcemap.djatyPathPrefix); if (!fs.existsSync(djatyTmpPathDir)) { fs.mkdirSync(djatyTmpPathDir); } const uniquePrefix = (Math.random() * Date.now()).toString(); const compressedFileName = path.resolve(djatyTmpPathDir, `${Date.now()}${UploadSourcemap.sourcemapFileSuffix}`); process.on('SIGINT', dealWithCommandActionAsPromise(async() => { UploadSourcemap.saveRemove(compressedFileName); if (!isUploadReqFired || isAbortReqFired) { spinner.stop(); logger.info('Uploading stopped successfully.'); process.exit(); } isAbortReqFired = true; await requestPromise.put({ url: `${baseURL}/${config.baseAPI}/${UploadSourcemap.abortSourcemapApi}`, body: { apiKey: cmd.apiKey, apiSecret: cmd.apiSecret, release: cmd.release, uniquePrefix, }, headers: { 'djaty-escape-html-entities': true, }, // Automatically stringifies the body to JSON, json: true, timeout: config.requestTimeout, }).catch(err => { spinner.stop(); let errMsg = ''; if (err.statusCode === 400) { // Handle AJV validation errors. const error = JSON.parse(err.error.replace(')]}\',\n', '')); if (error.code === 'NOT_RELEASE_TO_ABORT') { // noinspection JSIgnoredPromiseFromCall djaty.trackBug(err); return; } errMsg = 'Validation error: \n\t'; const ignoredError = 'Unable to abort release. Release doesn\'t found.'; errMsg += error.errors ? error.errors .map((errItem: {message: string}) => errItem.message) .join('\n\t') : ''; throw new ValidationError(`Unable to stop uploading sourcemap files: ${errMsg}`); } else { errMsg += 'Something went wrong and a bug has been reported and will be resolved soon.'; } logger.error(`Unable to stop uploading sourcemap files: ${errMsg}`); throw err; }); spinner.stop(); logger.info('Uploading stopped successfully.'); process.exit(); })); try { // `*` Matches 0 or more characters in a single path portion // `@(*.js|*.js.map)` Matches exactly one of the patterns provided // `**` If a "globstar" is alone in a path portion, then it matches zero // or more directories and subdirectories searching for matches. minifiedFileListPaths = glob.sync('**/@(*.js|*.js.map)', {cwd: absolutePath}); } catch (err) { spinner.stop(); if (err.code === 'EACCES') { throw new ValidationError(err); } throw err; } try { await tar.c({ gzip: true, file: compressedFileName, cwd: absolutePath, }, minifiedFileListPaths, ); } catch (err) { spinner.stop(); logger.error('Unable to compress sourcemap files.', err); UploadSourcemap.saveRemove(compressedFileName); throw err; } spinner.stopAndPersist({text: 'The sourcemap files compressed successfully.', symbol: '#'}); spinner.start('Uploading sourcemap files to Djaty...'); const uploadFormData = { apiKey: cmd.apiKey, apiSecret: cmd.apiSecret, release: cmd.release, projectRoot: cmd.projectRoot, // I submit this data as a `formData` so it always been converted to `string` and I reuse the // same upcoming fields in normal request to abort current upload so, // I convert use them a string in the first place. 
maxFiles: minifiedFileListPaths.length.toString(), uniquePrefix, sourcemapFiles: fs.createReadStream(compressedFileName), }; isUploadReqFired = true; await requestPromise.post({ url: `${baseURL}/${config.baseAPI}/${UploadSourcemap.sourcemapApi}`, formData: uploadFormData, timeout: config.requestTimeout, headers: { 'djaty-escape-html-entities': true, }, }).catch(async err => { let errMsg = ''; UploadSourcemap.saveRemove(compressedFileName); spinner.stop(); // Handle request errors const requestErrorsCodes: {[p: string]: string} = { UNABLE_TO_VERIFY_LEAF_SIGNATURE: 'The current connection to Djaty is secured ' + 'with a self-signed certificate, but the current config has not passed the `server` ' + 'object with a `ca` (Certification Authority) property!', CERT_HAS_EXPIRED: 'The certificate of the HTTPS connection to Djaty has expired!', ECONNREFUSED: 'Make sure `server` config is correct', ENOTFOUND: 'Make sure `server` config is correct', ECONNRESET: 'Client network socket disconnected before secure TLS connection' + ' was established', }; if (err.error && err.error.code && requestErrorsCodes[err.error.code]) { errMsg += requestErrorsCodes[err.error.code]; throw new ValidationError(`Unable to upload sourcemap files: ${errMsg}.`); } // Handle nginx errors const nginxErrors: {[p: number]: string} = { 301: 'Redirections are not supported', 404: '404 Not Found', 413: 'Uploaded sourcemap file is too large; it should be less than 15MB', }; if (err.statusCode && nginxErrors[err.statusCode]) { errMsg += nginxErrors[err.statusCode]; throw new ValidationError(`Unable to upload sourcemap files: ${errMsg}.`); } // Handle server errors const serverErrors: {[p: number]: Function} = { 400: (error: any) => 'Validation error: \n\t' + error.errors .map((errItem: {message: string}) => errItem.message).join('\n\t'), 428: (error: any) => error.message, }; if (err.statusCode && serverErrors[err.statusCode]) { const error = JSON.parse(err.error.replace(')]}\',\n', '')); errMsg += serverErrors[err.statusCode](error); throw new ValidationError(`Unable to upload sourcemap files: ${errMsg}.`); } errMsg += 'Something went wrong and a bug has been reported and will be resolved soon'; logger.error(`Unable to upload sourcemap files: ${errMsg}.`); throw err; }); UploadSourcemap.saveRemove(compressedFileName); spinner.stop(); logger
{ this.initializationDetails = { command: 'uploadSourcemap', description: 'Upload project sourcemap files.', version: '1.0.0', optionList: [ ['--api-key <key>', 'An API key for the project'], ['--api-secret <secret>', 'An API secret for the project'], ['--release <v>', 'When requesting to resolve a stack trace, we check the bug release' + ' against currently uploaded releases and if a release is matched, the stack trace will be' + ' resolved. So, if a bug is not configured with a release, it\'ll not be able to have ' + 'its stack trace resolved. And due to the probability of having multiple devices running' + ' different releases concurrently, we let the user upload up to 5 releases per project.'], ['--minified-dir <path>', 'Path to the directory that contains the minified and ' + 'sourcemap files (i.e., `dist`). Only `.js` and `.map` files will be uploaded.'], ['--project-root <domain>', 'The path of the project root. It helps us locate the' + ' original files from the stack frame, e.g., http://example.com.'], ['--end-point [server]', 'The server URL. The default is `djaty.com`' + ' (set this for on-premises installations).'], ],
identifier_body
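The commandAction flow above collects every `.js` and `.js.map` file under the minified directory and compresses them into a gzipped tarball before uploading. Below is a minimal TypeScript sketch of just that step, assuming the same `glob` and `tar` packages; the function name `packSourcemaps` and the temp-file naming are illustrative, not part of the CLI.

import * as glob from 'glob';
import * as tar from 'tar';
import * as os from 'os';
import * as path from 'path';

// Collect minified bundles plus their sourcemaps and pack them into a .tgz.
// `@(*.js|*.js.map)` matches exactly one of the two patterns; `**/` recurses
// into subdirectories.
async function packSourcemaps(minifiedDir: string): Promise<string> {
  const cwd = path.resolve(minifiedDir);
  const files = glob.sync('**/@(*.js|*.js.map)', {cwd});
  const archive = path.resolve(os.tmpdir(), `${Date.now()}_sourcemap_files.tgz`);
  // With a `file` option and no callback, tar.c() returns a promise.
  await tar.c({gzip: true, file: archive, cwd}, files);
  return archive;
}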
uploadSourcemap.ts
minifiedDir: string; projectRoot: string; endPoint?: string; } export class UploadSourcemap { static sourcemapApi = 'sourcemap'; static abortSourcemapApi = 'abortUploadingSourcemap'; static djatyPathPrefix = 'djaty'; static sourcemapFileSuffix = '_sourcemap_files.tgz'; initializationDetails: CommandParams; constructor() { this.initializationDetails = { command: 'uploadSourcemap', description: 'Upload project sourcemap files.', version: '1.0.0', optionList: [ ['--api-key <key>', 'An API key for project'], ['--api-secret <secret>', 'An API secret for project'], ['--release <v>', 'when requesting to resolve a stack trace, we check the bug release' + 'against current uploaded releases and if a release is matched, the stack trace will be' + ' resolved. So, if a bug is not configured with a release, it\'ll not be able to have ' + 'its stack trace resolved. And due to the probability of having multiple devices running' + ' different releases concurrently, we let the user upload up to 5 releases per project.'], ['--minified-dir <path>', 'Path to the directory that contains the minified and ' + ' sourcemap files (I.e, `dist`). Only `.js` and `.map` files will be uploaded.'], ['--project-root <domain>', 'The path of the project root. It helps us locate the' + ' original files from the stack frame, e.g., http://example.com.'], ['--end-point [server]', 'The server URL The default is `djaty.com`' + ' (on-premises installations).'], ], action: this.commandAction.bind(this), }; } async commandAction(cmd: UploadSourcemapCMDParams) { UploadSourcemap.validateOnCLIInput(cmd); const baseURL = cmd.endPoint || config.baseURL; const absolutePath = path.resolve(cmd.minifiedDir); const spinner = ora('Compressing sourcemap files').start(); let minifiedFileListPaths: string[] = []; let isUploadReqFired = false; let isAbortReqFired = false; const osTmpDir = os.tmpdir(); const djatyTmpPathDir = path.resolve(osTmpDir, UploadSourcemap.djatyPathPrefix); if (!fs.existsSync(djatyTmpPathDir)) { fs.mkdirSync(djatyTmpPathDir); } const uniquePrefix = (Math.random() * Date.now()).toString(); const compressedFileName = path.resolve(djatyTmpPathDir, `${Date.now()}${UploadSourcemap.sourcemapFileSuffix}`); process.on('SIGINT', dealWithCommandActionAsPromise(async() => { UploadSourcemap.saveRemove(compressedFileName); if (!isUploadReqFired || isAbortReqFired) { spinner.stop(); logger.info('Uploading stopped successfully.'); process.exit(); } isAbortReqFired = true; await requestPromise.put({ url: `${baseURL}/${config.baseAPI}/${UploadSourcemap.abortSourcemapApi}`, body: { apiKey: cmd.apiKey, apiSecret: cmd.apiSecret, release: cmd.release, uniquePrefix, }, headers: { 'djaty-escape-html-entities': true, }, // Automatically stringifies the body to JSON, json: true, timeout: config.requestTimeout, }).catch(err => { spinner.stop(); let errMsg = ''; if (err.statusCode === 400) { // Handle AJV validation errors. const error = JSON.parse(err.error.replace(')]}\',\n', '')); if (error.code === 'NOT_RELEASE_TO_ABORT') { // noinspection JSIgnoredPromiseFromCall djaty.trackBug(err); return; } errMsg = 'Validation error: \n\t'; const ignoredError = 'Unable to abort release. Release doesn\'t found.'; errMsg += error.errors ? 
error.errors .map((errItem: {message: string}) => errItem.message) .join('\n\t') : ''; throw new ValidationError(`Unable to stop uploading sourcemap files: ${errMsg}`); } else { errMsg += 'Something went wrong and a bug has been reported and will be resolved soon.'; } logger.error(`Unable to stop uploading sourcemap files: ${errMsg}`); throw err; }); spinner.stop(); logger.info('Uploading stopped successfully.'); process.exit(); })); try { // `*` Matches 0 or more characters in a single path portion // `@(*.js|*.js.map)` Matches exactly one of the patterns provided // `**` If a "globstar" is alone in a path portion, then it matches zero // or more directories and subdirectories searching for matches. minifiedFileListPaths = glob.sync('**/@(*.js|*.js.map)', {cwd: absolutePath}); } catch (err) { spinner.stop(); if (err.code === 'EACCES') { throw new ValidationError(err); } throw err; } try { await tar.c({ gzip: true, file: compressedFileName, cwd: absolutePath, }, minifiedFileListPaths, ); } catch (err) { spinner.stop(); logger.error('Unable to compress sourcemap files.', err); UploadSourcemap.saveRemove(compressedFileName); throw err; } spinner.stopAndPersist({text: 'The sourcemap files compressed successfully.', symbol: '#'}); spinner.start('Uploading sourcemap files to Djaty...'); const uploadFormData = { apiKey: cmd.apiKey, apiSecret: cmd.apiSecret, release: cmd.release, projectRoot: cmd.projectRoot, // I submit this data as a `formData` so it always been converted to `string` and I reuse the // same upcoming fields in normal request to abort current upload so, // I convert use them a string in the first place. maxFiles: minifiedFileListPaths.length.toString(), uniquePrefix, sourcemapFiles: fs.createReadStream(compressedFileName), }; isUploadReqFired = true; await requestPromise.post({ url: `${baseURL}/${config.baseAPI}/${UploadSourcemap.sourcemapApi}`, formData: uploadFormData, timeout: config.requestTimeout, headers: { 'djaty-escape-html-entities': true, }, }).catch(async err => { let errMsg = ''; UploadSourcemap.saveRemove(compressedFileName); spinner.stop(); // Handle request errors const requestErrorsCodes: {[p: string]: string} = { UNABLE_TO_VERIFY_LEAF_SIGNATURE: 'Current connection to Djaty is secured ' + 'with a self-signed certificate but the current config has not passed the `server`' + 'object with a `ca` (Certification Authority) property!', CERT_HAS_EXPIRED: 'The certificate of the HTTPS connection to djaty has been expired!', ECONNREFUSED: 'Make sure `server` config is correct', ENOTFOUND: 'Make sure `server` config is correct', ECONNRESET: 'Client network socket disconnected before secure TLS connection' + ' was established', }; if (err.error && err.error.code && requestErrorsCodes[err.error.code]) { errMsg += requestErrorsCodes[err.error.code]; throw new ValidationError(`Unable to upload sourcemap files: ${errMsg}.`); } // Handle nginx errors const nginxErrors: {[p: number]: string} = { 301: 'Redirection are not supported', 404: '404 Not Found', 413: 'Uploaded sourcemap file is too large, It should be less than 15MB', }; if (err.statusCode && nginxErrors[err.statusCode]) { errMsg += nginxErrors[err.statusCode]; throw new ValidationError(`Unable to upload sourcemap files: ${errMsg}.`); } // Handle server errors const serverErrors: {[p: number]: Function} = { 400: (error: any) => 'Validation error: \n\t' + error.errors .map((errItem: {message: string}) => errItem.message).join('\n\t'), 428: (error: any) => error.message, }; if (err.statusCode && 
serverErrors[err.statusCode]) { const error = JSON.parse(err.error.replace(')]}\',\n', '')); errMsg += serverErrors[err.statusCode](error); throw new ValidationError(`Unable to upload sourcemap files: ${errMsg}.`); } errMsg += 'Something went wrong and a bug has been reported and will be resolved soon'; logger.error(`Unable to upload sourcemap files: ${errMsg}.`); throw err; }); UploadSourcemap.saveRemove(compressedFileName); spinner.stop(); logger.info('Uploading finished successfully. Please wait few minutes for' + ' the uploaded files to be processed.'); }; private static
validateOnCLIInput
identifier_name
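The SIGINT handler above only sends the abort request when an upload has actually started and no abort is already in flight. A stripped-down sketch of that guard logic follows, with hypothetical names (`abortUpload`, the `/abortUploadingSourcemap` path, the example base URL) and assuming the `request-promise` package used above.

import * as requestPromise from 'request-promise';

let isUploadReqFired = false;  // set to true right before the upload POST is sent
let isAbortReqFired = false;   // prevents sending the abort request twice

async function abortUpload(baseURL: string, body: object): Promise<void> {
  if (!isUploadReqFired || isAbortReqFired) {
    // Nothing to abort yet, or an abort is already in flight: just exit.
    process.exit();
  }
  isAbortReqFired = true;
  await requestPromise.put({
    url: `${baseURL}/abortUploadingSourcemap`,
    body,
    json: true, // stringifies the body to JSON automatically
  });
  process.exit();
}

process.on('SIGINT', () => {
  abortUpload('https://djaty.example.com', {release: '1.0.0'}).catch(() => process.exit(1));
});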
uploadSourcemap.ts
AsPromise} from '../utils/utils'; interface UploadSourcemapCMDParams { apiKey: string; apiSecret: string; release: string; minifiedDir: string; projectRoot: string; endPoint?: string; } export class UploadSourcemap { static sourcemapApi = 'sourcemap'; static abortSourcemapApi = 'abortUploadingSourcemap'; static djatyPathPrefix = 'djaty'; static sourcemapFileSuffix = '_sourcemap_files.tgz'; initializationDetails: CommandParams; constructor() { this.initializationDetails = { command: 'uploadSourcemap', description: 'Upload project sourcemap files.', version: '1.0.0', optionList: [ ['--api-key <key>', 'An API key for project'], ['--api-secret <secret>', 'An API secret for project'], ['--release <v>', 'when requesting to resolve a stack trace, we check the bug release' + 'against current uploaded releases and if a release is matched, the stack trace will be' + ' resolved. So, if a bug is not configured with a release, it\'ll not be able to have ' + 'its stack trace resolved. And due to the probability of having multiple devices running' + ' different releases concurrently, we let the user upload up to 5 releases per project.'], ['--minified-dir <path>', 'Path to the directory that contains the minified and ' + ' sourcemap files (I.e, `dist`). Only `.js` and `.map` files will be uploaded.'], ['--project-root <domain>', 'The path of the project root. It helps us locate the' + ' original files from the stack frame, e.g., http://example.com.'], ['--end-point [server]', 'The server URL The default is `djaty.com`' + ' (on-premises installations).'], ], action: this.commandAction.bind(this), }; } async commandAction(cmd: UploadSourcemapCMDParams) { UploadSourcemap.validateOnCLIInput(cmd); const baseURL = cmd.endPoint || config.baseURL; const absolutePath = path.resolve(cmd.minifiedDir); const spinner = ora('Compressing sourcemap files').start(); let minifiedFileListPaths: string[] = []; let isUploadReqFired = false; let isAbortReqFired = false; const osTmpDir = os.tmpdir(); const djatyTmpPathDir = path.resolve(osTmpDir, UploadSourcemap.djatyPathPrefix); if (!fs.existsSync(djatyTmpPathDir)) { fs.mkdirSync(djatyTmpPathDir); } const uniquePrefix = (Math.random() * Date.now()).toString(); const compressedFileName = path.resolve(djatyTmpPathDir, `${Date.now()}${UploadSourcemap.sourcemapFileSuffix}`); process.on('SIGINT', dealWithCommandActionAsPromise(async() => { UploadSourcemap.saveRemove(compressedFileName); if (!isUploadReqFired || isAbortReqFired) { spinner.stop(); logger.info('Uploading stopped successfully.'); process.exit(); } isAbortReqFired = true; await requestPromise.put({ url: `${baseURL}/${config.baseAPI}/${UploadSourcemap.abortSourcemapApi}`, body: { apiKey: cmd.apiKey, apiSecret: cmd.apiSecret, release: cmd.release, uniquePrefix, }, headers: { 'djaty-escape-html-entities': true, }, // Automatically stringifies the body to JSON, json: true, timeout: config.requestTimeout, }).catch(err => { spinner.stop(); let errMsg = '';
if (err.statusCode === 400) { // Handle AJV validation errors. const error = JSON.parse(err.error.replace(')]}\',\n', '')); if (error.code === 'NOT_RELEASE_TO_ABORT') { // noinspection JSIgnoredPromiseFromCall djaty.trackBug(err); return; } errMsg = 'Validation error: \n\t'; const ignoredError = 'Unable to abort release. Release doesn\'t found.'; errMsg += error.errors ? error.errors .map((errItem: {message: string}) => errItem.message) .join('\n\t') : ''; throw new ValidationError(`Unable to stop uploading sourcemap files: ${errMsg}`); } else { errMsg += 'Something went wrong and a bug has been reported and will be resolved soon.'; } logger.error(`Unable to stop uploading sourcemap files: ${errMsg}`); throw err; }); spinner.stop(); logger.info('Uploading stopped successfully.'); process.exit(); })); try { // `*` Matches 0 or more characters in a single path portion // `@(*.js|*.js.map)` Matches exactly one of the patterns provided // `**` If a "globstar" is alone in a path portion, then it matches zero // or more directories and subdirectories searching for matches. minifiedFileListPaths = glob.sync('**/@(*.js|*.js.map)', {cwd: absolutePath}); } catch (err) { spinner.stop(); if (err.code === 'EACCES') { throw new ValidationError(err); } throw err; } try { await tar.c({ gzip: true, file: compressedFileName, cwd: absolutePath, }, minifiedFileListPaths, ); } catch (err) { spinner.stop(); logger.error('Unable to compress sourcemap files.', err); UploadSourcemap.saveRemove(compressedFileName); throw err; } spinner.stopAndPersist({text: 'The sourcemap files compressed successfully.', symbol: '#'}); spinner.start('Uploading sourcemap files to Djaty...'); const uploadFormData = { apiKey: cmd.apiKey, apiSecret: cmd.apiSecret, release: cmd.release, projectRoot: cmd.projectRoot, // I submit this data as a `formData` so it always been converted to `string` and I reuse the // same upcoming fields in normal request to abort current upload so, // I convert use them a string in the first place. 
maxFiles: minifiedFileListPaths.length.toString(), uniquePrefix, sourcemapFiles: fs.createReadStream(compressedFileName), }; isUploadReqFired = true; await requestPromise.post({ url: `${baseURL}/${config.baseAPI}/${UploadSourcemap.sourcemapApi}`, formData: uploadFormData, timeout: config.requestTimeout, headers: { 'djaty-escape-html-entities': true, }, }).catch(async err => { let errMsg = ''; UploadSourcemap.saveRemove(compressedFileName); spinner.stop(); // Handle request errors const requestErrorsCodes: {[p: string]: string} = { UNABLE_TO_VERIFY_LEAF_SIGNATURE: 'Current connection to Djaty is secured ' + 'with a self-signed certificate but the current config has not passed the `server`' + 'object with a `ca` (Certification Authority) property!', CERT_HAS_EXPIRED: 'The certificate of the HTTPS connection to djaty has been expired!', ECONNREFUSED: 'Make sure `server` config is correct', ENOTFOUND: 'Make sure `server` config is correct', ECONNRESET: 'Client network socket disconnected before secure TLS connection' + ' was established', }; if (err.error && err.error.code && requestErrorsCodes[err.error.code]) { errMsg += requestErrorsCodes[err.error.code]; throw new ValidationError(`Unable to upload sourcemap files: ${errMsg}.`); } // Handle nginx errors const nginxErrors: {[p: number]: string} = { 301: 'Redirection are not supported', 404: '404 Not Found', 413: 'Uploaded sourcemap file is too large, It should be less than 15MB', }; if (err.statusCode && nginxErrors[err.statusCode]) { errMsg += nginxErrors[err.statusCode]; throw new ValidationError(`Unable to upload sourcemap files: ${errMsg}.`); } // Handle server errors const serverErrors: {[p: number]: Function} = { 400: (error: any) => 'Validation error: \n\t' + error.errors .map((errItem: {message: string}) => errItem.message).join('\n\t'), 428: (error: any) => error.message, }; if (err.statusCode && serverErrors[err.statusCode]) { const error = JSON.parse(err.error.replace(')]}\',\n', '')); errMsg += serverErrors[err.statusCode](error); throw new ValidationError(`Unable to upload sourcemap files: ${errMsg}.`); } errMsg += 'Something went wrong and a bug has been reported and will be resolved soon'; logger.error(`Unable to upload sourcemap files: ${errMsg}.`); throw err; }); UploadSourcemap.saveRemove(compressedFileName); spinner.stop(); logger
random_line_split
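The upload's catch handler above strips a `)]}',\n` prefix (commonly used as an XSSI guard) before parsing the error body, and maps known HTTP status codes to friendlier messages. A condensed sketch of that pattern is shown below; `ValidationError` stands in for the CLI's own error class and the message table is abbreviated.

class ValidationError extends Error {}

const nginxErrors: {[status: number]: string} = {
  301: 'Redirections are not supported',
  404: '404 Not Found',
  413: 'Uploaded sourcemap file is too large; it should be less than 15MB',
};

function toFriendlyError(err: {statusCode?: number, error?: string}): Error {
  if (err.statusCode && nginxErrors[err.statusCode]) {
    return new ValidationError(nginxErrors[err.statusCode]);
  }
  if (err.statusCode === 400 && err.error) {
    // Drop the XSSI guard prefix before parsing the JSON error payload.
    const parsed = JSON.parse(err.error.replace(')]}\',\n', ''));
    const details = (parsed.errors || [])
      .map((e: {message: string}) => e.message)
      .join('\n\t');
    return new ValidationError(`Validation error: \n\t${details}`);
  }
  return new Error('Something went wrong and a bug has been reported.');
}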
uploadSourcemap.ts
the user upload up to 5 releases per project.'], ['--minified-dir <path>', 'Path to the directory that contains the minified and ' + ' sourcemap files (I.e, `dist`). Only `.js` and `.map` files will be uploaded.'], ['--project-root <domain>', 'The path of the project root. It helps us locate the' + ' original files from the stack frame, e.g., http://example.com.'], ['--end-point [server]', 'The server URL The default is `djaty.com`' + ' (on-premises installations).'], ], action: this.commandAction.bind(this), }; } async commandAction(cmd: UploadSourcemapCMDParams) { UploadSourcemap.validateOnCLIInput(cmd); const baseURL = cmd.endPoint || config.baseURL; const absolutePath = path.resolve(cmd.minifiedDir); const spinner = ora('Compressing sourcemap files').start(); let minifiedFileListPaths: string[] = []; let isUploadReqFired = false; let isAbortReqFired = false; const osTmpDir = os.tmpdir(); const djatyTmpPathDir = path.resolve(osTmpDir, UploadSourcemap.djatyPathPrefix); if (!fs.existsSync(djatyTmpPathDir)) { fs.mkdirSync(djatyTmpPathDir); } const uniquePrefix = (Math.random() * Date.now()).toString(); const compressedFileName = path.resolve(djatyTmpPathDir, `${Date.now()}${UploadSourcemap.sourcemapFileSuffix}`); process.on('SIGINT', dealWithCommandActionAsPromise(async() => { UploadSourcemap.saveRemove(compressedFileName); if (!isUploadReqFired || isAbortReqFired) { spinner.stop(); logger.info('Uploading stopped successfully.'); process.exit(); } isAbortReqFired = true; await requestPromise.put({ url: `${baseURL}/${config.baseAPI}/${UploadSourcemap.abortSourcemapApi}`, body: { apiKey: cmd.apiKey, apiSecret: cmd.apiSecret, release: cmd.release, uniquePrefix, }, headers: { 'djaty-escape-html-entities': true, }, // Automatically stringifies the body to JSON, json: true, timeout: config.requestTimeout, }).catch(err => { spinner.stop(); let errMsg = ''; if (err.statusCode === 400) { // Handle AJV validation errors. const error = JSON.parse(err.error.replace(')]}\',\n', '')); if (error.code === 'NOT_RELEASE_TO_ABORT') { // noinspection JSIgnoredPromiseFromCall djaty.trackBug(err); return; } errMsg = 'Validation error: \n\t'; const ignoredError = 'Unable to abort release. Release doesn\'t found.'; errMsg += error.errors ? error.errors .map((errItem: {message: string}) => errItem.message) .join('\n\t') : ''; throw new ValidationError(`Unable to stop uploading sourcemap files: ${errMsg}`); } else { errMsg += 'Something went wrong and a bug has been reported and will be resolved soon.'; } logger.error(`Unable to stop uploading sourcemap files: ${errMsg}`); throw err; }); spinner.stop(); logger.info('Uploading stopped successfully.'); process.exit(); })); try { // `*` Matches 0 or more characters in a single path portion // `@(*.js|*.js.map)` Matches exactly one of the patterns provided // `**` If a "globstar" is alone in a path portion, then it matches zero // or more directories and subdirectories searching for matches. 
minifiedFileListPaths = glob.sync('**/@(*.js|*.js.map)', {cwd: absolutePath}); } catch (err) { spinner.stop(); if (err.code === 'EACCES') { throw new ValidationError(err); } throw err; } try { await tar.c({ gzip: true, file: compressedFileName, cwd: absolutePath, }, minifiedFileListPaths, ); } catch (err) { spinner.stop(); logger.error('Unable to compress sourcemap files.', err); UploadSourcemap.saveRemove(compressedFileName); throw err; } spinner.stopAndPersist({text: 'The sourcemap files compressed successfully.', symbol: '#'}); spinner.start('Uploading sourcemap files to Djaty...'); const uploadFormData = { apiKey: cmd.apiKey, apiSecret: cmd.apiSecret, release: cmd.release, projectRoot: cmd.projectRoot, // I submit this data as a `formData` so it always been converted to `string` and I reuse the // same upcoming fields in normal request to abort current upload so, // I convert use them a string in the first place. maxFiles: minifiedFileListPaths.length.toString(), uniquePrefix, sourcemapFiles: fs.createReadStream(compressedFileName), }; isUploadReqFired = true; await requestPromise.post({ url: `${baseURL}/${config.baseAPI}/${UploadSourcemap.sourcemapApi}`, formData: uploadFormData, timeout: config.requestTimeout, headers: { 'djaty-escape-html-entities': true, }, }).catch(async err => { let errMsg = ''; UploadSourcemap.saveRemove(compressedFileName); spinner.stop(); // Handle request errors const requestErrorsCodes: {[p: string]: string} = { UNABLE_TO_VERIFY_LEAF_SIGNATURE: 'Current connection to Djaty is secured ' + 'with a self-signed certificate but the current config has not passed the `server`' + 'object with a `ca` (Certification Authority) property!', CERT_HAS_EXPIRED: 'The certificate of the HTTPS connection to djaty has been expired!', ECONNREFUSED: 'Make sure `server` config is correct', ENOTFOUND: 'Make sure `server` config is correct', ECONNRESET: 'Client network socket disconnected before secure TLS connection' + ' was established', }; if (err.error && err.error.code && requestErrorsCodes[err.error.code]) { errMsg += requestErrorsCodes[err.error.code]; throw new ValidationError(`Unable to upload sourcemap files: ${errMsg}.`); } // Handle nginx errors const nginxErrors: {[p: number]: string} = { 301: 'Redirection are not supported', 404: '404 Not Found', 413: 'Uploaded sourcemap file is too large, It should be less than 15MB', }; if (err.statusCode && nginxErrors[err.statusCode]) { errMsg += nginxErrors[err.statusCode]; throw new ValidationError(`Unable to upload sourcemap files: ${errMsg}.`); } // Handle server errors const serverErrors: {[p: number]: Function} = { 400: (error: any) => 'Validation error: \n\t' + error.errors .map((errItem: {message: string}) => errItem.message).join('\n\t'), 428: (error: any) => error.message, }; if (err.statusCode && serverErrors[err.statusCode]) { const error = JSON.parse(err.error.replace(')]}\',\n', '')); errMsg += serverErrors[err.statusCode](error); throw new ValidationError(`Unable to upload sourcemap files: ${errMsg}.`); } errMsg += 'Something went wrong and a bug has been reported and will be resolved soon'; logger.error(`Unable to upload sourcemap files: ${errMsg}.`); throw err; }); UploadSourcemap.saveRemove(compressedFileName); spinner.stop(); logger.info('Uploading finished successfully. 
Please wait a few minutes for' + ' the uploaded files to be processed.'); }; private static validateOnCLIInput(cmd: UploadSourcemapCMDParams | string) { if (typeof cmd === 'string') { throw new ValidationError(`Invalid args params: '${cmd}'`); } if (!cmd.apiKey || !cmd.apiSecret || !cmd.release || !cmd.minifiedDir || !cmd.projectRoot) { throw new ValidationError('Command params (apiKey, apiSecret,' + ' release, projectRoot and minifiedDir) are required'); } if (!fs.existsSync(cmd.minifiedDir)) { throw new ValidationError('Command param `minifiedDir` does not exist'); } if (!fs.lstatSync(cmd.minifiedDir).isDirectory()) { throw new ValidationError('Command param `minifiedDir` should be a directory path'); } const urlRegexValidator = urlRegex({exact: true, strict: false}); if (!urlRegexValidator.test(cmd.projectRoot)) { throw new ValidationError('Invalid `project-root`.' + ' You should add a valid URL like `http://your-domain.com`'); } if (cmd.endPoint && !urlRegexValidator.test(cmd.endPoint))
{ throw new ValidationError('Invalid `end-point`.' + ' You should add a valid URL like `http://your-domain.com`'); }
conditional_block
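validateOnCLIInput above checks that all required flags are present, that `minifiedDir` is an existing directory, and that `projectRoot`/`endPoint` are well-formed URLs. A self-contained sketch of those checks follows, assuming the `url-regex` package used above; the names `CmdParams` and `validateParams` are illustrative.

import * as fs from 'fs';
import urlRegex = require('url-regex');

interface CmdParams {
  apiKey: string;
  apiSecret: string;
  release: string;
  minifiedDir: string;
  projectRoot: string;
  endPoint?: string;
}

function validateParams(cmd: CmdParams): void {
  if (!cmd.apiKey || !cmd.apiSecret || !cmd.release || !cmd.minifiedDir || !cmd.projectRoot) {
    throw new Error('apiKey, apiSecret, release, projectRoot and minifiedDir are required');
  }
  if (!fs.existsSync(cmd.minifiedDir) || !fs.lstatSync(cmd.minifiedDir).isDirectory()) {
    throw new Error('`minifiedDir` must be an existing directory');
  }
  // url-regex returns a RegExp; `exact` anchors it to the whole string.
  const isUrl = urlRegex({exact: true, strict: false});
  if (!isUrl.test(cmd.projectRoot)) {
    throw new Error('`project-root` must be a valid URL like `http://your-domain.com`');
  }
  if (cmd.endPoint && !isUrl.test(cmd.endPoint)) {
    throw new Error('`end-point` must be a valid URL like `http://your-domain.com`');
  }
}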
decl.go
untyped numeric constants, make sure the value // representation matches what the rest of the // compiler (really just iexport) expects. // TODO(mdempsky): Revisit after #43891 is resolved. val := obj.(*types2.Const).Val() switch name.Type() { case types.UntypedInt, types.UntypedRune: val = constant.ToInt(val) case types.UntypedFloat: val = constant.ToFloat(val) case types.UntypedComplex: val = constant.ToComplex(val) } name.SetVal(val) out.Append(ir.NewDecl(g.pos(decl), ir.ODCLCONST, name)) } } func (g *irgen) funcDecl(out *ir.Nodes, decl *syntax.FuncDecl) { assert(g.curDecl == "") // Set g.curDecl to the function name, as context for the type params declared // during types2-to-types1 translation if this is a generic function. g.curDecl = decl.Name.Value obj2 := g.info.Defs[decl.Name] recv := types2.AsSignature(obj2.Type()).Recv() if recv != nil { t2 := deref2(recv.Type()) // This is a method, so set g.curDecl to recvTypeName.methName instead. g.curDecl = t2.(*types2.Named).Obj().Name() + "." + g.curDecl } fn := ir.NewFunc(g.pos(decl)) fn.Nname, _ = g.def(decl.Name) fn.Nname.Func = fn fn.Nname.Defn = fn fn.Pragma = g.pragmaFlags(decl.Pragma, funcPragmas) if fn.Pragma&ir.Systemstack != 0 && fn.Pragma&ir.Nosplit != 0 { base.ErrorfAt(fn.Pos(), "go:nosplit and go:systemstack cannot be combined") } if fn.Pragma&ir.Nointerface != 0 { // Propagate //go:nointerface from Func.Pragma to Field.Nointerface. // This is a bit roundabout, but this is the earliest point where we've // processed the function's pragma flags, and we've also already created // the Fields to represent the receiver's method set. if recv := fn.Type().Recv(); recv != nil { typ := types.ReceiverBaseType(recv.Type) if orig := typ.OrigType(); orig != nil { // For a generic method, we mark the methods on the // base generic type, since those are the methods // that will be stenciled. typ = orig } meth := typecheck.Lookdot1(fn, typecheck.Lookup(decl.Name.Value), typ, typ.Methods(), 0) meth.SetNointerface(true) } } if decl.Body != nil && fn.Pragma&ir.Noescape != 0 { base.ErrorfAt(fn.Pos(), "can only use //go:noescape with external func implementations") } if decl.Name.Value == "init" && decl.Recv == nil { g.target.Inits = append(g.target.Inits, fn) } saveHaveEmbed := g.haveEmbed saveCurDecl := g.curDecl g.curDecl = "" g.later(func() { defer func(b bool, s string) { // Revert haveEmbed and curDecl back to what they were before // the "later" function. g.haveEmbed = b g.curDecl = s }(g.haveEmbed, g.curDecl) // Set haveEmbed and curDecl to what they were for this funcDecl. g.haveEmbed = saveHaveEmbed g.curDecl = saveCurDecl if fn.Type().HasTParam() { g.topFuncIsGeneric = true } g.funcBody(fn, decl.Recv, decl.Type, decl.Body) g.topFuncIsGeneric = false if fn.Type().HasTParam() && fn.Body != nil { // Set pointers to the dcls/body of a generic function/method in // the Inl struct, so it is marked for export, is available for // stenciling, and works with Inline_Flood(). fn.Inl = &ir.Inline{ Cost: 1, Dcl: fn.Dcl, Body: fn.Body, } } out.Append(fn) }) } func (g *irgen) typeDecl(out *ir.Nodes, decl *syntax.TypeDecl) { // Set the position for any error messages we might print (e.g. too large types). base.Pos = g.pos(decl) assert(ir.CurFunc != nil || g.curDecl == "") // Set g.curDecl to the type name, as context for the type params declared // during types2-to-types1 translation if this is a generic type. 
saveCurDecl := g.curDecl g.curDecl = decl.Name.Value if decl.Alias { name, _ := g.def(decl.Name) g.pragmaFlags(decl.Pragma, 0) assert(name.Alias()) // should be set by irgen.obj out.Append(ir.NewDecl(g.pos(decl), ir.ODCLTYPE, name)) g.curDecl = "" return } // Prevent size calculations until we set the underlying type. types.DeferCheckSize() name, obj := g.def(decl.Name) ntyp, otyp := name.Type(), obj.Type() if ir.CurFunc != nil { ntyp.SetVargen() } pragmas := g.pragmaFlags(decl.Pragma, typePragmas) name.SetPragma(pragmas) // TODO(mdempsky): Is this still needed? if pragmas&ir.NotInHeap != 0 { ntyp.SetNotInHeap(true) } // We need to use g.typeExpr(decl.Type) here to ensure that for // chained, defined-type declarations like: // // type T U // // //go:notinheap // type U struct { … } // // we mark both T and U as NotInHeap. If we instead used just // g.typ(otyp.Underlying()), then we'd instead set T's underlying // type directly to the struct type (which is not marked NotInHeap) // and fail to mark T as NotInHeap. // // Also, we rely here on Type.SetUnderlying allowing passing a // defined type and handling forward references like from T to U // above. Contrast with go/types's Named.SetUnderlying, which // disallows this. // // [mdempsky: Subtleties like these are why I always vehemently // object to new type pragmas.] ntyp.SetUnderlying(g.typeExpr(decl.Type)) tparams := otyp.(*types2.Named).TypeParams() if n := tparams.Len(); n > 0 { rparams := make([]*types.Type, n) for i := range rparams { rparams[i] = g.typ(tparams.At(i)) } // This will set hasTParam flag if any rparams are not concrete types. ntyp.SetRParams(rparams) } types.ResumeCheckSize() g.curDecl = saveCurDecl if otyp, ok := otyp.(*types2.Named); ok && otyp.NumMethods() != 0 { methods := make([]*types.Field, otyp.NumMethods()) for i := range methods { m := otyp.Method(i) // Set g.curDecl to recvTypeName.methName, as context for the // method-specific type params in the receiver. g.curDecl = decl.Name.Value + "." + m.Name() meth := g.obj(m) methods[i] = types.NewField(meth.Pos(), g.selector(m), meth.Type()) methods[i].Nname = meth g.curDecl = "" } ntyp.Methods().Set(methods) } out.Append(ir.NewDecl(g.pos(decl), ir.ODCLTYPE, name)) } func (g *irgen) varDecl(out *ir.Nodes, decl *syntax.VarDecl) { pos := g.pos(decl) // Set the position for any error messages we might print (e.g. too large types). base.Pos = pos names := make([]*ir.Name, len(decl.NameList)) for i, name := range decl.NameList { names[i], _ = g.def(name) } if decl.Pragma != nil { pragma := decl.Pragma.(*pragmas) varEmbed(g.makeXPos, names[0], decl, pragma, g.haveEmbed) g.reportUnused(pragma) } haveEmbed := g.haveEmbed do := func() { defer func(b bool) { g.haveEmbed = b }(g.haveEmbed) g.haveEmbed = haveEmbed values := g.exprList(decl.Values) var as2 *ir.AssignListStmt if len(values) != 0 && len(names) != len(values) {
as2 = ir.NewAssignListStmt(pos, ir.OAS2, make([]ir.Node, len(names)), values) }
conditional_block