Dataset schema (five fields per row, appearing in this order for every sample): file_name, prefix, suffix, middle, fim_type.

file_name: large_string, lengths 4–140
prefix: large_string, lengths 0–12.1k
suffix: large_string, lengths 0–12k
middle: large_string, lengths 0–7.51k
fim_type: large_string, 4 classes (conditional_block, identifier_body, identifier_name, random_line_split)
resp.go
return "Integer" case '$': return "BulkString" case '*': return "Array" case 'R': return "RDB" } } // Value represents the data of a valid RESP type. type Value struct { Typ Type IntegerV int Str []byte ArrayV []Value Null bool RDB bool Size int } func (v Value) ReplInfo() (runID string, offset int64) { if v.Type() != Rdb { return } buf := bytes.Split(v.Str, []byte(" ")) if len(buf) < 3 { return } _offset, err := strconv.ParseInt(string(buf[2]), 10, 64) if err != nil
return string(buf[1]), _offset } // Integer converts Value to an int. If Value cannot be converted, zero is returned. func (v Value) Integer() int { switch v.Typ { default: n, _ := strconv.ParseInt(v.String(), 10, 64) return int(n) case ':': return v.IntegerV } } // String converts Value to a string. func (v Value) String() string { if v.Typ == '$' { return string(v.Str) } switch v.Typ { case '+', '-': return string(v.Str) case ':': return strconv.FormatInt(int64(v.IntegerV), 10) case '*': buf := bytes.NewBuffer(nil) concatArray(buf, v.ArrayV...) return strings.TrimSuffix(buf.String(), " ") case '\r': return "\r\n" } return "" } func concatArray(wr io.Writer, vs ...Value) { for i := range vs { _, err := wr.Write([]byte(vs[i].String())) if err != nil { panic(err) } _, err = wr.Write([]byte("\r\n")) if err != nil { panic(err) } concatArray(wr, vs[i].Array()...) } } // Bytes converts the Value to a byte array. An empty string is converted to a non-nil empty byte array. // If it's a RESP Null value, nil is returned. func (v Value) Bytes() []byte { switch v.Typ { default: return []byte(v.String()) case '$', '+', '-': return v.Str } } // Float converts Value to a float64. If Value cannot be converted, // zero is returned. func (v Value) Float() float64 { switch v.Typ { default: f, _ := strconv.ParseFloat(v.String(), 64) return f case ':': return float64(v.IntegerV) } } // IsNull indicates whether or not the base value is null. func (v Value) IsNull() bool { return v.Null } // Bool converts Value to a bool. If Value cannot be converted, false is returned. func (v Value) Bool() bool { return v.Integer() != 0 } // Error converts the Value to an error. If Value is not an error, nil is returned. func (v Value) Error() error { switch v.Typ { case '-': return errors.New(string(v.Str)) } return nil } // Array converts the Value to an array. // If Value is not an array or when it is a RESP Null value, nil is returned. func (v Value) Array() []Value { if v.Typ == '*' && !v.Null { return v.ArrayV } return nil } // Type returns the underlying RESP type. // The following types represent valid RESP values. 
func (v Value) Type() Type { return v.Typ } func marshalSimpleRESP(typ Type, b []byte) ([]byte, error) { bb := make([]byte, 3+len(b)) bb[0] = byte(typ) copy(bb[1:], b) bb[1+len(b)+0] = '\r' bb[1+len(b)+1] = '\n' return bb, nil } func marshalBulkRESP(v Value) ([]byte, error) { if v.Null { return []byte("$-1\r\n"), nil } szb := []byte(strconv.FormatInt(int64(len(v.Str)), 10)) bb := make([]byte, 5+len(szb)+len(v.Str)) bb[0] = '$' copy(bb[1:], szb) bb[1+len(szb)+0] = '\r' bb[1+len(szb)+1] = '\n' copy(bb[1+len(szb)+2:], v.Str) bb[1+len(szb)+2+len(v.Str)+0] = '\r' bb[1+len(szb)+2+len(v.Str)+1] = '\n' return bb, nil } func marshalArrayRESP(v Value) ([]byte, error) { if v.Null { return []byte("*-1\r\n"), nil } szb := []byte(strconv.FormatInt(int64(len(v.ArrayV)), 10)) var buf bytes.Buffer buf.Grow(3 + len(szb) + 16*len(v.ArrayV)) // prime the buffer buf.WriteByte('*') buf.Write(szb) buf.WriteByte('\r') buf.WriteByte('\n') for i := 0; i < len(v.ArrayV); i++ { data, err := v.ArrayV[i].MarshalRESP() if err != nil { return nil, err } buf.Write(data) } return buf.Bytes(), nil } func marshalAnyRESP(v Value) ([]byte, error) { switch v.Typ { default: if v.Typ == 0 && v.Null { return []byte("$-1\r\n"), nil } return nil, errors.New("unknown resp type encountered") case '-', '+': return marshalSimpleRESP(v.Typ, v.Str) case ':': return marshalSimpleRESP(v.Typ, []byte(strconv.FormatInt(int64(v.IntegerV), 10))) case '$': return marshalBulkRESP(v) case '*': return marshalArrayRESP(v) } } // Equals compares one value to another value. func (v Value) Equals(value Value) bool { data1, err := v.MarshalRESP() if err != nil { return false } data2, err := value.MarshalRESP() if err != nil { return false } return string(data1) == string(data2) } // MarshalRESP returns the original serialized byte representation of Value. // For more information on this format please see http://redis.io/topics/protocol. func (v Value) MarshalRESP() ([]byte, error) { return marshalAnyRESP(v) } var NilValue = Value{Null: true} type ErrProtocol struct{ Msg string } func (err ErrProtocol) Error() string { return "Protocol error: " + err.Msg } // AnyValue returns a RESP value from an interface. // This function infers the types. Arrays are not allowed. func AnyValue(v interface{}) Value { switch v := v.(type) { default: return StringValue(fmt.Sprintf("%v", v)) case nil: return NullValue() case int: return IntegerValue(int(v)) case uint: return IntegerValue(int(v)) case int8: return IntegerValue(int(v)) case uint8: return IntegerValue(int(v)) case int16: return IntegerValue(int(v)) case uint16: return IntegerValue(int(v)) case int32: return IntegerValue(int(v)) case uint32: return IntegerValue(int(v)) case int64: return IntegerValue(int(v)) case uint64: return IntegerValue(int(v)) case bool: return BoolValue(v) case float32: return FloatValue(float64(v)) case float64: return FloatValue(float64(v)) case []byte: return BytesValue(v) case string: return StringValue(v) } } // SimpleStringValue returns a RESP simple string. A simple string has no new lines. The carriage return and new line characters are replaced with spaces. func SimpleStringValue(s string) Value { return Value{Typ: '+', Str: []byte(formSingleLine(s))} } // BytesValue returns a RESP bulk string. A bulk string can represent any data. func BytesValue(b []byte) Value { return Value{Typ: '$', Str: b} } // StringValue returns a RESP bulk string. A bulk string can represent any data. 
func StringValue(s string) Value { return Value{Typ: '$', Str: []byte(s)} } // NullValue returns a RESP null bulk string. func NullValue() Value { return Value{Typ: '$', Null: true} } // ErrorValue returns a RESP error. func ErrorValue(err error) Value { if err == nil { return Value{Typ: '-'} } return Value{Typ: '-', Str: []byte(err.Error())} } // IntegerValue returns a RESP integer. func IntegerValue(i int) Value { return Value{Typ: ':', IntegerV: i} } // BoolValue returns a RESP integer representation of a bool. func BoolValue(t bool) Value { if t { return Value{Typ: ':', IntegerV: 1} } return Value{Typ: ':', IntegerV: 0} } // FloatValue returns a RESP bulk string representation of a float. func FloatValue(f float
{ return }
conditional_block
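The row above is the first complete FIM sample: the middle field (`{ return }`) is the conditional body cut out between prefix and suffix, and `prefix + middle + suffix` restores the original resp.go. A minimal Go sketch of that reassembly; the `fimSample` struct and its field names are assumptions mirroring the schema above, not an official loader type:

```go
package main

import "fmt"

// fimSample mirrors the five columns of this dataset. The field names are
// assumptions taken from the schema above, not an official loader type.
type fimSample struct {
	FileName string
	Prefix   string
	Suffix   string
	Middle   string
	FimType  string
}

// reassemble restores the original source text of a sample.
func reassemble(s fimSample) string {
	return s.Prefix + s.Middle + s.Suffix
}

func main() {
	s := fimSample{
		FileName: "resp.go",
		Prefix:   "... if err != nil ",
		Middle:   "{ return }",
		Suffix:   " return string(buf[1]), _offset } ...",
		FimType:  "conditional_block",
	}
	fmt.Println(reassemble(s))
}
```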
resp.go
return "Integer" case '$': return "BulkString" case '*': return "Array" case 'R': return "RDB" } } // Value represents the data of a valid RESP type. type Value struct { Typ Type IntegerV int Str []byte ArrayV []Value Null bool RDB bool Size int } func (v Value) ReplInfo() (runID string, offset int64) { if v.Type() != Rdb { return } buf := bytes.Split(v.Str, []byte(" ")) if len(buf) < 3 { return } _offset, err := strconv.ParseInt(string(buf[2]), 10, 64) if err != nil { return } return string(buf[1]), _offset } // Integer converts Value to an int. If Value cannot be converted, Zero is returned. func (v Value) Integer() int { switch v.Typ { default: n, _ := strconv.ParseInt(v.String(), 10, 64) return int(n) case ':': return v.IntegerV } } // String converts Value to a string. func (v Value) String() string { if v.Typ == '$' { return string(v.Str) } switch v.Typ { case '+', '-': return string(v.Str) case ':': return strconv.FormatInt(int64(v.IntegerV), 10) case '*': buf := bytes.NewBuffer(nil) concatArray(buf, v.ArrayV...) return strings.TrimSuffix(buf.String(), " ") case '\r': return "\r\n" } return "" } func concatArray(wr io.Writer, vs ...Value) { for i := range vs { _, err := wr.Write([]byte(vs[i].String())) if err != nil { panic(err) } _, err = wr.Write([]byte("\r\n")) if err != nil { panic(err) } concatArray(wr, vs[i].Array()...) } } // Bytes converts the Value to a byte array. An empty string is converted to a non-nil empty byte array. // If it's a RESP Null value, nil is returned. func (v Value) Bytes() []byte { switch v.Typ { default: return []byte(v.String()) case '$', '+', '-': return v.Str } } // Float converts Value to a float64. If Value cannot be converted // Zero is returned. func (v Value) Float() float64 { switch v.Typ { default: f, _ := strconv.ParseFloat(v.String(), 64) return f case ':': return float64(v.IntegerV) } } // IsNull indicates whether or not the base value is null. func (v Value) IsNull() bool { return v.Null } // Bool converts Value to an bool. If Value cannot be converted, false is returned. func (v Value) Bool() bool { return v.Integer() != 0 } // Error converts the Value to an error. If Value is not an error, nil is returned. func (v Value) Error() error { switch v.Typ { case '-': return errors.New(string(v.Str)) } return nil } // Array converts the Value to a an array. // If Value is not an array or when it's is a RESP Null value, nil is returned. func (v Value) Array() []Value { if v.Typ == '*' && !v.Null { return v.ArrayV } return nil } // Type returns the underlying RESP type. // The following types are represent valid RESP values. 
func (v Value) Type() Type { return v.Typ } func marshalSimpleRESP(typ Type, b []byte) ([]byte, error) { bb := make([]byte, 3+len(b)) bb[0] = byte(typ) copy(bb[1:], b) bb[1+len(b)+0] = '\r' bb[1+len(b)+1] = '\n' return bb, nil } func marshalBulkRESP(v Value) ([]byte, error) { if v.Null { return []byte("$-1\r\n"), nil } szb := []byte(strconv.FormatInt(int64(len(v.Str)), 10)) bb := make([]byte, 5+len(szb)+len(v.Str)) bb[0] = '$' copy(bb[1:], szb) bb[1+len(szb)+0] = '\r' bb[1+len(szb)+1] = '\n' copy(bb[1+len(szb)+2:], v.Str) bb[1+len(szb)+2+len(v.Str)+0] = '\r' bb[1+len(szb)+2+len(v.Str)+1] = '\n' return bb, nil } func marshalArrayRESP(v Value) ([]byte, error) { if v.Null { return []byte("*-1\r\n"), nil } szb := []byte(strconv.FormatInt(int64(len(v.ArrayV)), 10)) var buf bytes.Buffer buf.Grow(3 + len(szb) + 16*len(v.ArrayV)) // prime the buffer buf.WriteByte('*') buf.Write(szb) buf.WriteByte('\r') buf.WriteByte('\n') for i := 0; i < len(v.ArrayV); i++ { data, err := v.ArrayV[i].MarshalRESP() if err != nil { return nil, err } buf.Write(data) } return buf.Bytes(), nil } func marshalAnyRESP(v Value) ([]byte, error) { switch v.Typ { default: if v.Typ == 0 && v.Null { return []byte("$-1\r\n"), nil } return nil, errors.New("unknown resp type encountered") case '-', '+': return marshalSimpleRESP(v.Typ, v.Str) case ':': return marshalSimpleRESP(v.Typ, []byte(strconv.FormatInt(int64(v.IntegerV), 10))) case '$': return marshalBulkRESP(v) case '*': return marshalArrayRESP(v) } } // Equals compares one value to another value. func (v Value) Equals(value Value) bool { data1, err := v.MarshalRESP() if err != nil { return false } data2, err := value.MarshalRESP() if err != nil { return false } return string(data1) == string(data2) } // MarshalRESP returns the original serialized byte representation of Value. // For more information on this format please see http://redis.io/topics/protocol. func (v Value) MarshalRESP() ([]byte, error) { return marshalAnyRESP(v) } var NilValue = Value{Null: true} type ErrProtocol struct{ Msg string } func (err ErrProtocol) Error() string { return "Protocol error: " + err.Msg } // AnyValue returns a RESP value from an interface. // This function infers the types. Arrays are not allowed. func AnyValue(v interface{}) Value { switch v := v.(type) { default: return StringValue(fmt.Sprintf("%v", v)) case nil: return NullValue() case int: return IntegerValue(int(v)) case uint: return IntegerValue(int(v)) case int8: return IntegerValue(int(v)) case uint8: return IntegerValue(int(v)) case int16: return IntegerValue(int(v)) case uint16: return IntegerValue(int(v)) case int32: return IntegerValue(int(v)) case uint32: return IntegerValue(int(v)) case int64: return IntegerValue(int(v)) case uint64: return IntegerValue(int(v)) case bool: return BoolValue(v) case float32: return FloatValue(float64(v)) case float64: return FloatValue(float64(v)) case []byte: return BytesValue(v) case string: return StringValue(v) } } // SimpleStringValue returns a RESP simple string. A simple string has no new lines. The carriage return and new line characters are replaced with spaces. func SimpleStringValue(s string) Value { return Value{Typ: '+', Str: []byte(formSingleLine(s))} } // BytesValue returns a RESP bulk string. A bulk string can represent any data. func BytesValue(b []byte) Value { return Value{Typ: '$', Str: b} } // StringValue returns a RESP bulk string. A bulk string can represent any data. func StringValue(s string) Value
// NullValue returns a RESP null bulk string. func NullValue() Value { return Value{Typ: '$', Null: true} } // ErrorValue returns a RESP error. func ErrorValue(err error) Value { if err == nil { return Value{Typ: '-'} } return Value{Typ: '-', Str: []byte(err.Error())} } // IntegerValue returns a RESP integer. func IntegerValue(i int) Value { return Value{Typ: ':', IntegerV: i} } // BoolValue returns a RESP integer representation of a bool. func BoolValue(t bool) Value { if t { return Value{Typ: ':', IntegerV: 1} } return Value{Typ: ':', IntegerV: 0} } // FloatValue returns a RESP bulk string representation of a float. func FloatValue(f float
{ return Value{Typ: '$', Str: []byte(s)} }
identifier_body
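The resp.go code in this row spells out the bulk-string wire format that marshalBulkRESP emits: `$<len>\r\n<data>\r\n`, with `$-1\r\n` for the null value. A standalone sketch of just that encoding (not the package's actual function):

```go
package main

import (
	"fmt"
	"strconv"
)

// marshalBulk emits the RESP bulk-string wire form described above:
// "$<len>\r\n<data>\r\n", or "$-1\r\n" for the null bulk string.
// Standalone sketch; not the package's marshalBulkRESP.
func marshalBulk(data []byte, null bool) []byte {
	if null {
		return []byte("$-1\r\n")
	}
	return []byte("$" + strconv.Itoa(len(data)) + "\r\n" + string(data) + "\r\n")
}

func main() {
	fmt.Printf("%q\n", marshalBulk([]byte("hello"), false)) // "$5\r\nhello\r\n"
	fmt.Printf("%q\n", marshalBulk(nil, true))              // "$-1\r\n"
}
```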
agent.go
/go/vt/dbconfigs" "github.com/youtube/vitess/go/vt/env" "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/mysqlctl" "github.com/youtube/vitess/go/vt/tabletmanager/actionnode" "github.com/youtube/vitess/go/vt/tabletmanager/actor" "github.com/youtube/vitess/go/vt/tabletserver" "github.com/youtube/vitess/go/vt/topo" ) var ( vtactionBinaryPath = flag.String("vtaction_binary_path", "", "Full path (including filename) to vtaction binary. If not set, tries VTROOT/bin/vtaction.") ) type tabletChangeItem struct { oldTablet topo.Tablet newTablet topo.Tablet context string queuedTime time.Time } // ActionAgent is the main class for the agent. type ActionAgent struct { // The following fields are set during creation TopoServer topo.Server TabletAlias topo.TabletAlias Mysqld *mysqlctl.Mysqld DBConfigs *dbconfigs.DBConfigs SchemaOverrides []tabletserver.SchemaOverride BinlogPlayerMap *BinlogPlayerMap // Internal variables vtActionBinFile string // path to vtaction binary done chan struct{} // closed when we are done. // This is the History of the health checks, public so status // pages can display it History *history.History // actionMutex is there to run only one action at a time. If // both agent.actionMutex and agent.mutex needs to be taken, // take actionMutex first. actionMutex sync.Mutex // to run only one action at a time // mutex is protecting the rest of the members mutex sync.Mutex changeItems chan tabletChangeItem _tablet *topo.TabletInfo } func loadSchemaOverrides(overridesFile string) []tabletserver.SchemaOverride { var schemaOverrides []tabletserver.SchemaOverride if overridesFile == "" { return schemaOverrides } if err := jscfg.ReadJson(overridesFile, &schemaOverrides); err != nil { log.Warningf("can't read overridesFile %v: %v", overridesFile, err) } else { data, _ := json.MarshalIndent(schemaOverrides, "", " ") log.Infof("schemaOverrides: %s\n", data) } return schemaOverrides } // NewActionAgent creates a new ActionAgent and registers all the // associated services func NewActionAgent( tabletAlias topo.TabletAlias, dbcfgs *dbconfigs.DBConfigs, mycnf *mysqlctl.Mycnf, port, securePort int, overridesFile string, ) (agent *ActionAgent, err error) { schemaOverrides := loadSchemaOverrides(overridesFile) topoServer := topo.GetServer() mysqld := mysqlctl.NewMysqld("Dba", mycnf, &dbcfgs.Dba, &dbcfgs.Repl) agent = &ActionAgent{ TopoServer: topoServer, TabletAlias: tabletAlias, Mysqld: mysqld, DBConfigs: dbcfgs, SchemaOverrides: schemaOverrides, done: make(chan struct{}), History: history.New(historyLength), changeItems: make(chan tabletChangeItem, 100), } // Start the binlog player services, not playing at start. agent.BinlogPlayerMap = NewBinlogPlayerMap(topoServer, &dbcfgs.App.ConnectionParams, mysqld) RegisterBinlogPlayerMap(agent.BinlogPlayerMap) // try to figure out the mysql port mysqlPort := mycnf.MysqlPort if mysqlPort == 0 { // we don't know the port, try to get it from mysqld var err error mysqlPort, err = mysqld.GetMysqlPort() if err != nil { log.Warningf("Cannot get current mysql port, will use 0 for now: %v", err) } } if err := agent.Start(mysqlPort, port, securePort); err != nil { return nil, err } // register the RPC services from the agent agent.registerQueryService() // start health check if needed agent.initHeathCheck() return agent, nil } func (agent *ActionAgent) runChangeCallback(oldTablet *topo.Tablet, context string) { agent.mutex.Lock() // Access directly since we have the lock. 
newTablet := agent._tablet.Tablet agent.changeItems <- tabletChangeItem{oldTablet: *oldTablet, newTablet: *newTablet, context: context, queuedTime: time.Now()} log.Infof("Queued tablet callback: %v", context) agent.mutex.Unlock() } func (agent *ActionAgent) executeCallbacksLoop() { for { select { case changeItem := <-agent.changeItems: wg := sync.WaitGroup{} agent.mutex.Lock() log.Infof("Running tablet callback after %v: %v", time.Now().Sub(changeItem.queuedTime), changeItem.context) wg.Add(1) go func() { defer wg.Done() agent.changeCallback(changeItem.oldTablet, changeItem.newTablet) }() agent.mutex.Unlock() wg.Wait() case <-agent.done: return } } } func (agent *ActionAgent) readTablet() error { tablet, err := agent.TopoServer.GetTablet(agent.TabletAlias) if err != nil { return err } agent.mutex.Lock() agent._tablet = tablet agent.mutex.Unlock() return nil } func (agent *ActionAgent) Tablet() *topo.TabletInfo
func (agent *ActionAgent) resolvePaths() error { var p string if *vtactionBinaryPath != "" { p = *vtactionBinaryPath } else { vtroot, err := env.VtRoot() if err != nil { return err } p = path.Join(vtroot, "bin/vtaction") } if _, err := os.Stat(p); err != nil { return fmt.Errorf("vtaction binary %s not found: %v", p, err) } agent.vtActionBinFile = p return nil } // A non-nil return signals that event processing should stop. func (agent *ActionAgent) dispatchAction(actionPath, data string) error { agent.actionMutex.Lock() defer agent.actionMutex.Unlock() log.Infof("action dispatch %v", actionPath) actionNode, err := actionnode.ActionNodeFromJson(data, actionPath) if err != nil { log.Errorf("action decode failed: %v %v", actionPath, err) return nil } cmd := []string{ agent.vtActionBinFile, "-action", actionNode.Action, "-action-node", actionPath, "-action-guid", actionNode.ActionGuid, } cmd = append(cmd, logutil.GetSubprocessFlags()...) cmd = append(cmd, topo.GetSubprocessFlags()...) cmd = append(cmd, dbconfigs.GetSubprocessFlags()...) cmd = append(cmd, mysqlctl.GetSubprocessFlags()...) log.Infof("action launch %v", cmd) vtActionCmd := exec.Command(cmd[0], cmd[1:]...) stdOut, vtActionErr := vtActionCmd.CombinedOutput() if vtActionErr != nil { log.Errorf("agent action failed: %v %v\n%s", actionPath, vtActionErr, stdOut) // If the action failed, preserve single execution path semantics. return vtActionErr } log.Infof("Agent action completed %v %s", actionPath, stdOut) agent.afterAction(actionPath, actionNode.Action == actionnode.TABLET_ACTION_APPLY_SCHEMA) return nil } // afterAction needs to be run after an action may have changed the current // state of the tablet. func (agent *ActionAgent) afterAction(context string, reloadSchema bool) { log.Infof("Executing post-action change callbacks") // Save the old tablet so callbacks can have a better idea of // the precise nature of the transition. oldTablet := agent.Tablet().Tablet // Actions should have side effects on the tablet, so reload the data. if err := agent.readTablet(); err != nil { log.Warningf("Failed rereading tablet after %v - services may be inconsistent: %v", context, err) } else { if updatedTablet := actor.CheckTabletMysqlPort(agent.TopoServer, agent.Mysqld, agent.Tablet()); updatedTablet != nil { agent.mutex.Lock() agent._tablet = updatedTablet agent.mutex.Unlock() } agent.runChangeCallback(oldTablet, context) } // Maybe invalidate the schema. // This adds a dependency between tabletmanager and tabletserver, // so it's not ideal. But I (alainjobart) think it's better // to have up to date schema in vtocc. if reloadSchema { tabletserver.ReloadSchema() } log.Infof("Done with post-action change callbacks
{ agent.mutex.Lock() tablet := agent._tablet agent.mutex.Unlock() return tablet }
identifier_body
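The middle here is the body of Tablet(): every read or write of the shared _tablet pointer happens under agent.mutex. A self-contained sketch of that guarded getter/setter pattern, with a string standing in for *topo.TabletInfo:

```go
package main

import (
	"fmt"
	"sync"
)

// tabletHolder is a minimal sketch of the agent's _tablet handling above:
// every read and write of the shared value goes through one mutex, as in
// readTablet and Tablet. A string stands in for *topo.TabletInfo.
type tabletHolder struct {
	mu     sync.Mutex
	tablet string
}

func (h *tabletHolder) set(t string) {
	h.mu.Lock()
	h.tablet = t
	h.mu.Unlock()
}

func (h *tabletHolder) get() string {
	h.mu.Lock()
	t := h.tablet
	h.mu.Unlock()
	return t
}

func main() {
	var h tabletHolder
	h.set("cell-0000000001")
	fmt.Println(h.get())
}
```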
agent.go
/go/vt/dbconfigs" "github.com/youtube/vitess/go/vt/env" "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/mysqlctl" "github.com/youtube/vitess/go/vt/tabletmanager/actionnode" "github.com/youtube/vitess/go/vt/tabletmanager/actor" "github.com/youtube/vitess/go/vt/tabletserver" "github.com/youtube/vitess/go/vt/topo" ) var ( vtactionBinaryPath = flag.String("vtaction_binary_path", "", "Full path (including filename) to vtaction binary. If not set, tries VTROOT/bin/vtaction.") ) type tabletChangeItem struct { oldTablet topo.Tablet newTablet topo.Tablet context string queuedTime time.Time } // ActionAgent is the main class for the agent. type ActionAgent struct { // The following fields are set during creation TopoServer topo.Server TabletAlias topo.TabletAlias Mysqld *mysqlctl.Mysqld DBConfigs *dbconfigs.DBConfigs SchemaOverrides []tabletserver.SchemaOverride BinlogPlayerMap *BinlogPlayerMap // Internal variables vtActionBinFile string // path to vtaction binary done chan struct{} // closed when we are done. // This is the History of the health checks, public so status // pages can display it History *history.History // actionMutex is there to run only one action at a time. If // both agent.actionMutex and agent.mutex needs to be taken, // take actionMutex first. actionMutex sync.Mutex // to run only one action at a time // mutex is protecting the rest of the members mutex sync.Mutex changeItems chan tabletChangeItem _tablet *topo.TabletInfo } func loadSchemaOverrides(overridesFile string) []tabletserver.SchemaOverride { var schemaOverrides []tabletserver.SchemaOverride if overridesFile == "" { return schemaOverrides } if err := jscfg.ReadJson(overridesFile, &schemaOverrides); err != nil { log.Warningf("can't read overridesFile %v: %v", overridesFile, err) } else { data, _ := json.MarshalIndent(schemaOverrides, "", " ") log.Infof("schemaOverrides: %s\n", data) } return schemaOverrides } // NewActionAgent creates a new ActionAgent and registers all the // associated services func NewActionAgent( tabletAlias topo.TabletAlias, dbcfgs *dbconfigs.DBConfigs, mycnf *mysqlctl.Mycnf, port, securePort int, overridesFile string, ) (agent *ActionAgent, err error) { schemaOverrides := loadSchemaOverrides(overridesFile) topoServer := topo.GetServer() mysqld := mysqlctl.NewMysqld("Dba", mycnf, &dbcfgs.Dba, &dbcfgs.Repl) agent = &ActionAgent{ TopoServer: topoServer, TabletAlias: tabletAlias, Mysqld: mysqld, DBConfigs: dbcfgs, SchemaOverrides: schemaOverrides, done: make(chan struct{}), History: history.New(historyLength), changeItems: make(chan tabletChangeItem, 100), } // Start the binlog player services, not playing at start. agent.BinlogPlayerMap = NewBinlogPlayerMap(topoServer, &dbcfgs.App.ConnectionParams, mysqld) RegisterBinlogPlayerMap(agent.BinlogPlayerMap) // try to figure out the mysql port mysqlPort := mycnf.MysqlPort if mysqlPort == 0 { // we don't know the port, try to get it from mysqld var err error mysqlPort, err = mysqld.GetMysqlPort() if err != nil { log.Warningf("Cannot get current mysql port, will use 0 for now: %v", err) } } if err := agent.Start(mysqlPort, port, securePort); err != nil { return nil, err } // register the RPC services from the agent agent.registerQueryService() // start health check if needed agent.initHeathCheck() return agent, nil } func (agent *ActionAgent) runChangeCallback(oldTablet *topo.Tablet, context string) { agent.mutex.Lock() // Access directly since we have the lock. 
newTablet := agent._tablet.Tablet agent.changeItems <- tabletChangeItem{oldTablet: *oldTablet, newTablet: *newTablet, context: context, queuedTime: time.Now()} log.Infof("Queued tablet callback: %v", context) agent.mutex.Unlock() } func (agent *ActionAgent) executeCallbacksLoop() { for { select { case changeItem := <-agent.changeItems: wg := sync.WaitGroup{} agent.mutex.Lock() log.Infof("Running tablet callback after %v: %v", time.Now().Sub(changeItem.queuedTime), changeItem.context) wg.Add(1) go func() { defer wg.Done() agent.changeCallback(changeItem.oldTablet, changeItem.newTablet) }() agent.mutex.Unlock() wg.Wait() case <-agent.done: return } } } func (agent *ActionAgent)
() error { tablet, err := agent.TopoServer.GetTablet(agent.TabletAlias) if err != nil { return err } agent.mutex.Lock() agent._tablet = tablet agent.mutex.Unlock() return nil } func (agent *ActionAgent) Tablet() *topo.TabletInfo { agent.mutex.Lock() tablet := agent._tablet agent.mutex.Unlock() return tablet } func (agent *ActionAgent) resolvePaths() error { var p string if *vtactionBinaryPath != "" { p = *vtactionBinaryPath } else { vtroot, err := env.VtRoot() if err != nil { return err } p = path.Join(vtroot, "bin/vtaction") } if _, err := os.Stat(p); err != nil { return fmt.Errorf("vtaction binary %s not found: %v", p, err) } agent.vtActionBinFile = p return nil } // A non-nil return signals that event processing should stop. func (agent *ActionAgent) dispatchAction(actionPath, data string) error { agent.actionMutex.Lock() defer agent.actionMutex.Unlock() log.Infof("action dispatch %v", actionPath) actionNode, err := actionnode.ActionNodeFromJson(data, actionPath) if err != nil { log.Errorf("action decode failed: %v %v", actionPath, err) return nil } cmd := []string{ agent.vtActionBinFile, "-action", actionNode.Action, "-action-node", actionPath, "-action-guid", actionNode.ActionGuid, } cmd = append(cmd, logutil.GetSubprocessFlags()...) cmd = append(cmd, topo.GetSubprocessFlags()...) cmd = append(cmd, dbconfigs.GetSubprocessFlags()...) cmd = append(cmd, mysqlctl.GetSubprocessFlags()...) log.Infof("action launch %v", cmd) vtActionCmd := exec.Command(cmd[0], cmd[1:]...) stdOut, vtActionErr := vtActionCmd.CombinedOutput() if vtActionErr != nil { log.Errorf("agent action failed: %v %v\n%s", actionPath, vtActionErr, stdOut) // If the action failed, preserve single execution path semantics. return vtActionErr } log.Infof("Agent action completed %v %s", actionPath, stdOut) agent.afterAction(actionPath, actionNode.Action == actionnode.TABLET_ACTION_APPLY_SCHEMA) return nil } // afterAction needs to be run after an action may have changed the current // state of the tablet. func (agent *ActionAgent) afterAction(context string, reloadSchema bool) { log.Infof("Executing post-action change callbacks") // Save the old tablet so callbacks can have a better idea of // the precise nature of the transition. oldTablet := agent.Tablet().Tablet // Actions should have side effects on the tablet, so reload the data. if err := agent.readTablet(); err != nil { log.Warningf("Failed rereading tablet after %v - services may be inconsistent: %v", context, err) } else { if updatedTablet := actor.CheckTabletMysqlPort(agent.TopoServer, agent.Mysqld, agent.Tablet()); updatedTablet != nil { agent.mutex.Lock() agent._tablet = updatedTablet agent.mutex.Unlock() } agent.runChangeCallback(oldTablet, context) } // Maybe invalidate the schema. // This adds a dependency between tabletmanager and tabletserver, // so it's not ideal. But I (alainjobart) think it's better // to have up to date schema in vtocc. if reloadSchema { tabletserver.ReloadSchema() } log.Infof("Done with post-action change callbacks")
readTablet
identifier_name
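executeCallbacksLoop in this row is a plain select loop: consume queued change items until the done channel closes. A minimal runnable sketch of the same shape; the drain-on-shutdown branch is an added assumption so the sketch deterministically processes everything already queued, not part of the original loop:

```go
package main

import "fmt"

// item stands in for tabletChangeItem.
type item struct{ context string }

// loop mirrors executeCallbacksLoop above: consume queued change items
// until done closes. The drain branch on shutdown is an added assumption.
func loop(items <-chan item, done <-chan struct{}) {
	for {
		select {
		case it := <-items:
			fmt.Println("callback:", it.context)
		case <-done:
			for {
				select {
				case it := <-items:
					fmt.Println("callback:", it.context)
				default:
					return
				}
			}
		}
	}
}

func main() {
	items := make(chan item, 100)
	done := make(chan struct{})
	items <- item{context: "Start"}
	items <- item{context: "ApplySchema"}
	close(done) // a real agent closes done on shutdown
	loop(items, done)
}
```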
agent.go
/go/vt/dbconfigs" "github.com/youtube/vitess/go/vt/env" "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/mysqlctl" "github.com/youtube/vitess/go/vt/tabletmanager/actionnode" "github.com/youtube/vitess/go/vt/tabletmanager/actor" "github.com/youtube/vitess/go/vt/tabletserver" "github.com/youtube/vitess/go/vt/topo" ) var ( vtactionBinaryPath = flag.String("vtaction_binary_path", "", "Full path (including filename) to vtaction binary. If not set, tries VTROOT/bin/vtaction.") ) type tabletChangeItem struct { oldTablet topo.Tablet newTablet topo.Tablet context string queuedTime time.Time } // ActionAgent is the main class for the agent. type ActionAgent struct { // The following fields are set during creation TopoServer topo.Server TabletAlias topo.TabletAlias Mysqld *mysqlctl.Mysqld DBConfigs *dbconfigs.DBConfigs SchemaOverrides []tabletserver.SchemaOverride BinlogPlayerMap *BinlogPlayerMap // Internal variables vtActionBinFile string // path to vtaction binary done chan struct{} // closed when we are done. // This is the History of the health checks, public so status // pages can display it History *history.History // actionMutex is there to run only one action at a time. If // both agent.actionMutex and agent.mutex needs to be taken, // take actionMutex first. actionMutex sync.Mutex // to run only one action at a time // mutex is protecting the rest of the members mutex sync.Mutex changeItems chan tabletChangeItem _tablet *topo.TabletInfo } func loadSchemaOverrides(overridesFile string) []tabletserver.SchemaOverride { var schemaOverrides []tabletserver.SchemaOverride if overridesFile == "" { return schemaOverrides } if err := jscfg.ReadJson(overridesFile, &schemaOverrides); err != nil { log.Warningf("can't read overridesFile %v: %v", overridesFile, err) } else { data, _ := json.MarshalIndent(schemaOverrides, "", " ") log.Infof("schemaOverrides: %s\n", data) } return schemaOverrides } // NewActionAgent creates a new ActionAgent and registers all the // associated services func NewActionAgent( tabletAlias topo.TabletAlias, dbcfgs *dbconfigs.DBConfigs, mycnf *mysqlctl.Mycnf, port, securePort int, overridesFile string, ) (agent *ActionAgent, err error) { schemaOverrides := loadSchemaOverrides(overridesFile) topoServer := topo.GetServer() mysqld := mysqlctl.NewMysqld("Dba", mycnf, &dbcfgs.Dba, &dbcfgs.Repl) agent = &ActionAgent{ TopoServer: topoServer, TabletAlias: tabletAlias, Mysqld: mysqld, DBConfigs: dbcfgs, SchemaOverrides: schemaOverrides, done: make(chan struct{}), History: history.New(historyLength), changeItems: make(chan tabletChangeItem, 100), } // Start the binlog player services, not playing at start. agent.BinlogPlayerMap = NewBinlogPlayerMap(topoServer, &dbcfgs.App.ConnectionParams, mysqld) RegisterBinlogPlayerMap(agent.BinlogPlayerMap) // try to figure out the mysql port mysqlPort := mycnf.MysqlPort if mysqlPort == 0 { // we don't know the port, try to get it from mysqld var err error mysqlPort, err = mysqld.GetMysqlPort() if err != nil { log.Warningf("Cannot get current mysql port, will use 0 for now: %v", err) } } if err := agent.Start(mysqlPort, port, securePort); err != nil { return nil, err } // register the RPC services from the agent agent.registerQueryService() // start health check if needed agent.initHeathCheck() return agent, nil } func (agent *ActionAgent) runChangeCallback(oldTablet *topo.Tablet, context string) { agent.mutex.Lock() // Access directly since we have the lock. 
newTablet := agent._tablet.Tablet agent.changeItems <- tabletChangeItem{oldTablet: *oldTablet, newTablet: *newTablet, context: context, queuedTime: time.Now()} log.Infof("Queued tablet callback: %v", context) agent.mutex.Unlock() } func (agent *ActionAgent) executeCallbacksLoop() { for { select { case changeItem := <-agent.changeItems: wg := sync.WaitGroup{} agent.mutex.Lock() log.Infof("Running tablet callback after %v: %v", time.Now().Sub(changeItem.queuedTime), changeItem.context) wg.Add(1) go func() { defer wg.Done() agent.changeCallback(changeItem.oldTablet, changeItem.newTablet) }() agent.mutex.Unlock() wg.Wait() case <-agent.done: return } } } func (agent *ActionAgent) readTablet() error { tablet, err := agent.TopoServer.GetTablet(agent.TabletAlias) if err != nil { return err } agent.mutex.Lock() agent._tablet = tablet agent.mutex.Unlock() return nil } func (agent *ActionAgent) Tablet() *topo.TabletInfo { agent.mutex.Lock() tablet := agent._tablet agent.mutex.Unlock() return tablet } func (agent *ActionAgent) resolvePaths() error { var p string if *vtactionBinaryPath != "" { p = *vtactionBinaryPath } else { vtroot, err := env.VtRoot() if err != nil { return err } p = path.Join(vtroot, "bin/vtaction") } if _, err := os.Stat(p); err != nil { return fmt.Errorf("vtaction binary %s not found: %v", p, err) } agent.vtActionBinFile = p return nil } // A non-nil return signals that event processing should stop. func (agent *ActionAgent) dispatchAction(actionPath, data string) error { agent.actionMutex.Lock() defer agent.actionMutex.Unlock() log.Infof("action dispatch %v", actionPath) actionNode, err := actionnode.ActionNodeFromJson(data, actionPath) if err != nil { log.Errorf("action decode failed: %v %v", actionPath, err) return nil } cmd := []string{ agent.vtActionBinFile, "-action", actionNode.Action, "-action-node", actionPath, "-action-guid", actionNode.ActionGuid, } cmd = append(cmd, logutil.GetSubprocessFlags()...) cmd = append(cmd, topo.GetSubprocessFlags()...) cmd = append(cmd, dbconfigs.GetSubprocessFlags()...) cmd = append(cmd, mysqlctl.GetSubprocessFlags()...) log.Infof("action launch %v", cmd) vtActionCmd := exec.Command(cmd[0], cmd[1:]...) stdOut, vtActionErr := vtActionCmd.CombinedOutput() if vtActionErr != nil { log.Errorf("agent action failed: %v %v\n%s", actionPath, vtActionErr, stdOut) // If the action failed, preserve single execution path semantics. return vtActionErr } log.Infof("Agent action completed %v %s", actionPath, stdOut) agent.afterAction(actionPath, actionNode.Action == actionnode.TABLET_ACTION_APPLY_SCHEMA) return nil } // afterAction needs to be run after an action may have changed the current // state of the tablet. func (agent *ActionAgent) afterAction(context string, reloadSchema bool) { log.Infof("Executing post-action change callbacks") // Save the old tablet so callbacks can have a better idea of // the precise nature of the transition. oldTablet := agent.Tablet().Tablet // Actions should have side effects on the tablet, so reload the data. if err := agent.readTablet(); err != nil { log.Warningf("Failed rereading tablet after %v - services may be inconsistent: %v", context, err)
} else { if updatedTablet := actor.CheckTabletMysqlPort(agent.TopoServer, agent.Mysqld, agent.Tablet()); updatedTablet != nil { agent.mutex.Lock() agent._tablet = updatedTablet agent.mutex.Unlock() } agent.runChangeCallback(oldTablet, context) } // Maybe invalidate the schema. // This adds a dependency between tabletmanager and tabletserver, // so it's not ideal. But I (alainjobart) think it's better // to have up to date schema in vtocc. if reloadSchema { tabletserver.ReloadSchema() } log.Infof("Done with post-action change callbacks") }
random_line_split
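dispatchAction in this row builds an argv slice for the vtaction binary and captures stdout and stderr together with CombinedOutput. A minimal sketch of that launch pattern; "echo" stands in for vtaction and is assumed to be on the host's PATH:

```go
package main

import (
	"fmt"
	"log"
	"os/exec"
)

func main() {
	// Build the argv slice the way dispatchAction does, then run it and
	// capture combined stdout+stderr. "echo" is a stand-in for vtaction.
	cmd := []string{"echo", "-action", "ApplySchema", "-action-guid", "1234"}
	out, err := exec.Command(cmd[0], cmd[1:]...).CombinedOutput()
	if err != nil {
		log.Fatalf("action failed: %v\n%s", err, out)
	}
	fmt.Printf("action completed: %s", out)
}
```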
agent.go
/go/vt/dbconfigs" "github.com/youtube/vitess/go/vt/env" "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/mysqlctl" "github.com/youtube/vitess/go/vt/tabletmanager/actionnode" "github.com/youtube/vitess/go/vt/tabletmanager/actor" "github.com/youtube/vitess/go/vt/tabletserver" "github.com/youtube/vitess/go/vt/topo" ) var ( vtactionBinaryPath = flag.String("vtaction_binary_path", "", "Full path (including filename) to vtaction binary. If not set, tries VTROOT/bin/vtaction.") ) type tabletChangeItem struct { oldTablet topo.Tablet newTablet topo.Tablet context string queuedTime time.Time } // ActionAgent is the main class for the agent. type ActionAgent struct { // The following fields are set during creation TopoServer topo.Server TabletAlias topo.TabletAlias Mysqld *mysqlctl.Mysqld DBConfigs *dbconfigs.DBConfigs SchemaOverrides []tabletserver.SchemaOverride BinlogPlayerMap *BinlogPlayerMap // Internal variables vtActionBinFile string // path to vtaction binary done chan struct{} // closed when we are done. // This is the History of the health checks, public so status // pages can display it History *history.History // actionMutex is there to run only one action at a time. If // both agent.actionMutex and agent.mutex needs to be taken, // take actionMutex first. actionMutex sync.Mutex // to run only one action at a time // mutex is protecting the rest of the members mutex sync.Mutex changeItems chan tabletChangeItem _tablet *topo.TabletInfo } func loadSchemaOverrides(overridesFile string) []tabletserver.SchemaOverride { var schemaOverrides []tabletserver.SchemaOverride if overridesFile == "" { return schemaOverrides } if err := jscfg.ReadJson(overridesFile, &schemaOverrides); err != nil { log.Warningf("can't read overridesFile %v: %v", overridesFile, err) } else { data, _ := json.MarshalIndent(schemaOverrides, "", " ") log.Infof("schemaOverrides: %s\n", data) } return schemaOverrides } // NewActionAgent creates a new ActionAgent and registers all the // associated services func NewActionAgent( tabletAlias topo.TabletAlias, dbcfgs *dbconfigs.DBConfigs, mycnf *mysqlctl.Mycnf, port, securePort int, overridesFile string, ) (agent *ActionAgent, err error) { schemaOverrides := loadSchemaOverrides(overridesFile) topoServer := topo.GetServer() mysqld := mysqlctl.NewMysqld("Dba", mycnf, &dbcfgs.Dba, &dbcfgs.Repl) agent = &ActionAgent{ TopoServer: topoServer, TabletAlias: tabletAlias, Mysqld: mysqld, DBConfigs: dbcfgs, SchemaOverrides: schemaOverrides, done: make(chan struct{}), History: history.New(historyLength), changeItems: make(chan tabletChangeItem, 100), } // Start the binlog player services, not playing at start. agent.BinlogPlayerMap = NewBinlogPlayerMap(topoServer, &dbcfgs.App.ConnectionParams, mysqld) RegisterBinlogPlayerMap(agent.BinlogPlayerMap) // try to figure out the mysql port mysqlPort := mycnf.MysqlPort if mysqlPort == 0 { // we don't know the port, try to get it from mysqld var err error mysqlPort, err = mysqld.GetMysqlPort() if err != nil { log.Warningf("Cannot get current mysql port, will use 0 for now: %v", err) } } if err := agent.Start(mysqlPort, port, securePort); err != nil
// register the RPC services from the agent agent.registerQueryService() // start health check if needed agent.initHeathCheck() return agent, nil } func (agent *ActionAgent) runChangeCallback(oldTablet *topo.Tablet, context string) { agent.mutex.Lock() // Access directly since we have the lock. newTablet := agent._tablet.Tablet agent.changeItems <- tabletChangeItem{oldTablet: *oldTablet, newTablet: *newTablet, context: context, queuedTime: time.Now()} log.Infof("Queued tablet callback: %v", context) agent.mutex.Unlock() } func (agent *ActionAgent) executeCallbacksLoop() { for { select { case changeItem := <-agent.changeItems: wg := sync.WaitGroup{} agent.mutex.Lock() log.Infof("Running tablet callback after %v: %v", time.Now().Sub(changeItem.queuedTime), changeItem.context) wg.Add(1) go func() { defer wg.Done() agent.changeCallback(changeItem.oldTablet, changeItem.newTablet) }() agent.mutex.Unlock() wg.Wait() case <-agent.done: return } } } func (agent *ActionAgent) readTablet() error { tablet, err := agent.TopoServer.GetTablet(agent.TabletAlias) if err != nil { return err } agent.mutex.Lock() agent._tablet = tablet agent.mutex.Unlock() return nil } func (agent *ActionAgent) Tablet() *topo.TabletInfo { agent.mutex.Lock() tablet := agent._tablet agent.mutex.Unlock() return tablet } func (agent *ActionAgent) resolvePaths() error { var p string if *vtactionBinaryPath != "" { p = *vtactionBinaryPath } else { vtroot, err := env.VtRoot() if err != nil { return err } p = path.Join(vtroot, "bin/vtaction") } if _, err := os.Stat(p); err != nil { return fmt.Errorf("vtaction binary %s not found: %v", p, err) } agent.vtActionBinFile = p return nil } // A non-nil return signals that event processing should stop. func (agent *ActionAgent) dispatchAction(actionPath, data string) error { agent.actionMutex.Lock() defer agent.actionMutex.Unlock() log.Infof("action dispatch %v", actionPath) actionNode, err := actionnode.ActionNodeFromJson(data, actionPath) if err != nil { log.Errorf("action decode failed: %v %v", actionPath, err) return nil } cmd := []string{ agent.vtActionBinFile, "-action", actionNode.Action, "-action-node", actionPath, "-action-guid", actionNode.ActionGuid, } cmd = append(cmd, logutil.GetSubprocessFlags()...) cmd = append(cmd, topo.GetSubprocessFlags()...) cmd = append(cmd, dbconfigs.GetSubprocessFlags()...) cmd = append(cmd, mysqlctl.GetSubprocessFlags()...) log.Infof("action launch %v", cmd) vtActionCmd := exec.Command(cmd[0], cmd[1:]...) stdOut, vtActionErr := vtActionCmd.CombinedOutput() if vtActionErr != nil { log.Errorf("agent action failed: %v %v\n%s", actionPath, vtActionErr, stdOut) // If the action failed, preserve single execution path semantics. return vtActionErr } log.Infof("Agent action completed %v %s", actionPath, stdOut) agent.afterAction(actionPath, actionNode.Action == actionnode.TABLET_ACTION_APPLY_SCHEMA) return nil } // afterAction needs to be run after an action may have changed the current // state of the tablet. func (agent *ActionAgent) afterAction(context string, reloadSchema bool) { log.Infof("Executing post-action change callbacks") // Save the old tablet so callbacks can have a better idea of // the precise nature of the transition. oldTablet := agent.Tablet().Tablet // Actions should have side effects on the tablet, so reload the data. 
if err := agent.readTablet(); err != nil { log.Warningf("Failed rereading tablet after %v - services may be inconsistent: %v", context, err) } else { if updatedTablet := actor.CheckTabletMysqlPort(agent.TopoServer, agent.Mysqld, agent.Tablet()); updatedTablet != nil { agent.mutex.Lock() agent._tablet = updatedTablet agent.mutex.Unlock() } agent.runChangeCallback(oldTablet, context) } // Maybe invalidate the schema. // This adds a dependency between tabletmanager and tabletserver, // so it's not ideal. But I (alainjobart) think it's better // to have up to date schema in vtocc. if reloadSchema { tabletserver.ReloadSchema() } log.Infof("Done with post-action change callbacks
{ return nil, err }
conditional_block
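The middle (`{ return nil, err }`) is the early-return arm of NewActionAgent's agent.Start call. The same constructor also shows a softer failure mode for the mysql port: prefer the configured value, ask the server as a fallback, and degrade to 0 with a warning rather than aborting. A sketch of that fallback; getPort is a hypothetical stand-in for mysqld.GetMysqlPort:

```go
package main

import (
	"errors"
	"fmt"
	"log"
)

// getPort is a hypothetical stand-in for mysqld.GetMysqlPort.
func getPort() (int, error) { return 0, errors.New("not connected") }

// resolvePort sketches NewActionAgent's fallback: use the configured port
// when known, otherwise ask the server, and settle for 0 with a warning.
func resolvePort(configured int) int {
	if configured != 0 {
		return configured
	}
	p, err := getPort()
	if err != nil {
		log.Printf("Cannot get current mysql port, will use 0 for now: %v", err)
		return 0
	}
	return p
}

func main() {
	fmt.Println(resolvePort(3306)) // 3306
	fmt.Println(resolvePort(0))    // warns, then 0
}
```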
mpsse.go
.mpsseVerify(); err != nil { return err } } // Initialize MPSSE to a known state. // Reset the clock since it is impossible to read back the current clock rate. // Reset all the GPIOs as inputs since it is impossible to read back the // state of each GPIO (if they are input or output). cmd := []byte{ clock30MHz, clockNormal, clock2Phase, internalLoopbackDisable, gpioSetC, 0x00, 0x00, gpioSetD, 0x00, 0x00, } if _, err := h.Write(cmd); err != nil { return err } // Success!! return nil } // mpsseVerify sends an invalid MPSSE command and verifies the returned value // is incorrect. // // In practice this takes around 2ms. func (h *handle) mpsseVerify() error { var b [2]byte for _, v := range []byte{0xAA, 0xAB} { // Write a bad command and ensure it returned correctly. // Unlike what the application note proposes, include a flush op right // after. Without the flush, the device will only flush after the delay // specified to SetLatencyTimer. The flush removes this unneeded wait, // which enables increasing the delay specified to SetLatencyTimer. b[0] = v b[1] = flush if _, err := h.Write(b[:]); err != nil { return fmt.Errorf("ftdi: MPSSE verification failed: %w", err) } p, e := h.h.GetQueueStatus() if e != 0 { return toErr("Read/GetQueueStatus", e) } if p != 2 { return fmt.Errorf("ftdi: MPSSE verification failed: expected 2 bytes reply, got %d bytes", p) } ctx, cancel := context200ms() defer cancel() if _, err := h.ReadAll(ctx, b[:]); err != nil { return fmt.Errorf("ftdi: MPSSE verification failed: %w", err) } // 0xFA means invalid command, 0xAA is the command echoed back. if b[0] != 0xFA || b[1] != v { return fmt.Errorf("ftdi: MPSSE verification failed test for byte %#x: %#x", v, b) } } return nil } // // MPSSERegRead reads the memory mapped registers from the device. func (h *handle) MPSSERegRead(addr uint16) (byte, error) { // Unlike most other operations, the uint16 byte order is <hi>, <lo>. b := [...]byte{cpuReadFar, byte(addr >> 8), byte(addr), flush} if _, err := h.Write(b[:]); err != nil { return 0, err } ctx, cancel := context200ms() defer cancel() _, err := h.ReadAll(ctx, b[:1]) return b[0], err } // MPSSEClock sets the clock at the closest value and returns it. func (h *handle) MPSSEClock(f physic.Frequency) (physic.Frequency, error) { // TODO(maruel): Memory clock and skip if the same value. clk := clock30MHz base := 30 * physic.MegaHertz div := base / f if div >= 65536 { clk = clock6MHz base /= 5 div = base / f if div >= 65536 { return 0, errors.New("ftdi: clock frequency is too low") } } b := [...]byte{clk, clockSetDivisor, byte(div - 1), byte((div - 1) >> 8)} _, err := h.Write(b[:]) return base / div, err } // mpsseTxOp returns the right MPSSE command byte for the stream. func mpsseTxOp(w, r bool, ew, er gpio.Edge, lsbf bool) byte { op := byte(0) if lsbf { op |= dataLSBF } if w { op |= dataOut if ew == gpio.FallingEdge { op |= dataOutFall } } if r { op |= dataIn if er == gpio.FallingEdge { op |= dataInFall } } return op } // MPSSETx runs a transaction on the clock on pins D0, D1 and D2. // // It can only do it on a multiple of 8 bits. func (h *handle) MPSSETx(w, r []byte, ew, er gpio.Edge, lsbf bool) error { l := len(w) if len(w) != 0 { // TODO(maruel): This is easy to fix by daisy chaining operations. 
if len(w) > 65536 { return errors.New("ftdi: write buffer too long; max 65536") } } if len(r) != 0 { if len(r) > 65536 { return errors.New("ftdi: read buffer too long; max 65536") } if l != 0 && len(r) != l { return errors.New("ftdi: mismatched buffer lengths") } l = len(r) } // The FT232H has 1Kb Tx and Rx buffers. So partial writes should be done. // TODO(maruel): Test. // Flush can be useful if rbits != 0. op := mpsseTxOp(len(w) != 0, len(r) != 0, ew, er, lsbf) cmd := []byte{op, byte(l - 1), byte((l - 1) >> 8)} cmd = append(cmd, w...) cmd = append(cmd, flush) if _, err := h.Write(cmd); err != nil { return err } if len(r) != 0 { ctx, cancel := context200ms() defer cancel() _, err := h.ReadAll(ctx, r) return err } return nil } // MPSSETxShort runs a transaction on the clock pins D0, D1 and D2 for a byte // or less: between 1 and 8 bits. func (h *handle) MPSSETxShort(w byte, wbits, rbits int, ew, er gpio.Edge, lsbf bool) (byte, error) { op := byte(dataBit) if lsbf { op |= dataLSBF } l := wbits if wbits != 0 { if wbits > 8 { return 0, errors.New("ftdi: write buffer too long; max 8") } op |= dataOut if ew == gpio.FallingEdge { op |= dataOutFall } } if rbits != 0 { if rbits > 8 { return 0, errors.New("ftdi: read buffer too long; max 8") } op |= dataIn if er == gpio.FallingEdge { op |= dataInFall } if l != 0 && rbits != l { return 0, errors.New("ftdi: mismatched buffer lengths") } l = rbits } b := [3]byte{op, byte(l - 1)} cmd := b[:2] if wbits != 0 { cmd = append(cmd, w) } if rbits != 0 { cmd = append(cmd, flush) } if _, err := h.Write(cmd); err != nil { return 0, err } if rbits != 0 { ctx, cancel := context200ms() defer cancel() _, err := h.ReadAll(ctx, b[:1]) return b[0], err } return 0, nil } // MPSSECBus operates on 8 GPIOs at a time C0~C7. // // Direction 1 means output, 0 means input. func (h *handle) MPSSECBus(mask, value byte) error { b := [...]byte{gpioSetC, value, mask} _, err := h.Write(b[:]) return err } // MPSSEDBus operates on 8 GPIOs at a time D0~D7. // // Direction 1 means output, 0 means input. func (h *handle) MPSSEDBus(mask, value byte) error { b := [...]byte{gpioSetD, value, mask} _, err := h.Write(b[:]) return err } // MPSSECBusRead reads all the CBus pins C0~C7. func (h *handle) MPSSECBusRead() (byte, error) {
b := [...]byte{gpioReadC, flush} if _, err := h.Write(b[:]); err != nil { return 0, err } ctx, cancel := context200ms() defer cancel() if _, err := h.ReadAll(ctx, b[:1]); err != nil { return 0, err } return b[0], nil }
identifier_body
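MPSSEClock in this row picks the largest base clock whose 16-bit divisor can reach the requested frequency: 30MHz first, then 6MHz (base/5). The arithmetic as a standalone sketch, using plain Hz integers instead of physic.Frequency:

```go
package main

import (
	"errors"
	"fmt"
)

// clockDivisor reproduces MPSSEClock's selection above with plain Hz
// integers: try the 30MHz base, fall back to 6MHz (base/5) when the
// divisor would overflow the 16-bit field.
func clockDivisor(hz int64) (base, div int64, err error) {
	base = 30000000
	div = base / hz
	if div >= 65536 {
		base /= 5 // switch to the 6MHz clock
		div = base / hz
		if div >= 65536 {
			return 0, 0, errors.New("clock frequency is too low")
		}
	}
	return base, div, nil
}

func main() {
	base, div, err := clockDivisor(1000000)
	if err != nil {
		panic(err)
	}
	fmt.Printf("actual: %dHz (divisor %d)\n", base/div, div) // actual: 1000000Hz (divisor 30)
}
```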
mpsse.go
// Disables adaptive clocking. clockNormal byte = 0x97 // CPU mode. // // Access the device registers like a memory mapped device. // // <op>, <addrLow> cpuReadShort byte = 0x90 // <op>, <addrHi>, <addrLow> cpuReadFar byte = 0x91 // <op>, <addrLow>, <data> cpuWriteShort byte = 0x92 // <op>, <addrHi>, <addrLow>, <data> cpuWriteFar byte = 0x93 // Buffer operations. // // Flush the buffer back to the host. flush byte = 0x87 // Wait until D5 (JTAG) or I/O1 (CPU) is high. Once it is detected as // high, the MPSSE engine moves on to process the next instruction. waitHigh byte = 0x88 waitLow byte = 0x89 ) // InitMPSSE sets the device into MPSSE mode. // // This requires an ft232h, ft2232, ft2232h or a ft4232h. // // Use only one of Init or InitMPSSE. func (h *handle) InitMPSSE() error { // http://www.ftdichip.com/Support/Documents/AppNotes/AN_255_USB%20to%20I2C%20Example%20using%20the%20FT232H%20and%20FT201X%20devices.pdf // Pre-state: // - Write EEPROM i.IsFifo = true so the device DBus is started in tristate. // Try to verify the MPSSE controller without initializing it first. This is // the 'happy path', which enables reusing the device in its current state // without affecting current GPIO state. if h.mpsseVerify() != nil { // Do a full reset. Just trying to set the MPSSE controller will // likely not work. That's a layering violation (since the retry with reset // is done in driver.go) but we've survived worse things... // // TODO(maruel): This is not helping in practice, this needs to be fine // tuned. if err := h.Reset(); err != nil { return err } if err := h.Init(); err != nil { return err } // That does the magic thing. if err := h.SetBitMode(0, bitModeMpsse); err != nil { return err } if err := h.mpsseVerify(); err != nil { return err } } // Initialize MPSSE to a known state. // Reset the clock since it is impossible to read back the current clock rate. // Reset all the GPIOs as inputs since it is impossible to read back the // state of each GPIO (if they are input or output). cmd := []byte{ clock30MHz, clockNormal, clock2Phase, internalLoopbackDisable, gpioSetC, 0x00, 0x00, gpioSetD, 0x00, 0x00, } if _, err := h.Write(cmd); err != nil { return err } // Success!! return nil } // mpsseVerify sends an invalid MPSSE command and verifies the returned value // is incorrect. // // In practice this takes around 2ms. func (h *handle) mpsseVerify() error { var b [2]byte for _, v := range []byte{0xAA, 0xAB} { // Write a bad command and ensure it returned correctly. // Unlike what the application note proposes, include a flush op right // after. Without the flush, the device will only flush after the delay // specified to SetLatencyTimer. The flush removes this unneeded wait, // which enables increasing the delay specified to SetLatencyTimer. b[0] = v b[1] = flush if _, err := h.Write(b[:]); err != nil { return fmt.Errorf("ftdi: MPSSE verification failed: %w", err) } p, e := h.h.GetQueueStatus() if e != 0 { return toErr("Read/GetQueueStatus", e) } if p != 2 { return fmt.Errorf("ftdi: MPSSE verification failed: expected 2 bytes reply, got %d bytes", p) } ctx, cancel := context200ms() defer cancel() if _, err := h.ReadAll(ctx, b[:]); err != nil { return fmt.Errorf("ftdi: MPSSE verification failed: %w", err) } // 0xFA means invalid command, 0xAA is the command echoed back. if b[0] != 0xFA || b[1] != v { return fmt.Errorf("ftdi: MPSSE verification failed test for byte %#x: %#x", v, b) } } return nil } // // MPSSERegRead reads the memory mapped registers from the device. 
func (h *handle) MPSSERegRead(addr uint16) (byte, error) { // Unlike most other operations, the uint16 byte order is <hi>, <lo>. b := [...]byte{cpuReadFar, byte(addr >> 8), byte(addr), flush} if _, err := h.Write(b[:]); err != nil { return 0, err } ctx, cancel := context200ms() defer cancel() _, err := h.ReadAll(ctx, b[:1]) return b[0], err } // MPSSEClock sets the clock at the closest value and returns it. func (h *handle) MPSSEClock(f physic.Frequency) (physic.Frequency, error) { // TODO(maruel): Memory clock and skip if the same value. clk := clock30MHz base := 30 * physic.MegaHertz div := base / f if div >= 65536 { clk = clock6MHz base /= 5 div = base / f if div >= 65536 { return 0, errors.New("ftdi: clock frequency is too low") } } b := [...]byte{clk, clockSetDivisor, byte(div - 1), byte((div - 1) >> 8)} _, err := h.Write(b[:]) return base / div, err } // mpsseTxOp returns the right MPSSE command byte for the stream. func mpsseTxOp(w, r bool, ew, er gpio.Edge, lsbf bool) byte { op := byte(0) if lsbf { op |= dataLSBF } if w { op |= dataOut if ew == gpio.FallingEdge { op |= dataOutFall } } if r { op |= dataIn if er == gpio.FallingEdge { op |= dataInFall } } return op } // MPSSETx runs a transaction on the clock on pins D0, D1 and D2. // // It can only do it on a multiple of 8 bits. func (h *handle) MPSSETx(w, r []byte, ew, er gpio.Edge, lsbf bool) error { l := len(w) if len(w) != 0 { // TODO(maruel): This is easy to fix by daisy chaining operations. if len(w) > 65536 { return errors.New("ftdi: write buffer too long; max 65536") } } if len(r) != 0 { if len(r) > 65536 { return errors.New("ftdi: read buffer too long; max 65536") } if l != 0 && len(r) != l { return errors.New("ftdi: mismatched buffer lengths") } l = len(r) } // The FT232H has 1Kb Tx and Rx buffers. So partial writes should be done. // TODO(maruel): Test. // Flush can be useful if rbits != 0. op := mpsseTxOp(len(w) != 0, len(r) != 0, ew, er, lsbf) cmd := []byte{op, byte(l - 1), byte((l - 1) >> 8)} cmd = append(cmd, w...) cmd = append(cmd, flush) if _, err := h.Write(cmd); err != nil { return err } if len(r) != 0 { ctx, cancel := context200ms() defer cancel() _, err := h.ReadAll(ctx, r) return err } return nil } // MPSSETxShort runs a transaction on the clock pins D0, D1 and D2 for a byte // or less: between 1 and 8 bits. func (h *handle) MP
SSETxShort(w
identifier_name
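mpsseTxOp in this row composes the MPSSE data-shifting opcode by OR-ing flag bits. A self-contained restatement; the constant values follow the standard FTDI AN_108 bit layout, which the original file's unnamed constants are assumed to share:

```go
package main

import "fmt"

// Bit positions of the MPSSE data-shifting opcode per FTDI AN_108; the
// original file's constants are assumed to match these values.
const (
	dataOutFall byte = 0x01 // write on the falling clock edge
	dataBit     byte = 0x02 // bit mode instead of byte mode
	dataInFall  byte = 0x04 // read on the falling clock edge
	dataLSBF    byte = 0x08 // least significant bit first
	dataOut     byte = 0x10 // enable output on TDI/DO
	dataIn      byte = 0x20 // enable input on TDO/DI
)

// txOp mirrors mpsseTxOp above: OR together the flags for the transfer.
func txOp(w, r, fallW, fallR, lsbf bool) byte {
	op := byte(0)
	if lsbf {
		op |= dataLSBF
	}
	if w {
		op |= dataOut
		if fallW {
			op |= dataOutFall
		}
	}
	if r {
		op |= dataIn
		if fallR {
			op |= dataInFall
		}
	}
	return op
}

func main() {
	// SPI mode 0 shape: write on falling edge, read on rising, MSB first.
	fmt.Printf("%#02x\n", txOp(true, true, true, false, false)) // 0x31
}
```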
mpsse.go
- 0(1) 6MHz / 30MHz // - 1(2) 3MHz / 15MHz // - 2(3) 2MHz / 10MHz // - 3(4) 1.5MHz / 7.5MHz // - 4(5) 1.25MHz / 6MHz // - ... // - 0xFFFF(65536) 91.553Hz / 457.763Hz // // <op>, <valueL-1>, <valueH-1> clockSetDivisor byte = 0x86 // Uses 3 phases data clocking: data is valid on both clock edges. Needed // for I²C. clock3Phase byte = 0x8C // Uses normal 2 phases data clocking. clock2Phase byte = 0x8D // Enables clock even while not doing any operation. Used with JTAG. // Enables the clock between [1, 8] pulses. // <op>, <length-1> clockOnShort byte = 0x8E // Enables the clock between [8, 524288] pulses in 8 multiples. // <op>, <lengthL-1>, <lengthH-1> clockOnLong byte = 0x8F // Enables clock until D5 is high or low. Used with JTAG. clockUntilHigh byte = 0x94 clockUntilLow byte = 0x95 // <op>, <lengthL-1>, <lengthH-1> in 8 multiples. clockUntilHighLong byte = 0x9C clockUntilLowLong byte = 0x9D // Enables adaptive clocking. Used with JTAG. // // This causes the controller to wait for D7 signal state as an ACK. clockAdaptive byte = 0x96 // Disables adaptive clocking. clockNormal byte = 0x97 // CPU mode. // // Access the device registers like a memory mapped device. // // <op>, <addrLow> cpuReadShort byte = 0x90 // <op>, <addrHi>, <addrLow> cpuReadFar byte = 0x91 // <op>, <addrLow>, <data> cpuWriteShort byte = 0x92 // <op>, <addrHi>, <addrLow>, <data> cpuWriteFar byte = 0x93 // Buffer operations. // // Flush the buffer back to the host. flush byte = 0x87 // Wait until D5 (JTAG) or I/O1 (CPU) is high. Once it is detected as // high, the MPSSE engine moves on to process the next instruction. waitHigh byte = 0x88 waitLow byte = 0x89 ) // InitMPSSE sets the device into MPSSE mode. // // This requires an ft232h, ft2232, ft2232h or a ft4232h. // // Use only one of Init or InitMPSSE. func (h *handle) InitMPSSE() error { // http://www.ftdichip.com/Support/Documents/AppNotes/AN_255_USB%20to%20I2C%20Example%20using%20the%20FT232H%20and%20FT201X%20devices.pdf // Pre-state: // - Write EEPROM i.IsFifo = true so the device DBus is started in tristate. // Try to verify the MPSSE controller without initializing it first. This is // the 'happy path', which enables reusing the device in its current state // without affecting current GPIO state. if h.mpsseVerify() != nil { // Do a full reset. Just trying to set the MPSSE controller will // likely not work. That's a layering violation (since the retry with reset // is done in driver.go) but we've survived worse things... // // TODO(maruel): This is not helping in practice, this needs to be fine // tuned. if err := h.Reset(); err != nil { return err } if err := h.Init(); err != nil { return err } // That does the magic thing. if err := h.SetBitMode(0, bitModeMpsse); err != nil { return err } if err := h.mpsseVerify(); err != nil { return err } } // Initialize MPSSE to a known state. // Reset the clock since it is impossible to read back the current clock rate. // Reset all the GPIOs as inputs since it is impossible to read back the // state of each GPIO (if they are input or output). cmd := []byte{ clock30MHz, clockNormal, clock2Phase, internalLoopbackDisable, gpioSetC, 0x00, 0x00, gpioSetD, 0x00, 0x00, } if _, err := h.Write(cmd); err != nil { return err } // Success!! return nil } // mpsseVerify sends an invalid MPSSE command and verifies the returned value // is incorrect. // // In practice this takes around 2ms. func (h *handle) mpsseVerify() error { var b [2]byte for _, v := range []byte{0xAA, 0xAB} { // Write a bad command and ensure it returned correctly. 
// Unlike what the application note proposes, include a flush op right // after. Without the flush, the device will only flush after the delay // specified to SetLatencyTimer. The flush removes this unneeded wait, // which enables increasing the delay specified to SetLatencyTimer. b[0] = v b[1] = flush if _, err := h.Write(b[:]); err != nil { return fmt.Errorf("ftdi: MPSSE verification failed: %w", err) } p, e := h.h.GetQueueStatus() if e != 0 { return toErr("Read/GetQueueStatus", e) } if p != 2 { return fmt.Errorf("ftdi: MPSSE verification failed: expected 2 bytes reply, got %d bytes", p) } ctx, cancel := context200ms() defer cancel() if _, err := h.ReadAll(ctx, b[:]); err != nil { return fmt.Errorf("ftdi: MPSSE verification failed: %w", err) } // 0xFA means invalid command, 0xAA is the command echoed back. if b[0] != 0xFA || b[1] != v { return fmt.Errorf("ftdi: MPSSE verification failed test for byte %#x: %#x", v, b) } } return nil } // // MPSSERegRead reads the memory mapped registers from the device. func (h *handle) MPSSERegRead(addr uint16) (byte, error) { // Unlike most other operations, the uint16 byte order is <hi>, <lo>. b := [...]byte{cpuReadFar, byte(addr >> 8), byte(addr), flush} if _, err := h.Write(b[:]); err != nil { return 0, err } ctx, cancel := context200ms() defer cancel() _, err := h.ReadAll(ctx, b[:1]) return b[0], err } // MPSSEClock sets the clock at the closest value and returns it. func (h *handle) MPSSEClock(f physic.Frequency) (physic.Frequency, error) { // TODO(maruel): Memory clock and skip if the same value. clk := clock30MHz base := 30 * physic.MegaHertz div := base / f if div >= 65536 { clk = clock6MHz base /= 5 div = base / f if div >= 65536 { return 0, errors.New("ftdi: clock frequency is too low") } } b := [...]byte{clk, clockSetDivisor, byte(div - 1), byte((div - 1) >> 8)} _, err := h.Write(b[:]) return base / div, err } // mpsseTxOp returns the right MPSSE command byte for the stream. func mpsseTxOp(w, r bool, ew, er gpio.Edge, lsbf bool) byte { op := byte(0) if lsbf { op |= dataLSBF } if w { op |= dataOut if ew == gpio.FallingEdge { op |= dataOutFall } } if r { op |= dataIn if er == gpio.FallingEdge {
op |= dataInFall }
conditional_block
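The clockSetDivisor table in the record above encodes f = base/(divisor+1), with a 30MHz base (or 6MHz when the divide-by-5 stage is active). A minimal standalone sketch of that arithmetic, mirroring the divisor computation MPSSEClock performs; the helper name clockDivisorCmd and the main function are illustrative, not part of the driver:

package main

import (
	"errors"
	"fmt"
)

const (
	clockSetDivisor = 0x86     // <op>, <valueL-1>, <valueH-1>
	baseFast        = 30000000 // Hz, divide-by-5 stage disabled (clock30MHz)
	baseSlow        = 6000000  // Hz, divide-by-5 stage enabled (clock6MHz)
)

// clockDivisorCmd returns the 3-byte clockSetDivisor command selecting the
// closest frequency at or below f, plus the frequency actually obtained.
func clockDivisorCmd(base, f int) ([]byte, int, error) {
	if f <= 0 {
		return nil, 0, errors.New("invalid frequency")
	}
	div := base / f
	if div < 1 || div > 65536 {
		return nil, 0, errors.New("frequency out of range for this base")
	}
	v := div - 1 // the device divides by (value + 1)
	return []byte{clockSetDivisor, byte(v), byte(v >> 8)}, base / div, nil
}

func main() {
	cmd, actual, err := clockDivisorCmd(baseFast, 10000000)
	if err != nil {
		panic(err)
	}
	fmt.Printf("cmd=% x actual=%d Hz\n", cmd, actual) // cmd=86 02 00 actual=10000000 Hz
}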
mpsse.go
0x93 // Buffer operations. // // Flush the buffer back to the host. flush byte = 0x87 // Wait until D5 (JTAG) or I/O1 (CPU) is high. Once it is detected as // high, the MPSSE engine moves on to process the next instruction. waitHigh byte = 0x88 waitLow byte = 0x89 ) // InitMPSSE sets the device into MPSSE mode. // // This requires an ft232h, ft2232, ft2232h or a ft4232h. // // Use only one of Init or InitMPSSE. func (h *handle) InitMPSSE() error { // http://www.ftdichip.com/Support/Documents/AppNotes/AN_255_USB%20to%20I2C%20Example%20using%20the%20FT232H%20and%20FT201X%20devices.pdf // Pre-state: // - Write EEPROM i.IsFifo = true so the device DBus is started in tristate. // Try to verify the MPSSE controller without initializing it first. This is // the 'happy path', which enables reusing the device in its current state // without affecting current GPIO state. if h.mpsseVerify() != nil { // Do a full reset. Just trying to set the MPSSE controller will // likely not work. That's a layering violation (since the retry with reset // is done in driver.go) but we've survived worse things... // // TODO(maruel): This is not helping in practice, this needs to be fine // tuned. if err := h.Reset(); err != nil { return err } if err := h.Init(); err != nil { return err } // That does the magic thing. if err := h.SetBitMode(0, bitModeMpsse); err != nil { return err } if err := h.mpsseVerify(); err != nil { return err } } // Initialize MPSSE to a known state. // Reset the clock since it is impossible to read back the current clock rate. // Reset all the GPIOs as inputs since it is impossible to read back the // state of each GPIO (if they are input or output). cmd := []byte{ clock30MHz, clockNormal, clock2Phase, internalLoopbackDisable, gpioSetC, 0x00, 0x00, gpioSetD, 0x00, 0x00, } if _, err := h.Write(cmd); err != nil { return err } // Success!! return nil } // mpsseVerify sends an invalid MPSSE command and verifies the device // rejects it as a bad command. // // In practice this takes around 2ms. func (h *handle) mpsseVerify() error { var b [2]byte for _, v := range []byte{0xAA, 0xAB} { // Write a bad command and ensure the expected error reply comes back. // Unlike what the application note proposes, include a flush op right // after. Without the flush, the device will only flush after the delay // specified to SetLatencyTimer. The flush removes this unneeded wait, // which enables increasing the delay specified to SetLatencyTimer. b[0] = v b[1] = flush if _, err := h.Write(b[:]); err != nil { return fmt.Errorf("ftdi: MPSSE verification failed: %w", err) } p, e := h.h.GetQueueStatus() if e != 0 { return toErr("Read/GetQueueStatus", e) } if p != 2 { return fmt.Errorf("ftdi: MPSSE verification failed: expected 2 bytes reply, got %d bytes", p) } ctx, cancel := context200ms() defer cancel() if _, err := h.ReadAll(ctx, b[:]); err != nil { return fmt.Errorf("ftdi: MPSSE verification failed: %w", err) } // 0xFA means invalid command, 0xAA is the command echoed back. if b[0] != 0xFA || b[1] != v { return fmt.Errorf("ftdi: MPSSE verification failed test for byte %#x: %#x", v, b) } } return nil } // // MPSSERegRead reads the memory mapped registers from the device. func (h *handle) MPSSERegRead(addr uint16) (byte, error) { // Unlike most other operations, the uint16 byte order is <hi>, <lo>.
b := [...]byte{cpuReadFar, byte(addr >> 8), byte(addr), flush} if _, err := h.Write(b[:]); err != nil { return 0, err } ctx, cancel := context200ms() defer cancel() _, err := h.ReadAll(ctx, b[:1]) return b[0], err } // MPSSEClock sets the clock at the closest value and returns it. func (h *handle) MPSSEClock(f physic.Frequency) (physic.Frequency, error) { // TODO(maruel): Memory clock and skip if the same value. clk := clock30MHz base := 30 * physic.MegaHertz div := base / f if div >= 65536 { clk = clock6MHz base /= 5 div = base / f if div >= 65536 { return 0, errors.New("ftdi: clock frequency is too low") } } b := [...]byte{clk, clockSetDivisor, byte(div - 1), byte((div - 1) >> 8)} _, err := h.Write(b[:]) return base / div, err } // mpsseTxOp returns the right MPSSE command byte for the stream. func mpsseTxOp(w, r bool, ew, er gpio.Edge, lsbf bool) byte { op := byte(0) if lsbf { op |= dataLSBF } if w { op |= dataOut if ew == gpio.FallingEdge { op |= dataOutFall } } if r { op |= dataIn if er == gpio.FallingEdge { op |= dataInFall } } return op } // MPSSETx runs a transaction on the clock on pins D0, D1 and D2. // // It can only do it on a multiple of 8 bits. func (h *handle) MPSSETx(w, r []byte, ew, er gpio.Edge, lsbf bool) error { l := len(w) if len(w) != 0 { // TODO(maruel): This is easy to fix by daisy chaining operations. if len(w) > 65536 { return errors.New("ftdi: write buffer too long; max 65536") } } if len(r) != 0 { if len(r) > 65536 { return errors.New("ftdi: read buffer too long; max 65536") } if l != 0 && len(r) != l { return errors.New("ftdi: mismatched buffer lengths") } l = len(r) } // The FT232H has 1Kb Tx and Rx buffers. So partial writes should be done. // TODO(maruel): Test. // Flush can be useful if rbits != 0. op := mpsseTxOp(len(w) != 0, len(r) != 0, ew, er, lsbf) cmd := []byte{op, byte(l - 1), byte((l - 1) >> 8)} cmd = append(cmd, w...) cmd = append(cmd, flush) if _, err := h.Write(cmd); err != nil { return err } if len(r) != 0 { ctx, cancel := context200ms() defer cancel() _, err := h.ReadAll(ctx, r) return err } return nil } // MPSSETxShort runs a transaction on the clock pins D0, D1 and D2 for a byte // or less: between 1 and 8 bits. func (h *handle) MPSSETxShort(w byte, wbits, rbits int, ew, er gpio.Edge, lsbf bool) (byte, error) { op := byte(dataBit) if lsbf { op |= dataLSBF } l := wbits if wbits != 0 { if wbits > 8 { return 0, errors.New("ftdi: write buffer too long; max 8") } op |= dataOut if ew == gpio.FallingEdge { op |= dataOutFall } }
if rbits != 0 { if rbits > 8 {
random_line_split
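mpsseTxOp in the record above composes the data-shifting opcode from single-bit flags. The flag values below are an assumption taken from FTDI's AN_108 bit assignments, since the record references dataOut, dataIn and friends without defining them; the sketch reproduces the same OR logic over plain booleans:

package main

import "fmt"

const (
	dataOutFall = 0x01 // shift data out on the clock's falling edge
	dataBit     = 0x02 // bit mode instead of byte mode
	dataInFall  = 0x04 // sample data in on the clock's falling edge
	dataLSBF    = 0x08 // LSB first
	dataOut     = 0x10 // write stream on D1
	dataIn      = 0x20 // read stream on D2
)

// txOp mirrors mpsseTxOp: build the opcode for a byte-mode transfer.
func txOp(w, r, wFall, rFall, lsbf bool) byte {
	op := byte(0)
	if lsbf {
		op |= dataLSBF
	}
	if w {
		op |= dataOut
		if wFall {
			op |= dataOutFall
		}
	}
	if r {
		op |= dataIn
		if rFall {
			op |= dataInFall
		}
	}
	return op
}

func main() {
	// SPI mode 0, full duplex, MSB first: write on falling, read on rising.
	fmt.Printf("%#02x\n", txOp(true, true, true, false, false)) // 0x31
}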
subscription_group.go
metric") } } if impacted { serr = serror.New(ErrPluginCannotBeUnloaded, map[string]interface{}{ "task-id": id, "plugin-to-unload": plgToUnload.Key(), }) } return serr } func (s *subscriptionGroup) process(id string) (serrs []serror.SnapError) { // gathers collectors based on requested metrics pluginToMetricMap, plugins, serrs := s.getMetricsAndCollectors(s.requestedMetrics, s.configTree) controlLogger.WithFields(log.Fields{ "collectors": fmt.Sprintf("%+v", plugins), "metrics": fmt.Sprintf("%+v", s.requestedMetrics), }).Debug("gathered collectors") // notice that requested plugins contains only processors and publishers for _, plugin := range s.requestedPlugins { // add defaults to plugins (exposed in a plugins ConfigPolicy) if lp, err := s.pluginManager.get( fmt.Sprintf("%s"+core.Separator+"%s"+core.Separator+"%d", plugin.TypeName(), plugin.Name(), plugin.Version())); err == nil && lp.ConfigPolicy != nil { if policy := lp.ConfigPolicy.Get([]string{""}); policy != nil && len(policy.Defaults()) > 0 { // set defaults to plugin config plugin.Config().ApplyDefaults(policy.Defaults()) } // update version info for subscribed processor or publisher version := plugin.Version() if version < 1 { version = lp.Version() } s := subscribedPlugin{ name: plugin.Name(), typeName: plugin.TypeName(), version: version, config: plugin.Config(), } // add processors and publishers to collectors just gathered plugins = append(plugins, s) } } // calculates those plugins that need to be subscribed and unsubscribed to subs, unsubs := comparePlugins(plugins, s.plugins) controlLogger.WithFields(log.Fields{ "subs": fmt.Sprintf("%+v", subs), "unsubs": fmt.Sprintf("%+v", unsubs), }).Debug("subscriptions") if len(subs) > 0 { if errs := s.subscribePlugins(id, subs); errs != nil { serrs = append(serrs, errs...) } } if len(unsubs) > 0 { if errs := s.unsubscribePlugins(id, unsubs); errs != nil { serrs = append(serrs, errs...) 
} } // updating view // metrics are grouped by plugin s.metrics = pluginToMetricMap s.plugins = plugins s.errors = serrs return serrs } func (s *subscriptionGroup) subscribePlugins(id string, plugins []core.SubscribedPlugin) (serrs []serror.SnapError) { plgs := make([]*loadedPlugin, len(plugins)) // First range through plugins to verify if all required plugins // are available for i, sub := range plugins { plg, err := s.pluginManager.get(key(sub)) if err != nil { serrs = append(serrs, pluginNotFoundError(sub)) return serrs } plgs[i] = plg } // If all plugins are available, subscribe to pools and start // plugins as needed for _, plg := range plgs { controlLogger.WithFields(log.Fields{ "name": plg.Name(), "type": plg.TypeName(), "version": plg.Version(), "_block": "subscriptionGroup.subscribePlugins", }).Debug("plugin subscription") if plg.Details.Uri != nil { // this is a remote plugin pool, err := s.pluginRunner.AvailablePlugins().getOrCreatePool(plg.Key()) if err != nil { serrs = append(serrs, serror.New(err)) return serrs } if pool.Count() < 1 { var resp plugin.Response res, err := http.Get(plg.Details.Uri.String()) if err != nil { serrs = append(serrs, serror.New(err)) return serrs } body, err := ioutil.ReadAll(res.Body) if err != nil { serrs = append(serrs, serror.New(err)) return serrs } err = json.Unmarshal(body, &resp) if err != nil { serrs = append(serrs, serror.New(err)) return serrs } ap, err := newAvailablePlugin(resp, s.eventManager, nil, s.grpcSecurity) if err != nil { serrs = append(serrs, serror.New(err)) return serrs } ap.SetIsRemote(true) err = pool.Insert(ap) if err != nil { serrs = append(serrs, serror.New(err)) return serrs } } } else { pool, err := s.pluginRunner.AvailablePlugins().getOrCreatePool(plg.Key()) if err != nil { serrs = append(serrs, serror.New(err)) return serrs } pool.Subscribe(id) if pool.Eligible() { err = s.verifyPlugin(plg) if err != nil { serrs = append(serrs, serror.New(err)) return serrs } err = s.pluginRunner.runPlugin(plg.Name(), plg.Details) if err != nil { serrs = append(serrs, serror.New(err)) return serrs } } } serr := s.sendPluginSubscriptionEvent(id, plg) if serr != nil { serrs = append(serrs, serr) return serrs } } return serrs } func (p *subscriptionGroup) unsubscribePlugins(id string, plugins []core.SubscribedPlugin) (serrs []serror.SnapError) { for _, plugin := range plugins { controlLogger.WithFields(log.Fields{ "name": plugin.Name(), "type": plugin.TypeName(), "version": plugin.Version(), "_block": "subscriptionGroup.unsubscribePlugins", }).Debug("plugin unsubscription") pool, err := p.pluginRunner.AvailablePlugins().getPool(key(plugin)) if err != nil { serrs = append(serrs, err) return serrs } if pool != nil { pool.Unsubscribe(id) } serr := p.sendPluginUnsubscriptionEvent(id, plugin) if serr != nil { serrs = append(serrs, serr) } } return } func (p *subscriptionGroup) sendPluginSubscriptionEvent(taskID string, pl core.Plugin) serror.SnapError { pt, err := core.ToPluginType(pl.TypeName()) if err != nil { return serror.New(err) } e := &control_event.PluginSubscriptionEvent{ TaskId: taskID, PluginType: int(pt), PluginName: pl.Name(), PluginVersion: pl.Version(), } if _, err := p.eventManager.Emit(e); err != nil { return serror.New(err) } return nil } func (p *subscriptionGroup) sendPluginUnsubscriptionEvent(taskID string, pl core.Plugin) serror.SnapError { pt, err := core.ToPluginType(pl.TypeName()) if err != nil { return serror.New(err) } e := &control_event.PluginUnsubscriptionEvent{ TaskId: taskID, PluginType: int(pt), PluginName: 
pl.Name(), PluginVersion: pl.Version(), } if _, err := p.eventManager.Emit(e); err != nil { return serror.New(err) } return nil } // comparePlugins compares the new state of plugins with the previous state. // It returns an array of plugins that need to be subscribed and an array of // plugins that need to be unsubscribed. func comparePlugins(newPlugins, oldPlugins []core.SubscribedPlugin) (adds, removes []core.SubscribedPlugin) { newMap := make(map[string]int) oldMap := make(map[string]int) for _, n := range newPlugins { newMap[key(n)]++ } for _, o := range oldPlugins { oldMap[key(o)]++ } for _, n := range newPlugins { if oldMap[key(n)] > 0 { oldMap[key(n)]-- continue } adds = append(adds, n) } for _, o := range oldPlugins { if newMap[key(o)] > 0 { newMap[key(o)]-- continue } removes = append(removes, o) } return } func pluginNotFoundError(pl core.SubscribedPlugin) serror.SnapError { se := serror.New(fmt.Errorf("Plugin not found: type(%s) name(%s) version(%d)", pl.TypeName(), pl.Name(), pl.Version())) se.SetFields(map[string]interface{}{ "name": pl.Name(), "version": pl.Version(), "type": pl.TypeName(), }) return se } func key(p core.SubscribedPlugin) string
{ return fmt.Sprintf("%v"+core.Separator+"%v"+core.Separator+"%v", p.TypeName(), p.Name(), p.Version()) }
identifier_body
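comparePlugins in the record above is a counting (multiset) diff: it tallies occurrences instead of using a set, so duplicate subscriptions survive the comparison. A standalone rendition over plain string keys, with hypothetical sample keys written in the key() format:

package main

import "fmt"

// diff returns the keys to subscribe (in new but not old) and to
// unsubscribe (in old but not new), respecting duplicates.
func diff(newKeys, oldKeys []string) (adds, removes []string) {
	newCount := map[string]int{}
	oldCount := map[string]int{}
	for _, k := range newKeys {
		newCount[k]++
	}
	for _, k := range oldKeys {
		oldCount[k]++
	}
	for _, k := range newKeys {
		if oldCount[k] > 0 {
			oldCount[k]-- // matched against an existing subscription
			continue
		}
		adds = append(adds, k)
	}
	for _, k := range oldKeys {
		if newCount[k] > 0 {
			newCount[k]--
			continue
		}
		removes = append(removes, k)
	}
	return adds, removes
}

func main() {
	adds, removes := diff(
		[]string{"collector:cpu:3", "publisher:file:2"},
		[]string{"collector:cpu:2", "publisher:file:2"},
	)
	fmt.Println(adds, removes) // [collector:cpu:3] [collector:cpu:2]
}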
subscription_group.go
!= nil { return errs } s.subscriptionMap[id] = subscriptionGroup return nil } // Remove removes a subscription group given a subscription group ID. func (s subscriptionGroups) Remove(id string) []serror.SnapError { s.Lock() defer s.Unlock() return s.remove(id) } func (s subscriptionGroups) remove(id string) []serror.SnapError { subscriptionGroup, ok := s.subscriptionMap[id] if !ok { return []serror.SnapError{serror.New(ErrSubscriptionGroupDoesNotExist)} } serrs := subscriptionGroup.unsubscribePlugins(id, s.subscriptionMap[id].plugins) delete(s.subscriptionMap, id) return serrs } // Get returns the metrics (core.Metric) and an array of serror.SnapError when // provided a subscription ID. The array of serror.SnapError returned was // produced the last time `process` was run which is important since // unloading/loading a plugin may produce errors when the requested metrics // are looked up in the metric catalog. Those errors will be provided back to // the caller of the subscription group on the next `CollectMetrics`. // Returns `ErrSubscriptionGroupDoesNotExist` when the subscription group // does not exist. func (s subscriptionGroups) Get(id string) (map[string]metricTypes, []serror.SnapError, error) { s.Lock() defer s.Unlock() return s.get(id) } func (s subscriptionGroups) get(id string) (map[string]metricTypes, []serror.SnapError, error) { if _, ok := s.subscriptionMap[id]; !ok { return nil, nil, ErrSubscriptionGroupDoesNotExist } sg := s.subscriptionMap[id] return sg.metrics, sg.errors, nil } // Process compares the new set of plugins with the previous set of plugins // for the given subscription group subscribing to plugins that were added // and unsubscribing to those that were removed since the last time the // subscription group was processed. // Returns an array of errors ([]serror.SnapError) which can occur when // mapping requested metrics to collector plugins and getting a core.Plugin // from a core.Requested.Plugin. // When processing a subscription group the resulting metrics grouped by plugin // (subscriptionGroup.metrics) for all subscription groups are updated based // on the requested metrics (subscriptionGroup.requestedMetrics). Similarly // the required plugins (subscriptionGroup.plugins) are also updated. func (s *subscriptionGroups) Process() (errs []serror.SnapError) { s.Lock() defer s.Unlock() for id, group := range s.subscriptionMap { if serrs := group.process(id); serrs != nil { errs = append(errs, serrs...) } } return errs } func (s *subscriptionGroups) ValidateDeps(requested []core.RequestedMetric, plugins []core.SubscribedPlugin, configTree *cdata.ConfigDataTree, asserts ...core.SubscribedPluginAssert) (serrs []serror.SnapError) { // resolve requested metrics and map to collectors pluginToMetricMap, collectors, errs := s.getMetricsAndCollectors(requested, configTree) if errs != nil { serrs = append(serrs, errs...) } // Validate if schedule type is streaming and we have a non-streaming plugin or vice versa for _, assert := range asserts { if serr := assert(collectors); serr != nil { serrs = append(serrs, serr) } } if len(serrs) > 0 { return serrs } // validateMetricsTypes for _, pmt := range pluginToMetricMap { for _, mt := range pmt.Metrics() { errs := s.validateMetric(mt) if len(errs) > 0 { serrs = append(serrs, errs...) 
} } } // add collectors to plugins (processors and publishers) for _, collector := range collectors { plugins = append(plugins, collector) } // validate plugins for _, plg := range plugins { typ, err := core.ToPluginType(plg.TypeName()) if err != nil { return []serror.SnapError{serror.New(err)} } mergedConfig := plg.Config().ReverseMerge( s.Config.Plugins.getPluginConfigDataNode( typ, plg.Name(), plg.Version())) errs := s.validatePluginSubscription(plg, mergedConfig) if len(errs) > 0 { serrs = append(serrs, errs...) return serrs } } return } // validatePluginUnloading checks whether unloading the plugin is safe for existing running tasks. // If the plugin is used by a running task and there is no replacement, return an error whose message // contains the IDs of the tasks using the plugin; unloading stays blocked until those tasks are stopped func (s *subscriptionGroups) validatePluginUnloading(pluginToUnload *loadedPlugin) (errs []serror.SnapError) { s.Lock() defer s.Unlock() for id, group := range s.subscriptionMap { if err := group.validatePluginUnloading(id, pluginToUnload); err != nil { errs = append(errs, err) } } return errs } func (p *subscriptionGroups) validatePluginSubscription(pl core.SubscribedPlugin, mergedConfig *cdata.ConfigDataNode) []serror.SnapError { var serrs = []serror.SnapError{} controlLogger.WithFields(log.Fields{ "_block": "validate-plugin-subscription", "plugin": fmt.Sprintf("%s:%d", pl.Name(), pl.Version()), }).Info(fmt.Sprintf("validating dependencies for plugin %s:%d", pl.Name(), pl.Version())) lp, err := p.pluginManager.get(key(pl)) if err != nil { serrs = append(serrs, pluginNotFoundError(pl)) return serrs } if lp.ConfigPolicy != nil { ncd := lp.ConfigPolicy.Get([]string{""}) _, errs := ncd.Process(mergedConfig.Table()) if errs != nil && errs.HasErrors() { for _, e := range errs.Errors() { se := serror.New(e) se.SetFields(map[string]interface{}{"name": pl.Name(), "version": pl.Version()}) serrs = append(serrs, se) } } } return serrs } func (s *subscriptionGroups) validateMetric( metric core.Metric) (serrs []serror.SnapError) { mts, err := s.metricCatalog.GetMetrics(metric.Namespace(), metric.Version()) if err != nil { serrs = append(serrs, serror.New(err, map[string]interface{}{ "name": metric.Namespace().String(), "version": metric.Version(), })) return serrs } for _, m := range mts { // No metric found; return an error. if m == nil { serrs = append( serrs, serror.New( fmt.Errorf("no metric found; cannot subscribe: (%s) version(%d)", metric.Namespace(), metric.Version()))) continue } m.config = metric.Config() typ, serr := core.ToPluginType(m.Plugin.TypeName()) if serr != nil { serrs = append(serrs, serror.New(serr)) continue } // merge global plugin config if m.config != nil { m.config.ReverseMergeInPlace( s.Config.Plugins.getPluginConfigDataNode(typ, m.Plugin.Name(), m.Plugin.Version())) } else { m.config = s.Config.Plugins.getPluginConfigDataNode(typ, m.Plugin.Name(), m.Plugin.Version()) } // When a metric is added to the MetricCatalog, the policy of rules defined by the plugin is added to the metric's policy. // If no rules are defined for a metric, we set the metric's policy to an empty ConfigPolicyNode. // Checking m.policy for nil will not work, we need to check if rules are nil.
if m.policy.HasRules() { if m.Config() == nil { fields := log.Fields{ "metric": m.Namespace(), "version": m.Version(), "plugin": m.Plugin.Name(), } serrs = append(serrs, serror.New(ErrConfigRequiredForMetric, fields)) continue } ncdTable, errs := m.policy.Process(m.Config().Table()) if errs != nil && errs.HasErrors() { for _, e := range errs.Errors() { serrs = append(serrs, serror.New(e)) } continue } m.config = cdata.FromTable(*ncdTable)
} // pluginIsSubscribed returns true if a provided plugin has been found among subscribed plugins // in the following subscription group func (s *subscriptionGroup) pluginIsSubscribed(plugin *loadedPlugin) bool { // range over subscribed plugins to find if the plugin is there for _, sp := range s.plugins { if sp.TypeName() == plugin.TypeName() && sp.Name() == plugin.Name() && sp.Version() == plugin.Version() { return true } } return false } // validatePluginUnloading verifies if a given plugin might be unloaded without causing running task failures func (s *subscriptionGroup) validatePluginUnloading(id string, plgToUnload *loadedPlugin) (serr serror.SnapError) {
} } return serrs
random_line_split
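Remove, Get and Process in the record above all follow the same lock-then-delegate pattern: the exported method takes the mutex and calls an unexported twin that does the work, so internal callers already holding the lock can reuse it. A trimmed sketch with the group payload reduced to a string; all names here are illustrative:

package main

import (
	"errors"
	"fmt"
	"sync"
)

var errNoGroup = errors.New("subscription group does not exist")

type groups struct {
	sync.Mutex
	m map[string]string
}

func (g *groups) Get(id string) (string, error) {
	g.Lock()
	defer g.Unlock()
	return g.get(id)
}

// get is the lock-free twin; callers must already hold the mutex.
func (g *groups) get(id string) (string, error) {
	v, ok := g.m[id]
	if !ok {
		return "", errNoGroup
	}
	return v, nil
}

func (g *groups) Remove(id string) error {
	g.Lock()
	defer g.Unlock()
	if _, err := g.get(id); err != nil { // reuse the unexported form under the lock
		return err
	}
	delete(g.m, id)
	return nil
}

func main() {
	g := &groups{m: map[string]string{"task-1": "cpu metrics"}}
	fmt.Println(g.Get("task-1"))                      // cpu metrics <nil>
	fmt.Println(g.Remove("task-1"), g.Remove("task-1")) // <nil> subscription group does not exist
}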
subscription_group.go
fmt.Errorf("no metric found; cannot subscribe: (%s) version(%d)", metric.Namespace(), metric.Version()))) continue } m.config = metric.Config() typ, serr := core.ToPluginType(m.Plugin.TypeName()) if serr != nil { serrs = append(serrs, serror.New(serr)) continue } // merge global plugin config if m.config != nil { m.config.ReverseMergeInPlace( s.Config.Plugins.getPluginConfigDataNode(typ, m.Plugin.Name(), m.Plugin.Version())) } else { m.config = s.Config.Plugins.getPluginConfigDataNode(typ, m.Plugin.Name(), m.Plugin.Version()) } // When a metric is added to the MetricCatalog, the policy of rules defined by the plugin is added to the metric's policy. // If no rules are defined for a metric, we set the metric's policy to an empty ConfigPolicyNode. // Checking m.policy for nil will not work, we need to check if rules are nil. if m.policy.HasRules() { if m.Config() == nil { fields := log.Fields{ "metric": m.Namespace(), "version": m.Version(), "plugin": m.Plugin.Name(), } serrs = append(serrs, serror.New(ErrConfigRequiredForMetric, fields)) continue } ncdTable, errs := m.policy.Process(m.Config().Table()) if errs != nil && errs.HasErrors() { for _, e := range errs.Errors() { serrs = append(serrs, serror.New(e)) } continue } m.config = cdata.FromTable(*ncdTable) } } return serrs } // pluginIsSubscribed returns true if a provided plugin has been found among subscribed plugins // in the following subscription group func (s *subscriptionGroup) pluginIsSubscribed(plugin *loadedPlugin) bool { // range over subscribed plugins to find if the plugin is there for _, sp := range s.plugins { if sp.TypeName() == plugin.TypeName() && sp.Name() == plugin.Name() && sp.Version() == plugin.Version() { return true } } return false } // validatePluginUnloading verifies if a given plugin might be unloaded without causing running task failures func (s *subscriptionGroup) validatePluginUnloading(id string, plgToUnload *loadedPlugin) (serr serror.SnapError) { impacted := false if !s.pluginIsSubscribed(plgToUnload) { // the plugin is not subscribed, so the task is not impacted by its unloading return nil } controlLogger.WithFields(log.Fields{ "_block": "subscriptionGroup.validatePluginUnloading", "task-id": id, "plugin-to-unload": plgToUnload.Key(), }).Debug("validating impact of unloading the plugin") for _, requestedMetric := range s.requestedMetrics { // get all plugins exposing the requested metric plgs, _ := s.GetPlugins(requestedMetric.Namespace()) // when requested version is fixed (greater than 0), take into account only plugins in the requested version if requestedMetric.Version() > 0 { // skip those which are not impacted by unloading (version different than plgToUnload.Version()) if requestedMetric.Version() == plgToUnload.Version() { plgsInVer := []core.CatalogedPlugin{} for _, plg := range plgs { if plg.Version() == requestedMetric.Version() { plgsInVer = append(plgsInVer, plg) } } // set plugins only in the requested version plgs = plgsInVer } } if len(plgs) == 1 && plgs[0].Key() == plgToUnload.Key() { // the requested metric is exposed only by the single plugin and there is no replacement impacted = true controlLogger.WithFields(log.Fields{ "_block": "subscriptionGroup.validatePluginUnloading", "task-id": id, "plugin-to-unload": plgToUnload.Key(), "requested-metric": fmt.Sprintf("%s:%d", requestedMetric.Namespace(), requestedMetric.Version()), }).Errorf("unloading the plugin would prevent collection of the requested metric") } } if impacted { serr = serror.New(ErrPluginCannotBeUnloaded,
map[string]interface{}{ "task-id": id, "plugin-to-unload": plgToUnload.Key(), }) } return serr } func (s *subscriptionGroup) process(id string) (serrs []serror.SnapError) { // gathers collectors based on requested metrics pluginToMetricMap, plugins, serrs := s.getMetricsAndCollectors(s.requestedMetrics, s.configTree) controlLogger.WithFields(log.Fields{ "collectors": fmt.Sprintf("%+v", plugins), "metrics": fmt.Sprintf("%+v", s.requestedMetrics), }).Debug("gathered collectors") // notice that requested plugins contains only processors and publishers for _, plugin := range s.requestedPlugins { // add defaults to plugins (exposed in a plugins ConfigPolicy) if lp, err := s.pluginManager.get( fmt.Sprintf("%s"+core.Separator+"%s"+core.Separator+"%d", plugin.TypeName(), plugin.Name(), plugin.Version())); err == nil && lp.ConfigPolicy != nil { if policy := lp.ConfigPolicy.Get([]string{""}); policy != nil && len(policy.Defaults()) > 0 { // set defaults to plugin config plugin.Config().ApplyDefaults(policy.Defaults()) } // update version info for subscribed processor or publisher version := plugin.Version() if version < 1 { version = lp.Version() } s := subscribedPlugin{ name: plugin.Name(), typeName: plugin.TypeName(), version: version, config: plugin.Config(), } // add processors and publishers to collectors just gathered plugins = append(plugins, s) } } // calculates those plugins that need to be subscribed and unsubscribed to subs, unsubs := comparePlugins(plugins, s.plugins) controlLogger.WithFields(log.Fields{ "subs": fmt.Sprintf("%+v", subs), "unsubs": fmt.Sprintf("%+v", unsubs), }).Debug("subscriptions") if len(subs) > 0 { if errs := s.subscribePlugins(id, subs); errs != nil { serrs = append(serrs, errs...) } } if len(unsubs) > 0 { if errs := s.unsubscribePlugins(id, unsubs); errs != nil { serrs = append(serrs, errs...) } } // updating view // metrics are grouped by plugin s.metrics = pluginToMetricMap s.plugins = plugins s.errors = serrs return serrs } func (s *subscriptionGroup) subscribePlugins(id string, plugins []core.SubscribedPlugin) (serrs []serror.SnapError) { plgs := make([]*loadedPlugin, len(plugins)) // First range through plugins to verify if all required plugins // are available for i, sub := range plugins { plg, err := s.pluginManager.get(key(sub)) if err != nil { serrs = append(serrs, pluginNotFoundError(sub)) return serrs } plgs[i] = plg } // If all plugins are available, subscribe to pools and start // plugins as needed for _, plg := range plgs { controlLogger.WithFields(log.Fields{ "name": plg.Name(), "type": plg.TypeName(), "version": plg.Version(), "_block": "subscriptionGroup.subscribePlugins", }).Debug("plugin subscription") if plg.Details.Uri != nil { // this is a remote plugin pool, err := s.pluginRunner.AvailablePlugins().getOrCreatePool(plg.Key()) if err != nil { serrs = append(serrs, serror.New(err)) return serrs } if pool.Count() < 1 { var resp plugin.Response res, err := http.Get(plg.Details.Uri.String()) if err != nil { serrs = append(serrs, serror.New(err)) return serrs } body, err := ioutil.ReadAll(res.Body) if err != nil { serrs = append(serrs, serror.New(err)) return serrs } err = json.Unmarshal(body, &resp) if err != nil { serrs = append(serrs, serror.New(err)) return serrs } ap, err := newAvailablePlugin(resp, s.eventManager, nil, s.grpcSecurity) if err != nil { serrs = append(serrs, serror.New(err)) return serrs } ap.SetIsRemote(true) err = pool.Insert(ap) if err != nil
{ serrs = append(serrs, serror.New(err)) return serrs }
conditional_block
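validatePluginUnloading in the record above reduces to a sole-provider rule: block the unload when some requested metric is exposed by exactly one cataloged plugin and that plugin is the one going away. A sketch with the catalog lookup faked as a map; the function name and the sample data are hypothetical:

package main

import "fmt"

// canUnload reports whether the plugin identified by key may be unloaded;
// providers maps each requested metric namespace to the keys of the
// cataloged plugins exposing it.
func canUnload(key string, providers map[string][]string) (bool, []string) {
	var blockedBy []string
	for ns, plgs := range providers {
		if len(plgs) == 1 && plgs[0] == key {
			blockedBy = append(blockedBy, ns) // no replacement exists
		}
	}
	return len(blockedBy) == 0, blockedBy
}

func main() {
	providers := map[string][]string{
		"/intel/cpu/idle": {"collector:cpu:3"},
		"/intel/mem/free": {"collector:mem:1", "collector:mem:2"},
	}
	ok, blocked := canUnload("collector:cpu:3", providers)
	fmt.Println(ok, blocked) // false [/intel/cpu/idle]
}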
subscription_group.go
!= nil { return errs } s.subscriptionMap[id] = subscriptionGroup return nil } // Remove removes a subscription group given a subscription group ID. func (s subscriptionGroups) Remove(id string) []serror.SnapError { s.Lock() defer s.Unlock() return s.remove(id) } func (s subscriptionGroups) remove(id string) []serror.SnapError { subscriptionGroup, ok := s.subscriptionMap[id] if !ok { return []serror.SnapError{serror.New(ErrSubscriptionGroupDoesNotExist)} } serrs := subscriptionGroup.unsubscribePlugins(id, s.subscriptionMap[id].plugins) delete(s.subscriptionMap, id) return serrs } // Get returns the metrics (core.Metric) and an array of serror.SnapError when // provided a subscription ID. The array of serror.SnapError returned was // produced the last time `process` was run which is important since // unloading/loading a plugin may produce errors when the requested metrics // are looked up in the metric catalog. Those errors will be provided back to // the caller of the subscription group on the next `CollectMetrics`. // Returns `ErrSubscriptionGroupDoesNotExist` when the subscription group // does not exist. func (s subscriptionGroups) Get(id string) (map[string]metricTypes, []serror.SnapError, error) { s.Lock() defer s.Unlock() return s.get(id) } func (s subscriptionGroups) get(id string) (map[string]metricTypes, []serror.SnapError, error) { if _, ok := s.subscriptionMap[id]; !ok { return nil, nil, ErrSubscriptionGroupDoesNotExist } sg := s.subscriptionMap[id] return sg.metrics, sg.errors, nil } // Process compares the new set of plugins with the previous set of plugins // for the given subscription group subscribing to plugins that were added // and unsubscribing to those that were removed since the last time the // subscription group was processed. // Returns an array of errors ([]serror.SnapError) which can occur when // mapping requested metrics to collector plugins and getting a core.Plugin // from a core.Requested.Plugin. // When processing a subscription group the resulting metrics grouped by plugin // (subscriptionGroup.metrics) for all subscription groups are updated based // on the requested metrics (subscriptionGroup.requestedMetrics). Similarly // the required plugins (subscriptionGroup.plugins) are also updated. func (s *subscriptionGroups) Process() (errs []serror.SnapError) { s.Lock() defer s.Unlock() for id, group := range s.subscriptionMap { if serrs := group.process(id); serrs != nil { errs = append(errs, serrs...) } } return errs } func (s *subscriptionGroups) ValidateDeps(requested []core.RequestedMetric, plugins []core.SubscribedPlugin, configTree *cdata.ConfigDataTree, asserts ...core.SubscribedPluginAssert) (serrs []serror.SnapError) { // resolve requested metrics and map to collectors pluginToMetricMap, collectors, errs := s.getMetricsAndCollectors(requested, configTree) if errs != nil { serrs = append(serrs, errs...) } // Validate if schedule type is streaming and we have a non-streaming plugin or vice versa for _, assert := range asserts { if serr := assert(collectors); serr != nil { serrs = append(serrs, serr) } } if len(serrs) > 0 { return serrs } // validateMetricsTypes for _, pmt := range pluginToMetricMap { for _, mt := range pmt.Metrics() { errs := s.validateMetric(mt) if len(errs) > 0 { serrs = append(serrs, errs...) 
} } } // add collectors to plugins (processors and publishers) for _, collector := range collectors { plugins = append(plugins, collector) } // validate plugins for _, plg := range plugins { typ, err := core.ToPluginType(plg.TypeName()) if err != nil { return []serror.SnapError{serror.New(err)} } mergedConfig := plg.Config().ReverseMerge( s.Config.Plugins.getPluginConfigDataNode( typ, plg.Name(), plg.Version())) errs := s.validatePluginSubscription(plg, mergedConfig) if len(errs) > 0 { serrs = append(serrs, errs...) return serrs } } return } // validatePluginUnloading checks whether unloading the plugin is safe for existing running tasks. // If the plugin is used by a running task and there is no replacement, return an error whose message // contains the IDs of the tasks using the plugin; unloading stays blocked until those tasks are stopped func (s *subscriptionGroups) validatePluginUnloading(pluginToUnload *loadedPlugin) (errs []serror.SnapError) { s.Lock() defer s.Unlock() for id, group := range s.subscriptionMap { if err := group.validatePluginUnloading(id, pluginToUnload); err != nil { errs = append(errs, err) } } return errs } func (p *subscriptionGroups) validatePluginSubscription(pl core.SubscribedPlugin, mergedConfig *cdata.ConfigDataNode) []serror.SnapError { var serrs = []serror.SnapError{} controlLogger.WithFields(log.Fields{ "_block": "validate-plugin-subscription", "plugin": fmt.Sprintf("%s:%d", pl.Name(), pl.Version()), }).Info(fmt.Sprintf("validating dependencies for plugin %s:%d", pl.Name(), pl.Version())) lp, err := p.pluginManager.get(key(pl)) if err != nil { serrs = append(serrs, pluginNotFoundError(pl)) return serrs } if lp.ConfigPolicy != nil { ncd := lp.ConfigPolicy.Get([]string{""}) _, errs := ncd.Process(mergedConfig.Table()) if errs != nil && errs.HasErrors() { for _, e := range errs.Errors() { se := serror.New(e) se.SetFields(map[string]interface{}{"name": pl.Name(), "version": pl.Version()}) serrs = append(serrs, se) } } } return serrs } func (s *subscriptionGroups) validateMetric( metric core.Metric) (serrs []serror.SnapError) { mts, err := s.metricCatalog.GetMetrics(metric.Namespace(), metric.Version()) if err != nil { serrs = append(serrs, serror.New(err, map[string]interface{}{ "name": metric.Namespace().String(), "version": metric.Version(), })) return serrs } for _, m := range mts { // No metric found; return an error. if m == nil { serrs = append( serrs, serror.New( fmt.Errorf("no metric found; cannot subscribe: (%s) version(%d)", metric.Namespace(), metric.Version()))) continue } m.config = metric.Config() typ, serr := core.ToPluginType(m.Plugin.TypeName()) if serr != nil { serrs = append(serrs, serror.New(serr)) continue } // merge global plugin config if m.config != nil { m.config.ReverseMergeInPlace( s.Config.Plugins.getPluginConfigDataNode(typ, m.Plugin.Name(), m.Plugin.Version())) } else { m.config = s.Config.Plugins.getPluginConfigDataNode(typ, m.Plugin.Name(), m.Plugin.Version()) } // When a metric is added to the MetricCatalog, the policy of rules defined by the plugin is added to the metric's policy. // If no rules are defined for a metric, we set the metric's policy to an empty ConfigPolicyNode. // Checking m.policy for nil will not work, we need to check if rules are nil.
if m.policy.HasRules() { if m.Config() == nil { fields := log.Fields{ "metric": m.Namespace(), "version": m.Version(), "plugin": m.Plugin.Name(), } serrs = append(serrs, serror.New(ErrConfigRequiredForMetric, fields)) continue } ncdTable, errs := m.policy.Process(m.Config().Table()) if errs != nil && errs.HasErrors() { for _, e := range errs.Errors() { serrs = append(serrs, serror.New(e)) } continue } m.config = cdata.FromTable(*ncdTable) } } return serrs } // pluginIsSubscribed returns true if a provided plugin has been found among subscribed plugins // in the following subscription group func (s *subscriptionGroup)
(plugin *loadedPlugin) bool { // range over subscribed plugins to find if the plugin is there for _, sp := range s.plugins { if sp.TypeName() == plugin.TypeName() && sp.Name() == plugin.Name() && sp.Version() == plugin.Version() { return true } } return false } // validatePluginUnloading verifies if a given plugin might be unloaded without causing running task failures func (s *subscriptionGroup) validatePluginUnloading(id string, plgToUnload *loadedPlugin) (serr serror.SnapError
pluginIsSubscribed
identifier_name
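pluginIsSubscribed, the identifier filled in above, matches on the (type name, plugin name, version) triple, which keeps two versions of the same plugin distinct; both can legitimately be subscribed at once during an upgrade. A usage sketch with local stand-in types:

package main

import "fmt"

type plugin struct {
	typeName string
	name     string
	version  int
}

// isSubscribed mirrors pluginIsSubscribed: linear scan, exact triple match.
func isSubscribed(subs []plugin, p plugin) bool {
	for _, sp := range subs {
		if sp.typeName == p.typeName && sp.name == p.name && sp.version == p.version {
			return true
		}
	}
	return false
}

func main() {
	subs := []plugin{{"collector", "cpu", 3}, {"publisher", "file", 2}}
	fmt.Println(isSubscribed(subs, plugin{"collector", "cpu", 3})) // true
	fmt.Println(isSubscribed(subs, plugin{"collector", "cpu", 2})) // false
}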
prod.go
[0] p.fpServicePrincipalID = "f1dd0a37-89c6-4e07-bcd1-ffd3d43d8875" clustersGenevaLoggingPrivateKey, clustersGenevaLoggingCertificates, err := p.GetCertificateSecret(ctx, ClusterLoggingSecretName) if err != nil { return nil, err } p.clustersGenevaLoggingPrivateKey = clustersGenevaLoggingPrivateKey p.clustersGenevaLoggingCertificate = clustersGenevaLoggingCertificates[0] p.e2eStorageAccountName = "arov4e2e" p.e2eStorageAccountRGName = "global" p.e2eStorageAccountSubID = "0923c7de-9fca-4d9e-baf3-131d0c5b2ea4" if p.ACRResourceID() != "" { // TODO: ugh! acrResource, err := azure.ParseResourceID(p.ACRResourceID()) if err != nil { return nil, err } p.acrName = acrResource.ResourceName } else { p.acrName = "arointsvc" } return p, nil } func (p *prod) InitializeAuthorizers() error { p.armClientAuthorizer = clientauthorizer.NewARM(p.log) adminClientAuthorizer, err := clientauthorizer.NewAdmin( p.log, "/etc/aro-rp/admin-ca-bundle.pem", os.Getenv("ADMIN_API_CLIENT_CERT_COMMON_NAME"), ) if err != nil { return err } p.adminClientAuthorizer = adminClientAuthorizer return nil } func (p *prod) ArmClientAuthorizer() clientauthorizer.ClientAuthorizer { return p.armClientAuthorizer } func (p *prod) AdminClientAuthorizer() clientauthorizer.ClientAuthorizer { return p.adminClientAuthorizer } func (p *prod) ACRResourceID() string { return os.Getenv("ACR_RESOURCE_ID") } func (p *prod) ACRName() string { return p.acrName } func (p *prod) AROOperatorImage() string { return fmt.Sprintf("%s.azurecr.io/aro:%s", p.acrName, version.GitCommit) } func (p *prod) populateCosmosDB(ctx context.Context, rpAuthorizer autorest.Authorizer) error { databaseaccounts := documentdb.NewDatabaseAccountsClient(p.SubscriptionID(), rpAuthorizer) accts, err := databaseaccounts.ListByResourceGroup(ctx, p.ResourceGroup()) if err != nil { return err } if len(*accts.Value) != 1 { return fmt.Errorf("found %d database accounts, expected 1", len(*accts.Value)) } keys, err := databaseaccounts.ListKeys(ctx, p.ResourceGroup(), *(*accts.Value)[0].Name) if err != nil { return err } p.cosmosDBAccountName = *(*accts.Value)[0].Name p.cosmosDBPrimaryMasterKey = *keys.PrimaryMasterKey return nil } func (p *prod) populateDomain(ctx context.Context, rpAuthorizer autorest.Authorizer) error { zones := dns.NewZonesClient(p.SubscriptionID(), rpAuthorizer) zs, err := zones.ListByResourceGroup(ctx, p.ResourceGroup(), nil) if err != nil { return err } if len(zs) != 1 { return fmt.Errorf("found %d zones, expected 1", len(zs)) } p.domain = *zs[0].Name return nil } func (p *prod) populateVaultURIs(ctx context.Context, rpAuthorizer autorest.Authorizer) error { vaults := keyvault.NewVaultsClient(p.SubscriptionID(), rpAuthorizer) vs, err := vaults.ListByResourceGroup(ctx, p.ResourceGroup(), nil) if err != nil { return err } for _, v := range vs { if v.Tags[generator.KeyVaultTagName] != nil { switch *v.Tags[generator.KeyVaultTagName] { case generator.ClustersKeyVaultTagValue: p.clustersKeyvaultURI = *v.Properties.VaultURI case generator.ServiceKeyVaultTagValue: p.serviceKeyvaultURI = *v.Properties.VaultURI } } } if p.clustersKeyvaultURI == "" { return fmt.Errorf("clusters key vault not found") } if p.serviceKeyvaultURI == "" { return fmt.Errorf("service key vault not found") } return nil } func (p *prod) populateZones(ctx context.Context, rpAuthorizer autorest.Authorizer) error { c := compute.NewResourceSkusClient(p.SubscriptionID(), rpAuthorizer) skus, err := c.List(ctx, "") if err != nil { return err } p.zones = map[string][]string{} for _, sku := range skus { if 
!strings.EqualFold((*sku.Locations)[0], p.Location()) || *sku.ResourceType != "virtualMachines" { continue } p.zones[*sku.Name] = *(*sku.LocationInfo)[0].Zones } return nil } func (p *prod) ClustersGenevaLoggingConfigVersion() string { return p.clustersGenevaLoggingConfigVersion } func (p *prod) ClustersGenevaLoggingEnvironment() string { return p.clustersGenevaLoggingEnvironment } func (p *prod) ClustersGenevaLoggingSecret() (*rsa.PrivateKey, *x509.Certificate) { return p.clustersGenevaLoggingPrivateKey, p.clustersGenevaLoggingCertificate } func (p *prod) ClustersKeyvaultURI() string { return p.clustersKeyvaultURI } func (p *prod) CosmosDB() (string, string) { return p.cosmosDBAccountName, p.cosmosDBPrimaryMasterKey } func (p *prod) DatabaseName() string { return "ARO" } func (p *prod) DialContext(ctx context.Context, network, address string) (net.Conn, error) { return (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, }).DialContext(ctx, network, address) } func (p *prod) Domain() string { return p.domain } func (p *prod) FPAuthorizer(tenantID, resource string) (refreshable.Authorizer, error) { oauthConfig, err := adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, tenantID) if err != nil { return nil, err } sp, err := adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, p.fpServicePrincipalID, p.fpCertificate, p.fpPrivateKey, resource) if err != nil { return nil, err } return refreshable.NewAuthorizer(sp), nil } func (p *prod) GetCertificateSecret(ctx context.Context, secretName string) (*rsa.PrivateKey, []*x509.Certificate, error) { bundle, err := p.keyvault.GetSecret(ctx, p.serviceKeyvaultURI, secretName, "") if err != nil { return nil, nil, err } key, certs, err := pem.Parse([]byte(*bundle.Value)) if err != nil { return nil, nil, err } if key == nil { return nil, nil, fmt.Errorf("no private key found") } if len(certs) == 0 { return nil, nil, fmt.Errorf("no certificate found") } return key, certs, nil } func (p *prod) GetSecret(ctx context.Context, secretName string) ([]byte, error) { bundle, err := p.keyvault.GetSecret(ctx, p.serviceKeyvaultURI, secretName, "") if err != nil { return nil, err } return base64.StdEncoding.DecodeString(*bundle.Value) } func (p *prod) Listen() (net.Listener, error) { return net.Listen("tcp", ":8443") } // ManagedDomain returns the fully qualified domain of a cluster if we manage // it. If we don't, it returns the empty string. We manage only domains of the // form "foo.$LOCATION.aroapp.io" and "foo" (we consider this a short form of // the former). func (p *prod) ManagedDomain(domain string) (string, error) { if domain == "" || strings.HasPrefix(domain, ".") || strings.HasSuffix(domain, ".") { // belt and braces: validation should already prevent this return "", fmt.Errorf("invalid domain %q", domain) } domain = strings.TrimSuffix(domain, "."+p.Domain()) if strings.ContainsRune(domain, '.') { return "", nil } return domain + "." + p.Domain(), nil } func (p *prod) MetricsSocketPath() string { return "/var/etw/mdm_statsd.socket" } func (p *prod) Zones(vmSize string) ([]string, error) { zones, found := p.zones[vmSize] if !found { return nil, fmt.Errorf("zone information not found for vm size %q", vmSize) } return zones, nil } func (d *prod) CreateARMResourceGroupRoleAssignment(ctx context.Context, fpAuthorizer refreshable.Authorizer, resourceGroup string) error
{ // ARM ResourceGroup role assignments are not required in production. return nil }
identifier_body
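ManagedDomain in the record above normalizes a bare label to label.zone, maps an FQDN already under the zone back to the same form, and returns empty for anything else. A standalone sketch of that rule; the zone string is a placeholder, not a real deployment value:

package main

import (
	"fmt"
	"strings"
)

func managedDomain(domain, zone string) (string, error) {
	// Belt and braces: validation should already prevent these shapes.
	if domain == "" || strings.HasPrefix(domain, ".") || strings.HasSuffix(domain, ".") {
		return "", fmt.Errorf("invalid domain %q", domain)
	}
	domain = strings.TrimSuffix(domain, "."+zone)
	if strings.ContainsRune(domain, '.') {
		return "", nil // dots remain: not a short form, not ours to manage
	}
	return domain + "." + zone, nil
}

func main() {
	const zone = "eastus.aroapp.io" // placeholder zone
	for _, d := range []string{"foo", "foo.eastus.aroapp.io", "example.com"} {
		md, err := managedDomain(d, zone)
		fmt.Printf("%-22q -> %q err=%v\n", d, md, err)
	}
}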
prod.go
P/pkg/util/pem" "github.com/Azure/ARO-RP/pkg/util/refreshable" "github.com/Azure/ARO-RP/pkg/util/version" ) type prod struct { instancemetadata.InstanceMetadata armClientAuthorizer clientauthorizer.ClientAuthorizer adminClientAuthorizer clientauthorizer.ClientAuthorizer keyvault basekeyvault.BaseClient acrName string clustersKeyvaultURI string cosmosDBAccountName string cosmosDBPrimaryMasterKey string domain string serviceKeyvaultURI string zones map[string][]string fpCertificate *x509.Certificate fpPrivateKey *rsa.PrivateKey fpServicePrincipalID string clustersGenevaLoggingCertificate *x509.Certificate clustersGenevaLoggingPrivateKey *rsa.PrivateKey clustersGenevaLoggingConfigVersion string clustersGenevaLoggingEnvironment string e2eStorageAccountName string e2eStorageAccountRGName string e2eStorageAccountSubID string log *logrus.Entry envType environmentType } func newProd(ctx context.Context, log *logrus.Entry, instancemetadata instancemetadata.InstanceMetadata, rpAuthorizer, kvAuthorizer autorest.Authorizer) (*prod, error) { p := &prod{ InstanceMetadata: instancemetadata, keyvault: basekeyvault.New(kvAuthorizer), clustersGenevaLoggingEnvironment: "DiagnosticsProd", clustersGenevaLoggingConfigVersion: "2.2", log: log, envType: environmentTypeProduction, } err := p.populateCosmosDB(ctx, rpAuthorizer) if err != nil { return nil, err } err = p.populateDomain(ctx, rpAuthorizer) if err != nil { return nil, err } err = p.populateVaultURIs(ctx, rpAuthorizer) if err != nil { return nil, err } err = p.populateZones(ctx, rpAuthorizer) if err != nil { return nil, err } fpPrivateKey, fpCertificates, err := p.GetCertificateSecret(ctx, RPFirstPartySecretName) if err != nil { return nil, err } p.fpPrivateKey = fpPrivateKey p.fpCertificate = fpCertificates[0] p.fpServicePrincipalID = "f1dd0a37-89c6-4e07-bcd1-ffd3d43d8875" clustersGenevaLoggingPrivateKey, clustersGenevaLoggingCertificates, err := p.GetCertificateSecret(ctx, ClusterLoggingSecretName) if err != nil { return nil, err } p.clustersGenevaLoggingPrivateKey = clustersGenevaLoggingPrivateKey p.clustersGenevaLoggingCertificate = clustersGenevaLoggingCertificates[0] p.e2eStorageAccountName = "arov4e2e" p.e2eStorageAccountRGName = "global" p.e2eStorageAccountSubID = "0923c7de-9fca-4d9e-baf3-131d0c5b2ea4" if p.ACRResourceID() != "" { // TODO: ugh! 
acrResource, err := azure.ParseResourceID(p.ACRResourceID()) if err != nil { return nil, err } p.acrName = acrResource.ResourceName } else { p.acrName = "arointsvc" } return p, nil } func (p *prod) InitializeAuthorizers() error { p.armClientAuthorizer = clientauthorizer.NewARM(p.log) adminClientAuthorizer, err := clientauthorizer.NewAdmin( p.log, "/etc/aro-rp/admin-ca-bundle.pem", os.Getenv("ADMIN_API_CLIENT_CERT_COMMON_NAME"), ) if err != nil { return err } p.adminClientAuthorizer = adminClientAuthorizer return nil } func (p *prod) ArmClientAuthorizer() clientauthorizer.ClientAuthorizer { return p.armClientAuthorizer } func (p *prod) AdminClientAuthorizer() clientauthorizer.ClientAuthorizer { return p.adminClientAuthorizer } func (p *prod) ACRResourceID() string { return os.Getenv("ACR_RESOURCE_ID") } func (p *prod) ACRName() string { return p.acrName } func (p *prod) AROOperatorImage() string { return fmt.Sprintf("%s.azurecr.io/aro:%s", p.acrName, version.GitCommit) } func (p *prod) populateCosmosDB(ctx context.Context, rpAuthorizer autorest.Authorizer) error { databaseaccounts := documentdb.NewDatabaseAccountsClient(p.SubscriptionID(), rpAuthorizer) accts, err := databaseaccounts.ListByResourceGroup(ctx, p.ResourceGroup()) if err != nil { return err } if len(*accts.Value) != 1
keys, err := databaseaccounts.ListKeys(ctx, p.ResourceGroup(), *(*accts.Value)[0].Name) if err != nil { return err } p.cosmosDBAccountName = *(*accts.Value)[0].Name p.cosmosDBPrimaryMasterKey = *keys.PrimaryMasterKey return nil } func (p *prod) populateDomain(ctx context.Context, rpAuthorizer autorest.Authorizer) error { zones := dns.NewZonesClient(p.SubscriptionID(), rpAuthorizer) zs, err := zones.ListByResourceGroup(ctx, p.ResourceGroup(), nil) if err != nil { return err } if len(zs) != 1 { return fmt.Errorf("found %d zones, expected 1", len(zs)) } p.domain = *zs[0].Name return nil } func (p *prod) populateVaultURIs(ctx context.Context, rpAuthorizer autorest.Authorizer) error { vaults := keyvault.NewVaultsClient(p.SubscriptionID(), rpAuthorizer) vs, err := vaults.ListByResourceGroup(ctx, p.ResourceGroup(), nil) if err != nil { return err } for _, v := range vs { if v.Tags[generator.KeyVaultTagName] != nil { switch *v.Tags[generator.KeyVaultTagName] { case generator.ClustersKeyVaultTagValue: p.clustersKeyvaultURI = *v.Properties.VaultURI case generator.ServiceKeyVaultTagValue: p.serviceKeyvaultURI = *v.Properties.VaultURI } } } if p.clustersKeyvaultURI == "" { return fmt.Errorf("clusters key vault not found") } if p.serviceKeyvaultURI == "" { return fmt.Errorf("service key vault not found") } return nil } func (p *prod) populateZones(ctx context.Context, rpAuthorizer autorest.Authorizer) error { c := compute.NewResourceSkusClient(p.SubscriptionID(), rpAuthorizer) skus, err := c.List(ctx, "") if err != nil { return err } p.zones = map[string][]string{} for _, sku := range skus { if !strings.EqualFold((*sku.Locations)[0], p.Location()) || *sku.ResourceType != "virtualMachines" { continue } p.zones[*sku.Name] = *(*sku.LocationInfo)[0].Zones } return nil } func (p *prod) ClustersGenevaLoggingConfigVersion() string { return p.clustersGenevaLoggingConfigVersion } func (p *prod) ClustersGenevaLoggingEnvironment() string { return p.clustersGenevaLoggingEnvironment } func (p *prod) ClustersGenevaLoggingSecret() (*rsa.PrivateKey, *x509.Certificate) { return p.clustersGenevaLoggingPrivateKey, p.clustersGenevaLoggingCertificate } func (p *prod) ClustersKeyvaultURI() string { return p.clustersKeyvaultURI } func (p *prod) CosmosDB() (string, string) { return p.cosmosDBAccountName, p.cosmosDBPrimaryMasterKey } func (p *prod) DatabaseName() string { return "ARO" } func (p *prod) DialContext(ctx context.Context, network, address string) (net.Conn, error) { return (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, }).DialContext(ctx, network, address) } func (p *prod) Domain() string { return p.domain } func (p *prod) FPAuthorizer(tenantID, resource string) (refreshable.Authorizer, error) { oauthConfig, err := adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, tenantID) if err != nil { return nil, err } sp, err := adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, p.fpServicePrincipalID, p.fpCertificate, p.fpPrivateKey, resource) if err != nil { return nil, err } return refreshable.NewAuthorizer(sp), nil } func (p *prod) GetCertificateSecret(ctx context.Context, secretName string) (*rsa.PrivateKey, []*x509.Certificate, error) { bundle, err := p.keyvault.GetSecret(ctx, p.serviceKeyvaultURI, secretName,
{ return fmt.Errorf("found %d database accounts, expected 1", len(*accts.Value)) }
conditional_block
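populateZones in the record above filters the SKU list down to virtualMachines entries in the RP's location and indexes their availability zones by VM size, which is what Zones() later serves. A sketch with a local stand-in for the compute API's ResourceSku type:

package main

import (
	"fmt"
	"strings"
)

type sku struct {
	name         string
	resourceType string
	locations    []string
	zones        []string
}

// zonesBySize keeps only VM SKUs in the given location, keyed by size name.
func zonesBySize(skus []sku, location string) map[string][]string {
	out := map[string][]string{}
	for _, s := range skus {
		if len(s.locations) == 0 || !strings.EqualFold(s.locations[0], location) || s.resourceType != "virtualMachines" {
			continue
		}
		out[s.name] = s.zones
	}
	return out
}

func main() {
	skus := []sku{
		{"Standard_D8s_v3", "virtualMachines", []string{"eastus"}, []string{"1", "2", "3"}},
		{"Standard_LRS", "disks", []string{"eastus"}, nil},
	}
	fmt.Println(zonesBySize(skus, "EastUS")) // map[Standard_D8s_v3:[1 2 3]]
}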
prod.go
-RP/pkg/util/pem" "github.com/Azure/ARO-RP/pkg/util/refreshable" "github.com/Azure/ARO-RP/pkg/util/version" ) type prod struct { instancemetadata.InstanceMetadata armClientAuthorizer clientauthorizer.ClientAuthorizer adminClientAuthorizer clientauthorizer.ClientAuthorizer keyvault basekeyvault.BaseClient acrName string clustersKeyvaultURI string cosmosDBAccountName string cosmosDBPrimaryMasterKey string domain string serviceKeyvaultURI string zones map[string][]string fpCertificate *x509.Certificate fpPrivateKey *rsa.PrivateKey fpServicePrincipalID string clustersGenevaLoggingCertificate *x509.Certificate clustersGenevaLoggingPrivateKey *rsa.PrivateKey clustersGenevaLoggingConfigVersion string clustersGenevaLoggingEnvironment string e2eStorageAccountName string e2eStorageAccountRGName string e2eStorageAccountSubID string log *logrus.Entry envType environmentType } func newProd(ctx context.Context, log *logrus.Entry, instancemetadata instancemetadata.InstanceMetadata, rpAuthorizer, kvAuthorizer autorest.Authorizer) (*prod, error) { p := &prod{ InstanceMetadata: instancemetadata, keyvault: basekeyvault.New(kvAuthorizer), clustersGenevaLoggingEnvironment: "DiagnosticsProd", clustersGenevaLoggingConfigVersion: "2.2", log: log, envType: environmentTypeProduction, } err := p.populateCosmosDB(ctx, rpAuthorizer) if err != nil { return nil, err } err = p.populateDomain(ctx, rpAuthorizer) if err != nil { return nil, err } err = p.populateVaultURIs(ctx, rpAuthorizer) if err != nil { return nil, err } err = p.populateZones(ctx, rpAuthorizer) if err != nil { return nil, err } fpPrivateKey, fpCertificates, err := p.GetCertificateSecret(ctx, RPFirstPartySecretName) if err != nil { return nil, err } p.fpPrivateKey = fpPrivateKey p.fpCertificate = fpCertificates[0] p.fpServicePrincipalID = "f1dd0a37-89c6-4e07-bcd1-ffd3d43d8875" clustersGenevaLoggingPrivateKey, clustersGenevaLoggingCertificates, err := p.GetCertificateSecret(ctx, ClusterLoggingSecretName) if err != nil { return nil, err } p.clustersGenevaLoggingPrivateKey = clustersGenevaLoggingPrivateKey p.clustersGenevaLoggingCertificate = clustersGenevaLoggingCertificates[0] p.e2eStorageAccountName = "arov4e2e" p.e2eStorageAccountRGName = "global" p.e2eStorageAccountSubID = "0923c7de-9fca-4d9e-baf3-131d0c5b2ea4" if p.ACRResourceID() != "" { // TODO: ugh! acrResource, err := azure.ParseResourceID(p.ACRResourceID()) if err != nil { return nil, err } p.acrName = acrResource.ResourceName } else { p.acrName = "arointsvc" } return p, nil } func (p *prod) InitializeAuthorizers() error { p.armClientAuthorizer = clientauthorizer.NewARM(p.log) adminClientAuthorizer, err := clientauthorizer.NewAdmin( p.log, "/etc/aro-rp/admin-ca-bundle.pem", os.Getenv("ADMIN_API_CLIENT_CERT_COMMON_NAME"), ) if err != nil { return err } p.adminClientAuthorizer = adminClientAuthorizer return nil } func (p *prod) ArmClientAuthorizer() clientauthorizer.ClientAuthorizer { return p.armClientAuthorizer } func (p *prod) AdminClientAuthorizer() clientauthorizer.ClientAuthorizer { return p.adminClientAuthorizer }
func (p *prod) ACRResourceID() string { return os.Getenv("ACR_RESOURCE_ID") } func (p *prod) ACRName() string { return p.acrName } func (p *prod) AROOperatorImage() string { return fmt.Sprintf("%s.azurecr.io/aro:%s", p.acrName, version.GitCommit) } func (p *prod) populateCosmosDB(ctx context.Context, rpAuthorizer autorest.Authorizer) error { databaseaccounts := documentdb.NewDatabaseAccountsClient(p.SubscriptionID(), rpAuthorizer) accts, err := databaseaccounts.ListByResourceGroup(ctx, p.ResourceGroup()) if err != nil { return err } if len(*accts.Value) != 1 { return fmt.Errorf("found %d database accounts, expected 1", len(*accts.Value)) } keys, err := databaseaccounts.ListKeys(ctx, p.ResourceGroup(), *(*accts.Value)[0].Name) if err != nil { return err } p.cosmosDBAccountName = *(*accts.Value)[0].Name p.cosmosDBPrimaryMasterKey = *keys.PrimaryMasterKey return nil } func (p *prod) populateDomain(ctx context.Context, rpAuthorizer autorest.Authorizer) error { zones := dns.NewZonesClient(p.SubscriptionID(), rpAuthorizer) zs, err := zones.ListByResourceGroup(ctx, p.ResourceGroup(), nil) if err != nil { return err } if len(zs) != 1 { return fmt.Errorf("found %d zones, expected 1", len(zs)) } p.domain = *zs[0].Name return nil } func (p *prod) populateVaultURIs(ctx context.Context, rpAuthorizer autorest.Authorizer) error { vaults := keyvault.NewVaultsClient(p.SubscriptionID(), rpAuthorizer) vs, err := vaults.ListByResourceGroup(ctx, p.ResourceGroup(), nil) if err != nil { return err } for _, v := range vs { if v.Tags[generator.KeyVaultTagName] != nil { switch *v.Tags[generator.KeyVaultTagName] { case generator.ClustersKeyVaultTagValue: p.clustersKeyvaultURI = *v.Properties.VaultURI case generator.ServiceKeyVaultTagValue: p.serviceKeyvaultURI = *v.Properties.VaultURI } } } if p.clustersKeyvaultURI == "" { return fmt.Errorf("clusters key vault not found") } if p.serviceKeyvaultURI == "" { return fmt.Errorf("service key vault not found") } return nil } func (p *prod) populateZones(ctx context.Context, rpAuthorizer autorest.Authorizer) error { c := compute.NewResourceSkusClient(p.SubscriptionID(), rpAuthorizer) skus, err := c.List(ctx, "") if err != nil { return err } p.zones = map[string][]string{} for _, sku := range skus { if !strings.EqualFold((*sku.Locations)[0], p.Location()) || *sku.ResourceType != "virtualMachines" { continue } p.zones[*sku.Name] = *(*sku.LocationInfo)[0].Zones } return nil } func (p *prod) ClustersGenevaLoggingConfigVersion() string { return p.clustersGenevaLoggingConfigVersion } func (p *prod) ClustersGenevaLoggingEnvironment() string { return p.clustersGenevaLoggingEnvironment } func (p *prod) ClustersGenevaLoggingSecret() (*rsa.PrivateKey, *x509.Certificate) { return p.clustersGenevaLoggingPrivateKey, p.clustersGenevaLoggingCertificate } func (p *prod) ClustersKeyvaultURI() string { return p.clustersKeyvaultURI } func (p *prod) CosmosDB() (string, string) { return p.cosmosDBAccountName, p.cosmosDBPrimaryMasterKey } func (p *prod) DatabaseName() string { return "ARO" } func (p *prod) DialContext(ctx context.Context, network, address string) (net.Conn, error) { return (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, }).DialContext(ctx, network, address) } func (p *prod) Domain() string { return p.domain } func (p *prod) FPAuthorizer(tenantID, resource string) (refreshable.Authorizer, error) { oauthConfig, err := adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, tenantID) if err != nil { return nil, err } sp, err := 
adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, p.fpServicePrincipalID, p.fpCertificate, p.fpPrivateKey, resource) if err != nil { return nil, err } return refreshable.NewAuthorizer(sp), nil } func (p *prod) GetCertificateSecret(ctx context.Context, secretName string) (*rsa.PrivateKey, []*x509.Certificate, error) { bundle, err := p.keyvault.GetSecret(ctx, p.serviceKeyvaultURI, secretName, "")
random_line_split
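A note on the prod.go record above: populateZones flattens the resource-SKU listing into a map from VM size to supported availability zones, and Zones() is then a plain lookup that errors on unknown sizes. Below is a minimal, self-contained Go sketch of that shape; the sample SKU data and the pickZones name are hypothetical, not part of ARO-RP.

package main

import "fmt"

// zones maps a VM size to the availability zones it supports,
// mirroring the map that populateZones fills from the SKU listing.
var zones = map[string][]string{
	"Standard_D8s_v3": {"1", "2", "3"}, // hypothetical sample data
}

// pickZones mirrors prod.Zones: a lookup that errors on unknown sizes.
func pickZones(vmSize string) ([]string, error) {
	zs, found := zones[vmSize]
	if !found {
		return nil, fmt.Errorf("zone information not found for vm size %q", vmSize)
	}
	return zs, nil
}

func main() {
	zs, err := pickZones("Standard_D8s_v3")
	fmt.Println(zs, err) // [1 2 3] <nil>
}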
prod.go
GenevaLoggingPrivateKey p.clustersGenevaLoggingCertificate = clustersGenevaLoggingCertificates[0] p.e2eStorageAccountName = "arov4e2e" p.e2eStorageAccountRGName = "global" p.e2eStorageAccountSubID = "0923c7de-9fca-4d9e-baf3-131d0c5b2ea4" if p.ACRResourceID() != "" { // TODO: ugh! acrResource, err := azure.ParseResourceID(p.ACRResourceID()) if err != nil { return nil, err } p.acrName = acrResource.ResourceName } else { p.acrName = "arointsvc" } return p, nil } func (p *prod) InitializeAuthorizers() error { p.armClientAuthorizer = clientauthorizer.NewARM(p.log) adminClientAuthorizer, err := clientauthorizer.NewAdmin( p.log, "/etc/aro-rp/admin-ca-bundle.pem", os.Getenv("ADMIN_API_CLIENT_CERT_COMMON_NAME"), ) if err != nil { return err } p.adminClientAuthorizer = adminClientAuthorizer return nil } func (p *prod) ArmClientAuthorizer() clientauthorizer.ClientAuthorizer { return p.armClientAuthorizer } func (p *prod) AdminClientAuthorizer() clientauthorizer.ClientAuthorizer { return p.adminClientAuthorizer } func (p *prod) ACRResourceID() string { return os.Getenv("ACR_RESOURCE_ID") } func (p *prod) ACRName() string { return p.acrName } func (p *prod) AROOperatorImage() string { return fmt.Sprintf("%s.azurecr.io/aro:%s", p.acrName, version.GitCommit) } func (p *prod) populateCosmosDB(ctx context.Context, rpAuthorizer autorest.Authorizer) error { databaseaccounts := documentdb.NewDatabaseAccountsClient(p.SubscriptionID(), rpAuthorizer) accts, err := databaseaccounts.ListByResourceGroup(ctx, p.ResourceGroup()) if err != nil { return err } if len(*accts.Value) != 1 { return fmt.Errorf("found %d database accounts, expected 1", len(*accts.Value)) } keys, err := databaseaccounts.ListKeys(ctx, p.ResourceGroup(), *(*accts.Value)[0].Name) if err != nil { return err } p.cosmosDBAccountName = *(*accts.Value)[0].Name p.cosmosDBPrimaryMasterKey = *keys.PrimaryMasterKey return nil } func (p *prod) populateDomain(ctx context.Context, rpAuthorizer autorest.Authorizer) error { zones := dns.NewZonesClient(p.SubscriptionID(), rpAuthorizer) zs, err := zones.ListByResourceGroup(ctx, p.ResourceGroup(), nil) if err != nil { return err } if len(zs) != 1 { return fmt.Errorf("found %d zones, expected 1", len(zs)) } p.domain = *zs[0].Name return nil } func (p *prod) populateVaultURIs(ctx context.Context, rpAuthorizer autorest.Authorizer) error { vaults := keyvault.NewVaultsClient(p.SubscriptionID(), rpAuthorizer) vs, err := vaults.ListByResourceGroup(ctx, p.ResourceGroup(), nil) if err != nil { return err } for _, v := range vs { if v.Tags[generator.KeyVaultTagName] != nil { switch *v.Tags[generator.KeyVaultTagName] { case generator.ClustersKeyVaultTagValue: p.clustersKeyvaultURI = *v.Properties.VaultURI case generator.ServiceKeyVaultTagValue: p.serviceKeyvaultURI = *v.Properties.VaultURI } } } if p.clustersKeyvaultURI == "" { return fmt.Errorf("clusters key vault not found") } if p.serviceKeyvaultURI == "" { return fmt.Errorf("service key vault not found") } return nil } func (p *prod) populateZones(ctx context.Context, rpAuthorizer autorest.Authorizer) error { c := compute.NewResourceSkusClient(p.SubscriptionID(), rpAuthorizer) skus, err := c.List(ctx, "") if err != nil { return err } p.zones = map[string][]string{} for _, sku := range skus { if !strings.EqualFold((*sku.Locations)[0], p.Location()) || *sku.ResourceType != "virtualMachines" { continue } p.zones[*sku.Name] = *(*sku.LocationInfo)[0].Zones } return nil } func (p *prod) ClustersGenevaLoggingConfigVersion() string { return p.clustersGenevaLoggingConfigVersion 
} func (p *prod) ClustersGenevaLoggingEnvironment() string { return p.clustersGenevaLoggingEnvironment } func (p *prod) ClustersGenevaLoggingSecret() (*rsa.PrivateKey, *x509.Certificate) { return p.clustersGenevaLoggingPrivateKey, p.clustersGenevaLoggingCertificate } func (p *prod) ClustersKeyvaultURI() string { return p.clustersKeyvaultURI } func (p *prod) CosmosDB() (string, string) { return p.cosmosDBAccountName, p.cosmosDBPrimaryMasterKey } func (p *prod) DatabaseName() string { return "ARO" } func (p *prod) DialContext(ctx context.Context, network, address string) (net.Conn, error) { return (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, }).DialContext(ctx, network, address) } func (p *prod) Domain() string { return p.domain } func (p *prod) FPAuthorizer(tenantID, resource string) (refreshable.Authorizer, error) { oauthConfig, err := adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, tenantID) if err != nil { return nil, err } sp, err := adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, p.fpServicePrincipalID, p.fpCertificate, p.fpPrivateKey, resource) if err != nil { return nil, err } return refreshable.NewAuthorizer(sp), nil } func (p *prod) GetCertificateSecret(ctx context.Context, secretName string) (*rsa.PrivateKey, []*x509.Certificate, error) { bundle, err := p.keyvault.GetSecret(ctx, p.serviceKeyvaultURI, secretName, "") if err != nil { return nil, nil, err } key, certs, err := pem.Parse([]byte(*bundle.Value)) if err != nil { return nil, nil, err } if key == nil { return nil, nil, fmt.Errorf("no private key found") } if len(certs) == 0 { return nil, nil, fmt.Errorf("no certificate found") } return key, certs, nil } func (p *prod) GetSecret(ctx context.Context, secretName string) ([]byte, error) { bundle, err := p.keyvault.GetSecret(ctx, p.serviceKeyvaultURI, secretName, "") if err != nil { return nil, err } return base64.StdEncoding.DecodeString(*bundle.Value) } func (p *prod) Listen() (net.Listener, error) { return net.Listen("tcp", ":8443") } // ManagedDomain returns the fully qualified domain of a cluster if we manage // it. If we don't, it returns the empty string. We manage only domains of the // form "foo.$LOCATION.aroapp.io" and "foo" (we consider this a short form of // the former). func (p *prod) ManagedDomain(domain string) (string, error) { if domain == "" || strings.HasPrefix(domain, ".") || strings.HasSuffix(domain, ".") { // belt and braces: validation should already prevent this return "", fmt.Errorf("invalid domain %q", domain) } domain = strings.TrimSuffix(domain, "."+p.Domain()) if strings.ContainsRune(domain, '.') { return "", nil } return domain + "." + p.Domain(), nil } func (p *prod) MetricsSocketPath() string { return "/var/etw/mdm_statsd.socket" } func (p *prod) Zones(vmSize string) ([]string, error) { zones, found := p.zones[vmSize] if !found { return nil, fmt.Errorf("zone information not found for vm size %q", vmSize) } return zones, nil } func (d *prod) CreateARMResourceGroupRoleAssignment(ctx context.Context, fpAuthorizer refreshable.Authorizer, resourceGroup string) error { // ARM ResourceGroup role assignments are not required in production. return nil } func (p *prod) E2EStorageAccountName() string { return p.e2eStorageAccountName } func (p *prod) E2EStorageAccountRGName() string { return p.e2eStorageAccountRGName } func (p *prod) E2EStorageAccountSubID() string { return p.e2eStorageAccountSubID } func (p *prod)
ShouldDeployDenyAssignment
identifier_name
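ManagedDomain in the record above accepts either a bare label or a fully qualified name under the RP's base domain, and returns the empty string for anything else. A standalone Go sketch of the same trimming logic, using a hypothetical base domain for illustration:

package main

import (
	"fmt"
	"strings"
)

// managedDomain mirrors prod.ManagedDomain with the base domain passed in.
// Only "foo" or "foo.<base>" are considered managed; anything else yields "".
func managedDomain(domain, base string) (string, error) {
	if domain == "" || strings.HasPrefix(domain, ".") || strings.HasSuffix(domain, ".") {
		return "", fmt.Errorf("invalid domain %q", domain)
	}
	domain = strings.TrimSuffix(domain, "."+base)
	if strings.ContainsRune(domain, '.') {
		// Still dotted after trimming: not a domain we manage.
		return "", nil
	}
	return domain + "." + base, nil
}

func main() {
	base := "eastus.aroapp.io" // hypothetical base domain
	fmt.Println(managedDomain("foo", base))             // foo.eastus.aroapp.io <nil>
	fmt.Println(managedDomain("foo."+base, base))       // foo.eastus.aroapp.io <nil>
	fmt.Println(managedDomain("foo.example.com", base)) // "" <nil>
}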
pathy.go
. N is the amount of scenarios to pick from the file. They are evenly spread out in terms of problem size.") os.Exit(0) } readNextArg() // Skip program name modeString := readNextArg() var p PathyParameters // Read command-line arguments switch (strings.ToLower(modeString)) { case "draw": p = getDrawModeParameters() case "single": p = getSingleModeParameters() case "multiple": p = getMultipleModeParameters() default: fmt.Printf("Unknown mode \"%s\", accepted modes are \"draw\", \"single\" and \"multiple\"\n", modeString) os.Exit(1) } // Check some of the arguments if (p.Mode == Draw || p.Mode == BenchAndDrawSingle || p.Mode == BenchAndDrawMultiple) && p.Scale < 1 { fmt.Println("Scale must be a positive integer.") os.Exit(1) } if (p.Mode == BenchMultiple || p.Mode == BenchAndDrawMultiple) && p.N < 1 { fmt.Println("N must be a positive integer.") os.Exit(1) } if (p.Mode == BenchSingle || p.Mode == BenchAndDrawSingle || p.Mode == BenchMultiple || p.Mode == BenchAndDrawMultiple) && p.Trials < 1 { fmt.Println("Trials must be a positive integer.") os.Exit(1) } // Run the appropriate mode switch (p.Mode) { case Draw: runDrawMode(p) case BenchSingle, BenchAndDrawSingle: runSingleMode(p) case BenchMultiple, BenchAndDrawMultiple: runMultipleMode(p) default: panic("Assertion failed: unexpected mode") } fmt.Println("Success") } func getDrawModeParameters() PathyParameters { if len(os.Args) != 5 { fmt.Printf("Wrong number of arguments. Run %s without parameters for more info.\n", os.Args[0]) os.Exit(1) } p := PathyParameters{} p.Mode = Draw p.InPath = readNextArg() p.OutPath = readNextArg() p.Scale = MustParseInt(readNextArg()) return p } func getSingleModeParameters() PathyParameters { if len(os.Args) != 9 && len(os.Args) != 11 { fmt.Printf("Wrong number of arguments. Run %s without parameters for more info.\n", os.Args[0]) os.Exit(1) } p := PathyParameters{} p.InPath = readNextArg() p.StartX = MustParseInt(readNextArg()) p.StartY = MustParseInt(readNextArg()) p.GoalX = MustParseInt(readNextArg()) p.GoalY = MustParseInt(readNextArg()) p.Algo = MustParsePathfindingFunction(readNextArg()) p.Trials = MustParseInt(readNextArg()) if len(os.Args) == 11 { p.Mode = BenchAndDrawSingle p.OutPath = readNextArg() p.Scale = MustParseInt(readNextArg()) } else { p.Mode = BenchSingle } return p } func getMultipleModeParameters() PathyParameters { if len(os.Args) != 6 && len(os.Args) != 8 { fmt.Printf("Wrong number of arguments. Run %s without parameters for more info.\n", os.Args[0]) os.Exit(1) } p := PathyParameters{} p.InPath = readNextArg() p.Algo = MustParsePathfindingFunction(readNextArg()) p.N = MustParseInt(readNextArg()) p.Trials = MustParseInt(readNextArg()) if len(os.Args) == 8
else { p.Mode = BenchMultiple } return p } func runDrawMode(p PathyParameters) { if p.Mode != Draw { panic("Assertion failed: unexpected mode") } var err error grid, err = LoadMap(p.InPath) if err != nil { fmt.Printf("Error reading file \"%s\": %s\n", p.InPath, err.Error()) os.Exit(1) } img := MakeMapImage(p.Scale) err = SaveImage(img, p.OutPath) if err != nil { fmt.Printf("Error writing image \"%s\": %s\n", p.OutPath, err.Error()) os.Exit(1) } } func runSingleMode(p PathyParameters) { if p.Mode != BenchSingle && p.Mode != BenchAndDrawSingle { panic("Assertion failed: unexpected mode") } var err error grid, err = LoadMap(p.InPath) if err != nil { fmt.Printf("Error reading file \"%s\": %s\n", p.InPath, err.Error()) os.Exit(1) } start := NewNode(p.StartX, p.StartY) goal := NewNode(p.GoalX, p.GoalY) path, turns, pathLen, avgAngle, avgRuntime := testOneScenario(start, goal, p.Algo, p.Trials) fmt.Printf("Stats: %d turn(s), length %.1f, avg angle %.1f rad (%.1f deg), runtime %dms\n", turns, pathLen, avgAngle, avgAngle*radToDeg, avgRuntime) if p.Mode == BenchAndDrawSingle { img := MakeMapImage(p.Scale) img = DrawPath(img, path, p.Scale) err = SaveImage(img, p.OutPath) if err != nil { fmt.Printf("Error writing image \"%s\": %s\n", p.OutPath, err.Error()) os.Exit(1) } } } func runMultipleMode(p PathyParameters) { if p.Mode != BenchMultiple && p.Mode != BenchAndDrawMultiple { panic("Assertion failed: unexpected mode") } // Load scenarios scenarios, err := LoadScenarios(p.InPath) if err != nil { fmt.Printf("Error loading scenarios file \"%s\": %s\n", p.InPath, err.Error()) os.Exit(1) } // Load map mapPath := filepath.Join(filepath.Dir(p.InPath), scenarios[0].MapName) grid, err = LoadMap(mapPath) if err != nil { fmt.Printf("Error reading map file \"%s\": %s\n", mapPath, err.Error()) os.Exit(1) } // If needed, create an output directory for images if p.Mode == BenchAndDrawMultiple { // Create the output directory if it doesn't exist _, err := os.Stat(p.OutPath) if os.IsNotExist(err) { err = os.Mkdir(p.OutPath, os.ModeDir) if err != nil { fmt.Printf("Error creating output directory: %s\n", err.Error()) os.Exit(1) } } } // Select n evenly spread out scenarios selectedScenarios := []Scenario{} var inc float64 if p.N >= len(scenarios) { inc = 1 p.N = len(scenarios) } else { inc = float64(len(scenarios)-1) / float64(p.N-1) } for i := 0.0; i < float64(len(scenarios)); i += inc { index := int(i) selectedScenarios = append(selectedScenarios, scenarios[index]) } // Assertion if len(selectedScenarios) != p.N { panic("Assertion failed: unexpected number of selected scenarios") } // Benchmark and draw scenarios sumTurnCount := 0.0 sumPathLen := 0.0 sumAvgAngle := 0.0 sumAvgRuntime := 0 for _, scenario := range selectedScenarios { // Assertion if scenario.MapName != scenarios[0].MapName { panic("Assertion failed: scenarios file referred to multiple map files") } sx, sy, gx, gy := scenario.Start.X, scenario.Start.Y, scenario.Goal.X, scenario.Goal.Y start := NewNode(sx,sy) goal := NewNode(gx,gy) path, turns, pathLen, avgAngle, avgRuntime := testOneScenario(start, goal, p.Algo, p.Trials) fmt.Printf("(%d,%d) -> (%d,%d) stats: %d turn(s), length %.1f, avg angle %.1f rad (%.1f deg), runtime %dms\n", sx, sy, gx, gy, turns, pathLen, avgAngle, avgAngle*radToDeg, avgRuntime) sumTurnCount += float64(turns) sumPathLen += pathLen sumAvgAngle += avgAngle sumAvgRuntime += avgRuntime if p.Mode == BenchAndDrawMultiple { // Create a nice name for the image ext := filepath.Ext(scenario.MapName) fname := 
scenario.MapName[0:len(scenario.MapName)-len(ext)] fname = fmt.Sprintf("%s_%d_%d_%d_%d.jpg", fname, sx, sy, gx, gy) out := filepath.Join(p.OutPath, fname) img := MakeMapImage(p.Scale) img = DrawPath(img, path, p.Scale) err
{ p.Mode = BenchAndDrawMultiple p.OutPath = readNextArg() p.Scale = MustParseInt(readNextArg()) }
conditional_block
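The scenario selection in the pathy.go record above spreads N picks evenly across the scenarios file by stepping a floating-point cursor with stride (len-1)/(N-1) and truncating to an index at each step. A small sketch of just that selection, showing which indices it lands on:

package main

import "fmt"

// selectIndices mirrors pathy's even spread: stride (len-1)/(n-1),
// truncating the float cursor to an integer index at each step.
func selectIndices(length, n int) []int {
	var inc float64
	if n >= length {
		inc = 1
		n = length
	} else {
		inc = float64(length-1) / float64(n-1)
	}
	out := make([]int, 0, n)
	for i := 0.0; i < float64(length); i += inc {
		out = append(out, int(i))
	}
	return out
}

func main() {
	fmt.Println(selectIndices(10, 4)) // [0 3 6 9]
	fmt.Println(selectIndices(10, 3)) // [0 4 9]
}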
pathy.go
. N is the amount of scenarios to pick from the file. They are evenly spread out in terms of problem size.") os.Exit(0) } readNextArg() // Skip program name modeString := readNextArg() var p PathyParameters // Read command-line arguments switch (strings.ToLower(modeString)) { case "draw": p = getDrawModeParameters() case "single": p = getSingleModeParameters() case "multiple": p = getMultipleModeParameters() default: fmt.Printf("Unknown mode \"%s\", accepted modes are \"draw\", \"single\" and \"multiple\"\n", modeString) os.Exit(1) } // Check some of the arguments if (p.Mode == Draw || p.Mode == BenchAndDrawSingle || p.Mode == BenchAndDrawMultiple) && p.Scale < 1 { fmt.Println("Scale must be a positive integer.") os.Exit(1) } if (p.Mode == BenchMultiple || p.Mode == BenchAndDrawMultiple) && p.N < 1 { fmt.Println("N must be a positive integer.") os.Exit(1) } if (p.Mode == BenchSingle || p.Mode == BenchAndDrawSingle || p.Mode == BenchMultiple || p.Mode == BenchAndDrawMultiple) && p.Trials < 1 { fmt.Println("Trials must be a positive integer.") os.Exit(1) } // Run the appropriate mode switch (p.Mode) { case Draw: runDrawMode(p) case BenchSingle, BenchAndDrawSingle: runSingleMode(p) case BenchMultiple, BenchAndDrawMultiple: runMultipleMode(p) default: panic("Assertion failed: unexpected mode") } fmt.Println("Success") } func
() PathyParameters { if len(os.Args) != 5 { fmt.Printf("Wrong number of arguments. Run %s without parameters for more info.\n", os.Args[0]) os.Exit(1) } p := PathyParameters{} p.Mode = Draw p.InPath = readNextArg() p.OutPath = readNextArg() p.Scale = MustParseInt(readNextArg()) return p } func getSingleModeParameters() PathyParameters { if len(os.Args) != 9 && len(os.Args) != 11 { fmt.Printf("Wrong number of arguments. Run %s without parameters for more info.\n", os.Args[0]) os.Exit(1) } p := PathyParameters{} p.InPath = readNextArg() p.StartX = MustParseInt(readNextArg()) p.StartY = MustParseInt(readNextArg()) p.GoalX = MustParseInt(readNextArg()) p.GoalY = MustParseInt(readNextArg()) p.Algo = MustParsePathfindingFunction(readNextArg()) p.Trials = MustParseInt(readNextArg()) if len(os.Args) == 11 { p.Mode = BenchAndDrawSingle p.OutPath = readNextArg() p.Scale = MustParseInt(readNextArg()) } else { p.Mode = BenchSingle } return p } func getMultipleModeParameters() PathyParameters { if len(os.Args) != 6 && len(os.Args) != 8 { fmt.Printf("Wrong number of arguments. Run %s without parameters for more info.\n", os.Args[0]) os.Exit(1) } p := PathyParameters{} p.InPath = readNextArg() p.Algo = MustParsePathfindingFunction(readNextArg()) p.N = MustParseInt(readNextArg()) p.Trials = MustParseInt(readNextArg()) if len(os.Args) == 8 { p.Mode = BenchAndDrawMultiple p.OutPath = readNextArg() p.Scale = MustParseInt(readNextArg()) } else { p.Mode = BenchMultiple } return p } func runDrawMode(p PathyParameters) { if p.Mode != Draw { panic("Assertion failed: unexpected mode") } var err error grid, err = LoadMap(p.InPath) if err != nil { fmt.Printf("Error reading file \"%s\": %s\n", p.InPath, err.Error()) os.Exit(1) } img := MakeMapImage(p.Scale) err = SaveImage(img, p.OutPath) if err != nil { fmt.Printf("Error writing image \"%s\": %s\n", p.OutPath, err.Error()) os.Exit(1) } } func runSingleMode(p PathyParameters) { if p.Mode != BenchSingle && p.Mode != BenchAndDrawSingle { panic("Assertion failed: unexpected mode") } var err error grid, err = LoadMap(p.InPath) if err != nil { fmt.Printf("Error reading file \"%s\": %s\n", p.InPath, err.Error()) os.Exit(1) } start := NewNode(p.StartX, p.StartY) goal := NewNode(p.GoalX, p.GoalY) path, turns, pathLen, avgAngle, avgRuntime := testOneScenario(start, goal, p.Algo, p.Trials) fmt.Printf("Stats: %d turn(s), length %.1f, avg angle %.1f rad (%.1f deg), runtime %dms\n", turns, pathLen, avgAngle, avgAngle*radToDeg, avgRuntime) if p.Mode == BenchAndDrawSingle { img := MakeMapImage(p.Scale) img = DrawPath(img, path, p.Scale) err = SaveImage(img, p.OutPath) if err != nil { fmt.Printf("Error writing image \"%s\": %s\n", p.OutPath, err.Error()) os.Exit(1) } } } func runMultipleMode(p PathyParameters) { if p.Mode != BenchMultiple && p.Mode != BenchAndDrawMultiple { panic("Assertion failed: unexpected mode") } // Load scenarios scenarios, err := LoadScenarios(p.InPath) if err != nil { fmt.Printf("Error loading scenarios file \"%s\": %s\n", p.InPath, err.Error()) os.Exit(1) } // Load map mapPath := filepath.Join(filepath.Dir(p.InPath), scenarios[0].MapName) grid, err = LoadMap(mapPath) if err != nil { fmt.Printf("Error reading map file \"%s\": %s\n", mapPath, err.Error()) os.Exit(1) } // If needed, create an output directory for images if p.Mode == BenchAndDrawMultiple { // Create the output directory if it doesn't exist _, err := os.Stat(p.OutPath) if os.IsNotExist(err) { err = os.Mkdir(p.OutPath, os.ModeDir) if err != nil { fmt.Printf("Error creating output directory: 
%s\n", err.Error()) os.Exit(1) } } } // Select n evenly spread out scenarios selectedScenarios := []Scenario{} var inc float64 if p.N >= len(scenarios) { inc = 1 p.N = len(scenarios) } else { inc = float64(len(scenarios)-1) / float64(p.N-1) } for i := 0.0; i < float64(len(scenarios)); i += inc { index := int(i) selectedScenarios = append(selectedScenarios, scenarios[index]) } // Assertion if len(selectedScenarios) != p.N { panic("Assertion failed: unexpected number of selected scenarios") } // Benchmark and draw scenarios sumTurnCount := 0.0 sumPathLen := 0.0 sumAvgAngle := 0.0 sumAvgRuntime := 0 for _, scenario := range selectedScenarios { // Assertion if scenario.MapName != scenarios[0].MapName { panic("Assertion failed: scenarios file referred to multiple map files") } sx, sy, gx, gy := scenario.Start.X, scenario.Start.Y, scenario.Goal.X, scenario.Goal.Y start := NewNode(sx,sy) goal := NewNode(gx,gy) path, turns, pathLen, avgAngle, avgRuntime := testOneScenario(start, goal, p.Algo, p.Trials) fmt.Printf("(%d,%d) -> (%d,%d) stats: %d turn(s), length %.1f, avg angle %.1f rad (%.1f deg), runtime %dms\n", sx, sy, gx, gy, turns, pathLen, avgAngle, avgAngle*radToDeg, avgRuntime) sumTurnCount += float64(turns) sumPathLen += pathLen sumAvgAngle += avgAngle sumAvgRuntime += avgRuntime if p.Mode == BenchAndDrawMultiple { // Create a nice name for the image ext := filepath.Ext(scenario.MapName) fname := scenario.MapName[0:len(scenario.MapName)-len(ext)] fname = fmt.Sprintf("%s_%d_%d_%d_%d.jpg", fname, sx, sy, gx, gy) out := filepath.Join(p.OutPath, fname) img := MakeMapImage(p.Scale) img = DrawPath(img, path, p.Scale) err
getDrawModeParameters
identifier_name
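readNextArg is used throughout the record above as a sequential cursor over os.Args, but its body does not appear in these excerpts. A plausible minimal implementation under that assumption; the index variable and bounds handling here are guesses, not the original code:

package main

import (
	"fmt"
	"os"
)

// argIndex and readNextArg are an assumed implementation of the cursor
// pathy.go uses; the real one may differ in naming and error handling.
var argIndex int

func readNextArg() string {
	if argIndex >= len(os.Args) {
		return ""
	}
	arg := os.Args[argIndex]
	argIndex++
	return arg
}

func main() {
	readNextArg() // skip the program name, as pathy.go does first
	fmt.Println("mode:", readNextArg())
}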
pathy.go
. N is the amount of scenarios to pick from the file. They are evenly spread out in terms of problem size.") os.Exit(0) } readNextArg() // Skip program name modeString := readNextArg() var p PathyParameters // Read command-line arguments switch (strings.ToLower(modeString)) { case "draw": p = getDrawModeParameters() case "single": p = getSingleModeParameters() case "multiple": p = getMultipleModeParameters() default: fmt.Printf("Unknown mode \"%s\", accepted modes are \"draw\", \"single\" and \"multiple\"\n", modeString) os.Exit(1) } // Check some of the arguments if (p.Mode == Draw || p.Mode == BenchAndDrawSingle || p.Mode == BenchAndDrawMultiple) && p.Scale < 1 { fmt.Println("Scale must be a positive integer.") os.Exit(1) } if (p.Mode == BenchMultiple || p.Mode == BenchAndDrawMultiple) && p.N < 1 { fmt.Println("N must be a positive integer.") os.Exit(1) } if (p.Mode == BenchSingle || p.Mode == BenchAndDrawSingle || p.Mode == BenchMultiple || p.Mode == BenchAndDrawMultiple) && p.Trials < 1 { fmt.Println("Trials must be a positive integer.") os.Exit(1) } // Run the appropriate mode switch (p.Mode) { case Draw: runDrawMode(p) case BenchSingle, BenchAndDrawSingle: runSingleMode(p) case BenchMultiple, BenchAndDrawMultiple: runMultipleMode(p) default: panic("Assertion failed: unexpected mode") } fmt.Println("Success") } func getDrawModeParameters() PathyParameters { if len(os.Args) != 5 { fmt.Printf("Wrong number of arguments. Run %s without parameters for more info.\n", os.Args[0]) os.Exit(1) } p := PathyParameters{} p.Mode = Draw p.InPath = readNextArg() p.OutPath = readNextArg() p.Scale = MustParseInt(readNextArg()) return p } func getSingleModeParameters() PathyParameters { if len(os.Args) != 9 && len(os.Args) != 11 { fmt.Printf("Wrong number of arguments. Run %s without parameters for more info.\n", os.Args[0]) os.Exit(1) } p := PathyParameters{} p.InPath = readNextArg() p.StartX = MustParseInt(readNextArg()) p.StartY = MustParseInt(readNextArg()) p.GoalX = MustParseInt(readNextArg()) p.GoalY = MustParseInt(readNextArg()) p.Algo = MustParsePathfindingFunction(readNextArg()) p.Trials = MustParseInt(readNextArg()) if len(os.Args) == 11 { p.Mode = BenchAndDrawSingle p.OutPath = readNextArg() p.Scale = MustParseInt(readNextArg()) } else { p.Mode = BenchSingle } return p } func getMultipleModeParameters() PathyParameters { if len(os.Args) != 6 && len(os.Args) != 8 { fmt.Printf("Wrong number of arguments. 
Run %s without parameters for more info.\n", os.Args[0]) os.Exit(1) } p := PathyParameters{} p.InPath = readNextArg() p.Algo = MustParsePathfindingFunction(readNextArg()) p.N = MustParseInt(readNextArg()) p.Trials = MustParseInt(readNextArg()) if len(os.Args) == 8 { p.Mode = BenchAndDrawMultiple p.OutPath = readNextArg() p.Scale = MustParseInt(readNextArg()) } else { p.Mode = BenchMultiple } return p } func runDrawMode(p PathyParameters) { if p.Mode != Draw { panic("Assertion failed: unexpected mode") } var err error grid, err = LoadMap(p.InPath) if err != nil { fmt.Printf("Error reading file \"%s\": %s\n", p.InPath, err.Error()) os.Exit(1) } img := MakeMapImage(p.Scale) err = SaveImage(img, p.OutPath) if err != nil { fmt.Printf("Error writing image \"%s\": %s\n", p.OutPath, err.Error()) os.Exit(1) } } func runSingleMode(p PathyParameters) { if p.Mode != BenchSingle && p.Mode != BenchAndDrawSingle { panic("Assertion failed: unexpected mode") } var err error grid, err = LoadMap(p.InPath) if err != nil { fmt.Printf("Error reading file \"%s\": %s\n", p.InPath, err.Error()) os.Exit(1) } start := NewNode(p.StartX, p.StartY) goal := NewNode(p.GoalX, p.GoalY) path, turns, pathLen, avgAngle, avgRuntime := testOneScenario(start, goal, p.Algo, p.Trials) fmt.Printf("Stats: %d turn(s), length %.1f, avg angle %.1f rad (%.1f deg), runtime %dms\n", turns, pathLen, avgAngle, avgAngle*radToDeg, avgRuntime)
img = DrawPath(img, path, p.Scale) err = SaveImage(img, p.OutPath) if err != nil { fmt.Printf("Error writing image \"%s\": %s\n", p.OutPath, err.Error()) os.Exit(1) } } } func runMultipleMode(p PathyParameters) { if p.Mode != BenchMultiple && p.Mode != BenchAndDrawMultiple { panic("Assertion failed: unexpected mode") } // Load scenarios scenarios, err := LoadScenarios(p.InPath) if err != nil { fmt.Printf("Error loading scenarios file \"%s\": %s\n", p.InPath, err.Error()) os.Exit(1) } // Load map mapPath := filepath.Join(filepath.Dir(p.InPath), scenarios[0].MapName) grid, err = LoadMap(mapPath) if err != nil { fmt.Printf("Error reading map file \"%s\": %s\n", mapPath, err.Error()) os.Exit(1) } // If needed, create an output directory for images if p.Mode == BenchAndDrawMultiple { // Create the output directory if it doesn't exist _, err := os.Stat(p.OutPath) if os.IsNotExist(err) { err = os.Mkdir(p.OutPath, os.ModeDir) if err != nil { fmt.Printf("Error creating output directory: %s\n", err.Error()) os.Exit(1) } } } // Select n evenly spread out scenarios selectedScenarios := []Scenario{} var inc float64 if p.N >= len(scenarios) { inc = 1 p.N = len(scenarios) } else { inc = float64(len(scenarios)-1) / float64(p.N-1) } for i := 0.0; i < float64(len(scenarios)); i += inc { index := int(i) selectedScenarios = append(selectedScenarios, scenarios[index]) } // Assertion if len(selectedScenarios) != p.N { panic("Assertion failed: unexpected number of selected scenarios") } // Benchmark and draw scenarios sumTurnCount := 0.0 sumPathLen := 0.0 sumAvgAngle := 0.0 sumAvgRuntime := 0 for _, scenario := range selectedScenarios { // Assertion if scenario.MapName != scenarios[0].MapName { panic("Assertion failed: scenarios file referred to multiple map files") } sx, sy, gx, gy := scenario.Start.X, scenario.Start.Y, scenario.Goal.X, scenario.Goal.Y start := NewNode(sx,sy) goal := NewNode(gx,gy) path, turns, pathLen, avgAngle, avgRuntime := testOneScenario(start, goal, p.Algo, p.Trials) fmt.Printf("(%d,%d) -> (%d,%d) stats: %d turn(s), length %.1f, avg angle %.1f rad (%.1f deg), runtime %dms\n", sx, sy, gx, gy, turns, pathLen, avgAngle, avgAngle*radToDeg, avgRuntime) sumTurnCount += float64(turns) sumPathLen += pathLen sumAvgAngle += avgAngle sumAvgRuntime += avgRuntime if p.Mode == BenchAndDrawMultiple { // Create a nice name for the image ext := filepath.Ext(scenario.MapName) fname := scenario.MapName[0:len(scenario.MapName)-len(ext)] fname = fmt.Sprintf("%s_%d_%d_%d_%d.jpg", fname, sx, sy, gx, gy) out := filepath.Join(p.OutPath, fname) img := MakeMapImage(p.Scale) img = DrawPath(img, path, p.Scale) err
if p.Mode == BenchAndDrawSingle { img := MakeMapImage(p.Scale)
random_line_split
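Similarly, MustParseInt is referenced in the argument parsing above without its body; a sketch of what such a helper likely looks like, assuming it terminates on malformed input in the same os.Exit(1) style pathy.go uses elsewhere (the error message is invented):

package main

import (
	"fmt"
	"os"
	"strconv"
)

// mustParseInt is an assumed stand-in for pathy's MustParseInt:
// parse the argument or terminate the program.
func mustParseInt(s string) int {
	n, err := strconv.Atoi(s)
	if err != nil {
		fmt.Printf("Invalid integer %q\n", s)
		os.Exit(1)
	}
	return n
}

func main() {
	fmt.Println(mustParseInt("42"))
}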
pathy.go
. N is the amount of scenarios to pick from the file. They are evenly spread out in terms of problem size.") os.Exit(0) } readNextArg() // Skip program name modeString := readNextArg() var p PathyParameters // Read command-line arguments switch (strings.ToLower(modeString)) { case "draw": p = getDrawModeParameters() case "single": p = getSingleModeParameters() case "multiple": p = getMultipleModeParameters() default: fmt.Printf("Unknown mode \"%s\", accepted modes are \"draw\", \"single\" and \"multiple\"\n", modeString) os.Exit(1) } // Check some of the arguments if (p.Mode == Draw || p.Mode == BenchAndDrawSingle || p.Mode == BenchAndDrawMultiple) && p.Scale < 1 { fmt.Println("Scale must be a positive integer.") os.Exit(1) } if (p.Mode == BenchMultiple || p.Mode == BenchAndDrawMultiple) && p.N < 1 { fmt.Println("N must be a positive integer.") os.Exit(1) } if (p.Mode == BenchSingle || p.Mode == BenchAndDrawSingle || p.Mode == BenchMultiple || p.Mode == BenchAndDrawMultiple) && p.Trials < 1 { fmt.Println("Trials must be a positive integer.") os.Exit(1) } // Run the appropriate mode switch (p.Mode) { case Draw: runDrawMode(p) case BenchSingle, BenchAndDrawSingle: runSingleMode(p) case BenchMultiple, BenchAndDrawMultiple: runMultipleMode(p) default: panic("Assertion failed: unexpected mode") } fmt.Println("Success") } func getDrawModeParameters() PathyParameters { if len(os.Args) != 5 { fmt.Printf("Wrong number of arguments. Run %s without parameters for more info.\n", os.Args[0]) os.Exit(1) } p := PathyParameters{} p.Mode = Draw p.InPath = readNextArg() p.OutPath = readNextArg() p.Scale = MustParseInt(readNextArg()) return p } func getSingleModeParameters() PathyParameters { if len(os.Args) != 9 && len(os.Args) != 11 { fmt.Printf("Wrong number of arguments. Run %s without parameters for more info.\n", os.Args[0]) os.Exit(1) } p := PathyParameters{} p.InPath = readNextArg() p.StartX = MustParseInt(readNextArg()) p.StartY = MustParseInt(readNextArg()) p.GoalX = MustParseInt(readNextArg()) p.GoalY = MustParseInt(readNextArg()) p.Algo = MustParsePathfindingFunction(readNextArg()) p.Trials = MustParseInt(readNextArg()) if len(os.Args) == 11 { p.Mode = BenchAndDrawSingle p.OutPath = readNextArg() p.Scale = MustParseInt(readNextArg()) } else { p.Mode = BenchSingle } return p } func getMultipleModeParameters() PathyParameters { if len(os.Args) != 6 && len(os.Args) != 8 { fmt.Printf("Wrong number of arguments. Run %s without parameters for more info.\n", os.Args[0]) os.Exit(1) } p := PathyParameters{} p.InPath = readNextArg() p.Algo = MustParsePathfindingFunction(readNextArg()) p.N = MustParseInt(readNextArg()) p.Trials = MustParseInt(readNextArg()) if len(os.Args) == 8 { p.Mode = BenchAndDrawMultiple p.OutPath = readNextArg() p.Scale = MustParseInt(readNextArg()) } else { p.Mode = BenchMultiple } return p } func runDrawMode(p PathyParameters) { if p.Mode != Draw { panic("Assertion failed: unexpected mode") } var err error grid, err = LoadMap(p.InPath) if err != nil { fmt.Printf("Error reading file \"%s\": %s\n", p.InPath, err.Error()) os.Exit(1) } img := MakeMapImage(p.Scale) err = SaveImage(img, p.OutPath) if err != nil { fmt.Printf("Error writing image \"%s\": %s\n", p.OutPath, err.Error()) os.Exit(1) } } func runSingleMode(p PathyParameters)
if err != nil { fmt.Printf("Error writing image \"%s\": %s\n", p.OutPath, err.Error()) os.Exit(1) } } } func runMultipleMode(p PathyParameters) { if p.Mode != BenchMultiple && p.Mode != BenchAndDrawMultiple { panic("Assertion failed: unexpected mode") } // Load scenarios scenarios, err := LoadScenarios(p.InPath) if err != nil { fmt.Printf("Error loading scenarios file \"%s\": %s\n", p.InPath, err.Error()) os.Exit(1) } // Load map mapPath := filepath.Join(filepath.Dir(p.InPath), scenarios[0].MapName) grid, err = LoadMap(mapPath) if err != nil { fmt.Printf("Error reading map file \"%s\": %s\n", mapPath, err.Error()) os.Exit(1) } // If needed, create an output directory for images if p.Mode == BenchAndDrawMultiple { // Create the output directory if it doesn't exist _, err := os.Stat(p.OutPath) if os.IsNotExist(err) { err = os.Mkdir(p.OutPath, os.ModeDir) if err != nil { fmt.Printf("Error creating output directory: %s\n", err.Error()) os.Exit(1) } } } // Select n evenly spread out scenarios selectedScenarios := []Scenario{} var inc float64 if p.N >= len(scenarios) { inc = 1 p.N = len(scenarios) } else { inc = float64(len(scenarios)-1) / float64(p.N-1) } for i := 0.0; i < float64(len(scenarios)); i += inc { index := int(i) selectedScenarios = append(selectedScenarios, scenarios[index]) } // Assertion if len(selectedScenarios) != p.N { panic("Assertion failed: unexpected number of selected scenarios") } // Benchmark and draw scenarios sumTurnCount := 0.0 sumPathLen := 0.0 sumAvgAngle := 0.0 sumAvgRuntime := 0 for _, scenario := range selectedScenarios { // Assertion if scenario.MapName != scenarios[0].MapName { panic("Assertion failed: scenarios file referred to multiple map files") } sx, sy, gx, gy := scenario.Start.X, scenario.Start.Y, scenario.Goal.X, scenario.Goal.Y start := NewNode(sx,sy) goal := NewNode(gx,gy) path, turns, pathLen, avgAngle, avgRuntime := testOneScenario(start, goal, p.Algo, p.Trials) fmt.Printf("(%d,%d) -> (%d,%d) stats: %d turn(s), length %.1f, avg angle %.1f rad (%.1f deg), runtime %dms\n", sx, sy, gx, gy, turns, pathLen, avgAngle, avgAngle*radToDeg, avgRuntime) sumTurnCount += float64(turns) sumPathLen += pathLen sumAvgAngle += avgAngle sumAvgRuntime += avgRuntime if p.Mode == BenchAndDrawMultiple { // Create a nice name for the image ext := filepath.Ext(scenario.MapName) fname := scenario.MapName[0:len(scenario.MapName)-len(ext)] fname = fmt.Sprintf("%s_%d_%d_%d_%d.jpg", fname, sx, sy, gx, gy) out := filepath.Join(p.OutPath, fname) img := MakeMapImage(p.Scale) img = DrawPath(img, path, p.Scale) err
{ if p.Mode != BenchSingle && p.Mode != BenchAndDrawSingle { panic("Assertion failed: unexpected mode") } var err error grid, err = LoadMap(p.InPath) if err != nil { fmt.Printf("Error reading file \"%s\": %s\n", p.InPath, err.Error()) os.Exit(1) } start := NewNode(p.StartX, p.StartY) goal := NewNode(p.GoalX, p.GoalY) path, turns, pathLen, avgAngle, avgRuntime := testOneScenario(start, goal, p.Algo, p.Trials) fmt.Printf("Stats: %d turn(s), length %.1f, avg angle %.1f rad (%.1f deg), runtime %dms\n", turns, pathLen, avgAngle, avgAngle*radToDeg, avgRuntime) if p.Mode == BenchAndDrawSingle { img := MakeMapImage(p.Scale) img = DrawPath(img, path, p.Scale) err = SaveImage(img, p.OutPath)
identifier_body
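The per-scenario image name in runMultipleMode strips the map file's extension and appends the start and goal coordinates. A self-contained Go sketch of that naming, with a hypothetical map name:

package main

import (
	"fmt"
	"path/filepath"
)

// imageName mirrors the fname construction in runMultipleMode:
// drop the extension, append coordinates, emit a .jpg name.
func imageName(mapName string, sx, sy, gx, gy int) string {
	ext := filepath.Ext(mapName)
	base := mapName[:len(mapName)-len(ext)]
	return fmt.Sprintf("%s_%d_%d_%d_%d.jpg", base, sx, sy, gx, gy)
}

func main() {
	fmt.Println(imageName("arena.map", 5, 6, 20, 30)) // arena_5_6_20_30.jpg
}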
dac.rs
to avoid losing any //! transfer events generated by the timer (for example, when 2 update cycles occur before the DMA //! transfer completion is handled). In this mode, by the time DMA swaps buffers, there is always a valid buffer in the //! "next-transfer" double-buffer location for the DMA transfer. Once a transfer completes, //! software then has exactly one batch duration to fill the next buffer before its //! transfer begins. If software does not meet this deadline, old data will be repeatedly generated //! on the output and output will be shifted by one batch. //! //! ## Multiple Samples to Single DAC Codes //! //! For some applications, it may be desirable to generate a single DAC code from multiple ADC //! samples. In order to maintain timing characteristics between ADC samples and DAC code outputs, //! applications are required to generate one DAC code for each ADC sample. To accomodate mapping //! multiple inputs to a single output, the output code can be repeated a number of times in the //! output buffer corresponding with the number of input samples that were used to generate it. //! //! //! # Note //! //! There is a very small amount of latency between updating the two DACs due to bus matrix //! priority. As such, one of the DACs will be updated marginally earlier before the other because //! the DMA requests are generated simultaneously. This can be avoided by providing a known offset //! to other DMA requests, which can be completed by setting e.g. DAC0's comparison to a //! counter value of 2 and DAC1's comparison to a counter value of 3. This will have the effect of //! generating the DAC updates with a known latency of 1 timer tick to each other and prevent the //! DMAs from racing for the bus. As implemented, the DMA channels utilize natural priority of the //! DMA channels to arbitrate which transfer occurs first. //! //! //! # Limitations //! //! While double-buffered mode is used for DMA to avoid lost DAC-update events, there is no check //! for re-use of a previously provided DAC output buffer. It is assumed that the DMA request is //! served promptly after the transfer completes. use stm32h7xx_hal as hal; use mutex_trait::Mutex; use super::design_parameters::{SampleBuffer, MAX_SAMPLE_BUFFER_SIZE}; use super::timers; use core::convert::TryFrom; use hal::{ dma::{ dma::{DMAReq, DmaConfig}, traits::TargetAddress, DMAError, MemoryToPeripheral, Transfer, }, spi::{HalDisabledSpi, HalEnabledSpi, HalSpi}, }; // The following global buffers are used for the DAC code DMA transfers. Two buffers are used for // each transfer in a ping-pong buffer configuration (one is being prepared while the other is being // processed). Note that the contents of AXI SRAM is uninitialized, so the buffer contents on // startup are undefined. The dimensions are `ADC_BUF[adc_index][ping_pong_index][sample_index]`. #[link_section = ".axisram.buffers"] static mut DAC_BUF: [[SampleBuffer; 2]; 2] = [[[0; MAX_SAMPLE_BUFFER_SIZE]; 2]; 2]; /// Custom type for referencing DAC output codes. /// The internal integer is the raw code written to the DAC output register. #[derive(Copy, Clone)] pub struct DacCode(pub u16); impl DacCode { // The DAC output range in bipolar mode (including the external output op-amp) is +/- 4.096 // V with 16-bit resolution. The anti-aliasing filter has an additional gain of 2.5. pub const FULL_SCALE: f32 = 4.096 * 2.5; pub const VOLT_PER_LSB: f32 = -Self::FULL_SCALE / i16::MIN as f32; pub const LSB_PER_VOLT: f32 = 1. 
/ Self::VOLT_PER_LSB; } impl TryFrom<f32> for DacCode { type Error = (); fn try_from(voltage: f32) -> Result<DacCode, ()> { let code = voltage * Self::LSB_PER_VOLT; if !(i16::MIN as f32..=i16::MAX as f32).contains(&code) { Err(()) } else { Ok(DacCode::from(code as i16)) } } } impl From<DacCode> for f32 { fn from(code: DacCode) -> f32 { i16::from(code) as f32 * DacCode::VOLT_PER_LSB } } impl From<DacCode> for i16 { fn from(code: DacCode) -> i16 { (code.0 as i16).wrapping_sub(i16::MIN) } } impl From<i16> for DacCode { /// Encode signed 16-bit values into DAC offset binary for a bipolar output configuration. fn from(value: i16) -> Self { Self(value.wrapping_add(i16::MIN) as u16) } } impl From<u16> for DacCode { /// Create a dac code from the provided DAC output code. fn
(value: u16) -> Self { Self(value) } } macro_rules! dac_output { ($name:ident, $index:literal, $data_stream:ident, $spi:ident, $trigger_channel:ident, $dma_req:ident) => { /// $spi is used as a type for indicating a DMA transfer into the SPI TX FIFO struct $spi { spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Disabled, u16>, _channel: timers::tim2::$trigger_channel, } impl $spi { pub fn new( _channel: timers::tim2::$trigger_channel, spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Disabled, u16>, ) -> Self { Self { spi, _channel } } /// Start the SPI and begin operating in a DMA-driven transfer mode. pub fn start_dma(&mut self) { // Allow the SPI FIFOs to operate using only DMA data channels. self.spi.enable_dma_tx(); // Enable SPI and start it in infinite transaction mode. self.spi.inner().cr1.modify(|_, w| w.spe().set_bit()); self.spi.inner().cr1.modify(|_, w| w.cstart().started()); } } // Note(unsafe): This is safe because the DMA request line is logically owned by this module. // Additionally, the SPI is owned by this structure and is known to be configured for u16 word // sizes. unsafe impl TargetAddress<MemoryToPeripheral> for $spi { /// SPI is configured to operate using 16-bit transfer words. type MemSize = u16; /// SPI DMA requests are generated whenever TIM2 CHx ($dma_req) comparison occurs. const REQUEST_LINE: Option<u8> = Some(DMAReq::$dma_req as u8); /// Whenever the DMA request occurs, it should write into SPI's TX FIFO. fn address(&self) -> usize { &self.spi.inner().txdr as *const _ as usize } } /// Represents data associated with DAC. pub struct $name { // Note: SPI TX functionality may not be used from this structure to ensure safety with DMA. transfer: Transfer< hal::dma::dma::$data_stream<hal::stm32::DMA1>, $spi, MemoryToPeripheral, &'static mut [u16], hal::dma::DBTransfer, >, } impl $name { /// Construct the DAC output channel. /// /// # Args /// * `spi` - The SPI interface used to communicate with the ADC. /// * `stream` - The DMA stream used to write DAC codes over SPI. /// * `trigger_channel` - The sampling timer output compare channel for update triggers. pub fn new( spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Enabled, u16>, stream: hal::dma::dma::$data_stream<hal::stm32::DMA1>, trigger_channel: timers::tim2::$trigger_channel, batch_size: usize, ) -> Self { // Generate DMA events when an output compare of the timer hitting zero (timer roll over) // occurs. trigger_channel.listen_dma(); trigger_channel.to_output_compare(4 + $index); // The stream constantly writes to the TX FIFO to write new update codes. let trigger_config = DmaConfig::default() .memory_increment(true) .double_buffer(true) .peripheral_increment(false); // Listen for any potential SPI error signals, which may indicate that we are not generating // update codes. let mut spi = spi.disable(); spi.listen(hal::spi::Event::Error); // AXISRAM is uninitialized. As such, we manually initialize it for a 0V DAC output // here before starting the transfer . // Note(unsafe): We currently own all DAC_BUF[index] buffers and are not using them // elsewhere, so it is safe to access them here. for buf in unsafe { DAC_BUF[$index].iter_mut() } { for byte in buf.iter
from
identifier_name
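The dac.rs record above fixes the DAC scaling (4.096 V full scale times the 2.5 anti-aliasing gain, so roughly ±10.24 V across the signed 16-bit range) and encodes codes as offset binary via a wrapping add of i16::MIN. To keep the examples in this document in one language, here is that arithmetic transliterated to Go; the printed values follow directly from the constants:

package main

import "fmt"

const (
	fullScale  = 4.096 * 2.5        // ±10.24 V output range
	voltPerLSB = fullScale / 32768.0 // -FULL_SCALE / i16::MIN in the Rust source
	lsbPerVolt = 1.0 / voltPerLSB   // 3200 codes per volt
)

// encode mirrors DacCode::from(i16): wrapping-add i16::MIN,
// which is equivalent to flipping the sign bit.
func encode(v int16) uint16 { return uint16(v) ^ 0x8000 }

// decode mirrors i16::from(DacCode): wrapping-sub i16::MIN.
func decode(c uint16) int16 { return int16(c ^ 0x8000) }

func main() {
	code := int16(1.0 * lsbPerVolt)   // 1 V -> 3200 LSB
	fmt.Println(code, encode(code))   // 3200 35968
	fmt.Println(decode(encode(code))) // 3200 round-trips
}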
dac.rs
avoid losing any //! transfer events generated by the timer (for example, when 2 update cycles occur before the DMA //! transfer completion is handled). In this mode, by the time DMA swaps buffers, there is always a valid buffer in the //! "next-transfer" double-buffer location for the DMA transfer. Once a transfer completes, //! software then has exactly one batch duration to fill the next buffer before its //! transfer begins. If software does not meet this deadline, old data will be repeatedly generated //! on the output and output will be shifted by one batch. //! //! ## Multiple Samples to Single DAC Codes //! //! For some applications, it may be desirable to generate a single DAC code from multiple ADC //! samples. In order to maintain timing characteristics between ADC samples and DAC code outputs, //! applications are required to generate one DAC code for each ADC sample. To accomodate mapping //! multiple inputs to a single output, the output code can be repeated a number of times in the //! output buffer corresponding with the number of input samples that were used to generate it. //! //! //! # Note //! //! There is a very small amount of latency between updating the two DACs due to bus matrix //! priority. As such, one of the DACs will be updated marginally earlier before the other because //! the DMA requests are generated simultaneously. This can be avoided by providing a known offset //! to other DMA requests, which can be completed by setting e.g. DAC0's comparison to a //! counter value of 2 and DAC1's comparison to a counter value of 3. This will have the effect of //! generating the DAC updates with a known latency of 1 timer tick to each other and prevent the //! DMAs from racing for the bus. As implemented, the DMA channels utilize natural priority of the //! DMA channels to arbitrate which transfer occurs first. //! //! //! # Limitations //! //! While double-buffered mode is used for DMA to avoid lost DAC-update events, there is no check //! for re-use of a previously provided DAC output buffer. It is assumed that the DMA request is //! served promptly after the transfer completes. use stm32h7xx_hal as hal; use mutex_trait::Mutex; use super::design_parameters::{SampleBuffer, MAX_SAMPLE_BUFFER_SIZE}; use super::timers; use core::convert::TryFrom; use hal::{ dma::{ dma::{DMAReq, DmaConfig}, traits::TargetAddress, DMAError, MemoryToPeripheral, Transfer, }, spi::{HalDisabledSpi, HalEnabledSpi, HalSpi}, }; // The following global buffers are used for the DAC code DMA transfers. Two buffers are used for // each transfer in a ping-pong buffer configuration (one is being prepared while the other is being // processed). Note that the contents of AXI SRAM is uninitialized, so the buffer contents on // startup are undefined. The dimensions are `ADC_BUF[adc_index][ping_pong_index][sample_index]`. #[link_section = ".axisram.buffers"] static mut DAC_BUF: [[SampleBuffer; 2]; 2] = [[[0; MAX_SAMPLE_BUFFER_SIZE]; 2]; 2]; /// Custom type for referencing DAC output codes. /// The internal integer is the raw code written to the DAC output register. #[derive(Copy, Clone)] pub struct DacCode(pub u16); impl DacCode { // The DAC output range in bipolar mode (including the external output op-amp) is +/- 4.096 // V with 16-bit resolution. The anti-aliasing filter has an additional gain of 2.5. pub const FULL_SCALE: f32 = 4.096 * 2.5; pub const VOLT_PER_LSB: f32 = -Self::FULL_SCALE / i16::MIN as f32; pub const LSB_PER_VOLT: f32 = 1. 
/ Self::VOLT_PER_LSB; } impl TryFrom<f32> for DacCode { type Error = (); fn try_from(voltage: f32) -> Result<DacCode, ()> { let code = voltage * Self::LSB_PER_VOLT; if !(i16::MIN as f32..=i16::MAX as f32).contains(&code) { Err(()) } else { Ok(DacCode::from(code as i16)) } } } impl From<DacCode> for f32 { fn from(code: DacCode) -> f32 { i16::from(code) as f32 * DacCode::VOLT_PER_LSB } } impl From<DacCode> for i16 { fn from(code: DacCode) -> i16 { (code.0 as i16).wrapping_sub(i16::MIN) } } impl From<i16> for DacCode { /// Encode signed 16-bit values into DAC offset binary for a bipolar output configuration. fn from(value: i16) -> Self { Self(value.wrapping_add(i16::MIN) as u16) } } impl From<u16> for DacCode { /// Create a dac code from the provided DAC output code. fn from(value: u16) -> Self
} macro_rules! dac_output { ($name:ident, $index:literal, $data_stream:ident, $spi:ident, $trigger_channel:ident, $dma_req:ident) => { /// $spi is used as a type for indicating a DMA transfer into the SPI TX FIFO struct $spi { spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Disabled, u16>, _channel: timers::tim2::$trigger_channel, } impl $spi { pub fn new( _channel: timers::tim2::$trigger_channel, spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Disabled, u16>, ) -> Self { Self { spi, _channel } } /// Start the SPI and begin operating in a DMA-driven transfer mode. pub fn start_dma(&mut self) { // Allow the SPI FIFOs to operate using only DMA data channels. self.spi.enable_dma_tx(); // Enable SPI and start it in infinite transaction mode. self.spi.inner().cr1.modify(|_, w| w.spe().set_bit()); self.spi.inner().cr1.modify(|_, w| w.cstart().started()); } } // Note(unsafe): This is safe because the DMA request line is logically owned by this module. // Additionally, the SPI is owned by this structure and is known to be configured for u16 word // sizes. unsafe impl TargetAddress<MemoryToPeripheral> for $spi { /// SPI is configured to operate using 16-bit transfer words. type MemSize = u16; /// SPI DMA requests are generated whenever TIM2 CHx ($dma_req) comparison occurs. const REQUEST_LINE: Option<u8> = Some(DMAReq::$dma_req as u8); /// Whenever the DMA request occurs, it should write into SPI's TX FIFO. fn address(&self) -> usize { &self.spi.inner().txdr as *const _ as usize } } /// Represents data associated with DAC. pub struct $name { // Note: SPI TX functionality may not be used from this structure to ensure safety with DMA. transfer: Transfer< hal::dma::dma::$data_stream<hal::stm32::DMA1>, $spi, MemoryToPeripheral, &'static mut [u16], hal::dma::DBTransfer, >, } impl $name { /// Construct the DAC output channel. /// /// # Args /// * `spi` - The SPI interface used to communicate with the ADC. /// * `stream` - The DMA stream used to write DAC codes over SPI. /// * `trigger_channel` - The sampling timer output compare channel for update triggers. pub fn new( spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Enabled, u16>, stream: hal::dma::dma::$data_stream<hal::stm32::DMA1>, trigger_channel: timers::tim2::$trigger_channel, batch_size: usize, ) -> Self { // Generate DMA events when an output compare of the timer hitting zero (timer roll over) // occurs. trigger_channel.listen_dma(); trigger_channel.to_output_compare(4 + $index); // The stream constantly writes to the TX FIFO to write new update codes. let trigger_config = DmaConfig::default() .memory_increment(true) .double_buffer(true) .peripheral_increment(false); // Listen for any potential SPI error signals, which may indicate that we are not generating // update codes. let mut spi = spi.disable(); spi.listen(hal::spi::Event::Error); // AXISRAM is uninitialized. As such, we manually initialize it for a 0V DAC output // here before starting the transfer . // Note(unsafe): We currently own all DAC_BUF[index] buffers and are not using them // elsewhere, so it is safe to access them here. for buf in unsafe { DAC_BUF[$index].iter_mut() } { for byte in buf.iter
{ Self(value) }
identifier_body
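The module docs above state that once a double-buffered transfer completes, software has exactly one batch duration to fill the next buffer. A trivial Go arithmetic sketch of that deadline under assumed numbers; the sample rate and batch size are placeholders, not Stabilizer's actual configuration:

package main

import "fmt"

func main() {
	const (
		sampleRateHz = 100_000.0 // hypothetical ADC/DAC update rate
		batchSize    = 8.0       // hypothetical samples per batch
	)
	// Deadline to refill the "next" half of the double buffer before DMA wraps.
	deadline := batchSize / sampleRateHz
	fmt.Printf("fill deadline: %.0f microseconds\n", deadline*1e6) // 80 microseconds
}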
dac.rs
avoid losing any //! transfer events generated by the timer (for example, when 2 update cycles occur before the DMA //! transfer completion is handled). In this mode, by the time DMA swaps buffers, there is always a valid buffer in the //! "next-transfer" double-buffer location for the DMA transfer. Once a transfer completes, //! software then has exactly one batch duration to fill the next buffer before its //! transfer begins. If software does not meet this deadline, old data will be repeatedly generated //! on the output and output will be shifted by one batch. //! //! ## Multiple Samples to Single DAC Codes //! //! For some applications, it may be desirable to generate a single DAC code from multiple ADC //! samples. In order to maintain timing characteristics between ADC samples and DAC code outputs, //! applications are required to generate one DAC code for each ADC sample. To accomodate mapping //! multiple inputs to a single output, the output code can be repeated a number of times in the //! output buffer corresponding with the number of input samples that were used to generate it. //! //! //! # Note //! //! There is a very small amount of latency between updating the two DACs due to bus matrix //! priority. As such, one of the DACs will be updated marginally earlier before the other because //! the DMA requests are generated simultaneously. This can be avoided by providing a known offset //! to other DMA requests, which can be completed by setting e.g. DAC0's comparison to a //! counter value of 2 and DAC1's comparison to a counter value of 3. This will have the effect of //! generating the DAC updates with a known latency of 1 timer tick to each other and prevent the //! DMAs from racing for the bus. As implemented, the DMA channels utilize natural priority of the //! DMA channels to arbitrate which transfer occurs first. //! //! //! # Limitations //! //! While double-buffered mode is used for DMA to avoid lost DAC-update events, there is no check //! for re-use of a previously provided DAC output buffer. It is assumed that the DMA request is //! served promptly after the transfer completes. use stm32h7xx_hal as hal; use mutex_trait::Mutex; use super::design_parameters::{SampleBuffer, MAX_SAMPLE_BUFFER_SIZE}; use super::timers; use core::convert::TryFrom; use hal::{ dma::{ dma::{DMAReq, DmaConfig}, traits::TargetAddress, DMAError, MemoryToPeripheral, Transfer, }, spi::{HalDisabledSpi, HalEnabledSpi, HalSpi}, }; // The following global buffers are used for the DAC code DMA transfers. Two buffers are used for // each transfer in a ping-pong buffer configuration (one is being prepared while the other is being // processed). Note that the contents of AXI SRAM is uninitialized, so the buffer contents on // startup are undefined. The dimensions are `ADC_BUF[adc_index][ping_pong_index][sample_index]`. #[link_section = ".axisram.buffers"] static mut DAC_BUF: [[SampleBuffer; 2]; 2] = [[[0; MAX_SAMPLE_BUFFER_SIZE]; 2]; 2]; /// Custom type for referencing DAC output codes. /// The internal integer is the raw code written to the DAC output register. #[derive(Copy, Clone)] pub struct DacCode(pub u16); impl DacCode { // The DAC output range in bipolar mode (including the external output op-amp) is +/- 4.096 // V with 16-bit resolution. The anti-aliasing filter has an additional gain of 2.5. pub const FULL_SCALE: f32 = 4.096 * 2.5; pub const VOLT_PER_LSB: f32 = -Self::FULL_SCALE / i16::MIN as f32; pub const LSB_PER_VOLT: f32 = 1. 
/ Self::VOLT_PER_LSB; } impl TryFrom<f32> for DacCode { type Error = (); fn try_from(voltage: f32) -> Result<DacCode, ()> { let code = voltage * Self::LSB_PER_VOLT; if !(i16::MIN as f32..=i16::MAX as f32).contains(&code) { Err(()) } else
} } impl From<DacCode> for f32 { fn from(code: DacCode) -> f32 { i16::from(code) as f32 * DacCode::VOLT_PER_LSB } } impl From<DacCode> for i16 { fn from(code: DacCode) -> i16 { (code.0 as i16).wrapping_sub(i16::MIN) } } impl From<i16> for DacCode { /// Encode signed 16-bit values into DAC offset binary for a bipolar output configuration. fn from(value: i16) -> Self { Self(value.wrapping_add(i16::MIN) as u16) } } impl From<u16> for DacCode { /// Create a dac code from the provided DAC output code. fn from(value: u16) -> Self { Self(value) } } macro_rules! dac_output { ($name:ident, $index:literal, $data_stream:ident, $spi:ident, $trigger_channel:ident, $dma_req:ident) => { /// $spi is used as a type for indicating a DMA transfer into the SPI TX FIFO struct $spi { spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Disabled, u16>, _channel: timers::tim2::$trigger_channel, } impl $spi { pub fn new( _channel: timers::tim2::$trigger_channel, spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Disabled, u16>, ) -> Self { Self { spi, _channel } } /// Start the SPI and begin operating in a DMA-driven transfer mode. pub fn start_dma(&mut self) { // Allow the SPI FIFOs to operate using only DMA data channels. self.spi.enable_dma_tx(); // Enable SPI and start it in infinite transaction mode. self.spi.inner().cr1.modify(|_, w| w.spe().set_bit()); self.spi.inner().cr1.modify(|_, w| w.cstart().started()); } } // Note(unsafe): This is safe because the DMA request line is logically owned by this module. // Additionally, the SPI is owned by this structure and is known to be configured for u16 word // sizes. unsafe impl TargetAddress<MemoryToPeripheral> for $spi { /// SPI is configured to operate using 16-bit transfer words. type MemSize = u16; /// SPI DMA requests are generated whenever TIM2 CHx ($dma_req) comparison occurs. const REQUEST_LINE: Option<u8> = Some(DMAReq::$dma_req as u8); /// Whenever the DMA request occurs, it should write into SPI's TX FIFO. fn address(&self) -> usize { &self.spi.inner().txdr as *const _ as usize } } /// Represents data associated with DAC. pub struct $name { // Note: SPI TX functionality may not be used from this structure to ensure safety with DMA. transfer: Transfer< hal::dma::dma::$data_stream<hal::stm32::DMA1>, $spi, MemoryToPeripheral, &'static mut [u16], hal::dma::DBTransfer, >, } impl $name { /// Construct the DAC output channel. /// /// # Args /// * `spi` - The SPI interface used to communicate with the ADC. /// * `stream` - The DMA stream used to write DAC codes over SPI. /// * `trigger_channel` - The sampling timer output compare channel for update triggers. pub fn new( spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Enabled, u16>, stream: hal::dma::dma::$data_stream<hal::stm32::DMA1>, trigger_channel: timers::tim2::$trigger_channel, batch_size: usize, ) -> Self { // Generate DMA events when an output compare of the timer hitting zero (timer roll over) // occurs. trigger_channel.listen_dma(); trigger_channel.to_output_compare(4 + $index); // The stream constantly writes to the TX FIFO to write new update codes. let trigger_config = DmaConfig::default() .memory_increment(true) .double_buffer(true) .peripheral_increment(false); // Listen for any potential SPI error signals, which may indicate that we are not generating // update codes. let mut spi = spi.disable(); spi.listen(hal::spi::Event::Error); // AXISRAM is uninitialized. As such, we manually initialize it for a 0V DAC output // here before starting the transfer . 
// Note(unsafe): We currently own all DAC_BUF[index] buffers and are not using them // elsewhere, so it is safe to access them here. for buf in unsafe { DAC_BUF[$index].iter_mut() } { for byte in buf.iter
{ Ok(DacCode::from(code as i16)) }
conditional_block
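The TryFrom<f32> impl shown in the record above rejects any voltage whose scaled code would leave the signed 16-bit range, i.e. anything outside roughly ±10.24 V. A Go sketch of the same guard, reusing the conversion constant from the earlier snippet:

package main

import (
	"errors"
	"fmt"
)

const lsbPerVolt = 32768.0 / (4.096 * 2.5) // 3200 codes per volt

// codeFromVolts mirrors DacCode::try_from(f32): scale, then range-check
// against the inclusive i16 bounds before converting.
func codeFromVolts(v float64) (int16, error) {
	code := v * lsbPerVolt
	if code < -32768 || code > 32767 {
		return 0, errors.New("voltage out of DAC range")
	}
	return int16(code), nil
}

func main() {
	fmt.Println(codeFromVolts(1.0))  // 3200 <nil>
	fmt.Println(codeFromVolts(12.0)) // 0 voltage out of DAC range
}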
dac.rs
to avoid losing any //! transfer events generated by the timer (for example, when 2 update cycles occur before the DMA //! transfer completion is handled). In this mode, by the time DMA swaps buffers, there is always a valid buffer in the //! "next-transfer" double-buffer location for the DMA transfer. Once a transfer completes, //! software then has exactly one batch duration to fill the next buffer before its //! transfer begins. If software does not meet this deadline, old data will be repeatedly generated //! on the output and output will be shifted by one batch. //! //! ## Multiple Samples to Single DAC Codes //! //! For some applications, it may be desirable to generate a single DAC code from multiple ADC //! samples. In order to maintain timing characteristics between ADC samples and DAC code outputs, //! applications are required to generate one DAC code for each ADC sample. To accomodate mapping //! multiple inputs to a single output, the output code can be repeated a number of times in the //! output buffer corresponding with the number of input samples that were used to generate it. //! //! //! # Note //! //! There is a very small amount of latency between updating the two DACs due to bus matrix //! priority. As such, one of the DACs will be updated marginally earlier before the other because //! the DMA requests are generated simultaneously. This can be avoided by providing a known offset //! to other DMA requests, which can be completed by setting e.g. DAC0's comparison to a //! counter value of 2 and DAC1's comparison to a counter value of 3. This will have the effect of //! generating the DAC updates with a known latency of 1 timer tick to each other and prevent the //! DMAs from racing for the bus. As implemented, the DMA channels utilize natural priority of the //! DMA channels to arbitrate which transfer occurs first. //! //! //! # Limitations //! //! While double-buffered mode is used for DMA to avoid lost DAC-update events, there is no check //! for re-use of a previously provided DAC output buffer. It is assumed that the DMA request is //! served promptly after the transfer completes. use stm32h7xx_hal as hal; use mutex_trait::Mutex; use super::design_parameters::{SampleBuffer, MAX_SAMPLE_BUFFER_SIZE}; use super::timers; use core::convert::TryFrom; use hal::{ dma::{ dma::{DMAReq, DmaConfig}, traits::TargetAddress, DMAError, MemoryToPeripheral, Transfer, }, spi::{HalDisabledSpi, HalEnabledSpi, HalSpi}, }; // The following global buffers are used for the DAC code DMA transfers. Two buffers are used for // each transfer in a ping-pong buffer configuration (one is being prepared while the other is being // processed). Note that the contents of AXI SRAM is uninitialized, so the buffer contents on // startup are undefined. The dimensions are `ADC_BUF[adc_index][ping_pong_index][sample_index]`. #[link_section = ".axisram.buffers"] static mut DAC_BUF: [[SampleBuffer; 2]; 2] = [[[0; MAX_SAMPLE_BUFFER_SIZE]; 2]; 2]; /// Custom type for referencing DAC output codes. /// The internal integer is the raw code written to the DAC output register. #[derive(Copy, Clone)] pub struct DacCode(pub u16); impl DacCode { // The DAC output range in bipolar mode (including the external output op-amp) is +/- 4.096 // V with 16-bit resolution. The anti-aliasing filter has an additional gain of 2.5. pub const FULL_SCALE: f32 = 4.096 * 2.5; pub const VOLT_PER_LSB: f32 = -Self::FULL_SCALE / i16::MIN as f32; pub const LSB_PER_VOLT: f32 = 1. 
/ Self::VOLT_PER_LSB; } impl TryFrom<f32> for DacCode { type Error = (); fn try_from(voltage: f32) -> Result<DacCode, ()> { let code = voltage * Self::LSB_PER_VOLT; if !(i16::MIN as f32..=i16::MAX as f32).contains(&code) { Err(()) } else { Ok(DacCode::from(code as i16)) } } } impl From<DacCode> for f32 { fn from(code: DacCode) -> f32 { i16::from(code) as f32 * DacCode::VOLT_PER_LSB } } impl From<DacCode> for i16 { fn from(code: DacCode) -> i16 { (code.0 as i16).wrapping_sub(i16::MIN) } } impl From<i16> for DacCode { /// Encode signed 16-bit values into DAC offset binary for a bipolar output configuration. fn from(value: i16) -> Self { Self(value.wrapping_add(i16::MIN) as u16) } } impl From<u16> for DacCode { /// Create a dac code from the provided DAC output code. fn from(value: u16) -> Self { Self(value) } } macro_rules! dac_output { ($name:ident, $index:literal, $data_stream:ident, $spi:ident, $trigger_channel:ident, $dma_req:ident) => { /// $spi is used as a type for indicating a DMA transfer into the SPI TX FIFO struct $spi { spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Disabled, u16>, _channel: timers::tim2::$trigger_channel, } impl $spi { pub fn new( _channel: timers::tim2::$trigger_channel, spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Disabled, u16>, ) -> Self { Self { spi, _channel } } /// Start the SPI and begin operating in a DMA-driven transfer mode. pub fn start_dma(&mut self) { // Allow the SPI FIFOs to operate using only DMA data channels. self.spi.enable_dma_tx(); // Enable SPI and start it in infinite transaction mode. self.spi.inner().cr1.modify(|_, w| w.spe().set_bit()); self.spi.inner().cr1.modify(|_, w| w.cstart().started()); } } // Note(unsafe): This is safe because the DMA request line is logically owned by this module. // Additionally, the SPI is owned by this structure and is known to be configured for u16 word // sizes. unsafe impl TargetAddress<MemoryToPeripheral> for $spi { /// SPI is configured to operate using 16-bit transfer words. type MemSize = u16; /// SPI DMA requests are generated whenever TIM2 CHx ($dma_req) comparison occurs. const REQUEST_LINE: Option<u8> = Some(DMAReq::$dma_req as u8); /// Whenever the DMA request occurs, it should write into SPI's TX FIFO. fn address(&self) -> usize { &self.spi.inner().txdr as *const _ as usize } } /// Represents data associated with DAC. pub struct $name { // Note: SPI TX functionality may not be used from this structure to ensure safety with DMA. transfer: Transfer< hal::dma::dma::$data_stream<hal::stm32::DMA1>, $spi, MemoryToPeripheral, &'static mut [u16], hal::dma::DBTransfer,
/// /// # Args /// * `spi` - The SPI interface used to communicate with the DAC. /// * `stream` - The DMA stream used to write DAC codes over SPI. /// * `trigger_channel` - The sampling timer output compare channel for update triggers. pub fn new( spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Enabled, u16>, stream: hal::dma::dma::$data_stream<hal::stm32::DMA1>, trigger_channel: timers::tim2::$trigger_channel, batch_size: usize, ) -> Self { // Generate DMA events when an output compare of the timer hits zero (timer roll over). trigger_channel.listen_dma(); trigger_channel.to_output_compare(4 + $index); // The stream constantly writes to the TX FIFO to write new update codes. let trigger_config = DmaConfig::default() .memory_increment(true) .double_buffer(true) .peripheral_increment(false); // Listen for any potential SPI error signals, which may indicate that we are not generating // update codes. let mut spi = spi.disable(); spi.listen(hal::spi::Event::Error); // AXISRAM is uninitialized. As such, we manually initialize it for a 0V DAC output // here before starting the transfer. // Note(unsafe): We currently own all DAC_BUF[index] buffers and are not using them // elsewhere, so it is safe to access them here. for buf in unsafe { DAC_BUF[$index].iter_mut() } { for byte in buf.iter_mut
>, } impl $name { /// Construct the DAC output channel.
random_line_split
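The From<i16> and From<DacCode> impls in the dac.rs record above convert between two's-complement and offset-binary codes by wrapping-adding (or wrapping-subtracting) i16::MIN. A small Python sketch of the same bit manipulation; Python ints are unbounded, so the 16-bit wrap is made explicit with a mask:

def encode_offset_binary(value: int) -> int:
    # i16 -> u16 DAC code: wrapping_add(i16::MIN), i.e. flip the sign bit.
    return (value + 0x8000) & 0xFFFF

def decode_offset_binary(code: int) -> int:
    # u16 DAC code -> i16: wrapping_sub(i16::MIN).
    raw = (code - 0x8000) & 0xFFFF
    return raw - 0x10000 if raw >= 0x8000 else raw

assert encode_offset_binary(0) == 0x8000       # 0 V sits at mid-scale
assert encode_offset_binary(-32768) == 0x0000  # most negative code
assert decode_offset_binary(encode_offset_binary(12345)) == 12345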
sqlite.py
**kwds): if cls is SqliteBase: msg = 'cannot instantiate SqliteBase directly - make a subclass' raise NotImplementedError(msg) return super(SqliteBase, cls).__new__(cls) def __init__(self, connection, table): """Initialize self.""" self._connection = connection self._table = table def __repr__(self): """Return a string representation of the data source.""" cls_name = self.__class__.__name__ conn_name = str(self._connection) tbl_name = self._table return '{0}({1}, table={2!r})'.format(cls_name, conn_name, tbl_name) def columns(self): """Return list of column names.""" cursor = self._connection.cursor() cursor.execute('PRAGMA table_info(' + self._table + ')') return [x[1] for x in cursor.fetchall()] def __iter__(self): """Return iterable of dictionary rows (like csv.DictReader).""" cursor = self._connection.cursor() cursor.execute('SELECT * FROM ' + self._table) column_names = self.columns() dict_row = lambda x: dict(zip(column_names, x)) return (dict_row(row) for row in cursor.fetchall()) def filter_rows(self, **kwds): if kwds: cursor = self._connection.cursor() cursor = self._execute_query('*', **kwds) # <- applies filter column_names = self.columns() dict_row = lambda row: dict(zip(column_names, row)) return (dict_row(row) for row in cursor) return self.__iter__() def distinct(self, columns, **kwds_filter): """Return iterable of tuples containing distinct *columns* values. """ if not _is_nsiterable(columns): columns = (columns,) self._assert_columns_exist(columns) select_clause = [self._normalize_column(x) for x in columns] select_clause = ', '.join(select_clause) select_clause = 'DISTINCT ' + select_clause cursor = self._execute_query(select_clause, **kwds_filter) return CompareSet(cursor) def sum(self, column, keys=None, **kwds_filter): """Returns :class:`CompareDict` containing sums of *column* values grouped by *keys*. """ self._assert_columns_exist(column) column = self._normalize_column(column) sql_functions = 'SUM({0})'.format(column) return self._sql_aggregate(sql_functions, keys, **kwds_filter) def count(self, column, keys=None, **kwds_filter): """Returns :class:`CompareDict` containing count of non-empty *column* values grouped by *keys*. """ self._assert_columns_exist(column) sql_function = "SUM(CASE COALESCE({0}, '') WHEN '' THEN 0 ELSE 1 END)" sql_function = sql_function.format(self._normalize_column(column)) return self._sql_aggregate(sql_function, keys, **kwds_filter) def _sql_aggregate(self, sql_function, keys=None, **kwds_filter): """Aggregates values using a SQL function select--e.g., 'COUNT(*)', 'SUM(col1)', etc. """ # TODO: _sql_aggregate has grown messy after a handful of # iterations; look to refactor it in the future to improve # maintainability. if not _is_nsiterable(sql_function): sql_function = (sql_function,) if keys is None:
if not _is_nsiterable(keys): keys = (keys,) group_clause = [self._normalize_column(x) for x in keys] group_clause = ', '.join(group_clause) select_clause = '{0}, {1}'.format(group_clause, ', '.join(sql_function)) trailing_clause = 'GROUP BY ' + group_clause cursor = self._execute_query(select_clause, trailing_clause, **kwds_filter) pos = len(sql_function) if pos > 1: # Gets values by slicing (i.e., row[-pos:]). iterable = ((row[:-pos], row[-pos:]) for row in cursor) else: # Gets value by index (i.e., row[-pos]). iterable = ((row[:-pos], row[-pos]) for row in cursor) return CompareDict(iterable, keys) def mapreduce(self, mapper, reducer, columns, keys=None, **kwds_filter): obj = super(SqliteBase, self) # 2.x compatible calling convention. return obj.mapreduce(mapper, reducer, columns, keys, **kwds_filter) # SqliteBase doesn't implement its own mapreduce() optimization. # A generalized, SQL optimization could do little more than the # already-optimized filter_rows() method. Since the super-class' # mapreduce() already uses filter_rows() internally, a separate # optimization is unnecessary. def _execute_query(self, select_clause, trailing_clause=None, **kwds_filter): """Execute query and return cursor object.""" try: stmnt, params = self._build_query(self._table, select_clause, **kwds_filter) if trailing_clause: stmnt += '\n' + trailing_clause cursor = self._connection.cursor() cursor.execute('PRAGMA synchronous=OFF') #print(stmnt, params) cursor.execute(stmnt, params) except Exception as e: exc_cls = e.__class__ msg = '%s\n query: %s\n params: %r' % (e, stmnt, params) raise exc_cls(msg) return cursor @classmethod def _build_query(cls, table, select_clause, **kwds_filter): """Return 'SELECT' query.""" query = 'SELECT ' + select_clause + ' FROM ' + table where_clause, params = cls._build_where_clause(**kwds_filter) if where_clause: query = query + ' WHERE ' + where_clause return query, params @staticmethod def _build_where_clause(**kwds_filter): """Return 'WHERE' clause that implements *kwds_filter* constraints. """ clause = [] params = [] items = kwds_filter.items() items = sorted(items, key=lambda x: x[0]) # Ordered by key. for key, val in items: if _is_nsiterable(val): clause.append(key + ' IN (%s)' % (', '.join('?' * len(val)))) for x in val: params.append(x) else: clause.append(key + '=?') params.append(val) clause = ' AND '.join(clause) if clause else '' return clause, params def create_index(self, *columns): """Create an index for specified columns---can speed up testing in some cases. See :meth:`SqliteSource.create_index` for more details. """ self._assert_columns_exist(columns) # Build index name. whitelist = lambda col: ''.join(x for x in col if x.isalnum()) idx_name = '_'.join(whitelist(col) for col in columns) idx_name = 'idx_{0}_{1}'.format(self._table, idx_name) # Build column names. col_names = [self._normalize_column(x) for x in columns] col_names = ', '.join(col_names) # Prepare statement. statement = 'CREATE INDEX IF NOT EXISTS {0} ON {1} ({2})' statement = statement.format(idx_name, self._table, col_names) # Create index.
cursor = self._connection.cursor() cursor.execute('PRAGMA synchronous=OFF') cursor.execute(statement) @staticmethod def _normalize_column(column): """Normalize value for use as SQLite column name.""" if not isinstance(column, str): msg = "expected column of type 'str', got {0!r} instead" raise TypeError(msg.format(column.__class__.__name__)) column = column.strip() column = column.replace('"', '""') # Escape quotes. if column == '': column = '_empty_' return '"' + column + '"' class SqliteSource(SqliteBase): """Loads *table* data from given SQLite *connection*: :: conn = sqlite3.connect('mydatabase.sqlite3') subject = datatest.SqliteSource(conn, 'mytable') """ @classmethod def from_records(cls, data, columns=None): """Alternate constructor to load an existing collection of records into a temporary SQLite database. Loads *data* (an iterable of lists, tuples, or dicts) into a temporary table using the named *columns*:: records = [ ('a', 'x'), ('b', 'y'), ('c', 'z'), ... ] subject = datatest.SqliteSource.from_records(records, ['col1', 'col2']) The *columns* argument can be omitted if *data* is a collection of dictionary or namedtuple records:: dict_rows = [ {'col
sql_function = ', '.join(sql_function) cursor = self._execute_query(sql_function, **kwds_filter) result = cursor.fetchone() if len(result) == 1: return result[0] return result # <- EXIT!
conditional_block
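The `_build_where_clause()` helper in the sqlite.py record above turns keyword filters into a parameterized WHERE clause, using `IN (?, ?, ...)` for iterable values and `=?` otherwise, with keys sorted for deterministic SQL. A standalone sketch, assuming a simple isinstance check in place of datatest's `_is_nsiterable`:

def build_where_clause(**kwds_filter):
    # Mirrors _build_where_clause: sorted keys, '?' placeholders, params list.
    clause, params = [], []
    for key, val in sorted(kwds_filter.items()):
        if isinstance(val, (list, tuple)):  # stand-in for _is_nsiterable
            clause.append('%s IN (%s)' % (key, ', '.join('?' * len(val))))
            params.extend(val)
        else:
            clause.append(key + '=?')
            params.append(val)
    return ' AND '.join(clause), params

# e.g. build_where_clause(state=('OH', 'PA'), year=2020)
# -> ('state IN (?, ?) AND year=?', ['OH', 'PA', 2020])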
sqlite.py
**kwds): if cls is SqliteBase: msg = 'cannot instantiate SqliteBase directly - make a subclass' raise NotImplementedError(msg) return super(SqliteBase, cls).__new__(cls) def __init__(self, connection, table): """Initialize self.""" self._connection = connection self._table = table def __repr__(self): """Return a string representation of the data source.""" cls_name = self.__class__.__name__ conn_name = str(self._connection) tbl_name = self._table return '{0}({1}, table={2!r})'.format(cls_name, conn_name, tbl_name) def columns(self): """Return list of column names.""" cursor = self._connection.cursor() cursor.execute('PRAGMA table_info(' + self._table + ')') return [x[1] for x in cursor.fetchall()] def __iter__(self): """Return iterable of dictionary rows (like csv.DictReader).""" cursor = self._connection.cursor() cursor.execute('SELECT * FROM ' + self._table) column_names = self.columns() dict_row = lambda x: dict(zip(column_names, x)) return (dict_row(row) for row in cursor.fetchall()) def filter_rows(self, **kwds): if kwds: cursor = self._connection.cursor() cursor = self._execute_query('*', **kwds) # <- applies filter column_names = self.columns() dict_row = lambda row: dict(zip(column_names, row)) return (dict_row(row) for row in cursor) return self.__iter__() def distinct(self, columns, **kwds_filter): """Return iterable of tuples containing distinct *columns* values. """ if not _is_nsiterable(columns): columns = (columns,) self._assert_columns_exist(columns) select_clause = [self._normalize_column(x) for x in columns] select_clause = ', '.join(select_clause) select_clause = 'DISTINCT ' + select_clause cursor = self._execute_query(select_clause, **kwds_filter) return CompareSet(cursor) def sum(self, column, keys=None, **kwds_filter): """Returns :class:`CompareDict` containing sums of *column* values grouped by *keys*. """ self._assert_columns_exist(column) column = self._normalize_column(column) sql_functions = 'SUM({0})'.format(column) return self._sql_aggregate(sql_functions, keys, **kwds_filter) def count(self, column, keys=None, **kwds_filter): """Returns :class:`CompareDict` containing count of non-empty *column* values grouped by *keys*. """ self._assert_columns_exist(column) sql_function = "SUM(CASE COALESCE({0}, '') WHEN '' THEN 0 ELSE 1 END)" sql_function = sql_function.format(self._normalize_column(column)) return self._sql_aggregate(sql_function, keys, **kwds_filter) def _sql_aggregate(self, sql_function, keys=None, **kwds_filter): """Aggregates values using a SQL function select--e.g., 'COUNT(*)', 'SUM(col1)', etc. """ # TODO: _sql_aggregate has grown messy after a handful of # iterations; look to refactor it in the future to improve # maintainability. if not _is_nsiterable(sql_function): sql_function = (sql_function,) if keys is None: sql_function = ', '.join(sql_function) cursor = self._execute_query(sql_function, **kwds_filter) result = cursor.fetchone() if len(result) == 1: return result[0] return result # <- EXIT! if not _is_nsiterable(keys): keys = (keys,) group_clause = [self._normalize_column(x) for x in keys] group_clause = ', '.join(group_clause) select_clause = '{0}, {1}'.format(group_clause, ', '.join(sql_function)) trailing_clause = 'GROUP BY ' + group_clause cursor = self._execute_query(select_clause, trailing_clause, **kwds_filter) pos = len(sql_function) if pos > 1: # Gets values by slicing (i.e., row[-pos:]).
iterable = ((row[:-pos], row[-pos:]) for row in cursor) else: # Gets value by index (i.e., row[-pos]). iterable = ((row[:-pos], row[-pos]) for row in cursor) return CompareDict(iterable, keys) def mapreduce(self, mapper, reducer, columns, keys=None, **kwds_filter): obj = super(SqliteBase, self) # 2.x compatible calling convention. return obj.mapreduce(mapper, reducer, columns, keys, **kwds_filter) # SqliteBase doesn't implement its own mapreduce() optimization. # A generalized, SQL optimization could do little more than the # already-optimized filter_rows() method. Since the super-class' # mapreduce() already uses filter_rows() internally, a separate # optimization is unnecessary. def _execute_query(self, select_clause, trailing_clause=None, **kwds_filter): """Execute query and return cursor object.""" try: stmnt, params = self._build_query(self._table, select_clause, **kwds_filter) if trailing_clause: stmnt += '\n' + trailing_clause cursor = self._connection.cursor() cursor.execute('PRAGMA synchronous=OFF') #print(stmnt, params) cursor.execute(stmnt, params) except Exception as e: exc_cls = e.__class__ msg = '%s\n query: %s\n params: %r' % (e, stmnt, params) raise exc_cls(msg) return cursor @classmethod def _build_query(cls, table, select_clause, **kwds_filter): """Return 'SELECT' query.""" query = 'SELECT ' + select_clause + ' FROM ' + table where_clause, params = cls._build_where_clause(**kwds_filter) if where_clause: query = query + ' WHERE ' + where_clause return query, params @staticmethod def _build_where_clause(**kwds_filter): """Return 'WHERE' clause that implements *kwds_filter* constraints. """ clause = [] params = [] items = kwds_filter.items() items = sorted(items, key=lambda x: x[0]) # Ordered by key. for key, val in items: if _is_nsiterable(val): clause.append(key + ' IN (%s)' % (', '.join('?' * len(val)))) for x in val: params.append(x) else: clause.append(key + '=?') params.append(val) clause = ' AND '.join(clause) if clause else '' return clause, params def create_index(self, *columns): """Create an index for specified columns---can speed up testing in some cases. See :meth:`SqliteSource.create_index` for more details. """ self._assert_columns_exist(columns) # Build index name. whitelist = lambda col: ''.join(x for x in col if x.isalnum()) idx_name = '_'.join(whitelist(col) for col in columns) idx_name = 'idx_{0}_{1}'.format(self._table, idx_name) # Build column names. col_names = [self._normalize_column(x) for x in columns] col_names = ', '.join(col_names) # Prepare statement. statement = 'CREATE INDEX IF NOT EXISTS {0} ON {1} ({2})' statement = statement.format(idx_name, self._table, col_names) # Create index. cursor = self._connection.cursor() cursor.execute('PRAGMA synchronous=OFF') cursor.execute(statement) @staticmethod def _normalize_column(column): """Normalize value for use as SQLite column name.""" if not isinstance(column, str): msg = "expected column of type 'str', got {0!r} instead" raise TypeError(msg.format(column.__class__.__name__)) column = column.strip() column = column.replace('"', '""') # Escape quotes. if column == '':
"""Loads *table* data from given SQLite *connection*: :: conn = sqlite3.connect('mydatabase.sqlite3') subject = datatest.SqliteSource(conn, 'mytable') """ @classmethod def from_records(cls, data, columns=None): """Alternate constructor to load an existing collection of records into a temporary SQLite database. Loads *data* (an iterable of lists, tuples, or dicts) into a temporary table using the named *columns*:: records = [ ('a', 'x'), ('b', 'y'), ('c', 'z'), ... ] subject = datatest.SqliteSource.from_records(records, ['col1', 'col2']) The *columns* argument can be omitted if *data* is a collection of dictionary or namedtuple records:: dict_rows = [ {'col
column = '_empty_' return '"' + column + '"' class SqliteSource(SqliteBase):
random_line_split
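In `_sql_aggregate()` above, each GROUP BY result row holds the key columns followed by one value per aggregate function; `pos = len(sql_function)` is the number of trailing value columns, so `row[:-pos]` is the key and `row[-pos:]` (or `row[-pos]` for a single function) is the value. A worked example of that slicing:

# One GROUP BY column ('state'), two aggregates (SUM, COUNT) -> pos = 2.
row = ('OH', 1200, 35)   # e.g. SELECT state, SUM(x), COUNT(x) ... GROUP BY state
pos = 2
key, values = row[:-pos], row[-pos:]
assert key == ('OH',) and values == (1200, 35)

# With a single aggregate, pos = 1 and the value is taken by index instead:
row = ('OH', 1200)
key, value = row[:-1], row[-1]
assert key == ('OH',) and value == 1200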
sqlite.py
**kwds): if cls is SqliteBase: msg = 'cannot instantiate SqliteBase directly - make a subclass' raise NotImplementedError(msg) return super(SqliteBase, cls).__new__(cls) def __init__(self, connection, table): """Initialize self.""" self._connection = connection self._table = table def __repr__(self): """Return a string representation of the data source.""" cls_name = self.__class__.__name__ conn_name = str(self._connection) tbl_name = self._table return '{0}({1}, table={2!r})'.format(cls_name, conn_name, tbl_name) def columns(self): """Return list of column names.""" cursor = self._connection.cursor() cursor.execute('PRAGMA table_info(' + self._table + ')') return [x[1] for x in cursor.fetchall()] def
(self): """Return iterable of dictionary rows (like csv.DictReader).""" cursor = self._connection.cursor() cursor.execute('SELECT * FROM ' + self._table) column_names = self.columns() dict_row = lambda x: dict(zip(column_names, x)) return (dict_row(row) for row in cursor.fetchall()) def filter_rows(self, **kwds): if kwds: cursor = self._connection.cursor() cursor = self._execute_query('*', **kwds) # <- applies filter column_names = self.columns() dict_row = lambda row: dict(zip(column_names, row)) return (dict_row(row) for row in cursor) return self.__iter__() def distinct(self, columns, **kwds_filter): """Return iterable of tuples containing distinct *columns* values. """ if not _is_nsiterable(columns): columns = (columns,) self._assert_columns_exist(columns) select_clause = [self._normalize_column(x) for x in columns] select_clause = ', '.join(select_clause) select_clause = 'DISTINCT ' + select_clause cursor = self._execute_query(select_clause, **kwds_filter) return CompareSet(cursor) def sum(self, column, keys=None, **kwds_filter): """Returns :class:`CompareDict` containing sums of *column* values grouped by *keys*. """ self._assert_columns_exist(column) column = self._normalize_column(column) sql_functions = 'SUM({0})'.format(column) return self._sql_aggregate(sql_functions, keys, **kwds_filter) def count(self, column, keys=None, **kwds_filter): """Returns :class:`CompareDict` containing count of non-empty *column* values grouped by *keys*. """ self._assert_columns_exist(column) sql_function = "SUM(CASE COALESCE({0}, '') WHEN '' THEN 0 ELSE 1 END)" sql_function = sql_function.format(self._normalize_column(column)) return self._sql_aggregate(sql_function, keys, **kwds_filter) def _sql_aggregate(self, sql_function, keys=None, **kwds_filter): """Aggregates values using a SQL function select--e.g., 'COUNT(*)', 'SUM(col1)', etc. """ # TODO: _sql_aggregate has grown messy after a handful of # iterations; look to refactor it in the future to improve # maintainability. if not _is_nsiterable(sql_function): sql_function = (sql_function,) if keys is None: sql_function = ', '.join(sql_function) cursor = self._execute_query(sql_function, **kwds_filter) result = cursor.fetchone() if len(result) == 1: return result[0] return result # <- EXIT! if not _is_nsiterable(keys): keys = (keys,) group_clause = [self._normalize_column(x) for x in keys] group_clause = ', '.join(group_clause) select_clause = '{0}, {1}'.format(group_clause, ', '.join(sql_function)) trailing_clause = 'GROUP BY ' + group_clause cursor = self._execute_query(select_clause, trailing_clause, **kwds_filter) pos = len(sql_function) if pos > 1: # Gets values by slicing (i.e., row[-pos:]). iterable = ((row[:-pos], row[-pos:]) for row in cursor) else: # Gets value by index (i.e., row[-pos]). iterable = ((row[:-pos], row[-pos]) for row in cursor) return CompareDict(iterable, keys) def mapreduce(self, mapper, reducer, columns, keys=None, **kwds_filter): obj = super(SqliteBase, self) # 2.x compatible calling convention. return obj.mapreduce(mapper, reducer, columns, keys, **kwds_filter) # SqliteBase doesn't implement its own mapreduce() optimization. # A generalized, SQL optimization could do little more than the # already-optimized filter_rows() method. Since the super-class' # mapreduce() already uses filter_rows() internally, a separate # optimization is unnecessary.
def _execute_query(self, select_clause, trailing_clause=None, **kwds_filter): """Execute query and return cursor object.""" try: stmnt, params = self._build_query(self._table, select_clause, **kwds_filter) if trailing_clause: stmnt += '\n' + trailing_clause cursor = self._connection.cursor() cursor.execute('PRAGMA synchronous=OFF') #print(stmnt, params) cursor.execute(stmnt, params) except Exception as e: exc_cls = e.__class__ msg = '%s\n query: %s\n params: %r' % (e, stmnt, params) raise exc_cls(msg) return cursor @classmethod def _build_query(cls, table, select_clause, **kwds_filter): """Return 'SELECT' query.""" query = 'SELECT ' + select_clause + ' FROM ' + table where_clause, params = cls._build_where_clause(**kwds_filter) if where_clause: query = query + ' WHERE ' + where_clause return query, params @staticmethod def _build_where_clause(**kwds_filter): """Return 'WHERE' clause that implements *kwds_filter* constraints. """ clause = [] params = [] items = kwds_filter.items() items = sorted(items, key=lambda x: x[0]) # Ordered by key. for key, val in items: if _is_nsiterable(val): clause.append(key + ' IN (%s)' % (', '.join('?' * len(val)))) for x in val: params.append(x) else: clause.append(key + '=?') params.append(val) clause = ' AND '.join(clause) if clause else '' return clause, params def create_index(self, *columns): """Create an index for specified columns---can speed up testing in some cases. See :meth:`SqliteSource.create_index` for more details. """ self._assert_columns_exist(columns) # Build index name. whitelist = lambda col: ''.join(x for x in col if x.isalnum()) idx_name = '_'.join(whitelist(col) for col in columns) idx_name = 'idx_{0}_{1}'.format(self._table, idx_name) # Build column names. col_names = [self._normalize_column(x) for x in columns] col_names = ', '.join(col_names) # Prepare statement. statement = 'CREATE INDEX IF NOT EXISTS {0} ON {1} ({2})' statement = statement.format(idx_name, self._table, col_names) # Create index. cursor = self._connection.cursor() cursor.execute('PRAGMA synchronous=OFF') cursor.execute(statement) @staticmethod def _normalize_column(column): """Normalize value for use as SQLite column name.""" if not isinstance(column, str): msg = "expected column of type 'str', got {0!r} instead" raise TypeError(msg.format(column.__class__.__name__)) column = column.strip() column = column.replace('"', '""') # Escape quotes. if column == '': column = '_empty_' return '"' + column + '"' class SqliteSource(SqliteBase): """Loads *table* data from given SQLite *connection*: :: conn = sqlite3.connect('mydatabase.sqlite3') subject = datatest.SqliteSource(conn, 'mytable') """ @classmethod def from_records(cls, data, columns=None): """Alternate constructor to load an existing collection of records into a temporary SQLite database. Loads *data* (an iterable of lists, tuples, or dicts) into a temporary table using the named *columns*:: records = [ ('a', 'x'), ('b', 'y'), ('c', 'z'), ... ] subject = datatest.SqliteSource.from_records(records, ['col1', 'col2']) The *columns* argument can be omitted if *data* is a collection of dictionary or namedtuple records:: dict_rows = [ {'col
__iter__
identifier_name
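`_normalize_column()` in the record above makes an arbitrary string safe to use as a quoted SQLite identifier: strip whitespace, double any embedded double-quotes, substitute '_empty_' for an empty name, and wrap the result in quotes. A direct sketch of that normalization:

def normalize_column(column):
    if not isinstance(column, str):
        raise TypeError("expected column of type 'str', got %r instead"
                        % column.__class__.__name__)
    column = column.strip()
    column = column.replace('"', '""')  # escape embedded quotes by doubling
    if column == '':
        column = '_empty_'
    return '"' + column + '"'

assert normalize_column(' a"b ') == '"a""b"'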
sqlite.py
def columns(self): """Return list of column names.""" cursor = self._connection.cursor() cursor.execute('PRAGMA table_info(' + self._table + ')') return [x[1] for x in cursor.fetchall()] def __iter__(self): """Return iterable of dictionary rows (like csv.DictReader).""" cursor = self._connection.cursor() cursor.execute('SELECT * FROM ' + self._table) column_names = self.columns() dict_row = lambda x: dict(zip(column_names, x)) return (dict_row(row) for row in cursor.fetchall()) def filter_rows(self, **kwds): if kwds: cursor = self._connection.cursor() cursor = self._execute_query('*', **kwds) # <- applies filter column_names = self.columns() dict_row = lambda row: dict(zip(column_names, row)) return (dict_row(row) for row in cursor) return self.__iter__() def distinct(self, columns, **kwds_filter): """Return iterable of tuples containing distinct *columns* values. """ if not _is_nsiterable(columns): columns = (columns,) self._assert_columns_exist(columns) select_clause = [self._normalize_column(x) for x in columns] select_clause = ', '.join(select_clause) select_clause = 'DISTINCT ' + select_clause cursor = self._execute_query(select_clause, **kwds_filter) return CompareSet(cursor) def sum(self, column, keys=None, **kwds_filter): """Returns :class:`CompareDict` containing sums of *column* values grouped by *keys*. """ self._assert_columns_exist(column) column = self._normalize_column(column) sql_functions = 'SUM({0})'.format(column) return self._sql_aggregate(sql_functions, keys, **kwds_filter) def count(self, column, keys=None, **kwds_filter): """Returns :class:`CompareDict` containing count of non-empty *column* values grouped by *keys*. """ self._assert_columns_exist(column) sql_function = "SUM(CASE COALESCE({0}, '') WHEN '' THEN 0 ELSE 1 END)" sql_function = sql_function.format(self._normalize_column(column)) return self._sql_aggregate(sql_function, keys, **kwds_filter) def _sql_aggregate(self, sql_function, keys=None, **kwds_filter): """Aggregates values using a SQL function select--e.g., 'COUNT(*)', 'SUM(col1)', etc. """ # TODO: _sql_aggregate has grown messy after a handful of # iterations; look to refactor it in the future to improve # maintainability. if not _is_nsiterable(sql_function): sql_function = (sql_function,) if keys is None: sql_function = ', '.join(sql_function) cursor = self._execute_query(sql_function, **kwds_filter) result = cursor.fetchone() if len(result) == 1: return result[0] return result # <- EXIT! if not _is_nsiterable(keys): keys = (keys,) group_clause = [self._normalize_column(x) for x in keys] group_clause = ', '.join(group_clause) select_clause = '{0}, {1}'.format(group_clause, ', '.join(sql_function)) trailing_clause = 'GROUP BY ' + group_clause cursor = self._execute_query(select_clause, trailing_clause, **kwds_filter) pos = len(sql_function) if pos > 1: # Gets values by slicing (i.e., row[-pos:]). iterable = ((row[:-pos], row[-pos:]) for row in cursor) else: # Gets value by index (i.e., row[-pos]). iterable = ((row[:-pos], row[-pos]) for row in cursor) return CompareDict(iterable, keys) def mapreduce(self, mapper, reducer, columns, keys=None, **kwds_filter): obj = super(SqliteBase, self) # 2.x compatible calling convention. return obj.mapreduce(mapper, reducer, columns, keys, **kwds_filter) # SqliteBase doesn't implement its own mapreduce() optimization. # A generalized, SQL optimization could do little more than the # already-optimized filter_rows() method.
Since the super-class' # mapreduce() already uses filter_rows() internally, a separate # optimization is unnecessary. def _execute_query(self, select_clause, trailing_clause=None, **kwds_filter): """Execute query and return cursor object.""" try: stmnt, params = self._build_query(self._table, select_clause, **kwds_filter) if trailing_clause: stmnt += '\n' + trailing_clause cursor = self._connection.cursor() cursor.execute('PRAGMA synchronous=OFF') #print(stmnt, params) cursor.execute(stmnt, params) except Exception as e: exc_cls = e.__class__ msg = '%s\n query: %s\n params: %r' % (e, stmnt, params) raise exc_cls(msg) return cursor @classmethod def _build_query(cls, table, select_clause, **kwds_filter): """Return 'SELECT' query.""" query = 'SELECT ' + select_clause + ' FROM ' + table where_clause, params = cls._build_where_clause(**kwds_filter) if where_clause: query = query + ' WHERE ' + where_clause return query, params @staticmethod def _build_where_clause(**kwds_filter): """Return 'WHERE' clause that implements *kwds_filter* constraints. """ clause = [] params = [] items = kwds_filter.items() items = sorted(items, key=lambda x: x[0]) # Ordered by key. for key, val in items: if _is_nsiterable(val): clause.append(key + ' IN (%s)' % (', '.join('?' * len(val)))) for x in val: params.append(x) else: clause.append(key + '=?') params.append(val) clause = ' AND '.join(clause) if clause else '' return clause, params def create_index(self, *columns): """Create an index for specified columns---can speed up testing in some cases. See :meth:`SqliteSource.create_index` for more details. """ self._assert_columns_exist(columns) # Build index name. whitelist = lambda col: ''.join(x for x in col if x.isalnum()) idx_name = '_'.join(whitelist(col) for col in columns) idx_name = 'idx_{0}_{1}'.format(self._table, idx_name) # Build column names. col_names = [self._normalize_column(x) for x in columns] col_names = ', '.join(col_names) # Prepare statement. statement = 'CREATE INDEX IF NOT EXISTS {0} ON {1} ({2})' statement = statement.format(idx_name, self._table, col_names) # Create index. cursor = self._connection.cursor() cursor.execute('PRAGMA synchronous=OFF') cursor.execute(statement) @staticmethod def _normalize_column(column): """Normalize value for use as SQLite column name.""" if not isinstance(column, str): msg = "expected column of type 'str', got {0!r} instead" raise TypeError(msg.format(column.__class__.__name__)) column = column.strip() column = column.replace('"', '""') # Escape quotes. if column == '': column = '_empty_' return '"' + column + '"' class SqliteSource(SqliteBase): """Loads *table* data from given SQLite *connection*: :: conn = sqlite3.connect('mydatabase.sqlite3') subject = datatest.SqliteSource(conn, 'mytable') """ @classmethod def from_records(cls, data, columns=None): """Alternate constructor to load an existing collection of records into a temporary SQLite database. Loads *data* (an iterable of lists, tuples, or dicts) into a temporary table using the named *columns*:: records = [ ('a', 'x'), ('b', 'y'), ('c', 'z'), ... ] subject = datatest.SqliteSource.from_records(records, ['col1', 'col2'])
"""Base class for SqliteSource and CsvSource (not intended to be instantiated directly). """ def __new__(cls, *args, **kwds): if cls is SqliteBase: msg = 'cannot instantiate SqliteBase directly - make a subclass' raise NotImplementedError(msg) return super(SqliteBase, cls).__new__(cls) def __init__(self, connection, table): """Initialize self.""" self._connection = connection self._table = table def __repr__(self): """Return a string representation of the data source.""" cls_name = self.__class__.__name__ conn_name = str(self._connection) tbl_name = self._table return '{0}({1}, table={2!r})'.format(cls_name, conn_name, tbl_name)
identifier_body
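`create_index()` in the sqlite.py records above derives a deterministic index name by stripping each column down to its alphanumeric characters and joining the pieces with underscores. A sketch of just the name construction, assuming a hypothetical table named 'mytable':

def index_name(table, *columns):
    whitelist = lambda col: ''.join(x for x in col if x.isalnum())
    name = '_'.join(whitelist(col) for col in columns)
    return 'idx_{0}_{1}'.format(table, name)

assert index_name('mytable', 'col 1', 'col-2') == 'idx_mytable_col1_col2'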
storage.rs
_name = "database.db"; let db_manager = r2d2_sqlite::SqliteConnectionManager::file(file_name); return r2d2::Pool::new(db_manager).unwrap(); }; } pub fn create_main_database_if_needed() { let pool = &MAIN_POOL; let conn = pool.get().unwrap(); create_main_tables_if_needed(&conn); } fn create_main_tables_if_needed(conn: &DatabaseConnection)
// Rooms pub const PENDING_TOKEN_EXPIRATION: i64 = 10 * 60; pub const TOKEN_EXPIRATION: i64 = 7 * 24 * 60 * 60; pub const FILE_EXPIRATION: i64 = 15 * 24 * 60 * 60; lazy_static::lazy_static! { static ref POOLS: Mutex<HashMap<String, DatabaseConnectionPool>> = Mutex::new(HashMap::new()); } pub fn pool_by_room_id(room_id: &RoomId) -> DatabaseConnectionPool { let mut pools = POOLS.lock().unwrap(); if let Some(pool) = pools.get(room_id.get_id()) { return pool.clone(); } else { let raw_path = format!("rooms/{}.db", room_id.get_id()); let path = Path::new(&raw_path); let db_manager = r2d2_sqlite::SqliteConnectionManager::file(path); let pool = r2d2::Pool::new(db_manager).unwrap(); pools.insert(room_id.get_id().to_string(), pool); return pools[room_id.get_id()].clone(); } } pub fn create_database_if_needed(room_id: &RoomId) { let pool = pool_by_room_id(room_id); let conn = pool.get().unwrap(); create_room_tables_if_needed(&conn); } pub fn create_room_tables_if_needed(conn: &DatabaseConnection) { // Messages // The `id` field is needed to make `rowid` stable, which is important because otherwise // the `id`s in this table won't correspond to those in the deleted messages table let messages_table_cmd = "CREATE TABLE IF NOT EXISTS messages ( id INTEGER PRIMARY KEY, public_key TEXT, timestamp INTEGER, data TEXT, signature TEXT, is_deleted INTEGER )"; conn.execute(&messages_table_cmd, params![]).expect("Couldn't create messages table."); // Deleted messages let deleted_messages_table_cmd = "CREATE TABLE IF NOT EXISTS deleted_messages ( id INTEGER PRIMARY KEY, deleted_message_id INTEGER )"; conn.execute(&deleted_messages_table_cmd, params![]) .expect("Couldn't create deleted messages table."); // Moderators let moderators_table_cmd = "CREATE TABLE IF NOT EXISTS moderators ( public_key TEXT )"; conn.execute(&moderators_table_cmd, params![]).expect("Couldn't create moderators table."); // Block list let block_list_table_cmd = "CREATE TABLE IF NOT EXISTS block_list ( public_key TEXT )"; conn.execute(&block_list_table_cmd, params![]).expect("Couldn't create block list table."); // Pending tokens // Note that a given public key can have multiple pending tokens let pending_tokens_table_cmd = "CREATE TABLE IF NOT EXISTS pending_tokens ( public_key TEXT, timestamp INTEGER, token BLOB )"; conn.execute(&pending_tokens_table_cmd, params![]) .expect("Couldn't create pending tokens table."); // Tokens // The token is stored as hex here (rather than as bytes) because it's more convenient for lookup let tokens_table_cmd = "CREATE TABLE IF NOT EXISTS tokens ( public_key TEXT, timestamp INTEGER, token TEXT PRIMARY KEY )"; conn.execute(&tokens_table_cmd, params![]).expect("Couldn't create tokens table."); // Files let files_table_cmd = "CREATE TABLE IF NOT EXISTS files ( id TEXT PRIMARY KEY, timestamp INTEGER )"; conn.execute(&files_table_cmd, params![]).expect("Couldn't create files table."); // User activity table let user_activity_table_cmd = "CREATE TABLE IF NOT EXISTS user_activity ( public_key TEXT PRIMARY KEY, last_active INTEGER NOT NULL )"; conn.execute(&user_activity_table_cmd, params![]) .expect("Couldn't create user activity table."); } // Pruning pub async fn prune_tokens_periodically() { let mut timer = tokio::time::interval(chrono::Duration::minutes(10).to_std().unwrap()); loop { timer.tick().await; tokio::spawn(async { prune_tokens().await; }); } } pub async fn prune_pending_tokens_periodically() { let mut timer = tokio::time::interval(chrono::Duration::minutes(10).to_std().unwrap()); loop { 
timer.tick().await; tokio::spawn(async { prune_pending_tokens().await; }); } } pub async fn prune_files_periodically() { let mut timer = tokio::time::interval(chrono::Duration::days(1).to_std().unwrap()); loop { timer.tick().await; tokio::spawn(async { prune_files(FILE_EXPIRATION).await; }); } } async fn prune_tokens() { let rooms = match get_all_room_ids() { Ok(rooms) => rooms, Err(_) => return, }; for room in rooms { let pool = pool_by_room_id(&room); // It's not catastrophic if we fail to prune the database for a given room let conn = match pool.get() { Ok(conn) => conn, Err(e) => return error!("Couldn't prune tokens due to error: {}.", e), }; let stmt = "DELETE FROM tokens WHERE timestamp < (?1)"; let now = chrono::Utc::now().timestamp(); let expiration = now - TOKEN_EXPIRATION; match conn.execute(&stmt, params![expiration]) { Ok(_) => (), Err(e) => return error!("Couldn't prune tokens due to error: {}.", e), }; } info!("Pruned tokens."); } async fn prune_pending_tokens() { let rooms = match get_all_room_ids() { Ok(rooms) => rooms, Err(_) => return, }; for room in rooms { let pool = pool_by_room_id(&room); // It's not catastrophic if we fail to prune the database for a given room let conn = match pool.get() { Ok(conn) => conn, Err(e) => return error!("Couldn't prune pending tokens due to error: {}.", e), }; let stmt = "DELETE FROM pending_tokens WHERE timestamp < (?1)"; let now = chrono::Utc::now().timestamp(); let expiration = now - PENDING_TOKEN_EXPIRATION; match conn.execute(&stmt, params![expiration]) { Ok(_) => (), Err(e) => return error!("Couldn't prune pending tokens due to error: {}.", e), }; } info!("Pruned pending tokens."); } fn get_expired_file_ids( pool: &DatabaseConnectionPool, file_expiration: i64, ) -> Result<Vec<String>, ()> { let now = chrono::Utc::now().timestamp(); let expiration = now - file_expiration; // Get a database connection and open a transaction let conn = pool.get().map_err(|e| { error!("Couldn't get database connection to prune files due to error: {}.", e); })?; // Get the IDs of the files to delete let raw_query = "SELECT id FROM files WHERE timestamp < (?1)"; let mut query = conn.prepare(&raw_query).map_err(|e| { error!("Couldn't prepare query to prune files due to error: {}.", e); })?; let rows = query.query_map(params![expiration], |row| row.get(0)).map_err(|e| { error!("Couldn't prune files due to error: {} (expiration = {}).", e, expiration); })?; Ok(rows.filter_map(|result| result.ok()).collect()) } pub async fn prune_files_for_room( pool: &DatabaseConnectionPool, room: &RoomId, file_expiration: i64, ) { let ids = get_expired_file_ids(&pool, file_expiration); match ids { Ok(ids) if !ids.is_empty() => { // Delete the files let futs = ids.iter().map(|id| async move { ( tokio::fs::remove_file(format!("files/{}_files/{}", room.get_id(), id)).await, id.to_owned(), ) }); let results = futures::future::join_all(futs).await; for (res, id) in results { if let Err(err) = res { error!( "Couldn't delete file: {} from room: {} due to error: {}.", id, room.get_id(), err ); } } let conn = match pool.get() { Ok(conn) => conn, Err(e) => { return error!( "Couldn't get database connection to prune files due to error: {}.", e ) } };
{ let main_table_cmd = "CREATE TABLE IF NOT EXISTS main ( id TEXT PRIMARY KEY, name TEXT, image_id TEXT )"; conn.execute(&main_table_cmd, params![]).expect("Couldn't create main table."); }
identifier_body
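The pruning functions in the storage.rs records compute a cutoff of `now - EXPIRATION` seconds and delete anything older with a parameterized statement. A minimal Python/sqlite3 sketch of the same pattern, assuming a `tokens(public_key, timestamp, token)` table like the one created above:

import sqlite3
import time

TOKEN_EXPIRATION = 7 * 24 * 60 * 60  # seconds, as in storage.rs

def prune_tokens(conn: sqlite3.Connection) -> None:
    expiration = int(time.time()) - TOKEN_EXPIRATION
    # Parameterized, mirroring: "DELETE FROM tokens WHERE timestamp < (?1)"
    conn.execute('DELETE FROM tokens WHERE timestamp < ?', (expiration,))
    conn.commit()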
storage.rs
file_name = "database.db"; let db_manager = r2d2_sqlite::SqliteConnectionManager::file(file_name); return r2d2::Pool::new(db_manager).unwrap(); }; } pub fn create_main_database_if_needed() { let pool = &MAIN_POOL; let conn = pool.get().unwrap(); create_main_tables_if_needed(&conn); } fn create_main_tables_if_needed(conn: &DatabaseConnection) { let main_table_cmd = "CREATE TABLE IF NOT EXISTS main ( id TEXT PRIMARY KEY, name TEXT, image_id TEXT )"; conn.execute(&main_table_cmd, params![]).expect("Couldn't create main table."); } // Rooms pub const PENDING_TOKEN_EXPIRATION: i64 = 10 * 60; pub const TOKEN_EXPIRATION: i64 = 7 * 24 * 60 * 60; pub const FILE_EXPIRATION: i64 = 15 * 24 * 60 * 60; lazy_static::lazy_static! { static ref POOLS: Mutex<HashMap<String, DatabaseConnectionPool>> = Mutex::new(HashMap::new()); } pub fn pool_by_room_id(room_id: &RoomId) -> DatabaseConnectionPool { let mut pools = POOLS.lock().unwrap(); if let Some(pool) = pools.get(room_id.get_id()) { return pool.clone(); } else { let raw_path = format!("rooms/{}.db", room_id.get_id()); let path = Path::new(&raw_path); let db_manager = r2d2_sqlite::SqliteConnectionManager::file(path); let pool = r2d2::Pool::new(db_manager).unwrap(); pools.insert(room_id.get_id().to_string(), pool); return pools[room_id.get_id()].clone(); } } pub fn create_database_if_needed(room_id: &RoomId) { let pool = pool_by_room_id(room_id); let conn = pool.get().unwrap(); create_room_tables_if_needed(&conn); } pub fn create_room_tables_if_needed(conn: &DatabaseConnection) { // Messages // The `id` field is needed to make `rowid` stable, which is important because otherwise // the `id`s in this table won't correspond to those in the deleted messages table let messages_table_cmd = "CREATE TABLE IF NOT EXISTS messages ( id INTEGER PRIMARY KEY, public_key TEXT, timestamp INTEGER, data TEXT, signature TEXT, is_deleted INTEGER )"; conn.execute(&messages_table_cmd, params![]).expect("Couldn't create messages table."); // Deleted messages let deleted_messages_table_cmd = "CREATE TABLE IF NOT EXISTS deleted_messages ( id INTEGER PRIMARY KEY, deleted_message_id INTEGER )"; conn.execute(&deleted_messages_table_cmd, params![]) .expect("Couldn't create deleted messages table."); // Moderators let moderators_table_cmd = "CREATE TABLE IF NOT EXISTS moderators ( public_key TEXT )"; conn.execute(&moderators_table_cmd, params![]).expect("Couldn't create moderators table."); // Block list let block_list_table_cmd = "CREATE TABLE IF NOT EXISTS block_list ( public_key TEXT )"; conn.execute(&block_list_table_cmd, params![]).expect("Couldn't create block list table."); // Pending tokens // Note that a given public key can have multiple pending tokens let pending_tokens_table_cmd = "CREATE TABLE IF NOT EXISTS pending_tokens ( public_key TEXT, timestamp INTEGER, token BLOB )"; conn.execute(&pending_tokens_table_cmd, params![]) .expect("Couldn't create pending tokens table."); // Tokens // The token is stored as hex here (rather than as bytes) because it's more convenient for lookup let tokens_table_cmd = "CREATE TABLE IF NOT EXISTS tokens ( public_key TEXT, timestamp INTEGER, token TEXT PRIMARY KEY )"; conn.execute(&tokens_table_cmd, params![]).expect("Couldn't create tokens table."); // Files let files_table_cmd = "CREATE TABLE IF NOT EXISTS files ( id TEXT PRIMARY KEY, timestamp INTEGER )"; conn.execute(&files_table_cmd, params![]).expect("Couldn't create files table."); // User activity table let user_activity_table_cmd = "CREATE TABLE IF NOT EXISTS user_activity ( 
public_key TEXT PRIMARY KEY, last_active INTEGER NOT NULL )"; conn.execute(&user_activity_table_cmd, params![]) .expect("Couldn't create user activity table."); } // Pruning pub async fn prune_tokens_periodically() { let mut timer = tokio::time::interval(chrono::Duration::minutes(10).to_std().unwrap()); loop { timer.tick().await; tokio::spawn(async { prune_tokens().await; }); } } pub async fn prune_pending_tokens_periodically() { let mut timer = tokio::time::interval(chrono::Duration::minutes(10).to_std().unwrap()); loop { timer.tick().await; tokio::spawn(async { prune_pending_tokens().await; }); } } pub async fn prune_files_periodically() { let mut timer = tokio::time::interval(chrono::Duration::days(1).to_std().unwrap()); loop { timer.tick().await; tokio::spawn(async { prune_files(FILE_EXPIRATION).await; }); } } async fn prune_tokens() { let rooms = match get_all_room_ids() { Ok(rooms) => rooms, Err(_) => return, }; for room in rooms { let pool = pool_by_room_id(&room); // It's not catastrophic if we fail to prune the database for a given room let conn = match pool.get() { Ok(conn) => conn, Err(e) => return error!("Couldn't prune tokens due to error: {}.", e), }; let stmt = "DELETE FROM tokens WHERE timestamp < (?1)"; let now = chrono::Utc::now().timestamp(); let expiration = now - TOKEN_EXPIRATION; match conn.execute(&stmt, params![expiration]) { Ok(_) => (), Err(e) => return error!("Couldn't prune tokens due to error: {}.", e), }; } info!("Pruned tokens."); } async fn prune_pending_tokens() { let rooms = match get_all_room_ids() { Ok(rooms) => rooms, Err(_) => return, }; for room in rooms { let pool = pool_by_room_id(&room); // It's not catastrophic if we fail to prune the database for a given room let conn = match pool.get() { Ok(conn) => conn, Err(e) => return error!("Couldn't prune pending tokens due to error: {}.", e),
Ok(_) => (), Err(e) => return error!("Couldn't prune pending tokens due to error: {}.", e), }; } info!("Pruned pending tokens."); } fn get_expired_file_ids( pool: &DatabaseConnectionPool, file_expiration: i64, ) -> Result<Vec<String>, ()> { let now = chrono::Utc::now().timestamp(); let expiration = now - file_expiration; // Get a database connection and open a transaction let conn = pool.get().map_err(|e| { error!("Couldn't get database connection to prune files due to error: {}.", e); })?; // Get the IDs of the files to delete let raw_query = "SELECT id FROM files WHERE timestamp < (?1)"; let mut query = conn.prepare(&raw_query).map_err(|e| { error!("Couldn't prepare query to prune files due to error: {}.", e); })?; let rows = query.query_map(params![expiration], |row| row.get(0)).map_err(|e| { error!("Couldn't prune files due to error: {} (expiration = {}).", e, expiration); })?; Ok(rows.filter_map(|result| result.ok()).collect()) } pub async fn prune_files_for_room( pool: &DatabaseConnectionPool, room: &RoomId, file_expiration: i64, ) { let ids = get_expired_file_ids(&pool, file_expiration); match ids { Ok(ids) if !ids.is_empty() => { // Delete the files let futs = ids.iter().map(|id| async move { ( tokio::fs::remove_file(format!("files/{}_files/{}", room.get_id(), id)).await, id.to_owned(), ) }); let results = futures::future::join_all(futs).await; for (res, id) in results { if let Err(err) = res { error!( "Couldn't delete file: {} from room: {} due to error: {}.", id, room.get_id(), err ); } } let conn = match pool.get() { Ok(conn) => conn, Err(e) => { return error!( "Couldn't get database connection to prune files due to error: {}.", e ) } };
}; let stmt = "DELETE FROM pending_tokens WHERE timestamp < (?1)"; let now = chrono::Utc::now().timestamp(); let expiration = now - PENDING_TOKEN_EXPIRATION; match conn.execute(&stmt, params![expiration]) {
random_line_split
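`pool_by_room_id()` above lazily creates one connection pool per room and caches it in a mutex-guarded map. A rough Python equivalent using a lock-protected dict, with plain sqlite3 connections standing in for r2d2 pools and paths assumed to follow the same `rooms/<id>.db` layout:

import sqlite3
import threading

_pools = {}
_pools_lock = threading.Lock()

def pool_by_room_id(room_id: str) -> sqlite3.Connection:
    with _pools_lock:
        if room_id not in _pools:
            # One database file per room, created on first access.
            _pools[room_id] = sqlite3.connect('rooms/%s.db' % room_id,
                                              check_same_thread=False)
        return _pools[room_id]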
storage.rs
file_name = "database.db"; let db_manager = r2d2_sqlite::SqliteConnectionManager::file(file_name); return r2d2::Pool::new(db_manager).unwrap(); }; } pub fn create_main_database_if_needed() { let pool = &MAIN_POOL; let conn = pool.get().unwrap(); create_main_tables_if_needed(&conn); } fn
(conn: &DatabaseConnection) { let main_table_cmd = "CREATE TABLE IF NOT EXISTS main ( id TEXT PRIMARY KEY, name TEXT, image_id TEXT )"; conn.execute(&main_table_cmd, params![]).expect("Couldn't create main table."); } // Rooms pub const PENDING_TOKEN_EXPIRATION: i64 = 10 * 60; pub const TOKEN_EXPIRATION: i64 = 7 * 24 * 60 * 60; pub const FILE_EXPIRATION: i64 = 15 * 24 * 60 * 60; lazy_static::lazy_static! { static ref POOLS: Mutex<HashMap<String, DatabaseConnectionPool>> = Mutex::new(HashMap::new()); } pub fn pool_by_room_id(room_id: &RoomId) -> DatabaseConnectionPool { let mut pools = POOLS.lock().unwrap(); if let Some(pool) = pools.get(room_id.get_id()) { return pool.clone(); } else { let raw_path = format!("rooms/{}.db", room_id.get_id()); let path = Path::new(&raw_path); let db_manager = r2d2_sqlite::SqliteConnectionManager::file(path); let pool = r2d2::Pool::new(db_manager).unwrap(); pools.insert(room_id.get_id().to_string(), pool); return pools[room_id.get_id()].clone(); } } pub fn create_database_if_needed(room_id: &RoomId) { let pool = pool_by_room_id(room_id); let conn = pool.get().unwrap(); create_room_tables_if_needed(&conn); } pub fn create_room_tables_if_needed(conn: &DatabaseConnection) { // Messages // The `id` field is needed to make `rowid` stable, which is important because otherwise // the `id`s in this table won't correspond to those in the deleted messages table let messages_table_cmd = "CREATE TABLE IF NOT EXISTS messages ( id INTEGER PRIMARY KEY, public_key TEXT, timestamp INTEGER, data TEXT, signature TEXT, is_deleted INTEGER )"; conn.execute(&messages_table_cmd, params![]).expect("Couldn't create messages table."); // Deleted messages let deleted_messages_table_cmd = "CREATE TABLE IF NOT EXISTS deleted_messages ( id INTEGER PRIMARY KEY, deleted_message_id INTEGER )"; conn.execute(&deleted_messages_table_cmd, params![]) .expect("Couldn't create deleted messages table."); // Moderators let moderators_table_cmd = "CREATE TABLE IF NOT EXISTS moderators ( public_key TEXT )"; conn.execute(&moderators_table_cmd, params![]).expect("Couldn't create moderators table."); // Block list let block_list_table_cmd = "CREATE TABLE IF NOT EXISTS block_list ( public_key TEXT )"; conn.execute(&block_list_table_cmd, params![]).expect("Couldn't create block list table."); // Pending tokens // Note that a given public key can have multiple pending tokens let pending_tokens_table_cmd = "CREATE TABLE IF NOT EXISTS pending_tokens ( public_key TEXT, timestamp INTEGER, token BLOB )"; conn.execute(&pending_tokens_table_cmd, params![]) .expect("Couldn't create pending tokens table."); // Tokens // The token is stored as hex here (rather than as bytes) because it's more convenient for lookup let tokens_table_cmd = "CREATE TABLE IF NOT EXISTS tokens ( public_key TEXT, timestamp INTEGER, token TEXT PRIMARY KEY )"; conn.execute(&tokens_table_cmd, params![]).expect("Couldn't create tokens table."); // Files let files_table_cmd = "CREATE TABLE IF NOT EXISTS files ( id TEXT PRIMARY KEY, timestamp INTEGER )"; conn.execute(&files_table_cmd, params![]).expect("Couldn't create files table."); // User activity table let user_activity_table_cmd = "CREATE TABLE IF NOT EXISTS user_activity ( public_key TEXT PRIMARY KEY, last_active INTEGER NOT NULL )"; conn.execute(&user_activity_table_cmd, params![]) .expect("Couldn't create user activity table."); } // Pruning pub async fn prune_tokens_periodically() { let mut timer = tokio::time::interval(chrono::Duration::minutes(10).to_std().unwrap()); loop { 
timer.tick().await; tokio::spawn(async { prune_tokens().await; }); } } pub async fn prune_pending_tokens_periodically() { let mut timer = tokio::time::interval(chrono::Duration::minutes(10).to_std().unwrap()); loop { timer.tick().await; tokio::spawn(async { prune_pending_tokens().await; }); } } pub async fn prune_files_periodically() { let mut timer = tokio::time::interval(chrono::Duration::days(1).to_std().unwrap()); loop { timer.tick().await; tokio::spawn(async { prune_files(FILE_EXPIRATION).await; }); } } async fn prune_tokens() { let rooms = match get_all_room_ids() { Ok(rooms) => rooms, Err(_) => return, }; for room in rooms { let pool = pool_by_room_id(&room); // It's not catastrophic if we fail to prune the database for a given room let conn = match pool.get() { Ok(conn) => conn, Err(e) => return error!("Couldn't prune tokens due to error: {}.", e), }; let stmt = "DELETE FROM tokens WHERE timestamp < (?1)"; let now = chrono::Utc::now().timestamp(); let expiration = now - TOKEN_EXPIRATION; match conn.execute(&stmt, params![expiration]) { Ok(_) => (), Err(e) => return error!("Couldn't prune tokens due to error: {}.", e), }; } info!("Pruned tokens."); } async fn prune_pending_tokens() { let rooms = match get_all_room_ids() { Ok(rooms) => rooms, Err(_) => return, }; for room in rooms { let pool = pool_by_room_id(&room); // It's not catastrophic if we fail to prune the database for a given room let conn = match pool.get() { Ok(conn) => conn, Err(e) => return error!("Couldn't prune pending tokens due to error: {}.", e), }; let stmt = "DELETE FROM pending_tokens WHERE timestamp < (?1)"; let now = chrono::Utc::now().timestamp(); let expiration = now - PENDING_TOKEN_EXPIRATION; match conn.execute(&stmt, params![expiration]) { Ok(_) => (), Err(e) => return error!("Couldn't prune pending tokens due to error: {}.", e), }; } info!("Pruned pending tokens."); } fn get_expired_file_ids( pool: &DatabaseConnectionPool, file_expiration: i64, ) -> Result<Vec<String>, ()> { let now = chrono::Utc::now().timestamp(); let expiration = now - file_expiration; // Get a database connection and open a transaction let conn = pool.get().map_err(|e| { error!("Couldn't get database connection to prune files due to error: {}.", e); })?; // Get the IDs of the files to delete let raw_query = "SELECT id FROM files WHERE timestamp < (?1)"; let mut query = conn.prepare(&raw_query).map_err(|e| { error!("Couldn't prepare query to prune files due to error: {}.", e); })?; let rows = query.query_map(params![expiration], |row| row.get(0)).map_err(|e| { error!("Couldn't prune files due to error: {} (expiration = {}).", e, expiration); })?; Ok(rows.filter_map(|result| result.ok()).collect()) } pub async fn prune_files_for_room( pool: &DatabaseConnectionPool, room: &RoomId, file_expiration: i64, ) { let ids = get_expired_file_ids(&pool, file_expiration); match ids { Ok(ids) if !ids.is_empty() => { // Delete the files let futs = ids.iter().map(|id| async move { ( tokio::fs::remove_file(format!("files/{}_files/{}", room.get_id(), id)).await, id.to_owned(), ) }); let results = futures::future::join_all(futs).await; for (res, id) in results { if let Err(err) = res { error!( "Couldn't delete file: {} from room: {} due to error: {}.", id, room.get_id(), err ); } } let conn = match pool.get() { Ok(conn) => conn, Err(e) => { return error!( "Couldn't get database connection to prune files due to error: {}.", e ) } };
create_main_tables_if_needed
identifier_name
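The `prune_*_periodically()` functions above run a fixed-interval timer and spawn a pruning task on every tick. A sketch of the same shape using asyncio (the 10-minute interval comes from the Rust code; note tokio's interval also ticks once immediately, which this simplification skips):

import asyncio

async def prune_periodically(prune_task, interval_secs: int = 10 * 60):
    # Approximates tokio::time::interval + tokio::spawn.
    while True:
        await asyncio.sleep(interval_secs)
        asyncio.create_task(prune_task())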
grid.go
.keywords, request.user.keywords, // and request.imp[0].ext.keywords, and request.ext.keywords. Invalid keywords in request.imp[0].ext.keywords are not incorporated. // Invalid keywords in request.ext.keywords.site and request.ext.keywords.user are dropped. func buildConsolidatedKeywordsReqExt(openRTBUser, openRTBSite string, firstImpExt, requestExt json.RawMessage) (json.RawMessage, error) { // unmarshal ext to object map requestExtMap := parseExtToMap(requestExt) firstImpExtMap := parseExtToMap(firstImpExt) // extract `keywords` field requestExtKeywordsMap := extractKeywordsMap(requestExtMap) firstImpExtKeywordsMap := extractBidderKeywordsMap(firstImpExtMap) // parse + merge keywords keywords := parseKeywordsFromMap(requestExtKeywordsMap) // request.ext.keywords mergeKeywords(keywords, parseKeywordsFromMap(firstImpExtKeywordsMap)) // request.imp[0].ext.bidder.keywords mergeKeywords(keywords, parseKeywordsFromOpenRTB(openRTBUser, "user")) // request.user.keywords mergeKeywords(keywords, parseKeywordsFromOpenRTB(openRTBSite, "site")) // request.site.keywords // overlay site + user keywords if site, exists := keywords["site"]; exists && len(site) > 0 { requestExtKeywordsMap["site"] = site } else { delete(requestExtKeywordsMap, "site") } if user, exists := keywords["user"]; exists && len(user) > 0 { requestExtKeywordsMap["user"] = user } else { delete(requestExtKeywordsMap, "user") } // reconcile keywords with request.ext if len(requestExtKeywordsMap) > 0 { requestExtMap["keywords"] = requestExtKeywordsMap } else { delete(requestExtMap, "keywords") } // marshal final result if len(requestExtMap) > 0 { return json.Marshal(requestExtMap) } return nil, nil } func parseExtToMap(ext json.RawMessage) map[string]interface{} { var root map[string]interface{} if err := json.Unmarshal(ext, &root); err != nil { return make(map[string]interface{}) } return root } func extractKeywordsMap(ext map[string]interface{}) map[string]interface{} { if keywords, exists := maputil.ReadEmbeddedMap(ext, "keywords"); exists { return keywords } return make(map[string]interface{}) } func extractBidderKeywordsMap(ext map[string]interface{}) map[string]interface{} { if bidder, exists := maputil.ReadEmbeddedMap(ext, "bidder"); exists { return extractKeywordsMap(bidder) } return make(map[string]interface{}) } func parseKeywordsFromMap(extKeywords map[string]interface{}) Keywords { keywords := make(Keywords) for k, v := range extKeywords { // keywords may only be provided in the site and user sections if k != "site" && k != "user" { continue } // the site or user sections must be an object if section, ok := v.(map[string]interface{}); ok { keywords[k] = parseKeywordsFromSection(section) } } return keywords } func parseKeywordsFromSection(section map[string]interface{}) KeywordsPublisher { keywordsPublishers := make(KeywordsPublisher)
for publisherKey, publisherValue := range section { // publisher value must be a slice publisherValueSlice, ok := publisherValue.([]interface{}) if !ok { continue } for _, publisherValueItem := range publisherValueSlice { // item must be an object publisherItem, ok := publisherValueItem.(map[string]interface{}) if !ok { continue } // publisher item must have a name publisherName, ok := maputil.ReadEmbeddedString(publisherItem, "name") if !ok { continue } var segments []KeywordSegment // extract valid segments if segmentsSlice, exists := maputil.ReadEmbeddedSlice(publisherItem, "segments"); exists { for _, segment := range segmentsSlice { if segmentMap, ok := segment.(map[string]interface{}); ok { name, hasName := maputil.ReadEmbeddedString(segmentMap, "name") value, hasValue := maputil.ReadEmbeddedString(segmentMap, "value") if hasName && hasValue { segments = append(segments, KeywordSegment{Name: name, Value: value}) } } } } // ensure consistent ordering for publisher item map publisherItemKeys := make([]string, 0, len(publisherItem)) for v := range publisherItem { publisherItemKeys = append(publisherItemKeys, v) } sort.Strings(publisherItemKeys) // compose compatible alternate segment format for _, potentialSegmentName := range publisherItemKeys { potentialSegmentValues := publisherItem[potentialSegmentName] // values must be an array if valuesSlice, ok := potentialSegmentValues.([]interface{}); ok { for _, value := range valuesSlice { if valueAsString, ok := value.(string); ok { segments = append(segments, KeywordSegment{Name: potentialSegmentName, Value: valueAsString}) } } } } if len(segments) > 0 { keywordsPublishers[publisherKey] = append(keywordsPublishers[publisherKey], KeywordsPublisherItem{Name: publisherName, Segments: segments}) } } } return keywordsPublishers } func parseKeywordsFromOpenRTB(keywords, section string) Keywords { keywordsSplit := strings.Split(keywords, ",") segments := make([]KeywordSegment, 0, len(keywordsSplit)) for _, v := range keywordsSplit { if v != "" { segments = append(segments, KeywordSegment{Name: "keywords", Value: v}) } } if len(segments) > 0 { return map[string]KeywordsPublisher{section: map[string][]KeywordsPublisherItem{"ortb2": {{Name: "keywords", Segments: segments}}}} } return make(Keywords) } func mergeKeywords(a, b Keywords) { for key, values := range b { if _, sectionExists := a[key]; !sectionExists { a[key] = KeywordsPublisher{} } for publisherKey, publisherValues := range values { a[key][publisherKey] = append(publisherValues, a[key][publisherKey]...) 
} } } func setImpExtKeywords(request *openrtb2.BidRequest) error { userKeywords := "" if request.User != nil { userKeywords = request.User.Keywords } siteKeywords := "" if request.Site != nil { siteKeywords = request.Site.Keywords } var err error request.Ext, err = buildConsolidatedKeywordsReqExt(userKeywords, siteKeywords, request.Imp[0].Ext, request.Ext) return err } func processImp(imp *openrtb2.Imp) error { // get the grid extension var ext adapters.ExtImpBidder var gridExt openrtb_ext.ExtImpGrid if err := json.Unmarshal(imp.Ext, &ext); err != nil { return err } if err := json.Unmarshal(ext.Bidder, &gridExt); err != nil { return err } if gridExt.Uid == 0 { err := &errortypes.BadInput{ Message: "uid is empty", } return err } // no error return nil } func setImpExtData(imp openrtb2.Imp) openrtb2.Imp { var ext ExtImp if err := json.Unmarshal(imp.Ext, &ext); err != nil { return imp } if ext.Data != nil && ext.Data.AdServer != nil && ext.Data.AdServer.AdSlot != "" { ext.Gpid = ext.Data.AdServer.AdSlot extJSON, err := json.Marshal(ext) if err == nil { imp.Ext = extJSON } } return imp } func fixNative(req json.RawMessage) (json.RawMessage, error) { var gridReq map[string]interface{} var parsedRequest map[string]interface{} if err := json.Unmarshal(req, &gridReq); err != nil { return req, nil } if imps, exists := maputil.ReadEmbeddedSlice(gridReq, "imp"); exists { for _, imp := range imps { if gridImp, ok := imp.(map[string]interface{}); ok { native, hasNative := maputil.ReadEmbeddedMap(gridImp, "native") if hasNative { request, hasRequest := maputil.ReadEmbeddedString(native, "request") if hasRequest { delete(native, "request") if err := json.Unmarshal([]byte(request), &parsedRequest); err == nil { native["request_native"] = parsedRequest } else { native["request_native"] = request } } } } } } return json.Marshal(gridReq) } // MakeRequests makes the HTTP requests which should be made to fetch bids. func (a *GridAdapter) MakeRequests(request *openrtb2.BidRequest, reqInfo *adapters.ExtraRequestInfo) ([]*adapters.RequestData, []error) { var errors = make([]error, 0)
random_line_split
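The `parseKeywordsFromOpenRTB` helper in the grid.go record above turns a comma-separated OpenRTB keywords string into one segment per non-empty token. A minimal, self-contained sketch of just that step, with a local `segment` type standing in for the adapter's `KeywordSegment`:

```go
package main

import (
	"fmt"
	"strings"
)

// segment mirrors the shape used in the record; the type name here is a
// local stand-in, not the adapter's exported type.
type segment struct {
	Name  string
	Value string
}

// fromOpenRTB shows the core of parseKeywordsFromOpenRTB: each non-empty
// comma-separated token becomes a segment named "keywords".
func fromOpenRTB(keywords string) []segment {
	parts := strings.Split(keywords, ",")
	segments := make([]segment, 0, len(parts))
	for _, p := range parts {
		if p != "" {
			segments = append(segments, segment{Name: "keywords", Value: p})
		}
	}
	return segments
}

func main() {
	fmt.Println(fromOpenRTB("sports,,news")) // [{keywords sports} {keywords news}]
}
```

Note that, as in the record, empty tokens are dropped but surrounding whitespace is preserved as-is.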
grid.go
, request.user.keywords, // and request.imp[0].ext.keywords, and request.ext.keywords. Invalid keywords in request.imp[0].ext.keywords are not incorporated. // Invalid keywords in request.ext.keywords.site and request.ext.keywords.user are dropped. func buildConsolidatedKeywordsReqExt(openRTBUser, openRTBSite string, firstImpExt, requestExt json.RawMessage) (json.RawMessage, error) { // unmarshal ext to object map requestExtMap := parseExtToMap(requestExt) firstImpExtMap := parseExtToMap(firstImpExt) // extract `keywords` field requestExtKeywordsMap := extractKeywordsMap(requestExtMap) firstImpExtKeywordsMap := extractBidderKeywordsMap(firstImpExtMap) // parse + merge keywords keywords := parseKeywordsFromMap(requestExtKeywordsMap) // request.ext.keywords mergeKeywords(keywords, parseKeywordsFromMap(firstImpExtKeywordsMap)) // request.imp[0].ext.bidder.keywords mergeKeywords(keywords, parseKeywordsFromOpenRTB(openRTBUser, "user")) // request.user.keywords mergeKeywords(keywords, parseKeywordsFromOpenRTB(openRTBSite, "site")) // request.site.keywords // overlay site + user keywords if site, exists := keywords["site"]; exists && len(site) > 0 { requestExtKeywordsMap["site"] = site } else { delete(requestExtKeywordsMap, "site") } if user, exists := keywords["user"]; exists && len(user) > 0 { requestExtKeywordsMap["user"] = user } else { delete(requestExtKeywordsMap, "user") } // reconcile keywords with request.ext if len(requestExtKeywordsMap) > 0 { requestExtMap["keywords"] = requestExtKeywordsMap } else { delete(requestExtMap, "keywords") } // marshal final result if len(requestExtMap) > 0 { return json.Marshal(requestExtMap) } return nil, nil } func parseExtToMap(ext json.RawMessage) map[string]interface{} { var root map[string]interface{} if err := json.Unmarshal(ext, &root); err != nil { return make(map[string]interface{}) } return root } func extractKeywordsMap(ext map[string]interface{}) map[string]interface{} { if keywords, exists := maputil.ReadEmbeddedMap(ext, "keywords"); exists { return keywords } return make(map[string]interface{}) } func extractBidderKeywordsMap(ext map[string]interface{}) map[string]interface{} { if bidder, exists := maputil.ReadEmbeddedMap(ext, "bidder"); exists { return extractKeywordsMap(bidder) } return make(map[string]interface{}) } func
(extKeywords map[string]interface{}) Keywords { keywords := make(Keywords) for k, v := range extKeywords { // keywords may only be provided in the site and user sections if k != "site" && k != "user" { continue } // the site or user sections must be an object if section, ok := v.(map[string]interface{}); ok { keywords[k] = parseKeywordsFromSection(section) } } return keywords } func parseKeywordsFromSection(section map[string]interface{}) KeywordsPublisher { keywordsPublishers := make(KeywordsPublisher) for publisherKey, publisherValue := range section { // publisher value must be a slice publisherValueSlice, ok := publisherValue.([]interface{}) if !ok { continue } for _, publisherValueItem := range publisherValueSlice { // item must be an object publisherItem, ok := publisherValueItem.(map[string]interface{}) if !ok { continue } // publisher item must have a name publisherName, ok := maputil.ReadEmbeddedString(publisherItem, "name") if !ok { continue } var segments []KeywordSegment // extract valid segments if segmentsSlice, exists := maputil.ReadEmbeddedSlice(publisherItem, "segments"); exists { for _, segment := range segmentsSlice { if segmentMap, ok := segment.(map[string]interface{}); ok { name, hasName := maputil.ReadEmbeddedString(segmentMap, "name") value, hasValue := maputil.ReadEmbeddedString(segmentMap, "value") if hasName && hasValue { segments = append(segments, KeywordSegment{Name: name, Value: value}) } } } } // ensure consistent ordering for publisher item map publisherItemKeys := make([]string, 0, len(publisherItem)) for v := range publisherItem { publisherItemKeys = append(publisherItemKeys, v) } sort.Strings(publisherItemKeys) // compose compatible alternate segment format for _, potentialSegmentName := range publisherItemKeys { potentialSegmentValues := publisherItem[potentialSegmentName] // values must be an array if valuesSlice, ok := potentialSegmentValues.([]interface{}); ok { for _, value := range valuesSlice { if valueAsString, ok := value.(string); ok { segments = append(segments, KeywordSegment{Name: potentialSegmentName, Value: valueAsString}) } } } } if len(segments) > 0 { keywordsPublishers[publisherKey] = append(keywordsPublishers[publisherKey], KeywordsPublisherItem{Name: publisherName, Segments: segments}) } } } return keywordsPublishers } func parseKeywordsFromOpenRTB(keywords, section string) Keywords { keywordsSplit := strings.Split(keywords, ",") segments := make([]KeywordSegment, 0, len(keywordsSplit)) for _, v := range keywordsSplit { if v != "" { segments = append(segments, KeywordSegment{Name: "keywords", Value: v}) } } if len(segments) > 0 { return map[string]KeywordsPublisher{section: map[string][]KeywordsPublisherItem{"ortb2": {{Name: "keywords", Segments: segments}}}} } return make(Keywords) } func mergeKeywords(a, b Keywords) { for key, values := range b { if _, sectionExists := a[key]; !sectionExists { a[key] = KeywordsPublisher{} } for publisherKey, publisherValues := range values { a[key][publisherKey] = append(publisherValues, a[key][publisherKey]...) 
} } } func setImpExtKeywords(request *openrtb2.BidRequest) error { userKeywords := "" if request.User != nil { userKeywords = request.User.Keywords } siteKeywords := "" if request.Site != nil { siteKeywords = request.Site.Keywords } var err error request.Ext, err = buildConsolidatedKeywordsReqExt(userKeywords, siteKeywords, request.Imp[0].Ext, request.Ext) return err } func processImp(imp *openrtb2.Imp) error { // get the grid extension var ext adapters.ExtImpBidder var gridExt openrtb_ext.ExtImpGrid if err := json.Unmarshal(imp.Ext, &ext); err != nil { return err } if err := json.Unmarshal(ext.Bidder, &gridExt); err != nil { return err } if gridExt.Uid == 0 { err := &errortypes.BadInput{ Message: "uid is empty", } return err } // no error return nil } func setImpExtData(imp openrtb2.Imp) openrtb2.Imp { var ext ExtImp if err := json.Unmarshal(imp.Ext, &ext); err != nil { return imp } if ext.Data != nil && ext.Data.AdServer != nil && ext.Data.AdServer.AdSlot != "" { ext.Gpid = ext.Data.AdServer.AdSlot extJSON, err := json.Marshal(ext) if err == nil { imp.Ext = extJSON } } return imp } func fixNative(req json.RawMessage) (json.RawMessage, error) { var gridReq map[string]interface{} var parsedRequest map[string]interface{} if err := json.Unmarshal(req, &gridReq); err != nil { return req, nil } if imps, exists := maputil.ReadEmbeddedSlice(gridReq, "imp"); exists { for _, imp := range imps { if gridImp, ok := imp.(map[string]interface{}); ok { native, hasNative := maputil.ReadEmbeddedMap(gridImp, "native") if hasNative { request, hasRequest := maputil.ReadEmbeddedString(native, "request") if hasRequest { delete(native, "request") if err := json.Unmarshal([]byte(request), &parsedRequest); err == nil { native["request_native"] = parsedRequest } else { native["request_native"] = request } } } } } } return json.Marshal(gridReq) } // MakeRequests makes the HTTP requests which should be made to fetch bids. func (a *GridAdapter) MakeRequests(request *openrtb2.BidRequest, reqInfo *adapters.ExtraRequestInfo) ([]*adapters.RequestData, []error) { var errors = make([]error, 0)
parseKeywordsFromMap
identifier_name
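The middle of this record is the `parseKeywordsFromMap` identifier, whose body filters a decoded `ext` map down to the `site` and `user` sections and keeps only values that are JSON objects. A stripped-down sketch of that filtering pattern, using local names rather than the adapter's `Keywords` types:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// pickSections mimics the shape of parseKeywordsFromMap: keep only the
// "site" and "user" keys, and only when their values decode as objects.
func pickSections(raw []byte) map[string]map[string]interface{} {
	var root map[string]interface{}
	if err := json.Unmarshal(raw, &root); err != nil {
		return map[string]map[string]interface{}{}
	}
	out := make(map[string]map[string]interface{})
	for k, v := range root {
		if k != "site" && k != "user" {
			continue // keywords may only live in the site and user sections
		}
		if section, ok := v.(map[string]interface{}); ok {
			out[k] = section
		}
	}
	return out
}

func main() {
	raw := []byte(`{"site":{"pub":[]},"user":"not-an-object","other":{}}`)
	fmt.Println(pickSections(raw)) // map[site:map[pub:[]]]
}
```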
grid.go
, request.user.keywords, // and request.imp[0].ext.keywords, and request.ext.keywords. Invalid keywords in request.imp[0].ext.keywords are not incorporated. // Invalid keywords in request.ext.keywords.site and request.ext.keywords.user are dropped. func buildConsolidatedKeywordsReqExt(openRTBUser, openRTBSite string, firstImpExt, requestExt json.RawMessage) (json.RawMessage, error) { // unmarshal ext to object map requestExtMap := parseExtToMap(requestExt) firstImpExtMap := parseExtToMap(firstImpExt) // extract `keywords` field requestExtKeywordsMap := extractKeywordsMap(requestExtMap) firstImpExtKeywordsMap := extractBidderKeywordsMap(firstImpExtMap) // parse + merge keywords keywords := parseKeywordsFromMap(requestExtKeywordsMap) // request.ext.keywords mergeKeywords(keywords, parseKeywordsFromMap(firstImpExtKeywordsMap)) // request.imp[0].ext.bidder.keywords mergeKeywords(keywords, parseKeywordsFromOpenRTB(openRTBUser, "user")) // request.user.keywords mergeKeywords(keywords, parseKeywordsFromOpenRTB(openRTBSite, "site")) // request.site.keywords // overlay site + user keywords if site, exists := keywords["site"]; exists && len(site) > 0 { requestExtKeywordsMap["site"] = site } else { delete(requestExtKeywordsMap, "site") } if user, exists := keywords["user"]; exists && len(user) > 0 { requestExtKeywordsMap["user"] = user } else { delete(requestExtKeywordsMap, "user") } // reconcile keywords with request.ext if len(requestExtKeywordsMap) > 0 { requestExtMap["keywords"] = requestExtKeywordsMap } else { delete(requestExtMap, "keywords") } // marshal final result if len(requestExtMap) > 0 { return json.Marshal(requestExtMap) } return nil, nil } func parseExtToMap(ext json.RawMessage) map[string]interface{} { var root map[string]interface{} if err := json.Unmarshal(ext, &root); err != nil { return make(map[string]interface{}) } return root } func extractKeywordsMap(ext map[string]interface{}) map[string]interface{} { if keywords, exists := maputil.ReadEmbeddedMap(ext, "keywords"); exists { return keywords } return make(map[string]interface{}) } func extractBidderKeywordsMap(ext map[string]interface{}) map[string]interface{} { if bidder, exists := maputil.ReadEmbeddedMap(ext, "bidder"); exists { return extractKeywordsMap(bidder) } return make(map[string]interface{}) } func parseKeywordsFromMap(extKeywords map[string]interface{}) Keywords { keywords := make(Keywords) for k, v := range extKeywords { // keywords may only be provided in the site and user sections if k != "site" && k != "user" { continue } // the site or user sections must be an object if section, ok := v.(map[string]interface{}); ok { keywords[k] = parseKeywordsFromSection(section) } } return keywords } func parseKeywordsFromSection(section map[string]interface{}) KeywordsPublisher { keywordsPublishers := make(KeywordsPublisher) for publisherKey, publisherValue := range section { // publisher value must be a slice publisherValueSlice, ok := publisherValue.([]interface{}) if !ok { continue } for _, publisherValueItem := range publisherValueSlice { // item must be an object publisherItem, ok := publisherValueItem.(map[string]interface{}) if !ok { continue } // publisher item must have a name publisherName, ok := maputil.ReadEmbeddedString(publisherItem, "name") if !ok { continue } var segments []KeywordSegment // extract valid segments if segmentsSlice, exists := maputil.ReadEmbeddedSlice(publisherItem, "segments"); exists { for _, segment := range segmentsSlice { if segmentMap, ok := segment.(map[string]interface{}); ok { name, 
hasName := maputil.ReadEmbeddedString(segmentMap, "name") value, hasValue := maputil.ReadEmbeddedString(segmentMap, "value") if hasName && hasValue { segments = append(segments, KeywordSegment{Name: name, Value: value}) } } } } // ensure consistent ordering for publisher item map publisherItemKeys := make([]string, 0, len(publisherItem)) for v := range publisherItem
sort.Strings(publisherItemKeys) // compose compatible alternate segment format for _, potentialSegmentName := range publisherItemKeys { potentialSegmentValues := publisherItem[potentialSegmentName] // values must be an array if valuesSlice, ok := potentialSegmentValues.([]interface{}); ok { for _, value := range valuesSlice { if valueAsString, ok := value.(string); ok { segments = append(segments, KeywordSegment{Name: potentialSegmentName, Value: valueAsString}) } } } } if len(segments) > 0 { keywordsPublishers[publisherKey] = append(keywordsPublishers[publisherKey], KeywordsPublisherItem{Name: publisherName, Segments: segments}) } } } return keywordsPublishers } func parseKeywordsFromOpenRTB(keywords, section string) Keywords { keywordsSplit := strings.Split(keywords, ",") segments := make([]KeywordSegment, 0, len(keywordsSplit)) for _, v := range keywordsSplit { if v != "" { segments = append(segments, KeywordSegment{Name: "keywords", Value: v}) } } if len(segments) > 0 { return map[string]KeywordsPublisher{section: map[string][]KeywordsPublisherItem{"ortb2": {{Name: "keywords", Segments: segments}}}} } return make(Keywords) } func mergeKeywords(a, b Keywords) { for key, values := range b { if _, sectionExists := a[key]; !sectionExists { a[key] = KeywordsPublisher{} } for publisherKey, publisherValues := range values { a[key][publisherKey] = append(publisherValues, a[key][publisherKey]...) } } } func setImpExtKeywords(request *openrtb2.BidRequest) error { userKeywords := "" if request.User != nil { userKeywords = request.User.Keywords } siteKeywords := "" if request.Site != nil { siteKeywords = request.Site.Keywords } var err error request.Ext, err = buildConsolidatedKeywordsReqExt(userKeywords, siteKeywords, request.Imp[0].Ext, request.Ext) return err } func processImp(imp *openrtb2.Imp) error { // get the grid extension var ext adapters.ExtImpBidder var gridExt openrtb_ext.ExtImpGrid if err := json.Unmarshal(imp.Ext, &ext); err != nil { return err } if err := json.Unmarshal(ext.Bidder, &gridExt); err != nil { return err } if gridExt.Uid == 0 { err := &errortypes.BadInput{ Message: "uid is empty", } return err } // no error return nil } func setImpExtData(imp openrtb2.Imp) openrtb2.Imp { var ext ExtImp if err := json.Unmarshal(imp.Ext, &ext); err != nil { return imp } if ext.Data != nil && ext.Data.AdServer != nil && ext.Data.AdServer.AdSlot != "" { ext.Gpid = ext.Data.AdServer.AdSlot extJSON, err := json.Marshal(ext) if err == nil { imp.Ext = extJSON } } return imp } func fixNative(req json.RawMessage) (json.RawMessage, error) { var gridReq map[string]interface{} var parsedRequest map[string]interface{} if err := json.Unmarshal(req, &gridReq); err != nil { return req, nil } if imps, exists := maputil.ReadEmbeddedSlice(gridReq, "imp"); exists { for _, imp := range imps { if gridImp, ok := imp.(map[string]interface{}); ok { native, hasNative := maputil.ReadEmbeddedMap(gridImp, "native") if hasNative { request, hasRequest := maputil.ReadEmbeddedString(native, "request") if hasRequest { delete(native, "request") if err := json.Unmarshal([]byte(request), &parsedRequest); err == nil { native["request_native"] = parsedRequest } else { native["request_native"] = request } } } } } } return json.Marshal(gridReq) } // MakeRequests makes the HTTP requests which should be made to fetch bids. func (a *GridAdapter) MakeRequests(request *openrtb2.BidRequest, reqInfo *adapters.ExtraRequestInfo) ([]*adapters.RequestData, []error) { var errors = make([]error, 0
{ publisherItemKeys = append(publisherItemKeys, v) }
conditional_block
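The conditional block completed in this record exists only to make map iteration deterministic: Go randomizes map order, so the code collects the keys, sorts them, and then walks the sorted slice. The same pattern in isolation:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	item := map[string][]string{"b": {"2"}, "a": {"1"}, "c": {"3"}}

	// Go map iteration order is randomized, so the record first collects
	// the keys and sorts them to get a stable segment ordering.
	keys := make([]string, 0, len(item))
	for k := range item {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for _, k := range keys {
		fmt.Println(k, item[k]) // always a, b, c — run after run
	}
}
```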
grid.go
.keywords, request.user.keywords, // and request.imp[0].ext.keywords, and request.ext.keywords. Invalid keywords in request.imp[0].ext.keywords are not incorporated. // Invalid keywords in request.ext.keywords.site and request.ext.keywords.user are dropped. func buildConsolidatedKeywordsReqExt(openRTBUser, openRTBSite string, firstImpExt, requestExt json.RawMessage) (json.RawMessage, error) { // unmarshal ext to object map requestExtMap := parseExtToMap(requestExt) firstImpExtMap := parseExtToMap(firstImpExt) // extract `keywords` field requestExtKeywordsMap := extractKeywordsMap(requestExtMap) firstImpExtKeywordsMap := extractBidderKeywordsMap(firstImpExtMap) // parse + merge keywords keywords := parseKeywordsFromMap(requestExtKeywordsMap) // request.ext.keywords mergeKeywords(keywords, parseKeywordsFromMap(firstImpExtKeywordsMap)) // request.imp[0].ext.bidder.keywords mergeKeywords(keywords, parseKeywordsFromOpenRTB(openRTBUser, "user")) // request.user.keywords mergeKeywords(keywords, parseKeywordsFromOpenRTB(openRTBSite, "site")) // request.site.keywords // overlay site + user keywords if site, exists := keywords["site"]; exists && len(site) > 0 { requestExtKeywordsMap["site"] = site } else { delete(requestExtKeywordsMap, "site") } if user, exists := keywords["user"]; exists && len(user) > 0 { requestExtKeywordsMap["user"] = user } else { delete(requestExtKeywordsMap, "user") } // reconcile keywords with request.ext if len(requestExtKeywordsMap) > 0 { requestExtMap["keywords"] = requestExtKeywordsMap } else { delete(requestExtMap, "keywords") } // marshal final result if len(requestExtMap) > 0 { return json.Marshal(requestExtMap) } return nil, nil } func parseExtToMap(ext json.RawMessage) map[string]interface{} { var root map[string]interface{} if err := json.Unmarshal(ext, &root); err != nil { return make(map[string]interface{}) } return root } func extractKeywordsMap(ext map[string]interface{}) map[string]interface{} { if keywords, exists := maputil.ReadEmbeddedMap(ext, "keywords"); exists { return keywords } return make(map[string]interface{}) } func extractBidderKeywordsMap(ext map[string]interface{}) map[string]interface{} { if bidder, exists := maputil.ReadEmbeddedMap(ext, "bidder"); exists { return extractKeywordsMap(bidder) } return make(map[string]interface{}) } func parseKeywordsFromMap(extKeywords map[string]interface{}) Keywords { keywords := make(Keywords) for k, v := range extKeywords { // keywords may only be provided in the site and user sections if k != "site" && k != "user" { continue } // the site or user sections must be an object if section, ok := v.(map[string]interface{}); ok { keywords[k] = parseKeywordsFromSection(section) } } return keywords } func parseKeywordsFromSection(section map[string]interface{}) KeywordsPublisher
// extract valid segments if segmentsSlice, exists := maputil.ReadEmbeddedSlice(publisherItem, "segments"); exists { for _, segment := range segmentsSlice { if segmentMap, ok := segment.(map[string]interface{}); ok { name, hasName := maputil.ReadEmbeddedString(segmentMap, "name") value, hasValue := maputil.ReadEmbeddedString(segmentMap, "value") if hasName && hasValue { segments = append(segments, KeywordSegment{Name: name, Value: value}) } } } } // ensure consistent ordering for publisher item map publisherItemKeys := make([]string, 0, len(publisherItem)) for v := range publisherItem { publisherItemKeys = append(publisherItemKeys, v) } sort.Strings(publisherItemKeys) // compose compatible alternate segment format for _, potentialSegmentName := range publisherItemKeys { potentialSegmentValues := publisherItem[potentialSegmentName] // values must be an array if valuesSlice, ok := potentialSegmentValues.([]interface{}); ok { for _, value := range valuesSlice { if valueAsString, ok := value.(string); ok { segments = append(segments, KeywordSegment{Name: potentialSegmentName, Value: valueAsString}) } } } } if len(segments) > 0 { keywordsPublishers[publisherKey] = append(keywordsPublishers[publisherKey], KeywordsPublisherItem{Name: publisherName, Segments: segments}) } } } return keywordsPublishers } func parseKeywordsFromOpenRTB(keywords, section string) Keywords { keywordsSplit := strings.Split(keywords, ",") segments := make([]KeywordSegment, 0, len(keywordsSplit)) for _, v := range keywordsSplit { if v != "" { segments = append(segments, KeywordSegment{Name: "keywords", Value: v}) } } if len(segments) > 0 { return map[string]KeywordsPublisher{section: map[string][]KeywordsPublisherItem{"ortb2": {{Name: "keywords", Segments: segments}}}} } return make(Keywords) } func mergeKeywords(a, b Keywords) { for key, values := range b { if _, sectionExists := a[key]; !sectionExists { a[key] = KeywordsPublisher{} } for publisherKey, publisherValues := range values { a[key][publisherKey] = append(publisherValues, a[key][publisherKey]...) 
} } } func setImpExtKeywords(request *openrtb2.BidRequest) error { userKeywords := "" if request.User != nil { userKeywords = request.User.Keywords } siteKeywords := "" if request.Site != nil { siteKeywords = request.Site.Keywords } var err error request.Ext, err = buildConsolidatedKeywordsReqExt(userKeywords, siteKeywords, request.Imp[0].Ext, request.Ext) return err } func processImp(imp *openrtb2.Imp) error { // get the grid extension var ext adapters.ExtImpBidder var gridExt openrtb_ext.ExtImpGrid if err := json.Unmarshal(imp.Ext, &ext); err != nil { return err } if err := json.Unmarshal(ext.Bidder, &gridExt); err != nil { return err } if gridExt.Uid == 0 { err := &errortypes.BadInput{ Message: "uid is empty", } return err } // no error return nil } func setImpExtData(imp openrtb2.Imp) openrtb2.Imp { var ext ExtImp if err := json.Unmarshal(imp.Ext, &ext); err != nil { return imp } if ext.Data != nil && ext.Data.AdServer != nil && ext.Data.AdServer.AdSlot != "" { ext.Gpid = ext.Data.AdServer.AdSlot extJSON, err := json.Marshal(ext) if err == nil { imp.Ext = extJSON } } return imp } func fixNative(req json.RawMessage) (json.RawMessage, error) { var gridReq map[string]interface{} var parsedRequest map[string]interface{} if err := json.Unmarshal(req, &gridReq); err != nil { return req, nil } if imps, exists := maputil.ReadEmbeddedSlice(gridReq, "imp"); exists { for _, imp := range imps { if gridImp, ok := imp.(map[string]interface{}); ok { native, hasNative := maputil.ReadEmbeddedMap(gridImp, "native") if hasNative { request, hasRequest := maputil.ReadEmbeddedString(native, "request") if hasRequest { delete(native, "request") if err := json.Unmarshal([]byte(request), &parsedRequest); err == nil { native["request_native"] = parsedRequest } else { native["request_native"] = request } } } } } } return json.Marshal(gridReq) } // MakeRequests makes the HTTP requests which should be made to fetch bids. func (a *GridAdapter) MakeRequests(request *openrtb2.BidRequest, reqInfo *adapters.ExtraRequestInfo) ([]*adapters.RequestData, []error) { var errors = make([]error, 0)
{ keywordsPublishers := make(KeywordsPublisher) for publisherKey, publisherValue := range section { // publisher value must be a slice publisherValueSlice, ok := publisherValue.([]interface{}) if !ok { continue } for _, publisherValueItem := range publisherValueSlice { // item must be an object publisherItem, ok := publisherValueItem.(map[string]interface{}) if !ok { continue } // publisher item must have a name publisherName, ok := maputil.ReadEmbeddedString(publisherItem, "name") if !ok { continue } var segments []KeywordSegment
identifier_body
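This record also carries the `fixNative` routine, which moves a stringified `native.request` into `request_native`, parsed as JSON when possible and kept as a raw string otherwise. A freestanding sketch of that one transformation; the flat map shape here is an assumption for illustration, not the adapter's full request:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// renameRequest shows the fixNative idea in isolation: drop the stringified
// "request" field and re-attach it, parsed when possible, as "request_native".
func renameRequest(native map[string]interface{}) {
	request, ok := native["request"].(string)
	if !ok {
		return
	}
	delete(native, "request")
	var parsed map[string]interface{}
	if err := json.Unmarshal([]byte(request), &parsed); err == nil {
		native["request_native"] = parsed // valid JSON: keep the object form
	} else {
		native["request_native"] = request // otherwise keep the raw string
	}
}

func main() {
	native := map[string]interface{}{"request": `{"ver":"1.2"}`}
	renameRequest(native)
	out, _ := json.Marshal(native)
	fmt.Println(string(out)) // {"request_native":{"ver":"1.2"}}
}
```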
decode.rs
<'a> { pub prefix: &'a str, pub local: &'a str, } impl Name<'_> { /// Check if a given name matches a tag name composed of `prefix:local` or just `local` pub fn matches(&self, tag_name: &str) -> bool { let split = tag_name.find(':'); match split { None => tag_name == self.local, Some(idx) => { let (prefix, local) = tag_name.split_at(idx); let local = &local[1..]; self.local == local && self.prefix == prefix } } } } #[derive(Debug, PartialEq)] pub struct Attr<'a> { name: Name<'a>, // attribute values can be escaped (eg. with double quotes, so we need a Cow) value: Cow<'a, str>, } #[derive(Debug, PartialEq)] pub struct StartEl<'a> { name: Name<'a>, attributes: Vec<Attr<'a>>, closed: bool, depth: Depth, } /// Xml Start Element /// /// ```xml /// <a:b c="d"> /// ^^^ ^^^^^ /// name attributes /// ``` impl<'a> StartEl<'a> { pub fn depth(&self) -> Depth { self.depth } fn new(local: &'a str, prefix: &'a str, depth: Depth) -> Self { Self { name: Name { prefix, local }, attributes: vec![], closed: false, depth, } } /// Retrieve an attribute with a given key /// /// key `prefix:local` combined as a str, joined by a `:` pub fn attr<'b>(&'b self, key: &'b str) -> Option<&'b str> { self.attributes .iter() .find(|attr| attr.name.matches(key)) .map(|attr| attr.value.as_ref()) } /// Returns whether this `StartEl` matches a given name /// in `prefix:local` form. pub fn matches(&self, pat: &str) -> bool { self.name.matches(pat) } /// Local component of this element's name /// /// ```xml /// <foo:bar> /// ^^^ /// ``` pub fn local(&self) -> &str { self.name.local } /// Prefix component of this elements name (or empty string) /// ```xml /// <foo:bar> /// ^^^ /// ``` pub fn prefix(&self) -> &str { self.name.prefix } /// Returns true of `el` at `depth` is a match for this `start_el` fn end_el(&self, el: ElementEnd, depth: Depth) -> bool { if depth != self.depth { return false; } match el { ElementEnd::Open => false, ElementEnd::Close(prefix, local) => { prefix.as_str() == self.name.prefix && local.as_str() == self.name.local } ElementEnd::Empty => false, } } } /// Xml Document abstraction /// /// This document wraps a lazy tokenizer with depth tracking. /// Constructing a document is essentially free. pub struct Document<'a> { tokenizer: Tokenizer<'a>, depth: Depth, } impl<'a> TryFrom<&'a [u8]> for Document<'a> { type Error = XmlError; fn try_from(value: &'a [u8]) -> Result<Self, Self::Error> { Ok(Document::new( std::str::from_utf8(value).map_err(|err| XmlError::Unhandled(Box::new(err)))?, )) } } impl<'inp> Document<'inp> { pub fn new(doc: &'inp str) -> Self { Document { tokenizer: Tokenizer::from(doc), depth: 0, } } /// "Depth first" iterator /// /// Unlike [`next_tag()`](ScopedDecoder::next_tag), this method returns the next /// start element regardless of depth. This is useful to give a pointer into the middle /// of a document to start reading. 
/// /// ```xml /// <Response> <-- first call returns this: /// <A> <-- next call /// <Nested /> <-- next call returns this /// <MoreNested>hello</MoreNested> <-- then this: /// </A> /// <B/> <-- second call to next_tag returns this /// </Response> /// ``` pub fn next_start_element<'a>(&'a mut self) -> Option<StartEl<'inp>> { next_start_element(self) } /// A scoped reader for the entire document pub fn root_element<'a>(&'a mut self) -> Result<ScopedDecoder<'inp, 'a>, XmlError> { let start_el = self .next_start_element() .ok_or_else(|| XmlError::custom("no root element"))?; Ok(ScopedDecoder { doc: self, start_el, terminated: false, }) } /// A scoped reader for a specific tag /// /// This method is necessary for when you need to return a ScopedDecoder from a function /// since normally the stacked-ownership that `next_tag()` uses would prevent returning a reference /// to a field owned by the current function pub fn scoped_to<'a>(&'a mut self, start_el: StartEl<'inp>) -> ScopedDecoder<'inp, 'a> { ScopedDecoder { doc: self, start_el, terminated: false, } } } /// Depth tracking iterator /// /// ```xml /// <a> <- startel depth 0 /// <b> <- startel depth 1 /// <c> <- startel depth 2 /// </c> <- endel depth 2 /// </b> <- endel depth 1 /// </a> <- endel depth 0 /// ``` impl<'inp> Iterator for Document<'inp> { type Item = Result<(Token<'inp>, Depth), XmlError>; fn next<'a>(&'a mut self) -> Option<Result<(Token<'inp>, Depth), XmlError>> { let tok = self.tokenizer.next()?; let tok = match tok { Err(e) => return Some(Err(e.into())), Ok(tok) => tok, }; // depth bookkeeping match tok { Token::ElementEnd { end: ElementEnd::Close(_, _), .. } => { self.depth -= 1; } Token::ElementEnd { end: ElementEnd::Empty, .. } => self.depth -= 1, t @ Token::ElementStart { .. } => { self.depth += 1; // We want the startel and endel to have the same depth, but after the opener, // the parser will be at depth 1. Return the previous depth: return Some(Ok((t, self.depth - 1))); } _ => {} } Some(Ok((tok, self.depth))) } } /// XmlTag Abstraction /// /// ScopedDecoder represents a tag-scoped view into an XML document. Methods /// on `ScopedDecoder` return `None` when the current tag has been exhausted. pub struct ScopedDecoder<'inp, 'a> { doc: &'a mut Document<'inp>, start_el: StartEl<'inp>, terminated: bool, } /// When a scoped decoder is dropped, its entire scope is consumed so that the /// next read begins at the next tag at the same depth. impl Drop for ScopedDecoder<'_, '_> { fn drop(&mut self) { for _ in self {} } } impl<'inp> ScopedDecoder<'inp, '_> { /// The start element for this scope pub fn start_el<'a>(&'a self) -> &'a StartEl<'inp> { &self.start_el } /// Returns the next top-level tag in this scope /// The returned reader will fully read the tag during its lifetime. If it is dropped without /// the data being read, the reader will be advanced until the matching close tag. If you read /// an element with `next_tag()` and you want to ignore it, simply drop the resulting `ScopeDecoder`. 
/// /// ```xml /// <Response> <-- scoped reader on this tag /// <A> <-- first call to next_tag returns this /// <Nested /> <-- to get inner data, call `next_tag` on the returned decoder for `A` /// <MoreNested>hello</MoreNested> /// </A> /// <B/> <-- second call to next_tag returns this /// </Response> /// ``` pub fn next_tag<'a>(&'a mut self) -> Option<ScopedDecoder<'inp, 'a>> { let next_tag = next_start_element(self)?; Some(self.nested_decoder(next_tag)) } fn nested_decoder<'a>(&'a mut self, start_el: StartEl<'inp>) -> ScopedDecoder<'inp, 'a> { ScopedDecoder { doc: &mut self.doc, start_el, terminated: false, } } } impl<'inp, 'a> Iterator for ScopedDecoder<'inp,
Name
identifier_name
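The identifier filled in here is `Name`, whose `matches` method compares a `prefix:local` pattern by splitting on the first colon. The same logic translated to Go, purely as an illustration of the matching rule:

```go
package main

import (
	"fmt"
	"strings"
)

// matches reimplements Name::matches from the record in Go: a pattern is
// either a bare local name or "prefix:local", split on the first colon.
func matches(prefix, local, pat string) bool {
	idx := strings.Index(pat, ":")
	if idx < 0 {
		return pat == local
	}
	return pat[:idx] == prefix && pat[idx+1:] == local
}

func main() {
	fmt.Println(matches("a", "b", "a:b")) // true
	fmt.Println(matches("a", "b", "b"))   // true
	fmt.Println(matches("a", "b", "c:b")) // false
}
```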
decode.rs
()) } /// Returns whether this `StartEl` matches a given name /// in `prefix:local` form. pub fn matches(&self, pat: &str) -> bool { self.name.matches(pat) } /// Local component of this element's name /// /// ```xml /// <foo:bar> /// ^^^ /// ``` pub fn local(&self) -> &str
/// Prefix component of this elements name (or empty string) /// ```xml /// <foo:bar> /// ^^^ /// ``` pub fn prefix(&self) -> &str { self.name.prefix } /// Returns true of `el` at `depth` is a match for this `start_el` fn end_el(&self, el: ElementEnd, depth: Depth) -> bool { if depth != self.depth { return false; } match el { ElementEnd::Open => false, ElementEnd::Close(prefix, local) => { prefix.as_str() == self.name.prefix && local.as_str() == self.name.local } ElementEnd::Empty => false, } } } /// Xml Document abstraction /// /// This document wraps a lazy tokenizer with depth tracking. /// Constructing a document is essentially free. pub struct Document<'a> { tokenizer: Tokenizer<'a>, depth: Depth, } impl<'a> TryFrom<&'a [u8]> for Document<'a> { type Error = XmlError; fn try_from(value: &'a [u8]) -> Result<Self, Self::Error> { Ok(Document::new( std::str::from_utf8(value).map_err(|err| XmlError::Unhandled(Box::new(err)))?, )) } } impl<'inp> Document<'inp> { pub fn new(doc: &'inp str) -> Self { Document { tokenizer: Tokenizer::from(doc), depth: 0, } } /// "Depth first" iterator /// /// Unlike [`next_tag()`](ScopedDecoder::next_tag), this method returns the next /// start element regardless of depth. This is useful to give a pointer into the middle /// of a document to start reading. /// /// ```xml /// <Response> <-- first call returns this: /// <A> <-- next call /// <Nested /> <-- next call returns this /// <MoreNested>hello</MoreNested> <-- then this: /// </A> /// <B/> <-- second call to next_tag returns this /// </Response> /// ``` pub fn next_start_element<'a>(&'a mut self) -> Option<StartEl<'inp>> { next_start_element(self) } /// A scoped reader for the entire document pub fn root_element<'a>(&'a mut self) -> Result<ScopedDecoder<'inp, 'a>, XmlError> { let start_el = self .next_start_element() .ok_or_else(|| XmlError::custom("no root element"))?; Ok(ScopedDecoder { doc: self, start_el, terminated: false, }) } /// A scoped reader for a specific tag /// /// This method is necessary for when you need to return a ScopedDecoder from a function /// since normally the stacked-ownership that `next_tag()` uses would prevent returning a reference /// to a field owned by the current function pub fn scoped_to<'a>(&'a mut self, start_el: StartEl<'inp>) -> ScopedDecoder<'inp, 'a> { ScopedDecoder { doc: self, start_el, terminated: false, } } } /// Depth tracking iterator /// /// ```xml /// <a> <- startel depth 0 /// <b> <- startel depth 1 /// <c> <- startel depth 2 /// </c> <- endel depth 2 /// </b> <- endel depth 1 /// </a> <- endel depth 0 /// ``` impl<'inp> Iterator for Document<'inp> { type Item = Result<(Token<'inp>, Depth), XmlError>; fn next<'a>(&'a mut self) -> Option<Result<(Token<'inp>, Depth), XmlError>> { let tok = self.tokenizer.next()?; let tok = match tok { Err(e) => return Some(Err(e.into())), Ok(tok) => tok, }; // depth bookkeeping match tok { Token::ElementEnd { end: ElementEnd::Close(_, _), .. } => { self.depth -= 1; } Token::ElementEnd { end: ElementEnd::Empty, .. } => self.depth -= 1, t @ Token::ElementStart { .. } => { self.depth += 1; // We want the startel and endel to have the same depth, but after the opener, // the parser will be at depth 1. Return the previous depth: return Some(Ok((t, self.depth - 1))); } _ => {} } Some(Ok((tok, self.depth))) } } /// XmlTag Abstraction /// /// ScopedDecoder represents a tag-scoped view into an XML document. Methods /// on `ScopedDecoder` return `None` when the current tag has been exhausted. 
pub struct ScopedDecoder<'inp, 'a> { doc: &'a mut Document<'inp>, start_el: StartEl<'inp>, terminated: bool, } /// When a scoped decoder is dropped, its entire scope is consumed so that the /// next read begins at the next tag at the same depth. impl Drop for ScopedDecoder<'_, '_> { fn drop(&mut self) { for _ in self {} } } impl<'inp> ScopedDecoder<'inp, '_> { /// The start element for this scope pub fn start_el<'a>(&'a self) -> &'a StartEl<'inp> { &self.start_el } /// Returns the next top-level tag in this scope /// The returned reader will fully read the tag during its lifetime. If it is dropped without /// the data being read, the reader will be advanced until the matching close tag. If you read /// an element with `next_tag()` and you want to ignore it, simply drop the resulting `ScopeDecoder`. /// /// ```xml /// <Response> <-- scoped reader on this tag /// <A> <-- first call to next_tag returns this /// <Nested /> <-- to get inner data, call `next_tag` on the returned decoder for `A` /// <MoreNested>hello</MoreNested> /// </A> /// <B/> <-- second call to next_tag returns this /// </Response> /// ``` pub fn next_tag<'a>(&'a mut self) -> Option<ScopedDecoder<'inp, 'a>> { let next_tag = next_start_element(self)?; Some(self.nested_decoder(next_tag)) } fn nested_decoder<'a>(&'a mut self, start_el: StartEl<'inp>) -> ScopedDecoder<'inp, 'a> { ScopedDecoder { doc: &mut self.doc, start_el, terminated: false, } } } impl<'inp, 'a> Iterator for ScopedDecoder<'inp, 'a> { type Item = Result<(Token<'inp>, Depth), XmlError>; fn next(&mut self) -> Option<Self::Item> { if self.start_el.closed { self.terminated = true; } if self.terminated { return None; } let (tok, depth) = match self.doc.next() { Some(Ok((tok, depth))) => (tok, depth), other => return other, }; match tok { Token::ElementEnd { end, .. } if self.start_el.end_el(end, depth) => { self.terminated = true; return None; } _ => {} } Some(Ok((tok, depth))) } } /// Load the next start element out of a depth-tagged token iterator fn next_start_element<'a, 'inp>( tokens: &'a mut impl Iterator<Item = Result<(Token<'inp>, Depth), XmlError>>, ) -> Option<StartEl<'inp>> { let mut out = StartEl::new("", "", 0); loop { match tokens.next()? { Ok((Token::ElementStart { local, prefix, .. }, depth)) => { out.name.local = local.as_str(); out.name.prefix = prefix.as_str(); out.depth = depth; } Ok(( Token::Attribute { prefix, local, value, .. }, _, )) => out.attributes.push(Attr { name: Name { local: local.as_str(), prefix: prefix.as_str(), }, value: unescape(value.as_str()).ok()?, }), Ok(( Token::ElementEnd { end: ElementEnd::Open, .. }, _, )) => break, Ok(( Token::ElementEnd { end: ElementEnd::Empty, .. }, _, )) => { out.closed = true; break; } _ => {} } }
{ self.name.local }
identifier_body
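The `{ self.name.local }` body belongs to a decoder whose `Iterator` impl does depth bookkeeping: increment on a start tag but report the pre-increment depth, so a start element and its matching end element share a depth. A sketch of the equivalent bookkeeping over Go's `encoding/xml` token stream:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

func main() {
	doc := `<a><b><c/></b></a>`
	dec := xml.NewDecoder(strings.NewReader(doc))

	// Same bookkeeping as the record's Iterator impl: report a start element
	// at the depth it opens, so <a> and </a> both sit at depth 0.
	depth := 0
	for {
		tok, err := dec.Token()
		if err != nil {
			break // io.EOF ends the stream
		}
		switch t := tok.(type) {
		case xml.StartElement:
			fmt.Printf("start %s depth %d\n", t.Name.Local, depth)
			depth++
		case xml.EndElement:
			depth--
			fmt.Printf("end   %s depth %d\n", t.Name.Local, depth)
		}
	}
}
```

Note that `encoding/xml` expands the self-closing `<c/>` into a start/end pair, which matches how the Rust tokenizer's `ElementEnd::Empty` decrements depth.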
decode.rs
_ref()) } /// Returns whether this `StartEl` matches a given name /// in `prefix:local` form. pub fn matches(&self, pat: &str) -> bool { self.name.matches(pat) } /// Local component of this element's name /// /// ```xml /// <foo:bar> /// ^^^ /// ``` pub fn local(&self) -> &str { self.name.local } /// Prefix component of this elements name (or empty string) /// ```xml /// <foo:bar> /// ^^^ /// ``` pub fn prefix(&self) -> &str { self.name.prefix } /// Returns true of `el` at `depth` is a match for this `start_el` fn end_el(&self, el: ElementEnd, depth: Depth) -> bool { if depth != self.depth { return false; } match el { ElementEnd::Open => false, ElementEnd::Close(prefix, local) => { prefix.as_str() == self.name.prefix && local.as_str() == self.name.local } ElementEnd::Empty => false, } } } /// Xml Document abstraction /// /// This document wraps a lazy tokenizer with depth tracking. /// Constructing a document is essentially free. pub struct Document<'a> { tokenizer: Tokenizer<'a>, depth: Depth, } impl<'a> TryFrom<&'a [u8]> for Document<'a> { type Error = XmlError; fn try_from(value: &'a [u8]) -> Result<Self, Self::Error> { Ok(Document::new( std::str::from_utf8(value).map_err(|err| XmlError::Unhandled(Box::new(err)))?, )) } } impl<'inp> Document<'inp> { pub fn new(doc: &'inp str) -> Self { Document { tokenizer: Tokenizer::from(doc), depth: 0, } } /// "Depth first" iterator /// /// Unlike [`next_tag()`](ScopedDecoder::next_tag), this method returns the next /// start element regardless of depth. This is useful to give a pointer into the middle /// of a document to start reading. /// /// ```xml /// <Response> <-- first call returns this: /// <A> <-- next call
/// <Nested /> <-- next call returns this /// <MoreNested>hello</MoreNested> <-- then this: /// </A> /// <B/> <-- second call to next_tag returns this /// </Response> /// ``` pub fn next_start_element<'a>(&'a mut self) -> Option<StartEl<'inp>> { next_start_element(self) } /// A scoped reader for the entire document pub fn root_element<'a>(&'a mut self) -> Result<ScopedDecoder<'inp, 'a>, XmlError> { let start_el = self .next_start_element() .ok_or_else(|| XmlError::custom("no root element"))?; Ok(ScopedDecoder { doc: self, start_el, terminated: false, }) } /// A scoped reader for a specific tag /// /// This method is necessary for when you need to return a ScopedDecoder from a function /// since normally the stacked-ownership that `next_tag()` uses would prevent returning a reference /// to a field owned by the current function pub fn scoped_to<'a>(&'a mut self, start_el: StartEl<'inp>) -> ScopedDecoder<'inp, 'a> { ScopedDecoder { doc: self, start_el, terminated: false, } } } /// Depth tracking iterator /// /// ```xml /// <a> <- startel depth 0 /// <b> <- startel depth 1 /// <c> <- startel depth 2 /// </c> <- endel depth 2 /// </b> <- endel depth 1 /// </a> <- endel depth 0 /// ``` impl<'inp> Iterator for Document<'inp> { type Item = Result<(Token<'inp>, Depth), XmlError>; fn next<'a>(&'a mut self) -> Option<Result<(Token<'inp>, Depth), XmlError>> { let tok = self.tokenizer.next()?; let tok = match tok { Err(e) => return Some(Err(e.into())), Ok(tok) => tok, }; // depth bookkeeping match tok { Token::ElementEnd { end: ElementEnd::Close(_, _), .. } => { self.depth -= 1; } Token::ElementEnd { end: ElementEnd::Empty, .. } => self.depth -= 1, t @ Token::ElementStart { .. } => { self.depth += 1; // We want the startel and endel to have the same depth, but after the opener, // the parser will be at depth 1. Return the previous depth: return Some(Ok((t, self.depth - 1))); } _ => {} } Some(Ok((tok, self.depth))) } } /// XmlTag Abstraction /// /// ScopedDecoder represents a tag-scoped view into an XML document. Methods /// on `ScopedDecoder` return `None` when the current tag has been exhausted. pub struct ScopedDecoder<'inp, 'a> { doc: &'a mut Document<'inp>, start_el: StartEl<'inp>, terminated: bool, } /// When a scoped decoder is dropped, its entire scope is consumed so that the /// next read begins at the next tag at the same depth. impl Drop for ScopedDecoder<'_, '_> { fn drop(&mut self) { for _ in self {} } } impl<'inp> ScopedDecoder<'inp, '_> { /// The start element for this scope pub fn start_el<'a>(&'a self) -> &'a StartEl<'inp> { &self.start_el } /// Returns the next top-level tag in this scope /// The returned reader will fully read the tag during its lifetime. If it is dropped without /// the data being read, the reader will be advanced until the matching close tag. If you read /// an element with `next_tag()` and you want to ignore it, simply drop the resulting `ScopeDecoder`. 
/// /// ```xml /// <Response> <-- scoped reader on this tag /// <A> <-- first call to next_tag returns this /// <Nested /> <-- to get inner data, call `next_tag` on the returned decoder for `A` /// <MoreNested>hello</MoreNested> /// </A> /// <B/> <-- second call to next_tag returns this /// </Response> /// ``` pub fn next_tag<'a>(&'a mut self) -> Option<ScopedDecoder<'inp, 'a>> { let next_tag = next_start_element(self)?; Some(self.nested_decoder(next_tag)) } fn nested_decoder<'a>(&'a mut self, start_el: StartEl<'inp>) -> ScopedDecoder<'inp, 'a> { ScopedDecoder { doc: &mut self.doc, start_el, terminated: false, } } } impl<'inp, 'a> Iterator for ScopedDecoder<'inp, 'a> { type Item = Result<(Token<'inp>, Depth), XmlError>; fn next(&mut self) -> Option<Self::Item> { if self.start_el.closed { self.terminated = true; } if self.terminated { return None; } let (tok, depth) = match self.doc.next() { Some(Ok((tok, depth))) => (tok, depth), other => return other, }; match tok { Token::ElementEnd { end, .. } if self.start_el.end_el(end, depth) => { self.terminated = true; return None; } _ => {} } Some(Ok((tok, depth))) } } /// Load the next start element out of a depth-tagged token iterator fn next_start_element<'a, 'inp>( tokens: &'a mut impl Iterator<Item = Result<(Token<'inp>, Depth), XmlError>>, ) -> Option<StartEl<'inp>> { let mut out = StartEl::new("", "", 0); loop { match tokens.next()? { Ok((Token::ElementStart { local, prefix, .. }, depth)) => { out.name.local = local.as_str(); out.name.prefix = prefix.as_str(); out.depth = depth; } Ok(( Token::Attribute { prefix, local, value, .. }, _, )) => out.attributes.push(Attr { name: Name { local: local.as_str(), prefix: prefix.as_str(), }, value: unescape(value.as_str()).ok()?, }), Ok(( Token::ElementEnd { end: ElementEnd::Open, .. }, _, )) => break, Ok(( Token::ElementEnd { end: ElementEnd::Empty, .. }, _, )) => { out.closed = true; break; } _ => {} } } Some(out
random_line_split
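The `ScopedDecoder` in this record drains its whole scope on `Drop`, so the next read lands on the following sibling tag. Go's `encoding/xml` has a ready-made counterpart in `(*xml.Decoder).Skip`, which consumes tokens up to the matching end element; a small demonstration, with the document and element names invented for the example:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

func main() {
	doc := `<Response><A><Nested/></A><B/></Response>`
	dec := xml.NewDecoder(strings.NewReader(doc))

	for {
		tok, err := dec.Token()
		if err != nil {
			break
		}
		if start, ok := tok.(xml.StartElement); ok && start.Name.Local == "A" {
			// Skip plays the role of ScopedDecoder's Drop impl: consume
			// everything up to and including the matching </A>, so the next
			// token read lands on the sibling <B/>.
			if err := dec.Skip(); err != nil {
				break
			}
			fmt.Println("skipped <A>; next read starts at <B/>")
		}
	}
}
```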
controller.go
) ControllerName() string { if c.controlmeta != nil { return c.controlmeta.Name() } return "" } func (c *ControlManager) ActionName() string { return c.action } func (c *ControlManager) Controller() reflect.Value { return c.control } func (c *ControlManager) ActionMeta() *ActionMeta { return c.controlmeta.ActionFromRequest(c.MvcMeta()[mvc.MVCAction], c.context) } func (c *ControlManager) AvailableMethods() ReqMethod { return c.controlmeta.ActionAvailableMethods(c.action) } func (c *ControlManager) AvailableMethodsList() []string { return c.controlmeta.ActionAvailableMethodsList(c.action) } func (c *ControlManager) ControllerMeta() *Meta { return c.controlmeta } func (c *ControlManager) Context() *web.Context { return c.context } func (c *ControlManager) PrepareAndExecute() (state int, vw mvc.View) { if c.Prepare() == nil { return c.Execute() } return c.state, nil } func (c *ControlManager) Prepare() error { if !c.prepared && (c.state == 0 || (c.state >= 200 && c.state < 300)) { if c.controlmeta == nil { c.state = 404 return ErrControllerNotFound } switch c.controlmeta.T() { case ContypeConstructor: results := c.control.Call(getArgSlice(c.controlmeta.Args(), getVMap(c.context), c.context.PData)) c.control = results[0] if c.control.Kind() == reflect.Interface { c.control = c.control.Elem() } // after Elem from Interface, it might be a pointer to a struct too if c.control.Kind() == reflect.Ptr { c.control = c.control.Elem() } if c.controlmeta.Status() { state := int(results[1].Int()) if state <= 0 { state = http.StatusOK } c.state = state } case ContypeStruct, ContypeScontroller: fields := c.controlmeta.Fields() tmpcontrol := reflect.Indirect(c.control) for _, field := range fields { value := getDataValue(&field.DataMeta, getVMap(c.context), c.context.PData) // allows middleware resources to make changes to value based on tag // this can be useful to csrf where non csrf verified values are filtered if res := field.Tag().Get(StructValueFeedTag); res != "" { var reses []string if strings.Contains(res, ",") { reses = strings.Split(res, ",") } else { reses = []string{res} } for _, r := range reses { rinterface := c.context.Resource(strings.TrimSpace(r)) if rinterface != nil { if parser, ok := rinterface.(StructValueFeeder); ok { var err error value, err = parser.FeedStructValue(c.context, field, value) if err != nil { c.state = 500 log.Println(err) return ErrParseStruct } } } else { log.Println("Resource to parse struct var not found: ", r) c.state = 500 return ErrParseStruct } } } else if res := field.Tag().Get(StructValueResTag); res != "" { rinterface := c.context.Resource(strings.TrimSpace(res)) if rinterface != nil { value = reflect.ValueOf(rinterface) } } if value.IsValid() { tmpcontrol.FieldByName(field.Name()).Set(value) } } if c.state != 500 && c.controlmeta.T() == ContypeScontroller { ctrler := c.control.Interface().(mvc.Controller) ctrler.Construct_(c.context) } } if c.context.HasErrorCode() { c.state = c.context.ErrorCode() } c.prepared = true } return nil } func (c *ControlManager) Execute() (state int, vw mvc.View) { if c.prepared { if c.state == 0 { c.state = 200 } var results []reflect.Value var rstyle = c.controlmeta.ResultStyle if c.state >= http.StatusOK && c.state < http.StatusMultipleChoices { switch c.controlmeta.T() { case ContypeFunc: results = c.control.Call(getArgSlice(c.controlmeta.Args(), getVMap(c.context), c.context.PData)) default: actmeta := c.ActionMeta() if actmeta != nil { meth := c.control.MethodByName(actmeta.Name()) results = 
meth.Call(getArgSlice(actmeta.Args(), getVMap(c.context), c.context.PData)) rstyle = actmeta.ResultStyle } else { c.state = 404 state = c.state return } } } if rstyle.Status() { c.state = int(results[1].Int()) } if rstyle.View() || rstyle.Vmap() || rstyle.MapSI() { // for a consistent error page, error should be returned instead and allow sunny server itself // to render the error page state = c.state if state == 200 || state == 0 { if rstyle.View() { if !results[0].IsNil() && results[0].IsValid() { c.vw = (results[0].Interface()).(mvc.View) } } else { var vmap mvc.VM if results[0].IsNil() || !results[0].IsValid() { vmap = mvc.VM{} } else if rstyle.Vmap() { vmap = results[0].Interface().(mvc.VM) } else { vmap = mvc.VM(results[0].Interface().(map[string]interface{})) } c.vw = view.NewResultView(vmap) } vw = c.vw if vw == nil { state = -1 } } } else { // if state returned is -1, it means the controller has handled the response state = -1 } c.executed = true } else { state = c.state } return } func (c *ControlManager) PublishView() (err error) { if !c.prepared { err = ErrUnprepared } else if !c.executed { err = ErrUnexecuted } else if c.vw != nil { if c.context.Request.Method == "HEAD" { c.context.Response.Header().Set("Content-Type", c.vw.ContentType(c.context)) } else { err = c.vw.Publish(c.context) } } return } func (c *ControlManager) Cleanup() { if c.prepared && c.controlmeta.T() == ContypeScontroller { ctrler := c.control.Interface().(mvc.Controller) ctrler.Destruct_() } } func getVMap(context *web.Context) map[string]reflect.Value { return map[string]reflect.Value{ "context": reflect.ValueOf(context), "w": reflect.ValueOf(context.Response), "r": reflect.ValueOf(context.Request), "upath": reflect.ValueOf(context.UPath), "pdata": reflect.ValueOf(context.PData), "upath_slice": reflect.ValueOf([]string(context.UPath)), "pdata_map": reflect.ValueOf(map[string]string(context.PData)), } } func getArgSlice(args []*ArgMeta, vmap map[string]reflect.Value, d web.PData) (values []reflect.Value) { values = make([]reflect.Value, len(args)) for i, arg := range args { values[i] = getDataValue(&arg.DataMeta, vmap, d) } return } func getDataValue(arg *DataMeta, vmap map[string]reflect.Value, d web.PData) (value reflect.Value) { switch arg.T() { case DatatypeWebContext: value = vmap["context"] case DatatypeRequest: value = vmap["r"] case DatatypeResponseWriter: value = vmap["w"] case DatatypeUpath: value = vmap["upath"] case DatatypeUpathSlice: value = vmap["upath_slice"] case DatatypePdata: value = vmap["pdata"] case DatatypePdataMap: value = vmap["pdata_map"] case DatatypeString: val, _ := d.String(arg.LName()) value = reflect.ValueOf(val) case DatatypeInt: val, _ := d.Int(arg.LName()) value = reflect.ValueOf(val) case DatatypeInt64: val, _ := d.Int64(arg.LName()) value = reflect.ValueOf(val) case DatatypeFloat: val, _ := d.Float32(arg.LName()) value = reflect.ValueOf(val)
case DatatypeFloat64: val, _ := d.Float64(arg.LName()) value = reflect.ValueOf(val)
random_line_split
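The `Execute` path in this controller.go record resolves the action method by name with `MethodByName` and invokes it via `Call`, treating an invalid method as a 404. The core of that dispatch, reduced to a few lines; the `Greeter` type and `Hello` action are made up for the example:

```go
package main

import (
	"fmt"
	"reflect"
)

type Greeter struct{}

func (Greeter) Hello(name string) string { return "hello " + name }

func main() {
	ctrl := reflect.ValueOf(Greeter{})

	// Same mechanism as Execute(): resolve the action by name at runtime,
	// build the argument slice, and invoke it through reflection.
	meth := ctrl.MethodByName("Hello")
	if !meth.IsValid() {
		fmt.Println("404: no such action") // the record maps this to c.state = 404
		return
	}
	args := []reflect.Value{reflect.ValueOf("world")}
	results := meth.Call(args)
	fmt.Println(results[0].String()) // hello world
}
```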
controller.go
: context, controlmeta: cm, action: action, } } type ControlManager struct { control reflect.Value context *web.Context controlmeta *Meta action string prepared bool executed bool state int vw mvc.View } func (c *ControlManager) SetControllerMeta(cm *Meta) (ok bool) { if !c.prepared { rtype := cm.RType() if rtype.Kind() == reflect.Ptr { rtype = rtype.Elem() } c.controlmeta = cm c.control = reflect.New(rtype) ok = true } return } func (c *ControlManager) SetAction(action string) (ok bool) { if !c.prepared { c.action = action ok = true } return } func (c *ControlManager) SetState(state int) { c.state = state } func (c *ControlManager) State() int { return c.state } func (c *ControlManager) View() mvc.View { return c.vw } func (c *ControlManager) IsPrepared() bool { return c.prepared } func (c *ControlManager) IsExecuted() bool { return c.executed } func (c *ControlManager) MvcMeta() mvc.Meta { if c.controlmeta != nil { return mvc.Meta{c.controlmeta.Module(), c.controlmeta.Name(), c.action, c.context.Ext} } return mvc.Meta{} } func (c *ControlManager) ModuleName() string { if c.controlmeta != nil { return c.controlmeta.Module() } return "" } func (c *ControlManager)
() string { if c.controlmeta != nil { return c.controlmeta.Name() } return "" } func (c *ControlManager) ActionName() string { return c.action } func (c *ControlManager) Controller() reflect.Value { return c.control } func (c *ControlManager) ActionMeta() *ActionMeta { return c.controlmeta.ActionFromRequest(c.MvcMeta()[mvc.MVCAction], c.context) } func (c *ControlManager) AvailableMethods() ReqMethod { return c.controlmeta.ActionAvailableMethods(c.action) } func (c *ControlManager) AvailableMethodsList() []string { return c.controlmeta.ActionAvailableMethodsList(c.action) } func (c *ControlManager) ControllerMeta() *Meta { return c.controlmeta } func (c *ControlManager) Context() *web.Context { return c.context } func (c *ControlManager) PrepareAndExecute() (state int, vw mvc.View) { if c.Prepare() == nil { return c.Execute() } return c.state, nil } func (c *ControlManager) Prepare() error { if !c.prepared && (c.state == 0 || (c.state >= 200 && c.state < 300)) { if c.controlmeta == nil { c.state = 404 return ErrControllerNotFound } switch c.controlmeta.T() { case ContypeConstructor: results := c.control.Call(getArgSlice(c.controlmeta.Args(), getVMap(c.context), c.context.PData)) c.control = results[0] if c.control.Kind() == reflect.Interface { c.control = c.control.Elem() } // after Elem from Interface, it might be a pointer to a struct too if c.control.Kind() == reflect.Ptr { c.control = c.control.Elem() } if c.controlmeta.Status() { state := int(results[1].Int()) if state <= 0 { state = http.StatusOK } c.state = state } case ContypeStruct, ContypeScontroller: fields := c.controlmeta.Fields() tmpcontrol := reflect.Indirect(c.control) for _, field := range fields { value := getDataValue(&field.DataMeta, getVMap(c.context), c.context.PData) // allows middleware resources to make changes to value based on tag // this can be useful to csrf where non csrf verified values are filtered if res := field.Tag().Get(StructValueFeedTag); res != "" { var reses []string if strings.Contains(res, ",") { reses = strings.Split(res, ",") } else { reses = []string{res} } for _, r := range reses { rinterface := c.context.Resource(strings.TrimSpace(r)) if rinterface != nil { if parser, ok := rinterface.(StructValueFeeder); ok { var err error value, err = parser.FeedStructValue(c.context, field, value) if err != nil { c.state = 500 log.Println(err) return ErrParseStruct } } } else { log.Println("Resource to parse struct var not found: ", r) c.state = 500 return ErrParseStruct } } } else if res := field.Tag().Get(StructValueResTag); res != "" { rinterface := c.context.Resource(strings.TrimSpace(res)) if rinterface != nil { value = reflect.ValueOf(rinterface) } } if value.IsValid() { tmpcontrol.FieldByName(field.Name()).Set(value) } } if c.state != 500 && c.controlmeta.T() == ContypeScontroller { ctrler := c.control.Interface().(mvc.Controller) ctrler.Construct_(c.context) } } if c.context.HasErrorCode() { c.state = c.context.ErrorCode() } c.prepared = true } return nil } func (c *ControlManager) Execute() (state int, vw mvc.View) { if c.prepared { if c.state == 0 { c.state = 200 } var results []reflect.Value var rstyle = c.controlmeta.ResultStyle if c.state >= http.StatusOK && c.state < http.StatusMultipleChoices { switch c.controlmeta.T() { case ContypeFunc: results = c.control.Call(getArgSlice(c.controlmeta.Args(), getVMap(c.context), c.context.PData)) default: actmeta := c.ActionMeta() if actmeta != nil { meth := c.control.MethodByName(actmeta.Name()) results = meth.Call(getArgSlice(actmeta.Args(), 
getVMap(c.context), c.context.PData)) rstyle = actmeta.ResultStyle } else { c.state = 404 state = c.state return } } } if rstyle.Status() { c.state = int(results[1].Int()) } if rstyle.View() || rstyle.Vmap() || rstyle.MapSI() { // for a consistent error page, error should be returned instead and allow sunny server itself // to render the error page state = c.state if state == 200 || state == 0 { if rstyle.View() { if !results[0].IsNil() && results[0].IsValid() { c.vw = (results[0].Interface()).(mvc.View) } } else { var vmap mvc.VM if results[0].IsNil() || !results[0].IsValid() { vmap = mvc.VM{} } else if rstyle.Vmap() { vmap = results[0].Interface().(mvc.VM) } else { vmap = mvc.VM(results[0].Interface().(map[string]interface{})) } c.vw = view.NewResultView(vmap) } vw = c.vw if vw == nil { state = -1 } } } else { // if state returned is -1, it means the controller has handled the response state = -1 } c.executed = true } else { state = c.state } return } func (c *ControlManager) PublishView() (err error) { if !c.prepared { err = ErrUnprepared } else if !c.executed { err = ErrUnexecuted } else if c.vw != nil { if c.context.Request.Method == "HEAD" { c.context.Response.Header().Set("Content-Type", c.vw.ContentType(c.context)) } else { err = c.vw.Publish(c.context) } } return } func (c *ControlManager) Cleanup() { if c.prepared && c.controlmeta.T() == ContypeScontroller { ctrler := c.control.Interface().(mvc.Controller) ctrler.Destruct_() } } func getVMap(context *web.Context) map[string]reflect.Value { return map[string]reflect.Value{ "context": reflect.ValueOf(context), "w": reflect.ValueOf(context.Response), "r": reflect.ValueOf(context.Request), "upath": reflect.ValueOf(context.UPath), "pdata": reflect.ValueOf(context.PData),
ControllerName
identifier_name
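The row above exercises ControlManager's lifecycle: Prepare resolves and constructs the controller, Execute invokes the action and captures a view, PublishView writes it out (headers only for HEAD requests), and Cleanup runs Destruct_ for Scontroller types. A minimal driver sketch, assuming it lives in the same package; the error-page step is left as a placeholder:

func serveRequest(cm *ControlManager) {
	defer cm.Cleanup() // Destruct_ runs even if execution bails out
	state, vw := cm.PrepareAndExecute()
	if vw == nil || state >= 400 {
		// state == -1 means the controller already wrote the response;
		// other non-2xx states would go to the server's error renderer.
		return
	}
	if err := cm.PublishView(); err != nil {
		log.Println(err)
	}
}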
controller.go
: context, controlmeta: cm, action: action, } } type ControlManager struct { control reflect.Value context *web.Context controlmeta *Meta action string prepared bool executed bool state int vw mvc.View } func (c *ControlManager) SetControllerMeta(cm *Meta) (ok bool) { if !c.prepared { rtype := cm.RType() if rtype.Kind() == reflect.Ptr { rtype = rtype.Elem() } c.controlmeta = cm c.control = reflect.New(rtype) ok = true } return } func (c *ControlManager) SetAction(action string) (ok bool) { if !c.prepared { c.action = action ok = true } return } func (c *ControlManager) SetState(state int) { c.state = state } func (c *ControlManager) State() int { return c.state } func (c *ControlManager) View() mvc.View { return c.vw } func (c *ControlManager) IsPrepared() bool { return c.prepared } func (c *ControlManager) IsExecuted() bool { return c.executed } func (c *ControlManager) MvcMeta() mvc.Meta { if c.controlmeta != nil { return mvc.Meta{c.controlmeta.Module(), c.controlmeta.Name(), c.action, c.context.Ext} } return mvc.Meta{} } func (c *ControlManager) ModuleName() string { if c.controlmeta != nil { return c.controlmeta.Module() } return "" } func (c *ControlManager) ControllerName() string { if c.controlmeta != nil { return c.controlmeta.Name() } return "" } func (c *ControlManager) ActionName() string { return c.action } func (c *ControlManager) Controller() reflect.Value { return c.control } func (c *ControlManager) ActionMeta() *ActionMeta { return c.controlmeta.ActionFromRequest(c.MvcMeta()[mvc.MVCAction], c.context) } func (c *ControlManager) AvailableMethods() ReqMethod { return c.controlmeta.ActionAvailableMethods(c.action) } func (c *ControlManager) AvailableMethodsList() []string { return c.controlmeta.ActionAvailableMethodsList(c.action) } func (c *ControlManager) ControllerMeta() *Meta { return c.controlmeta } func (c *ControlManager) Context() *web.Context { return c.context } func (c *ControlManager) PrepareAndExecute() (state int, vw mvc.View) { if c.Prepare() == nil { return c.Execute() } return c.state, nil } func (c *ControlManager) Prepare() error { if !c.prepared && (c.state == 0 || (c.state >= 200 && c.state < 300)) { if c.controlmeta == nil { c.state = 404 return ErrControllerNotFound } switch c.controlmeta.T() { case ContypeConstructor: results := c.control.Call(getArgSlice(c.controlmeta.Args(), getVMap(c.context), c.context.PData)) c.control = results[0] if c.control.Kind() == reflect.Interface { c.control = c.control.Elem() } // after Elem from Interface, it might be a pointer to a struct too if c.control.Kind() == reflect.Ptr { c.control = c.control.Elem() } if c.controlmeta.Status() { state := int(results[1].Int()) if state <= 0 { state = http.StatusOK } c.state = state } case ContypeStruct, ContypeScontroller: fields := c.controlmeta.Fields() tmpcontrol := reflect.Indirect(c.control) for _, field := range fields { value := getDataValue(&field.DataMeta, getVMap(c.context), c.context.PData) // allows middleware resources to make changes to value based on tag // this can be useful to csrf where non csrf verified values are filtered if res := field.Tag().Get(StructValueFeedTag); res != "" { var reses []string if strings.Contains(res, ",")
else { reses = []string{res} } for _, r := range reses { rinterface := c.context.Resource(strings.TrimSpace(r)) if rinterface != nil { if parser, ok := rinterface.(StructValueFeeder); ok { var err error value, err = parser.FeedStructValue(c.context, field, value) if err != nil { c.state = 500 log.Println(err) return ErrParseStruct } } } else { log.Println("Resource to parse struct var not found: ", r) c.state = 500 return ErrParseStruct } } } else if res := field.Tag().Get(StructValueResTag); res != "" { rinterface := c.context.Resource(strings.TrimSpace(res)) if rinterface != nil { value = reflect.ValueOf(rinterface) } } if value.IsValid() { tmpcontrol.FieldByName(field.Name()).Set(value) } } if c.state != 500 && c.controlmeta.T() == ContypeScontroller { ctrler := c.control.Interface().(mvc.Controller) ctrler.Construct_(c.context) } } if c.context.HasErrorCode() { c.state = c.context.ErrorCode() } c.prepared = true } return nil } func (c *ControlManager) Execute() (state int, vw mvc.View) { if c.prepared { if c.state == 0 { c.state = 200 } var results []reflect.Value var rstyle = c.controlmeta.ResultStyle if c.state >= http.StatusOK && c.state < http.StatusMultipleChoices { switch c.controlmeta.T() { case ContypeFunc: results = c.control.Call(getArgSlice(c.controlmeta.Args(), getVMap(c.context), c.context.PData)) default: actmeta := c.ActionMeta() if actmeta != nil { meth := c.control.MethodByName(actmeta.Name()) results = meth.Call(getArgSlice(actmeta.Args(), getVMap(c.context), c.context.PData)) rstyle = actmeta.ResultStyle } else { c.state = 404 state = c.state return } } } if rstyle.Status() { c.state = int(results[1].Int()) } if rstyle.View() || rstyle.Vmap() || rstyle.MapSI() { // for a consistent error page, error should be returned instead and allow sunny server itself // to render the error page state = c.state if state == 200 || state == 0 { if rstyle.View() { if !results[0].IsNil() && results[0].IsValid() { c.vw = (results[0].Interface()).(mvc.View) } } else { var vmap mvc.VM if results[0].IsNil() || !results[0].IsValid() { vmap = mvc.VM{} } else if rstyle.Vmap() { vmap = results[0].Interface().(mvc.VM) } else { vmap = mvc.VM(results[0].Interface().(map[string]interface{})) } c.vw = view.NewResultView(vmap) } vw = c.vw if vw == nil { state = -1 } } } else { // if state returned is -1, it means the controller has handled the response state = -1 } c.executed = true } else { state = c.state } return } func (c *ControlManager) PublishView() (err error) { if !c.prepared { err = ErrUnprepared } else if !c.executed { err = ErrUnexecuted } else if c.vw != nil { if c.context.Request.Method == "HEAD" { c.context.Response.Header().Set("Content-Type", c.vw.ContentType(c.context)) } else { err = c.vw.Publish(c.context) } } return } func (c *ControlManager) Cleanup() { if c.prepared && c.controlmeta.T() == ContypeScontroller { ctrler := c.control.Interface().(mvc.Controller) ctrler.Destruct_() } } func getVMap(context *web.Context) map[string]reflect.Value { return map[string]reflect.Value{ "context": reflect.ValueOf(context), "w": reflect.ValueOf(context.Response), "r": reflect.ValueOf(context.Request), "upath": reflect.ValueOf(context.UPath), "pdata": reflect.ValueOf(context.PData),
{ reses = strings.Split(res, ",") }
conditional_block
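The conditional block filled in above only runs when the tag value contains a comma; strings.Split already returns a one-element slice when the separator is absent, so the surrounding Contains guard is a readability choice rather than a correctness requirement. A self-contained check:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Split with no separator present still yields one element,
	// so both inputs end up as usable resource-name slices.
	fmt.Println(strings.Split("csrf", ","))      // [csrf]
	fmt.Println(strings.Split("csrf,auth", ",")) // [csrf auth]
}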
controller.go
: context, controlmeta: cm, action: action, } } type ControlManager struct { control reflect.Value context *web.Context controlmeta *Meta action string prepared bool executed bool state int vw mvc.View } func (c *ControlManager) SetControllerMeta(cm *Meta) (ok bool) { if !c.prepared { rtype := cm.RType() if rtype.Kind() == reflect.Ptr { rtype = rtype.Elem() } c.controlmeta = cm c.control = reflect.New(rtype) ok = true } return } func (c *ControlManager) SetAction(action string) (ok bool) { if !c.prepared { c.action = action ok = true } return } func (c *ControlManager) SetState(state int) { c.state = state } func (c *ControlManager) State() int { return c.state } func (c *ControlManager) View() mvc.View { return c.vw } func (c *ControlManager) IsPrepared() bool { return c.prepared } func (c *ControlManager) IsExecuted() bool { return c.executed } func (c *ControlManager) MvcMeta() mvc.Meta { if c.controlmeta != nil { return mvc.Meta{c.controlmeta.Module(), c.controlmeta.Name(), c.action, c.context.Ext} } return mvc.Meta{} } func (c *ControlManager) ModuleName() string { if c.controlmeta != nil { return c.controlmeta.Module() } return "" } func (c *ControlManager) ControllerName() string { if c.controlmeta != nil { return c.controlmeta.Name() } return "" } func (c *ControlManager) ActionName() string { return c.action } func (c *ControlManager) Controller() reflect.Value { return c.control } func (c *ControlManager) ActionMeta() *ActionMeta { return c.controlmeta.ActionFromRequest(c.MvcMeta()[mvc.MVCAction], c.context) } func (c *ControlManager) AvailableMethods() ReqMethod { return c.controlmeta.ActionAvailableMethods(c.action) } func (c *ControlManager) AvailableMethodsList() []string { return c.controlmeta.ActionAvailableMethodsList(c.action) } func (c *ControlManager) ControllerMeta() *Meta { return c.controlmeta } func (c *ControlManager) Context() *web.Context { return c.context } func (c *ControlManager) PrepareAndExecute() (state int, vw mvc.View) { if c.Prepare() == nil { return c.Execute() } return c.state, nil } func (c *ControlManager) Prepare() error { if !c.prepared && (c.state == 0 || (c.state >= 200 && c.state < 300)) { if c.controlmeta == nil { c.state = 404 return ErrControllerNotFound } switch c.controlmeta.T() { case ContypeConstructor: results := c.control.Call(getArgSlice(c.controlmeta.Args(), getVMap(c.context), c.context.PData)) c.control = results[0] if c.control.Kind() == reflect.Interface { c.control = c.control.Elem() } // after Elem from Interface, it might be a pointer to a struct too if c.control.Kind() == reflect.Ptr { c.control = c.control.Elem() } if c.controlmeta.Status() { state := int(results[1].Int()) if state <= 0 { state = http.StatusOK } c.state = state } case ContypeStruct, ContypeScontroller: fields := c.controlmeta.Fields() tmpcontrol := reflect.Indirect(c.control) for _, field := range fields { value := getDataValue(&field.DataMeta, getVMap(c.context), c.context.PData) // allows middleware resources to make changes to value based on tag // this can be useful to csrf where non csrf verified values are filtered if res := field.Tag().Get(StructValueFeedTag); res != "" { var reses []string if strings.Contains(res, ",") { reses = strings.Split(res, ",") } else { reses = []string{res} } for _, r := range reses { rinterface := c.context.Resource(strings.TrimSpace(r)) if rinterface != nil { if parser, ok := rinterface.(StructValueFeeder); ok { var err error value, err = parser.FeedStructValue(c.context, field, value) if err != nil { c.state = 500 
log.Println(err) return ErrParseStruct } } } else { log.Println("Resource to parse struct var not found: ", r) c.state = 500 return ErrParseStruct } } } else if res := field.Tag().Get(StructValueResTag); res != "" { rinterface := c.context.Resource(strings.TrimSpace(res)) if rinterface != nil { value = reflect.ValueOf(rinterface) } } if value.IsValid() { tmpcontrol.FieldByName(field.Name()).Set(value) } } if c.state != 500 && c.controlmeta.T() == ContypeScontroller { ctrler := c.control.Interface().(mvc.Controller) ctrler.Construct_(c.context) } } if c.context.HasErrorCode() { c.state = c.context.ErrorCode() } c.prepared = true } return nil } func (c *ControlManager) Execute() (state int, vw mvc.View) { if c.prepared { if c.state == 0 { c.state = 200 } var results []reflect.Value var rstyle = c.controlmeta.ResultStyle if c.state >= http.StatusOK && c.state < http.StatusMultipleChoices { switch c.controlmeta.T() { case ContypeFunc: results = c.control.Call(getArgSlice(c.controlmeta.Args(), getVMap(c.context), c.context.PData)) default: actmeta := c.ActionMeta() if actmeta != nil { meth := c.control.MethodByName(actmeta.Name()) results = meth.Call(getArgSlice(actmeta.Args(), getVMap(c.context), c.context.PData)) rstyle = actmeta.ResultStyle } else { c.state = 404 state = c.state return } } } if rstyle.Status() { c.state = int(results[1].Int()) } if rstyle.View() || rstyle.Vmap() || rstyle.MapSI() { // for a consistent error page, error should be returned instead and allow sunny server itself // to render the error page state = c.state if state == 200 || state == 0 { if rstyle.View() { if !results[0].IsNil() && results[0].IsValid() { c.vw = (results[0].Interface()).(mvc.View) } } else { var vmap mvc.VM if results[0].IsNil() || !results[0].IsValid() { vmap = mvc.VM{} } else if rstyle.Vmap() { vmap = results[0].Interface().(mvc.VM) } else { vmap = mvc.VM(results[0].Interface().(map[string]interface{})) } c.vw = view.NewResultView(vmap) } vw = c.vw if vw == nil { state = -1 } } } else { // if state returned is -1, it means the controller has handled the response state = -1 } c.executed = true } else { state = c.state } return } func (c *ControlManager) PublishView() (err error) { if !c.prepared { err = ErrUnprepared } else if !c.executed { err = ErrUnexecuted } else if c.vw != nil { if c.context.Request.Method == "HEAD" { c.context.Response.Header().Set("Content-Type", c.vw.ContentType(c.context)) } else { err = c.vw.Publish(c.context) } } return } func (c *ControlManager) Cleanup()
func getVMap(context *web.Context) map[string]reflect.Value { return map[string]reflect.Value{ "context": reflect.ValueOf(context), "w": reflect.ValueOf(context.Response), "r": reflect.ValueOf(context.Request), "upath": reflect.ValueOf(context.UPath), "pdata": reflect.ValueOf(context.PData),
{ if c.prepared && c.controlmeta.T() == ContypeScontroller { ctrler := c.control.Interface().(mvc.Controller) ctrler.Destruct_() } }
identifier_body
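The identifier body above belongs to Cleanup, which mirrors the Construct_ call that Prepare makes for ContypeScontroller controllers. A sketch of a controller satisfying that pairing; the mvc.Controller interface is assumed to include at least these two methods, and the type name is hypothetical:

type timingController struct {
	started time.Time
}

// Construct_ is invoked at the end of Prepare, before the action runs.
func (t *timingController) Construct_(ctx *web.Context) {
	t.started = time.Now()
}

// Destruct_ is invoked from Cleanup once the request is finished.
func (t *timingController) Destruct_() {
	log.Println("request served in", time.Since(t.started))
}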
inifile.py
pyproject.toml` or `flit.ini` file with data about the package. """ if path.suffix == '.toml': with path.open() as f: d = toml.load(f) res = prep_toml_config(d, path) else: # Treat all other extensions as the older flit.ini format cp = _read_pkg_ini(path) res = _validate_config(cp, path) if validate_config(res): if os.environ.get('FLIT_ALLOW_INVALID'): log.warning("Allowing invalid data (FLIT_ALLOW_INVALID set). Uploads may still fail.") else: raise ConfigError("Invalid config values (see log)") return res class EntryPointsConflict(ConfigError): def __str__(self): return ('Please specify console_scripts entry points, or [scripts] in ' 'flit config, not both.') def prep_toml_config(d, path): """Validate config loaded from pyproject.toml and prepare common metadata Returns a dictionary with keys: module, metadata, scripts, entrypoints, raw_config. """ if ('tool' not in d) or ('flit' not in d['tool']) \ or (not isinstance(d['tool']['flit'], dict)): raise ConfigError("TOML file missing [tool.flit] table.") d = d['tool']['flit'] unknown_sections = set(d) - {'metadata', 'scripts', 'entrypoints'} unknown_sections = [s for s in unknown_sections if not s.lower().startswith('x-')] if unknown_sections: raise ConfigError('Unknown sections: ' + ', '.join(unknown_sections)) if 'metadata' not in d: raise ConfigError('[tool.flit.metadata] section is required') md_dict, module, reqs_by_extra = _prep_metadata(d['metadata'], path) if 'scripts' in d: scripts_dict = dict(d['scripts']) else: scripts_dict = {} if 'entrypoints' in d: entrypoints = flatten_entrypoints(d['entrypoints']) else: entrypoints = {} _add_scripts_to_entrypoints(entrypoints, scripts_dict) return { 'module': module, 'metadata': md_dict, 'reqs_by_extra': reqs_by_extra, 'scripts': scripts_dict, 'entrypoints': entrypoints, 'raw_config': d, } def flatten_entrypoints(ep): """Flatten nested entrypoints dicts. Entry points group names can include dots. But dots in TOML make nested dictionaries: [entrypoints.a.b] # {'entrypoints': {'a': {'b': {}}}} The proper way to avoid this is: [entrypoints."a.b"] # {'entrypoints': {'a.b': {}}} But since there isn't a need for arbitrarily nested mappings in entrypoints, flit allows you to use the former. This flattens the nested dictionaries from loading pyproject.toml. """ def _flatten(d, prefix): d1 = {} for k, v in d.items(): if isinstance(v, dict): yield from _flatten(v, prefix+'.'+k) else: d1[k] = v if d1: yield prefix, d1 res = {} for k, v in ep.items(): res.update(_flatten(v, k)) return res def _add_scripts_to_entrypoints(entrypoints, scripts_dict): if scripts_dict: if 'console_scripts' in entrypoints: raise EntryPointsConflict else: entrypoints['console_scripts'] = scripts_dict def _read_pkg_ini(path): """Reads old-style flit.ini """ cp = configparser.ConfigParser() with path.open(encoding='utf-8') as f: cp.read_file(f) return cp readme_ext_to_content_type = { '.rst': 'text/x-rst', '.md': 'text/markdown', '.txt': 'text/plain', } def _prep_metadata(md_sect, path): """Process & verify the metadata from a config file - Pull out the module name we're packaging. - Read description-file and check that it's valid rst - Convert dashes in key names to underscores (e.g. 
home-page in config -> home_page in metadata) """ if not set(md_sect).issuperset(metadata_required_fields): missing = metadata_required_fields - set(md_sect) raise ConfigError("Required fields missing: " + '\n'.join(missing)) module = md_sect.get('module') if not module.isidentifier(): raise ConfigError("Module name %r is not a valid identifier" % module) md_dict = {} # Description file if 'description-file' in md_sect:
raise ConfigError( "Description file {} does not exist".format(description_file) ) ext = description_file.suffix try: mimetype = readme_ext_to_content_type[ext] except KeyError: log.warning("Unknown extension %r for description file.", ext) log.warning(" Recognised extensions: %s", " ".join(readme_ext_to_content_type)) mimetype = None if mimetype == 'text/x-rst': # rst check stream = io.StringIO() res = render(raw_desc, stream) if not res: log.warning("The file description seems not to be valid rst for PyPI;" " it will be interpreted as plain text") log.warning(stream.getvalue()) md_dict['description'] = raw_desc md_dict['description_content_type'] = mimetype if 'urls' in md_sect: project_urls = md_dict['project_urls'] = [] for label, url in sorted(md_sect.pop('urls').items()): project_urls.append("{}, {}".format(label, url)) for key, value in md_sect.items(): if key in {'description-file', 'module'}: continue if key not in metadata_allowed_fields: closest = difflib.get_close_matches(key, metadata_allowed_fields, n=1, cutoff=0.7) msg = "Unrecognised metadata key: {!r}".format(key) if closest: msg += " (did you mean {!r}?)".format(closest[0]) raise ConfigError(msg) k2 = key.replace('-', '_') md_dict[k2] = value if key in metadata_list_fields: if not isinstance(value, list): raise ConfigError('Expected a list for {} field, found {!r}' .format(key, value)) if not all(isinstance(a, str) for a in value): raise ConfigError('Expected a list of strings for {} field' .format(key)) elif key == 'requires-extra': if not isinstance(value, dict): raise ConfigError('Expected a dict for requires-extra field, found {!r}' .format(value)) if not all(isinstance(e, list) for e in value.values()): raise ConfigError('Expected a dict of lists for requires-extra field') for e, reqs in value.items(): if not all(isinstance(a, str) for a in reqs): raise ConfigError('Expected a string list for requires-extra. (extra {})' .format(e)) else: if not isinstance(value, str): raise ConfigError('Expected a string for {} field, found {!r}' .format(key, value)) # What we call requires in the ini file is technically requires_dist in # the metadata. if 'requires' in md_dict: md_dict['requires_dist'] = md_dict.pop('requires') # And what we call dist-name is name in the metadata if 'dist_name' in md_dict: md_dict['name'] = md_dict.pop('dist_name') # Move dev-requires into requires-extra reqs_noextra = md_dict.pop('requires_dist', []) reqs_by_extra = md_dict.pop('requires_extra', {}) dev_requires = md_dict.pop('dev_requires', None) if dev_requires is not None: if 'dev' in reqs_by_extra: raise ConfigError( 'dev-requires occurs together with its replacement requires-extra.dev.') else: log.warning( '“dev-requires = ...” is obsolete. Use “requires-extra = {"dev" = ...}” instead.') reqs_by_extra['dev'] = dev_requires # Add requires-extra requirements into requires_dist md_dict['requires_dist'] = \ reqs_noextra + list(_expand_requires_extra(reqs_by_extra)) md_dict['provides_extra'] = sorted(reqs_by_extra.keys()) # For internal use, record the main requirements as a '.none' extra. reqs_by_extra['.none'] = reqs_noextra return md_dict, module, reqs_by_extra def _expand_requires_extra(re): for extra, reqs in sorted(re.items()): for req in reqs: if ';' in req: name, envmark = req.split(';', 1) yield '{}; extra == "{}" and ({})'.format(name, extra, envmark) else: yield
description_file = path.parent / md_sect.get('description-file') try: with description_file.open(encoding='utf-8') as f: raw_desc = f.read() except FileNotFoundError:
random_line_split
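The row above ends inside _expand_requires_extra, which rewrites every requirement of an extra with an extra == "..." environment marker and ANDs in any marker the requirement already carries (the truncated else branch presumably yields the plain marker form). The same transformation sketched in Go; unlike the Python, Go's map iteration is unordered, so a faithful port would sort the extras first:

import (
	"fmt"
	"strings"
)

func expandRequiresExtra(re map[string][]string) []string {
	var out []string
	for extra, reqs := range re {
		for _, req := range reqs {
			if i := strings.Index(req, ";"); i >= 0 {
				// existing env marker: AND it with the extra marker
				out = append(out, fmt.Sprintf("%s; extra == %q and (%s)", req[:i], extra, req[i+1:]))
			} else {
				out = append(out, fmt.Sprintf("%s; extra == %q", req, extra))
			}
		}
	}
	return out
}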
inifile.py
pyproject.toml` or `flit.ini` file with data about the package. """ if path.suffix == '.toml': with path.open() as f: d = toml.load(f) res = prep_toml_config(d, path) else: # Treat all other extensions as the older flit.ini format cp = _read_pkg_ini(path) res = _validate_config(cp, path) if validate_config(res): if os.environ.get('FLIT_ALLOW_INVALID'): log.warning("Allowing invalid data (FLIT_ALLOW_INVALID set). Uploads may still fail.") else: raise ConfigError("Invalid config values (see log)") return res class EntryPointsConflict(ConfigError): def __str__(self): return ('Please specify console_scripts entry points, or [scripts] in ' 'flit config, not both.') def prep_toml_config(d, path): """Validate config loaded from pyproject.toml and prepare common metadata Returns a dictionary with keys: module, metadata, scripts, entrypoints, raw_config. """ if ('tool' not in d) or ('flit' not in d['tool']) \ or (not isinstance(d['tool']['flit'], dict)): raise ConfigError("TOML file missing [tool.flit] table.") d = d['tool']['flit'] unknown_sections = set(d) - {'metadata', 'scripts', 'entrypoints'} unknown_sections = [s for s in unknown_sections if not s.lower().startswith('x-')] if unknown_sections: raise ConfigError('Unknown sections: ' + ', '.join(unknown_sections)) if 'metadata' not in d: raise ConfigError('[tool.flit.metadata] section is required') md_dict, module, reqs_by_extra = _prep_metadata(d['metadata'], path) if 'scripts' in d: scripts_dict = dict(d['scripts']) else: scripts_dict = {} if 'entrypoints' in d: entrypoints = flatten_entrypoints(d['entrypoints']) else: entrypoints = {} _add_scripts_to_entrypoints(entrypoints, scripts_dict) return { 'module': module, 'metadata': md_dict, 'reqs_by_extra': reqs_by_extra, 'scripts': scripts_dict, 'entrypoints': entrypoints, 'raw_config': d, } def flatten_entrypoints(ep): """Flatten nested entrypoints dicts. Entry points group names can include dots. But dots in TOML make nested dictionaries: [entrypoints.a.b] # {'entrypoints': {'a': {'b': {}}}} The proper way to avoid this is: [entrypoints."a.b"] # {'entrypoints': {'a.b': {}}} But since there isn't a need for arbitrarily nested mappings in entrypoints, flit allows you to use the former. This flattens the nested dictionaries from loading pyproject.toml. """ def _flatten(d, prefix): d1 = {} for k, v in d.items(): if isinstance(v, dict): yield from _flatten(v, prefix+'.'+k) else: d1[k] = v if d1: yield prefix, d1 res = {} for k, v in ep.items(): res.update(_flatten(v, k)) return res def _add_scripts_to_entrypoints(entrypoints, scripts_dict): if scripts_dict: if 'console_scripts' in entrypoints:
else: entrypoints['console_scripts'] = scripts_dict def _read_pkg_ini(path): """Reads old-style flit.ini """ cp = configparser.ConfigParser() with path.open(encoding='utf-8') as f: cp.read_file(f) return cp readme_ext_to_content_type = { '.rst': 'text/x-rst', '.md': 'text/markdown', '.txt': 'text/plain', } def _prep_metadata(md_sect, path): """Process & verify the metadata from a config file - Pull out the module name we're packaging. - Read description-file and check that it's valid rst - Convert dashes in key names to underscores (e.g. home-page in config -> home_page in metadata) """ if not set(md_sect).issuperset(metadata_required_fields): missing = metadata_required_fields - set(md_sect) raise ConfigError("Required fields missing: " + '\n'.join(missing)) module = md_sect.get('module') if not module.isidentifier(): raise ConfigError("Module name %r is not a valid identifier" % module) md_dict = {} # Description file if 'description-file' in md_sect: description_file = path.parent / md_sect.get('description-file') try: with description_file.open(encoding='utf-8') as f: raw_desc = f.read() except FileNotFoundError: raise ConfigError( "Description file {} does not exist".format(description_file) ) ext = description_file.suffix try: mimetype = readme_ext_to_content_type[ext] except KeyError: log.warning("Unknown extension %r for description file.", ext) log.warning(" Recognised extensions: %s", " ".join(readme_ext_to_content_type)) mimetype = None if mimetype == 'text/x-rst': # rst check stream = io.StringIO() res = render(raw_desc, stream) if not res: log.warning("The file description seems not to be valid rst for PyPI;" " it will be interpreted as plain text") log.warning(stream.getvalue()) md_dict['description'] = raw_desc md_dict['description_content_type'] = mimetype if 'urls' in md_sect: project_urls = md_dict['project_urls'] = [] for label, url in sorted(md_sect.pop('urls').items()): project_urls.append("{}, {}".format(label, url)) for key, value in md_sect.items(): if key in {'description-file', 'module'}: continue if key not in metadata_allowed_fields: closest = difflib.get_close_matches(key, metadata_allowed_fields, n=1, cutoff=0.7) msg = "Unrecognised metadata key: {!r}".format(key) if closest: msg += " (did you mean {!r}?)".format(closest[0]) raise ConfigError(msg) k2 = key.replace('-', '_') md_dict[k2] = value if key in metadata_list_fields: if not isinstance(value, list): raise ConfigError('Expected a list for {} field, found {!r}' .format(key, value)) if not all(isinstance(a, str) for a in value): raise ConfigError('Expected a list of strings for {} field' .format(key)) elif key == 'requires-extra': if not isinstance(value, dict): raise ConfigError('Expected a dict for requires-extra field, found {!r}' .format(value)) if not all(isinstance(e, list) for e in value.values()): raise ConfigError('Expected a dict of lists for requires-extra field') for e, reqs in value.items(): if not all(isinstance(a, str) for a in reqs): raise ConfigError('Expected a string list for requires-extra. (extra {})' .format(e)) else: if not isinstance(value, str): raise ConfigError('Expected a string for {} field, found {!r}' .format(key, value)) # What we call requires in the ini file is technically requires_dist in # the metadata. 
if 'requires' in md_dict: md_dict['requires_dist'] = md_dict.pop('requires') # And what we call dist-name is name in the metadata if 'dist_name' in md_dict: md_dict['name'] = md_dict.pop('dist_name') # Move dev-requires into requires-extra reqs_noextra = md_dict.pop('requires_dist', []) reqs_by_extra = md_dict.pop('requires_extra', {}) dev_requires = md_dict.pop('dev_requires', None) if dev_requires is not None: if 'dev' in reqs_by_extra: raise ConfigError( 'dev-requires occurs together with its replacement requires-extra.dev.') else: log.warning( '“dev-requires = ...” is obsolete. Use “requires-extra = {"dev" = ...}” instead.') reqs_by_extra['dev'] = dev_requires # Add requires-extra requirements into requires_dist md_dict['requires_dist'] = \ reqs_noextra + list(_expand_requires_extra(reqs_by_extra)) md_dict['provides_extra'] = sorted(reqs_by_extra.keys()) # For internal use, record the main requirements as a '.none' extra. reqs_by_extra['.none'] = reqs_noextra return md_dict, module, reqs_by_extra def _expand_requires_extra(re): for extra, reqs in sorted(re.items()): for req in reqs: if ';' in req: name, envmark = req.split(';', 1) yield '{}; extra == "{}" and ({})'.format(name, extra, envmark) else: yield
raise EntryPointsConflict
conditional_block
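The conditional block above is the conflict arm of _add_scripts_to_entrypoints: scripts only merge into the entrypoints mapping when no console_scripts group exists yet. The equivalent shape in Go, with plain string maps standing in for the parsed config:

import "errors"

func addScriptsToEntrypoints(entrypoints map[string]map[string]string, scripts map[string]string) error {
	if len(scripts) == 0 {
		return nil // nothing to merge
	}
	if _, exists := entrypoints["console_scripts"]; exists {
		return errors.New("specify console_scripts entry points or [scripts] in flit config, not both")
	}
	entrypoints["console_scripts"] = scripts
	return nil
}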
inifile.py
pyproject.toml` or `flit.ini` file with data about the package. """ if path.suffix == '.toml': with path.open() as f: d = toml.load(f) res = prep_toml_config(d, path) else: # Treat all other extensions as the older flit.ini format cp = _read_pkg_ini(path) res = _validate_config(cp, path) if validate_config(res): if os.environ.get('FLIT_ALLOW_INVALID'): log.warning("Allowing invalid data (FLIT_ALLOW_INVALID set). Uploads may still fail.") else: raise ConfigError("Invalid config values (see log)") return res class EntryPointsConflict(ConfigError):
def prep_toml_config(d, path): """Validate config loaded from pyproject.toml and prepare common metadata Returns a dictionary with keys: module, metadata, scripts, entrypoints, raw_config. """ if ('tool' not in d) or ('flit' not in d['tool']) \ or (not isinstance(d['tool']['flit'], dict)): raise ConfigError("TOML file missing [tool.flit] table.") d = d['tool']['flit'] unknown_sections = set(d) - {'metadata', 'scripts', 'entrypoints'} unknown_sections = [s for s in unknown_sections if not s.lower().startswith('x-')] if unknown_sections: raise ConfigError('Unknown sections: ' + ', '.join(unknown_sections)) if 'metadata' not in d: raise ConfigError('[tool.flit.metadata] section is required') md_dict, module, reqs_by_extra = _prep_metadata(d['metadata'], path) if 'scripts' in d: scripts_dict = dict(d['scripts']) else: scripts_dict = {} if 'entrypoints' in d: entrypoints = flatten_entrypoints(d['entrypoints']) else: entrypoints = {} _add_scripts_to_entrypoints(entrypoints, scripts_dict) return { 'module': module, 'metadata': md_dict, 'reqs_by_extra': reqs_by_extra, 'scripts': scripts_dict, 'entrypoints': entrypoints, 'raw_config': d, } def flatten_entrypoints(ep): """Flatten nested entrypoints dicts. Entry points group names can include dots. But dots in TOML make nested dictionaries: [entrypoints.a.b] # {'entrypoints': {'a': {'b': {}}}} The proper way to avoid this is: [entrypoints."a.b"] # {'entrypoints': {'a.b': {}}} But since there isn't a need for arbitrarily nested mappings in entrypoints, flit allows you to use the former. This flattens the nested dictionaries from loading pyproject.toml. """ def _flatten(d, prefix): d1 = {} for k, v in d.items(): if isinstance(v, dict): yield from _flatten(v, prefix+'.'+k) else: d1[k] = v if d1: yield prefix, d1 res = {} for k, v in ep.items(): res.update(_flatten(v, k)) return res def _add_scripts_to_entrypoints(entrypoints, scripts_dict): if scripts_dict: if 'console_scripts' in entrypoints: raise EntryPointsConflict else: entrypoints['console_scripts'] = scripts_dict def _read_pkg_ini(path): """Reads old-style flit.ini """ cp = configparser.ConfigParser() with path.open(encoding='utf-8') as f: cp.read_file(f) return cp readme_ext_to_content_type = { '.rst': 'text/x-rst', '.md': 'text/markdown', '.txt': 'text/plain', } def _prep_metadata(md_sect, path): """Process & verify the metadata from a config file - Pull out the module name we're packaging. - Read description-file and check that it's valid rst - Convert dashes in key names to underscores (e.g. 
home-page in config -> home_page in metadata) """ if not set(md_sect).issuperset(metadata_required_fields): missing = metadata_required_fields - set(md_sect) raise ConfigError("Required fields missing: " + '\n'.join(missing)) module = md_sect.get('module') if not module.isidentifier(): raise ConfigError("Module name %r is not a valid identifier" % module) md_dict = {} # Description file if 'description-file' in md_sect: description_file = path.parent / md_sect.get('description-file') try: with description_file.open(encoding='utf-8') as f: raw_desc = f.read() except FileNotFoundError: raise ConfigError( "Description file {} does not exist".format(description_file) ) ext = description_file.suffix try: mimetype = readme_ext_to_content_type[ext] except KeyError: log.warning("Unknown extension %r for description file.", ext) log.warning(" Recognised extensions: %s", " ".join(readme_ext_to_content_type)) mimetype = None if mimetype == 'text/x-rst': # rst check stream = io.StringIO() res = render(raw_desc, stream) if not res: log.warning("The file description seems not to be valid rst for PyPI;" " it will be interpreted as plain text") log.warning(stream.getvalue()) md_dict['description'] = raw_desc md_dict['description_content_type'] = mimetype if 'urls' in md_sect: project_urls = md_dict['project_urls'] = [] for label, url in sorted(md_sect.pop('urls').items()): project_urls.append("{}, {}".format(label, url)) for key, value in md_sect.items(): if key in {'description-file', 'module'}: continue if key not in metadata_allowed_fields: closest = difflib.get_close_matches(key, metadata_allowed_fields, n=1, cutoff=0.7) msg = "Unrecognised metadata key: {!r}".format(key) if closest: msg += " (did you mean {!r}?)".format(closest[0]) raise ConfigError(msg) k2 = key.replace('-', '_') md_dict[k2] = value if key in metadata_list_fields: if not isinstance(value, list): raise ConfigError('Expected a list for {} field, found {!r}' .format(key, value)) if not all(isinstance(a, str) for a in value): raise ConfigError('Expected a list of strings for {} field' .format(key)) elif key == 'requires-extra': if not isinstance(value, dict): raise ConfigError('Expected a dict for requires-extra field, found {!r}' .format(value)) if not all(isinstance(e, list) for e in value.values()): raise ConfigError('Expected a dict of lists for requires-extra field') for e, reqs in value.items(): if not all(isinstance(a, str) for a in reqs): raise ConfigError('Expected a string list for requires-extra. (extra {})' .format(e)) else: if not isinstance(value, str): raise ConfigError('Expected a string for {} field, found {!r}' .format(key, value)) # What we call requires in the ini file is technically requires_dist in # the metadata. if 'requires' in md_dict: md_dict['requires_dist'] = md_dict.pop('requires') # And what we call dist-name is name in the metadata if 'dist_name' in md_dict: md_dict['name'] = md_dict.pop('dist_name') # Move dev-requires into requires-extra reqs_noextra = md_dict.pop('requires_dist', []) reqs_by_extra = md_dict.pop('requires_extra', {}) dev_requires = md_dict.pop('dev_requires', None) if dev_requires is not None: if 'dev' in reqs_by_extra: raise ConfigError( 'dev-requires occurs together with its replacement requires-extra.dev.') else: log.warning( '“dev-requires = ...” is obsolete. 
Use “requires-extra = {"dev" = ...}” instead.') reqs_by_extra['dev'] = dev_requires # Add requires-extra requirements into requires_dist md_dict['requires_dist'] = \ reqs_noextra + list(_expand_requires_extra(reqs_by_extra)) md_dict['provides_extra'] = sorted(reqs_by_extra.keys()) # For internal use, record the main requirements as a '.none' extra. reqs_by_extra['.none'] = reqs_noextra return md_dict, module, reqs_by_extra def _expand_requires_extra(re): for extra, reqs in sorted(re.items()): for req in reqs: if ';' in req: name, envmark = req.split(';', 1) yield '{}; extra == "{}" and ({})'.format(name, extra, envmark) else:
def __str__(self): return ('Please specify console_scripts entry points, or [scripts] in ' 'flit config, not both.')
identifier_body
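The identifier body above gives EntryPointsConflict a fixed message via __str__ rather than passing one at raise time. The closest Go idiom, for comparison, is a sentinel error value with a constant message:

import "errors"

var ErrEntryPointsConflict = errors.New(
	"Please specify console_scripts entry points, or [scripts] in flit config, not both.")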
inifile.py
pyproject.toml` or `flit.ini` file with data about the package. """ if path.suffix == '.toml': with path.open() as f: d = toml.load(f) res = prep_toml_config(d, path) else: # Treat all other extensions as the older flit.ini format cp = _read_pkg_ini(path) res = _validate_config(cp, path) if validate_config(res): if os.environ.get('FLIT_ALLOW_INVALID'): log.warning("Allowing invalid data (FLIT_ALLOW_INVALID set). Uploads may still fail.") else: raise ConfigError("Invalid config values (see log)") return res class EntryPointsConflict(ConfigError): def __str__(self): return ('Please specify console_scripts entry points, or [scripts] in ' 'flit config, not both.') def prep_toml_config(d, path): """Validate config loaded from pyproject.toml and prepare common metadata Returns a dictionary with keys: module, metadata, scripts, entrypoints, raw_config. """ if ('tool' not in d) or ('flit' not in d['tool']) \ or (not isinstance(d['tool']['flit'], dict)): raise ConfigError("TOML file missing [tool.flit] table.") d = d['tool']['flit'] unknown_sections = set(d) - {'metadata', 'scripts', 'entrypoints'} unknown_sections = [s for s in unknown_sections if not s.lower().startswith('x-')] if unknown_sections: raise ConfigError('Unknown sections: ' + ', '.join(unknown_sections)) if 'metadata' not in d: raise ConfigError('[tool.flit.metadata] section is required') md_dict, module, reqs_by_extra = _prep_metadata(d['metadata'], path) if 'scripts' in d: scripts_dict = dict(d['scripts']) else: scripts_dict = {} if 'entrypoints' in d: entrypoints = flatten_entrypoints(d['entrypoints']) else: entrypoints = {} _add_scripts_to_entrypoints(entrypoints, scripts_dict) return { 'module': module, 'metadata': md_dict, 'reqs_by_extra': reqs_by_extra, 'scripts': scripts_dict, 'entrypoints': entrypoints, 'raw_config': d, } def flatten_entrypoints(ep): """Flatten nested entrypoints dicts. Entry points group names can include dots. But dots in TOML make nested dictionaries: [entrypoints.a.b] # {'entrypoints': {'a': {'b': {}}}} The proper way to avoid this is: [entrypoints."a.b"] # {'entrypoints': {'a.b': {}}} But since there isn't a need for arbitrarily nested mappings in entrypoints, flit allows you to use the former. This flattens the nested dictionaries from loading pyproject.toml. """ def _flatten(d, prefix): d1 = {} for k, v in d.items(): if isinstance(v, dict): yield from _flatten(v, prefix+'.'+k) else: d1[k] = v if d1: yield prefix, d1 res = {} for k, v in ep.items(): res.update(_flatten(v, k)) return res def
(entrypoints, scripts_dict): if scripts_dict: if 'console_scripts' in entrypoints: raise EntryPointsConflict else: entrypoints['console_scripts'] = scripts_dict def _read_pkg_ini(path): """Reads old-style flit.ini """ cp = configparser.ConfigParser() with path.open(encoding='utf-8') as f: cp.read_file(f) return cp readme_ext_to_content_type = { '.rst': 'text/x-rst', '.md': 'text/markdown', '.txt': 'text/plain', } def _prep_metadata(md_sect, path): """Process & verify the metadata from a config file - Pull out the module name we're packaging. - Read description-file and check that it's valid rst - Convert dashes in key names to underscores (e.g. home-page in config -> home_page in metadata) """ if not set(md_sect).issuperset(metadata_required_fields): missing = metadata_required_fields - set(md_sect) raise ConfigError("Required fields missing: " + '\n'.join(missing)) module = md_sect.get('module') if not module.isidentifier(): raise ConfigError("Module name %r is not a valid identifier" % module) md_dict = {} # Description file if 'description-file' in md_sect: description_file = path.parent / md_sect.get('description-file') try: with description_file.open(encoding='utf-8') as f: raw_desc = f.read() except FileNotFoundError: raise ConfigError( "Description file {} does not exist".format(description_file) ) ext = description_file.suffix try: mimetype = readme_ext_to_content_type[ext] except KeyError: log.warning("Unknown extension %r for description file.", ext) log.warning(" Recognised extensions: %s", " ".join(readme_ext_to_content_type)) mimetype = None if mimetype == 'text/x-rst': # rst check stream = io.StringIO() res = render(raw_desc, stream) if not res: log.warning("The file description seems not to be valid rst for PyPI;" " it will be interpreted as plain text") log.warning(stream.getvalue()) md_dict['description'] = raw_desc md_dict['description_content_type'] = mimetype if 'urls' in md_sect: project_urls = md_dict['project_urls'] = [] for label, url in sorted(md_sect.pop('urls').items()): project_urls.append("{}, {}".format(label, url)) for key, value in md_sect.items(): if key in {'description-file', 'module'}: continue if key not in metadata_allowed_fields: closest = difflib.get_close_matches(key, metadata_allowed_fields, n=1, cutoff=0.7) msg = "Unrecognised metadata key: {!r}".format(key) if closest: msg += " (did you mean {!r}?)".format(closest[0]) raise ConfigError(msg) k2 = key.replace('-', '_') md_dict[k2] = value if key in metadata_list_fields: if not isinstance(value, list): raise ConfigError('Expected a list for {} field, found {!r}' .format(key, value)) if not all(isinstance(a, str) for a in value): raise ConfigError('Expected a list of strings for {} field' .format(key)) elif key == 'requires-extra': if not isinstance(value, dict): raise ConfigError('Expected a dict for requires-extra field, found {!r}' .format(value)) if not all(isinstance(e, list) for e in value.values()): raise ConfigError('Expected a dict of lists for requires-extra field') for e, reqs in value.items(): if not all(isinstance(a, str) for a in reqs): raise ConfigError('Expected a string list for requires-extra. (extra {})' .format(e)) else: if not isinstance(value, str): raise ConfigError('Expected a string for {} field, found {!r}' .format(key, value)) # What we call requires in the ini file is technically requires_dist in # the metadata. 
if 'requires' in md_dict: md_dict['requires_dist'] = md_dict.pop('requires') # And what we call dist-name is name in the metadata if 'dist_name' in md_dict: md_dict['name'] = md_dict.pop('dist_name') # Move dev-requires into requires-extra reqs_noextra = md_dict.pop('requires_dist', []) reqs_by_extra = md_dict.pop('requires_extra', {}) dev_requires = md_dict.pop('dev_requires', None) if dev_requires is not None: if 'dev' in reqs_by_extra: raise ConfigError( 'dev-requires occurs together with its replacement requires-extra.dev.') else: log.warning( '“dev-requires = ...” is obsolete. Use “requires-extra = {"dev" = ...}” instead.') reqs_by_extra['dev'] = dev_requires # Add requires-extra requirements into requires_dist md_dict['requires_dist'] = \ reqs_noextra + list(_expand_requires_extra(reqs_by_extra)) md_dict['provides_extra'] = sorted(reqs_by_extra.keys()) # For internal use, record the main requirements as a '.none' extra. reqs_by_extra['.none'] = reqs_noextra return md_dict, module, reqs_by_extra def _expand_requires_extra(re): for extra, reqs in sorted(re.items()): for req in reqs: if ';' in req: name, envmark = req.split(';', 1) yield '{}; extra == "{}" and ({})'.format(name, extra, envmark) else: yield
_add_scripts_to_entrypoints
identifier_name
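This row's hole is the _add_scripts_to_entrypoints name, but its prefix also carries the flatten_entrypoints docstring: dotted keys such as [entrypoints.a.b] parse into nested tables that must be collapsed back into a dot-joined group name, keeping only non-dict leaves. A Go sketch of that collapse over generic parsed-TOML maps (map[string]interface{} stands in for a table; this illustrates the algorithm, not flit's API):

func flattenEntrypoints(ep map[string]interface{}) map[string]map[string]string {
	res := map[string]map[string]string{}
	var walk func(d map[string]interface{}, prefix string)
	walk = func(d map[string]interface{}, prefix string) {
		leaves := map[string]string{}
		for k, v := range d {
			if sub, ok := v.(map[string]interface{}); ok {
				walk(sub, prefix+"."+k) // a nested table extends the group name
			} else if s, ok := v.(string); ok {
				leaves[k] = s // a leaf is an actual entry point definition
			}
		}
		if len(leaves) > 0 {
			res[prefix] = leaves // only emit groups that hold leaves
		}
	}
	for k, v := range ep {
		if sub, ok := v.(map[string]interface{}); ok {
			walk(sub, k)
		}
	}
	return res
}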
dynmap.go
for k, v := range(this.Map) { submp, ok := ToDynMap(this.Map[k]) if ok { v = submp.ToMap() } mp[k] = v } return mp } // recursively clones this DynMap. all sub maps will be cloned as well func (this *DynMap) Clone() *DynMap { mp := New() for k, v := range(this.Map) { submp, ok := ToDynMap(this.Map[k]) if ok { v = submp.Clone() } mp.Put(k, v) } return mp } // Returns self. Here so that we satisfy the DynMaper interface func (this *DynMap) ToDynMap() *DynMap { return this } //encodes this map into a url encoded string. //maps are encoded in the rails style (key[key1][key2]=value) // TODO: we should sort the keynames so ordering is consistent and then this // can be used as a cache key func (this *DynMap) MarshalURL() (string, error) { vals := &url.Values{} for key, value := range this.Map { err := this.urlEncode(vals, key, value) if err != nil { return "", err } } str := vals.Encode() log.Println(str) return str, nil } // Unmarshals a url encoded string. // will also parse rails style maps in the form key[key1][key2]=val func (this *DynMap) UnmarshalURL(urlstring string) error { //TODO: split on ? values, err := url.ParseQuery(urlstring) if err != nil { return err } return this.UnmarshalURLValues(values) } // Unmarshals url.Values into the map. // Will correctly handle rails style maps in the form key[key1][key2]=val func (this *DynMap) UnmarshalURLValues(values url.Values) error { for k := range values { var v = values[k] key := strings.Replace(k, "[", ".", -1) key = strings.Replace(key, "]", "", -1) if len(v) == 1 { this.PutWithDot(key, v[0]) } else { this.PutWithDot(key, v) } } return nil } //adds the requested value to the Values func (this *DynMap) urlEncode(vals *url.Values, key string, value interface{}) error { if DynMapConvertable(value) { mp, ok := ToDynMap(value) if !ok { return fmt.Errorf("Unable to convert %s", mp) } for k, v := range mp.Map { //encode in rails style key[key2]=value this.urlEncode(vals, fmt.Sprintf("%s[%s]", key, k), v) } return nil } r := reflect.ValueOf(value) //now test if it is an array; elements are encoded individually under the same key if r.Kind() == reflect.Array || r.Kind() == reflect.Slice { for i := 0; i < r.Len(); i++ { this.urlEncode(vals, key, r.Index(i).Interface()) } return nil } vals.Add(key, ToString(value)) return nil } func (this *DynMap) MarshalJSON() ([]byte, error) { bytes, err := json.Marshal(this.Map) return bytes, err } func (this *DynMap) UnmarshalJSON(bytes []byte) error { return json.Unmarshal(bytes, &this.Map) } // Gets the value at the specified key as an int64. returns -1,false if value not available or is not convertible func (this *DynMap) GetInt64(key string) (int64, bool) { tmp, ok := this.Get(key) if !ok { return -1, ok } val, err := ToInt64(tmp) if err == nil { return val, true } return -1, false } func (this *DynMap) MustInt64(key string, def int64) int64 { v, ok := this.GetInt64(key) if ok { return v } return def } func (this *DynMap) MustInt(key string, def int) int { v, ok := this.GetInt(key) if ok { return v } return def } func (this *DynMap) GetInt(key string) (int, bool) { v, ok := this.GetInt64(key) if !ok { return -1, ok } return int(v), true } // // Gets a string representation of the value at key // func (this *DynMap) GetString(key string) (string, bool) { tmp, ok := this.Get(key) if !ok { return ToString(tmp), ok } return ToString(tmp), true } // gets a string.
if string is not available in the map, then the default //is returned func (this *DynMap) MustString(key string, def string) string { tmp, ok := this.GetString(key) if !ok { return def } return tmp } func (this *DynMap) GetTime(key string) (time.Time, bool) { tmp, ok := this.Get(key) if !ok { return time.Now(), false } t, err := ToTime(tmp) if err != nil { return time.Now(), false } return t, true } func (this *DynMap) MustTime(key string, def time.Time) time.Time { tmp, ok := this.GetTime(key) if !ok { return def } return tmp } func (this *DynMap) GetBool(key string) (bool, bool) { tmp, ok := this.Get(key) if !ok { return false, ok } b, err := ToBool(tmp) if err != nil { return false, false } return b, true } func (this *DynMap) MustBool(key string, def bool) bool { tmp, ok := this.GetBool(key) if !ok { return def } return tmp } //Gets a dynmap from the requested. // This will update the value in the map if the // value was not already a dynmap. func (this *DynMap) GetDynMap(key string) (*DynMap, bool) { tmp, ok := this.Get(key) if !ok { return nil, ok } mp, ok := ToDynMap(tmp) return mp, ok } func (this *DynMap) MustDynMap(key string, def *DynMap) *DynMap { tmp, ok := this.GetDynMap(key) if !ok { return def } return tmp } // gets a slice of dynmaps func (this *DynMap) GetDynMapSlice(key string) ([]*DynMap, bool) { lst, ok := this.Get(key) if !ok { return nil, false } switch v := lst.(type) { case []*DynMap: return v, true case []interface{}: retlist := make([]*DynMap, 0) for _, tmp := range v { in, ok := ToDynMap(tmp) if !ok { return nil, false } retlist = append(retlist, in) } return retlist, true } return nil, false } //Returns a slice of ints func (this *DynMap) GetIntSlice(key string) ([]int, bool) { lst, ok := this.Get(key) if !ok { return nil, false } switch v := lst.(type) { case []int: return v, true case []interface{}: retlist := make([]int, 0) for _, tmp := range v { in, err := ToInt(tmp) if err != nil { return nil, false } retlist = append(retlist, in) } return retlist, true } return nil, false } //gets a slice of ints. if the value is a string it will //split by the requested delimiter func (this *DynMap) GetIntSliceSplit(key, delim string) ([]int, bool) { lst, ok := this.Get(key) if !ok { return nil, false } switch v := lst.(type) { case string: retlist := make([]int, 0) for _, tmp := range strings.Split(v, delim) { in, err := ToInt(tmp) if err != nil { return nil, false } retlist = append(retlist, in) } return retlist, true } ret, ok := this.GetIntSlice(key) return ret, ok } //Returns a slice of strings func (this *DynMap) GetStringSlice(key string) ([]string, bool) { lst, ok := this.Get(key) if !ok { return nil, false } switch v := lst.(type) { case []string: return v, true case []interface{}: retlist := make([]string, 0) for _, tmp := range v { in := ToString(tmp) retlist = append(retlist, in) } return retlist, true } return nil, false } //gets a slice of strings. if the value is
random_line_split
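MarshalURL in the row above builds rails-style key[sub]=value pairs on top of net/url's Values, and UnmarshalURLValues later rewrites the brackets into dotted keys for PutWithDot. A standalone look at what Values.Encode actually emits; keys come out sorted (which covers most of the cache-key TODO in MarshalURL's comment), and the brackets themselves are percent-encoded:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	vals := url.Values{}
	vals.Add("user[name]", "bob")
	vals.Add("user[tags]", "go")
	vals.Add("user[tags]", "json")
	// Encode sorts by key, so the output is deterministic:
	fmt.Println(vals.Encode())
	// user%5Bname%5D=bob&user%5Btags%5D=go&user%5Btags%5D=json
}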
dynmap.go
rails style (key[key1][key2]=value) // TODO: we should sort the keynames so ordering is consistent and then this // can be used as a cache key func (this *DynMap) MarshalURL() (string, error) { vals := &url.Values{} for key, value := range this.Map { err := this.urlEncode(vals, key, value) if err != nil { return "", err } } str := vals.Encode() log.Println(str) return str, nil } // Unmarshals a url encoded string. // will also parse rails style maps in the form key[key1][key2]=val func (this *DynMap) UnmarshalURL(urlstring string) error { //TODO: split on ? values, err := url.ParseQuery(urlstring) if err != nil { return err } return this.UnmarshalURLValues(values) } // Unmarshals url.Values into the map. // Will correctly handle rails style maps in the form key[key1][key2]=val func (this *DynMap) UnmarshalURLValues(values url.Values) error { for k := range values { var v = values[k] key := strings.Replace(k, "[", ".", -1) key = strings.Replace(key, "]", "", -1) if len(v) == 1 { this.PutWithDot(key, v[0]) } else { this.PutWithDot(key, v) } } return nil } //adds the requested value to the Values func (this *DynMap) urlEncode(vals *url.Values, key string, value interface{}) error { if DynMapConvertable(value) { mp, ok := ToDynMap(value) if !ok
for k, v := range mp.Map { //encode in rails style key[key2]=value this.urlEncode(vals, fmt.Sprintf("%s[%s]", key, k), v) } return nil } r := reflect.ValueOf(value) //now test if it is an array; elements are encoded individually under the same key if r.Kind() == reflect.Array || r.Kind() == reflect.Slice { for i := 0; i < r.Len(); i++ { this.urlEncode(vals, key, r.Index(i).Interface()) } return nil } vals.Add(key, ToString(value)) return nil } func (this *DynMap) MarshalJSON() ([]byte, error) { bytes, err := json.Marshal(this.Map) return bytes, err } func (this *DynMap) UnmarshalJSON(bytes []byte) error { return json.Unmarshal(bytes, &this.Map) } // Gets the value at the specified key as an int64. returns -1,false if value not available or is not convertible func (this *DynMap) GetInt64(key string) (int64, bool) { tmp, ok := this.Get(key) if !ok { return -1, ok } val, err := ToInt64(tmp) if err == nil { return val, true } return -1, false } func (this *DynMap) MustInt64(key string, def int64) int64 { v, ok := this.GetInt64(key) if ok { return v } return def } func (this *DynMap) MustInt(key string, def int) int { v, ok := this.GetInt(key) if ok { return v } return def } func (this *DynMap) GetInt(key string) (int, bool) { v, ok := this.GetInt64(key) if !ok { return -1, ok } return int(v), true } // // Gets a string representation of the value at key // func (this *DynMap) GetString(key string) (string, bool) { tmp, ok := this.Get(key) if !ok { return ToString(tmp), ok } return ToString(tmp), true } // gets a string. if string is not available in the map, then the default //is returned func (this *DynMap) MustString(key string, def string) string { tmp, ok := this.GetString(key) if !ok { return def } return tmp } func (this *DynMap) GetTime(key string) (time.Time, bool) { tmp, ok := this.Get(key) if !ok { return time.Now(), false } t, err := ToTime(tmp) if err != nil { return time.Now(), false } return t, true } func (this *DynMap) MustTime(key string, def time.Time) time.Time { tmp, ok := this.GetTime(key) if !ok { return def } return tmp } func (this *DynMap) GetBool(key string) (bool, bool) { tmp, ok := this.Get(key) if !ok { return false, ok } b, err := ToBool(tmp) if err != nil { return false, false } return b, true } func (this *DynMap) MustBool(key string, def bool) bool { tmp, ok := this.GetBool(key) if !ok { return def } return tmp } //Gets a dynmap from the requested key. // This will update the value in the map if the // value was not already a dynmap. func (this *DynMap) GetDynMap(key string) (*DynMap, bool) { tmp, ok := this.Get(key) if !ok { return nil, ok } mp, ok := ToDynMap(tmp) return mp, ok } func (this *DynMap) MustDynMap(key string, def *DynMap) *DynMap { tmp, ok := this.GetDynMap(key) if !ok { return def } return tmp } // gets a slice of dynmaps func (this *DynMap) GetDynMapSlice(key string) ([]*DynMap, bool) { lst, ok := this.Get(key) if !ok { return nil, false } switch v := lst.(type) { case []*DynMap: return v, true case []interface{}: retlist := make([]*DynMap, 0) for _, tmp := range v { in, ok := ToDynMap(tmp) if !ok { return nil, false } retlist = append(retlist, in) } return retlist, true } return nil, false } //Returns a slice of ints func (this *DynMap) GetIntSlice(key string) ([]int, bool) { lst, ok := this.Get(key) if !ok { return nil, false } switch v := lst.(type) { case []int: return v, true case []interface{}: retlist := make([]int, 0) for _, tmp := range v { in, err := ToInt(tmp) if err != nil { return nil, false } retlist = append(retlist, in) } return retlist, true } return nil, false } //gets a slice of ints.
if the value is a string it will //split by the requested delimiter func (this *DynMap) GetIntSliceSplit(key, delim string) ([]int, bool) { lst, ok := this.Get(key) if !ok { return nil, false } switch v := lst.(type) { case string: retlist := make([]int, 0) for _, tmp := range strings.Split(v, delim) { in, err := ToInt(tmp) if err != nil { return nil, false } retlist = append(retlist, in) } return retlist, true } ret, ok := this.GetIntSlice(key) return ret, ok } //Returns a slice of strings func (this *DynMap) GetStringSlice(key string) ([]string, bool) { lst, ok := this.Get(key) if !ok { return nil, false } switch v := lst.(type) { case []string: return v, true case []interface{}: retlist := make([]string, 0) for _, tmp := range v { in := ToString(tmp) retlist = append(retlist, in) } return retlist, true } return nil, false } //gets a slice of strings. if the value is a string it will //split by the requested delimiter func (this *DynMap) GetStringSliceSplit(key, delim string) ([]string, bool) { lst, ok := this.Get(key) if !ok { return nil, false } switch v := lst.(type) { case string: return strings.Split(v, delim), true } ret, ok := this.GetStringSlice(key) return ret, ok } // Adds the item to a slice func (this *DynMap) AddToSlice(key string, mp interface{}) error { this.PutIfAbsent(key, make([]interface{}, 0)) lst, _ := this.Get(key) switch v := lst.(type) { case []interface{}: v = append(v, mp) this.Put(key, v) } return nil } // puts all the values from the passed in
{ return fmt.Errorf("Unable to convert %s", mp) }
conditional_block
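The Must* accessors around the hole above all share one shape: call the typed getter, and fall back to the caller's default when the key is missing or the conversion fails. A usage sketch, assuming ToInt64 accepts numeric strings:

func exampleDefaults() {
	mp := New()
	mp.Put("port", "8080")
	port := mp.MustInt("port", 80)             // 8080, via string conversion
	host := mp.MustString("host", "localhost") // key absent, default used
	fmt.Println(port, host)
}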
dynmap.go
rails style (key[key1][key2]=value) // TODO: we should sort the keynames so ordering is consistent and then this // can be used as a cache key func (this *DynMap) MarshalURL() (string, error) { vals := &url.Values{} for key, value := range this.Map { err := this.urlEncode(vals, key, value) if err != nil { return "", err } } str := vals.Encode() log.Println(str) return str, nil } // Unmarshals a url encoded string. // will also parse rails style maps in the form key[key1][key2]=val func (this *DynMap) UnmarshalURL(urlstring string) error { //TODO: split on ? values, err := url.ParseQuery(urlstring) if err != nil { return err } return this.UnmarshalURLValues(values) } // Unmarshals url.Values into the map. // Will correctly handle rails style maps in the form key[key1][key2]=val func (this *DynMap) UnmarshalURLValues(values url.Values) error { for k := range values { var v = values[k] key := strings.Replace(k, "[", ".", -1) key = strings.Replace(key, "]", "", -1) if len(v) == 1 { this.PutWithDot(key, v[0]) } else { this.PutWithDot(key, v) } } return nil } //adds the requested value to the Values func (this *DynMap) urlEncode(vals *url.Values, key string, value interface{}) error { if DynMapConvertable(value) { mp, ok := ToDynMap(value) if !ok { return fmt.Errorf("Unable to convert %s", mp) } for k, v := range mp.Map { //encode in rails style key[key2]=value this.urlEncode(vals, fmt.Sprintf("%s[%s]", key, k), v) } return nil } r := reflect.ValueOf(value) //now test if it is an array; elements are encoded individually under the same key if r.Kind() == reflect.Array || r.Kind() == reflect.Slice { for i := 0; i < r.Len(); i++ { this.urlEncode(vals, key, r.Index(i).Interface()) } return nil } vals.Add(key, ToString(value)) return nil } func (this *DynMap) MarshalJSON() ([]byte, error) { bytes, err := json.Marshal(this.Map) return bytes, err } func (this *DynMap) UnmarshalJSON(bytes []byte) error { return json.Unmarshal(bytes, &this.Map) } // Gets the value at the specified key as an int64. returns -1,false if value not available or is not convertible func (this *DynMap) GetInt64(key string) (int64, bool) { tmp, ok := this.Get(key) if !ok { return -1, ok } val, err := ToInt64(tmp) if err == nil { return val, true } return -1, false } func (this *DynMap) MustInt64(key string, def int64) int64 { v, ok := this.GetInt64(key) if ok { return v } return def } func (this *DynMap) MustInt(key string, def int) int { v, ok := this.GetInt(key) if ok { return v } return def } func (this *DynMap) GetInt(key string) (int, bool) { v, ok := this.GetInt64(key) if !ok { return -1, ok } return int(v), true } // // Gets a string representation of the value at key // func (this *DynMap) GetString(key string) (string, bool)
// gets a string. if string is not available in the map, then the default //is returned func (this *DynMap) MustString(key string, def string) string { tmp, ok := this.GetString(key) if !ok { return def } return tmp } func (this *DynMap) GetTime(key string) (time.Time, bool) { tmp, ok := this.Get(key) if !ok { return time.Now(), false } t, err := ToTime(tmp) if err != nil { return time.Now(), false } return t, true } func (this *DynMap) MustTime(key string, def time.Time) time.Time { tmp, ok := this.GetTime(key) if !ok { return def } return tmp } func (this *DynMap) GetBool(key string) (bool, bool) { tmp, ok := this.Get(key) if !ok { return false, ok } b, err := ToBool(tmp) if err != nil { return false, false } return b, true } func (this *DynMap) MustBool(key string, def bool) bool { tmp, ok := this.GetBool(key) if !ok { return def } return tmp } //Gets a dynmap from the requested. // This will update the value in the map if the // value was not already a dynmap. func (this *DynMap) GetDynMap(key string) (*DynMap, bool) { tmp, ok := this.Get(key) if !ok { return nil, ok } mp, ok := ToDynMap(tmp) return mp, ok } func (this *DynMap) MustDynMap(key string, def *DynMap) *DynMap { tmp, ok := this.GetDynMap(key) if !ok { return def } return tmp } // gets a slice of dynmaps func (this *DynMap) GetDynMapSlice(key string) ([]*DynMap, bool) { lst, ok := this.Get(key) if !ok { return nil, false } switch v := lst.(type) { case []*DynMap: return v, true case []interface{}: retlist := make([]*DynMap, 0) for _, tmp := range v { in, ok := ToDynMap(tmp) if !ok { return nil, false } retlist = append(retlist, in) } return retlist, true } return nil, false } //Returns a slice of ints func (this *DynMap) GetIntSlice(key string) ([]int, bool) { lst, ok := this.Get(key) if !ok { return nil, false } switch v := lst.(type) { case []int: return v, true case []interface{}: retlist := make([]int, 0) for _, tmp := range v { in, err := ToInt(tmp) if err != nil { return nil, false } retlist = append(retlist, in) } return retlist, true } return nil, false } //gets a slice of ints. if the value is a string it will //split by the requested delimiter func (this *DynMap) GetIntSliceSplit(key, delim string) ([]int, bool) { lst, ok := this.Get(key) if !ok { return nil, false } switch v := lst.(type) { case string: retlist := make([]int, 0) for _, tmp := range strings.Split(v, delim) { in, err := ToInt(tmp) if err != nil { return nil, false } retlist = append(retlist, in) } return retlist, true } ret, ok := this.GetIntSlice(key) return ret, ok } //Returns a slice of strings func (this *DynMap) GetStringSlice(key string) ([]string, bool) { lst, ok := this.Get(key) if !ok { return nil, false } switch v := lst.(type) { case []string: return v, true case []interface{}: retlist := make([]string, 0) for _, tmp := range v { in := ToString(tmp) retlist = append(retlist, in) } return retlist, true } return nil, false } //gets a slice of strings. 
if the value is a string it will //split by the requested delimiter func (this *DynMap) GetStringSliceSplit(key, delim string) ([]string, bool) { lst, ok := this.Get(key) if !ok { return nil, false } switch v := lst.(type) { case string: return strings.Split(v, delim), true } ret, ok := this.GetStringSlice(key) return ret, ok } // Adds the item to a slice func (this *DynMap) AddToSlice(key string, mp interface{}) error { this.PutIfAbsent(key, make([]interface{}, 0)) lst, _ := this.Get(key) switch v := lst.(type) { case []interface{}: v = append(v, mp) this.Put(key, v) } return nil } // puts all the values from the passed in
{ tmp, ok := this.Get(key) if !ok { return "", false } return ToString(tmp), true }
identifier_body
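The URL marshaling in this record rewrites rails-style brackets into dotted keys before storing. A sketch of decoding, under the same in-package assumptions as above, and additionally assuming Get resolves the dotted paths that PutWithDot writes:

func exampleRailsURL() {
	dm := &DynMap{Map: map[string]interface{}{}}
	// "user[name]" is rewritten to the dotted key "user.name"; the repeated
	// "user[roles]" parameter arrives as a []string.
	if err := dm.UnmarshalURL("user[name]=bob&user[roles]=admin&user[roles]=dev"); err != nil {
		panic(err)
	}
	name, _ := dm.GetString("user.name")        // "bob"
	roles, _ := dm.GetStringSlice("user.roles") // []string{"admin", "dev"}
	fmt.Println(name, roles)
}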
dynmap.go
rails style (key[key2][key2]=value) // TODO: we should sort the keynames so ordering is consistent and then this // can be used a cache key func (this *DynMap) MarshalURL() (string, error) { vals := &url.Values{} for key, value := range this.Map { err := this.urlEncode(vals, key, value) if err != nil { return "", err } } str := vals.Encode() log.Printf(str) return vals.Encode(), nil } // Unmarshals a url encoded string. // will also parse rails style maps in the form key[key1][key2]=val func (this *DynMap) UnmarshalURL(urlstring string) error { //TODO: split on ? values, err := url.ParseQuery(urlstring) if err != nil { return err } return this.UnmarshalURLValues(values) } // Unmarshals url.Values into the map. // Will correctly handle rails style maps in the form key[key1][key2]=val func (this *DynMap) UnmarshalURLValues(values url.Values) error { for k := range values { var v = values[k] key := strings.Replace(k, "[", ".", -1) key = strings.Replace(key, "]", "", -1) if len(v) == 1 { this.PutWithDot(key, v[0]) } else { this.PutWithDot(key, v) } } return nil } //adds the requested value to the Values func (this *DynMap) urlEncode(vals *url.Values, key string, value interface{}) error { if DynMapConvertable(value) { mp, ok := ToDynMap(value) if !ok { return fmt.Errorf("Unable to convert %s", mp) } for k, v := range mp.Map { //encode in rails style key[key2]=value this.urlEncode(vals, fmt.Sprintf("%s[%s]", key, k), v) } return nil } r := reflect.ValueOf(value) //now test if it is an array if r.Kind() == reflect.Array || r.Kind() == reflect.Slice { for i :=0; i < r.Len(); i++ { this.urlEncode(vals, key, r.Index(i).Interface()) } } vals.Add(key, ToString(value)) return nil } func (this *DynMap) MarshalJSON() ([]byte, error) { bytes, err := json.Marshal(this.Map) return bytes, err } func (this *DynMap) UnmarshalJSON(bytes []byte) error { return json.Unmarshal(bytes, &this.Map) } // Gets the value at the specified key as an int64. returns -1,false if value not available or is not convertable func (this *DynMap) GetInt64(key string) (int64, bool) { tmp, ok := this.Get(key) if !ok { return -1, ok } val, err := ToInt64(tmp) if err == nil { return val, true } return -1, false } func (this *DynMap) MustInt64(key string, def int64) int64 { v, ok := this.GetInt64(key) if ok { return v } return def } func (this *DynMap) MustInt(key string, def int) int { v, ok := this.GetInt(key) if ok { return v } return def } func (this *DynMap) GetInt(key string) (int, bool) { v, ok := this.GetInt64(key) if !ok { return -1, ok } return int(v), true } // // Gets a string representation of the value at key // func (this *DynMap) GetString(key string) (string, bool) { tmp, ok := this.Get(key) if !ok { return ToString(tmp), ok } return ToString(tmp), true } // gets a string. if string is not available in the map, then the default //is returned func (this *DynMap) MustString(key string, def string) string { tmp, ok := this.GetString(key) if !ok { return def } return tmp } func (this *DynMap) GetTime(key string) (time.Time, bool) { tmp, ok := this.Get(key) if !ok { return time.Now(), false } t, err := ToTime(tmp) if err != nil { return time.Now(), false } return t, true } func (this *DynMap)
(key string, def time.Time) time.Time { tmp, ok := this.GetTime(key) if !ok { return def } return tmp } func (this *DynMap) GetBool(key string) (bool, bool) { tmp, ok := this.Get(key) if !ok { return false, ok } b, err := ToBool(tmp) if err != nil { return false, false } return b, true } func (this *DynMap) MustBool(key string, def bool) bool { tmp, ok := this.GetBool(key) if !ok { return def } return tmp } //Gets a dynmap from the requested. // This will update the value in the map if the // value was not already a dynmap. func (this *DynMap) GetDynMap(key string) (*DynMap, bool) { tmp, ok := this.Get(key) if !ok { return nil, ok } mp, ok := ToDynMap(tmp) return mp, ok } func (this *DynMap) MustDynMap(key string, def *DynMap) *DynMap { tmp, ok := this.GetDynMap(key) if !ok { return def } return tmp } // gets a slice of dynmaps func (this *DynMap) GetDynMapSlice(key string) ([]*DynMap, bool) { lst, ok := this.Get(key) if !ok { return nil, false } switch v := lst.(type) { case []*DynMap: return v, true case []interface{}: retlist := make([]*DynMap, 0) for _, tmp := range v { in, ok := ToDynMap(tmp) if !ok { return nil, false } retlist = append(retlist, in) } return retlist, true } return nil, false } //Returns a slice of ints func (this *DynMap) GetIntSlice(key string) ([]int, bool) { lst, ok := this.Get(key) if !ok { return nil, false } switch v := lst.(type) { case []int: return v, true case []interface{}: retlist := make([]int, 0) for _, tmp := range v { in, err := ToInt(tmp) if err != nil { return nil, false } retlist = append(retlist, in) } return retlist, true } return nil, false } //gets a slice of ints. if the value is a string it will //split by the requested delimiter func (this *DynMap) GetIntSliceSplit(key, delim string) ([]int, bool) { lst, ok := this.Get(key) if !ok { return nil, false } switch v := lst.(type) { case string: retlist := make([]int, 0) for _, tmp := range strings.Split(v, delim) { in, err := ToInt(tmp) if err != nil { return nil, false } retlist = append(retlist, in) } return retlist, true } ret, ok := this.GetIntSlice(key) return ret, ok } //Returns a slice of strings func (this *DynMap) GetStringSlice(key string) ([]string, bool) { lst, ok := this.Get(key) if !ok { return nil, false } switch v := lst.(type) { case []string: return v, true case []interface{}: retlist := make([]string, 0) for _, tmp := range v { in := ToString(tmp) retlist = append(retlist, in) } return retlist, true } return nil, false } //gets a slice of strings. if the value is a string it will //split by the requested delimiter func (this *DynMap) GetStringSliceSplit(key, delim string) ([]string, bool) { lst, ok := this.Get(key) if !ok { return nil, false } switch v := lst.(type) { case string: return strings.Split(v, delim), true } ret, ok := this.GetStringSlice(key) return ret, ok } // Adds the item to a slice func (this *DynMap) AddToSlice(key string, mp interface{}) error { this.PutIfAbsent(key, make([]interface{}, 0)) lst, _ := this.Get(key) switch v := lst.(type) { case []interface{}: v = append(v, mp) this.Put(key, v) } return nil } // puts all the values from the passed in map
MustTime
identifier_name
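Every Must* accessor in this record follows one shape: call the typed getter, return the caller's default when the lookup or conversion fails. Given a *DynMap dm as in the sketches above:

deadline := dm.MustTime("expires", time.Now().Add(time.Hour)) // default: one hour out
retries := dm.MustInt("retries", 3)                           // default: 3
label := dm.MustString("label", "unnamed")                    // default: "unnamed"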
lex.go
func (t token) Error() string { return fmt.Sprintf("lex error at %d: %s", int(t.pos), t.value) } type tokenType int const ( tokenNone tokenType = iota tokenError tokenEOF tokenText // anything that isn't one of the following tokenZeroWidthNoBreakSpace // U+FEFF used for unwrappable tokenNL // \n tokenCR // \r // unicode tokens we care about, mostly because of breaking rules. The whitespace // and dash tokens listed may be different than what Go uses in the relevant Go // unicode tables. // whitespace tokens from https://www.cs.tut.fi/~jkorpela/chars/spaces.html // // exceptions to the table: // no-break space U+00A0 is not considered whitespace for line break purposes // narrow no-break space U+202F is not considered whitespace for line break purposes // zero width no-break space U+FEFF is not considered whitespace for line break purposes tokenTab // \t tokenSpace // U+0020 tokenOghamSpaceMark // U+1680 tokenMongolianVowelSeparator // U+180E tokenEnQuad // U+2000 tokenEmQuad // U+2001 tokenEnSpace // U+2002 tokenEmSpace // U+2003 tokenThreePerEmSpace // U+2004 tokenFourPerEmSpace // U+2005 tokenSixPerEmSpace // U+2006 tokenFigureSpace // U+2007 tokenPunctuationSpace // U+2008 tokenThinSpace // U+2009 tokenHairSpace // U+200A tokenZeroWidthSpace // U+200B tokenMediumMathematicalSpace // U+205F tokenIdeographicSpace // U+3000 // dash tokens from https://www.cs.tut.fi/~jkorpela/dashes.html // hyphens and dashes in lines breaking rules sections // // exceptions to the table: // tilde U+007E does not cause a line break because of possibility of ~/dir, ~=, etc. // hyphen minus U+002D this is not supposed to break on a numeric context but no differentiation is done // minus sign U+2212 does not cause a line break // wavy dash U+301C does not cause a line break // wavy dash U+3939 does not cause a line break // two em dash U+2E3A is not in table but is here. // three em dash U+2E3B is not in table but is here. // small em dash U+FE58 is not in table but is here. // small hyphen-minus U+FE63 is not in table but is here. // full width hyphen-minus U+FF0D is not in table but is here. // mongolian todo hyphen U+1806 does not cause a line break becaues it is a break before char // presentation form for vertical em dash U+FE31 is not in table but is here. // presentation form for vertical en dash U+FE32 is not in table but is here. 
tokenHyphenMinus // U+002D tokenSoftHyphen // U+00AD tokenArmenianHyphen // U+058A tokenHyphen // U+2010 tokenFigureDash // U+2012 tokenEnDash // U+2013 tokenEmDash // U+2014 can be before or after but only after is supported here tokenHorizontalBar // U+2015 tokenSwungDash // U+2053 tokenSuperscriptMinus // U+207B tokenSubScriptMinus // U+208B tokenTwoEmDash // U+2E3A tokenThreeEmDash // U+2E3B tokenPresentationFormForVerticalEmDash // U+FE31 tokenPresentationFormForVerticalEnDash // U+FE32 tokenSmallEmDash // U+FE58 tokenSmallHyphenMinus // U+FE63 tokenFullWidthHyphenMinus // U+FF0D ) var key = map[string]tokenType{ "\r": tokenCR, "\n": tokenNL, "\t": tokenTab, "\uFEFF": tokenZeroWidthNoBreakSpace, "\u0020": tokenSpace, "\u1680": tokenOghamSpaceMark, "\u180E": tokenMongolianVowelSeparator, "\u2000": tokenEnQuad, "\u2001": tokenEmQuad, "\u2002": tokenEnSpace, "\u2003": tokenEmSpace, "\u2004": tokenThreePerEmSpace, "\u2005": tokenFourPerEmSpace, "\u2006": tokenSixPerEmSpace, "\u2007": tokenFigureSpace, "\u2008": tokenPunctuationSpace, "\u2009": tokenThinSpace, "\u200A": tokenHairSpace, "\u200B": tokenZeroWidthSpace, "\u205F": tokenMediumMathematicalSpace, "\u3000": tokenIdeographicSpace, "\u002D": tokenHyphenMinus, "\u00AD": tokenSoftHyphen, "\u058A": tokenArmenianHyphen, "\u2010": tokenHyphen, "\u2012": tokenFigureDash, "\u2013": tokenEnDash, "\u2014": tokenEmDash, "\u2015": tokenHorizontalBar, "\u2053": tokenSwungDash, "\u207B": tokenSuperscriptMinus, "\u208B": tokenSubScriptMinus, "\u2E3A": tokenTwoEmDash, "\u2E3B": tokenThreeEmDash, "\uFE31": tokenPresentationFormForVerticalEmDash, "\uFE32": tokenPresentationFormForVerticalEnDash, "\uFE58": tokenSmallEmDash, "\uFE63": tokenSmallHyphenMinus, "\uFF0D": tokenFullWidthHyphenMinus, } var vals = map[tokenType]string{ tokenNone: "none", tokenError: "error", tokenEOF: "eof", tokenText: "text", tokenZeroWidthNoBreakSpace: "zero width no break space", tokenNL: "nl", tokenCR: "cr", tokenTab: "tab", tokenSpace: "space", tokenOghamSpaceMark: "ogham space mark", tokenMongolianVowelSeparator: "mongolian vowel separator", tokenEnQuad: "en quad", tokenEmQuad: "em quad", tokenEnSpace: "en space", tokenEmSpace: "em space", tokenThreePerEmSpace: "three per em space", tokenFourPerEmSpace: "four per em space", tokenSixPerEmSpace: "siz per em space", tokenFigureSpace: "token figure space", tokenPunctuationSpace: "punctuation space", tokenThinSpace: "thin space", tokenHairSpace: "hair space", tokenZeroWidthSpace: "width space", tokenMediumMathematicalSpace: "medium mathematical space", tokenIdeographicSpace: "ideographic space", tokenHyphenMinus: "hyphen minus", tokenSoftHyphen: "soft hyphen", tokenArmenianHyphen: "armenian hyphen", tokenHyphen: "hyphen", tokenFigureDash: "figure dash", tokenEnDash: "en dash", tokenEmDash: "em dash", tokenHorizontalBar: "horizontal bar", tokenSwungDash: "swung dash", tokenSuperscriptMinus: "superscript minus", tokenSubScriptMinus: "subscript minus", tokenTwoEmDash: "two em dash", tokenThreeEmDash: "three em dash", tokenPresentationFormForVerticalEmDash: "presentation form for vertical em dash", tokenPresentationFormForVerticalEnDash: "presentation form for vertical em dash", tokenSmallEmDash: "small em dash", tokenSmallHyphenMinus: "small hyphen minus", tokenFullWidthHyphenMinus: "full width hyphen minus", } const eof = -1
{ switch { case t.typ == tokenEOF: return "EOF" case t.typ == tokenError: return t.value } return t.value }
identifier_body
lex.go
012": tokenFigureDash, "\u2013": tokenEnDash, "\u2014": tokenEmDash, "\u2015": tokenHorizontalBar, "\u2053": tokenSwungDash, "\u207B": tokenSuperscriptMinus, "\u208B": tokenSubScriptMinus, "\u2E3A": tokenTwoEmDash, "\u2E3B": tokenThreeEmDash, "\uFE31": tokenPresentationFormForVerticalEmDash, "\uFE32": tokenPresentationFormForVerticalEnDash, "\uFE58": tokenSmallEmDash, "\uFE63": tokenSmallHyphenMinus, "\uFF0D": tokenFullWidthHyphenMinus, } var vals = map[tokenType]string{ tokenNone: "none", tokenError: "error", tokenEOF: "eof", tokenText: "text", tokenZeroWidthNoBreakSpace: "zero width no break space", tokenNL: "nl", tokenCR: "cr", tokenTab: "tab", tokenSpace: "space", tokenOghamSpaceMark: "ogham space mark", tokenMongolianVowelSeparator: "mongolian vowel separator", tokenEnQuad: "en quad", tokenEmQuad: "em quad", tokenEnSpace: "en space", tokenEmSpace: "em space", tokenThreePerEmSpace: "three per em space", tokenFourPerEmSpace: "four per em space", tokenSixPerEmSpace: "siz per em space", tokenFigureSpace: "token figure space", tokenPunctuationSpace: "punctuation space", tokenThinSpace: "thin space", tokenHairSpace: "hair space", tokenZeroWidthSpace: "width space", tokenMediumMathematicalSpace: "medium mathematical space", tokenIdeographicSpace: "ideographic space", tokenHyphenMinus: "hyphen minus", tokenSoftHyphen: "soft hyphen", tokenArmenianHyphen: "armenian hyphen", tokenHyphen: "hyphen", tokenFigureDash: "figure dash", tokenEnDash: "en dash", tokenEmDash: "em dash", tokenHorizontalBar: "horizontal bar", tokenSwungDash: "swung dash", tokenSuperscriptMinus: "superscript minus", tokenSubScriptMinus: "subscript minus", tokenTwoEmDash: "two em dash", tokenThreeEmDash: "three em dash", tokenPresentationFormForVerticalEmDash: "presentation form for vertical em dash", tokenPresentationFormForVerticalEnDash: "presentation form for vertical em dash", tokenSmallEmDash: "small em dash", tokenSmallHyphenMinus: "small hyphen minus", tokenFullWidthHyphenMinus: "full width hyphen minus", } const eof = -1 const ( classText tokenClass = iota classCR classNL classTab classSpace classHyphen ) type tokenClass int type stateFn func(*lexer) stateFn type lexer struct { input []byte // the string being scanned state stateFn // the next lexing function to enter pos Pos // current position of this item start Pos // start position of this item width Pos // width of last rune read from input lastPos Pos // position of most recent item returned by nextItem runeCnt int // the number of runes in the current token sequence tokens chan token // channel of scanned tokens } func lex(input []byte) *lexer { l := &lexer{ input: input, state: lexText, tokens: make(chan token, 2), } go l.run() return l } // next returns the next rune in the input. func (l *lexer) next() rune { l.runeCnt++ if int(l.pos) >= len(l.input) { l.width = 0 return eof } r, w := utf8.DecodeRune(l.input[l.pos:]) l.width = Pos(w) l.pos += l.width return r } // peek returns but does not consume the next rune in the input func (l *lexer) peek() rune { r := l.next() l.backup() return r } // backup steps back one rune. Can be called only once per call of next. func (l *lexer) backup() { l.pos -= l.width l.runeCnt-- } // emit passes an item back to the client. func (l *lexer) emit(t tokenType) { l.tokens <- token{t, l.start, l.runeCnt, string(l.input[l.start:l.pos])} l.start = l.pos l.runeCnt = 0 } // ignore skips over the pending input before this point. 
func (l *lexer) ignore() { l.start = l.pos l.runeCnt = 0 } // accept consumes the next rune if it's from the valid set. func (l *lexer) accept(valid string) bool { if strings.ContainsRune(valid, l.next()) { return true } l.backup() return false } // acceptRun consumes a run of runes from the valid set. func (l *lexer) acceptRun(valid string) { for strings.ContainsRune(valid, l.next()) { } l.backup() } // error returns an error token and terminates the scan by passing back a nil // pointer that will be the next state, terminating l.run. func (l *lexer) errorf(format string, args ...interface{}) stateFn { l.tokens <- token{tokenError, l.start, 0, fmt.Sprintf(format, args...)} return nil } // nextToken returns the next token from the input. func (l *lexer) nextToken() token { token := <-l.tokens l.lastPos = token.pos return token } // drain the channel so the lex go routine will exit: called by caller. func (l *lexer) drain() { for range l.tokens { } } // run lexes the input by executing state functions until the state is nil. func (l *lexer) run() { for state := lexText; state != nil; { state = state(l) } close(l.tokens) // No more tokens will be delivered } // lexText scans non-whitespace/non-hyphen chars. func lexText(l *lexer) stateFn { for { is, class := l.atBreakPoint() // a breakpoint is any char after which a new line can begin if is { if l.pos > l.start { l.emit(tokenText) } switch class { case classCR: return lexCR case classNL: return lexNL case classSpace: return lexSpace case classTab: return lexTab case classHyphen: return lexHyphen } } if l.next() == eof { l.runeCnt-- // eof doesn't count. break } } // Correctly reached EOF. if l.pos > l.start { l.emit(tokenText) } l.emit(tokenEOF) // Useful to make EOF a token return nil // Stop the run loop. } // a breakpoint is any character after which a wrap may occur. If it is a // breakpoint char, the type of char is returned. func (l *lexer) atBreakPoint() (breakpoint bool, class tokenClass) { r, _ := utf8.DecodeRune(l.input[l.pos:]) t, ok := key[string(r)] if !ok || t <= tokenZeroWidthNoBreakSpace { return false, classText } switch t { case tokenCR: return true, classCR case tokenNL: return true, classNL case tokenTab: return true, classTab } if isSpace(t) { return true, classSpace } if isHyphen(t) { return true, classHyphen } // it really shouldn't get to here, but if it does, treat it like classText return false, classText } // lexCR handles a carriage return, `\r`; these are skipped. The prior token // should already have been emitted and the next token should be a CR, which // is skipped. The next token is checked to ensure that it really is a CR. func lexCR(l *lexer) stateFn { r := l.next() t := key[string(r)] // don't need to check ok, as the zero value won't match if t == tokenCR { l.ignore() } return lexText } // lexNL handles a new line, `\n`; the prior token should already have been // emitted and the next token should be a NL. The next token is checked to // ensure that it really is a NL. func lexNL(l *lexer) stateFn { r := l.next() t := key[string(r)] // don't need to check ok, as the zero value won't match if t == tokenNL { l.emit(tokenNL) } return lexText } // lexTab handles a tab, '\t'; the prior token should already have been emitted // and the next token should be a tab. The next token is checked to ensure that // it really is a tab. func
lexTab
identifier_name
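Because lex, token, and the token types are unexported, the lexer above is driven from inside its own package: read nextToken until tokenEOF or tokenError. A minimal sketch (fmt imported; field names per the token literal in the record):

l := lex([]byte("hello world\nsecond line"))
for {
	tok := l.nextToken()
	if tok.typ == tokenEOF || tok.typ == tokenError {
		break
	}
	fmt.Printf("%-12s %q\n", vals[tok.typ], tok.value) // e.g. space " "
}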
lex.go
U+FE58 tokenSmallHyphenMinus // U+FE63 tokenFullWidthHyphenMinus // U+FF0D ) var key = map[string]tokenType{ "\r": tokenCR, "\n": tokenNL, "\t": tokenTab, "\uFEFF": tokenZeroWidthNoBreakSpace, "\u0020": tokenSpace, "\u1680": tokenOghamSpaceMark, "\u180E": tokenMongolianVowelSeparator, "\u2000": tokenEnQuad, "\u2001": tokenEmQuad, "\u2002": tokenEnSpace, "\u2003": tokenEmSpace, "\u2004": tokenThreePerEmSpace, "\u2005": tokenFourPerEmSpace, "\u2006": tokenSixPerEmSpace, "\u2007": tokenFigureSpace, "\u2008": tokenPunctuationSpace, "\u2009": tokenThinSpace, "\u200A": tokenHairSpace, "\u200B": tokenZeroWidthSpace, "\u205F": tokenMediumMathematicalSpace, "\u3000": tokenIdeographicSpace, "\u002D": tokenHyphenMinus, "\u00AD": tokenSoftHyphen, "\u058A": tokenArmenianHyphen, "\u2010": tokenHyphen, "\u2012": tokenFigureDash, "\u2013": tokenEnDash, "\u2014": tokenEmDash, "\u2015": tokenHorizontalBar, "\u2053": tokenSwungDash, "\u207B": tokenSuperscriptMinus, "\u208B": tokenSubScriptMinus, "\u2E3A": tokenTwoEmDash, "\u2E3B": tokenThreeEmDash, "\uFE31": tokenPresentationFormForVerticalEmDash, "\uFE32": tokenPresentationFormForVerticalEnDash, "\uFE58": tokenSmallEmDash, "\uFE63": tokenSmallHyphenMinus, "\uFF0D": tokenFullWidthHyphenMinus, } var vals = map[tokenType]string{ tokenNone: "none", tokenError: "error", tokenEOF: "eof", tokenText: "text", tokenZeroWidthNoBreakSpace: "zero width no break space", tokenNL: "nl", tokenCR: "cr", tokenTab: "tab", tokenSpace: "space", tokenOghamSpaceMark: "ogham space mark", tokenMongolianVowelSeparator: "mongolian vowel separator", tokenEnQuad: "en quad", tokenEmQuad: "em quad", tokenEnSpace: "en space", tokenEmSpace: "em space", tokenThreePerEmSpace: "three per em space", tokenFourPerEmSpace: "four per em space", tokenSixPerEmSpace: "siz per em space", tokenFigureSpace: "token figure space", tokenPunctuationSpace: "punctuation space", tokenThinSpace: "thin space", tokenHairSpace: "hair space", tokenZeroWidthSpace: "width space", tokenMediumMathematicalSpace: "medium mathematical space", tokenIdeographicSpace: "ideographic space", tokenHyphenMinus: "hyphen minus", tokenSoftHyphen: "soft hyphen", tokenArmenianHyphen: "armenian hyphen", tokenHyphen: "hyphen", tokenFigureDash: "figure dash", tokenEnDash: "en dash", tokenEmDash: "em dash", tokenHorizontalBar: "horizontal bar", tokenSwungDash: "swung dash", tokenSuperscriptMinus: "superscript minus", tokenSubScriptMinus: "subscript minus", tokenTwoEmDash: "two em dash", tokenThreeEmDash: "three em dash", tokenPresentationFormForVerticalEmDash: "presentation form for vertical em dash", tokenPresentationFormForVerticalEnDash: "presentation form for vertical em dash", tokenSmallEmDash: "small em dash", tokenSmallHyphenMinus: "small hyphen minus", tokenFullWidthHyphenMinus: "full width hyphen minus", } const eof = -1 const ( classText tokenClass = iota classCR classNL classTab classSpace classHyphen ) type tokenClass int type stateFn func(*lexer) stateFn type lexer struct { input []byte // the string being scanned state stateFn // the next lexing function to enter pos Pos // current position of this item start Pos // start position of this item width Pos // width of last rune read from input lastPos Pos // position of most recent item returned by nextItem runeCnt int // the number of runes in the current token sequence tokens chan token // channel of scanned tokens } func lex(input []byte) *lexer { l := &lexer{ input: input, state: lexText, tokens: make(chan token, 2), } go l.run() return l } // next returns the next rune in 
the input. func (l *lexer) next() rune { l.runeCnt++ if int(l.pos) >= len(l.input) { l.width = 0 return eof } r, w := utf8.DecodeRune(l.input[l.pos:]) l.width = Pos(w) l.pos += l.width return r } // peek returns but does not consume the next rune in the input func (l *lexer) peek() rune { r := l.next() l.backup() return r } // backup steps back one rune. Can be called only once per call of next. func (l *lexer) backup() { l.pos -= l.width l.runeCnt-- } // emit passes an item back to the client. func (l *lexer) emit(t tokenType) { l.tokens <- token{t, l.start, l.runeCnt, string(l.input[l.start:l.pos])} l.start = l.pos l.runeCnt = 0 } // ignore skips over the pending input before this point. func (l *lexer) ignore() { l.start = l.pos l.runeCnt = 0 } // accept consumes the next rune if it's from the valid set. func (l *lexer) accept(valid string) bool { if strings.ContainsRune(valid, l.next()) { return true } l.backup() return false } // acceptRun consumes a run of runes from the valid set. func (l *lexer) acceptRun(valid string) { for strings.ContainsRune(valid, l.next()) { } l.backup() } // error returns an error token and terminates the scan by passing back a nil // pointer that will be the next state, terminating l.run. func (l *lexer) errorf(format string, args ...interface{}) stateFn { l.tokens <- token{tokenError, l.start, 0, fmt.Sprintf(format, args...)} return nil } // nextToken returns the next token from the input. func (l *lexer) nextToken() token { token := <-l.tokens l.lastPos = token.pos return token } // drain the channel so the lex go routine will exit: called by caller. func (l *lexer) drain() { for range l.tokens { } } // run lexes the input by executing state functions until the state is nil. func (l *lexer) run() { for state := lexText; state != nil; { state = state(l) } close(l.tokens) // No more tokens will be delivered } // lexText scans non-whitespace/non-hyphen chars. func lexText(l *lexer) stateFn { for { is, class := l.atBreakPoint() // a breakpoint is any char after which a new line can begin if is { if l.pos > l.start { l.emit(tokenText) } switch class { case classCR: return lexCR case classNL: return lexNL case classSpace: return lexSpace case classTab: return lexTab case classHyphen: return lexHyphen } } if l.next() == eof { l.runeCnt-- // eof doesn't count. break } } // Correctly reached EOF. if l.pos > l.start { l.emit(tokenText) } l.emit(tokenEOF) // Useful to make EOF a token return nil // Stop the run loop. } // a breakpoint is any character after which a wrap may occur. If it is a // breakpoint char, the type of char is returned. func (l *lexer) atBreakPoint() (breakpoint bool, class tokenClass) { r, _ := utf8.DecodeRune(l.input[l.pos:]) t, ok := key[string(r)] if !ok || t <= tokenZeroWidthNoBreakSpace
{ return false, classText }
conditional_block
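atBreakPoint, completed by the conditional block above, reduces the big token table to a small class set: runes missing from key, or at or below tokenZeroWidthNoBreakSpace, stay plain text; CR, NL, and tab get their own classes; everything else is grouped by isSpace/isHyphen. The table lookup itself, sketched in-package (fmt imported):

r := '\u2014' // em dash
if t, ok := key[string(r)]; ok && t > tokenZeroWidthNoBreakSpace {
	fmt.Println("breakpoint:", vals[t]) // breakpoint: em dash
}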
lex.go
tokenSixPerEmSpace // U+2006 tokenFigureSpace // U+2007 tokenPunctuationSpace // U+2008 tokenThinSpace // U+2009 tokenHairSpace // U+200A tokenZeroWidthSpace // U+200B tokenMediumMathematicalSpace // U+205F tokenIdeographicSpace // U+3000 // dash tokens from https://www.cs.tut.fi/~jkorpela/dashes.html // hyphens and dashes in lines breaking rules sections // // exceptions to the table: // tilde U+007E does not cause a line break because of possibility of ~/dir, ~=, etc. // hyphen minus U+002D this is not supposed to break on a numeric context but no differentiation is done // minus sign U+2212 does not cause a line break // wavy dash U+301C does not cause a line break // wavy dash U+3939 does not cause a line break // two em dash U+2E3A is not in table but is here. // three em dash U+2E3B is not in table but is here. // small em dash U+FE58 is not in table but is here. // small hyphen-minus U+FE63 is not in table but is here. // full width hyphen-minus U+FF0D is not in table but is here. // mongolian todo hyphen U+1806 does not cause a line break becaues it is a break before char // presentation form for vertical em dash U+FE31 is not in table but is here. // presentation form for vertical en dash U+FE32 is not in table but is here. tokenHyphenMinus // U+002D tokenSoftHyphen // U+00AD tokenArmenianHyphen // U+058A tokenHyphen // U+2010 tokenFigureDash // U+2012 tokenEnDash // U+2013 tokenEmDash // U+2014 can be before or after but only after is supported here tokenHorizontalBar // U+2015 tokenSwungDash // U+2053 tokenSuperscriptMinus // U+207B tokenSubScriptMinus // U+208B tokenTwoEmDash // U+2E3A tokenThreeEmDash // U+2E3B tokenPresentationFormForVerticalEmDash // U+FE31 tokenPresentationFormForVerticalEnDash // U+FE32 tokenSmallEmDash // U+FE58 tokenSmallHyphenMinus // U+FE63 tokenFullWidthHyphenMinus // U+FF0D ) var key = map[string]tokenType{ "\r": tokenCR, "\n": tokenNL, "\t": tokenTab, "\uFEFF": tokenZeroWidthNoBreakSpace, "\u0020": tokenSpace, "\u1680": tokenOghamSpaceMark, "\u180E": tokenMongolianVowelSeparator, "\u2000": tokenEnQuad, "\u2001": tokenEmQuad, "\u2002": tokenEnSpace, "\u2003": tokenEmSpace, "\u2004": tokenThreePerEmSpace, "\u2005": tokenFourPerEmSpace, "\u2006": tokenSixPerEmSpace, "\u2007": tokenFigureSpace, "\u2008": tokenPunctuationSpace, "\u2009": tokenThinSpace, "\u200A": tokenHairSpace, "\u200B": tokenZeroWidthSpace, "\u205F": tokenMediumMathematicalSpace, "\u3000": tokenIdeographicSpace, "\u002D": tokenHyphenMinus, "\u00AD": tokenSoftHyphen, "\u058A": tokenArmenianHyphen, "\u2010": tokenHyphen, "\u2012": tokenFigureDash, "\u2013": tokenEnDash, "\u2014": tokenEmDash, "\u2015": tokenHorizontalBar, "\u2053": tokenSwungDash, "\u207B": tokenSuperscriptMinus, "\u208B": tokenSubScriptMinus, "\u2E3A": tokenTwoEmDash, "\u2E3B": tokenThreeEmDash, "\uFE31": tokenPresentationFormForVerticalEmDash, "\uFE32": tokenPresentationFormForVerticalEnDash, "\uFE58": tokenSmallEmDash, "\uFE63": tokenSmallHyphenMinus, "\uFF0D": tokenFullWidthHyphenMinus, } var vals = map[tokenType]string{ tokenNone: "none", tokenError: "error", tokenEOF: "eof", tokenText: "text", tokenZeroWidthNoBreakSpace: "zero width no break space", tokenNL: "nl", tokenCR: "cr", tokenTab: "tab", tokenSpace: "space", tokenOghamSpaceMark: "ogham space mark", tokenMongolianVowelSeparator: "mongolian vowel separator", tokenEnQuad: "en quad", tokenEmQuad: "em quad", tokenEnSpace: "en space", tokenEmSpace: "em space", tokenThreePerEmSpace: "three per em space", tokenFourPerEmSpace: "four per em space", 
tokenSixPerEmSpace: "siz per em space", tokenFigureSpace: "token figure space", tokenPunctuationSpace: "punctuation space", tokenThinSpace: "thin space", tokenHairSpace: "hair space", tokenZeroWidthSpace: "width space", tokenMediumMathematicalSpace: "medium mathematical space", tokenIdeographicSpace: "ideographic space", tokenHyphenMinus: "hyphen minus", tokenSoftHyphen: "soft hyphen", tokenArmenianHyphen: "armenian hyphen", tokenHyphen: "hyphen", tokenFigureDash: "figure dash", tokenEnDash: "en dash",
tokenEmDash: "em dash", tokenHorizontalBar: "horizontal bar", tokenSwungDash: "swung dash", tokenSuperscriptMinus: "superscript minus", tokenSubScriptMinus: "subscript minus", tokenTwoEmDash: "two em dash", tokenThreeEmDash: "three em dash", tokenPresentationFormForVerticalEmDash: "presentation form for vertical em dash", tokenPresentationFormForVerticalEnDash: "presentation form for vertical em dash", tokenSmallEmDash: "small em dash", tokenSmallHyphenMinus: "small hyphen minus", tokenFullWidthHyphenMinus: "full width hyphen minus", } const eof = -1 const ( classText tokenClass = iota classCR classNL classTab classSpace classHyphen ) type tokenClass int type stateFn func(*lexer) stateFn type lexer struct { input []byte // the string being scanned state stateFn // the next lexing function to enter pos Pos // current position of this item start Pos // start position of this item width Pos // width of last rune read from input lastPos Pos // position of most recent item returned by nextItem runeCnt int // the number of runes in the current token sequence tokens chan token // channel of scanned tokens } func lex(input []byte) *lexer { l := &lexer{ input: input, state: lexText, tokens: make(chan token, 2), } go l.run() return l } // next returns the next rune in the input. func (l *lexer) next() rune { l.runeCnt++ if int(l.pos) >= len(l.input) { l.width = 0 return eof } r, w := utf8.DecodeRune(l.input[l.pos:]) l.width = Pos(w) l.pos += l.width return r } // peek returns but does not consume the next rune in the input func (l *lexer) peek() rune { r := l.next() l.backup() return r } // backup steps back one rune. Can be called only once per call of next. func (l *lexer) backup() { l.pos -= l.width l.runeCnt-- } // emit passes an item back to the client. func (l *lexer) emit(t tokenType) { l.tokens <- token{t, l.start, l.runeCnt, string
random_line_split
se-spam-helper.user.js
Of = {}, notifiedOfToday = {}; var ooflagSites = {}; var questionQueue = {}; var siteWebsocketIDs = {}; var sitesByWebsocketID = {}; var onQuestionQueueTimeout = flushQuestionQueue; var checkAnswer = checkPost, checkQuestion = checkPost; menu_init(); notification_init(); window.addEventListener("unload", onbeforeunload); scrapePage(); function atGMT(time, func){ var timeLeft = (time - Date.now()) % (24 * hours); setTimeout(func, timeLeft); } function onMessage(e){ var response = JSON.parse(e.data); var data = response.data && JSON.parse(response.data); if(response.action === "hb"){ ws.send("hb"); } else if(response.action === "155-questions-active"){ onQuestionActive(parseRealtimeSocket(data)); } else if(response.action.match(/\d+-questions-active/)){ scrapePerSiteQuestion(data.body, sitesByWebsocketID[data.siteid]); } else { console.log("unknown response type: %s in %o", response.action, response); } } function scrapePage(){ $(".realtime-question:visible").each(function(){ var qLink = this.querySelector("a.realtime-question-url"); onQuestionActive({ body: undefined, link: qLink.href, site: hostNameToSiteName(qLink.hostname), tags: $(".post-tag", this).map(function(){return this.textContent;}), title: $("h2", this).text().trim(), question_id: qLink.href.match(/\/questions\/(\d+)\//)[1], }); }); hiderInstall(); } function scrapePerSiteQuestion(html, site){ var question = new DOMParser().parseFromString(html, "text/html") .getElementsByClassName("question-summary")[0]; var qLink = "http://" + siteNameToHostName(site) + question.querySelector("a.question-hyperlink").getAttribute("href"); onQuestionActive({ body: $(".excerpt", question).html().trim(), link: qLink, site: site, tags: $(".post-tag", question).map(function(){return this.textContent;}), title: $("h3 a", question).text().trim(), question_id: question.id.split("-").pop(), }); } function
(site){ if(siteWebsocketIDs[site] === undefined){ siteWebsocketIDs[site] = false; // prevent double fetching GM_xmlhttpRequest({ method: "GET", url: "http://" + siteNameToHostName(site), ontimeout: checkSiteHasSocket.bind(null, site), onerror: function(response) { console.log(response); checkSiteHasSocket(site); // retry }, onload: function(response){ var scripts = (new DOMParser()) .parseFromString(response.response, "text/html") .head.querySelectorAll("script:not([src])"); [].forEach.call(scripts, function(script){ var match = /StackExchange\.realtime\.subscribeToActiveQuestions\(["']?(\d+)/.exec(script.innerHTML); if(match){ siteWebsocketIDs[site] = match[1]; sitesByWebsocketID[match[1]] = site; } }); if(siteWebsocketIDs[site]){ console.log("the ID for %s is %o", site, siteWebsocketIDs[site]); ws.send(siteWebsocketIDs[site] + "-questions-active"); } else { console.log("could not find the ID for %s", site); } } }); } } function parseRealtimeSocket(wsData){ return{ body: wsData.bodySummary, link: wsData.url, site: wsData.apiSiteParameter, tags: wsData.tags, title: htmlUnescape(wsData.titleEncodedFancy), question_id: wsData.id, }; } function onQuestionActive(qData){ checkQuestion(qData); hiderInstall(); checkSiteHasSocket(qData.site); questionQueuePush(qData); } function questionQueuePush(qData){ var site = qData.site; var id = qData.question_id; var queue = questionQueue[site] = questionQueue[site] || {site:site, questions:{}, length:0}; if(!queue.questions[id]) queue.length++; queue.questions[id] = qData; if(queue.length >= 100){ flushQuestionQueue(queue); }else{ if(!queue.timeout){ queue.timeout = setTimeout(onQuestionQueueTimeout.bind(null, queue), QUEUE_TIMEOUT); } } } function flushQuestionQueue(queue){ var ids = Object.keys(queue.questions); queue.length = 0; queue.questions = {}; clearTimeout(queue.timeout); queue.timeout = null; console.log("requesting answers for " + ids.length + " questions on " + queue.site); seApiCall("questions", ids.join(";"), { filter: "!*7Pmg7yi0JKTUuaBigtbGINmVtEq", site: queue.site}) .then(function(response){ response.items.forEach(function(question){ question.site = queue.site; question.title = htmlUnescape(question.title); checkQuestion(question); if(question.answers) question.answers.forEach(function(answer){ checkAnswer(question, answer); }); }); }); } function checkPost(question, answer){var title = question.title; var host = question.site; var site = hostNameToSiteName(host); var site_class = "realtime-" + siteToClass(site); var classname = site_class + "-" + question.question_id; var q_body = $("<div/>", {html: question.body}); var a_body; if(answer) a_body = $("<div/>", {html: answer.body}); var text = answer ? a_body.text() : title + "\n" + q_body.text(); var id = answer ? answer.answer_id : question.question_id; var link = answer ? answer.link : question.link; if(!notifiedOf[site]) notifiedOf[site] = {}; if(!notifiedOf[site][id]){ if(/\b(ass(hole)?|bitch|crap|damn|dumb(ass)?|fag|fuck|idiot|motherfucker|nigga|shit(hole)?|stupid|whore)e?s?\b/i.test(text) || is.mostlyUppercase(text) || /\w+@(\w+\.)+\w{2,}/.test(text.replace(/\s/,'')) || !answer && ( site == "meta" || site == "drupal" || /(?:[^a-hj-np-z ] *){9,}/i.test(title) || is.mostlyUppercase(title) || /\b(vs?|l[ae]|live|watch|free|cheap|online|best|nike|buy|replica|here is|porn|packers|movers|slim|concord|black magic|vashikaran|baba(ji)?|\d+s|kgl|fifa|escort|swtor)\b/i.test(title) ) ){ css.textContent += "." + classname + " {background-color: #FCC}\n"; notify(site, title, answer ? 
"A - " + a_body.text() : question.body ? "Q - " + q_body.text() : undefined, link); } notifiedOf[site][id] = true; if(!notifiedOfToday[site]) notifiedOfToday[site] = {}; notifiedOfToday[site][id] = true; } } function hiderInstall(){ var children = document.getElementById("mainArea").children; for(var i = 0; i < children.length; i++){ if(children[i].getElementsByClassName("spam-helper-site-hider").length) break; var match = children[i].className.match(/(realtime-[-a-z]+)-\d+/); if(!match) break; var siteClass = match[1]; var hider = imgPool.get(function(){ var hider = document.createElement("img"); hider.src = "https://raw.github.com/honnza/se-spam-helper/master/no-flag.png"; hider.title = "I'm out of spam flags for today here"; hider.className = "spam-helper-site-hider"; hider.style.cursor = "pointer"; return hider; }); hider.onclick = function(siteClass){ daily_css.textContent += "." + siteClass + " {display: none}\n"; ooflagSites[siteClass] = true; }.bind(null, siteClass); children[i].getElementsByClassName("hot-question-site-icon")[0].appendChild(hider); children[i].classList.add(siteClass); } } function notify(site, title, body, url){ if(notification_granted && !ooflagSites[site]){ var notification = new Notification(title, { icon: classToImageUrl(siteToClass(site)), body: body || '' }); notification.onclick = function(){ GM_openInTab(url); GM_setClipboard(url); }; } } function menu_init(){ menu = document.createElement("div");
checkSiteHasSocket
identifier_name
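checkSiteHasSocket, whose name is the middle of this record, discovers a site's realtime channel by regex-scanning inline script blocks for subscribeToActiveQuestions(<id>). The same extraction sketched in Go for clarity (regexp and fmt imported; pageHTML is an assumed variable holding the fetched page):

re := regexp.MustCompile(`StackExchange\.realtime\.subscribeToActiveQuestions\(["']?(\d+)`)
if m := re.FindStringSubmatch(pageHTML); m != nil {
	// m[1] is the numeric site ID; the userscript then subscribes by
	// sending "<id>-questions-active" over the websocket.
	fmt.Println("channel:", m[1]+"-questions-active")
}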
se-spam-helper.user.js
.head.querySelectorAll("script:not([src])"); [].forEach.call(scripts, function(script){ var match = /StackExchange\.realtime\.subscribeToActiveQuestions\(["']?(\d+)/.exec(script.innerHTML); if(match){ siteWebsocketIDs[site] = match[1]; sitesByWebsocketID[match[1]] = site; } }); if(siteWebsocketIDs[site]){ console.log("the ID for %s is %o", site, siteWebsocketIDs[site]); ws.send(siteWebsocketIDs[site] + "-questions-active"); } else { console.log("could not find the ID for %s", site); } } }); } } function parseRealtimeSocket(wsData){ return{ body: wsData.bodySummary, link: wsData.url, site: wsData.apiSiteParameter, tags: wsData.tags, title: htmlUnescape(wsData.titleEncodedFancy), question_id: wsData.id, }; } function onQuestionActive(qData){ checkQuestion(qData); hiderInstall(); checkSiteHasSocket(qData.site); questionQueuePush(qData); } function questionQueuePush(qData){ var site = qData.site; var id = qData.question_id; var queue = questionQueue[site] = questionQueue[site] || {site:site, questions:{}, length:0}; if(!queue.questions[id]) queue.length++; queue.questions[id] = qData; if(queue.length >= 100){ flushQuestionQueue(queue); }else{ if(!queue.timeout){ queue.timeout = setTimeout(onQuestionQueueTimeout.bind(null, queue), QUEUE_TIMEOUT); } } } function flushQuestionQueue(queue){ var ids = Object.keys(queue.questions); queue.length = 0; queue.questions = {}; clearTimeout(queue.timeout); queue.timeout = null; console.log("requesting answers for " + ids.length + " questions on " + queue.site); seApiCall("questions", ids.join(";"), { filter: "!*7Pmg7yi0JKTUuaBigtbGINmVtEq", site: queue.site}) .then(function(response){ response.items.forEach(function(question){ question.site = queue.site; question.title = htmlUnescape(question.title); checkQuestion(question); if(question.answers) question.answers.forEach(function(answer){ checkAnswer(question, answer); }); }); }); } function checkPost(question, answer){var title = question.title; var host = question.site; var site = hostNameToSiteName(host); var site_class = "realtime-" + siteToClass(site); var classname = site_class + "-" + question.question_id; var q_body = $("<div/>", {html: question.body}); var a_body; if(answer) a_body = $("<div/>", {html: answer.body}); var text = answer ? a_body.text() : title + "\n" + q_body.text(); var id = answer ? answer.answer_id : question.question_id; var link = answer ? answer.link : question.link; if(!notifiedOf[site]) notifiedOf[site] = {}; if(!notifiedOf[site][id]){ if(/\b(ass(hole)?|bitch|crap|damn|dumb(ass)?|fag|fuck|idiot|motherfucker|nigga|shit(hole)?|stupid|whore)e?s?\b/i.test(text) || is.mostlyUppercase(text) || /\w+@(\w+\.)+\w{2,}/.test(text.replace(/\s/,'')) || !answer && ( site == "meta" || site == "drupal" || /(?:[^a-hj-np-z ] *){9,}/i.test(title) || is.mostlyUppercase(title) || /\b(vs?|l[ae]|live|watch|free|cheap|online|best|nike|buy|replica|here is|porn|packers|movers|slim|concord|black magic|vashikaran|baba(ji)?|\d+s|kgl|fifa|escort|swtor)\b/i.test(title) ) ){ css.textContent += "." + classname + " {background-color: #FCC}\n"; notify(site, title, answer ? "A - " + a_body.text() : question.body ? 
"Q - " + q_body.text() : undefined, link); } notifiedOf[site][id] = true; if(!notifiedOfToday[site]) notifiedOfToday[site] = {}; notifiedOfToday[site][id] = true; } } function hiderInstall(){ var children = document.getElementById("mainArea").children; for(var i = 0; i < children.length; i++){ if(children[i].getElementsByClassName("spam-helper-site-hider").length) break; var match = children[i].className.match(/(realtime-[-a-z]+)-\d+/); if(!match) break; var siteClass = match[1]; var hider = imgPool.get(function(){ var hider = document.createElement("img"); hider.src = "https://raw.github.com/honnza/se-spam-helper/master/no-flag.png"; hider.title = "I'm out of spam flags for today here"; hider.className = "spam-helper-site-hider"; hider.style.cursor = "pointer"; return hider; }); hider.onclick = function(siteClass){ daily_css.textContent += "." + siteClass + " {display: none}\n"; ooflagSites[siteClass] = true; }.bind(null, siteClass); children[i].getElementsByClassName("hot-question-site-icon")[0].appendChild(hider); children[i].classList.add(siteClass); } } function notify(site, title, body, url){ if(notification_granted && !ooflagSites[site]){ var notification = new Notification(title, { icon: classToImageUrl(siteToClass(site)), body: body || '' }); notification.onclick = function(){ GM_openInTab(url); GM_setClipboard(url); }; } } function menu_init(){ menu = document.createElement("div"); menu.id = "spam-helper-menu"; var a = document.createElement("a"); a.href = "#"; a.id = "spam-helper-menu-a"; a.textContent = "spam helper"; a.onclick = function(){ if(menu.parentElement){ document.body.removeChild(menu); }else{ document.body.appendChild(menu); menu.style.top = a.offsetTop + 2 * a.offsetHeight + "px"; menu.style.left = a.offsetLeft + "px"; } }; var wrapper = document.getElementsByClassName('topbar-wrapper')[0]; var links = document.getElementsByClassName('topbar-links')[0]; wrapper.insertBefore(menu, links); css.textContent += "#spam-helper-menu {display: inline-block; padding-top:7px}" + "#spam-helper-menu > span {display: block; width: 150px; color: white}" + "#spam-helper-menu > span > input { vertical-align: -2px; }"; } function notification_init(){ notification_granted = JSON.parse(localStorage.getItem("spam-helper-notification_granted")) || false; var cb = document.createElement("input"); cb.type = "checkbox"; cb.checked = notification_granted; cb.id = "spamhelpernotificationcb"; cb.onchange = function(){ if(cb.checked){ Notification.requestPermission(function(permission){ notification_granted = (permission === "granted"); localStorage.setItem("spam-helper-notification_granted", notification_granted); }); }else{ notification_granted = false; localStorage.setItem("spam-helper-notification_granted", false); } }; var label = document.createElement("label"); label.textContent = "enable notifications"; label.htmlFor = "spamhelpernotificationcb"; var span = document.createElement("span"); span.appendChild(cb); span.appendChild(label); menu.appendChild(span); } // function ElementPool(){ var queue = []; return { constructor: ElementPool, get: function(func){ var r; for(var i = 0; i < queue.length; i++){ if(!document.contains(queue[i])){ r = queue.splice(i,1)[0]; break; } } r = r || func(); queue.push(r); return r; } }; } var apiQueue = new Mutex(); function seApiCall(/* path..., options */){ var path = [].slice.call(arguments); var options = path.pop(); var partialOk = options.partialOk; delete options.partialOk; var responseDeferred = $.Deferred(); var results = []; (function 
getPage(page){ apiQueue.enqueue(function(){ var apiQueueDeferred = $.Deferred(); options.pagesize = 100; options.page = page; console.log("fired request"); GM_xmlhttpRequest({ method: "GET",
url: "http://api.stackexchange.com/2.2/" + path.join('/') + "?" + $.param(options),
random_line_split
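seApiCall, split by this record, pages through the Stack Exchange API 100 items at a time, retrying failed pages and accumulating items until has_more is false (or until partialOk stops it early). A condensed Go sketch of that loop, using only the documented items/has_more response fields (net/http, encoding/json, fmt imported):

type apiPage struct {
	Items   []json.RawMessage `json:"items"`
	HasMore bool              `json:"has_more"`
}

var items []json.RawMessage
for page := 1; ; page++ {
	url := fmt.Sprintf("https://api.stackexchange.com/2.2/questions?site=stackoverflow&pagesize=100&page=%d", page)
	resp, err := http.Get(url)
	if err != nil {
		page-- // naive unbounded retry, mirroring the script's onerror handler
		continue
	}
	var p apiPage
	err = json.NewDecoder(resp.Body).Decode(&p)
	resp.Body.Close()
	if err != nil {
		break // give up on a malformed page
	}
	items = append(items, p.Items...)
	if !p.HasMore {
		break
	}
}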
se-spam-helper.user.js
", site); } } }); } } function parseRealtimeSocket(wsData){ return{ body: wsData.bodySummary, link: wsData.url, site: wsData.apiSiteParameter, tags: wsData.tags, title: htmlUnescape(wsData.titleEncodedFancy), question_id: wsData.id, }; } function onQuestionActive(qData){ checkQuestion(qData); hiderInstall(); checkSiteHasSocket(qData.site); questionQueuePush(qData); } function questionQueuePush(qData){ var site = qData.site; var id = qData.question_id; var queue = questionQueue[site] = questionQueue[site] || {site:site, questions:{}, length:0}; if(!queue.questions[id]) queue.length++; queue.questions[id] = qData; if(queue.length >= 100){ flushQuestionQueue(queue); }else{ if(!queue.timeout){ queue.timeout = setTimeout(onQuestionQueueTimeout.bind(null, queue), QUEUE_TIMEOUT); } } } function flushQuestionQueue(queue){ var ids = Object.keys(queue.questions); queue.length = 0; queue.questions = {}; clearTimeout(queue.timeout); queue.timeout = null; console.log("requesting answers for " + ids.length + " questions on " + queue.site); seApiCall("questions", ids.join(";"), { filter: "!*7Pmg7yi0JKTUuaBigtbGINmVtEq", site: queue.site}) .then(function(response){ response.items.forEach(function(question){ question.site = queue.site; question.title = htmlUnescape(question.title); checkQuestion(question); if(question.answers) question.answers.forEach(function(answer){ checkAnswer(question, answer); }); }); }); } function checkPost(question, answer){var title = question.title; var host = question.site; var site = hostNameToSiteName(host); var site_class = "realtime-" + siteToClass(site); var classname = site_class + "-" + question.question_id; var q_body = $("<div/>", {html: question.body}); var a_body; if(answer) a_body = $("<div/>", {html: answer.body}); var text = answer ? a_body.text() : title + "\n" + q_body.text(); var id = answer ? answer.answer_id : question.question_id; var link = answer ? answer.link : question.link; if(!notifiedOf[site]) notifiedOf[site] = {}; if(!notifiedOf[site][id]){ if(/\b(ass(hole)?|bitch|crap|damn|dumb(ass)?|fag|fuck|idiot|motherfucker|nigga|shit(hole)?|stupid|whore)e?s?\b/i.test(text) || is.mostlyUppercase(text) || /\w+@(\w+\.)+\w{2,}/.test(text.replace(/\s/,'')) || !answer && ( site == "meta" || site == "drupal" || /(?:[^a-hj-np-z ] *){9,}/i.test(title) || is.mostlyUppercase(title) || /\b(vs?|l[ae]|live|watch|free|cheap|online|best|nike|buy|replica|here is|porn|packers|movers|slim|concord|black magic|vashikaran|baba(ji)?|\d+s|kgl|fifa|escort|swtor)\b/i.test(title) ) ){ css.textContent += "." + classname + " {background-color: #FCC}\n"; notify(site, title, answer ? "A - " + a_body.text() : question.body ? "Q - " + q_body.text() : undefined, link); } notifiedOf[site][id] = true; if(!notifiedOfToday[site]) notifiedOfToday[site] = {}; notifiedOfToday[site][id] = true; } } function hiderInstall(){ var children = document.getElementById("mainArea").children; for(var i = 0; i < children.length; i++){ if(children[i].getElementsByClassName("spam-helper-site-hider").length) break; var match = children[i].className.match(/(realtime-[-a-z]+)-\d+/); if(!match) break; var siteClass = match[1]; var hider = imgPool.get(function(){ var hider = document.createElement("img"); hider.src = "https://raw.github.com/honnza/se-spam-helper/master/no-flag.png"; hider.title = "I'm out of spam flags for today here"; hider.className = "spam-helper-site-hider"; hider.style.cursor = "pointer"; return hider; }); hider.onclick = function(siteClass){ daily_css.textContent += "." 
+ siteClass + " {display: none}\n"; ooflagSites[siteClass] = true; }.bind(null, siteClass); children[i].getElementsByClassName("hot-question-site-icon")[0].appendChild(hider); children[i].classList.add(siteClass); } } function notify(site, title, body, url){ if(notification_granted && !ooflagSites[site]){ var notification = new Notification(title, { icon: classToImageUrl(siteToClass(site)), body: body || '' }); notification.onclick = function(){ GM_openInTab(url); GM_setClipboard(url); }; } } function menu_init(){ menu = document.createElement("div"); menu.id = "spam-helper-menu"; var a = document.createElement("a"); a.href = "#"; a.id = "spam-helper-menu-a"; a.textContent = "spam helper"; a.onclick = function(){ if(menu.parentElement){ document.body.removeChild(menu); }else{ document.body.appendChild(menu); menu.style.top = a.offsetTop + 2 * a.offsetHeight + "px"; menu.style.left = a.offsetLeft + "px"; } }; var wrapper = document.getElementsByClassName('topbar-wrapper')[0]; var links = document.getElementsByClassName('topbar-links')[0]; wrapper.insertBefore(menu, links); css.textContent += "#spam-helper-menu {display: inline-block; padding-top:7px}" + "#spam-helper-menu > span {display: block; width: 150px; color: white}" + "#spam-helper-menu > span > input { vertical-align: -2px; }"; } function notification_init(){ notification_granted = JSON.parse(localStorage.getItem("spam-helper-notification_granted")) || false; var cb = document.createElement("input"); cb.type = "checkbox"; cb.checked = notification_granted; cb.id = "spamhelpernotificationcb"; cb.onchange = function(){ if(cb.checked){ Notification.requestPermission(function(permission){ notification_granted = (permission === "granted"); localStorage.setItem("spam-helper-notification_granted", notification_granted); }); }else{ notification_granted = false; localStorage.setItem("spam-helper-notification_granted", false); } }; var label = document.createElement("label"); label.textContent = "enable notifications"; label.htmlFor = "spamhelpernotificationcb"; var span = document.createElement("span"); span.appendChild(cb); span.appendChild(label); menu.appendChild(span); } // function ElementPool(){ var queue = []; return { constructor: ElementPool, get: function(func){ var r; for(var i = 0; i < queue.length; i++){ if(!document.contains(queue[i])){ r = queue.splice(i,1)[0]; break; } } r = r || func(); queue.push(r); return r; } }; } var apiQueue = new Mutex(); function seApiCall(/* path..., options */){ var path = [].slice.call(arguments); var options = path.pop(); var partialOk = options.partialOk; delete options.partialOk; var responseDeferred = $.Deferred(); var results = []; (function getPage(page){ apiQueue.enqueue(function(){ var apiQueueDeferred = $.Deferred(); options.pagesize = 100; options.page = page; console.log("fired request"); GM_xmlhttpRequest({ method: "GET", url: "http://api.stackexchange.com/2.2/" + path.join('/') + "?" + $.param(options), ontimeout: getPage.bind(null, page), onerror: function(response) { console.log(response); getPage(page); // retry }, onload: function(response) { response = JSON.parse(response.responseText); if(response.error_message) throw response.error_message; console.log("got response, remaining quota: " + response.quota_remaining); [].push.apply(results, response.items); if(response.has_more && !partialOk){ console.log("need more pages"); getPage(page + 1); }else
{ console.log("collected " + results.length + " results"); responseDeferred.resolve({items: results, partial: !!response.has_more}); }
conditional_block
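questionQueuePush and flushQuestionQueue in this record implement batch-and-flush: per-site question IDs accumulate until 100 are pending or a timer fires, then a single API call fetches the whole batch. The pattern in miniature, as a Go sketch (sync and time imported; the 5-second timeout stands in for the script's QUEUE_TIMEOUT, whose value is not shown):

type batcher struct {
	mu    sync.Mutex
	ids   map[int]struct{} // initialize before use
	timer *time.Timer
	flush func(ids []int) // e.g. one batched API request
}

func (b *batcher) push(id int) {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.ids[id] = struct{}{}
	if len(b.ids) >= 100 { // flush immediately at the batch limit
		b.flushLocked()
	} else if b.timer == nil { // otherwise arm a one-shot flush timer
		b.timer = time.AfterFunc(5*time.Second, func() {
			b.mu.Lock()
			defer b.mu.Unlock()
			b.flushLocked()
		})
	}
}

func (b *batcher) flushLocked() {
	if len(b.ids) == 0 {
		return // timer fired after a size-triggered flush; nothing to do
	}
	ids := make([]int, 0, len(b.ids))
	for id := range b.ids {
		ids = append(ids, id)
	}
	b.ids = make(map[int]struct{})
	if b.timer != nil {
		b.timer.Stop()
		b.timer = nil
	}
	go b.flush(ids)
}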
se-spam-helper.user.js
Of = {}, notifiedOfToday = {}; var ooflagSites = {}; var questionQueue = {}; var siteWebsocketIDs = {}; var sitesByWebsocketID = {}; var onQuestionQueueTimeout = flushQuestionQueue; var checkAnswer = checkPost, checkQuestion = checkPost; menu_init(); notification_init(); window.addEventListener("unload", onbeforeunload); scrapePage(); function atGMT(time, func){ var timeLeft = (time - Date.now()) % (24 * hours); setTimeout(func, timeLeft); } function onMessage(e){ var response = JSON.parse(e.data); var data = response.data && JSON.parse(response.data); if(response.action === "hb"){ ws.send("hb"); } else if(response.action === "155-questions-active"){ onQuestionActive(parseRealtimeSocket(data)); } else if(response.action.match(/\d+-questions-active/)){ scrapePerSiteQuestion(data.body, sitesByWebsocketID[data.siteid]); } else { console.log("unknown response type: %s in %o", response.action, response); } } function scrapePage()
function scrapePerSiteQuestion(html, site){ var question = new DOMParser().parseFromString(html, "text/html") .getElementsByClassName("question-summary")[0]; var qLink = "http://" + siteNameToHostName(site) + question.querySelector("a.question-hyperlink").getAttribute("href"); onQuestionActive({ body: $(".excerpt", question).html().trim(), link: qLink, site: site, tags: $(".post-tag", question).map(function(){return this.textContent;}), title: $("h3 a", question).text().trim(), question_id: question.id.split("-").pop(), }); } function checkSiteHasSocket(site){ if(siteWebsocketIDs[site] === undefined){ siteWebsocketIDs[site] = false; // prevent double fetching GM_xmlhttpRequest({ method: "GET", url: "http://" + siteNameToHostName(site), ontimeout: checkSiteHasSocket.bind(null, site), onerror: function(response) { console.log(response); checkSiteHasSocket(site); // retry }, onload: function(response){ var scripts = (new DOMParser()) .parseFromString(response.response, "text/html") .head.querySelectorAll("script:not([src])"); [].forEach.call(scripts, function(script){ var match = /StackExchange\.realtime\.subscribeToActiveQuestions\(["']?(\d+)/.exec(script.innerHTML); if(match){ siteWebsocketIDs[site] = match[1]; sitesByWebsocketID[match[1]] = site; } }); if(siteWebsocketIDs[site]){ console.log("the ID for %s is %o", site, siteWebsocketIDs[site]); ws.send(siteWebsocketIDs[site] + "-questions-active"); } else { console.log("could not find the ID for %s", site); } } }); } } function parseRealtimeSocket(wsData){ return{ body: wsData.bodySummary, link: wsData.url, site: wsData.apiSiteParameter, tags: wsData.tags, title: htmlUnescape(wsData.titleEncodedFancy), question_id: wsData.id, }; } function onQuestionActive(qData){ checkQuestion(qData); hiderInstall(); checkSiteHasSocket(qData.site); questionQueuePush(qData); } function questionQueuePush(qData){ var site = qData.site; var id = qData.question_id; var queue = questionQueue[site] = questionQueue[site] || {site:site, questions:{}, length:0}; if(!queue.questions[id]) queue.length++; queue.questions[id] = qData; if(queue.length >= 100){ flushQuestionQueue(queue); }else{ if(!queue.timeout){ queue.timeout = setTimeout(onQuestionQueueTimeout.bind(null, queue), QUEUE_TIMEOUT); } } } function flushQuestionQueue(queue){ var ids = Object.keys(queue.questions); queue.length = 0; queue.questions = {}; clearTimeout(queue.timeout); queue.timeout = null; console.log("requesting answers for " + ids.length + " questions on " + queue.site); seApiCall("questions", ids.join(";"), { filter: "!*7Pmg7yi0JKTUuaBigtbGINmVtEq", site: queue.site}) .then(function(response){ response.items.forEach(function(question){ question.site = queue.site; question.title = htmlUnescape(question.title); checkQuestion(question); if(question.answers) question.answers.forEach(function(answer){ checkAnswer(question, answer); }); }); }); } function checkPost(question, answer){var title = question.title; var host = question.site; var site = hostNameToSiteName(host); var site_class = "realtime-" + siteToClass(site); var classname = site_class + "-" + question.question_id; var q_body = $("<div/>", {html: question.body}); var a_body; if(answer) a_body = $("<div/>", {html: answer.body}); var text = answer ? a_body.text() : title + "\n" + q_body.text(); var id = answer ? answer.answer_id : question.question_id; var link = answer ? 
answer.link : question.link; if(!notifiedOf[site]) notifiedOf[site] = {}; if(!notifiedOf[site][id]){ if(/\b(ass(hole)?|bitch|crap|damn|dumb(ass)?|fag|fuck|idiot|motherfucker|nigga|shit(hole)?|stupid|whore)e?s?\b/i.test(text) || is.mostlyUppercase(text) || /\w+@(\w+\.)+\w{2,}/.test(text.replace(/\s/g,'')) || !answer && ( site == "meta" || site == "drupal" || /(?:[^a-hj-np-z ] *){9,}/i.test(title) || is.mostlyUppercase(title) || /\b(vs?|l[ae]|live|watch|free|cheap|online|best|nike|buy|replica|here is|porn|packers|movers|slim|concord|black magic|vashikaran|baba(ji)?|\d+s|kgl|fifa|escort|swtor)\b/i.test(title) ) ){ css.textContent += "." + classname + " {background-color: #FCC}\n"; notify(site, title, answer ? "A - " + a_body.text() : question.body ? "Q - " + q_body.text() : undefined, link); } notifiedOf[site][id] = true; if(!notifiedOfToday[site]) notifiedOfToday[site] = {}; notifiedOfToday[site][id] = true; } } function hiderInstall(){ var children = document.getElementById("mainArea").children; for(var i = 0; i < children.length; i++){ if(children[i].getElementsByClassName("spam-helper-site-hider").length) break; var match = children[i].className.match(/(realtime-[-a-z]+)-\d+/); if(!match) break; var siteClass = match[1]; var hider = imgPool.get(function(){ var hider = document.createElement("img"); hider.src = "https://raw.github.com/honnza/se-spam-helper/master/no-flag.png"; hider.title = "I'm out of spam flags for today here"; hider.className = "spam-helper-site-hider"; hider.style.cursor = "pointer"; return hider; }); hider.onclick = function(siteClass){ daily_css.textContent += "." + siteClass + " {display: none}\n"; ooflagSites[siteClass] = true; }.bind(null, siteClass); children[i].getElementsByClassName("hot-question-site-icon")[0].appendChild(hider); children[i].classList.add(siteClass); } } function notify(site, title, body, url){ if(notification_granted && !ooflagSites[site]){ var notification = new Notification(title, { icon: classToImageUrl(siteToClass(site)), body: body || '' }); notification.onclick = function(){ GM_openInTab(url); GM_setClipboard(url); }; } } function menu_init(){ menu = document.createElement("div");
{ $(".realtime-question:visible").each(function(){ var qLink = this.querySelector("a.realtime-question-url"); onQuestionActive({ body: undefined, link: qLink.href, site: hostNameToSiteName(qLink.hostname), tags: $(".post-tag", this).map(function(){return this.textContent;}), title: $("h2", this).text().trim(), question_id: qLink.href.match(/\/questions\/(\d+)\//)[1], }); }); hiderInstall(); }
identifier_body
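The questionQueuePush/flushQuestionQueue pair in this row batches question ids per site and flushes either when 100 ids have accumulated (the /questions API accepts at most 100 ids per request) or when QUEUE_TIMEOUT elapses. A sketch of that size-or-deadline batching policy; the timeout value and the deadline-polling style are assumptions, since the userscript drives flushes with setTimeout callbacks instead:

use std::time::{Duration, Instant};

const FLUSH_SIZE: usize = 100;
const QUEUE_TIMEOUT: Duration = Duration::from_secs(5); // assumed value

struct QuestionQueue {
    ids: Vec<u64>,
    deadline: Option<Instant>,
}

impl QuestionQueue {
    /// Buffer an id; returns a batch to send when the size threshold is hit.
    fn push(&mut self, id: u64) -> Option<Vec<u64>> {
        self.ids.push(id);
        if self.ids.len() >= FLUSH_SIZE {
            return Some(self.flush());
        }
        // Arm the deadline only for the first id of a fresh batch.
        self.deadline.get_or_insert(Instant::now() + QUEUE_TIMEOUT);
        None
    }

    /// Call periodically; returns a batch once the deadline has passed.
    fn poll(&mut self) -> Option<Vec<u64>> {
        match self.deadline {
            Some(d) if Instant::now() >= d => Some(self.flush()),
            _ => None,
        }
    }

    fn flush(&mut self) -> Vec<u64> {
        self.deadline = None;
        std::mem::take(&mut self.ids)
    }
}

fn main() {
    let mut q = QuestionQueue { ids: Vec::new(), deadline: None };
    for id in 0..100u64 {
        if let Some(batch) = q.push(id) {
            println!("flushing {} ids", batch.len());
        }
    }
}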
channel_router.rs
See the License for the specific language governing permissions and // limitations under the License. use std::cmp; use std::collections::HashMap; struct Ranges { ranges: Vec<std::ops::Range<usize>>, } impl Ranges { fn new() -> Self { Ranges { ranges: Vec::new() } } fn add(&mut self, start: usize, end: usize) { let (start, end) = (cmp::min(start, end), cmp::max(start, end) + 1); self.ranges.push(std::ops::Range { start, end }); } fn contains(&self, start: usize, end: usize) -> bool { let (start, end) = (cmp::min(start, end), cmp::max(start, end)); (start..=end).any(|v| self.ranges.iter().any(|r| r.contains(&v))) } fn contains_range(&self, range: &std::ops::Range<usize>) -> bool { self.contains(range.start, range.end) } fn range_sum(&self) -> usize { self.ranges.iter().map(|r| r.end - r.start).sum() } } #[derive(Copy, Clone, PartialEq, Debug)] pub enum ChannelState { Free, // Occupied means no connection. This is the same as a constant false. Occupied, // Constant true. Constant, Net(usize), } pub type ChannelLayout = [ChannelState]; impl ChannelState { pub fn is_free(&self) -> bool { self == &ChannelState::Free } pub fn contains_net(&self) -> bool { matches!(self, ChannelState::Net(_)) } pub fn is_constant_on(&self) -> bool { matches!(self, ChannelState::Constant) } } #[derive(Copy, Clone, PartialEq, Debug)] pub enum ChannelOp { Move, Copy, } #[derive(Debug, Clone)] pub struct WireConnection { pub from: usize, pub to: Vec<usize>, pub mode: ChannelOp, } #[derive(Debug)] pub struct ChannelSubState { pub wires: Vec<WireConnection>, pub occupancy_map: bitmap::Bitmap<Vec<usize>, bitmap::OneBit>, } #[derive(Debug)] struct Task { net: usize, from: usize, to: Vec<usize>, } impl Task { fn channel_range_required(&self) -> std::ops::Range<usize> { let from = [self.from]; let min = self.to.iter().chain(&from).min().unwrap(); let max = self.to.iter().chain(&from).max().unwrap(); std::ops::Range { start: *min, end: max + 1, } } fn channel_width_required(&self) -> usize { let r = self.channel_range_required(); r.end - r.start } fn occupied_target_pins(&self, layout: &ChannelLayout) -> Vec<usize> { let mut occupied = Vec::new(); for &idx in &self.to { if layout[idx].contains_net() && layout[idx] != ChannelState::Net(self.net) { occupied.push(idx); } } occupied } // Returns how 'good' a new 'from' position is for this task (when evicting) // so that we can prefer nice spots. fn eviction_cost(&self, new_pos: usize) -> usize { let min = self.to.iter().min().unwrap(); let max = self.to.iter().max().unwrap(); let dist = (self.from as isize - new_pos as isize).abs() as usize; if new_pos > *max { 2 * (new_pos - *max) + dist } else if new_pos < *min { 2 * (*min - new_pos) + dist } else { dist } } } #[derive(Default)] struct RouteTasks { // source idx -> vec<target idx> tasks: HashMap<usize, Vec<usize>>, } impl RouteTasks { fn add(&mut self, from: usize, to: usize) { if let Some(k) = self.tasks.get_mut(&from)
else { self.tasks.insert(from, vec![to]); } } fn into_tasks(mut self, src: &ChannelLayout) -> Vec<Task> { self.tasks .drain() .map(|(k, v)| { let net = match src[k] { ChannelState::Net(i) => i, _ => unreachable!(), }; Task { net, from: k, to: v, } }) .collect::<Vec<_>>() } } pub fn route_channel(start: &ChannelLayout, end: &ChannelLayout) -> Vec<ChannelSubState> { let mut state = start.to_owned(); // Expand the state to be at least end.len() wide. while state.len() < end.len() { state.push(ChannelState::Free); } let mut tasks = RouteTasks::default(); for end_idx in 0..end.len() { if !end[end_idx].contains_net() || end[end_idx] == state[end_idx] { continue; } let state_idx = state .iter() .position(|v| v == &end[end_idx]) .unwrap_or_else(|| panic!("Required field '{:?}' not found", end[end_idx])); tasks.add(state_idx, end_idx); } let mut tasks = tasks.into_tasks(&state); // Order by how much of the channel this task occupies. tasks.sort_by_key(|k| k.channel_width_required()); let mut steps: Vec<ChannelSubState> = Vec::new(); loop { // Ranges of the channel that is currently occupied. let mut ranges = Ranges::new(); // Instruction on how to connect pins in the current part of the channel. let mut wires = Vec::new(); // To detect if we were unable to do anything due to blocked pins. let old_task_len = tasks.len(); tasks = tasks .drain(0..tasks.len()) .filter(|task| { // Speed things up by only 'enforcing' 50% channel utilization. if ranges.range_sum() > (cmp::max(state.len(), end.len()) / 2) { return true; } // Do we have the required part of the channel available? if ranges.contains_range(&task.channel_range_required()) { return true; } let blocking_pins = task.occupied_target_pins(&state); if blocking_pins.is_empty() { // Targets are free, directly move (or copy) it there. let keep = if task.from >= end.len() || state[task.from] != end[task.from] { state[task.from] = ChannelState::Free; false } else { true }; wires.push(WireConnection { from: task.from, to: task.to.clone(), mode: if keep { ChannelOp::Copy } else { ChannelOp::Move }, }); let r = task.channel_range_required(); // -1 here since .add() + channel_range_required() will do +1. ranges.add(r.start, r.end - 1); for &to in &task.to { state[to] = ChannelState::Net(task.net); } // We successfully handled this one. return false; } true }) .collect::<Vec<_>>(); // We were unable to handle any tasks -> we need to evict some channels. if old_task_len == tasks.len() { // Find available positions where we can evict to. let mut free_positions = state .iter() .enumerate() .filter(|(_, v)| !v.contains_net()) .map(|(k, _)| k) .filter(|&k| k >= end.len() || !end[k].contains_net()) .collect::<Vec<_>>(); if free_positions.is_empty() { println!("[!] No free positions found, expanding channel"); // Make sure that we have some room, scaling with the number of // remaining tasks as a random tradeoff. for _ in 0..(tasks.len() / 10 + 1) { state.push(ChannelState::Free); free_positions.push(state.len() - 1); } } for task_idx in 0..tasks.len() { let blocking_pins = tasks[task_idx].occupied_target_pins(&state); for to_evict in blocking_pins { // Find corresponding task. let task_idx_to_evict = tasks .iter() .position(|t| t.from == to_evict) .unwrap_or_else(|| panic!("Could not find task blocking {}", to_evict)); // Find a good place for this task to evict to. free_positions.sort_by(|&a, &b| { // Comparing in the opposite order on purpose here so // that we can use pop() later. 
tasks[task_idx_to_evict] .eviction_cost(b) .cmp(&tasks[task_idx_to_evict].eviction_cost(a)) }); let from = tasks[task_idx_to_evict].from; let new_pos = *free_positions.last().unwrap(); // Check whether the space is actually available
{ k.push(to); }
conditional_block
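The Ranges helper in this row treats both arguments of add and contains as inclusive channel positions: add stores min..max+1 as a half-open range, and contains tests every integer position of the query span against every stored range. A quick check of that behaviour, assuming the Ranges type from channel_router.rs is in scope:

fn main() {
    let mut r = Ranges::new();
    r.add(2, 5);                // occupies positions 2..=5 (stored as 2..6)
    assert!(r.contains(5, 7));  // overlaps the occupied span at position 5
    assert!(!r.contains(6, 7)); // entirely to the right of the occupied span
    assert_eq!(r.range_sum(), 4);
    println!("ok");
}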
channel_router.rs
See the License for the specific language governing permissions and // limitations under the License. use std::cmp; use std::collections::HashMap; struct Ranges { ranges: Vec<std::ops::Range<usize>>, } impl Ranges { fn new() -> Self { Ranges { ranges: Vec::new() } } fn add(&mut self, start: usize, end: usize) { let (start, end) = (cmp::min(start, end), cmp::max(start, end) + 1); self.ranges.push(std::ops::Range { start, end }); } fn contains(&self, start: usize, end: usize) -> bool { let (start, end) = (cmp::min(start, end), cmp::max(start, end)); (start..=end).any(|v| self.ranges.iter().any(|r| r.contains(&v))) } fn contains_range(&self, range: &std::ops::Range<usize>) -> bool { self.contains(range.start, range.end) } fn range_sum(&self) -> usize { self.ranges.iter().map(|r| r.end - r.start).sum() } } #[derive(Copy, Clone, PartialEq, Debug)] pub enum ChannelState { Free, // Occupied means no connection. This is the same as a constant false. Occupied, // Constant true. Constant, Net(usize), } pub type ChannelLayout = [ChannelState]; impl ChannelState { pub fn
(&self) -> bool { self == &ChannelState::Free } pub fn contains_net(&self) -> bool { matches!(self, ChannelState::Net(_)) } pub fn is_constant_on(&self) -> bool { matches!(self, ChannelState::Constant) } } #[derive(Copy, Clone, PartialEq, Debug)] pub enum ChannelOp { Move, Copy, } #[derive(Debug, Clone)] pub struct WireConnection { pub from: usize, pub to: Vec<usize>, pub mode: ChannelOp, } #[derive(Debug)] pub struct ChannelSubState { pub wires: Vec<WireConnection>, pub occupancy_map: bitmap::Bitmap<Vec<usize>, bitmap::OneBit>, } #[derive(Debug)] struct Task { net: usize, from: usize, to: Vec<usize>, } impl Task { fn channel_range_required(&self) -> std::ops::Range<usize> { let from = [self.from]; let min = self.to.iter().chain(&from).min().unwrap(); let max = self.to.iter().chain(&from).max().unwrap(); std::ops::Range { start: *min, end: max + 1, } } fn channel_width_required(&self) -> usize { let r = self.channel_range_required(); r.end - r.start } fn occupied_target_pins(&self, layout: &ChannelLayout) -> Vec<usize> { let mut occupied = Vec::new(); for &idx in &self.to { if layout[idx].contains_net() && layout[idx] != ChannelState::Net(self.net) { occupied.push(idx); } } occupied } // Returns how 'good' a new 'from' position is for this task (when evicting) // so that we can prefer nice spots. fn eviction_cost(&self, new_pos: usize) -> usize { let min = self.to.iter().min().unwrap(); let max = self.to.iter().max().unwrap(); let dist = (self.from as isize - new_pos as isize).abs() as usize; if new_pos > *max { 2 * (new_pos - *max) + dist } else if new_pos < *min { 2 * (*min - new_pos) + dist } else { dist } } } #[derive(Default)] struct RouteTasks { // source idx -> vec<target idx> tasks: HashMap<usize, Vec<usize>>, } impl RouteTasks { fn add(&mut self, from: usize, to: usize) { if let Some(k) = self.tasks.get_mut(&from) { k.push(to); } else { self.tasks.insert(from, vec![to]); } } fn into_tasks(mut self, src: &ChannelLayout) -> Vec<Task> { self.tasks .drain() .map(|(k, v)| { let net = match src[k] { ChannelState::Net(i) => i, _ => unreachable!(), }; Task { net, from: k, to: v, } }) .collect::<Vec<_>>() } } pub fn route_channel(start: &ChannelLayout, end: &ChannelLayout) -> Vec<ChannelSubState> { let mut state = start.to_owned(); // Expand the state to be at least end.len() wide. while state.len() < end.len() { state.push(ChannelState::Free); } let mut tasks = RouteTasks::default(); for end_idx in 0..end.len() { if !end[end_idx].contains_net() || end[end_idx] == state[end_idx] { continue; } let state_idx = state .iter() .position(|v| v == &end[end_idx]) .unwrap_or_else(|| panic!("Required field '{:?}' not found", end[end_idx])); tasks.add(state_idx, end_idx); } let mut tasks = tasks.into_tasks(&state); // Order by how much of the channel this task occupies. tasks.sort_by_key(|k| k.channel_width_required()); let mut steps: Vec<ChannelSubState> = Vec::new(); loop { // Ranges of the channel that is currently occupied. let mut ranges = Ranges::new(); // Instruction on how to connect pins in the current part of the channel. let mut wires = Vec::new(); // To detect if we were unable to do anything due to blocked pins. let old_task_len = tasks.len(); tasks = tasks .drain(0..tasks.len()) .filter(|task| { // Speed things up by only 'enforcing' 50% channel utilization. if ranges.range_sum() > (cmp::max(state.len(), end.len()) / 2) { return true; } // Do we have the required part of the channel available? 
if ranges.contains_range(&task.channel_range_required()) { return true; } let blocking_pins = task.occupied_target_pins(&state); if blocking_pins.is_empty() { // Targets are free, directly move (or copy) it there. let keep = if task.from >= end.len() || state[task.from] != end[task.from] { state[task.from] = ChannelState::Free; false } else { true }; wires.push(WireConnection { from: task.from, to: task.to.clone(), mode: if keep { ChannelOp::Copy } else { ChannelOp::Move }, }); let r = task.channel_range_required(); // -1 here since .add() + channel_range_required() will do +1. ranges.add(r.start, r.end - 1); for &to in &task.to { state[to] = ChannelState::Net(task.net); } // We successfully handled this one. return false; } true }) .collect::<Vec<_>>(); // We were unable to handle any tasks -> we need to evict some channels. if old_task_len == tasks.len() { // Find available positions where we can evict to. let mut free_positions = state .iter() .enumerate() .filter(|(_, v)| !v.contains_net()) .map(|(k, _)| k) .filter(|&k| k >= end.len() || !end[k].contains_net()) .collect::<Vec<_>>(); if free_positions.is_empty() { println!("[!] No free positions found, expanding channel"); // Make sure that we have some room, scaling with the number of // remaining tasks as a random tradeoff. for _ in 0..(tasks.len() / 10 + 1) { state.push(ChannelState::Free); free_positions.push(state.len() - 1); } } for task_idx in 0..tasks.len() { let blocking_pins = tasks[task_idx].occupied_target_pins(&state); for to_evict in blocking_pins { // Find corresponding task. let task_idx_to_evict = tasks .iter() .position(|t| t.from == to_evict) .unwrap_or_else(|| panic!("Could not find task blocking {}", to_evict)); // Find a good place for this task to evict to. free_positions.sort_by(|&a, &b| { // Comparing in the opposite order on purpose here so // that we can use pop() later. tasks[task_idx_to_evict] .eviction_cost(b) .cmp(&tasks[task_idx_to_evict].eviction_cost(a)) }); let from = tasks[task_idx_to_evict].from; let new_pos = *free_positions.last().unwrap(); // Check whether the space is actually available
is_free
identifier_name
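eviction_cost above charges the plain move distance |from - new_pos|, plus a doubled penalty for every position the candidate spot sits outside the task's target span, so eviction prefers spots that do not widen the channel range the task will later need. A worked check, assuming the Task type from channel_router.rs is in scope:

fn main() {
    let task = Task { net: 0, from: 4, to: vec![6, 9] };
    assert_eq!(task.eviction_cost(10), 8); // 2*(10-9) + |4-10|
    assert_eq!(task.eviction_cost(7), 3);  // inside 6..=9, distance only
    assert_eq!(task.eviction_cost(2), 10); // 2*(6-2) + |4-2|
    println!("ok");
}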
channel_router.rs
See the License for the specific language governing permissions and // limitations under the License. use std::cmp; use std::collections::HashMap; struct Ranges { ranges: Vec<std::ops::Range<usize>>, } impl Ranges { fn new() -> Self { Ranges { ranges: Vec::new() } } fn add(&mut self, start: usize, end: usize) { let (start, end) = (cmp::min(start, end), cmp::max(start, end) + 1); self.ranges.push(std::ops::Range { start, end }); } fn contains(&self, start: usize, end: usize) -> bool { let (start, end) = (cmp::min(start, end), cmp::max(start, end)); (start..=end).any(|v| self.ranges.iter().any(|r| r.contains(&v))) } fn contains_range(&self, range: &std::ops::Range<usize>) -> bool { self.contains(range.start, range.end) } fn range_sum(&self) -> usize { self.ranges.iter().map(|r| r.end - r.start).sum() } } #[derive(Copy, Clone, PartialEq, Debug)] pub enum ChannelState { Free, // Occupied means no connection. This is the same as a constant false. Occupied, // Constant true. Constant, Net(usize), } pub type ChannelLayout = [ChannelState]; impl ChannelState { pub fn is_free(&self) -> bool { self == &ChannelState::Free } pub fn contains_net(&self) -> bool { matches!(self, ChannelState::Net(_)) } pub fn is_constant_on(&self) -> bool { matches!(self, ChannelState::Constant) } } #[derive(Copy, Clone, PartialEq, Debug)] pub enum ChannelOp { Move, Copy, } #[derive(Debug, Clone)] pub struct WireConnection { pub from: usize, pub to: Vec<usize>, pub mode: ChannelOp, } #[derive(Debug)] pub struct ChannelSubState { pub wires: Vec<WireConnection>, pub occupancy_map: bitmap::Bitmap<Vec<usize>, bitmap::OneBit>, } #[derive(Debug)] struct Task { net: usize, from: usize, to: Vec<usize>, } impl Task { fn channel_range_required(&self) -> std::ops::Range<usize> { let from = [self.from]; let min = self.to.iter().chain(&from).min().unwrap(); let max = self.to.iter().chain(&from).max().unwrap(); std::ops::Range { start: *min, end: max + 1, } } fn channel_width_required(&self) -> usize { let r = self.channel_range_required(); r.end - r.start } fn occupied_target_pins(&self, layout: &ChannelLayout) -> Vec<usize> { let mut occupied = Vec::new(); for &idx in &self.to { if layout[idx].contains_net() && layout[idx] != ChannelState::Net(self.net) { occupied.push(idx); } } occupied } // Returns how 'good' a new 'from' position is for this task (when evicting) // so that we can prefer nice spots. fn eviction_cost(&self, new_pos: usize) -> usize { let min = self.to.iter().min().unwrap(); let max = self.to.iter().max().unwrap(); let dist = (self.from as isize - new_pos as isize).abs() as usize; if new_pos > *max { 2 * (new_pos - *max) + dist } else if new_pos < *min { 2 * (*min - new_pos) + dist } else { dist } } } #[derive(Default)] struct RouteTasks { // source idx -> vec<target idx> tasks: HashMap<usize, Vec<usize>>, } impl RouteTasks { fn add(&mut self, from: usize, to: usize) {
} else { self.tasks.insert(from, vec![to]); } } fn into_tasks(mut self, src: &ChannelLayout) -> Vec<Task> { self.tasks .drain() .map(|(k, v)| { let net = match src[k] { ChannelState::Net(i) => i, _ => unreachable!(), }; Task { net, from: k, to: v, } }) .collect::<Vec<_>>() } } pub fn route_channel(start: &ChannelLayout, end: &ChannelLayout) -> Vec<ChannelSubState> { let mut state = start.to_owned(); // Expand the state to be at least end.len() wide. while state.len() < end.len() { state.push(ChannelState::Free); } let mut tasks = RouteTasks::default(); for end_idx in 0..end.len() { if !end[end_idx].contains_net() || end[end_idx] == state[end_idx] { continue; } let state_idx = state .iter() .position(|v| v == &end[end_idx]) .unwrap_or_else(|| panic!("Required field '{:?}' not found", end[end_idx])); tasks.add(state_idx, end_idx); } let mut tasks = tasks.into_tasks(&state); // Order by how much of the channel this task occupies. tasks.sort_by_key(|k| k.channel_width_required()); let mut steps: Vec<ChannelSubState> = Vec::new(); loop { // Ranges of the channel that is currently occupied. let mut ranges = Ranges::new(); // Instruction on how to connect pins in the current part of the channel. let mut wires = Vec::new(); // To detect if we were unable to do anything due to blocked pins. let old_task_len = tasks.len(); tasks = tasks .drain(0..tasks.len()) .filter(|task| { // Speed things up by only 'enforcing' 50% channel utilization. if ranges.range_sum() > (cmp::max(state.len(), end.len()) / 2) { return true; } // Do we have the required part of the channel available? if ranges.contains_range(&task.channel_range_required()) { return true; } let blocking_pins = task.occupied_target_pins(&state); if blocking_pins.is_empty() { // Targets are free, directly move (or copy) it there. let keep = if task.from >= end.len() || state[task.from] != end[task.from] { state[task.from] = ChannelState::Free; false } else { true }; wires.push(WireConnection { from: task.from, to: task.to.clone(), mode: if keep { ChannelOp::Copy } else { ChannelOp::Move }, }); let r = task.channel_range_required(); // -1 here since .add() + channel_range_required() will do +1. ranges.add(r.start, r.end - 1); for &to in &task.to { state[to] = ChannelState::Net(task.net); } // We successfully handled this one. return false; } true }) .collect::<Vec<_>>(); // We were unable to handle any tasks -> we need to evict some channels. if old_task_len == tasks.len() { // Find available positions where we can evict to. let mut free_positions = state .iter() .enumerate() .filter(|(_, v)| !v.contains_net()) .map(|(k, _)| k) .filter(|&k| k >= end.len() || !end[k].contains_net()) .collect::<Vec<_>>(); if free_positions.is_empty() { println!("[!] No free positions found, expanding channel"); // Make sure that we have some room, scaling with the number of // remaining tasks as a random tradeoff. for _ in 0..(tasks.len() / 10 + 1) { state.push(ChannelState::Free); free_positions.push(state.len() - 1); } } for task_idx in 0..tasks.len() { let blocking_pins = tasks[task_idx].occupied_target_pins(&state); for to_evict in blocking_pins { // Find corresponding task. let task_idx_to_evict = tasks .iter() .position(|t| t.from == to_evict) .unwrap_or_else(|| panic!("Could not find task blocking {}", to_evict)); // Find a good place for this task to evict to. free_positions.sort_by(|&a, &b| { // Comparing in the opposite order on purpose here so // that we can use pop() later. 
tasks[task_idx_to_evict] .eviction_cost(b) .cmp(&tasks[task_idx_to_evict].eviction_cost(a)) }); let from = tasks[task_idx_to_evict].from; let new_pos = *free_positions.last().unwrap(); // Check whether the space is actually available.
if let Some(k) = self.tasks.get_mut(&from) { k.push(to);
random_line_split
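The get_mut-then-insert branch completed by this row is exactly what the HashMap entry API expresses directly; an equivalent, more idiomatic form of RouteTasks::add would be:

use std::collections::HashMap;

// or_default() inserts an empty Vec on first use of a key, so the two
// branches of the original collapse into one expression.
fn add(tasks: &mut HashMap<usize, Vec<usize>>, from: usize, to: usize) {
    tasks.entry(from).or_default().push(to);
}

fn main() {
    let mut tasks = HashMap::new();
    add(&mut tasks, 1, 10);
    add(&mut tasks, 1, 11);
    assert_eq!(tasks[&1], vec![10, 11]);
}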
channel_router.rs
the License for the specific language governing permissions and // limitations under the License. use std::cmp; use std::collections::HashMap; struct Ranges { ranges: Vec<std::ops::Range<usize>>, } impl Ranges { fn new() -> Self { Ranges { ranges: Vec::new() } } fn add(&mut self, start: usize, end: usize) { let (start, end) = (cmp::min(start, end), cmp::max(start, end) + 1); self.ranges.push(std::ops::Range { start, end }); } fn contains(&self, start: usize, end: usize) -> bool { let (start, end) = (cmp::min(start, end), cmp::max(start, end)); (start..=end).any(|v| self.ranges.iter().any(|r| r.contains(&v))) } fn contains_range(&self, range: &std::ops::Range<usize>) -> bool { self.contains(range.start, range.end) } fn range_sum(&self) -> usize
} #[derive(Copy, Clone, PartialEq, Debug)] pub enum ChannelState { Free, // Occupied means no connection. This is the same as a constant false. Occupied, // Constant true. Constant, Net(usize), } pub type ChannelLayout = [ChannelState]; impl ChannelState { pub fn is_free(&self) -> bool { self == &ChannelState::Free } pub fn contains_net(&self) -> bool { matches!(self, ChannelState::Net(_)) } pub fn is_constant_on(&self) -> bool { matches!(self, ChannelState::Constant) } } #[derive(Copy, Clone, PartialEq, Debug)] pub enum ChannelOp { Move, Copy, } #[derive(Debug, Clone)] pub struct WireConnection { pub from: usize, pub to: Vec<usize>, pub mode: ChannelOp, } #[derive(Debug)] pub struct ChannelSubState { pub wires: Vec<WireConnection>, pub occupancy_map: bitmap::Bitmap<Vec<usize>, bitmap::OneBit>, } #[derive(Debug)] struct Task { net: usize, from: usize, to: Vec<usize>, } impl Task { fn channel_range_required(&self) -> std::ops::Range<usize> { let from = [self.from]; let min = self.to.iter().chain(&from).min().unwrap(); let max = self.to.iter().chain(&from).max().unwrap(); std::ops::Range { start: *min, end: max + 1, } } fn channel_width_required(&self) -> usize { let r = self.channel_range_required(); r.end - r.start } fn occupied_target_pins(&self, layout: &ChannelLayout) -> Vec<usize> { let mut occupied = Vec::new(); for &idx in &self.to { if layout[idx].contains_net() && layout[idx] != ChannelState::Net(self.net) { occupied.push(idx); } } occupied } // Returns how 'good' a new 'from' position is for this task (when evicting) // so that we can prefer nice spots. fn eviction_cost(&self, new_pos: usize) -> usize { let min = self.to.iter().min().unwrap(); let max = self.to.iter().max().unwrap(); let dist = (self.from as isize - new_pos as isize).abs() as usize; if new_pos > *max { 2 * (new_pos - *max) + dist } else if new_pos < *min { 2 * (*min - new_pos) + dist } else { dist } } } #[derive(Default)] struct RouteTasks { // source idx -> vec<target idx> tasks: HashMap<usize, Vec<usize>>, } impl RouteTasks { fn add(&mut self, from: usize, to: usize) { if let Some(k) = self.tasks.get_mut(&from) { k.push(to); } else { self.tasks.insert(from, vec![to]); } } fn into_tasks(mut self, src: &ChannelLayout) -> Vec<Task> { self.tasks .drain() .map(|(k, v)| { let net = match src[k] { ChannelState::Net(i) => i, _ => unreachable!(), }; Task { net, from: k, to: v, } }) .collect::<Vec<_>>() } } pub fn route_channel(start: &ChannelLayout, end: &ChannelLayout) -> Vec<ChannelSubState> { let mut state = start.to_owned(); // Expand the state to be at least end.len() wide. while state.len() < end.len() { state.push(ChannelState::Free); } let mut tasks = RouteTasks::default(); for end_idx in 0..end.len() { if !end[end_idx].contains_net() || end[end_idx] == state[end_idx] { continue; } let state_idx = state .iter() .position(|v| v == &end[end_idx]) .unwrap_or_else(|| panic!("Required field '{:?}' not found", end[end_idx])); tasks.add(state_idx, end_idx); } let mut tasks = tasks.into_tasks(&state); // Order by how much of the channel this task occupies. tasks.sort_by_key(|k| k.channel_width_required()); let mut steps: Vec<ChannelSubState> = Vec::new(); loop { // Ranges of the channel that is currently occupied. let mut ranges = Ranges::new(); // Instruction on how to connect pins in the current part of the channel. let mut wires = Vec::new(); // To detect if we were unable to do anything due to blocked pins. 
let old_task_len = tasks.len(); tasks = tasks .drain(0..tasks.len()) .filter(|task| { // Speed things up by only 'enforcing' 50% channel utilization. if ranges.range_sum() > (cmp::max(state.len(), end.len()) / 2) { return true; } // Do we have the required part of the channel available? if ranges.contains_range(&task.channel_range_required()) { return true; } let blocking_pins = task.occupied_target_pins(&state); if blocking_pins.is_empty() { // Targets are free, directly move (or copy) it there. let keep = if task.from >= end.len() || state[task.from] != end[task.from] { state[task.from] = ChannelState::Free; false } else { true }; wires.push(WireConnection { from: task.from, to: task.to.clone(), mode: if keep { ChannelOp::Copy } else { ChannelOp::Move }, }); let r = task.channel_range_required(); // -1 here since .add() + channel_range_required() will do +1. ranges.add(r.start, r.end - 1); for &to in &task.to { state[to] = ChannelState::Net(task.net); } // We successfully handled this one. return false; } true }) .collect::<Vec<_>>(); // We were unable to handle any tasks -> we need to evict some channels. if old_task_len == tasks.len() { // Find available positions where we can evict to. let mut free_positions = state .iter() .enumerate() .filter(|(_, v)| !v.contains_net()) .map(|(k, _)| k) .filter(|&k| k >= end.len() || !end[k].contains_net()) .collect::<Vec<_>>(); if free_positions.is_empty() { println!("[!] No free positions found, expanding channel"); // Make sure that we have some room, scaling with the number of // remaining tasks as a random tradeoff. for _ in 0..(tasks.len() / 10 + 1) { state.push(ChannelState::Free); free_positions.push(state.len() - 1); } } for task_idx in 0..tasks.len() { let blocking_pins = tasks[task_idx].occupied_target_pins(&state); for to_evict in blocking_pins { // Find corresponding task. let task_idx_to_evict = tasks .iter() .position(|t| t.from == to_evict) .unwrap_or_else(|| panic!("Could not find task blocking {}", to_evict)); // Find a good place for this task to evict to. free_positions.sort_by(|&a, &b| { // Comparing in the opposite order on purpose here so // that we can use pop() later. tasks[task_idx_to_evict] .eviction_cost(b) .cmp(&tasks[task_idx_to_evict].eviction_cost(a)) }); let from = tasks[task_idx_to_evict].from; let new_pos = *free_positions.last().unwrap(); // Check whether the space is actually available
{ self.ranges.iter().map(|r| r.end - r.start).sum() }
identifier_body
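route_channel sorts its tasks by channel_width_required, which spans from the source pin to the furthest target, so narrow tasks are placed before wide ones that are more likely to collide with the occupied ranges. A small check of the range arithmetic, assuming the Task type above is in scope:

fn main() {
    // Source pin 3, targets 1 and 7: the task needs positions 1..8.
    let task = Task { net: 0, from: 3, to: vec![1, 7] };
    let r = task.channel_range_required();
    assert_eq!((r.start, r.end), (1, 8));
    assert_eq!(task.channel_width_required(), 7);
    println!("ok");
}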
chain.rs
how much of // this sector is actually used. if block[1] < 1 { // It's not valid for a chain sector to not include the first two bytes // as allocated. return Err(DiskError::InvalidChainLink.into()); } Ok(ChainLink::Tail(block[1] as usize + 1)) // 2..=256 } else { Ok(ChainLink::Next(Location::new(block[0], block[1]))) } } #[inline] pub fn to_bytes(&self, bytes: &mut [u8]) { assert!(bytes.len() >= 2); match &self { ChainLink::Next(location) => location.write_bytes(bytes), ChainLink::Tail(size) => { assert!(*size >= 2 && *size <= 256); bytes[0] = 0x00; bytes[1] = (*size - 1) as u8; } } } } /// A ChainSector is the result of a chain iteration, and provides the block contents and the /// location from which it was read. pub struct ChainSector { /// The 256-byte block contents, which includes the two-byte NTS (next track and sector) link. pub data: Vec<u8>, pub location: Location, }
visited_sectors: HashSet<Location>, block: [u8; BLOCK_SIZE], } impl ChainIterator { /// Create a new chain iterator starting at the specified location. pub fn new(blocks: BlockDeviceRef, starting_sector: Location) -> ChainIterator { ChainIterator { blocks, next_sector: Some(starting_sector), visited_sectors: HashSet::new(), block: [0u8; BLOCK_SIZE], } } /// Read the entire chain and return a list of locations. pub fn locations(self) -> io::Result<Vec<Location>> { self.map(|r| r.map(|cs| cs.location)).collect() } } impl Iterator for ChainIterator { type Item = io::Result<ChainSector>; fn next(&mut self) -> Option<io::Result<ChainSector>> { let location = match self.next_sector.take() { Some(next) => next, None => return None, }; // Loop detection. if !self.visited_sectors.insert(location) { return Some(Err(DiskError::ChainLoop.into())); } // Read the next sector. { let blocks = self.blocks.borrow(); let block = match blocks.sector(location) { Ok(b) => b, Err(e) => return Some(Err(e)), }; self.block.copy_from_slice(block); } // Trim the block if needed. let size = match ChainLink::new(&self.block[..]) { Ok(ChainLink::Next(location)) => { self.next_sector = Some(location); BLOCK_SIZE // The entire sector is used. } Ok(ChainLink::Tail(size)) => size, Err(e) => return Some(Err(e)), }; let block = &self.block[..size]; Some(Ok(ChainSector { data: block.to_vec(), location, })) } } /// ChainReader objects implement the Read trait and are used to read a byte stream /// represented as a series of chained sectors on the disk image. Simple files /// (e.g. CBM PRG and SEQ files) store data in a single chain where the /// beginning track and sector is provided in the directory entry. More exotic /// file types (GEOS, REL, etc.) use more complex structures, possibly with /// multiple ChainReader objects (e.g. a GEOS VLIR file may provide a /// ChainReader for each record). pub struct ChainReader { chain: ChainIterator, block: Option<Vec<u8>>, eof: bool, } impl ChainReader { pub fn new(blocks: BlockDeviceRef, start: Location) -> ChainReader { let chain = ChainIterator::new(blocks, start); ChainReader { chain, block: None, eof: false, } } } impl io::Read for ChainReader { fn read(&mut self, mut buf: &mut [u8]) -> io::Result<usize> { let mut total_nbytes = 0; while !buf.is_empty() && !self.eof { match self.block.take() { Some(mut block) => { // Copy as much of this block as possible into the caller-provided buffer. let nbytes = block.len().min(buf.len()); buf[0..nbytes].copy_from_slice(&block[0..nbytes]); total_nbytes += nbytes; // Keep the unread tail of the block (if any) for the next call. if block.len() > nbytes { self.block = Some(block.split_off(nbytes)); } // Reduce the provided buffer slice to the unwritten portion. let buf_ref = &mut buf; let value: &mut [u8] = std::mem::take(buf_ref); *buf_ref = &mut value[nbytes..]; } None => { // Read the next block. match self.chain.next() { Some(Ok(mut block)) => { // discard the next-track/sector bytes self.block = Some(block.data.split_off(2)); // Loop back to the Some(_) case to process the block. } Some(Err(e)) => { self.eof = true; return Err(e); } None => self.eof = true, } } } } Ok(total_nbytes) } } /// A writer for writing data to a chain. The chain is extended as needed according to the /// allocation algorithm for the disk format.
pub struct ChainWriter { blocks: BlockDeviceRef, bam: BamRef, entry: DirectoryEntry, location: Location, block: Vec<u8>, dirty: bool, } impl ChainWriter { pub fn new( blocks: BlockDeviceRef, bam: BamRef, entry: DirectoryEntry, start: Location, ) -> io::Result<ChainWriter> { // Advance to the last block in the chain. let tail_block; let mut tail_location; { let blocks = blocks.borrow(); let mut block = blocks.sector(start)?; tail_location = start; while let ChainLink::Next(location) = ChainLink::new(block)? { block = blocks.sector(location)?; tail_location = location; } tail_block = block.to_vec(); } Ok(ChainWriter { blocks, bam, entry, location: tail_location, block: tail_block, dirty: true, }) } fn increment_entry_blocks(&mut self) -> io::Result<()> { let mut blocks = self.blocks.borrow_mut(); blocks.positioned_read(&mut self.entry)?; self.entry.file_size += 1; blocks.positioned_write(&self.entry)?; Ok(()) } fn allocate_next_block(&mut self) -> io::Result<usize> { // NOTE: The ordering of these steps is important for consistency. We don't // want a block to be allocated in BAM, then not used because an error // was thrown later. // Write the current block without the updated link. self.write_current_block()?; // Find a new block. let next_location = self.bam.borrow_mut().next_free_block(None)?; // Initialize a fresh block in memory with a link indicating a tail block with // zero bytes used. (Really, two bytes used for the link, but zero data // bytes used.) for i in 2..BLOCK_SIZE { self.block[i] = 0; } ChainLink::Tail(2).to_bytes(&mut self.block[..]); // Write the fresh block to the new location self.blocks .borrow_mut() .sector_mut(next_location)? .copy_from_slice(&self.block); // Allocate the next block. self.bam.borrow_mut().allocate(next_location)?; // Increment the directory entry's file size (measured in blocks) self.increment_entry_blocks()?; // If allocation succeeds, only then do we link the current block to the next // block. let mut blocks = self.blocks.borrow_mut(); let block = match blocks.sector_mut(self.location) { Ok(block) => block, Err(e) => { // Roll back the allocation. self.bam.borrow_mut().free(next_location)?; return Err(e); } }; next_location.write_bytes(block); // Update state self.location = next_location; // Return the available bytes in the newly loaded block, which is always two // less than the block size. Ok(BLOCK_SIZE - 2) } fn write_current_block(&mut self) -> io::Result<()> { // Write the current block let mut blocks = self.blocks.borrow_mut(); blocks .sector_mut(self.location)? .copy_from_slice(&self.block); Ok(()) } } impl Drop
/// Returns a ChainSector which includes the NTS (next track and sector) link. pub struct ChainIterator { blocks: BlockDeviceRef, next_sector: Option<Location>,
random_line_split
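ChainIterator's loop detection in this row relies on HashSet::insert returning false for an already-seen value: revisiting a sector means the next-track/sector links form a cycle, which is reported as DiskError::ChainLoop instead of iterating forever. The same idea, condensed over a plain next-pointer table:

use std::collections::HashSet;

fn walk(next: &[Option<usize>], start: usize) -> Result<Vec<usize>, &'static str> {
    let mut visited = HashSet::new();
    let mut chain = Vec::new();
    let mut cur = Some(start);
    while let Some(loc) = cur {
        if !visited.insert(loc) {
            return Err("chain loop"); // mirrors DiskError::ChainLoop
        }
        chain.push(loc);
        cur = next[loc];
    }
    Ok(chain)
}

fn main() {
    // 0 -> 1 -> 2 -> end
    assert_eq!(walk(&[Some(1), Some(2), None], 0).unwrap(), vec![0, 1, 2]);
    // 0 -> 1 -> 0 is a loop
    assert!(walk(&[Some(1), Some(0), None], 0).is_err());
}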
chain.rs
how much of // this sector is actually used. if block[1] < 1 { // It's not valid for a chain sector to not include the first two bytes // as allocated. return Err(DiskError::InvalidChainLink.into()); } Ok(ChainLink::Tail(block[1] as usize + 1)) // 2..=256 } else { Ok(ChainLink::Next(Location::new(block[0], block[1]))) } } #[inline] pub fn to_bytes(&self, bytes: &mut [u8]) { assert!(bytes.len() >= 2); match &self { ChainLink::Next(location) => location.write_bytes(bytes), ChainLink::Tail(size) => { assert!(*size >= 2 && *size <= 256); bytes[0] = 0x00; bytes[1] = (*size - 1) as u8; } } } } /// A ChainSector is the result of a chain iteration, and provides the block contents and the /// location from which it was read. pub struct ChainSector { /// The 256-byte block contents, which includes the two-byte NTS (next track and sector) link. pub data: Vec<u8>, pub location: Location, } /// Returns a ChainSector which includes the NTS (next track and sector) link. pub struct ChainIterator { blocks: BlockDeviceRef, next_sector: Option<Location>, visited_sectors: HashSet<Location>, block: [u8; BLOCK_SIZE], } impl ChainIterator { /// Create a new chain iterator starting at the specified location. pub fn new(blocks: BlockDeviceRef, starting_sector: Location) -> ChainIterator { ChainIterator { blocks, next_sector: Some(starting_sector), visited_sectors: HashSet::new(), block: [0u8; BLOCK_SIZE], } } /// Read the entire chain and return a list of locations. pub fn locations(self) -> io::Result<Vec<Location>> { self.map(|r| r.map(|cs| cs.location)).collect() } } impl Iterator for ChainIterator { type Item = io::Result<ChainSector>; fn next(&mut self) -> Option<io::Result<ChainSector>> { let location = match self.next_sector.take() { Some(next) => next, None => return None, }; // Loop detection. if !self.visited_sectors.insert(location) { return Some(Err(DiskError::ChainLoop.into())); } // Read the next sector. { let blocks = self.blocks.borrow(); let block = match blocks.sector(location) { Ok(b) => b, Err(e) => return Some(Err(e)), }; self.block.copy_from_slice(block); } // Trim the block if needed. let size = match ChainLink::new(&self.block[..]) { Ok(ChainLink::Next(location)) => { self.next_sector = Some(location); BLOCK_SIZE // The entire sector is used. } Ok(ChainLink::Tail(size)) => size, Err(e) => return Some(Err(e)), }; let block = &self.block[..size]; Some(Ok(ChainSector { data: block.to_vec(), location, })) } } /// ChainReader objects implement the Read trait and are used to read a byte stream /// represented as a series of chained sectors on the disk image. Simple files /// (e.g. CBM PRG and SEQ files) store data in a single chain where the /// beginning track and sector is provided in the directory entry. More exotic /// file types (GEOS, REL, etc.) use more complex structures, possibly with /// multiple ChainReader objects (e.g. a GEOS VLIR file may provide a /// ChainReader for each record). pub struct ChainReader { chain: ChainIterator, block: Option<Vec<u8>>, eof: bool, } impl ChainReader { pub fn new(blocks: BlockDeviceRef, start: Location) -> ChainReader { let chain = ChainIterator::new(blocks, start); ChainReader { chain, block: None, eof: false, } } } impl io::Read for ChainReader { fn read(&mut self, mut buf: &mut [u8]) -> io::Result<usize> { let mut total_nbytes = 0; while !buf.is_empty() && !self.eof { match self.block.take() { Some(mut block) => { // Copy as much of this block as possible into the caller-provided buffer. 
let nbytes = block.len().min(buf.len()); buf[0..nbytes].copy_from_slice(&block[0..nbytes]); total_nbytes += nbytes; // Keep the unread tail of the block (if any) for the next call. if block.len() > nbytes { self.block = Some(block.split_off(nbytes)); } // Reduce the provided buffer slice to the unwritten portion. let buf_ref = &mut buf; let value: &mut [u8] = std::mem::take(buf_ref); *buf_ref = &mut value[nbytes..]; } None => { // Read the next block. match self.chain.next() { Some(Ok(mut block)) => { // discard the next-track/sector bytes self.block = Some(block.data.split_off(2)); // Loop back to the Some(_) case to process the block. } Some(Err(e)) => { self.eof = true; return Err(e); } None => self.eof = true, } } } } Ok(total_nbytes) } } /// A writer for writing data to a chain. The chain is extended as needed according to the /// allocation algorithm for the disk format. pub struct ChainWriter { blocks: BlockDeviceRef, bam: BamRef, entry: DirectoryEntry, location: Location, block: Vec<u8>, dirty: bool, } impl ChainWriter { pub fn new( blocks: BlockDeviceRef, bam: BamRef, entry: DirectoryEntry, start: Location, ) -> io::Result<ChainWriter> { // Advance to the last block in the chain. let tail_block; let mut tail_location; { let blocks = blocks.borrow(); let mut block = blocks.sector(start)?; tail_location = start; while let ChainLink::Next(location) = ChainLink::new(block)? { block = blocks.sector(location)?; tail_location = location; } tail_block = block.to_vec(); } Ok(ChainWriter { blocks, bam, entry, location: tail_location, block: tail_block, dirty: true, }) } fn increment_entry_blocks(&mut self) -> io::Result<()> { let mut blocks = self.blocks.borrow_mut(); blocks.positioned_read(&mut self.entry)?; self.entry.file_size += 1; blocks.positioned_write(&self.entry)?; Ok(()) } fn allocate_next_block(&mut self) -> io::Result<usize> { // NOTE: The ordering of these steps is important for consistency. We don't // want a block to be allocated in BAM, then not used because an error // was thrown later. // Write the current block without the updated link. self.write_current_block()?; // Find a new block. let next_location = self.bam.borrow_mut().next_free_block(None)?; // Initialize a fresh block in memory with a link indicating a tail block with // zero bytes used. (Really, two bytes used for the link, but zero data // bytes used.) for i in 2..BLOCK_SIZE { self.block[i] = 0; } ChainLink::Tail(2).to_bytes(&mut self.block[..]); // Write the fresh block to the new location self.blocks .borrow_mut() .sector_mut(next_location)? .copy_from_slice(&self.block); // Allocate the next block. self.bam.borrow_mut().allocate(next_location)?; // Increment the directory entry's file size (measured in blocks) self.increment_entry_blocks()?; // If allocation succeeds, only then do we link the current block to the next // block. let mut blocks = self.blocks.borrow_mut(); let block = match blocks.sector_mut(self.location) { Ok(block) => block, Err(e) => { // Roll back the allocation. self.bam.borrow_mut().free(next_location)?; return Err(e); } }; next_location.write_bytes(block); // Update state self.location = next_location; // Return the available bytes in the newly loaded block, which is always two // less than the block size. Ok(BLOCK_SIZE - 2) } fn
(&mut self) -> io::Result<()> { // Write the current block let mut blocks = self.blocks.borrow_mut(); blocks .sector_mut(self.location)? .copy_from_slice(&self.block); Ok(()) } } impl
write_current_block
identifier_name
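The link encoding that ChainLink::new and to_bytes implement in this file: a first byte of 0x00 marks the tail sector, the second byte then holds (used bytes - 1) so the 2..=256 count fits in one byte, and any other first byte is a track/sector pointer to the next block. A standalone restatement of that rule, not the crate's own API:

fn encode_tail(size: usize) -> [u8; 2] {
    assert!((2..=256).contains(&size));
    [0x00, (size - 1) as u8]
}

fn decode(link: [u8; 2]) -> Result<Option<usize>, &'static str> {
    if link[0] == 0 {
        if link[1] < 1 {
            // The first two bytes (the link itself) must always be allocated.
            return Err("invalid chain link");
        }
        Ok(Some(link[1] as usize + 1)) // tail: number of used bytes
    } else {
        Ok(None) // a next-track/sector link, not a tail
    }
}

fn main() {
    assert_eq!(decode(encode_tail(256)), Ok(Some(256)));
    assert_eq!(decode(encode_tail(2)), Ok(Some(2)));
    assert!(decode([0x00, 0x00]).is_err());
}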
TTA.py
0:index] arr2 = arr[index + 1:] return torch.cat((arr1, arr2), dim=0) def del_under_threshold(result, threshold=0.): idxes = [] for idx in range(len(result[0]['scores'])): if result[0]['scores'][idx] < threshold: idxes.append(idx) for i in idxes: result[0]['scores'] = del_tensor_ele(result[0]['scores'], len(result[0]['scores']) - 1) result[0]['labels'] = del_tensor_ele(result[0]['labels'], len(result[0]['labels']) - 1) result[0]['boxes'] = del_tensor_ele(result[0]['boxes'], len(result[0]['boxes']) - 1) return result def del_fusion_under_threshold(boxes, labels, scores, threshold=0.): idxes = [] for idx in range(len(scores)): if scores[idx] < threshold: idxes.append(idx) for i in idxes: scores = del_tensor_ele(scores, len(scores) - 1) labels = del_tensor_ele(labels, len(labels) - 1) boxes = del_tensor_ele(boxes, len(boxes) - 1) return boxes, labels, scores def py_cpu_nms(boxes, scores, thresh=0.55): """Pure Python NMS baseline.""" # unpack x1, y1, x2, y2 and the scores boxes = boxes.detach().numpy() x1 = boxes[:, 0] y1 = boxes[:, 1] x2 = boxes[:, 2] y2 = boxes[:, 3] scores = scores # area of every detection box areas = (x2 - x1 + 1) * (y2 - y1 + 1) # sort by confidence score in descending order # order = scores.argsort()[::-1] all_scores, order = scores.sort(descending=True) keep = [] # indices of the result boxes we keep # print(order) while int(len(order.detach().numpy())) > 0: i = order[0] keep.append(i.numpy()) # keep the highest-scoring box among the remaining ones # intersection region: top-left and bottom-right corners xx1 = np.maximum(x1[i], x1[order[1:]]) yy1 = np.maximum(y1[i], y1[order[1:]]) xx2 = np.minimum(x2[i], x2[order[1:]]) yy2 = np.minimum(y2[i], y2[order[1:]]) # intersection area; zero when the boxes do not overlap w = np.maximum(0.0, xx2 - xx1 + 1) h = np.maximum(0.0, yy2 - yy1 + 1) inter = w * h # IoU = intersection / (area1 + area2 - intersection) ovr = inter / (areas[i] + areas[order[1:]] - inter) # keep the boxes whose IoU is below the threshold inds = np.where(ovr <= thresh)[0] order = order[inds + 1] # ovr is one element shorter than order, so shift all indices forward by one return keep def soft_nms(bboxes, scores, Nt=0.3, sigma2=0.5, score_thresh=0.001, method=2): # append an index column [0, 1, 2, ...] to bboxes; the final shape is [n, 5]: four coordinates plus the index # res_bboxes = deepcopy(bboxes) N = bboxes.shape[0] # total number of boxes indexes = np.array([np.arange(N)]) # indices: 0, 1, 2, ..., n-1 bboxes = bboxes.detach().numpy() bboxes = np.concatenate((bboxes, indexes.T), axis=1) # after the concatenate, operations on bboxes no longer affect the caller's variable # area of every box x1 = bboxes[:, 0] y1 = bboxes[:, 1] x2 = bboxes[:, 2] y2 = bboxes[:, 3] scores = scores areas = (x2 - x1 + 1) * (y2 - y1 + 1) scores, order = scores.sort(descending=True)
pos = i + 1 if i != N - 1: maxscore = np.max(scores[pos:], axis=0) maxpos = np.argmax(scores[pos:], axis=0) else: maxscore = scores[-1] maxpos = 0 # if the score at i is smaller than the largest score behind it, swap them so that position i holds the maximum if scores[i] < maxscore: bboxes[[i, maxpos + i + 1]] = bboxes[[maxpos + i + 1, i]] scores[[i, maxpos + i + 1]] = scores[[maxpos + i + 1, i]] areas[[i, maxpos + i + 1]] = areas[[maxpos + i + 1, i]] # IoU calculate xx1 = np.maximum(bboxes[i, 0], bboxes[pos:, 0]) yy1 = np.maximum(bboxes[i, 1], bboxes[pos:, 1]) xx2 = np.minimum(bboxes[i, 2], bboxes[pos:, 2]) yy2 = np.minimum(bboxes[i, 3], bboxes[pos:, 3]) w = np.maximum(0.0, xx2 - xx1 + 1) h = np.maximum(0.0, yy2 - yy1 + 1) intersection = w * h iou = intersection / (areas[i] + areas[pos:] - intersection) # Three methods: 1.linear 2.gaussian 3.original NMS if method == 1: # linear weight = np.ones(iou.shape) weight[iou > Nt] = weight[iou > Nt] - iou[iou > Nt] elif method == 2: # gaussian weight = np.exp(-(iou * iou) / sigma2) else: # original NMS weight = np.ones(iou.shape) weight[iou > Nt] = 0 scores[pos:] = weight * scores[pos:] # select the boxes and keep the corresponding indexes inds = bboxes[:, 4][scores > score_thresh] keep = inds.astype(int) return keep # image_path = './data/Images/2020-01-11_21_43_14_145.jpg' # image_path = './data/Images/2020-03-07_08_34_30_467.jpg' # image_path = './data/Images/2020-01-11_21_41_15_002.jpg' image_path = './data/Images/2020-01-11_21_36_02_642.jpg' # image_path = './data/Images/2020-03-10_16_18_20_688.jpg' # image_path = './data/Images/2021-05-29-18-44-02.jpg' # image_path = './data/Images/2021-05-16-18-51-54.jpg' # image_path = './data/Images/2021-05-16-14-58-28.jpg' # model_path = '/home/user/Public/Jiawei_Lian/HW_defect_detection/ckpt/0.5959/model_23_5959_5288.pth' # model_path = '/home/user/Public/Jiawei_Lian/HW_defect_detection/ckpt/model_0.pth' model_path = '/home/user/Public/Jiawei_Lian/HW_defect_detection/ckpt/0.5932/model_8_5932.pth' results = [] predictions = [] # you can try your own combinations: transform1 = TTACompose([ TTAHorizontalFlip(), # TTAVerticalFlip() ]) transform2 = TTACompose([ # TTAHorizontalFlip(), TTAVerticalFlip() ]) fig, ax = plt.subplots(3, 2, figsize=(16, 10)) image1 = Image.open(image_path).convert("RGB") image1_vf = F.vflip(image1) image_tensor = torch.from_numpy(np.array(image1)) image_tensor_vf = torch.from_numpy(np.array(image1_vf)) # image_tensor = image_tensor.permute(0
scores = scores.detach().numpy() for i in range(N): # find the largest score behind position i and its index
random_line_split
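py_cpu_nms above is the classic greedy suppression loop: take the highest-scoring box, drop every remaining box whose IoU with it exceeds the threshold, and repeat. The same algorithm restated in Rust, keeping the +1 area convention of the original; boxes are [x1, y1, x2, y2]:

fn nms(boxes: &[[f32; 4]], scores: &[f32], thresh: f32) -> Vec<usize> {
    let area = |b: &[f32; 4]| (b[2] - b[0] + 1.0) * (b[3] - b[1] + 1.0);
    let iou = |i: usize, j: usize| {
        let (a, b) = (&boxes[i], &boxes[j]);
        // Intersection is zero when the boxes do not overlap.
        let w = (a[2].min(b[2]) - a[0].max(b[0]) + 1.0).max(0.0);
        let h = (a[3].min(b[3]) - a[1].max(b[1]) + 1.0).max(0.0);
        let inter = w * h;
        inter / (area(a) + area(b) - inter)
    };
    // Indices sorted by confidence score, descending.
    let mut order: Vec<usize> = (0..boxes.len()).collect();
    order.sort_by(|&a, &b| scores[b].partial_cmp(&scores[a]).unwrap());
    let mut keep = Vec::new();
    while !order.is_empty() {
        let i = order[0];
        keep.push(i);
        // Retain only boxes whose IoU with the kept box is below the threshold.
        let rest: Vec<usize> = order[1..].iter().copied().filter(|&j| iou(i, j) <= thresh).collect();
        order = rest;
    }
    keep
}

fn main() {
    let boxes = [[0.0, 0.0, 10.0, 10.0], [1.0, 1.0, 11.0, 11.0], [20.0, 20.0, 30.0, 30.0]];
    let scores = [0.9, 0.8, 0.7];
    assert_eq!(nms(&boxes, &scores, 0.5), vec![0, 2]); // box 1 is suppressed by box 0
}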
TTA.py
0:index] arr2 = arr[index + 1:] return torch.cat((arr1, arr2), dim=0) def del_under_threshold(result, threshold=0.): idxes = [] for idx in range(len(result[0]['scores'])): if result[0]['scores'][idx] < threshold:
for i in idxes: result[0]['scores'] = del_tensor_ele(result[0]['scores'], len(result[0]['scores']) - 1) result[0]['labels'] = del_tensor_ele(result[0]['labels'], len(result[0]['labels']) - 1) result[0]['boxes'] = del_tensor_ele(result[0]['boxes'], len(result[0]['boxes']) - 1) return result def del_fusion_under_threshold(boxes, labels, scores, threshold=0.): idxes = [] for idx in range(len(scores)): if scores[idx] < threshold: idxes.append(idx) for i in idxes: scores = del_tensor_ele(scores, len(scores) - 1) labels = del_tensor_ele(labels, len(labels) - 1) boxes = del_tensor_ele(boxes, len(boxes) - 1) return boxes, labels, scores def py_cpu_nms(boxes, scores, thresh=0.55): """Pure Python NMS baseline.""" # unpack x1, y1, x2, y2 and the scores boxes = boxes.detach().numpy() x1 = boxes[:, 0] y1 = boxes[:, 1] x2 = boxes[:, 2] y2 = boxes[:, 3] scores = scores # area of every detection box areas = (x2 - x1 + 1) * (y2 - y1 + 1) # sort by confidence score in descending order # order = scores.argsort()[::-1] all_scores, order = scores.sort(descending=True) keep = [] # indices of the result boxes we keep # print(order) while int(len(order.detach().numpy())) > 0: i = order[0] keep.append(i.numpy()) # keep the highest-scoring box among the remaining ones # intersection region: top-left and bottom-right corners xx1 = np.maximum(x1[i], x1[order[1:]]) yy1 = np.maximum(y1[i], y1[order[1:]]) xx2 = np.minimum(x2[i], x2[order[1:]]) yy2 = np.minimum(y2[i], y2[order[1:]]) # intersection area; zero when the boxes do not overlap w = np.maximum(0.0, xx2 - xx1 + 1) h = np.maximum(0.0, yy2 - yy1 + 1) inter = w * h # IoU = intersection / (area1 + area2 - intersection) ovr = inter / (areas[i] + areas[order[1:]] - inter) # keep the boxes whose IoU is below the threshold inds = np.where(ovr <= thresh)[0] order = order[inds + 1] # ovr is one element shorter than order, so shift all indices forward by one return keep def soft_nms(bboxes, scores, Nt=0.3, sigma2=0.5, score_thresh=0.001, method=2): # append an index column [0, 1, 2, ...] to bboxes; the final shape is [n, 5]: four coordinates plus the index # res_bboxes = deepcopy(bboxes) N = bboxes.shape[0] # total number of boxes indexes = np.array([np.arange(N)]) # indices: 0, 1, 2, ..., n-1 bboxes = bboxes.detach().numpy() bboxes = np.concatenate((bboxes, indexes.T), axis=1) # after the concatenate, operations on bboxes no longer affect the caller's variable # area of every box x1 = bboxes[:, 0] y1 = bboxes[:, 1] x2 = bboxes[:, 2] y2 = bboxes[:, 3] scores = scores areas = (x2 - x1 + 1) * (y2 - y1 + 1) scores, order = scores.sort(descending=True) scores = scores.detach().numpy() for i in range(N): # find the largest score behind position i and its index pos = i + 1 if i != N - 1: maxscore = np.max(scores[pos:], axis=0) maxpos = np.argmax(scores[pos:], axis=0) else: maxscore = scores[-1] maxpos = 0 # if the score at i is smaller than the largest score behind it, swap them so that position i holds the maximum if scores[i] < maxscore: bboxes[[i, maxpos + i + 1]] = bboxes[[maxpos + i + 1, i]] scores[[i, maxpos + i + 1]] = scores[[maxpos + i + 1, i]] areas[[i, maxpos + i + 1]] = areas[[maxpos + i + 1, i]] # IoU calculate xx1 = np.maximum(bboxes[i, 0], bboxes[pos:, 0]) yy1 = np.maximum(bboxes[i, 1], bboxes[pos:, 1]) xx2 = np.minimum(bboxes[i, 2], bboxes[pos:, 2]) yy2 = np.minimum(bboxes[i, 3], bboxes[pos:, 3]) w = np.maximum(0.0, xx2 - xx1 + 1) h = np.maximum(0.0, yy2 - yy1 + 1) intersection = w * h iou = intersection / (areas[i] + areas[pos:] - intersection) # Three methods: 1.linear 2.gaussian 3.original NMS if method == 1: # linear weight = np.ones(iou.shape) weight[iou > Nt] = weight[iou > Nt] - iou[iou > Nt] elif method == 2: # gaussian weight = np.exp(-(iou * iou) / sigma2) else: # original NMS weight = np.ones(iou.shape) weight[iou > Nt] = 0 scores[pos:] = weight * scores[pos:] # select the boxes and keep the corresponding indexes inds = bboxes[:, 4][scores > score_thresh] keep = inds.astype(int) return keep # image_path = 
'./data/Images/2020-01-11_21_43_14_145.jpg' # image_path = './data/Images/2020-03-07_08_34_30_467.jpg' # image_path = './data/Images/2020-01-11_21_41_15_002.jpg' image_path = './data/Images/2020-01-11_21_36_02_642.jpg' # image_path = './data/Images/2020-03-10_16_18_20_688.jpg' # image_path = './data/Images/2021-05-29-18-44-02.jpg' # image_path = './data/Images/2021-05-16-18-51-54.jpg' # image_path = './data/Images/2021-05-16-14-58-28.jpg' # model_path = '/home/user/Public/Jiawei_Lian/HW_defect_detection/ckpt/0.5959/model_23_5959_5288.pth' # model_path = '/home/user/Public/Jiawei_Lian/HW_defect_detection/ckpt/model_0.pth' model_path = '/home/user/Public/Jiawei_Lian/HW_defect_detection/ckpt/0.5932/model_8_5932.pth' results = [] predictions = [] # you can try your own combinations: transform1 = TTACompose([ TTAHorizontalFlip(), # TTAVerticalFlip() ]) transform2 = TTACompose([ # TTAHorizontalFlip(), TTAVerticalFlip() ]) fig, ax = plt.subplots(3, 2, figsize=(16, 10)) image1 = Image.open(image_path).convert("RGB") image1_vf = F.vflip(image1) image_tensor = torch.from_numpy(np.array(image1)) image_tensor_vf = torch.from_numpy(np.array(image1_vf)) # image_tensor = image_tensor.permute
idxes.append(idx)
conditional_block
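The py_cpu_nms routine in the record above implements greedy IoU-based suppression: keep the highest-scoring box, drop every remaining box that overlaps it past a threshold, repeat. A minimal standalone sketch of the same algorithm in Go, for reference; Box, iou, and nms are illustrative names, not part of any file shown here.

package main

import (
	"fmt"
	"math"
	"sort"
)

type Box struct {
	X1, Y1, X2, Y2, Score float64
}

// iou returns intersection-over-union, with the same +1 pixel convention
// as the Python baseline above.
func iou(a, b Box) float64 {
	w := math.Max(0, math.Min(a.X2, b.X2)-math.Max(a.X1, b.X1)+1)
	h := math.Max(0, math.Min(a.Y2, b.Y2)-math.Max(a.Y1, b.Y1)+1)
	inter := w * h
	areaA := (a.X2 - a.X1 + 1) * (a.Y2 - a.Y1 + 1)
	areaB := (b.X2 - b.X1 + 1) * (b.Y2 - b.Y1 + 1)
	return inter / (areaA + areaB - inter)
}

// nms keeps the highest-scoring box, discards every remaining box whose
// IoU with it exceeds thresh, and repeats until no boxes are left.
func nms(boxes []Box, thresh float64) []Box {
	sort.Slice(boxes, func(i, j int) bool { return boxes[i].Score > boxes[j].Score })
	var keep []Box
	for len(boxes) > 0 {
		best := boxes[0]
		keep = append(keep, best)
		var remaining []Box
		for _, b := range boxes[1:] {
			if iou(best, b) <= thresh {
				remaining = append(remaining, b)
			}
		}
		boxes = remaining
	}
	return keep
}

func main() {
	boxes := []Box{
		{10, 10, 50, 50, 0.9},
		{12, 12, 52, 52, 0.8}, // heavy overlap with the first box
		{100, 100, 140, 140, 0.7},
	}
	fmt.Println(nms(boxes, 0.55)) // the second box is suppressed
}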
TTA.py
class TTAVerticalFlip(BaseWheatTTA): """ author: @shonenkov """ def augment(self, image): return image def batch_augment(self, images): return images.flip(3) def deaugment_boxes(self, boxes, image): height = image.height boxes[:, [3, 1]] = height - boxes[:, [1, 3]] return boxes class TTACompose(BaseWheatTTA): """ author: @shonenkov """ def __init__(self, transforms): self.transforms = transforms def augment(self, image): for transform in self.transforms: image = transform.augment(image) return image def batch_augment(self, images): for transform in self.transforms: images = transform.batch_augment(images) return images def prepare_boxes(self, boxes): result_boxes = boxes boxes[:, 0], idx = result_boxes[:, [0, 2]].min(1) boxes[:, 0], idx = result_boxes[:, [0, 2]].min(1) boxes[:, 0], idx = result_boxes[:, [0, 2]].min(1) boxes[:, 0], idx = result_boxes[:, [0, 2]].min(1) return boxes def deaugment_boxes(self, boxes, image): for transform in self.transforms[::-1]: boxes = transform.deaugment_boxes(boxes, image) return self.prepare_boxes(boxes) def tensor_to_PIL(tensor): image = tensor.cpu().clone() image = image.squeeze(0) image = transforms.ToPILImage()(image) return image def del_tensor_ele(arr, index): arr1 = arr[0:index] arr2 = arr[index + 1:] return torch.cat((arr1, arr2), dim=0) def del_under_threshold(result, threshold=0.): idxes = [] for idx in range(len(result[0]['scores'])): if result[0]['scores'][idx] < threshold: idxes.append(idx) for i in idxes: result[0]['scores'] = del_tensor_ele(result[0]['scores'], len(result[0]['scores']) - 1) result[0]['labels'] = del_tensor_ele(result[0]['labels'], len(result[0]['labels']) - 1) result[0]['boxes'] = del_tensor_ele(result[0]['boxes'], len(result[0]['boxes']) - 1) return result def del_fusion_under_threshold(boxes, labels, scores, threshold=0.): idxes = [] for idx in range(len(scores)): if scores[idx] < threshold: idxes.append(idx) for i in idxes: scores = del_tensor_ele(scores, len(scores) - 1) labels = del_tensor_ele(labels, len(labels) - 1) boxes = del_tensor_ele(boxes, len(boxes) - 1) return boxes, labels, scores def py_cpu_nms(boxes, scores, thresh=0.55): """Pure Python NMS baseline.""" # x1、y1、x2、y2、以及score赋值 boxes = boxes.detach().numpy() x1 = boxes[:, 0] y1 = boxes[:, 1] x2 = boxes[:, 2] y2 = boxes[:, 3] scores = scores # 每一个检测框的面积 areas = (x2 - x1 + 1) * (y2 - y1 + 1) # 按照score置信度降序排序 # order = scores.argsort()[::-1] all_scores, order = scores.sort(descending=True) keep = [] # 保留的结果框集合 # print(order) while int(len(order.detach().numpy())) > 0: i = order[0] keep.append(i.numpy()) # 保留该类剩余box中得分最高的一个 # 得到相交区域,左上及右下 xx1 = np.maximum(x1[i], x1[order[1:]]) yy1 = np.maximum(y1[i], y1[order[1:]]) xx2 = np.minimum(x2[i], x2[order[1:]]) yy2 = np.minimum(y2[i], y2[order[1:]]) # 计算相交的面积,不重叠时面积为0 w = np.maximum(0.0, xx2 - xx1 + 1) h = np.maximum(0.0, yy2 - yy1 + 1) inter = w * h # 计算IoU:重叠面积 /(面积1+面积2-重叠面积) ovr = inter / (areas[i] + areas[order[1:]] - inter) # 保留IoU小于阈值的box inds = np.where(ovr <= thresh)[0] order = order[inds + 1] # 因为ovr数组的长度比order数组少一个,所以这里要将所有下标后移一位 return keep def soft_nms(bboxes, scores, Nt=0.3, sigma2=0.5, score_thresh=0.001, method=2): # 在 bboxes 之后添加对于的下标[0, 1, 2...], 最终 bboxes 的 shape 为 [n, 5], 前四个为坐标, 后一个为下标 # res_bboxes = deepcopy(bboxes) N = bboxes.shape[0] # 总的 box 的数量 indexes = np.array([np.arange(N)]) # 下标: 0, 1, 2, ..., n-1 bboxes = bboxes.detach().numpy() bboxes = np.concatenate((bboxes, indexes.T), axis=1) # concatenate 之后, bboxes 的操作不会对外部变量产生影响 # 计算每个 box 的面积 x1 = bboxes[:, 0] y1 = bboxes[:, 1] x2 = bboxes[:, 2] 
y2 = bboxes[:, 3] scores = scores areas = (x2 - x1 + 1) * (y2 - y1 + 1) scores, order = scores.sort(descending=True) scores = scores.detach().numpy() for i in range(N): # 找出 i 后面的最大 score 及其下标 pos = i + 1 if i != N - 1: maxscore = np.max(scores[pos:], axis=0) maxpos = np.argmax(scores[pos:], axis=0) else: maxscore = scores[-1] maxpos = 0 # 如果当前 i 的得分小于后面的最大 score, 则与之交换, 确保 i 上的 score 最大 if scores[i] < maxscore: bboxes[[i, maxpos + i + 1]] = bboxes[[maxpos + i + 1, i]] scores[[i, maxpos + i + 1]] = scores[[maxpos + i + 1, i]] areas[[i, maxpos + i + 1]] = areas[[maxpos + i + 1, i]] # IoU calculate xx1 = np.maximum(bboxes[i, 0], bboxes[pos:, 0]) yy1 = np.maximum(bboxes[i, 1], bboxes[pos:, 1]) xx2 = np.minimum(bboxes[i, 2], bboxes[pos:, 2]) yy2 = np.minimum(bboxes[i, 3], bboxes[pos:, 3]) w = np.maximum(0.0, xx2 - xx1 + 1) h = np.maximum(0.0, yy2 - yy1 + 1) intersection = w * h iou = intersection / (areas[i] + areas[pos:] - intersection) # Three methods: 1.linear 2.gaussian 3.original NMS if method == 1: # linear weight = np.ones(iou.shape) weight[iou > Nt] = weight[iou > Nt] - iou[iou > Nt] elif method == 2: # gaussian weight = np.exp(-(iou * iou) / sigma2) else: # original NMS weight = np.ones(iou.shape) weight[iou > Nt] = 0 scores[pos:] = weight * scores[pos:] # select the boxes and keep the corresponding indexes inds = bboxes[:, 4][scores > score_thresh] keep = inds.astype(int) return keep # image_path = './data/Images/2020-01-11_21_43_14_145.jpg' # image_path = './data/Images/2020-03-07_08_34_30_467.jpg' # image_path = './data/Images/2020-01-11_
width = image.width
boxes[:, [2, 0]] = width - boxes[:, [0, 2]]
return boxes
identifier_body
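The middle above is TTAHorizontalFlip's de-augmentation: after flipping an image of width W, a box spanning [x1, x2] maps back to [W-x2, W-x1], which also preserves x1 < x2. A tiny Go sketch of that coordinate mapping, with deaugmentX as an illustrative helper:

package main

import "fmt"

// deaugmentX undoes a horizontal flip for one box's x-extent.
func deaugmentX(width, x1, x2 float64) (nx1, nx2 float64) {
	return width - x2, width - x1
}

func main() {
	// On a 512-wide image, a box spanning x in [100, 200] flips to [312, 412].
	nx1, nx2 := deaugmentX(512, 100, 200)
	fmt.Println(nx1, nx2) // 312 412
}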
TTA.py
: """ author: @shonenkov """ image_size = 512 def augment(self, image): raise NotImplementedError def batch_augment(self, images): raise NotImplementedError def deaugment_boxes(self, boxes, image): raise NotImplementedError def get_object_detector(num_classes): # load an instance segmentation model pre-trained pre-trained on COCO model = faster_rcnn.fasterrcnn_resnet50_fpn(pretrained=False) # get number of input features for the classifier in_features = model.roi_heads.box_predictor.cls_score.in_features # replace the pre-trained head with a new one model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes) return model class TTAHorizontalFlip(BaseWheatTTA): """ author: @shonenkov """ def augment(self, image): return image.flip(1) def batch_augment(self, images): return images.flip(2) def deaugment_boxes(self, boxes, image): width = image.width boxes[:, [2, 0]] = width - boxes[:, [0, 2]] return boxes class TTAVerticalFlip(BaseWheatTTA): """ author: @shonenkov """ def augment(self, image): return image def batch_augment(self, images): return images.flip(3) def deaugment_boxes(self, boxes, image): height = image.height boxes[:, [3, 1]] = height - boxes[:, [1, 3]] return boxes class TTACompose(BaseWheatTTA): """ author: @shonenkov """ def __init__(self, transforms): self.transforms = transforms def augment(self, image): for transform in self.transforms: image = transform.augment(image) return image def batch_augment(self, images): for transform in self.transforms: images = transform.batch_augment(images) return images def prepare_boxes(self, boxes): result_boxes = boxes boxes[:, 0], idx = result_boxes[:, [0, 2]].min(1) boxes[:, 0], idx = result_boxes[:, [0, 2]].min(1) boxes[:, 0], idx = result_boxes[:, [0, 2]].min(1) boxes[:, 0], idx = result_boxes[:, [0, 2]].min(1) return boxes def deaugment_boxes(self, boxes, image): for transform in self.transforms[::-1]: boxes = transform.deaugment_boxes(boxes, image) return self.prepare_boxes(boxes) def tensor_to_PIL(tensor): image = tensor.cpu().clone() image = image.squeeze(0) image = transforms.ToPILImage()(image) return image def del_tensor_ele(arr, index): arr1 = arr[0:index] arr2 = arr[index + 1:] return torch.cat((arr1, arr2), dim=0) def del_under_threshold(result, threshold=0.): idxes = [] for idx in range(len(result[0]['scores'])): if result[0]['scores'][idx] < threshold: idxes.append(idx) for i in idxes: result[0]['scores'] = del_tensor_ele(result[0]['scores'], len(result[0]['scores']) - 1) result[0]['labels'] = del_tensor_ele(result[0]['labels'], len(result[0]['labels']) - 1) result[0]['boxes'] = del_tensor_ele(result[0]['boxes'], len(result[0]['boxes']) - 1) return result def del_fusion_under_threshold(boxes, labels, scores, threshold=0.): idxes = [] for idx in range(len(scores)): if scores[idx] < threshold: idxes.append(idx) for i in idxes: scores = del_tensor_ele(scores, len(scores) - 1) labels = del_tensor_ele(labels, len(labels) - 1) boxes = del_tensor_ele(boxes, len(boxes) - 1) return boxes, labels, scores def py_cpu_nms(boxes, scores, thresh=0.55): """Pure Python NMS baseline.""" # x1、y1、x2、y2、以及score赋值 boxes = boxes.detach().numpy() x1 = boxes[:, 0] y1 = boxes[:, 1] x2 = boxes[:, 2] y2 = boxes[:, 3] scores = scores # 每一个检测框的面积 areas = (x2 - x1 + 1) * (y2 - y1 + 1) # 按照score置信度降序排序 # order = scores.argsort()[::-1] all_scores, order = scores.sort(descending=True) keep = [] # 保留的结果框集合 # print(order) while int(len(order.detach().numpy())) > 0: i = order[0] keep.append(i.numpy()) # 保留该类剩余box中得分最高的一个 # 得到相交区域,左上及右下 xx1 = 
np.maximum(x1[i], x1[order[1:]]) yy1 = np.maximum(y1[i], y1[order[1:]]) xx2 = np.minimum(x2[i], x2[order[1:]]) yy2 = np.minimum(y2[i], y2[order[1:]]) # 计算相交的面积,不重叠时面积为0 w = np.maximum(0.0, xx2 - xx1 + 1) h = np.maximum(0.0, yy2 - yy1 + 1) inter = w * h # 计算IoU:重叠面积 /(面积1+面积2-重叠面积) ovr = inter / (areas[i] + areas[order[1:]] - inter) # 保留IoU小于阈值的box inds = np.where(ovr <= thresh)[0] order = order[inds + 1] # 因为ovr数组的长度比order数组少一个,所以这里要将所有下标后移一位 return keep def soft_nms(bboxes, scores, Nt=0.3, sigma2=0.5, score_thresh=0.001, method=2): # 在 bboxes 之后添加对于的下标[0, 1, 2...], 最终 bboxes 的 shape 为 [n, 5], 前四个为坐标, 后一个为下标 # res_bboxes = deepcopy(bboxes) N = bboxes.shape[0] # 总的 box 的数量 indexes = np.array([np.arange(N)]) # 下标: 0, 1, 2, ..., n-1 bboxes = bboxes.detach().numpy() bboxes = np.concatenate((bboxes, indexes.T), axis=1) # concatenate 之后, bboxes 的操作不会对外部变量产生影响 # 计算每个 box 的面积 x1 = bboxes[:, 0] y1 = bboxes[:, 1] x2 = bboxes[:, 2] y2 = bboxes[:, 3] scores = scores areas = (x2 - x1 + 1) * (y2 - y1 + 1) scores, order = scores.sort(descending=True) scores = scores.detach().numpy() for i in range(N): # 找出 i 后面的最大 score 及其下标 pos = i + 1 if i != N - 1: maxscore = np.max(scores[pos:], axis=0) maxpos = np.argmax(scores[pos:], axis=0) else: maxscore = scores[-1] maxpos = 0 # 如果当前 i 的得分小于后面的最大 score, 则与之交换, 确保 i 上的 score 最大 if scores[i] < maxscore: bboxes[[i, maxpos + i + 1]] = bboxes[[maxpos + i + 1, i]] scores[[i, maxpos + i + 1]] = scores[[maxpos + i + 1, i]] areas[[i, maxpos + i + 1]] = areas[[maxpos + i + 1, i]] # IoU calculate xx1 = np.maximum(bboxes[i, 0], bboxes[pos:, 0]) yy1 = np.maximum(bboxes[i, 1], bboxes[pos:, 1]) xx2 = np.minimum(bboxes[i, 2], bboxes[pos:, 2]) yy2 = np.minimum(bboxes[i, 3], bboxes[pos:, 3]) w = np.maximum(0.0, xx2 - xx1 + 1) h = np.maximum(0.0, yy2 - yy1 + 1) intersection = w * h iou = intersection / (areas[i] + areas[pos:] - intersection) # Three methods: 1.linear 2.gaussian 3.original NMS
BaseWheatTTA
identifier_name
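The soft_nms function in the records above supports three re-scoring rules: linear decay, Gaussian decay, and the hard cut-off of classic NMS. A sketch of just that weighting step in Go; parameter names mirror the Python (Nt, sigma2), and softNMSWeight is an illustrative stand-in, not code from the repo.

package main

import (
	"fmt"
	"math"
)

func softNMSWeight(iou, nt, sigma2 float64, method int) float64 {
	switch method {
	case 1: // linear: down-weight only boxes past the IoU threshold
		if iou > nt {
			return 1 - iou
		}
		return 1
	case 2: // gaussian: smooth decay for every overlapping box
		return math.Exp(-(iou * iou) / sigma2)
	default: // classic NMS: zero out boxes past the threshold
		if iou > nt {
			return 0
		}
		return 1
	}
}

func main() {
	for _, m := range []int{1, 2, 3} {
		fmt.Printf("method %d, IoU 0.6 -> weight %.3f\n", m, softNMSWeight(0.6, 0.3, 0.5, m))
	}
}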
app_multiple_databus.go
time time.Time) (index string) { var ( week = map[int]string{ 0: "0108", 1: "0916", 2: "1724", 3: "2531", } ) return strings.Replace(time.Format(format), "week", week[time.Day()/9], -1) } // NewAppMultipleDatabus . func NewAppMultipleDatabus(d *Dao, appid string) (amd *AppMultipleDatabus) { var err error amd = &AppMultipleDatabus{ d: d, appid: appid, attrs: d.AttrPool[appid], offsets: make(map[int]*model.LoopOffset), tableName: []string{}, indexNameSuffix: []string{}, commits: make(map[int32]*databus.Message), } amd.db = d.DBPool[amd.attrs.DBName] amd.dtb = d.DatabusPool[amd.attrs.Databus.Databus] if amd.attrs.Table.TableSplit == "int" || amd.attrs.Table.TableSplit == "single" { for i := amd.attrs.Table.TableFrom; i <= amd.attrs.Table.TableTo; i++ { tableName := fmt.Sprintf("%s%0"+amd.attrs.Table.TableZero+"d", amd.attrs.Table.TablePrefix, i) amd.tableName = append(amd.tableName, tableName) amd.offsets[i] = &model.LoopOffset{} } } else { var tableNameSuffix []string tableFormat := strings.Split(amd.attrs.Table.TableFormat, ",") if tableNameSuffix, err = amd.IndexNameSuffix(tableFormat[0], tableFormat[1]); err != nil { log.Error("amd.IndexNameSuffix(%v)", err) return } for _, v := range tableNameSuffix { amd.tableName = append(amd.tableName, amd.attrs.Table.TablePrefix+v) } for i := range amd.tableName { amd.offsets[i] = &model.LoopOffset{} } } return } // Business return business. func (amd *AppMultipleDatabus) Business() string { return amd.attrs.Business } // InitIndex . func (amd *AppMultipleDatabus) InitIndex(c context.Context) { var ( err error indexAliasName string indexEntityName string ) indexFormat := strings.Split(amd.attrs.Index.IndexFormat, ",") aliases, aliasErr := amd.d.GetAliases(amd.attrs.ESName, amd.attrs.Index.IndexAliasPrefix) if indexFormat[0] == "int" || indexFormat[0] == "single" { for i := amd.attrs.Index.IndexFrom; i <= amd.attrs.Index.IndexTo; i++ { // == "0" 有问题,不通用 if amd.attrs.Index.IndexZero == "0" { indexAliasName = amd.attrs.Index.IndexAliasPrefix indexEntityName = amd.attrs.Index.IndexEntityPrefix } else { indexAliasName = fmt.Sprintf("%s%0"+amd.attrs.Index.IndexZero+"d", amd.attrs.Index.IndexAliasPrefix, i) indexEntityName = fmt.Sprintf("%s%0"+amd.attrs.Index.IndexZero+"d", amd.attrs.Index.IndexEntityPrefix, i) } if aliasErr != nil { amd.d.InitIndex(c, nil, amd.attrs.ESName, indexAliasName, indexEntityName, amd.attrs.Index.IndexMapping) } else { amd.d.InitIndex(c, aliases, amd.attrs.ESName, indexAliasName, indexEntityName, amd.attrs.Index.IndexMapping) } } } else { if amd.indexNameSuffix, err = amd.IndexNameSuffix(indexFormat[0], indexFormat[1]); err != nil { log.Error("amd.IndexNameSuffix(%v)", err) return } for _, v := range amd.indexNameSuffix { if aliasErr != nil { amd.d.InitIndex(c, nil, amd.attrs.ESName, amd.attrs.Index.IndexAliasPrefix+v, amd.attrs.Index.IndexEntityPrefix+v, amd.attrs.Index.IndexMapping) } else { amd.d.InitIndex(c, aliases, amd.attrs.ESName, amd.attrs.Index.IndexAliasPrefix+v, amd.attrs.Index.IndexEntityPrefix+v, amd.attrs.Index.IndexMapping) } } } } // InitOffset insert init value to offset. func (amd *AppMultipleDatabus) InitOffset(c context.Context) { amd.d.InitOffset(c, amd.offsets[0], amd.attrs, amd.tableName) } // Offset . 
func (amd *AppMultipleDatabus) Offset(c context.Context) { for i, v := range amd.tableName { offset, err := amd.d.Offset(c, amd.attrs.AppID, v) if err != nil { log.Error("amd.d.offset error(%v)", err) time.Sleep(time.Second * 3) } amd.offsets[i].SetReview(offset.ReviewID, offset.ReviewTime) amd.offsets[i].SetOffset(offset.OffsetID(), offset.OffsetTime()) } } // SetRecover set recover func (amd *AppMultipleDatabus) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) { amd.offsets.SetRecoverOffsets(i, recoverID, recoverTime) } // IncrMessages . func (amd *AppMultipleDatabus) IncrMessages(c context.Context) (length int, err error) { ticker := time.NewTicker(time.Duration(time.Millisecond * time.Duration(amd.attrs.Databus.Ticker))) defer ticker.Stop() for { select { case msg, ok := <-amd.dtb.Messages(): if !ok { log.Error("databus: %s binlog consumer exit!!!", amd.attrs.Databus) break } m := &model.Message{} amd.commits[msg.Partition] = msg if err = json.Unmarshal(msg.Value, m); err != nil { log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err) continue } if amd.attrs.Business == "creative_reply" { r, _ := regexp.Compile("reply_\\d+") if !r.MatchString(m.Table) { continue } } if (amd.attrs.Table.TableSplit == "string" && m.Table == amd.attrs.Table.TablePrefix) || (amd.attrs.Table.TableSplit != "string" && strings.HasPrefix(m.Table, amd.attrs.Table.TablePrefix)) { if m.Action == "insert" || m.Action == "update" { var parseMap map[string]interface{} parseMap, err = amd.d.JSON2map(m.New) if err != nil { log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err) continue } // esports fav type filter if amd.attrs.AppID == "esports_fav" { if t, ok := parseMap["type"]; ok && t.(int64) != 10 { continue } } // playlist fav type and attr filter if amd.attrs.AppID == "fav_playlist" { if t, ok := parseMap["type"]; ok && t.(int64) != 2 { continue
if t, ok := parseMap["attr"]; ok { if t.(int64)>>0&1 == 0 || (m.Action == "insert" && t.(int64)>>1&1 == 1) { continue } } } var newParseMap map[string]interface{} newParseMap, err = amd.newParseMap(c, m.Table, parseMap) if err != nil { if amd.attrs.AppID == "creative_reply" { continue } log.Error("amd.newParseMap error(%v)", err) continue } amd.mapData = append(amd.mapData, newParseMap) } } if len(amd.mapData) < amd.attrs.Databus.AggCount { continue } case <-ticker.C: } break } if len(amd.mapData) > 0 { amd.mapData, err = amd.d.ExtraData(c, amd.mapData, amd.attrs, "dtb", []string{}) } length = len(amd.mapData) //amd.d.extraData(c, amd, "dtb") return } // AllMessages . func (amd *AppMultipleDatabus) AllMessages(c context.Context) (length int, err error) { amd.mapData = []model.MapData{} for i, v := range amd.tableName { var ( rows *xsql.Rows sql string ) tableFormat := strings.Split(amd.attrs.Table.TableFormat, ",") if amd.attrs.AppID == "dm_search" || amd.attrs.AppID == "dm" { sql = fmt.Sprintf(amd.attrs.DataSQL.SQLByID, amd.attrs.DataSQL.SQLFields, i, i) } else if tableFormat[0] == "int" || tableFormat[0] == "single" { // 兼容只传后缀,不传表名 sql = fmt
}
random_line_split
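IncrMessages in the record above drains a Databus channel until either AggCount messages have accumulated or a ticker fires, then flushes the batch. A stripped-down sketch of that select loop, assuming a plain string channel in place of the Databus client; msgs, aggCount, and tick stand in for the real fields.

package main

import (
	"fmt"
	"time"
)

// drain buffers messages until aggCount is reached or the ticker fires.
func drain(msgs <-chan string, aggCount int, tick time.Duration) []string {
	ticker := time.NewTicker(tick)
	defer ticker.Stop()
	var batch []string
	for {
		select {
		case m, ok := <-msgs:
			if !ok {
				return batch // producer exited
			}
			batch = append(batch, m)
			if len(batch) < aggCount {
				continue // keep aggregating
			}
		case <-ticker.C:
			// flush whatever has accumulated when the ticker fires
		}
		return batch
	}
}

func main() {
	msgs := make(chan string, 4)
	msgs <- "a"
	msgs <- "b"
	fmt.Println(drain(msgs, 2, 50*time.Millisecond)) // [a b]
}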
app_multiple_databus.go
string indexEntityName string ) indexFormat := strings.Split(amd.attrs.Index.IndexFormat, ",") aliases, aliasErr := amd.d.GetAliases(amd.attrs.ESName, amd.attrs.Index.IndexAliasPrefix) if indexFormat[0] == "int" || indexFormat[0] == "single" { for i := amd.attrs.Index.IndexFrom; i <= amd.attrs.Index.IndexTo; i++ { // == "0" 有问题,不通用 if amd.attrs.Index.IndexZero == "0" { indexAliasName = amd.attrs.Index.IndexAliasPrefix indexEntityName = amd.attrs.Index.IndexEntityPrefix } else { indexAliasName = fmt.Sprintf("%s%0"+amd.attrs.Index.IndexZero+"d", amd.attrs.Index.IndexAliasPrefix, i) indexEntityName = fmt.Sprintf("%s%0"+amd.attrs.Index.IndexZero+"d", amd.attrs.Index.IndexEntityPrefix, i) } if aliasErr != nil { amd.d.InitIndex(c, nil, amd.attrs.ESName, indexAliasName, indexEntityName, amd.attrs.Index.IndexMapping) } else { amd.d.InitIndex(c, aliases, amd.attrs.ESName, indexAliasName, indexEntityName, amd.attrs.Index.IndexMapping) } } } else { if amd.indexNameSuffix, err = amd.IndexNameSuffix(indexFormat[0], indexFormat[1]); err != nil { log.Error("amd.IndexNameSuffix(%v)", err) return } for _, v := range amd.indexNameSuffix { if aliasErr != nil { amd.d.InitIndex(c, nil, amd.attrs.ESName, amd.attrs.Index.IndexAliasPrefix+v, amd.attrs.Index.IndexEntityPrefix+v, amd.attrs.Index.IndexMapping) } else { amd.d.InitIndex(c, aliases, amd.attrs.ESName, amd.attrs.Index.IndexAliasPrefix+v, amd.attrs.Index.IndexEntityPrefix+v, amd.attrs.Index.IndexMapping) } } } } // InitOffset insert init value to offset. func (amd *AppMultipleDatabus) InitOffset(c context.Context) { amd.d.InitOffset(c, amd.offsets[0], amd.attrs, amd.tableName) } // Offset . func (amd *AppMultipleDatabus) Offset(c context.Context) { for i, v := range amd.tableName { offset, err := amd.d.Offset(c, amd.attrs.AppID, v) if err != nil { log.Error("amd.d.offset error(%v)", err) time.Sleep(time.Second * 3) } amd.offsets[i].SetReview(offset.ReviewID, offset.ReviewTime) amd.offsets[i].SetOffset(offset.OffsetID(), offset.OffsetTime()) } } // SetRecover set recover func (amd *AppMultipleDatabus) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) { amd.offsets.SetRecoverOffsets(i, recoverID, recoverTime) } // IncrMessages . 
func (amd *AppMultipleDatabus) IncrMessages(c context.Context) (length int, err error) { ticker := time.NewTicker(time.Duration(time.Millisecond * time.Duration(amd.attrs.Databus.Ticker))) defer ticker.Stop() for { select { case msg, ok := <-amd.dtb.Messages(): if !ok { log.Error("databus: %s binlog consumer exit!!!", amd.attrs.Databus) break } m := &model.Message{} amd.commits[msg.Partition] = msg if err = json.Unmarshal(msg.Value, m); err != nil { log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err) continue } if amd.attrs.Business == "creative_reply" { r, _ := regexp.Compile("reply_\\d+") if !r.MatchString(m.Table) { continue } } if (amd.attrs.Table.TableSplit == "string" && m.Table == amd.attrs.Table.TablePrefix) || (amd.attrs.Table.TableSplit != "string" && strings.HasPrefix(m.Table, amd.attrs.Table.TablePrefix)) { if m.Action == "insert" || m.Action == "update" { var parseMap map[string]interface{} parseMap, err = amd.d.JSON2map(m.New) if err != nil { log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err) continue } // esports fav type filter if amd.attrs.AppID == "esports_fav" { if t, ok := parseMap["type"]; ok && t.(int64) != 10 { continue } } // playlist fav type and attr filter if amd.attrs.AppID == "fav_playlist" { if t, ok := parseMap["type"]; ok && t.(int64) != 2 { continue } if t, ok := parseMap["attr"]; ok { if t.(int64)>>0&1 == 0 || (m.Action == "insert" && t.(int64)>>1&1 == 1) { continue } } } var newParseMap map[string]interface{} newParseMap, err = amd.newParseMap(c, m.Table, parseMap) if err != nil { if amd.attrs.AppID == "creative_reply" { continue } log.Error("amd.newParseMap error(%v)", err) continue } amd.mapData = append(amd.mapData, newParseMap) } } if len(amd.mapData) < amd.attrs.Databus.AggCount { continue } case <-ticker.C: } break } if len(amd.mapData) > 0 { amd.mapData, err = amd.d.ExtraData(c, amd.mapData, amd.attrs, "dtb", []string{}) } length = len(amd.mapData) //amd.d.extraData(c, amd, "dtb") return } // AllMessages . 
func (amd *AppMultipleDatabus) AllMessages(c context.Context) (length int, err error) { amd.mapData = []model.MapData{} for i, v := range amd.tableName { var ( rows *xsql.Rows sql string ) tableFormat := strings.Split(amd.attrs.Table.TableFormat, ",") if amd.attrs.AppID == "dm_search" || amd.attrs.AppID == "dm" { sql = fmt.Sprintf(amd.attrs.DataSQL.SQLByID, amd.attrs.DataSQL.SQLFields, i, i) } else if tableFormat[0] == "int" || tableFormat[0] == "single" { // 兼容只传后缀,不传表名 sql = fmt.Sprintf(amd.attrs.DataSQL.SQLByID, amd.attrs.DataSQL.SQLFields, i) log.Info(sql, amd.offsets[i].OffsetID, amd.attrs.Other.Size) } else { sql = fmt.Sprintf(amd.attrs.DataSQL.SQLByID, amd.attrs.DataSQL.SQLFields, v) } if rows, err = amd.db.Query(c, sql, amd.offsets[i].OffsetID, amd.attrs.Other.Size); err != nil { log.Error("AllMessages db.Query error(%v)", err) return } tempList := []model.MapData{} for rows.Next() { item, row := InitMapData(amd.attrs.DataSQL.DataIndexFields) if err = rows.Scan(row...); err != nil { log.Error("AppMultipleDatabus.AllMessages rows.Scan() error(%v)", err) continue } var newParseMap map[string]interface{} newParseMap, err = amd.newParseMap(c, v, item) if err != nil { log.Error("amd.newParseMap error(%v)", err) continue } tempList = append(tempList, newParseMap) amd.mapData = append(amd.mapData, newParseMap) } rows.Close() tmpLength := len(tempList) if tmpLength > 0 { amd.offsets[i].SetTempOffset(tempList[tmpLength-1].PrimaryID(), tempList[tmpLength-1].StrMTime()) } } if len(amd.mapData) > 0 { amd.mapData, err = amd.d.ExtraData(c, amd.mapData, amd.attrs, "db", []string{}) } length = len(amd.mapData) //amd.d.extraData(c, amd, "db") return } // BulkIndex . func (amd *AppMultipleDatabus) BulkIndex(c context.Context, start int, end int, writeEntityIndex bool) (err error) { partData := amd.mapData[start:end] if amd.d.c.Business.Index { err = amd.d.BulkDBData(c, amd.attrs, writeEntityIndex, partData...) } else { err = amd.d.BulkDatabusData(c, amd.attrs, writeEntityIndex, partData...) } return } // Commit . func (amd *AppMultipleDatabus) Commit(c context.Context) (err error) { if amd.d.c.Business.Index {
if
identifier_name
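NewAppMultipleDatabus above builds sharded table names by splicing a configured zero-pad width into the format verb: fmt.Sprintf("%s%0"+zero+"d", prefix, i). A tiny runnable sketch of that pattern; prefix and zero are hypothetical values.

package main

import "fmt"

func main() {
	prefix, zero := "reply_", "2"
	for i := 0; i <= 3; i++ {
		// "%02d" pads the shard index to the configured width
		fmt.Println(fmt.Sprintf("%s%0"+zero+"d", prefix, i)) // reply_00, reply_01, ...
	}
}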
app_multiple_databus.go
context.Context) { var ( err error indexAliasName string indexEntityName string ) indexFormat := strings.Split(amd.attrs.Index.IndexFormat, ",") aliases, aliasErr := amd.d.GetAliases(amd.attrs.ESName, amd.attrs.Index.IndexAliasPrefix) if indexFormat[0] == "int" || indexFormat[0] == "single" { for i := amd.attrs.Index.IndexFrom; i <= amd.attrs.Index.IndexTo; i++ { // == "0" 有问题,不通用 if amd.attrs.Index.IndexZero == "0" { indexAliasName = amd.attrs.Index.IndexAliasPrefix indexEntityName = amd.attrs.Index.IndexEntityPrefix } else { indexAliasName = fmt.Sprintf("%s%0"+amd.attrs.Index.IndexZero+"d", amd.attrs.Index.IndexAliasPrefix, i) indexEntityName = fmt.Sprintf("%s%0"+amd.attrs.Index.IndexZero+"d", amd.attrs.Index.IndexEntityPrefix, i) } if aliasErr != nil { amd.d.InitIndex(c, nil, amd.attrs.ESName, indexAliasName, indexEntityName, amd.attrs.Index.IndexMapping) } else { amd.d.InitIndex(c, aliases, amd.attrs.ESName, indexAliasName, indexEntityName, amd.attrs.Index.IndexMapping) } } } else { if amd.indexNameSuffix, err = amd.IndexNameSuffix(indexFormat[0], indexFormat[1]); err != nil { log.Error("amd.IndexNameSuffix(%v)", err) return } for _, v := range amd.indexNameSuffix { if aliasErr != nil { amd.d.InitIndex(c, nil, amd.attrs.ESName, amd.attrs.Index.IndexAliasPrefix+v, amd.attrs.Index.IndexEntityPrefix+v, amd.attrs.Index.IndexMapping) } else { amd.d.InitIndex(c, aliases, amd.attrs.ESName, amd.attrs.Index.IndexAliasPrefix+v, amd.attrs.Index.IndexEntityPrefix+v, amd.attrs.Index.IndexMapping) } } } } // InitOffset insert init value to offset. func (amd *AppMultipleDatabus) InitOffset(c context.Context) { amd.d.InitOffset(c, amd.offsets[0], amd.attrs, amd.tableName) } // Offset . func (amd *AppMultipleDatabus) Offset(c context.Context) { for i, v := range amd.tableName { offset, err := amd.d.Offset(c, amd.attrs.AppID, v) if err != nil { log.Error("amd.d.offset error(%v)", err) time.Sleep(time.Second * 3) } amd.offsets[i].SetReview(offset.ReviewID, offset.ReviewTime) amd.offsets[i].SetOffset(offset.OffsetID(), offset.OffsetTime()) } } // SetRecover set recover func (amd *AppMultipleDatabus) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) { amd.offsets.SetRecoverOffsets(i, recoverID, recoverTime) } // IncrMessages . 
func (amd *AppMultipleDatabus) IncrMessages(c context.Context) (length int, err error) { ticker := time.NewTicker(time.Duration(time.Millisecond * time.Duration(amd.attrs.Databus.Ticker))) defer ticker.Stop() for { select { case msg, ok := <-amd.dtb.Messages(): if !ok { log.Error("databus: %s binlog consumer exit!!!", amd.attrs.Databus) break } m := &model.Message{} amd.commits[msg.Partition] = msg if err = json.Unmarshal(msg.Value, m); err != nil { log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err) continue } if amd.attrs.Business == "creative_reply" { r, _ := regexp.Compile("reply_\\d+") if !r.MatchString(m.Table) { continue } } if (amd.attrs.Table.TableSplit == "string" && m.Table == amd.attrs.Table.TablePrefix) || (amd.attrs.Table.TableSplit != "string" && strings.HasPrefix(m.Table, amd.attrs.Table.TablePrefix)) { if m.Action == "insert" || m.Action == "update" { var parseMap map[string]interface{} parseMap, err = amd.d.JSON2map(m.New) if err != nil { log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err) continue } // esports fav type filter if amd.attrs.AppID == "esports_fav" { if t, ok := parseMap["type"]; ok && t.(int64) != 10 { continue } } // playlist fav type and attr filter if amd.attrs.AppID == "fav_playlist" { if t, ok := parseMap["type"]; ok && t.(int64) != 2 { continue } if t, ok := parseMap["attr"]; ok { if t.(int64)>>0&1 == 0 || (m.Action == "insert" && t.(int64)>>1&1 == 1) { continue } } } var newParseMap map[string]interface{} newParseMap, err = amd.newParseMap(c, m.Table, parseMap) if err != nil { if amd.attrs.AppID == "creative_reply" { continue } log.Error("amd.newParseMap error(%v)", err) continue } amd.mapData = append(amd.mapData, newParseMap) } } if len(amd.mapData) < amd.attrs.Databus.AggCount { continue } case <-ticker.C: } break } if len(amd.mapData) > 0 { amd.mapData, err = amd.d.ExtraData(c, amd.mapData, amd.attrs, "dtb", []string{}) } length = len(amd.mapData) //amd.d.extraData(c, amd, "dtb") return } // AllMessages . 
func (amd *AppMultipleDatabus) AllMessages(c context.Context) (length int, err error) { amd.mapData = []model.MapData{} for i, v := range amd.tableName { var ( rows *xsql.Rows sql string ) tableFormat := strings.Split(amd.attrs.Table.TableFormat, ",") if amd.attrs.AppID == "dm_search" || amd.attrs.AppID == "dm" { sql = fmt.Sprintf(amd.attrs.DataSQL.SQLByID, amd.attrs.DataSQL.SQLFields, i, i) } else if tableFormat[0] == "int" || tableFormat[0] == "single" { // 兼容只传后缀,不传表名 sql = fmt.Sprintf(amd.attrs.DataSQL.SQLByID, amd.attrs.DataSQL.SQLFields, i) log.Info(sql, amd.offsets[i].OffsetID, amd.attrs.Other.Size) } else { sql = fmt.Sprintf(amd.attrs.DataSQL.SQLByID, amd.attrs.DataSQL.SQLFields, v) } if rows, err = amd.db.Query(c, sql, amd.offsets[i].OffsetID, amd.attrs.Other.Size); err != nil { log.Error("AllMessages db.Query error(%v)", err) return } tempList := []model.MapData{} for rows.Next() { item, row := InitMapData(amd.attrs.DataSQL.DataIndexFields) if err = rows.Scan(row...); err != nil { log.Error("AppMultipleDatabus.AllMessages rows.Scan() error(%v)", err) continue } var newParseMap map[string]interface{} newParseMap, err = amd.newParseMap(c, v, item) if err != nil { log.Error("amd.newParseMap error(%v)", err) continue } tempList = append(tempList, newParseMap) amd.mapData = append(amd.mapData, newParseMap) } rows.Close() tmpLength := len(tempList) if tmpLength > 0 { amd.offsets[i].SetTempOffset(tempList[tmpLength-1].PrimaryID(), tempList[tmpLength-1].StrMTime()) } } if len(amd.mapData) > 0 { amd.mapData, err = amd.d.ExtraData(c, amd.mapData, amd.attrs, "db", []string{}) } length = len(amd.mapData) //amd.d.extraData(c, amd, "db") return } // BulkIndex . func (amd *AppMultipleDatabus) BulkIndex(c context.Context, start int, end int, writeEntityIndex bool) (err error) { partData := amd.mapData[start:end] if amd.d.c.Business.Index {
err = amd.d.BulkDBData(c, amd.attrs, writeEntityIndex, partData...)
	} else {
		err = amd.d.BulkDatabusData(c, amd.attrs, writeEntityIndex, partData...)
	}
	return
}

// Commit .
func (amd *AppMultipleDatabus) Commit(c context.Contex
identifier_body
app_multiple_databus.go
ESName, indexAliasName, indexEntityName, amd.attrs.Index.IndexMapping) } else { amd.d.InitIndex(c, aliases, amd.attrs.ESName, indexAliasName, indexEntityName, amd.attrs.Index.IndexMapping) } } } else { if amd.indexNameSuffix, err = amd.IndexNameSuffix(indexFormat[0], indexFormat[1]); err != nil { log.Error("amd.IndexNameSuffix(%v)", err) return } for _, v := range amd.indexNameSuffix { if aliasErr != nil { amd.d.InitIndex(c, nil, amd.attrs.ESName, amd.attrs.Index.IndexAliasPrefix+v, amd.attrs.Index.IndexEntityPrefix+v, amd.attrs.Index.IndexMapping) } else { amd.d.InitIndex(c, aliases, amd.attrs.ESName, amd.attrs.Index.IndexAliasPrefix+v, amd.attrs.Index.IndexEntityPrefix+v, amd.attrs.Index.IndexMapping) } } } } // InitOffset insert init value to offset. func (amd *AppMultipleDatabus) InitOffset(c context.Context) { amd.d.InitOffset(c, amd.offsets[0], amd.attrs, amd.tableName) } // Offset . func (amd *AppMultipleDatabus) Offset(c context.Context) { for i, v := range amd.tableName { offset, err := amd.d.Offset(c, amd.attrs.AppID, v) if err != nil { log.Error("amd.d.offset error(%v)", err) time.Sleep(time.Second * 3) } amd.offsets[i].SetReview(offset.ReviewID, offset.ReviewTime) amd.offsets[i].SetOffset(offset.OffsetID(), offset.OffsetTime()) } } // SetRecover set recover func (amd *AppMultipleDatabus) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) { amd.offsets.SetRecoverOffsets(i, recoverID, recoverTime) } // IncrMessages . func (amd *AppMultipleDatabus) IncrMessages(c context.Context) (length int, err error) { ticker := time.NewTicker(time.Duration(time.Millisecond * time.Duration(amd.attrs.Databus.Ticker))) defer ticker.Stop() for { select { case msg, ok := <-amd.dtb.Messages(): if !ok { log.Error("databus: %s binlog consumer exit!!!", amd.attrs.Databus) break } m := &model.Message{} amd.commits[msg.Partition] = msg if err = json.Unmarshal(msg.Value, m); err != nil { log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err) continue } if amd.attrs.Business == "creative_reply" { r, _ := regexp.Compile("reply_\\d+") if !r.MatchString(m.Table) { continue } } if (amd.attrs.Table.TableSplit == "string" && m.Table == amd.attrs.Table.TablePrefix) || (amd.attrs.Table.TableSplit != "string" && strings.HasPrefix(m.Table, amd.attrs.Table.TablePrefix)) { if m.Action == "insert" || m.Action == "update" { var parseMap map[string]interface{} parseMap, err = amd.d.JSON2map(m.New) if err != nil { log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err) continue } // esports fav type filter if amd.attrs.AppID == "esports_fav" { if t, ok := parseMap["type"]; ok && t.(int64) != 10 { continue } } // playlist fav type and attr filter if amd.attrs.AppID == "fav_playlist" { if t, ok := parseMap["type"]; ok && t.(int64) != 2 { continue } if t, ok := parseMap["attr"]; ok { if t.(int64)>>0&1 == 0 || (m.Action == "insert" && t.(int64)>>1&1 == 1) { continue } } } var newParseMap map[string]interface{} newParseMap, err = amd.newParseMap(c, m.Table, parseMap) if err != nil { if amd.attrs.AppID == "creative_reply" { continue } log.Error("amd.newParseMap error(%v)", err) continue } amd.mapData = append(amd.mapData, newParseMap) } } if len(amd.mapData) < amd.attrs.Databus.AggCount { continue } case <-ticker.C: } break } if len(amd.mapData) > 0 { amd.mapData, err = amd.d.ExtraData(c, amd.mapData, amd.attrs, "dtb", []string{}) } length = len(amd.mapData) //amd.d.extraData(c, amd, "dtb") return } // AllMessages . 
func (amd *AppMultipleDatabus) AllMessages(c context.Context) (length int, err error) { amd.mapData = []model.MapData{} for i, v := range amd.tableName { var ( rows *xsql.Rows sql string ) tableFormat := strings.Split(amd.attrs.Table.TableFormat, ",") if amd.attrs.AppID == "dm_search" || amd.attrs.AppID == "dm" { sql = fmt.Sprintf(amd.attrs.DataSQL.SQLByID, amd.attrs.DataSQL.SQLFields, i, i) } else if tableFormat[0] == "int" || tableFormat[0] == "single" { // 兼容只传后缀,不传表名 sql = fmt.Sprintf(amd.attrs.DataSQL.SQLByID, amd.attrs.DataSQL.SQLFields, i) log.Info(sql, amd.offsets[i].OffsetID, amd.attrs.Other.Size) } else { sql = fmt.Sprintf(amd.attrs.DataSQL.SQLByID, amd.attrs.DataSQL.SQLFields, v) } if rows, err = amd.db.Query(c, sql, amd.offsets[i].OffsetID, amd.attrs.Other.Size); err != nil { log.Error("AllMessages db.Query error(%v)", err) return } tempList := []model.MapData{} for rows.Next() { item, row := InitMapData(amd.attrs.DataSQL.DataIndexFields) if err = rows.Scan(row...); err != nil { log.Error("AppMultipleDatabus.AllMessages rows.Scan() error(%v)", err) continue } var newParseMap map[string]interface{} newParseMap, err = amd.newParseMap(c, v, item) if err != nil { log.Error("amd.newParseMap error(%v)", err) continue } tempList = append(tempList, newParseMap) amd.mapData = append(amd.mapData, newParseMap) } rows.Close() tmpLength := len(tempList) if tmpLength > 0 { amd.offsets[i].SetTempOffset(tempList[tmpLength-1].PrimaryID(), tempList[tmpLength-1].StrMTime()) } } if len(amd.mapData) > 0 { amd.mapData, err = amd.d.ExtraData(c, amd.mapData, amd.attrs, "db", []string{}) } length = len(amd.mapData) //amd.d.extraData(c, amd, "db") return } // BulkIndex . func (amd *AppMultipleDatabus) BulkIndex(c context.Context, start int, end int, writeEntityIndex bool) (err error) { partData := amd.mapData[start:end] if amd.d.c.Business.Index { err = amd.d.BulkDBData(c, amd.attrs, writeEntityIndex, partData...) } else { err = amd.d.BulkDatabusData(c, amd.attrs, writeEntityIndex, partData...) } return } // Commit . func (amd *AppMultipleDatabus) Commit(c context.Context) (err error) { if amd.d.c.Business.Index { if amd.attrs.Table.TableSplit == "int" || amd.attrs.Table.TableSplit == "single" { // 兼容只传后缀,不传表名 for i := amd.attrs.Table.TableFrom; i <= amd.attrs.Table.TableTo; i++ { tableName := fmt.Sprintf("%s%0"+amd.attrs.Table.TableZero+"d", amd.attrs.Table.TablePrefix, i) if err = amd.d.CommitOffset(c, amd.offsets[i], amd.attrs.AppID, tableName); err != nil { log.Error("AppMultipleDatabus.Commit error(%v)", err) continue } } } else { for i, v := range amd.indexNameSuffix { if err = amd.d.CommitOffset(c, amd.offsets[i], amd.attrs.AppID, v); err != nil {
log.Error("Commit error(%v)", err) continue } } } } else { for k, c := range amd.commits { if err = c.Commit(); err != nil {
conditional_block
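The fav_playlist filter in the records above tests individual bits of an attr field: attr>>0&1 reads the lowest bit and attr>>1&1 the next one. A small Go sketch of those checks with made-up attribute values:

package main

import "fmt"

func main() {
	for _, attr := range []int64{0, 1, 2, 3} {
		// >> binds tighter than &, so attr>>1&1 is (attr>>1)&1
		fmt.Printf("attr=%d bit0=%d bit1=%d\n", attr, attr>>0&1, attr>>1&1)
	}
	// In the record, a row is skipped when bit0 is 0, or on insert when bit1 is 1.
}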
zz_generated.composition_transforms.go
return field.Required(field.NewPath("map"), "given transform type map requires configuration") } return verrors.WrapFieldError(t.Map.Validate(), field.NewPath("map")) case TransformTypeMatch: if t.Match == nil { return field.Required(field.NewPath("match"), "given transform type match requires configuration") } return verrors.WrapFieldError(t.Match.Validate(), field.NewPath("match")) case TransformTypeString: if t.String == nil { return field.Required(field.NewPath("string"), "given transform type string requires configuration") } return verrors.WrapFieldError(t.String.Validate(), field.NewPath("string")) case TransformTypeConvert: if t.Convert == nil { return field.Required(field.NewPath("convert"), "given transform type convert requires configuration") } if err := t.Convert.Validate(); err != nil { return verrors.WrapFieldError(err, field.NewPath("convert")) } default: // Should never happen return field.Invalid(field.NewPath("type"), t.Type, "unknown transform type") } return nil } // GetFormat returns the format of the transform. func (t *ConvertTransform) GetFormat() ConvertTransformFormat { if t.Format != nil { return *t.Format } return ConvertTransformFormatNone } // GetOutputType returns the output type of the transform. // It returns an error if the transform type is unknown. // It returns nil if the output type is not known. func (t *Transform) GetOutputType() (*TransformIOType, error) { var out TransformIOType switch t.Type { case TransformTypeMap, TransformTypeMatch: return nil, nil case TransformTypeMath: out = TransformIOTypeFloat64 case TransformTypeString: out = TransformIOTypeString case TransformTypeConvert: out = t.Convert.ToType default: return nil, errors.Errorf("unable to get output type, unknown transform type: %s", t.Type) } return &out, nil } // MathTransformType conducts mathematical operations. type MathTransformType string // Accepted MathTransformType. const ( MathTransformTypeMultiply MathTransformType = "Multiply" // Default MathTransformTypeClampMin MathTransformType = "ClampMin" MathTransformTypeClampMax MathTransformType = "ClampMax" ) // MathTransform conducts mathematical operations on the input with the given // configuration in its properties. type MathTransform struct { // Type of the math transform to be run. // +optional // +kubebuilder:validation:Enum=Multiply;ClampMin;ClampMax // +kubebuilder:default=Multiply Type MathTransformType `json:"type,omitempty"` // Multiply the value. // +optional Multiply *int64 `json:"multiply,omitempty"` // ClampMin makes sure that the value is not smaller than the given value. // +optional ClampMin *int64 `json:"clampMin,omitempty"` // ClampMax makes sure that the value is not bigger than the given value. // +optional ClampMax *int64 `json:"clampMax,omitempty"` } // GetType returns the type of the math transform, returning the default if not specified. func (m *MathTransform) GetType() MathTransformType { if m.Type == "" { return MathTransformTypeMultiply } return m.Type } // Validate checks this MathTransform is valid. 
func (m *MathTransform) Validate() *field.Error { switch m.GetType() { case MathTransformTypeMultiply: if m.Multiply == nil { return field.Required(field.NewPath("multiply"), "must specify a value if a multiply math transform is specified") } case MathTransformTypeClampMin: if m.ClampMin == nil { return field.Required(field.NewPath("clampMin"), "must specify a value if a clamp min math transform is specified") } case MathTransformTypeClampMax: if m.ClampMax == nil { return field.Required(field.NewPath("clampMax"), "must specify a value if a clamp max math transform is specified") } default: return field.Invalid(field.NewPath("type"), m.Type, "unknown math transform type") } return nil } // MapTransform returns a value for the input from the given map. type MapTransform struct { // Pairs is the map that will be used for transform. // +optional Pairs map[string]extv1.JSON `json:",inline"` } // Validate checks this MapTransform is valid. func (m *MapTransform) Validate() *field.Error { if len(m.Pairs) == 0 { return field.Required(field.NewPath("pairs"), "at least one pair must be specified if a map transform is specified") } return nil } // NOTE(negz): The Kubernetes JSON decoder doesn't seem to like inlining a map // into a struct - doing so results in a seemingly successful unmarshal of the // data, but an empty map. We must keep the ,inline tag nevertheless in order to // trick the CRD generator into thinking MapTransform is an arbitrary map (i.e. // generating a validation schema with string additionalProperties), but the // actual marshalling is handled by the marshal methods below. // UnmarshalJSON into this MapTransform. func (m *MapTransform) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &m.Pairs) } // MarshalJSON from this MapTransform. func (m *MapTransform) MarshalJSON() ([]byte, error) { return json.Marshal(m.Pairs) } // MatchFallbackTo defines how a match operation will fallback. type MatchFallbackTo string // Valid MatchFallbackTo. const ( MatchFallbackToTypeValue MatchFallbackTo = "Value" MatchFallbackToTypeInput MatchFallbackTo = "Input" ) // MatchTransform is a more complex version of a map transform that matches a // list of patterns. type MatchTransform struct { // The patterns that should be tested against the input string. // Patterns are tested in order. The value of the first match is used as // result of this transform. Patterns []MatchTransformPattern `json:"patterns,omitempty"` // The fallback value that should be returned by the transform if now pattern // matches. FallbackValue extv1.JSON `json:"fallbackValue,omitempty"` // Determines to what value the transform should fallback if no pattern matches. // +optional // +kubebuilder:validation:Enum=Value;Input // +kubebuilder:default=Value FallbackTo MatchFallbackTo `json:"fallbackTo,omitempty"` } // Validate checks this MatchTransform is valid. func (m *MatchTransform) Validate() *field.Error { if len(m.Patterns) == 0 { return field.Required(field.NewPath("patterns"), "at least one pattern must be specified if a match transform is specified") } for i, p := range m.Patterns { if err := p.Validate(); err != nil { return verrors.WrapFieldError(err, field.NewPath("patterns").Index(i)) } } return nil } // MatchTransformPatternType defines the type of a MatchTransformPattern. type MatchTransformPatternType string
// MatchTransformPattern is a transform that returns the value that matches a // pattern. type MatchTransformPattern struct { // Type specifies how the pattern matches the input. // // * `literal` - the pattern value has to exactly match (case sensitive) the // input string. This is the default. // // * `regexp` - the pattern treated as a regular expression against // which the input string is tested. Crossplane will throw an error if the // key is not a valid regexp. // // +kubebuilder:validation:Enum=literal;regexp // +kubebuilder:default=literal Type MatchTransformPatternType `json:"type"` // Literal exactly matches the input string (case sensitive). // Is required if `type` is `literal`. Literal *string `json:"literal,omitempty"` // Regexp to match against the input string. // Is required if `type` is `regexp`. Regexp *string `json:"regexp,omitempty"` // The value that is used as result of the transform if the pattern matches. Result extv1.JSON `json:"result"` } // Validate checks this MatchTransformPattern is valid. func (m *MatchTransformPattern) Validate() *field.Error { switch m.Type { case MatchTransformPatternTypeLiteral, "": if m.Literal == nil { return field.Required(field.NewPath("literal"), "literal pattern type requires a literal") } case MatchTransformPatternTypeRegexp: if m.Regexp == nil { return field.Required(field.NewPath("regexp"), "regexp pattern type requires a regexp") } if _, err := regexp.Compile(*m.Regexp); err != nil { return field.Invalid(field.NewPath("regexp"), *m.Regexp, "invalid regexp") } default: return field.Invalid(field.NewPath("type"), m.Type, "unknown pattern type") } return nil } // StringTransformType transforms a string. type StringTransformType string // Accepted StringTransformTypes. const ( StringTransformTypeFormat StringTransformType = "Format" // Default StringTransformTypeConvert
// Valid MatchTransformPatternTypes.
const (
	MatchTransformPatternTypeLiteral MatchTransformPatternType = "literal"
	MatchTransformPatternTypeRegexp  MatchTransformPatternType = "regexp"
)
random_line_split
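MatchTransformPattern.Validate above rejects a regexp pattern whose expression does not compile, using regexp.Compile as the check. The same validation in isolation; validatePattern is an illustrative helper, not the Crossplane API.

package main

import (
	"fmt"
	"regexp"
)

// validatePattern returns an error when expr is not a valid regexp.
func validatePattern(expr string) error {
	if _, err := regexp.Compile(expr); err != nil {
		return fmt.Errorf("invalid regexp %q: %w", expr, err)
	}
	return nil
}

func main() {
	fmt.Println(validatePattern(`reply_\d+`)) // <nil>
	fmt.Println(validatePattern(`reply_(`))   // invalid regexp ...
}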
zz_generated.composition_transforms.go
) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &m.Pairs) } // MarshalJSON from this MapTransform. func (m *MapTransform) MarshalJSON() ([]byte, error) { return json.Marshal(m.Pairs) } // MatchFallbackTo defines how a match operation will fallback. type MatchFallbackTo string // Valid MatchFallbackTo. const ( MatchFallbackToTypeValue MatchFallbackTo = "Value" MatchFallbackToTypeInput MatchFallbackTo = "Input" ) // MatchTransform is a more complex version of a map transform that matches a // list of patterns. type MatchTransform struct { // The patterns that should be tested against the input string. // Patterns are tested in order. The value of the first match is used as // result of this transform. Patterns []MatchTransformPattern `json:"patterns,omitempty"` // The fallback value that should be returned by the transform if now pattern // matches. FallbackValue extv1.JSON `json:"fallbackValue,omitempty"` // Determines to what value the transform should fallback if no pattern matches. // +optional // +kubebuilder:validation:Enum=Value;Input // +kubebuilder:default=Value FallbackTo MatchFallbackTo `json:"fallbackTo,omitempty"` } // Validate checks this MatchTransform is valid. func (m *MatchTransform) Validate() *field.Error { if len(m.Patterns) == 0 { return field.Required(field.NewPath("patterns"), "at least one pattern must be specified if a match transform is specified") } for i, p := range m.Patterns { if err := p.Validate(); err != nil { return verrors.WrapFieldError(err, field.NewPath("patterns").Index(i)) } } return nil } // MatchTransformPatternType defines the type of a MatchTransformPattern. type MatchTransformPatternType string // Valid MatchTransformPatternTypes. const ( MatchTransformPatternTypeLiteral MatchTransformPatternType = "literal" MatchTransformPatternTypeRegexp MatchTransformPatternType = "regexp" ) // MatchTransformPattern is a transform that returns the value that matches a // pattern. type MatchTransformPattern struct { // Type specifies how the pattern matches the input. // // * `literal` - the pattern value has to exactly match (case sensitive) the // input string. This is the default. // // * `regexp` - the pattern treated as a regular expression against // which the input string is tested. Crossplane will throw an error if the // key is not a valid regexp. // // +kubebuilder:validation:Enum=literal;regexp // +kubebuilder:default=literal Type MatchTransformPatternType `json:"type"` // Literal exactly matches the input string (case sensitive). // Is required if `type` is `literal`. Literal *string `json:"literal,omitempty"` // Regexp to match against the input string. // Is required if `type` is `regexp`. Regexp *string `json:"regexp,omitempty"` // The value that is used as result of the transform if the pattern matches. Result extv1.JSON `json:"result"` } // Validate checks this MatchTransformPattern is valid. 
func (m *MatchTransformPattern) Validate() *field.Error { switch m.Type { case MatchTransformPatternTypeLiteral, "": if m.Literal == nil { return field.Required(field.NewPath("literal"), "literal pattern type requires a literal") } case MatchTransformPatternTypeRegexp: if m.Regexp == nil { return field.Required(field.NewPath("regexp"), "regexp pattern type requires a regexp") } if _, err := regexp.Compile(*m.Regexp); err != nil { return field.Invalid(field.NewPath("regexp"), *m.Regexp, "invalid regexp") } default: return field.Invalid(field.NewPath("type"), m.Type, "unknown pattern type") } return nil } // StringTransformType transforms a string. type StringTransformType string // Accepted StringTransformTypes. const ( StringTransformTypeFormat StringTransformType = "Format" // Default StringTransformTypeConvert StringTransformType = "Convert" StringTransformTypeTrimPrefix StringTransformType = "TrimPrefix" StringTransformTypeTrimSuffix StringTransformType = "TrimSuffix" StringTransformTypeRegexp StringTransformType = "Regexp" ) // StringConversionType converts a string. type StringConversionType string // Accepted StringConversionTypes. const ( StringConversionTypeToUpper StringConversionType = "ToUpper" StringConversionTypeToLower StringConversionType = "ToLower" StringConversionTypeToJSON StringConversionType = "ToJson" StringConversionTypeToBase64 StringConversionType = "ToBase64" StringConversionTypeFromBase64 StringConversionType = "FromBase64" StringConversionTypeToSHA1 StringConversionType = "ToSha1" StringConversionTypeToSHA256 StringConversionType = "ToSha256" StringConversionTypeToSHA512 StringConversionType = "ToSha512" StringConversionTypeToAdler32 StringConversionType = "ToAdler32" ) // A StringTransform returns a string given the supplied input. type StringTransform struct { // Type of the string transform to be run. // +optional // +kubebuilder:validation:Enum=Format;Convert;TrimPrefix;TrimSuffix;Regexp // +kubebuilder:default=Format Type StringTransformType `json:"type,omitempty"` // Format the input using a Go format string. See // https://golang.org/pkg/fmt/ for details. // +optional Format *string `json:"fmt,omitempty"` // Optional conversion method to be specified. // `ToUpper` and `ToLower` change the letter case of the input string. // `ToBase64` and `FromBase64` perform a base64 conversion based on the input string. // `ToJson` converts any input value into its raw JSON representation. // `ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input // converted to JSON. // +optional // +kubebuilder:validation:Enum=ToUpper;ToLower;ToBase64;FromBase64;ToJson;ToSha1;ToSha256;ToSha512 Convert *StringConversionType `json:"convert,omitempty"` // Trim the prefix or suffix from the input // +optional Trim *string `json:"trim,omitempty"` // Extract a match from the input using a regular expression. // +optional Regexp *StringTransformRegexp `json:"regexp,omitempty"` } // Validate checks this StringTransform is valid. 
// //nolint:gocyclo // just a switch func (s *StringTransform) Validate() *field.Error { switch s.Type { case StringTransformTypeFormat, "": if s.Format == nil { return field.Required(field.NewPath("fmt"), "format transform requires a format") } case StringTransformTypeConvert: if s.Convert == nil { return field.Required(field.NewPath("convert"), "convert transform requires a conversion type") } case StringTransformTypeTrimPrefix, StringTransformTypeTrimSuffix: if s.Trim == nil { return field.Required(field.NewPath("trim"), "trim transform requires a trim value") } case StringTransformTypeRegexp: if s.Regexp == nil { return field.Required(field.NewPath("regexp"), "regexp transform requires a regexp") } if s.Regexp.Match == "" { return field.Required(field.NewPath("regexp", "match"), "regexp transform requires a match") } if _, err := regexp.Compile(s.Regexp.Match); err != nil { return field.Invalid(field.NewPath("regexp", "match"), s.Regexp.Match, "invalid regexp") } default: return field.Invalid(field.NewPath("type"), s.Type, "unknown string transform type") } return nil } // A StringTransformRegexp extracts a match from the input using a regular // expression. type StringTransformRegexp struct { // Match string. May optionally include submatches, aka capture groups. // See https://pkg.go.dev/regexp/ for details. Match string `json:"match"` // Group number to match. 0 (the default) matches the entire expression. // +optional Group *int `json:"group,omitempty"` } // TransformIOType defines the type of a ConvertTransform. type TransformIOType string // The list of supported Transform input and output types. const ( TransformIOTypeString TransformIOType = "string" TransformIOTypeBool TransformIOType = "bool" TransformIOTypeInt TransformIOType = "int" TransformIOTypeInt64 TransformIOType = "int64" TransformIOTypeFloat64 TransformIOType = "float64" TransformIOTypeObject TransformIOType = "object" TransformIOTypeArray TransformIOType = "array" ) // IsValid checks if the given TransformIOType is valid. func (c TransformIOType) IsValid() bool
{
	switch c {
	case TransformIOTypeString,
		TransformIOTypeBool,
		TransformIOTypeInt,
		TransformIOTypeInt64,
		TransformIOTypeFloat64,
		TransformIOTypeObject,
		TransformIOTypeArray:
		return true
	}
	return false
}
identifier_body
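GetType on MathTransform, which the surrounding records exercise, returns a default when the field is empty so callers never branch on the zero value. The same defaulting-getter shape in miniature; all names here are illustrative.

package main

import "fmt"

type mathType string

const typeMultiply mathType = "Multiply"

type mathTransform struct{ Type mathType }

// GetType substitutes the default when Type was left unset.
func (m *mathTransform) GetType() mathType {
	if m.Type == "" {
		return typeMultiply
	}
	return m.Type
}

func main() {
	var m mathTransform
	fmt.Println(m.GetType()) // Multiply
}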
zz_generated.composition_transforms.go
return field.Required(field.NewPath("map"), "given transform type map requires configuration") } return verrors.WrapFieldError(t.Map.Validate(), field.NewPath("map")) case TransformTypeMatch: if t.Match == nil { return field.Required(field.NewPath("match"), "given transform type match requires configuration") } return verrors.WrapFieldError(t.Match.Validate(), field.NewPath("match")) case TransformTypeString: if t.String == nil { return field.Required(field.NewPath("string"), "given transform type string requires configuration") } return verrors.WrapFieldError(t.String.Validate(), field.NewPath("string")) case TransformTypeConvert: if t.Convert == nil { return field.Required(field.NewPath("convert"), "given transform type convert requires configuration") } if err := t.Convert.Validate(); err != nil { return verrors.WrapFieldError(err, field.NewPath("convert")) } default: // Should never happen return field.Invalid(field.NewPath("type"), t.Type, "unknown transform type") } return nil } // GetFormat returns the format of the transform. func (t *ConvertTransform) GetFormat() ConvertTransformFormat { if t.Format != nil { return *t.Format } return ConvertTransformFormatNone } // GetOutputType returns the output type of the transform. // It returns an error if the transform type is unknown. // It returns nil if the output type is not known. func (t *Transform) GetOutputType() (*TransformIOType, error) { var out TransformIOType switch t.Type { case TransformTypeMap, TransformTypeMatch: return nil, nil case TransformTypeMath: out = TransformIOTypeFloat64 case TransformTypeString: out = TransformIOTypeString case TransformTypeConvert: out = t.Convert.ToType default: return nil, errors.Errorf("unable to get output type, unknown transform type: %s", t.Type) } return &out, nil } // MathTransformType conducts mathematical operations. type MathTransformType string // Accepted MathTransformType. const ( MathTransformTypeMultiply MathTransformType = "Multiply" // Default MathTransformTypeClampMin MathTransformType = "ClampMin" MathTransformTypeClampMax MathTransformType = "ClampMax" ) // MathTransform conducts mathematical operations on the input with the given // configuration in its properties. type MathTransform struct { // Type of the math transform to be run. // +optional // +kubebuilder:validation:Enum=Multiply;ClampMin;ClampMax // +kubebuilder:default=Multiply Type MathTransformType `json:"type,omitempty"` // Multiply the value. // +optional Multiply *int64 `json:"multiply,omitempty"` // ClampMin makes sure that the value is not smaller than the given value. // +optional ClampMin *int64 `json:"clampMin,omitempty"` // ClampMax makes sure that the value is not bigger than the given value. // +optional ClampMax *int64 `json:"clampMax,omitempty"` } // GetType returns the type of the math transform, returning the default if not specified. func (m *MathTransform)
() MathTransformType { if m.Type == "" { return MathTransformTypeMultiply } return m.Type } // Validate checks this MathTransform is valid. func (m *MathTransform) Validate() *field.Error { switch m.GetType() { case MathTransformTypeMultiply: if m.Multiply == nil { return field.Required(field.NewPath("multiply"), "must specify a value if a multiply math transform is specified") } case MathTransformTypeClampMin: if m.ClampMin == nil { return field.Required(field.NewPath("clampMin"), "must specify a value if a clamp min math transform is specified") } case MathTransformTypeClampMax: if m.ClampMax == nil { return field.Required(field.NewPath("clampMax"), "must specify a value if a clamp max math transform is specified") } default: return field.Invalid(field.NewPath("type"), m.Type, "unknown math transform type") } return nil } // MapTransform returns a value for the input from the given map. type MapTransform struct { // Pairs is the map that will be used for transform. // +optional Pairs map[string]extv1.JSON `json:",inline"` } // Validate checks this MapTransform is valid. func (m *MapTransform) Validate() *field.Error { if len(m.Pairs) == 0 { return field.Required(field.NewPath("pairs"), "at least one pair must be specified if a map transform is specified") } return nil } // NOTE(negz): The Kubernetes JSON decoder doesn't seem to like inlining a map // into a struct - doing so results in a seemingly successful unmarshal of the // data, but an empty map. We must keep the ,inline tag nevertheless in order to // trick the CRD generator into thinking MapTransform is an arbitrary map (i.e. // generating a validation schema with string additionalProperties), but the // actual marshalling is handled by the marshal methods below. // UnmarshalJSON into this MapTransform. func (m *MapTransform) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &m.Pairs) } // MarshalJSON from this MapTransform. func (m *MapTransform) MarshalJSON() ([]byte, error) { return json.Marshal(m.Pairs) } // MatchFallbackTo defines how a match operation will fallback. type MatchFallbackTo string // Valid MatchFallbackTo. const ( MatchFallbackToTypeValue MatchFallbackTo = "Value" MatchFallbackToTypeInput MatchFallbackTo = "Input" ) // MatchTransform is a more complex version of a map transform that matches a // list of patterns. type MatchTransform struct { // The patterns that should be tested against the input string. // Patterns are tested in order. The value of the first match is used as // result of this transform. Patterns []MatchTransformPattern `json:"patterns,omitempty"` // The fallback value that should be returned by the transform if now pattern // matches. FallbackValue extv1.JSON `json:"fallbackValue,omitempty"` // Determines to what value the transform should fallback if no pattern matches. // +optional // +kubebuilder:validation:Enum=Value;Input // +kubebuilder:default=Value FallbackTo MatchFallbackTo `json:"fallbackTo,omitempty"` } // Validate checks this MatchTransform is valid. func (m *MatchTransform) Validate() *field.Error { if len(m.Patterns) == 0 { return field.Required(field.NewPath("patterns"), "at least one pattern must be specified if a match transform is specified") } for i, p := range m.Patterns { if err := p.Validate(); err != nil { return verrors.WrapFieldError(err, field.NewPath("patterns").Index(i)) } } return nil } // MatchTransformPatternType defines the type of a MatchTransformPattern. type MatchTransformPatternType string // Valid MatchTransformPatternTypes. 
const ( MatchTransformPatternTypeLiteral MatchTransformPatternType = "literal" MatchTransformPatternTypeRegexp MatchTransformPatternType = "regexp" ) // MatchTransformPattern is a transform that returns the value that matches a // pattern. type MatchTransformPattern struct { // Type specifies how the pattern matches the input. // // * `literal` - the pattern value has to exactly match (case sensitive) the // input string. This is the default. // // * `regexp` - the pattern is treated as a regular expression against // which the input string is tested. Crossplane will throw an error if the // key is not a valid regexp. // // +kubebuilder:validation:Enum=literal;regexp // +kubebuilder:default=literal Type MatchTransformPatternType `json:"type"` // Literal exactly matches the input string (case sensitive). // Is required if `type` is `literal`. Literal *string `json:"literal,omitempty"` // Regexp to match against the input string. // Is required if `type` is `regexp`. Regexp *string `json:"regexp,omitempty"` // The value that is used as result of the transform if the pattern matches. Result extv1.JSON `json:"result"` } // Validate checks this MatchTransformPattern is valid. func (m *MatchTransformPattern) Validate() *field.Error { switch m.Type { case MatchTransformPatternTypeLiteral, "": if m.Literal == nil { return field.Required(field.NewPath("literal"), "literal pattern type requires a literal") } case MatchTransformPatternTypeRegexp: if m.Regexp == nil { return field.Required(field.NewPath("regexp"), "regexp pattern type requires a regexp") } if _, err := regexp.Compile(*m.Regexp); err != nil { return field.Invalid(field.NewPath("regexp"), *m.Regexp, "invalid regexp") } default: return field.Invalid(field.NewPath("type"), m.Type, "unknown pattern type") } return nil } // StringTransformType transforms a string. type StringTransformType string // Accepted StringTransformTypes. const ( StringTransformTypeFormat StringTransformType = "Format" // Default StringTransformTypeConvert
GetType
identifier_name
zz_generated.composition_transforms.go
return field.Required(field.NewPath("map"), "given transform type map requires configuration") } return verrors.WrapFieldError(t.Map.Validate(), field.NewPath("map")) case TransformTypeMatch: if t.Match == nil { return field.Required(field.NewPath("match"), "given transform type match requires configuration") } return verrors.WrapFieldError(t.Match.Validate(), field.NewPath("match")) case TransformTypeString: if t.String == nil { return field.Required(field.NewPath("string"), "given transform type string requires configuration") } return verrors.WrapFieldError(t.String.Validate(), field.NewPath("string")) case TransformTypeConvert: if t.Convert == nil { return field.Required(field.NewPath("convert"), "given transform type convert requires configuration") } if err := t.Convert.Validate(); err != nil { return verrors.WrapFieldError(err, field.NewPath("convert")) } default: // Should never happen return field.Invalid(field.NewPath("type"), t.Type, "unknown transform type") } return nil } // GetFormat returns the format of the transform. func (t *ConvertTransform) GetFormat() ConvertTransformFormat { if t.Format != nil { return *t.Format } return ConvertTransformFormatNone } // GetOutputType returns the output type of the transform. // It returns an error if the transform type is unknown. // It returns nil if the output type is not known. func (t *Transform) GetOutputType() (*TransformIOType, error) { var out TransformIOType switch t.Type { case TransformTypeMap, TransformTypeMatch: return nil, nil case TransformTypeMath: out = TransformIOTypeFloat64 case TransformTypeString: out = TransformIOTypeString case TransformTypeConvert: out = t.Convert.ToType default: return nil, errors.Errorf("unable to get output type, unknown transform type: %s", t.Type) } return &out, nil } // MathTransformType conducts mathematical operations. type MathTransformType string // Accepted MathTransformType. const ( MathTransformTypeMultiply MathTransformType = "Multiply" // Default MathTransformTypeClampMin MathTransformType = "ClampMin" MathTransformTypeClampMax MathTransformType = "ClampMax" ) // MathTransform conducts mathematical operations on the input with the given // configuration in its properties. type MathTransform struct { // Type of the math transform to be run. // +optional // +kubebuilder:validation:Enum=Multiply;ClampMin;ClampMax // +kubebuilder:default=Multiply Type MathTransformType `json:"type,omitempty"` // Multiply the value. // +optional Multiply *int64 `json:"multiply,omitempty"` // ClampMin makes sure that the value is not smaller than the given value. // +optional ClampMin *int64 `json:"clampMin,omitempty"` // ClampMax makes sure that the value is not bigger than the given value. // +optional ClampMax *int64 `json:"clampMax,omitempty"` } // GetType returns the type of the math transform, returning the default if not specified. func (m *MathTransform) GetType() MathTransformType { if m.Type == ""
return m.Type } // Validate checks this MathTransform is valid. func (m *MathTransform) Validate() *field.Error { switch m.GetType() { case MathTransformTypeMultiply: if m.Multiply == nil { return field.Required(field.NewPath("multiply"), "must specify a value if a multiply math transform is specified") } case MathTransformTypeClampMin: if m.ClampMin == nil { return field.Required(field.NewPath("clampMin"), "must specify a value if a clamp min math transform is specified") } case MathTransformTypeClampMax: if m.ClampMax == nil { return field.Required(field.NewPath("clampMax"), "must specify a value if a clamp max math transform is specified") } default: return field.Invalid(field.NewPath("type"), m.Type, "unknown math transform type") } return nil } // MapTransform returns a value for the input from the given map. type MapTransform struct { // Pairs is the map that will be used for transform. // +optional Pairs map[string]extv1.JSON `json:",inline"` } // Validate checks this MapTransform is valid. func (m *MapTransform) Validate() *field.Error { if len(m.Pairs) == 0 { return field.Required(field.NewPath("pairs"), "at least one pair must be specified if a map transform is specified") } return nil } // NOTE(negz): The Kubernetes JSON decoder doesn't seem to like inlining a map // into a struct - doing so results in a seemingly successful unmarshal of the // data, but an empty map. We must keep the ,inline tag nevertheless in order to // trick the CRD generator into thinking MapTransform is an arbitrary map (i.e. // generating a validation schema with string additionalProperties), but the // actual marshalling is handled by the marshal methods below. // UnmarshalJSON into this MapTransform. func (m *MapTransform) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &m.Pairs) } // MarshalJSON from this MapTransform. func (m *MapTransform) MarshalJSON() ([]byte, error) { return json.Marshal(m.Pairs) } // MatchFallbackTo defines how a match operation will fallback. type MatchFallbackTo string // Valid MatchFallbackTo. const ( MatchFallbackToTypeValue MatchFallbackTo = "Value" MatchFallbackToTypeInput MatchFallbackTo = "Input" ) // MatchTransform is a more complex version of a map transform that matches a // list of patterns. type MatchTransform struct { // The patterns that should be tested against the input string. // Patterns are tested in order. The value of the first match is used as // result of this transform. Patterns []MatchTransformPattern `json:"patterns,omitempty"` // The fallback value that should be returned by the transform if no pattern // matches. FallbackValue extv1.JSON `json:"fallbackValue,omitempty"` // Determines to what value the transform should fallback if no pattern matches. // +optional // +kubebuilder:validation:Enum=Value;Input // +kubebuilder:default=Value FallbackTo MatchFallbackTo `json:"fallbackTo,omitempty"` } // Validate checks this MatchTransform is valid. func (m *MatchTransform) Validate() *field.Error { if len(m.Patterns) == 0 { return field.Required(field.NewPath("patterns"), "at least one pattern must be specified if a match transform is specified") } for i, p := range m.Patterns { if err := p.Validate(); err != nil { return verrors.WrapFieldError(err, field.NewPath("patterns").Index(i)) } } return nil } // MatchTransformPatternType defines the type of a MatchTransformPattern. type MatchTransformPatternType string // Valid MatchTransformPatternTypes. 
const ( MatchTransformPatternTypeLiteral MatchTransformPatternType = "literal" MatchTransformPatternTypeRegexp MatchTransformPatternType = "regexp" ) // MatchTransformPattern is a transform that returns the value that matches a // pattern. type MatchTransformPattern struct { // Type specifies how the pattern matches the input. // // * `literal` - the pattern value has to exactly match (case sensitive) the // input string. This is the default. // // * `regexp` - the pattern is treated as a regular expression against // which the input string is tested. Crossplane will throw an error if the // key is not a valid regexp. // // +kubebuilder:validation:Enum=literal;regexp // +kubebuilder:default=literal Type MatchTransformPatternType `json:"type"` // Literal exactly matches the input string (case sensitive). // Is required if `type` is `literal`. Literal *string `json:"literal,omitempty"` // Regexp to match against the input string. // Is required if `type` is `regexp`. Regexp *string `json:"regexp,omitempty"` // The value that is used as result of the transform if the pattern matches. Result extv1.JSON `json:"result"` } // Validate checks this MatchTransformPattern is valid. func (m *MatchTransformPattern) Validate() *field.Error { switch m.Type { case MatchTransformPatternTypeLiteral, "": if m.Literal == nil { return field.Required(field.NewPath("literal"), "literal pattern type requires a literal") } case MatchTransformPatternTypeRegexp: if m.Regexp == nil { return field.Required(field.NewPath("regexp"), "regexp pattern type requires a regexp") } if _, err := regexp.Compile(*m.Regexp); err != nil { return field.Invalid(field.NewPath("regexp"), *m.Regexp, "invalid regexp") } default: return field.Invalid(field.NewPath("type"), m.Type, "unknown pattern type") } return nil } // StringTransformType transforms a string. type StringTransformType string // Accepted StringTransformTypes. const ( StringTransformTypeFormat StringTransformType = "Format" // Default StringTransformType
{ return MathTransformTypeMultiply }
conditional_block
input.py
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE # OR OTHER DEALINGS IN THE SOFTWARE. # # prompt and confirm based on https://github.com/pallets/click # Copyright 2014 Pallets # | Redistribution and use in source and binary forms, with or without modification, # | are permitted provided that the following conditions are met: # | # | * Redistributions of source code must retain the above copyright notice, # | this list of conditions and the following disclaimer. # | * Redistributions in binary form must reproduce the above copyright notice, # | this list of conditions and the following disclaimer in the documentation # | and/or other materials provided with the distribution. # | * Neither the name of the copyright holder nor the names of its contributors # | may be used to endorse or promote products derived from this software without # | specific prior written permission. # | # | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER # | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # | EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # | PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # | # # stderr_input based on raw_input from https://foss.heptapod.net/pypy/pypy # PyPy Copyright holders 2003-2020 # MIT Licenced # # stdlib import sys from typing import IO, Any, Callable, List, Mapping, Optional, Union, overload # 3rd party import click from click.termui import _build_prompt, hidden_prompt_func from click.types import Path, convert_type # this package from consolekit._types import _ConvertibleType from consolekit.utils import hidden_cursor, hide_cursor, show_cursor # noqa __all__ = [ "prompt", "confirm", "stderr_input", "choice", ] if not bool(getattr(sys, "ps1", sys.flags.interactive)): # pragma: no cover try: # stdlib import readline readline.set_history_length(0) readline.set_auto_history(False) except (ImportError, AttributeError): # Attribute error on PyPy, ImportError on Windows etc. pass def prompt( text: str, default: Optional[str] = None, hide_input: bool = False, confirmation_prompt: bool = False, type: Optional[_ConvertibleType] = None, # noqa: A002 # pylint: disable=redefined-builtin value_proc: Optional[Callable[[Optional[str]], Any]] = None, prompt_suffix: str = ": ", show_default: bool = True, err: bool = False, show_choices: bool = True, ): """ Prompts a user for input. If the user aborts the input by sending an interrupt signal, this function will catch it and raise a :exc:`click.Abort` exception. :param text: The text to show for the prompt. :param default: The default value to use if no input happens. If this is not given it will prompt until it is aborted. :param hide_input: If :py:obj:`True` then the input value will be hidden. :param confirmation_prompt: Asks for confirmation for the value. :param type: The type to check the value against. :param value_proc: If this parameter is provided it must be a function that is invoked instead of the type conversion to convert a value. 
:param prompt_suffix: A suffix that should be added to the prompt. :param show_default: Shows or hides the default value in the prompt. :param err: If :py:obj:`True` the file defaults to ``stderr`` instead of ``stdout``, the same as with :func:`click.echo`. :param show_choices: Show or hide choices if the passed type is a :class:`click.Choice`. For example, if the choice is either ``day`` or ``week``, ``show_choices`` is :py:obj:`True` and ``text`` is ``'Group by'`` then the prompt will be ``'Group by (day, week): '``. """ result = None # noqa def prompt_func(text): try: return _prompt(text, err=err, hide_input=hide_input) except (KeyboardInterrupt, EOFError): if hide_input: click.echo(None, err=err) raise click.Abort() if value_proc is None: value_proc = convert_type(type, default) prompt = _build_prompt(text, prompt_suffix, show_default, default, show_choices, type) # type: ignore while True: while True: value = prompt_func(prompt) if value: break elif default is not None: if isinstance(value_proc, Path): # validate Path default value (exists, dir_okay etc.) value = default break return default try: result = value_proc(value) except click.UsageError as e: click.echo(f"Error: {e.message}", err=err) # noqa: B306 continue if not confirmation_prompt: return result while True: value2 = prompt_func("Repeat for confirmation: ") if value2: break if value == value2: return result click.echo("Error: the two entered values do not match", err=err) def confirm( text: str, default: bool = False, abort: bool = False, prompt_suffix: str = ": ", show_default: bool = True, err: bool = False, ): """ Prompts for confirmation (yes/no question). If the user aborts the input by sending a interrupt signal this function will catch it and raise a :exc:`click.Abort` exception. :param text: The question to ask. :param default: The default for the prompt. :param abort: If :py:obj:`True` a negative answer aborts the exception by raising :exc:`click.Abort`. :param prompt_suffix: A suffix that should be added to the prompt. :param show_default: Shows or hides the default value in the prompt. :param err: If :py:obj:`True` the file defaults to ``stderr`` instead of ``stdout``, the same as with echo. """ prompt = _build_prompt(text, prompt_suffix, show_default, "Y/n" if default else "y/N") while True: try: value = _prompt(prompt, err=err, hide_input=False).lower().strip() except (KeyboardInterrupt, EOFError): raise click.Abort() if value in ('y', "yes"): rv = True elif value in ('n', "no"): rv = False elif value == '': rv = default else: click.echo("Error: invalid input", err=err) continue break if abort and not rv: raise click.Abort() return rv def s
prompt: str = '', file: IO = sys.stdout) -> str: # pragma: no cover """ Read a string from standard input, but prompt to standard error. The trailing newline is stripped. If the user hits EOF (Unix: :kbd:`Ctrl-D`, Windows: :kbd:`Ctrl-Z+Return`), raise :exc:`EOFError`. On Unix, GNU readline is used if enabled. The ``prompt`` string, if given, is printed to stderr without a trailing newline before reading. """ if file is sys.stdout: return input(prompt) try: stdin = sys.stdin except AttributeError: raise RuntimeError("stderr_input: lost sys.stdin") file.write(prompt) try: flush = file.flush except AttributeError: pass else: flush() try: file.softspace = 0 # type: ignore except (AttributeError, TypeError): pass line = stdin.readline() if not line: # inputting an empty line gives line == '\n' raise EOFError elif line[-1] == '\n': return line[:-1] return line def _prompt(text, err: bool, hide_input: bool): if sys.platform != "linux": # Write the prompt separately so that we get nice # coloring through colorama on Windows click.echo(text, nl=False, err=err) text = '' if hide_input: return hidden_prompt_func(text) elif err: return stderr_input(text, file=sys.stderr) else: return click.termui.visible_prompt_func(text) # type: ignore @overload def choice( options: List[str],
tderr_input(
identifier_name
input.py
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE # OR OTHER DEALINGS IN THE SOFTWARE. # # prompt and confirm based on https://github.com/pallets/click # Copyright 2014 Pallets # | Redistribution and use in source and binary forms, with or without modification, # | are permitted provided that the following conditions are met: # | # | * Redistributions of source code must retain the above copyright notice, # | this list of conditions and the following disclaimer. # | * Redistributions in binary form must reproduce the above copyright notice, # | this list of conditions and the following disclaimer in the documentation # | and/or other materials provided with the distribution. # | * Neither the name of the copyright holder nor the names of its contributors # | may be used to endorse or promote products derived from this software without # | specific prior written permission. # | # | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER # | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # | EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # | PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # | # # stderr_input based on raw_input from https://foss.heptapod.net/pypy/pypy # PyPy Copyright holders 2003-2020 # MIT Licenced # # stdlib import sys from typing import IO, Any, Callable, List, Mapping, Optional, Union, overload # 3rd party import click from click.termui import _build_prompt, hidden_prompt_func from click.types import Path, convert_type # this package from consolekit._types import _ConvertibleType from consolekit.utils import hidden_cursor, hide_cursor, show_cursor # noqa __all__ = [ "prompt", "confirm", "stderr_input", "choice", ] if not bool(getattr(sys, "ps1", sys.flags.interactive)): # pragma: no cover try: # stdlib import readline readline.set_history_length(0) readline.set_auto_history(False) except (ImportError, AttributeError): # Attribute error on PyPy, ImportError on Windows etc. pass def prompt( text: str, default: Optional[str] = None, hide_input: bool = False, confirmation_prompt: bool = False, type: Optional[_ConvertibleType] = None, # noqa: A002 # pylint: disable=redefined-builtin value_proc: Optional[Callable[[Optional[str]], Any]] = None, prompt_suffix: str = ": ", show_default: bool = True, err: bool = False, show_choices: bool = True, ): """ Prompts a user for input. If the user aborts the input by sending an interrupt signal, this function will catch it and raise a :exc:`click.Abort` exception. :param text: The text to show for the prompt. :param default: The default value to use if no input happens. If this is not given it will prompt until it is aborted. :param hide_input: If :py:obj:`True` then the input value will be hidden. :param confirmation_prompt: Asks for confirmation for the value. :param type: The type to check the value against. :param value_proc: If this parameter is provided it must be a function that is invoked instead of the type conversion to convert a value. 
:param prompt_suffix: A suffix that should be added to the prompt. :param show_default: Shows or hides the default value in the prompt. :param err: If :py:obj:`True` the file defaults to ``stderr`` instead of ``stdout``, the same as with :func:`click.echo`. :param show_choices: Show or hide choices if the passed type is a :class:`click.Choice`. For example, if the choice is either ``day`` or ``week``, ``show_choices`` is :py:obj:`True` and ``text`` is ``'Group by'`` then the prompt will be ``'Group by (day, week): '``. """ result = None # noqa def prompt_func(text): try: return _prompt(text, err=err, hide_input=hide_input) except (KeyboardInterrupt, EOFError): if hide_input: click.echo(None, err=err) raise click.Abort() if value_proc is None: value_proc = convert_type(type, default) prompt = _build_prompt(text, prompt_suffix, show_default, default, show_choices, type) # type: ignore while True: while True: value = prompt_func(prompt) if value: break elif default is not None: if isinstance(value_proc, Path): # validate Path default value (exists, dir_okay etc.) value = default break return default try: result = value_proc(value) except click.UsageError as e: click.echo(f"Error: {e.message}", err=err) # noqa: B306 continue if not confirmation_prompt: r
while True: value2 = prompt_func("Repeat for confirmation: ") if value2: break if value == value2: return result click.echo("Error: the two entered values do not match", err=err) def confirm( text: str, default: bool = False, abort: bool = False, prompt_suffix: str = ": ", show_default: bool = True, err: bool = False, ): """ Prompts for confirmation (yes/no question). If the user aborts the input by sending a interrupt signal this function will catch it and raise a :exc:`click.Abort` exception. :param text: The question to ask. :param default: The default for the prompt. :param abort: If :py:obj:`True` a negative answer aborts the exception by raising :exc:`click.Abort`. :param prompt_suffix: A suffix that should be added to the prompt. :param show_default: Shows or hides the default value in the prompt. :param err: If :py:obj:`True` the file defaults to ``stderr`` instead of ``stdout``, the same as with echo. """ prompt = _build_prompt(text, prompt_suffix, show_default, "Y/n" if default else "y/N") while True: try: value = _prompt(prompt, err=err, hide_input=False).lower().strip() except (KeyboardInterrupt, EOFError): raise click.Abort() if value in ('y', "yes"): rv = True elif value in ('n', "no"): rv = False elif value == '': rv = default else: click.echo("Error: invalid input", err=err) continue break if abort and not rv: raise click.Abort() return rv def stderr_input(prompt: str = '', file: IO = sys.stdout) -> str: # pragma: no cover """ Read a string from standard input, but prompt to standard error. The trailing newline is stripped. If the user hits EOF (Unix: :kbd:`Ctrl-D`, Windows: :kbd:`Ctrl-Z+Return`), raise :exc:`EOFError`. On Unix, GNU readline is used if enabled. The ``prompt`` string, if given, is printed to stderr without a trailing newline before reading. """ if file is sys.stdout: return input(prompt) try: stdin = sys.stdin except AttributeError: raise RuntimeError("stderr_input: lost sys.stdin") file.write(prompt) try: flush = file.flush except AttributeError: pass else: flush() try: file.softspace = 0 # type: ignore except (AttributeError, TypeError): pass line = stdin.readline() if not line: # inputting an empty line gives line == '\n' raise EOFError elif line[-1] == '\n': return line[:-1] return line def _prompt(text, err: bool, hide_input: bool): if sys.platform != "linux": # Write the prompt separately so that we get nice # coloring through colorama on Windows click.echo(text, nl=False, err=err) text = '' if hide_input: return hidden_prompt_func(text) elif err: return stderr_input(text, file=sys.stderr) else: return click.termui.visible_prompt_func(text) # type: ignore @overload def choice( options: List[str],
eturn result
conditional_block
input.py
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE # OR OTHER DEALINGS IN THE SOFTWARE. # # prompt and confirm based on https://github.com/pallets/click # Copyright 2014 Pallets # | Redistribution and use in source and binary forms, with or without modification, # | are permitted provided that the following conditions are met: # | # | * Redistributions of source code must retain the above copyright notice, # | this list of conditions and the following disclaimer. # | * Redistributions in binary form must reproduce the above copyright notice, # | this list of conditions and the following disclaimer in the documentation # | and/or other materials provided with the distribution. # | * Neither the name of the copyright holder nor the names of its contributors # | may be used to endorse or promote products derived from this software without # | specific prior written permission. # | # | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER # | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # | EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # | PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # | # # stderr_input based on raw_input from https://foss.heptapod.net/pypy/pypy # PyPy Copyright holders 2003-2020 # MIT Licenced # # stdlib import sys from typing import IO, Any, Callable, List, Mapping, Optional, Union, overload # 3rd party import click from click.termui import _build_prompt, hidden_prompt_func from click.types import Path, convert_type # this package from consolekit._types import _ConvertibleType from consolekit.utils import hidden_cursor, hide_cursor, show_cursor # noqa __all__ = [ "prompt", "confirm", "stderr_input", "choice", ] if not bool(getattr(sys, "ps1", sys.flags.interactive)): # pragma: no cover try: # stdlib import readline readline.set_history_length(0) readline.set_auto_history(False) except (ImportError, AttributeError): # Attribute error on PyPy, ImportError on Windows etc. pass def prompt( text: str, default: Optional[str] = None, hide_input: bool = False, confirmation_prompt: bool = False, type: Optional[_ConvertibleType] = None, # noqa: A002 # pylint: disable=redefined-builtin value_proc: Optional[Callable[[Optional[str]], Any]] = None, prompt_suffix: str = ": ", show_default: bool = True, err: bool = False, show_choices: bool = True, ): """ Prompts a user for input. If the user aborts the input by sending an interrupt signal, this function will catch it and raise a :exc:`click.Abort` exception. :param text: The text to show for the prompt. :param default: The default value to use if no input happens. If this is not given it will prompt until it is aborted. :param hide_input: If :py:obj:`True` then the input value will be hidden. :param confirmation_prompt: Asks for confirmation for the value. :param type: The type to check the value against. :param value_proc: If this parameter is provided it must be a function that is invoked instead of the type conversion to convert a value. 
:param prompt_suffix: A suffix that should be added to the prompt. :param show_default: Shows or hides the default value in the prompt. :param err: If :py:obj:`True` the file defaults to ``stderr`` instead of ``stdout``, the same as with :func:`click.echo`. :param show_choices: Show or hide choices if the passed type is a :class:`click.Choice`. For example, if the choice is either ``day`` or ``week``, ``show_choices`` is :py:obj:`True` and ``text`` is ``'Group by'`` then the prompt will be ``'Group by (day, week): '``. """ result = None # noqa def prompt_func(text): try: return _prompt(text, err=err, hide_input=hide_input) except (KeyboardInterrupt, EOFError): if hide_input: click.echo(None, err=err) raise click.Abort() if value_proc is None: value_proc = convert_type(type, default) prompt = _build_prompt(text, prompt_suffix, show_default, default, show_choices, type) # type: ignore while True: while True: value = prompt_func(prompt) if value: break elif default is not None: if isinstance(value_proc, Path): # validate Path default value (exists, dir_okay etc.) value = default break return default try: result = value_proc(value) except click.UsageError as e: click.echo(f"Error: {e.message}", err=err) # noqa: B306 continue if not confirmation_prompt: return result while True: value2 = prompt_func("Repeat for confirmation: ") if value2: break if value == value2: return result click.echo("Error: the two entered values do not match", err=err) def confirm( text: str, default: bool = False, abort: bool = False, prompt_suffix: str = ": ", show_default: bool = True, err: bool = False, ): """ Prompts for confirmation (yes/no question). If the user aborts the input by sending a interrupt signal this function will catch it and raise a :exc:`click.Abort` exception. :param text: The question to ask. :param default: The default for the prompt. :param abort: If :py:obj:`True` a negative answer aborts the exception by raising :exc:`click.Abort`. :param prompt_suffix: A suffix that should be added to the prompt. :param show_default: Shows or hides the default value in the prompt. :param err: If :py:obj:`True` the file defaults to ``stderr`` instead of ``stdout``, the same as with echo. """ prompt = _build_prompt(text, prompt_suffix, show_default, "Y/n" if default else "y/N") while True: try: value = _prompt(prompt, err=err, hide_input=False).lower().strip() except (KeyboardInterrupt, EOFError): raise click.Abort() if value in ('y', "yes"): rv = True elif value in ('n', "no"): rv = False elif value == '': rv = default else: click.echo("Error: invalid input", err=err) continue break if abort and not rv: raise click.Abort() return rv def stderr_input(prompt: str = '', file: IO = sys.stdout) -> str: # pragma: no cover """ Read a string from standard input, but prompt to standard error. The trailing newline is stripped. If the user hits EOF (Unix: :kbd:`Ctrl-D`, Windows: :kbd:`Ctrl-Z+Return`), raise :exc:`EOFError`. On Unix, GNU readline is used if enabled. The ``prompt`` string, if given, is printed to stderr without a trailing newline before reading. 
""" if file is sys.stdout: return input(prompt) try: stdin = sys.stdin except AttributeError: raise RuntimeError("stderr_input: lost sys.stdin") file.write(prompt) try: flush = file.flush except AttributeError: pass else: flush() try: file.softspace = 0 # type: ignore except (AttributeError, TypeError): pass line = stdin.readline() if not line: # inputting an empty line gives line == '\n' raise EOFError elif line[-1] == '\n': return line[:-1] return line def _prompt(text, err: bool, hide_input: bool): i
@overload def choice( options: List[str],
f sys.platform != "linux": # Write the prompt separately so that we get nice # coloring through colorama on Windows click.echo(text, nl=False, err=err) text = '' if hide_input: return hidden_prompt_func(text) elif err: return stderr_input(text, file=sys.stderr) else: return click.termui.visible_prompt_func(text) # type: ignore
identifier_body
input.py
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE # OR OTHER DEALINGS IN THE SOFTWARE. # # prompt and confirm based on https://github.com/pallets/click # Copyright 2014 Pallets # | Redistribution and use in source and binary forms, with or without modification, # | are permitted provided that the following conditions are met: # | # | * Redistributions of source code must retain the above copyright notice, # | this list of conditions and the following disclaimer. # | * Redistributions in binary form must reproduce the above copyright notice, # | this list of conditions and the following disclaimer in the documentation # | and/or other materials provided with the distribution. # | * Neither the name of the copyright holder nor the names of its contributors # | may be used to endorse or promote products derived from this software without # | specific prior written permission. # | # | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER # | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # | EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # | PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # | # # stderr_input based on raw_input from https://foss.heptapod.net/pypy/pypy # PyPy Copyright holders 2003-2020 # MIT Licenced # # stdlib import sys from typing import IO, Any, Callable, List, Mapping, Optional, Union, overload # 3rd party import click from click.termui import _build_prompt, hidden_prompt_func from click.types import Path, convert_type # this package from consolekit._types import _ConvertibleType from consolekit.utils import hidden_cursor, hide_cursor, show_cursor # noqa __all__ = [ "prompt", "confirm", "stderr_input", "choice", ] if not bool(getattr(sys, "ps1", sys.flags.interactive)): # pragma: no cover try: # stdlib import readline readline.set_history_length(0) readline.set_auto_history(False) except (ImportError, AttributeError): # Attribute error on PyPy, ImportError on Windows etc. pass def prompt( text: str, default: Optional[str] = None, hide_input: bool = False, confirmation_prompt: bool = False, type: Optional[_ConvertibleType] = None, # noqa: A002 # pylint: disable=redefined-builtin value_proc: Optional[Callable[[Optional[str]], Any]] = None, prompt_suffix: str = ": ", show_default: bool = True, err: bool = False, show_choices: bool = True, ): """ Prompts a user for input. If the user aborts the input by sending an interrupt signal, this function will catch it and raise a :exc:`click.Abort` exception. :param text: The text to show for the prompt. :param default: The default value to use if no input happens. If this is not given it will prompt until it is aborted. :param hide_input: If :py:obj:`True` then the input value will be hidden. :param confirmation_prompt: Asks for confirmation for the value. :param type: The type to check the value against. :param value_proc: If this parameter is provided it must be a function that is invoked instead of the type conversion to convert a value. 
:param prompt_suffix: A suffix that should be added to the prompt. :param show_default: Shows or hides the default value in the prompt. :param err: If :py:obj:`True` the file defaults to ``stderr`` instead of ``stdout``, the same as with :func:`click.echo`. :param show_choices: Show or hide choices if the passed type is a :class:`click.Choice`. For example, if the choice is either ``day`` or ``week``, ``show_choices`` is :py:obj:`True` and ``text`` is ``'Group by'`` then the prompt will be ``'Group by (day, week): '``. """ result = None # noqa def prompt_func(text): try: return _prompt(text, err=err, hide_input=hide_input) except (KeyboardInterrupt, EOFError): if hide_input: click.echo(None, err=err) raise click.Abort() if value_proc is None: value_proc = convert_type(type, default) prompt = _build_prompt(text, prompt_suffix, show_default, default, show_choices, type) # type: ignore while True: while True: value = prompt_func(prompt) if value: break elif default is not None: if isinstance(value_proc, Path): # validate Path default value (exists, dir_okay etc.) value = default break return default try: result = value_proc(value) except click.UsageError as e: click.echo(f"Error: {e.message}", err=err) # noqa: B306 continue if not confirmation_prompt: return result while True: value2 = prompt_func("Repeat for confirmation: ") if value2: break if value == value2: return result click.echo("Error: the two entered values do not match", err=err) def confirm( text: str, default: bool = False, abort: bool = False, prompt_suffix: str = ": ", show_default: bool = True, err: bool = False, ): """ Prompts for confirmation (yes/no question). If the user aborts the input by sending a interrupt signal this function will catch it and raise a :exc:`click.Abort` exception. :param text: The question to ask. :param default: The default for the prompt. :param abort: If :py:obj:`True` a negative answer aborts the exception by raising :exc:`click.Abort`. :param prompt_suffix: A suffix that should be added to the prompt. :param show_default: Shows or hides the default value in the prompt. :param err: If :py:obj:`True` the file defaults to ``stderr`` instead of ``stdout``, the same as with echo. """ prompt = _build_prompt(text, prompt_suffix, show_default, "Y/n" if default else "y/N") while True: try: value = _prompt(prompt, err=err, hide_input=False).lower().strip() except (KeyboardInterrupt, EOFError): raise click.Abort() if value in ('y', "yes"): rv = True elif value in ('n', "no"): rv = False elif value == '': rv = default else: click.echo("Error: invalid input", err=err) continue break if abort and not rv: raise click.Abort() return rv def stderr_input(prompt: str = '', file: IO = sys.stdout) -> str: # pragma: no cover """ Read a string from standard input, but prompt to standard error. The trailing newline is stripped. If the user hits EOF (Unix: :kbd:`Ctrl-D`, Windows: :kbd:`Ctrl-Z+Return`), raise :exc:`EOFError`. On Unix, GNU readline is used if enabled. The ``prompt`` string, if given, is printed to stderr without a trailing newline before reading. """ if file is sys.stdout: return input(prompt) try: stdin = sys.stdin except AttributeError: raise RuntimeError("stderr_input: lost sys.stdin") file.write(prompt) try: flush = file.flush except AttributeError: pass else: flush() try: file.softspace = 0 # type: ignore except (AttributeError, TypeError): pass line = stdin.readline() if not line: # inputting an empty line gives line == '\n'
return line[:-1] return line def _prompt(text, err: bool, hide_input: bool): if sys.platform != "linux": # Write the prompt separately so that we get nice # coloring through colorama on Windows click.echo(text, nl=False, err=err) text = '' if hide_input: return hidden_prompt_func(text) elif err: return stderr_input(text, file=sys.stderr) else: return click.termui.visible_prompt_func(text) # type: ignore @overload def choice( options: List[str], text
raise EOFError elif line[-1] == '\n':
random_line_split
main.go
rrg.redisHost = sss[0] rrg.redisPort = sss[1] } else { logger.Fatal("Redis-pretensor config error.") } // Create a new redis-pretensor connection pool redisPretensorPool = newPool(rrg.redisHost+":"+rrg.redisPort, 400) redisConnPretensor, err = redisPretensorPool.Dial() if err != nil { logger.Fatal("Could not connect to redis-pretensor Redis") } rd4 := redisconf{} if *d4 { // Check redis-d4 configuration // Parse Input Redis Config tmp = config.ReadConfigFile(*confdir, "redis_d4") ss = strings.Split(string(tmp), "/") if len(ss) <= 1 { logger.Println("Missing Database in redis_d4 input config: should be host:port/database_name -- Skipping") checkredisd4 = false } else { rd4.databasename = ss[1] ret, ss[0] = config.IsNet(ss[0]) if ret { sss := strings.Split(string(ss[0]), ":") rrg.redisHost = sss[0] rrg.redisPort = sss[1] } else { logger.Fatal("Redis-d4 config error.") } // Create a new redis-graph connection pool redisd4Pool = newPool(rrg.redisHost+":"+rrg.redisPort, 400) redisConn, err = redisd4Pool.Dial() if err != nil { logger.Fatal("Could not connect to d4 Redis") } // Get that the redis_d4_queue file redisd4Queue = string(config.ReadConfigFile(*confdir, "redis_d4_queue")) } } // Checking that the log folder exists log_folder := string(config.ReadConfigFile(*confdir, "folder")) _, err = os.ReadDir(log_folder) if err != nil { logger.Println(err) walk_folder = false } // Loading Requests to monitor tomonitor = config.ReadConfigFileLines(*confdir, "tomonitor") // Loading proxy list to remove from Hosts mitm = config.ReadConfigFileLines(*confdir, "mitm") // Init maps curls = make(map[string]string) bashs = make(map[string]*pretensorhit.PHit) binurls = make(map[string]*pretensorhit.PHit) // Init redis graph graph := rg.GraphNew("pretensor", redisConnPretensor) if *delete { graph.Delete() } // Create processing channels binchan = make(chan bindesc, 2000) bashchan = make(chan bindesc, 2000) // Unbuffered channel for the parser filechan = make(chan filedesc) wg.Add(3) // Launch the download routine go downloadBin(binchan, sortie) // Write no ELF files to files go writeBashs(bashchan, sortie) // Launch the Pretensor routine // Leaving the existing redis connection to pretensorParse go pretensorParse(filechan, sortie, &graph) // Walking folder err = filepath.Walk(log_folder, func(path string, info os.FileInfo, err error) error { filechan <- filedesc{path: path, info: info} if err != nil { return err } if *verbose { logger.Println(info.Name(), info.Size()) } return nil }) if checkredisd4 && *d4 { redisConnD4, err := redisd4Pool.Dial() if err != nil { logger.Fatal("Could not connect to d4 Redis") } if _, err := redisConnD4.Do("SELECT", rd4.databasename); err != nil { redisConnD4.Close() logger.Println(err) return } // Once the walk is over, we start listening to d4 to get new files rateLimiter := time.Tick(*rate) redisNormal: err = redisRead(redisConnD4, redisd4Queue) for { select { case <-rateLimiter: // Use the ratelimiter while the connection hangs logger.Println("Limited read") goto redisNormal case <-sortie: goto gtfo } } } //// Write curl commands to a file //for _, v := range curls { // f, err := os.OpenFile("./infected/curl.sh", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) // if err != nil { // logger.Println(err) // } // if _, err := f.Write([]byte(fmt.Sprintf("%v\n", v))); err != nil { // f.Close() // logger.Println(err) // } // if err := f.Close(); err != nil { // logger.Println(err) // } //} // Waiting for the binary handling routines wg.Wait() gtfo: logger.Println("Exiting") } func 
redisRead(redisConnD4 redis.Conn, redisd4Queue string) error { for { buf, err := redis.String(redisConnD4.Do("LPOP", redisd4Queue)) // If redis returns empty: EOF (the user should not stop) if err == redis.ErrNil { // no new record; we break until the tick return io.EOF // oops } else if err != nil { logger.Println(err) return err } fileinfo, err := os.Stat(buf) if err != nil { logger.Println(err) return err } filechan <- filedesc{path: buf, info: fileinfo} } } // Parsing whatever is thrown into filechan func
(filechan chan filedesc, sortie chan os.Signal, graph *rg.Graph) error { logger.Println("Entering pretensorparse") defer wg.Done() for { select { case file := <-filechan: if *debug { logger.Println(file.path) } info := file.info path := file.path if !info.IsDir() { content, err := os.ReadFile(path) if err != nil { return err } if len(content) == 0 { if *debug { logger.Println("Empty File: " + path) } break } // Load JSON contents := string(content) if !gj.Valid(contents) { if *debug { logger.Println("Invalid json: " + path) } break } // For each request to monitor for _, v := range tomonitor { request := gj.Get(contents, "request.request_line") if strings.Contains(request.String(), string(v)) { // We are in a file of interest tmp := new(pretensorhit.PHit) tmp.SetTimestamp(gj.Get(contents, "transaction.time")) tmp.SetIp(gj.Get(contents, "transaction.remote_address")) tmp.SetLine(gj.Get(contents, "request.request_line")) tmp.SetReferer(gj.Get(contents, "request.headers.Referer")) tmp.SetUseragent(gj.Get(contents, "request.headers.User-Agent")) tmp.SetStatus(gj.Get(contents, "response.status")) tmp.SetBody(gj.Get(contents, "response.body")) tmp.SetContenttype(gj.Get(contents, "response.headers.Content-Type")) tmp.SetLength(gj.Get(contents, "response.headers.Content-Length")) tmp.SetHost(removeMitm(gj.Get(contents, "request.headers.Host"))) // Complete the graph // Create bot if not exist query := `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"}) RETURN b.ip, b.firstseen, b.lastseen` result, err := graph.Query(query) if err != nil { fmt.Println(err) fmt.Println(result) } if result.Empty() { if *debug { logger.Println(tmp.GetBotNode()) } graph.AddNode(tmp.GetBotNode()) _, err := graph.Flush() if err != nil { fmt.Println(err) } // Update Firstseen / Lastseen if already seen } else { result.Next() r := result.Record() fsstr, _ := r.Get("b.firstseen") lsstr, _ := r.Get("b.lastseen") fs, _ := time.Parse("02/Jan/2006:15:04:05 -0700", fmt.Sprint(fsstr)) ls, _ := time.Parse("02/Jan/2006:15:04:05 -0700", fmt.Sprint(lsstr)) if tmp.GetParsedTimeStamp().Before(fs) { query = `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"}) SET b.firstseen="` + tmp.GetTimestamp() + `"` result, err = graph.Query(query) if err != nil { fmt.Println(err) } } if tmp.GetParsedTimeStamp().After(ls) { query = `
pretensorParse
identifier_name
main.go
} } if tmp.GetParsedTimeStamp().After(ls) { query = `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"}) SET b.lastseen="` + tmp.GetTimestamp() + `"` result, err = graph.Query(query) if err != nil { fmt.Println(err) } } } // Create CC if not exist query = `MATCH (c:CC {host:"` + tmp.GetHost() + `"}) RETURN c.host, c.firstseen, c.lastseen` result, err = graph.Query(query) if err != nil { fmt.Println(err) } if result.Empty() { graph.AddNode(tmp.GetCCNode()) _, err := graph.Flush() if err != nil { fmt.Println(err) } // Update Firstseen / Lastseen if already seen } else { result.Next() r := result.Record() fsstr, _ := r.Get("c.firstseen") lsstr, _ := r.Get("c.lastseen") fs, _ := time.Parse("02/Jan/2006:15:04:05 -0700", fmt.Sprint(fsstr)) ls, _ := time.Parse("02/Jan/2006:15:04:05 -0700", fmt.Sprint(lsstr)) if tmp.GetParsedTimeStamp().Before(fs) { query = `MATCH (c:CC {host:"` + tmp.GetHost() + `"}) SET c.firstseen="` + tmp.GetTimestamp() + `"` result, err = graph.Query(query) if err != nil { fmt.Println(err) } } if tmp.GetParsedTimeStamp().After(ls) { query = `MATCH (c:CC {host:"` + tmp.GetHost() + `"}) SET c.lastseen="` + tmp.GetTimestamp() + `"` result, err = graph.Query(query) if err != nil { fmt.Println(err) } } } // Use Merge to create the relationship between the bot and the CC query = `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"}) MATCH (c:CC {host:"` + tmp.GetHost() + `"}) MERGE (b)-[r:reach {name: "reach"}]->(c)` result, err = graph.Query(query) if err != nil { fmt.Println(err) } // If the bot downloaded a binary if tmp.GetContenttype() == "application/octet-stream" && tmp.GetStatus() == "200" { // Logging all bash scripts and curl commands to download binaries if strings.Contains(fmt.Sprintf("%v", tmp.GetBody()), "ELF") { //tmpsha256 := sha256.Sum256([]byte(tmp.Curl())) //curls[fmt.Sprintf("%x", tmpsha256)] = tmp.Curl() binchan <- bindesc{url: tmp.GetBinurl(), phit: tmp} } else { bashchan <- bindesc{url: tmp.GetBinurl(), phit: tmp} } // Create binary if not exist query := `MATCH (bin` + tmp.GetBinaryMatchQuerySelector() + `) RETURN bin` // The following is causing a panic -- it looks like a redigo issue //query := `MATCH (bin:Binary` + tmp.GetBinaryMatchQuerySelector() + `) RETURN bin` qres, err := graph.Query(query) if err != nil { fmt.Println(err) } if qres.Empty() { //fmt.Println("Add binary: "+tmp.GetBinaryMatchQuerySelector()) graph.AddNode(tmp.GetBinaryNode()) _, err := graph.Flush() if err != nil { fmt.Println(err) } } // Use Merge to create the relationship bot, binaries and CC query = `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"}) MATCH (c:CC {host:"` + tmp.GetHost() + `"}) MATCH (bin:Binary ` + tmp.GetBinaryMatchQuerySelector() + `) MERGE (b)-[d:download {name: "download"}]->(bin) MERGE (c)-[h:host {name: "host"}]->(bin)` result, err = graph.Query(query) if err != nil { fmt.Println(err) } } // Bot set a referer command if tmp.GetCmdRawCommand() != "" { // First we update what we know about this bot query = `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"}) MATCH (c:CC {host:"` + tmp.GetHost() + `"}) SET b.user="` + tmp.GetCmdUser() + `" SET b.hostname="` + tmp.GetCmdHostname() + `" SET b.fingerprint="` + tmp.GetCmdFingerprint() + `" SET b.architecture="` + tmp.GetCmdArchitecture() + `"` result, err = graph.Query(query) if err != nil { fmt.Println(err) } // Then we create a command node for this command query = `MATCH (c:Command {rawcontent:"` + tmp.GetCmdRawCommand() + `"}) RETURN c.content` result, err = graph.Query(query) if err != nil { fmt.Println(err) } if result.Empty() { 
graph.AddNode(tmp.GetCommandNode()) _, err := graph.Flush() if err != nil { fmt.Println(err) } } // Finally we tie the Bot and the issued Command query = `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"}) MATCH (c:CC {host:"` + tmp.GetHost() + `"}) MATCH (co:Command {rawcontent:"` + tmp.GetCmdRawCommand() + `"}) MERGE (b)-[e:execute {name: "execute"}]->(co) MERGE (c)-[l:launch {name: "launch"}]->(co)` result, err = graph.Query(query) if err != nil { fmt.Println(err) } } if *verbose { fmt.Println(tmp) } // We have handled the request break } } } case <-sortie: return nil } } } // Write bash scripts to files func writeBashs(bc chan bindesc, sortie chan os.Signal) error { defer wg.Done() for { select { case v := <-bc: var err error redisGR, err = redisPretensorPool.Dial() if err != nil { logger.Fatal("Could not connect routine to pretensor Redis") } graphGR := rg.GraphNew("pretensor", redisGR) if _, ok := bashs[v.url]; !ok { if *debug { logger.Println("Writing " + v.url) } // Set the SHA-256 hash on the object if s, err := strconv.Atoi(v.phit.GetLength()); err == nil && s > 0 { tmpsha256 := sha256.Sum256([]byte(v.phit.GetBody())) v.phit.SetSha256(fmt.Sprintf("%x", tmpsha256)) // Add binary's hash to the graph query := `MATCH (b:Bot {ip:"` + v.phit.GetIp() + `"}) MATCH (c:CC {host:"` + v.phit.GetHost() + `"}) MATCH (bin:Binary ` + v.phit.GetBinaryMatchQuerySelector() + `) MERGE (b)-[d:download {name: "download"}]->(bin) MERGE (c)-[h:host {name: "host"}]->(bin) ON MATCH SET bin.sha256 = "` + v.phit.GetSha256() + `"` result, err := graphGR.Query(query) if err != nil { logger.Println(err) } if *debug { logger.Println(query) logger.Println(result) } err = os.WriteFile("./infected_bash/"+v.phit.GetSha256(), []byte(v.phit.GetBody()), 0644) if err != nil { logger.Println(err) } } // Update the binbash map bashs[v.phit.GetBinurl()] = v.phit } else if *debug { logger.Println("Skipping bash " + v.url) } case <-sortie: return nil } } } // Gathering Binaries ourselves func downloadBin(phitchan chan bindesc, sortie chan os.Signal) error { defer wg.Done() downloading: for { select { case vi := <-phitchan: // Check whether we already touched it
if _, ok := binurls[vi.url]; !ok { //do something here
random_line_split
main.go
g.redisHost = sss[0] rrg.redisPort = sss[1] } else { logger.Fatal("Redis-pretensor config error.") } // Create a new redis-pretensor connection pool redisPretensorPool = newPool(rrg.redisHost+":"+rrg.redisPort, 400) redisConnPretensor, err = redisPretensorPool.Dial() if err != nil { logger.Fatal("Could not connect to redis-pretensor Redis") } rd4 := redisconf{} if *d4 { // Check redis-d4 configuration // Parse Input Redis Config tmp = config.ReadConfigFile(*confdir, "redis_d4") ss = strings.Split(string(tmp), "/") if len(ss) <= 1 { logger.Println("Missing Database in redis_d4 input config: should be host:port/database_name -- Skipping") checkredisd4 = false } else { rd4.databasename = ss[1] ret, ss[0] = config.IsNet(ss[0]) if ret { sss := strings.Split(string(ss[0]), ":") rrg.redisHost = sss[0] rrg.redisPort = sss[1] } else { logger.Fatal("Redis-d4 config error.") } // Create a new redis-graph connection pool redisd4Pool = newPool(rrg.redisHost+":"+rrg.redisPort, 400) redisConn, err = redisd4Pool.Dial() if err != nil { logger.Fatal("Could not connect to d4 Redis") } // Get that the redis_d4_queue file redisd4Queue = string(config.ReadConfigFile(*confdir, "redis_d4_queue")) } } // Checking that the log folder exists log_folder := string(config.ReadConfigFile(*confdir, "folder")) _, err = os.ReadDir(log_folder) if err != nil { logger.Println(err) walk_folder = false } // Loading Requests to monitor tomonitor = config.ReadConfigFileLines(*confdir, "tomonitor") // Loading proxy list to remove from Hosts mitm = config.ReadConfigFileLines(*confdir, "mitm") // Init maps curls = make(map[string]string) bashs = make(map[string]*pretensorhit.PHit) binurls = make(map[string]*pretensorhit.PHit) // Init redis graph graph := rg.GraphNew("pretensor", redisConnPretensor) if *delete { graph.Delete() } // Create processing channels binchan = make(chan bindesc, 2000) bashchan = make(chan bindesc, 2000) // Unbuffered channel for the parser filechan = make(chan filedesc) wg.Add(3) // Launch the download routine go downloadBin(binchan, sortie) // Write no ELF files to files go writeBashs(bashchan, sortie) // Launch the Pretensor routine // Leaving the existing redis connection to pretensorParse go pretensorParse(filechan, sortie, &graph) // Walking folder err = filepath.Walk(log_folder, func(path string, info os.FileInfo, err error) error { filechan <- filedesc{path: path, info: info} if err != nil { return err } if *verbose { logger.Println(info.Name(), info.Size()) } return nil }) if checkredisd4 && *d4 { redisConnD4, err := redisd4Pool.Dial() if err != nil { logger.Fatal("Could not connect to d4 Redis") } if _, err := redisConnD4.Do("SELECT", rd4.databasename); err != nil { redisConnD4.Close() logger.Println(err) return } // Once the walk is over, we start listening to d4 to get new files rateLimiter := time.Tick(*rate) redisNormal: err = redisRead(redisConnD4, redisd4Queue) for { select { case <-rateLimiter: // Use the ratelimiter while the connection hangs logger.Println("Limited read") goto redisNormal case <-sortie: goto gtfo } } } //// Write curl commands to a file //for _, v := range curls { // f, err := os.OpenFile("./infected/curl.sh", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) // if err != nil { // logger.Println(err) // } // if _, err := f.Write([]byte(fmt.Sprintf("%v\n", v))); err != nil { // f.Close() // logger.Println(err) // } // if err := f.Close(); err != nil { // logger.Println(err) // } //} // Waiting for the binary handling routines wg.Wait() gtfo: logger.Println("Exiting") } func 
redisRead(redisConnD4 redis.Conn, redisd4Queue string) error { for { buf, err := redis.String(redisConnD4.Do("LPOP", redisd4Queue)) // If redis return empty: EOF (user should not stop) if err == redis.ErrNil { // no new record we break until the tick return io.EOF // oops } else if err != nil { logger.Println(err) return err } fileinfo, err := os.Stat(buf) if err != nil { logger.Println(err) return err } filechan <- filedesc{path: buf, info: fileinfo} } } // Parsing whatever is thrown into filechan func pretensorParse(filechan chan filedesc, sortie chan os.Signal, graph *rg.Graph) error { logger.Println("Entering pretensorparse") defer wg.Done() for { select { case file := <-filechan: if *debug { logger.Println(file.path) } info := file.info path := file.path if !info.IsDir() { content, err := os.ReadFile(path) if err != nil { return err } if len(content) == 0 { if *debug { logger.Println("Empty File: " + path) } break } // Load JSON contents := string(content) if !gj.Valid(contents) { if *debug { logger.Println("Invalid json: " + path) } break } // For each request to monitor for _, v := range tomonitor { request := gj.Get(contents, "request.request_line") if strings.Contains(request.String(), string(v)) { // We are in a file of interest tmp := new(pretensorhit.PHit) tmp.SetTimestamp(gj.Get(contents, "transaction.time")) tmp.SetIp(gj.Get(contents, "transaction.remote_address")) tmp.SetLine(gj.Get(contents, "request.request_line")) tmp.SetReferer(gj.Get(contents, "request.headers.Referer")) tmp.SetUseragent(gj.Get(contents, "request.headers.User-Agent")) tmp.SetStatus(gj.Get(contents, "response.status")) tmp.SetBody(gj.Get(contents, "response.body")) tmp.SetContenttype(gj.Get(contents, "response.headers.Content-Type")) tmp.SetLength(gj.Get(contents, "response.headers.Content-Length")) tmp.SetHost(removeMitm(gj.Get(contents, "request.headers.Host"))) // Complete the graph // Create bot if not exist query := `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"}) RETURN b.ip, b.firstseen, b.lastseen` result, err := graph.Query(query) if err != nil { fmt.Println(err) fmt.Println(result) } if result.Empty() { if *debug
graph.AddNode(tmp.GetBotNode()) _, err := graph.Flush() if err != nil { fmt.Println(err) } // Update Firstseen / Lastseen if already seen } else { result.Next() r := result.Record() fsstr, _ := r.Get("b.firstseen") lsstr, _ := r.Get("b.lastseen") fs, _ := time.Parse("02/Jan/2006:15:04:05 -0700", fmt.Sprint(fsstr)) ls, _ := time.Parse("02/Jan/2006:15:04:05 -0700", fmt.Sprint(lsstr)) if tmp.GetParsedTimeStamp().Before(fs) { query = `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"}) SET b.firstseen="` + tmp.GetTimestamp() + `"` result, err = graph.Query(query) if err != nil { fmt.Println(err) } } if tmp.GetParsedTimeStamp().After(ls) { query = `
{ logger.Println(tmp.GetBotNode()) }
conditional_block
main.go
g.redisHost = sss[0] rrg.redisPort = sss[1] } else { logger.Fatal("Redis-pretensor config error.") } // Create a new redis-pretensor connection pool redisPretensorPool = newPool(rrg.redisHost+":"+rrg.redisPort, 400) redisConnPretensor, err = redisPretensorPool.Dial() if err != nil { logger.Fatal("Could not connect to redis-pretensor Redis") } rd4 := redisconf{} if *d4 { // Check redis-d4 configuration // Parse Input Redis Config tmp = config.ReadConfigFile(*confdir, "redis_d4") ss = strings.Split(string(tmp), "/") if len(ss) <= 1 { logger.Println("Missing Database in redis_d4 input config: should be host:port/database_name -- Skipping") checkredisd4 = false } else { rd4.databasename = ss[1] ret, ss[0] = config.IsNet(ss[0]) if ret { sss := strings.Split(string(ss[0]), ":") rrg.redisHost = sss[0] rrg.redisPort = sss[1] } else { logger.Fatal("Redis-d4 config error.") } // Create a new redis-d4 connection pool redisd4Pool = newPool(rrg.redisHost+":"+rrg.redisPort, 400) redisConn, err = redisd4Pool.Dial() if err != nil { logger.Fatal("Could not connect to d4 Redis") } // Get the redis_d4_queue file redisd4Queue = string(config.ReadConfigFile(*confdir, "redis_d4_queue")) } } // Checking that the log folder exists log_folder := string(config.ReadConfigFile(*confdir, "folder")) _, err = os.ReadDir(log_folder) if err != nil { logger.Println(err) walk_folder = false } // Loading Requests to monitor tomonitor = config.ReadConfigFileLines(*confdir, "tomonitor") // Loading proxy list to remove from Hosts mitm = config.ReadConfigFileLines(*confdir, "mitm") // Init maps curls = make(map[string]string) bashs = make(map[string]*pretensorhit.PHit) binurls = make(map[string]*pretensorhit.PHit) // Init redis graph graph := rg.GraphNew("pretensor", redisConnPretensor) if *delete { graph.Delete() } // Create processing channels binchan = make(chan bindesc, 2000) bashchan = make(chan bindesc, 2000) // Unbuffered channel for the parser filechan = make(chan filedesc) wg.Add(3) // Launch the download routine go downloadBin(binchan, sortie) // Write non-ELF files to disk go writeBashs(bashchan, sortie) // Launch the Pretensor routine // Leaving the existing redis connection to pretensorParse go pretensorParse(filechan, sortie, &graph) // Walking folder err = filepath.Walk(log_folder, func(path string, info os.FileInfo, err error) error { filechan <- filedesc{path: path, info: info} if err != nil { return err } if *verbose { logger.Println(info.Name(), info.Size()) } return nil }) if checkredisd4 && *d4 { redisConnD4, err := redisd4Pool.Dial() if err != nil { logger.Fatal("Could not connect to d4 Redis") } if _, err := redisConnD4.Do("SELECT", rd4.databasename); err != nil { redisConnD4.Close() logger.Println(err) return } // Once the walk is over, we start listening to d4 to get new files rateLimiter := time.Tick(*rate) redisNormal: err = redisRead(redisConnD4, redisd4Queue) for { select { case <-rateLimiter: // Use the ratelimiter while the connection hangs logger.Println("Limited read") goto redisNormal case <-sortie: goto gtfo } } } //// Write curl commands to a file //for _, v := range curls { // f, err := os.OpenFile("./infected/curl.sh", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) // if err != nil { // logger.Println(err) // } // if _, err := f.Write([]byte(fmt.Sprintf("%v\n", v))); err != nil { // f.Close() // logger.Println(err) // } // if err := f.Close(); err != nil { // logger.Println(err) // } //} // Waiting for the binary handling routines wg.Wait() gtfo: logger.Println("Exiting") } func 
redisRead(redisConnD4 redis.Conn, redisd4Queue string) error
// Parsing whatever is thrown into filechan func pretensorParse(filechan chan filedesc, sortie chan os.Signal, graph *rg.Graph) error { logger.Println("Entering pretensorparse") defer wg.Done() for { select { case file := <-filechan: if *debug { logger.Println(file.path) } info := file.info path := file.path if !info.IsDir() { content, err := os.ReadFile(path) if err != nil { return err } if len(content) == 0 { if *debug { logger.Println("Empty File: " + path) } break } // Load JSON contents := string(content) if !gj.Valid(contents) { if *debug { logger.Println("Invalid json: " + path) } break } // For each request to monitor for _, v := range tomonitor { request := gj.Get(contents, "request.request_line") if strings.Contains(request.String(), string(v)) { // We are in a file of interest tmp := new(pretensorhit.PHit) tmp.SetTimestamp(gj.Get(contents, "transaction.time")) tmp.SetIp(gj.Get(contents, "transaction.remote_address")) tmp.SetLine(gj.Get(contents, "request.request_line")) tmp.SetReferer(gj.Get(contents, "request.headers.Referer")) tmp.SetUseragent(gj.Get(contents, "request.headers.User-Agent")) tmp.SetStatus(gj.Get(contents, "response.status")) tmp.SetBody(gj.Get(contents, "response.body")) tmp.SetContenttype(gj.Get(contents, "response.headers.Content-Type")) tmp.SetLength(gj.Get(contents, "response.headers.Content-Length")) tmp.SetHost(removeMitm(gj.Get(contents, "request.headers.Host"))) // Complete the graph // Create the bot if it does not exist query := `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"}) RETURN b.ip, b.firstseen, b.lastseen` result, err := graph.Query(query) if err != nil { fmt.Println(err) fmt.Println(result) } if result.Empty() { if *debug { logger.Println(tmp.GetBotNode()) } graph.AddNode(tmp.GetBotNode()) _, err := graph.Flush() if err != nil { fmt.Println(err) } // Update Firstseen / Lastseen if already seen } else { result.Next() r := result.Record() fsstr, _ := r.Get("b.firstseen") lsstr, _ := r.Get("b.lastseen") fs, _ := time.Parse("02/Jan/2006:15:04:05 -0700", fmt.Sprint(fsstr)) ls, _ := time.Parse("02/Jan/2006:15:04:05 -0700", fmt.Sprint(lsstr)) if tmp.GetParsedTimeStamp().Before(fs) { query = `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"}) SET b.firstseen="` + tmp.GetTimestamp() + `"` result, err = graph.Query(query) if err != nil { fmt.Println(err) } } if tmp.GetParsedTimeStamp().After(ls) { query = `
{ for { buf, err := redis.String(redisConnD4.Do("LPOP", redisd4Queue)) // If redis returns empty: EOF (the caller should not stop) if err == redis.ErrNil { // no new record; wait until the next tick return io.EOF } else if err != nil { logger.Println(err) return err } fileinfo, err := os.Stat(buf) if err != nil { logger.Println(err) return err } filechan <- filedesc{path: buf, info: fileinfo} } }
identifier_body
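Editor's note: the two main.go rows above revolve around one consumption pattern: LPOP entries from a Redis list until redis.ErrNil, surface the empty list as io.EOF, and let a ticker pace retries. Below is a minimal, self-contained sketch of that pattern with the redigo client the code appears to use; the address 127.0.0.1:6379 and the queue name "d4:queue" are assumptions for illustration.

package main

import (
	"io"
	"log"
	"time"

	"github.com/gomodule/redigo/redis"
)

// drainQueue pops entries until the list is empty. Like redisRead above,
// it reports the empty list (redis.ErrNil) as io.EOF so the caller knows
// to wait for the next tick instead of treating it as a failure.
func drainQueue(conn redis.Conn, queue string) error {
	for {
		entry, err := redis.String(conn.Do("LPOP", queue))
		if err == redis.ErrNil {
			return io.EOF // queue drained; retry on the next tick
		} else if err != nil {
			return err
		}
		log.Println("popped:", entry)
	}
}

func main() {
	conn, err := redis.Dial("tcp", "127.0.0.1:6379") // assumed local Redis
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	for range time.Tick(time.Second) { // paces retries, like rateLimiter above
		if err := drainQueue(conn, "d4:queue"); err != nil && err != io.EOF {
			log.Fatal(err)
		}
	}
}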
trixer.py
.table = [] def generateFontStrip(self): if self.fontName is not None: base = Image.new('RGBA', (self.charNumber*self.blockWidth,self.blockHeight), (255,255,255,255)) txt = Image.new('RGBA', base.size, (255,255,255,0)) fnt = ImageFont.truetype(("lumitables/" + self.fontName + '.ttf'), self.fontSize) d = ImageDraw.Draw(txt) for num in range(self.charRange[0],self.charRange[1]): pos = (num-self.charRange[0])*self.blockWidth d.text((pos,0), chr(num), font=fnt, fill=(0,0,0,255)) out = Image.alpha_composite(base, txt) # write the strip to a PNG file out.save(("lumitables/" + self.fontName + ".png"), "PNG") v_print(2,"Fontstrip generated!") def generateLuminanceTable(self): for block in xrange(0,self.charNumber): self.table.append([block+self.charRange[0],calcBlockLuminance("lumitables/" + self.fontName + ".png",block,0,self)]) self.table.sort(key=getKey1) with open("lumitables/" + self.fontName + ".lut", 'wb') as f: pickle.dump(self, f) v_print(2,"Lumitable generated!") ### IMAGETABLE CLASS ### class imagetable: def __init__(self,file,lumitable,colorMode): self.file = file self.image = Image.open(file) self.xBlocks = math.floor(self.image.size[0].__float__() / lumitable.blockWidth.__float__()).__int__() self.yBlocks = math.floor(self.image.size[1].__float__() / lumitable.blockHeight.__float__()).__int__() self.lumitable = lumitable self.colorMode = colorMode self.table = [] if colorMode == "colors": ##### FIXME self.colorTable = [[0 for x in range(self.xBlocks*3)] for x in range(self.yBlocks*3)] ready = 0.0 total = self.xBlocks*self.yBlocks for x in range(0,self.xBlocks): for y in range(0,self.yBlocks): luminance = calcBlockLuminance(self.file,x,y,self.lumitable) if colorMode == "colors": self.colorTable[x][y] = self.calcColorAverage(x,y,self.lumitable) found = False for i in self.table: if i[0] == luminance: i[1].append((x,y)) found = True break if not found: self.table.append([luminance,[(x,y)]]) ready += 1.0 v_print(2,"Generating imagetable: {0:.2f}%".format((ready/total)*100.0)) self.table.sort(key=getKey0) v_print(2,"Imagetable generated!") def calcColorAverage(self,blockx,blocky,lumitable): im = Image.open(self.file) px = im.load() red = 0 green = 0 blue = 0 for x in xrange(blockx*lumitable.blockWidth,(blockx*lumitable.blockWidth)+lumitable.blockWidth): for y in xrange(blocky*lumitable.blockHeight,(blocky*lumitable.blockHeight)+lumitable.blockHeight): red += px[x,y][0] green += px[x,y][1] blue += px[x,y][2] red = red / (lumitable.blockHeight*lumitable.blockWidth) green = green / (lumitable.blockHeight*lumitable.blockWidth) blue = blue / (lumitable.blockHeight*lumitable.blockWidth) return (red,green,blue) ### Trix Class ### class trix: def __init__(self,name,lumi,imagetb): self.name = name self.lumitable = lumi self.imagetable = imagetb self.imagetable.table.reverse() self.image = Image.new('RGBA', (self.imagetable.xBlocks*self.lumitable.blockWidth,self.imagetable.yBlocks*self.lumitable.blockHeight), (255,255,255,255)) self.blockPerChar = math.ceil(len(self.imagetable.table).__float__() / len(self.lumitable.table).__float__()) self.trixtable = [] def generateTrixtable(self): trixindex = -1 ready = 0.0 total = len(self.lumitable.table) for i in self.lumitable.table: trixindex += 1 self.trixtable.append([i[0],[]]) for n in range(0,self.blockPerChar.__int__()): if(len(self.imagetable.table)>0): self.trixtable[trixindex][1].append((self.imagetable.table.pop()[1])) else: break ready += 1.0 v_print(2,"Generating trixtable: {0:.2f}%".format((ready/total)*100.0)) v_print(2,"Trixtable generated!") 
def printTrix(self,output): txt = Image.new('RGBA', self.image.size, (255,255,255,0)) fnt = ImageFont.truetype("lumitables/"+self.lumitable.fontName+".ttf", self.lumitable.fontSize) d = ImageDraw.Draw(txt) for currtrix in self.trixtable: if len(currtrix) > 0: for i in range(0,self.blockPerChar.__int__()): if len(currtrix[1]) > i: for tuple in currtrix[1][i]: x = tuple[0] * self.lumitable.blockWidth y = tuple[1] * self.lumitable.blockHeight if self.imagetable.colorMode == "colors": red = self.imagetable.colorTable[tuple[0]][tuple[1]][0] green = self.imagetable.colorTable[tuple[0]][tuple[1]][1] blue = self.imagetable.colorTable[tuple[0]][tuple[1]][2] d.text((x,y), chr(currtrix[0]), font=fnt, fill=(red,green,blue,255)) else: d.text((x,y), chr(currtrix[0]), font=fnt, fill=(0,0,0,255)) out = Image.alpha_composite(self.image, txt) out.save("output/" + output) v_print(2,"Trix saved!") ### Default Configs ### class configs(): def __init__(self): # Open defaults.cfg if it exists or create a new one if it doesn't. ConfigPrs = ConfigParser.ConfigParser() if os.path.isfile("defaults.cfg"): ConfigPrs.read("defaults.cfg") else: cfgfile = open("defaults.cfg",'w') # add the settings to the structure of the file, and let's write it out... ConfigPrs.add_section('Defaults') ConfigPrs.set('Defaults','lumitable','courier.lut') ConfigPrs.set('Defaults','colorMode', 'colors') ConfigPrs.set('Defaults','verbosity', "1") # 0 = nothing / 1 = errors / 2 = info ConfigPrs.write(cfgfile) cfgfile.close() # -/ self.input = "" self.output = "" self.lumitable = ConfigPrs.get("Defaults","lumitable") self.colorMode = ConfigPrs.get("Defaults","colorMode") self.verbosity = int(ConfigPrs.get("Defaults","verbosity")) ### --------------- ### ### MAIN FUNCTION ### def main(): ### Arguments parsing ### # Configure and parse the command line parameters. parser = argparse.ArgumentParser(description='Creates a number matrix based on an image file.') parser.add_argument('-i','--input',help="Input file pathname.",required=True) parser.add_argument('-o','--output',help="Output file pathname.",required=True) parser.add_argument('-l','--lumitable',help="Lumitable name.",required=False) parser.add_argument('-c','--colorMode',help="Color mode (bw/colors).",required=False) parser.add_argument('-v','--verbosity',help="Controls how much information the program will print.\n0 = none | 1 = errors | 2 = errors and info", required=False) args = parser.parse_args() ### ----------------- ### if not os.path.isfile(args.input): v_print(1,"EXITING: Input file not found!") sys.exit(-1) else: conf.input = args.input # TODO: solve permission problems on Windows if os.path.isfile("output/" + args.output): o
p = raw_input("Output file already exists. Overwrite existing file? (Y/N)") if(op == "n" or op == "N"): v_print(1,"EXITING: Process canceled. Output file already exists.") sys.exit(-1)
conditional_block
trixer.py
umitable.blockHeight*lumitable.blockWidth) return luminance ### LUMITABLE CLASS ### class lumitable: def __init__(self,fontname,fontsize,range,blockheight,blockwidth): self.fontName = fontname self.fontSize = fontsize self.charRange = range self.charNumber = range[1] - range[0] self.blockHeight = blockheight self.blockWidth = blockwidth self.table = [] def generateFontStrip(self): if self.fontName is not None: base = Image.new('RGBA', (self.charNumber*self.blockWidth,self.blockHeight), (255,255,255,255)) txt = Image.new('RGBA', base.size, (255,255,255,0)) fnt = ImageFont.truetype(("lumitables/" + self.fontName + '.ttf'), self.fontSize) d = ImageDraw.Draw(txt) for num in range(self.charRange[0],self.charRange[1]): pos = (num-self.charRange[0])*self.blockWidth d.text((pos,0), chr(num), font=fnt, fill=(0,0,0,255)) out = Image.alpha_composite(base, txt) # write the strip to a PNG file out.save(("lumitables/" + self.fontName + ".png"), "PNG") v_print(2,"Fontstrip generated!") def generateLuminanceTable(self): for block in xrange(0,self.charNumber): self.table.append([block+self.charRange[0],calcBlockLuminance("lumitables/" + self.fontName + ".png",block,0,self)]) self.table.sort(key=getKey1) with open("lumitables/" + self.fontName + ".lut", 'wb') as f: pickle.dump(self, f) v_print(2,"Lumitable generated!") ### IMAGETABLE CLASS ### class imagetable: def __init__(self,file,lumitable,colorMode): self.file = file self.image = Image.open(file) self.xBlocks = math.floor(self.image.size[0].__float__() / lumitable.blockWidth.__float__()).__int__() self.yBlocks = math.floor(self.image.size[1].__float__() / lumitable.blockHeight.__float__()).__int__() self.lumitable = lumitable self.colorMode = colorMode self.table = [] if colorMode == "colors": ##### FIXME self.colorTable = [[0 for x in range(self.xBlocks*3)] for x in range(self.yBlocks*3)] ready = 0.0 total = self.xBlocks*self.yBlocks for x in range(0,self.xBlocks): for y in range(0,self.yBlocks): luminance = calcBlockLuminance(self.file,x,y,self.lumitable) if colorMode == "colors": self.colorTable[x][y] = self.calcColorAverage(x,y,self.lumitable) found = False for i in self.table: if i[0] == luminance: i[1].append((x,y)) found = True break if not found: self.table.append([luminance,[(x,y)]]) ready += 1.0 v_print(2,"Generating imagetable: {0:.2f}%".format((ready/total)*100.0)) self.table.sort(key=getKey0) v_print(2,"Imagetable generated!") def calcColorAverage(self,blockx,blocky,lumitable): i
### Trix Class ### class trix: def __init__(self,name,lumi,imagetb): self.name = name self.lumitable = lumi self.imagetable = imagetb self.imagetable.table.reverse() self.image = Image.new('RGBA', (self.imagetable.xBlocks*self.lumitable.blockWidth,self.imagetable.yBlocks*self.lumitable.blockHeight), (255,255,255,255)) self.blockPerChar = math.ceil(len(self.imagetable.table).__float__() / len(self.lumitable.table).__float__()) self.trixtable = [] def generateTrixtable(self): trixindex = -1 ready = 0.0 total = len(self.lumitable.table) for i in self.lumitable.table: trixindex += 1 self.trixtable.append([i[0],[]]) for n in range(0,self.blockPerChar.__int__()): if(len(self.imagetable.table)>0): self.trixtable[trixindex][1].append((self.imagetable.table.pop()[1])) else: break ready += 1.0 v_print(2,"Generating trixtable: {0:.2f}%".format((ready/total)*100.0)) v_print(2,"Trixtable generated!") def printTrix(self,output): txt = Image.new('RGBA', self.image.size, (255,255,255,0)) fnt = ImageFont.truetype("lumitables/"+self.lumitable.fontName+".ttf", self.lumitable.fontSize) d = ImageDraw.Draw(txt) for currtrix in self.trixtable: if len(currtrix) > 0: for i in range(0,self.blockPerChar.__int__()): if len(currtrix[1]) > i: for tuple in currtrix[1][i]: x = tuple[0] * self.lumitable.blockWidth y = tuple[1] * self.lumitable.blockHeight if self.imagetable.colorMode == "colors": red = self.imagetable.colorTable[tuple[0]][tuple[1]][0] green = self.imagetable.colorTable[tuple[0]][tuple[1]][1] blue = self.imagetable.colorTable[tuple[0]][tuple[1]][2] d.text((x,y), chr(currtrix[0]), font=fnt, fill=(red,green,blue,255)) else: d.text((x,y), chr(currtrix[0]), font=fnt, fill=(0,0,0,255)) out = Image.alpha_composite(self.image, txt) out.save("output/" + output) v_print(2,"Trix saved!") ### Default Configs ### class configs(): def __init__(self): # Open defaults.cfg if it exists or create a new one if it doesn't. ConfigPrs = ConfigParser.ConfigParser() if os.path.isfile("defaults.cfg"): ConfigPrs.read("defaults.cfg") else: cfgfile = open("defaults.cfg",'w') # add the settings to the structure of the file, and let's write it out... ConfigPrs.add_section('Defaults') ConfigPrs.set('Defaults','lumitable','courier.lut') ConfigPrs.set('Defaults','colorMode', 'colors') ConfigPrs.set('Defaults','verbosity', "1") # 0 = nothing / 1 = errors / 2 = info ConfigPrs.write(cfgfile) cfgfile.close() # -/ self.input = "" self.output = "" self.lumitable = ConfigPrs.get("Defaults","lumitable") self.colorMode = ConfigPrs.get("Defaults","colorMode") self.verbosity = int(ConfigPrs.get("Defaults","verbosity")) ### --------------- ### ### MAIN FUNCTION ### def main(): ### Arguments parsing ### # Configure and parse the command line parameters. parser = argparse.ArgumentParser(description='Creates a number matrix based on an image file.') parser.add_argument('-i','--input',help="Input file pathname.",required=True) parser.add_argument('-o','--output',help="Output file pathname.",required=True) parser.add_argument('-l','--lumitable',help="Lumitable name.",required=False) parser.add_argument('-c','--colorMode',help="Color mode (bw/colors).",required=False) parser.add_argument('-v','--verbosity',help="Controls how much information the program will print.\n0 = none | 1 = errors | 2 = errors and info", required=False) args = parser.parse_args() ### ----------------- ### if not os.path.isfile(args.input): v_print(1,"EXIT
m = Image.open(self.file) px = im.load() red = 0 green = 0 blue = 0 for x in xrange(blockx*lumitable.blockWidth,(blockx*lumitable.blockWidth)+lumitable.blockWidth): for y in xrange(blocky*lumitable.blockHeight,(blocky*lumitable.blockHeight)+lumitable.blockHeight): red += px[x,y][0] green += px[x,y][1] blue += px[x,y][2] red = red / (lumitable.blockHeight*lumitable.blockWidth) green = green / (lumitable.blockHeight*lumitable.blockWidth) blue = blue / (lumitable.blockHeight*lumitable.blockWidth) return (red,green,blue)
identifier_body
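Editor's note: the trixer.py rows above compute per-block color averages by summing R, G and B over every pixel of a block and dividing by the block area. Here is a short Go sketch of the same averaging using only the standard image packages; the file name input.png and the 8x14 block size are illustrative assumptions.

package main

import (
	"fmt"
	"image"
	_ "image/png" // register the PNG decoder
	"os"
)

// blockAverage mirrors calcColorAverage: mean R, G, B over the
// blockW x blockH tile whose tile coordinates are (bx, by).
func blockAverage(img image.Image, bx, by, blockW, blockH int) (r, g, b uint64) {
	var rs, gs, bs, n uint64
	for x := bx * blockW; x < (bx+1)*blockW; x++ {
		for y := by * blockH; y < (by+1)*blockH; y++ {
			pr, pg, pb, _ := img.At(x, y).RGBA() // 16-bit channels
			rs += uint64(pr >> 8)                // rescale to 0..255
			gs += uint64(pg >> 8)
			bs += uint64(pb >> 8)
			n++
		}
	}
	return rs / n, gs / n, bs / n
}

func main() {
	f, err := os.Open("input.png") // hypothetical input image
	if err != nil {
		panic(err)
	}
	defer f.Close()
	img, _, err := image.Decode(f)
	if err != nil {
		panic(err)
	}
	fmt.Println(blockAverage(img, 0, 0, 8, 14))
}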
trixer.py
umitable.blockHeight*lumitable.blockWidth) return luminance ### LUMITABLE CLASS ### class lumitable: def __init__(self,fontname,fontsize,range,blockheight,blockwidth): self.fontName = fontname self.fontSize = fontsize self.charRange = range self.charNumber = range[1] - range[0] self.blockHeight = blockheight self.blockWidth = blockwidth self.table = [] def generateFontStrip(self): if self.fontName is not None: base = Image.new('RGBA', (self.charNumber*self.blockWidth,self.blockHeight), (255,255,255,255)) txt = Image.new('RGBA', base.size, (255,255,255,0)) fnt = ImageFont.truetype(("lumitables/" + self.fontName + '.ttf'), self.fontSize) d = ImageDraw.Draw(txt) for num in range(self.charRange[0],self.charRange[1]): pos = (num-self.charRange[0])*self.blockWidth d.text((pos,0), chr(num), font=fnt, fill=(0,0,0,255)) out = Image.alpha_composite(base, txt) # write the strip to a PNG file out.save(("lumitables/" + self.fontName + ".png"), "PNG") v_print(2,"Fontstrip generated!") def generateLuminanceTable(self): for block in xrange(0,self.charNumber): self.table.append([block+self.charRange[0],calcBlockLuminance("lumitables/" + self.fontName + ".png",block,0,self)]) self.table.sort(key=getKey1) with open("lumitables/" + self.fontName + ".lut", 'wb') as f: pickle.dump(self, f) v_print(2,"Lumitable generated!") ### IMAGETABLE CLASS ### class imagetable: def __init__(self,file,lumitable,colorMode): self.file = file self.image = Image.open(file) self.xBlocks = math.floor(self.image.size[0].__float__() / lumitable.blockWidth.__float__()).__int__() self.yBlocks = math.floor(self.image.size[1].__float__() / lumitable.blockHeight.__float__()).__int__() self.lumitable = lumitable self.colorMode = colorMode self.table = [] if colorMode == "colors": ##### FIXME self.colorTable = [[0 for x in range(self.xBlocks*3)] for x in range(self.yBlocks*3)] ready = 0.0 total = self.xBlocks*self.yBlocks for x in range(0,self.xBlocks): for y in range(0,self.yBlocks): luminance = calcBlockLuminance(self.file,x,y,self.lumitable) if colorMode == "colors": self.colorTable[x][y] = self.calcColorAverage(x,y,self.lumitable) found = False for i in self.table: if i[0] == luminance: i[1].append((x,y)) found = True break if not found: self.table.append([luminance,[(x,y)]]) ready += 1.0 v_print(2,"Generating imagetable: {0:.2f}%".format((ready/total)*100.0)) self.table.sort(key=getKey0) v_print(2,"Imagetable generated!") def calcColorAverage(self,blockx,blocky,lumitable): im = Image.open(self.file) px = im.load() red = 0 green = 0 blue = 0 for x in xrange(blockx*lumitable.blockWidth,(blockx*lumitable.blockWidth)+lumitable.blockWidth): for y in xrange(blocky*lumitable.blockHeight,(blocky*lumitable.blockHeight)+lumitable.blockHeight): red += px[x,y][0] green += px[x,y][1] blue += px[x,y][2] red = red / (lumitable.blockHeight*lumitable.blockWidth) green = green / (lumitable.blockHeight*lumitable.blockWidth) blue = blue / (lumitable.blockHeight*lumitable.blockWidth) return (red,green,blue) ### Trix Class ### class trix: def __init__(self,name,lumi,imagetb): self.name = name self.lumitable = lumi self.imagetable = imagetb self.imagetable.table.reverse() self.image = Image.new('RGBA', (self.imagetable.xBlocks*self.lumitable.blockWidth,self.imagetable.yBlocks*self.lumitable.blockHeight), (255,255,255,255)) self.blockPerChar = math.ceil(len(self.imagetable.table).__float__() / len(self.lumitable.table).__float__()) self.trixtable = [] def generateTrixtable(self): trixindex = -1 ready = 0.0 total = len(self.lumitable.table) for i in 
self.lumitable.table: trixindex += 1 self.trixtable.append([i[0],[]]) for n in range(0,self.blockPerChar.__int__()): if(len(self.imagetable.table)>0): self.trixtable[trixindex][1].append((self.imagetable.table.pop()[1])) else: break ready += 1.0 v_print(2,"Generating trixtable: {0:.2f}%".format((ready/total)*100.0)) v_print(2,"Trixtable generated!") def printTrix(self,output): txt = Image.new('RGBA', self.image.size, (255,255,255,0)) fnt = ImageFont.truetype("lumitables/"+self.lumitable.fontName+".ttf", self.lumitable.fontSize) d = ImageDraw.Draw(txt) for currtrix in self.trixtable: if len(currtrix) > 0: for i in range(0,self.blockPerChar.__int__()): if len(currtrix[1]) > i: for tuple in currtrix[1][i]:
blue = self.imagetable.colorTable[tuple[0]][tuple[1]][2] d.text((x,y), chr(currtrix[0]), font=fnt, fill=(red,green,blue,255)) else: d.text((x,y), chr(currtrix[0]), font=fnt, fill=(0,0,0,255)) out = Image.alpha_composite(self.image, txt) out.save("output/" + output) v_print(2,"Trix saved!") ### Default Configs ### class configs(): def __init__(self): # Open defaults.cfg if it exists or create a new one if it doesn't. ConfigPrs = ConfigParser.ConfigParser() if os.path.isfile("defaults.cfg"): ConfigPrs.read("defaults.cfg") else: cfgfile = open("defaults.cfg",'w') # add the settings to the structure of the file, and let's write it out... ConfigPrs.add_section('Defaults') ConfigPrs.set('Defaults','lumitable','courier.lut') ConfigPrs.set('Defaults','colorMode', 'colors') ConfigPrs.set('Defaults','verbosity', "1") # 0 = nothing / 1 = errors / 2 = info ConfigPrs.write(cfgfile) cfgfile.close() # -/ self.input = "" self.output = "" self.lumitable = ConfigPrs.get("Defaults","lumitable") self.colorMode = ConfigPrs.get("Defaults","colorMode") self.verbosity = int(ConfigPrs.get("Defaults","verbosity")) ### --------------- ### ### MAIN FUNCTION ### def main(): ### Arguments parsing ### # Configure and parse the command line parameters. parser = argparse.ArgumentParser(description='Creates a number matrix based on an image file.') parser.add_argument('-i','--input',help="Input file pathname.",required=True) parser.add_argument('-o','--output',help="Output file pathname.",required=True) parser.add_argument('-l','--lumitable',help="Lumitable name.",required=False) parser.add_argument('-c','--colorMode',help="Color mode (bw/colors).",required=False) parser.add_argument('-v','--verbosity',help="Controls how much information the program will print.\n0 = none | 1 = errors | 2 = errors and info", required=False) args = parser.parse_args() ### ----------------- ### if not os.path.isfile(args.input): v_print(1,"EXITING:
x = tuple[0] * self.lumitable.blockWidth y = tuple[1] * self.lumitable.blockHeight if self.imagetable.colorMode == "colors": red = self.imagetable.colorTable[tuple[0]][tuple[1]][0] green = self.imagetable.colorTable[tuple[0]][tuple[1]][1]
random_line_split
trixer.py
umitable.blockHeight*lumitable.blockWidth) return luminance ### LUMITABLE CLASS ### class lumitable: def __init__(self,fontname,fontsize,range,blockheight,blockwidth): self.fontName = fontname self.fontSize = fontsize self.charRange = range self.charNumber = range[1] - range[0] self.blockHeight = blockheight self.blockWidth = blockwidth self.table = [] def generateFontStrip(self): if self.fontName is not None: base = Image.new('RGBA', (self.charNumber*self.blockWidth,self.blockHeight), (255,255,255,255)) txt = Image.new('RGBA', base.size, (255,255,255,0)) fnt = ImageFont.truetype(("lumitables/" + self.fontName + '.ttf'), self.fontSize) d = ImageDraw.Draw(txt) for num in range(self.charRange[0],self.charRange[1]): pos = (num-self.charRange[0])*self.blockWidth d.text((pos,0), chr(num), font=fnt, fill=(0,0,0,255)) out = Image.alpha_composite(base, txt) # write the strip to a PNG file out.save(("lumitables/" + self.fontName + ".png"), "PNG") v_print(2,"Fontstrip generated!") def generateLuminanceTable(self): for block in xrange(0,self.charNumber): self.table.append([block+self.charRange[0],calcBlockLuminance("lumitables/" + self.fontName + ".png",block,0,self)]) self.table.sort(key=getKey1) with open("lumitables/" + self.fontName + ".lut", 'wb') as f: pickle.dump(self, f) v_print(2,"Lumitable generated!") ### IMAGETABLE CLASS ### class imagetable: def __init__(self,file,lumitable,colorMode): self.file = file self.image = Image.open(file) self.xBlocks = math.floor(self.image.size[0].__float__() / lumitable.blockWidth.__float__()).__int__() self.yBlocks = math.floor(self.image.size[1].__float__() / lumitable.blockHeight.__float__()).__int__() self.lumitable = lumitable self.colorMode = colorMode self.table = [] if colorMode == "colors": ##### FIXME self.colorTable = [[0 for x in range(self.xBlocks*3)] for x in range(self.yBlocks*3)] ready = 0.0 total = self.xBlocks*self.yBlocks for x in range(0,self.xBlocks): for y in range(0,self.yBlocks): luminance = calcBlockLuminance(self.file,x,y,self.lumitable) if colorMode == "colors": self.colorTable[x][y] = self.calcColorAverage(x,y,self.lumitable) found = False for i in self.table: if i[0] == luminance: i[1].append((x,y)) found = True break if not found: self.table.append([luminance,[(x,y)]]) ready += 1.0 v_print(2,"Generating imagetable: {0:.2f}%".format((ready/total)*100.0)) self.table.sort(key=getKey0) v_print(2,"Imagetable generated!") def calcColorAverage(self,blockx,blocky,lumitable): im = Image.open(self.file) px = im.load() red = 0 green = 0 blue = 0 for x in xrange(blockx*lumitable.blockWidth,(blockx*lumitable.blockWidth)+lumitable.blockWidth): for y in xrange(blocky*lumitable.blockHeight,(blocky*lumitable.blockHeight)+lumitable.blockHeight): red += px[x,y][0] green += px[x,y][1] blue += px[x,y][2] red = red / (lumitable.blockHeight*lumitable.blockWidth) green = green / (lumitable.blockHeight*lumitable.blockWidth) blue = blue / (lumitable.blockHeight*lumitable.blockWidth) return (red,green,blue) ### Trix Class ### class trix: def _
self,name,lumi,imagetb): self.name = name self.lumitable = lumi self.imagetable = imagetb self.imagetable.table.reverse() self.image = Image.new('RGBA', (self.imagetable.xBlocks*self.lumitable.blockWidth,self.imagetable.yBlocks*self.lumitable.blockHeight), (255,255,255,255)) self.blockPerChar = math.ceil(len(self.imagetable.table).__float__() / len(self.lumitable.table).__float__()) self.trixtable = [] def generateTrixtable(self): trixindex = -1 ready = 0.0 total = len(self.lumitable.table) for i in self.lumitable.table: trixindex += 1 self.trixtable.append([i[0],[]]) for n in range(0,self.blockPerChar.__int__()): if(len(self.imagetable.table)>0): self.trixtable[trixindex][1].append((self.imagetable.table.pop()[1])) else: break ready += 1.0 v_print(2,"Generating trixtable: {0:.2f}%".format((ready/total)*100.0)) v_print(2,"Trixtable generated!") def printTrix(self,output): txt = Image.new('RGBA', self.image.size, (255,255,255,0)) fnt = ImageFont.truetype("lumitables/"+self.lumitable.fontName+".ttf", self.lumitable.fontSize) d = ImageDraw.Draw(txt) for currtrix in self.trixtable: if len(currtrix) > 0: for i in range(0,self.blockPerChar.__int__()): if len(currtrix[1]) > i: for tuple in currtrix[1][i]: x = tuple[0] * self.lumitable.blockWidth y = tuple[1] * self.lumitable.blockHeight if self.imagetable.colorMode == "colors": red = self.imagetable.colorTable[tuple[0]][tuple[1]][0] green = self.imagetable.colorTable[tuple[0]][tuple[1]][1] blue = self.imagetable.colorTable[tuple[0]][tuple[1]][2] d.text((x,y), chr(currtrix[0]), font=fnt, fill=(red,green,blue,255)) else: d.text((x,y), chr(currtrix[0]), font=fnt, fill=(0,0,0,255)) out = Image.alpha_composite(self.image, txt) out.save("output/" + output) v_print(2,"Trix saved!") ### Default Configs ### class configs(): def __init__(self): # Open defaults.cfg if it exists or create a new one if it doesn't. ConfigPrs = ConfigParser.ConfigParser() if os.path.isfile("defaults.cfg"): ConfigPrs.read("defaults.cfg") else: cfgfile = open("defaults.cfg",'w') # add the settings to the structure of the file, and let's write it out... ConfigPrs.add_section('Defaults') ConfigPrs.set('Defaults','lumitable','courier.lut') ConfigPrs.set('Defaults','colorMode', 'colors') ConfigPrs.set('Defaults','verbosity', "1") # 0 = nothing / 1 = errors / 2 = info ConfigPrs.write(cfgfile) cfgfile.close() # -/ self.input = "" self.output = "" self.lumitable = ConfigPrs.get("Defaults","lumitable") self.colorMode = ConfigPrs.get("Defaults","colorMode") self.verbosity = int(ConfigPrs.get("Defaults","verbosity")) ### --------------- ### ### MAIN FUNCTION ### def main(): ### Arguments parsing ### # Configure and parse the command line parameters. parser = argparse.ArgumentParser(description='Creates a number matrix based on an image file.') parser.add_argument('-i','--input',help="Input file pathname.",required=True) parser.add_argument('-o','--output',help="Output file pathname.",required=True) parser.add_argument('-l','--lumitable',help="Lumitable name.",required=False) parser.add_argument('-c','--colorMode',help="Color mode (bw/colors).",required=False) parser.add_argument('-v','--verbosity',help="Controls how much information the program will print.\n0 = none | 1 = errors | 2 = errors and info", required=False) args = parser.parse_args() ### ----------------- ### if not os.path.isfile(args.input): v_print(1,"EXITING:
_init__(
identifier_name
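Editor's note: generateTrixtable in the rows above pairs two sorted tables: glyphs ordered by luminance each take up to ceil(blocks/glyphs) image blocks, so the darkest characters land on the darkest tiles. Below is a compact Go sketch of that assignment, under the same assumption that lower values mean darker; all names are illustrative.

package main

import (
	"fmt"
	"math"
	"sort"
)

// assign pairs glyphs and blocks the way generateTrixtable does: sort both
// by luminance, then hand each glyph ceil(len(blocks)/len(glyphs)) blocks.
func assign(glyphLum map[rune]float64, blockLum []float64) map[rune][]int {
	glyphs := make([]rune, 0, len(glyphLum))
	for g := range glyphLum {
		glyphs = append(glyphs, g)
	}
	sort.Slice(glyphs, func(i, j int) bool { return glyphLum[glyphs[i]] < glyphLum[glyphs[j]] })

	order := make([]int, len(blockLum))
	for i := range order {
		order[i] = i
	}
	sort.Slice(order, func(i, j int) bool { return blockLum[order[i]] < blockLum[order[j]] })

	perGlyph := int(math.Ceil(float64(len(order)) / float64(len(glyphs))))
	out := make(map[rune][]int)
	for i, blockIdx := range order {
		g := glyphs[i/perGlyph] // darkest glyphs consume the darkest blocks first
		out[g] = append(out[g], blockIdx)
	}
	return out
}

func main() {
	fmt.Println(assign(map[rune]float64{'#': 0.2, '.': 0.9}, []float64{0.8, 0.1, 0.5, 0.3}))
}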
main.rs
-> Error { match error.kind() { io::ErrorKind::PermissionDenied => Error::MustRunAsRoot, _e => Error::IoError(dbg!(error)), } } } fn octet<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, u8, E> { map_res(digit1, |s: &str| s.parse::<u8>())(input) } fn dotted_octet<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, u8, E> { preceded(tag("."), octet)(input) } fn ip_v4_addr<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, &'a str, E> { recognize(tuple((octet, dotted_octet, dotted_octet, dotted_octet)))(input) } fn hextet<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, u16, E> { map_res(hex_digit1, |s: &str| u16::from_str_radix(s, 16))(input) } fn sep_hextet<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, u16, E> { preceded(tag("::"), hextet)(input) } fn ip_v6_addr<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, &'a str, E> { let parser = preceded(opt(hextet), many_m_n(1, 7, sep_hextet)); recognize(parser)(input) } fn ip_addr<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, IpAddr, E> { map_res(alt((ip_v4_addr, ip_v6_addr)), |s: &str| s.parse::<IpAddr>())(input) } fn hostname<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, &'a str, E> { recognize(tuple(( alpha1, many0(alt((alphanumeric1, recognize(one_of("-."))))), )))(input) } fn check_hostname<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, (), E> { all_consuming(hostname)(input).map(|(input, _)| (input, ())) } fn aliases<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, Vec<String>, E> { let (input, aliases) = separated_list(tag(" "), hostname)(input)?; Ok((input, aliases.into_iter().map(String::from).collect())) } fn
<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, &'a str, E> { preceded(tag("#"), rest)(input) } #[derive(Debug)] struct HostsLine { ip: IpAddr, canonical_hostname: String, aliases: Vec<String>, comment: Option<String>, } impl HostsLine { fn new(ip: IpAddr, canonical_hostname: String) -> HostsLine { let aliases = Vec::new(); let comment = None; HostsLine { ip, canonical_hostname, aliases, comment, } } } impl fmt::Display for HostsLine { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let HostsLine { ip, canonical_hostname, aliases, comment, } = self; let sep = match ip.to_string().chars().count() { 0..=8 => "\t\t", 9..=16 => "\t", _ => " ", }; write!( f, "{ip}{sep}{ch}", ip = ip, sep = sep, ch = canonical_hostname, )?; if !aliases.is_empty() { write!(f, "\t{}", aliases.join(" "))?; } if let Some(comment) = comment { write!(f, "#{}", comment)?; } Ok(()) } } fn hosts_line<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, HostsLine, E> { let (input, ip) = ip_addr(input)?; let (input, _) = space1(input)?; let (input, canonical_hostname) = hostname(input)?; let (input, _) = space1(input)?; let (input, aliases) = opt(aliases)(input)?; let (input, comment) = opt(comment)(input)?; let canonical_hostname = String::from(canonical_hostname); let aliases = aliases.unwrap_or_else(Vec::new); let comment = comment.map(String::from); Ok(( input, HostsLine { ip, canonical_hostname, aliases, comment, }, )) } #[derive(Debug)] enum Line { Unstructured(String), Structured(HostsLine), } impl Line { fn structured(ip: IpAddr, canonical_name: String) -> Line { Line::Structured(HostsLine::new(ip, canonical_name)) } fn structured_ref(&self) -> Option<&HostsLine> { match self { Line::Structured(line) => Some(line), Line::Unstructured(_) => None, } } } impl fmt::Display for Line { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Line::Unstructured(line) => write!(f, "{}", line), Line::Structured(hosts_line) => write!(f, "{}", hosts_line), } } } fn parse_line(line: &str) -> Line { match hosts_line::<(&str, ErrorKind)>(&line) { Ok((_, hosts_line)) => Line::Structured(hosts_line), Err(_error) => Line::Unstructured(String::from(line)), } } fn validate_alias(alias: &str) -> Result<(), Error> { check_hostname::<VerboseError<&str>>(alias) .map(|_| ()) .map_err(|error| match error { Err::Incomplete(_) => Error::IncompleteAlias, Err::Error(e) | Err::Failure(e) => Error::InvalidAliasFormat(convert_error(alias, e)), }) } fn iptables_rules_exist(options: &Options) -> Result<bool, Error> { let rule_match = format!( "-A OUTPUT -s 127.0.0.1/32 -d {alias}/32 -p tcp -m tcp --dport 80 -j DNAT --to-destination 127.0.0.", alias = options.alias, ); let output = Command::new("iptables") .args(&["-t", "nat", "-S", "OUTPUT"]) .output()?; let stdout = Cursor::new(output.stdout); let matched_lines: Vec<_> = stdout .lines() .filter_map(|line_ret| { line_ret.ok().and_then(|line| { let line: String = dbg!(line); line.rfind(&rule_match).map(|index| dbg!((index, line))) }) }) .collect(); let port = options.port.to_string(); if let Some((idx, line)) = matched_lines.first() { if dbg!(&line[*idx..]).ends_with(&format!(":{}", port)) { return Ok(true); } else { return Err(Error::AliasAlreadyInUse); } } Ok(false) } fn write_iptables_rules(options: &Options) -> Result<(), Error> { let status = Command::new("iptables") .args(&[ "-t", "nat", "--append", "OUTPUT", "--protocol", "tcp", "--dport", "80", "--source", "127.0.0.1", "--destination", &options.alias, "--jump", "DNAT", "--to-destination", &format!("127.0.0.{ip}:{port}", 
ip = "1", port = options.port), ]) .status()?; if !status.success() { return Err(Error::IptablesCommandFailed(status.code().unwrap_or(-1))); } Ok(()) } fn next_unused_local_ip(in_use_ips: &HashSet<IpAddr>) -> IpAddr { for b in 0..128 { for c in 0..128 { for d in 1..128 { let ip = IpAddr::V4(Ipv4Addr::new(127, b, c, d)); if !in_use_ips.contains(&ip) { return ip; } } } } "127.0.0.1".parse().unwrap() } fn run() -> Result<(), Error> { let options = Options::from_args(); validate_alias(&options.alias)?; let mut file = File::open(HOSTS_FILE)?; file.seek(io::SeekFrom::Start(0))?; let reader =
comment
identifier_name
main.rs
-> Error { match error.kind() { io::ErrorKind::PermissionDenied => Error::MustRunAsRoot, _e => Error::IoError(dbg!(error)), } } } fn octet<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, u8, E> { map_res(digit1, |s: &str| s.parse::<u8>())(input) } fn dotted_octet<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, u8, E> { preceded(tag("."), octet)(input) } fn ip_v4_addr<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, &'a str, E> { recognize(tuple((octet, dotted_octet, dotted_octet, dotted_octet)))(input) } fn hextet<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, u16, E> { map_res(hex_digit1, |s: &str| u16::from_str_radix(s, 16))(input) } fn sep_hextet<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, u16, E> { preceded(tag("::"), hextet)(input) } fn ip_v6_addr<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, &'a str, E> { let parser = preceded(opt(hextet), many_m_n(1, 7, sep_hextet)); recognize(parser)(input) } fn ip_addr<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, IpAddr, E> { map_res(alt((ip_v4_addr, ip_v6_addr)), |s: &str| s.parse::<IpAddr>())(input) } fn hostname<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, &'a str, E> { recognize(tuple(( alpha1, many0(alt((alphanumeric1, recognize(one_of("-."))))), )))(input) } fn check_hostname<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, (), E> { all_consuming(hostname)(input).map(|(input, _)| (input, ())) } fn aliases<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, Vec<String>, E> { let (input, aliases) = separated_list(tag(" "), hostname)(input)?; Ok((input, aliases.into_iter().map(String::from).collect())) }
#[derive(Debug)] struct HostsLine { ip: IpAddr, canonical_hostname: String, aliases: Vec<String>, comment: Option<String>, } impl HostsLine { fn new(ip: IpAddr, canonical_hostname: String) -> HostsLine { let aliases = Vec::new(); let comment = None; HostsLine { ip, canonical_hostname, aliases, comment, } } } impl fmt::Display for HostsLine { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let HostsLine { ip, canonical_hostname, aliases, comment, } = self; let sep = match ip.to_string().chars().count() { 0..=8 => "\t\t", 9..=16 => "\t", _ => " ", }; write!( f, "{ip}{sep}{ch}", ip = ip, sep = sep, ch = canonical_hostname, )?; if !aliases.is_empty() { write!(f, "\t{}", aliases.join(" "))?; } if let Some(comment) = comment { write!(f, "#{}", comment)?; } Ok(()) } } fn hosts_line<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, HostsLine, E> { let (input, ip) = ip_addr(input)?; let (input, _) = space1(input)?; let (input, canonical_hostname) = hostname(input)?; let (input, _) = space1(input)?; let (input, aliases) = opt(aliases)(input)?; let (input, comment) = opt(comment)(input)?; let canonical_hostname = String::from(canonical_hostname); let aliases = aliases.unwrap_or_else(Vec::new); let comment = comment.map(String::from); Ok(( input, HostsLine { ip, canonical_hostname, aliases, comment, }, )) } #[derive(Debug)] enum Line { Unstructured(String), Structured(HostsLine), } impl Line { fn structured(ip: IpAddr, canonical_name: String) -> Line { Line::Structured(HostsLine::new(ip, canonical_name)) } fn structured_ref(&self) -> Option<&HostsLine> { match self { Line::Structured(line) => Some(line), Line::Unstructured(_) => None, } } } impl fmt::Display for Line { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Line::Unstructured(line) => write!(f, "{}", line), Line::Structured(hosts_line) => write!(f, "{}", hosts_line), } } } fn parse_line(line: &str) -> Line { match hosts_line::<(&str, ErrorKind)>(&line) { Ok((_, hosts_line)) => Line::Structured(hosts_line), Err(_error) => Line::Unstructured(String::from(line)), } } fn validate_alias(alias: &str) -> Result<(), Error> { check_hostname::<VerboseError<&str>>(alias) .map(|_| ()) .map_err(|error| match error { Err::Incomplete(_) => Error::IncompleteAlias, Err::Error(e) | Err::Failure(e) => Error::InvalidAliasFormat(convert_error(alias, e)), }) } fn iptables_rules_exist(options: &Options) -> Result<bool, Error> { let rule_match = format!( "-A OUTPUT -s 127.0.0.1/32 -d {alias}/32 -p tcp -m tcp --dport 80 -j DNAT --to-destination 127.0.0.", alias = options.alias, ); let output = Command::new("iptables") .args(&["-t", "nat", "-S", "OUTPUT"]) .output()?; let stdout = Cursor::new(output.stdout); let matched_lines: Vec<_> = stdout .lines() .filter_map(|line_ret| { line_ret.ok().and_then(|line| { let line: String = dbg!(line); line.rfind(&rule_match).map(|index| dbg!((index, line))) }) }) .collect(); let port = options.port.to_string(); if let Some((idx, line)) = matched_lines.first() { if dbg!(&line[*idx..]).ends_with(&format!(":{}", port)) { return Ok(true); } else { return Err(Error::AliasAlreadyInUse); } } Ok(false) } fn write_iptables_rules(options: &Options) -> Result<(), Error> { let status = Command::new("iptables") .args(&[ "-t", "nat", "--append", "OUTPUT", "--protocol", "tcp", "--dport", "80", "--source", "127.0.0.1", "--destination", &options.alias, "--jump", "DNAT", "--to-destination", &format!("127.0.0.{ip}:{port}", ip = "1", port = options.port), ]) .status()?; if !status.success() { return 
Err(Error::IptablesCommandFailed(status.code().unwrap_or(-1))); } Ok(()) } fn next_unused_local_ip(in_use_ips: &HashSet<IpAddr>) -> IpAddr { for b in 0..128 { for c in 0..128 { for d in 1..128 { let ip = IpAddr::V4(Ipv4Addr::new(127, b, c, d)); if !in_use_ips.contains(&ip) { return ip; } } } } "127.0.0.1".parse().unwrap() } fn run() -> Result<(), Error> { let options = Options::from_args(); validate_alias(&options.alias)?; let mut file = File::open(HOSTS_FILE)?; file.seek(io::SeekFrom::Start(0))?; let reader =
fn comment<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, &'a str, E> { preceded(tag("#"), rest)(input) }
random_line_split
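Editor's note: the main.rs rows build a hosts(5) line parser out of small nom combinators (IP address, hostname, aliases, trailing "#" comment). For contrast, here is a minimal sketch of the same line shape parsed with the Go standard library instead of parser combinators; the struct and function names are illustrative assumptions.

package main

import (
	"fmt"
	"net"
	"strings"
)

// hostsLine mirrors the fields of the Rust HostsLine struct.
type hostsLine struct {
	ip        net.IP
	canonical string
	aliases   []string
	comment   string
}

// parseHostsLine splits off an optional "#" comment, then expects
// "<ip> <canonical> [aliases...]", like the nom hosts_line parser.
func parseHostsLine(line string) (hostsLine, bool) {
	var hl hostsLine
	if i := strings.IndexByte(line, '#'); i >= 0 {
		hl.comment = line[i+1:]
		line = line[:i]
	}
	fields := strings.Fields(line)
	if len(fields) < 2 {
		return hl, false
	}
	if hl.ip = net.ParseIP(fields[0]); hl.ip == nil {
		return hl, false
	}
	hl.canonical = fields[1]
	hl.aliases = fields[2:]
	return hl, true
}

func main() {
	fmt.Println(parseHostsLine("127.0.0.1\tlocalhost loopback # local names"))
}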
dummy.py
zope.container.contained import notifyContainerModified from zope.datetime import rfc1123_date from zope.event import notify from zope.interface import implementer from ...ActionProviderBase import ActionProviderBase from ...interfaces import IContentish from ...interfaces import ISiteRoot from ...interfaces import ITypeInformation from ...PortalContent import PortalContent from ..base.security import DummyUser from ..base.security import OmnipotentUser class DummyObject(Implicit): """ A dummy callable object. Comes with getIconURL and restrictedTraverse methods. """ def __init__(self, id='dummy', **kw): self._id = id self.__dict__.update(kw) def __str__(self): return self._id def __call__(self): return self._id def restrictedTraverse(self, path): if not path: return self parent = self path_elements = path.split('/') path_elements.reverse() while path_elements: path_element = path_elements.pop() parent = getattr(parent, path_element) return parent def icon(self): return f'{self._id} ICON' def getIconURL(self): return f'{self._id} ICON' def getId(self): return self._id @implementer(ITypeInformation) class DummyType(DummyObject): """ A Dummy Type object """ def __init__(self, id='Dummy Content', title='Dummy Content', actions=()): """ To fake out some actions, pass in a sequence of tuples where the first element represents the ID or alias of the action and the second element is the path to the object to be invoked, such as a page template. """ self.id = self._id = id self.title = title self._actions = {} self._setActions(actions) def _setActions(self, actions=()): for action_id, action_path in actions: self._actions[action_id] = action_path def Title(self): return self.title def allowType(self, contentType): return True def allowDiscussion(self): return False def queryMethodID(self, alias, default=None, context=None): return self._actions.get(alias, default) def isConstructionAllowed(self, container): return True @implementer(IContentish) class DummyContent(PortalContent, Item): """ A Dummy piece of PortalContent """ meta_type = 'Dummy' portal_type = 'Dummy Content' url = 'foo_url' after_add_called = before_delete_called = 0 def __init__(self, id='dummy', *args, **kw): self.id = id self._args = args self._kw = {} self._kw.update(kw) self.reset() self.catalog = kw.get('catalog', 0) self.url = kw.get('url', None) self.view_id = kw.get('view_id', None) def manage_afterAdd(self, item, container): self.after_add_called = 1 def manage_beforeDelete(self, item, container): self.before_delete_called = 1 def absolute_url(self): return self.url def reset(self): self.after_add_called = self.before_delete_called = 0 # Make sure normal Database export/import stuff doesn't trip us up. 
def _getCopy(self, container): return DummyContent(self.id, catalog=self.catalog) def _safe_get(self, attr): if self.catalog: return getattr(self, attr, '') else: return getattr(self, attr) def Title(self): return self.title def listCreators(self): return self._safe_get('creators') def Subject(self): return self._safe_get('subject') def Description(self): return self._safe_get('description') def created(self): return self._safe_get('created_date') def modified(self): return self._safe_get('modified_date') def Type(self): return 'Dummy Content Title' def __call__(self): if self.view_id is None: return DummyContent.inheritedAttribute('__call__')(self) else: # view_id control for testing template = getattr(self, self.view_id) if getattr(aq_base(template), 'isDocTemp', 0): return template(self, self.REQUEST, self.REQUEST['RESPONSE']) else: return template() DummyFactory = Factory(DummyContent) class DummyFactoryDispatcher: """ Dummy Product Factory Dispatcher """ def __init__(self, folder): self._folder = folder def getId(self): return 'DummyFactoryDispatcher' def addFoo(self, id, *args, **kw): if getattr(self._folder, '_prefix', None): id = f'{self._folder._prefix}_{id}' foo = DummyContent(id, *args, **kw) self._folder._setObject(id, foo, suppress_events=True) if getattr(self._folder, '_prefix', None): return id __roles__ = ('FooAdder',) __allow_access_to_unprotected_subobjects__ = {'addFoo': 1} @implementer(IObjectManager) class DummyFolder(DummyObject): """Dummy Container for testing. """ def __init__(self, id='dummy', fake_product=0, prefix=''): self._prefix = prefix self._id = id if fake_product: self.manage_addProduct = { 'FooProduct': DummyFactoryDispatcher(self)} def _setOb(self, id, object): setattr(self, id, object) def _delOb(self, id): delattr(self, id) def _getOb(self, id): return getattr(self, id) def _setObject(self, id, object, suppress_events=False): if not suppress_events: notify(ObjectWillBeAddedEvent(object, self, id)) self._setOb(id, object) object = self._getOb(id) if hasattr(aq_base(object), 'manage_afterAdd'): object.manage_afterAdd(object, self) if not suppress_events: notify(ObjectAddedEvent(object, self, id)) notifyContainerModified(self) return object def _delObject(self, id): object = self._getOb(id) notify(ObjectWillBeRemovedEvent(object, self, id)) if hasattr(aq_base(object), 'manage_beforeDelete'): object.manage_beforeDelete(object, self) self._delOb(id) notify(ObjectRemovedEvent(object, self, id)) notifyContainerModified(self) def getPhysicalPath(self): p = aq_parent(aq_inner(self)) path = (self._id,) if p is not None: path = p.getPhysicalPath() + path return path def getId(self): return self._id def reindexObjectSecurity(self): pass def contentIds(self): return ('user_bar',) def all_meta_types(self): return ({'name': 'Dummy', 'permission': 'addFoo'},) def getTypeInfo(self): return self.portal_types.getTypeInfo(self) # Can return None. @implementer(ISiteRoot) class DummySite(DummyFolder): """ A dummy portal folder. """ _domain = 'http://www.foobar.com' _path = 'bar' def absolute_url(self, relative=0): return '/'.join((self._domain, self._path, self._id)) def getPhysicalPath(self): return ('', self._path, self._id) def getPhysicalRoot(self): return self def unrestrictedTraverse(self, path, default=None, restricted=0): if path == ['acl_users']: return self.acl_users else:
def userdefined_roles(self): return ('Member', 'Reviewer') def getProperty(self, id, default=None): return getattr(self, id, default) class DummyUserFolder(Implicit): """ A dummy User Folder with 2 dummy Users. """ id = 'acl_users' def __init__(self): setattr(self, 'user_foo', DummyUser(id='user_foo')) setattr(self, 'user_bar', DummyUser(id='user_bar')) setattr(self, 'all_powerful_Oz', OmnipotentUser()) def getUsers(self): pass def getUser(self, name): return getattr(self, name, None) def getUserById(self, id, default=None): return self.getUser(id) def userFolderDelUsers(self, names): for user_id in names: delattr(self, user_id) class DummyTool(Implicit, ActionProviderBase): """ This is a Dummy Tool that behaves as a MembershipTool, a URLTool and an Action Provider """ def __init__(self, anon=1): self.anon = anon # IMembershipTool def getAuthenticatedMember(self): return DummyUser() def isAnonymousUser(self): return self.anon def checkPermission(self, permissionName, object, subobjectName=None): return True # ITypesTool _type_id = 'Dummy Content' _type
obj = self for id in path[3:]: obj = getattr(obj, id) return obj
conditional_block
dummy.py
zope.container.contained import notifyContainerModified from zope.datetime import rfc1123_date from zope.event import notify from zope.interface import implementer from ...ActionProviderBase import ActionProviderBase from ...interfaces import IContentish from ...interfaces import ISiteRoot from ...interfaces import ITypeInformation from ...PortalContent import PortalContent from ..base.security import DummyUser from ..base.security import OmnipotentUser class DummyObject(Implicit): """ A dummy callable object. Comes with getIconURL and restrictedTraverse methods. """ def __init__(self, id='dummy', **kw): self._id = id self.__dict__.update(kw) def __str__(self): return self._id def __call__(self): return self._id def restrictedTraverse(self, path): if not path: return self parent = self path_elements = path.split('/') path_elements.reverse() while path_elements: path_element = path_elements.pop() parent = getattr(parent, path_element) return parent def icon(self): return f'{self._id} ICON' def getIconURL(self): return f'{self._id} ICON' def getId(self): return self._id @implementer(ITypeInformation) class DummyType(DummyObject): """ A Dummy Type object """ def __init__(self, id='Dummy Content', title='Dummy Content', actions=()): """ To fake out some actions, pass in a sequence of tuples where the first element represents the ID or alias of the action and the second element is the path to the object to be invoked, such as a page template. """ self.id = self._id = id self.title = title self._actions = {} self._setActions(actions) def _setActions(self, actions=()): for action_id, action_path in actions: self._actions[action_id] = action_path def Title(self): return self.title def allowType(self, contentType): return True def allowDiscussion(self): return False def queryMethodID(self, alias, default=None, context=None): return self._actions.get(alias, default) def isConstructionAllowed(self, container): return True @implementer(IContentish) class DummyContent(PortalContent, Item): """ A Dummy piece of PortalContent """ meta_type = 'Dummy' portal_type = 'Dummy Content' url = 'foo_url' after_add_called = before_delete_called = 0 def __init__(self, id='dummy', *args, **kw): self.id = id self._args = args self._kw = {} self._kw.update(kw) self.reset() self.catalog = kw.get('catalog', 0) self.url = kw.get('url', None) self.view_id = kw.get('view_id', None) def manage_afterAdd(self, item, container): self.after_add_called = 1 def manage_beforeDelete(self, item, container): self.before_delete_called = 1 def absolute_url(self): return self.url def reset(self): self.after_add_called = self.before_delete_called = 0 # Make sure normal Database export/import stuff doesn't trip us up. 
def _getCopy(self, container): return DummyContent(self.id, catalog=self.catalog) def _safe_get(self, attr): if self.catalog: return getattr(self, attr, '') else: return getattr(self, attr) def Title(self): return self.title def listCreators(self): return self._safe_get('creators') def Subject(self): return self._safe_get('subject') def Description(self): return self._safe_get('description') def created(self): return self._safe_get('created_date') def modified(self): return self._safe_get('modified_date') def Type(self): return 'Dummy Content Title' def __call__(self): if self.view_id is None: return DummyContent.inheritedAttribute('__call__')(self) else: # view_id control for testing template = getattr(self, self.view_id) if getattr(aq_base(template), 'isDocTemp', 0): return template(self, self.REQUEST, self.REQUEST['RESPONSE']) else: return template() DummyFactory = Factory(DummyContent) class DummyFactoryDispatcher: """ Dummy Product Factory Dispatcher """ def __init__(self, folder): self._folder = folder def getId(self): return 'DummyFactoryDispatcher' def addFoo(self, id, *args, **kw): if getattr(self._folder, '_prefix', None): id = f'{self._folder._prefix}_{id}' foo = DummyContent(id, *args, **kw) self._folder._setObject(id, foo, suppress_events=True) if getattr(self._folder, '_prefix', None): return id __roles__ = ('FooAdder',) __allow_access_to_unprotected_subobjects__ = {'addFoo': 1} @implementer(IObjectManager) class DummyFolder(DummyObject): """Dummy Container for testing. """ def __init__(self, id='dummy', fake_product=0, prefix=''): self._prefix = prefix self._id = id if fake_product: self.manage_addProduct = { 'FooProduct': DummyFactoryDispatcher(self)} def _setOb(self, id, object): setattr(self, id, object) def _delOb(self, id): delattr(self, id) def _getOb(self, id): return getattr(self, id) def _setObject(self, id, object, suppress_events=False): if not suppress_events: notify(ObjectWillBeAddedEvent(object, self, id)) self._setOb(id, object) object = self._getOb(id) if hasattr(aq_base(object), 'manage_afterAdd'): object.manage_afterAdd(object, self) if not suppress_events: notify(ObjectAddedEvent(object, self, id)) notifyContainerModified(self) return object def _delObject(self, id): object = self._getOb(id) notify(ObjectWillBeRemovedEvent(object, self, id)) if hasattr(aq_base(object), 'manage_beforeDelete'): object.manage_beforeDelete(object, self) self._delOb(id) notify(ObjectRemovedEvent(object, self, id)) notifyContainerModified(self) def getPhysicalPath(self): p = aq_parent(aq_inner(self)) path = (self._id,) if p is not None: path = p.getPhysicalPath() + path return path def getId(self): return self._id def reindexObjectSecurity(self): pass def contentIds(self): return ('user_bar',) def all_meta_types(self): return ({'name': 'Dummy', 'permission': 'addFoo'},) def getTypeInfo(self): return self.portal_types.getTypeInfo(self) # Can return None. @implementer(ISiteRoot) class DummySite(DummyFolder): """ A dummy portal folder. """ _domain = 'http://www.foobar.com' _path = 'bar' def absolute_url(self, relative=0): return '/'.join((self._domain, self._path, self._id)) def getPhysicalPath(self): return ('', self._path, self._id) def getPhysicalRoot(self): return self
return self.acl_users
        else:
            obj = self
            for id in path[3:]:
                obj = getattr(obj, id)
            return obj

    def userdefined_roles(self):
        return ('Member', 'Reviewer')

    def getProperty(self, id, default=None):
        return getattr(self, id, default)


class DummyUserFolder(Implicit):

    """ A dummy User Folder with 2 dummy Users.
    """

    id = 'acl_users'

    def __init__(self):
        setattr(self, 'user_foo', DummyUser(id='user_foo'))
        setattr(self, 'user_bar', DummyUser(id='user_bar'))
        setattr(self, 'all_powerful_Oz', OmnipotentUser())

    def getUsers(self):
        pass

    def getUser(self, name):
        return getattr(self, name, None)

    def getUserById(self, id, default=None):
        return self.getUser(id)

    def userFolderDelUsers(self, names):
        for user_id in names:
            delattr(self, user_id)


class DummyTool(Implicit, ActionProviderBase):

    """ This is a Dummy Tool that behaves as a MembershipTool,
        a URLTool and an Action Provider
    """

    def __init__(self, anon=1):
        self.anon = anon

    # IMembershipTool
    def getAuthenticatedMember(self):
        return DummyUser()

    def isAnonymousUser(self):
        return self.anon

    def checkPermission(self, permissionName, object, subobjectName=None):
        return True

    # ITypesTool
    _type_id = 'Dummy Content'

    _type
def unrestrictedTraverse(self, path, default=None, restricted=0): if path == ['acl_users']:
random_line_split
dummy.py
zope.container.contained import notifyContainerModified from zope.datetime import rfc1123_date from zope.event import notify from zope.interface import implementer from ...ActionProviderBase import ActionProviderBase from ...interfaces import IContentish from ...interfaces import ISiteRoot from ...interfaces import ITypeInformation from ...PortalContent import PortalContent from ..base.security import DummyUser from ..base.security import OmnipotentUser class DummyObject(Implicit): """ A dummy callable object. Comes with getIconURL and restrictedTraverse methods. """ def __init__(self, id='dummy', **kw): self._id = id self.__dict__.update(kw) def __str__(self): return self._id def __call__(self): return self._id def restrictedTraverse(self, path): if not path: return self parent = self path_elements = path.split('/') path_elements.reverse() while path_elements: path_element = path_elements.pop() parent = getattr(parent, path_element) return parent def icon(self): return f'{self._id} ICON' def getIconURL(self): return f'{self._id} ICON' def getId(self): return self._id @implementer(ITypeInformation) class DummyType(DummyObject): """ A Dummy Type object """ def __init__(self, id='Dummy Content', title='Dummy Content', actions=()): """ To fake out some actions, pass in a sequence of tuples where the first element represents the ID or alias of the action and the second element is the path to the object to be invoked, such as a page template. """ self.id = self._id = id self.title = title self._actions = {} self._setActions(actions) def _setActions(self, actions=()): for action_id, action_path in actions: self._actions[action_id] = action_path def Title(self): return self.title def allowType(self, contentType): return True def allowDiscussion(self): return False def queryMethodID(self, alias, default=None, context=None): return self._actions.get(alias, default) def isConstructionAllowed(self, container): return True @implementer(IContentish) class DummyContent(PortalContent, Item): """ A Dummy piece of PortalContent """ meta_type = 'Dummy' portal_type = 'Dummy Content' url = 'foo_url' after_add_called = before_delete_called = 0 def __init__(self, id='dummy', *args, **kw): self.id = id self._args = args self._kw = {} self._kw.update(kw) self.reset() self.catalog = kw.get('catalog', 0) self.url = kw.get('url', None) self.view_id = kw.get('view_id', None) def manage_afterAdd(self, item, container): self.after_add_called = 1 def manage_beforeDelete(self, item, container): self.before_delete_called = 1 def absolute_url(self): return self.url def reset(self): self.after_add_called = self.before_delete_called = 0 # Make sure normal Database export/import stuff doesn't trip us up. def _getCopy(self, container): return DummyContent(self.id, catalog=self.catalog) def _safe_get(self, attr): if self.catalog: return getattr(self, attr, '') else: return getattr(self, attr) def Title(self): return self.title def listCreators(self): return self._safe_get('creators') def Subject(self): return self._safe_get('subject') def Description(self): return self._safe_get('description') def created(self): return self._safe_get('created_date') def modified(self): return self._safe_get('modified_date') def Type(self):
def __call__(self): if self.view_id is None: return DummyContent.inheritedAttribute('__call__')(self) else: # view_id control for testing template = getattr(self, self.view_id) if getattr(aq_base(template), 'isDocTemp', 0): return template(self, self.REQUEST, self.REQUEST['RESPONSE']) else: return template() DummyFactory = Factory(DummyContent) class DummyFactoryDispatcher: """ Dummy Product Factory Dispatcher """ def __init__(self, folder): self._folder = folder def getId(self): return 'DummyFactoryDispatcher' def addFoo(self, id, *args, **kw): if getattr(self._folder, '_prefix', None): id = f'{self._folder._prefix}_{id}' foo = DummyContent(id, *args, **kw) self._folder._setObject(id, foo, suppress_events=True) if getattr(self._folder, '_prefix', None): return id __roles__ = ('FooAdder',) __allow_access_to_unprotected_subobjects__ = {'addFoo': 1} @implementer(IObjectManager) class DummyFolder(DummyObject): """Dummy Container for testing. """ def __init__(self, id='dummy', fake_product=0, prefix=''): self._prefix = prefix self._id = id if fake_product: self.manage_addProduct = { 'FooProduct': DummyFactoryDispatcher(self)} def _setOb(self, id, object): setattr(self, id, object) def _delOb(self, id): delattr(self, id) def _getOb(self, id): return getattr(self, id) def _setObject(self, id, object, suppress_events=False): if not suppress_events: notify(ObjectWillBeAddedEvent(object, self, id)) self._setOb(id, object) object = self._getOb(id) if hasattr(aq_base(object), 'manage_afterAdd'): object.manage_afterAdd(object, self) if not suppress_events: notify(ObjectAddedEvent(object, self, id)) notifyContainerModified(self) return object def _delObject(self, id): object = self._getOb(id) notify(ObjectWillBeRemovedEvent(object, self, id)) if hasattr(aq_base(object), 'manage_beforeDelete'): object.manage_beforeDelete(object, self) self._delOb(id) notify(ObjectRemovedEvent(object, self, id)) notifyContainerModified(self) def getPhysicalPath(self): p = aq_parent(aq_inner(self)) path = (self._id,) if p is not None: path = p.getPhysicalPath() + path return path def getId(self): return self._id def reindexObjectSecurity(self): pass def contentIds(self): return ('user_bar',) def all_meta_types(self): return ({'name': 'Dummy', 'permission': 'addFoo'},) def getTypeInfo(self): return self.portal_types.getTypeInfo(self) # Can return None. @implementer(ISiteRoot) class DummySite(DummyFolder): """ A dummy portal folder. """ _domain = 'http://www.foobar.com' _path = 'bar' def absolute_url(self, relative=0): return '/'.join((self._domain, self._path, self._id)) def getPhysicalPath(self): return ('', self._path, self._id) def getPhysicalRoot(self): return self def unrestrictedTraverse(self, path, default=None, restricted=0): if path == ['acl_users']: return self.acl_users else: obj = self for id in path[3:]: obj = getattr(obj, id) return obj def userdefined_roles(self): return ('Member', 'Reviewer') def getProperty(self, id, default=None): return getattr(self, id, default) class DummyUserFolder(Implicit): """ A dummy User Folder with 2 dummy Users. 
""" id = 'acl_users' def __init__(self): setattr(self, 'user_foo', DummyUser(id='user_foo')) setattr(self, 'user_bar', DummyUser(id='user_bar')) setattr(self, 'all_powerful_Oz', OmnipotentUser()) def getUsers(self): pass def getUser(self, name): return getattr(self, name, None) def getUserById(self, id, default=None): return self.getUser(id) def userFolderDelUsers(self, names): for user_id in names: delattr(self, user_id) class DummyTool(Implicit, ActionProviderBase): """ This is a Dummy Tool that behaves as a a MemberShipTool, a URLTool and an Action Provider """ def __init__(self, anon=1): self.anon = anon # IMembershipTool def getAuthenticatedMember(self): return DummyUser() def isAnonymousUser(self): return self.anon def checkPermission(self, permissionName, object, subobjectName=None): return True # ITypesTool _type_id = 'Dummy Content' _
return 'Dummy Content Title'
identifier_body
dummy.py
zope.container.contained import notifyContainerModified from zope.datetime import rfc1123_date from zope.event import notify from zope.interface import implementer from ...ActionProviderBase import ActionProviderBase from ...interfaces import IContentish from ...interfaces import ISiteRoot from ...interfaces import ITypeInformation from ...PortalContent import PortalContent from ..base.security import DummyUser from ..base.security import OmnipotentUser class DummyObject(Implicit): """ A dummy callable object. Comes with getIconURL and restrictedTraverse methods. """ def __init__(self, id='dummy', **kw): self._id = id self.__dict__.update(kw) def __str__(self): return self._id def __call__(self): return self._id def restrictedTraverse(self, path): if not path: return self parent = self path_elements = path.split('/') path_elements.reverse() while path_elements: path_element = path_elements.pop() parent = getattr(parent, path_element) return parent def icon(self): return f'{self._id} ICON' def getIconURL(self): return f'{self._id} ICON' def getId(self): return self._id @implementer(ITypeInformation) class DummyType(DummyObject): """ A Dummy Type object """ def __init__(self, id='Dummy Content', title='Dummy Content', actions=()): """ To fake out some actions, pass in a sequence of tuples where the first element represents the ID or alias of the action and the second element is the path to the object to be invoked, such as a page template. """ self.id = self._id = id self.title = title self._actions = {} self._setActions(actions) def _setActions(self, actions=()): for action_id, action_path in actions: self._actions[action_id] = action_path def Title(self): return self.title def allowType(self, contentType): return True def allowDiscussion(self): return False def queryMethodID(self, alias, default=None, context=None): return self._actions.get(alias, default) def isConstructionAllowed(self, container): return True @implementer(IContentish) class DummyContent(PortalContent, Item): """ A Dummy piece of PortalContent """ meta_type = 'Dummy' portal_type = 'Dummy Content' url = 'foo_url' after_add_called = before_delete_called = 0 def __init__(self, id='dummy', *args, **kw): self.id = id self._args = args self._kw = {} self._kw.update(kw) self.reset() self.catalog = kw.get('catalog', 0) self.url = kw.get('url', None) self.view_id = kw.get('view_id', None) def manage_afterAdd(self, item, container): self.after_add_called = 1 def manage_beforeDelete(self, item, container): self.before_delete_called = 1 def absolute_url(self): return self.url def reset(self): self.after_add_called = self.before_delete_called = 0 # Make sure normal Database export/import stuff doesn't trip us up. def _getCopy(self, container): return DummyContent(self.id, catalog=self.catalog) def _safe_get(self, attr): if self.catalog: return getattr(self, attr, '') else: return getattr(self, attr) def Title(self): return self.title def listCreators(self): return self._safe_get('creators') def Subject(self): return self._safe_get('subject') def Description(self): return self._safe_get('description') def
(self): return self._safe_get('created_date') def modified(self): return self._safe_get('modified_date') def Type(self): return 'Dummy Content Title' def __call__(self): if self.view_id is None: return DummyContent.inheritedAttribute('__call__')(self) else: # view_id control for testing template = getattr(self, self.view_id) if getattr(aq_base(template), 'isDocTemp', 0): return template(self, self.REQUEST, self.REQUEST['RESPONSE']) else: return template() DummyFactory = Factory(DummyContent) class DummyFactoryDispatcher: """ Dummy Product Factory Dispatcher """ def __init__(self, folder): self._folder = folder def getId(self): return 'DummyFactoryDispatcher' def addFoo(self, id, *args, **kw): if getattr(self._folder, '_prefix', None): id = f'{self._folder._prefix}_{id}' foo = DummyContent(id, *args, **kw) self._folder._setObject(id, foo, suppress_events=True) if getattr(self._folder, '_prefix', None): return id __roles__ = ('FooAdder',) __allow_access_to_unprotected_subobjects__ = {'addFoo': 1} @implementer(IObjectManager) class DummyFolder(DummyObject): """Dummy Container for testing. """ def __init__(self, id='dummy', fake_product=0, prefix=''): self._prefix = prefix self._id = id if fake_product: self.manage_addProduct = { 'FooProduct': DummyFactoryDispatcher(self)} def _setOb(self, id, object): setattr(self, id, object) def _delOb(self, id): delattr(self, id) def _getOb(self, id): return getattr(self, id) def _setObject(self, id, object, suppress_events=False): if not suppress_events: notify(ObjectWillBeAddedEvent(object, self, id)) self._setOb(id, object) object = self._getOb(id) if hasattr(aq_base(object), 'manage_afterAdd'): object.manage_afterAdd(object, self) if not suppress_events: notify(ObjectAddedEvent(object, self, id)) notifyContainerModified(self) return object def _delObject(self, id): object = self._getOb(id) notify(ObjectWillBeRemovedEvent(object, self, id)) if hasattr(aq_base(object), 'manage_beforeDelete'): object.manage_beforeDelete(object, self) self._delOb(id) notify(ObjectRemovedEvent(object, self, id)) notifyContainerModified(self) def getPhysicalPath(self): p = aq_parent(aq_inner(self)) path = (self._id,) if p is not None: path = p.getPhysicalPath() + path return path def getId(self): return self._id def reindexObjectSecurity(self): pass def contentIds(self): return ('user_bar',) def all_meta_types(self): return ({'name': 'Dummy', 'permission': 'addFoo'},) def getTypeInfo(self): return self.portal_types.getTypeInfo(self) # Can return None. @implementer(ISiteRoot) class DummySite(DummyFolder): """ A dummy portal folder. """ _domain = 'http://www.foobar.com' _path = 'bar' def absolute_url(self, relative=0): return '/'.join((self._domain, self._path, self._id)) def getPhysicalPath(self): return ('', self._path, self._id) def getPhysicalRoot(self): return self def unrestrictedTraverse(self, path, default=None, restricted=0): if path == ['acl_users']: return self.acl_users else: obj = self for id in path[3:]: obj = getattr(obj, id) return obj def userdefined_roles(self): return ('Member', 'Reviewer') def getProperty(self, id, default=None): return getattr(self, id, default) class DummyUserFolder(Implicit): """ A dummy User Folder with 2 dummy Users. 
""" id = 'acl_users' def __init__(self): setattr(self, 'user_foo', DummyUser(id='user_foo')) setattr(self, 'user_bar', DummyUser(id='user_bar')) setattr(self, 'all_powerful_Oz', OmnipotentUser()) def getUsers(self): pass def getUser(self, name): return getattr(self, name, None) def getUserById(self, id, default=None): return self.getUser(id) def userFolderDelUsers(self, names): for user_id in names: delattr(self, user_id) class DummyTool(Implicit, ActionProviderBase): """ This is a Dummy Tool that behaves as a a MemberShipTool, a URLTool and an Action Provider """ def __init__(self, anon=1): self.anon = anon # IMembershipTool def getAuthenticatedMember(self): return DummyUser() def isAnonymousUser(self): return self.anon def checkPermission(self, permissionName, object, subobjectName=None): return True # ITypesTool _type_id = 'Dummy Content' _
created
identifier_name
player.go
0
    obj.createTime = now
    obj.SetModified()
    m.goldEquipObject = obj
    return
}

// Get the gold equipment bag
func (m *PlayerGoldEquipDataManager) GetGoldEquipBag() *BodyBag {
    return m.goldEquipBag
}

// Load equipped gold equipment
func (m *PlayerGoldEquipDataManager) loadGoldEquipSlot() (err error) {
    // Load gold equipment slots
    goldEquipSlotList, err := dao.GetGoldEquipDao().GetGoldEquipSlotList(m.p.GetId())
    if err != nil {
        return
    }
    slotList := make([]*PlayerGoldEquipSlotObject, 0, len(goldEquipSlotList))
    for _, slot := range goldEquipSlotList {
        pio := NewPlayerGoldEquipSlotObject(m.p)
        err := pio.FromEntity(slot)
        if err != nil {
            return err
        }
        slotList = append(slotList, pio)
    }
    m.fixUpstarLevel(slotList)
    m.goldEquipBag = createBodyBag(m.p, slotList)
    return
}

// Fix the up-star enhancement level
func (m *PlayerGoldEquipDataManager) fixUpstarLevel(itemObjList []*PlayerGoldEquipSlotObject) {
    for _, itemObj := range itemObjList {
        if itemObj.IsEmpty() {
            continue
        }
        goldequipData, ok := itemObj.propertyData.(*goldequiptypes.GoldEquipPropertyData)
        if !ok {
            continue
        }
        itemTemp := item.GetItemService().GetItem(int(itemObj.itemId))
        if itemTemp.GetGoldEquipTemplate() == nil {
            log.Info("itemid:", itemObj.itemId)
            continue
        }
        maxLeve := itemTemp.GetGoldEquipTemplate().GetMaxUpstarLevel()
        goldequipData.FixUpstarLevel(maxLeve)
        itemObj.SetModified()
    }
}

// Get equipment
func (m *PlayerGoldEquipDataManager) GetGoldEquipByPos(pos inventorytypes.BodyPositionType) *PlayerGoldEquipSlotObject {
    item := m.goldEquipBag.GetByPosition(pos)
    if item == nil {
        return nil
    }
    return item
}

// Put on equipment
func (m *PlayerGoldEquipDataManager) PutOn(pos inventorytypes.BodyPositionType, itemId int32, level int32, bind itemtypes.ItemBindType, propertyData inventorytypes.ItemPropertyData) (flag bool) {
    flag = m.goldEquipBag.PutOn(pos, itemId, level, bind, propertyData)
    if flag {
        gameevent.Emit(goldequipeventtypes.EventTypeGoldEquipPutOn, m.p, itemId)
    }
    return
}

// Take off equipment
func (m *PlayerGoldEquipDataManager) TakeOff(pos inventorytypes.BodyPositionType) (itemId int32) {
    // Check whether it can be taken off
    flag := m.IfCanTakeOff(pos)
    if !flag {
        return
    }
    slot := m.goldEquipBag.GetByPosition(pos)
    data, _ := slot.propertyData.(*goldequiptypes.GoldEquipPropertyData)
    openlightlevel := data.OpenLightLevel
    strengthlevel := slot.newStLevel
    upstarlevel := slot.level
    itemId = m.goldEquipBag.TakeOff(pos)
    if itemId > 0 {
        gameevent.Emit(goldequipeventtypes.EventTypeGoldEquipTakeOff, m.p, itemId)
        eventData := goldequipeventtypes.CreatePlayerGoldEquipStatusEventData(pos, openlightlevel, strengthlevel, upstarlevel)
        gameevent.Emit(goldequipeventtypes.EventTypeGoldEquipStatusWhenTakeOff, m.p, eventData)
    }
    return
}

// Get suit counts
func (m *PlayerGoldEquipDataManager) GetGoldEquipGroupNum() map[int32]int32 {
    curGroupMap := make(map[int32]int32)
    for _, slot := range m.goldEquipBag.GetAll() {
        if slot.IsEmpty() {
            continue
        }
        itemTemp := item.GetItemService().GetItem(int(slot.GetItemId()))
        groupId := itemTemp.GetGoldEquipTemplate().SuitGroup
        if groupId == 0 {
            continue
        }
        _, ok := curGroupMap[groupId]
        if ok {
            curGroupMap[groupId] += int32(1)
        } else {
            curGroupMap[groupId] = int32(1)
        }
    }
    return curGroupMap
}

// Equipment changed
func (pidm *PlayerGoldEquipDataManager) GetChangedEquipmentSlotAndReset() (itemList []*PlayerGoldEquipSlotObject) {
    return pidm.goldEquipBag.GetChangedSlotAndReset()
}

// Whether it can be taken off
func (m *PlayerGoldEquipDataManager) IfCanTakeOff(pos inventorytypes.BodyPositionType) bool {
    item := m.GetGoldEquipByPos(pos)
    if item == nil {
        return false
    }
    if item.IsEmpty() {
        return false
    }
    return true
}

// Open light (consecrate)
func (m *PlayerGoldEquipDataManager) OpenLight(pos inventorytypes.BodyPositionType, isSuccess bool) bool {
    item := m.GetGoldEquipByPos(pos)
    if item == nil {
        return false
    }
    if item.IsEmpty() {
        return false
    }
    propertyData := item.propertyData.(*goldequiptypes.GoldEquipPropertyData)
    if isSuccess {
        propertyData.OpenLightLevel += 1
        propertyData.OpenTimes = 0
    } else {
        propertyData.OpenTimes += 1
    }
    now := global.GetGame().GetTimeService().Now()
    item.updateTime = now
    item.SetModified()
    return true
}

// Get the total enhancement level
func (m *PlayerGoldEquipDataManager) CountTotalUpstarLevel() int32 {
    slotList := m.goldEquipBag.GetAll()
    totalLevel := int32(0)
    for _, slot := range slotList {
        totalLevel += slot.newStLevel
    }
    return totalLevel
}

// Get the total level of inlaid gems
func (m *PlayerGoldEquipDataManager) CountTotalGemLevel() int32 {
    slotList := m.goldEquipBag.GetAll()
    totalLevel := int32(0)
    for _, slot := range slotList {
        for _, itemId := range slot.GemInfo {
            itemTemp := item.GetItemService().GetItem(int(itemId))
            totalLevel += itemTemp.TypeFlag2
        }
    }
    return totalLevel
}

func (m *PlayerGoldEquipDataManager) ToGoldEquipSlotList() (slotInfoList []*goldequiptypes.GoldEquipSlotInfo) {
    for _, slot := range m.goldEquipBag.GetAll() {
        slotInfo := &goldequiptypes.GoldEquipSlotInfo{}
        slotInfo.SlotId = int32(slot.GetSlotId())
        slotInfo.Level = slot.GetLevel()
        slotInfo.NewStLevel = slot.GetNewStLevel()
        slotInfo.ItemId = slot.GetItemId()
        slotInfo.GemUnlockInfo = slot.GemUnlockInfo
        slotInfo.Gems = slot.GemInfo
        slotInfo.CastingSpiritInfo = slot.CastingSpiritInfo
        slotInfo.ForgeSoulInfo = slot.ForgeSoulInfo
        slotInfoList = append(slotInfoList, slotInfo)
        data, ok := slot.GetPropertyData().(*goldequiptypes.GoldEquipPropertyData)
        if !ok {
            // TODO xzk: temporary bug workaround
            slotInfo.PropertyData = goldequiptypes.NewGoldEquipPropertyData()
            slotInfo.PropertyData.InitBase()
        } else {
            slotInfo.PropertyData = data
        }
    }
    return
}

// Add a gold equipment log entry
func (m *PlayerGoldEquipDataManager) AddGoldEquipLog(fenJieItemIdList []int32, rewItemStr string) {
    now := global.GetGame().GetTimeService().Now()
    var obj *PlayerGoldEquipLogObject
    if len(m.logList) >= int(maxLogLen) {
        obj = m.logList[0]
        m.logList = m.logList[1:]
    } else {
        obj = NewPlayerGoldEquipLogObject(m.p)
        id, _ := idutil.GetId()
        obj.id = id
        obj.createTime = now
    }
    obj.fenJieItemIdList = fenJieItemIdList
    obj.rewItemStr = rewItemStr
    obj.updateTime = now
    obj.SetModified()
    m.logList = append(m.logList, obj)
}

// Get the gold equipment log list
func (m *PlayerGoldEquipDataManager) GetLogList() []*PlayerGoldEquipLogObject {
    return m.logList
}

// Set auto-decompose
func (m *PlayerGoldEquipDataManager) SetAutoFenJie(isAuto int32, quality itemtypes.ItemQualityType, zhuanShu int32) {
    now := global.GetGame().GetTimeService().Now()
    m.equipSettingObj.fenJieIsAuto = isAuto
    m.equipSettingObj.fenJieQuality = quality
    m.equipSettingObj.fenJieZhuanShu = zhuanShu
    m.equipSettingObj.updateTime = now
    m.equipSettingObj.SetModified()
    // TODO: xzk25 backend log
}

// Get the gold equipment settings
func (m *PlayerGoldEquipDataManager) GetGoldEquipSetting() *PlayerGoldEquipSettingObject {
    return m.equipSettingObj
}

// Get special skills
func (m *PlayerGoldEquipDataManager) GetTeShuSkillList() (skillList []*scene.TeshuSkillObject) {
    for _, obj := range m.goldEquipBag.GetAll() {
        if obj.IsEmpty() {
            continue
        }
        itemTemplate := item.GetItemService().GetItem(int(obj.i
identifier_body
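The `GetGoldEquipGroupNum` method in the row above tallies equipped items per suit group, skipping empty slots and items whose template carries no group. A minimal self-contained sketch of that counting pattern — `slot` here is a hypothetical stand-in for `PlayerGoldEquipSlotObject` and its template lookup, not the game's actual types:

```go
package main

import "fmt"

// slot is a hypothetical stand-in carrying only what the tally needs:
// whether the body position is empty and the item's suit group id.
type slot struct {
	empty     bool
	suitGroup int32
}

// countSuitGroups mirrors GetGoldEquipGroupNum: empty slots and items
// without a suit group (id 0) are skipped, everything else is counted
// per group id.
func countSuitGroups(slots []slot) map[int32]int32 {
	counts := make(map[int32]int32)
	for _, s := range slots {
		if s.empty || s.suitGroup == 0 {
			continue
		}
		counts[s.suitGroup]++
	}
	return counts
}

func main() {
	slots := []slot{{suitGroup: 1}, {suitGroup: 1}, {suitGroup: 2}, {empty: true}}
	fmt.Println(countSuitGroups(slots)) // map[1:2 2:1]
}
```

Note that the original's `_, ok := curGroupMap[groupId]` existence check is redundant in Go: indexing a missing key yields the zero value, so `curGroupMap[groupId]++` alone behaves identically.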
player.go
DataManager) Heartbeat() {
}

// Load gold equipment logs
func (m *PlayerGoldEquipDataManager) loadLog() (err error) {
    entityList, err := dao.GetGoldEquipDao().GetPlayerGoldEquipLogEntityList(m.p.GetId())
    if err != nil {
        return
    }
    for _, entity := range entityList {
        logObj := NewPlayerGoldEquipLogObject(m.p)
        logObj.FromEntity(entity)
        m.logList = append(m.logList, logObj)
    }
    return
}

// Load gold equipment settings
func (m *PlayerGoldEquipDataManager) loadSetting() (err error) {
    entity, err := dao.GetGoldEquipDao().GetPlayerGoldEquipSettingEntity(m.p.GetId())
    if err != nil {
        return
    }
    if entity != nil {
        obj := NewPlayerGoldEquipSettingObject(m.p)
        obj.FromEntity(entity)
        m.equipSettingObj = obj
    } else {
        m.initEquipSeting()
    }
    return
}

// Load the gold equipment object
func (m *PlayerGoldEquipDataManager) loadGoldEquipObject() (err error) {
    entity, err := dao.GetGoldEquipDao().GetPlayerGoldEquipEntity(m.p.GetId())
    if err != nil {
        return
    }
    if entity != nil {
        obj := NewPlayerGoldEquipObject(m.p)
        obj.FromEntity(entity)
        m.goldEquipObject =
{
        m.initGoldEquipObject()
    }
    return
}

// Initialize settings
func (m *PlayerGoldEquipDataManager) initEquipSeting() {
    obj := NewPlayerGoldEquipSettingObject(m.p)
    id, _ := idutil.GetId()
    now := global.GetGame().GetTimeService().Now()
    obj.id = id
    obj.fenJieIsAuto = 0
    obj.fenJieQuality = 0
    // zrc: modified
    // TODO: cjb defaults to already-checked; remove this comment after review
    obj.isCheckOldSt = int32(0)
    obj.createTime = now
    obj.SetModified()
    m.equipSettingObj = obj
    return
}

// Initialize the gold equipment object
func (m *PlayerGoldEquipDataManager) initGoldEquipObject() {
    obj := NewPlayerGoldEquipObject(m.p)
    id, _ := idutil.GetId()
    now := global.GetGame().GetTimeService().Now()
    obj.id = id
    obj.power = 0
    obj.createTime = now
    obj.SetModified()
    m.goldEquipObject = obj
    return
}

// Get the gold equipment bag
func (m *PlayerGoldEquipDataManager) GetGoldEquipBag() *BodyBag {
    return m.goldEquipBag
}

// Load equipped gold equipment
func (m *PlayerGoldEquipDataManager) loadGoldEquipSlot() (err error) {
    // Load gold equipment slots
    goldEquipSlotList, err := dao.GetGoldEquipDao().GetGoldEquipSlotList(m.p.GetId())
    if err != nil {
        return
    }
    slotList := make([]*PlayerGoldEquipSlotObject, 0, len(goldEquipSlotList))
    for _, slot := range goldEquipSlotList {
        pio := NewPlayerGoldEquipSlotObject(m.p)
        err := pio.FromEntity(slot)
        if err != nil {
            return err
        }
        slotList = append(slotList, pio)
    }
    m.fixUpstarLevel(slotList)
    m.goldEquipBag = createBodyBag(m.p, slotList)
    return
}

// Fix the up-star enhancement level
func (m *PlayerGoldEquipDataManager) fixUpstarLevel(itemObjList []*PlayerGoldEquipSlotObject) {
    for _, itemObj := range itemObjList {
        if itemObj.IsEmpty() {
            continue
        }
        goldequipData, ok := itemObj.propertyData.(*goldequiptypes.GoldEquipPropertyData)
        if !ok {
            continue
        }
        itemTemp := item.GetItemService().GetItem(int(itemObj.itemId))
        if itemTemp.GetGoldEquipTemplate() == nil {
            log.Info("itemid:", itemObj.itemId)
            continue
        }
        maxLeve := itemTemp.GetGoldEquipTemplate().GetMaxUpstarLevel()
        goldequipData.FixUpstarLevel(maxLeve)
        itemObj.SetModified()
    }
}

// Get equipment
func (m *PlayerGoldEquipDataManager) GetGoldEquipByPos(pos inventorytypes.BodyPositionType) *PlayerGoldEquipSlotObject {
    item := m.goldEquipBag.GetByPosition(pos)
    if item == nil {
        return nil
    }
    return item
}

// Put on equipment
func (m *PlayerGoldEquipDataManager) PutOn(pos inventorytypes.BodyPositionType, itemId int32, level int32, bind itemtypes.ItemBindType, propertyData inventorytypes.ItemPropertyData) (flag bool) {
    flag = m.goldEquipBag.PutOn(pos, itemId, level, bind, propertyData)
    if flag {
        gameevent.Emit(goldequipeventtypes.EventTypeGoldEquipPutOn, m.p, itemId)
    }
    return
}

// Take off equipment
func (m *PlayerGoldEquipDataManager) TakeOff(pos inventorytypes.BodyPositionType) (itemId int32) {
    // Check whether it can be taken off
    flag := m.IfCanTakeOff(pos)
    if !flag {
        return
    }
    slot := m.goldEquipBag.GetByPosition(pos)
    data, _ := slot.propertyData.(*goldequiptypes.GoldEquipPropertyData)
    openlightlevel := data.OpenLightLevel
    strengthlevel := slot.newStLevel
    upstarlevel := slot.level
    itemId = m.goldEquipBag.TakeOff(pos)
    if itemId > 0 {
        gameevent.Emit(goldequipeventtypes.EventTypeGoldEquipTakeOff, m.p, itemId)
        eventData := goldequipeventtypes.CreatePlayerGoldEquipStatusEventData(pos, openlightlevel, strengthlevel, upstarlevel)
        gameevent.Emit(goldequipeventtypes.EventTypeGoldEquipStatusWhenTakeOff, m.p, eventData)
    }
    return
}

// Get suit counts
func (m *PlayerGoldEquipDataManager) GetGoldEquipGroupNum() map[int32]int32 {
    curGroupMap := make(map[int32]int32)
    for _, slot := range m.goldEquipBag.GetAll() {
        if slot.IsEmpty() {
            continue
        }
        itemTemp := item.GetItemService().GetItem(int(slot.GetItemId()))
        groupId := itemTemp.GetGoldEquipTemplate().SuitGroup
        if groupId == 0 {
            continue
        }
        _, ok := curGroupMap[groupId]
        if ok {
            curGroupMap[groupId] += int32(1)
        } else {
            curGroupMap[groupId] = int32(1)
        }
    }
    return curGroupMap
}

// Equipment changed
func (pidm *PlayerGoldEquipDataManager) GetChangedEquipmentSlotAndReset() (itemList []*PlayerGoldEquipSlotObject) {
    return pidm.goldEquipBag.GetChangedSlotAndReset()
}

// Whether it can be taken off
func (m *PlayerGoldEquipDataManager) IfCanTakeOff(pos inventorytypes.BodyPositionType) bool {
    item := m.GetGoldEquipByPos(pos)
    if item == nil {
        return false
    }
    if item.IsEmpty() {
        return false
    }
    return true
}

// Open light (consecrate)
func (m *PlayerGoldEquipDataManager) OpenLight(pos inventorytypes.BodyPositionType, isSuccess bool) bool {
    item := m.GetGoldEquipByPos(pos)
    if item == nil {
        return false
    }
    if item.IsEmpty() {
        return false
    }
    propertyData := item.propertyData.(*goldequiptypes.GoldEquipPropertyData)
    if isSuccess {
        propertyData.OpenLightLevel += 1
        propertyData.OpenTimes = 0
    } else {
        propertyData.OpenTimes += 1
    }
    now := global.GetGame().GetTimeService().Now()
    item.updateTime = now
    item.SetModified()
    return true
}

// Get the total enhancement level
func (m *PlayerGoldEquipDataManager) CountTotalUpstarLevel() int32 {
    slotList := m.goldEquipBag.GetAll()
    totalLevel := int32(0)
    for _, slot := range slotList {
        totalLevel += slot.newStLevel
    }
    return totalLevel
}

// Get the total level of inlaid gems
func (m *PlayerGoldEquipDataManager) CountTotalGemLevel() int32 {
    slotList := m.goldEquipBag.GetAll()
    totalLevel := int32(0)
    for _, slot := range slotList {
        for _, itemId := range slot.GemInfo {
            itemTemp := item.GetItemService().GetItem(int(itemId))
            totalLevel += itemTemp.TypeFlag2
        }
    }
    return totalLevel
}

func (m *PlayerGoldEquipDataManager) ToGoldEquipSlotList() (slotInfoList []*goldequiptypes.GoldEquipSlotInfo) {
    for _, slot := range m.goldEquipBag.GetAll() {
        slotInfo := &goldequiptypes.GoldEquipSlotInfo{}
        slotInfo.SlotId = int32(slot.GetSlotId())
        slotInfo.Level = slot.GetLevel()
        slotInfo.NewStLevel = slot.GetNewStLevel()
        slotInfo.ItemId = slot.GetItemId()
        slotInfo.GemUnlockInfo = slot.GemUnlockInfo
        slotInfo.Gems = slot.GemInfo
        slotInfo.CastingSpiritInfo = slot.CastingSpiritInfo
        slotInfo.ForgeSoulInfo = slot.ForgeSoulInfo
        slotInfoList = append(slotInfoList, slotInfo)
        data, ok := slot.GetPropertyData().(*goldequiptypes.GoldEquipPropertyData)
obj } else
conditional_block
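The `loadSetting`/`loadGoldEquipObject` pair split across this row follows one load-or-initialize pattern: fetch the persisted entity, hydrate the in-memory object from it when present, otherwise build a default and mark it modified so the next flush persists it. A stripped-down sketch of that pattern, with hypothetical `Entity` and `Settings` types standing in for the DAO entities and player objects:

```go
package main

import "fmt"

// Entity is a hypothetical persisted row; Settings is its in-memory form.
type Entity struct{ FenJieIsAuto int32 }

type Settings struct {
	fenJieIsAuto int32
	modified     bool // dirty flag, analogous to SetModified()
}

func (s *Settings) fromEntity(e *Entity) { s.fenJieIsAuto = e.FenJieIsAuto }

// loadOrInitSettings mirrors loadSetting: a nil entity (no row persisted
// yet) falls back to defaults, and only the freshly initialized object is
// flagged dirty so it gets written out on the next save.
func loadOrInitSettings(e *Entity) *Settings {
	s := &Settings{}
	if e != nil {
		s.fromEntity(e)
	} else {
		s.fenJieIsAuto = 0 // default: auto-decompose off
		s.modified = true
	}
	return s
}

func main() {
	fmt.Println(loadOrInitSettings(nil))                      // &{0 true}: defaults, dirty
	fmt.Println(loadOrInitSettings(&Entity{FenJieIsAuto: 1})) // &{1 false}: hydrated
}
```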
player.go
DataManager) Heartbeat() {
}

// Load gold equipment logs
func (m *PlayerGoldEquipDataManager) loadLog() (err error) {
    entityList, err := dao.GetGoldEquipDao().GetPlayerGoldEquipLogEntityList(m.p.GetId())
    if err != nil {
        return
    }
    for _, entity := range entityList {
        logObj := NewPlayerGoldEquipLogObject(m.p)
        logObj.FromEntity(entity)
        m.logList = append(m.logList, logObj)
    }
    return
}

// Load gold equipment settings
func (m *PlayerGoldEquipDataManager) loadSetting() (err error) {
    entity, err := dao.GetGoldEquipDao().GetPlayerGoldEquipSettingEntity(m.p.GetId())
    if err != nil {
        return
    }
    if entity != nil {
        obj := NewPlayerGoldEquipSettingObject(m.p)
        obj.FromEntity(entity)
        m.equipSettingObj = obj
    } else {
        m.initEquipSeting()
    }
    return
}

// Load the gold equipment object
func (m *PlayerGoldEquipDataManager) loadGoldEquipObject() (err error) {
    entity, err := dao.GetGoldEquipDao().GetPlayerGoldEquipEntity(m.p.GetId())
    if err != nil {
        return
    }
    if entity != nil {
        obj := NewPlayerGoldEquipObject(m.p)
        obj.FromEntity(entity)
        m.goldEquipObject = obj
    } else {
        m.initGoldEquipObject()
    }
    return
}

// Initialize settings
func (m *PlayerGoldEquipDataManager) initEquipSeting() {
    obj := NewPlayerGoldEquipSettingObject(m.p)
    id, _ := idutil.GetId()
    now := global.GetGame().GetTimeService().Now()
    obj.id = id
    obj.fenJieIsAuto = 0
    obj.fenJieQuality = 0
    // zrc: modified
    // TODO: cjb defaults to already-checked; remove this comment after review
    obj.isCheckOldSt = int32(0)
    obj.createTime = now
    obj.SetModified()
    m.equipSettingObj = obj
    return
}

// Initialize the gold equipment object
func (m *PlayerGoldEquipDataManager) initGoldEquipObject() {
    obj := NewPlayerGoldEquipObject(m.p)
    id, _ := idutil.GetId()
    now := global.GetGame().GetTimeService().Now()
    obj.id = id
    obj.power = 0
    obj.createTime = now
    obj.SetModified()
    m.goldEquipObject = obj
    return
}

// Get the gold equipment bag
func (m *PlayerGoldEquipDataManager) GetGoldEquipBag() *BodyBag {
    return m.goldEquipBag
}

// Load equipped gold equipment
func (m *PlayerGoldEquipDataManager) loadGoldEquipSlot() (err error) {
    // Load gold equipment slots
    goldEquipSlotList, err := dao.GetGoldEquipDao().GetGoldEquipSlotList(m.p.GetId())
    if err != nil {
        return
    }
    slotList := make([]*PlayerGoldEquipSlotObject, 0, len(goldEquipSlotList))
    for _, slot := range goldEquipSlotList {
        pio := NewPlayerGoldEquipSlotObject(m.p)
        err := pio.FromEntity(slot)
        if err != nil {
            return err
        }
        slotList = append(slotList, pio)
    }
    m.fixUpstarLevel(slotList)
    m.goldEquipBag = createBodyBag(m.p, slotList)
    return
}

// Fix the up-star enhancement level
func (m *PlayerGoldEquipDataManager) fixUpstarLevel(itemObjList []*PlayerGoldEquipSlotObject) {
    for _, itemObj := range itemObjList {
        if itemObj.IsEmpty() {
            continue
        }
        goldequipData, ok := itemObj.propertyData.(*goldequiptypes.GoldEquipPropertyData)
        if !ok {
            continue
        }
        itemTemp := item.GetItemService().GetItem(int(itemObj.itemId))
        if itemTemp.GetGoldEquipTemplate() == nil {
            log.Info("itemid:", itemObj.itemId)
            continue
        }
        maxLeve := itemTemp.GetGoldEquipTemplate().GetMaxUpstarLevel()
        goldequipData.FixUpstarLevel(maxLeve)
func (m *PlayerGoldEquipDataManager) GetGoldEquipByPos(pos inventorytypes.BodyPositionType) *PlayerGoldEquipSlotObject {
    item := m.goldEquipBag.GetByPosition(pos)
    if item == nil {
        return nil
    }
    return item
}

// Put on equipment
func (m *PlayerGoldEquipDataManager) PutOn(pos inventorytypes.BodyPositionType, itemId int32, level int32, bind itemtypes.ItemBindType, propertyData inventorytypes.ItemPropertyData) (flag bool) {
    flag = m.goldEquipBag.PutOn(pos, itemId, level, bind, propertyData)
    if flag {
        gameevent.Emit(goldequipeventtypes.EventTypeGoldEquipPutOn, m.p, itemId)
    }
    return
}

// Take off equipment
func (m *PlayerGoldEquipDataManager) TakeOff(pos inventorytypes.BodyPositionType) (itemId int32) {
    // Check whether it can be taken off
    flag := m.IfCanTakeOff(pos)
    if !flag {
        return
    }
    slot := m.goldEquipBag.GetByPosition(pos)
    data, _ := slot.propertyData.(*goldequiptypes.GoldEquipPropertyData)
    openlightlevel := data.OpenLightLevel
    strengthlevel := slot.newStLevel
    upstarlevel := slot.level
    itemId = m.goldEquipBag.TakeOff(pos)
    if itemId > 0 {
        gameevent.Emit(goldequipeventtypes.EventTypeGoldEquipTakeOff, m.p, itemId)
        eventData := goldequipeventtypes.CreatePlayerGoldEquipStatusEventData(pos, openlightlevel, strengthlevel, upstarlevel)
        gameevent.Emit(goldequipeventtypes.EventTypeGoldEquipStatusWhenTakeOff, m.p, eventData)
    }
    return
}

// Get suit counts
func (m *PlayerGoldEquipDataManager) GetGoldEquipGroupNum() map[int32]int32 {
    curGroupMap := make(map[int32]int32)
    for _, slot := range m.goldEquipBag.GetAll() {
        if slot.IsEmpty() {
            continue
        }
        itemTemp := item.GetItemService().GetItem(int(slot.GetItemId()))
        groupId := itemTemp.GetGoldEquipTemplate().SuitGroup
        if groupId == 0 {
            continue
        }
        _, ok := curGroupMap[groupId]
        if ok {
            curGroupMap[groupId] += int32(1)
        } else {
            curGroupMap[groupId] = int32(1)
        }
    }
    return curGroupMap
}

// Equipment changed
func (pidm *PlayerGoldEquipDataManager) GetChangedEquipmentSlotAndReset() (itemList []*PlayerGoldEquipSlotObject) {
    return pidm.goldEquipBag.GetChangedSlotAndReset()
}

// Whether it can be taken off
func (m *PlayerGoldEquipDataManager) IfCanTakeOff(pos inventorytypes.BodyPositionType) bool {
    item := m.GetGoldEquipByPos(pos)
    if item == nil {
        return false
    }
    if item.IsEmpty() {
        return false
    }
    return true
}

// Open light (consecrate)
func (m *PlayerGoldEquipDataManager) OpenLight(pos inventorytypes.BodyPositionType, isSuccess bool) bool {
    item := m.GetGoldEquipByPos(pos)
    if item == nil {
        return false
    }
    if item.IsEmpty() {
        return false
    }
    propertyData := item.propertyData.(*goldequiptypes.GoldEquipPropertyData)
    if isSuccess {
        propertyData.OpenLightLevel += 1
        propertyData.OpenTimes = 0
    } else {
        propertyData.OpenTimes += 1
    }
    now := global.GetGame().GetTimeService().Now()
    item.updateTime = now
    item.SetModified()
    return true
}

// Get the total enhancement level
func (m *PlayerGoldEquipDataManager) CountTotalUpstarLevel() int32 {
    slotList := m.goldEquipBag.GetAll()
    totalLevel := int32(0)
    for _, slot := range slotList {
        totalLevel += slot.newStLevel
    }
    return totalLevel
}

// Get the total level of inlaid gems
func (m *PlayerGoldEquipDataManager) CountTotalGemLevel() int32 {
    slotList := m.goldEquipBag.GetAll()
    totalLevel := int32(0)
    for _, slot := range slotList {
        for _, itemId := range slot.GemInfo {
            itemTemp := item.GetItemService().GetItem(int(itemId))
            totalLevel += itemTemp.TypeFlag2
        }
    }
    return totalLevel
}

func (m *PlayerGoldEquipDataManager) ToGoldEquipSlotList() (slotInfoList []*goldequiptypes.GoldEquipSlotInfo) {
    for _, slot := range m.goldEquipBag.GetAll() {
        slotInfo := &goldequiptypes.GoldEquipSlotInfo{}
        slotInfo.SlotId = int32(slot.GetSlotId())
        slotInfo.Level = slot.GetLevel()
        slotInfo.NewStLevel = slot.GetNewStLevel()
        slotInfo.ItemId =
slot.GetItemId() slotInfo.GemUnlockInfo = slot.GemUnlockInfo slotInfo.Gems = slot.GemInfo slotInfo.CastingSpiritInfo = slot.CastingSpiritInfo slotInfo.ForgeSoulInfo = slot.ForgeSoulInfo slotInfoList = append(slotInfoList, slotInfo) data, ok := slot.GetPropertyData().(*goldequiptypes.GoldEquipPropertyData) if
itemObj.SetModified()
    }
}

// Get equipment
random_line_split
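`fixUpstarLevel`, whose tail forms this row's middle, walks every equipped slot on load and clamps the stored up-star level to the maximum the item template allows. The property-side `FixUpstarLevel` is not shown in this file; a minimal sketch under the assumption that it is a simple clamp:

```go
package main

import "fmt"

// GoldEquipPropertyData is a hypothetical stand-in holding only the level.
type GoldEquipPropertyData struct{ UpstarLevel int32 }

// FixUpstarLevel assumes the original clamps a stored level that exceeds
// the template maximum, e.g. after the template cap was lowered between
// releases while old player rows kept the higher value.
func (d *GoldEquipPropertyData) FixUpstarLevel(max int32) {
	if d.UpstarLevel > max {
		d.UpstarLevel = max
	}
}

func main() {
	d := &GoldEquipPropertyData{UpstarLevel: 15}
	d.FixUpstarLevel(10)       // template now allows at most 10
	fmt.Println(d.UpstarLevel) // 10
}
```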
player.go
*PlayerGoldEquipDataManager) loadGoldEquipSlot() (err error) {
    // Load gold equipment slots
    goldEquipSlotList, err := dao.GetGoldEquipDao().GetGoldEquipSlotList(m.p.GetId())
    if err != nil {
        return
    }
    slotList := make([]*PlayerGoldEquipSlotObject, 0, len(goldEquipSlotList))
    for _, slot := range goldEquipSlotList {
        pio := NewPlayerGoldEquipSlotObject(m.p)
        err := pio.FromEntity(slot)
        if err != nil {
            return err
        }
        slotList = append(slotList, pio)
    }
    m.fixUpstarLevel(slotList)
    m.goldEquipBag = createBodyBag(m.p, slotList)
    return
}

// Fix the up-star enhancement level
func (m *PlayerGoldEquipDataManager) fixUpstarLevel(itemObjList []*PlayerGoldEquipSlotObject) {
    for _, itemObj := range itemObjList {
        if itemObj.IsEmpty() {
            continue
        }
        goldequipData, ok := itemObj.propertyData.(*goldequiptypes.GoldEquipPropertyData)
        if !ok {
            continue
        }
        itemTemp := item.GetItemService().GetItem(int(itemObj.itemId))
        if itemTemp.GetGoldEquipTemplate() == nil {
            log.Info("itemid:", itemObj.itemId)
            continue
        }
        maxLeve := itemTemp.GetGoldEquipTemplate().GetMaxUpstarLevel()
        goldequipData.FixUpstarLevel(maxLeve)
        itemObj.SetModified()
    }
}

// Get equipment
func (m *PlayerGoldEquipDataManager) GetGoldEquipByPos(pos inventorytypes.BodyPositionType) *PlayerGoldEquipSlotObject {
    item := m.goldEquipBag.GetByPosition(pos)
    if item == nil {
        return nil
    }
    return item
}

// Put on equipment
func (m *PlayerGoldEquipDataManager) PutOn(pos inventorytypes.BodyPositionType, itemId int32, level int32, bind itemtypes.ItemBindType, propertyData inventorytypes.ItemPropertyData) (flag bool) {
    flag = m.goldEquipBag.PutOn(pos, itemId, level, bind, propertyData)
    if flag {
        gameevent.Emit(goldequipeventtypes.EventTypeGoldEquipPutOn, m.p, itemId)
    }
    return
}

// Take off equipment
func (m *PlayerGoldEquipDataManager) TakeOff(pos inventorytypes.BodyPositionType) (itemId int32) {
    // Check whether it can be taken off
    flag := m.IfCanTakeOff(pos)
    if !flag {
        return
    }
    slot := m.goldEquipBag.GetByPosition(pos)
    data, _ := slot.propertyData.(*goldequiptypes.GoldEquipPropertyData)
    openlightlevel := data.OpenLightLevel
    strengthlevel := slot.newStLevel
    upstarlevel := slot.level
    itemId = m.goldEquipBag.TakeOff(pos)
    if itemId > 0 {
        gameevent.Emit(goldequipeventtypes.EventTypeGoldEquipTakeOff, m.p, itemId)
        eventData := goldequipeventtypes.CreatePlayerGoldEquipStatusEventData(pos, openlightlevel, strengthlevel, upstarlevel)
        gameevent.Emit(goldequipeventtypes.EventTypeGoldEquipStatusWhenTakeOff, m.p, eventData)
    }
    return
}

// Get suit counts
func (m *PlayerGoldEquipDataManager) GetGoldEquipGroupNum() map[int32]int32 {
    curGroupMap := make(map[int32]int32)
    for _, slot := range m.goldEquipBag.GetAll() {
        if slot.IsEmpty() {
            continue
        }
        itemTemp := item.GetItemService().GetItem(int(slot.GetItemId()))
        groupId := itemTemp.GetGoldEquipTemplate().SuitGroup
        if groupId == 0 {
            continue
        }
        _, ok := curGroupMap[groupId]
        if ok {
            curGroupMap[groupId] += int32(1)
        } else {
            curGroupMap[groupId] = int32(1)
        }
    }
    return curGroupMap
}

// Equipment changed
func (pidm *PlayerGoldEquipDataManager) GetChangedEquipmentSlotAndReset() (itemList []*PlayerGoldEquipSlotObject) {
    return pidm.goldEquipBag.GetChangedSlotAndReset()
}

// Whether it can be taken off
func (m *PlayerGoldEquipDataManager) IfCanTakeOff(pos inventorytypes.BodyPositionType) bool {
    item := m.GetGoldEquipByPos(pos)
    if item == nil {
        return false
    }
    if item.IsEmpty() {
        return false
    }
    return true
}

// Open light (consecrate)
func (m *PlayerGoldEquipDataManager) OpenLight(pos inventorytypes.BodyPositionType, isSuccess bool) bool {
    item := m.GetGoldEquipByPos(pos)
    if item == nil {
        return false
    }
    if item.IsEmpty() {
        return false
    }
    propertyData := item.propertyData.(*goldequiptypes.GoldEquipPropertyData)
    if isSuccess {
        propertyData.OpenLightLevel += 1
        propertyData.OpenTimes = 0
    } else {
        propertyData.OpenTimes += 1
    }
    now := global.GetGame().GetTimeService().Now()
    item.updateTime = now
    item.SetModified()
    return true
}

// Get the total enhancement level
func (m *PlayerGoldEquipDataManager) CountTotalUpstarLevel() int32 {
    slotList := m.goldEquipBag.GetAll()
    totalLevel := int32(0)
    for _, slot := range slotList {
        totalLevel += slot.newStLevel
    }
    return totalLevel
}

// Get the total level of inlaid gems
func (m *PlayerGoldEquipDataManager) CountTotalGemLevel() int32 {
    slotList := m.goldEquipBag.GetAll()
    totalLevel := int32(0)
    for _, slot := range slotList {
        for _, itemId := range slot.GemInfo {
            itemTemp := item.GetItemService().GetItem(int(itemId))
            totalLevel += itemTemp.TypeFlag2
        }
    }
    return totalLevel
}

func (m *PlayerGoldEquipDataManager) ToGoldEquipSlotList() (slotInfoList []*goldequiptypes.GoldEquipSlotInfo) {
    for _, slot := range m.goldEquipBag.GetAll() {
        slotInfo := &goldequiptypes.GoldEquipSlotInfo{}
        slotInfo.SlotId = int32(slot.GetSlotId())
        slotInfo.Level = slot.GetLevel()
        slotInfo.NewStLevel = slot.GetNewStLevel()
        slotInfo.ItemId = slot.GetItemId()
        slotInfo.GemUnlockInfo = slot.GemUnlockInfo
        slotInfo.Gems = slot.GemInfo
        slotInfo.CastingSpiritInfo = slot.CastingSpiritInfo
        slotInfo.ForgeSoulInfo = slot.ForgeSoulInfo
        slotInfoList = append(slotInfoList, slotInfo)
        data, ok := slot.GetPropertyData().(*goldequiptypes.GoldEquipPropertyData)
        if !ok {
            // TODO xzk: temporary bug workaround
            slotInfo.PropertyData = goldequiptypes.NewGoldEquipPropertyData()
            slotInfo.PropertyData.InitBase()
        } else {
            slotInfo.PropertyData = data
        }
    }
    return
}

// Add a gold equipment log entry
func (m *PlayerGoldEquipDataManager) AddGoldEquipLog(fenJieItemIdList []int32, rewItemStr string) {
    now := global.GetGame().GetTimeService().Now()
    var obj *PlayerGoldEquipLogObject
    if len(m.logList) >= int(maxLogLen) {
        obj = m.logList[0]
        m.logList = m.logList[1:]
    } else {
        obj = NewPlayerGoldEquipLogObject(m.p)
        id, _ := idutil.GetId()
        obj.id = id
        obj.createTime = now
    }
    obj.fenJieItemIdList = fenJieItemIdList
    obj.rewItemStr = rewItemStr
    obj.updateTime = now
    obj.SetModified()
    m.logList = append(m.logList, obj)
}

// Get the gold equipment log list
func (m *PlayerGoldEquipDataManager) GetLogList() []*PlayerGoldEquipLogObject {
    return m.logList
}

// Set auto-decompose
func (m *PlayerGoldEquipDataManager) SetAutoFenJie(isAuto int32, quality itemtypes.ItemQualityType, zhuanShu int32) {
    now := global.GetGame().GetTimeService().Now()
    m.equipSettingObj.fenJieIsAuto = isAuto
    m.equipSettingObj.fenJieQuality = quality
    m.equipSettingObj.fenJieZhuanShu = zhuanShu
    m.equipSettingObj.updateTime = now
    m.equipSettingObj.SetModified()
    // TODO: xzk25 backend log
}

// Get the gold equipment settings
func (m *PlayerGoldEquipDataManager) GetGoldEquipSetting() *PlayerGoldEquipSettingObject {
    return m.equipSettingObj
}

// Get special skills
func (m *PlayerGoldEquipDataManager) GetTeShuSkillList() (skillList []*scene.TeshuSkillObject) {
    for _, obj := range m.goldEquipBag.GetAll() {
        if obj.IsEmpty() {
            continue
        }
        itemTemplate := item.GetItemService().GetItem(int(obj.itemId))
        if itemTemplate == nil {
            continue
        }
        goldequipTemplate := itemTemplate.GetGoldEquipTemplate()
        if goldequipTemplate == nil {
            continue
        }
        if !goldequipTemplate.IsGodCastingEquip()
{ continue
identifier_name
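Every row in this dump splits one source file into a prefix, a middle, and a suffix, tagged with how the split point was chosen (`random_line_split`, `identifier_body`, `identifier_name`, `conditional_block`); concatenating the three fields restores the original file. A sketch of that reassembly with a hypothetical `FIMExample` struct mirroring the columns — the field names are illustrative, not the dataset's actual schema definition:

```go
package main

import "fmt"

// FIMExample is a hypothetical struct mirroring this dump's columns.
type FIMExample struct {
	FileName string
	Prefix   string
	Suffix   string
	Middle   string // the span cut out between prefix and suffix
	FIMType  string // e.g. "random_line_split", "identifier_body"
}

// Reassemble restores the original source text: the middle slots back
// in between the prefix and the suffix.
func (e FIMExample) Reassemble() string {
	return e.Prefix + e.Middle + e.Suffix
}

func main() {
	ex := FIMExample{
		FileName: "dummy.py",
		Prefix:   "    def Type(self):\n        ",
		Middle:   "return 'Dummy Content Title'",
		Suffix:   "\n\n    def __call__(self):",
		FIMType:  "identifier_body",
	}
	fmt.Println(ex.Reassemble())
}
```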